
[lvm-devel] [PATCH LVM2] (7/12) update parallel_areas for ongoing allocation



This patch adds _update_parallel_areas() to update the parallel_areas
list based on the current allocation.
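
For reference, a seg_pvs describes which PVs are already in use for a
contiguous range of logical extents. The sketch below shows the fields
as they are used in the hunks that follow (the actual definition is in
the LVM2 source; the comments are mine):

	struct seg_pvs {
		struct list list;	/* link in the parallel_areas list */
		struct list pvs;	/* struct pv_list: PVs used in this range */
		uint32_t le;		/* first logical extent of the range */
		uint32_t len;		/* number of extents covered */
	};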

o In some cases (e.g. lvcreate), ah->parallel_areas was not initialized.
  _alloc_init() now initializes it if none is provided, and
  build_parallel_areas_from_lv() is allowed to return an empty list.

o _update_parallel_areas() and its helper functions are added.
  It takes a range and a PV and reflects them in the existing
  parallel_areas list; a simplified sketch of the splitting logic
  follows below.
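
To make the splitting semantics concrete, here is a minimal,
self-contained model of what _update_parallel_areas() does to the
le/len ranges. Everything in it is hypothetical: a flat array and
PV-name strings stand in for LVM2's struct list, seg_pvs, pv_list and
the pool allocator; only the boundary arithmetic mirrors the patch.

/* parallel_split_demo.c: standalone model of the seg_pvs splitting
 * performed by _update_parallel_areas().  All names are made up. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define MAX_SEGS 8

struct seg {
	uint32_t le;		/* first logical extent */
	uint32_t len;		/* number of extents */
	char pvs[64];		/* PVs already used for this range */
};

static struct seg segs[MAX_SEGS] = { { 0, 100, "" } };
static int nsegs = 1;

/* Ensure a segment boundary exists at le (the _split_seg_pvs step).
 * The new tail segment inherits the PV list, as _copy_pv_list() does. */
static void split_at(uint32_t le)
{
	int i;

	for (i = 0; i < nsegs; i++) {
		struct seg *s = &segs[i];

		if (le <= s->le || le >= s->le + s->len)
			continue;	/* no split needed in this segment */

		memmove(s + 1, s, (nsegs - i) * sizeof(*s));
		nsegs++;
		s[1].le = le;
		s[1].len = s->le + s->len - le;
		s->len = le - s->le;
		return;
	}
}

/* Record that pv occupies the extents [le, le + len). */
static void update(uint32_t le, uint32_t len, const char *pv)
{
	int i;

	split_at(le);
	split_at(le + len);

	for (i = 0; i < nsegs; i++) {
		struct seg *s = &segs[i];

		if (s->le >= le && s->le + s->len <= le + len) {
			if (s->pvs[0])
				strcat(s->pvs, ",");
			strcat(s->pvs, pv);
		}
	}
}

int main(void)
{
	int i;

	update(20, 30, "pv1");	/* pv1 allocated on extents 20..49 */
	update(40, 40, "pv2");	/* pv2 overlaps the tail of that range */

	for (i = 0; i < nsegs; i++)
		printf("le=%3u len=%3u pvs=[%s]\n",
		       segs[i].le, segs[i].len, segs[i].pvs);
	return 0;
}

Run as-is, this should show the 0..99 extents split at 20, 40, 50 and
80, with both pv1 and pv2 recorded on the overlapping 40..49 range.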


$ diffstat -p1 07.update_parallel.patch
 lib/metadata/lv_manip.c |  150 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 150 insertions(+)


Thanks,
-- 
Jun'ichi Nomura, NEC Corporation of America


diff -X dontdiff -urp LVM2.06.add_pvs/lib/metadata/lv_manip.c LVM2.07.update_parallel/lib/metadata/lv_manip.c
--- LVM2.06.add_pvs/lib/metadata/lv_manip.c	2006-10-13 21:19:01.000000000 -0400
+++ LVM2.07.update_parallel/lib/metadata/lv_manip.c	2006-10-13 22:28:22.000000000 -0400
@@ -603,6 +603,9 @@ static struct alloc_handle *_alloc_init(
 
 	ah->mirrored_pv = mirrored_pv;
 	ah->mirrored_pe = mirrored_pe;
+	if (!parallel_areas)
+		/* set up empty parallel_areas if not provided */
+		parallel_areas = build_parallel_areas_from_lv(cmd, NULL);
 	ah->parallel_areas = parallel_areas;
 
 	return ah;
@@ -787,6 +790,140 @@ static struct seg_pvs* _find_seg_pvs(str
 }
 
 /*
+ * create an empty seg_pvs covering (le, len)
+ * if a pv is given, add it to the new seg_pvs
+ */
+static struct seg_pvs * _create_new_seg_pvs(struct cmd_context *cmd,
+					    uint32_t le, uint32_t len,
+					    struct physical_volume *pv)
+{
+	struct seg_pvs *spvs;
+
+	if (!(spvs = dm_pool_zalloc(cmd->mem, sizeof(*spvs)))) {
+		log_error("allocation failed");
+		return NULL;
+	}
+
+	list_init(&spvs->pvs);
+	spvs->le = le;
+	spvs->len = len;
+
+	if (pv) {
+		struct pv_segment peg;
+		peg.pv = pv;
+		if (!_add_pvs(cmd, &peg, 0, spvs))
+			return NULL;
+	}
+
+	return spvs;
+}
+
+/*
+ * copy a list of struct pv_list
+ */
+static int _copy_pv_list(struct cmd_context *cmd,
+			 struct list *new, struct list *orig)
+{
+	struct pv_list *pvl1, *pvl2;
+
+	list_iterate_items(pvl1, orig) {
+		if (!(pvl2 = dm_pool_alloc(cmd->mem, sizeof(*pvl2)))) {
+			log_error("pv_list allocation failed");
+			return 0;
+		}
+		*pvl2 = *pvl1;
+		list_add(new, &pvl2->list);
+	}
+	return 1;
+}
+
+/*
+ * split the given seg_pvs at new_le
+ * returns the newly allocated seg_pvs (the latter half)
+ */
+static struct seg_pvs * _split_seg_pvs(struct cmd_context *cmd,
+				       struct seg_pvs *orig_spvs,
+				       uint32_t new_le)
+{
+	struct seg_pvs *spvs;
+
+	if (new_le < orig_spvs->le || new_le >= orig_spvs->le + orig_spvs->len)
+		return NULL;
+
+	if (!(spvs = _create_new_seg_pvs(cmd, new_le,
+			    orig_spvs->le + orig_spvs->len - new_le, NULL)))
+		return NULL;
+
+	orig_spvs->len = new_le - orig_spvs->le;
+
+	if (!_copy_pv_list(cmd, &spvs->pvs, &orig_spvs->pvs))
+		return NULL;
+
+	list_add_h(&orig_spvs->list, &spvs->list);
+	return spvs;
+}
+
+/*
+ * add pv to the parallel_areas list for the range (le, len)
+ */
+static int _update_parallel_areas(struct cmd_context *cmd,
+				  struct list *parallel_areas,
+				  uint32_t le, uint32_t len,
+				  struct physical_volume *pv)
+{
+	struct seg_pvs *spvs;
+	struct pv_segment peg;
+
+	spvs = _find_seg_pvs(parallel_areas, le);
+
+	if (!spvs) {
+		/* parallel_areas should not be sparse */
+		if (le && !list_empty(parallel_areas) &&
+		    !_find_seg_pvs(parallel_areas, le - 1)) {
+			log_debug("sparse spvs list requested: %d", le);
+			return 0;
+		}
+		if (!(spvs = _create_new_seg_pvs(cmd, le, len, pv)))
+			return 0;
+
+		list_add(parallel_areas, &spvs->list);
+		goto out;
+	} else if (le != spvs->le || len != spvs->len) {
+		uint32_t next_le, new_len;
+
+		if (le + len > spvs->le + spvs->len) {
+			/* recursively handle the part beyond this seg_pvs */
+			next_le = spvs->le + spvs->len;
+			new_len = le + len - next_le;
+			if (!_update_parallel_areas(cmd, parallel_areas,
+					       next_le, new_len, pv))
+				return 0;
+		} else if (le + len < spvs->le + spvs->len) {
+			if (!_split_seg_pvs(cmd, spvs, le + len))
+				return 0;
+		}
+
+		if (le != spvs->le) { /* le > spvs->le */
+			/* Need to split spvs */
+			if (!(spvs = _split_seg_pvs(cmd, spvs, le)))
+				return 0;
+			list_add(parallel_areas, &spvs->list);
+		}
+
+	}
+
+	/* add the pv (unless we already have it) */
+	peg.pv = pv;
+	if (!_add_pvs(cmd, &peg, 0, spvs))
+		return 0;
+
+      out:
+	if (!_log_parallel_areas(cmd->mem, parallel_areas))
+		stack;
+	return 1;
+}
+
+/*
  * This function takes a list of pv_areas and adds them to allocated_areas.
  * If the complete area is not needed then it gets split.
  * The part used is removed from the pv_map so it can't be allocated twice.
@@ -809,6 +946,15 @@ static int _alloc_parallel_area(struct a
 		if (areas[s]->count < area_len)
 			area_len = areas[s]->count;
 	
+	/* update constraints */
+	for (s = 0; s < req->area_count; s++)
+		if (!_update_parallel_areas(ah->cmd, ah->parallel_areas,
+				      ah->le + req->allocated * req->multiple,
+				      req->flags & ALLOC_AVOID_ALL_PARALLEL ?
+						UINT32_MAX : area_len * req->multiple,
+				      areas[s]->map->pv))
+			return 0;
+
 	/* allocate */
 	for (s = 0; s < req->area_count; s++) {
 		aa[s].pv = areas[s]->map->pv;
@@ -1882,6 +2028,10 @@ struct list *build_parallel_areas_from_l
 
 	list_init(parallel_areas);
 
+	/* if the lv is empty, just return the empty list */
+	if (!lv || !lv->le_count)
+		return parallel_areas;
+
 	do {
 		if (!(spvs = dm_pool_zalloc(cmd->mem, sizeof(*spvs)))) {
 			log_error("allocation failed");
