[Date Prev][Date Next]   [Thread Prev][Thread Next]   [Thread Index] [Date Index] [Author Index]

[lvm-devel] [PATCH LVM2] (1/12) add flattened index to _for_each_pv()



This patch adds "flattened index" to _for_each_pv().

o Using this index, you can map a stacked LV segment onto
  the flat array of PVs.
  This is useful for the allocation code to find the index into
  areas[] corresponding to prev_lvseg.

o _for_each_pv() doesn't execute the callback function if it is NULL.
  This is useful if you just need to count the number of PVs
  (as in _count_parallel_areas).

o The patch adds "start_index" and "end_index" to restrict
  the callback invocations while still maintaining the flattened index.

o Also the patch adds a "walk_log_lv" flag instead of reusing
  !only_single_area_segments as a workaround, because that is no
  longer correct.


$ diffstat -p1 01.for_each_pv.patch
 lib/metadata/lv_manip.c |   76 ++++++++++++++++++++++++++++++++++++---------
 1 file changed, 57 insertions(+), 19 deletions(-)


Thanks,
-- 
Jun'ichi Nomura, NEC Corporation of America


diff -X dontdiff -urp LVM2.00.orig/lib/metadata/lv_manip.c LVM2.01.for_each_pv/lib/metadata/lv_manip.c
--- LVM2.00.orig/lib/metadata/lv_manip.c	2006-10-09 21:06:56.000000000 -0400
+++ LVM2.01.for_each_pv/lib/metadata/lv_manip.c	2006-10-13 20:12:51.000000000 -0400
@@ -677,18 +677,28 @@ static int _alloc_parallel_area(struct a
  * fn should return 0 on error, 1 to continue scanning or >1 to terminate without error.
  * In the last case, this function passes on the return code.
  */
+/*
+ * 'flattened_area_index' provides a unique index for each PV.
+ * 'start_index' and 'end_index' can be used to restrict fn calls
+ * to a specific range of indices.
+ * The loop continues regardless of the restriction so that
+ * the index can still be calculated correctly.
+ */
+/* FIXME: who needs first_area, max_areas and only_single_area_segments ? */
 static int _for_each_pv(struct cmd_context *cmd, struct logical_volume *lv,
 			uint32_t le, uint32_t len, uint32_t *max_seg_len,
 			uint32_t first_area, uint32_t max_areas,
-			int top_level_area_index,
+			uint32_t *flattened_area_index,
+			uint32_t start_index, uint32_t end_index,
 			int only_single_area_segments,
+			int walk_log_lv,
 			int (*fn)(struct cmd_context *cmd,
 				  struct pv_segment *peg, uint32_t s,
 				  void *data),
 			void *data)
 {
 	struct lv_segment *seg;
-	uint32_t s;
+	uint32_t s, tmp_flattened_area_index;
 	uint32_t remaining_seg_len, area_len, area_multiple;
 	int r = 1;
 
@@ -698,6 +708,12 @@ static int _for_each_pv(struct cmd_conte
 		return 0;
 	}
 
+	/* set up index counter temporarily */
+	if (!flattened_area_index) {
+		flattened_area_index = &tmp_flattened_area_index;
+		*flattened_area_index = 0;
+	}
+
 	/* Remaining logical length of segment */
 	remaining_seg_len = seg->len - (le - seg->le);
 
@@ -720,21 +736,29 @@ static int _for_each_pv(struct cmd_conte
 					       area_len, max_seg_len,
 					       only_single_area_segments ? 0 : 0,
 					       only_single_area_segments ? 1 : 0,
-					       top_level_area_index != -1 ? top_level_area_index : s,
-					       only_single_area_segments, fn,
+					       flattened_area_index,
+					       start_index, end_index,
+					       only_single_area_segments,
+					       walk_log_lv, fn,
 					       data)))
 				stack;
-		} else if (seg_type(seg, s) == AREA_PV)
-			if (!(r = fn(cmd, seg_pvseg(seg, s), top_level_area_index != -1 ? top_level_area_index : s, data)))
+		} else if (seg_type(seg, s) == AREA_PV) {
+			if (*flattened_area_index >= start_index &&
+			    *flattened_area_index <= end_index &&
+			    fn && !(r = fn(cmd, seg_pvseg(seg, s),
+					   *flattened_area_index, data)))
 				stack;
+			(*flattened_area_index)++;
+		}
 		if (r != 1)
 			return r;
 	}
 
-	/* FIXME only_single_area_segments used as workaround to skip log LV - needs new param? */
-	if (!only_single_area_segments && seg_is_mirrored(seg) && seg->log_lv) {
+	if (walk_log_lv && seg_is_mirrored(seg) && seg->log_lv) {
 		if (!(r = _for_each_pv(cmd, seg->log_lv, 0, MIRROR_LOG_SIZE,
-				       NULL, 0, 0, 0, only_single_area_segments,
+				       NULL, 0, 0, flattened_area_index,
+				       start_index, end_index,
+				       only_single_area_segments, walk_log_lv,
 				       fn, data)))
 			stack;
 		if (r != 1)
@@ -746,6 +770,18 @@ static int _for_each_pv(struct cmd_conte
 	return 1;
 }
 
+static int _count_parallel_areas(struct lv_segment *lvseg, uint32_t *count)
+{
+	*count = 0;
+
+	if (!_for_each_pv(NULL, lvseg->lv,
+			  lvseg->le + lvseg->len - 1, 1, NULL,
+			  0, 0, count, 0, UINT32_MAX, 0, 1, NULL, NULL))
+		return 0;
+
+	return 1;
+}
+
 static int _comp_area(const void *l, const void *r)
 {
 	const struct pv_area *lhs = *((const struct pv_area **) l);
@@ -829,10 +865,9 @@ static int _check_cling(struct cmd_conte
 	pvmatch.areas_size = areas_size;
 	pvmatch.pva = pva;
 
-	/* FIXME Cope with stacks by flattening */
 	if (!(r = _for_each_pv(cmd, prev_lvseg->lv,
 			       prev_lvseg->le + prev_lvseg->len - 1, 1, NULL,
-			       0, 0, -1, 1,
+			       0, 0, NULL, 0, UINT32_MAX, 0, 1,
 			       _is_condition, &pvmatch)))
 		stack;
 
@@ -857,10 +892,9 @@ static int _check_contiguous(struct cmd_
 	pvmatch.areas_size = areas_size;
 	pvmatch.pva = pva;
 
-	/* FIXME Cope with stacks by flattening */
 	if (!(r = _for_each_pv(cmd, prev_lvseg->lv,
 			       prev_lvseg->le + prev_lvseg->len - 1, 1, NULL,
-			       0, 0, -1, 1,
+			       0, 0, NULL, 0, UINT32_MAX, 0, 1,
 			       _is_condition, &pvmatch)))
 		stack;
 
@@ -885,7 +919,7 @@ static int _find_parallel_space(struct a
 	unsigned already_found_one = 0;
 	unsigned contiguous = 0, cling = 0, preferred_count = 0;
 	unsigned ix;
-	unsigned ix_offset = 0;	/* Offset for non-preferred allocations */
+	uint32_t ix_offset = 0;	/* Offset for non-preferred allocations */
 	uint32_t max_parallel;	/* Maximum extents to allocate */
 	uint32_t next_le;
 	struct seg_pvs *spvs;
@@ -896,7 +930,8 @@ static int _find_parallel_space(struct a
 
 	/* Are there any preceding segments we must follow on from? */
 	if (prev_lvseg) {
-		ix_offset = prev_lvseg->area_count;
+		if (!_count_parallel_areas(prev_lvseg, &ix_offset))
+			return 0;
 		if ((alloc == ALLOC_CONTIGUOUS))
 			contiguous = 1;
 		else if ((alloc == ALLOC_CLING))
@@ -1092,9 +1127,12 @@ static int _allocate(struct alloc_handle
 	}
 
 	/* Upper bound if none of the PVs in prev_lvseg is in pvms */
-	/* FIXME Work size out properly */
-	if (prev_lvseg)
-		areas_size += prev_lvseg->area_count;
+	if (prev_lvseg) {
+		uint32_t prev_lvseg_areas;
+		if (!_count_parallel_areas(prev_lvseg, &prev_lvseg_areas))
+			return 0;
+		areas_size += prev_lvseg_areas;
+	}
 
 	/* Allocate an array of pv_areas to hold the largest space on each PV */
 	if (!(areas = dm_malloc(sizeof(*areas) * areas_size))) {
@@ -1621,7 +1659,7 @@ struct list *build_parallel_areas_from_l
 		/* Find next segment end */
 		/* FIXME Unnecessary nesting! */
 		if (!_for_each_pv(cmd, lv, current_le, spvs->len, &spvs->len,
-				  0, 0, -1, 0, _add_pvs, (void *) spvs)) {
+				  0, 0, NULL, 0, UINT32_MAX, 0, 1, _add_pvs, (void *) spvs)) {
 			stack;
 			return NULL;
 		}

[Date Prev][Date Next]   [Thread Prev][Thread Next]   [Thread Index] [Date Index] [Author Index]