[Date Prev][Date Next]   [Thread Prev][Thread Next]   [Thread Index] [Date Index] [Author Index]

[lvm-devel] [PATCH LVM2] (11/12) extending parallel_area to cover log device



This patch extends parallel_area to include log device appropriately.

o The log device is considered as covering the whole range of the segment
  including newly allocated extents.
  However, the current parallel_areas list is not composed that way.

  For example, if you have a mirrored LV with length 10,
  parallel_areas would say: there are parallel PVs for the extents from
  0 to 9.
  If you are going to extend the mirrored LV, nothing constrains the
  extents allocated for le 10 or higher.

  In this case, the existing log device should be taken as parallel to
  all extents including le 10 or higher.

o _for_each_pv() is changed to accept a flags argument which controls
  what type of segments _for_each_pv() walks through.


$ diffstat -p1 11.log_coverage.patch
 lib/metadata/lv_manip.c |   71 +++++++++++++++++++++++++++++++++++++----------- 1 file changed, 56 insertions(+), 15 deletions(-)


Thanks,
-- 
Jun'ichi Nomura, NEC Corporation of America


diff -X dontdiff -urp LVM2.10.calcfree/lib/metadata/lv_manip.c LVM2.11.log_coverage/lib/metadata/lv_manip.c
--- LVM2.10.calcfree/lib/metadata/lv_manip.c	2006-10-13 22:28:35.000000000 -0400
+++ LVM2.11.log_coverage/lib/metadata/lv_manip.c	2006-10-13 22:47:34.000000000 -0400
@@ -604,8 +604,8 @@ static struct alloc_handle *_alloc_init(
 	ah->mirrored_pv = mirrored_pv;
 	ah->mirrored_pe = mirrored_pe;
 	if (!parallel_areas)
-		/* set up empty parallel_areas if not provided */
-		parallel_areas = build_parallel_areas_from_lv(cmd, NULL);
+		/* set up parallel_areas if not provided */
+		parallel_areas = build_parallel_areas_from_lv(cmd, lv);
 	ah->parallel_areas = parallel_areas;
 
 	return ah;
@@ -1044,13 +1044,18 @@ static int _alloc_parallel_area(struct a
  * the index can be calculated correctly.
  */
 /* FIXME: who needs first_area, max_areas and only_single_area_segments ? */
+
+/* flags to control _for_each_pv() iteration */
+#define FOR_EACH_PV_ANY		0x000000ffUL
+#define FOR_EACH_PV_NORMAL	0x00000001UL
+#define FOR_EACH_PV_LOG		0x00000002UL
+#define FOR_EACH_PV_SINGLE_AREA_SEG	0x00000100UL
 static int _for_each_pv(struct cmd_context *cmd, struct logical_volume *lv,
 			uint32_t le, uint32_t len, uint32_t *max_seg_len,
 			uint32_t first_area, uint32_t max_areas,
 			uint32_t *flattened_area_index,
 			uint32_t start_index, uint32_t end_index,
-			int only_single_area_segments,
-			int walk_log_lv,
+			unsigned flags,
 			int (*fn)(struct cmd_context *cmd,
 				  struct pv_segment *peg, uint32_t s,
 				  void *data),
@@ -1059,6 +1064,7 @@ static int _for_each_pv(struct cmd_conte
 	struct lv_segment *seg;
 	uint32_t s, tmp_flattened_area_index;
 	uint32_t remaining_seg_len, area_len, area_multiple;
+	int only_single_area_segments = (flags & FOR_EACH_PV_SINGLE_AREA_SEG);
 	int r = 1;
 
 	if (!(seg = find_seg_by_le(lv, le))) {
@@ -1073,6 +1079,10 @@ static int _for_each_pv(struct cmd_conte
 		*flattened_area_index = 0;
 	}
 
+	/* skip normal area iteration */
+	if (!(flags & FOR_EACH_PV_NORMAL))
+		goto other_types;
+
 	/* Remaining logical length of segment */
 	remaining_seg_len = seg->len - (le - seg->le);
 
@@ -1097,8 +1107,7 @@ static int _for_each_pv(struct cmd_conte
 					       only_single_area_segments ? 1 : 0,
 					       flattened_area_index,
 					       start_index, end_index,
-					       only_single_area_segments,
-					       walk_log_lv, fn,
+					       flags, fn,
 					       data)))
 				stack;
 		} else if (seg_type(seg, s) == AREA_PV) {
@@ -1113,11 +1122,12 @@ static int _for_each_pv(struct cmd_conte
 			return r;
 	}
 
-	if (walk_log_lv && seg_is_mirrored(seg) && seg->log_lv) {
+      other_types:
+	if ((flags & FOR_EACH_PV_LOG) && seg_is_mirrored(seg) && seg->log_lv) {
 		if (!(r = _for_each_pv(cmd, seg->log_lv, 0, MIRROR_LOG_SIZE,
 				       NULL, 0, 0, flattened_area_index,
 				       start_index, end_index,
-				       only_single_area_segments, walk_log_lv,
+				       flags | FOR_EACH_PV_ANY,
 				       fn, data)))
 			stack;
 		if (r != 1)
@@ -1135,7 +1145,8 @@ static int _count_parallel_areas(struct 
 
 	if (!_for_each_pv(NULL, lvseg->lv,
 			  lvseg->le + lvseg->len - 1, 1, NULL,
-			  0, 0, count, 0, UINT32_MAX, 0, 1, NULL, NULL))
+			  0, 0, count, 0, UINT32_MAX, FOR_EACH_PV_ANY,
+			  NULL, NULL))
 		return 0;
 
 	return 1;
@@ -1232,8 +1243,8 @@ static int _check_cling(struct cmd_conte
 
 	if (!(r = _for_each_pv(cmd, prev_lvseg->lv,
 			       prev_lvseg->le + prev_lvseg->len - 1, 1, NULL,
-			       0, 0, NULL, start_index, end_index, 0, 1,
-			       _is_condition, &pvmatch)))
+			       0, 0, NULL, start_index, end_index,
+			       FOR_EACH_PV_ANY, _is_condition, &pvmatch)))
 		stack;
 
 	if (r != 2)
@@ -1260,8 +1271,8 @@ static int _check_contiguous(struct cmd_
 
 	if (!(r = _for_each_pv(cmd, prev_lvseg->lv,
 			       prev_lvseg->le + prev_lvseg->len - 1, 1, NULL,
-			       0, 0, NULL, start_index, end_index, 0, 1,
-			       _is_condition, &pvmatch)))
+			       0, 0, NULL, start_index, end_index,
+			       FOR_EACH_PV_ANY, _is_condition, &pvmatch)))
 		stack;
 
 	if (r != 2)
@@ -2111,7 +2122,7 @@ struct list *build_parallel_areas_from_l
 {
 	struct list *parallel_areas;
 	struct seg_pvs *spvs;
-	uint32_t current_le = 0;
+	uint32_t current_le = 0, log_count = 0;
 
 	if (!(parallel_areas = dm_pool_alloc(cmd->mem, sizeof(*parallel_areas)))) {
 		log_error("parallel_areas allocation failed");
@@ -2140,7 +2151,8 @@ struct list *build_parallel_areas_from_l
 		/* Find next segment end */
 		/* FIXME Unnecessary nesting! */
 		if (!_for_each_pv(cmd, lv, current_le, spvs->len, &spvs->len,
-				  0, 0, NULL, 0, UINT32_MAX, 0, 1, _add_pvs, (void *) spvs)) {
+				  0, 0, NULL, 0, UINT32_MAX, FOR_EACH_PV_ANY,
+				  _add_pvs, (void *) spvs)) {
 			stack;
 			return NULL;
 		}
@@ -2148,6 +2160,35 @@ struct list *build_parallel_areas_from_l
 		current_le = spvs->le + spvs->len;
 	} while (current_le < lv->le_count);
 
+	/* add log pvs to the parallel list */
+	if (!_for_each_pv(cmd, lv, 0, UINT32_MAX, NULL,
+			  0, 0, &log_count, 0, UINT32_MAX, FOR_EACH_PV_LOG,
+			  NULL, NULL)) {
+		stack;
+		return NULL;
+	}
+	if (log_count) {
+		if (!(spvs = dm_pool_zalloc(cmd->mem, sizeof(*spvs)))) {
+			log_error("allocation failed");
+			return NULL;
+		}
+
+		list_init(&spvs->pvs);
+
+		/* log lv is considered to cover whole range */
+		spvs->le = lv->le_count;
+		spvs->len = UINT32_MAX - lv->le_count;
+
+		list_add(parallel_areas, &spvs->list);
+
+		if (!_for_each_pv(cmd, lv, 0, UINT32_MAX, NULL,
+				  0, 0, NULL, 0, UINT32_MAX, FOR_EACH_PV_LOG,
+				  _add_pvs, (void *) spvs)) {
+			stack;
+			return NULL;
+		}
+	}
+
 	/* FIXME Merge adjacent segments with identical PV lists (avoids need for contiguous allocation attempts between successful allocations) */
 
 	return parallel_areas;

[Date Prev][Date Next]   [Thread Prev][Thread Next]   [Thread Index] [Date Index] [Author Index]