[Date Prev][Date Next]   [Thread Prev][Thread Next]   [Thread Index] [Date Index] [Author Index]

[lvm-devel] [PATCH LVM2] (2/12) rewrite with struct allocation request



This patch introduces 'struct allocation_request' to generalize
the allocation request currently embedded in struct alloc_handle.

o The struct is in this form:
   struct allocation_request {
        uint32_t len; /* requested length of single area */
        uint32_t area_count; /* number of areas with same length */
        uint32_t multiple;      /* LV size is extended with (len * multiple) */
        uint32_t allocated; /* allocated length of single area */
        int flags;
        int index;      /* index to alloced_areas[] */
   };

  For example, if you do 'lvcreate -l3 -m1', the first allocation
  request would be:
     { len = 3, area_count = 2, multiple = 1, index = 0 }
  and for the 2nd allocation (i.e. log) would be:
     { len = 1, area_count = 1, multiple = 1, index = 2 }

  _handle_allocation_requests() will call _find_parallel_space() for
  each struct allocation_request.

  The flags field is used to describe preferences and is also used later
  to find out for what purpose the request was made.

o _find_parallel_space() is simplified since we don't need to carry
  around the attempt-wide "*allocated" or "max_parallel" and a lot of
  division and multiplication.

o lv_add_log_segment() is changed to search alloced_areas for log.
  It should be easily extended to cope with multiple log allocation.

o Set up of struct allocation_request is done in _alloc_init().
  It seems better to do it outside of allocate_extents()
  and have allocate_extents() take the array of struct allocation_request.


$ diffstat -p1 02.alloc_requests.patch
 lib/metadata/lv_manip.c |  459 ++++++++++++++++++++++++++++++++-------------
 1 file changed, 310 insertions(+), 149 deletions(-)


Thanks,
-- 
Jun'ichi Nomura, NEC Corporation of America


diff -X dontdiff -urp LVM2.01.for_each_pv/lib/metadata/lv_manip.c LVM2.02.alloc_requests/lib/metadata/lv_manip.c
--- LVM2.01.for_each_pv/lib/metadata/lv_manip.c	2006-10-13 20:12:51.000000000 -0400
+++ LVM2.02.alloc_requests/lib/metadata/lv_manip.c	2006-10-13 20:58:46.000000000 -0400
@@ -1,6 +1,7 @@
 /*
  * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
- * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2006 NEC Corporation
  *
  * This file is part of LVM2.
  *
@@ -384,6 +385,25 @@ int lv_remove(struct logical_volume *lv)
 }
 
 /*
+ * Allocation
+ *
+ * The allocation attempt can be either of 2 modes:
+ *    - horizontal mode
+ *        create new LV (e.g. lvcreate)
+ *        or add new parallel LV to the existing LV (e.g. lvconvert -m1)
+ *    - vertical mode
+ *        extend existing LV in length  (e.g. lvextend)
+ *
+ * In horizontal mode, constraints of the allocation are provided as
+ * parallel_areas which represents the set of PV areas used by existing LV
+ * or being allocated. New allocation should avoid them.
+ *
+ * In vertical mode, constraints also include the prev_lvseg which represents
+ * the last part of the LV.
+ * Allocator tries to make newly allocated extents contiguous or cling
+ * to the existing segments depending on its policy.
+ */
+/*
  * A set of contiguous physical extents allocated
  */
 struct alloced_area {
@@ -396,22 +416,70 @@ struct alloced_area {
 
 /*
  * Details of an allocation attempt
+ *
+ * "alloc_handle" is used to represent the whole attempt of the allocation.
+ * "allocation_request" represents the unit request.
+ * It's guaranteed that extents for areas in the single allocation_request
+ * are taken from different PVs.
+ *
+ * The "len" of the allocation_request is the number of the extents
+ * required for single area. "allocated" shows how many extents are
+ * already allocated for this request.
+ *
+ * Actual length extended for the allocated LV is "len" multiplied by
+ * "multiple".
+ * For example, request for 2 stripes will have "multiple=2".
+ *
+ * The allocation_request allocates "area_count" of areas, each has "len"
+ * extents.
+ * Mirrors and stripes will have "area_count" larger than 1.
+ *
+ * "flags" is used to represent the preference.
+ *
+ * "index" corresponds to the index of alloced_areas[].
+ * If the allocation is extension, "index" also matches to the flattened
+ * index provided by _for_each_pv().
  */
+
+struct allocation_request {
+	uint32_t len; /* requested length of single area */
+	uint32_t area_count; /* number of areas with same length */
+	uint32_t multiple;	/* LV size is extended with (len * multiple) */
+	uint32_t allocated; /* allocated length of single area */
+	int flags;
+	int index;	/* index to alloced_areas[] */
+};
+
+/* flags used for allocation_request */
+#define ALLOC_AVOID_ALL_PARALLEL	0x00000001UL
+#define ALLOC_AVOID_SAME_SLICE		0x00000000UL
+#define ALLOC_PREFER_SMALL		0x00000002UL
+#define ALLOC_PREFER_LARGE		0x00000000UL
+#define ALLOC_TYPE_NORMAL	(ALLOC_PREFER_LARGE|ALLOC_AVOID_SAME_SLICE)
+#define ALLOC_TYPE_LOG		(ALLOC_PREFER_SMALL|ALLOC_AVOID_ALL_PARALLEL)
+
 struct alloc_handle {
 	struct cmd_context *cmd;
 	struct dm_pool *mem;
 
 	alloc_policy_t alloc;		/* Overall policy */
-	uint32_t area_count;		/* Number of parallel areas */
-	uint32_t area_multiple;		/* seg->len = area_len * area_multiple */
-	uint32_t log_count;		/* Number of parallel 1-extent logs */
+	uint32_t area_count;		/* Sum of area_count of all requests */
 	uint32_t total_area_len;	/* Total number of parallel extents */
 
+	/* allocation constraints */
 	struct physical_volume *mirrored_pv;	/* FIXME Remove this */
 	uint32_t mirrored_pe;			/* FIXME Remove this */
 	struct list *parallel_areas;	/* PVs to avoid */
+	uint32_t le;	/* the last LE of existing LV */
+
+	/* allocation requests */
+	int nr_requests;			/* number of requests */
+	struct allocation_request *requests;	/* array of requests */
+
+	/* allocation status */
+	uint32_t requested;	/* Total number of requested extents */
+	uint32_t allocated;	/* Total number of allocated extents */
 
-	struct alloced_area log_area;	/* Extent used for log */
 	struct list alloced_areas[0];	/* Lists of areas in each stripe */
 };
 
@@ -427,10 +495,13 @@ static struct alloc_handle *_alloc_init(
 					uint32_t log_count,
 					struct physical_volume *mirrored_pv,
 					uint32_t mirrored_pe,
-					struct list *parallel_areas)
+					struct list *parallel_areas,
+					struct logical_volume *lv,
+					uint32_t extents)
 {
 	struct alloc_handle *ah;
-	uint32_t s, area_count;
+	struct allocation_request *req = NULL;
+	uint32_t s, area_count, nr_requests, requested;
 
 	if (stripes > 1 && mirrors > 1) {
 		log_error("Striped mirrors are not supported yet");
@@ -449,17 +520,61 @@ static struct alloc_handle *_alloc_init(
 		return NULL;
 	}
 
-	if (segtype_is_virtual(segtype))
-		area_count = 0;
-	else if (mirrors > 1)
-		area_count = mirrors;
-	else if (mirrored_pv)
-		area_count = 1;
-	else
-		area_count = stripes;
+	/*
+	 * Set up requests from given information
+	 *
+	 * FIXME:
+	 *   To allow generic combination of allocation_requests,
+	 *   request composition should be out side of allocate_extents().
+	 */
+	nr_requests = (segtype_is_virtual(segtype) ? 0 : 1) + (log_count ? 1 : 0);
+	if (nr_requests &&
+	    !(req = dm_pool_zalloc(mem, sizeof(struct allocation_request) * nr_requests))) {
+		log_error("allocation handle allocation failed");
+		return NULL;
+	}
+
+	/* req[0] : request for main lv */
+	if (!segtype_is_virtual(segtype)) {
+		if (mirrors > 1)
+			req[0].area_count = mirrors;
+		else if (mirrored_pv)
+			req[0].area_count = 1;
+		else
+			req[0].area_count = stripes;
+		req[0].multiple = segtype_is_striped(segtype) ? stripes : 1;
+		req[0].len = extents / req[0].multiple;
+		req[0].flags = ALLOC_TYPE_NORMAL;
+		req[0].allocated = 0;
+		req[0].index = 0;
+	}
+
+	/* req[1] : request for log device */
+	if (log_count) {
+		req[1].len = MIRROR_LOG_SIZE; /* FIXME: Calculate correctly */
+		req[1].area_count = log_count;
+		req[1].multiple = 1;
+		req[1].flags = ALLOC_TYPE_LOG;
+		req[1].allocated = 0;
+		req[1].index = req[0].area_count;
+	}
+
+	/* size the total request */
+	area_count = 0;
+	requested = 0;
+	for (s = 0; s < nr_requests; s++) {
+		area_count += req[s].area_count;
+		requested += req[s].len * req[s].area_count;
+	}
+
 
+	/*
+	 * Set up allocation handle
+	 */
 	if (!(ah = dm_pool_zalloc(mem, sizeof(*ah) + sizeof(ah->alloced_areas[0]) * area_count))) {
 		log_error("allocation handle allocation failed");
+		if (req)
+			dm_pool_free(ah->cmd->mem, req);
 		return NULL;
 	}
 
@@ -473,10 +588,15 @@ static struct alloc_handle *_alloc_init(
 		return NULL;
 	}
 
+	/* request set up */
+	ah->nr_requests = nr_requests;
+	ah->requests = req;
 	ah->area_count = area_count;
-	ah->log_count = log_count;
+	ah->requested = requested;
+	ah->allocated = 0;
+	ah->le = lv ? lv->le_count : 0;
+
 	ah->alloc = alloc;
-	ah->area_multiple = segtype_is_striped(segtype) ? ah->area_count : 1;
 
 	for (s = 0; s < ah->area_count; s++)
 		list_init(&ah->alloced_areas[s]);
@@ -620,52 +740,59 @@ static int _setup_alloced_segments(struc
 }
 
 /*
+ * returns seg_pvs which contains the le from the list
+ */
+static struct seg_pvs* _find_seg_pvs(struct list *list, uint32_t le)
+{
+	struct seg_pvs *spvs;
+
+	if (!list)
+		return NULL;
+
+	list_iterate_items(spvs, list) {
+		if (le >= spvs->le + spvs->len)
+			continue;
+		/* assume spvs list is sorted and not sparse */
+		return spvs;
+	}
+
+	return NULL;
+}
+
+/*
  * This function takes a list of pv_areas and adds them to allocated_areas.
  * If the complete area is not needed then it gets split.
  * The part used is removed from the pv_map so it can't be allocated twice.
  */
-static int _alloc_parallel_area(struct alloc_handle *ah, uint32_t needed,
-				struct pv_area **areas,
-				uint32_t *ix, struct pv_area *log_area)
+static int _alloc_parallel_area(struct alloc_handle *ah,
+				struct allocation_request *req,
+				uint32_t area_len,
+				struct pv_area **areas)
 {
-	uint32_t area_len, smallest, remaining;
 	uint32_t s;
 	struct alloced_area *aa;
 
-	remaining = needed - *ix;
-	area_len = remaining / ah->area_multiple;
-
-	smallest = areas[ah->area_count - 1]->count;
-
-	if (area_len > smallest)
-		area_len = smallest;
-
-	if (!(aa = dm_pool_alloc(ah->mem, sizeof(*aa) *
-			      (ah->area_count + (log_area ? 1 : 0))))) {
+	if (!(aa = dm_pool_alloc(ah->mem, sizeof(*aa) * req->area_count))) {
 		log_error("alloced_area allocation failed");
 		return 0;
 	}
 
-	for (s = 0; s < ah->area_count; s++) {
+	/* adjust the allocation size */
+	for (s = 0; s < req->area_count; s++)
+		if (areas[s]->count < area_len)
+			area_len = areas[s]->count;
+	
+	/* allocate */
+	for (s = 0; s < req->area_count; s++) {
 		aa[s].pv = areas[s]->map->pv;
 		aa[s].pe = areas[s]->start;
 		aa[s].len = area_len;
-		list_add(&ah->alloced_areas[s], &aa[s].list);
-	}
-
-	ah->total_area_len += area_len;
-
-	for (s = 0; s < ah->area_count; s++)
+		list_add(&ah->alloced_areas[s + req->index], &aa[s].list);
 		consume_pv_area(areas[s], area_len);
-
-	if (log_area) {
-		ah->log_area.pv = log_area->map->pv;
-		ah->log_area.pe = log_area->start;
-		ah->log_area.len = MIRROR_LOG_SIZE;	/* FIXME Calculate & check this */
-		consume_pv_area(log_area, ah->log_area.len);
 	}
 
-	*ix += area_len * ah->area_multiple;
+	ah->allocated += area_len * req->area_count;
+	req->allocated += area_len;
 
 	return 1;
 }
@@ -855,6 +982,7 @@ static int _is_condition(struct cmd_cont
  */
 static int _check_cling(struct cmd_context *cmd,
 			struct lv_segment *prev_lvseg, struct pv_area *pva,
+			uint32_t start_index, uint32_t end_index,
 			struct pv_area **areas, uint32_t areas_size)
 {
 	struct pv_match pvmatch;
@@ -867,7 +995,7 @@ static int _check_cling(struct cmd_conte
 
 	if (!(r = _for_each_pv(cmd, prev_lvseg->lv,
 			       prev_lvseg->le + prev_lvseg->len - 1, 1, NULL,
-			       0, 0, NULL, 0, UINT32_MAX, 0, 1,
+			       0, 0, NULL, start_index, end_index, 0, 1,
 			       _is_condition, &pvmatch)))
 		stack;
 
@@ -882,6 +1010,7 @@ static int _check_cling(struct cmd_conte
  */
 static int _check_contiguous(struct cmd_context *cmd,
 			     struct lv_segment *prev_lvseg, struct pv_area *pva,
+			     uint32_t start_index, uint32_t end_index,
 			     struct pv_area **areas, uint32_t areas_size)
 {
 	struct pv_match pvmatch;
@@ -894,7 +1023,7 @@ static int _check_contiguous(struct cmd_
 
 	if (!(r = _for_each_pv(cmd, prev_lvseg->lv,
 			       prev_lvseg->le + prev_lvseg->len - 1, 1, NULL,
-			       0, 0, NULL, 0, UINT32_MAX, 0, 1,
+			       0, 0, NULL, start_index, end_index, 0, 1,
 			       _is_condition, &pvmatch)))
 		stack;
 
@@ -907,27 +1036,24 @@ static int _check_contiguous(struct cmd_
 /*
  * Choose sets of parallel areas to use, respecting any constraints.
  */
-static int _find_parallel_space(struct alloc_handle *ah, alloc_policy_t alloc,
+static int _find_parallel_space(struct alloc_handle *ah, 
+				struct allocation_request *req,
+				alloc_policy_t alloc,
 				struct list *pvms, struct pv_area **areas,
 				uint32_t areas_size, unsigned can_split,
-				struct lv_segment *prev_lvseg,
-				uint32_t *allocated, uint32_t needed)
+				struct lv_segment *prev_lvseg)
 {
 	struct pv_map *pvm;
 	struct pv_area *pva;
 	struct pv_list *pvl;
 	unsigned already_found_one = 0;
 	unsigned contiguous = 0, cling = 0, preferred_count = 0;
-	unsigned ix;
+	unsigned ix, areas_index;
 	uint32_t ix_offset = 0;	/* Offset for non-preferred allocations */
-	uint32_t max_parallel;	/* Maximum extents to allocate */
-	uint32_t next_le;
+	uint32_t req_len, goal, current_le;
 	struct seg_pvs *spvs;
 	struct list *parallel_pvs;
 
-	/* FIXME Do calculations on free extent counts before selecting space */
-	/* FIXME Select log PV appropriately if there isn't one yet */
-
 	/* Are there any preceding segments we must follow on from? */
 	if (prev_lvseg) {
 		if (!_count_parallel_areas(prev_lvseg, &ix_offset))
@@ -940,7 +1066,8 @@ static int _find_parallel_space(struct a
 			ix_offset = 0;
 	}
 
-	/* FIXME This algorithm needs a lot of cleaning up! */
+	goal = ah->le + (req->len * req->multiple);
+
 	/* FIXME anywhere doesn't find all space yet */
 	/* ix_offset holds the number of allocations that must be contiguous */
 	/* ix holds the number of areas found on other PVs */
@@ -948,24 +1075,19 @@ static int _find_parallel_space(struct a
 		ix = 0;
 		preferred_count = 0;
 
-		parallel_pvs = NULL;
-		max_parallel = needed;
-
 		/*
-		 * If there are existing parallel PVs, avoid them and reduce
-		 * the maximum we can allocate in one go accordingly.
+		 * If there are existing parallel PVs, pick up subset
+		 * which is parallel to this allocation (parallel_pvs)
+		 * and reduce the maximum we can allocate in one go
+		 * accordingly. (req_len)
 		 */
-		if (ah->parallel_areas) {
-			next_le = (prev_lvseg ? prev_lvseg->le + prev_lvseg->len : 0) + *allocated / ah->area_multiple;
-			list_iterate_items(spvs, ah->parallel_areas) {
-				if (next_le >= spvs->le + spvs->len)
-					continue;
-
-				if (max_parallel > (spvs->le + spvs->len) * ah->area_multiple)
-					max_parallel = (spvs->le + spvs->len) * ah->area_multiple;
-				parallel_pvs = &spvs->pvs;
-				break;
-			}
+		parallel_pvs = NULL;
+		current_le = ah->le + (req->allocated * req->multiple);
+		req_len = req->len - req->allocated;
+		if ((spvs = _find_seg_pvs(ah->parallel_areas, current_le))) {
+			if (goal > spvs->le + spvs->len)
+				req_len = (spvs->le + spvs->len - current_le) / req->multiple;
+			parallel_pvs = &spvs->pvs;
 		}
 
 		/*
@@ -979,11 +1101,6 @@ static int _find_parallel_space(struct a
 				continue;	/* Next PV */
 
 			if (alloc != ALLOC_ANYWHERE) {
-				/* Don't allocate onto the log pv */
-				if (ah->log_count &&
-				    pvm->pv == ah->log_area.pv)
-					continue;	/* Next PV */
-
 				/* Avoid PVs used by existing parallel areas */
 				if (parallel_pvs)
 					list_iterate_items(pvl, parallel_pvs)
@@ -998,7 +1115,10 @@ static int _find_parallel_space(struct a
 					if (prev_lvseg &&
 					    _check_contiguous(ah->cmd,
 							      prev_lvseg,
-							      pva, areas,
+							      pva, 
+							      req->index,
+							      req->index + req->area_count - 1,
+							      areas,
 							      areas_size)) {
 						preferred_count++;
 						goto next_pv;
@@ -1010,7 +1130,10 @@ static int _find_parallel_space(struct a
 					if (prev_lvseg &&
 					    _check_cling(ah->cmd,
 							   prev_lvseg,
-							   pva, areas,
+							   pva,
+							   req->index,
+							   req->index + req->area_count - 1,
+							   areas,
 							   areas_size)) {
 						preferred_count++;
 					}
@@ -1018,15 +1141,11 @@ static int _find_parallel_space(struct a
 				}
 
 				/* Is it big enough on its own? */
-				if (pva->count * ah->area_multiple <
-				    max_parallel - *allocated &&
-				    ((!can_split && !ah->log_count) ||
-				     (already_found_one &&
-				      !(alloc == ALLOC_ANYWHERE))))
+				if (pva->count < req->len - req->allocated &&
+				    (!can_split || already_found_one))
 					goto next_pv;
 
-				if (!already_found_one ||
-				    alloc == ALLOC_ANYWHERE) {
+				if (!already_found_one) {
 					ix++;
 					already_found_one = 1;
 				}
@@ -1040,14 +1159,10 @@ static int _find_parallel_space(struct a
 				break;
 		}
 
-		if ((contiguous || cling) && (preferred_count < ix_offset))
-			break;
-
-		/* Only allocate log_area the first time around */
-		if (ix + ix_offset < ah->area_count +
-			    ((ah->log_count && !ah->log_area.len) ?
-				ah->log_count : 0))
-			/* FIXME With ALLOC_ANYWHERE, need to split areas */
+		if (contiguous || cling) {
+			if (preferred_count < req->area_count)
+				break;
+		} else if (ix < req->area_count)
 			break;
 
 		/* sort the areas so we allocate from the biggest */
@@ -1055,18 +1170,39 @@ static int _find_parallel_space(struct a
 			qsort(areas + ix_offset, ix, sizeof(*areas),
 			      _comp_area);
 
-		/* First time around, use smallest area as log_area */
-		/* FIXME decide which PV to use at top of function instead */
-		if (!_alloc_parallel_area(ah, max_parallel, areas,
-					  allocated,
-					  (ah->log_count && !ah->log_area.len) ?
-						*(areas + ix_offset + ix - 1) :
-						NULL)) {
+		areas_index = ix_offset;
+		if (contiguous || cling)
+			areas_index = req->index;
+
+		if (!_alloc_parallel_area(ah, req, req_len, &areas[areas_index])) {
 			stack;
 			return 0;
 		}
 
-	} while (!contiguous && *allocated != needed && can_split);
+	} while (!contiguous && req->allocated != req->len && can_split);
+
+	return 1;
+}
+
+static int _handle_allocation_requests(struct alloc_handle *ah,
+				alloc_policy_t alloc,
+				struct list *pvms, struct pv_area **areas,
+				uint32_t areas_size, unsigned can_split,
+				struct lv_segment *prev_lvseg)
+{
+	int i;
+
+	for (i = 0; i < ah->nr_requests; i++) {
+		if (ah->requests[i].len == ah->requests[i].allocated)
+			/* this request is already completed */
+			continue;
+		if (!_find_parallel_space(ah, &ah->requests[i], alloc,
+				  pvms, areas, areas_size, can_split,
+				  prev_lvseg)) {
+			stack;
+			return 0;
+		}
+	}
 
 	return 1;
 }
@@ -1079,13 +1215,9 @@ static int _find_parallel_space(struct a
 static int _allocate(struct alloc_handle *ah,
 		     struct volume_group *vg,
 		     struct logical_volume *lv, uint32_t status,
-		     uint32_t new_extents,
-		     struct list *allocatable_pvs,
-		     uint32_t stripes, uint32_t mirrors,
-		     const struct segment_type *segtype)
+		     struct list *allocatable_pvs)
 {
 	struct pv_area **areas;
-	uint32_t allocated = lv ? lv->le_count : 0;
 	uint32_t old_allocated;
 	struct lv_segment *prev_lvseg = NULL;
 	unsigned can_split = 1;	/* Are we allowed more than one segment? */
@@ -1093,7 +1225,7 @@ static int _allocate(struct alloc_handle
 	struct list *pvms;
 	uint32_t areas_size;
 
-	if (allocated >= new_extents && !ah->log_count) {
+	if (ah->allocated >= ah->requested) {
 		log_error("_allocate called with no work to do!");
 		return 1;
 	}
@@ -1112,18 +1244,20 @@ static int _allocate(struct alloc_handle
 		return 0;
 	}
 
+	/* FIXME Calculate free extent counts before trying allocation */
+
 	if (!_log_parallel_areas(ah->mem, ah->parallel_areas))
 		stack;
 
 	areas_size = list_size(pvms);
-	if (areas_size < ah->area_count + ah->log_count) {
+	if (areas_size < ah->area_count) {
 		if (ah->alloc != ALLOC_ANYWHERE) {
 			log_error("Not enough PVs with free space available "
 				  "for parallel allocation.");
 			log_error("Consider --alloc anywhere if desperate.");
 			return 0;
 		}
-		areas_size = ah->area_count + ah->log_count;
+		areas_size = ah->area_count;
 	}
 
 	/* Upper bound if none of the PVs in prev_lvseg is in pvms */
@@ -1140,57 +1274,52 @@ static int _allocate(struct alloc_handle
 		return 0;
 	}
 
-	old_allocated = allocated;
-	if (!_find_parallel_space(ah, ALLOC_CONTIGUOUS, pvms, areas,
-				  areas_size, can_split,
-				  prev_lvseg, &allocated, new_extents)) {
+	old_allocated = ah->allocated;
+	if (!_handle_allocation_requests(ah, ALLOC_CONTIGUOUS, pvms, areas,
+				  areas_size, can_split, prev_lvseg)) {
 		stack;
 		goto out;
 	}
 
-	if ((allocated == new_extents) || (ah->alloc == ALLOC_CONTIGUOUS) ||
-	    (!can_split && (allocated != old_allocated)))
+	if ((ah->allocated == ah->requested) || (ah->alloc == ALLOC_CONTIGUOUS) ||
+	    (!can_split && (ah->allocated != old_allocated)))
 		goto finished;
 
-	old_allocated = allocated;
-	if (!_find_parallel_space(ah, ALLOC_CLING, pvms, areas,
-				  areas_size, can_split,
-				  prev_lvseg, &allocated, new_extents)) {
+	old_allocated = ah->allocated;
+	if (!_handle_allocation_requests(ah, ALLOC_CLING, pvms, areas,
+				  areas_size, can_split, prev_lvseg)) {
 		stack;
 		goto out;
 	}
 
-	if ((allocated == new_extents) || (ah->alloc == ALLOC_CLING) ||
-	    (!can_split && (allocated != old_allocated)))
+	if ((ah->allocated == ah->requested) || (ah->alloc == ALLOC_CLING) ||
+	    (!can_split && (ah->allocated != old_allocated)))
 		goto finished;
 
-	old_allocated = allocated;
-	if (!_find_parallel_space(ah, ALLOC_NORMAL, pvms, areas,
-				  areas_size, can_split,
-				  prev_lvseg, &allocated, new_extents)) {
+	old_allocated = ah->allocated;
+	if (!_handle_allocation_requests(ah, ALLOC_NORMAL, pvms, areas,
+				  areas_size, can_split, prev_lvseg)) {
 		stack;
 		goto out;
 	}
 
-	if ((allocated == new_extents) || (ah->alloc == ALLOC_NORMAL) ||
-	    (!can_split && (allocated != old_allocated)))
+	if ((ah->allocated == ah->requested) || (ah->alloc == ALLOC_NORMAL) ||
+	    (!can_split && (ah->allocated != old_allocated)))
 		goto finished;
 
-	if (!_find_parallel_space(ah, ALLOC_ANYWHERE, pvms, areas,
-				  areas_size, can_split,
-				  prev_lvseg, &allocated, new_extents)) {
+	if (!_handle_allocation_requests(ah, ALLOC_ANYWHERE, pvms, areas,
+				  areas_size, can_split, prev_lvseg)) {
 		stack;
 		goto out;
 	}
 
       finished:
-	if (allocated != new_extents) {
+	if (ah->allocated != ah->requested) {
 		log_error("Insufficient suitable %sallocatable extents "
 			  "for logical volume %s: %u more required",
 			  can_split ? "" : "contiguous ",
 			  lv ? lv->name : "",
-			  (new_extents - allocated) * ah->area_count
-			  / ah->area_multiple);
+			  ah->requested - ah->allocated);
 		goto out;
 	}
 
@@ -1261,19 +1390,20 @@ struct alloc_handle *allocate_extents(st
 
 	if (!(ah = _alloc_init(vg->cmd, vg->cmd->mem, segtype, alloc, mirrors,
 			       stripes, log_count, mirrored_pv,
-			       mirrored_pe, parallel_areas))) {
+			       mirrored_pe, parallel_areas, lv, extents))) {
 		stack;
 		return NULL;
 	}
 
 	if (!segtype_is_virtual(segtype) &&
-	    !_allocate(ah, vg, lv, status, (lv ? lv->le_count : 0) + extents,
-		       allocatable_pvs, stripes, mirrors, segtype)) {
+	    !_allocate(ah, vg, lv, status, allocatable_pvs)) {
 		stack;
 		alloc_destroy(ah);
 		return NULL;
 	}
 
+	/* FIXME? */
+	ah->total_area_len = extents;
 	return ah;
 }
 
@@ -1326,33 +1456,64 @@ int lv_add_segment(struct alloc_handle *
 }
 
 /*
+ * search alloced_areas for log device.
+ * For multiple log device, set index to restart the search.
+ */
+static struct list * _find_next_log_allocation(struct alloc_handle *ah,
+					       uint32_t *start_index)
+{
+	uint32_t s;
+
+	for (s = start_index ? *start_index : 0; s < ah->nr_requests; s++) {
+		if (ah->requests[s].flags & ALLOC_TYPE_LOG) {
+			if (start_index)
+				*start_index = s;
+			return &ah->alloced_areas[ah->requests[s].index];
+		}
+	}
+
+	return NULL;
+}
+
+/*
  * Turn an empty LV into a mirror log.
  */
 int lv_add_log_segment(struct alloc_handle *ah, struct logical_volume *log_lv)
 {
 	struct lv_segment *seg;
+	struct list *log_alloc;
+	struct alloced_area *log_area;
 
 	if (list_size(&log_lv->segments)) {
 		log_error("Log segments can only be added to an empty LV");
 		return 0;
 	}
 
-	if (!(seg = alloc_lv_segment(log_lv->vg->cmd->mem,
-				     get_segtype_from_string(log_lv->vg->cmd,
-							     "striped"),
-				     log_lv, 0, ah->log_area.len, MIRROR_LOG,
-				     0, NULL, 1, ah->log_area.len, 0, 0, 0))) {
-		log_error("Couldn't allocate new mirror log segment.");
+	/* FIXME: assumes only one log allocation */
+	if (!(log_alloc= _find_next_log_allocation(ah, NULL))) {
+		log_error("Log area is not allocated\n");
 		return 0;
 	}
 
-	if (!set_lv_segment_area_pv(seg, 0, ah->log_area.pv, ah->log_area.pe)) {
-		stack;
-		return 0;
+	list_iterate_items(log_area, log_alloc) {
+		if (!(seg = alloc_lv_segment(log_lv->vg->cmd->mem,
+				     get_segtype_from_string(log_lv->vg->cmd,
+							     "striped"),
+				     log_lv, 0, log_area->len, MIRROR_LOG,
+				     0, NULL, 1, log_area->len, 0, 0, 0))) {
+			log_error("Couldn't allocate new mirror log segment.");
+			return 0;
+		}
+
+		if (!set_lv_segment_area_pv(seg, 0, log_area->pv, log_area->pe)) {
+			stack;
+			return 0;
+		}
+
+		list_add(&log_lv->segments, &seg->list);
+		log_lv->le_count += log_area->len;
 	}
 
-	list_add(&log_lv->segments, &seg->list);
-	log_lv->le_count += ah->log_area.len;
 	log_lv->size += (uint64_t) log_lv->le_count *log_lv->vg->extent_size;
 
 	if (log_lv->vg->fid->fmt->ops->lv_setup &&

[Date Prev][Date Next]   [Thread Prev][Thread Next]   [Thread Index] [Date Index] [Author Index]