
[PATCH] RHEL4.8 #233050 - ports of lvm PV size clamping from RHEL5



Hi,

lvm partitioning (clamping of PV size) again, this time for 4.8.
This is a fix for 4.8 #233050. It required porting some commits
from 5.3:

commit cc9d0f4e57b11a37fc5d33d0374509e43a97840c
commit 8b4c702d0c2c6130c5263a4944405efa1301ced9
commit 4f9c6e49d113a88a28c55c51bb5eab6ad756612b
commit a68ab7d2823836ee90171cf419864e248ad99ce7
commit 4aa9ca1c35b867fa5a4d94c41591700ca7ab5edb
(5.3 #415871)
commit 08233b0c42f8b453ff7d8bde03c9adc57d92d7ed
(5.3 #463780)

I tested the patch with the reproducer from the bz and with the
reproducers from #415871 and #463780, which correspond to the last
two commits in the list above.
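
For anyone reviewing who hasn't followed the 5.3 work, here is a minimal
standalone sketch of the clamping arithmetic the ported commits introduce.
It just mirrors the new clampPVSize()/clampLVSizeRequest() bodies from
lvm.py in the patch below; the sizes used in the calls are made-up
illustration values.

import math

def clampPVSize(pvsize, pesize):
    # usable PV space in MB: clamp down to a whole number of extents
    # (pesize is in KB)
    pvsize_kb = pvsize * 1024.0
    return long(math.floor(pvsize_kb / pesize) * pesize / 1024)

def clampLVSizeRequest(size, pe, roundup=0):
    # LV request in MB clamped to a PE multiple, rounding up or down
    if roundup:
        func = math.ceil
    else:
        func = math.floor
    return (long(func((size * 1024L) / pe)) * pe) / 1024

print clampPVSize(10001, 32768)                    # -> 9984 (312 extents of 32 MB)
print clampLVSizeRequest(100.0, 32768, roundup=1)  # -> 128
print clampLVSizeRequest(100.0, 32768)             # -> 96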

Note: I didn't port the patches fixing other corner cases that were
hit and fixed in 5.3:
- wiping old lvm metadata
 https://bugzilla.redhat.com/show_bug.cgi?id=468431
 https://bugzilla.redhat.com/show_bug.cgi?id=469700
- handling corner case when clamped size == partition size
 https://bugzilla.redhat.com/show_bug.cgi?id=468944
- computing size of preexisting VG in UI
 https://bugzilla.redhat.com/show_bug.cgi?id=217913
- lvm on raid chunk alignment
 https://bugzilla.redhat.com/show_bug.cgi?id=463431

Radek

diff --git a/autopart.py b/autopart.py
index e25b366..af576f5 100644
--- a/autopart.py
+++ b/autopart.py
@@ -500,17 +500,25 @@ def growLogicalVolumes(diskset, requests):
 	    log("No growable logical volumes defined in VG %s.", vgreq)
 	    continue
 
-	log("VG %s has these growable logical volumes: %s",  vgreq.volumeGroupName, growreqs)
+        log("VG %s has these growable logical volumes: %s",  vgreq.volumeGroupName, reduce(lambda x,y: x + [y.uniqueID], growreqs, []))
 
 #	print "VG %s has these growable logical volumes: %s" % (vgreq.volumeGroupName, growreqs)
 
+	# get remaining free space
+        if DEBUG_LVM_GROW:
+            vgfree = lvm.getVGFreeSpace(vgreq, requests, diskset)
+            log("Free space in VG after initial partition formation = %s", (vgfree,))
+
 	# store size we are starting at
 	initsize = {}
 	cursize = {}
 	for req in growreqs:
 	    size = req.getActualSize(requests, diskset)
+            size = lvm.clampPVSize(size, vgreq.pesize)
 	    initsize[req.logicalVolumeName] = size
 	    cursize[req.logicalVolumeName] = size
+            if req.maxSizeMB:
+                req.maxSizeMB = lvm.clampPVSize(req.maxSizeMB, vgreq.pesize)
 #	    print "init sizes",req.logicalVolumeName, size
             if DEBUG_LVM_GROW:
 		log("init sizes for %s: %s",req.logicalVolumeName, size)
@@ -564,11 +572,14 @@ def growLogicalVolumes(diskset, requests):
 		
 		fraction = float(req.getStartSize())/float(totsize)
 
+		newsize = lvm.clampPVSize(vgfree*fraction, vgreq.pesize)
+                newsize += cursize[req.logicalVolumeName]
+
 		newsize = cursize[req.logicalVolumeName] + vgfree*fraction
 		if req.maxSizeMB:
 		    newsize = min(newsize, req.maxSizeMB)
 		    
-		req.size = lvm.clampLVSizeRequest(newsize, vgreq.pesize)
+		req.size = newsize
 		if req.size != cursize[req.logicalVolumeName]:
 		    nochange = 0
 
diff --git a/iw/lvm_dialog_gui.py b/iw/lvm_dialog_gui.py
index 2739300..a03fa34 100644
--- a/iw/lvm_dialog_gui.py
+++ b/iw/lvm_dialog_gui.py
@@ -45,7 +45,7 @@ class VolumeGroupEditor:
         else:
             pvlist = alt_pvlist
 	tspace = self.computeVGSize(pvlist, pesize)
-	uspace = self.computeLVSpaceNeeded(self.logvolreqs)
+	uspace = self.computeLVSpaceNeeded(self.logvolreqs, pesize)
 	fspace =  tspace - uspace
 
 	return (tspace, uspace, fspace)
@@ -71,8 +71,13 @@ class VolumeGroupEditor:
 	first = 1
         pvlist = self.getSelectedPhysicalVolumes(self.lvmlist.get_model())
 	for id in pvlist:
+            try:
+                pesize = int(self.peCombo.get_active_value())
+            except:
+                pesize = 32768
 	    pvreq = self.partitions.getRequestByID(id)
 	    pvsize = pvreq.getActualSize(self.partitions, self.diskset)
+            pvsize = lvm.clampPVSize(pvsize, pesize)
 	    if first:
 		minpvsize = pvsize
 		first = 0
@@ -82,8 +87,8 @@ class VolumeGroupEditor:
 	return minpvsize
 
 
-    def reclampLV(self, newpe):
-        """ given a new pe value, set logical volume sizes accordingly
+    def reclampLV(self, oldpe, newpe):
+        """ given an old and a new pe value, set logical volume sizes accordingly
 
         newpe - (int) new value of PE, in KB
         """
@@ -96,7 +101,7 @@ class VolumeGroupEditor:
         used = 0
 	resize = 0
         for lv in self.logvolreqs:
-            osize = lv.getActualSize(self.partitions, self.diskset)
+            osize = lv.getActualSize(self.partitions, self.diskset, pesize=oldpe)
             oldused = oldused + osize
             nsize = lvm.clampLVSizeRequest(osize, newpe, roundup=1)
 	    if nsize != osize:
@@ -130,7 +135,7 @@ class VolumeGroupEditor:
 		return 0
         
         for lv in self.logvolreqs:
-            osize = lv.getActualSize(self.partitions, self.diskset)
+            osize = lv.getActualSize(self.partitions, self.diskset, pesize=oldpe)
             nsize = lvm.clampLVSizeRequest(osize, newpe, roundup=1)
             lv.setSize(nsize)
 
@@ -192,7 +197,7 @@ class VolumeGroupEditor:
 
 	# now see if we need to fixup effect PV and LV sizes based on PE
         if curval > lastval:
-            rc = self.reclampLV(curval)
+            rc = self.reclampLV(lastval, curval)
             if not rc:
 		widget.set_active(lastidx)
 		return 0
@@ -201,7 +206,8 @@ class VolumeGroupEditor:
 	else:
 	    maxlv = lvm.getMaxLVSize(curval)
 	    for lv in self.logvolreqs:
-		lvsize = lv.getActualSize(self.partitions, self.diskset)
+		lvsize = lv.getActualSize(self.partitions, self.diskset,
+                             pesize=lastval)
 		if lvsize > maxlv:
 		    self.intf.messageWindow(_("Not enough space"),
 					    _("The physical extent size "
@@ -416,7 +422,8 @@ class VolumeGroupEditor:
             sizeEntry = gtk.Entry(16)
             lbl.set_mnemonic_widget(sizeEntry)
             if logrequest:
-                sizeEntry.set_text("%g" % (logrequest.getActualSize(self.partitions, self.diskset),))
+                pesize = int(self.peCombo.get_active_value())
+                sizeEntry.set_text("%Ld" % (logrequest.getActualSize(self.partitions, self.diskset, pesize=pesize),))
         else:
             lbl = createAlignedLabel(_("Size (MB):"))
             sizeEntry = gtk.Label(str(logrequest.size))
@@ -432,7 +439,7 @@ class VolumeGroupEditor:
 
             # add in size of current logical volume if it has a size
             if logrequest and not isNew:
-                maxlv = maxlv + logrequest.getActualSize(self.partitions, self.diskset)
+                maxlv = maxlv + logrequest.getActualSize(self.partitions, self.diskset, pesize=pesize)
             maxlabel = createAlignedLabel(_("(Max size is %s MB)") % (maxlv,))
             maintable.attach(maxlabel, 1, 2, row, row + 1)
 
@@ -637,7 +644,7 @@ class VolumeGroupEditor:
 		tmplogreqs.append(l)
 
 	    tmplogreqs.append(request)
-	    neededSpaceMB = self.computeLVSpaceNeeded(tmplogreqs)
+	    neededSpaceMB = self.computeLVSpaceNeeded(tmplogreqs, pesize)
 
 	    if neededSpaceMB > availSpaceMB:
 		self.intf.messageWindow(_("Not enough space"),
@@ -768,26 +775,28 @@ class VolumeGroupEditor:
 	availSpaceMB = 0
 	for id in pvlist:
 	    pvreq = self.partitions.getRequestByID(id)
-	    pvsize = pvreq.getActualSize(self.partitions, self.diskset)
-	    pvsize = lvm.clampPVSize(pvsize, curpe)
+            pvsize = pvreq.getActualSize(self.partitions, self.diskset)
+            pvsize = lvm.clampPVSize(pvsize, curpe)
 
 	    # have to clamp pvsize to multiple of PE
 	    availSpaceMB = availSpaceMB + pvsize
 
+        log("computeVGSize: vgsize is %s" % (availSpaceMB,))
 	return availSpaceMB
 
-    def computeLVSpaceNeeded(self, logreqs):
+    def computeLVSpaceNeeded(self, logreqs, pesize):
 	neededSpaceMB = 0
 	for lv in logreqs:
-	    neededSpaceMB = neededSpaceMB + lv.getActualSize(self.partitions, self.diskset)
+	    neededSpaceMB = neededSpaceMB + lv.getActualSize(self.partitions, self.diskset, pesize=pesize)
 
 	return neededSpaceMB
 
     def updateLogVolStore(self):
         self.logvolstore.clear()
+        pesize = int(self.peCombo.get_active_value())
         for lv in self.logvolreqs:
             iter = self.logvolstore.append()
-            size = lv.getActualSize(self.partitions, self.diskset)
+            size = lv.getActualSize(self.partitions, self.diskset, pesize=pesize)
             lvname = lv.logicalVolumeName
             mntpt = lv.mountpoint
             if lvname:
@@ -847,7 +856,7 @@ class VolumeGroupEditor:
 	    pvlist = self.getSelectedPhysicalVolumes(self.lvmlist.get_model())
 	    pesize = int(self.peCombo.get_active_value())
 	    availSpaceMB = self.computeVGSize(pvlist, pesize)
-	    neededSpaceMB = self.computeLVSpaceNeeded(self.logvolreqs)
+	    neededSpaceMB = self.computeLVSpaceNeeded(self.logvolreqs, pesize)
 
 	    if neededSpaceMB > availSpaceMB:
 		self.intf.messageWindow(_("Not enough space"),
@@ -869,7 +878,7 @@ class VolumeGroupEditor:
 	    if self.origvgrequest:
 		origvname = self.origvgrequest.volumeGroupName
 	    else:
-		origname = None
+		origvname = None
 
 	    if origvname != volname:
 		tmpreq = VolumeGroupRequestSpec(physvols = pvlist,
@@ -1051,7 +1060,8 @@ class VolumeGroupEditor:
 		    self.logvolstore.set_value(iter, 1, lvrequest.mountpoint)
 		else:
 		    self.logvolstore.set_value(iter, 1, "")
-		self.logvolstore.set_value(iter, 2, "%g" % (lvrequest.getActualSize(self.partitions, self.diskset)))
+                pesize = int(self.peCombo.get_active_value())
+		self.logvolstore.set_value(iter, 2, "%Ld" % (lvrequest.getActualSize(self.partitions, self.diskset, pesize=pesize)))
 
 	self.logvollist = gtk.TreeView(self.logvolstore)
         col = gtk.TreeViewColumn(_("Logical Volume Name"),
diff --git a/lvm.py b/lvm.py
index 43e533c..4caaa49 100644
--- a/lvm.py
+++ b/lvm.py
@@ -152,6 +152,13 @@ def vgremove(vgname):
     if flags.test or lvmDevicePresent == 0:
         return
 
+    # find the Physical Volumes which make up this Volume Group, so we
+    # can prune and recreate them.
+    pvs = []
+    for pv in pvlist():
+        if pv[1] == vgname:
+            pvs.append(pv[0])
+
     # we'll try to deactivate... if it fails, we'll probably fail on
     # the removal too... but it's worth a shot
     try:
@@ -161,12 +168,38 @@ def vgremove(vgname):
 
     args = ["lvm", "vgremove", vgname]
 
+    log(string.join(args, ' '))
     rc = iutil.execWithRedirect(args[0], args,
                                 stdout = output,
                                 stderr = output,
                                 searchPath = 1)
     if rc:
         raise SystemError, "vgremove failed"
+    # now iterate all the PVs we've just freed up, so we reclaim the metadata
+    # space.  This is an LVM bug, AFAICS.
+    for pvname in pvs:
+        args = ["lvm", "pvremove", pvname]
+
+        log(string.join(args, ' '))
+        rc = iutil.execWithRedirect(args[0], args,
+                                    stdout = output,
+                                    stderr = output,
+                                    searchPath = 1)
+
+        if rc:
+            raise SystemError, "pvremove failed"
+
+        args = ["lvm", "pvcreate", "-ff", "-y", "-v", pvname]
+
+        log(string.join(args, ' '))
+        rc = iutil.execWithRedirect(args[0], args,
+                                    stdout = output,
+                                    stderr = output,
+                                    searchPath = 1)
+
+        if rc:
+            raise SystemError, "pvcreate failed for %s" % (pvname,)
+
 
 def lvlist():
     global lvmDevicePresent
@@ -312,13 +345,10 @@ def clampLVSizeRequest(size, pe, roundup=0):
     """
 
     if roundup:
-	factor = 1
-    else:
-	factor = 0
-    if ((size*1024L) % pe) == 0:
-	return size
+        func = math.ceil
     else:
-	return ((long((size*1024L)/pe)+factor)*pe)/1024
+        func = math.floor
+    return (long(func((size*1024L)/pe))*pe)/1024
 
 def clampPVSize(pvsize, pesize):
     """Given a PV size and a PE, returns the usable space of the PV.
@@ -329,27 +359,9 @@ def clampPVSize(pvsize, pesize):
     pesize - PE size (in KB)
     """
 
-    # calculate the number of physical extents.  this is size / pesize
-    # with an appropriate factor for kb/mb matchup
-    numpes = math.floor(pvsize * 1024 / pesize)
-
-    # now, calculate our "real" overhead.  4 bytes for each PE + 128K
-    overhead = (4 * numpes / 1024) + 128
-
-    # now, heuristically, the max of ceil(pesize + 2*overhead) and
-    # ceil(2*overhead) is greater than the real overhead, so we won't
-    # get people in a situation where they overcommit the vg
-    one = math.ceil(pesize + 2 * overhead)
-    two = math.ceil(2 * overhead)
-
-    # now we have to do more unit conversion since our overhead in in KB
-    if one > two:
-        usable = pvsize - math.ceil(one / 1024.0)
-    else:
-        usable = pvsize - math.ceil(two / 1024.0)
-
-    # finally, clamp to being at a pesize boundary
-    return (long(usable*1024/pesize)*pesize)/1024
+    # we want Kbytes as a float for our math
+    pvsize *= 1024.0
+    return long((math.floor(pvsize / pesize) * pesize) / 1024)
 
 def getMaxLVSize(pe):
     """Given a PE size in KB, returns maximum size (in MB) of a logical volume.
@@ -409,5 +421,10 @@ def getVGUsedSpace(vgreq, requests, diskset):
 
 def getVGFreeSpace(vgreq, requests, diskset):
     used = getVGUsedSpace(vgreq, requests, diskset)
+    log("used space is %s" % (used,))
+     
+    total = vgreq.getActualSize(requests, diskset)
+    log("actual space is %s" % (total,))
+    return total - used
     
     return vgreq.getActualSize(requests, diskset) - used
diff --git a/partRequests.py b/partRequests.py
index 3b7603a..2942fa6 100644
--- a/partRequests.py
+++ b/partRequests.py
@@ -752,21 +752,19 @@ class VolumeGroupRequestSpec(RequestSpec):
     def getActualSize(self, partitions, diskset):
         """Return the actual size allocated for the request in megabytes."""
 
-        # this seems like a bogus check too...
-        if self.physicalVolumes is None:
-            return 0
-
         # if we have a preexisting size, use it
         if self.preexist and self.preexist_size:
-            totalspace = ((self.preexist_size / self.pesize) *
-                          self.pesize)
+            totalspace = lvm.clampPVSize(self.preexist_size, self.pesize)
         else:
             totalspace = 0
             for pvid in self.physicalVolumes:
                 pvreq = partitions.getRequestByID(pvid)
                 size = pvreq.getActualSize(partitions, diskset)
-                size = lvm.clampPVSize(size, self.pesize)
-                totalspace = totalspace + size
+                #log("size for pv %s is %s" % (pvid, size))
+                clamped = lvm.clampPVSize(size, self.pesize)
+                log("  got pv.size of %s, clamped to %s" % (size,clamped))
+                #log("  clamped size is %s" % (size,))
+                totalspace = totalspace + clamped
 
         return totalspace
 
@@ -862,16 +860,30 @@ class LogicalVolumeRequestSpec(RequestSpec):
                                              existing = self.preexist)
         return self.dev
 
-    def getActualSize(self, partitions, diskset):
+    def getActualSize(self, partitions, diskset, pesize=None):
         """Return the actual size allocated for the request in megabytes."""
+        retval = 0
+        vgreq = partitions.getRequestByID(self.volumeGroup)
+
+        if not pesize:
+            pesize = vgreq.pesize
+
         if self.percent:
-            vgreq = partitions.getRequestByID(self.volumeGroup)
 	    vgsize = vgreq.getActualSize(partitions, diskset)
 	    lvsize = int(self.percent * 0.01 * vgsize)
-	    lvsize = lvm.clampLVSizeRequest(lvsize, vgreq.pesize)
-            return lvsize
+            #lvsize = lvm.clampLVSizeRequest(lvsize, vgreq.pesize)
+            retval = lvsize
         else:
-            return self.size
+            retval = self.size
+
+        # lvm tools round up logical volumes using the volume group pe size.
+        # If the logical volume is preexisting do NOT touch it.
+        if not self.preexist:
+            retval = lvm.clampLVSizeRequest(retval, pesize)
+
+        return retval
+
+
 
     def getStartSize(self):
         """Return the starting size allocated for the request in megabytes."""
