[Date Prev][Date Next]   [Thread Prev][Thread Next]   [Thread Index] [Date Index] [Author Index]

[PATCH 3/4] Delay setting up lvs until other devices are scanned



lvs were set up (activated) as soon as the first pv of their vg
was found, however now that our udev rules only look at pvs we need
to delay setting them up at least until we have all pvs (in other words
until they are complete), as we need to have info about all lvs in a vg
for our snapshot and stripe handling code.

It is even necessary to delay setting up lvs until the scanning of all other
devices is done:

1) We won't know if there are duplicate vg names until all pvs are scanned
   (and we don't want to activate lvs of vgs with duplicate vg names)

2) Our lvm device filter expression (which filters out ignored disks /
   partitions) won't be complete until all disks / pvs are scanned

   An example: let's take a machine connected through a SAN; through this
   SAN it sees disks sda - sdj. Each of these 10 disks contains a standard
   (single disk) auto partition install, each with a vg name VolGroup, in
   other words when we see all disks we see 10 vgs named VolGroup. But the
   user selects only sde in the filter ui. Let's assume we scan disks in
   order sda - sde (which is not always true); now when sde2 (the pv) gets
   scanned the lvm device filter expression has been set up to ignore
   sda - sdd. If we were to activate the lv at this point, lvm would still
   see disks sde - sdj, and bring up the lvs of one of the 6 remaining vgs,
   not necessarily choosing the vg on sde.

   If however we delay activating the lvs until all other devices are
   scanned, then the lvm device filter expression will filter out
   sda - sdd and sdf - sdj, so lvm only sees sde and thus does the right
   thing.

Related: rhbz#591469
---
 storage/devices.py    |   16 ++++++++----
 storage/devicetree.py |   65 +++++++++++++++++++++++++++++++++++++------------
 2 files changed, 60 insertions(+), 21 deletions(-)

diff --git a/storage/devices.py b/storage/devices.py
index c24f4cd..ffd50c8 100644
--- a/storage/devices.py
+++ b/storage/devices.py
@@ -1711,7 +1711,7 @@ class LVMVolumeGroupDevice(DMDevice):
 
     def __init__(self, name, parents, size=None, free=None,
                  peSize=None, peCount=None, peFree=None, pvCount=None,
-                 lvNames=[], uuid=None, exists=None, sysfsPath=''):
+                 uuid=None, exists=None, sysfsPath=''):
         """ Create a LVMVolumeGroupDevice instance.
 
             Arguments:
@@ -1732,7 +1732,6 @@ class LVMVolumeGroupDevice(DMDevice):
                     peFree -- number of free extents
                     peCount -- total number of extents
                     pvCount -- number of PVs in this VG
-                    lvNames -- the names of this VG's LVs
                     uuid -- the VG's UUID
 
         """
@@ -1756,7 +1755,10 @@ class LVMVolumeGroupDevice(DMDevice):
         self.peCount = numeric_type(peCount)
         self.peFree = numeric_type(peFree)
         self.pvCount = numeric_type(pvCount)
-        self.lvNames = lvNames
+        self.lv_names = []
+        self.lv_uuids = []
+        self.lv_sizes = []
+        self.lv_attr = []
 
         # circular references, here I come
         self._lvs = []
@@ -1771,14 +1773,14 @@ class LVMVolumeGroupDevice(DMDevice):
         s = DMDevice.__str__(self)
         s += ("  free = %(free)s  PE Size = %(peSize)s  PE Count = %(peCount)s\n"
               "  PE Free = %(peFree)s  PV Count = %(pvCount)s\n"
-              "  LV Names = %(lvNames)s  modified = %(modified)s\n"
+              "  LV Names = %(lv_names)s  modified = %(modified)s\n"
               "  extents = %(extents)s  free space = %(freeSpace)s\n"
               "  free extents = %(freeExtents)s\n"
               "  PVs = %(pvs)s\n"
               "  LVs = %(lvs)s" %
               {"free": self.free, "peSize": self.peSize, "peCount": self.peCount,
                "peFree": self.peFree, "pvCount": self.pvCount,
-               "lvNames": self.lvNames, "modified": self.isModified,
+               "lv_names": self.lv_names, "modified": self.isModified,
                "extents": self.extents, "freeSpace": self.freeSpace,
                "freeExtents": self.freeExtents, "pvs": self.pvs, "lvs": self.lvs})
         return s
@@ -1791,6 +1793,10 @@ class LVMVolumeGroupDevice(DMDevice):
                   "pvCount": self.pvCount, "extents": self.extents,
                   "freeSpace": self.freeSpace,
                   "freeExtents": self.freeExtents,
+                  "lv_names": self.lv_names,
+                  "lv_uuids": self.lv_uuids,
+                  "lv_sizes": self.lv_sizes,
+                  "lv_attr": self.lv_attr,
                   "lvNames": [lv.name for lv in self.lvs]})
         return d
 
diff --git a/storage/devicetree.py b/storage/devicetree.py
index 9254fbb..39f1dcc 100644
--- a/storage/devicetree.py
+++ b/storage/devicetree.py
@@ -1452,12 +1452,21 @@ class DeviceTree(object):
             log.warning("luks device %s already in the tree"
                         % device.format.mapName)
 
-    def handleVgLvs(self, vg_device, lv_names, lv_uuids, lv_sizes, lv_attr):
+    def handleVgLvs(self, vg_device):
+        ret = False
         vg_name = vg_device.name
+        lv_names = vg_device.lv_names
+        lv_uuids = vg_device.lv_uuids
+        lv_sizes = vg_device.lv_sizes
+        lv_attr = vg_device.lv_attr
+
+        if not vg_device.complete:
+            log.warning("Skipping LVs for incomplete VG %s" % vg_name)
+            return False
 
         if not lv_names:
             log.debug("no LVs listed for VG %s" % vg_name)
-            return
+            return False
 
         # make a list of indices with snapshots at the end
         indices = range(len(lv_names))
@@ -1519,10 +1528,13 @@ class DeviceTree(object):
 
                 try:
                     lv_device.setup()
+                    ret = True
                 except DeviceError as (msg, name):
                     log.info("setup of %s failed: %s"
                                         % (lv_device.name, msg))
 
+        return ret
+
     def handleUdevLVMPVFormat(self, info, device):
         log_method_call(self, name=device.name, type=device.format.type)
         # lookup/create the VG and LVs
@@ -1535,11 +1547,6 @@ class DeviceTree(object):
         vg_device = self.getDeviceByName(vg_name)
         if vg_device:
             vg_device._addDevice(device)
-            for lv in vg_device.lvs:
-                try:
-                    lv.setup()
-                except DeviceError as (msg, name):
-                    log.info("setup of %s failed: %s" % (lv.name, msg))
         else:
             try:
                 vg_uuid = udev_device_get_vg_uuid(info)
@@ -1565,16 +1572,27 @@ class DeviceTree(object):
                                              exists=True)
             self._addDevice(vg_device)
 
-            try:
-                lv_names = udev_device_get_lv_names(info)
-                lv_uuids = udev_device_get_lv_uuids(info)
-                lv_sizes = udev_device_get_lv_sizes(info)
-                lv_attr = udev_device_get_lv_attr(info)
-            except KeyError as e:
-                log.warning("invalid data for %s: %s" % (device.name, e))
-                return
+        # Now we add any lv info found in this pv to the vg_device, we
+        # do this for all pvs as pvs only contain lv info for lvs which they
+        # contain themselves
+        try:
+            lv_names = udev_device_get_lv_names(info)
+            lv_uuids = udev_device_get_lv_uuids(info)
+            lv_sizes = udev_device_get_lv_sizes(info)
+            lv_attr = udev_device_get_lv_attr(info)
+        except KeyError as e:
+            log.warning("invalid data for %s: %s" % (device.name, e))
+            return
+
+        for i in range(len(lv_names)):
+            # Skip empty and already added lvs
+            if not lv_names[i] or lv_names[i] in vg_device.lv_names:
+                continue
 
-            self.handleVgLvs(vg_device, lv_names, lv_uuids, lv_sizes, lv_attr)
+            vg_device.lv_names.append(lv_names[i])
+            vg_device.lv_uuids.append(lv_uuids[i])
+            vg_device.lv_sizes.append(lv_sizes[i])
+            vg_device.lv_attr.append(lv_attr[i])
 
     def handleUdevMDMemberFormat(self, info, device):
         log_method_call(self, name=device.name, type=device.format.type)
@@ -1936,6 +1954,15 @@ class DeviceTree(object):
 
         self.intf.unusedRaidMembersWarning(self.unusedRaidMembers)
 
+    def _setupLvs(self):
+        ret = False
+
+        for device in self.getDevicesByType("lvmvg"):
+            if self.handleVgLvs(device):
+                ret = True
+
+        return ret
+
     def populate(self):
         """ Locate all storage devices. """
 
@@ -2008,6 +2035,12 @@ class DeviceTree(object):
                     devices.append(new_device)
 
             if len(devices) == 0:
+                # nothing is changing -- time to setup lvm lvs and scan them
+                # we delay this till all other devices are scanned so that
+                # 1) the lvm filter for ignored disks is completely setup
+                # 2) we have checked all devs for duplicate vg names
+                if self._setupLvs():
+                    continue
                 # nothing is changing -- we are finished building devices
                 break
 
-- 
1.7.0.1


[Date Prev][Date Next]   [Thread Prev][Thread Next]   [Thread Index] [Date Index] [Author Index]