Example 1
    def do_partitioning(self):
        for parted in self.partition_scheme.parteds:
            pu.make_label(parted.name, parted.label)
            for prt in parted.partitions:
                pu.make_partition(prt.device, prt.begin, prt.end, prt.type)
                for flag in prt.flags:
                    pu.set_partition_flag(prt.device, prt.count, flag)

        # creating meta disks
        for md in self.partition_scheme.mds:
            mu.mdcreate(md.name, md.level, *md.devices)

        # creating physical volumes
        for pv in self.partition_scheme.pvs:
            lu.pvcreate(pv.name)

        # creating volume groups
        for vg in self.partition_scheme.vgs:
            lu.vgcreate(vg.name, *vg.pvnames)

        # creating logical volumes
        for lv in self.partition_scheme.lvs:
            lu.lvcreate(lv.vgname, lv.name, lv.size)

        # making file systems
        for fs in self.partition_scheme.fss:
            fu.make_fs(fs.type, fs.options, fs.label, fs.device)
Example 2
    def do_partitioning(self):
        for parted in self.partition_scheme.parteds:
            pu.make_label(parted.name, parted.label)
            for prt in parted.partitions:
                pu.make_partition(prt.device, prt.begin, prt.end, prt.type)
                for flag in prt.flags:
                    pu.set_partition_flag(prt.device, prt.count, flag)
                if prt.guid:
                    pu.set_gpt_type(prt.device, prt.count, prt.guid)

        # creating meta disks
        for md in self.partition_scheme.mds:
            mu.mdcreate(md.name, md.level, *md.devices)

        # creating physical volumes
        for pv in self.partition_scheme.pvs:
            lu.pvcreate(pv.name)

        # creating volume groups
        for vg in self.partition_scheme.vgs:
            lu.vgcreate(vg.name, *vg.pvnames)

        # creating logical volumes
        for lv in self.partition_scheme.lvs:
            lu.lvcreate(lv.vgname, lv.name, lv.size)

        # making file systems
        for fs in self.partition_scheme.fss:
            fu.make_fs(fs.type, fs.options, fs.label, fs.device)
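The loops above assume a partition_scheme object exposing parteds, mds, pvs, vgs, lvs and fss collections. Below is a minimal sketch of that assumed data model, written with dataclasses for brevity; the field names mirror the attributes the loops read, while the types and defaults are assumptions, not the project's actual classes.

# A hypothetical sketch of the data model do_partitioning() iterates over.
# Field names mirror the attributes read by the loops above; everything
# else (types, defaults) is an assumption.
from dataclasses import dataclass, field
from typing import List, Optional


@dataclass
class Partition:
    device: str          # disk the partition lives on, e.g. '/dev/sda'
    begin: int           # start offset
    end: int             # end offset
    type: str            # partition type, e.g. 'primary'
    count: int           # partition number on the device
    flags: List[str] = field(default_factory=list)  # e.g. ['bios_grub']
    guid: Optional[str] = None                      # GPT type GUID, if any


@dataclass
class Parted:
    name: str            # block device, e.g. '/dev/sda'
    label: str           # partition table type, e.g. 'gpt'
    partitions: List[Partition] = field(default_factory=list)


@dataclass
class PartitionScheme:
    parteds: List[Parted] = field(default_factory=list)
    mds: list = field(default_factory=list)   # software RAID devices
    pvs: list = field(default_factory=list)   # LVM physical volumes
    vgs: list = field(default_factory=list)   # LVM volume groups
    lvs: list = field(default_factory=list)   # LVM logical volumes
    fss: list = field(default_factory=list)   # file systems to create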
Example 3
    def test_mdcreate_ok(self, mock_exec, mock_mddisplay,
                         mock_bdevs, mock_mdclean):
        # should check if md already exists
        # should check if md level is valid
        # should check if all necessary devices exist
        # should check if the devices are not already part of some md
        # should clean md metadata which may be present on the devices
        # should run mdadm command to create new md

        mock_mddisplay.return_value = \
            [{'name': '/dev/md10', 'devices': ['/dev/fake10']},
             # should also accept md entries with a missing 'devices' key
             {'name': '/dev/md11'}]
        mock_bdevs.return_value = [{'device': '/dev/fake1'},
                                   {'device': '/dev/fake2'}]

        mu.mdcreate('/dev/md0', 'mirror', '/dev/fake1', '/dev/fake2')
        mock_mdclean_expected_calls = [mock.call('/dev/fake1'),
                                       mock.call('/dev/fake2')]
        self.assertEqual(mock_mdclean_expected_calls,
                         mock_mdclean.call_args_list)
        mock_exec.assert_called_once_with(
            'mdadm', '--create', '--force', '/dev/md0', '-e0.90',
            '--level=mirror',
            '--raid-devices=2', '/dev/fake1', '/dev/fake2',
            check_exit_code=[0])
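The order of the mock arguments above corresponds to a stack of mock.patch decorators: patches apply bottom-up, so the decorator closest to the function supplies the first mock argument after self. Below is a minimal sketch of the assumed setup; all patch targets are placeholders, not the project's real module paths.

# Hypothetical decorator stack matching the argument order
# (mock_exec, mock_mddisplay, mock_bdevs, mock_mdclean).
# mock.patch decorators apply bottom-up: the patch closest to the function
# supplies the first mock argument after self. The target strings below are
# placeholders for the real module paths.
import unittest
from unittest import mock


class TestMdUtils(unittest.TestCase):

    @mock.patch('md_utils.mdclean')            # outermost -> mock_mdclean
    @mock.patch('md_utils.get_block_devices')  # placeholder -> mock_bdevs
    @mock.patch('md_utils.mddisplay')          # -> mock_mddisplay
    @mock.patch('md_utils.utils.execute')      # innermost -> mock_exec
    def test_mdcreate_ok(self, mock_exec, mock_mddisplay,
                         mock_bdevs, mock_mdclean):
        ...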
Example 4
    def test_mdcreate_ok(self, mock_exec, mock_mddisplay, mock_bdevs,
                         mock_mdclean):
        # should check if md already exists
        # should check if md level is valid
        # should check if all necessary devices exist
        # should check if the devices are not already part of some md
        # should clean md metadata which may be present on the devices
        # should run mdadm command to create new md

        mock_mddisplay.return_value = \
            [{'name': '/dev/md10', 'devices': ['/dev/fake10']}]
        mock_bdevs.return_value = [{
            'device': '/dev/fake1'
        }, {
            'device': '/dev/fake2'
        }]

        mu.mdcreate('/dev/md0', 'mirror', '/dev/fake1', '/dev/fake2')
        mock_exec.assert_called_once_with('mdadm',
                                          '--create',
                                          '--force',
                                          '/dev/md0',
                                          '-e1.2',
                                          '--level=mirror',
                                          '--raid-devices=2',
                                          '/dev/fake1',
                                          '/dev/fake2',
                                          check_exit_code=[0])
Example 5
 def test_mdcreate_device_clean(self, mock_mddisplay,
                                mock_bdevs, mock_mdclean, mock_exec):
     # should clean md metadata on all devices before building new md
     mock_mddisplay.return_value = []
     mock_bdevs.return_value = [{'device': '/dev/fake1'},
                                {'device': '/dev/fake2'}]
     mu.mdcreate('/dev/md0', 'mirror', '/dev/fake1', '/dev/fake2')
     expected_calls = [mock.call('/dev/fake1'), mock.call('/dev/fake2')]
     self.assertEqual(mock_mdclean.call_args_list, expected_calls)
Example 6
 def test_mdcreate_device_clean(self, mock_mddisplay, mock_bdevs,
                                mock_mdclean, mock_exec):
     # should clean md metadata on all devices before building new md
     mock_mddisplay.return_value = []
     mock_bdevs.return_value = [{
         'device': '/dev/fake1'
     }, {
         'device': '/dev/fake2'
     }]
     mu.mdcreate('/dev/md0', 'mirror', '/dev/fake1', '/dev/fake2')
     expected_calls = [mock.call('/dev/fake1'), mock.call('/dev/fake2')]
     self.assertEqual(mock_mdclean.call_args_list, expected_calls)
Example 7
    def test_mdcreate_ok(self, mock_exec, mock_mddisplay,
                         mock_bdevs, mock_mdclean):
        # should check if md already exists
        # should check if md level is valid
        # should check if all necessary devices exist
        # should check if the devices are not already part of some md
        # should clean md metadata which may be present on the devices
        # should run mdadm command to create new md

        mock_mddisplay.return_value = \
            [{'name': '/dev/md10', 'devices': ['/dev/fake10']}]
        mock_bdevs.return_value = [{'device': '/dev/fake1'},
                                   {'device': '/dev/fake2'}]

        mu.mdcreate('/dev/md0', 'mirror', '/dev/fake1', '/dev/fake2')
        mock_exec.assert_called_once_with(
            'mdadm', '--create', '--force', '/dev/md0', '-e1.2',
            '--level=mirror',
            '--raid-devices=2', '/dev/fake1', '/dev/fake2',
            check_exit_code=[0])
Example 8
    def test_mdcreate_ok(self, mock_exec, mock_mddisplay, mock_bdevs,
                         mock_mdclean):
        # should check if md already exists
        # should check if md level is valid
        # should check if all necessary devices exist
        # should check if the devices are not already part of some md
        # should clean md metadata which may be present on the devices
        # should run mdadm command to create new md

        mock_mddisplay.return_value = \
            [{'name': '/dev/md10', 'devices': ['/dev/fake10']},
             # should also accept md entries with a missing 'devices' key
             {'name': '/dev/md11'}]
        mock_bdevs.return_value = [{
            'device': '/dev/fake1'
        }, {
            'device': '/dev/fake2'
        }]

        mu.mdcreate('/dev/md0', 'mirror', '/dev/fake1', '/dev/fake2')
        mock_mdclean_expected_calls = [
            mock.call('/dev/fake1'),
            mock.call('/dev/fake2')
        ]
        self.assertEqual(mock_mdclean_expected_calls,
                         mock_mdclean.call_args_list)
        mock_exec.assert_called_once_with('mdadm',
                                          '--create',
                                          '--force',
                                          '/dev/md0',
                                          '-e0.90',
                                          '--level=mirror',
                                          '--raid-devices=2',
                                          '/dev/fake1',
                                          '/dev/fake2',
                                          check_exit_code=[0])
Example 9
    def do_partitioning(self):
        LOG.debug('--- Partitioning disks (do_partitioning) ---')
        # If disks are not wiped out at all, it is likely they contain lvm
        # and md metadata which will prevent re-creating a partition table,
        # failing with a 'device is busy' error.
        mu.mdclean_all()
        lu.lvremove_all()
        lu.vgremove_all()
        lu.pvremove_all()

        for parted in self.partition_scheme.parteds:
            for prt in parted.partitions:
                # We wipe out the beginning of every new partition
                # even before creating it. This allows us to avoid a possible
                # interactive dialog if some data (metadata or a file system)
                # is present on this new partition, and it also prevents udev
                # from hanging while trying to parse this data.
                utils.execute('dd',
                              'if=/dev/zero',
                              'bs=1M',
                              'seek=%s' % max(prt.begin - 1, 0),
                              'count=2',
                              'of=%s' % prt.device,
                              check_exit_code=[0])

            pu.make_label(parted.name, parted.label)
            for prt in parted.partitions:
                pu.make_partition(prt.device, prt.begin, prt.end, prt.type)
                for flag in prt.flags:
                    pu.set_partition_flag(prt.device, prt.count, flag)
                if prt.guid:
                    pu.set_gpt_type(prt.device, prt.count, prt.guid)

        # If one creates partitions with the same boundaries as last time,
        # there might be md and lvm metadata on those partitions. To prevent
        # the creation of md and lvm devices from failing, we need to make
        # sure any stale metadata is wiped out.
        mu.mdclean_all()
        lu.lvremove_all()
        lu.vgremove_all()
        lu.pvremove_all()

        # creating meta disks
        for md in self.partition_scheme.mds:
            mu.mdcreate(md.name, md.level, *md.devices)

        # creating physical volumes
        for pv in self.partition_scheme.pvs:
            lu.pvcreate(pv.name,
                        metadatasize=pv.metadatasize,
                        metadatacopies=pv.metadatacopies)

        # creating volume groups
        for vg in self.partition_scheme.vgs:
            lu.vgcreate(vg.name, *vg.pvnames)

        # creating logical volumes
        for lv in self.partition_scheme.lvs:
            lu.lvcreate(lv.vgname, lv.name, lv.size)

        # making file systems
        for fs in self.partition_scheme.fss:
            found_images = [
                img for img in self.image_scheme.images
                if img.target_device == fs.device
            ]
            if not found_images:
                fu.make_fs(fs.type, fs.options, fs.label, fs.device)
Example 10
    def do_partitioning(self):
        LOG.debug('--- Partitioning disks (do_partitioning) ---')
        # If disks are not wiped out at all, it is likely they contain lvm
        # and md metadata which will prevent re-creating a partition table,
        # failing with a 'device is busy' error.
        mu.mdclean_all()
        lu.lvremove_all()
        lu.vgremove_all()
        lu.pvremove_all()

        # Blacklist udev rules by overriding the rules that already exist
        # in /lib/ with symlinks to an empty rule file in /etc/udev/rules.d.
        # 'parted' generates too many udev events in a short period of time,
        # so we need to speed up their processing, otherwise partitioning
        # is doomed.
        empty_rule_path = os.path.join(CONF.udev_rules_dir,
                                       os.path.basename(CONF.udev_empty_rule))
        with open(empty_rule_path, 'w') as f:
            f.write('#\n')
        LOG.debug("Enabling udev's rules blacklisting")
        for rule in os.listdir(CONF.udev_rules_lib_dir):
            dst = os.path.join(CONF.udev_rules_dir, rule)
            if os.path.isdir(dst):
                continue
            if dst.endswith('.rules'):
                # For successful blacklisting, an already existing file in
                # /etc whose name overlaps with one in /lib should be renamed
                # prior to creating the symlink.
                try:
                    if os.path.exists(dst):
                        os.rename(dst, dst[:-len('.rules')] +
                                  CONF.udev_rename_substr)
                except OSError:
                    LOG.debug("Skipping udev rule %s blacklising" % dst)
                else:
                    os.symlink(empty_rule_path, dst)
        utils.execute('udevadm', 'control', '--reload-rules',
                      check_exit_code=[0])

        for parted in self.driver.partition_scheme.parteds:
            for prt in parted.partitions:
                # We wipe out the beginning of every new partition
                # even before creating it. This allows us to avoid a possible
                # interactive dialog if some data (metadata or a file system)
                # is present on this new partition, and it also prevents udev
                # from hanging while trying to parse this data.
                utils.execute('dd', 'if=/dev/zero', 'bs=1M',
                              'seek=%s' % max(prt.begin - 3, 0), 'count=5',
                              'of=%s' % prt.device, check_exit_code=[0])
                # Also wipe out the end of every new partition.
                # Different versions of md store metadata in different places.
                # Exit code 1 is also accepted to handle the situation when
                # 'no space left on device' occurs.
                utils.execute('dd', 'if=/dev/zero', 'bs=1M',
                              'seek=%s' % max(prt.end - 3, 0), 'count=5',
                              'of=%s' % prt.device, check_exit_code=[0, 1])

        for parted in self.driver.partition_scheme.parteds:
            pu.make_label(parted.name, parted.label)
            for prt in parted.partitions:
                pu.make_partition(prt.device, prt.begin, prt.end, prt.type)
                for flag in prt.flags:
                    pu.set_partition_flag(prt.device, prt.count, flag)
                if prt.guid:
                    pu.set_gpt_type(prt.device, prt.count, prt.guid)
                # If any partition to be created doesn't exist, it's an error,
                # probably the 'device or resource busy' issue again.
                if not os.path.exists(prt.name):
                    raise errors.PartitionNotFoundError(
                        'Partition %s not found after creation' % prt.name)

        # disable udev's rules blacklisting
        LOG.debug("Disabling udev's rules blacklisting")
        for rule in os.listdir(CONF.udev_rules_dir):
            src = os.path.join(CONF.udev_rules_dir, rule)
            if os.path.isdir(src):
                continue
            if src.endswith('.rules'):
                if os.path.islink(src):
                    try:
                        os.remove(src)
                    except OSError:
                        LOG.debug(
                            "Skipping udev rule %s de-blacklisting" % src)
            elif src.endswith(CONF.udev_rename_substr):
                try:
                    if os.path.exists(src):
                        os.rename(src, src[:-len(CONF.udev_rename_substr)] +
                                  '.rules')
                except OSError:
                    LOG.debug("Skipping udev rule %s de-blacklisting" % src)
        utils.execute('udevadm', 'control', '--reload-rules',
                      check_exit_code=[0])
        # NOTE(agordeev): re-create all the links which were skipped by udev
        # while blacklisted
        # NOTE(agordeev): do subsystem match, otherwise it will get stuck
        utils.execute('udevadm', 'trigger', '--subsystem-match=block',
                      check_exit_code=[0])
        utils.execute('udevadm', 'settle', '--quiet', check_exit_code=[0])

        # If one creates partitions with the same boundaries as last time,
        # there might be md and lvm metadata on those partitions. To prevent
        # the creation of md and lvm devices from failing, we need to make
        # sure any stale metadata is wiped out.
        mu.mdclean_all()
        lu.lvremove_all()
        lu.vgremove_all()
        lu.pvremove_all()

        # creating meta disks
        for md in self.driver.partition_scheme.mds:
            mu.mdcreate(md.name, md.level, *md.devices)

        # creating physical volumes
        for pv in self.driver.partition_scheme.pvs:
            lu.pvcreate(pv.name, metadatasize=pv.metadatasize,
                        metadatacopies=pv.metadatacopies)

        # creating volume groups
        for vg in self.driver.partition_scheme.vgs:
            lu.vgcreate(vg.name, *vg.pvnames)

        # creating logical volumes
        for lv in self.driver.partition_scheme.lvs:
            lu.lvcreate(lv.vgname, lv.name, lv.size)

        # making file systems
        for fs in self.driver.partition_scheme.fss:
            found_images = [img for img in self.driver.image_scheme.images
                            if img.target_device == fs.device]
            if not found_images:
                fu.make_fs(fs.type, fs.options, fs.label, fs.device)
Example 11
    def do_partitioning(self):
        LOG.debug('--- Partitioning disks (do_partitioning) ---')
        # If disks are not wiped out at all, it is likely they contain lvm
        # and md metadata which will prevent re-creating a partition table,
        # failing with a 'device is busy' error.
        mu.mdclean_all()
        lu.lvremove_all()
        lu.vgremove_all()
        lu.pvremove_all()

        # Blacklist udev rules by overriding the rules that already exist
        # in /lib/ with symlinks to an empty rule file in /etc/udev/rules.d.
        # 'parted' generates too many udev events in a short period of time,
        # so we need to speed up their processing, otherwise partitioning
        # is doomed.
        empty_rule_path = os.path.join(CONF.udev_rules_dir,
                                       os.path.basename(CONF.udev_empty_rule))
        with open(empty_rule_path, 'w') as f:
            f.write('#\n')
        LOG.debug("Enabling udev's rules blacklisting")
        for rule in os.listdir(CONF.udev_rules_lib_dir):
            dst = os.path.join(CONF.udev_rules_dir, rule)
            if os.path.isdir(dst):
                continue
            if dst.endswith('.rules'):
                # For successful blacklisting, an already existing file in
                # /etc whose name overlaps with one in /lib should be renamed
                # prior to creating the symlink.
                try:
                    if os.path.exists(dst):
                        os.rename(
                            dst,
                            dst[:-len('.rules')] + CONF.udev_rename_substr)
                except OSError:
                    LOG.debug("Skipping udev rule %s blacklising" % dst)
                else:
                    os.symlink(empty_rule_path, dst)
        utils.execute('udevadm',
                      'control',
                      '--reload-rules',
                      check_exit_code=[0])

        for parted in self.driver.partition_scheme.parteds:
            for prt in parted.partitions:
                # We wipe out the beginning of every new partition
                # even before creating it. This allows us to avoid a possible
                # interactive dialog if some data (metadata or a file system)
                # is present on this new partition, and it also prevents udev
                # from hanging while trying to parse this data.
                utils.execute('dd',
                              'if=/dev/zero',
                              'bs=1M',
                              'seek=%s' % max(prt.begin - 3, 0),
                              'count=5',
                              'of=%s' % prt.device,
                              check_exit_code=[0])
                # Also wipe out the end of every new partition.
                # Different versions of md store metadata in different places.
                # Exit code 1 is also accepted to handle the situation when
                # 'no space left on device' occurs.
                utils.execute('dd',
                              'if=/dev/zero',
                              'bs=1M',
                              'seek=%s' % max(prt.end - 3, 0),
                              'count=5',
                              'of=%s' % prt.device,
                              check_exit_code=[0, 1])

        for parted in self.driver.partition_scheme.parteds:
            pu.make_label(parted.name, parted.label)
            for prt in parted.partitions:
                pu.make_partition(prt.device, prt.begin, prt.end, prt.type)
                for flag in prt.flags:
                    pu.set_partition_flag(prt.device, prt.count, flag)
                if prt.guid:
                    pu.set_gpt_type(prt.device, prt.count, prt.guid)
                # If any partition to be created doesn't exist, it's an error,
                # probably the 'device or resource busy' issue again.
                if not os.path.exists(prt.name):
                    raise errors.PartitionNotFoundError(
                        'Partition %s not found after creation' % prt.name)

        # disable udev's rules blacklisting
        LOG.debug("Disabling udev's rules blacklisting")
        for rule in os.listdir(CONF.udev_rules_dir):
            src = os.path.join(CONF.udev_rules_dir, rule)
            if os.path.isdir(src):
                continue
            if src.endswith('.rules'):
                if os.path.islink(src):
                    try:
                        os.remove(src)
                    except OSError:
                        LOG.debug("Skipping udev rule %s de-blacklisting" %
                                  src)
            elif src.endswith(CONF.udev_rename_substr):
                try:
                    if os.path.exists(src):
                        os.rename(
                            src,
                            src[:-len(CONF.udev_rename_substr)] + '.rules')
                except OSError:
                    LOG.debug("Skipping udev rule %s de-blacklisting" % src)
        utils.execute('udevadm',
                      'control',
                      '--reload-rules',
                      check_exit_code=[0])
        # NOTE(agordeev): re-create all the links which were skipped by udev
        # while blacklisted
        utils.execute('udevadm', 'trigger', check_exit_code=[0])
        utils.execute('udevadm', 'settle', '--quiet', check_exit_code=[0])

        # If one creates partitions with the same boundaries as last time,
        # there might be md and lvm metadata on those partitions. To prevent
        # the creation of md and lvm devices from failing, we need to make
        # sure any stale metadata is wiped out.
        mu.mdclean_all()
        lu.lvremove_all()
        lu.vgremove_all()
        lu.pvremove_all()

        # creating meta disks
        for md in self.driver.partition_scheme.mds:
            mu.mdcreate(md.name, md.level, *md.devices)

        # creating physical volumes
        for pv in self.driver.partition_scheme.pvs:
            lu.pvcreate(pv.name,
                        metadatasize=pv.metadatasize,
                        metadatacopies=pv.metadatacopies)

        # creating volume groups
        for vg in self.driver.partition_scheme.vgs:
            lu.vgcreate(vg.name, *vg.pvnames)

        # creating logical volumes
        for lv in self.driver.partition_scheme.lvs:
            lu.lvcreate(lv.vgname, lv.name, lv.size)

        # making file systems
        for fs in self.driver.partition_scheme.fss:
            found_images = [
                img for img in self.driver.image_scheme.images
                if img.target_device == fs.device
            ]
            if not found_images:
                fu.make_fs(fs.type, fs.options, fs.label, fs.device)
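In the last two examples the de-blacklisting block only runs if the partitioning steps succeed; wrapping the enable/disable logic in a context manager is one way to guarantee the udev rules are always restored. Below is a minimal sketch of that idea; the enable/disable callables stand in for the two blocks shown above and their names are hypothetical.

# Hypothetical wrapper pairing udev-rule blacklisting with its cleanup, so
# the rules are restored even if partitioning raises. The callables passed
# in stand for the "enable" and "disable" blocks shown in the examples.
import contextlib


@contextlib.contextmanager
def udev_rules_blacklisted(enable, disable):
    enable()
    try:
        yield
    finally:
        disable()


# Usage sketch inside do_partitioning():
#
#     with udev_rules_blacklisted(blacklist_rules, restore_rules):
#         ...  # dd wipes and make_label/make_partition calls go here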