Example #1
    def __init__(self, kickstart=False):

        if kickstart:
            self.ksparser = pykickstart.parser.KickstartParser(makeVersion())
            self.storage = blivet.Blivet(ksdata=self.ksparser.handler)
        else:
            self.storage = blivet.Blivet()

        blivet.formats.fs.NTFS._formattable = True

        self.storage.reset()
        self.storage.devicetree.populate()
        self.storage.devicetree.getActiveMounts()
        self.update_min_sizes_info()
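The snippet above shows the common entry point used throughout these examples: construct a Blivet instance, then scan the system. A minimal standalone sketch, assuming blivet is installed and the process has enough privileges to scan storage:

import blivet

b = blivet.Blivet()
b.reset()  # scan the system and (re)populate b.devicetree

# list the devices blivet discovered
for dev in b.devices:
    print(dev.name, dev.size)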
Example #2
    def test_new_lv_from_lvs(self):
        b = blivet.Blivet()
        pv = StorageDevice("pv1",
                           fmt=blivet.formats.get_format("lvmpv"),
                           size=Size("1 GiB"),
                           exists=True)
        vg = LVMVolumeGroupDevice("testvg", parents=[pv], exists=True)
        lv1 = LVMLogicalVolumeDevice("data_lv",
                                     parents=[vg],
                                     size=Size("500 MiB"),
                                     exists=True)
        lv2 = LVMLogicalVolumeDevice("metadata_lv",
                                     parents=[vg],
                                     size=Size("50 MiB"),
                                     exists=True)

        for dev in (pv, vg, lv1, lv2):
            b.devicetree._add_device(dev)

        # check that all the above devices are in the expected places
        self.assertEqual(set(b.devices), {pv, vg, lv1, lv2})
        self.assertEqual(set(b.vgs), {vg})
        self.assertEqual(set(b.lvs), {lv1, lv2})
        self.assertEqual(set(b.vgs[0].lvs), {lv1, lv2})

        self.assertEqual(vg.size, Size("1020 MiB"))
        self.assertEqual(lv1.size, Size("500 MiB"))
        self.assertEqual(lv2.size, Size("50 MiB"))

        # combine the two LVs into a thin pool (the LVs should become its internal LVs)
        pool = b.new_lv_from_lvs(vg,
                                 name="pool",
                                 seg_type="thin-pool",
                                 from_lvs=(lv1, lv2))

        # add the pool LV into the devicetree
        b.devicetree._add_device(pool)

        self.assertEqual(set(b.devices), {pv, vg, pool})
        self.assertEqual(set(b.vgs), {vg})
        self.assertEqual(set(b.lvs), {pool})
        self.assertEqual(set(b.vgs[0].lvs), {pool})
        self.assertEqual(set(b.vgs[0].lvs[0]._internal_lvs), {lv1, lv2})

        self.assertTrue(lv1.is_internal_lv)
        self.assertEqual(lv1.int_lv_type, LVMInternalLVtype.data)
        self.assertEqual(lv1.size, Size("500 MiB"))
        self.assertTrue(lv2.is_internal_lv)
        self.assertEqual(lv2.int_lv_type, LVMInternalLVtype.meta)
        self.assertEqual(lv2.size, Size("50 MiB"))

        self.assertEqual(pool.name, "testvg-pool")
        self.assertEqual(pool.size, Size("500 MiB"))
        self.assertEqual(pool.metadata_size, Size("50 MiB"))
        self.assertIs(pool.vg, vg)

        with patch("blivet.devices.lvm.blockdev.lvm") as lvm:
            with patch.object(pool, "_pre_create"):
                pool.create()
                self.assertTrue(lvm.thpool_convert.called)
Example #3
 def testNoKey(self):
     errors = []
     b = blivet.Blivet()
     b.createDevice(
         devices.LUKSDevice("name", format=formats.luks.LUKS(), parents=[]))
     errors += b._verifyLUKSDevicesHaveKey()
     self.assertNotEqual(errors, [])
Example #4
    def test_suggest_device_name(self):
        b = blivet.Blivet()

        with patch("blivet.devicetree.DeviceTree.names", []):
            name = b.suggest_device_name()
            self.assertEqual(name, "00")

            name = b.suggest_device_name(prefix="blivet")
            self.assertEqual(name, "blivet00")

            name = b.suggest_device_name(mountpoint="/")
            self.assertEqual(name, "root")

            name = b.suggest_device_name(prefix="blivet", mountpoint="/")
            self.assertEqual(name, "blivet_root")

            name = b.suggest_device_name(
                parent=blivet.devices.Device(name="parent"), mountpoint="/")
            self.assertEqual(name, "root")

        with patch("blivet.devicetree.DeviceTree.names", ["00"]):
            name = b.suggest_device_name()
            self.assertEqual(name, "01")

        with patch("blivet.devicetree.DeviceTree.names", ["parent-root"]):
            name = b.suggest_device_name(
                parent=blivet.devices.Device(name="parent"), mountpoint="/")
            self.assertEqual(name, "root00")
Example #5
    def setUp(self):
        self.storage = blivet.Blivet()

        # device status
        blivet.devices.StorageDevice.status = False
        blivet.devices.DMDevice.status = False
        blivet.devices.LUKSDevice.status = False
        blivet.devices.LVMVolumeGroupDevice.status = False
        blivet.devices.MDRaidArrayDevice.status = False
        blivet.devices.FileDevice.status = False

        # prevent PartitionDevice from trying to dig around in the partition's
        # geometry
        blivet.devices.PartitionDevice._setTargetSize = StorageDevice._setTargetSize
        blivet.devices.PartitionDevice.maxSize = StorageDevice.maxSize

        def partition_probe(device):
            if isinstance(device._partedPartition, Mock):
                # don't clobber a Mock we already set up here
                part_mock = device._partedPartition
            else:
                part_mock = Mock()

            attrs = {
                "getLength.return_value": int(device._size),
                "getDeviceNodeName.return_value": device.name,
                "type": parted.PARTITION_NORMAL
            }
            part_mock.configure_mock(**attrs)
            device._partedPartition = part_mock
            device._currentSize = device._size
            device._partType = parted.PARTITION_NORMAL
            device._bootable = False

        PartitionDevice.probe = partition_probe
Example #6
    def storage(self):
        if not self._storage:
            import blivet
            self._storage = blivet.Blivet(ksdata=self.ksdata)

            if self.instClass.defaultFS:
                self._storage.setDefaultFSType(self.instClass.defaultFS)

        return self._storage
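Examples #6, #10 and #16 are variants of the same lazily-initialized property from Anaconda: the Blivet instance is only constructed on first access. A condensed sketch of the idiom (class and attribute names here are illustrative, not Anaconda's):

class Storage(object):
    def __init__(self, ksdata=None):
        self.ksdata = ksdata
        self._storage = None

    @property
    def storage(self):
        if self._storage is None:
            import blivet  # imported lazily, as in the examples above
            self._storage = blivet.Blivet(ksdata=self.ksdata)
        return self._storage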
Example #7
    def setUp(self):
        self.b = blivet.Blivet()
        self.factory1 = devicefactory.get_device_factory(
            self.b, devicefactory.DEVICE_TYPE_MD, Size(spec="1 GiB"))

        self.factory2 = devicefactory.get_device_factory(
            self.b,
            devicefactory.DEVICE_TYPE_MD,
            Size(spec="1 GiB"),
            raid_level=0)
Example #8
 def __init__(self):
     super(BlivetDiskConfig, self).__init__()
     self._blivet = blivet.Blivet()
     self._blivet.reset()
     self._mounts = []
     self._initialized_disks = set()
     # Impose an ordering based on the order of partitions in the config.
     # Otherwise blivet somewhat arbitrarily chooses the ordering, which
     # can result in unintuitive partition layouts.
     self._next_weight = 0
Example #9
    def restore_state(self):
        """Get the second machine snapshot, compare it to the first one and perform
           the cleaning. Return list of devices that could not be removed."""
        new_state_snapshot = self._get_snapshot()
        diff = self._get_diff(self.org_state_snapshot, new_state_snapshot)

        if diff:
            print("These devices were not properly cleaned by tests:\n" +
                  "\n".join(diff))
            print("Removing by force...")
            # Put information into the syslog to be able to track possible issues
            syslog.syslog(
                "Following devices were not removed after UDisks2 D-Bus" +
                " tests and will be removed by force: %s" % str(diff)[1:-1])

        import blivet

        # we need at least blivet 2.0 to do this cleanup
        if LooseVersion(blivet.__version__) >= LooseVersion("2.0.0"):
            blvt = blivet.Blivet()
            blvt.reset()
            for device in diff:
                # kill all processes that are using the device
                # get list of mountpoints from blivet mountpoint dictionary
                mountpoints = [
                    mpoint for mpoint, dev in
                    blivet.mounts.mounts_cache.mountpoints.items()
                    if dev == device
                ]

                for mountpoint in mountpoints:
                    self._run_command("fuser -km %s" % mountpoint)

            # just try to remove everything
            blvt.config.exclusive_disks = diff
            blvt.reset()
            blvt.devicetree.teardown_all()

        self._run_command("modprobe -r scsi_debug")
        self._run_command("targetcli clearconfig confirm=True")
        for disk_file in glob.glob("/var/tmp/udisks_test_disk*"):
            os.unlink(disk_file)

        cleaned_state_snapshot = self._get_snapshot()
        not_cleaned = self._get_diff(self.org_state_snapshot,
                                     cleaned_state_snapshot)

        if not_cleaned:
            print("Failed to remove following devices:\n" +
                  "\n".join(not_cleaned))
        else:
            print("Devices successfully removed.")

        return not_cleaned
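The force-cleanup idiom at the end of Example #9 can also be used on its own; a hedged sketch restricted to the calls shown above (the disk name is illustrative):

import blivet

b = blivet.Blivet()
b.config.exclusive_disks = ["sdb"]  # only consider the disks we want to tear down
b.reset()
b.devicetree.teardown_all()  # deactivate everything blivet found on those disks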
Example #10
    def storage(self):
        if not self._storage:
            import blivet
            import blivet.arch

            self._storage = blivet.Blivet(ksdata=self.ksdata)
            self._set_default_fstype(self._storage)

            if blivet.arch.is_s390():
                self._load_plugin_s390()

        return self._storage
Example #11
    def setUp(self):
        self.storage = blivet.Blivet()

        # device status
        self.storage_status = blivet.devices.StorageDevice.status
        self.dm_status = blivet.devices.DMDevice.status
        self.luks_status = blivet.devices.LUKSDevice.status
        self.vg_status = blivet.devices.LVMVolumeGroupDevice.status
        self.md_status = blivet.devices.MDRaidArrayDevice.status
        self.file_status = blivet.devices.FileDevice.status
        blivet.devices.StorageDevice.status = False
        blivet.devices.DMDevice.status = False
        blivet.devices.LUKSDevice.status = False
        blivet.devices.LVMVolumeGroupDevice.status = False
        blivet.devices.MDRaidArrayDevice.status = False
        blivet.devices.FileDevice.status = False

        # prevent PartitionDevice from trying to dig around in the partition's
        # geometry
        self.partition_set_target = PartitionDevice._set_target_size
        self.partition_align_target = PartitionDevice.align_target_size
        self.partition_max = PartitionDevice.max_size
        self.partition_min = PartitionDevice.min_size
        blivet.devices.PartitionDevice._set_target_size = StorageDevice._set_target_size
        blivet.devices.PartitionDevice.align_target_size = StorageDevice.align_target_size
        blivet.devices.PartitionDevice.max_size = StorageDevice.max_size
        blivet.devices.PartitionDevice.min_size = StorageDevice.min_size

        self.addCleanup(self._clean_up)

        def partition_probe(device):
            if isinstance(device._parted_partition, Mock):
                # don't clobber a Mock we already set up here
                part_mock = device._parted_partition
            else:
                part_mock = Mock()

            attrs = {
                "getLength.return_value": int(device._size),
                "getDeviceNodeName.return_value": device.name,
                "type": parted.PARTITION_NORMAL
            }
            part_mock.configure_mock(**attrs)
            device._parted_partition = part_mock
            device._current_size = device._size
            device._part_type = parted.PARTITION_NORMAL
            device._bootable = False

        self.partition_probe = PartitionDevice.probe
        PartitionDevice.probe = partition_probe

        self.get_active_mounts = blivet.formats.fs.mounts_cache._get_active_mounts
        blivet.formats.fs.mounts_cache._get_active_mounts = Mock()
Example #12
    def setUp(self):
        if self.device_type is None:
            raise unittest.SkipTest("abstract base class")

        self.b = blivet.Blivet()  # don't populate it
        self.disk_files = [create_sparse_tempfile("factorytest", Size("2 GiB")),
                           create_sparse_tempfile("factorytest", Size("2 GiB"))]
        for filename in self.disk_files:
            disk = DiskFile(filename)
            self.b.devicetree._add_device(disk)
            self.b.initialize_disk(disk)

        self.addCleanup(self._clean_up_disk_files)
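The sparse-file/DiskFile trick in Examples #12 and #17 lets blivet be exercised against disk images instead of real hardware. A hedged sketch using the same helpers (the import locations are assumed from the usage above):

import blivet
from blivet.devices import DiskFile
from blivet.size import Size
from blivet.util import create_sparse_tempfile

b = blivet.Blivet()  # deliberately not populated from the running system
path = create_sparse_tempfile("blivet-demo", Size("2 GiB"))
disk = DiskFile(path)
b.devicetree._add_device(disk)
b.initialize_disk(disk)  # put a fresh disklabel on the image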
Example #13
    def setupDisks(self, ksdata):
        self._blivet = blivet.Blivet(ksdata=ksdata)

        # See comment in super class's method.
        from pyanaconda.bootloader import get_bootloader
        self._blivet._bootloader = get_bootloader()

        for component in self._reusedComponents:
            self._disks.update(component._disks)

        for (name, image) in self._disks.items():
            self._blivet.config.diskImages[name] = image

        self._blivet.reset()
Example #14
    def test_suggest_container_name(self):
        b = blivet.Blivet()

        with patch("blivet.devicetree.DeviceTree.names", []):
            name = b.suggest_container_name(prefix="blivet")
            self.assertEqual(name, "blivet")

        with patch("blivet.devicetree.DeviceTree.names", ["blivet"]):
            name = b.suggest_container_name(prefix="blivet")
            self.assertEqual(name, "blivet00")

        with patch("blivet.devicetree.DeviceTree.names",
                   ["blivet"] + ["blivet%02d" % i for i in range(100)]):
            with self.assertRaises(RuntimeError):
                b.suggest_container_name(prefix="blivet")
Example #15
    def test_new_vdo_pool(self):
        b = blivet.Blivet()
        pv = StorageDevice("pv1",
                           fmt=blivet.formats.get_format("lvmpv"),
                           size=Size("10 GiB"),
                           exists=True)
        vg = LVMVolumeGroupDevice("testvg", parents=[pv], exists=True)

        for dev in (pv, vg):
            b.devicetree._add_device(dev)

        # check that all the above devices are in the expected places
        self.assertEqual(set(b.devices), {pv, vg})
        self.assertEqual(set(b.vgs), {vg})

        self.assertEqual(vg.size, Size("10236 MiB"))

        with self.assertRaises(ValueError):
            vdopool = b.new_lv(name="vdopool",
                               vdo_pool=True,
                               parents=[vg],
                               compression=True,
                               deduplication=True,
                               size=blivet.size.Size("1 GiB"))

        vdopool = b.new_lv(name="vdopool",
                           vdo_pool=True,
                           parents=[vg],
                           compression=True,
                           deduplication=True,
                           size=blivet.size.Size("8 GiB"))

        vdolv = b.new_lv(name="vdolv",
                         vdo_lv=True,
                         parents=[vdopool],
                         size=blivet.size.Size("40 GiB"))

        b.create_device(vdopool)
        b.create_device(vdolv)

        self.assertEqual(vdopool.children[0], vdolv)
        self.assertEqual(vdolv.parents[0], vdopool)
        self.assertListEqual(vg.lvs, [vdopool, vdolv])
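As in the other examples, create_device() only schedules actions; nothing is written to disk inside the test. A hedged continuation of the example above (reusing its b; do_it() is assumed to be the action-commit entry point on this blivet version):

# commit the actions queued by b.create_device() above; this is what would
# actually create the VDO pool and the VDO LV on disk
b.do_it()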
Example #16
File: anaconda.py Project: bcl/anaconda
    def storage(self):
        if not self._storage:
            import blivet
            import blivet.arch
            from gi.repository import BlockDev as blockdev
            self._storage = blivet.Blivet(ksdata=self.ksdata)

            if self.instClass.defaultFS:
                self._storage.setDefaultFSType(self.instClass.defaultFS)

            if blivet.arch.isS390():
                # want to make sure s390 plugin is loaded
                if "s390" not in blockdev.get_available_plugin_names():
                    plugin = blockdev.PluginSpec()
                    plugin.name = blockdev.Plugin.S390
                    plugin.so_name = None
                    blockdev.reinit([plugin], reload=False)

        return self._storage
Example #17
    def setupDisks(self, ksdata):
        """Create all disk images given by self.disksToCreate and initialize
           the storage module.  Subclasses may override this method, but they
           should be sure to call the base method as well.
        """
        self._blivet = blivet.Blivet(ksdata=ksdata)

        # blivet only sets up the bootloader in installer_mode.  We don't
        # want installer_mode, though, because that involves running lots
        # of programs on the host and setting up all sorts of other things.
        # Thus, we set it up manually.
        from pyanaconda.bootloader import get_bootloader
        self._blivet._bootloader = get_bootloader()

        for (name, size) in self.disksToCreate:
            self._disks[name] = blivet.util.create_sparse_tempfile(name, size)
            self._blivet.config.diskImages[name] = self._disks[name]

        self._blivet.reset()
Example #18
    def test_vdo_dependencies(self):
        blivet.tasks.availability.CACHE_AVAILABILITY = False

        b = blivet.Blivet()
        pv = StorageDevice("pv1",
                           fmt=blivet.formats.get_format("lvmpv"),
                           size=Size("10 GiB"),
                           exists=True)
        vg = LVMVolumeGroupDevice("testvg", parents=[pv], exists=True)

        for dev in (pv, vg):
            b.devicetree._add_device(dev)

        # check that all the above devices are in the expected places
        self.assertEqual(set(b.devices), {pv, vg})
        self.assertEqual(set(b.vgs), {vg})

        self.assertEqual(vg.size, Size("10236 MiB"))

        vdopool = b.new_lv(name="vdopool",
                           vdo_pool=True,
                           parents=[vg],
                           compression=True,
                           deduplication=True,
                           size=blivet.size.Size("8 GiB"))

        vdolv = b.new_lv(name="vdolv",
                         vdo_lv=True,
                         parents=[vdopool],
                         size=blivet.size.Size("40 GiB"))

        # Dependencies check: for VDO types these should be combination of "normal"
        # LVM dependencies (LVM libblockdev plugin + kpartx and DM plugin from DMDevice)
        # and LVM VDO technology from the LVM plugin
        lvm_vdo_dependencies = [
            "kpartx", "libblockdev dm plugin", "libblockdev lvm plugin",
            "libblockdev lvm plugin (vdo technology)"
        ]
        pool_deps = [d.name for d in vdopool.external_dependencies]
        six.assertCountEqual(self, pool_deps, lvm_vdo_dependencies)

        vdolv_deps = [d.name for d in vdolv.external_dependencies]
        six.assertCountEqual(self, vdolv_deps, lvm_vdo_dependencies)

        # same dependencies should be returned when checking with class not instance
        pool_type_deps = [
            d.name for d in LVMVDOPoolMixin.type_external_dependencies()
        ]
        six.assertCountEqual(self, pool_type_deps, lvm_vdo_dependencies)

        vdolv_type_deps = [
            d.name
            for d in LVMVDOLogicalVolumeMixin.type_external_dependencies()
        ]
        six.assertCountEqual(self, vdolv_type_deps, lvm_vdo_dependencies)

        # just to be sure LVM VDO specific code didn't break "normal" LVs
        normallv = b.new_lv(name="lvol0",
                            parents=[vg],
                            size=blivet.size.Size("1 GiB"))

        normalvl_deps = [d.name for d in normallv.external_dependencies]
        six.assertCountEqual(
            self, normalvl_deps,
            ["kpartx", "libblockdev dm plugin", "libblockdev lvm plugin"])

        with patch("blivet.devices.lvm.LVMVDOPoolMixin._external_dependencies",
                   new=[
                       blivet.tasks.availability.unavailable_resource(
                           "VDO unavailability test")
                   ]):
            with patch(
                    "blivet.devices.lvm.LVMVDOLogicalVolumeMixin._external_dependencies",
                    new=[
                        blivet.tasks.availability.unavailable_resource(
                            "VDO unavailability test")
                    ]):

                pool_deps = [d.name for d in vdopool.unavailable_dependencies]
                self.assertEqual(pool_deps, ["VDO unavailability test"])

                vdolv_deps = [d.name for d in vdolv.unavailable_dependencies]
                self.assertEqual(vdolv_deps, ["VDO unavailability test"])

                # same dependencies should be returned when checking with class not instance
                pool_type_deps = [
                    d.name
                    for d in LVMVDOPoolMixin.unavailable_type_dependencies()
                ]
                six.assertCountEqual(self, pool_type_deps,
                                     ["VDO unavailability test"])

                vdolv_type_deps = [
                    d.name for d in
                    LVMVDOLogicalVolumeMixin.unavailable_type_dependencies()
                ]
                six.assertCountEqual(self, vdolv_type_deps,
                                     ["VDO unavailability test"])

                normallv_deps = [
                    d.name for d in normallv.unavailable_dependencies
                ]
                self.assertEqual(normallv_deps, [])

                with self.assertRaises(errors.DependencyError):
                    b.create_device(vdopool)
                    b.create_device(vdolv)

                b.create_device(normallv)
Example #19
    def run(self):
        logger.log(
            "info",
            NS.publisher_id,
            {"message": "%s running" % self.__class__.__name__}
        )

        gluster_brick_dir = NS.gluster.objects.GlusterBrickDir()
        gluster_brick_dir.save()

        cluster = NS.tendrl.objects.Cluster(
            integration_id=NS.tendrl_context.integration_id
        ).load()
        if cluster.cluster_network in [None, ""]:
            try:
                node_networks = NS.tendrl.objects.NodeNetwork().load_all()
                cluster.cluster_network = node_networks[0].subnet
                cluster.save()
            except etcd.EtcdKeyNotFound as ex:
                logger.log(
                    "error",
                    NS.publisher_id,
                    {"message": "Failed to sync cluster network details"}
                )
        _sleep = 0
        while not self._complete.is_set():
            # To detect out of band deletes
            # refresh gluster object inventory at config['sync_interval']
            SYNC_TTL = int(NS.config.data.get("sync_interval", 10)) + 100
            NS.node_context = NS.node_context.load()
            NS.tendrl_context = NS.tendrl_context.load()
            if _sleep > 5:
                _sleep = int(NS.config.data.get("sync_interval", 10))
            else:
                _sleep += 1

            try:
                _cluster = NS.tendrl.objects.Cluster(
                    integration_id=NS.tendrl_context.integration_id
                ).load()
                if (_cluster.status == "importing" and
                    _cluster.current_job['status'] == 'failed') or \
                    _cluster.status == "unmanaging" or \
                    _cluster.status == "set_volume_profiling":
                    continue

                _cnc = NS.tendrl.objects.ClusterNodeContext(
                    node_id=NS.node_context.node_id
                ).load()
                _cnc.is_managed = "yes"
                _cnc.save()
                subprocess.call(
                    [
                        'gluster',
                        'get-state',
                        'glusterd',
                        'odir',
                        '/var/run',
                        'file',
                        'glusterd-state',
                        'detail'
                    ]
                )
                raw_data = ini2json.ini_to_dict(
                    '/var/run/glusterd-state'
                )
                subprocess.call(['rm', '-rf', '/var/run/glusterd-state'])
                subprocess.call(
                    [
                        'gluster',
                        'get-state',
                        'glusterd',
                        'odir',
                        '/var/run',
                        'file',
                        'glusterd-state-vol-opts',
                        'volumeoptions'
                    ]
                )
                raw_data_options = ini2json.ini_to_dict(
                    '/var/run/glusterd-state-vol-opts'
                )
                subprocess.call(
                    [
                        'rm',
                        '-rf',
                        '/var/run/glusterd-state-vol-opts'
                    ]
                )
                sync_object = NS.gluster.objects.\
                    SyncObject(data=json.dumps(raw_data))
                sync_object.save()

                if "Peers" in raw_data:
                    index = 1
                    peers = raw_data["Peers"]
                    disconnected_hosts = []
                    while True:
                        try:
                            peer = NS.tendrl.\
                                objects.GlusterPeer(
                                    peer_uuid=peers['peer%s.uuid' % index],
                                    hostname=peers[
                                        'peer%s.primary_hostname' % index
                                    ],
                                    state=peers['peer%s.state' % index],
                                    connected=peers['peer%s.connected' % index]
                                )
                            try:
                                stored_peer_status = None
                                # find peer detail using hostname
                                ip = socket.gethostbyname(
                                    peers['peer%s.primary_hostname' % index]
                                )
                                node_id = etcd_utils.read(
                                    "/indexes/ip/%s" % ip
                                ).value
                                stored_peer = NS.tendrl.objects.GlusterPeer(
                                    peer_uuid=peers['peer%s.uuid' % index],
                                    node_id=node_id
                                ).load()
                                stored_peer_status = stored_peer.connected
                                current_status = peers[
                                    'peer%s.connected' % index
                                ]
                                if stored_peer_status and \
                                    current_status != stored_peer_status:
                                    msg = (
                                        "Peer %s in cluster %s "
                                        "is %s"
                                    ) % (
                                        peers[
                                            'peer%s.primary_hostname' %
                                            index
                                        ],
                                        _cluster.short_name,
                                        current_status
                                    )
                                    instance = "peer_%s" % peers[
                                        'peer%s.primary_hostname' % index
                                    ]
                                    event_utils.emit_event(
                                        "peer_status",
                                        current_status,
                                        msg,
                                        instance,
                                        'WARNING' if current_status !=
                                        'Connected'
                                        else 'INFO'
                                    )
                                    # save current status in actual peer
                                    # directory also
                                    stored_peer.connected = current_status
                                    stored_peer.save()
                                    # Disconnected host name to
                                    # raise brick alert
                                    if current_status.lower() == \
                                        "disconnected":
                                        disconnected_hosts.append(
                                            peers[
                                                'peer%s.primary_hostname' %
                                                index
                                            ]
                                        )
                            except etcd.EtcdKeyNotFound:
                                pass
                            SYNC_TTL += 5
                            peer.save(ttl=SYNC_TTL)
                            index += 1
                        except KeyError:
                            break
                    # Raise an alert for bricks when peer disconnected
                    # or node goes down
                    for disconnected_host in disconnected_hosts:
                        brick_status_alert(
                            disconnected_host
                        )
                if "Volumes" in raw_data:
                    index = 1
                    volumes = raw_data['Volumes']
                    # instantiating blivet class, this will be used for
                    # getting brick_device_details
                    b = blivet.Blivet()

                    # reset blivet during every sync to get latest information
                    # about storage devices in the machine
                    b.reset()
                    devicetree = b.devicetree
                    total_brick_count = 0
                    while True:
                        try:
                            b_count = sync_volumes(
                                volumes, index,
                                raw_data_options.get('Volume Options'),
                                SYNC_TTL + VOLUME_TTL,
                                _cluster.short_name,
                                devicetree
                            )
                            index += 1
                            SYNC_TTL += 1
                            total_brick_count += b_count - 1
                        except KeyError:
                            global VOLUME_TTL
                            # from second sync volume ttl is
                            # SYNC_TTL + (no.volumes) * 20 +
                            # (no.of.bricks) * 10 + 160
                            if index > 1:
                                volume_count = index - 1
                                # When all nodes are down we are updating all
                                # volumes are down, node status TTL is 160,
                                # So make sure volumes are present in etcd
                                # while raising volume down alert
                                VOLUME_TTL = (volume_count * 20) + (
                                    total_brick_count * 10) + 160
                            break
                    # populate the volume specific options
                    reg_ex = re.compile("^volume[0-9]+.options+")
                    options = {}
                    for key in volumes.keys():
                        if reg_ex.match(key):
                            options[key] = volumes[key]
                    for key in options.keys():
                        volname = key.split('.')[0]
                        vol_id = volumes['%s.id' % volname]
                        dict1 = {}
                        for k, v in options.items():
                            if k.startswith('%s.options' % volname):
                                dict1['.'.join(k.split(".")[2:])] = v
                                options.pop(k, None)
                        volume = NS.tendrl.objects.GlusterVolume(
                            NS.tendrl_context.integration_id,
                            vol_id=vol_id
                        ).load()
                        if volume.options is not None:
                            dest = dict(volume.options)
                            dest.update(dict1)
                            volume.options = dest
                            volume.save()

                # Sync cluster global details
                if "provisioner/%s" % NS.tendrl_context.integration_id \
                    in NS.node_context.tags:
                    all_volumes = NS.tendrl.objects.GlusterVolume(
                        NS.tendrl_context.integration_id
                    ).load_all() or []
                    volumes = []
                    for volume in all_volumes:
                        if not str(volume.deleted).lower() == "true" and \
                            volume.current_job.get('status', '') \
                            in ['', 'finished', 'failed'] and \
                            volume.vol_id not in [None, ''] and \
                            volume.name not in [None, '']:
                            # only for first sync refresh volume TTL
                            # It will increase TTL based on no.of volumes
                            if _cnc.first_sync_done in [None, "no", ""]:
                                etcd_utils.refresh(
                                    volume.value,
                                    SYNC_TTL + VOLUME_TTL
                                )
                            volumes.append(volume)
                    cluster_status.sync_cluster_status(
                        volumes, SYNC_TTL + VOLUME_TTL
                    )
                    utilization.sync_utilization_details(volumes)
                    client_connections.sync_volume_connections(volumes)
                    georep_details.aggregate_session_status()
                    try:
                        evt.process_events()
                    except etcd.EtcdKeyNotFound:
                        pass
                    rebalance_status.sync_volume_rebalance_status(volumes)
                    rebalance_status.sync_volume_rebalance_estimated_time(
                        volumes
                    )
                    snapshots.sync_volume_snapshots(
                        raw_data['Volumes'],
                        int(NS.config.data.get(
                            "sync_interval", 10
                        )) + len(volumes) * 4
                    )
                    # update alert count
                    update_cluster_alert_count()
                # check and enable volume profiling
                if "provisioner/%s" % NS.tendrl_context.integration_id in \
                    NS.node_context.tags:
                    self._enable_disable_volume_profiling()

                _cluster = NS.tendrl.objects.Cluster(
                    integration_id=NS.tendrl_context.integration_id
                ).load()
                if _cluster.exists():
                    _cluster = _cluster.load()
                    _cluster.last_sync = str(tendrl_now())
                    # Mark the first sync done flag
                    _cnc = NS.tendrl.objects.ClusterNodeContext(
                        node_id=NS.node_context.node_id
                    ).load()
                    if _cnc.first_sync_done in [None, "no"]:
                        _cnc.first_sync_done = "yes"
                        _cnc.save()
                    if _cluster.current_job.get(
                        'status', ''
                    ) in ['', 'finished', 'failed'] and \
                        _cluster.status in [None, ""]:
                        _cluster.save()
            except Exception as ex:
                Event(
                    ExceptionMessage(
                        priority="error",
                        publisher=NS.publisher_id,
                        payload={"message": "gluster sds state sync error",
                                 "exception": ex
                                 }
                    )
                )
            try:
                etcd_utils.read(
                    '/clusters/%s/_sync_now' %
                    NS.tendrl_context.integration_id
                )
                continue
            except etcd.EtcdKeyNotFound:
                pass

            time.sleep(_sleep)

        logger.log(
            "debug",
            NS.publisher_id,
            {"message": "%s complete" % self.__class__.__name__}
        )
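Example #19 only uses blivet to resolve brick paths against the device tree. That lookup on its own looks roughly like the sketch below; get_device_by_name is assumed to be the snake_case counterpart of the getDeviceByName call used in Example #20:

import blivet

b = blivet.Blivet()
b.reset()
dev = b.devicetree.get_device_by_name("sda1")  # device name is illustrative
if dev is not None:
    print(dev.path, dev.size, dev.format.type)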
Example #20
File: storagedev.py Project: igoihman/vdsm
def createBrick(brickName,
                mountPoint,
                devNameList,
                fsType=DEFAULT_FS_TYPE,
                raidParams={}):
    def _getDeviceList(devNameList):
        return [
            blivetEnv.devicetree.getDeviceByName(devName.split("/")[-1])
            for devName in devNameList
        ]

    def _createPV(deviceList, alignment):
        for dev in deviceList:
            # bz#1178705: Blivet always creates pv with 1MB dataalignment
            # Workaround: Till blivet fixes the issue, we use lvm pvcreate
            rc, out, err = commands.execCmd([
                _pvCreateCommandPath.cmd, '--dataalignment',
                '%sk' % alignment, dev.path
            ])
            if rc:
                raise ge.GlusterHostStorageDevicePVCreateFailedException(
                    dev.path, alignment, rc, out, err)
        _reset_blivet(blivetEnv)
        return _getDeviceList([dev.name for dev in deviceList])

    def _createVG(vgName, deviceList, stripeSize):
        # bz#1198568: Blivet always creates vg with 1MB stripe size
        # Workaround: Till blivet fixes the issue, use vgcreate command
        devices = ','.join([device.path for device in deviceList])
        rc, out, err = commands.execCmd([
            _vgCreateCommandPath.cmd, '-s',
            '%sk' % stripeSize, vgName, devices
        ])
        if rc:
            raise ge.GlusterHostStorageDeviceVGCreateFailedException(
                vgName, devices, stripeSize, rc, out, err)
        blivetEnv.reset()
        return blivetEnv.devicetree.getDeviceByName(vgName)

    def _createThinPool(poolName, vg, alignment, poolMetaDataSize,
                        poolDataSize):
        metaName = "meta-%s" % poolName
        vgPoolName = "%s/%s" % (vg.name, poolName)
        metaLv = LVMLogicalVolumeDevice(metaName,
                                        parents=[vg],
                                        size=blivet.size.Size(
                                            '%d KiB' % poolMetaDataSize))
        poolLv = LVMLogicalVolumeDevice(poolName,
                                        parents=[vg],
                                        size=blivet.size.Size('%d KiB' %
                                                              poolDataSize))
        blivetEnv.createDevice(metaLv)
        blivetEnv.createDevice(poolLv)
        blivetEnv.doIt()

        # bz#1100514: LVM2 currently only supports physical extent sizes
        # that are a power of 2. Till that support is available we need
        # to use lvconvert to achieve that.
        # bz#1179826: blivet doesn't support lvconvert functionality.
        # Workaround: Till the bz gets fixed, lvconvert command is used
        rc, out, err = commands.execCmd([
            _lvconvertCommandPath.cmd, '--chunksize',
            '%sK' % alignment, '--thinpool', vgPoolName, '--poolmetadata',
            "%s/%s" % (vg.name, metaName), '--poolmetadataspar', 'n', '-y'
        ])

        if rc:
            raise ge.GlusterHostStorageDeviceLVConvertFailedException(
                vg.path, alignment, rc, out, err)
        rc, out, err = commands.execCmd(
            [_lvchangeCommandPath.cmd, '--zero', 'n', vgPoolName])
        if rc:
            raise ge.GlusterHostStorageDeviceLVChangeFailedException(
                vgPoolName, rc, out, err)
        _reset_blivet(blivetEnv)
        return blivetEnv.devicetree.getDeviceByName(poolLv.name)

    if os.path.ismount(mountPoint):
        raise ge.GlusterHostStorageMountPointInUseException(mountPoint)

    vgName = "vg-" + brickName
    poolName = "pool-" + brickName
    poolDataSize = 0
    count = 0
    raidType = raidParams.get('type')
    metaDataSizeKib = DEFAULT_METADATA_SIZE_KB
    if raidType == '6':
        count = raidParams['pdCount'] - 2
        alignment = raidParams['stripeSize'] * count
        chunkSize = alignment
    elif raidType == '10':
        count = raidParams['pdCount'] / 2
        alignment = raidParams['stripeSize'] * count
        chunkSize = DEFAULT_CHUNK_SIZE_KB
    else:  # Device type is JBOD
        alignment = DEFAULT_CHUNK_SIZE_KB
        chunkSize = DEFAULT_CHUNK_SIZE_KB

    blivetEnv = blivet.Blivet()
    _reset_blivet(blivetEnv)

    # get the devices list from the device name
    deviceList = _getDeviceList(devNameList)

    # raise an error when any device not actually found in the given list
    notFoundList = set(devNameList).difference(
        set([dev.name for dev in deviceList]))
    if notFoundList:
        raise ge.GlusterHostStorageDeviceNotFoundException(notFoundList)

    # raise an error when any device is used already in the given list
    inUseList = set(devNameList).difference(
        set([not _canCreateBrick(dev) or dev.name for dev in deviceList]))
    if inUseList:
        raise ge.GlusterHostStorageDeviceInUseException(inUseList)

    pvDeviceList = _createPV(deviceList, alignment)
    vg = _createVG(vgName, pvDeviceList, alignment)
    # The following calculation is based on the redhat storage performance doc
    # http://docbuilder.usersys.redhat.com/22522
    # /#chap-Configuring_Red_Hat_Storage_for_Enhancing_Performance

    # create ~16GB metadata LV (metaDataSizeKib) that has a size which is
    # a multiple of RAID stripe width if it is > minimum vg size
    # otherwise allocate a minimum of 0.5% of the data device size
    # and create data LV (poolDataSize) that has a size which is
    # a multiple of stripe width.
    vgSizeKib = int(_getDeviceSize(vg, 'KiB'))
    if _getDeviceSize(vg) < MIN_VG_SIZE:
        metaDataSizeKib = vgSizeKib * MIN_METADATA_PERCENT
    poolDataSize = vgSizeKib - metaDataSizeKib

    metaDataSizeKib = (metaDataSizeKib - (metaDataSizeKib % alignment))
    poolDataSize = (poolDataSize - (poolDataSize % alignment))

    # Creating a thin pool from the data LV and the metadata LV
    # lvconvert --chunksize alignment --thinpool VOLGROUP/thin_pool
    #     --poolmetadata VOLGROUP/metadata_device_name
    pool = _createThinPool(poolName, vg, chunkSize, metaDataSizeKib,
                           poolDataSize)
    thinlv = LVMThinLogicalVolumeDevice(brickName,
                                        parents=[pool],
                                        size=vg.size,
                                        grow=True)
    blivetEnv.createDevice(thinlv)
    blivetEnv.doIt()

    if fsType != DEFAULT_FS_TYPE:
        log.error("fstype %s is currently unsupported" % fsType)
        raise ge.GlusterHostStorageDeviceMkfsFailedException(
            thinlv.path, alignment, raidParams.get('stripeSize', 0), fsType)

    format = blivet.formats.getFormat(DEFAULT_FS_TYPE,
                                      device=thinlv.path,
                                      mountopts=DEFAULT_MOUNT_OPTIONS)
    format._defaultFormatOptions = ["-f", "-i", "size=512", "-n", "size=8192"]
    if raidParams.get('type') == '6':
        format._defaultFormatOptions += [
            "-d", "sw=%s,su=%sk" % (count, raidParams.get('stripeSize'))
        ]
    blivetEnv.formatDevice(thinlv, format)
    blivetEnv.doIt()

    try:
        os.makedirs(mountPoint)
    except OSError as e:
        if errno.EEXIST != e.errno:
            errMsg = "[Errno %s] %s: '%s'" % (e.errno, e.strerror, e.filename)
            raise ge.GlusterHostStorageDeviceMakeDirsFailedException(
                err=[errMsg])
    thinlv.format.setup(mountpoint=mountPoint)
    blivetEnv.doIt()

    # bz#1230495: lvm devices are invisible and appears only after vgscan
    # Workaround: Till the bz gets fixed, We use vgscan to refresh LVM devices
    rc, out, err = commands.execCmd([_vgscanCommandPath.cmd])
    if rc:
        raise ge.GlusterHostStorageDeviceVGScanFailedException(rc, out, err)
    fstab.FsTab().add(thinlv.path,
                      mountPoint,
                      DEFAULT_FS_TYPE,
                      mntOpts=[DEFAULT_MOUNT_OPTIONS])

    # If selinux is enabled, set correct selinux labels on the brick.
    if selinux.is_selinux_enabled():
        rc, out, err = commands.execCmd([
            _semanageCommandPath.cmd, 'fcontext', '-a', '-t',
            'glusterd_brick_t', mountPoint
        ])
        if rc:
            raise ge.GlusterHostFailedToSetSelinuxContext(
                mountPoint, rc, out, err)
        try:
            selinux.restorecon(mountPoint, recursive=True)
        except OSError as e:
            errMsg = "[Errno %s] %s: '%s'" % (e.errno, e.strerror, e.filename)
            raise ge.GlusterHostFailedToRunRestorecon(mountPoint, err=errMsg)
    return _getDeviceDict(thinlv)
Example #21
File: storagedev.py Project: igoihman/vdsm
def storageDevicesList():
    blivetEnv = blivet.Blivet()
    _reset_blivet(blivetEnv)
    return _parseDevices(blivetEnv.devices)
Example #22
    def test_weak_dependencies(self):
        self.bvt = blivet.Blivet()  # pylint: disable=attribute-defined-outside-init
        availability.CACHE_AVAILABILITY = False

        # reinitialize blockdev without the plugins
        # TODO: uncomment (workaround (1/2) for blivet.reset fail)
        # self.unload_all_plugins()
        disk1 = DiskFile(self.disk1_file)

        self.bvt.exclusive_disks = [disk1.name]
        if os.geteuid() == 0:
            try:
                self.bvt.reset()
            except blockdev.BlockDevNotImplementedError:  # pylint: disable=catching-non-exception
                self.fail("Improper handling of missing libblockdev plugin")
        # TODO: remove line (workaround (2/2) for blivet.reset fail)
        self.unload_all_plugins()

        self.bvt.devicetree._add_device(disk1)
        self.bvt.initialize_disk(disk1)

        pv = self.bvt.new_partition(size=Size("8GiB"), fmt_type="lvmpv")
        pv_fail = self.bvt.new_partition(size=Size("8GiB"), fmt_type="lvmpv")
        btrfs = self.bvt.new_partition(size=Size("1GiB"), fmt_type="btrfs")
        raid1 = self.bvt.new_partition(size=Size("1GiB"), fmt_type="mdmember")
        raid2 = self.bvt.new_partition(size=Size("1GiB"), fmt_type="mdmember")

        with six.assertRaisesRegex(
                self, ValueError,
                "resource to create this format.*unavailable"):
            self.bvt.create_device(pv_fail)

        # to be able to test functions like destroy_device it is necessary to have some
        # testing devices actually created - hence loading libblockdev plugins...
        self.load_all_plugins()
        self.bvt.create_device(pv)
        self.bvt.create_device(btrfs)
        # ... and unloading again when tests can continue
        self.unload_all_plugins()

        try:
            vg = self.bvt.new_vg(parents=[pv])
        except blockdev.BlockDevNotImplementedError:  # pylint: disable=catching-non-exception
            self.fail("Improper handling of missing libblockdev plugin")

        self.load_all_plugins()
        self.bvt.create_device(vg)
        self.unload_all_plugins()

        try:
            lv1 = self.bvt.new_lv(fmt_type="ext4",
                                  size=Size("1GiB"),
                                  parents=[vg])
            lv2 = self.bvt.new_lv(fmt_type="ext4",
                                  size=Size("1GiB"),
                                  parents=[vg])
            lv3 = self.bvt.new_lv(fmt_type="luks",
                                  size=Size("1GiB"),
                                  parents=[vg])
        except blockdev.BlockDevNotImplementedError:  # pylint: disable=catching-non-exception
            self.fail("Improper handling of missing libblockdev plugin")

        self.load_all_plugins()
        self.bvt.create_device(lv1)
        self.bvt.create_device(lv2)
        self.bvt.create_device(lv3)
        self.unload_all_plugins()

        try:
            pool = self.bvt.new_lv_from_lvs(vg,
                                            name='pool',
                                            seg_type="thin-pool",
                                            from_lvs=[lv1, lv2])
        except blockdev.BlockDevNotImplementedError:  # pylint: disable=catching-non-exception
            self.fail("Improper handling of missing libblockdev plugin")

        self.load_all_plugins()
        self.bvt.create_device(pool)
        self.unload_all_plugins()

        with six.assertRaisesRegex(self, DependencyError,
                                   "requires unavailable_dependencies"):
            self.bvt.destroy_device(pool)

        try:
            self.bvt.new_btrfs(parents=[btrfs])
        except blockdev.BlockDevNotImplementedError:  # pylint: disable=catching-non-exception
            self.fail("Improper handling of missing libblockdev plugin")

        with six.assertRaisesRegex(self, ValueError,
                                   "device cannot be resized"):
            self.bvt.resize_device(lv3, Size("2GiB"))

        try:
            self.bvt.new_tmp_fs(fmt=disk1.format, size=Size("500MiB"))
        except blockdev.BlockDevNotImplementedError:  # pylint: disable=catching-non-exception
            self.fail("Improper handling of missing libblockdev plugin")

        try:
            self.bvt.new_mdarray(level='raid0', parents=[raid1, raid2])
        except blockdev.BlockDevNotImplementedError:  # pylint: disable=catching-non-exception
            self.fail("Improper handling of missing libblockdev plugin")
Example #23
 def __init__(self):
     self._blivet = blivet.Blivet()
     self._blivet.reset()
Example #24
    def testShouldClear(self):
        """ Test the Blivet.shouldClear method. """
        b = blivet.Blivet()

        DiskDevice = blivet.devices.DiskDevice
        PartitionDevice = blivet.devices.PartitionDevice

        # sda is a disk with an existing disklabel containing two partitions
        sda = DiskDevice("sda", size=100000, exists=True)
        sda.format = blivet.formats.getFormat("disklabel",
                                              device=sda.path,
                                              exists=True)
        sda.format._partedDisk = mock.Mock()
        sda.format._partedDevice = mock.Mock()
        b.devicetree._addDevice(sda)

        # sda1 is a partition containing an existing ext4 filesystem
        sda1 = PartitionDevice("sda1", size=500, exists=True, parents=[sda])
        sda1._partedPartition = mock.Mock(**{
            'type': PARTITION_NORMAL,
            'getFlag.return_value': 0
        })
        sda1.format = blivet.formats.getFormat("ext4",
                                               mountpoint="/boot",
                                               device=sda1.path,
                                               exists=True)
        b.devicetree._addDevice(sda1)

        # sda2 is a partition containing an existing vfat filesystem
        sda2 = PartitionDevice("sda2", size=10000, exists=True, parents=[sda])
        sda2._partedPartition = mock.Mock(**{
            'type': PARTITION_NORMAL,
            'getFlag.return_value': 0
        })
        sda2.format = blivet.formats.getFormat("vfat",
                                               mountpoint="/foo",
                                               device=sda2.path,
                                               exists=True)
        b.devicetree._addDevice(sda2)

        # sdb is an unpartitioned disk containing an xfs filesystem
        sdb = DiskDevice("sdb", size=100000, exists=True)
        sdb.format = blivet.formats.getFormat("xfs",
                                              device=sdb.path,
                                              exists=True)
        b.devicetree._addDevice(sdb)

        # sdc is an unformatted/uninitialized/empty disk
        sdc = DiskDevice("sdc", size=100000, exists=True)
        b.devicetree._addDevice(sdc)

        # sdd is a disk containing an existing disklabel with no partitions
        sdd = DiskDevice("sdd", size=100000, exists=True)
        sdd.format = blivet.formats.getFormat("disklabel",
                                              device=sdd.path,
                                              exists=True)
        b.devicetree._addDevice(sdd)

        #
        # clearpart type none
        #
        b.config.clearPartType = CLEARPART_TYPE_NONE
        self.assertFalse(b.shouldClear(sda1),
                         msg="type none should not clear any partitions")
        self.assertFalse(b.shouldClear(sda2),
                         msg="type none should not clear any partitions")

        b.config.initializeDisks = False
        self.assertFalse(b.shouldClear(sda),
                         msg="type none should not clear non-empty disks")
        self.assertFalse(b.shouldClear(sdb),
                         msg="type none should not clear formatting from "
                         "unpartitioned disks")

        self.assertFalse(b.shouldClear(sdc),
                         msg="type none should not clear empty disk without "
                         "initlabel")
        self.assertFalse(
            b.shouldClear(sdd),
            msg="type none should not clear empty partition table "
            "without initlabel")

        b.config.initializeDisks = True
        self.assertFalse(b.shouldClear(sda),
                         msg="type none should not clear non-empty disks even "
                         "with initlabel")
        self.assertFalse(b.shouldClear(sdb),
                         msg="type non should not clear formatting from "
                         "unpartitioned disks even with initlabel")
        self.assertTrue(
            b.shouldClear(sdc),
            msg="type none should clear empty disks when initlabel "
            "is set")
        self.assertTrue(
            b.shouldClear(sdd),
            msg="type none should clear empty partition table when "
            "initlabel is set")

        #
        # clearpart type linux
        #
        b.config.clearPartType = CLEARPART_TYPE_LINUX
        self.assertTrue(b.shouldClear(sda1),
                        msg="type linux should clear partitions containing "
                        "ext4 filesystems")
        self.assertFalse(b.shouldClear(sda2),
                         msg="type linux should not clear partitions "
                         "containing vfat filesystems")

        b.config.initializeDisks = False
        self.assertFalse(
            b.shouldClear(sda),
            msg="type linux should not clear non-empty disklabels")
        self.assertTrue(b.shouldClear(sdb),
                        msg="type linux should clear linux-native whole-disk "
                        "formatting regardless of initlabel setting")
        self.assertFalse(b.shouldClear(sdc),
                         msg="type linux should not clear unformatted disks "
                         "unless initlabel is set")
        self.assertFalse(b.shouldClear(sdd),
                         msg="type linux should not clear disks with empty "
                         "partition tables unless initlabel is set")

        b.config.initializeDisks = True
        self.assertFalse(
            b.shouldClear(sda),
            msg="type linux should not clear non-empty disklabels")
        self.assertTrue(b.shouldClear(sdb),
                        msg="type linux should clear linux-native whole-disk "
                        "formatting regardless of initlabel setting")
        self.assertTrue(b.shouldClear(sdc),
                        msg="type linux should clear unformatted disks when "
                        "initlabel is set")
        self.assertTrue(b.shouldClear(sdd),
                        msg="type linux should clear disks with empty "
                        "partition tables when initlabel is set")

        sda1.protected = True
        self.assertFalse(b.shouldClear(sda1),
                         msg="protected devices should never be cleared")
        self.assertFalse(b.shouldClear(sda),
                         msg="disks containing protected devices should never "
                         "be cleared")
        sda1.protected = False

        #
        # clearpart type all
        #
        b.config.clearPartType = CLEARPART_TYPE_ALL
        self.assertTrue(b.shouldClear(sda1),
                        msg="type all should clear all partitions")
        self.assertTrue(b.shouldClear(sda2),
                        msg="type all should clear all partitions")

        b.config.initializeDisks = False
        self.assertTrue(b.shouldClear(sda),
                        msg="type all should initialize all disks")
        self.assertTrue(b.shouldClear(sdb),
                        msg="type all should initialize all disks")
        self.assertTrue(b.shouldClear(sdc),
                        msg="type all should initialize all disks")
        self.assertTrue(b.shouldClear(sdd),
                        msg="type all should initialize all disks")

        b.config.initializeDisks = True
        self.assertTrue(b.shouldClear(sda),
                        msg="type all should initialize all disks")
        self.assertTrue(b.shouldClear(sdb),
                        msg="type all should initialize all disks")
        self.assertTrue(b.shouldClear(sdc),
                        msg="type all should initialize all disks")
        self.assertTrue(b.shouldClear(sdd),
                        msg="type all should initialize all disks")

        sda1.protected = True
        self.assertFalse(b.shouldClear(sda1),
                         msg="protected devices should never be cleared")
        self.assertFalse(b.shouldClear(sda),
                         msg="disks containing protected devices should never "
                         "be cleared")
        sda1.protected = False
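A minimal sketch of the setup the assertions above assume (the test devices
sda, sda1, sda2, sdb, sdc and sdd are built earlier in the test case; the
clearpart constants are assumed to come from pykickstart, as in anaconda):

import blivet
from pykickstart.constants import (CLEARPART_TYPE_NONE, CLEARPART_TYPE_LINUX,
                                   CLEARPART_TYPE_ALL)

b = blivet.Blivet()

# each assertion block above corresponds to one combination of these two
# settings; protected devices are never cleared regardless of them
for clear_type in (CLEARPART_TYPE_NONE, CLEARPART_TYPE_LINUX, CLEARPART_TYPE_ALL):
    for init_label in (False, True):
        b.config.clearPartType = clear_type
        b.config.initializeDisks = init_label
        # b.shouldClear(device) is then evaluated for each test device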
Example #25
0
File: rescue.py Project: numbnet/anaconda
def doRescue(intf, rescue_mount, ksdata):
    import blivet

    # XXX: hook the exception handler wrapper that turns off snack first
    orig_hook = sys.excepthook
    sys.excepthook = lambda ty, val, tb: _exception_handler_wrapper(
        orig_hook, intf.screen, ty, val, tb)

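    # make a handful of config files from the runtime image visible in /etc so
    # that basic tools (name/service lookups, man, editors) work in the rescue shell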
    for f in [
            "services", "protocols", "group", "joe", "man.config",
            "nsswitch.conf", "selinux", "mke2fs.conf"
    ]:
        try:
            os.symlink('/mnt/runtime/etc/' + f, '/etc/' + f)
        except OSError:
            pass

    # Early shell access with no disk access attempts
    if not rescue_mount:
        # the %post should be responsible for mounting all needed file systems
        # NOTE: 1st script must be bash or simple python as nothing else might be available in the rescue image
        if flags.automatedInstall and ksdata.scripts:
            runPostScripts(ksdata.scripts)
        else:
            runShell()

        sys.exit(0)

    if flags.automatedInstall:
        readOnly = ksdata.rescue.romount
    else:
        # prompt to see if we should try and find root filesystem and mount
        # everything in /etc/fstab on that root
        while True:
            rc = ButtonChoiceWindow(
                intf.screen, _("Rescue"),
                _("The rescue environment will now attempt to find your "
                  "Linux installation and mount it under the directory "
                  "%s.  You can then make any changes required to your "
                  "system.  If you want to proceed with this step choose "
                  "'Continue'.  You can also choose to mount your file systems "
                  "read-only instead of read-write by choosing 'Read-Only'.  "
                  "\n\n"
                  "If for some reason this process fails you can choose 'Skip' "
                  "and this step will be skipped and you will go directly to a "
                  "command shell.\n\n") % (ROOT_PATH, ),
                [_("Continue"), _("Read-Only"),
                 _("Skip")])

            if rc == _("Skip").lower():
                runShell(intf.screen)
                sys.exit(0)
            else:
                readOnly = rc == _("Read-Only").lower()

            break

    sto = blivet.Blivet(ksdata=ksdata)
    blivet.storageInitialize(sto, ksdata, [])
    _unlock_devices(intf, sto)
    roots = blivet.findExistingInstallations(sto.devicetree)

    if not roots:
        root = None
    elif len(roots) == 1:
        root = roots[0]
    else:
        height = min(len(roots), 12)
        if height == 12:
            scroll = 1
        else:
            scroll = 0

        lst = []
        for root in roots:
            lst.append("%s" % root.name)

        (button, choice) = \
            ListboxChoiceWindow(intf.screen, _("System to Rescue"),
                                _("Which device holds the root partition "
                                  "of your installation?"), lst,
                                [ _("OK"), _("Exit") ], width = 30,
                                scroll = scroll, height = height,
                                help = "multipleroot")

        if button == _("Exit").lower():
            root = None
        else:
            root = roots[choice]

    rootmounted = False

    if root:
        try:
            # TODO: add a callback to warn about dirty filesystems
            # keep the return value; it is checked below for dirty file systems
            rc = mountExistingSystem(sto.fsset, root.device,
                                     allowDirty=True, readOnly=readOnly)

            if not flags.imageInstall:
                msg = _("The system will reboot automatically when you exit "
                        "from the shell.")
            else:
                msg = _("Run %s to unmount the system "
                        "when you are finished.") % ANACONDA_CLEANUP

            if rc == -1:
                if flags.automatedInstall:
                    log.error(
                        "System had dirty file systems which you chose not to mount"
                    )
                else:
                    ButtonChoiceWindow(
                        intf.screen,
                        _("Rescue"),
                        _("Your system had dirty file systems which you chose not "
                          "to mount.  Press return to get a shell from which "
                          "you can fsck and mount your partitions. %s") % msg,
                        [_("OK")],
                        width=50)
                rootmounted = False
            else:
                if flags.automatedInstall:
                    log.info("System has been mounted under: %s", ROOT_PATH)
                else:
                    ButtonChoiceWindow(
                        intf.screen, _("Rescue"),
                        _("Your system has been mounted under %(rootPath)s.\n\n"
                          "Press <return> to get a shell. If you would like to "
                          "make your system the root environment, run the command:\n\n"
                          "\tchroot %(rootPath)s\n\n%(msg)s") % {
                              'rootPath': ROOT_PATH,
                              'msg': msg
                          }, [_("OK")])
                rootmounted = True

                # now turn on swap
                if not readOnly:
                    try:
                        sto.turnOnSwap()
                    except StorageError:
                        log.error("Error enabling swap")

                # and selinux too
                if flags.selinux:
                    # we have to catch the possible exception
                    # because we support read-only mounting
                    try:
                        fd = open("%s/.autorelabel" % ROOT_PATH, "w+")
                        fd.close()
                    except IOError:
                        log.warning("cannot touch /.autorelabel")

                # set a library path to use mounted fs
                libdirs = os.environ.get("LD_LIBRARY_PATH", "").split(":")
                mounted = map(lambda dir: "/mnt/sysimage%s" % dir, libdirs)
                os.environ["LD_LIBRARY_PATH"] = ":".join(libdirs + mounted)

                # find groff data dir
                gversion = None
                try:
                    glst = os.listdir("/mnt/sysimage/usr/share/groff")
                except OSError:
                    pass
                else:
                    # find a directory whose name is a numeral; that's where
                    # the data files are
                    for gdir in glst:
                        if re.match(r'\d[.\d]+\d$', gdir):
                            gversion = gdir
                            break

                if gversion is not None:
                    gpath = "/mnt/sysimage/usr/share/groff/" + gversion
                    os.environ["GROFF_FONT_PATH"] = gpath + '/font'
                    os.environ[
                        "GROFF_TMAC_PATH"] = "%s:/mnt/sysimage/usr/share/groff/site-tmac" % (
                            gpath + '/tmac', )

                # do we have bash?
                try:
                    if os.access("/usr/bin/bash", os.R_OK):
                        os.symlink("/usr/bin/bash", "/bin/bash")
                except OSError:
                    pass
        except (ValueError, LookupError, SyntaxError, NameError):
            raise
        except Exception as e:
            log.error("doRescue caught exception: %s", e)
            if flags.automatedInstall:
                log.error(
                    "An error occurred trying to mount some or all of your system"
                )
            else:
                if not flags.imageInstall:
                    msg = _("The system will reboot automatically when you "
                            "exit from the shell.")
                else:
                    msg = _("Run %s to unmount the system "
                            "when you are finished.") % ANACONDA_CLEANUP

                ButtonChoiceWindow(
                    intf.screen, _("Rescue"),
                    _("An error occurred trying to mount some or all of your "
                      "system. Some of it may be mounted under %s.\n\n"
                      "Press <return> to get a shell.") % ROOT_PATH + msg,
                    [_("OK")])
    else:
        if flags.automatedInstall and ksdata.reboot.action in [
                KS_REBOOT, KS_SHUTDOWN
        ]:
            log.info("No Linux partitions found")
            intf.screen.finish()
            print(_("You don't have any Linux partitions.  Rebooting.\n"))
            sys.exit(0)
        else:
            if not flags.imageInstall:
                msg = _(" The system will reboot automatically when you exit "
                        "from the shell.")
            else:
                msg = ""
            ButtonChoiceWindow(intf.screen,
                               _("Rescue Mode"),
                               _("You don't have any Linux partitions. Press "
                                 "return to get a shell.%s") % msg, [_("OK")],
                               width=50)

    msgStr = ""

    if rootmounted and not readOnly:
        sto.makeMtab()
        try:
            makeResolvConf(ROOT_PATH)
        except (OSError, IOError) as e:
            log.error("error making a resolv.conf: %s", e)
        msgStr = _("Your system is mounted under the %s directory.") % (
            ROOT_PATH, )
        ButtonChoiceWindow(intf.screen, _("Rescue"), msgStr, [_("OK")])

    # we do not need ncurses anymore; shut it down
    intf.shutdown()

    # create /etc/fstab in the ramdisk so it is easier to work with read-only mounted filesystems
    makeFStab()

    # run %post if we've mounted everything
    if rootmounted and not readOnly and flags.automatedInstall:
        runPostScripts(ksdata.scripts)

    # start shell if reboot wasn't requested
    if not flags.automatedInstall or not ksdata.reboot.action in [
            KS_REBOOT, KS_SHUTDOWN
    ]:
        runShell(msg=msgStr)

    sys.exit(0)
Example #26
0
def createBrick(brickName,
                mountPoint,
                devNameList,
                fsType=DEFAULT_FS_TYPE,
                raidParams={}):
    def _getDeviceList(devNameList):
        return [
            blivetEnv.devicetree.getDeviceByName(devName.split("/")[-1])
            for devName in devNameList
        ]

    def _makePartition(deviceList):
        pvDeviceList = []
        doPartitioning = False
        for dev in deviceList:
            if dev.type not in ['disk', 'dm-multipath']:
                pvDeviceList.append(dev)
            else:
                blivetEnv.initializeDisk(dev)
                part = blivetEnv.newPartition(fmt_type="lvmpv",
                                              grow=True,
                                              parents=[dev])
                blivetEnv.createDevice(part)
                pvDeviceList.append(part)
                doPartitioning = True

        if doPartitioning:
            blivet.partitioning.doPartitioning(blivetEnv)
        return pvDeviceList

    def _createPV(deviceList, alignment=0):
        def _createAlignedPV(deviceList, alignment):
            for dev in deviceList:
                rc, out, err = lvm._createpv([dev.path],
                                             metadataSize=0,
                                             options=('--dataalignment',
                                                      '%sK' % alignment))
                if rc:
                    raise ge.GlusterHostStorageDevicePVCreateFailedException(
                        dev.path, alignment, rc, out, err)

            blivetEnv.reset()
            return _getDeviceList([dev.name for dev in deviceList])

        if alignment:
            blivetEnv.doIt()
            return _createAlignedPV(deviceList, alignment)

        for dev in deviceList:
            lvmpv = blivet.formats.getFormat("lvmpv", device=dev.path)
            blivetEnv.formatDevice(dev, lvmpv)

        blivet.partitioning.doPartitioning(blivetEnv)
        return deviceList

    def _createVG(vgName, deviceList, stripeSize=0):
        if stripeSize:
            vg = LVMVolumeGroupDevice(vgName,
                                      peSize=blivet.size.Size('%s KiB' %
                                                              stripeSize),
                                      parents=deviceList)
        else:
            vg = LVMVolumeGroupDevice(vgName, parents=deviceList)

        blivetEnv.createDevice(vg)
        return vg

    def _createThinPool(poolName,
                        vg,
                        alignment=0,
                        poolMetaDataSize=0,
                        poolDataSize=0):
        if not alignment:
            # bz#1180228: blivet doesn't handle percentage-based sizes properly
            # Workaround: until the bz gets fixed, take only 99% of the vg size
            pool = LVMThinPoolDevice(poolName,
                                     parents=[vg],
                                     size=(vg.size * 99 / 100),
                                     grow=True)
            blivetEnv.createDevice(pool)
            return pool
        else:
            metaName = "meta-%s" % poolName
            vgPoolName = "%s/%s" % (vg.name, poolName)
            metaLv = LVMLogicalVolumeDevice(metaName,
                                            parents=[vg],
                                            size=blivet.size.Size(
                                                '%d KiB' % poolMetaDataSize))
            poolLv = LVMLogicalVolumeDevice(poolName,
                                            parents=[vg],
                                            size=blivet.size.Size(
                                                '%d KiB' % poolDataSize))
            blivetEnv.createDevice(metaLv)
            blivetEnv.createDevice(poolLv)
            blivetEnv.doIt()

            # bz#1100514: LVM2 currently only supports physical extent sizes
            # that are a power of 2. Until that support is available we need
            # to use lvconvert to achieve that.
            # bz#1179826: blivet doesn't support lvconvert functionality.
            # Workaround: until the bz gets fixed, the lvconvert command is used
            rc, out, err = utils.execCmd([
                _lvconvertCommandPath.cmd, '--chunksize',
                '%sK' % alignment, '--thinpool', vgPoolName, '--poolmetadata',
                "%s/%s" % (vg.name, metaName), '--poolmetadataspar', 'n', '-y'
            ])

            if rc:
                raise ge.GlusterHostStorageDeviceLVConvertFailedException(
                    vg.path, alignment, rc, out, err)

            rc, out, err = utils.execCmd(
                [_lvchangeCommandPath.cmd, '--zero', 'n', vgPoolName])
            if rc:
                raise ge.GlusterHostStorageDeviceLVChangeFailedException(
                    vgPoolName, rc, out, err)

            blivetEnv.reset()
            return blivetEnv.devicetree.getDeviceByName(poolLv.name)

    vgName = "vg-" + brickName
    poolName = "pool-" + brickName
    alignment = 0
    chunkSize = 0
    poolDataSize = 0
    count = 0
    metaDataSize = DEFAULT_METADATA_SIZE_KB
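    # derive the LVM data alignment from the RAID geometry: RAID6 loses two
    # parity disks and RAID10 keeps half the disks as mirrors, so the alignment
    # is stripeSize times the number of data-bearing disks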
    if raidParams.get('type') == '6':
        count = raidParams['pdCount'] - 2
        alignment = raidParams['stripeSize'] * count
        chunkSize = alignment
    elif raidParams.get('type') == '10':
        count = raidParams['pdCount'] / 2
        alignment = raidParams['stripeSize'] * count
        chunkSize = DEFAULT_CHUNK_SIZE_KB

    blivetEnv = blivet.Blivet()
    blivetEnv.reset()

    deviceList = _getDeviceList(devNameList)

    notFoundList = set(devNameList).difference(
        set([dev.name for dev in deviceList]))
    if notFoundList:
        raise ge.GlusterHostStorageDeviceNotFoundException(notFoundList)

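    # devices that fail _canCreateBrick() contribute True instead of their name
    # to the set below, so their names survive the difference and are reported
    # as being in use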
    inUseList = set(devNameList).difference(
        set([not _canCreateBrick(dev) or dev.name for dev in deviceList]))
    if inUseList:
        raise ge.GlusterHostStorageDeviceInUseException(inUseList)

    pvDeviceList = _makePartition(deviceList)
    pvDeviceList = _createPV(pvDeviceList, alignment)
    vg = _createVG(vgName, pvDeviceList, raidParams.get('stripeSize', 0))

    # The following calculation is based on the redhat storage performance doc
    # http://docbuilder.usersys.redhat.com/22522
    # /#chap-Configuring_Red_Hat_Storage_for_Enhancing_Performance

    if alignment:
        vgSizeKib = int(vg.size.convertTo(spec="KiB"))
        if vg.size.convertTo(spec='MiB') < MIN_VG_SIZE:
            metaDataSize = vgSizeKib * MIN_METADATA_PERCENT
        poolDataSize = vgSizeKib - metaDataSize
        metaDataSize = (metaDataSize - (metaDataSize % alignment))
        poolDataSize = (poolDataSize - (poolDataSize % alignment))

    pool = _createThinPool(poolName, vg, chunkSize, metaDataSize, poolDataSize)
    thinlv = LVMThinLogicalVolumeDevice(brickName,
                                        parents=[pool],
                                        size=pool.size,
                                        grow=True)
    blivetEnv.createDevice(thinlv)
    blivetEnv.doIt()

    if fsType != DEFAULT_FS_TYPE:
        log.error("fstype %s is currently unsupported" % fsType)
        raise ge.GlusterHostStorageDeviceMkfsFailedException(
            thinlv.path, alignment, raidParams.get('stripeSize', 0), fsType)

    format = blivet.formats.getFormat(DEFAULT_FS_TYPE, device=thinlv.path)
    if alignment:
        format._defaultFormatOptions = [
            "-f", "-K", "-i", "size=512", "-d",
            "sw=%s,su=%sk" % (count, raidParams.get('stripeSize')), "-n",
            "size=8192"
        ]
    blivetEnv.formatDevice(thinlv, format)
    blivetEnv.doIt()

    try:
        os.makedirs(mountPoint)
    except OSError as e:
        if errno.EEXIST != e.errno:
            errMsg = "[Errno %s] %s: '%s'" % (e.errno, e.strerror, e.filename)
            raise ge.GlusterHostStorageDeviceMakeDirsFailedException(
                err=[errMsg])
    thinlv.format.setup(mountpoint=mountPoint)
    blivetEnv.doIt()
    fstab.FsTab().add(thinlv.path, mountPoint, DEFAULT_FS_TYPE)
    return _getDeviceDict(thinlv)
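A hypothetical invocation (device names, mount point and RAID parameters are
illustrative only, not taken from the original project), showing the shape of
the arguments createBrick expects:

# sketch: two disks in a RAID10 set with a 256 KiB stripe
brick_info = createBrick(
    "brick1",                        # used to derive vg-brick1 / pool-brick1
    "/gluster/brick1",               # mount point, created if missing
    ["/dev/sdb", "/dev/sdc"],        # devNameList
    fsType=DEFAULT_FS_TYPE,          # any other fsType is rejected above
    raidParams={"type": "10", "pdCount": 2, "stripeSize": 256})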
Example #27
0
    import blivet
    from pykickstart.version import makeVersion
    from pyanaconda.packaging.yumpayload import YumPayload

    # set some things specially since we're just testing
    flags.testing = True

    # set up ksdata
    ksdata = makeVersion()

    #ksdata.method.method = "url"
    #ksdata.method.url = "http://husky/install/f17/os/"
    #ksdata.method.url = "http://dl.fedoraproject.org/pub/fedora/linux/development/17/x86_64/os/"

    # set up storage
    storage = blivet.Blivet(ksdata=ksdata)
    storage.reset()

    # set up the payload
    payload = YumPayload(ksdata)
    payload.setup(storage)

    for repo in payload._yum.repos.repos.values():
        print repo.name, repo.enabled

    ksdata.method.method = "url"
    #ksdata.method.url = "http://husky/install/f17/os/"
    ksdata.method.url = "http://dl.fedoraproject.org/pub/fedora/linux/development/17/x86_64/os/"

    # now switch the base repo to what we set ksdata.method to just above
    payload.updateBaseRepo()
Example #28
def sync_volumes(volumes, index, vol_options):
    # instantiating blivet class, this will be used for
    # getting brick_device_details
    b = blivet.Blivet()

    # reset blivet during every sync to get latest information
    # about storage devices in the machine
    b.reset()
    devicetree = b.devicetree

    SYNC_TTL = int(NS.config.data.get("sync_interval", 10)) + len(volumes) * 10

    node_context = NS.node_context.load()
    tag_list = node_context.tags
    # Raise alerts for volume state change.
    cluster_provisioner = "provisioner/%s" % NS.tendrl_context.integration_id
    if cluster_provisioner in tag_list:
        try:
            stored_volume_status = NS._int.client.read(
                "clusters/%s/Volumes/%s/status" %
                (NS.tendrl_context.integration_id,
                 volumes['volume%s.id' % index])).value
            current_status = volumes['volume%s.status' % index]
            if stored_volume_status != "" and \
                current_status != stored_volume_status:
                msg = ("Status of volume: %s in cluster %s "
                       "changed from %s to %s") % (
                           volumes['volume%s.name' % index],
                           NS.tendrl_context.integration_id,
                           stored_volume_status, current_status)
                instance = "volume_%s" % volumes['volume%s.name' % index]
                event_utils.emit_event(
                    "volume_status",
                    current_status,
                    msg,
                    instance,
                    'WARNING' if current_status == 'Stopped' else 'INFO',
                    tags={
                        "entity_type": RESOURCE_TYPE_VOLUME,
                        "volume_name": volumes['volume%s.name' % index]
                    })
        except (KeyError, etcd.EtcdKeyNotFound) as ex:
            if isinstance(ex, KeyError):
                raise ex
            pass

        volume = NS.gluster.objects.Volume(
            vol_id=volumes['volume%s.id' % index],
            vol_type="arbiter"
            if int(volumes['volume%s.arbiter_count' % index]) > 0 else
            volumes['volume%s.type' % index],
            name=volumes['volume%s.name' % index],
            transport_type=volumes['volume%s.transport_type' % index],
            status=volumes['volume%s.status' % index],
            brick_count=volumes['volume%s.brickcount' % index],
            snap_count=volumes['volume%s.snap_count' % index],
            stripe_count=volumes['volume%s.stripe_count' % index],
            replica_count=volumes['volume%s.replica_count' % index],
            subvol_count=volumes['volume%s.subvol_count' % index],
            arbiter_count=volumes['volume%s.arbiter_count' % index],
            disperse_count=volumes['volume%s.disperse_count' % index],
            redundancy_count=volumes['volume%s.redundancy_count' % index],
            quorum_status=volumes['volume%s.quorum_status' % index],
            snapd_status=volumes['volume%s.snapd_svc.online_status' % index],
            snapd_inited=volumes['volume%s.snapd_svc.inited' % index],
        )
        volume.save(ttl=SYNC_TTL)

        # Initialize volume alert count
        try:
            volume_alert_count_key = '/clusters/%s/Volumes/%s/'\
                                     'alert_counters' % (
                                         NS.tendrl_context.integration_id,
                                         volumes['volume%s.id' % index]
                                     )
            etcd_utils.read(volume_alert_count_key)
        except (etcd.EtcdException) as ex:
            if type(ex) == etcd.EtcdKeyNotFound:
                NS.gluster.objects.VolumeAlertCounters(
                    integration_id=NS.tendrl_context.integration_id,
                    volume_id=volumes['volume%s.id' % index]).save()
        # Save the default values of volume options
        vol_opt_dict = {}
        for opt_count in \
            range(1, int(vol_options['volume%s.options.count' % index])):
            vol_opt_dict[vol_options['volume%s.options.key%s' %
                                     (index, opt_count)]] = vol_options[
                                         'volume%s.options.value%s' %
                                         (index, opt_count)]
        NS.gluster.objects.VolumeOptions(
            vol_id=volume.vol_id, options=vol_opt_dict).save(ttl=SYNC_TTL)

    rebal_det = NS.gluster.objects.RebalanceDetails(
        vol_id=volumes['volume%s.id' % index],
        rebal_id=volumes['volume%s.rebalance.id' % index],
        rebal_status=volumes['volume%s.rebalance.status' % index],
        rebal_failures=volumes['volume%s.rebalance.failures' % index],
        rebal_skipped=volumes['volume%s.rebalance.skipped' % index],
        rebal_lookedup=volumes['volume%s.rebalance.lookedup' % index],
        rebal_files=volumes['volume%s.rebalance.files' % index],
        rebal_data=volumes['volume%s.rebalance.data' % index],
        time_left=volumes.get('volume%s.rebalance.time_left' % index),
    )
    rebal_det.save(ttl=SYNC_TTL)
    georep_details.save_georep_details(volumes, index)

    b_index = 1
    # ipv4 address of current node
    try:
        network_ip = []
        networks = NS._int.client.read("nodes/%s/Networks" %
                                       NS.node_context.node_id)
        for interface in networks.leaves:
            key = interface.key.split("/")[-1]
            network = NS.tendrl.objects.NodeNetwork(interface=key).load()
            network_ip.extend(network.ipv4)
    except etcd.EtcdKeyNotFound as ex:
        Event(
            ExceptionMessage(priority="debug",
                             publisher=NS.publisher_id,
                             payload={
                                 "message":
                                 "Could not find "
                                 "any ipv4 networks for node"
                                 " %s" % NS.node_context.node_id,
                                 "exception":
                                 ex
                             }))
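    # walk the bricks of this volume by probing the volume%s.brick%s.* keys;
    # this loop (and the nested client loop below) stops at the first missing
    # index via KeyError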
    while True:
        try:
            # Update brick node wise
            hostname = volumes['volume%s.brick%s.hostname' % (index, b_index)]
            if (NS.node_context.fqdn != hostname) and (hostname
                                                       not in network_ip):
                b_index += 1
                continue
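            # bricks are grouped into subvolumes of brickcount / subvol_count
            # bricks each; (b_index - 1) / sub_vol_size below picks the
            # subvolume a brick is filed under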
            sub_vol_size = (int(volumes['volume%s.brickcount' % index])) / int(
                volumes['volume%s.subvol_count' % index])
            brick_name = NS.node_context.fqdn
            brick_name += ":"
            brick_name += volumes['volume%s.brick%s'
                                  '.path' %
                                  (index, b_index)].split(":")[-1].replace(
                                      "/", "_")

            # Raise alerts if the brick path changes
            try:
                sbs = NS._int.client.read(
                    "clusters/%s/Bricks/all/"
                    "%s/%s/status" %
                    (NS.tendrl_context.integration_id, NS.node_context.fqdn,
                     brick_name.split(":_")[-1])).value
                current_status = volumes.get('volume%s.brick%s.status' %
                                             (index, b_index))
                if current_status != sbs:
                    msg = ("Status of brick: %s "
                           "under volume %s in cluster %s chan"
                           "ged from %s to %s") % (
                               volumes['volume%s.brick%s'
                                       '.path' % (index, b_index)],
                               volumes['volume%s.'
                                       'name' % index],
                               NS.tendrl_context.integration_id, sbs,
                               current_status)
                    instance = "volume_%s|brick_%s" % (
                        volumes['volume%s.name' % index],
                        volumes['volume%s.brick%s.path' % (index, b_index)])
                    event_utils.emit_event(
                        "brick_status",
                        current_status,
                        msg,
                        instance,
                        'WARNING' if current_status == 'Stopped' else 'INFO',
                        tags={
                            "entity_type": RESOURCE_TYPE_BRICK,
                            "volume_name": volumes['volume%s.'
                                                   'name' % index]
                        })

            except etcd.EtcdKeyNotFound:
                pass

            brk_pth = "clusters/%s/Volumes/%s/Bricks/subvolume%s/%s"

            vol_brick_path = brk_pth % (NS.tendrl_context.integration_id,
                                        volumes['volume%s.id' % index],
                                        str((b_index - 1) / sub_vol_size),
                                        brick_name)

            NS._int.wclient.write(vol_brick_path, "")

            brick = NS.gluster.objects.Brick(
                NS.node_context.fqdn,
                brick_name.split(":_")[-1],
                name=brick_name,
                vol_id=volumes['volume%s.id' % index],
                sequence_number=b_index,
                brick_path=volumes['volume%s.brick%s.path' % (index, b_index)],
                hostname=volumes.get('volume%s.brick%s.hostname' %
                                     (index, b_index)),
                port=volumes.get('volume%s.brick%s.port' % (index, b_index)),
                vol_name=volumes['volume%s.name' % index],
                used=True,
                node_id=NS.node_context.node_id,
                status=volumes.get('volume%s.brick%s.status' %
                                   (index, b_index)),
                filesystem_type=volumes.get(
                    'volume%s.brick%s.filesystem_type' % (index, b_index)),
                mount_opts=volumes.get('volume%s.brick%s.mount_options' %
                                       (index, b_index)),
                utilization=brick_utilization.brick_utilization(
                    volumes['volume%s.brick%s.path' % (index, b_index)]),
                client_count=volumes.get('volume%s.brick%s.client_count' %
                                         (index, b_index)),
                is_arbiter=volumes.get('volume%s.brick%s.is_arbiter' %
                                       (index, b_index)),
            )
            brick.save()
            # sync brick device details
            brick_device_details.\
                update_brick_device_details(
                    brick_name,
                    volumes[
                        'volume%s.brick%s.path' % (
                            index, b_index)
                    ],
                    devicetree
                )

            # Sync the brick client details
            c_index = 1
            if volumes.get('volume%s.brick%s.client_count' %
                           (index, b_index)) > 0:
                while True:
                    try:
                        NS.gluster.objects.ClientConnection(
                            brick_name=brick_name,
                            fqdn=NS.node_context.fqdn,
                            brick_dir=brick_name.split(":_")[-1],
                            hostname=volumes[
                                'volume%s.brick%s.client%s.hostname' %
                                (index, b_index, c_index)],
                            bytesread=volumes[
                                'volume%s.brick%s.client%s.bytesread' %
                                (index, b_index, c_index)],
                            byteswrite=volumes[
                                'volume%s.brick%s.client%s.byteswrite' %
                                (index, b_index, c_index)],
                            opversion=volumes[
                                'volume%s.brick%s.client%s.opversion' %
                                (index, b_index, c_index)]).save(ttl=SYNC_TTL)
                    except KeyError:
                        break
                    c_index += 1
            b_index += 1
        except KeyError:
            break
Example #29
0
import os

import blivet
from blivet.size import Size
from blivet.util import set_up_logging, create_sparse_tempfile
from blivet.devices.lvm import LVMCacheRequest

set_up_logging()
b = blivet.Blivet()  # create an instance of Blivet (don't add system devices)

# create a disk image file on which to create new devices
disk1_file = create_sparse_tempfile("disk1", Size("100GiB"))
b.config.disk_images["disk1"] = disk1_file
disk2_file = create_sparse_tempfile("disk2", Size("100GiB"))
b.config.disk_images["disk2"] = disk2_file

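# reset() sets up the sparse image files as disks and builds the devicetree
# from them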
b.reset()

try:
    disk1 = b.devicetree.get_device_by_name("disk1")
    disk2 = b.devicetree.get_device_by_name("disk2")

    b.initialize_disk(disk1)
    b.initialize_disk(disk2)

    pv = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk1])
    b.create_device(pv)
    pv2 = b.new_partition(size=Size("50GiB"),
                          fmt_type="lvmpv",
                          parents=[disk2])
    b.create_device(pv2)
Example #30
0
import blivet
from blivet.util import set_up_logging

set_up_logging()
b = blivet.Blivet()  # create an instance of Blivet
b.reset()  # detect system storage configuration

print(b.devicetree)