Example #1
    def test_uses_multiple_devices(self, monkeypatch, fake_run):
        monkeypatch.setattr(api, 'get_vg', lambda **kw: True)
        api.extend_vg(self.foo_volume, ['/dev/sda', '/dev/sdb'])
        expected = [
            'vgextend', '--force', '--yes', 'foo', '/dev/sda', '/dev/sdb'
        ]
        assert fake_run.calls[0]['args'][0] == expected
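The assertion above pins down the exact `vgextend` invocation. For context, here is a minimal sketch of an `extend_vg` that would satisfy this test; it is written against the assertion and the monkeypatched names (`process.run`, `get_vg`), not copied from the actual `ceph_volume.api.lvm` module.

    # Sketch only: an extend_vg consistent with the test above, assuming a
    # process.run(command) wrapper and a get_vg() lookup as the test implies.
    def extend_vg(vg, devices):
        if isinstance(devices, str):
            devices = [devices]
        process.run(['vgextend', '--force', '--yes', vg.name] + devices)
        # re-read the vg so callers see the updated extent counts
        return get_vg(vg_name=vg.name)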
Example #2
    def execute(self):
        """
        Create vgs/lvs from the incoming set of devices, assign their roles
        (data, journal) and offload the OSD creation to ``lvm create``
        """
        blank_journal_dev_paths = [d.abspath for d in self.blank_journal_devs]
        data_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

        # no common vg is found, create one with all the blank journal devices
        if not self.common_vg:
            journal_vg = lvm.create_vg(blank_journal_dev_paths, name_prefix='ceph-journals')
        # a vg exists that can be extended
        elif self.common_vg and blank_journal_dev_paths:
            journal_vg = lvm.extend_vg(self.common_vg, blank_journal_dev_paths)
        # one common vg with nothing else to extend can be used directly
        else:
            journal_vg = self.common_vg

        journal_size = prepare.get_journal_size(lv_format=True)

        # create 1 vg per data device first, mapping them to the device path,
        # when the lv gets created later, it can create as many as needed (or
        # even just 1)
        for osd in self.computed['osds']:
            vg = data_vgs.get(osd['data']['path'])
            if not vg:
                vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-data')
                data_vgs[osd['data']['path']] = vg

        for osd in self.computed['osds']:
            data_path = osd['data']['path']
            data_vg = data_vgs[data_path]
            data_lv_extents = data_vg.sizing(parts=1)['extents']
            data_lv = lvm.create_lv(
                'osd-data', data_vg.name, extents=data_lv_extents, uuid_name=True
            )
            journal_lv = lvm.create_lv(
                'osd-journal', journal_vg.name, size=journal_size, uuid_name=True
            )

            command = ['--filestore', '--data']
            command.append('%s/%s' % (data_vg.name, data_lv.name))
            command.extend(['--journal', '%s/%s' % (journal_vg.name, journal_lv.name)])
            if self.args.dmcrypt:
                command.append('--dmcrypt')
            if self.args.no_systemd:
                command.append('--no-systemd')
            if self.args.crush_device_class:
                command.extend(['--crush-device-class', self.args.crush_device_class])
            if 'osd_id' in osd:
                command.extend(['--osd-id', osd['osd_id']])

            if self.args.prepare:
                Prepare(command).main()
            else:
                Create(command).main()
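To make the handoff concrete, the assembled command for a single OSD might look like the following; the VG/LV names are invented for illustration, since uuid_name=True appends generated suffixes in practice.

    # Hypothetical handoff for one OSD; names are illustrative only.
    command = [
        '--filestore',
        '--data', 'ceph-data-f00d/osd-data-1111',
        '--journal', 'ceph-journals-beef/osd-journal-2222',
    ]
    Create(command).main()  # or Prepare(command).main() when --prepare is set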
Example #3
    def execute(self):
        """
        Create vgs/lvs from the incoming set of devices, assign their roles
        (data, journal) and offload the OSD creation to ``lvm create``
        """
        blank_ssd_paths = [d.abspath for d in self.blank_ssds]
        data_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

        # no common vg is found, create one with all the blank SSDs
        if not self.common_vg:
            journal_vg = lvm.create_vg(blank_ssd_paths, name_prefix='ceph-journals')
        # a vg exists that can be extended
        elif self.common_vg and blank_ssd_paths:
            journal_vg = lvm.extend_vg(self.common_vg, blank_ssd_paths)
        # one common vg with nothing else to extend can be used directly
        else:
            journal_vg = self.common_vg

        journal_size = prepare.get_journal_size(lv_format=True)

        # create 1 vg per data device first, mapping them to the device path,
        # when the lv gets created later, it can create as many as needed (or
        # even just 1)
        for osd in self.computed['osds']:
            vg = data_vgs.get(osd['data']['path'])
            if not vg:
                vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-data')
                data_vgs[osd['data']['path']] = vg

        for osd in self.computed['osds']:
            data_path = osd['data']['path']
            data_lv_size = disk.Size(b=osd['data']['size']).gb.as_int()
            data_vg = data_vgs[data_path]
            data_lv_extents = data_vg.sizing(size=data_lv_size)['extents']
            data_lv = lvm.create_lv(
                'osd-data', data_vg.name, extents=data_lv_extents, uuid_name=True
            )
            journal_lv = lvm.create_lv(
                'osd-journal', journal_vg.name, size=journal_size, uuid_name=True
            )

            command = ['--filestore', '--data']
            command.append('%s/%s' % (data_vg.name, data_lv.name))
            command.extend(['--journal', '%s/%s' % (journal_vg.name, journal_lv.name)])
            if self.args.dmcrypt:
                command.append('--dmcrypt')
            if self.args.no_systemd:
                command.append('--no-systemd')
            if self.args.crush_device_class:
                command.extend(['--crush-device-class', self.args.crush_device_class])

            Create(command).main()
Example #4
    def execute(self):
        """
        Create vgs/lvs from the incoming set of devices, assign their roles
        (data, journal) and offload the OSD creation to ``lvm create``
        """
        ssd_paths = [d.abspath for d in self.blank_ssds]

        # no common vg is found, create one with all the blank SSDs
        if not self.common_vg:
            journal_vg = lvm.create_vg(ssd_paths, name_prefix='ceph-journals')
        # a vg exists that can be extended
        elif self.common_vg and ssd_paths:
            journal_vg = lvm.extend_vg(self.common_vg, ssd_paths)
        # one common vg with nothing else to extend can be used directly
        else:
            journal_vg = self.common_vg

        journal_size = prepare.get_journal_size(lv_format=True)

        for osd in self.computed['osds']:
            data_vg = lvm.create_vg(osd['data']['path'],
                                    name_prefix='ceph-data')
            # no extents or size means it will use 100%FREE
            data_lv = lvm.create_lv('osd-data', data_vg.name)
            journal_lv = lvm.create_lv('osd-journal',
                                       journal_vg.name,
                                       size=journal_size,
                                       uuid_name=True)

            command = ['--filestore', '--data']
            command.append('%s/%s' % (data_vg.name, data_lv.name))
            command.extend(
                ['--journal',
                 '%s/%s' % (journal_vg.name, journal_lv.name)])
            if self.args.dmcrypt:
                command.append('--dmcrypt')
            if self.args.no_systemd:
                command.append('--no-systemd')
            if self.args.crush_device_class:
                command.extend(
                    ['--crush-device-class', self.args.crush_device_class])

            Create(command).main()
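The "no extents or size" comment above refers to standard LVM behavior: lvcreate -l 100%FREE allocates every remaining extent in the volume group. As a sketch of how such a fallback can be wired (a hypothetical helper, not the actual lvm.create_lv source):

    # Hypothetical helper illustrating the 100%FREE fallback noted above.
    # lvcreate takes -l for extents and -L for a size string like '5G'.
    def build_lvcreate(name, vg_name, size=None, extents=None):
        if extents:
            size_args = ['-l', str(extents)]
        elif size:
            size_args = ['-L', size]
        else:
            # neither given: hand all remaining VG space to this LV
            size_args = ['-l', '100%FREE']
        return ['lvcreate', '--yes'] + size_args + ['-n', name, vg_name]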
Example #5
    def execute(self):
        """
        Create vgs/lvs from the incoming set of devices, assign their roles
        (block, block.db, block.wal, etc.) and offload the OSD creation to
        ``lvm create``
        """
        blank_ssd_paths = [d.abspath for d in self.blank_ssds]
        data_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

        # no common vg is found, create one with all the blank SSDs
        if not self.common_vg:
            db_vg = lvm.create_vg(blank_ssd_paths, name_prefix='ceph-block-dbs')

        # if a common vg exists then extend it with any blank ssds
        elif self.common_vg and blank_ssd_paths:
            db_vg = lvm.extend_vg(self.common_vg, blank_ssd_paths)

        # one common vg with nothing else to extend can be used directly,
        # either this is one device with one vg, or multiple devices with the
        # same vg
        else:
            db_vg = self.common_vg

        # since we are falling back to a block_db_size that might be "as large
        # as possible" we can't fully rely on LV format coming from the helper
        # function that looks up this value
        block_db_size = "%sG" % self.block_db_size.gb.as_int()

        # create 1 vg per data device first, mapping them to the device path,
        # when the lv gets created later, it can create as many as needed (or
        # even just 1)
        for osd in self.computed['osds']:
            vg = data_vgs.get(osd['data']['path'])
            if not vg:
                vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-block')
                data_vgs[osd['data']['path']] = vg

        # create the data lvs, and create the OSD with an lv from the common
        # block.db vg from before
        for osd in self.computed['osds']:
            data_path = osd['data']['path']
            data_lv_size = disk.Size(b=osd['data']['size']).gb.as_int()
            data_vg = data_vgs[data_path]
            data_lv_extents = data_vg.sizing(size=data_lv_size)['extents']
            data_lv = lvm.create_lv(
                'osd-block', data_vg.name, extents=data_lv_extents, uuid_name=True
            )
            db_lv = lvm.create_lv(
                'osd-block-db', db_vg.name, size=block_db_size, uuid_name=True
            )
            command = [
                '--bluestore',
                '--data', "%s/%s" % (data_lv.vg_name, data_lv.name),
                '--block.db', '%s/%s' % (db_lv.vg_name, db_lv.name)
            ]
            if self.args.dmcrypt:
                command.append('--dmcrypt')
            if self.args.no_systemd:
                command.append('--no-systemd')
            if self.args.crush_device_class:
                command.extend(['--crush-device-class', self.args.crush_device_class])

            if self.args.prepare:
                Prepare(command).main()
            else:
                Create(command).main()
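The fallback size above is rendered as a plain LVM size string rather than the LV-formatted value the helper would return. For instance, a 5 GB block_db_size comes out as:

    # Illustrative value only, using the same disk.Size arithmetic as above.
    block_db_size = "%sG" % disk.Size(gb=5).gb.as_int()  # -> '5G'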
Example #6
    def execute(self):
        """
        Create vgs/lvs from the incoming set of devices, assign their roles
        (block, block.db, block.wal, etc.) and offload the OSD creation to
        ``lvm create``
        """
        blank_ssd_paths = [d.abspath for d in self.blank_ssds]
        data_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

        # no common vg is found, create one with all the blank SSDs
        if not self.common_vg:
            db_vg = lvm.create_vg(blank_ssd_paths, name_prefix='ceph-block-dbs')

        # if a common vg exists then extend it with any blank ssds
        elif self.common_vg and blank_ssd_paths:
            db_vg = lvm.extend_vg(self.common_vg, blank_ssd_paths)

        # one common vg with nothing else to extend can be used directly,
        # either this is one device with one vg, or multiple devices with the
        # same vg
        else:
            db_vg = self.common_vg

        # create 1 vg per data device first, mapping them to the device path,
        # when the lv gets created later, it can create as many as needed (or
        # even just 1)
        for osd in self.computed['osds']:
            vg = data_vgs.get(osd['data']['path'])
            if not vg:
                vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-block')
                data_vgs[osd['data']['path']] = vg

        if self.use_large_block_db:
            # make the block.db lvs as large as possible
            vg_free_count = str_to_int(db_vg.vg_free_count)
            db_lv_extents = int(vg_free_count / self.dbs_needed)
        else:
            db_lv_extents = db_vg.sizing(size=self.block_db_size.gb.as_int())['extents']

        # create the data lvs, and create the OSD with an lv from the common
        # block.db vg from before
        for osd in self.computed['osds']:
            data_path = osd['data']['path']
            data_vg = data_vgs[data_path]
            data_lv_extents = data_vg.sizing(parts=1)['extents']
            data_lv = lvm.create_lv(
                'osd-block', data_vg.name, extents=data_lv_extents, uuid_name=True
            )
            db_lv = lvm.create_lv(
                'osd-block-db', db_vg.name, extents=db_lv_extents, uuid_name=True
            )
            command = [
                '--bluestore',
                '--data', "%s/%s" % (data_lv.vg_name, data_lv.name),
                '--block.db', '%s/%s' % (db_lv.vg_name, db_lv.name)
            ]
            if self.args.dmcrypt:
                command.append('--dmcrypt')
            if self.args.no_systemd:
                command.append('--no-systemd')
            if self.args.crush_device_class:
                command.extend(['--crush-device-class', self.args.crush_device_class])

            if self.args.prepare:
                Prepare(command).main()
            else:
                Create(command).main()
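The use_large_block_db branch above splits whatever is free in the db vg evenly across all planned block.db LVs. A worked instance of that arithmetic, with an invented free-extent count:

    # LVM reports vg_free_count as a string, hence str_to_int; the value
    # here is made up for illustration. With 4 block.db LVs needed:
    vg_free_count = str_to_int('76800')      # -> 76800
    db_lv_extents = int(vg_free_count / 4)   # -> 19200 extents per LV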
Example #7
    def execute(self):
        """
        Create vgs/lvs from the incoming set of devices, assign their roles
        (block, block.db, block.wal, etc.) and offload the OSD creation to
        ``lvm create``
        """
        data_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

        # create 1 vg per data device first, mapping them to the device path,
        # when the lv gets created later, it can create as many as needed (or
        # even just 1)
        for osd in self.computed['osds']:
            vg = data_vgs.get(osd['data']['path'])
            if not vg:
                vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-block')
                data_vgs[osd['data']['path']] = vg

        if self.data_devs and self.db_or_journal_devs:
            blank_db_dev_paths = [d.abspath for d in self.blank_db_devs]

            # no common vg is found, create one with all the blank db devices
            if not self.common_vg:
                db_vg = lvm.create_vg(blank_db_dev_paths, name_prefix='ceph-block-dbs')
            elif self.common_vg and blank_db_dev_paths:
                # if a common vg exists then extend it with any blank db devices
                db_vg = lvm.extend_vg(self.common_vg, blank_db_dev_paths)
            else:
                # one common vg with nothing else to extend can be used directly,
                # either this is one device with one vg, or multiple devices with the
                # same vg
                db_vg = self.common_vg

            if self.use_large_block_db:
                # make the block.db lvs as large as possible
                vg_free_count = str_to_int(db_vg.vg_free_count)
                db_lv_extents = int(vg_free_count / self.dbs_needed)
            else:
                db_lv_extents = db_vg.sizing(size=self.block_db_size.gb.as_int())['extents']

        if self.data_devs and self.wal_devs:
            blank_wal_dev_paths = [d.abspath for d in self.blank_wal_devs]

            if not self.common_wal_vg:
                wal_vg = lvm.create_vg(blank_wal_dev_paths,
                                       name_prefix='ceph-block-wals')
            elif self.common_wal_vg and blank_wal_dev_paths:
                wal_vg = lvm.extend_vg(self.common_wal_vg, blank_wal_dev_paths)
            else:
                wal_vg = self.common_wal_vg

            if self.use_large_block_wal:
                # make the block.wal lvs as large as possible
                vg_free_count = str_to_int(wal_vg.vg_free_count)
                wal_lv_extents = int(vg_free_count / self.wals_needed)
            else:
                wal_lv_extents = wal_vg.sizing(size=self.block_wal_size.gb.as_int())['extents']

        # create the data lvs, and create the OSD with an lv from the common
        # block.db vg from before
        for osd in self.computed['osds']:
            data_path = osd['data']['path']
            data_vg = data_vgs[data_path]
            data_lv_extents = data_vg.sizing(parts=1)['extents']
            data_lv = lvm.create_lv(
                'osd-block', data_vg.name, extents=data_lv_extents, uuid_name=True
            )
            command = [
                '--bluestore',
                '--data', "%s/%s" % (data_lv.vg_name, data_lv.name),
            ]
            if 'block.db' in osd:
                db_lv = lvm.create_lv(
                    'osd-block-db', db_vg.name, extents=db_lv_extents, uuid_name=True
                )
                command.extend(['--block.db',
                                '{}/{}'.format(db_lv.vg_name, db_lv.name)])
            if 'block.wal' in osd:
                wal_lv = lvm.create_lv(
                    'osd-block-wal', wal_vg.name, extents=wal_lv_extents, uuid_name=True
                )
                command.extend(['--block.wal',
                                '{}/{}'.format(wal_lv.vg_name, wal_lv.name)])
            if self.args.dmcrypt:
                command.append('--dmcrypt')
            if self.args.no_systemd:
                command.append('--no-systemd')
            if self.args.crush_device_class:
                command.extend(['--crush-device-class', self.args.crush_device_class])
            if 'osd_id' in osd:
                command.extend(['--osd-id', osd['osd_id']])

            if self.args.prepare:
                Prepare(command).main()
            else:
                Create(command).main()
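Note the two handoffs at the end: Create(command).main() runs the full prepare-plus-activate sequence, while passing --prepare stops after the prepare stage so activation can happen separately; --osd-id lets the run reuse an existing OSD id.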
Example #8
    def execute(self):
        """
        Create vgs/lvs from the incoming set of devices, assign their roles
        (block, block.db, block.wal, etc.) and offload the OSD creation to
        ``lvm create``
        """
        blank_ssd_paths = [d.abspath for d in self.blank_ssds]
        data_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

        # no common vg is found, create one with all the blank SSDs
        if not self.common_vg:
            db_vg = lvm.create_vg(blank_ssd_paths, name_prefix='ceph-block-dbs')

        # if a common vg exists then extend it with any blank ssds
        elif self.common_vg and blank_ssd_paths:
            db_vg = lvm.extend_vg(self.common_vg, blank_ssd_paths)

        # one common vg with nothing else to extend can be used directly,
        # either this is one device with one vg, or multiple devices with the
        # same vg
        else:
            db_vg = self.common_vg

        # since we are falling back to a block_db_size that might be "as large
        # as possible" we can't fully rely on LV format coming from the helper
        # function that looks up this value
        block_db_size = "%sG" % self.block_db_size.gb.as_int()

        # create 1 vg per data device first, mapping them to the device path,
        # when the lv gets created later, it can create as many as needed (or
        # even just 1)
        for osd in self.computed['osds']:
            vg = data_vgs.get(osd['data']['path'])
            if not vg:
                vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-block')
                data_vgs[osd['data']['path']] = vg

        # create the data lvs, and create the OSD with an lv from the common
        # block.db vg from before
        for osd in self.computed['osds']:
            data_path = osd['data']['path']
            data_lv_size = disk.Size(b=osd['data']['size']).gb.as_int()
            data_vg = data_vgs[data_path]
            data_lv_extents = data_vg.sizing(size=data_lv_size)['extents']
            data_lv = lvm.create_lv(
                'osd-block', data_vg.name, extents=data_lv_extents, uuid_name=True
            )
            db_lv = lvm.create_lv(
                'osd-block-db', db_vg.name, size=block_db_size, uuid_name=True
            )
            command = [
                '--bluestore',
                '--data', "%s/%s" % (data_lv.vg_name, data_lv.name),
                '--block.db', '%s/%s' % (db_lv.vg_name, db_lv.name)
            ]
            if self.args.dmcrypt:
                command.append('--dmcrypt')
            if self.args.no_systemd:
                command.append('--no-systemd')
            if self.args.crush_device_class:
                command.extend(['--crush-device-class', self.args.crush_device_class])

            Create(command).main()