Example #1
    def test_assignment_addition_with_size_objects(self):
        result = disk.Size(mb=256) + disk.Size(gb=1)
        assert result.gb == 1.25
        assert result.gb.as_int() == 1
        assert result.gb.as_float() == 1.25
Example #2
    def test_gigabytes_is_smaller_or_equal(self, larger):
        assert disk.Size(gb=1) <= disk.Size(mb=larger)
Example #3
    def test_gigabytes_is_larger_or_equal(self, smaller):
        assert disk.Size(gb=1) >= disk.Size(mb=smaller)
Example #4
    def test_gigabytes_is_smaller(self, larger):
        assert disk.Size(gb=1) < disk.Size(mb=larger)
Example #5
    def test_gigabytes_is_larger(self, smaller):
        assert disk.Size(gb=1) > disk.Size(mb=smaller)
Example #6
    def test_addition_with_non_size_objects(self):
        with pytest.raises(TypeError):
            disk.Size(mb=100) + 4
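Examples #1 through #6 (together with Examples #15, #17, and #19 below) pin down the arithmetic contract of disk.Size: unit-based constructors, unit views that behave as floats while also offering as_int()/as_float(), ordering against other Size objects, and a TypeError when mixed with non-Size operands. The following is a minimal sketch of a class that satisfies exactly those assertions; the real ceph_volume.util.disk.Size implementation differs, so treat this as an illustration of the behavior under test, not its source.

    # Minimal, hypothetical reimplementation satisfying the assertions above.
    class _UnitView(float):
        """A float that also answers as_int()/as_float(), like Size.gb."""
        def as_int(self):
            return int(self)

        def as_float(self):
            return float(self)

    class Size(object):
        def __init__(self, b=0, mb=0, gb=0):
            # binary units: 1 MB = 1024**2 bytes, 1 GB = 1024**3 bytes
            self._b = b + mb * 1024 ** 2 + gb * 1024 ** 3

        @property
        def mb(self):
            return _UnitView(self._b / 1024.0 ** 2)

        @property
        def gb(self):
            return _UnitView(self._b / 1024.0 ** 3)

        def _check(self, other):
            # mixing Size with plain numbers must fail (Example #6)
            if not isinstance(other, Size):
                raise TypeError('Size operations require another Size')

        def __add__(self, other):
            # returns a new object, so `a + b` never mutates `a` (Example #17)
            self._check(other)
            return Size(b=self._b + other._b)

        def __sub__(self, other):
            self._check(other)
            return Size(b=self._b - other._b)

        def __lt__(self, other):
            self._check(other)
            return self._b < other._b

        def __le__(self, other):
            self._check(other)
            return self._b <= other._b

        def __gt__(self, other):
            self._check(other)
            return self._b > other._b

        def __ge__(self, other):
            self._check(other)
            return self._b >= other._b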
Example #7
    def test_size_is_larger_than_5gb_large_journal(self, fakedevice):
        devices = [fakedevice(sys_api=dict(size=6073740000))]
        assert validators.minimum_device_collocated_size(
            devices, disk.Size(mb=1)) is None
Example #8
    def get_block_size(self):
        if self.args.block_db_size:
            return disk.Size(b=self.args.block_db_size)
        else:
            return prepare.get_block_db_size(lv_format=False) or disk.Size(b=0)
Example #9
    def execute(self):
        """
        Create vgs/lvs from the incoming set of devices, assign their roles
        (block, block.db, block.wal, etc.) and offload the OSD creation to
        ``lvm create``
        """
        blank_ssd_paths = [d.abspath for d in self.blank_ssds]
        data_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

        # no common vg is found, create one with all the blank SSDs
        if not self.common_vg:
            db_vg = lvm.create_vg(blank_ssd_paths, name_prefix='ceph-block-dbs')

        # if a common vg exists then extend it with any blank ssds
        elif self.common_vg and blank_ssd_paths:
            db_vg = lvm.extend_vg(self.common_vg, blank_ssd_paths)

        # one common vg with nothing else to extend can be used directly,
        # either this is one device with one vg, or multiple devices with the
        # same vg
        else:
            db_vg = self.common_vg

        # since we are falling back to a block_db_size that might be "as large
        # as possible" we can't fully rely on LV format coming from the helper
        # function that looks up this value
        block_db_size = "%sG" % self.block_db_size.gb.as_int()

        # create 1 vg per data device first, mapping them to the device path,
        # when the lv gets created later, it can create as many as needed (or
        # even just 1)
        for osd in self.computed['osds']:
            vg = data_vgs.get(osd['data']['path'])
            if not vg:
                vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-block')
                data_vgs[osd['data']['path']] = vg

        # create the data lvs, and create the OSD with an lv from the common
        # block.db vg from before
        for osd in self.computed['osds']:
            data_path = osd['data']['path']
            data_lv_size = disk.Size(b=osd['data']['size']).gb.as_int()
            data_vg = data_vgs[data_path]
            data_lv_extents = data_vg.sizing(size=data_lv_size)['extents']
            data_lv = lvm.create_lv(
                'osd-block', data_vg.name, extents=data_lv_extents, uuid_name=True
            )
            db_lv = lvm.create_lv(
                'osd-block-db', db_vg.name, size=block_db_size, uuid_name=True
            )
            command = [
                '--bluestore',
                '--data', "%s/%s" % (data_lv.vg_name, data_lv.name),
                '--block.db', '%s/%s' % (db_lv.vg_name, db_lv.name)
            ]
            if self.args.dmcrypt:
                command.append('--dmcrypt')
            if self.args.no_systemd:
                command.append('--no-systemd')
            if self.args.crush_device_class:
                command.extend(['--crush-device-class', self.args.crush_device_class])

            Create(command).main()
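For context, the command handed off to Create at the end of execute() is a plain argv-style list. With hypothetical vg/lv names (the real ones carry generated UUID suffixes from name_prefix and uuid_name=True), one iteration might assemble something like the sketch below:

    # Hypothetical `command` value for a single OSD; --dmcrypt, --no-systemd
    # and --crush-device-class are appended only when requested via args.
    command = [
        '--bluestore',
        '--data', 'ceph-block-1a2b/osd-block-3c4d',
        '--block.db', 'ceph-block-dbs-5e6f/osd-block-db-7a8b',
    ]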
Example #10
    def execute(self):
        """
        Create vgs/lvs from the incoming set of devices, assign their roles
        (data, journal) and offload the OSD creation to ``lvm create``
        """
        blank_ssd_paths = [d.abspath for d in self.blank_ssds]
        data_vgs = dict([(osd['data']['path'], None)
                         for osd in self.computed['osds']])

        # no common vg is found, create one with all the blank SSDs
        if not self.common_vg:
            journal_vg = lvm.create_vg(blank_ssd_paths,
                                       name_prefix='ceph-journals')
        # a vg exists that can be extended
        elif self.common_vg and blank_ssd_paths:
            journal_vg = lvm.extend_vg(self.common_vg, blank_ssd_paths)
        # one common vg with nothing else to extend can be used directly
        else:
            journal_vg = self.common_vg

        journal_size = prepare.get_journal_size(lv_format=True)

        # create 1 vg per data device first, mapping them to the device path,
        # when the lv gets created later, it can create as many as needed (or
        # even just 1)
        for osd in self.computed['osds']:
            vg = data_vgs.get(osd['data']['path'])
            if not vg:
                vg = lvm.create_vg(osd['data']['path'],
                                   name_prefix='ceph-data')
                data_vgs[osd['data']['path']] = vg

        for osd in self.computed['osds']:
            data_path = osd['data']['path']
            data_lv_size = disk.Size(b=osd['data']['size']).gb.as_int()
            data_vg = data_vgs[data_path]
            data_lv_extents = data_vg.sizing(size=data_lv_size)['extents']
            data_lv = lvm.create_lv('osd-data',
                                    data_vg.name,
                                    extents=data_lv_extents,
                                    uuid_name=True)
            journal_lv = lvm.create_lv('osd-journal',
                                       journal_vg.name,
                                       size=journal_size,
                                       uuid_name=True)

            command = ['--filestore', '--data']
            command.append('%s/%s' % (data_vg.name, data_lv.name))
            command.extend(
                ['--journal',
                 '%s/%s' % (journal_vg.name, journal_lv.name)])
            if self.args.dmcrypt:
                command.append('--dmcrypt')
            if self.args.no_systemd:
                command.append('--no-systemd')
            if self.args.crush_device_class:
                command.extend(
                    ['--crush-device-class', self.args.crush_device_class])

            if self.args.prepare:
                Prepare(command).main()
            else:
                Create(command).main()
Example #11
def get_lvm_fast_allocs(lvs):
    return [("{}/{}".format(d.vg_name, d.lv_name), 100.0,
             disk.Size(b=int(d.lvs[0].lv_size)), 1) for d in lvs if not
            d.used_by_ceph]
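A hedged usage sketch: the comprehension only reads vg_name, lv_name, lvs[0].lv_size (anything int() accepts, since LVM tools often report sizes as strings), and used_by_ceph, so its behavior can be illustrated with SimpleNamespace stand-ins. The names and sizes below are made up, and this assumes get_lvm_fast_allocs and ceph_volume.util.disk are importable:

    from types import SimpleNamespace

    lv = SimpleNamespace(lv_size='10737418240')   # 10 GB, reported as a string
    dev = SimpleNamespace(vg_name='ceph-db-vg', lv_name='db-lv0',
                          lvs=[lv], used_by_ceph=False)

    print(get_lvm_fast_allocs([dev]))
    # -> [('ceph-db-vg/db-lv0', 100.0, <10.00 GB Size object>, 1)]
    #    (the Size repr is illustrative); devices already used by Ceph
    #    (used_by_ceph=True) are filtered out entirely.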
Example #12
    def _validate_wal_devs(self):
        # do not allow non-common VG to continue
        validators.has_common_vg(self.wal_devs)

        # find the common VG to calculate how much is available
        self.common_wal_vg = self.get_common_vg(self.wal_devs)

        # find how many block.wal LVs are possible from the common VG
        if self.common_wal_vg:
            common_vg_size = disk.Size(gb=self.common_wal_vg.free)
        else:
            common_vg_size = disk.Size(gb=0)

        # non-VG SSDs
        vg_members = set([d for d in self.wal_devs if d.is_lvm_member])
        self.blank_wal_devs = set(self.wal_devs).difference(vg_members)
        self.total_blank_wal_dev_size = disk.Size(b=0)
        for blank_wal_dev in self.blank_wal_devs:
            self.total_blank_wal_dev_size += disk.Size(
                b=blank_wal_dev.lvm_size.b)

        self.total_available_wal_space = self.total_blank_wal_dev_size + common_vg_size

        # If not configured, we default to 0, which is really "use as much as
        # possible" captured by the `else` condition
        if self.block_wal_size.gb > 0:
            try:
                self.vg_extents = lvm.sizing(self.total_available_wal_space.b,
                                             size=self.block_wal_size.b *
                                             self.osds_per_device)
            except SizeAllocationError:
                msg = "Not enough space in fast devices (%s) to create %s x %s block.wal LV"
                raise RuntimeError(msg %
                                   (self.total_available_wal_space,
                                    self.osds_per_device, self.block_wal_size))
        else:
            self.wal_vg_extents = lvm.sizing(self.total_available_wal_space.b,
                                             parts=self.wals_needed)

        # validate that the number of block.wal LVs possible is enough for the
        # number of OSDs proposed
        if self.total_available_wal_space.b == 0:
            msg = "No space left in fast devices to create block.wal LVs"
            raise RuntimeError(msg)

        # bluestore_block_wal_size was unset, so we must set this to whatever
        # size we get by dividing the total available space for block.wal LVs
        # into the number of block.wal LVs needed (i.e. "as large as possible")
        if self.block_wal_size.b == 0:
            self.block_wal_size = self.total_available_wal_space / self.wals_needed
            self.use_large_block_wal = True

        total_wals_possible = self.total_available_wal_space / self.block_wal_size

        if self.wals_needed > total_wals_possible:
            msg = "Not enough space (%s) to create %s x %s block.wal LVs" % (
                self.total_available_wal_space,
                self.wals_needed,
                self.block_wal_size,
            )
            raise RuntimeError(msg)
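The fallback branch at the end implements the "as large as possible" policy: an unset block_wal_size (0 bytes) is replaced by an even split of the available fast space across the block.wal LVs needed, after which the final guard can only fire for an explicit, too-large user setting. A small worked example with assumed numbers:

    # Assumed inputs: 4 GB of usable fast space and 4 block.wal LVs needed.
    total_available_wal_space_b = 4 * 1024 ** 3
    wals_needed = 4
    block_wal_size_b = 0          # bluestore_block_wal_size was unset

    if block_wal_size_b == 0:     # "use as much as possible"
        block_wal_size_b = total_available_wal_space_b // wals_needed  # 1 GB

    total_wals_possible = total_available_wal_space_b // block_wal_size_b  # 4
    assert not wals_needed > total_wals_possible   # guard does not fire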
Example #13
    def compute(self):
        osds = self.computed['osds']

        if self.data_devs and self.db_or_journal_devs:
            if not self.common_vg:
                # there isn't a common vg, so a new one must be created with all
                # the blank db devs
                self.computed['vg'] = {
                    'devices':
                    ", ".join([ssd.abspath for ssd in self.blank_db_devs]),
                    'parts':
                    self.dbs_needed,
                    'percentages':
                    self.vg_extents['percentages'],
                    'sizes':
                    self.block_db_size.b.as_int(),
                    'size':
                    self.total_blank_db_dev_size.b.as_int(),
                    'human_readable_sizes':
                    str(self.block_db_size),
                    'human_readable_size':
                    str(self.total_available_db_space),
                }
                vg_name = 'vg/lv'
            else:
                vg_name = self.common_vg.name

        if self.data_devs and self.wal_devs:
            if not self.common_wal_vg:
                # there isn't a common vg, so a new one must be created with all
                # the blank wal devs
                self.computed['wal_vg'] = {
                    'devices':
                    ", ".join([dev.abspath for dev in self.blank_wal_devs]),
                    'parts':
                    self.wals_needed,
                    'percentages':
                    self.wal_vg_extents['percentages'],
                    'sizes':
                    self.block_wal_size.b.as_int(),
                    'size':
                    self.total_blank_wal_dev_size.b.as_int(),
                    'human_readable_sizes':
                    str(self.block_wal_size),
                    'human_readable_size':
                    str(self.total_available_wal_space),
                }
                wal_vg_name = 'vg/lv'
            else:
                wal_vg_name = self.common_wal_vg.name

        for device in self.data_devs:
            for hdd in range(self.osds_per_device):
                osd = {'data': {}}
                osd['data']['path'] = device.abspath
                osd['data']['size'] = device.lvm_size.b / self.osds_per_device
                osd['data']['percentage'] = 100 / self.osds_per_device
                osd['data']['human_readable_size'] = str(
                    disk.Size(b=device.lvm_size.b) / self.osds_per_device)

                if self.db_or_journal_devs:
                    osd['block.db'] = {}
                    osd['block.db']['path'] = 'vg: %s' % vg_name
                    osd['block.db']['size'] = int(self.block_db_size.b)
                    osd['block.db']['human_readable_size'] = str(
                        self.block_db_size)
                    osd['block.db']['percentage'] = self.vg_extents[
                        'percentages']

                if self.wal_devs:
                    osd['block.wal'] = {}
                    osd['block.wal']['path'] = 'vg: %s' % wal_vg_name
                    osd['block.wal']['size'] = int(self.block_wal_size.b)
                    osd['block.wal']['human_readable_size'] = str(
                        self.block_wal_size)
                    osd['block.wal']['percentage'] = self.wal_vg_extents[
                        'percentages']

                if self.osd_ids:
                    osd['osd_id'] = self.osd_ids.pop(0)

                osds.append(osd)

        self.computed['changed'] = len(osds) > 0
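To make the resulting structure concrete, here is a hypothetical entry the loop above could append for one device split across two OSDs; the path, sizes, percentages, and the human-readable formatting are all assumptions for illustration, not captured output:

    osd = {
        'data': {
            'path': '/dev/sdb',                   # assumed data device
            'size': 1024 ** 4 / 2,                # bytes: 1 TB device, 2 OSDs
            'percentage': 100 / 2,                # 50.0% of the device
            'human_readable_size': '512.00 GB',   # assumed str(Size) formatting
        },
        'block.db': {
            'path': 'vg: ceph-block-dbs-1a2b',    # hypothetical common VG name
            'size': 30 * 1024 ** 3,               # assumed 30 GB block.db
            'human_readable_size': '30.00 GB',
            'percentage': 10,                     # from vg_extents['percentages']
        },
    }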
Example #14
    def report_pretty(self, filtered_devices):
        string = ""
        if filtered_devices:
            string += templates.filtered_devices(filtered_devices)
        string += templates.total_osds.format(total_osds=len(self.data_devs) *
                                              self.osds_per_device)

        if self.db_or_journal_devs:
            vg_extents = lvm.sizing(self.total_available_db_space.b,
                                    parts=self.dbs_needed)
            db_size = str(disk.Size(b=(vg_extents['sizes'])))

            string += templates.ssd_volume_group.format(
                target='block.db',
                total_lv_size=str(self.total_available_db_space),
                total_lvs=vg_extents['parts'] * self.osds_per_device,
                block_lv_size=db_size,
                block_db_devices=', '.join(
                    [ssd.abspath for ssd in self.db_or_journal_devs]),
                lv_size=self.block_db_size
                or str(disk.Size(b=(vg_extents['sizes']))),
                total_osds=len(self.data_devs))

        if self.wal_devs:
            wal_vg_extents = lvm.sizing(self.total_available_wal_space.b,
                                        parts=self.wals_needed)
            wal_size = str(disk.Size(b=(wal_vg_extents['sizes'])))
            string += templates.ssd_volume_group.format(
                target='block.wal',
                total_lv_size=str(self.total_available_wal_space),
                total_lvs=wal_vg_extents['parts'] * self.osds_per_device,
                block_lv_size=wal_size,
                block_db_devices=', '.join(
                    [dev.abspath for dev in self.wal_devs]),
                lv_size=self.block_wal_size
                or str(disk.Size(b=(wal_vg_extents['sizes']))),
                total_osds=len(self.data_devs))

        string += templates.osd_component_titles
        for osd in self.computed['osds']:
            string += templates.osd_header
            if 'osd_id' in osd:
                string += templates.osd_reused_id.format(id_=osd['osd_id'])
            string += templates.osd_component.format(
                _type='[data]',
                path=osd['data']['path'],
                size=osd['data']['human_readable_size'],
                percent=osd['data']['percentage'])

            if 'block.db' in osd:
                string += templates.osd_component.format(
                    _type='[block.db]',
                    path=osd['block.db']['path'],
                    size=osd['block.db']['human_readable_size'],
                    percent=osd['block.db']['percentage'])

            if 'block.wal' in osd:
                string += templates.osd_component.format(
                    _type='[block.wal]',
                    path=osd['block.wal']['path'],
                    size=osd['block.wal']['human_readable_size'],
                    percent=osd['block.wal']['percentage'])

        print(string)
Example #15
    def test_self_addition_with_size_objects(self):
        base = disk.Size(mb=256)
        base += disk.Size(gb=1)
        assert base.gb == 1.25
Example #16
    def validate(self):
        """
        HDDs represent data devices, and solid state devices are for block.db,
        make sure that the number of data devices would have enough LVs and
        those LVs would be large enough to accommodate a block.db
        """
        # validate minimum size for all devices
        validators.minimum_device_size(self.devices, osds_per_device=self.osds_per_device)

        # make sure that data devices do not have any LVs
        validators.no_lvm_membership(self.hdds)

        # do not allow non-common VG to continue
        validators.has_common_vg(self.ssds)

        # find the common VG to calculate how much is available
        self.common_vg = self.get_common_vg()

        # find how many block.db LVs are possible from the common VG
        if self.common_vg:
            common_vg_size = disk.Size(gb=self.common_vg.free)
        else:
            common_vg_size = disk.Size(gb=0)

        # non-VG SSDs
        self.vg_ssds = set([d for d in self.ssds if d.is_lvm_member])
        self.blank_ssds = set(self.ssds).difference(self.vg_ssds)
        self.total_blank_ssd_size = disk.Size(b=0)
        for blank_ssd in self.blank_ssds:
            self.total_blank_ssd_size += disk.Size(b=blank_ssd.sys_api['size'])

        self.total_available_db_space = self.total_blank_ssd_size + common_vg_size

        # If not configured, we default to 0, which is really "use as much as
        # possible" captured by the `else` condition
        if self.block_db_size.gb > 0:
            try:
                self.vg_extents = lvm.sizing(
                    self.total_available_db_space.b, size=self.block_db_size.b * self.osds_per_device
                )
            except SizeAllocationError:
                msg = "Not enough space in fast devices (%s) to create %s x %s block.db LV"
                raise RuntimeError(
                    msg % (self.total_available_db_space, self.osds_per_device, self.block_db_size)
                )
        else:
            self.vg_extents = lvm.sizing(
                self.total_available_db_space.b, parts=self.dbs_needed
            )

        # validate that the number of block.db LVs possible is enough for the
        # number of OSDs proposed
        if self.total_available_db_space.b == 0:
            msg = "No space left in fast devices to create block.db LVs"
            raise RuntimeError(msg)

        # bluestore_block_db_size was unset, so we must set this to whatever
        # size we get by dividing the total available space for block.db LVs
        # into the number of block.db LVs needed (i.e. "as large as possible")
        if self.block_db_size.b == 0:
            self.block_db_size = self.total_available_db_space / self.dbs_needed

        total_dbs_possible = self.total_available_db_space / self.block_db_size

        if self.dbs_needed > total_dbs_possible:
            msg = "Not enough space (%s) to create %s x %s block.db LVs" % (
                self.total_available_db_space, self.dbs_needed, self.block_db_size,
            )
            raise RuntimeError(msg)
Example #17
    def test_self_addition_does_not_alter_state(self):
        base = disk.Size(mb=256)
        base + disk.Size(gb=1)
        assert base.mb == 256
Example #18
    def test_aliases(self, aliases):
        short_alias, long_alias = aliases
        s = disk.Size(b=1)
        short_alias = getattr(s, short_alias)
        long_alias = getattr(s, long_alias)
        assert short_alias == long_alias
Example #19
    def test_assignment_subtraction_with_size_objects(self):
        base = disk.Size(gb=1)
        base -= disk.Size(mb=256)
        assert base.mb == 768
Example #20
    def setup(self):
        self.journal_size = disk.Size(gb=5)