Example #1
    def process_deployment_for_cluster(cls, cluster, cluster_data):
        """Added ceph related information to deployment info for cluster."""
        all_nodes = {n.uid: n for n in cluster.nodes}
        osd_num = 0
        for n in cluster_data['nodes']:
            if 'ceph-osd' in (n.get('roles') or [n.get('role')]):
                volumes = cls.get_node_volumes(all_nodes[n['uid']]) or []
                for volume in volumes:
                    for part in volume.get('volumes', []):
                        if (part.get('name') == 'ceph' and
                                part.get('size', 0) > 0):
                            osd_num += 1

        storage_attrs = cluster_data.setdefault('storage', {})
        pg_counts = get_pool_pg_count(
            osd_num=osd_num,
            pool_sz=int(storage_attrs['osd_pool_size']),
            ceph_version='firefly',
            volumes_ceph=storage_attrs['volumes_ceph'],
            objects_ceph=storage_attrs['objects_ceph'],
            ephemeral_ceph=storage_attrs['ephemeral_ceph'],
            images_ceph=storage_attrs['images_ceph'],
            emulate_pre_7_0=False)

        # Log {pool_name: pg_count} mapping
        pg_str = ", ".join(map("{0[0]}={0[1]}".format, pg_counts.items()))
        logger.debug("Ceph: PG values {%s}", pg_str)
        storage_attrs['pg_num'] = pg_counts['default_pg_num']
        storage_attrs['per_pool_pg_nums'] = pg_counts
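
For reference, the logging line above flattens the {pool_name: pg_count} mapping with an item-formatting idiom; a quick self-contained illustration, with invented pool names and counts:

pg_counts = {'default_pg_num': 512, 'volumes': 512}  # invented values
pg_str = ", ".join(map("{0[0]}={0[1]}".format, pg_counts.items()))
# pg_str == "default_pg_num=512, volumes=512"
# (ordering follows dict insertion order on Python 3.7+)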
Example #2
    def _set_pg_count_storage_parameters(cls, data, nodes):
        """Generate pg_num

        pg_num is generated as the number of OSDs across the cluster
        multiplied by 100, divided by the Ceph replication factor, and
        rounded up to the nearest power of 2.
        """
        osd_num = 0
        osd_nodes = [node for node in nodes if 'ceph-osd' in node.all_roles]

        for node in osd_nodes:
            for disk in cls.get_node_volumes(node):
                for part in disk.get('volumes', []):
                    if part.get('name') == 'ceph' and part.get('size', 0) > 0:
                        osd_num += 1

        for node in data:
            storage_attrs = node['storage']

            pg_counts = get_pool_pg_count(
                osd_num=osd_num,
                pool_sz=int(storage_attrs['osd_pool_size']),
                ceph_version='firefly',
                volumes_ceph=storage_attrs['volumes_ceph'],
                objects_ceph=storage_attrs['objects_ceph'],
                ephemeral_ceph=storage_attrs['ephemeral_ceph'],
                images_ceph=storage_attrs['images_ceph'],
                emulate_pre_7_0=False)

            # Log {pool_name: pg_count} mapping
            pg_str = ", ".join(map("{0[0]}={0[1]}".format, pg_counts.items()))
            logger.debug("Ceph: PG values {%s}", pg_str)

            storage_attrs['pg_num'] = pg_counts['default_pg_num']
            storage_attrs['per_pool_pg_nums'] = pg_counts
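
The docstring above describes the classic Ceph rule of thumb, which is also what the test below verifies for emulate_pre_7_0=True: roughly 100 PGs per OSD, divided by the replication factor and rounded up to the next power of two. A minimal sketch of that rule; the helper name and the DEFAULT_PG stand-in are illustrative and not the real get_pool_pg_count:

import math

DEFAULT_PG = 128  # stand-in value; the real constant lives in the test module


def classic_pg_num(osd_num, pool_sz):
    """Rule-of-thumb pg_num: osd_num * 100 / pool_sz, rounded up to a
    power of two (illustrative sketch only)."""
    if osd_num == 0:
        return DEFAULT_PG  # the tests expect DEFAULT_PG when there are no OSDs
    return 2 ** int(math.ceil(math.log(osd_num * 100.0 / pool_sz, 2)))


# 20 OSDs with replication factor 3: 20 * 100 / 3 ~ 667, rounded up to 1024
assert classic_pg_num(20, 3) == 1024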
Example #3
    def test_pg_count(self):
        params = [(osd, pool_sz, version,
                   dict(volumes_ceph=volumes_ceph,
                        objects_ceph=objects_ceph,
                        ephemeral_ceph=ephemeral_ceph,
                        images_ceph=images_ceph))
                  for osd in [0, 20, 5000]
                  for pool_sz in [1, 3]
                  for version in ['firefly', 'hammer']
                  for volumes_ceph in (True, False)
                  for objects_ceph in (True, False)
                  for ephemeral_ceph in (True, False)
                  for images_ceph in (True, False)]

        for osd, pool_sz, version, pools_used in params:
            if not any(pools_used.values()):
                continue

            res = ceph.get_pool_pg_count(osd, pool_sz, version,
                                         emulate_pre_7_0=False,
                                         **pools_used)

            old_res = ceph.get_pool_pg_count(osd, pool_sz,
                                             version,
                                             emulate_pre_7_0=True,
                                             **pools_used)

            if osd == 0:
                self.assertEqual(res['default_pg_num'], DEFAULT_PG)
                self.assertEqual(old_res['default_pg_num'], DEFAULT_PG)
                continue

            # Total PGs actually allocated, replicas included.
            pg_count = sum(res.values()) * pool_sz
            # A hard per-OSD ceiling for the cluster, and a rough estimate of
            # what the per-pool defaults would add up to.
            pg_count_upper_bound = osd * MAX_PG_PER_OSD
            pg_count_estimate = res['default_pg_num'] * len(res) * pool_sz

            if pg_count_estimate < pg_count_upper_bound:
                self.assertLess(pg_count, pg_count_upper_bound)

            # The comprehension variables (volumes_ceph etc.) are not in scope
            # here, so check the flags through pools_used instead.
            if any(pools_used.values()):
                self.assertGreater(pg_count, osd * MIN_PG_PER_OSD)

            pre_7_0_pg_num = 2 ** int(math.ceil(
                math.log(osd * 100.0 / pool_sz, 2)))

            self.assertEqual(old_res['default_pg_num'],
                             pre_7_0_pg_num)
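
The assertions above rely only on get_pool_pg_count returning a plain dict: a 'default_pg_num' entry plus one pg_num per enabled pool, which is also what ends up in the storage attributes. A hypothetical shape, with invented pool names and values:

res = {'default_pg_num': 512, 'volumes': 512, 'images': 128}  # invented shape
total_pgs = sum(res.values()) * 3   # replicas included, assuming pool_sz == 3
default_pg_num = res['default_pg_num']   # becomes storage_attrs['pg_num']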
Example #4
    def test_pg_count(self):
        params = [(osd, pool_sz, version,
                   dict(volumes_ceph=volumes_ceph,
                        objects_ceph=objects_ceph,
                        ephemeral_ceph=ephemeral_ceph,
                        images_ceph=images_ceph)) for osd in [0, 20, 5000]
                  for pool_sz in [1, 3] for version in ['firefly', 'hammer']
                  for volumes_ceph in (True, False)
                  for objects_ceph in (True, False)
                  for ephemeral_ceph in (True, False)
                  for images_ceph in (True, False)]

        for osd, pool_sz, version, pools_used in params:
            if not any(pools_used.values()):
                continue

            res = ceph.get_pool_pg_count(osd,
                                         pool_sz,
                                         version,
                                         emulate_pre_7_0=False,
                                         **pools_used)

            old_res = ceph.get_pool_pg_count(osd,
                                             pool_sz,
                                             version,
                                             emulate_pre_7_0=True,
                                             **pools_used)

            if osd == 0:
                self.assertEqual(res['default_pg_num'], DEFAULT_PG)
                self.assertEqual(old_res['default_pg_num'], DEFAULT_PG)
                continue

            # Total PGs actually allocated, replicas included.
            pg_count = sum(res.values()) * pool_sz
            # A hard per-OSD ceiling for the cluster, and a rough estimate of
            # what the per-pool defaults would add up to.
            pg_count_upper_bound = osd * MAX_PG_PER_OSD
            pg_count_estimate = res['default_pg_num'] * len(res) * pool_sz

            if pg_count_estimate < pg_count_upper_bound:
                self.assertLess(pg_count, pg_count_upper_bound)

            # The comprehension variables (volumes_ceph etc.) are not in scope
            # here, so check the flags through pools_used instead.
            if any(pools_used.values()):
                self.assertGreater(pg_count, osd * MIN_PG_PER_OSD)

            pre_7_0_pg_num = 2**int(
                math.ceil(math.log(osd * 100.0 / pool_sz, 2)))

            self.assertEqual(old_res['default_pg_num'], pre_7_0_pg_num)
Example #5
    def set_storage_parameters(self, cluster, attrs):
        """Generate pg_num

        pg_num is generated as the number of OSDs across the cluster
        multiplied by 100, divided by the Ceph replication factor, and
        rounded up to the nearest power of 2.
        """
        osd_num = 0
        nodes = db().query(Node).filter(
            Node.cluster == cluster
        ).filter(sa.or_(
            Node.roles.any('ceph-osd'),
            Node.pending_roles.any('ceph-osd')
        ))

        for node in nodes:
            for disk in node_extension_call('get_node_volumes', node):
                for part in disk.get('volumes', []):
                    if part.get('name') == 'ceph' and part.get('size', 0) > 0:
                        osd_num += 1

        storage_attrs = attrs['storage']

        pg_counts = get_pool_pg_count(
            osd_num=osd_num,
            pool_sz=int(storage_attrs['osd_pool_size']),
            ceph_version='firefly',
            volumes_ceph=storage_attrs['volumes_ceph'],
            objects_ceph=storage_attrs['objects_ceph'],
            ephemeral_ceph=storage_attrs['ephemeral_ceph'],
            images_ceph=storage_attrs['images_ceph'],
            emulate_pre_7_0=False)

        # Log {pool_name: pg_count} mapping
        pg_str = ", ".join(map("{0[0]}={0[1]}".format, pg_counts.items()))
        logger.debug("Ceph: PG values {%s}", pg_str)

        storage_attrs['pg_num'] = pg_counts['default_pg_num']
        storage_attrs['per_pool_pg_nums'] = pg_counts
Example #6
    def set_storage_parameters(self, cluster, attrs):
        """Generate pg_num

        pg_num is generated as the number of OSDs across the cluster
        multiplied by 100, divided by the Ceph replication factor, and
        rounded up to the nearest power of 2.
        """
        osd_num = 0
        nodes = db().query(Node).filter(Node.cluster == cluster).filter(
            sa.or_(Node.roles.any('ceph-osd'),
                   Node.pending_roles.any('ceph-osd'))).options(
                       joinedload('attributes'))

        for node in nodes:
            for disk in node_extension_call('get_node_volumes', node):
                for part in disk.get('volumes', []):
                    if part.get('name') == 'ceph' and part.get('size', 0) > 0:
                        osd_num += 1

        storage_attrs = attrs['storage']

        pg_counts = get_pool_pg_count(
            osd_num=osd_num,
            pool_sz=int(storage_attrs['osd_pool_size']),
            ceph_version='firefly',
            volumes_ceph=storage_attrs['volumes_ceph'],
            objects_ceph=storage_attrs['objects_ceph'],
            ephemeral_ceph=storage_attrs['ephemeral_ceph'],
            images_ceph=storage_attrs['images_ceph'],
            emulate_pre_7_0=False)

        # Log {pool_name: pg_count} mapping
        pg_str = ", ".join(map("{0[0]}={0[1]}".format, pg_counts.items()))
        logger.debug("Ceph: PG values {%s}", pg_str)

        storage_attrs['pg_num'] = pg_counts['default_pg_num']
        storage_attrs['per_pool_pg_nums'] = pg_counts
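
The only functional difference between the two set_storage_parameters variants above is the joinedload('attributes') option, which pulls each node's attributes in with the same query instead of issuing one lazy SELECT per node inside the loop. A self-contained toy version of that pattern; the models, table names, and in-memory engine are invented, and it assumes SQLAlchemy 1.x, where joinedload still accepts a string path:

import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import joinedload, relationship, sessionmaker

Base = declarative_base()


class Node(Base):
    __tablename__ = 'nodes'
    id = sa.Column(sa.Integer, primary_key=True)
    attributes = relationship('NodeAttributes', uselist=False)


class NodeAttributes(Base):
    __tablename__ = 'node_attributes'
    id = sa.Column(sa.Integer, primary_key=True)
    node_id = sa.Column(sa.Integer, sa.ForeignKey('nodes.id'))


engine = sa.create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

# With joinedload, node.attributes is populated by the same SELECT rather
# than a separate lazy query the first time it is touched.
nodes = session.query(Node).options(joinedload('attributes')).all()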
Example #7
    def _set_pg_count_storage_parameters(cls, data, nodes):
        """Generate pg_num

        pg_num is generated as the number of OSDs across the cluster
        multiplied by 100, divided by the Ceph replication factor, and
        rounded up to the nearest power of 2.
        """
        osd_num = 0
        osd_nodes = [node for node in nodes
                     if 'ceph-osd' in node.all_roles]

        for node in osd_nodes:
            for disk in cls.get_node_volumes(node):
                for part in disk.get('volumes', []):
                    if part.get('name') == 'ceph' and part.get('size', 0) > 0:
                        osd_num += 1

        for node in data:
            storage_attrs = node['storage']

            pg_counts = get_pool_pg_count(
                osd_num=osd_num,
                pool_sz=int(storage_attrs['osd_pool_size']),
                ceph_version='firefly',
                volumes_ceph=storage_attrs['volumes_ceph'],
                objects_ceph=storage_attrs['objects_ceph'],
                ephemeral_ceph=storage_attrs['ephemeral_ceph'],
                images_ceph=storage_attrs['images_ceph'],
                emulate_pre_7_0=False)

            # Log {pool_name: pg_count} mapping
            pg_str = ", ".join(map("{0[0]}={0[1]}".format, pg_counts.items()))
            logger.debug("Ceph: PG values {%s}", pg_str)

            storage_attrs['pg_num'] = pg_counts['default_pg_num']
            storage_attrs['per_pool_pg_nums'] = pg_counts
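
Every variant above counts OSDs the same way: one OSD per 'ceph' partition with a non-zero size, across the volumes of every ceph-osd node. Pulled out as a standalone sketch for reference; the helper name is made up, and the volume layout mirrors the list-of-dicts structure returned by get_node_volumes in the snippets:

def count_ceph_osds(volumes_per_node):
    """Count OSDs as 'ceph' partitions with a non-zero size.

    volumes_per_node: one entry per ceph-osd node, each entry being the
    list of disks that get_node_volumes() returned for that node.
    """
    osd_num = 0
    for disks in volumes_per_node:
        for disk in disks or []:
            for part in disk.get('volumes', []):
                if part.get('name') == 'ceph' and part.get('size', 0) > 0:
                    osd_num += 1
    return osd_num


# Example with invented sizes: only the first disk carries a usable
# 'ceph' partition, so exactly one OSD is counted.
disks = [
    {'volumes': [{'name': 'ceph', 'size': 50}, {'name': 'os', 'size': 10}]},
    {'volumes': [{'name': 'ceph', 'size': 0}]},
]
assert count_ceph_osds([disks]) == 1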