Example 1
    def _get_pool_status(
            self,
            osdmap: OSDMap,
            pools: Dict[str, Dict[str, Any]],
            profile: 'ScaleModeT',
    ) -> Tuple[List[Dict[str, Any]],
               Dict[int, CrushSubtreeResourceStatus]]:
        threshold = self.threshold
        assert threshold >= 1.0

        crush_map = osdmap.get_crush()
        root_map, overlapped_roots = self.get_subtree_resource_status(osdmap, crush_map)
        df = self.get('df')
        pool_stats = {p['id']: p['stats'] for p in df['pools']}

        ret: List[Dict[str, Any]] = []

        # Iterate over all pools to determine how they should be sized.
        # The first call of _calc_pool_targets() finds and adjusts pools that
        # use more capacity than the even_ratio of other pools; we adjust
        # those first. The second call makes use of the even_pools we kept
        # track of in the first call: all we need to do is iterate over those
        # and give each one 1/pool_count of the total PGs.

        ret, even_pools = self._calc_pool_targets(osdmap, pools, crush_map, root_map,
                                                  pool_stats, ret, threshold, True, profile, overlapped_roots)

        if profile == "scale-down":
            # We only need to adjust even_pools when using the scale-down profile
            ret, _ = self._calc_pool_targets(osdmap, even_pools, crush_map, root_map,
                                             pool_stats, ret, threshold, False, profile, overlapped_roots)

        return (ret, root_map)
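To make the two-pass flow above concrete, here is a minimal, self-contained sketch of the same pattern. The names (calc_targets, pg_used, pg_budget) and the even-share split are illustrative assumptions, not the real _calc_pool_targets implementation:

    # Hypothetical sketch of the two-pass sizing above (not the real
    # _calc_pool_targets): pass 1 keeps pools that already use more than an
    # even share, pass 2 gives each remaining pool 1/pool_count of the PGs.
    from typing import Any, Dict, List, Tuple

    def calc_targets(pools: Dict[str, Dict[str, Any]],
                     pg_budget: int,
                     first_pass: bool) -> Tuple[List[Dict[str, Any]],
                                                Dict[str, Dict[str, Any]]]:
        ret: List[Dict[str, Any]] = []
        even_pools: Dict[str, Dict[str, Any]] = {}
        even_share = pg_budget // max(len(pools), 1)
        for name, p in pools.items():
            if first_pass and p['pg_used'] <= even_share:
                even_pools[name] = p    # defer to the second pass
                continue
            target = p['pg_used'] if first_pass else even_share
            ret.append({'pool_name': name, 'pg_num_final': target})
        return ret, even_pools

    all_pools = {'rbd': {'pg_used': 512}, 'cephfs': {'pg_used': 16}}
    ret, even = calc_targets(all_pools, 256, True)   # oversized pools first
    ret += calc_targets(even, 256, False)[0]         # then the even split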
Example 2
    def identify_subtrees_and_overlaps(self,
                                       osdmap: OSDMap,
                                       crush: CRUSHMap,
                                       result: Dict[int, CrushSubtreeResourceStatus],
                                       overlapped_roots: Set[int],
                                       roots: List[CrushSubtreeResourceStatus]) -> \
        Tuple[List[CrushSubtreeResourceStatus],
              Set[int]]:

        # We identify subtrees and overlapping roots from osdmap
        for pool_id, pool in osdmap.get_pools().items():
            crush_rule = crush.get_rule_by_id(pool['crush_rule'])
            assert crush_rule is not None
            cr_name = crush_rule['rule_name']
            root_id = crush.get_rule_root(cr_name)
            assert root_id is not None
            osds = set(crush.get_osds_under(root_id))

            # Are there overlapping roots?
            s = None
            for prev_root_id, prev in result.items():
                if osds & prev.osds:
                    s = prev
                    if prev_root_id != root_id:
                        overlapped_roots.add(prev_root_id)
                        overlapped_roots.add(root_id)
                        self.log.error('pool %d has overlapping roots: %s',
                                       pool_id, overlapped_roots)
                    break
            if not s:
                s = CrushSubtreeResourceStatus()
                roots.append(s)
            result[root_id] = s
            s.root_ids.append(root_id)
            s.osds |= osds
            s.pool_ids.append(pool_id)
            s.pool_names.append(pool['pool_name'])
            s.pg_current += pool['pg_num_target'] * pool['size']
            target_ratio = pool['options'].get('target_size_ratio', 0.0)
            if target_ratio:
                s.total_target_ratio += target_ratio
            else:
                target_bytes = pool['options'].get('target_size_bytes', 0)
                if target_bytes:
                    s.total_target_bytes += target_bytes * osdmap.pool_raw_used_rate(
                        pool_id)
        return roots, overlapped_roots
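The overlap test above hinges on set intersection of the OSDs under each root. Here is a minimal standalone illustration, with hypothetical root ids and OSD sets:

    # Hypothetical root ids and their OSD sets; roots overlap when the sets
    # intersect, mirroring the `osds & prev.osds` check above.
    from typing import Dict, Set

    root_osds: Dict[int, Set[int]] = {
        -1: {0, 1, 2, 3},   # e.g. root "default"
        -2: {2, 3, 4, 5},   # shares OSDs 2 and 3 with -1
        -3: {6, 7},         # disjoint
    }

    overlapped_roots: Set[int] = set()
    seen: Dict[int, Set[int]] = {}
    for root_id, osds in root_osds.items():
        for prev_id, prev_osds in seen.items():
            if osds & prev_osds and prev_id != root_id:
                overlapped_roots.update({prev_id, root_id})
        seen[root_id] = osds

    print(overlapped_roots)   # {-1, -2}: pools on these roots get skipped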
Example 3
    def _get_pool_pg_targets(
        self,
        osdmap: OSDMap,
        pools: Dict[str, Dict[str, Any]],
        crush_map: CRUSHMap,
        root_map: Dict[int, CrushSubtreeResourceStatus],
        pool_stats: Dict[int, Dict[str, int]],
        ret: List[Dict[str, Any]],
        threshold: float,
        func_pass: bool,
        overlapped_roots: Set[int],
    ) -> Tuple[List[Dict[str, Any]],
               Dict[str, Dict[str, Any]],
               Dict[str, Dict[str, Any]]]:
        """
        Calculates final_pg_target of each pools and determine if it needs
        scaling, this depends on the profile of the autoscaler. For scale-down,
        we start out with a full complement of pgs and only descrease it when other
        pools needs more pgs due to increased usage. For scale-up, we start out with
        the minimal amount of pgs and only scale when there is increase in usage.
        """
        even_pools: Dict[str, Dict[str, Any]] = {}
        bulk_pools: Dict[str, Dict[str, Any]] = {}
        for pool_name, p in pools.items():
            pool_id = p['pool']
            if pool_id not in pool_stats:
                # race with pool deletion; skip
                continue

            # FIXME: we assume there is only one take per pool, but that
            # may not be true.
            crush_rule = crush_map.get_rule_by_id(p['crush_rule'])
            assert crush_rule is not None
            cr_name = crush_rule['rule_name']
            root_id = crush_map.get_rule_root(cr_name)
            assert root_id is not None
            if root_id in overlapped_roots:
                # skip pools with overlapping roots
                self.log.warning(
                    "pool %d contains an overlapping root %d"
                    "... skipping scaling", pool_id, root_id)
                continue
            capacity = root_map[root_id].capacity
            assert capacity is not None
            if capacity == 0:
                self.log.debug('skipping empty subtree %s', cr_name)
                continue

            raw_used_rate = osdmap.pool_raw_used_rate(pool_id)

            pool_logical_used = pool_stats[pool_id]['stored']
            bias = p['options'].get('pg_autoscale_bias', 1.0)
            target_bytes = 0
            # ratio takes precedence if both are set
            if p['options'].get('target_size_ratio', 0.0) == 0.0:
                target_bytes = p['options'].get('target_size_bytes', 0)

            # What proportion of space are we using?
            actual_raw_used = pool_logical_used * raw_used_rate
            actual_capacity_ratio = float(actual_raw_used) / capacity

            pool_raw_used = max(pool_logical_used,
                                target_bytes) * raw_used_rate
            capacity_ratio = float(pool_raw_used) / capacity

            self.log.info("effective_target_ratio {0} {1} {2} {3}".format(
                p['options'].get('target_size_ratio',
                                 0.0), root_map[root_id].total_target_ratio,
                root_map[root_id].total_target_bytes, capacity))

            target_ratio = effective_target_ratio(
                p['options'].get('target_size_ratio', 0.0),
                root_map[root_id].total_target_ratio,
                root_map[root_id].total_target_bytes,
                capacity)

            # determine whether the pool is flagged as bulk
            bulk = "bulk" in p['flags_names'].split(",")

            capacity_ratio = max(capacity_ratio, target_ratio)
            final_ratio, pool_pg_target, final_pg_target = self._calc_final_pg_target(
                p, pool_name, root_map, root_id, capacity_ratio, bias,
                even_pools, bulk_pools, func_pass, bulk)

            if final_ratio is None:
                continue

            adjust = ((final_pg_target > p['pg_num_target'] * threshold or
                       final_pg_target < p['pg_num_target'] / threshold) and
                      0.0 <= final_ratio <= 1.0)

            assert pool_pg_target is not None
            ret.append({
                'pool_id': pool_id,
                'pool_name': p['pool_name'],
                'crush_root_id': root_id,
                'pg_autoscale_mode': p['pg_autoscale_mode'],
                'pg_num_target': p['pg_num_target'],
                'logical_used': pool_logical_used,
                'target_bytes': target_bytes,
                'raw_used_rate': raw_used_rate,
                'subtree_capacity': capacity,
                'actual_raw_used': actual_raw_used,
                'raw_used': pool_raw_used,
                'actual_capacity_ratio': actual_capacity_ratio,
                'capacity_ratio': capacity_ratio,
                'target_ratio': p['options'].get('target_size_ratio', 0.0),
                'effective_target_ratio': target_ratio,
                'pg_num_ideal': int(pool_pg_target),
                'pg_num_final': final_pg_target,
                'would_adjust': adjust,
                'bias': p.get('options', {}).get('pg_autoscale_bias', 1.0),
                'bulk': bulk,
            })

        return ret, bulk_pools, even_pools
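The would_adjust decision and the capacity math above reduce to a few lines of arithmetic. A worked example with made-up numbers (the final_pg_target value stands in for whatever _calc_final_pg_target returns):

    # Illustrative numbers: 1 TiB stored in a size-3 replicated pool on a
    # 30 TiB subtree, with target_size_bytes set to 5 TiB by the admin.
    pool_logical_used = 1 * 2**40
    raw_used_rate = 3.0
    capacity = 30 * 2**40
    target_bytes = 5 * 2**40

    pool_raw_used = max(pool_logical_used, target_bytes) * raw_used_rate
    capacity_ratio = pool_raw_used / capacity     # 15 TiB / 30 TiB = 0.5

    threshold = 3.0
    pg_num_target = 32
    final_pg_target = 128                         # assumed calculator output
    would_adjust = (final_pg_target > pg_num_target * threshold or
                    final_pg_target < pg_num_target / threshold)
    print(capacity_ratio, would_adjust)           # 0.5 True (128 > 32 * 3)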
Example 4
    def get_subtree_resource_status(self,
                                    osdmap: OSDMap,
                                    crush: CRUSHMap) -> Tuple[Dict[int, CrushSubtreeResourceStatus],
                                                              Dict[int, int]]:
        """
        For each CRUSH subtree of interest (i.e. the roots under which
        we have pools), calculate the current resource usages and targets,
        such as how many PGs there are, vs. how many PGs we would
        like there to be.
        """
        result: Dict[int, CrushSubtreeResourceStatus] = {}
        pool_root = {}
        roots = []

        # identify subtrees (note that they may overlap!)
        for pool_id, pool in osdmap.get_pools().items():
            crush_rule = crush.get_rule_by_id(pool['crush_rule'])
            assert crush_rule is not None
            cr_name = crush_rule['rule_name']
            root_id = crush.get_rule_root(cr_name)
            assert root_id is not None
            pool_root[pool_id] = root_id
            osds = set(crush.get_osds_under(root_id))

            # do we intersect an existing root?
            s = None
            for prev in result.values():
                if osds & prev.osds:
                    s = prev
                    break
            if not s:
                s = CrushSubtreeResourceStatus()
                roots.append(s)
            result[root_id] = s
            s.root_ids.append(root_id)
            s.osds |= osds
            s.pool_ids.append(pool_id)
            s.pool_names.append(pool['pool_name'])
            s.pg_current += pool['pg_num_target'] * pool['size']
            target_ratio = pool['options'].get('target_size_ratio', 0.0)
            if target_ratio:
                s.total_target_ratio += target_ratio
            else:
                target_bytes = pool['options'].get('target_size_bytes', 0)
                if target_bytes:
                    s.total_target_bytes += target_bytes * osdmap.pool_raw_used_rate(pool_id)

        # finish subtrees
        all_stats = self.get('osd_stats')
        for s in roots:
            assert s.osds is not None
            s.osd_count = len(s.osds)
            s.pg_target = s.osd_count * self.mon_target_pg_per_osd
            s.pg_left = s.pg_target
            s.pool_count = len(s.pool_ids)
            capacity = 0
            for osd_stats in all_stats['osd_stats']:
                if osd_stats['osd'] in s.osds:
                    # Intentionally do not apply the OSD's reweight to
                    # this, because we want to calculate PG counts based
                    # on the physical storage available, not how it is
                    # reweighted right now.
                    capacity += osd_stats['kb'] * 1024

            s.capacity = capacity

            self.log.debug('root_ids %s pools %s with %d osds, pg_target %d',
                           s.root_ids,
                           s.pool_ids,
                           s.osd_count,
                           s.pg_target)

        return result, pool_root
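Both per-subtree targets computed above are simple arithmetic over the subtree's OSDs. For example (OSD count and sizes are illustrative; 100 is Ceph's default for mon_target_pg_per_osd):

    # A hypothetical subtree of 12 OSDs, 4 TiB each.
    mon_target_pg_per_osd = 100                     # Ceph default
    osd_count = 12
    pg_target = osd_count * mon_target_pg_per_osd   # 1200 PG replicas

    # capacity sums raw OSD sizes (osd_stats reports 'kb'), deliberately
    # ignoring reweight, as the comment in the loop above explains.
    osd_kb = 4 * 2**30                              # 4 TiB expressed in KiB
    capacity = osd_count * osd_kb * 1024            # bytes
    print(pg_target, capacity // 2**40)             # 1200 48 (TiB)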