Example 1
    def get_compat_weight_set_weights(self, ms):
        if not CRUSHMap.have_default_choose_args(ms.crush_dump):
            # enable compat weight-set first
            self.log.debug('ceph osd crush weight-set create-compat')
            result = CommandResult('')
            self.send_command(
                result, 'mon', '',
                json.dumps({
                    'prefix': 'osd crush weight-set create-compat',
                    'format': 'json',
                }), '')
            r, outb, outs = result.wait()
            if r != 0:
                self.log.error('Error creating compat weight-set')
                return

            result = CommandResult('')
            self.send_command(
                result, 'mon', '',
                json.dumps({
                    'prefix': 'osd crush dump',
                    'format': 'json',
                }), '')
            r, outb, outs = result.wait()
            if r != 0:
                self.log.error('Error dumping crush map')
                return
            try:
                crushmap = json.loads(outb)
            except ValueError:
                raise RuntimeError('unable to parse crush map')
        else:
            crushmap = ms.crush_dump

        raw = CRUSHMap.get_default_choose_args(crushmap)
        weight_set = {}
        for b in raw:
            bucket = None
            for t in crushmap['buckets']:
                if t['id'] == b['bucket_id']:
                    bucket = t
                    break
            if not bucket:
                raise RuntimeError('could not find bucket %s' % b['bucket_id'])
            self.log.debug('bucket items %s' % bucket['items'])
            self.log.debug('weight set %s' % b['weight_set'][0])
            if len(bucket['items']) != len(b['weight_set'][0]):
                raise RuntimeError(
                    'weight-set size does not match bucket items')
            for pos in range(len(bucket['items'])):
                weight_set[bucket['items'][pos]['id']] = b['weight_set'][0][pos]

        self.log.debug('weight_set weights %s' % weight_set)
        return weight_set
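
Note that the inner loop pairs bucket items with weight-set values strictly by position. A minimal equivalent of that pairing using zip, shown here as an illustration rather than the module's actual code:

    # hypothetical sketch: positional pairing of bucket items and weights
    for item, weight in zip(bucket['items'], b['weight_set'][0]):
        weight_set[item['id']] = weight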
Example 2
    def get_compat_weight_set_weights(self, ms):
        if not CRUSHMap.have_default_choose_args(ms.crush_dump):
            # enable compat weight-set first
            self.log.debug('ceph osd crush weight-set create-compat')
            result = CommandResult('')
            self.send_command(result, 'mon', '', json.dumps({
                'prefix': 'osd crush weight-set create-compat',
                'format': 'json',
            }), '')
            r, outb, outs = result.wait()
            if r != 0:
                self.log.error('Error creating compat weight-set')
                return

            result = CommandResult('')
            self.send_command(result, 'mon', '', json.dumps({
                'prefix': 'osd crush dump',
                'format': 'json',
            }), '')
            r, outb, outs = result.wait()
            if r != 0:
                self.log.error('Error dumping crush map')
                return
            try:
                crushmap = json.loads(outb)
            except ValueError:
                raise RuntimeError('unable to parse crush map')
        else:
            crushmap = ms.crush_dump

        raw = CRUSHMap.get_default_choose_args(crushmap)
        weight_set = {}
        for b in raw:
            bucket = None
            for t in crushmap['buckets']:
                if t['id'] == b['bucket_id']:
                    bucket = t
                    break
            if not bucket:
                raise RuntimeError('could not find bucket %s' % b['bucket_id'])
            self.log.debug('bucket items %s' % bucket['items'])
            self.log.debug('weight set %s' % b['weight_set'][0])
            if len(bucket['items']) != len(b['weight_set'][0]):
                raise RuntimeError('weight-set size does not match bucket items')
            for pos in range(len(bucket['items'])):
                weight_set[bucket['items'][pos]['id']] = b['weight_set'][0][pos]

        self.log.debug('weight_set weights %s' % weight_set)
        return weight_set
Example 3
 def show(self):
     ls = []
     ls.append('# starting osdmap epoch %d' %
               self.initial.osdmap.get_epoch())
     ls.append('# starting crush version %d' %
               self.initial.osdmap.get_crush_version())
     ls.append('# mode %s' % self.mode)
     if len(self.compat_ws) and \
        not CRUSHMap.have_default_choose_args(self.initial.crush_dump):
         ls.append('ceph osd crush weight-set create-compat')
     for osd, weight in self.compat_ws.items():
         ls.append('ceph osd crush weight-set reweight-compat %s %f' %
                   (osd, weight))
     for osd, weight in self.osd_weights.items():
         ls.append('ceph osd reweight osd.%d %f' % (osd, weight))
     incdump = self.inc.dump()
     for pgid in incdump.get('old_pg_upmap_items', []):
         ls.append('ceph osd rm-pg-upmap-items %s' % pgid)
     for item in incdump.get('new_pg_upmap_items', []):
         osdlist = []
         for m in item['mappings']:
             osdlist += [m['from'], m['to']]
         ls.append('ceph osd pg-upmap-items %s %s' %
                   (item['pgid'], ' '.join([str(a) for a in osdlist])))
     return '\n'.join(ls)
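
For reference, show() renders a plan as a plain shell script. With a hypothetical plan (all ids, weights, and pgids below are made up for illustration), the output would look like:

    # starting osdmap epoch 123
    # starting crush version 45
    # mode crush-compat
    ceph osd crush weight-set create-compat
    ceph osd crush weight-set reweight-compat 0 1.050000
    ceph osd reweight osd.2 0.950000
    ceph osd rm-pg-upmap-items 1.5
    ceph osd pg-upmap-items 1.7 3 4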
Example 4
    def identify_subtrees_and_overlaps(self,
                                       osdmap: OSDMap,
                                       crush: CRUSHMap,
                                       result: Dict[int, CrushSubtreeResourceStatus],
                                       overlapped_roots: Set[int],
                                       roots: List[CrushSubtreeResourceStatus]) -> \
        Tuple[List[CrushSubtreeResourceStatus],
              Set[int]]:

        # We identify subtrees and overlapping roots from osdmap
        for pool_id, pool in osdmap.get_pools().items():
            crush_rule = crush.get_rule_by_id(pool['crush_rule'])
            assert crush_rule is not None
            cr_name = crush_rule['rule_name']
            root_id = crush.get_rule_root(cr_name)
            assert root_id is not None
            osds = set(crush.get_osds_under(root_id))

            # Are there overlapping roots?
            s = None
            for prev_root_id, prev in result.items():
                if osds & prev.osds:
                    s = prev
                    if prev_root_id != root_id:
                        overlapped_roots.add(prev_root_id)
                        overlapped_roots.add(root_id)
                        self.log.error('pool %d has overlapping roots: %s',
                                       pool_id, overlapped_roots)
                    break
            if not s:
                s = CrushSubtreeResourceStatus()
                roots.append(s)
            result[root_id] = s
            s.root_ids.append(root_id)
            s.osds |= osds
            s.pool_ids.append(pool_id)
            s.pool_names.append(pool['pool_name'])
            s.pg_current += pool['pg_num_target'] * pool['size']
            target_ratio = pool['options'].get('target_size_ratio', 0.0)
            if target_ratio:
                s.total_target_ratio += target_ratio
            else:
                target_bytes = pool['options'].get('target_size_bytes', 0)
                if target_bytes:
                    s.total_target_bytes += target_bytes * osdmap.pool_raw_used_rate(
                        pool_id)
        return roots, overlapped_roots
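
The overlap check above is a plain set intersection: two CRUSH roots overlap whenever at least one OSD sits under both. A self-contained sketch of the same test with made-up root and OSD ids:

    # hypothetical roots mapped to the OSD ids beneath them
    root_osds = {
        -1: {0, 1, 2, 3},   # e.g. root "default"
        -9: {2, 3, 4, 5},   # a second root sharing osd.2 and osd.3
    }
    overlapped_roots = set()
    ids = list(root_osds)
    for i, a in enumerate(ids):
        for b in ids[i + 1:]:
            if root_osds[a] & root_osds[b]:     # non-empty intersection
                overlapped_roots.update((a, b))
    print(overlapped_roots)  # {-1, -9}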
Example 5
 def show(self):
     ls = []
     ls.append('# starting osdmap epoch %d' % self.initial.osdmap.get_epoch())
     ls.append('# starting crush version %d' %
               self.initial.osdmap.get_crush_version())
     ls.append('# mode %s' % self.mode)
     if len(self.compat_ws) and \
        not CRUSHMap.have_default_choose_args(self.initial.crush_dump):
         ls.append('ceph osd crush weight-set create-compat')
     for osd, weight in six.iteritems(self.compat_ws):
         ls.append('ceph osd crush weight-set reweight-compat %s %f' %
                   (osd, weight))
     for osd, weight in six.iteritems(self.osd_weights):
         ls.append('ceph osd reweight osd.%d %f' % (osd, weight))
     incdump = self.inc.dump()
     for pgid in incdump.get('old_pg_upmap_items', []):
         ls.append('ceph osd rm-pg-upmap-items %s' % pgid)
     for item in incdump.get('new_pg_upmap_items', []):
         osdlist = []
         for m in item['mappings']:
             osdlist += [m['from'], m['to']]
         ls.append('ceph osd pg-upmap-items %s %s' %
                   (item['pgid'], ' '.join([str(a) for a in osdlist])))
     return '\n'.join(ls)
Example 6
    def execute(self, plan):
        self.log.info('Executing plan %s' % plan.name)

        commands = []

        # compat weight-set
        if len(plan.compat_ws) and \
           not CRUSHMap.have_default_choose_args(plan.initial.crush_dump):
            self.log.debug('ceph osd crush weight-set create-compat')
            result = CommandResult('')
            self.send_command(result, 'mon', '', json.dumps({
                'prefix': 'osd crush weight-set create-compat',
                'format': 'json',
            }), '')
            r, outb, outs = result.wait()
            if r != 0:
                self.log.error('Error creating compat weight-set')
                return r, outs

        for osd, weight in six.iteritems(plan.compat_ws):
            self.log.info('ceph osd crush weight-set reweight-compat osd.%d %f',
                          osd, weight)
            result = CommandResult('')
            self.send_command(result, 'mon', '', json.dumps({
                'prefix': 'osd crush weight-set reweight-compat',
                'format': 'json',
                'item': 'osd.%d' % osd,
                'weight': [weight],
            }), '')
            commands.append(result)

        # new_weight
        reweightn = {}
        for osd, weight in six.iteritems(plan.osd_weights):
            reweightn[str(osd)] = str(int(weight * float(0x10000)))
        if len(reweightn):
            self.log.info('ceph osd reweightn %s', reweightn)
            result = CommandResult('')
            self.send_command(result, 'mon', '', json.dumps({
                'prefix': 'osd reweightn',
                'format': 'json',
                'weights': json.dumps(reweightn),
            }), '')
            commands.append(result)

        # upmap
        incdump = plan.inc.dump()
        for pgid in incdump.get('old_pg_upmap_items', []):
            self.log.info('ceph osd rm-pg-upmap-items %s', pgid)
            result = CommandResult('foo')
            self.send_command(result, 'mon', '', json.dumps({
                'prefix': 'osd rm-pg-upmap-items',
                'format': 'json',
                'pgid': pgid,
            }), 'foo')
            commands.append(result)

        for item in incdump.get('new_pg_upmap_items', []):
            self.log.info('ceph osd pg-upmap-items %s mappings %s', item['pgid'],
                          item['mappings'])
            osdlist = []
            for m in item['mappings']:
                osdlist += [m['from'], m['to']]
            result = CommandResult('foo')
            self.send_command(result, 'mon', '', json.dumps({
                'prefix': 'osd pg-upmap-items',
                'format': 'json',
                'pgid': item['pgid'],
                'id': osdlist,
            }), 'foo')
            commands.append(result)

        # wait for commands
        self.log.debug('commands %s' % commands)
        for result in commands:
            r, outb, outs = result.wait()
            if r != 0:
                self.log.error('execute error: r = %d, detail = %s' % (r, outs))
                return r, outs
        self.log.debug('done')
        return 0, ''
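
The reweightn step encodes each override weight in CRUSH's fixed-point convention, where a weight of 1.0 corresponds to 0x10000 (65536), as the int(weight * float(0x10000)) conversion above shows. A quick worked example:

    weight = 0.85
    encoded = int(weight * float(0x10000))  # 55705; a weight of 1.0 encodes as 65536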
Example 7
    def _get_pool_pg_targets(
        self,
        osdmap: OSDMap,
        pools: Dict[str, Dict[str, Any]],
        crush_map: CRUSHMap,
        root_map: Dict[int, CrushSubtreeResourceStatus],
        pool_stats: Dict[int, Dict[str, int]],
        ret: List[Dict[str, Any]],
        threshold: float,
        func_pass: '******',
        overlapped_roots: Set[int],
    ) -> Tuple[List[Dict[str, Any]],
               Dict[str, Dict[str, Any]],
               Dict[str, Dict[str, Any]]]:
        """
        Calculates final_pg_target of each pools and determine if it needs
        scaling, this depends on the profile of the autoscaler. For scale-down,
        we start out with a full complement of pgs and only descrease it when other
        pools needs more pgs due to increased usage. For scale-up, we start out with
        the minimal amount of pgs and only scale when there is increase in usage.
        """
        even_pools: Dict[str, Dict[str, Any]] = {}
        bulk_pools: Dict[str, Dict[str, Any]] = {}
        for pool_name, p in pools.items():
            pool_id = p['pool']
            if pool_id not in pool_stats:
                # race with pool deletion; skip
                continue

            # FIXME: we assume there is only one take per pool, but that
            # may not be true.
            crush_rule = crush_map.get_rule_by_id(p['crush_rule'])
            assert crush_rule is not None
            cr_name = crush_rule['rule_name']
            root_id = crush_map.get_rule_root(cr_name)
            assert root_id is not None
            if root_id in overlapped_roots:
                # skip pools with overlapping roots
                self.log.warning(
                    "pool %d contains an overlapping root %d"
                    "... skipping scaling", pool_id, root_id)
                continue
            capacity = root_map[root_id].capacity
            assert capacity is not None
            if capacity == 0:
                self.log.debug('skipping empty subtree %s', cr_name)
                continue

            raw_used_rate = osdmap.pool_raw_used_rate(pool_id)

            pool_logical_used = pool_stats[pool_id]['stored']
            bias = p['options'].get('pg_autoscale_bias', 1.0)
            target_bytes = 0
            # ratio takes precedence if both are set
            if p['options'].get('target_size_ratio', 0.0) == 0.0:
                target_bytes = p['options'].get('target_size_bytes', 0)

            # What proportion of space are we using?
            actual_raw_used = pool_logical_used * raw_used_rate
            actual_capacity_ratio = float(actual_raw_used) / capacity

            pool_raw_used = max(pool_logical_used,
                                target_bytes) * raw_used_rate
            capacity_ratio = float(pool_raw_used) / capacity

            self.log.info("effective_target_ratio {0} {1} {2} {3}".format(
                p['options'].get('target_size_ratio',
                                 0.0), root_map[root_id].total_target_ratio,
                root_map[root_id].total_target_bytes, capacity))

            target_ratio = effective_target_ratio(
                p['options'].get('target_size_ratio',
                                 0.0), root_map[root_id].total_target_ratio,
                root_map[root_id].total_target_bytes, capacity)

            # determine whether the pool is flagged as bulk
            bulk = "bulk" in p['flags_names'].split(",")

            capacity_ratio = max(capacity_ratio, target_ratio)
            final_ratio, pool_pg_target, final_pg_target = self._calc_final_pg_target(
                p, pool_name, root_map, root_id, capacity_ratio, bias,
                even_pools, bulk_pools, func_pass, bulk)

            if final_ratio is None:
                continue

            adjust = False
            if (final_pg_target > p['pg_num_target'] * threshold or
                    final_pg_target < p['pg_num_target'] / threshold) and \
                    0.0 <= final_ratio <= 1.0:
                adjust = True

            assert pool_pg_target is not None
            ret.append({
                'pool_id': pool_id,
                'pool_name': p['pool_name'],
                'crush_root_id': root_id,
                'pg_autoscale_mode': p['pg_autoscale_mode'],
                'pg_num_target': p['pg_num_target'],
                'logical_used': pool_logical_used,
                'target_bytes': target_bytes,
                'raw_used_rate': raw_used_rate,
                'subtree_capacity': capacity,
                'actual_raw_used': actual_raw_used,
                'raw_used': pool_raw_used,
                'actual_capacity_ratio': actual_capacity_ratio,
                'capacity_ratio': capacity_ratio,
                'target_ratio': p['options'].get('target_size_ratio', 0.0),
                'effective_target_ratio': target_ratio,
                'pg_num_ideal': int(pool_pg_target),
                'pg_num_final': final_pg_target,
                'would_adjust': adjust,
                'bias': p.get('options', {}).get('pg_autoscale_bias', 1.0),
                'bulk': bulk,
            })

        return ret, bulk_pools, even_pools
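
The would_adjust test is symmetric around the current pg_num_target: a pool is only flagged when pg_num_final leaves the band (pg_num_target / threshold, pg_num_target * threshold) and the final ratio is sane. A minimal sketch of that decision, assuming a threshold of 3.0 (the actual value is passed in by the caller):

    def would_adjust(pg_num_target, final_pg_target, final_ratio, threshold=3.0):
        # flag only when the suggestion falls outside the hysteresis band
        out_of_band = (final_pg_target > pg_num_target * threshold or
                       final_pg_target < pg_num_target / threshold)
        return out_of_band and 0.0 <= final_ratio <= 1.0

    print(would_adjust(32, 128, 0.5))  # True: 128 > 32 * 3
    print(would_adjust(32, 64, 0.5))   # False: 64 is within (32/3, 96)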
Example 8
    def get_subtree_resource_status(self,
                                    osdmap: OSDMap,
                                    crush: CRUSHMap) -> Tuple[Dict[int, CrushSubtreeResourceStatus],
                                                              Dict[int, int]]:
        """
        For each CRUSH subtree of interest (i.e. the roots under which
        we have pools), calculate the current resource usages and targets,
        such as how many PGs there are, vs. how many PGs we would
        like there to be.
        """
        result: Dict[int, CrushSubtreeResourceStatus] = {}
        pool_root = {}
        roots = []

        # identify subtrees (note that they may overlap!)
        for pool_id, pool in osdmap.get_pools().items():
            crush_rule = crush.get_rule_by_id(pool['crush_rule'])
            assert crush_rule is not None
            cr_name = crush_rule['rule_name']
            root_id = crush.get_rule_root(cr_name)
            assert root_id is not None
            pool_root[pool_id] = root_id
            osds = set(crush.get_osds_under(root_id))

            # do we intersect an existing root?
            s = None
            for prev in result.values():
                if osds & prev.osds:
                    s = prev
                    break
            if not s:
                s = CrushSubtreeResourceStatus()
                roots.append(s)
            result[root_id] = s
            s.root_ids.append(root_id)
            s.osds |= osds
            s.pool_ids.append(pool_id)
            s.pool_names.append(pool['pool_name'])
            s.pg_current += pool['pg_num_target'] * pool['size']
            target_ratio = pool['options'].get('target_size_ratio', 0.0)
            if target_ratio:
                s.total_target_ratio += target_ratio
            else:
                target_bytes = pool['options'].get('target_size_bytes', 0)
                if target_bytes:
                    s.total_target_bytes += target_bytes * osdmap.pool_raw_used_rate(pool_id)

        # finish subtrees
        all_stats = self.get('osd_stats')
        for s in roots:
            assert s.osds is not None
            s.osd_count = len(s.osds)
            s.pg_target = s.osd_count * self.mon_target_pg_per_osd
            s.pg_left = s.pg_target
            s.pool_count = len(s.pool_ids)
            capacity = 0
            for osd_stats in all_stats['osd_stats']:
                if osd_stats['osd'] in s.osds:
                    # Intentionally do not apply the OSD's reweight to
                    # this, because we want to calculate PG counts based
                    # on the physical storage available, not how it is
                    # reweighted right now.
                    capacity += osd_stats['kb'] * 1024

            s.capacity = capacity

            self.log.debug('root_ids %s pools %s with %d osds, pg_target %d',
                           s.root_ids,
                           s.pool_ids,
                           s.osd_count,
                           s.pg_target)

        return result, pool_root
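
The per-subtree budget is simple arithmetic: pg_target is the OSD count times mon_target_pg_per_osd, and capacity is the raw size of the member OSDs with reweight deliberately ignored. A worked example with made-up numbers (mon_target_pg_per_osd defaults to 100 in Ceph):

    osd_count = 10
    mon_target_pg_per_osd = 100                      # Ceph default, configurable
    pg_target = osd_count * mon_target_pg_per_osd    # 1000 PG shards for this subtree
    capacity = osd_count * (4 * 1024 ** 3) * 1024    # ten 4 TiB OSDs: kb * 1024, in bytes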