Example #1
    def update(self, node_id, attributes):
        # TODO report Not Modified http://tracker.ceph.com/issues/9764
        current_node = self.osd_map.get_tree_node(node_id)
        parent = self.osd_map.parent_bucket_by_node_id.get(node_id, None)
        name, bucket_type, items = [attributes[key] for key in ('name', 'bucket_type', 'items')]
        commands = []

        # TODO change to use rename-bucket when #9526 lands in ceph 0.89
        if name != current_node['name'] or bucket_type != current_node['type_name']:
            commands.append(add_bucket(name, bucket_type))
            if parent is not None:
                commands.append(move_bucket(name, parent['name'], parent['type']))

        to_remove = [item for item in current_node['items'] if item not in items]
        commands += self._remove_items(name, bucket_type, to_remove)
        for c in self._add_items(name, bucket_type, items):
            if c not in commands:
                commands.append(c)

        if name != current_node['name'] or bucket_type != current_node['type_name']:
            commands.append(remove_bucket(current_node['name'], None))

        log.info("Updating CRUSH node {c} parent {p} version {v}".format(c=commands, p=parent, v=self.osd_map.version))
        message = "Updating CRUSH node in {cluster_name}".format(cluster_name=self._cluster_monitor.name)
        return OsdMapModifyingRequest(message, self._cluster_monitor.fsid, self._cluster_monitor.name, commands)

    def create(self, attributes):
        name, bucket_type, items = [attributes[key] for key in ('name', 'bucket_type', 'items')]
        commands = [add_bucket(name, bucket_type)] +\
            self._add_items(name, bucket_type, items)

        message = "Creating CRUSH node in {cluster_name}".format(cluster_name=self._cluster_monitor.name)
        return OsdMapModifyingRequest(message, self._cluster_monitor.fsid, self._cluster_monitor.name, commands)
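
Note (not part of the example above): because a rename is handled by creating a bucket under the new name and only removing the old one at the end, update() emits its commands in a fixed order. A minimal sketch of that ordering for a hypothetical rename, expressed in terms of the helpers the example calls (add_bucket, move_bucket, _remove_items, _add_items and remove_bucket are defined elsewhere in the module and are not shown in this listing):

# Hypothetical: rename bucket "rack1" to "rack2" under parent "dc1".
# The order mirrors the body of update() above:
#   1. add_bucket('rack2', 'rack')                  # create the bucket under the new name
#   2. move_bucket('rack2', 'dc1', 'datacenter')    # re-attach it to its parent
#   3. _remove_items(...) / _add_items(...)         # re-home the child items
#   4. remove_bucket('rack1', None)                 # finally drop the old bucket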
Example #3
    def delete(self, rule_id):
        crush_rule = self.osd_map.crush_rule_by_id[int(rule_id)]
        commands = [('osd crush rule rm', {'name': crush_rule['rule_name']})]
        message = "Removing CRUSH rule in {cluster_name}".format(
            cluster_name=self._cluster_monitor.name)
        return OsdMapModifyingRequest(message, self._cluster_monitor.fsid,
                                      self._cluster_monitor.name, commands)

    def delete(self, node_id):
        current_node = self.osd_map.get_tree_node(node_id)
        commands = [remove_bucket(current_node['name'], current_node)]
        message = "Removing CRUSH node in {cluster_name}".format(
            cluster_name=self._cluster_monitor.name)
        return OsdMapModifyingRequest(message, self._cluster_monitor.fsid,
                                      self._cluster_monitor.name, commands)
Example #5
    def create(self, attributes):
        # get the text map
        crush_map = self.osd_map.data['crush_map_text']
        merged_map = _merge_rule_and_map(crush_map, attributes)
        commands = [('osd setcrushmap', {'data': merged_map})]
        log.error('setcrushmap {0} {1}'.format(merged_map, attributes))
        message = "Creating CRUSH rule in {cluster_name}".format(
            cluster_name=self._cluster_monitor.name)
        return OsdMapModifyingRequest(message, self._cluster_monitor.fsid,
                                      self._cluster_monitor.name, commands)
Example #6
    def update(self, rule_id, attributes):
        # merge it with the supplied rule
        crush_map = self.osd_map.data['crush_map_text']
        crush_rule = self.osd_map.crush_rule_by_id[rule_id]
        merged_map = _merge_rule_and_map(crush_map, attributes,
                                         crush_rule['rule_name'])
        commands = [('osd setcrushmap', {'data': merged_map})]
        message = "Updating CRUSH rule in {cluster_name}".format(
            cluster_name=self._cluster_monitor.name)
        return OsdMapModifyingRequest(message, self._cluster_monitor.fsid,
                                      self._cluster_monitor.name, commands)
Example #7
    def update(self, pool_id, attributes):
        osd_map = self._cluster_monitor.get_sync_object(OsdMap)
        pool = self._resolve_pool(pool_id)
        pool_name = pool['pool_name']

        if 'pg_num' in attributes:
            # Special case when setting pg_num: have to do some extra work
            # to wait for PG creation between setting these two fields.
            final_pg_count = attributes['pg_num']

            if 'pgp_num' in attributes:
                pgp_num = attributes['pgp_num']
                del attributes['pgp_num']
            else:
                pgp_num = attributes['pg_num']
            del attributes['pg_num']

            pre_create_commands = self._pool_attribute_commands(pool_name, attributes)

            # This setting is new in Ceph Firefly, where it defaults to 32.  For older revisions,
            # we simply pretend that the setting exists with its default value.
            mon_osd_max_split_count = int(self._cluster_monitor.get_sync_object_data(Config).get(
                'mon_osd_max_split_count', LEGACY_MON_OSD_MAX_SPLIT_COUNT))
            initial_pg_count = pool['pg_num']
            n_osds = min(initial_pg_count, len(osd_map.osds_by_id))
            # The rules about creating PGs:
            #  where N_osds = min(old_pg_count, osd_count)
            #    the number of new PGs divided by N_osds may not be greater than mon_osd_max_split_count
            block_size = mon_osd_max_split_count * n_osds

            return PgCreatingRequest(
                "Growing pool '{name}' to {size} PGs".format(name=pool_name, size=final_pg_count),
                self._cluster_monitor.fsid, self._cluster_monitor.name,
                pre_create_commands,
                pool_id, pool_name, pgp_num,
                initial_pg_count, final_pg_count, block_size)
        else:
            commands = self._pool_attribute_commands(pool_name, attributes)
            if not commands:
                raise NotImplementedError(attributes)

            # TODO: provide some machine-readable indication of which objects are affected
            # by a particular request.
            # Perhaps subclass Request for each type of object, and have that subclass provide
            # both the patches->commands mapping and the human readable and machine readable
            # descriptions of it?

            # Objects may be decorated with 'id' from use in a bulk PATCH, but we don't want anything
            # from this point onwards to see that.
            if 'id' in attributes:
                del attributes['id']
            return OsdMapModifyingRequest(
                "Modifying pool '{name}' ({attrs})".format(
                    name=pool_name, attrs=", ".join("%s=%s" % (k, v) for k, v in attributes.items())
                ), self._cluster_monitor.fsid, self._cluster_monitor.name, commands)
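
As a side note on the pg_num branch above: the block size is simply mon_osd_max_split_count * min(old_pg_count, osd_count), and PgCreatingRequest steps the pool toward final_pg_count one block at a time. A minimal, self-contained sketch of that arithmetic (the helper name and the numbers are hypothetical illustrations, not part of the API):

LEGACY_MON_OSD_MAX_SPLIT_COUNT = 32  # assumed default, per the comment in the example


def pg_growth_steps(initial_pg_count, final_pg_count, osd_count,
                    mon_osd_max_split_count=LEGACY_MON_OSD_MAX_SPLIT_COUNT):
    """Yield the intermediate pg_num targets, one per PG-creation block."""
    n_osds = min(initial_pg_count, osd_count)
    block_size = mon_osd_max_split_count * n_osds
    current = initial_pg_count
    while current < final_pg_count:
        current = min(current + block_size, final_pg_count)
        yield current


# e.g. growing a pool from 64 to 1024 PGs on a 10-OSD cluster:
# list(pg_growth_steps(64, 1024, 10)) == [384, 704, 1024]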
Example #8
    def update(self, osd_id, attributes):
        commands = []

        osd_map = self._cluster_monitor.get_sync_object(OsdMap)

        # 'osd in'/'osd out'/'osd down' take a vector of strings called 'ids', while 'osd reweight' takes a single integer id plus a weight

        if 'in' in attributes and bool(attributes['in']) != bool(
                osd_map.osds_by_id[osd_id]['in']):
            if attributes['in']:
                commands.append(('osd in', {
                    'ids': [str(attributes['id'])]
                }))
            else:
                commands.append(('osd out', {
                    'ids': [str(attributes['id'])]
                }))

        if 'up' in attributes and bool(attributes['up']) != bool(
                osd_map.osds_by_id[osd_id]['up']):
            if not attributes['up']:
                commands.append(('osd down', {
                    'ids': [str(attributes['id'])]
                }))
            else:
                raise RuntimeError(
                    "It is not valid to set a down OSD to be up")

        if 'reweight' in attributes:
            if attributes['reweight'] != float(
                    osd_map.osd_tree_node_by_id[osd_id]['reweight']):
                commands.append(('osd reweight', {
                    'id': osd_id,
                    'weight': attributes['reweight']
                }))

        if not commands:
            # Returning None indicates no-op
            return None

        print_attrs = attributes.copy()
        del print_attrs['id']

        return OsdMapModifyingRequest(
            "Modifying {cluster_name}-osd.{id} ({attrs})".format(
                cluster_name=self._cluster_monitor.name,
                id=osd_id,
                attrs=", ".join("%s=%s" % (k, v)
                                for k, v in print_attrs.items())),
            self._cluster_monitor.fsid, self._cluster_monitor.name, commands)
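
For orientation, a hedged example of the command list this update() would assemble for one made-up input (the OSD id and values are hypothetical; the tuples simply mirror the branches above):

# Hypothetical: mark osd.3 out and change its reweight, assuming the OSD map
# currently shows it as 'in' with a different reweight value.
attributes = {'id': 3, 'in': False, 'reweight': 0.8}

# commands accumulated by the method above for this input:
#   [('osd out', {'ids': ['3']}),
#    ('osd reweight', {'id': 3, 'weight': 0.8})]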
Example #9
    def delete(self, pool_id):
        # Resolve pool ID to name
        pool_name = self._resolve_pool(pool_id)['pool_name']

        # TODO: perhaps the REST API should have something in the body to
        # make it slightly harder to accidentally delete a pool, to respect
        # the severity of this operation since we're hiding the --yes-i-really-really-want-to
        # stuff here
        # TODO: handle errors in a way that caller can show to a user, e.g.
        # if the name is wrong we should be sending a structured errors dict
        # that they can use to associate the complaint with the 'name' field.
        commands = [
            ('osd pool delete', {'pool': pool_name, 'pool2': pool_name, 'sure': '--yes-i-really-really-mean-it'})]
        return OsdMapModifyingRequest("Deleting pool '{name}'".format(name=pool_name),
                                      self._cluster_monitor.fsid, self._cluster_monitor.name, commands)
Example #10
    def update_config(self, _, attributes):
        osd_map = self._cluster_monitor.get_sync_object(OsdMap)

        commands = self._commands_to_set_flags(osd_map, attributes)

        if commands:
            return OsdMapModifyingRequest(
                "Modifying OSD config {cluster_name} ({attrs})".format(
                    cluster_name=self._cluster_monitor.name,
                    attrs=", ".join("%s=%s" % (k, v)
                                    for k, v in attributes.items())),
                self._cluster_monitor.fsid, self._cluster_monitor.name,
                commands)

        else:
            return None

    def update(self, osd_id, attributes):
        commands = [('osd setcrushmap', {'data': attributes})]
        message = "Replacing CRUSH map in {cluster_name}".format(cluster_name=self._cluster_monitor.name)
        return OsdMapModifyingRequest(message, self._cluster_monitor.fsid, self._cluster_monitor.name, commands)