Example #1
    def create(self, attributes):
        name, bucket_type, items = [
            attributes[key] for key in ('name', 'bucket_type', 'items')
        ]
        commands = [add_bucket(name, bucket_type)] +\
            self._add_items(name, bucket_type, items)

        message = "Creating CRUSH node in {cluster_name}".format(
            cluster_name=self._cluster_monitor.name)
        return OsdMapModifyingRequest(message, self._cluster_monitor.fsid,
                                      self._cluster_monitor.name, commands)

    def update_config(self, _, attributes):
        osd_map = NS.state_sync_thread.get_sync_object(OsdMap)

        commands = self._commands_to_set_flags(osd_map, attributes)

        if commands:
            return OsdMapModifyingRequest(
                "Modifying OSD config {cluster_name} ({attrs})".format(
                    cluster_name=NS.state_sync_thread.name,
                    attrs=", ".join("%s=%s" % (k, v)
                                    for k, v in attributes.items())),
                NS.state_sync_thread.fsid, NS.state_sync_thread.name, commands)

        else:
            return None
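
    # Not from the source: a minimal sketch of what _commands_to_set_flags
    # might look like, assuming the attributes toggle cluster-wide OSD flags
    # (pause, noup, nodown, noout, ...) via the 'osd set'/'osd unset' mon
    # commands, and that osd_map exposes the currently set flags as a
    # collection of flag names. All of that is an assumption.
    def _commands_to_set_flags(self, osd_map, attributes):
        commands = []
        for flag in ('pause', 'noup', 'nodown', 'noout'):
            if flag not in attributes:
                continue
            # Only emit a command when the requested state differs from
            # the current state.
            if bool(attributes[flag]) != (flag in osd_map.flags):
                verb = 'set' if attributes[flag] else 'unset'
                commands.append(('osd %s' % verb, {'key': flag}))
        return commands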

    def update(self, node_id, attributes):
        # TODO(Rohan) report Not Modified http://tracker.ceph.com/issues/9764
        current_node = self.osd_map.get_tree_node(node_id)
        parent = self.osd_map.parent_bucket_by_node_id.get(node_id, None)
        name, bucket_type, items = [
            attributes[key] for key in ('name', 'bucket_type', 'items')
        ]
        commands = []

        # TODO(Rohan) change to use rename-bucket when #9526 lands in ceph 0.89
        renamed = (name != current_node['name']
                   or bucket_type != current_node['type_name'])
        if renamed:
            commands.append(add_bucket(name, bucket_type))
            if parent is not None:
                commands.append(
                    move_bucket(name, parent['name'], parent['type']))

        to_remove = [
            item for item in current_node['items'] if item not in items
        ]
        commands += self._remove_items(name, bucket_type, to_remove)
        commands += self._add_items(name, bucket_type, items)

        if renamed:
            commands.append(remove_bucket(current_node['name'], None))

        Event(
            Message(priority="info",
                    publisher=NS.publisher_id,
                    payload={
                        "message":
                        "Updating CRUSH node {c} parent {p} "
                        "version {v}".format(c=commands,
                                             p=parent,
                                             v=self.osd_map.version)
                    }))
        message = "Updating CRUSH node in {cluster_name}".format(
            cluster_name=NS.state_sync_thread.name)
        return OsdMapModifyingRequest(message, NS.state_sync_thread.fsid,
                                      NS.state_sync_thread.name, commands)
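
Both methods above depend on add_bucket, move_bucket, and remove_bucket
helpers that this example does not show (as do _add_items/_remove_items,
which presumably expand each item into similar per-item command tuples).
Assuming the helpers wrap the standard Ceph mon commands and return
(prefix, args) tuples like the rest of the code here, a minimal sketch
could look like this (argument names are assumptions):

    def add_bucket(name, bucket_type):
        # 'osd crush add-bucket' creates an empty CRUSH bucket.
        return ('osd crush add-bucket', {'name': name, 'type': bucket_type})

    def move_bucket(name, parent_name, parent_type):
        # 'osd crush move' re-parents a bucket; the mon command takes
        # 'type=name' location pairs.
        return ('osd crush move', {
            'name': name,
            'args': ['{0}={1}'.format(parent_type, parent_name)],
        })

    def remove_bucket(name, _ancestor):
        # 'osd crush remove' deletes a (now empty) bucket; the second
        # parameter is unused, mirroring the call sites above.
        return ('osd crush remove', {'name': name})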
Example #4
    def update(self, osd_id, attributes):
        commands = []

        osd_map = self._cluster_monitor.get_sync_object(OsdMap)

        # 'osd in'/'osd out'/'osd down' take a vector of strings called
        # 'ids', while 'osd reweight' takes a single integer 'id' and a
        # float 'weight'

        if 'in' in attributes and bool(attributes['in']) != bool(
                osd_map.osds_by_id[osd_id]['in']):
            if attributes['in']:
                commands.append(
                    ('osd in', {'ids': [str(attributes['id'])]}))
            else:
                commands.append(
                    ('osd out', {'ids': [str(attributes['id'])]}))

        if 'up' in attributes and bool(attributes['up']) != bool(
                osd_map.osds_by_id[osd_id]['up']):
            if not attributes['up']:
                commands.append(
                    ('osd down', {'ids': [str(attributes['id'])]}))
            else:
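                # There is no 'osd up' mon command: an OSD marks itself up
                # when its daemon boots and heartbeats, so the API can only
                # mark OSDs down.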
                raise RuntimeError(
                    "It is not valid to set a down OSD to be up")

        if 'reweight' in attributes:
            if attributes['reweight'] != float(
                    osd_map.osd_tree_node_by_id[osd_id]['reweight']):
                commands.append(('osd reweight', {
                    'id': osd_id,
                    'weight': attributes['reweight']
                }))

        if not commands:
            # Returning None indicates no-op
            return None

        msg_attrs = attributes.copy()
        del msg_attrs['id']

        if set(msg_attrs.keys()) == {'in'}:
            message = "Marking {cluster_name}-osd.{id} {state}".format(
                cluster_name=self._cluster_monitor.name,
                id=osd_id,
                state=("in" if msg_attrs['in'] else "out"))
        elif set(msg_attrs.keys()) == {'up'}:
            message = "Marking {cluster_name}-osd.{id} down".format(
                cluster_name=self._cluster_monitor.name, id=osd_id)
        elif set(msg_attrs.keys()) == {'reweight'}:
            message = "Re-weighting {cluster_name}-osd.{id} to {pct}%".format(
                cluster_name=self._cluster_monitor.name,
                id=osd_id,
                pct="{0:.1f}".format(msg_attrs['reweight'] * 100.0))
        else:
            message = "Modifying {cluster_name}-osd.{id} ({attrs})".format(
                cluster_name=self._cluster_monitor.name,
                id=osd_id,
                attrs=", ".join("%s=%s" % (k, v)
                                for k, v in msg_attrs.items()))

        return OsdMapModifyingRequest(message, self._cluster_monitor.fsid,
                                      self._cluster_monitor.name, commands)
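
As a usage illustration (hypothetical values; 'controller' stands in for an
instance of the class this method belongs to, and osd.3 is assumed to be
currently 'in'), marking the OSD out reduces to a single command tuple:

    request = controller.update(3, {'id': 3, 'in': False})
    # commands passed to OsdMapModifyingRequest:
    #     [('osd out', {'ids': ['3']})]
    # message: "Marking {cluster_name}-osd.3 out"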
Example #5
    def update(self, osd_id, attributes):
        commands = [('osd setcrushmap', {'data': attributes})]
        message = "Replacing CRUSH map in {cluster_name}".format(
            cluster_name=self._cluster_monitor.name)
        return OsdMapModifyingRequest(message, self._cluster_monitor.fsid,
                                      self._cluster_monitor.name, commands)
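
For context, 'osd setcrushmap' replaces the entire CRUSH map, and the mon
expects the compiled (binary) map as the command's input data. A hypothetical
call ('controller' stands in for an instance of the class above) might pass
bytes produced by crushtool:

    # crushtool -c crushmap.txt -o crushmap.bin   (compile the text map)
    with open('crushmap.bin', 'rb') as f:
        request = controller.update(None, f.read())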
Example #6
    def update(self, pool_id, attributes):
        osd_map = self._cluster_monitor.get_sync_object(OsdMap)
        pool = self._resolve_pool(pool_id)
        pool_name = pool['pool_name']

        # Recalculate/clamp min_size if it or size is updated
        if 'size' in attributes or 'min_size' in attributes:
            size = attributes.get('size', pool['size'])
            min_size = attributes.get('min_size', pool['min_size'])
            attributes['min_size'] = self._pool_min_size(size, min_size)
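            # e.g. (hypothetical numbers) shrinking a pool to size=2 while
            # min_size is still 3 must clamp min_size down to 2, or writes
            # could never be acknowledged.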

        if 'pg_num' in attributes:
            # Special case when setting pg_num: have to do some extra work
            # to wait for PG creation between setting these two fields.
            final_pg_count = attributes['pg_num']

            if 'pgp_num' in attributes:
                pgp_num = attributes['pgp_num']
                del attributes['pgp_num']
            else:
                pgp_num = attributes['pg_num']
            del attributes['pg_num']

            pre_create_commands = self._pool_attribute_commands(
                pool_name, attributes)

            # This setting is new in Ceph Firefly, where it defaults to 32.
            # For older releases, we simply pretend the setting exists and
            # fall back to a legacy default.
            mon_osd_max_split_count = int(
                self._cluster_monitor.get_sync_object_data(Config).get(
                    'mon_osd_max_split_count',
                    LEGACY_MON_OSD_MAX_SPLIT_COUNT
                )
            )
            initial_pg_count = pool['pg_num']
            n_osds = min(initial_pg_count, len(osd_map.osds_by_id))
            # The rule for creating PGs: the number of new PGs divided by
            # N_osds may not exceed mon_osd_max_split_count, where
            # N_osds = min(old_pg_count, osd_count)
            block_size = mon_osd_max_split_count * n_osds
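            # For illustration (hypothetical numbers): with 10 OSDs,
            # initial_pg_count=64 and mon_osd_max_split_count=32, this gives
            # block_size = 32 * 10 = 320, so growing the pool to 1024 PGs
            # proceeds in steps of at most 320 new PGs, waiting for each
            # batch to be created before requesting the next.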

            return PgCreatingRequest(
                "Growing pool '{name}' to {size} PGs".format(
                    name=pool_name, size=final_pg_count),
                self._cluster_monitor.fsid, self._cluster_monitor.name,
                pre_create_commands,
                pool_id, pool_name, pgp_num,
                initial_pg_count, final_pg_count, block_size)
        else:
            commands = self._pool_attribute_commands(pool_name, attributes)
            if not commands:
                raise NotImplementedError(attributes)

            # TODO(Rohan) provide some machine-readable indication of which
            # objects are affected by a particular request.
            # Perhaps subclass Request for each type of object, and have
            # that subclass provide both the patches->commands mapping and
            # the human readable and machine readable descriptions of it?
            # Objects may be decorated with 'id' from use in a bulk PATCH,
            # but we don't want anything from this point onwards to see that.
            if 'id' in attributes:
                del attributes['id']
            return OsdMapModifyingRequest(
                "Modifying pool '{name}' ({attrs})".format(
                    name=pool_name, attrs=", ".join(
                        "%s=%s" % (k, v) for k, v in attributes.items())
                ),
                self._cluster_monitor.fsid,
                self._cluster_monitor.name,
                commands
            )
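
The _pool_attribute_commands helper is also not shown in this example.
Assuming each settable pool attribute maps onto an
'osd pool set <pool> <var> <val>' mon command (the attribute whitelist
below is an assumption), a minimal sketch could be:

    def _pool_attribute_commands(self, pool_name, attributes):
        commands = []
        for var in ('size', 'min_size', 'pg_num', 'pgp_num',
                    'crush_ruleset'):
            if var in attributes:
                commands.append(('osd pool set', {
                    'pool': pool_name,
                    'var': var,
                    'val': str(attributes[var]),
                }))
        return commands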