Example #1
def rados_commands(fsid, cluster_name, commands):
    """
    Passing in both fsid and cluster_name, because the caller
    should always know both, and it saves this function the trouble
    of looking up one from the other.
    """

    import rados
    from ceph_argparse import json_command

    # Open a RADOS session
    cluster_handle = rados.Rados(name=RADOS_NAME, clustername=cluster_name, conffile='')
    cluster_handle.connect()

    results = []

    # Each command is a 2-tuple of a prefix followed by an argument dictionary
    for i, (prefix, argdict) in enumerate(commands):
        argdict['format'] = 'json'
        if prefix == 'osd setcrushmap':
            ret, stdout, outs = transform_crushmap(argdict['data'], 'set')
            if ret != 0:
                raise RuntimeError(outs)
            ret, outbuf, outs = json_command(cluster_handle, prefix=prefix, argdict={}, timeout=RADOS_TIMEOUT, inbuf=stdout)
        else:
            ret, outbuf, outs = json_command(cluster_handle, prefix=prefix, argdict=argdict, timeout=RADOS_TIMEOUT)
        if ret != 0:
            return {
                'error': True,
                'results': results,
                'error_status': outs,
                'versions': cluster_status(cluster_handle, cluster_name)['versions'],
                'fsid': fsid
            }
        if outbuf:
            results.append(json.loads(outbuf))
        else:
            results.append(None)

    # For all RADOS commands, we include the cluster map versions
    # in the response, so that the caller knows which versions to
    # wait for in order to see the consequences of their actions.
    # TODO: not all commands will require version info on completion, consider making
    # this optional.
    # TODO: we should endeavor to return something clean even if we can't talk to RADOS
    # enough to get version info
    versions = cluster_status(cluster_handle, cluster_name)['versions']

    # Success
    return {
        'error': False,
        'results': results,
        'error_status': '',
        'versions': versions,
        'fsid': fsid
    }
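
A minimal sketch of how this helper could be called, assuming RADOS_NAME and RADOS_TIMEOUT are defined at module level as in the other snippets on this page; the fsid, pool name, and command arguments below are illustrative only, not taken from any project. The commands argument is a list of (prefix, argdict) 2-tuples, as the loop above expects.

commands = [
    ('osd pool create', {'pool': 'rbd_test', 'pg_num': 64}),
    ('osd pool set', {'pool': 'rbd_test', 'var': 'size', 'val': '2'}),
]
result = rados_commands('00000000-0000-0000-0000-000000000000', 'ceph', commands)
if result['error']:
    print(result['error_status'])
else:
    print(result['versions'])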
Example #2
    def _rados_command(self, prefix, args=None, decode=True):
        """
        Safer wrapper for ceph_argparse.json_command, which raises
        Error exception instead of relying on caller to check return
        codes.

        Error exception can result from:
        * Timeout
        * Actual legitimate errors
        * Malformed JSON output

        return: Decoded object from ceph, or None if empty string returned.
                If decode is False, return a string (the data returned by
                ceph command)
        """
        if args is None:
            args = {}

        argdict = args.copy()
        argdict["format"] = "json"

        ret, outbuf, outs = json_command(self.rados, prefix=prefix, argdict=argdict, timeout=RADOS_TIMEOUT)
        if ret != 0:
            raise rados.Error(outs)
        else:
            if decode:
                if outbuf:
                    try:
                        return json.loads(outbuf)
                    except (ValueError, TypeError):
                        raise RadosError("Invalid JSON output for command {0}".format(argdict))
                else:
                    return None
            else:
                return outbuf
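
A short sketch (not from the original class) of how a wrapper like _rados_command might be used by other methods on the same object; the prefixes are ordinary mon commands, and the 'epoch' key comes from the 'osd dump' JSON output.

    def _get_osd_epoch(self):
        # 'osd dump' output is decoded from JSON; the map carries an 'epoch' field.
        osd_map = self._rados_command('osd dump')
        return osd_map['epoch'] if osd_map else None

    def _get_health_raw(self):
        # decode=False returns the raw output buffer without JSON parsing.
        return self._rados_command('health', decode=False)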
Example #3
File: ceph.py  Project: JiYou/ceph_code
def osdids():
    ret, outbuf, outs = json_command(cluster_handle, prefix='osd ls')
    if ret == -errno.EINVAL:
        # try old mon
        ret, outbuf, outs = send_command(cluster_handle, cmd=['osd', 'ls'])
    if ret:
        raise RuntimeError('Can\'t contact mon for osd list')
    return [i for i in outbuf.split('\n') if i != '']
Example #4
def ceph_get_pg_stats(rados_handle):
    """
    Wrapper for Ceph 'pg stat' json command
    """
    ret, j, _ = json_command(rados_handle, prefix="pg stat",
                             argdict={'format': 'json'})

    if ret == 0:
        return json.loads(j)
Example #5
def ceph_get_pg_stats(rados_handle):
    """
    Wrapper for Ceph 'pg stat' json command
    """
    ret, j, _ = json_command(rados_handle,
                             prefix="pg stat",
                             argdict={'format': 'json'})

    if ret == 0:
        return json.loads(j)
Example #6
File: ceph.py  Project: JiYou/ceph_code
def help_for_target(target, partial=None):
    ret, outbuf, outs = json_command(cluster_handle, target=target,
                                     prefix='get_command_descriptions',
                                     timeout=10)
    if ret:
        print >> sys.stderr, \
            "couldn't get command descriptions for {0}: {1}".\
            format(target, outs)
    else:
        help_for_sigs(outbuf, partial)
Example #7
File: ceph.py  Project: JiYou/ceph_code
def monids():
    ret, outbuf, outs = json_command(cluster_handle, prefix='mon dump',
                                     argdict={'format':'json'})
    if ret == -errno.EINVAL:
        # try old mon
        ret, outbuf, outs = send_command(cluster_handle,
                                         cmd=['mon', 'dump', '--format=json'])
    if ret:
        raise RuntimeError('Can\'t contact mon for mon list')
    d = json.loads(outbuf)
    return [m['name'] for m in d['mons']]
Example #8
def _do_runsource(self, source):
    ret, buf, s = json_command(self.cluster,
                               prefix=self.cmd_prefix,
                               target=('mon-mgr', ),
                               inbuf=source.encode(),
                               timeout=self.timeout)
    if ret == 0:
        # TODO: better way to encode the outputs
        sys.stdout.write(buf.decode())
        sys.stderr.write(s)
    else:
        # needs more
        self.write("the input is not complete")
Example #9
    def get_command_descriptions(cluster, target=("mon", "")):
        ret, outbuf, outs = json_command(cluster, target, prefix="get_command_descriptions", timeout=30)
        if ret:
            err = "Can't get command descriptions: {0}".format(outs)
            app.logger.error(err)
            raise EnvironmentError(ret, err)

        try:
            sigdict = parse_json_funcsigs(outbuf, "rest")
        except Exception as e:
            err = "Can't parse command descriptions: {}".format(e)
            app.logger.error(err)
            raise EnvironmentError(err)
        return sigdict
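
A hedged illustration of how the returned dictionary might be consumed (the surrounding app object and logging call are assumptions): each entry maps a command tag to a parsed signature, which concise_sig() from ceph_argparse can render, as the new_style_command example later on this page also shows.

sigdict = get_command_descriptions(app.ceph_cluster)
for cmdtag in sorted(sigdict):
    app.logger.debug('%s: %s', cmdtag, concise_sig(sigdict[cmdtag]['sig']))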
Example #10
def rados_command(cluster_handle, prefix, args=None, decode=True):
    """Safer wrapper for ceph_argparse.json_command, which raises

    Error exception instead of relying on caller to check return

    codes.

    Error exception can result from:

    * Timeout

    * Actual legitimate errors

    * Malformed JSON output

    return: Decoded object from ceph, or None if empty string returned.

            If decode is False, return a string (the data returned by

            ceph command)

    """
    if args is None:
        args = {}

    argdict = args.copy()
    argdict['format'] = 'json'

    from ceph_argparse import json_command
    import rados

    ret, outbuf, outs = json_command(cluster_handle,
                                     prefix=prefix,
                                     argdict=argdict,
                                     timeout=RADOS_TIMEOUT)
    if ret != 0:
        raise rados.Error(outs)
    else:
        if decode:
            if outbuf:
                try:
                    return json_loads_byteified(outbuf)
                except (ValueError, TypeError):
                    raise RadosError(
                        "Invalid JSON output for command {0}".format(argdict)
                    )
            else:
                return None
        else:
            return outbuf
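
A couple of hypothetical call sites for this module-level wrapper; cluster_handle is an already-connected rados.Rados handle as in the other examples on this page, and the prefixes are standard mon commands.

status = rados_command(cluster_handle, 'status')
fsid = status['fsid']
pools = rados_command(cluster_handle, 'osd lspools')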
Example #11
File: ceph.py  Project: JiYou/ceph_code
def mdsids():
    ret, outbuf, outs = json_command(cluster_handle, prefix='mds dump',
                                     argdict={'format':'json'})
    if ret == -errno.EINVAL:
        # try old mon
        ret, outbuf, outs = send_command(cluster_handle,
                                         cmd=['mds', 'dump', '--format=json'])
    if ret:
        raise RuntimeError('Can\'t contact mon for mds list')
    d = json.loads(outbuf)
    l = []
    infodict = d['info']
    for mdsdict in infodict.values():
        l.append(mdsdict['name'])
    return l
Example #12
    def get_command_descriptions(cluster, target=('mon', '')):
        ret, outbuf, outs = json_command(cluster, target,
                                         prefix='get_command_descriptions',
                                         timeout=30)
        if ret:
            err = "Can't get command descriptions: {0}".format(outs)
            app.logger.error(err)
            raise EnvironmentError(ret, err)

        try:
            sigdict = parse_json_funcsigs(outbuf, 'rest')
        except Exception as e:
            err = "Can't parse command descriptions: {}".format(e)
            app.logger.error(err)
            raise EnvironmentError(err)
        return sigdict
Example #13
def _set_data(cluster_name="ceph"):
    from ceph_argparse import json_command

    # Open a RADOS session
    with ClusterHandle(cluster_name) as cluster_handle:
        ret, outbuf, outs = json_command(
            cluster_handle, prefix="status", argdict={"format": "json"}, timeout=RADOS_TIMEOUT
        )
        status = json.loads(outbuf)
        fsid = status["fsid"]

        data = rbd_listing(cluster_handle, cluster_name)
        version = md5(msgpack.packb(data)).hexdigest()
        __salt__["data.update"]("rbd_list", (version, data))

    return fsid
Example #14
def find_up_osd(app):
    """
    Find an up OSD.  Return the last one that's up.
    Returns id as an int.
    """
    ret, outbuf, outs = json_command(app.ceph_cluster, prefix="osd dump", argdict=dict(format="json"))
    if ret:
        raise EnvironmentError(ret, "Can't get osd dump output")
    try:
        osddump = json.loads(outbuf)
    except:
        raise EnvironmentError(errno.EINVAL, "Invalid JSON back from osd dump")
    osds = [osd["osd"] for osd in osddump["osds"] if osd["up"]]
    if not osds:
        return None
    return int(osds[-1])
Example #15
def find_up_osd(app):
    '''
    Find an up OSD.  Return the last one that's up.
    Returns id as an int.
    '''
    ret, outbuf, outs = json_command(app.ceph_cluster, prefix="osd dump",
                                     argdict=dict(format='json'))
    if ret:
        raise EnvironmentError(ret, 'Can\'t get osd dump output')
    try:
        osddump = json.loads(outbuf)
    except:
        raise EnvironmentError(errno.EINVAL, 'Invalid JSON back from osd dump')
    osds = [osd['osd'] for osd in osddump['osds'] if osd['up']]
    if not osds:
        return None
    return int(osds[-1])
Example #16
def _set_data(cluster_name='ceph'):
    from ceph_argparse import json_command

    # Open a RADOS session
    with ClusterHandle(cluster_name) as cluster_handle:
        ret, outbuf, outs = json_command(cluster_handle,
                                         prefix='status',
                                         argdict={'format': 'json'},
                                         timeout=RADOS_TIMEOUT)
        status = json.loads(outbuf)
        fsid = status['fsid']

        data = rbd_listing(cluster_handle, cluster_name)
        version = md5(msgpack.packb(data)).hexdigest()
        __salt__['data.update']('rbd_list', (version, data))

    return fsid
Example #17
def _cmd(cluster, cmd, **kwargs):
    target = ceph_argparse.find_cmd_target(cmd.split())

    argdict = {
        'prefix': cmd,
        'target': target,
        'format': 'json',
    }
    argdict.update(kwargs)
    log.debug('Calling ceph: %r', argdict)
    ret, outbuf, outs = ceph_argparse.json_command(cluster,
                                                   target=target,
                                                   prefix=None,
                                                   argdict=argdict)
    if ret:
        raise RuntimeError(outs)
    return json.loads(outbuf.decode('utf-8'))
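
A brief sketch of how _cmd might be called; cluster is a connected rados.Rados handle, both prefixes are standard monitor commands, and the variable names are illustrative.

osd_dump = _cmd(cluster, 'osd dump')
quorum = _cmd(cluster, 'quorum_status')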
Example #18
def find_up_osd(app):
    '''
    Find an up OSD.  Return the last one that's up.
    Returns id as an int.
    '''
    ret, outbuf, outs = json_command(app.ceph_cluster, prefix="osd dump",
                                     argdict=dict(format='json'))
    if ret:
        raise EnvironmentError(ret, 'Can\'t get osd dump output')
    try:
        osddump = json.loads(outbuf)
    except:
        raise EnvironmentError(errno.EINVAL, 'Invalid JSON back from osd dump')
    osds = [osd['osd'] for osd in osddump['osds'] if osd['up']]
    if not osds:
        return None
    return int(osds[-1])
Example #19
 def _get_utilization_data(self):
     from ceph_argparse import json_command
     import rados
     cluster_handle = rados.Rados(name=ceph.RADOS_NAME,
                                  clustername=self.name,
                                  conffile='')
     cluster_handle.connect()
     prefix = 'df'
     ret, outbuf, outs = json_command(cluster_handle,
                                      prefix=prefix,
                                      argdict={},
                                      timeout=ceph.RADOS_TIMEOUT)
     if ret != 0:
         raise rados.Error(outs)
     else:
         outbuf = outbuf.replace('RAW USED', 'RAW_USED')
         outbuf = outbuf.replace('%RAW USED', '%RAW_USED')
         outbuf = outbuf.replace('MAX AVAIL', 'MAX_AVAIL')
         lines = outbuf.split('\n')
         index = 0
         cluster_stat = {}
         pool_stat = []
         pool_stat_available = False
         while index < len(lines):
             line = lines[index]
             if line == "" or line == '\n':
                 index += 1
                 continue
             if "GLOBAL" in line:
                 index += 1
                 if len(lines) < 3:
                     raise rados.Error("Failed to parse pool stats data")
                 cluster_fields = lines[index].split()
                 cluster_size_idx = self._idx_in_list(
                     cluster_fields, 'SIZE')
                 cluster_avail_idx = self._idx_in_list(
                     cluster_fields, 'AVAIL')
                 cluster_used_idx = self._idx_in_list(
                     cluster_fields, 'RAW_USED')
                 cluster_pcnt_used_idx = self._idx_in_list(
                     cluster_fields, '%RAW_USED')
                 if cluster_size_idx == -1 or cluster_avail_idx == -1 or \
                     cluster_used_idx == -1 or cluster_pcnt_used_idx == -1:
                     raise rados.Error("Missing fields in cluster stat")
                 index += 1
                 if index >= len(lines):
                     raise rados.Error("No cluster stats to parse")
                 line = lines[index]
                 cluster_fields = line.split()
                 if len(cluster_fields) < 4:
                     raise rados.Error("Missing fields in cluster stat")
                 cluster_stat['total'] = self._to_bytes(
                     cluster_fields[cluster_size_idx])
                 cluster_stat['used'] = self._to_bytes(
                     cluster_fields[cluster_used_idx])
                 cluster_stat['available'] = self._to_bytes(
                     cluster_fields[cluster_avail_idx])
                 cluster_stat['pcnt_used'] = cluster_fields[
                     cluster_pcnt_used_idx]
             if "POOLS" in line:
                 pool_stat_available = True
                 index += 1
                 if index >= len(lines):
                     raise rados.Error("No pool stats to parse")
                 pool_fields = lines[index].split()
                 pool_name_idx = self._idx_in_list(pool_fields, 'NAME')
                 pool_id_idx = self._idx_in_list(pool_fields, 'ID')
                 pool_used_idx = self._idx_in_list(pool_fields, 'USED')
                 pool_pcnt_used_idx = self._idx_in_list(
                     pool_fields, '%USED')
                 pool_max_avail_idx = self._idx_in_list(
                     pool_fields, 'MAX_AVAIL')
                 if pool_name_idx == -1 or pool_id_idx == -1 or \
                     pool_used_idx == -1 or pool_pcnt_used_idx == -1 or \
                     pool_max_avail_idx == -1:
                     raise rados.Error("Missing fields in pool stat")
                 index += 1
             if pool_stat_available:
                 line = lines[index]
                 pool_fields = line.split()
                 if len(pool_fields) < 5:
                     raise rados.Error("Missing fields in pool stat")
                 dict = {}
                 dict['name'] = pool_fields[pool_name_idx]
                 dict['available'] = self._to_bytes(
                     pool_fields[pool_max_avail_idx])
                 dict['used'] = self._to_bytes(pool_fields[pool_used_idx])
                 dict['pcnt_used'] = pool_fields[pool_pcnt_used_idx]
                 pool_stat.append(dict)
             index += 1
         return {'cluster': cluster_stat, 'pools': pool_stat}
Example #20
def get_cluster_object(cluster_name, sync_type, since):
    # TODO: for the synced objects that support it, support
    # fetching older-than-present versions to allow the master
    # to backfill its history.

    from ceph_argparse import json_command

    # Check you're asking me for something I know how to give you
    assert sync_type in SYNC_TYPES

    # Open a RADOS session
    with ClusterHandle(cluster_name) as cluster_handle:
        ret, outbuf, outs = json_command(cluster_handle,
                                         prefix='status',
                                         argdict={'format': 'json'},
                                         timeout=RADOS_TIMEOUT)
        status = json.loads(outbuf)
        fsid = status['fsid']

        if sync_type == 'config':
            # Special case for config, get this via admin socket instead of librados
            raw = _get_config(cluster_name)
            version = md5(raw)
            data = json.loads(raw)
        else:
            command, kwargs, version_fn = {
                'quorum_status': ('quorum_status', {}, lambda d, r: d['election_epoch']),
                'mon_status': ('mon_status', {}, lambda d, r: d['election_epoch']),
                'mon_map': ('mon dump', {}, lambda d, r: d['epoch']),
                'osd_map': ('osd dump', {}, lambda d, r: d['epoch']),
                'mds_map': ('mds dump', {}, lambda d, r: d['epoch']),
                'pg_summary': ('pg dump', {'dumpcontents': ['pgs_brief']}, lambda d, r: md5(msgpack.packb(d))),
                'health': ('health', {'detail': ''}, lambda d, r: md5(r))
            }[sync_type]
            kwargs['format'] = 'json'
            ret, raw, outs = json_command(cluster_handle, prefix=command, argdict=kwargs, timeout=RADOS_TIMEOUT)
            assert ret == 0

            if sync_type == 'pg_summary':
                data = pg_summary(json.loads(raw))
                version = version_fn(data, raw)
            else:
                data = json.loads(raw)
                version = version_fn(data, raw)

            # Internally, the OSDMap includes the CRUSH map, and the 'osd tree' output
            # is generated from the OSD map.  We synthesize a 'full' OSD map dump to
            # send back to the calamari server.
            if sync_type == 'osd_map':
                ret, raw, outs = json_command(cluster_handle, prefix="osd tree", argdict={
                    'format': 'json',
                    'epoch': version
                }, timeout=RADOS_TIMEOUT)
                assert ret == 0
                data['tree'] = json.loads(raw)
                # FIXME: crush dump does not support an epoch argument, so this is potentially
                # from a higher-versioned OSD map than the one we've just read
                ret, raw, outs = json_command(cluster_handle, prefix="osd crush dump", argdict=kwargs,
                                              timeout=RADOS_TIMEOUT)
                assert ret == 0
                data['crush'] = json.loads(raw)

                ret, raw, outs = json_command(cluster_handle, prefix="osd getcrushmap", argdict={'epoch': version},
                                              timeout=RADOS_TIMEOUT)
                assert ret == 0

                ret, stdout, outs = transform_crushmap(raw, 'get')
                assert ret == 0
                data['crush_map_text'] = stdout
                data['osd_metadata'] = []

                for osd_entry in data['osds']:
                    osd_id = osd_entry['osd']
                    command = "osd metadata"
                    argdict = {'id': osd_id}
                    argdict.update(kwargs)
                    ret, raw, outs = json_command(cluster_handle, prefix=command, argdict=argdict,
                                                  timeout=RADOS_TIMEOUT)
                    # TODO I'm not sure this is what I want, but this can fail when a cluster is not healthy
                    if ret == 0:
                        updated_osd_metadata = json.loads(raw)
                        updated_osd_metadata['osd'] = osd_id
                        data['osd_metadata'].append(updated_osd_metadata)

    return {
        'type': sync_type,
        'fsid': fsid,
        'version': version,
        'data': data
    }
Example #21
File: ceph.py  Project: JiYou/ceph_code
def new_style_command(parsed_args, cmdargs, target, sigdict, inbuf, verbose):
    """
    Do new-style command dance.
    target: daemon to receive command: mon (any) or osd.N
    sigdict - the parsed output from the new monitor describing commands
    inbuf - any -i input file data
    verbose - bool
    """
    if verbose:
        for cmdtag in sorted(sigdict.keys()):
            cmd = sigdict[cmdtag]
            sig = cmd['sig']
            print '{0}: {1}'.format(cmdtag, concise_sig(sig))

    got_command = False

    if not got_command:
        if cmdargs:
            # Validate input args against list of sigs
            valid_dict = validate_command(sigdict, cmdargs, verbose)
            if valid_dict:
                got_command = True
                if parsed_args.output_format:
                    valid_dict['format'] = parsed_args.output_format
            else:
                return -errno.EINVAL, '', 'invalid command'
        else:
            if sys.stdin.isatty():
                # do the command-interpreter looping
                # for raw_input to do readline cmd editing
                import readline

            while True:
                interactive_input = read_input()
                if interactive_input is None:
                    return 0, '', ''
                cmdargs = parse_cmdargs(shlex.split(interactive_input))[2]
                try:
                    target = find_cmd_target(cmdargs)
                except Exception as e:
                    print >> sys.stderr, \
                            'error handling command target: {0}'.format(e)
                    return 1, '', ''
                valid_dict = validate_command(sigdict, cmdargs, verbose)
                if valid_dict:
                    if parsed_args.output_format:
                        valid_dict['format'] = parsed_args.output_format
                    if verbose:
                        print >> sys.stderr, "Submitting command ", valid_dict
                    ret, outbuf, outs = json_command(cluster_handle,
                                                     target=target,
                                                     argdict=valid_dict)
                    if ret:
                        ret = abs(ret)
                        print >> sys.stderr, \
                            'Error: {0} {1}'.format(ret, errno.errorcode[ret])
                    if outbuf:
                        print outbuf
                    if outs:
                        print >> sys.stderr, 'Status:\n', outs
                else:
                    print >> sys.stderr, "Invalid command"

    if verbose:
        print >> sys.stderr, "Submitting command ", valid_dict
    return json_command(cluster_handle, target=target, argdict=valid_dict,
                        inbuf=inbuf)
Example #22
def handler(catchall_path=None, fmt=None, target=None):
    '''
    Main endpoint handler; generic for every endpoint, including catchall.
    Handles the catchall, anything with <.fmt>, anything with embedded
    <target>.  Partial match or ?help cause the HTML-table
    "show_human_help" output.
    '''

    ep = catchall_path or flask.request.endpoint
    ep = ep.replace('.<fmt>', '')

    if ep[0] != '/':
        ep = '/' + ep

    # demand that endpoint begin with app.ceph_baseurl
    if not ep.startswith(app.ceph_baseurl):
        return make_response(fmt, '', 'Page not found', 404)

    rel_ep = ep[len(app.ceph_baseurl) + 1:]

    # Extensions override Accept: headers override defaults
    if not fmt:
        if 'application/json' in flask.request.accept_mimetypes.values():
            fmt = 'json'
        elif 'application/xml' in flask.request.accept_mimetypes.values():
            fmt = 'xml'

    prefix = ''
    pgid = None
    cmdtarget = 'mon', ''

    if target:
        # got tell/<target>; validate osdid or pgid
        name = CephOsdName()
        pgidobj = CephPgid()
        try:
            name.valid(target)
        except ArgumentError:
            # try pgid
            try:
                pgidobj.valid(target)
            except ArgumentError:
                return flask.make_response("invalid osdid or pgid", 400)
            else:
                # it's a pgid
                pgid = pgidobj.val
                cmdtarget = 'pg', pgid
        else:
            # it's an osd
            cmdtarget = name.nametype, name.nameid

        # prefix does not include tell/<target>/
        prefix = ' '.join(rel_ep.split('/')[2:]).strip()
    else:
        # non-target command: prefix is entire path
        prefix = ' '.join(rel_ep.split('/')).strip()

    # show "match as much as you gave me" help for unknown endpoints
    if ep not in app.ceph_urls:
        helptext = show_human_help(prefix)
        if helptext:
            resp = flask.make_response(helptext, 400)
            resp.headers['Content-Type'] = 'text/html'
            return resp
        else:
            return make_response(fmt, '', 'Invalid endpoint ' + ep, 400)

    found = None
    exc = ''
    for urldict in app.ceph_urls[ep]:
        if flask.request.method not in urldict['methods']:
            continue
        paramsig = urldict['paramsig']

        # allow '?help' for any specifically-known endpoint
        if 'help' in flask.request.args:
            response = flask.make_response('{0}: {1}'.
                                           format(prefix +
                                                  concise_sig(paramsig),
                                                  urldict['help']))
            response.headers['Content-Type'] = 'text/plain'
            return response

        # if there are parameters for this endpoint, process them
        if paramsig:
            args = {}
            for k, l in flask.request.args.iterlists():
                if len(l) == 1:
                    args[k] = l[0]
                else:
                    args[k] = l

            # is this a valid set of params?
            try:
                argdict = validate(args, paramsig)
                found = urldict
                break
            except Exception as e:
                exc += str(e)
                continue
        else:
            if flask.request.args:
                continue
            found = urldict
            argdict = {}
            break

    if not found:
        return make_response(fmt, '', exc + '\n', 400)

    argdict['format'] = fmt or 'plain'
    argdict['module'] = found['module']
    argdict['perm'] = found['perm']
    if pgid:
        argdict['pgid'] = pgid

    if not cmdtarget:
        cmdtarget = ('mon', '')

    app.logger.debug('sending command prefix %s argdict %s', prefix, argdict)
    ret, outbuf, outs = json_command(app.ceph_cluster, prefix=prefix,
                                     target=cmdtarget,
                                     inbuf=flask.request.data, argdict=argdict)
    if ret:
        return make_response(fmt, '', 'Error: {0} ({1})'.format(outs, ret), 400)

    response = make_response(fmt, outbuf, outs or 'OK', 200)
    if fmt:
        contenttype = 'application/' + fmt.replace('-pretty', '')
    else:
        contenttype = 'text/plain'
    response.headers['Content-Type'] = contenttype
    return response
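
To make the prefix/target derivation above easier to follow, here is a toy standalone re-implementation of just that part of handler() (an assumption for illustration, not code from ceph-rest-api); it only handles osd.N style targets, whereas the real handler also accepts a pgid via CephPgid, and '/api/v0.1' merely stands in for app.ceph_baseurl.

def derive_prefix_and_target(path, baseurl='/api/v0.1'):
    rel_ep = path[len(baseurl) + 1:]
    parts = rel_ep.split('/')
    if parts and parts[0] == 'tell':
        # tell/<target>/<command...>: strip the first two components
        name_type, name_id = parts[1].split('.', 1)
        return ' '.join(parts[2:]).strip(), (name_type, name_id)
    return ' '.join(parts).strip(), ('mon', '')

print(derive_prefix_and_target('/api/v0.1/osd/dump'))            # ('osd dump', ('mon', ''))
print(derive_prefix_and_target('/api/v0.1/tell/osd.3/version'))  # ('version', ('osd', '3'))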
Example #23
File: ceph.py  Project: JiYou/ceph_code
def main():
    # Read CEPH_ARGS from the environment; this supports usage such as
    # sending the log to stderr, e.g.
    # export CEPH_ARGS="--log-to-stderr"
    ceph_args = os.environ.get('CEPH_ARGS')

    # If there are extra ceph arguments here, handle them; normally
    # there are none.
    if ceph_args:
        if "injectargs" in sys.argv:
            i = sys.argv.index("injectargs")
            sys.argv = sys.argv[:i] + ceph_args.split() + sys.argv[i:]
        else:
            sys.argv.extend(ceph_args.split())

    parser, parsed_args, childargs = parse_cmdargs()

    # If only the version was requested, print it and exit.
    if parsed_args.version:
        print 'ceph version {0} ({1})'.format(CEPH_GIT_NICE_VER, CEPH_GIT_VER)
        return 0

    # verbose enables extra diagnostic output on stderr.
    global verbose
    verbose = parsed_args.verbose

    if verbose:
        print >> sys.stderr, "parsed_args: {0}, childargs: {1}".format(parsed_args, childargs)

    # This is just a client tool and cannot be run with --admin-socket;
    # only daemons such as ceph-mon and ceph-osd are run that way.
    if parsed_args.admin_socket_nope:
        print >> sys.stderr, '--admin-socket is used by daemons; '\
        'you probably mean --admin-daemon/daemon'
        return 1

    # pass on --id, --name, --conf
    name = 'client.admin'
    # Determine the client name used for authentication, normally
    # client.admin.  If an id was given, use it directly.
    if parsed_args.client_id:
        name = 'client.' + parsed_args.client_id

    # If a full name was given, use it as-is.
    if parsed_args.client_name:
        name = parsed_args.client_name

    # default '' means default conf search
    # Set the configuration file to use, normally /etc/ceph/ceph.conf.

    conffile = ''
    if parsed_args.cephconf:
        conffile = parsed_args.cephconf
    # For now, --admin-daemon is handled as usual.  Try it
    # first in case we can't connect() to the cluster

    # Set the output format.
    format = parsed_args.output_format

    # Set the admin socket path; when running 'ceph -s' this block is not used.
    sockpath = None
    if parsed_args.admin_socket:
        sockpath = parsed_args.admin_socket

    elif len(childargs) > 0 and childargs[0] == "daemon":
        # Treat "daemon <path>" or "daemon <name>" like --admin_daemon <path>
        if len(childargs) > 2:
            if childargs[1].find('/') >= 0:
                sockpath = childargs[1]
            else:
                # try resolve daemon name
                try:
                    sockpath = ceph_conf(parsed_args, 'admin_socket',
                                         childargs[1])
                except Exception as e:
                    print >> sys.stderr, \
                        'Can\'t get admin socket path: ' + str(e)
                    return errno.EINVAL
            # for both:
            childargs = childargs[2:]
        else:
            print >> sys.stderr, 'daemon requires at least 3 arguments'
            return errno.EINVAL

    # When running 'ceph -s', execution reaches this point.

    if sockpath:
        try:
            print admin_socket(sockpath, childargs, format)
        except Exception as e:
            print >> sys.stderr, 'admin_socket: {0}'.format(e)
            return errno.EINVAL
        return 0

    timeout = None
    if parsed_args.cluster_timeout:
        timeout = parsed_args.cluster_timeout

    # basic help
    if parsed_args.help:
        do_basic_help(parser, childargs)

    # handle any 'generic' ceph arguments that we didn't parse here
    global cluster_handle

    # rados.Rados() will call rados_create2, and then read the conf file,
    # and then set the keys from the dict.  So we must do these
    # "pre-file defaults" first (see common_preinit in librados)
    conf_defaults = {
        'log_to_stderr':'true',
        'err_to_stderr':'true',
        'log_flush_on_exit':'true',
    }

    if 'injectargs' in childargs:
        position = childargs.index('injectargs')
        injectargs = childargs[position:]
        childargs = childargs[:position]
        if verbose:
            print >> sys.stderr, 'Separate childargs {0} from injectargs {1}'.\
                format(childargs, injectargs)
    else:
        injectargs = None

    clustername = 'ceph'
    if parsed_args.cluster:
        clustername = parsed_args.cluster

    try:
        cluster_handle = rados.Rados(name=name, clustername=clustername,
                                     conf_defaults=conf_defaults,
                                     conffile=conffile)
        retargs = cluster_handle.conf_parse_argv(childargs)
    except rados.Error as e:
        print >> sys.stderr, 'Error initializing cluster client: {0}'.\
            format(repr(e))
        return 1

    childargs = retargs
    if not childargs:
        childargs = []

    # -- means "stop parsing args", but we don't want to see it either
    if '--' in childargs:
        childargs.remove('--')
    if injectargs and '--' in injectargs:
        injectargs.remove('--')

    # special deprecation warning for 'ceph <type> tell'
    # someday 'mds' will be here too
    if len(childargs) >= 2 and \
        childargs[0] in ['mon', 'osd'] and \
        childargs[1] == 'tell':
        print >> sys.stderr, '"{0} tell" is deprecated; try "tell {0}.<id>" instead (id can be "*") '.format(childargs[0])
        return 1

    if parsed_args.help:
        # short default timeout for -h
        if not timeout:
            timeout = 5

        hdr('Monitor commands:')
        print '[Contacting monitor, timeout after %d seconds]' % timeout

    if childargs and childargs[0] == 'ping':
        if len(childargs) < 2:
            print >> sys.stderr, '"ping" requires a monitor name as argument: "ping mon.<id>"'
            return 1

    try:
        if childargs and childargs[0] == 'ping':
            return ping_monitor(cluster_handle, childargs[1])
        cluster_handle.connect(timeout=timeout)
    except KeyboardInterrupt:
        print >> sys.stderr, 'Cluster connection aborted'
        return 1
    except Exception as e:
        print >> sys.stderr, 'Error connecting to cluster: {0}'.\
            format(e.__class__.__name__)
        return 1

    if parsed_args.help:
        return do_extended_help(parser, childargs)

    # implement -w/--watch_*
    # This is ugly, but Namespace() isn't quite rich enough.
    level = ''
    for k, v in parsed_args._get_kwargs():
        if k.startswith('watch') and v:
            if k == 'watch':
                level = 'info'
            else:
                level = k.replace('watch_', '')
    if level:

        # an awfully simple callback
        def watch_cb(arg, line, who, stamp_sec, stamp_nsec, seq, level, msg):
            print line
            sys.stdout.flush()

        # first do a ceph status
        ret, outbuf, outs = json_command(cluster_handle, prefix='status')
        if ret == -errno.EINVAL:
            # try old mon
            ret, outbuf, outs = send_command(cluster_handle, cmd=['status'])
            # old mon returns status to outs...ick
            if ret == 0:
                outbuf += outs
        if ret:
            print >> sys.stderr, "status query failed: ", outs
            return ret
        print outbuf

        # this instance keeps the watch connection alive, but is
        # otherwise unused
        logwatch = rados.MonitorLog(cluster_handle, level, watch_cb, 0)

        # loop forever letting watch_cb print lines
        try:
            signal.pause()
        except KeyboardInterrupt:
            # or until ^C, at least
            return 0

    # read input file, if any
    inbuf = ''
    if parsed_args.input_file:
        try:
            with open(parsed_args.input_file, 'r') as f:
                inbuf = f.read()
        except Exception as e:
            print >> sys.stderr, 'Can\'t open input file {0}: {1}'.format(parsed_args.input_file, e)
            return 1

    # prepare output file, if any
    if parsed_args.output_file:
        try:
            outf = open(parsed_args.output_file, 'w')
        except Exception as e:
            print >> sys.stderr, \
                'Can\'t open output file {0}: {1}'.\
                format(parsed_args.output_file, e)
            return 1

    # -s behaves like a command (ceph status).
    if parsed_args.status:
        childargs.insert(0, 'status')

    try:
        target = find_cmd_target(childargs)
    except Exception as e:
        print >> sys.stderr, \
                'error handling command target: {0}'.format(e)
        return 1

    # Repulsive hack to handle tell: lop off 'tell' and target
    # and validate the rest of the command.  'target' is already
    # determined in our callers, so it's ok to remove it here.
    is_tell = False
    if len(childargs) and childargs[0] == 'tell':
        childargs = childargs[2:]
        is_tell = True

    if is_tell:
        if injectargs:
            childargs = injectargs
        if not len(childargs):
            print >> sys.stderr, \
                'Cannot use \'tell\' with interactive mode'
            return errno.EINVAL

    # fetch JSON sigs from command
    # each line contains one command signature (a placeholder name
    # of the form 'cmdNNN' followed by an array of argument descriptors)
    # as part of the validated argument JSON object

    targets = [target]

    if target[1] == '*':
        if target[0] == 'osd':
            targets = [(target[0], o) for o in osdids()]
        elif target[0] == 'mon':
            targets = [(target[0], m) for m in monids()]

    final_ret = 0
    for target in targets:
        # prettify?  prefix output with target, if there was a wildcard used
        prefix = ''
        suffix = ''
        if not parsed_args.output_file and len(targets) > 1:
            prefix = '{0}.{1}: '.format(*target)
            suffix = '\n'

        ret, outbuf, outs = json_command(cluster_handle, target=target,
                                         prefix='get_command_descriptions')
        compat = False
        if ret == -errno.EINVAL:
            # send command to old monitor or OSD
            if verbose:
                print prefix + '{0} to old {1}'.format(' '.join(childargs), target[0])
            compat = True
            if parsed_args.output_format:
                childargs.extend(['--format', parsed_args.output_format])
            ret, outbuf, outs = send_command(cluster_handle, target, childargs,
                                             inbuf)

            if ret == -errno.EINVAL:
                # did we race with a mon upgrade?  try again!
                ret, outbuf, outs = json_command(cluster_handle, target=target,
                                                 prefix='get_command_descriptions')
                if ret == 0:
                    compat = False  # yep, carry on
        if not compat:
            if ret:
                if ret < 0:
                    outs = 'problem getting command descriptions from {0}.{1}'.format(*target)
            else:
                sigdict = parse_json_funcsigs(outbuf, 'cli')

                if parsed_args.completion:
                    return complete(sigdict, childargs, target)

                ret, outbuf, outs = new_style_command(parsed_args, childargs, target,
                                                      sigdict, inbuf, verbose)

                # debug tool: send any successful command *again* to
                # verify that it is idempotent.
                if not ret and 'CEPH_CLI_TEST_DUP_COMMAND' in os.environ:
                    ret, outbuf, outs = new_style_command(parsed_args, childargs, target,
                                                          sigdict, inbuf, verbose)
                    if ret < 0:
                        ret = -ret
                        print >> sys.stderr, prefix + 'Second attempt of previously successful command failed with {0}: {1}'.format(errno.errorcode[ret], outs)

        if ret < 0:
            ret = -ret
            print >> sys.stderr, prefix + 'Error {0}: {1}'.format(errno.errorcode[ret], outs)
            if len(targets) > 1:
                final_ret = ret
            else:
                return ret

        # this assumes outs never has useful command output, only status
        if compat:
            if ret == 0:
                # old cli/mon would send status string to stdout on non-error
                print outs
        else:
            if outs:
                print >> sys.stderr, prefix + outs

        if (parsed_args.output_file):
            outf.write(outbuf)
        else:
            # hack: old code printed status line before many json outputs
            # (osd dump, etc.) that consumers know to ignore.  Add blank line
            # to satisfy consumers that skip the first line, but not annoy
            # consumers that don't.
            if parsed_args.output_format and \
               parsed_args.output_format.startswith('json') and \
               not compat:
                sys.stdout.write('\n')

            # if we are prettifying things, normalize newlines.  sigh.
            if suffix != '':
                outbuf = outbuf.rstrip()
            if outbuf != '':
                sys.stdout.write(prefix + outbuf + suffix)

        sys.stdout.flush()

    if (parsed_args.output_file):
        outf.close()

    if final_ret:
        return final_ret

    return 0
Example #24
File: ceph.py  Project: GregMeno/calamari
def get_cluster_object(cluster_name, sync_type, since):
    # TODO: for the synced objects that support it, support
    # fetching older-than-present versions to allow the master
    # to backfill its history.

    # Check you're asking me for something I know how to give you
    assert sync_type in SYNC_TYPES

    # Open a RADOS session
    cluster_handle = rados.Rados(name=RADOS_NAME, clustername=cluster_name, conffile='')
    cluster_handle.connect()

    ret, outbuf, outs = json_command(cluster_handle,
                                     prefix='status',
                                     argdict={'format': 'json'},
                                     timeout=RADOS_TIMEOUT)
    status = json.loads(outbuf)
    fsid = status['fsid']

    if sync_type == 'config':
        # Special case for config, get this via admin socket instead of librados
        raw = _get_config(cluster_name)
        version = md5(raw)
        data = json.loads(raw)
    else:
        command, kwargs, version_fn = {
            'mon_status': ('mon_status', {}, lambda d, r: d['election_epoch']),
            'mon_map': ('mon dump', {}, lambda d, r: d['epoch']),
            'osd_map': ('osd dump', {}, lambda d, r: d['epoch']),
            'mds_map': ('mds dump', {}, lambda d, r: d['epoch']),
            'pg_summary': ('pg dump', {'dumpcontents': ['pgs_brief']}, lambda d, r: md5(msgpack.packb(d))),
            'health': ('health', {'detail': ''}, lambda d, r: md5(r))
        }[sync_type]
        kwargs['format'] = 'json'
        ret, raw, outs = json_command(cluster_handle, prefix=command, argdict=kwargs, timeout=RADOS_TIMEOUT)
        assert ret == 0

        if sync_type == 'pg_summary':
            data = pg_summary(json.loads(raw))
            version = version_fn(data, raw)
        else:
            data = json.loads(raw)
            version = version_fn(data, raw)

        # Internally, the OSDMap includes the CRUSH map, and the 'osd tree' output
        # is generated from the OSD map.  We synthesize a 'full' OSD map dump to
        # send back to the calamari server.
        if sync_type == 'osd_map':
            ret, raw, outs = json_command(cluster_handle, prefix="osd tree", argdict={
                'format': 'json',
                'epoch': version
            }, timeout=RADOS_TIMEOUT)
            assert ret == 0
            data['tree'] = json.loads(raw)
            # FIXME: crush dump does not support an epoch argument, so this is potentially
            # from a higher-versioned OSD map than the one we've just read
            ret, raw, outs = json_command(cluster_handle, prefix="osd crush dump", argdict=kwargs,
                                          timeout=RADOS_TIMEOUT)
            assert ret == 0
            data['crush'] = json.loads(raw)

    return {
        'type': sync_type,
        'fsid': fsid,
        'version': version,
        'data': data
    }
Example #25
def get_cluster_object(cluster_name, sync_type, since):
    # TODO: for the synced objects that support it, support
    # fetching older-than-present versions to allow the master
    # to backfill its history.

    import rados
    from ceph_argparse import json_command

    # Check you're asking me for something I know how to give you
    assert sync_type in SYNC_TYPES

    # Open a RADOS session
    cluster_handle = rados.Rados(name=RADOS_NAME, clustername=cluster_name, conffile="")
    cluster_handle.connect()

    ret, outbuf, outs = json_command(cluster_handle, prefix="status", argdict={"format": "json"}, timeout=RADOS_TIMEOUT)
    status = json.loads(outbuf)
    fsid = status["fsid"]

    if sync_type == "config":
        # Special case for config, get this via admin socket instead of librados
        raw = _get_config(cluster_name)
        version = md5(raw)
        data = json.loads(raw)
    else:
        command, kwargs, version_fn = {
            "mon_status": ("mon_status", {}, lambda d, r: d["election_epoch"]),
            "mon_map": ("mon dump", {}, lambda d, r: d["epoch"]),
            "osd_map": ("osd dump", {}, lambda d, r: d["epoch"]),
            "mds_map": ("mds dump", {}, lambda d, r: d["epoch"]),
            "pg_summary": ("pg dump", {"dumpcontents": ["pgs_brief"]}, lambda d, r: md5(msgpack.packb(d))),
            "health": ("health", {"detail": ""}, lambda d, r: md5(r)),
        }[sync_type]
        kwargs["format"] = "json"
        ret, raw, outs = json_command(cluster_handle, prefix=command, argdict=kwargs, timeout=RADOS_TIMEOUT)
        assert ret == 0

        if sync_type == "pg_summary":
            data = pg_summary(json.loads(raw))
            version = version_fn(data, raw)
        else:
            data = json.loads(raw)
            version = version_fn(data, raw)

        # Internally, the OSDMap includes the CRUSH map, and the 'osd tree' output
        # is generated from the OSD map.  We synthesize a 'full' OSD map dump to
        # send back to the calamari server.
        if sync_type == "osd_map":
            ret, raw, outs = json_command(
                cluster_handle, prefix="osd tree", argdict={"format": "json", "epoch": version}, timeout=RADOS_TIMEOUT
            )
            assert ret == 0
            data["tree"] = json.loads(raw)
            # FIXME: crush dump does not support an epoch argument, so this is potentially
            # from a higher-versioned OSD map than the one we've just read
            ret, raw, outs = json_command(
                cluster_handle, prefix="osd crush dump", argdict=kwargs, timeout=RADOS_TIMEOUT
            )
            assert ret == 0
            data["crush"] = json.loads(raw)

            ret, raw, outs = json_command(
                cluster_handle, prefix="osd getcrushmap", argdict={"epoch": version}, timeout=RADOS_TIMEOUT
            )
            assert ret == 0

            ret, stdout, outs = transform_crushmap(raw, "get")
            assert ret == 0
            data["crush_map_text"] = stdout

    return {"type": sync_type, "fsid": fsid, "version": version, "data": data}
Example #26
def get_cluster_object(cluster_name, sync_type, since):
    # TODO: for the synced objects that support it, support
    # fetching older-than-present versions to allow the master
    # to backfill its history.

    import rados
    from ceph_argparse import json_command

    # Check you're asking me for something I know how to give you
    assert sync_type in SYNC_TYPES

    # Open a RADOS session
    cluster_handle = rados.Rados(name=RADOS_NAME,
                                 clustername=cluster_name,
                                 conffile='')
    cluster_handle.connect()

    ret, outbuf, outs = json_command(cluster_handle,
                                     prefix='status',
                                     argdict={'format': 'json'},
                                     timeout=RADOS_TIMEOUT)
    status = json.loads(outbuf)
    fsid = status['fsid']

    if sync_type == 'config':
        # Special case for config, get this via admin socket instead of librados
        raw = _get_config(cluster_name)
        version = md5(raw)
        data = json.loads(raw)
    else:
        command, kwargs, version_fn = {
            'mon_status': ('mon_status', {}, lambda d, r: d['election_epoch']),
            'mon_map': ('mon dump', {}, lambda d, r: d['epoch']),
            'osd_map': ('osd dump', {}, lambda d, r: d['epoch']),
            'mds_map': ('mds dump', {}, lambda d, r: d['epoch']),
            'pg_summary': ('pg dump', {
                'dumpcontents': ['pgs_brief']
            }, lambda d, r: md5(msgpack.packb(d))),
            'health': ('health', {
                'detail': ''
            }, lambda d, r: md5(r))
        }[sync_type]
        kwargs['format'] = 'json'
        ret, raw, outs = json_command(cluster_handle,
                                      prefix=command,
                                      argdict=kwargs,
                                      timeout=RADOS_TIMEOUT)
        assert ret == 0

        if sync_type == 'pg_summary':
            data = pg_summary(json.loads(raw))
            version = version_fn(data, raw)
        else:
            data = json.loads(raw)
            version = version_fn(data, raw)

        # Internally, the OSDMap includes the CRUSH map, and the 'osd tree' output
        # is generated from the OSD map.  We synthesize a 'full' OSD map dump to
        # send back to the calamari server.
        if sync_type == 'osd_map':
            ret, raw, outs = json_command(cluster_handle,
                                          prefix="osd tree",
                                          argdict={
                                              'format': 'json',
                                              'epoch': version
                                          },
                                          timeout=RADOS_TIMEOUT)
            assert ret == 0
            data['tree'] = json.loads(raw)
            # FIXME: crush dump does not support an epoch argument, so this is potentially
            # from a higher-versioned OSD map than the one we've just read
            ret, raw, outs = json_command(cluster_handle,
                                          prefix="osd crush dump",
                                          argdict=kwargs,
                                          timeout=RADOS_TIMEOUT)
            assert ret == 0
            data['crush'] = json.loads(raw)

            ret, raw, outs = json_command(cluster_handle,
                                          prefix="osd getcrushmap",
                                          argdict={'epoch': version},
                                          timeout=RADOS_TIMEOUT)
            assert ret == 0

            ret, stdout, outs = transform_crushmap(raw, 'get')
            assert ret == 0
            data['crush_map_text'] = stdout

    return {'type': sync_type, 'fsid': fsid, 'version': version, 'data': data}
Example #27
    def _get_utilization_data(self):
        from ceph_argparse import json_command
        import rados
        _conf_file = os.path.join("/etc/ceph",
                                  NS.tendrl_context.cluster_name + ".conf")
        # TODO(shtripat) use ceph.ceph_command instead of rados/json_command
        cluster_handle = rados.Rados(
            name=ceph.RADOS_NAME,
            clustername=NS.tendrl_context.cluster_name,
            conffile=_conf_file)
        cluster_handle.connect()
        prefix = 'df'
        ret, outbuf, outs = json_command(cluster_handle,
                                         prefix=prefix,
                                         argdict={},
                                         timeout=ceph.RADOS_TIMEOUT)
        if ret != 0:
            cluster_handle.shutdown()
            raise rados.Error(outs)
        else:
            outbuf = outbuf.replace('RAW USED', 'RAW_USED')
            outbuf = outbuf.replace('%RAW USED', '%RAW_USED')
            outbuf = outbuf.replace('MAX AVAIL', 'MAX_AVAIL')
            lines = outbuf.split('\n')
            index = 0
            cluster_stat = {}
            pool_stat = {}
            pool_stat_available = False
            cluster_handle.shutdown()

            while index < len(lines):
                line = lines[index]
                if line == "" or line == '\n':
                    index += 1
                    continue
                if "GLOBAL" in line:
                    index += 1
                    if len(lines) < 3:
                        raise rados.Error("Failed to parse pool stats data")
                    cluster_fields = lines[index].split()
                    cluster_size_idx = self._idx_in_list(
                        cluster_fields, 'SIZE')
                    cluster_avail_idx = self._idx_in_list(
                        cluster_fields, 'AVAIL')
                    cluster_used_idx = self._idx_in_list(
                        cluster_fields, 'RAW_USED')
                    cluster_pcnt_used_idx = self._idx_in_list(
                        cluster_fields, '%RAW_USED')
                    if cluster_size_idx == -1 or cluster_avail_idx == -1 or \
                        cluster_used_idx == -1 or cluster_pcnt_used_idx == -1:
                        raise rados.Error("Missing fields in cluster stat")
                    index += 1
                    if index >= len(lines):
                        Event(
                            Message(priority="debug",
                                    publisher=NS.publisher_id,
                                    payload={
                                        "message": "No cluster stats to parse"
                                    }))
                        return {'cluster': cluster_stat, 'pools': {}}
                    line = lines[index]
                    cluster_fields = line.split()
                    if len(cluster_fields) < 4:
                        Event(
                            Message(priority="debug",
                                    publisher=NS.publisher_id,
                                    payload={
                                        "message":
                                        "Missing fields in cluster"
                                        " stat"
                                    }))
                        return {'cluster': cluster_stat, 'pools': {}}
                    cluster_stat['total'] = self._to_bytes(
                        cluster_fields[cluster_size_idx])
                    cluster_stat['used'] = self._to_bytes(
                        cluster_fields[cluster_used_idx])
                    cluster_stat['available'] = self._to_bytes(
                        cluster_fields[cluster_avail_idx])
                    cluster_stat['pcnt_used'] = cluster_fields[
                        cluster_pcnt_used_idx]
                if "POOLS" in line:
                    pool_stat_available = True
                    index += 1
                    if index >= len(lines):
                        Event(
                            Message(
                                priority="debug",
                                publisher=NS.publisher_id,
                                payload={"message": "No pool stats to parse"}))
                        return {'cluster': cluster_stat, 'pools': {}}
                    pool_fields = lines[index].split()
                    pool_name_idx = self._idx_in_list(pool_fields, 'NAME')
                    pool_id_idx = self._idx_in_list(pool_fields, 'ID')
                    pool_used_idx = self._idx_in_list(pool_fields, 'USED')
                    pool_pcnt_used_idx = self._idx_in_list(
                        pool_fields, '%USED')
                    pool_max_avail_idx = self._idx_in_list(
                        pool_fields, 'MAX_AVAIL')
                    if pool_name_idx == -1 or pool_id_idx == -1 or \
                        pool_used_idx == -1 or pool_pcnt_used_idx == -1 or \
                        pool_max_avail_idx == -1:
                        Event(
                            Message(priority="debug",
                                    publisher=NS.publisher_id,
                                    payload={
                                        "message": "Missing fields in pool "
                                        "stat"
                                    }))
                        return {'cluster': cluster_stat, 'pools': {}}
                    index += 1
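                # Each remaining line is a per-pool row; record its used,
                # available and percent-used values keyed by pool name.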
                if pool_stat_available and index < len(lines):
                    line = lines[index]
                    pool_fields = line.split()
                    if len(pool_fields) < 5:
                        Event(
                            Message(priority="debug",
                                    publisher=NS.publisher_id,
                                    payload={
                                        "message": "Missing fields in pool"
                                        " stat"
                                    }))
                        return {'cluster': cluster_stat, 'pools': {}}

                    loc_dict = {}
                    loc_dict['available'] = self._to_bytes(
                        pool_fields[pool_max_avail_idx])
                    loc_dict['used'] = self._to_bytes(
                        pool_fields[pool_used_idx])
                    loc_dict['pcnt_used'] = pool_fields[pool_pcnt_used_idx]
                    pool_stat[pool_fields[pool_name_idx]] = loc_dict
                index += 1

            return {'cluster': cluster_stat, 'pools': pool_stat}
Example #28
def get_cluster_object(cluster_name, sync_type):
    # TODO(Rohan) for the synced objects that support it, support
    # fetching older-than-present versions to allow the master
    # to backfill its history.

    from ceph_argparse import json_command
    import rados

    # Check you're asking me for something I know how to give you
    assert sync_type in SYNC_TYPES

    # Open a RADOS session
    if cluster_name is None:
        cluster_name = "ceph"

    _conf_file = os.path.join(SRC_DIR, cluster_name + ".conf")
    cluster_handle = rados.Rados(name=RADOS_NAME,
                                 clustername=cluster_name,
                                 conffile=_conf_file)
    cluster_handle.connect()

    ret, outbuf, outs = json_command(cluster_handle,
                                     prefix='status',
                                     argdict={'format': 'json'},
                                     timeout=RADOS_TIMEOUT)
    assert ret == 0
    status = json_loads_byteified(outbuf)
    fsid = status['fsid']

    if sync_type == 'config':
        # Special case for config, get this via admin socket instead of
        # librados
        raw = _get_config(cluster_name)
        version = md5(raw)
        data = json_loads_byteified(raw)
    else:
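        # Map each sync type to its mon command prefix, extra arguments and a
        # function that derives a version identifier from the command output.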
        command, kwargs, version_fn = {
            'mon_status': ('mon_status', {}, lambda d, r: d['election_epoch']),
            'mon_map': ('mon dump', {}, lambda d, r: d['epoch']),
            'osd_map': ('osd dump', {}, lambda d, r: d['epoch']),
            'mds_map': ('mds dump', {}, lambda d, r: d['epoch']),
            'pg_summary': ('pg dump', {
                'dumpcontents': ['pgs_brief']
            }, lambda d, r: md5(msgpack.packb(d))),
            'health': ('health', {
                'detail': ''
            }, lambda d, r: md5(r))
        }[sync_type]
        kwargs['format'] = 'json'
        ret, raw, outs = json_command(cluster_handle,
                                      prefix=command,
                                      argdict=kwargs,
                                      timeout=RADOS_TIMEOUT)
        assert ret == 0

        if sync_type == 'pg_summary':
            data = pg_summary(json_loads_byteified(raw))
            version = version_fn(data, raw)
        else:
            data = json_loads_byteified(raw)
            version = version_fn(data, raw)

        # Internally, the OSDMap includes the CRUSH map, and the 'osd tree'
        # output is generated from the OSD map.  We synthesize a 'full' OSD
        # map dump to send back to the calamari server.
        if sync_type == 'osd_map':
            ret, raw, outs = json_command(cluster_handle,
                                          prefix="osd tree",
                                          argdict={
                                              'format': 'json',
                                              'epoch': version
                                          },
                                          timeout=RADOS_TIMEOUT)
            assert ret == 0
            data['tree'] = json_loads_byteified(raw)
            # FIXME: crush dump does not support an epoch argument, so this
            # is potentially from a higher-versioned OSD map than the one
            # we've just read
            ret, raw, outs = json_command(cluster_handle,
                                          prefix="osd crush dump",
                                          argdict=kwargs,
                                          timeout=RADOS_TIMEOUT)
            assert ret == 0
            data['crush'] = json_loads_byteified(raw)

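            # Fetch the binary CRUSH map and run it through transform_crushmap
            # to obtain the plain-text form reported as 'crush_map_text'.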
            ret, raw, outs = json_command(cluster_handle,
                                          prefix="osd getcrushmap",
                                          argdict={'epoch': version},
                                          timeout=RADOS_TIMEOUT)
            assert ret == 0

            ret, stdout, outs = transform_crushmap(raw, 'get')
            assert ret == 0
            data['crush_map_text'] = stdout
            data['osd_metadata'] = []

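            # Collect metadata for every OSD in the map, skipping any OSD
            # whose metadata is not currently available (e.g. it is down).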
            for osd_entry in data['osds']:
                osd_id = osd_entry['osd']
                command = "osd metadata"
                argdict = {'id': osd_id}
                argdict.update(kwargs)
                ret, raw, outs = json_command(cluster_handle,
                                              prefix=command,
                                              argdict=argdict,
                                              timeout=RADOS_TIMEOUT)
                if ret != 0:
                    Event(
                        Message(priority="debug",
                                publisher=NS.publisher_id,
                                payload={
                                    "message":
                                    "Metadata not"
                                    " available for OSD: %s" % osd_id
                                }))
                    continue
                updated_osd_metadata = json_loads_byteified(raw)
                updated_osd_metadata['osd'] = osd_id
                data['osd_metadata'].append(updated_osd_metadata)

    cluster_handle.shutdown()
    return {'type': sync_type, 'fsid': fsid, 'version': version, 'data': data}
Example #29
def handler(catchall_path=None, fmt=None, target=None):
    '''
    Main endpoint handler; generic for every endpoint, including catchall.
    Handles the catchall, anything with <.fmt>, anything with embedded
    <target>.  Partial match or ?help cause the HTML-table
    "show_human_help" output.
    '''

    ep = catchall_path or flask.request.endpoint
    ep = ep.replace('.<fmt>', '')

    if ep[0] != '/':
        ep = '/' + ep

    # demand that endpoint begin with app.ceph_baseurl
    if not ep.startswith(app.ceph_baseurl):
        return make_response(fmt, '', 'Page not found', 404)

    rel_ep = ep[len(app.ceph_baseurl) + 1:]

    # Extensions override Accept: headers override defaults
    if not fmt:
        if 'application/json' in flask.request.accept_mimetypes.values():
            fmt = 'json'
        elif 'application/xml' in flask.request.accept_mimetypes.values():
            fmt = 'xml'

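    # Default to addressing the monitors; a tell/<target> path below retargets
    # the command at a specific OSD or PG.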
    prefix = ''
    pgid = None
    cmdtarget = 'mon', ''

    if target:
        # got tell/<target>; validate osdid or pgid
        name = CephOsdName()
        pgidobj = CephPgid()
        try:
            name.valid(target)
        except ArgumentError:
            # try pgid
            try:
                pgidobj.valid(target)
            except ArgumentError:
                return flask.make_response("invalid osdid or pgid", 400)
            else:
                # it's a pgid
                pgid = pgidobj.val
                cmdtarget = 'pg', pgid
        else:
            # it's an osd
            cmdtarget = name.nametype, name.nameid

        # prefix does not include tell/<target>/
        prefix = ' '.join(rel_ep.split('/')[2:]).strip()
    else:
        # non-target command: prefix is entire path
        prefix = ' '.join(rel_ep.split('/')).strip()

    # show "match as much as you gave me" help for unknown endpoints
    if ep not in app.ceph_urls:
        helptext = show_human_help(prefix)
        if helptext:
            resp = flask.make_response(helptext, 400)
            resp.headers['Content-Type'] = 'text/html'
            return resp
        else:
            return make_response(fmt, '', 'Invalid endpoint ' + ep, 400)

    found = None
    exc = ''
    for urldict in app.ceph_urls[ep]:
        if flask.request.method not in urldict['methods']:
            continue
        paramsig = urldict['paramsig']

        # allow '?help' for any specifically-known endpoint
        if 'help' in flask.request.args:
            response = flask.make_response('{0}: {1}'.format(
                prefix + concise_sig(paramsig), urldict['help']))
            response.headers['Content-Type'] = 'text/plain'
            return response

        # if there are parameters for this endpoint, process them
        if paramsig:
            args = {}
            for k, l in flask.request.args.iterlists():
                if len(l) == 1:
                    args[k] = l[0]
                else:
                    args[k] = l

            # is this a valid set of params?
            try:
                argdict = validate(args, paramsig)
                found = urldict
                break
            except Exception as e:
                exc += str(e)
                continue
        else:
            if flask.request.args:
                continue
            found = urldict
            argdict = {}
            break

    if not found:
        return make_response(fmt, '', exc + '\n', 400)

    argdict['format'] = fmt or 'plain'
    argdict['module'] = found['module']
    argdict['perm'] = found['perm']
    if pgid:
        argdict['pgid'] = pgid

    if not cmdtarget:
        cmdtarget = ('mon', '')

    app.logger.debug('sending command prefix %s argdict %s', prefix, argdict)

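    # Retry if the command is interrupted (EINTR); the for/else falls through
    # to a 504 response only when every attempt is interrupted.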
    for _ in range(DEFAULT_TRIES):
        ret, outbuf, outs = json_command(app.ceph_cluster,
                                         prefix=prefix,
                                         target=cmdtarget,
                                         inbuf=flask.request.data,
                                         argdict=argdict,
                                         timeout=DEFAULT_TIMEOUT)
        if ret != -errno.EINTR:
            break
    else:
        return make_response(fmt, '', 'Timed out: {0} ({1})'.format(outs, ret),
                             504)
    if ret:
        return make_response(fmt, '', 'Error: {0} ({1})'.format(outs, ret),
                             400)

    response = make_response(fmt, outbuf, outs or 'OK', 200)
    if fmt:
        contenttype = 'application/' + fmt.replace('-pretty', '')
    else:
        contenttype = 'text/plain'
    response.headers['Content-Type'] = contenttype
    return response
Example #30
def handler(catchall_path=None, fmt=None, target=None):
    """
    Main endpoint handler; generic for every endpoint, including catchall.
    Handles the catchall, anything with <.fmt>, anything with embedded
    <target>.  Partial match or ?help cause the HTML-table
    "show_human_help" output.
    """

    ep = catchall_path or flask.request.endpoint
    ep = ep.replace(".<fmt>", "")

    if ep[0] != "/":
        ep = "/" + ep

    # demand that endpoint begin with app.ceph_baseurl
    if not ep.startswith(app.ceph_baseurl):
        return make_response(fmt, "", "Page not found", 404)

    rel_ep = ep[len(app.ceph_baseurl) + 1 :]

    # Extensions override Accept: headers override defaults
    if not fmt:
        if "application/json" in flask.request.accept_mimetypes.values():
            fmt = "json"
        elif "application/xml" in flask.request.accept_mimetypes.values():
            fmt = "xml"

    prefix = ""
    pgid = None
    cmdtarget = "mon", ""

    if target:
        # got tell/<target>; validate osdid or pgid
        name = CephOsdName()
        pgidobj = CephPgid()
        try:
            name.valid(target)
        except ArgumentError:
            # try pgid
            try:
                pgidobj.valid(target)
            except ArgumentError:
                return flask.make_response("invalid osdid or pgid", 400)
            else:
                # it's a pgid
                pgid = pgidobj.val
                cmdtarget = "pg", pgid
        else:
            # it's an osd
            cmdtarget = name.nametype, name.nameid

        # prefix does not include tell/<target>/
        prefix = " ".join(rel_ep.split("/")[2:]).strip()
    else:
        # non-target command: prefix is entire path
        prefix = " ".join(rel_ep.split("/")).strip()

    # show "match as much as you gave me" help for unknown endpoints
    if ep not in app.ceph_urls:
        helptext = show_human_help(prefix)
        if helptext:
            resp = flask.make_response(helptext, 400)
            resp.headers["Content-Type"] = "text/html"
            return resp
        else:
            return make_response(fmt, "", "Invalid endpoint " + ep, 400)

    found = None
    exc = ""
    for urldict in app.ceph_urls[ep]:
        if flask.request.method not in urldict["methods"]:
            continue
        paramsig = urldict["paramsig"]

        # allow '?help' for any specifically-known endpoint
        if "help" in flask.request.args:
            response = flask.make_response("{0}: {1}".format(prefix + concise_sig(paramsig), urldict["help"]))
            response.headers["Content-Type"] = "text/plain"
            return response

        # if there are parameters for this endpoint, process them
        if paramsig:
            args = {}
            for k, l in flask.request.args.iterlists():
                if len(l) == 1:
                    args[k] = l[0]
                else:
                    args[k] = l

            # is this a valid set of params?
            try:
                argdict = validate(args, paramsig)
                found = urldict
                break
            except Exception as e:
                exc += str(e)
                continue
        else:
            if flask.request.args:
                continue
            found = urldict
            argdict = {}
            break

    if not found:
        return make_response(fmt, "", exc + "\n", 400)

    argdict["format"] = fmt or "plain"
    argdict["module"] = found["module"]
    argdict["perm"] = found["perm"]
    if pgid:
        argdict["pgid"] = pgid

    if not cmdtarget:
        cmdtarget = ("mon", "")

    app.logger.debug("sending command prefix %s argdict %s", prefix, argdict)
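    # json_command returns a (return code, output buffer, status string) tuple;
    # a nonzero return code is reported to the client as a 400 error.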
    ret, outbuf, outs = json_command(
        app.ceph_cluster, prefix=prefix, target=cmdtarget, inbuf=flask.request.data, argdict=argdict
    )
    if ret:
        return make_response(fmt, "", "Error: {0} ({1})".format(outs, ret), 400)

    response = make_response(fmt, outbuf, outs or "OK", 200)
    if fmt:
        contenttype = "application/" + fmt.replace("-pretty", "")
    else:
        contenttype = "text/plain"
    response.headers["Content-Type"] = contenttype
    return response