Example #1
File: base.py  Project: nicholsn/datalad
def get_interface_groups(include_plugins=False):
    """Return a list of command groups.

    Parameters
    ----------
    include_plugins : bool, optional
        Whether to include a group named 'plugins' that has a list of
        discovered plugin commands.

    Returns
    -------
    A list of tuples with the form (GROUP_NAME, GROUP_DESCRIPTION, COMMANDS).
    """
    from .. import interface as _interfaces

    grps = []
    # auto detect all available interfaces and generate a function-based
    # API from them
    for _item in _interfaces.__dict__:
        if not _item.startswith('_group_'):
            continue
        grp_name = _item[7:]
        grp = getattr(_interfaces, _item)
        grps.append((grp_name,) + grp)
    # TODO(yoh): see if we could retain "generator" for plugins
    # ATM we need to make it explicit so we could check the command(s) below
    # It could at least follow the same destiny as extensions so we would
    # just do more iterative "load ups"

    if include_plugins:
        grps.append(('plugins', 'Plugins', list(_get_plugins())))
    return grps
Example #2
File: base.py  Project: datalad/datalad
def get_interface_groups(include_plugins=False):
    """Return a list of command groups.

    Parameters
    ----------
    include_plugins : bool, optional
        Whether to include a group named 'plugins' that has a list of
        discovered plugin commands.

    Returns
    -------
    A list of tuples with the form (GROUP_NAME, GROUP_DESCRIPTION, COMMANDS).
    """
    from .. import interface as _interfaces

    grps = []
    # auto detect all available interfaces and generate a function-based
    # API from them
    for _item in _interfaces.__dict__:
        if not _item.startswith('_group_'):
            continue
        grp_name = _item[7:]
        grp = getattr(_interfaces, _item)
        grps.append((grp_name,) + grp)
    # TODO(yoh): see if we could retain "generator" for plugins
    # ATM we need to make it explicit so we could check the command(s) below
    # It could at least follow the same destiny as extensions so we would
    # just do more iterative "load ups"

    if include_plugins:
        grps.append(('plugins', 'Plugins', list(_get_plugins())))
    return grps
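
For context, a minimal usage sketch of the function above, assuming a datalad installation that ships this interface.base module (the printed format is arbitrary):

from datalad.interface.base import get_interface_groups

# Iterate the (GROUP_NAME, GROUP_DESCRIPTION, COMMANDS) tuples documented above.
for grp_name, grp_descr, commands in get_interface_groups(include_plugins=True):
    print('%s: %s (%d command specs)' % (grp_name, grp_descr, len(commands)))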
Example #3
File: api.py  Project: ypid/datalad
def _load_plugins():
    from datalad.plugin import _get_plugins
    from datalad.plugin import _load_plugin
    import re

    camel = re.compile(r'([a-z])([A-Z])')

    for pname, props in _get_plugins():
        pi = _load_plugin(props['file'], fail=False)
        if pi is None:
            continue
        globals()[camel.sub('\\1_\\2', pi.__name__).lower()] = pi.__call__
Example #4
File: api.py  Project: datalad/datalad
def _load_plugins():
    from datalad.plugin import _get_plugins
    from datalad.plugin import _load_plugin
    import re

    camel = re.compile(r'([a-z])([A-Z])')

    for pname, props in _get_plugins():
        pi = _load_plugin(props['file'], fail=False)
        if pi is None:
            continue
        globals()[camel.sub('\\1_\\2', pi.__name__).lower()] = pi.__call__
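
The regular expression above turns CamelCase plugin class names into snake_case identifiers before binding the plugin callables in the module namespace. A self-contained sketch of just that conversion (the class name below is hypothetical, used only to show the rewrite):

import re

camel = re.compile(r'([a-z])([A-Z])')

# 'AddReadme' stands in for a plugin class name; only the rename is of interest here.
print(camel.sub(r'\1_\2', 'AddReadme').lower())  # -> add_readme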
Example #5
def setup_parser(
        cmdlineargs,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        return_subparsers=False):
    lgr.log(5, "Starting to setup_parser")
    # delay since it can be a heavy import
    from ..interface.base import dedent_docstring, get_interface_groups, \
        get_cmdline_command_name, alter_interface_docs_for_cmdline
    # setup cmdline args parser
    parts = {}
    # main parser
    parser = ArgumentParserDisableAbbrev(
        # cannot use '@' because we need to input JSON-LD properties (which might come with an @ prefix)
        # MH: question, do we need this at all?
        fromfile_prefix_chars=':',
        # usage="%(prog)s ...",
        description=dedent_docstring("""\
            DataLad provides a unified data distribution with the convenience of git-annex
            repositories as a backend.  DataLad command line tools allow one to manipulate
            (obtain, create, update, publish, etc.) datasets and their collections."""),
        epilog='"Control Your Data"',
        formatter_class=formatter_class,
        add_help=False)
    # common options
    helpers.parser_add_common_opt(parser, 'log_level')
    helpers.parser_add_common_opt(parser, 'pbs_runner')
    helpers.parser_add_common_opt(parser, 'change_path')
    helpers.parser_add_common_opt(
        parser,
        'version',
        version='datalad %s\n\n%s' % (datalad.__version__, _license_info()))
    if __debug__:
        parser.add_argument(
            '--dbg', action='store_true', dest='common_debug',
            help="enter Python debugger when uncaught exception happens")
        parser.add_argument(
            '--idbg', action='store_true', dest='common_idebug',
            help="enter IPython debugger when uncaught exception happens")
    parser.add_argument(
        '-c', action='append', dest='cfg_overrides', metavar='KEY=VALUE',
        help="""configuration variable setting. Overrides any configuration
        read from a file, but is potentially overridden itself by configuration
        variables in the process environment.""")
    parser.add_argument(
        '-f', '--output-format', dest='common_output_format',
        default='default',
        type=assure_unicode,
        metavar="{default,json,json_pp,tailored,'<template>'}",
        help="""select format for returned command results. 'default' give one line
        per result reporting action, status, path and an optional message;
        'json' renders a JSON object with all properties for each result (one per
        line); 'json_pp' pretty-prints JSON spanning multiple lines; 'tailored'
        enables a command-specific rendering style that is typically
        tailored to human consumption (no result output otherwise),
        '<template>' reports any value(s) of any result properties in any format
        indicated by the template (e.g. '{path}'; compare with JSON
        output for all key-value choices). The template syntax follows the Python
        "format() language". It is possible to report individual
        dictionary values, e.g. '{metadata[name]}'. If a 2nd-level key contains
        a colon, e.g. 'music:Genre', ':' must be substituted by '#' in the template,
        like so: '{metadata[music#Genre]}'.""")
    parser.add_argument(
        '--report-status', dest='common_report_status',
        choices=['success', 'failure', 'ok', 'notneeded', 'impossible', 'error'],
        help="""constrain command result report to records matching the given
        status. 'success' is a synonym for 'ok' OR 'notneeded', 'failure' stands
        for 'impossible' OR 'error'.""")
    parser.add_argument(
        '--report-type', dest='common_report_type',
        choices=['dataset', 'file'],
        action='append',
        help="""constrain command result report to records matching the given
        type. Can be given more than once to match multiple types.""")
    parser.add_argument(
        '--on-failure', dest='common_on_failure',
        choices=['ignore', 'continue', 'stop'],
        # no default: better be configured per-command
        help="""when an operation fails: 'ignore' and continue with remaining
        operations, the error is logged but does not lead to a non-zero exit code
        of the command; 'continue' works like 'ignore', but an error causes a
        non-zero exit code; 'stop' halts on first failure and yields non-zero exit
        code. A failure is any result with status 'impossible' or 'error'.""")
    parser.add_argument(
        '--run-before', dest='common_run_before',
        nargs='+',
        action='append',
        metavar=('<PROCEDURE NAME>', 'ARGS'),
        help="""Dataset procedure to run before the main command (see run-procedure
        command for details). This option can be given more than once to run
        multiple procedures in the order in which they were given.
        It is important to specify the target dataset via the --dataset argument
        of the main command."""),
    parser.add_argument(
        '--run-after', dest='common_run_after',
        nargs='+',
        action='append',
        metavar=('<PROCEDURE NAME>', 'ARGS'),
        help="""Like --run-before, but procedures are executed after the main command
        has finished."""),
    parser.add_argument(
        '--cmd', dest='_', action='store_true',
        help="""syntactical helper that can be used to end the list of global
        command line options before the subcommand label. Options like
        --run-before can take an arbitrary number of arguments and may require
        to be followed by a single --cmd in order to enable identification
        of the subcommand.""")

    # yoh: atm we only dump to console.  Might adopt the same separation later on
    #      and for consistency will call it --verbose-level as well for now
    # log-level is set via common_opts ATM
    # parser.add_argument('--log-level',
    #                     choices=('critical', 'error', 'warning', 'info', 'debug'),
    #                     dest='common_log_level',
    #                     help="""level of verbosity in log files. By default
    #                          everything, including debug messages is logged.""")
    #parser.add_argument('-l', '--verbose-level',
    #                    choices=('critical', 'error', 'warning', 'info', 'debug'),
    #                    dest='common_verbose_level',
    #                    help="""level of verbosity of console output. By default
    #                         only warnings and errors are printed.""")

    # Before doing anything additional and possibly expensive, check whether
    # we might already have got the command
    need_single_subparser = False if return_subparsers else None
    fail_handler = (lambda *a, **kw: True) \
        if return_subparsers else fail_with_short_help
    try:
        parsed_args, unparsed_args = parser._parse_known_args(
            cmdlineargs[1:], argparse.Namespace())
        if not unparsed_args:
            fail_handler(parser, msg="too few arguments", exit_code=2)
        lgr.debug("Command line args 1st pass. Parsed: %s Unparsed: %s",
                  parsed_args, unparsed_args)
    except Exception as exc:
        lgr.debug("Early parsing failed with %s", exc_str(exc))
        need_single_subparser = False
        unparsed_args = cmdlineargs[1:]  # referenced before assignment otherwise

    interface_groups = get_interface_groups()
    # TODO: see if we could retain "generator" for plugins
    # ATM we need to make it explicit so we could check the command(s) below
    # It could at least follow the same destiny as extensions so we would
    # just do more iterative "load ups"
    interface_groups.append(('plugins', 'Plugins', list(_get_plugins())))

    # The first unparsed argument could be either an unknown option to the
    # top-level "datalad" or a command. Among the unknown ones could be
    # --help/--help-np, which would need to be dealt with
    unparsed_arg = unparsed_args[0] if unparsed_args else None
    if need_single_subparser is not None \
            or unparsed_arg in ('--help', '--help-np', '-h'):
        need_single_subparser = False
        add_entrypoints_to_interface_groups(interface_groups)
    elif unparsed_arg.startswith('-'):  # unknown option
        fail_with_short_help(parser,
                             msg="unrecognized argument %s" % unparsed_arg,
                             exit_code=2)
        # if we could get a list of options known to parser,
        # we could suggest them
        # known=get_all_options(parser), provided=unparsed_arg)
    else:  # the command to handle
        known_commands = get_commands_from_groups(interface_groups)
        if unparsed_arg not in known_commands:
            # need to load all the extensions and try again
            add_entrypoints_to_interface_groups(interface_groups)
            known_commands = get_commands_from_groups(interface_groups)

        if unparsed_arg not in known_commands:
            # check if might be coming from known extensions
            from ..interface import _known_extension_commands
            extension_commands = {
                c: e
                for e, commands in _known_extension_commands.items()
                for c in commands
            }
            hint = None
            if unparsed_arg in extension_commands:
                hint = "Command %s is provided by (not installed) extension %s." \
                      % (unparsed_arg, extension_commands[unparsed_arg])
            fail_with_short_help(
                parser,
                hint=hint,
                provided=unparsed_arg,
                known=list(known_commands.keys()) + list(extension_commands.keys())
            )
        if need_single_subparser is None:
            need_single_subparser = unparsed_arg

    # --help specification was delayed since it causes immediate printout of
    # --help output before we setup --help for each command
    helpers.parser_add_common_opt(parser, 'help')

    grp_short_descriptions = []
    # create subparser, use module suffix as cmd name
    subparsers = parser.add_subparsers()
    for _, _, _interfaces \
            in sorted(interface_groups, key=lambda x: x[1]):
        # for all subcommand modules it can find
        cmd_short_descriptions = []

        for _intfspec in _interfaces:
            cmd_name = get_cmdline_command_name(_intfspec)
            if need_single_subparser and cmd_name != need_single_subparser:
                continue
            if isinstance(_intfspec[1], dict):
                # plugin
                _intf = _load_plugin(_intfspec[1]['file'], fail=False)
                if _intf is None:
                    # TODO:  add doc why we could skip this one... makes this
                    # loop harder to extract into a dedicated function
                    continue
            else:
                # turn the interface spec into an instance
                lgr.log(5, "Importing module %s " % _intfspec[0])
                try:
                    _mod = import_module(_intfspec[0], package='datalad')
                except Exception as e:
                    lgr.error("Internal error, cannot import interface '%s': %s",
                              _intfspec[0], exc_str(e))
                    continue
                _intf = getattr(_mod, _intfspec[1])
            # deal with optional parser args
            if hasattr(_intf, 'parser_args'):
                parser_args = _intf.parser_args
            else:
                parser_args = dict(formatter_class=formatter_class)
                # use class description, if no explicit description is available
                intf_doc = '' if _intf.__doc__ is None else _intf.__doc__.strip()
                if hasattr(_intf, '_docs_'):
                    # expand docs
                    intf_doc = intf_doc.format(**_intf._docs_)
                parser_args['description'] = alter_interface_docs_for_cmdline(
                    intf_doc)
            subparser = subparsers.add_parser(cmd_name, add_help=False, **parser_args)
            # our own custom help for all commands
            helpers.parser_add_common_opt(subparser, 'help')
            # let module configure the parser
            _intf.setup_parser(subparser)
            # logger for command

            # configure 'run' function for this command
            plumbing_args = dict(
                func=_intf.call_from_parser,
                logger=logging.getLogger(_intf.__module__),
                subparser=subparser)
            if hasattr(_intf, 'result_renderer_cmdline'):
                plumbing_args['result_renderer'] = _intf.result_renderer_cmdline
            subparser.set_defaults(**plumbing_args)
            parts[cmd_name] = subparser
            # store short description for later
            sdescr = getattr(_intf, 'short_description',
                             parser_args['description'].split('\n')[0])
            cmd_short_descriptions.append((cmd_name, sdescr))
        grp_short_descriptions.append(cmd_short_descriptions)

    # create command summary
    if '--help' in cmdlineargs or '--help-np' in cmdlineargs:
        parser.description = get_description_with_cmd_summary(
            grp_short_descriptions,
            interface_groups,
            parser.description)

    parts['datalad'] = parser
    lgr.log(5, "Finished setup_parser")
    if return_subparsers:
        return parts
    else:
        return parser
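
A notable aspect of this variant is the two-pass parsing: an early _parse_known_args pass only identifies the requested subcommand, and the expensive per-command imports and subparser setup then happen for that command alone (unless help was requested). A self-contained sketch of the same lazy-subparser pattern, with made-up command names and descriptions standing in for datalad's interface groups:

import argparse
import sys

# Hypothetical command table standing in for the interface groups.
KNOWN_COMMANDS = {
    'create': 'Create a new dataset (placeholder description)',
    'get': 'Get data content (placeholder description)',
}

def build_parser(argv):
    parser = argparse.ArgumentParser(prog='demo')
    # First pass: peek at the arguments to see which subcommand was requested.
    requested = argv[0] if argv and not argv[0].startswith('-') else None
    subparsers = parser.add_subparsers(dest='cmd')
    for name, descr in KNOWN_COMMANDS.items():
        # Only set up (i.e. "import") the requested subcommand; register all
        # of them when no specific command was named (e.g. for --help).
        if requested is not None and name != requested:
            continue
        sub = subparsers.add_parser(name, description=descr)
        sub.set_defaults(func=lambda args, d=descr: print(d))
    return parser

if __name__ == '__main__':
    args = build_parser(sys.argv[1:]).parse_args(sys.argv[1:])
    if hasattr(args, 'func'):
        args.func(args)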
Example #6
File: main.py  Project: xlecours/datalad
def setup_parser(
        cmdlineargs,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        return_subparsers=False):

    lgr.log(5, "Starting to setup_parser")
    # delay since it can be a heavy import
    from ..interface.base import dedent_docstring, get_interface_groups, \
        get_cmdline_command_name, alter_interface_docs_for_cmdline
    # setup cmdline args parser
    parts = {}
    # main parser
    parser = argparse.ArgumentParser(
        # cannot use '@' because we need to input JSON-LD properties (which might come with an @ prefix)
        # MH: question, do we need this at all?
        fromfile_prefix_chars=':',
        # usage="%(prog)s ...",
        description=dedent_docstring("""\
            DataLad provides a unified data distribution with the convenience of git-annex
            repositories as a backend.  DataLad command line tools allow one to manipulate
            (obtain, create, update, publish, etc.) datasets and their collections."""),
        epilog='"Control Your Data"',
        formatter_class=formatter_class,
        add_help=False)
    # common options
    helpers.parser_add_common_opt(parser, 'help')
    helpers.parser_add_common_opt(parser, 'log_level')
    helpers.parser_add_common_opt(parser, 'pbs_runner')
    helpers.parser_add_common_opt(parser, 'change_path')
    helpers.parser_add_common_opt(
        parser,
        'version',
        version='datalad %s\n\n%s' % (datalad.__version__, _license_info()))
    if __debug__:
        parser.add_argument(
            '--dbg', action='store_true', dest='common_debug',
            help="enter Python debugger when uncaught exception happens")
        parser.add_argument(
            '--idbg', action='store_true', dest='common_idebug',
            help="enter IPython debugger when uncaught exception happens")
    parser.add_argument(
        '-c', action='append', dest='cfg_overrides', metavar='KEY=VALUE',
        help="""configuration variable setting. Overrides any configuration
        read from a file, but is potentially overridden itself by configuration
        variables in the process environment.""")
    parser.add_argument(
        '-f', '--output-format', dest='common_output_format',
        default='default',
        type=assure_unicode,
        metavar="{default,json,json_pp,tailored,'<template>'}",
        help="""select format for returned command results. 'default' give one line
        per result reporting action, status, path and an optional message;
        'json' renders a JSON object with all properties for each result (one per
        line); 'json_pp' pretty-prints JSON spanning multiple lines; 'tailored'
        enables a command-specific rendering style that is typically
        tailored to human consumption (no result output otherwise),
        '<template>' reports any value(s) of any result properties in any format
        indicated by the template (e.g. '{path}'; compare with JSON
        output for all key-value choices). The template syntax follows the Python
        "format() language". It is possible to report individual
        dictionary values, e.g. '{metadata[name]}'. If a 2nd-level key contains
        a colon, e.g. 'music:Genre', ':' must be substituted by '#' in the template,
        like so: '{metadata[music#Genre]}'.""")
    parser.add_argument(
        '--report-status', dest='common_report_status',
        choices=['success', 'failure', 'ok', 'notneeded', 'impossible', 'error'],
        help="""constrain command result report to records matching the given
        status. 'success' is a synonym for 'ok' OR 'notneeded', 'failure' stands
        for 'impossible' OR 'error'.""")
    parser.add_argument(
        '--report-type', dest='common_report_type',
        choices=['dataset', 'file'],
        action='append',
        help="""constrain command result report to records matching the given
        type. Can be given more than once to match multiple types.""")
    parser.add_argument(
        '--on-failure', dest='common_on_failure',
        choices=['ignore', 'continue', 'stop'],
        # no default: better be configured per-command
        help="""when an operation fails: 'ignore' and continue with remaining
        operations, the error is logged but does not lead to a non-zero exit code
        of the command; 'continue' works like 'ignore', but an error causes a
        non-zero exit code; 'stop' halts on first failure and yields non-zero exit
        code. A failure is any result with status 'impossible' or 'error'.""")
    #parser.add_argument(
    #    '--run-before', dest='common_run_before',
    #    nargs='+',
    #    action='append',
    #    metavar='PLUGINSPEC',
    #    help="""DataLad plugin to run after the command. PLUGINSPEC is a list
    #    comprised of a plugin name plus optional `key=value` pairs with arguments
    #    for the plugin call (see `plugin` command documentation for details).
    #    This option can be given more than once to run multiple plugins
    #    in the order in which they were given.
    #    For running plugins that require a --dataset argument it is important
    #    to provide the respective dataset as the --dataset argument of the main
    #    command, if it is not in the list of plugin arguments."""),
    #parser.add_argument(
    #    '--run-after', dest='common_run_after',
    #    nargs='+',
    #    action='append',
    #    metavar='PLUGINSPEC',
    #    help="""Like --run-before, but plugins are executed after the main command
    #    has finished."""),
    parser.add_argument(
        '--cmd', dest='_', action='store_true',
        help="""syntactical helper that can be used to end the list of global
        command line options before the subcommand label. Options like
        --run-before can take an arbitrary number of arguments and may require
        to be followed by a single --cmd in order to enable identification
        of the subcommand.""")

    # yoh: atm we only dump to console.  Might adopt the same separation later on
    #      and for consistency will call it --verbose-level as well for now
    # log-level is set via common_opts ATM
    # parser.add_argument('--log-level',
    #                     choices=('critical', 'error', 'warning', 'info', 'debug'),
    #                     dest='common_log_level',
    #                     help="""level of verbosity in log files. By default
    #                          everything, including debug messages is logged.""")
    #parser.add_argument('-l', '--verbose-level',
    #                    choices=('critical', 'error', 'warning', 'info', 'debug'),
    #                    dest='common_verbose_level',
    #                    help="""level of verbosity of console output. By default
    #                         only warnings and errors are printed.""")

    # subparsers
    subparsers = None

    # auto detect all available interfaces and generate a function-based
    # API from them
    cmdlineargs = set(cmdlineargs) if cmdlineargs else set()
    grp_short_descriptions = []
    interface_groups = get_interface_groups()
    interface_groups.append(('plugins', 'Plugins', _get_plugins()))

    for grp_name, grp_descr, _interfaces \
                in sorted(interface_groups, key=lambda x: x[1]):
        # for all subcommand modules it can find
        cmd_short_descriptions = []

        for _intfspec in _interfaces:
            cmd_name = get_cmdline_command_name(_intfspec)
            # for each interface decide whether it actually needs to be imported;
            # test conditions from most to least frequent occasions.
            # We want to import everything for help requests of any kind,
            # including a clueless `datalad` without args
            if not (len(cmdlineargs) == 1 or
                    cmd_name in cmdlineargs or
                    '--help' in cmdlineargs or
                    '-h' in cmdlineargs or
                    '--help-np' in cmdlineargs):
                continue
            if isinstance(_intfspec[1], dict):
                # plugin
                _intf = _load_plugin(_intfspec[1]['file'], fail=False)
                if _intf is None:
                    continue
            else:
                # turn the interface spec into an instance
                lgr.log(5, "Importing module %s " % _intfspec[0])
                try:
                    _mod = import_module(_intfspec[0], package='datalad')
                except Exception as e:
                    lgr.error("Internal error, cannot import interface '%s': %s",
                              _intfspec[0], exc_str(e))
                    continue
                _intf = getattr(_mod, _intfspec[1])
            # deal with optional parser args
            if hasattr(_intf, 'parser_args'):
                parser_args = _intf.parser_args
            else:
                parser_args = dict(formatter_class=formatter_class)
                # use class description, if no explicit description is available
                intf_doc = '' if _intf.__doc__ is None else _intf.__doc__.strip()
                if hasattr(_intf, '_docs_'):
                    # expand docs
                    intf_doc = intf_doc.format(**_intf._docs_)
                parser_args['description'] = alter_interface_docs_for_cmdline(
                    intf_doc)
            # create subparser, use module suffix as cmd name
            if subparsers is None:
                subparsers = parser.add_subparsers()
            subparser = subparsers.add_parser(cmd_name, add_help=False, **parser_args)
            # our own custom help for all commands
            helpers.parser_add_common_opt(subparser, 'help')
            # let module configure the parser
            _intf.setup_parser(subparser)
            # logger for command

            # configure 'run' function for this command
            plumbing_args = dict(
                func=_intf.call_from_parser,
                logger=logging.getLogger(_intf.__module__),
                subparser=subparser)
            if hasattr(_intf, 'result_renderer_cmdline'):
                plumbing_args['result_renderer'] = _intf.result_renderer_cmdline
            subparser.set_defaults(**plumbing_args)
            # store short description for later
            sdescr = getattr(_intf, 'short_description',
                             parser_args['description'].split('\n')[0])
            cmd_short_descriptions.append((cmd_name, sdescr))
            parts[cmd_name] = subparser
        grp_short_descriptions.append(cmd_short_descriptions)

    # create command summary
    cmd_summary = []
    console_width = shutil.get_terminal_size()[0] \
        if hasattr(shutil, 'get_terminal_size') else 80

    for i, grp in enumerate(
            sorted(interface_groups, key=lambda x: x[1])):
        grp_descr = grp[1]
        grp_cmds = grp_short_descriptions[i]

        cmd_summary.append('\n*%s*\n' % (grp_descr,))
        for cd in grp_cmds:
            cmd_summary.append('  %s\n%s'
                               % ((cd[0],
                                  textwrap.fill(
                                      cd[1].rstrip(' .'),
                                      console_width - 5,
                                      initial_indent=' ' * 6,
                                      subsequent_indent=' ' * 6))))
    # we need one last formal section so that the trailing text is not
    # confused with the last command group
    cmd_summary.append('\n*General information*\n')
    parser.description = '%s\n%s\n\n%s' \
        % (parser.description,
           '\n'.join(cmd_summary),
           textwrap.fill(dedent_docstring("""\
    Detailed usage information for individual commands is
    available via command-specific --help, i.e.:
    datalad <command> --help"""),
                         console_width - 5, initial_indent='', subsequent_indent=''))
    parts['datalad'] = parser
    lgr.log(5, "Finished setup_parser")
    if return_subparsers:
        return parts
    else:
        return parser
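
The closing loop in this variant renders the command summary by wrapping each command's short description under its group heading with textwrap.fill. A self-contained sketch of that formatting, with made-up group and command entries (assumes Python 3, where shutil.get_terminal_size is available):

import shutil
import textwrap

# Hypothetical (group description, [(command, short description), ...]) data.
groups = [
    ('Plumbing commands', [
        ('get', 'Get data content for files and/or directories of datasets'),
        ('save', 'Save the current state of a dataset'),
    ]),
]

console_width = shutil.get_terminal_size()[0]
summary = []
for grp_descr, cmds in groups:
    summary.append('\n*%s*\n' % grp_descr)
    for name, descr in cmds:
        summary.append('  %s\n%s' % (
            name,
            textwrap.fill(descr.rstrip(' .'), console_width - 5,
                          initial_indent=' ' * 6, subsequent_indent=' ' * 6)))
print('\n'.join(summary))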