Example #1
0
File: utils.py  Project: jelmer/datalad
    def eval_func(wrapped, instance, args, kwargs):
        """Run a decorated Interface ``__call__`` with standard result handling.

        Resolves the common evaluation options (``on_failure``,
        ``return_type``, ``result_xfm``, ``result_renderer``,
        ``result_filter``, ``proc_pre``/``proc_post``), runs any configured
        pre/post procedures, and funnels every result through
        ``_process_results``.  Depending on ``return_type`` it returns either
        a generator or a materialized list/single item.

        NOTE(review): many names used here (``eval_params``,
        ``eval_defaults``, ``dlcfg``, ``_process_results``, ``lgr``, ``PY2``,
        ...) must be bound in the enclosing decorator/module scope, which is
        not visible in this block — confirm against the full file.
        """
        # for result filters and pre/post procedures
        # we need to produce a dict with argname/argvalue pairs for all args
        # incl. defaults and args given as positionals
        allkwargs = get_allargs_as_kwargs(wrapped, args, kwargs)
        # determine class, the __call__ method of which we are decorating:
        # Ben: Note, that this is a bit dirty in PY2 and imposes restrictions on
        # when and how to use eval_results as well as on how to name a command's
        # module and class. As of now, we are inline with these requirements as
        # far as I'm aware.
        mod = sys.modules[wrapped.__module__]
        if PY2:
            # we rely on:
            # - decorated function is method of a subclass of Interface
            # - the name of the class matches the last part of the module's name
            #   if converted to lower
            # for example:
            # ..../where/ever/mycommand.py:
            # class MyCommand(Interface):
            #     @eval_results
            #     def __call__(..)
            command_class_names = \
                [i for i in mod.__dict__
                 if type(mod.__dict__[i]) == type and
                 issubclass(mod.__dict__[i], Interface) and
                 i.lower().startswith(wrapped.__module__.split('.')[-1].replace('datalad_', '').replace('_', ''))]
            assert len(command_class_names) == 1, (command_class_names, mod.__name__)
            command_class_name = command_class_names[0]
        else:
            # PY3: the qualified name carries the class directly,
            # e.g. 'MyCommand.__call__' -> 'MyCommand'
            command_class_name = wrapped.__qualname__.split('.')[-2]
        _func_class = mod.__dict__[command_class_name]
        lgr.debug("Determined class of decorated function: %s", _func_class)

        # retrieve common options from kwargs, and fall back on the command
        # class attributes, or general defaults if needed
        # NOTE(review): kwargs.pop() mutates the caller-provided kwargs dict
        # (no copy is made here, unlike later revisions of this code)
        common_params = {
            p_name: kwargs.pop(
                p_name,
                getattr(_func_class, p_name, eval_defaults[p_name]))
            for p_name in eval_params}
        # short cuts and configured setup for common options
        on_failure = common_params['on_failure']
        return_type = common_params['return_type']
        # resolve string labels for transformers too
        result_xfm = common_params['result_xfm']
        if result_xfm in known_result_xfms:
            result_xfm = known_result_xfms[result_xfm]
        result_renderer = common_params['result_renderer']
        # TODO remove this conditional branch entirely, done outside
        if not result_renderer:
            result_renderer = dlcfg.get('datalad.api.result-renderer', None)
        # wrap the filter into a helper to be able to pass additional arguments
        # if the filter supports it, but at the same time keep the required interface
        # as minimal as possible. Also do this here, in order to avoid this test
        # to be performed for each return value
        result_filter = common_params['result_filter']
        _result_filter = result_filter
        if result_filter:
            if isinstance(result_filter, Constraint):
                # Constraint instances are used via their __call__
                _result_filter = result_filter.__call__
            # if the filter accepts **kwargs, upgrade it to a closure that also
            # receives the full original argument set of the command call
            if (PY2 and inspect.getargspec(_result_filter).keywords) or \
                    (not PY2 and inspect.getfullargspec(_result_filter).varkw):

                def _result_filter(res):
                    return result_filter(res, **allkwargs)

        def _get_procedure_specs(param_key=None, cfg_key=None, ds=None):
            # Return procedure specs as a list of argument lists: taken from
            # the call's common params if given, otherwise looked up under
            # `cfg_key` in the dataset config (if installed) or global config.
            # Returns None when nothing is configured.
            spec = common_params.get(param_key, None)
            if spec is not None:
                # this is already a list of lists
                return spec

            from datalad.distribution.dataset import Dataset
            ds = ds if isinstance(ds, Dataset) else Dataset(ds) if ds else None
            spec = (ds.config if ds and ds.is_installed()
                    else dlcfg).get(cfg_key, None)
            if spec is None:
                return
            elif not isinstance(spec, tuple):
                spec = [spec]
            return [shlex.split(s) for s in spec]

        # query cfg for defaults
        cmdline_name = cls2cmdlinename(_func_class)
        dataset_arg = allkwargs.get('dataset', None)
        proc_pre = _get_procedure_specs(
            'proc_pre',
            'datalad.{}.proc-pre'.format(cmdline_name),
            ds=dataset_arg)
        proc_post = _get_procedure_specs(
            'proc_post',
            'datalad.{}.proc-post'.format(cmdline_name),
            ds=dataset_arg)

        # this internal helper function actually drives the command
        # generator-style, it may generate an exception if desired,
        # on incomplete results
        def generator_func(*_args, **_kwargs):
            # flag whether to raise an exception
            incomplete_results = []
            # track what actions were performed how many times
            action_summary = {}

            # skip procedures when the decorated command is 'run-procedure'
            # itself, to avoid recursive procedure execution
            if proc_pre and cmdline_name != 'run-procedure':
                from datalad.interface.run_procedure import RunProcedure
                for procspec in proc_pre:
                    lgr.debug('Running configured pre-procedure %s', procspec)
                    # NOTE(review): pre/post procedures pass the raw
                    # `result_filter`, while the main results below use the
                    # wrapped `_result_filter` — confirm this asymmetry is
                    # intended
                    for r in _process_results(
                            RunProcedure.__call__(
                                procspec,
                                dataset=dataset_arg,
                                return_type='generator'),
                            _func_class, action_summary,
                            on_failure, incomplete_results,
                            result_renderer, result_xfm, result_filter,
                            **_kwargs):
                        yield r

            # process main results
            for r in _process_results(
                    wrapped(*_args, **_kwargs),
                    _func_class, action_summary,
                    on_failure, incomplete_results,
                    result_renderer, result_xfm, _result_filter, **_kwargs):
                yield r

            if proc_post and cmdline_name != 'run-procedure':
                from datalad.interface.run_procedure import RunProcedure
                for procspec in proc_post:
                    lgr.debug('Running configured post-procedure %s', procspec)
                    for r in _process_results(
                            RunProcedure.__call__(
                                procspec,
                                dataset=dataset_arg,
                                return_type='generator'),
                            _func_class, action_summary,
                            on_failure, incomplete_results,
                            result_renderer, result_xfm, result_filter,
                            **_kwargs):
                        yield r

            # result summary before a potential exception
            if result_renderer == 'default' and action_summary and \
                    sum(sum(s.values()) for s in action_summary.values()) > 1:
                # give a summary in default mode, when there was more than one
                # action performed
                ui.message("action summary:\n  {}".format(
                    '\n  '.join('{} ({})'.format(
                        act,
                        ', '.join('{}: {}'.format(status, action_summary[act][status])
                                  for status in sorted(action_summary[act])))
                                for act in sorted(action_summary))))

            if incomplete_results:
                raise IncompleteResultsError(
                    failed=incomplete_results,
                    msg="Command did not complete successfully")

        if return_type == 'generator':
            # hand over the generator
            return generator_func(*args, **kwargs)
        else:
            # materialize everything, then return a list (or a single
            # item/None for 'item-or-list')
            @wrapt.decorator
            def return_func(wrapped_, instance_, args_, kwargs_):
                results = wrapped_(*args_, **kwargs_)
                if inspect.isgenerator(results):
                    # unwind generator if there is one, this actually runs
                    # any processing
                    results = list(results)
                # render summaries
                if not result_xfm and result_renderer == 'tailored':
                    # cannot render transformed results
                    if hasattr(_func_class, 'custom_result_summary_renderer'):
                        _func_class.custom_result_summary_renderer(results)
                if return_type == 'item-or-list' and \
                        len(results) < 2:
                    return results[0] if results else None
                else:
                    return results

            return return_func(generator_func)(*args, **kwargs)
Example #2
0
    def eval_func(wrapped, instance, args, kwargs):
        """Run a decorated Interface ``__call__`` with standard result handling.

        Later revision of the same machinery as other variants in this file:
        option resolution is delegated to helpers (``get_wrapped_class``,
        ``get_result_filter``, ``keep_result``, ``xfm_result``), pre/post
        procedures are replaced by JSON-configured hooks, and an optional
        per-result log level override is honored.

        NOTE(review): names like ``eval_params``, ``eval_defaults``,
        ``dlcfg``, ``_process_results``, ``lgr`` must be bound in the
        enclosing decorator/module scope, which is not visible here.
        """
        # NOTE(review): `func` is not defined in this block; presumably it is
        # bound in the enclosing decorator scope (the undecorated function) —
        # confirm against the full file, otherwise this log call would raise
        lgr.log(2, "Entered eval_func for %s", func)
        # for result filters
        # we need to produce a dict with argname/argvalue pairs for all args
        # incl. defaults and args given as positionals
        allkwargs = get_allargs_as_kwargs(wrapped, args, kwargs)

        # determine the command class associated with `wrapped`
        wrapped_class = get_wrapped_class(wrapped)

        # retrieve common options from kwargs, and fall back on the command
        # class attributes, or general defaults if needed
        kwargs = kwargs.copy()  # we will pop, which might cause side-effect
        common_params = {
            p_name: kwargs.pop(
                # go with any explicitly given default
                p_name,
                # otherwise determine the command class and pull any
                # default set in that class
                getattr(
                    wrapped_class,
                    p_name,
                    # or the common default
                    eval_defaults[p_name]))
            for p_name in eval_params
        }

        # short cuts and configured setup for common options
        return_type = common_params['return_type']
        result_filter = get_result_filter(common_params['result_filter'])
        # resolve string labels for transformers too
        result_xfm = known_result_xfms.get(
            common_params['result_xfm'],
            # use verbatim, if not a known label
            common_params['result_xfm'])
        result_renderer = common_params['result_renderer']
        # TODO remove this conditional branch entirely, done outside
        if not result_renderer:
            result_renderer = dlcfg.get('datalad.api.result-renderer', None)
        # look for potential override of logging behavior
        result_log_level = dlcfg.get('datalad.log.result-level', None)

        # query cfg for defaults
        # .is_installed and .config can be costly, so ensure we do
        # it only once. See https://github.com/datalad/datalad/issues/3575
        dataset_arg = allkwargs.get('dataset', None)
        from datalad.distribution.dataset import Dataset
        ds = dataset_arg if isinstance(dataset_arg, Dataset) \
            else Dataset(dataset_arg) if dataset_arg else None
        # look for hooks
        hooks = get_jsonhooks_from_config(ds.config if ds else dlcfg)

        # this internal helper function actually drives the command
        # generator-style, it may generate an exception if desired,
        # on incomplete results
        def generator_func(*_args, **_kwargs):
            # flag whether to raise an exception
            incomplete_results = []
            # track what actions were performed how many times
            action_summary = {}

            # if a custom summary is to be provided, collect the results
            # of the command execution
            results = []
            do_custom_result_summary = result_renderer in ('tailored', 'default') \
                and hasattr(wrapped_class, 'custom_result_summary_renderer')
            # whether the custom renderer also wants the action summary dict
            pass_summary = do_custom_result_summary and \
                getattr(wrapped_class,
                        'custom_result_summary_renderer_pass_summary', None)

            # process main results
            for r in _process_results(
                    # execution
                    wrapped(*_args, **_kwargs),
                    wrapped_class,
                    common_params['on_failure'],
                    # bookkeeping
                    action_summary,
                    incomplete_results,
                    # communication
                    result_renderer,
                    result_log_level,
                    # let renderers get to see how a command was called
                    allkwargs):
                for hook, spec in hooks.items():
                    # run the hooks before we yield the result
                    # this ensures that they are executed before
                    # a potentially wrapper command gets to act
                    # on them
                    if match_jsonhook2result(hook, r, spec['match']):
                        lgr.debug('Result %s matches hook %s', r, hook)
                        # a hook is also a command that yields results
                        # so yield them outside too
                        # users need to pay attention to void infinite
                        # loops, i.e. when a hook yields a result that
                        # triggers that same hook again
                        for hr in run_jsonhook(hook, spec, r, dataset_arg):
                            # apply same logic as for main results, otherwise
                            # any filters would only tackle the primary results
                            # and a mixture of return values could happen
                            if not keep_result(hr, result_filter, **allkwargs):
                                continue
                            hr = xfm_result(hr, result_xfm)
                            # rationale for conditional is a few lines down
                            if hr:
                                yield hr
                if not keep_result(r, result_filter, **allkwargs):
                    continue
                r = xfm_result(r, result_xfm)
                # in case the result_xfm decided to not give us anything
                # exclude it from the results. There is no particular reason
                # to do so other than that it was established behavior when
                # this comment was written. This will not affect any real
                # result record
                if r:
                    yield r

                # collect if summary is desired
                if do_custom_result_summary:
                    results.append(r)

            # result summary before a potential exception
            # custom first
            if do_custom_result_summary:
                if pass_summary:
                    summary_args = (results, action_summary)
                else:
                    summary_args = (results, )
                wrapped_class.custom_result_summary_renderer(*summary_args)
            elif result_renderer == 'default' and action_summary and \
                    sum(sum(s.values()) for s in action_summary.values()) > 1:
                # give a summary in default mode, when there was more than one
                # action performed
                render_action_summary(action_summary)

            if incomplete_results:
                raise IncompleteResultsError(
                    failed=incomplete_results,
                    msg="Command did not complete successfully")

        if return_type == 'generator':
            # hand over the generator
            lgr.log(2, "Returning generator_func from eval_func for %s",
                    wrapped_class)
            return generator_func(*args, **kwargs)
        else:

            # materialize everything, then return a list (or a single
            # item/None for 'item-or-list')
            @wrapt.decorator
            def return_func(wrapped_, instance_, args_, kwargs_):
                results = wrapped_(*args_, **kwargs_)
                if inspect.isgenerator(results):
                    # unwind generator if there is one, this actually runs
                    # any processing
                    results = list(results)
                if return_type == 'item-or-list' and \
                        len(results) < 2:
                    return results[0] if results else None
                else:
                    return results

            lgr.log(2, "Returning return_func from eval_func for %s",
                    wrapped_class)
            return return_func(generator_func)(*args, **kwargs)
Example #3
0
File: utils.py  Project: datalad/datalad
    def eval_func(wrapped, instance, args, kwargs):
        """Run a decorated Interface ``__call__`` with standard result handling.

        Near-identical to the first variant in this file, with two visible
        differences: ``kwargs`` is copied before options are popped from it,
        and results are collected for a class-provided
        ``custom_result_summary_renderer`` when the 'tailored' renderer is
        active.

        NOTE(review): many names used here (``eval_params``,
        ``eval_defaults``, ``dlcfg``, ``_process_results``, ``lgr``, ``PY2``,
        ...) must be bound in the enclosing decorator/module scope, which is
        not visible in this block — confirm against the full file.
        """
        # for result filters and pre/post procedures
        # we need to produce a dict with argname/argvalue pairs for all args
        # incl. defaults and args given as positionals
        allkwargs = get_allargs_as_kwargs(wrapped, args, kwargs)
        # determine class, the __call__ method of which we are decorating:
        # Ben: Note, that this is a bit dirty in PY2 and imposes restrictions on
        # when and how to use eval_results as well as on how to name a command's
        # module and class. As of now, we are inline with these requirements as
        # far as I'm aware.
        mod = sys.modules[wrapped.__module__]
        if PY2:
            # we rely on:
            # - decorated function is method of a subclass of Interface
            # - the name of the class matches the last part of the module's name
            #   if converted to lower
            # for example:
            # ..../where/ever/mycommand.py:
            # class MyCommand(Interface):
            #     @eval_results
            #     def __call__(..)
            command_class_names = \
                [i for i in mod.__dict__
                 if type(mod.__dict__[i]) == type and
                 issubclass(mod.__dict__[i], Interface) and
                 i.lower().startswith(wrapped.__module__.split('.')[-1].replace('datalad_', '').replace('_', ''))]
            assert len(command_class_names) == 1, (command_class_names, mod.__name__)
            command_class_name = command_class_names[0]
        else:
            # PY3: the qualified name carries the class directly,
            # e.g. 'MyCommand.__call__' -> 'MyCommand'
            command_class_name = wrapped.__qualname__.split('.')[-2]
        _func_class = mod.__dict__[command_class_name]
        lgr.debug("Determined class of decorated function: %s", _func_class)

        # retrieve common options from kwargs, and fall back on the command
        # class attributes, or general defaults if needed
        kwargs = kwargs.copy()  # we will pop, which might cause side-effect
        common_params = {
            p_name: kwargs.pop(
                p_name,
                getattr(_func_class, p_name, eval_defaults[p_name]))
            for p_name in eval_params}
        # short cuts and configured setup for common options
        on_failure = common_params['on_failure']
        return_type = common_params['return_type']
        # resolve string labels for transformers too
        result_xfm = common_params['result_xfm']
        if result_xfm in known_result_xfms:
            result_xfm = known_result_xfms[result_xfm]
        result_renderer = common_params['result_renderer']
        # TODO remove this conditional branch entirely, done outside
        if not result_renderer:
            result_renderer = dlcfg.get('datalad.api.result-renderer', None)
        # wrap the filter into a helper to be able to pass additional arguments
        # if the filter supports it, but at the same time keep the required interface
        # as minimal as possible. Also do this here, in order to avoid this test
        # to be performed for each return value
        result_filter = common_params['result_filter']
        _result_filter = result_filter
        if result_filter:
            if isinstance(result_filter, Constraint):
                # Constraint instances are used via their __call__
                _result_filter = result_filter.__call__
            # if the filter accepts **kwargs, upgrade it to a closure that also
            # receives the full original argument set of the command call
            if (PY2 and inspect.getargspec(_result_filter).keywords) or \
                    (not PY2 and inspect.getfullargspec(_result_filter).varkw):

                def _result_filter(res):
                    return result_filter(res, **allkwargs)

        def _get_procedure_specs(param_key=None, cfg_key=None, ds=None):
            # Return procedure specs as a list of argument lists: taken from
            # the call's common params if given, otherwise looked up under
            # `cfg_key` in the dataset config (if installed) or global config.
            # Returns None when nothing is configured.
            spec = common_params.get(param_key, None)
            if spec is not None:
                # this is already a list of lists
                return spec

            from datalad.distribution.dataset import Dataset
            ds = ds if isinstance(ds, Dataset) else Dataset(ds) if ds else None
            spec = (ds.config if ds and ds.is_installed()
                    else dlcfg).get(cfg_key, None)
            if spec is None:
                return
            elif not isinstance(spec, tuple):
                spec = [spec]
            return [shlex.split(s) for s in spec]

        # query cfg for defaults
        cmdline_name = cls2cmdlinename(_func_class)
        dataset_arg = allkwargs.get('dataset', None)
        proc_pre = _get_procedure_specs(
            'proc_pre',
            'datalad.{}.proc-pre'.format(cmdline_name),
            ds=dataset_arg)
        proc_post = _get_procedure_specs(
            'proc_post',
            'datalad.{}.proc-post'.format(cmdline_name),
            ds=dataset_arg)

        # this internal helper function actually drives the command
        # generator-style, it may generate an exception if desired,
        # on incomplete results
        def generator_func(*_args, **_kwargs):
            # flag whether to raise an exception
            incomplete_results = []
            # track what actions were performed how many times
            action_summary = {}

            # skip procedures when the decorated command is 'run-procedure'
            # itself, to avoid recursive procedure execution
            if proc_pre and cmdline_name != 'run-procedure':
                from datalad.interface.run_procedure import RunProcedure
                for procspec in proc_pre:
                    lgr.debug('Running configured pre-procedure %s', procspec)
                    # NOTE(review): pre/post procedures pass the raw
                    # `result_filter`, while the main results below use the
                    # wrapped `_result_filter` — confirm this asymmetry is
                    # intended
                    for r in _process_results(
                            RunProcedure.__call__(
                                procspec,
                                dataset=dataset_arg,
                                return_type='generator'),
                            _func_class, action_summary,
                            on_failure, incomplete_results,
                            result_renderer, result_xfm, result_filter,
                            **_kwargs):
                        yield r

            # if a custom summary is to be provided, collect the results
            # of the command execution
            results = []
            do_custom_result_summary = result_renderer == 'tailored' \
                and hasattr(_func_class, 'custom_result_summary_renderer')

            # process main results
            for r in _process_results(
                    wrapped(*_args, **_kwargs),
                    _func_class, action_summary,
                    on_failure, incomplete_results,
                    result_renderer, result_xfm, _result_filter, **_kwargs):
                yield r
                # collect if summary is desired
                if do_custom_result_summary:
                    results.append(r)

            if proc_post and cmdline_name != 'run-procedure':
                from datalad.interface.run_procedure import RunProcedure
                for procspec in proc_post:
                    lgr.debug('Running configured post-procedure %s', procspec)
                    for r in _process_results(
                            RunProcedure.__call__(
                                procspec,
                                dataset=dataset_arg,
                                return_type='generator'),
                            _func_class, action_summary,
                            on_failure, incomplete_results,
                            result_renderer, result_xfm, result_filter,
                            **_kwargs):
                        yield r

            # result summary before a potential exception
            # custom first
            if do_custom_result_summary:
                _func_class.custom_result_summary_renderer(results)
            elif result_renderer == 'default' and action_summary and \
                    sum(sum(s.values()) for s in action_summary.values()) > 1:
                # give a summary in default mode, when there was more than one
                # action performed
                ui.message("action summary:\n  {}".format(
                    '\n  '.join('{} ({})'.format(
                        act,
                        ', '.join('{}: {}'.format(status, action_summary[act][status])
                                  for status in sorted(action_summary[act])))
                                for act in sorted(action_summary))))

            if incomplete_results:
                raise IncompleteResultsError(
                    failed=incomplete_results,
                    msg="Command did not complete successfully")

        if return_type == 'generator':
            # hand over the generator
            return generator_func(*args, **kwargs)
        else:
            # materialize everything, then return a list (or a single
            # item/None for 'item-or-list')
            @wrapt.decorator
            def return_func(wrapped_, instance_, args_, kwargs_):
                results = wrapped_(*args_, **kwargs_)
                if inspect.isgenerator(results):
                    # unwind generator if there is one, this actually runs
                    # any processing
                    results = list(results)
                # render summaries
                if not result_xfm and result_renderer == 'tailored':
                    # cannot render transformed results
                    if hasattr(_func_class, 'custom_result_summary_renderer'):
                        _func_class.custom_result_summary_renderer(results)
                if return_type == 'item-or-list' and \
                        len(results) < 2:
                    return results[0] if results else None
                else:
                    return results

            return return_func(generator_func)(*args, **kwargs)