def custom_result_renderer(res, **kwargs): from datalad.ui import ui # should we attempt to remove an unknown sibling, complain like Git does if res['status'] == 'notneeded' and res['action'] == 'remove-sibling': ui.message('{warn}: No sibling "{name}" in dataset {path}'.format( warn=ac.color_word('Warning', ac.LOG_LEVEL_COLORS['WARNING']), **res)) return if res['status'] != 'ok' or not res.get('action', '').endswith('-sibling'): # logging complained about this already return path = op.relpath(res['path'], res['refds']) if res.get( 'refds', None) else res['path'] got_url = 'url' in res spec = '{}{}{}{}'.format(res.get('url', ''), ' (' if got_url else '', res.get('annex-externaltype', 'git'), ')' if got_url else '') ui.message('{path}: {name}({with_annex}) [{spec}]'.format( **dict( res, path=path, # TODO report '+' for special remotes with_annex='+' if 'annex-uuid' in res \ else ('-' if res.get('annex-ignore', None) else '?'), spec=spec)))
def custom_result_renderer(res, **kwargs):  # pragma: no cover
    """Render status/diff results; silent unless an 'ok', non-clean state."""
    if not (res['status'] == 'ok' \
            and res['action'] in ('status', 'diff') \
            and res.get('state', None) != 'clean'):
        # logging reported already
        return
    from datalad.ui import ui
    # when to render relative paths:
    # 1) if a dataset arg was given
    # 2) if CWD is the refds
    refds = res.get('refds', None)
    refds = refds if kwargs.get('dataset', None) is not None \
        or refds == os.getcwd() else None
    # Note: We have to force unicode for res['path'] because
    # interface.utils encodes it on py2 before passing it to
    # custom_result_renderer().
    path = assure_unicode(res['path']) if refds is None \
        else text_type(ut.Path(res['path']).relative_to(refds))
    type_ = res.get('type', res.get('type_src', ''))
    # pad to the widest state label ('untracked') so paths line up
    max_len = len('untracked')
    state = res.get('state', 'unknown')
    ui.message(u'{fill}{state}: {path}{type_}'.format(
        fill=' ' * max(0, max_len - len(state)),
        state=ac.color_word(
            state,
            STATE_COLOR_MAP.get(res.get('state', 'unknown'))),
        path=path,
        type_=' ({})'.format(
            ac.color_word(type_, ac.MAGENTA) if type_ else '')))
def custom_result_summary_renderer(results):  # pragma: no cover
    """Summarize sizes (and availability) of annexed files in the results."""
    # fish out sizes of annexed files. those will only be present
    # with --annex ...
    annexed = [
        (int(r['bytesize']), r.get('has_content', None))
        for r in results
        if r.get('action', None) == 'status' \
        and 'key' in r and 'bytesize' in r]
    if annexed:
        # availability ('has_content') is only reported in some modes
        have_availability = any(a[1] is not None for a in annexed)
        total_size = bytes2human(sum(a[0] for a in annexed))
        # we have availability info encoded in the results
        from datalad.ui import ui
        if have_availability:
            ui.message(
                "{} annex'd {} ({}/{} present/total size)".format(
                    len(annexed),
                    single_or_plural('file', 'files', len(annexed)),
                    bytes2human(sum(a[0] for a in annexed if a[1])),
                    total_size))
        else:
            ui.message(
                "{} annex'd {} ({} recorded total size)".format(
                    len(annexed),
                    single_or_plural('file', 'files', len(annexed)),
                    total_size))
def _render_result_json(res, prettyprint):
    """Print a result record as JSON.

    Parameters
    ----------
    res : dict
      Result record to serialize.
    prettyprint : bool
      If True, indent the JSON output by two spaces for readability.
    """
    # BUG FIX: the original condition was `k not in ('logger')`; `('logger')`
    # is just the string 'logger', so this performed *substring* matching and
    # would also drop keys like 'log' or 'logge'. Use a direct comparison.
    # The 'logger' entry is excluded because logger objects are not
    # JSON-serializable and carry no value for consumers of the output.
    ui.message(json.dumps(
        {k: v for k, v in res.items() if k != 'logger'},
        sort_keys=True,
        indent=2 if prettyprint else None,
        default=str))
def generic_result_renderer(res):
    """Render any result record as a single 'action(status): ...' line.

    Silently skips 'notneeded' results and tolerates missing keys by
    substituting placeholders.
    """
    if res.get('status', None) != 'notneeded':
        path = res.get('path', None)
        if path and res.get('refds'):
            try:
                # shorten to a path relative to the reference dataset
                path = relpath(path, res['refds'])
            except ValueError:
                # can happen, e.g., on windows with paths from different
                # drives. just go with the original path in this case
                pass
        ui.message('{action}({status}):{path}{type}{msg}{err}'.format(
            action=ac.color_word(
                res.get('action', '<action-unspecified>'),
                ac.BOLD),
            status=ac.color_status(res.get('status', '<status-unspecified>')),
            path=' {}'.format(path) if path else '',
            type=' ({})'.format(
                ac.color_word(res['type'], ac.MAGENTA))
            if 'type' in res else '',
            # a message may be a plain string or a (fmt, arg, ...) tuple
            msg=' [{}]'.format(
                res['message'][0] % res['message'][1:]
                if isinstance(res['message'], tuple)
                else res['message'])
            if res.get('message', None) else '',
            # error details are only shown for non-ok results
            err=ac.color_word(
                ' [{}]'.format(
                    res['error_message'][0] % res['error_message'][1:]
                    if isinstance(res['error_message'], tuple)
                    else res['error_message']),
                ac.RED)
            if res.get('error_message', None)
            and res.get('status', None) != 'ok' else ''))
def custom_result_renderer(res, **kwargs):
    """Render a 'containers' listing result; anything else goes generic."""
    if res["action"] == "containers":
        label = ac.color_word(res["name"], ac.MAGENTA)
        location = op.relpath(res["path"], res["refds"])
        ui.message("{name} -> {path}".format(name=label, path=location))
    else:
        default_result_renderer(res)
def custom_result_renderer(res, **kwargs):
    """Report the commit a dataset was saved at."""
    from datalad.ui import ui
    # only dataset-type results with a known path are of interest
    if not res or res.get('type', None) != 'dataset' or 'path' not in res:
        return
    dataset = Dataset(res['path'])
    ui.message('Saved state: {0} for {1}'.format(
        dataset.repo.get_hexsha(), dataset))
def result_renderer_cmdline(res, args):
    """Print the saved commit for every dataset in the result list."""
    from datalad.ui import ui
    if not res:
        return
    for dataset in res:
        head = dataset.repo.repo.head.commit
        ui.message('Saved state: {0} for {1}'.format(head.hexsha, dataset))
def custom_result_renderer(res, **kwargs):
    """Render 'run' results; special-cases dry runs and failed commands."""
    dry_run = kwargs.get("dry_run")
    if dry_run and "dry_run_info" in res:
        if dry_run == "basic":
            _display_basic(res)
        elif dry_run == "command":
            # only the fully expanded command line was requested
            ui.message(res["dry_run_info"]["cmd_expanded"])
        else:
            raise ValueError(f"Unknown dry-run mode: {dry_run!r}")
    else:
        # on a hard failure, hint at how to save the partial changes
        if kwargs.get("on_failure") == "stop" and \
                res.get("action") == "run" and res.get("status") == "error":
            msg_path = res.get("msg_path")
            if msg_path:
                ds_path = res["path"]
                # tailor the hint to the API the user is working with
                if datalad.get_apimode() == 'python':
                    help = f"\"Dataset('{ds_path}').save(path='.', " \
                           "recursive=True, message_file='%s')\""
                else:
                    help = "'datalad save -d . -r -F %s'"
                lgr.info(
                    "The command had a non-zero exit code. "
                    "If this is expected, you can save the changes with "
                    f"{help}",
                    # shorten to the relative path for a more concise
                    # message
                    Path(msg_path).relative_to(ds_path))
        generic_result_renderer(res)
def generator_func(*_args, **_kwargs):
    """Drive a command call: configured pre-procedures, the main command,
    post-procedures, then an action summary; raises when results failed.
    """
    # flag whether to raise an exception
    incomplete_results = []
    # track what actions were performed how many times
    action_summary = {}
    # run configured pre-procedures (skipped when running a procedure
    # itself, to avoid recursion)
    if proc_pre and cmdline_name != 'run-procedure':
        from datalad.interface.run_procedure import RunProcedure
        for procspec in proc_pre:
            lgr.debug('Running configured pre-procedure %s', procspec)
            for r in _process_results(
                    RunProcedure.__call__(
                        procspec,
                        dataset=dataset_arg,
                        return_type='generator'),
                    _func_class, action_summary,
                    on_failure, incomplete_results,
                    result_renderer, result_xfm, result_filter,
                    **_kwargs):
                yield r
    # process main results
    for r in _process_results(
            wrapped(*_args, **_kwargs),
            _func_class, action_summary,
            on_failure, incomplete_results,
            result_renderer, result_xfm, _result_filter, **_kwargs):
        yield r
    # run configured post-procedures (same recursion guard as above)
    if proc_post and cmdline_name != 'run-procedure':
        from datalad.interface.run_procedure import RunProcedure
        for procspec in proc_post:
            lgr.debug('Running configured post-procedure %s', procspec)
            for r in _process_results(
                    RunProcedure.__call__(
                        procspec,
                        dataset=dataset_arg,
                        return_type='generator'),
                    _func_class, action_summary,
                    on_failure, incomplete_results,
                    result_renderer, result_xfm, result_filter,
                    **_kwargs):
                yield r
    # result summary before a potential exception
    if result_renderer == 'default' and action_summary and \
            sum(sum(s.values()) for s in action_summary.values()) > 1:
        # give a summary in default mode, when there was more than one
        # action performed
        ui.message("action summary:\n {}".format(
            '\n '.join('{} ({})'.format(
                act,
                ', '.join('{}: {}'.format(status, action_summary[act][status])
                          for status in sorted(action_summary[act])))
                       for act in sorted(action_summary))))
    if incomplete_results:
        raise IncompleteResultsError(
            failed=incomplete_results,
            msg="Command did not complete successfully")
def custom_result_renderer(res, **kwargs):
    """Render run-procedure results; discovery/help modes get a custom
    layout, everything else falls back to the generic renderer."""
    from datalad.interface.utils import generic_result_renderer
    from datalad.ui import ui
    if res['status'] != 'ok' or 'procedure' not in res.get('action', ''):
        # it's not our business
        generic_result_renderer(res)
        return
    if kwargs.get('discover', None):
        ui.message('{name} ({path}){msg}'.format(
            # bold-faced name, if active
            name=ac.color_word(res['procedure_name'], ac.BOLD)
            if res['state'] == 'executable' else res['procedure_name'],
            path=res['path'],
            # a message may be a plain string or a (fmt, arg, ...) tuple
            msg=' [{}]'.format(
                res['message'][0] % res['message'][1:]
                if isinstance(res['message'], tuple) else res['message'])
            if 'message' in res else ''))
    elif kwargs.get('help_proc', None):
        ui.message('{name} ({path}){help}'.format(
            name=ac.color_word(res['procedure_name'], ac.BOLD),
            path=op.relpath(res['path'], res['refds'])
            if res.get('refds', None) else res['path'],
            help='{nl}{msg}'.format(
                nl=os.linesep,
                msg=res['message'][0] % res['message'][1:]
                if isinstance(res['message'], tuple) else res['message'])
            if 'message' in res else ''))
    else:
        generic_result_renderer(res)
def custom_result_renderer(res, **kwargs):  # pragma: more cover
    """Render status/diff results; clean states are silently skipped and
    foreign results go to the generic renderer."""
    if (res['status'] == 'ok'
            and res['action'] in ('status', 'diff')
            and res.get('state') == 'clean'):
        # this renderer will be silent for clean status|diff results
        return
    if res['status'] != 'ok' or res['action'] not in ('status', 'diff'):
        # whatever this renderer cannot account for, send to generic
        generic_result_renderer(res)
        return
    from datalad.ui import ui
    # when to render relative paths:
    # 1) if a dataset arg was given
    # 2) if CWD is the refds
    refds = res.get('refds', None)
    refds = refds if kwargs.get('dataset', None) is not None \
        or refds == os.getcwd() else None
    path = res['path'] if refds is None \
        else str(ut.Path(res['path']).relative_to(refds))
    type_ = res.get('type', res.get('type_src', ''))
    # pad to the widest state label ('untracked') so paths line up
    max_len = len('untracked')
    state = res.get('state', 'unknown')
    ui.message(u'{fill}{state}: {path}{type_}'.format(
        fill=' ' * max(0, max_len - len(state)),
        state=ac.color_word(
            state,
            STATE_COLOR_MAP.get(res.get('state', 'unknown'))),
        path=path,
        type_=' ({})'.format(
            ac.color_word(type_, ac.MAGENTA) if type_ else '')))
def _display_basic(res):
    """Pretty-print the dry-run information of a 'run' result record."""
    ui.message(ac.color_word("Dry run information", ac.MAGENTA))

    def fmt_line(key, value, multiline=False):
        # one ' key: value' line; multiline values start on their own line
        return (" {key}:{sep}{value}"
                .format(key=ac.color_word(key, ac.BOLD),
                        sep=os.linesep + " " if multiline else " ",
                        value=value))

    dry_run_info = res["dry_run_info"]
    lines = [fmt_line("location", dry_run_info["pwd_full"])]
    # TODO: Inputs and outputs could be pretty long. These may be worth
    # truncating.
    inputs = dry_run_info["inputs"]
    if inputs:
        lines.append(fmt_line("expanded inputs", inputs, multiline=True))
    outputs = dry_run_info["outputs"]
    if outputs:
        lines.append(fmt_line("expanded outputs", outputs, multiline=True))
    cmd = res["run_info"]["cmd"]
    cmd_expanded = dry_run_info["cmd_expanded"]
    lines.append(fmt_line("command", cmd, multiline=True))
    # only show the expanded form if placeholder expansion changed it
    if cmd != cmd_expanded:
        lines.append(fmt_line("expanded command", cmd_expanded,
                              multiline=True))
    ui.message(os.linesep.join(lines))
def custom_result_renderer(res, **kwargs):  # pragma: no cover
    """Render status/diff results; silent unless an 'ok', non-clean state."""
    if not (res['status'] == 'ok' \
            and res['action'] in ('status', 'diff') \
            and res.get('state', None) != 'clean'):
        # logging reported already
        return
    from datalad.ui import ui
    # when to render relative paths:
    # 1) if a dataset arg was given
    # 2) if CWD is the refds
    refds = res.get('refds', None)
    refds = refds if kwargs.get('dataset', None) is not None \
        or refds == os.getcwd() else None
    path = res['path'] if refds is None \
        else str(ut.Path(res['path']).relative_to(refds))
    type_ = res.get('type', res.get('type_src', ''))
    # pad to the widest state label ('untracked') so paths line up
    max_len = len('untracked')
    state = res['state']
    ui.message('{fill}{state}: {path}{type_}'.format(
        fill=' ' * max(0, max_len - len(state)),
        state=ac.color_word(
            state, STATE_COLOR_MAP.get(res['state'], ac.WHITE)),
        path=path,
        type_=' ({})'.format(
            ac.color_word(type_, ac.MAGENTA) if type_ else '')))
def custom_result_summary_renderer(results):
    """Emit a message when a clean() run found nothing to remove."""
    # 'notneeded' results are not rendered individually by default, so
    # avoid total silence when every single result was 'notneeded'
    if not any(r['status'] != 'notneeded' for r in results):
        from datalad.ui import ui
        ui.message("nothing to clean, no temporary locations present.")
def custom_result_renderer(res, **kwargs):
    """Announce a successful dataset creation; otherwise report nothing."""
    from datalad.ui import ui
    created_dataset = (
        res.get('action', None) == 'create'
        and res.get('status', None) == 'ok'
        and res.get('type', None) == 'dataset')
    if created_dataset:
        ui.message("Created dataset at {}.".format(res['path']))
    else:
        ui.message("Nothing was created")
def custom_result_renderer(res, **kwargs):  # pragma: no cover
    """Announce a successful dataset creation; otherwise report nothing."""
    from datalad.ui import ui
    signature = (res.get('action', None),
                 res.get('status', None),
                 res.get('type', None))
    if signature == ('create', 'ok', 'dataset'):
        ui.message("Created dataset at {}.".format(res['path']))
        return
    ui.message("Nothing was created")
def __call__(dataset=None, sensitive=None, clipboard=None):
    """Gather 'wtf' debug information about datalad and the environment.

    Yields a single status result whose 'infos' dict holds the report;
    optionally copies the rendered report to the clipboard.
    """
    from datalad.distribution.dataset import require_dataset
    from datalad.support.exceptions import NoDatasetArgumentFound
    from datalad.interface.results import get_status_dict
    ds = None
    try:
        ds = require_dataset(dataset, check_installed=False,
                             purpose='reporting')
    except NoDatasetArgumentFound:
        # failure is already logged
        pass
    if ds and not ds.is_installed():
        # we don't deal with absent datasets
        ds = None
    if sensitive:
        # only hand a config manager over when sensitive values may be shown
        if ds is None:
            from datalad import cfg
        else:
            cfg = ds.config
    else:
        cfg = None
    from datalad.ui import ui
    from datalad.support.external_versions import external_versions
    infos = {}
    res = get_status_dict(
        action='wtf',
        path=ds.path if ds else op.abspath(op.curdir),
        type='dataset' if ds else 'directory',
        status='ok',
        logger=lgr,
        infos=infos,
    )
    infos['datalad'] = _describe_datalad()
    infos['git-annex'] = _describe_annex()
    infos['system'] = _describe_system()
    infos['environment'] = _describe_environment()
    infos['configuration'] = _describe_configuration(cfg, sensitive)
    # NOTE(review): 'extentions' is misspelled, but consumers of the report
    # may rely on this key -- confirm before renaming
    infos['extentions'] = _describe_extensions()
    infos['metadata_extractors'] = _describe_metadata_extractors()
    infos['dependencies'] = _describe_dependencies()
    if ds:
        try:
            infos['dataset'] = _describe_dataset(ds, sensitive)
        except InvalidGitRepositoryError as e:
            # report, rather than crash, on broken repositories
            infos['dataset'] = {"invalid": exc_str(e)}
    if clipboard:
        # pyperclip is an optional dependency; fail with guidance
        external_versions.check(
            'pyperclip', msg="It is needed to be able to use clipboard")
        import pyperclip
        report = _render_report(res)
        pyperclip.copy(report)
        ui.message("WTF information of length %s copied to clipboard"
                   % len(report))
    yield res
    return
def generator_func(*_args, **_kwargs):
    """Drive a command call and yield processed results; gives an action
    summary and raises when any result failed.
    """
    # flag whether to raise an exception
    incomplete_results = []
    # track what actions were performed how many times
    action_summary = {}
    # TODO needs replacement plugin is gone
    #for pluginspec in run_before or []:
    #    lgr.debug('Running pre-proc plugin %s', pluginspec)
    #    for r in _process_results(
    #            Plugin.__call__(
    #                pluginspec,
    #                dataset=allkwargs.get('dataset', None),
    #                return_type='generator'),
    #            _func_class, action_summary,
    #            on_failure, incomplete_results,
    #            result_renderer, result_xfm, result_filter,
    #            **_kwargs):
    #        yield r
    # process main results
    for r in _process_results(
            wrapped(*_args, **_kwargs),
            _func_class, action_summary,
            on_failure, incomplete_results,
            result_renderer, result_xfm, _result_filter, **_kwargs):
        yield r
    # TODO needs replacement plugin is gone
    #for pluginspec in run_after or []:
    #    lgr.debug('Running post-proc plugin %s', pluginspec)
    #    for r in _process_results(
    #            Plugin.__call__(
    #                pluginspec,
    #                dataset=allkwargs.get('dataset', None),
    #                return_type='generator'),
    #            _func_class, action_summary,
    #            on_failure, incomplete_results,
    #            result_renderer, result_xfm, result_filter,
    #            **_kwargs):
    #        yield r
    # result summary before a potential exception
    if result_renderer == 'default' and action_summary and \
            sum(sum(s.values()) for s in action_summary.values()) > 1:
        # give a summary in default mode, when there was more than one
        # action performed
        ui.message("action summary:\n {}".format(
            '\n '.join('{} ({})'.format(
                act,
                ', '.join('{}: {}'.format(status, action_summary[act][status])
                          for status in sorted(action_summary[act])))
                       for act in sorted(action_summary))))
    if incomplete_results:
        raise IncompleteResultsError(
            failed=incomplete_results,
            msg="Command did not complete successfully")
def result_renderer_cmdline(res, args):
    """Render a 'Saved state' line for each saved dataset."""
    from datalad.ui import ui
    if not res:
        return
    for item in res:
        ui.message('Saved state: {0} for {1}'.format(
            item.repo.repo.head.commit.hexsha, item))
def custom_result_renderer(res, **kwargs):
    """Announce the commit hash that a dataset result was saved at."""
    from datalad.ui import ui
    usable = bool(res) \
        and res.get('type', None) == 'dataset' \
        and 'path' in res
    if not usable:
        return
    ds = Dataset(res['path'])
    hexsha = ds.repo.get_hexsha()
    ui.message('Saved state: {0} for {1}'.format(hexsha, ds))
def _display_suppressed_message(nsimilar, ndisplayed, final=False):
    """Print how many similar result messages were not shown."""
    # +1 because there was the original result + nsimilar displayed.
    hidden = nsimilar - ndisplayed + 1
    if hidden > 0:
        label = single_or_plural(
            "message has", "messages have", hidden, False)
        ui.message(
            ' [{} similar {} been suppressed]'.format(hidden, label),
            cr="\n" if final else "\r")
def _search_from_virgin_install(dataset, query):
    """Assist an interactive user who searched without any dataset.

    Must be called from within an exception handler (note the trailing
    bare ``raise``); may install and search the default superdataset.
    """
    #
    # this is to be nice to newbies
    #
    exc_info = sys.exc_info()
    if dataset is None:
        if not ui.is_interactive:
            raise NoDatasetArgumentFound(
                "No DataLad dataset found. Specify a dataset to be "
                "searched, or run interactively to get assistance "
                "installing a queriable superdataset."
            )
        # none was provided so we could ask user whether he possibly wants
        # to install our beautiful mega-duper-super-dataset?
        # TODO: following logic could possibly benefit other actions.
        DEFAULT_DATASET_PATH = cfg.obtain('datalad.locations.default-dataset')
        if os.path.exists(DEFAULT_DATASET_PATH):
            default_ds = Dataset(DEFAULT_DATASET_PATH)
            if default_ds.is_installed():
                if ui.yesno(
                        title="No DataLad dataset found at current location",
                        text="Would you like to search the DataLad "
                             "superdataset at %r?" % DEFAULT_DATASET_PATH):
                    pass
                else:
                    # re-raise the original NoDatasetArgumentFound
                    raise exc_info[1]
            else:
                raise NoDatasetArgumentFound(
                    "No DataLad dataset found at current location. "
                    "The DataLad superdataset location %r exists, "
                    "but does not contain an dataset."
                    % DEFAULT_DATASET_PATH)
        elif ui.yesno(
                title="No DataLad dataset found at current location",
                text="Would you like to install the DataLad "
                     "superdataset at %r?" % DEFAULT_DATASET_PATH):
            from datalad.api import install
            default_ds = install(DEFAULT_DATASET_PATH, source='///')
            ui.message(
                "From now on you can refer to this dataset using the "
                "label '///'"
            )
        else:
            raise exc_info[1]
        lgr.info(
            "Performing search using DataLad superdataset %r",
            default_ds.path
        )
        # delegate the search to the superdataset
        for res in default_ds.search(query):
            yield res
        return
    else:
        raise  # this function is called within exception handling block
def __call__(dataset=None, sensitive=None, clipboard=None):
    """Gather 'wtf' debug information about datalad and the environment.

    Yields a single status result whose 'infos' dict holds the report;
    optionally copies the rendered report to the clipboard.
    """
    from datalad.distribution.dataset import require_dataset
    from datalad.support.exceptions import NoDatasetArgumentFound
    from datalad.interface.results import get_status_dict
    ds = None
    try:
        ds = require_dataset(dataset, check_installed=False,
                             purpose='reporting')
    except NoDatasetArgumentFound:
        # failure is already logged
        pass
    if ds and not ds.is_installed():
        # we don't deal with absent datasets
        ds = None
    if sensitive:
        # only hand a config manager over when sensitive values may be shown
        if ds is None:
            from datalad import cfg
        else:
            cfg = ds.config
    else:
        cfg = None
    from datalad.ui import ui
    from datalad.support.external_versions import external_versions
    infos = {}
    res = get_status_dict(
        action='wtf',
        path=ds.path if ds else op.abspath(op.curdir),
        type='dataset' if ds else 'directory',
        status='ok',
        logger=lgr,
        infos=infos,
    )
    infos['datalad'] = _describe_datalad()
    infos['git-annex'] = _describe_annex()
    infos['system'] = _describe_system()
    infos['environment'] = _describe_environment()
    infos['configuration'] = _describe_configuration(cfg, sensitive)
    # NOTE(review): 'extentions' is misspelled, but consumers of the report
    # may rely on this key -- confirm before renaming
    infos['extentions'] = _describe_extensions()
    infos['metadata_extractors'] = _describe_metadata_extractors()
    infos['dependencies'] = _describe_dependencies()
    if ds:
        infos['dataset'] = _describe_dataset(ds, sensitive)
    if clipboard:
        # pyperclip is an optional dependency; fail with guidance
        external_versions.check(
            'pyperclip', msg="It is needed to be able to use clipboard")
        import pyperclip
        report = _render_report(res)
        pyperclip.copy(report)
        ui.message("WTF information of length %s copied to clipboard"
                   % len(report))
    yield res
    return
def custom_result_renderer(res, **kwargs):
    """Render run results, honoring the requested dry-run mode."""
    dry_run = kwargs.get("dry_run")
    if not (dry_run and "dry_run_info" in res):
        # not a dry-run record; use the standard rendering
        default_result_renderer(res)
        return
    if dry_run == "basic":
        _display_basic(res)
    elif dry_run == "command":
        ui.message(res["dry_run_info"]["cmd_expanded"])
    else:
        raise ValueError(f"Unknown dry-run mode: {dry_run!r}")
def result_renderer_cmdline(res, args):
    """Summarize which files were unlocked."""
    from datalad.ui import ui
    if res is None:
        res = []
    elif not isinstance(res, list):
        res = [res]
    if not res:
        ui.message("Nothing was unlocked")
        return
    listing = '\n'.join(str(item) for item in res)
    ui.message("Unlocked {n} files:\n{items}".format(
        n=len(res), items=listing))
def result_renderer_cmdline(res, args):
    """Render a command-line summary of installed items."""
    from datalad.ui import ui
    if res is None:
        res = []
    if not len(res):
        # NOTE(review): the empty-result message talks about repo creation
        # while the non-empty branch reports installed items -- confirm
        # which wording matches this command's actual action
        ui.message("No repos were created... oops")
        return
    items = '\n'.join(map(str, res))
    msg = "{n} installed {obj} available at\n{items}".format(
        obj='items are' if len(res) > 1 else 'item is',
        n=len(res),
        items=items)
    ui.message(msg)
def default_result_renderer(res):
    """Render one result as 'action(status): path (type) [message]'."""
    # 'notneeded' results are not rendered at all
    if res.get('status', None) != 'notneeded':
        ui.message('{action}({status}): {path}{type}{msg}'.format(
            action=ac.color_word(res['action'], ac.BOLD),
            status=ac.color_status(res['status']),
            # relative to the reference dataset, when known
            path=relpath(res['path'], res['refds'])
            if res.get('refds', None) else res['path'],
            type=' ({})'.format(ac.color_word(res['type'], ac.MAGENTA))
            if 'type' in res else '',
            # a message may be a plain string or a (fmt, arg, ...) tuple
            msg=' [{}]'.format(
                res['message'][0] % res['message'][1:]
                if isinstance(res['message'], tuple)
                else res['message'])
            if 'message' in res else ''))
def result_renderer_cmdline(res, args):
    """Report the set of unlocked files on the command line."""
    from datalad.ui import ui
    entries = [] if res is None else (
        res if isinstance(res, list) else [res])
    if len(entries) == 0:
        ui.message("Nothing was unlocked")
        return
    ui.message("Unlocked {n} files:\n{items}".format(
        n=len(entries),
        items='\n'.join(map(str, entries))))
def result_renderer_cmdline(res):
    """Summarize the datasets that received a new sibling."""
    from datalad.ui import ui
    if res is None:
        res = []
    elif not isinstance(res, list):
        res = [res]
    if not res:
        ui.message("No sibling was added")
        return
    count = len(res)
    target = '{} datasets'.format(count) if count > 1 else 'one dataset'
    ui.message("Added sibling to {ds}:\n{items}".format(
        ds=target, items='\n'.join(map(str, res))))
def result_renderer_cmdline(res, args):
    """Summarize uninstalled datasets and files for the command line."""
    from datalad.ui import ui
    if not res:
        ui.message("Nothing was uninstalled")
        return
    parts = ["{n} {obj} uninstalled:\n".format(
        obj='items were' if len(res) > 1 else 'item was',
        n=len(res))]
    for item in res:
        if isinstance(item, Dataset):
            parts.append("Dataset: %s\n" % item.path)
        else:
            parts.append("File: %s\n" % item)
    ui.message(''.join(parts))
def custom_result_renderer(res, **kwargs):
    """Render configuration get/dump results; others go to the default
    renderer with a synthesized 'name=value' message."""
    if (res['status'] != 'ok'
            or res['action'] not in (
                'get_configuration', 'dump_configuration')):
        if 'message' not in res and 'name' in res:
            # build a 'name=value' message for the default renderer
            suffix = '={}'.format(res['value']) if 'value' in res else ''
            res['message'] = '{}{}'.format(
                res['name'], suffix)
        default_result_renderer(res)
        return
    # TODO source
    from datalad.ui import ui
    name = res['name']
    if res['action'] == 'dump_configuration':
        # emit purpose/description as '# '-prefixed comment lines
        for key in ('purpose', 'description'):
            s = res.get(key)
            if s:
                ui.message('\n'.join(wrap(
                    s,
                    initial_indent='# ',
                    subsequent_indent='# ',
                )))
    if kwargs.get('recursive', False):
        have_subds = res['path'] != res['refds']
        # we need to mark up from which dataset results are reported
        prefix = '<ds>{}{}:'.format(
            '/' if have_subds else '',
            Path(res['path']).relative_to(res['refds']).as_posix()
            if have_subds else '',
        )
    else:
        prefix = ''
    if kwargs.get('action', None) == 'dump':
        if 'value_type' in res:
            # describe the constraint as a comment above the setting
            value_type = res['value_type']
            vtype = value_type.short_description() \
                if hasattr(value_type, 'short_description') \
                else str(value_type)
            vtype = f'Value constraint: {vtype}'
            ui.message('\n'.join(wrap(
                vtype,
                initial_indent='# ',
                subsequent_indent='# ',
                break_on_hyphens=False,
            )))
        else:
            vtype = ''
        value = res['value'] if res['value'] is not None else ''
        if value in (True, False):
            # normalize booleans for git-config syntax
            value = str(value).lower()
        ui.message(f'{prefix}{ac.color_word(name, ac.BOLD)}={value}')
    else:
        ui.message('{}{}'.format(
            prefix,
            res['value'] if res['value'] is not None else '',
        ))
def custom_result_summary_renderer(results):  # pragma: more cover
    """Report any unique 'hints' carried by the results, at the very end.

    Parameters
    ----------
    results : list of dict
      Result records; records may carry a 'hints' entry.
    """
    # collect unique hints in first-seen order.
    # ROBUSTNESS FIX: the original used set(), which raises TypeError when
    # a hint value is unhashable (e.g. a list of hint strings); a linear
    # membership test also keeps a stable, deterministic ordering.
    hints = []
    for r in results:
        hint = r.get('hints', None)
        if hint is not None and hint not in hints:
            hints.append(hint)
    if hints:
        from datalad.ui import ui
        from datalad.support import ansi_colors
        intro = ansi_colors.color_word(
            "Potential hints to solve encountered errors: ",
            ansi_colors.YELLOW)
        ui.message(intro)
        # number the hints 1..N for readability
        # (also avoids the original's shadowing of the builtin 'id')
        for num, hint in enumerate(hints, start=1):
            ui.message("{}: {}".format(
                ansi_colors.color_word(num, ansi_colors.YELLOW),
                hint))
def result_renderer_cmdline(res):
    """Print a summary of installed items and their locations."""
    from datalad.ui import ui
    if res is None:
        res = []
    elif not isinstance(res, list):
        res = [res]
    if not res:
        ui.message("Nothing was installed")
        return
    listing = '\n'.join(str(item) for item in res)
    ui.message("{n} installed {obj} available at\n{items}".format(
        obj='items are' if len(res) > 1 else 'item is',
        n=len(res),
        items=listing))
def custom_result_renderer(res, **kwargs):
    """Render OSF sibling-creation results; everything else goes generic."""
    from datalad.ui import ui
    action = res['action']
    bold_action = ac.color_word(action, ac.BOLD)
    colored_status = ac.color_status(res['status'])
    if action == "create-project-osf":
        ui.message("{action}({status}): {url}".format(
            action=bold_action,
            status=colored_status,
            url=res['url']))
    elif action == "add-sibling-osf":
        ui.message("{action}({status})".format(
            action=bold_action,
            status=colored_status))
    else:
        from datalad.interface.utils import default_result_renderer
        default_result_renderer(res, **kwargs)
def custom_result_renderer(res, **kwargs):
    """Print a column-aligned 'state(type): path' line per ok result."""
    from datalad.ui import ui
    if res['status'] != 'ok':
        # logging reported already
        return
    start = res.get('refds', None)
    path = relpath(res['path'], start=start) if start else res['path']
    type_ = res.get('type', res.get('type_src', ''))
    state_msg = '{}{}'.format(
        res['state'], '({})'.format(type_ if type_ else ''))
    # pad to the widest possible label so the colons line up
    width = len('untracked(directory)')
    ui.message('{fill}{state_msg}: {path}'.format(
        fill=' ' * max(0, width - len(state_msg)),
        state_msg=state_msg,
        path=path))
def result_renderer_cmdline(res):
    """Report the datasets to which a sibling was added."""
    from datalad.ui import ui
    entries = [] if res is None else (
        res if isinstance(res, list) else [res])
    if not entries:
        ui.message("No sibling was added")
        return
    n = len(entries)
    ui.message("Added sibling to {ds}:\n{items}".format(
        ds='{} datasets'.format(n) if n > 1 else 'one dataset',
        items='\n'.join(str(e) for e in entries)))
def default_result_renderer(res):
    """Render one result as 'action(status): path (type) [message]'."""
    # 'notneeded' results are not rendered at all
    if res.get('status', None) != 'notneeded':
        ui.message('{action}({status}): {path}{type}{msg}'.format(
            action=ac.color_word(res['action'], ac.BOLD),
            status=ac.color_status(res['status']),
            # relative to the reference dataset, when known
            path=relpath(res['path'], res['refds'])
            if res.get('refds', None) else res[
                'path'],
            type=' ({})'.format(
                ac.color_word(res['type'], ac.MAGENTA)
            ) if 'type' in res else '',
            # a message may be a plain string or a (fmt, arg, ...) tuple
            msg=' [{}]'.format(
                res['message'][0] % res['message'][1:]
                if isinstance(res['message'], tuple) else res[
                    'message']) if 'message' in res else ''))
def custom_result_renderer(res, **kwargs):
    """Render an ok result as an aligned 'state(type): path' line."""
    from datalad.ui import ui
    if res['status'] != 'ok':
        # non-ok results have already been reported via logging
        return
    base = res['path']
    anchor = res.get('refds', None)
    shown_path = relpath(base, start=anchor) if anchor else base
    kind = res.get('type', res.get('type_src', ''))
    label = '{}{}'.format(res['state'], '({})'.format(kind if kind else ''))
    padding = ' ' * max(0, len('untracked(directory)') - len(label))
    ui.message('{fill}{state_msg}: {path}'.format(
        fill=padding, state_msg=label, path=shown_path))
def custom_result_renderer(res, **kwargs):
    """Render metadata results: path, available metadata keys, and tags."""
    if res['status'] != 'ok' or not res.get('action', None) == 'metadata':
        # logging complained about this already
        return
    # list the path, available metadata keys, and tags
    path = op.relpath(res['path'], res['refds']) \
        if res.get('refds', None) else res['path']
    meta = res.get('metadata', {})
    ui.message('{path}{type}:{spacer}{meta}{tags}'.format(
        path=ac.color_word(path, ac.BOLD),
        type=' ({})'.format(
            ac.color_word(res['type'], ac.MAGENTA))
        if 'type' in res else '',
        # add a space only when non-tag keys will actually be listed
        spacer=' ' if len([m for m in meta if m != 'tag']) else '',
        # ' -' when metadata is present but empty; ' aggregated' when absent
        meta=','.join(k for k in sorted(meta.keys())
                      if k not in ('tag', '@context', '@id'))
        if meta else ' -' if 'metadata' in res else ' aggregated',
        tags='' if 'tag' not in meta else ' [{}]'.format(
            ','.join(assure_list(meta['tag'])))))
def custom_result_renderer(res, **kwargs):
    """Render run-procedure results; discovery/help modes get a custom
    layout, everything else falls back to the default renderer."""
    from datalad.ui import ui
    from datalad.interface.utils import default_result_renderer
    if res['status'] != 'ok':
        # logging complained about this already
        return
    if 'procedure' not in res.get('action', ''):
        # it's not our business
        default_result_renderer(res)
        return
    if kwargs.get('discover', None):
        ui.message('{name} ({path}){msg}'.format(
            name=ac.color_word(res['procedure_name'], ac.BOLD),
            path=op.relpath(
                res['path'],
                res['refds']) if res.get('refds', None) else res['path'],
            # a message may be a plain string or a (fmt, arg, ...) tuple
            msg=' [{}]'.format(
                res['message'][0] % res['message'][1:]
                if isinstance(res['message'], tuple) else res['message'])
            if 'message' in res else ''
        ))
    elif kwargs.get('help_proc', None):
        ui.message('{name} ({path}){help}'.format(
            name=ac.color_word(res['procedure_name'], ac.BOLD),
            path=op.relpath(
                res['path'],
                res['refds']) if res.get('refds', None) else res['path'],
            help='{nl}{msg}'.format(
                nl=os.linesep,
                msg=res['message'][0] % res['message'][1:]
                if isinstance(res['message'], tuple) else res['message'])
            if 'message' in res else ''
        ))
    else:
        default_result_renderer(res)
def custom_result_renderer(res, **kwargs):
    """Like 'json_pp', but skip non-error results without flagged objects.
    """
    # FIXME: I think the proper way to do this is to use 'result_filter',
    # but I couldn't seem to get eval_results to detect the filter when I
    # used
    #
    #     result_renderer = "json_pp"
    #     result_filter = lambda x: ...
    #
    # Also, I want to keep the "message" key for errors.
    from datalad.ui import ui
    if res["status"] == "error":
        # errors are rendered in full, message included
        payload = dict(res.items())
    elif "report" in res and res["report"]["objects"]:
        skip = ["status", "message", "logger"]
        payload = {k: v for k, v in res.items() if k not in skip}
    else:
        payload = {}
    if payload:
        ui.message(json.dumps(payload, sort_keys=True, indent=2))
def result_renderer_cmdline(res, args):
    """Per-file add/failure report for the command line."""
    from datalad.ui import ui
    from os import linesep
    if res is None:
        res = []
    elif not isinstance(res, list):
        res = [res]
    if not res:
        hint = '' if args.recursive \
            else " (consider --recursive if that is unexpected)"
        ui.message("Nothing was added{}".format(hint))
        return
    lines = []
    for item in res:
        if item.get('success', False):
            outcome = "Added"
        else:
            outcome = "Failed to add. (%s)" % item.get(
                'note', 'unknown reason')
        lines.append("{suc} {path}".format(
            suc=outcome, path=item.get('file')))
    ui.message(linesep.join(lines))
def custom_result_renderer(res, **kwargs):
    """Render sibling-command results as 'path: name(annex) [spec]'."""
    from datalad.ui import ui
    if res['status'] != 'ok' or not res.get('action', '').endswith('-sibling') :
        # logging complained about this already
        return
    # report path relative to the reference dataset, when one is known
    path = relpath(res['path'], res['refds']) \
        if res.get('refds', None) else res['path']
    got_url = 'url' in res
    # e.g. "<url> (<remote type>)", or just the remote type without a URL
    spec = '{}{}{}{}'.format(
        res.get('url', ''),
        ' (' if got_url else '',
        res.get('annex-externaltype', 'git'),
        ')' if got_url else '')
    ui.message('{path}: {name}({with_annex}) [{spec}]'.format(
        **dict(
            res,
            path=path,
            # TODO report '+' for special remotes
            with_annex='+' if 'annex-uuid' in res \
            else ('-' if res.get('annex-ignore', None) else '?'),
            spec=spec)))
def result_renderer_cmdline(res, args):
    """Summarize a get() run and, for small or verbose runs, list items."""
    from datalad.ui import ui
    from os import linesep
    if res is None:
        res = []
    if not isinstance(res, list):
        res = [res]
    if not len(res):
        ui.message("Got nothing new")
        return
    # provide summary
    # Dataset items count as success; dicts carry their own 'success' flag
    nsuccess = sum(item.get('success', False)
                   if isinstance(item, dict) else True
                   for item in res)
    nfailure = len(res) - nsuccess
    msg = "Tried to get %d %s." % (
        len(res), single_or_plural("file", "files", len(res)))
    if nsuccess:
        msg += " Got %d. " % nsuccess
    if nfailure:
        msg += " Failed to get %d." % (nfailure,)
    ui.message(msg)
    # if just a few or less than initially explicitly requested
    if len(res) < 10 or args.verbose:
        msg = linesep.join([
            "{path} ... {suc}".format(
                suc="ok." if isinstance(item, Dataset)
                or item.get('success', False)
                else "failed. (%s)" % item.get('note', 'unknown reason'),
                path=item.get('file')
                if isinstance(item, dict) else item.path)
            for item in res])
        ui.message(msg)
def custom_result_summary_renderer(res):
    """Summarize a get() run: counts plus a short per-item listing."""
    from datalad.ui import ui
    from os import linesep
    if not len(res):
        ui.message("Got nothing new")
        return
    nfiles = count_results(res, type='file')
    nok = count_results(res, type='file', status='ok')
    nbad = nfiles - nok
    summary = "Tried to get %d %s that had no content yet." % (
        nfiles, single_or_plural("file", "files", nfiles))
    if nok:
        summary += " Successfully obtained %d. " % nok
    if nbad:
        summary += " %d (failed)." % (nbad,)
    ui.message(summary)
    # if just a few or less than initially explicitly requested
    if len(res) < 10:
        lines = [
            "{path}{type} ... {suc}".format(
                suc=item.get('status'),
                path=item.get('path'),
                type=' [{}]'.format(item['type']) if 'type' in item else '')
            for item in res]
        ui.message(linesep.join(lines))
def result_renderer_cmdline(res, cmdlineargs):
    """Render search results in 'custom', 'json', or 'yaml' format.

    ``res`` is an iterable of ``(location, record_dict)`` pairs; the output
    format is taken from ``cmdlineargs.format`` (defaulting to 'custom').
    """
    from datalad.ui import ui

    if res is None:
        res = []
    output_format = cmdlineargs.format or 'custom'
    if output_format == 'custom':
        # multiline if multiple were requested and we need to disambiguate
        wants_many = (
            cmdlineargs.report in ('*', ['*'])
            or cmdlineargs.report_matched
            or (cmdlineargs.report is not None
                and len(cmdlineargs.report) > 1))
        if wants_many:
            ichr = jchr = '\n'
            fmt = ' {k}: {v}'
        else:
            jchr = ', '
            ichr = ' '
            fmt = '{v}'
        anything = False
        for location, r in res:
            # XXX Yarik thinks that Match should be replaced with actual path to the dataset
            fields = [
                fmt.format(
                    k=ansi_colors.color_word(k, ansi_colors.FIELD),
                    v=pretty_bytes(r[k]))
                for k in sorted(r)]
            ui.message('{}{}{}{}'.format(
                ansi_colors.color_word(location, ansi_colors.DATASET),
                ':' if r else '',
                ichr,
                jchr.join(fields)))
            anything = True
        if not anything:
            ui.message("Nothing to report")
    elif output_format == 'json':
        import json
        ui.message(json.dumps(list(map(itemgetter(1), res)), indent=2))
    elif output_format == 'yaml':
        import yaml
        lgr.warning("yaml output support is not yet polished")
        ui.message(yaml.safe_dump(
            list(map(itemgetter(1), res)), allow_unicode=True))
def result_renderer_cmdline(res, args):
    """Report sibling configuration results on the command line.

    ``res`` is a list of ``(dataset, url, existed)`` triples; a dry run is
    announced first when ``args.dryrun`` is set.
    """
    from datalad.ui import ui

    res = assure_list(res)
    if args.dryrun:
        ui.message('DRYRUN -- Anticipated results:')
    if not len(res):
        ui.message("Nothing done")
        return
    for d, url, existed in res:
        existed_note = " (existing repository)" if existed else ''
        ui.message(
            "'{}'{} configured as sibling '{}' for {}".format(
                url, existed_note, args.name, d))
def __call__(match, dataset=None, search=None, report=None,
             report_matched=False, format='custom', regex=False):
    """Search dataset meta-data for matches and yield ``(location, report)``.

    Requires an installed dataset; when none can be found and the session
    is interactive, offers to search (or install) the central superdataset
    at ``LOCAL_CENTRAL_PATH`` instead and delegates to its ``search``.
    Meta-data is loaded via ``get_metadata`` and cached with ``pickle``,
    keyed on the repository HEAD hexsha.

    Parameters
    ----------
    match : str or list
      Pattern(s) each meta-data value must match (all must hit).
    dataset : Dataset or path, optional
      Dataset to search; falls back to superdataset handling when absent.
    search : str or list, optional
      Restrict matching to these (case-insensitive) property names.
    report : str or list, optional
      Property names to include in the yielded report dict ('*' for all).
    report_matched : bool, optional
      Also report the properties that actually matched.
    format : str, optional
      Passed through when delegating to the superdataset's search.
    regex : bool, optional
      Treat ``match`` entries as regular expressions instead of substrings.

    Yields
    ------
    tuple
      ``(path, report_dict)`` per matching dataset record.
    """
    lgr.debug("Initiating search for match=%r and dataset %r",
              match, dataset)
    try:
        ds = require_dataset(dataset, check_installed=True,
                             purpose='dataset search')
        if ds.id is None:
            raise NoDatasetArgumentFound(
                "This does not seem to be a dataset (no DataLad dataset ID "
                "found). 'datalad create --force %s' can initialize "
                "this repository as a DataLad dataset" % ds.path)
    except NoDatasetArgumentFound:
        exc_info = sys.exc_info()
        if dataset is None:
            if not ui.is_interactive:
                raise NoDatasetArgumentFound(
                    "No DataLad dataset found. Specify a dataset to be "
                    "searched, or run interactively to get assistance "
                    "installing a queriable superdataset."
                )
            # none was provided so we could ask user either he possibly wants
            # to install our beautiful mega-duper-super-dataset?
            # TODO: following logic could possibly benefit other actions.
            if os.path.exists(LOCAL_CENTRAL_PATH):
                central_ds = Dataset(LOCAL_CENTRAL_PATH)
                if central_ds.is_installed():
                    if ui.yesno(
                            title="No DataLad dataset found at current location",
                            text="Would you like to search the DataLad "
                                 "superdataset at %r?" % LOCAL_CENTRAL_PATH):
                        pass
                    else:
                        reraise(*exc_info)
                else:
                    raise NoDatasetArgumentFound(
                        "No DataLad dataset found at current location. "
                        "The DataLad superdataset location %r exists, "
                        "but does not contain an dataset." % LOCAL_CENTRAL_PATH)
            elif ui.yesno(
                    title="No DataLad dataset found at current location",
                    text="Would you like to install the DataLad "
                         "superdataset at %r?" % LOCAL_CENTRAL_PATH):
                from datalad.api import install
                central_ds = install(LOCAL_CENTRAL_PATH, source='///')
                ui.message(
                    "From now on you can refer to this dataset using the "
                    "label '///'"
                )
            else:
                reraise(*exc_info)
            lgr.info(
                "Performing search using DataLad superdataset %r",
                central_ds.path
            )
            # delegate the entire query to the superdataset's search
            for res in central_ds.search(
                    match,
                    search=search,
                    report=report,
                    report_matched=report_matched,
                    format=format,
                    regex=regex):
                yield res
            return
        else:
            raise
    # cache lives under the dataset's git dir; filename embeds the pickle
    # protocol so caches from different Python versions do not collide
    cache_dir = opj(opj(ds.path, get_git_dir(ds.path)), 'datalad', 'cache')
    mcache_fname = opj(cache_dir, 'metadata.p%d' % pickle.HIGHEST_PROTOCOL)
    meta = None
    if os.path.exists(mcache_fname):
        lgr.debug("use cached metadata of '{}' from {}".format(
            ds, mcache_fname))
        meta, checksum = pickle.load(open(mcache_fname, 'rb'))
        # TODO add more sophisticated tests to decide when the cache
        # is no longer valid
        if checksum != ds.repo.get_hexsha():
            # errrr, try again below
            meta = None
    # don't put in 'else', as yet to be written tests above might fail
    # and require regenerating meta data
    if meta is None:
        lgr.info("Loading and caching local meta-data... might take a few seconds")
        if not exists(cache_dir):
            os.makedirs(cache_dir)
        meta = get_metadata(
            ds, guess_type=False, ignore_subdatasets=False,
            ignore_cache=False)
        # merge all info on datasets into a single dict per dataset
        meta = flatten_metadata_graph(meta)
        # extract graph, if any
        meta = meta.get('@graph', meta)
        # build simple queriable representation
        if not isinstance(meta, list):
            meta = [meta]
        # sort entries by location (if present)
        sort_keys = ('location', 'description', 'id')
        meta = sorted(meta, key=lambda m: tuple(m.get(x, "") for x in sort_keys))
        # use pickle to store the optimized graph in the cache
        pickle.dump(
            # graph plus checksum from what it was built
            (meta, ds.repo.get_hexsha()),
            open(mcache_fname, 'wb'))
        lgr.debug("cached meta data graph of '{}' in {}".format(
            ds, mcache_fname))
    # normalize report/match/search arguments to lists
    if report in ('', ['']):
        report = []
    elif report and not isinstance(report, list):
        report = [report]
    match = assure_list(match)
    search = assure_list(search)
    # convert all to lower case for case insensitive matching
    search = {x.lower() for x in search}

    def get_in_matcher(m):
        """Function generator to provide closure for a specific value of m"""
        mlower = m.lower()

        def matcher(s):
            return mlower in s.lower()
        return matcher

    # one matcher callable per match expression: regex search or
    # case-insensitive substring containment
    matchers = [
        re.compile(match_).search
        if regex
        else get_in_matcher(match_)
        for match_ in match
    ]
    # location should be reported relative to current location
    # We will assume that noone chpwd while we are yielding
    ds_path_prefix = get_path_prefix(ds.path)

    # So we could provide a useful message whenever there were not a single
    # dataset with specified `--search` properties
    observed_properties = set()

    # for every meta data set
    for mds in meta:
        hit = False
        hits = [False] * len(matchers)
        matched_fields = set()
        if not mds.get('type', mds.get('schema:type', None)) == 'Dataset':
            # we are presently only dealing with datasets
            continue
        # TODO consider the possibility of nested and context/graph dicts
        # but so far we were trying to build simple lists of dicts, as much
        # as possible
        if not isinstance(mds, dict):
            raise NotImplementedError(
                "nested meta data is not yet supported")

        # manual loop for now
        for k, v in iteritems(mds):
            if search:
                k_lower = k.lower()
                if k_lower not in search:
                    if observed_properties is not None:
                        # record for providing a hint later
                        observed_properties.add(k_lower)
                    continue
                # so we have a hit, no need to track
                observed_properties = None
            # matchers work on text; flatten containers first
            if isinstance(v, dict) or isinstance(v, list):
                v = text_type(v)
            for imatcher, matcher in enumerate(matchers):
                if matcher(v):
                    hits[imatcher] = True
                    matched_fields.add(k)
            # a record counts as a hit only if *all* matchers fired
            if all(hits):
                hit = True
                # no need to do it longer than necessary
                if not report_matched:
                    break
        if hit:
            location = mds.get('location', '.')
            report_ = matched_fields.union(report if report else {}) \
                if report_matched else report
            if report_ == ['*']:
                report_dict = mds
            elif report_:
                report_dict = {k: mds[k] for k in report_ if k in mds}
                if report_ and not report_dict:
                    lgr.debug(
                        'meta data match for %s, but no to-be-reported '
                        'properties (%s) found. Present properties: %s',
                        location, ", ".join(report_), ", ".join(sorted(mds))
                    )
            else:
                report_dict = {}  # it was empty but not None -- asked to
                # not report any specific field
            if isinstance(location, (list, tuple)):
                # could be that the same dataset installed into multiple
                # locations. For now report them separately
                for l in location:
                    yield opj(ds_path_prefix, l), report_dict
            else:
                yield opj(ds_path_prefix, location), report_dict

    if search and observed_properties is not None:
        # nothing matched the requested --search properties; suggest
        # close property names from what was actually observed
        import difflib
        suggestions = {
            s: difflib.get_close_matches(s, observed_properties)
            for s in search
        }
        suggestions_str = "\n ".join(
            "%s for %s" % (", ".join(choices), s)
            for s, choices in iteritems(suggestions)
            if choices
        )
        lgr.warning(
            "Found no properties which matched one of the one you "
            "specified (%s). May be you meant one among: %s.\n"
            "Suggestions:\n"
            " %s",
            ", ".join(search),
            ", ".join(observed_properties),
            suggestions_str if suggestions_str.strip() else "none"
        )
def __call__(dataset=None, sensitive=None, sections=None, decor=None,
             clipboard=None):
    """Collect 'wtf' environment/configuration info and yield one result.

    Parameters
    ----------
    dataset : Dataset or path, optional
      Dataset to report on; when absent or not installed, a plain
      directory report is produced instead.
    sensitive : bool-like, optional
      When true, configuration is consulted (dataset config, or global
      datalad config if no dataset); otherwise no config is passed on.
    sections : list, optional
      Names of report sections to produce; defaults to all available.
    decor : optional
      Passed through into the result record (rendering decoration).
    clipboard : bool-like, optional
      When true, also copy the rendered report via ``pyperclip``.

    Yields
    ------
    dict
      A single 'wtf' status result carrying the collected ``infos``.
    """
    from datalad.distribution.dataset import require_dataset
    from datalad.support.exceptions import NoDatasetArgumentFound
    from datalad.interface.results import get_status_dict

    ds = None
    try:
        ds = require_dataset(dataset, check_installed=False,
                             purpose='reporting')
    except NoDatasetArgumentFound:
        # failure is already logged
        pass
    if ds and not ds.is_installed():
        # we don't deal with absent datasets
        ds = None
    if sensitive:
        if ds is None:
            # no dataset -> use the global datalad config
            from datalad import cfg
        else:
            cfg = ds.config
    else:
        # non-sensitive report: do not expose any configuration
        cfg = None
    from datalad.ui import ui
    from datalad.support.external_versions import external_versions

    # section name -> collected info, in report order
    infos = OrderedDict()
    res = get_status_dict(
        action='wtf',
        path=ds.path if ds else op.abspath(op.curdir),
        type='dataset' if ds else 'directory',
        status='ok',
        logger=lgr,
        decor=decor,
        infos=infos,
    )
    # Define section callables which require variables.
    # so there is no side-effect on module level original
    section_callables = SECTION_CALLABLES.copy()
    section_callables['location'] = partial(_describe_location, res)
    section_callables['configuration'] = \
        partial(_describe_configuration, cfg, sensitive)
    if ds:
        section_callables['dataset'] = \
            partial(_describe_dataset, ds, sensitive)
    else:
        # no installed dataset -> no dataset section
        section_callables.pop('dataset')
    assert all(section_callables.values())  # check if none was missed
    if sections is None:
        sections = sorted(list(section_callables))
    for s in sections:
        infos[s] = section_callables[s]()
    if clipboard:
        external_versions.check(
            'pyperclip', msg="It is needed to be able to use clipboard")
        import pyperclip
        report = _render_report(res)
        pyperclip.copy(assure_bytes(report))
        ui.message("WTF information of length %s copied to clipboard"
                   % len(report))
    yield res
    return
def generator_func(*_args, **_kwargs):
    """Drive a wrapped command, yielding processed results one by one.

    Runs configured pre-procedures, the wrapped command itself, and
    post-procedures, passing all produced records through
    ``_process_results``. Afterwards renders a result summary (custom or
    default) and raises ``IncompleteResultsError`` if any result failed.

    NOTE(review): this is a closure — ``proc_pre``, ``proc_post``,
    ``cmdline_name``, ``dataset_arg``, ``wrapped``, ``_func_class``,
    ``on_failure``, ``result_renderer``, ``result_xfm``,
    ``result_filter`` and ``_result_filter`` come from the enclosing
    scope (presumably the @eval_results decorator).
    """
    # flag whether to raise an exception
    incomplete_results = []
    # track what actions were performed how many times
    action_summary = {}
    if proc_pre and cmdline_name != 'run-procedure':
        from datalad.interface.run_procedure import RunProcedure
        for procspec in proc_pre:
            lgr.debug('Running configured pre-procedure %s', procspec)
            for r in _process_results(
                    RunProcedure.__call__(
                        procspec,
                        dataset=dataset_arg,
                        return_type='generator'),
                    _func_class, action_summary,
                    on_failure, incomplete_results,
                    result_renderer, result_xfm, result_filter,
                    **_kwargs):
                yield r
    # if a custom summary is to be provided, collect the results
    # of the command execution
    results = []
    do_custom_result_summary = result_renderer == 'tailored' \
        and hasattr(_func_class, 'custom_result_summary_renderer')
    # process main results
    for r in _process_results(
            wrapped(*_args, **_kwargs),
            _func_class, action_summary,
            on_failure, incomplete_results,
            result_renderer, result_xfm, _result_filter, **_kwargs):
        yield r
        # collect if summary is desired
        if do_custom_result_summary:
            results.append(r)
    if proc_post and cmdline_name != 'run-procedure':
        from datalad.interface.run_procedure import RunProcedure
        for procspec in proc_post:
            lgr.debug('Running configured post-procedure %s', procspec)
            for r in _process_results(
                    RunProcedure.__call__(
                        procspec,
                        dataset=dataset_arg,
                        return_type='generator'),
                    _func_class, action_summary,
                    on_failure, incomplete_results,
                    result_renderer, result_xfm, result_filter,
                    **_kwargs):
                yield r
    # result summary before a potential exception
    # custom first
    if do_custom_result_summary:
        _func_class.custom_result_summary_renderer(results)
    elif result_renderer == 'default' and action_summary and \
            sum(sum(s.values()) for s in action_summary.values()) > 1:
        # give a summary in default mode, when there was more than one
        # action performed
        ui.message("action summary:\n {}".format(
            '\n '.join('{} ({})'.format(
                act,
                ', '.join('{}: {}'.format(
                    status, action_summary[act][status])
                    for status in sorted(action_summary[act])))
                for act in sorted(action_summary))))
    if incomplete_results:
        # raise only after all results were yielded, so consumers saw
        # everything that was produced
        raise IncompleteResultsError(
            failed=incomplete_results,
            msg="Command did not complete successfully")
def _process_results(
        results, cmd_class,
        action_summary, on_failure, incomplete_results,
        result_renderer, result_xfm, result_filter, **kwargs):
    """Handle each result record: count, log, filter, render, transform.

    Parameters
    ----------
    results : iterable of dict
      Result records produced by a command.
    cmd_class : type
      Command class; consulted for a ``custom_result_renderer`` in
      'tailored' mode.
    action_summary : dict
      Mutated in place: per-action counts of result statuses.
    on_failure : {'ignore', 'continue', 'stop'}
      'continue'/'stop' record 'impossible'/'error' results in
      ``incomplete_results``; 'stop' aborts after the first failure.
    incomplete_results : list
      Mutated in place: collects failed results for the caller to raise on.
    result_renderer : str or callable or None
      Output mode: None/'disabled', 'default', 'json'/'json_pp',
      'tailored', or a callable invoked per result.
    result_xfm : callable or None
      Optional per-result transform; a None return drops the result.
    result_filter : callable or None
      Optional predicate; falsy/ValueError excludes the result.

    Yields
    ------
    dict
      Each surviving (possibly transformed) result record.
    """
    # private helper pf @eval_results
    # loop over results generated from some source and handle each
    # of them according to the requested behavior (logging, rendering, ...)
    for res in results:
        if not res or 'action' not in res:
            # XXX Yarik has to no clue on how to track the origin of the
            # record to figure out WTF, so he just skips it
            continue
        if PY2:
            # normalize to bytes for processing on py2; reversed below
            for k, v in res.items():
                if isinstance(v, unicode):
                    res[k] = v.encode('utf-8')
        # tally this result's status under its action
        actsum = action_summary.get(res['action'], {})
        if res['status']:
            actsum[res['status']] = actsum.get(res['status'], 0) + 1
            action_summary[res['action']] = actsum
        ## log message, if a logger was given
        # remove logger instance from results, as it is no longer useful
        # after logging was done, it isn't serializable, and generally
        # pollutes the output
        res_lgr = res.pop('logger', None)
        if isinstance(res_lgr, logging.Logger):
            # didn't get a particular log function, go with default
            res_lgr = getattr(res_lgr, default_logchannels[res['status']])
        if res_lgr and 'message' in res:
            msg = res['message']
            msgargs = None
            if isinstance(msg, tuple):
                # (template, args...) form for lazy %-expansion
                msgargs = msg[1:]
                msg = msg[0]
            if 'path' in res:
                msg = '{} [{}({})]'.format(
                    msg, res['action'], res['path'])
            if msgargs:
                # support string expansion of logging to avoid runtime cost
                res_lgr(msg, *msgargs)
            else:
                res_lgr(msg)
        ## error handling
        # looks for error status, and report at the end via
        # an exception
        if on_failure in ('continue', 'stop') \
                and res['status'] in ('impossible', 'error'):
            incomplete_results.append(res)
            if on_failure == 'stop':
                # first fail -> that's it
                # raise will happen after the loop
                break
        if result_filter:
            try:
                if not result_filter(res):
                    raise ValueError('excluded by filter')
            except ValueError as e:
                lgr.debug('not reporting result (%s)', exc_str(e))
                continue
        ## output rendering
        # TODO RF this in a simple callable that gets passed into this function
        if result_renderer is None or result_renderer == 'disabled':
            pass
        elif result_renderer == 'default':
            default_result_renderer(res)
        elif result_renderer in ('json', 'json_pp'):
            ui.message(json.dumps(
                {k: v for k, v in res.items()
                 if k not in ('message', 'logger')},
                sort_keys=True,
                indent=2 if result_renderer.endswith('_pp') else None,
                default=lambda x: str(x)))
        elif result_renderer == 'tailored':
            if hasattr(cmd_class, 'custom_result_renderer'):
                cmd_class.custom_result_renderer(res, **kwargs)
        elif hasattr(result_renderer, '__call__'):
            # user-supplied renderer; failures are logged, not raised
            try:
                result_renderer(res, **kwargs)
            except Exception as e:
                lgr.warn('Result rendering failed for: %s [%s]',
                         res, exc_str(e))
        else:
            raise ValueError(
                'unknown result renderer "{}"'.format(result_renderer))
        if PY2:
            # reverse the bytes normalization from above before yielding
            for k, v in res.items():
                if isinstance(v, str):
                    res[k] = v.decode('utf-8')
        if result_xfm:
            res = result_xfm(res)
            if res is None:
                continue
        yield res
def result_renderer_cmdline(res, args):
    """Report the outcome of a dataset-creation call on the command line."""
    from datalad.ui import ui

    if res is None:
        ui.message("Nothing was created")
        return
    if isinstance(res, Dataset):
        ui.message("Created dataset at %s." % res.path)
def custom_result_renderer(res, **kwargs):
    """Print the rendered report for a single 'wtf' result record."""
    from datalad.ui import ui

    report = _render_report(res)
    ui.message(report)