Example #1
def eval_var(var):
    """Wrap ast.literal_eval to provide more helpful error.

    Examples:
        >>> eval_var('42')
        42
        >>> eval_var('"string"')
        'string'
        >>> eval_var('string')
        Traceback (most recent call last):
        cylc.flow.exceptions.UserInputError: Invalid template variable: string
        (note string values must be quoted)
        >>> eval_var('[')
        Traceback (most recent call last):
        cylc.flow.exceptions.UserInputError: Invalid template variable: [
        (values must be valid Python literals)

    """
    try:
        return literal_eval(var)
    except ValueError:
        raise UserInputError(f'Invalid template variable: {var}'
                             '\n(note string values must be quoted)') from None
    except SyntaxError:
        raise UserInputError(
            f'Invalid template variable: {var}'
            '\n(values must be valid Python literals)') from None
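The two except clauses above map directly onto how ast.literal_eval fails: bare (unquoted) names raise ValueError, while malformed input raises SyntaxError. A minimal standalone sketch demonstrating that split (no cylc imports needed):

from ast import literal_eval

for text in ('42', '"quoted"', 'bare_name', '['):
    try:
        print(text, '->', repr(literal_eval(text)))
    except ValueError:
        print(text, '-> ValueError (looks like an unquoted string)')
    except SyntaxError:
        print(text, '-> SyntaxError (not a valid Python literal)')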
Example #2
def parse_rm_dirs(rm_dirs: Iterable[str]) -> Set[str]:
    """Parse a list of possibly colon-separated dirs (or files or globs).
    Return the set of all the dirs.

    Used by cylc clean with the --rm option.
    """
    result: Set[str] = set()
    for item in rm_dirs:
        for part in item.split(':'):
            part = part.strip()
            if not part:
                continue
            is_dir = part.endswith(os.sep)
            part = os.path.normpath(part)
            if os.path.isabs(part):
                raise UserInputError("--rm option cannot take absolute paths")
            if part == '.' or part.startswith(f'..{os.sep}'):
                raise UserInputError(
                    "--rm option cannot take paths that point to the "
                    "run directory or above")
            if is_dir:
                # Preserve trailing slash to ensure it only matches dirs,
                # not files, when globbing
                part += os.sep
            result.add(part)
    return result
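For reference, a stripped-down sketch of the same colon-split and normpath behaviour without the cylc-specific validation (split_rm_items is a hypothetical name, not part of cylc):

import os

def split_rm_items(items):
    """Split colon-separated entries, normalise, keep trailing separators."""
    out = set()
    for item in items:
        for part in item.split(':'):
            part = part.strip()
            if not part:
                continue
            keep_sep = part.endswith(os.sep)
            part = os.path.normpath(part)
            out.add(part + os.sep if keep_sep else part)
    return out

print(split_rm_items(['share:log/job', 'work/']))
# on POSIX: {'share', 'log/job', 'work/'}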
Example #3
def _validate(options: 'Values', *task_globs: str) -> None:
    """Check combination of options and task globs is valid."""
    if options.release_all:
        if task_globs:
            raise UserInputError("Cannot combine --all with TASK_GLOB(s).")
    else:
        if not task_globs:
            raise UserInputError("Missing arguments: TASK_GLOB [...]. "
                                 "See `cylc release --help`.")
Example #4
def _validate(options: 'Values', *tokens_list: str) -> None:
    """Check combination of options and task globs is valid."""
    if options.release_all:
        if tokens_list:
            raise UserInputError("Cannot combine --all with Cycle/Task IDs")
    else:
        if not tokens_list:
            raise UserInputError(
                "Must define Cycles/Tasks. See `cylc release --help`.")
Example #5
def _validate(options: 'Values', *task_globs: str) -> None:
    """Check combination of options and task globs is valid."""
    if options.hold_point_string:
        if task_globs:
            raise UserInputError(
                "Cannot combine --after with TASK_GLOB(s).\n"
                "`cylc hold --after` holds all tasks after the given "
                "cycle point.")
    elif not task_globs:
        raise UserInputError(
            "Missing arguments: TASK_GLOB [...]. See `cylc hold --help`.")
Example #6
def _validate(options: 'Values', *task_globs: str) -> None:
    """Check combination of options and task globs is valid."""
    if options.hold_point_string:
        if task_globs:
            raise UserInputError(
                "Cannot combine --after with Cylc/Task IDs.\n"
                "`cylc hold --after` holds all tasks after the given "
                "cycle point.")
    elif not task_globs:
        raise UserInputError(
            "Must define Cycles/Tasks. See `cylc hold --help`.")
Example #7
def install(parser: COP, opts: 'Values', reg: Optional[str] = None) -> None:
    if opts.no_run_name and opts.run_name:
        raise UserInputError(
            "options --no-run-name and --run-name are mutually exclusive.")

    if reg is None:
        source = opts.source
    else:
        if opts.source:
            raise UserInputError(
                "WORKFLOW_NAME and --directory are mutually exclusive.")
        source = search_install_source_dirs(reg)
    workflow_name = opts.workflow_name or reg

    for entry_point in iter_entry_points('cylc.pre_configure'):
        try:
            if source:
                entry_point.resolve()(srcdir=source, opts=opts)
            else:
                from pathlib import Path
                entry_point.resolve()(srcdir=Path().cwd(), opts=opts)
        except Exception as exc:
            # NOTE: except Exception (purposefully vague)
            # this is to separate plugin from core Cylc errors
            raise PluginError('cylc.pre_configure', entry_point.name,
                              exc) from None

    cli_symdirs: Optional[Dict[str, Dict[str, Any]]] = None
    if opts.symlink_dirs == '':
        cli_symdirs = {}
    elif opts.symlink_dirs:
        cli_symdirs = parse_cli_sym_dirs(opts.symlink_dirs)

    source_dir, rundir, _workflow_name = install_workflow(
        workflow_name=workflow_name,
        source=source,
        run_name=opts.run_name,
        no_run_name=opts.no_run_name,
        cli_symlink_dirs=cli_symdirs)

    for entry_point in iter_entry_points('cylc.post_install'):
        try:
            entry_point.resolve()(srcdir=source_dir,
                                  opts=opts,
                                  rundir=str(rundir))
        except Exception as exc:
            # NOTE: except Exception (purposefully vague)
            # this is to separate plugin from core Cylc errors
            raise PluginError('cylc.post_install', entry_point.name,
                              exc) from None
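Both plugin loops use the same wrap-and-re-raise pattern: anything a plugin raises is converted into a plugin error so it is not mistaken for a core Cylc failure. A minimal sketch of that pattern with pkg_resources (the 'my.plugins' group and the PluginFailure class are made up for illustration):

from pkg_resources import iter_entry_points

class PluginFailure(Exception):
    """Stand-in for cylc's PluginError."""

for entry_point in iter_entry_points('my.plugins'):  # hypothetical group name
    try:
        entry_point.resolve()()  # load the plugin and call it
    except Exception as exc:
        # deliberately broad: any failure is attributed to the plugin
        raise PluginFailure(f'{entry_point.name}: {exc}') from None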
Example #8
def main(parser, opts, suite=None, start=None, stop=None):
    """Implement ``cylc graph``."""
    if opts.ungrouped and opts.namespaces:
        raise UserInputError('Cannot combine --ungrouped and --namespaces.')
    if not opts.reference:
        raise UserInputError('Only the --reference use cases are supported')

    template_vars = load_template_vars(
        opts.templatevars, opts.templatevars_file)

    config = get_config(suite, opts, template_vars=template_vars)
    if opts.namespaces:
        graph_inheritance(config)
    else:
        graph_workflow(config, start, stop, ungrouped=opts.ungrouped,
                       show_suicide=opts.show_suicide)
Example #9
def get_rdict(left, right=None):
    """Check+transform left=right into a nested dict.

    left can be key, [key], [key1]key2, [key1][key2], [key1][key2]key3, etc.
    """
    if left == "inherit":
        raise UserInputError(
            "Inheritance cannot be changed by broadcast")
    rdict = {}
    cur_dict = rdict
    tail = left
    while tail:
        match = REC_ITEM.match(tail)
        if match:
            sect, tail = match.groups()
            if tail:
                # [sect]... = right
                cur_dict[sect.strip()] = {}
                cur_dict = cur_dict[sect.strip()]
            else:
                # [sect] = right
                cur_dict[sect.strip()] = right
        else:
            # item = right
            cur_dict[tail.strip()] = right
            tail = None
    upg({'runtime': {'__MANY__': rdict}}, 'test')
    cylc_config_validate(rdict, SPEC['runtime']['__MANY__'])
    return rdict
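The while loop above peels one [section] at a time off the front of the key, building a nested dict and storing the value at the innermost level. A self-contained sketch of that parsing step (the regex is an assumption; cylc's REC_ITEM may differ):

import re

REC = re.compile(r'^\[([^\]]+)\](.*)$')  # '[sect]rest' -> ('sect', 'rest')

def to_nested(left, right):
    rdict = {}
    cur = rdict
    tail = left
    while tail:
        match = REC.match(tail)
        if match:
            sect, tail = match.groups()
            if tail:
                cur = cur.setdefault(sect.strip(), {})  # descend into [sect]
            else:
                cur[sect.strip()] = right               # [sect] = right
        else:
            cur[tail.strip()] = right                   # item = right
            tail = None
    return rdict

print(to_nested('[environment]FOO', '42'))
# -> {'environment': {'FOO': '42'}}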
Example #10
async def main(opts, color=False, scan_dir=None, write=cprint):
    """Open up a Python API for testing purposes.

    Note:
        Don't use this API for anything other than testing; there is a
        proper Python API for these purposes.

    """
    # validate / standardise the list of workflow states
    opts.states = set(opts.states.split(','))
    if 'all' in opts.states:
        opts.states = FLOW_STATES
    elif (not opts.states
          or not all(state in FLOW_STATES for state in opts.states)):
        raise UserInputError(
            '--states must be set to a comma separated list of workflow'
            ' states. \nSee `cylc scan --help` for a list of supported'
            ' states.')

    if not color:
        # we cannot support colour or have been requested not to use it
        opts.colour_blind = True

    # print state totals key as needed
    if opts.format == 'rich' and not opts.colour_blind:
        cprint(state_totals_key() + '\n')

    await scanner(opts, write, scan_dir)
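A standalone sketch of the --states handling above; FLOW_STATES here is an illustrative set, not necessarily cylc's:

FLOW_STATES = {'running', 'paused', 'stopping', 'stopped'}  # illustrative

def parse_states(value: str) -> set:
    """Expand 'all' or validate a comma separated list of states."""
    states = set(value.split(','))
    if 'all' in states:
        return set(FLOW_STATES)
    if not states or not all(state in FLOW_STATES for state in states):
        raise ValueError(
            '--states must be a comma separated list of workflow states')
    return states

print(parse_states('running,paused'))  # -> {'running', 'paused'}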
Example #11
def _validate_constraint(*tokens_list, constraint=None):
    if constraint == 'workflows':
        for tokens in tokens_list:
            if tokens.is_null or tokens.is_task_like:
                raise UserInputError('IDs must be workflows')
        return
    if constraint == 'tasks':
        for tokens in tokens_list:
            if tokens.is_null or not tokens.is_task_like:
                raise UserInputError('IDs must be tasks')
        return
    if constraint == 'mixed':
        for tokens in tokens_list:
            if tokens.is_null:
                raise UserInputError('IDs cannot be null.')
        return
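The tokens objects here are cylc Tokens exposing is_null and is_task_like. A standalone sketch of the 'tasks' constraint check, using a minimal stand-in class:

from dataclasses import dataclass

@dataclass
class FakeTokens:  # minimal stand-in for cylc.flow.id.Tokens
    is_null: bool = False
    is_task_like: bool = False

def require_tasks(*tokens_list):
    """Reject anything that is null or not task-like."""
    for tokens in tokens_list:
        if tokens.is_null or not tokens.is_task_like:
            raise ValueError('IDs must be tasks')

require_tasks(FakeTokens(is_task_like=True))  # passes silently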
Example #12
def _validate_workflow_ids(*tokens_list, src_path):
    for ind, tokens in enumerate(tokens_list):
        if tokens['user']:
            raise UserInputError(
                "Operating on others users' workflows is not supported")
        if not src_path:
            validate_workflow_name(tokens['workflow'])
        if ind == 0 and src_path:
            # source workflow passed in as a path
            pass
        else:
            src_path = Path(get_workflow_run_dir(tokens['workflow']))
        if src_path.is_file():
            raise UserInputError(
                f'Workflow ID cannot be a file: {tokens["workflow"]}')
        detect_both_flow_and_suite(src_path)
Example #13
def _validate_number(*tokens_list, max_workflows=None, max_tasks=None):
    if not max_workflows and not max_tasks:
        return
    workflows_count = 0
    tasks_count = 0
    for tokens in tokens_list:
        if tokens.is_task_like:
            tasks_count += 1
        else:
            workflows_count += 1
    if max_workflows and workflows_count > max_workflows:
        raise UserInputError(
            f'IDs contain too many workflows (max {max_workflows})')
    if max_tasks and tasks_count > max_tasks:
        raise UserInputError(
            f'IDs contain too many cycles/tasks/jobs (max {max_tasks})')
Example #14
def main(parser: COP, options: 'Values', *ids: str):
    """CLI for "cylc trigger"."""
    if options.flow_descr and not options.reflow:
        raise UserInputError("--meta requires --reflow")
    call_multi(
        partial(run, options),
        *ids,
    )
Example #15
def main(_, opts: 'Values', *ids: str):
    if cylc.flow.flags.verbosity < 2:
        disable_timestamps(LOG)

    if opts.local_only and opts.remote_only:
        raise UserInputError(
            "--local and --remote options are mutually exclusive")

    asyncio.run(run(*ids, opts=opts))
Example #16
def main(parser: COP, options: 'Values', *args: str) -> None:
    """CLI."""
    if not args:
        return parser.error('No message supplied')
    if len(args) <= 2:
        # BACK COMPAT: args <= 2
        # from:
        #     7.6?
        # remove at:
        #     9.0?
        # (As of Dec 2020 some functional tests still use the classic
        # two arg interface)
        workflow_id = os.getenv('CYLC_WORKFLOW_ID')
        task_job = os.getenv('CYLC_TASK_JOB')
        message_strs = list(args)
    else:
        workflow_id, task_job, *message_strs = args
        workflow_id, *_ = parse_id(
            workflow_id,
            constraint='workflows',
        )
    # Read messages from STDIN
    if '-' in message_strs:
        current_message_str = ''
        while True:  # Note: `for line in sys.stdin:` can hang
            message_str = sys.stdin.readline()
            if message_str.strip():
                # non-empty line
                current_message_str += message_str
            elif message_str:
                # empty line, start next message
                if current_message_str:
                    message_strs.append(current_message_str)
                current_message_str = ''  # reset
            else:
                # end of file
                if current_message_str:
                    message_strs.append(current_message_str)
                break
    # Separate "severity: message"
    messages = []  # [(severity, message_str), ...]
    for message_str in message_strs:
        if message_str == '-':
            pass
        elif ':' in message_str:
            valid, err_msg = TaskMessageValidator.validate(message_str)
            if not valid:
                raise UserInputError(
                    f'Invalid task message "{message_str}" - {err_msg}')
            messages.append(
                [item.strip() for item in message_str.split(':', 1)])
        elif options.severity:
            messages.append([options.severity, message_str.strip()])
        else:
            messages.append([getLevelName(INFO), message_str.strip()])
    record_messages(workflow_id, task_job, messages)
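The "severity: message" handling above is a single str.split(':', 1) with a default severity fallback; a standalone sketch of just that step:

from logging import INFO, getLevelName

def split_message(message_str, default_severity=None):
    """Return [severity, message] for one raw message string."""
    if ':' in message_str:
        return [item.strip() for item in message_str.split(':', 1)]
    return [default_severity or getLevelName(INFO), message_str.strip()]

print(split_message('WARNING: disk nearly full'))  # ['WARNING', 'disk nearly full']
print(split_message('all good'))                   # ['INFO', 'all good']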
Example #17
def main(parser: COP, options: 'Values', severity_str: str, *ids) -> None:
    try:
        severity = LOG_LEVELS[severity_str]
    except KeyError:
        raise UserInputError("Illegal logging level, %s" % severity_str)
    call_multi(
        partial(run, options, severity),
        *ids,
        constraint='workflows',
    )
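LOG_LEVELS maps level names to logging constants, so an unknown name surfaces as a KeyError and is re-raised as a user error. A standalone sketch with a stand-in mapping built from the standard logging module:

import logging

LOG_LEVELS = {  # stand-in; cylc defines its own mapping
    name: getattr(logging, name)
    for name in ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL')
}

severity_str = 'INFO'
try:
    severity = LOG_LEVELS[severity_str]
except KeyError:
    raise ValueError(f'Illegal logging level, {severity_str}') from None
print(severity)  # 20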
Example #18
File: clean.py  Project: lparkes/cylc
def main(parser: COP, opts: 'Values', reg: str):
    # Note: do not use workflow_files.parse_reg here
    if cylc.flow.flags.verbosity < 2:
        disable_timestamps(LOG)

    if opts.local_only and opts.remote_only:
        raise UserInputError(
            "--local and --remote options are mutually exclusive")

    init_clean(reg, opts)
Example #19
def main(parser, options, *args):
    """CLI."""
    if not args:
        return parser.error('No message supplied')
    cylc.flow.flags.verbose = os.getenv('CYLC_VERBOSE') == 'true'
    cylc.flow.flags.debug = os.getenv('CYLC_DEBUG') == 'true'
    if len(args) <= 2:
        # BACK COMPAT: args <= 2
        # from:
        #     7.6?
        # remove at:
        #     9.0?
        # (As of Dec 2020 some functional tests still use the classic
        # two arg interface)
        suite = os.getenv('CYLC_SUITE_NAME')
        task_job = os.getenv('CYLC_TASK_JOB')
        message_strs = list(args)
    else:
        suite, task_job, *message_strs = args
    # Read messages from STDIN
    if '-' in message_strs:
        current_message_str = ''
        while True:  # Note: `for line in sys.stdin:` can hang
            message_str = sys.stdin.readline()
            if message_str.strip():
                # non-empty line
                current_message_str += message_str
            elif message_str:
                # empty line, start next message
                if current_message_str:
                    message_strs.append(current_message_str)
                current_message_str = ''  # reset
            else:
                # end of file
                if current_message_str:
                    message_strs.append(current_message_str)
                break
    # Separate "severity: message"
    messages = []  # [(severity, message_str), ...]
    for message_str in message_strs:
        if message_str == '-':
            pass
        elif ':' in message_str:
            valid, err_msg = TaskMessageValidator.validate(message_str)
            if not valid:
                raise UserInputError(
                    f'Invalid task message "{message_str}" - {err_msg}')
            messages.append(
                [item.strip() for item in message_str.split(':', 1)])
        elif options.severity:
            messages.append([options.severity, message_str.strip()])
        else:
            messages.append([getLevelName(INFO), message_str.strip()])
    record_messages(suite, task_job, messages)
Example #20
def parse_args():
    if {'help', '--help', "-h"} & set(sys.argv):
        print(__doc__)
    elif len(sys.argv) < 2:
        raise UserInputError(
            "wrong number of arguments, "
            f"see '{os.path.basename(sys.argv[0])} --help'.")
    elif '--list' in sys.argv:
        print('\n'.join(list_resources()))
    else:
        return (None, sys.argv[1:])
    sys.exit()
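parse_args above detects a help request with a set intersection against sys.argv before doing any real parsing; a minimal standalone sketch of that idiom:

import sys

def wants_help(argv):
    """True if any recognised help token appears anywhere in argv."""
    return bool({'help', '--help', '-h'} & set(argv))

print(wants_help(['prog', '--list']))  # False
print(wants_help(['prog', '-h']))      # True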
Example #21
def main(parser, opts, workflow=None, start=None, stop=None):
    """Implement ``cylc graph``."""
    if opts.ungrouped and opts.namespaces:
        raise UserInputError('Cannot combine --ungrouped and --namespaces.')
    if not (opts.reference or opts.diff):
        raise UserInputError(
            'Only the --reference and --diff use cases are supported')

    template_vars = load_template_vars(opts.templatevars,
                                       opts.templatevars_file)

    write = print
    flows = [(workflow, [])]
    if opts.diff:
        flows.append((opts.diff, []))

    for flow, graph in flows:
        if opts.diff:
            write = graph.append
        config = get_config(flow, opts, template_vars=template_vars)
        if opts.namespaces:
            graph_inheritance(config, write=write)
        else:
            graph_workflow(config,
                           start,
                           stop,
                           ungrouped=opts.ungrouped,
                           show_suicide=opts.show_suicide,
                           write=write)

    if opts.diff:
        lines = list(
            unified_diff([f'{line}\n' for line in flows[0][1]],
                         [f'{line}\n' for line in flows[1][1]],
                         fromfile=flows[0][0],
                         tofile=flows[1][0]))

        if lines:
            sys.stdout.writelines(lines)
            sys.exit(1)
Example #22
File: graph.py  Project: lparkes/cylc
def main(parser: COP,
         opts: 'Values',
         workflow: str,
         start: Optional[str] = None,
         stop: Optional[str] = None) -> None:
    """Implement ``cylc graph``."""
    if opts.grouping and opts.namespaces:
        raise UserInputError('Cannot combine --group and --namespaces.')

    lines: List[str] = []
    if not (opts.reference or opts.diff):
        write = lines.append
    else:
        write = print

    flows: List[Tuple[str, List[str]]] = [(workflow, [])]
    if opts.diff:
        flows.append((opts.diff, []))

    for flow, graph in flows:
        if opts.diff:
            write = graph.append
        config = get_config(flow, opts)
        if opts.namespaces:
            graph_inheritance(config, write=write)
        else:
            graph_workflow(config,
                           start,
                           stop,
                           grouping=opts.grouping,
                           show_suicide=opts.show_suicide,
                           write=write)

    if opts.diff:
        diff_lines = list(
            unified_diff([f'{line}\n' for line in flows[0][1]],
                         [f'{line}\n' for line in flows[1][1]],
                         fromfile=flows[0][0],
                         tofile=flows[1][0]))

        if diff_lines:
            sys.stdout.writelines(diff_lines)
            sys.exit(1)

    if not (opts.reference or opts.diff):
        filename = dot(opts, lines)
        if opts.output:
            print(f'Graph rendered to {opts.output}')
        else:
            gui(filename)
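For --diff, each workflow's graph lines are collected and handed to difflib.unified_diff; a small standalone sketch of that call with made-up graph lines:

import sys
from difflib import unified_diff

graph_a = ['a => b', 'b => c']
graph_b = ['a => b', 'b => d']

lines = list(unified_diff(
    [f'{line}\n' for line in graph_a],
    [f'{line}\n' for line in graph_b],
    fromfile='workflow-a',
    tofile='workflow-b'))

if lines:
    sys.stdout.writelines(lines)  # a non-empty diff means the graphs differ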
Example #23
def load(config, additional_plugins=None):
    additional_plugins = additional_plugins or []
    entry_points = {
        entry_point.name: entry_point
        for entry_point in
        iter_entry_points('cylc.main_loop')
    }
    plugins = {
        'state': {},
        'timings': {}
    }
    for plugin_name in config['plugins'] + additional_plugins:
        # get plugin
        try:
            module_name = entry_points[plugin_name.replace(' ', '_')]
        except KeyError:
            raise UserInputError(
                f'No main-loop plugin: "{plugin_name}"\n'
                + '    Available plugins:\n'
                + indent('\n'.join(sorted(entry_points)), '        ')
            )
        # load plugin
        try:
            module = module_name.load()
        except Exception:
            raise CylcError(f'Could not load plugin: "{plugin_name}"')
        # load coroutines
        log = []
        for coro_name, coro in (
                (coro_name, coro)
                for coro_name, coro in getmembers(module)
                if isfunction(coro)
                if hasattr(coro, 'main_loop')
        ):
            log.append(coro_name)
            plugins.setdefault(
                coro.main_loop, {}
            )[(plugin_name, coro_name)] = coro
            plugins['timings'][(plugin_name, coro_name)] = deque(maxlen=1)
        LOG.debug(
            'Loaded main loop plugin "%s": %s',
            plugin_name + '\n',
            '\n'.join((f'* {x}' for x in log))
        )
        # set the initial state of the plugin
        plugins['state'][plugin_name] = {}
    # make a note of the config here for ease of reference
    plugins['config'] = config
    return plugins
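Plugin coroutines are discovered by inspecting the loaded module for functions that carry a main_loop attribute (set by a decorator elsewhere in cylc). A standalone sketch of that discovery step on a throwaway module object:

from inspect import getmembers, isfunction
from types import ModuleType

mod = ModuleType('fake_plugin')  # throwaway module for illustration

def before(*args): ...
before.main_loop = 'periodic'    # the marker the loader looks for

def helper(*args): ...           # not marked, should be ignored

mod.before, mod.helper = before, helper

marked = [
    name for name, func in getmembers(mod)
    if isfunction(func) and hasattr(func, 'main_loop')
]
print(marked)  # ['before']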
Example #24
async def _expand_workflow_tokens_impl(tokens, match_active=True):
    """Use "cylc scan" to expand workflow patterns."""
    workflow_sel = tokens['workflow_sel']
    if workflow_sel and workflow_sel != 'running':
        raise UserInputError(f'The workflow selector :{workflow_sel} is not'
                             ' currently supported.')

    # construct the pipe
    pipe = scan | filter_name(fnmatch.translate(tokens['workflow']))
    if match_active is not None:
        pipe |= is_active(match_active)

    # iter the results
    async for workflow in pipe:
        yield tokens.duplicate(workflow=workflow['name'])
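filter_name expects a regular expression, so the glob-style workflow pattern is first converted with fnmatch.translate; a quick standalone illustration of that conversion:

import fnmatch
import re

pattern = fnmatch.translate('nightly/*')         # glob -> regex source
print(bool(re.match(pattern, 'nightly/build')))  # True
print(bool(re.match(pattern, 'weekly/build')))   # False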
Example #25
def main(parser: COP, opts: 'Values', reg: str):
    # Note: do not use workflow_files.parse_reg here

    if cylc.flow.flags.verbosity < 2:
        # for readability omit timestamps from logging unless in debug mode
        for handler in LOG.handlers:
            if isinstance(handler.formatter, CylcLogFormatter):
                handler.formatter.configure(timestamp=False)

    if opts.local_only and opts.remote_only:
        raise UserInputError(
            "--local and --remote options are mutually exclusive"
        )

    init_clean(reg, opts)
Example #26
def _parse_src_path(id_):
    src_path = Path(id_)
    if (id_ == os.curdir or id_.startswith(f'{os.curdir}{os.sep}')
            or Path(id_).is_absolute()):
        src_path = src_path.resolve()
        if not src_path.exists():
            raise UserInputError(f'Path does not exist: {src_path}')
        if src_path.name in {WorkflowFiles.FLOW_FILE, WorkflowFiles.SUITE_RC}:
            src_path = src_path.parent
        try:
            src_file_path = check_flow_file(src_path)
        except WorkflowFilesError:
            raise WorkflowFilesError(NO_FLOW_FILE_MSG.format(id_))
        workflow_id = src_path.name
        return workflow_id, src_path, src_file_path
    return None
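The branch is taken only when the ID looks like a filesystem path: '.', './something', or an absolute path. A standalone sketch of just that test:

import os
from pathlib import Path

def looks_like_src_path(id_: str) -> bool:
    """True for '.', './...', or absolute paths."""
    return (
        id_ == os.curdir
        or id_.startswith(f'{os.curdir}{os.sep}')
        or Path(id_).is_absolute()
    )

print(looks_like_src_path('./my-flow'))  # True
print(looks_like_src_path('/tmp/flow'))  # True on POSIX
print(looks_like_src_path('my-flow'))    # False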
Example #27
def main(parser, options, suite, *items):
    for i, item in enumerate(items):
        if not TaskID.is_valid_id_2(item):
            raise UserInputError(
                '"%s": invalid task ID (argument %d)' % (item, i + 1))
    prompt('Insert %s in %s' % (items, suite), options.force)

    pclient = SuiteRuntimeClient(
        suite, options.owner, options.host, options.port)

    pclient(
        'insert_tasks',
        {'items': items, 'no_check': options.no_check,
         'stop_point_string': options.stop_point_string},
        timeout=options.comms_timeout
    )
Example #28
def main(
    parser: COP,
    options: 'Values',
    workflow: str,
    task_id: Optional[str] = None
) -> None:
    workflow, _ = parse_reg(workflow)
    pclient = get_client(workflow, timeout=options.comms_timeout)

    if task_id and not TaskID.is_valid_id(task_id):
        raise UserInputError("Invalid task ID: %s" % task_id)

    flow_kwargs = {
        'request_string': FLOW_QUERY,
        'variables': {'wFlows': [workflow]}
    }
    task_kwargs: Dict[str, Any] = {
        'request_string': TASK_QUERY,
    }
    # cylc ping WORKFLOW
    result = pclient('graphql', flow_kwargs)
    msg = ""
    for flow in result['workflows']:
        w_name = flow['name']
        w_port = flow['port']
        w_pub_port = flow['pubPort']
        if cylc.flow.flags.verbosity > 0:
            sys.stdout.write(
                f'{w_name} running on '
                f'{pclient.host}:{w_port} {w_pub_port}\n'
            )
        # cylc ping WORKFLOW TASKID
        if task_id:
            task, point = TaskID.split(task_id)
            w_id = flow['id']
            task_kwargs['variables'] = {
                'tProxy': f'{w_id}{ID_DELIM}{point}{ID_DELIM}{task}'
            }
            task_result = pclient('graphql', task_kwargs)
            if not task_result.get('taskProxy'):
                msg = "task not found"
            elif task_result['taskProxy']['state'] != TASK_STATUS_RUNNING:
                msg = f"task not {TASK_STATUS_RUNNING}"
            if msg:
                print(cparse(f'<red>{msg}</red>'))
                sys.exit(1)
Example #29
def main(
    parser: COP,
    options: 'Values',
    *ids,
) -> None:

    if options.print_platform_names and options.print_platforms:
        options.print_platform_names = False

    if options.print_platform_names or options.print_platforms:
        # Get platform information:
        if ids:
            raise UserInputError(
                "Workflow IDs are incompatible with --platform options.")
        glbl_cfg().platform_dump(options.print_platform_names,
                                 options.print_platforms)
        return

    if not ids:
        if options.print_hierarchy:
            print("\n".join(get_config_file_hierarchy()))
            return

        glbl_cfg().idump(options.item,
                         not options.defaults,
                         oneline=options.oneline,
                         none_str=options.none_str)
        return

    workflow_id, _, flow_file = parse_id(
        *ids,
        src=True,
        constraint='workflows',
    )

    if options.print_hierarchy:
        print("\n".join(get_config_file_hierarchy(workflow_id)))
        return

    config = WorkflowConfig(workflow_id, flow_file, options,
                            get_template_vars(options))

    config.pcfg.idump(options.item,
                      not options.defaults,
                      oneline=options.oneline,
                      none_str=options.none_str)
Example #30
def get_resources(resource: str, tgt_dir: Optional[str]):
    """Extract cylc.flow resources and write them to a target directory.

    Arguments:
        resource: path relative to RESOURCE_DIR.
        target_dir: Where to put extracted resources, created if necessary.

    """
    # get the resource path
    resource_path = Path(resource)

    if resource in ('api-key', 'tutorial/api-key'):
        print(get_api_key())
        return

    src = RESOURCE_DIR / resource_path
    if not src.exists():
        raise UserInputError(
            f'No such resource {resource}.'
            '\nRun `cylc get-resources --list` for resource names.')

    is_tutorial = path_is_tutorial(src)

    # get the target path
    if not tgt_dir:
        if is_tutorial:
            # this is a tutorial => use the primary source dir
            _tgt_dir = Path(glbl_cfg().get(['install', 'source dirs'])[0])
        else:
            # this is a regular resource => use $PWD
            _tgt_dir = Path.cwd()
    else:
        _tgt_dir = Path(tgt_dir).resolve()
    tgt = _tgt_dir / resource_path.name

    tgt = tgt.expanduser()
    tgt = tgt.resolve()

    # extract resources
    extract_resource(src, tgt, is_tutorial)
    if is_tutorial:
        set_api_key(tgt)
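Target resolution above boils down to: default to a sensible base directory, join the resource name, then expand '~' and resolve the result. A simplified standalone sketch of that path handling (resolve_target is a hypothetical helper, not cylc API):

from pathlib import Path

def resolve_target(tgt_dir, name):
    """Pick a base dir (given or cwd), then expand and resolve the target."""
    base = Path(tgt_dir) if tgt_dir else Path.cwd()
    return (base / name).expanduser().resolve()

print(resolve_target(None, 'api-key'))           # <cwd>/api-key
print(resolve_target('~/cylc-src', 'tutorial'))  # under the user's home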