Example #1
def main(parser: COP, options: 'Values', workflow_id1: str, workflow_id2: str):
    workflow_id_1, _, workflow_file_1_ = parse_id(
        workflow_id1,
        src=True,
        constraint='workflows',
    )
    workflow_id_2, _, workflow_file_2_ = parse_id(
        workflow_id2,
        src=True,
        constraint='workflows',
    )
    if workflow_file_1_ == workflow_file_2_:
        parser.error("You can't diff a single workflow.")
    print(f"Parsing {workflow_id_1} ({workflow_file_1_})")
    template_vars = load_template_vars(
        options.templatevars, options.templatevars_file
    )
    config1 = WorkflowConfig(
        workflow_id_1, workflow_file_1_, options, template_vars
    ).cfg
    print(f"Parsing {workflow_id_2} ({workflow_file_2_})")
    config2 = WorkflowConfig(
        workflow_id_2, workflow_file_2_, options, template_vars,
        is_reload=True
    ).cfg

    if config1 == config2:
        print(
            f"Workflow definitions {workflow_id_1} and {workflow_id_2} are "
            f"identical"
        )
        sys.exit(0)

    print(f"Workflow definitions {workflow_id_1} and {workflow_id_2} differ")

    workflow1_only = {}  # type: ignore
    workflow2_only = {}  # type: ignore
    diff_1_2 = {}  # type: ignore
    # TODO: this whole file could do with refactoring at some point

    diffdict(config1, config2, workflow1_only, workflow2_only, diff_1_2)

    # n_oone, n_otwo and n_diff are module-level counters, presumably
    # updated by diffdict as it recurses (not shown in this example).
    if n_oone > 0:
        print(f'\n{n_oone} items only in {workflow_id_1} (<)')
        prdict(workflow1_only, '<', nested=options.nested)

    if n_otwo > 0:
        print(f'\n{n_otwo} items only in {workflow_id_2} (>)')
        prdict(workflow2_only, '>', nested=options.nested)

    if n_diff > 0:
        print(f'\n{n_diff} common items differ {workflow_id_1}(<) '
              f'{workflow_id_2}(>)')
        prdict(diff_1_2, '', diff=True, nested=options.nested)
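
The helper diffdict is not shown in this example; it recursively walks both config trees. A minimal sketch of that kind of recursive dict comparison, with hypothetical names and no claim to match the real implementation:

def diff_nested(d1, d2, only1, only2, diff):
    """Collect keys only in d1, keys only in d2, and differing values."""
    for key, val1 in d1.items():
        if key not in d2:
            only1[key] = val1
        elif isinstance(val1, dict) and isinstance(d2[key], dict):
            sub1, sub2, subd = {}, {}, {}
            diff_nested(val1, d2[key], sub1, sub2, subd)
            if sub1:
                only1[key] = sub1
            if sub2:
                only2[key] = sub2
            if subd:
                diff[key] = subd
        elif val1 != d2[key]:
            diff[key] = (val1, d2[key])
    for key, val2 in d2.items():
        if key not in d1:
            only2[key] = val2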
Example #2
def main(parser: COP, options: 'Values', workflow_id: str) -> None:
    workflow_id, *_ = parse_id(
        workflow_id,
        constraint='workflows',
    )

    output_options = [
        options.show_raw, options.show_summary, options.html_summary
    ]
    if output_options.count(True) > 1:
        parser.error('Cannot combine output formats (choose one)')
    if not any(output_options):
        # No output specified - choose summary by default
        options.show_summary = True

    run_db = _get_dao(workflow_id)
    row_buf = format_rows(*run_db.select_task_times())
    with smart_open(options.output_filename) as output:
        if options.show_raw:
            output.write(row_buf.getvalue())
        else:
            summary: TimingSummary
            if options.show_summary:
                summary = TextTimingSummary(row_buf)
            elif options.html_summary:
                summary = HTMLTimingSummary(row_buf)
            summary.write_summary(output)
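
Note that smart_open here is a cylc utility; presumably it yields sys.stdout when options.output_filename is None. A minimal sketch of that pattern, assuming this behaviour:

import sys
from contextlib import contextmanager

@contextmanager
def smart_open_sketch(filename=None):
    # Hypothetical stand-in: open the named file for writing, or fall
    # back to stdout (without closing it) when no filename is given.
    if filename is None:
        yield sys.stdout
    else:
        with open(filename, 'w') as handle:
            yield handle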
Example #3
def get_platforms_from_task_jobs(flow: str, cyclepoint: str) -> Dict[str, Any]:
    """Access flow database. Return platform for task at fixed cycle point

    Uses the workflow database - designed to be used with tasks where jobs
    have been submitted. We assume that we want the most recent submission.

    Args:
        flow: The name of the Cylc flow to be queried.
        cyclepoint: The CyclePoint at which to query the job.
        task: The name of the task to be queried.

    Returns:
        Platform Dictionary.
    """
    _, _, flow_file = parse_id(flow, constraint='workflows', src=True)
    dbfilepath = Path(flow_file).parent / '.service/db'
    dao = CylcWorkflowDAO(dbfilepath)
    task_platform_map: Dict = {}
    stmt = ('SELECT "name", "platform_name", "submit_num" '
            'FROM task_jobs WHERE cycle=?')
    for row in dao.connect().execute(stmt, [cyclepoint]):
        task, platform_n, submit_num = row
        platform = get_platform(platform_n)
        if (task not in task_platform_map
                or task_platform_map[task][0] < submit_num):
            task_platform_map[task] = [submit_num, platform]

    # Drop the submit number; only the platform is needed.
    task_platform_map = {
        key: value[1]
        for key, value in task_platform_map.items()
    }

    return task_platform_map
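
A hedged usage sketch (the flow name and cycle point below are made up for illustration):

# Hypothetical call: map every task at one cycle point to its platform.
platform_map = get_platforms_from_task_jobs('my_flow', '20230101T0000Z')
for task_name, platform in platform_map.items():
    print(task_name, platform.get('name'))  # assumes platforms carry 'name'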
Example #4
def get_platform_from_task_def(flow: str, task: str) -> Dict[str, Any]:
    """Return the platform dictionary for a particular task.

    Uses the flow definition - designed to be used with tasks
    with unsubmitted jobs. Evaluates platform/host defined as subshell.

    Args:
        flow: The name of the Cylc flow to be queried.
        task: The name of the task to be queried.

    Returns:
        Platform Dictionary.
    """
    _, _, flow_file = parse_id(flow, constraint='workflows', src=True)
    config = WorkflowConfig(flow, flow_file, Values())
    # Get entire task spec to allow Cylc 7 platform from host guessing.
    task_spec = config.pcfg.get(['runtime', task])
    # check for subshell and evaluate
    if (task_spec.get('platform')
            and is_platform_definition_subshell(task_spec['platform'])):
        task_spec['platform'] = eval_subshell(task_spec['platform'])
    elif (task_spec.get('remote', {}).get('host')
          and HOST_REC_COMMAND.match(task_spec['remote']['host'])):
        task_spec['remote']['host'] = eval_subshell(
            task_spec['remote']['host'])
    platform = get_platform(task_spec)
    return platform
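
For context, a platform defined as a subshell command in the flow definition might look like the following flow.cylc fragment (the task name and host-select command are hypothetical):

[runtime]
    [[my_task]]
        platform = $(rose host-select linux-cluster)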
Example #5
def main(_, options: 'Values', *ids) -> None:
    workflow_id, _, flow_file = parse_id(
        *ids,
        src=True,
        constraint='workflows',
    )

    # Extract the task platforms from the workflow definition.
    config = WorkflowConfig(
        workflow_id, flow_file, options,
        load_template_vars(options.templatevars, options.templatevars_file))

    platforms = {
        config.get_config(['runtime', name, 'platform'])
        for name in config.get_namespace_list('all tasks')
    } - {None, 'localhost'}

    # When "workflow run hosts" are formalised as "flow platforms"
    # we can substitute `localhost` for this, in the mean time
    # we will have to assume that flow hosts are configured correctly.

    if not platforms:
        sys.exit(0)

    verbose = cylc.flow.flags.verbosity > 0

    # get the cylc version on each platform
    versions = {}
    for platform_name in sorted(platforms):
        platform = get_platform(platform_name)
        host = get_host_from_platform(platform, bad_hosts=None)
        cmd = construct_ssh_cmd(['version'], platform, host)
        if verbose:
            print(cmd)
        proc = procopen(cmd, stdin=DEVNULL, stdout=PIPE, stderr=PIPE)
        out, err = proc.communicate()
        out = out.decode()
        err = err.decode()
        if proc.wait() == 0:
            if verbose:
                print("   %s" % out)
            versions[platform_name] = out.strip()
        else:
            versions[platform_name] = f'ERROR: {err.strip()}'

    # report results
    max_len = max((len(platform_name) for platform_name in platforms))
    print(f'{"platform".rjust(max_len)}: cylc version')
    print('-' * (max_len + 14))
    for platform_name, result in versions.items():
        print(f'{platform_name.rjust(max_len)}: {result}')
    if all((version == CYLC_VERSION for version in versions.values())):
        ret_code = 0
    elif options.error:
        ret_code = 1
    else:
        ret_code = 0
    sys.exit(ret_code)
Example #6
def main(parser: COP, options: 'Values', *args: str) -> None:
    """CLI."""
    if not args:
        return parser.error('No message supplied')
    if len(args) <= 2:
        # BACK COMPAT: args <= 2
        # from:
        #     7.6?
        # remove at:
        #     9.0?
        # (As of Dec 2020 some functional tests still use the classic
        # two arg interface)
        workflow_id = os.getenv('CYLC_WORKFLOW_ID')
        task_job = os.getenv('CYLC_TASK_JOB')
        message_strs = list(args)
    else:
        workflow_id, task_job, *message_strs = args
        workflow_id, *_ = parse_id(
            workflow_id,
            constraint='workflows',
        )
    # Read messages from STDIN
    if '-' in message_strs:
        current_message_str = ''
        while True:  # Note: `for line in sys.stdin:` can hang
            message_str = sys.stdin.readline()
            if message_str.strip():
                # non-empty line
                current_message_str += message_str
            elif message_str:
                # empty line, start next message
                if current_message_str:
                    message_strs.append(current_message_str)
                current_message_str = ''  # reset
            else:
                # end of file
                if current_message_str:
                    message_strs.append(current_message_str)
                break
    # Separate "severity: message"
    messages = []  # [(severity, message_str), ...]
    for message_str in message_strs:
        if message_str == '-':
            pass
        elif ':' in message_str:
            valid, err_msg = TaskMessageValidator.validate(message_str)
            if not valid:
                raise UserInputError(
                    f'Invalid task message "{message_str}" - {err_msg}')
            messages.append(
                [item.strip() for item in message_str.split(':', 1)])
        elif options.severity:
            messages.append([options.severity, message_str.strip()])
        else:
            messages.append([getLevelName(INFO), message_str.strip()])
    record_messages(workflow_id, task_job, messages)
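
The stdin loop above implements a simple blank-line-delimited message protocol. A self-contained sketch of the same parsing logic, for illustration only:

def parse_message_stream(text):
    """Split text into messages separated by blank lines (illustrative)."""
    messages, current = [], ''
    for line in text.splitlines(keepends=True):
        if line.strip():
            current += line
        elif current:
            messages.append(current)
            current = ''
    if current:
        messages.append(current)
    return messages

# parse_message_stream('one\nstill one\n\ntwo\n')
# -> ['one\nstill one\n', 'two\n']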
Example #7
def main(parser: COP, opts: 'Values', args: Optional[str] = None) -> None:
    run_dir: Optional[Path]
    workflow_id: str
    if args is None:
        try:
            workflow_id = str(Path.cwd().relative_to(
                Path(get_cylc_run_dir()).resolve()))
        except ValueError:
            raise WorkflowFilesError(
                "The current working directory is not a workflow run directory"
            )
    else:
        workflow_id, *_ = parse_id(
            args,
            constraint='workflows',
        )
    run_dir = Path(get_workflow_run_dir(workflow_id))
    if not run_dir.is_dir():
        raise WorkflowFilesError(
            f'"{workflow_id}" is not an installed workflow.')
    source, source_symlink = get_workflow_source_dir(run_dir)
    if not source:
        raise WorkflowFilesError(
            f'"{workflow_id}" was not installed with cylc install.')
    if not Path(source).is_dir():
        raise WorkflowFilesError(
            f'Workflow source dir is not accessible: "{source}".\n'
            f'Restore the source or modify the "{source_symlink}"'
            ' symlink to continue.')
    for entry_point in iter_entry_points('cylc.pre_configure'):
        try:
            entry_point.resolve()(srcdir=source, opts=opts)
        except Exception as exc:
            # NOTE: except Exception (purposefully vague)
            # this is to separate plugin from core Cylc errors
            raise PluginError('cylc.pre_configure', entry_point.name,
                              exc) from None

    reinstall_workflow(
        named_run=workflow_id,
        rundir=run_dir,
        source=source,
        dry_run=False  # TODO: ready for dry run implementation
    )

    for entry_point in iter_entry_points('cylc.post_install'):
        try:
            entry_point.resolve()(srcdir=source,
                                  opts=opts,
                                  rundir=str(run_dir))
        except Exception as exc:
            # NOTE: except Exception (purposefully vague)
            # this is to separate plugin from core Cylc errors
            raise PluginError('cylc.post_install', entry_point.name,
                              exc) from None
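
The pre_configure and post_install hooks are discovered via setuptools entry points; a hedged sketch of how a plugin package might register them (package and module names are hypothetical):

# setup.py of a hypothetical plugin distribution
from setuptools import setup

setup(
    name='my-cylc-plugin',
    py_modules=['my_plugin'],
    entry_points={
        'cylc.pre_configure': ['my_hook = my_plugin:pre_configure'],
        'cylc.post_install': ['my_hook = my_plugin:post_install'],
    },
)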
Example #8
def get_config(workflow_id: str, opts: 'Values') -> WorkflowConfig:
    """Return a WorkflowConfig object for the provided reg / path."""
    workflow_id, _, flow_file = parse_id(
        workflow_id,
        src=True,
        constraint='workflows',
    )
    template_vars = get_template_vars(opts)
    return WorkflowConfig(workflow_id,
                          flow_file,
                          opts,
                          template_vars=template_vars)
Example #9
def main(parser: COP, options: 'Values', workflow_id: str) -> None:
    """CLI for "cylc get-workflow-contact"."""
    workflow_id, *_ = parse_id(
        workflow_id,
        constraint='workflows',
    )
    try:
        data = load_contact_file(workflow_id)
    except ServiceFileError:
        raise CylcError(
            f"{workflow_id}: cannot get contact info, workflow not running?")
    else:
        for key, value in sorted(data.items()):
            print("%s=%s" % (key, value))
Example #10
def main(
    parser: COP,
    options: 'Values',
    *ids,
) -> None:

    if options.print_platform_names and options.print_platforms:
        options.print_platform_names = False

    if options.print_platform_names or options.print_platforms:
        # Get platform information:
        if ids:
            raise UserInputError(
                "Workflow IDs are incompatible with --platform options.")
        glbl_cfg().platform_dump(options.print_platform_names,
                                 options.print_platforms)
        return

    if not ids:
        if options.print_hierarchy:
            print("\n".join(get_config_file_hierarchy()))
            return

        glbl_cfg().idump(options.item,
                         not options.defaults,
                         oneline=options.oneline,
                         none_str=options.none_str)
        return

    workflow_id, _, flow_file = parse_id(
        *ids,
        src=True,
        constraint='workflows',
    )

    if options.print_hierarchy:
        print("\n".join(get_config_file_hierarchy(workflow_id)))
        return

    config = WorkflowConfig(workflow_id, flow_file, options,
                            get_template_vars(options))

    config.pcfg.idump(options.item,
                      not options.defaults,
                      oneline=options.oneline,
                      none_str=options.none_str)
Example #11
def main(_, options: 'Values', workflow_id: str, func: str) -> None:
    workflow_id, *_ = parse_id(
        workflow_id,
        constraint='workflows',
    )
    pclient = WorkflowRuntimeClient(workflow_id, timeout=options.comms_timeout)
    if options.no_input:
        kwargs = {}
    else:
        kwargs = json.load(sys.stdin)
    sys.stdin.close()
    res = pclient(func, kwargs)
    # Methods in PB_METHOD_MAP return serialised protobuf messages that
    # must be decoded; other results are assumed to be JSON-serialisable.
    if func in PB_METHOD_MAP:
        if 'element_type' in kwargs:
            pb_msg = PB_METHOD_MAP[func][kwargs['element_type']]()
        else:
            pb_msg = PB_METHOD_MAP[func]()
        pb_msg.ParseFromString(res)
        res_msg = MessageToDict(pb_msg)
    else:
        res_msg = res
    sys.stdout.write(json.dumps(res_msg, indent=4) + '\n')
Example #12
def main(_, options: 'Values', workflow_id: str) -> None:
    workflow_id, *_ = parse_id(
        workflow_id,
        constraint='workflows',
    )
    screen = None
    if options.display == 'html':
        TREE_EXPAND_DEPTH[0] = -1  # expand tree fully
        screen = html_fragment.HtmlGenerator()
        screen.set_terminal_properties(256)
        screen.register_palette(TuiApp.palette)
        html_fragment.screenshot_init(
            [tuple(map(int, options.v_term_size.split(',')))], [])

    try:
        TuiApp(workflow_id, screen=screen).main()

        if options.display == 'html':
            for fragment in html_fragment.screenshot_collect():
                print(fragment)
    except KeyboardInterrupt:
        pass
Example #13
def get_platform_from_task_def(flow: str, task: str) -> Dict[str, Any]:
    """Return the platform dictionary for a particular task.

    Uses the flow definition - designed to be used with tasks
    with unsubmitted jobs.

    Args:
        flow: The name of the Cylc flow to be queried.
        task: The name of the task to be queried.

    Returns:
        Platform Dictionary.
    """
    _, _, flow_file = parse_id(flow, constraint='workflows', src=True)
    config = WorkflowConfig(flow, flow_file, Values())
    # Get entire task spec to allow Cylc 7 platform from host guessing.
    task_spec = config.pcfg.get(['runtime', task])
    platform = get_platform(task_spec)
    if platform is None:
        raise PlatformLookupError(
            'Platform lookup failed; platform is a subshell to be evaluated: '
            f'Task: {task}, platform: {task_spec["platform"]}.')
    return platform
Example #14
def main(parser: COP, options: 'Values', workflow_id: str, event_msg: str,
         event_id: str) -> None:
    workflow_id, *_ = parse_id(
        workflow_id,
        constraint='workflows',
    )
    LOG.info('Send to workflow %s: "%s" (%s)', workflow_id, event_msg,
             event_id)
    pclient = get_client(workflow_id, timeout=options.comms_timeout)

    max_n_tries = int(options.max_n_tries)
    retry_intvl_secs = float(options.retry_intvl_secs)

    mutation_kwargs = {
        'request_string': MUTATION,
        'variables': {
            'wFlows': [workflow_id],
            'eventMsg': event_msg,
            'eventId': event_id,
        }
    }

    # Try the mutation up to max_n_tries times, sleeping between attempts.
    for i_try in range(max_n_tries):
        try:
            pclient('graphql', mutation_kwargs)
        except ClientError as exc:
            LOG.exception(exc)
            LOG.info(MSG_SEND_FAILED, i_try + 1, max_n_tries)
            if i_try == max_n_tries - 1:  # final attempt
                raise CylcError('send failed')
            LOG.info(MSG_SEND_RETRY, retry_intvl_secs, options.comms_timeout)
            sleep(retry_intvl_secs)
        else:
            if i_try > 0:
                LOG.info(MSG_SEND_SUCCEED, i_try + 1, max_n_tries)
            break
Example #15
def main(parser: COP, options: 'Values', workflow_id: str) -> None:
    workflow_id, _, flow_file = parse_id(
        workflow_id,
        src=True,
        constraint='workflows',
    )

    if options.geditor:
        editor = glbl_cfg().get(['editors', 'gui'])
    else:
        editor = glbl_cfg().get(['editors', 'terminal'])

    # read in the flow.cylc file
    viewcfg = {
        'mark': options.mark,
        'single': options.single,
        'label': options.label,
        'empy': options.empy or options.process,
        'jinja2': options.jinja2 or options.process,
        'contin': options.cat or options.process,
        'inline': (options.inline or options.jinja2 or options.empy
                   or options.process),
    }
    lines = read_and_proc(flow_file,
                          load_template_vars(options.templatevars,
                                             options.templatevars_file),
                          viewcfg=viewcfg)

    if options.stdout:
        for line in lines:
            print(line)
        sys.exit(0)

    # write to a temporary file
    viewfile = NamedTemporaryFile(
        suffix=".flow.cylc",
        prefix=workflow_id.replace('/', '_') + '.',
    )
    for line in lines:
        viewfile.write((line + '\n').encode())
    viewfile.seek(0, 0)

    # set the file to be read only
    os.chmod(viewfile.name, 0o400)

    # capture the temp file's mod time in case the user edits it
    # and overrides the readonly mode.
    modtime1 = os.stat(viewfile.name).st_mtime

    # in case editor has options, e.g. 'emacs -nw':
    command_list = shlex.split(editor)
    command_list.append(viewfile.name)
    command = ' '.join(command_list)
    # THIS BLOCKS UNTIL THE COMMAND COMPLETES
    retcode = call(command_list)  # nosec (editor command is user configurable)
    if retcode != 0:
        # the command returned non-zero exit status
        raise CylcError(f'{command} failed: {retcode}')

    # !!!VIEWING FINISHED!!!

    # Did the user edit the file
    modtime2 = os.stat(viewfile.name).st_mtime

    if modtime2 > modtime1:
        print(
            "\nWARNING: YOU HAVE EDITED A TEMPORARY READ-ONLY COPY "
            f"OF THE WORKFLOW:\n   {viewfile.name}\n",
            file=sys.stderr)
    # DONE
    viewfile.close()
Example #16
File: list.py Project: wxtim/cylc
def main(parser: COP, options: 'Values', workflow_id: str) -> None:
    workflow_id, _, flow_file = parse_id(
        workflow_id,
        src=True,
        constraint='workflows',
    )
    template_vars = get_template_vars(options)

    if options.all_tasks and options.all_namespaces:
        parser.error("Choose either -a or -n")
    if options.all_tasks:
        which = "all tasks"
    elif options.all_namespaces:
        which = "all namespaces"
    elif options.prange:
        which = "prange"
        if options.prange == ",":
            tr_start = None
            tr_stop = None
        elif options.prange.endswith(","):
            tr_start = options.prange[:-1]
            tr_stop = None
        elif options.prange.startswith(","):
            tr_start = None
            tr_stop = options.prange[1:]
        else:
            tr_start, tr_stop = options.prange.split(',')
    else:
        which = "graphed tasks"

    if options.tree and os.environ['LANG'] == 'C' and options.box:
        print("WARNING, ignoring -t/--tree: $LANG=C", file=sys.stderr)
        options.tree = False

    if options.titles and options.mro:
        parser.error("Please choose --mro or --title, not both")

    if options.tree and any(
        [options.all_tasks, options.all_namespaces, options.mro]):
        print("WARNING: -t chosen, ignoring non-tree options.",
              file=sys.stderr)
    config = WorkflowConfig(workflow_id, flow_file, options, template_vars)
    if options.tree:
        config.print_first_parent_tree(pretty=options.box,
                                       titles=options.titles)
    elif options.prange:
        for node in sorted(config.get_node_labels(tr_start, tr_stop)):
            print(node)
    else:
        result = config.get_namespace_list(which)
        namespaces = list(result)
        namespaces.sort()

        if options.mro or options.titles:
            # compute padding from the longest namespace name
            maxlen = max((len(ns) for ns in namespaces), default=0)
            padding = maxlen * ' '

        for ns in namespaces:
            if options.mro:
                print(ns, padding[0:len(padding) - len(ns)], end=' ')
                print(' '.join(config.get_mro(ns)))
            elif options.titles:
                print(ns, padding[0:len(padding) - len(ns)], end=' ')
                print(result[ns])
            else:
                print(ns)
Example #17
def main(parser: COP, options: 'Values', workflow_id: str) -> None:
    """cylc validate CLI."""
    profiler = Profiler(None, options.profile_mode)
    profiler.start()

    if cylc.flow.flags.verbosity < 2:
        disable_timestamps(LOG)

    workflow_id, _, flow_file = parse_id(
        workflow_id,
        src=True,
        constraint='workflows',
    )
    cfg = WorkflowConfig(workflow_id,
                         flow_file,
                         options,
                         get_template_vars(options),
                         output_fname=options.output,
                         mem_log_func=profiler.log_memory)

    # Instantiate tasks and force evaluation of trigger expressions.
    # (Taken from config.py to avoid circular import problems.)
    # TODO - This is not exhaustive, it only uses the initial cycle point.
    if cylc.flow.flags.verbosity > 0:
        print('Instantiating tasks to check trigger expressions')
    for name, taskdef in cfg.taskdefs.items():
        try:
            itask = TaskProxy(taskdef, cfg.start_point)
        except TaskProxySequenceBoundsError:
            # Should have already failed above
            mesg = 'Task out of bounds for %s: %s\n' % (cfg.start_point, name)
            if cylc.flow.flags.verbosity > 0:
                sys.stderr.write(' + %s\n' % mesg)
            continue
        except Exception as exc:
            raise WorkflowConfigError('failed to instantiate task %s: %s' %
                                      (name, exc))

        # force trigger evaluation now
        try:
            itask.state.prerequisites_eval_all()
        except TriggerExpressionError as exc:
            err = str(exc)
            if '@' in err:
                print(
                    f"ERROR, {name}: xtriggers can't be in conditional"
                    f" expressions: {err}",
                    file=sys.stderr)
            else:
                print('ERROR, %s: bad trigger: %s' % (name, err),
                      file=sys.stderr)
            raise WorkflowConfigError("ERROR: bad trigger")
        except Exception as exc:
            print(str(exc), file=sys.stderr)
            raise WorkflowConfigError('%s: failed to evaluate triggers.' %
                                      name)
        if cylc.flow.flags.verbosity > 0:
            print('  + %s ok' % itask.identity)

    print(cparse('<green>Valid for cylc-%s</green>' % CYLC_VERSION))
    profiler.stop()
Example #18
def main(parser: COP, options: 'Values', workflow_id: str) -> None:
    workflow_id, *_ = parse_id(
        workflow_id,
        constraint='workflows',
    )

    if options.use_task_point and options.cycle:
        raise UserInputError(
            "cannot specify a cycle point and use environment variable")

    if options.use_task_point:
        if "CYLC_TASK_CYCLE_POINT" not in os.environ:
            raise UserInputError("CYLC_TASK_CYCLE_POINT is not defined")
        options.cycle = os.environ["CYLC_TASK_CYCLE_POINT"]

    if options.offset and not options.cycle:
        raise UserInputError("You must target a cycle point to use an offset")

    # Attempt to apply specified offset to the targeted cycle
    if options.offset:
        options.cycle = str(add_offset(options.cycle, options.offset))

    # Exit if both task state and message are being polled
    if options.status and options.msg:
        raise UserInputError("cannot poll both status and custom output")

    if options.msg and not options.task and not options.cycle:
        raise UserInputError("need a taskname and cyclepoint")

    # Exit if an invalid status is requested
    if (options.status and options.status not in TASK_STATUSES_ORDERED
            and options.status not in CylcWorkflowDBChecker.STATE_ALIASES):
        raise UserInputError(f"invalid status '{options.status}'")

    # this only runs locally
    if options.run_dir:
        run_dir = expand_path(options.run_dir)
    else:
        run_dir = get_cylc_run_dir()

    pollargs = {
        'workflow_id': workflow_id,
        'run_dir': run_dir,
        'task': options.task,
        'cycle': options.cycle,
        'status': options.status,
        'message': options.msg,
    }

    spoller = WorkflowPoller(
        "requested state",
        options.interval,
        options.max_polls,
        args=pollargs,
    )

    connected, formatted_pt = spoller.connect()

    if not connected:
        raise CylcError("cannot connect to the workflow_id DB")

    if options.status and options.task and options.cycle:
        # check a task status
        spoller.condition = options.status
        if not asyncio.run(spoller.poll()):
            sys.exit(1)
    elif options.msg:
        # Check for a custom task output
        spoller.condition = "output: %s" % options.msg
        if not asyncio.run(spoller.poll()):
            sys.exit(1)
    else:
        # just display query results
        spoller.checker.display_maps(
            spoller.checker.workflow_state_query(task=options.task,
                                                 cycle=formatted_pt,
                                                 status=options.status))
Example #19
def main(_, options: 'Values', workflow_id: str) -> None:
    workflow_id, *_ = parse_id(
        workflow_id,
        constraint='workflows',
    )
    pclient = get_client(workflow_id, timeout=options.comms_timeout)

    if options.sort_by_cycle:
        sort_args = {'keys': ['cyclePoint', 'name']}
    else:
        sort_args = {'keys': ['name', 'cyclePoint']}

    if options.disp_form == "raw":
        query = f'''
            {TASK_SUMMARY_FRAGMENT}
            {FAMILY_SUMMARY_FRAGMENT}
            {WORKFLOW_SUMMARY_FRAGMENT}
            query ($wFlows: [ID]!, $sortBy: SortArgs) {{
              workflows (ids: $wFlows, stripNull: false) {{
                ...wFlow
                taskProxies (sort: $sortBy) {{
                  ...tProxy
                }}
                familyProxies (sort: $sortBy) {{
                  ...fProxy
                }}
              }}
            }}'''
    elif options.disp_form != "tasks":
        query = f'''
            {WORKFLOW_SUMMARY_FRAGMENT}
            query ($wFlows: [ID]!) {{
              workflows (ids: $wFlows, stripNull: false) {{
                ...wFlow
              }}
            }}'''
    else:
        query = f'''
            {TASK_SUMMARY_FRAGMENT}
            query ($wFlows: [ID]!, $sortBy: SortArgs) {{
              workflows (ids: $wFlows, stripNull: false) {{
                taskProxies (sort: $sortBy) {{
                  ...tProxy
                }}
              }}
            }}'''

    query_kwargs = {
        'request_string': query,
        'variables': {
            'wFlows': [workflow_id],
            'sortBy': sort_args
        }
    }

    workflows = pclient('graphql', query_kwargs)

    try:
        for summary in workflows['workflows']:
            if options.disp_form == "raw":
                if options.pretty:
                    sys.stdout.write(json.dumps(summary, indent=4) + '\n')
                else:
                    print(summary)
            else:
                if options.disp_form != "tasks":
                    node_urls = {
                        node['name']: node['meta']['URL']
                        for node in summary['tasks'] + summary['families']
                    }
                    summary['workflowUrls'] = {
                        node_name: node_urls[node_name]
                        for node_name in summary['namespaceDefinitionOrder']
                        if node_name in node_urls
                    }
                    summary['workflowUrls']['workflow_id'] = (
                        summary['meta']['URL'])
                    del summary['tasks']
                    del summary['families']
                    del summary['meta']
                    for key, value in sorted(summary.items()):
                        print(
                            f'{to_snake_case(key).replace("_", " ")}={value}')
                else:
                    for item in summary['taskProxies']:
                        if options.sort_by_cycle:
                            values = [
                                item['cyclePoint'], item['name'], item['state']
                            ]
                        else:
                            values = [
                                item['name'], item['cyclePoint'], item['state']
                            ]
                        values.append('held' if item['isHeld'] else 'not-held')
                        values.append(
                            'queued' if item['isQueued'] else 'not-queued')
                        values.append('runahead' if item['isRunahead'] else
                                      'not-runahead')
                        if options.show_flows:
                            values.append(item['flowNums'])
                        print(', '.join(values))
    except Exception as exc:
        raise CylcError(
            json.dumps(workflows, indent=4) + '\n' + str(exc) + '\n')
Example #20
def main(parser: COP, options: 'Values', *ids, color: bool = False) -> None:
    """Implement cylc cat-log CLI.

    Determine log path, user@host, batchview_cmd, and action (print, dir-list,
    cat, edit, or tail), and then if the log path is:
      a) local: perform action on log path, or
      b) remote: re-invoke cylc cat-log as a) on the remote account

    """
    if options.remote_args:
        # Invoked on job hosts for job logs only, as a wrapper to view_log().
        # (Tail and batchview commands come from global config on the
        # workflow host.)
        logpath, mode, tail_tmpl = options.remote_args[0:3]
        logpath = expand_path(logpath)
        tail_tmpl = expand_path(tail_tmpl)
        try:
            batchview_cmd = options.remote_args[3]
        except IndexError:
            batchview_cmd = None
        res = view_log(logpath,
                       mode,
                       tail_tmpl,
                       batchview_cmd,
                       remote=True,
                       color=color)
        if res == 1:
            sys.exit(res)
        return

    workflow_id, tokens, _ = parse_id(*ids, constraint='mixed')

    # Get long-format mode.
    try:
        mode = MODES[options.mode]
    except KeyError:
        mode = options.mode

    if not tokens or not tokens.get('task'):
        # Cat workflow logs, local only.
        if options.filename is not None:
            raise UserInputError("The '-f' option is for job logs only.")

        logpath = get_workflow_run_log_name(workflow_id)
        if options.rotation_num:
            logs = glob('%s.*' % logpath)
            logs.sort(key=os.path.getmtime, reverse=True)
            try:
                logpath = logs[int(options.rotation_num)]
            except IndexError:
                raise UserInputError("max rotation %d" % (len(logs) - 1))
        tail_tmpl = os.path.expandvars(get_platform()["tail command template"])
        out = view_log(logpath, mode, tail_tmpl, color=color)
        if out == 1:
            sys.exit(1)
        if mode == 'edit':
            tmpfile_edit(out, options.geditor)
        return

    else:
        # Cat task job logs, may be on workflow or job host.
        if options.rotation_num is not None:
            raise UserInputError("only workflow (not job) logs get rotated")
        task = tokens['task']
        point = tokens['cycle']
        if options.submit_num != NN:
            try:
                options.submit_num = "%02d" % int(options.submit_num)
            except ValueError:
                parser.error("Illegal submit number: %s" % options.submit_num)
        if options.filename is None:
            options.filename = JOB_LOG_OUT
        else:
            # Convert short filename args to long (e.g. 'o' to 'job.out').
            with suppress(KeyError):
                options.filename = JOB_LOG_OPTS[options.filename]
                # KeyError: Is already long form (standard log, or custom).
        platform_name, job_runner_name, live_job_id = get_task_job_attrs(
            workflow_id, point, task, options.submit_num)
        platform = get_platform(platform_name)
        batchview_cmd = None
        if live_job_id is not None:
            # Job is currently running. Get special job runner log view
            # command (e.g. qcat) if one exists, and the log is out or err.
            conf_key = None
            if options.filename == JOB_LOG_OUT:
                if mode == 'cat':
                    conf_key = "out viewer"
                elif mode == 'tail':
                    conf_key = "out tailer"
            elif options.filename == JOB_LOG_ERR:
                if mode == 'cat':
                    conf_key = "err viewer"
                elif mode == 'tail':
                    conf_key = "err tailer"
            if conf_key is not None:
                batchview_cmd_tmpl = None
                with suppress(KeyError):
                    batchview_cmd_tmpl = platform[conf_key]
                if batchview_cmd_tmpl is not None:
                    batchview_cmd = batchview_cmd_tmpl % {
                        "job_id": str(live_job_id)
                    }

        log_is_remote = (is_remote_platform(platform)
                         and (options.filename != JOB_LOG_ACTIVITY))
        log_is_retrieved = (platform['retrieve job logs']
                            and live_job_id is None)
        if log_is_remote and (not log_is_retrieved or options.force_remote):
            logpath = os.path.normpath(
                get_remote_workflow_run_job_dir(workflow_id, point, task,
                                                options.submit_num,
                                                options.filename))
            tail_tmpl = platform["tail command template"]
            # Reinvoke the cat-log command on the remote account.
            cmd = ['cat-log', *verbosity_to_opts(cylc.flow.flags.verbosity)]
            for item in [logpath, mode, tail_tmpl]:
                cmd.append('--remote-arg=%s' % shlex.quote(item))
            if batchview_cmd:
                cmd.append('--remote-arg=%s' % shlex.quote(batchview_cmd))
            cmd.append(workflow_id)
            is_edit_mode = (mode == 'edit')
            # TODO: Add Intelligent Host selection to this
            try:
                proc = remote_cylc_cmd(cmd,
                                       platform,
                                       capture_process=is_edit_mode,
                                       manage=(mode == 'tail'))
            except KeyboardInterrupt:
                # Ctrl-C while tailing.
                pass
            else:
                if is_edit_mode:
                    # Write remote stdout to a temp file for viewing in editor.
                    # Only BUFSIZE bytes at a time in case huge stdout volume.
                    out = NamedTemporaryFile()
                    data = proc.stdout.read(BUFSIZE)
                    while data:
                        out.write(data)
                        data = proc.stdout.read(BUFSIZE)
                    os.chmod(out.name, S_IRUSR)
                    out.seek(0, 0)
        else:
            # Local task job or local job log.
            logpath = os.path.normpath(
                get_workflow_run_job_dir(workflow_id, point, task,
                                         options.submit_num, options.filename))
            tail_tmpl = os.path.expandvars(platform["tail command template"])
            out = view_log(logpath,
                           mode,
                           tail_tmpl,
                           batchview_cmd,
                           color=color)
            if mode != 'edit':
                sys.exit(out)
        if mode == 'edit':
            tmpfile_edit(out, options.geditor)
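
The chunked-read loop in edit mode above (BUFSIZE bytes at a time) streams a possibly huge remote stdout into a temporary file; the standard library provides the same loop, as this minimal sketch with hypothetical names shows:

import shutil

def copy_stream(src, dst, bufsize=8192):
    # shutil.copyfileobj performs exactly this read/write loop.
    shutil.copyfileobj(src, dst, bufsize)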