Example #1
def main(parser: COP, options: 'Values', workflow: str, event_msg: str,
         event_id: str) -> None:
    workflow, _ = parse_reg(workflow)
    LOG.info('Send to workflow %s: "%s" (%s)', workflow, event_msg, event_id)
    pclient = get_client(workflow, timeout=options.comms_timeout)

    max_n_tries = int(options.max_n_tries)
    retry_intvl_secs = float(options.retry_intvl_secs)

    mutation_kwargs = {
        'request_string': MUTATION,
        'variables': {
            'wFlows': [workflow],
            'eventMsg': event_msg,
            'eventId': event_id,
        }
    }

    for i_try in range(max_n_tries):
        try:
            pclient('graphql', mutation_kwargs)
        except ClientError as exc:
            LOG.exception(exc)
            LOG.info(MSG_SEND_FAILED, i_try + 1, max_n_tries)
            if i_try == max_n_tries - 1:  # final attempt
                raise CylcError('send failed')
            LOG.info(MSG_SEND_RETRY, retry_intvl_secs, options.comms_timeout)
            sleep(retry_intvl_secs)
        else:
            if i_try > 0:
                LOG.info(MSG_SEND_SUCCEED, i_try + 1, max_n_tries)
            break
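
The retry loop above is the reusable part of this example: try the request, log the failure, sleep, and only re-raise on the final attempt. A self-contained sketch of the same shape, with a hypothetical send() callable standing in for the GraphQL request:

from time import sleep


def send_with_retries(send, max_n_tries: int, retry_intvl_secs: float) -> None:
    """Call send() up to max_n_tries times, sleeping between failures."""
    for i_try in range(max_n_tries):
        try:
            send()
        except Exception as exc:  # the example above catches ClientError only
            print(f'send failed ({i_try + 1}/{max_n_tries}): {exc}')
            if i_try == max_n_tries - 1:  # final attempt: give up
                raise
            sleep(retry_intvl_secs)
        else:
            return  # success
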
Example #2
def get_config(workflow: str, opts: 'Values') -> WorkflowConfig:
    """Return a WorkflowConfig object for the provided reg / path."""
    workflow, flow_file = parse_reg(workflow, src=True)
    template_vars = get_template_vars(opts)
    return WorkflowConfig(workflow,
                          flow_file,
                          opts,
                          template_vars=template_vars)
Example #3
def main(parser: COP, options: 'Values', workflow1: str, workflow2: str):
    workflow1_name, workflow1_fpath = parse_reg(workflow1, src=True)
    workflow2_name, workflow2_fpath = parse_reg(workflow2, src=True)
    if workflow1_fpath == workflow2_fpath:
        parser.error("You can't diff a single workflow.")
    print(f"Parsing {workflow1_name} ({workflow1_fpath})")
    template_vars = load_template_vars(options.templatevars,
                                       options.templatevars_file)
    config1 = WorkflowConfig(workflow1_name, workflow1_fpath, options,
                             template_vars).cfg
    print(f"Parsing {workflow2_name} ({workflow2_fpath})")
    config2 = WorkflowConfig(workflow2_name,
                             workflow2_fpath,
                             options,
                             template_vars,
                             is_reload=True).cfg

    if config1 == config2:
        print(
            f"Workflow definitions {workflow1_name} and {workflow2_name} are "
            f"identical")
        sys.exit(0)

    print(f"Workflow definitions {workflow1_name} and {workflow2_name} differ")

    workflow1_only = {}  # type: ignore
    workflow2_only = {}  # type: ignore
    diff_1_2 = {}  # type: ignore
    # TODO: this whole file could do with refactoring at some point

    diffdict(config1, config2, workflow1_only, workflow2_only, diff_1_2)
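    # NOTE: n_oone, n_otwo and n_diff below are module-level counters
    # updated as a side effect of diffdict()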

    if n_oone > 0:
        print(f'\n{n_oone} items only in {workflow1_name} (<)')
        prdict(workflow1_only, '<', nested=options.nested)

    if n_otwo > 0:
        print(f'\n{n_otwo} items only in {workflow2_name} (>)')
        prdict(workflow2_only, '>', nested=options.nested)

    if n_diff > 0:
        print(f'\n{n_diff} common items differ {workflow1_name}(<) '
              f'{workflow2_name}(>)')
        prdict(diff_1_2, '', diff=True, nested=options.nested)
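
diffdict and prdict are helpers from the same module and are not shown here. For orientation, a minimal recursive diff over two nested dicts could look like the sketch below (an illustration of the idea, not cylc's actual implementation):

def diff_nested(d1: dict, d2: dict,
                only1: dict, only2: dict, differ: dict) -> None:
    """Partition keys into d1-only, d2-only, and common-but-different."""
    for key, val in d1.items():
        if key not in d2:
            only1[key] = val
        elif isinstance(val, dict) and isinstance(d2[key], dict):
            # recurse into nested sections, keeping only non-empty results
            sub1, sub2, subd = {}, {}, {}
            diff_nested(val, d2[key], sub1, sub2, subd)
            for target, sub in ((only1, sub1), (only2, sub2), (differ, subd)):
                if sub:
                    target[key] = sub
        elif val != d2[key]:
            differ[key] = (val, d2[key])
    for key, val in d2.items():
        if key not in d1:
            only2[key] = val
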
Example #4
def main(parser: COP, options: 'Values', reg: str) -> None:
    reg, _ = parse_reg(reg)
    pclient = get_client(reg, timeout=options.comms_timeout)

    query_kwargs = {'request_string': QUERY, 'variables': {'wFlows': [reg]}}

    result = pclient('graphql', query_kwargs)

    for workflow in result['workflows']:
        print(workflow['cylcVersion'])
Example #5
def main(parser: COP,
         options: 'Values',
         reg: str,
         shutdown_arg: Optional[str] = None) -> None:
    if shutdown_arg is not None and options.kill:
        parser.error("ERROR: --kill is not compatible with [STOP]")

    if options.kill and options.now:
        parser.error("ERROR: --kill is not compatible with --now")

    if options.flow_label and int(options.max_polls) > 0:
        parser.error("ERROR: --flow is not compatible with --max-polls")

    reg, _ = parse_reg(reg)
    pclient = get_client(reg, timeout=options.comms_timeout)

    if int(options.max_polls) > 0:
        # (test to avoid the "nothing to do" warning for --max-polls=0)
        spoller = StopPoller(pclient, "workflow stopped", options.interval,
                             options.max_polls)

    # mode defaults to 'Clean'
    mode = None
    task = None
    cycle_point = None
    if shutdown_arg is not None and TaskID.is_valid_id(shutdown_arg):
        # STOP argument detected
        task = shutdown_arg
    elif shutdown_arg is not None:
        # not a task ID, may be a cycle point
        cycle_point = shutdown_arg
    elif options.kill:
        mode = WorkflowStopMode.Kill.name
    elif options.now > 1:
        mode = WorkflowStopMode.NowNow.name
    elif options.now:
        mode = WorkflowStopMode.Now.name

    mutation_kwargs = {
        'request_string': MUTATION,
        'variables': {
            'wFlows': [reg],
            'stopMode': mode,
            'cyclePoint': cycle_point,
            'clockTime': options.wall_clock,
            'task': task,
            'flowLabel': options.flow_label,
        }
    }

    pclient('graphql', mutation_kwargs)

    if int(options.max_polls) > 0 and not spoller.poll():
        # (test to avoid the "nothing to do" warning for --max-polls=0)
        sys.exit(1)
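
StopPoller (defined elsewhere in cylc) polls the scheduler until the workflow reports stopped, up to --max-polls times with --interval seconds between tries. The general contract is a poll-until-true loop; a generic sketch of that contract (StopPoller's real interface differs in detail):

from time import sleep
from typing import Callable


def poll_until(condition: Callable[[], bool], interval: float,
               max_polls: int) -> bool:
    """Return True as soon as condition() holds; False after max_polls tries."""
    for _ in range(max_polls):
        if condition():
            return True
        sleep(interval)
    return False
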
Example #6
def main(parser: COP, options: 'Values', reg: str) -> None:
    """CLI for "cylc get-workflow-contact"."""
    reg, _ = parse_reg(reg)
    try:
        data = load_contact_file(reg)
    except ServiceFileError:
        raise CylcError(
            f"{reg}: cannot get contact info, workflow not running?")
    else:
        for key, value in sorted(data.items()):
            print("%s=%s" % (key, value))
Example #7
def main(_, options: 'Values', reg: str) -> None:
    workflow, flow_file = parse_reg(reg, src=True)

    # extract task host platforms from the workflow
    config = WorkflowConfig(
        workflow, flow_file, options,
        load_template_vars(options.templatevars, options.templatevars_file))

    platforms = {
        config.get_config(['runtime', name, 'platform'])
        for name in config.get_namespace_list('all tasks')
    } - {None, 'localhost'}

    # When "workflow run hosts" are formalised as "flow platforms"
    # we can substitute `localhost` for this, in the mean time
    # we will have to assume that flow hosts are configured correctly.

    if not platforms:
        sys.exit(0)

    verbose = cylc.flow.flags.verbosity > 0

    # get the cylc version on each platform
    versions = {}
    for platform_name in sorted(platforms):
        platform = get_platform(platform_name)
        host = get_host_from_platform(platform, bad_hosts=None)
        cmd = construct_ssh_cmd(['version'], platform, host)
        if verbose:
            print(cmd)
        proc = procopen(cmd, stdin=DEVNULL, stdout=PIPE, stderr=PIPE)
        out, err = proc.communicate()
        out = out.decode()
        err = err.decode()
        if proc.wait() == 0:
            if verbose:
                print("   %s" % out)
            versions[platform_name] = out.strip()
        else:
            versions[platform_name] = f'ERROR: {err.strip()}'

    # report results
    max_len = max((len(platform_name) for platform_name in platforms))
    print(f'{"platform".rjust(max_len)}: cylc version')
    print('-' * (max_len + 14))
    for platform_name, result in versions.items():
        print(f'{platform_name.rjust(max_len)}: {result}')
    if all((version == CYLC_VERSION for version in versions.values())):
        ret_code = 0
    elif options.error:
        ret_code = 1
    else:
        ret_code = 0
    sys.exit(ret_code)
Example #8
def main(parser: COP, options: 'Values', workflow: str) -> None:
    workflow, _ = parse_reg(workflow)
    pclient = WorkflowRuntimeClient(workflow, timeout=options.comms_timeout)

    mutation_kwargs = {
        'request_string': MUTATION,
        'variables': {
            'wFlows': [workflow],
        }
    }

    pclient('graphql', mutation_kwargs)
Example #9
def main(parser: COP, options: 'Values', *args: str) -> None:
    """CLI."""
    if not args:
        return parser.error('No message supplied')
    if len(args) <= 2:
        # BACK COMPAT: args <= 2
        # from:
        #     7.6?
        # remove at:
        #     9.0?
        # (As of Dec 2020 some functional tests still use the classic
        # two arg interface)
        workflow = os.getenv('CYLC_WORKFLOW_NAME')
        task_job = os.getenv('CYLC_TASK_JOB')
        message_strs = list(args)
    else:
        workflow, task_job, *message_strs = args
        workflow, _ = parse_reg(workflow)
    # Read messages from STDIN
    if '-' in message_strs:
        current_message_str = ''
        while True:  # Note: `for line in sys.stdin:` can hang
            message_str = sys.stdin.readline()
            if message_str.strip():
                # non-empty line
                current_message_str += message_str
            elif message_str:
                # empty line, start next message
                if current_message_str:
                    message_strs.append(current_message_str)
                current_message_str = ''  # reset
            else:
                # end of file
                if current_message_str:
                    message_strs.append(current_message_str)
                break
    # Separate "severity: message"
    messages = []  # [(severity, message_str), ...]
    for message_str in message_strs:
        if message_str == '-':
            pass
        elif ':' in message_str:
            valid, err_msg = TaskMessageValidator.validate(message_str)
            if not valid:
                raise UserInputError(
                    f'Invalid task message "{message_str}" - {err_msg}')
            messages.append(
                [item.strip() for item in message_str.split(':', 1)])
        elif options.severity:
            messages.append([options.severity, message_str.strip()])
        else:
            messages.append([getLevelName(INFO), message_str.strip()])
    record_messages(workflow, task_job, messages)
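
The STDIN protocol here treats a non-empty line as message content, a blank line as a message separator, and EOF as the end of input; readline() is used because `for line in sys.stdin:` can block on pipes. The same parse, factored into a standalone generator (a sketch):

import sys
from typing import Iterator, TextIO


def read_messages(stream: TextIO) -> Iterator[str]:
    """Yield blank-line-separated messages read from a stream."""
    current = ''
    while True:
        line = stream.readline()
        if line.strip():
            # non-empty line: accumulate
            current += line
        elif line:
            # blank line: message boundary
            if current:
                yield current
            current = ''
        else:
            # end of file: flush and stop
            if current:
                yield current
            return

# usage: message_strs.extend(read_messages(sys.stdin))
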
Example #10
def main(parser: COP, options: 'Values', workflow: str, *task_globs: str):
    workflow, _ = parse_reg(workflow)
    pclient = get_client(workflow, timeout=options.comms_timeout)

    mutation_kwargs = {
        'request_string': MUTATION,
        'variables': {
            'wFlows': [workflow],
            'tasks': list(task_globs),
        }
    }

    pclient('graphql', mutation_kwargs)
Example #11
def main(parser: COP, opts: 'Values', reg: Optional[str] = None) -> None:
    run_dir: Optional[Path]
    if reg is None:
        try:
            reg = str(Path.cwd().relative_to(
                Path(get_cylc_run_dir()).resolve()))
        except ValueError:
            raise WorkflowFilesError(
                "The current working directory is not a workflow run directory"
            )
    else:
        reg, _ = parse_reg(reg)
    run_dir = Path(get_workflow_run_dir(reg))
    if not run_dir.is_dir():
        raise WorkflowFilesError(f'"{reg}" is not an installed workflow.')
    source, source_symlink = get_workflow_source_dir(run_dir)
    if not source:
        raise WorkflowFilesError(
            f'"{reg}" was not installed with cylc install.')
    if not Path(source).is_dir():
        raise WorkflowFilesError(
            f'Workflow source dir is not accessible: "{source}".\n'
            f'Restore the source or modify the "{source_symlink}"'
            ' symlink to continue.')
    for entry_point in iter_entry_points('cylc.pre_configure'):
        try:
            entry_point.resolve()(srcdir=source, opts=opts)
        except Exception as exc:
            # NOTE: except Exception (purposefully vague)
            # this is to separate plugin from core Cylc errors
            raise PluginError('cylc.pre_configure', entry_point.name,
                              exc) from None

    reinstall_workflow(
        named_run=reg,
        rundir=run_dir,
        source=source,
        dry_run=False  # TODO: ready for dry run implementation
    )

    for entry_point in iter_entry_points('cylc.post_install'):
        try:
            entry_point.resolve()(srcdir=source,
                                  opts=opts,
                                  rundir=str(run_dir))
        except Exception as exc:
            # NOTE: except Exception (purposefully vague)
            # this is to separate plugin from core Cylc errors
            raise PluginError('cylc.post_install', entry_point.name,
                              exc) from None
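
Both hooks use setuptools entry points as a plugin mechanism: resolve each callable registered under a named group and invoke it, wrapping any failure so plugin errors stay distinguishable from core Cylc errors. A minimal sketch of the same dispatch using importlib.metadata (assuming Python 3.10+; the example itself uses pkg_resources-style iter_entry_points):

from importlib.metadata import entry_points


def run_plugins(group: str, **kwargs) -> None:
    """Load and call every plugin registered under `group`."""
    for entry_point in entry_points(group=group):
        func = entry_point.load()  # import the plugin callable
        try:
            func(**kwargs)
        except Exception as exc:
            # wrap so plugin failures are distinguishable from core errors
            raise RuntimeError(
                f'plugin {group}:{entry_point.name} failed') from exc

# e.g. run_plugins('cylc.post_install', srcdir=source, opts=opts,
#                  rundir=str(run_dir))
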
Example #12
def main(parser: COP, options: 'Values', reg: str, *task_globs: str) -> None:
    reg, _ = parse_reg(reg)
    pclient = get_client(reg, timeout=options.comms_timeout)

    mutation_kwargs = {
        'request_string': MUTATION,
        'variables': {
            'wFlows': [reg],
            'tasks': list(task_globs),
            'outputs': options.outputs,
        }
    }

    pclient('graphql', mutation_kwargs)
Example #13
def main(
    parser: COP,
    options: 'Values',
    workflow: str,
    task_id: Optional[str] = None
) -> None:
    workflow, _ = parse_reg(workflow)
    pclient = get_client(workflow, timeout=options.comms_timeout)

    if task_id and not TaskID.is_valid_id(task_id):
        raise UserInputError("Invalid task ID: %s" % task_id)

    flow_kwargs = {
        'request_string': FLOW_QUERY,
        'variables': {'wFlows': [workflow]}
    }
    task_kwargs: Dict[str, Any] = {
        'request_string': TASK_QUERY,
    }
    # cylc ping WORKFLOW
    result = pclient('graphql', flow_kwargs)
    msg = ""
    for flow in result['workflows']:
        w_name = flow['name']
        w_port = flow['port']
        w_pub_port = flow['pubPort']
        if cylc.flow.flags.verbosity > 0:
            sys.stdout.write(
                f'{w_name} running on '
                f'{pclient.host}:{w_port} {w_pub_port}\n'
            )
        # cylc ping WORKFLOW TASKID
        if task_id:
            task, point = TaskID.split(task_id)
            w_id = flow['id']
            task_kwargs['variables'] = {
                'tProxy': f'{w_id}{ID_DELIM}{point}{ID_DELIM}{task}'
            }
            task_result = pclient('graphql', task_kwargs)
            if not task_result.get('taskProxy'):
                msg = "task not found"
            elif task_result['taskProxy']['state'] != TASK_STATUS_RUNNING:
                msg = f"task not {TASK_STATUS_RUNNING}"
            if msg:
                print(cparse(f'<red>{msg}</red>'))
                sys.exit(1)
Example #14
def main(parser: COP, options: 'Values', workflow: str, *task_globs: str):
    """CLI for "cylc trigger"."""
    if options.flow_descr and not options.reflow:
        parser.error("--meta requires --reflow")
    workflow, _ = parse_reg(workflow)
    pclient = get_client(workflow, timeout=options.comms_timeout)

    mutation_kwargs = {
        'request_string': MUTATION,
        'variables': {
            'wFlows': [workflow],
            'tasks': list(task_globs),
            'reflow': options.reflow,
            'flowDescr': options.flow_descr,
        }
    }

    pclient('graphql', mutation_kwargs)
Example #15
def main(parser: COP, options: 'Values', reg: str, severity_str: str) -> None:
    try:
        severity = LOG_LEVELS[severity_str]
    except KeyError:
        parser.error("Illegal logging level, %s" % severity_str)

    reg, _ = parse_reg(reg)
    pclient = get_client(reg, timeout=options.comms_timeout)

    mutation_kwargs = {
        'request_string': MUTATION,
        'variables': {
            'wFlows': [reg],
            'level': severity,
        }
    }

    pclient('graphql', mutation_kwargs)
Example #16
def main(_, options: 'Values', workflow: str, func: str) -> None:
    workflow, _ = parse_reg(workflow)
    pclient = WorkflowRuntimeClient(workflow, timeout=options.comms_timeout)
    if options.no_input:
        kwargs = {}
    else:
        kwargs = json.load(sys.stdin)
    sys.stdin.close()
    res = pclient(func, kwargs)
    if func in PB_METHOD_MAP:
        if 'element_type' in kwargs:
            pb_msg = PB_METHOD_MAP[func][kwargs['element_type']]()
        else:
            pb_msg = PB_METHOD_MAP[func]()
        pb_msg.ParseFromString(res)
        res_msg = MessageToDict(pb_msg)
    else:
        res_msg = res
    sys.stdout.write(json.dumps(res_msg, indent=4) + '\n')
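
PB_METHOD_MAP evidently maps endpoint names (and optionally element types) to protobuf message classes; the decode step is then the standard protobuf round-trip. That step in isolation (a sketch assuming the protobuf package; pb_cls is whatever class the map yields):

from google.protobuf.json_format import MessageToDict


def decode_pb(pb_cls, payload: bytes) -> dict:
    """Deserialize a protobuf payload into a plain dict."""
    pb_msg = pb_cls()  # instantiate the mapped message type
    pb_msg.ParseFromString(payload)
    return MessageToDict(pb_msg)
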
Example #17
def main(_, options: 'Values', reg: str) -> None:
    reg, _ = parse_reg(reg)
    screen = None
    if options.display == 'html':
        TREE_EXPAND_DEPTH[0] = -1  # expand tree fully
        screen = html_fragment.HtmlGenerator()
        screen.set_terminal_properties(256)
        screen.register_palette(TuiApp.palette)
        html_fragment.screenshot_init(
            [tuple(map(int, options.v_term_size.split(',')))], [])

    try:
        TuiApp(reg, screen=screen).main()

        if options.display == 'html':
            for fragment in html_fragment.screenshot_collect():
                print(fragment)
    except KeyboardInterrupt:
        pass
Example #18
def main(parser: COP, options: 'Values', reg: Optional[str] = None) -> None:
    if options.print_hierarchy:
        print("\n".join(get_config_file_hierarchy(reg)))
        return

    if reg is None:
        glbl_cfg().idump(options.item,
                         not options.defaults,
                         oneline=options.oneline,
                         none_str=options.none_str)
        return

    workflow, flow_file = parse_reg(reg, src=True)

    config = WorkflowConfig(workflow, flow_file, options,
                            get_template_vars(options, flow_file))

    config.pcfg.idump(options.item,
                      not options.defaults,
                      oneline=options.oneline,
                      none_str=options.none_str)
Example #19
def main(parser: COP, options: 'Values', workflow: str, *task_globs: str):

    _validate(options, *task_globs)

    workflow, _ = parse_reg(workflow)
    pclient = get_client(workflow, timeout=options.comms_timeout)

    if options.release_all:
        mutation = RELEASE_HOLD_POINT_MUTATION
        args = {}
    else:
        mutation = RELEASE_MUTATION
        args = {'tasks': list(task_globs)}

    mutation_kwargs = {
        'request_string': mutation,
        'variables': {
            'wFlows': [workflow],
            **args
        }
    }

    pclient('graphql', mutation_kwargs)
Example #20
def main(parser: COP, options: 'Values', workflow: str) -> None:
    workflow, _ = parse_reg(workflow)

    output_options = [
        options.show_raw, options.show_summary, options.html_summary
    ]
    if output_options.count(True) > 1:
        parser.error('Cannot combine output formats (choose one)')
    if not any(output_options):
        # No output specified - choose summary by default
        options.show_summary = True

    run_db = _get_dao(workflow)
    row_buf = format_rows(*run_db.select_task_times())
    with smart_open(options.output_filename) as output:
        if options.show_raw:
            output.write(row_buf.getvalue())
        else:
            summary: TimingSummary
            if options.show_summary:
                summary = TextTimingSummary(row_buf)
            elif options.html_summary:
                summary = HTMLTimingSummary(row_buf)
            summary.write_summary(output)
Example #21
def main(_, options: 'Values', workflow: str) -> None:
    workflow, _ = parse_reg(workflow)
    pclient = get_client(workflow, timeout=options.comms_timeout)

    if options.sort_by_cycle:
        sort_args = {'keys': ['cyclePoint', 'name']}
    else:
        sort_args = {'keys': ['name', 'cyclePoint']}

    if options.disp_form == "raw":
        query = f'''
            {TASK_SUMMARY_FRAGMENT}
            {FAMILY_SUMMARY_FRAGMENT}
            {WORKFLOW_SUMMARY_FRAGMENT}
            query ($wFlows: [ID]!, $sortBy: SortArgs) {{
              workflows (ids: $wFlows, stripNull: false) {{
                ...wFlow
                taskProxies (sort: $sortBy) {{
                  ...tProxy
                }}
                familyProxies (sort: $sortBy) {{
                  ...fProxy
                }}
              }}
            }}'''
    elif options.disp_form != "tasks":
        query = f'''
            {WORKFLOW_SUMMARY_FRAGMENT}
            query ($wFlows: [ID]!) {{
              workflows (ids: $wFlows, stripNull: false) {{
                ...wFlow
              }}
            }}'''
    else:
        query = f'''
            {TASK_SUMMARY_FRAGMENT}
            query ($wFlows: [ID]!, $sortBy: SortArgs) {{
              workflows (ids: $wFlows, stripNull: false) {{
                taskProxies (sort: $sortBy) {{
                  ...tProxy
                }}
              }}
            }}'''

    query_kwargs = {
        'request_string': query,
        'variables': {'wFlows': [workflow], 'sortBy': sort_args}
    }

    workflows = pclient('graphql', query_kwargs)

    try:
        for summary in workflows['workflows']:
            if options.disp_form == "raw":
                if options.pretty:
                    sys.stdout.write(json.dumps(summary, indent=4) + '\n')
                else:
                    print(summary)
            else:
                if options.disp_form != "tasks":
                    node_urls = {
                        node['name']: node['meta']['URL']
                        for node in summary['tasks'] + summary['families']
                    }
                    summary['workflowUrls'] = {
                        node_name: node_urls[node_name]
                        for node_name in summary['namespaceDefinitionOrder']
                        if node_name in node_urls
                    }
                    summary['workflowUrls']['workflow'] = (
                        summary['meta']['URL'])
                    del summary['tasks']
                    del summary['families']
                    del summary['meta']
                    for key, value in sorted(summary.items()):
                        print(
                            f'{to_snake_case(key).replace("_", " ")}={value}')
                else:
                    for item in summary['taskProxies']:
                        if options.sort_by_cycle:
                            values = [
                                item['cyclePoint'],
                                item['name'],
                                item['state']]
                        else:
                            values = [
                                item['name'],
                                item['cyclePoint'],
                                item['state']]
                        values.append('held' if item['isHeld'] else 'unheld')
                        values.append('queued' if item['isQueued']
                                      else 'not-queued')
                        values.append('runahead' if item['isRunahead']
                                      else 'not-runahead')
                        if options.flow:
                            values.append(item['flowLabel'])
                        print(', '.join(values))
    except Exception as exc:
        raise CylcError(
            json.dumps(workflows, indent=4) + '\n' + str(exc) + '\n')
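
to_snake_case is referenced but not shown; it converts camelCase GraphQL keys (e.g. 'cyclePoint') for display. A plausible stdlib-only sketch, not necessarily cylc's implementation:

import re


def to_snake_case(name: str) -> str:
    """Convert 'namespaceDefinitionOrder' -> 'namespace_definition_order'."""
    return re.sub(r'(?<!^)(?=[A-Z])', '_', name).lower()
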
Example #22
def main(_, options: 'Values', reg: str, *task_args: str) -> None:
    """Implement "cylc show" CLI."""
    reg, _ = parse_reg(reg)
    pclient = get_client(reg, timeout=options.comms_timeout)
    json_filter = {}

    if not task_args:
        query = WORKFLOW_META_QUERY
        query_kwargs = {
            'request_string': query,
            'variables': {
                'wFlows': [reg]
            }
        }
        # Print workflow info.
        results = pclient('graphql', query_kwargs)
        for reg in results['workflows']:
            flat_data = flatten_data(reg)
            if options.json:
                json_filter.update(flat_data)
            else:
                for key, value in sorted(flat_data.items(), reverse=True):
                    ansiprint(
                        f'<bold>{key}:</bold> {value or "<m>(not given)</m>"}')

    task_names = [arg for arg in task_args if TaskID.is_valid_name(arg)]
    task_ids = [arg for arg in task_args if TaskID.is_valid_id_2(arg)]

    if task_names:
        tasks_query = TASK_META_QUERY
        tasks_kwargs = {
            'request_string': tasks_query,
            'variables': {
                'wFlows': [reg],
                'taskIds': task_names
            }
        }
        # Print task metadata.
        results = pclient('graphql', tasks_kwargs)
        multi = len(results['tasks']) > 1
        for task in results['tasks']:
            flat_data = flatten_data(task['meta'])
            if options.json:
                json_filter.update({task['name']: flat_data})
            else:
                if multi:
                    print(f'----\nTASK NAME: {task["name"]}')
                for key, value in sorted(flat_data.items(), reverse=True):
                    ansiprint(
                        f'<bold>{key}:</bold> {value or "<m>(not given)</m>"}')

    if task_ids:
        tp_query = TASK_PREREQS_QUERY
        tp_kwargs = {
            'request_string': tp_query,
            'variables': {
                'wFlows': [reg],
                'taskIds': [
                    f'{c}{ID_DELIM}{n}' for n, c in [
                        TaskID.split(t_id)
                        for t_id in task_ids if TaskID.is_valid_id(t_id)
                    ]
                ] + [
                    f'{c}{ID_DELIM}{n}' for c, n in [
                        t_id.rsplit(TaskID.DELIM2, 1)
                        for t_id in task_ids if not TaskID.is_valid_id(t_id)
                    ]
                ]
            }
        }
        results = pclient('graphql', tp_kwargs)
        multi = len(results['taskProxies']) > 1
        for t_proxy in results['taskProxies']:
            task_id = TaskID.get(t_proxy['name'], t_proxy['cyclePoint'])
            if options.json:
                json_filter.update({task_id: t_proxy})
            else:
                if multi:
                    print(f'----\nTASK ID: {task_id}')
                prereqs = []
                for item in t_proxy['prerequisites']:
                    prefix = ''
                    multi_cond = len(item['conditions']) > 1
                    if multi_cond:
                        prereqs.append([
                            True, '', item['expression'].replace('c', ''),
                            item['satisfied']
                        ])
                    for cond in item['conditions']:
                        if multi_cond and not options.list_prereqs:
                            prefix = f'\t{cond["exprAlias"].strip("c")} = '
                        _, _, point, name = cond['taskId'].split(ID_DELIM)
                        cond_id = TaskID.get(name, point)
                        prereqs.append([
                            False, prefix, f'{cond_id} {cond["reqState"]}',
                            cond['satisfied']
                        ])
                if options.list_prereqs:
                    for composite, _, msg, _ in prereqs:
                        if not composite:
                            print(msg)
                else:
                    flat_meta = flatten_data(t_proxy['task']['meta'])
                    for key, value in sorted(flat_meta.items(), reverse=True):
                        ansiprint(f'<bold>{key}:</bold>'
                                  f' {value or "<m>(not given)</m>"}')
                    ansiprint('\n<bold>prerequisites</bold>'
                              ' (<red>- => not satisfied</red>):')
                    if not prereqs:
                        print('  (None)')
                    for _, prefix, msg, state in prereqs:
                        print_msg_state(f'{prefix}{msg}', state)

                    ansiprint('\n<bold>outputs</bold>'
                              ' (<red>- => not completed</red>):')
                    if not t_proxy['outputs']:
                        print('  (None)')
                    for output in t_proxy['outputs']:
                        info = f'{task_id} {output["label"]}'
                        print_msg_state(info, output['satisfied'])
                    if (t_proxy['clockTrigger']['timeString']
                            or t_proxy['externalTriggers']
                            or t_proxy['xtriggers']):
                        ansiprint('\n<bold>other</bold>'
                                  ' (<red>- => not satisfied</red>):')
                        if t_proxy['clockTrigger']['timeString']:
                            state = t_proxy['clockTrigger']['satisfied']
                            time_str = t_proxy['clockTrigger']['timeString']
                            print_msg_state('Clock trigger time reached',
                                            state)
                            print(f'  o Triggers at ... {time_str}')
                        for ext_trig in t_proxy['externalTriggers']:
                            state = ext_trig['satisfied']
                            print_msg_state(f'{ext_trig["label"]} ... {state}',
                                            state)
                        for xtrig in t_proxy['xtriggers']:
                            state = xtrig['satisfied']
                            print_msg_state(
                                f'xtrigger "{xtrig["label"]} = {xtrig["id"]}"',
                                state)
        if not results['taskProxies']:
            ansiprint(f"<red>No matching tasks found: {task_ids}",
                      file=sys.stderr)
            sys.exit(1)

    if options.json:
        print(json.dumps(json_filter, indent=4))
Example #23
0
def main(parser: COP, options: 'Values', reg: str) -> None:
    """cylc validate CLI."""
    profiler = Profiler(None, options.profile_mode)
    profiler.start()

    if cylc.flow.flags.verbosity < 2:
        # for readability omit timestamps from logging unless in debug mode
        for handler in LOG.handlers:
            if isinstance(handler.formatter, CylcLogFormatter):
                handler.formatter.configure(timestamp=False)

    workflow, flow_file = parse_reg(reg, src=True)
    cfg = WorkflowConfig(workflow,
                         flow_file,
                         options,
                         get_template_vars(options, flow_file),
                         output_fname=options.output,
                         mem_log_func=profiler.log_memory)

    # Check bounds of sequences
    out_of_bounds = [
        str(seq) for seq in cfg.sequences
        if seq.get_first_point(cfg.start_point) is None
    ]
    if out_of_bounds:
        if len(out_of_bounds) > 1:
            # avoid spamming users with multiple warnings
            out_of_bounds_str = '\n'.join(
                textwrap.wrap(', '.join(out_of_bounds), 70))
            msg = ("multiple sequences out of bounds for initial cycle point "
                   f"{cfg.start_point}:\n{out_of_bounds_str}")
        else:
            msg = (f"{out_of_bounds[0]}: sequence out of bounds for "
                   f"initial cycle point {cfg.start_point}")
        LOG.warning(msg)

    # Instantiate tasks and force evaluation of trigger expressions.
    # (Taken from config.py to avoid circular import problems.)
    # TODO - This is not exhaustive, it only uses the initial cycle point.
    if cylc.flow.flags.verbosity > 0:
        print('Instantiating tasks to check trigger expressions')
    flow_label = FlowLabelMgr().get_new_label()
    for name, taskdef in cfg.taskdefs.items():
        try:
            itask = TaskProxy(taskdef, cfg.start_point, flow_label)
        except TaskProxySequenceBoundsError:
            # Should have already failed above
            mesg = 'Task out of bounds for %s: %s\n' % (cfg.start_point, name)
            if cylc.flow.flags.verbosity > 0:
                sys.stderr.write(' + %s\n' % mesg)
            continue
        except Exception as exc:
            raise WorkflowConfigError('failed to instantiate task %s: %s' %
                                      (name, exc))

        # force trigger evaluation now
        try:
            itask.state.prerequisites_eval_all()
        except TriggerExpressionError as exc:
            err = str(exc)
            if '@' in err:
                print(
                    f"ERROR, {name}: xtriggers can't be in conditional"
                    f" expressions: {err}",
                    file=sys.stderr)
            else:
                print('ERROR, %s: bad trigger: %s' % (name, err),
                      file=sys.stderr)
            raise WorkflowConfigError("ERROR: bad trigger")
        except Exception as exc:
            print(str(exc), file=sys.stderr)
            raise WorkflowConfigError('%s: failed to evaluate triggers.' %
                                      name)
        if cylc.flow.flags.verbosity > 0:
            print('  + %s ok' % itask.identity)

    print(cparse('<green>Valid for cylc-%s</green>' % CYLC_VERSION))
    profiler.stop()
Example #24
File: list.py Project: lparkes/cylc
def main(parser: COP, options: 'Values', reg: str) -> None:
    workflow, flow_file = parse_reg(reg, src=True)
    template_vars = get_template_vars(options)

    if options.all_tasks and options.all_namespaces:
        parser.error("Choose either -a or -n")
    if options.all_tasks:
        which = "all tasks"
    elif options.all_namespaces:
        which = "all namespaces"
    elif options.prange:
        which = "prange"
        if options.prange == ",":
            tr_start = None
            tr_stop = None
        elif options.prange.endswith(","):
            tr_start = options.prange[:-1]
            tr_stop = None
        elif options.prange.startswith(","):
            tr_start = None
            tr_stop = options.prange[1:]
        else:
            tr_start, tr_stop = options.prange.split(',')
    else:
        which = "graphed tasks"

    if options.tree and os.environ.get('LANG') == 'C' and options.box:
        print("WARNING, ignoring -t/--tree: $LANG=C", file=sys.stderr)
        options.tree = False

    if options.titles and options.mro:
        parser.error("Please choose --mro or --title, not both")

    if options.tree and any(
            [options.all_tasks, options.all_namespaces, options.mro]):
        print("WARNING: -t chosen, ignoring non-tree options.",
              file=sys.stderr)
    config = WorkflowConfig(
        workflow,
        flow_file,
        options,
        template_vars
    )
    if options.tree:
        config.print_first_parent_tree(
            pretty=options.box, titles=options.titles)
    elif options.prange:
        for node in sorted(config.get_node_labels(tr_start, tr_stop)):
            print(node)
    else:
        result = config.get_namespace_list(which)
        namespaces = list(result)
        namespaces.sort()

        if (options.mro or options.titles):
            # compute padding
            maxlen = 0
            for ns in namespaces:
                if len(ns) > maxlen:
                    maxlen = len(ns)
            padding = maxlen * ' '

        for ns in namespaces:
            if options.mro:
                print(ns, padding[0:len(padding) - len(ns)], end=' ')
                print(' '.join(config.get_mro(ns)))
            elif options.titles:
                print(ns, padding[0:len(padding) - len(ns)], end=' ')
                print(result[ns])
            else:
                print(ns)
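
The manual padding computation in the final branch can be written more compactly with max() and str.ljust(); an equivalent sketch with hypothetical sample data:

namespaces = ['root', 'FAMILY', 'long_task_name']  # sample data
titles = {'root': '', 'FAMILY': 'a family', 'long_task_name': 'a task'}

maxlen = max((len(ns) for ns in namespaces), default=0)
for ns in sorted(namespaces):
    print(ns.ljust(maxlen), titles[ns])  # pad name to the column width
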
Example #25
def main(parser: COP,
         options: 'Values',
         reg: str,
         task_id: Optional[str] = None,
         color: bool = False) -> None:
    """Implement cylc cat-log CLI.

    Determine log path, user@host, batchview_cmd, and action (print, dir-list,
    cat, edit, or tail), and then if the log path is:
      a) local: perform action on log path, or
      b) remote: re-invoke cylc cat-log as a) on the remote account

    """
    if options.remote_args:
        # Invoked on job hosts for job logs only, as a wrapper to view_log().
        # (Tail and batchview commands come from global config on the
        # workflow host.)
        logpath, mode, tail_tmpl = options.remote_args[0:3]
        logpath = expand_path(logpath)
        tail_tmpl = expand_path(tail_tmpl)
        try:
            batchview_cmd = options.remote_args[3]
        except IndexError:
            batchview_cmd = None
        res = view_log(logpath,
                       mode,
                       tail_tmpl,
                       batchview_cmd,
                       remote=True,
                       color=color)
        if res == 1:
            sys.exit(res)
        return

    workflow_name, _ = parse_reg(reg)
    # Get long-format mode.
    try:
        mode = MODES[options.mode]
    except KeyError:
        mode = options.mode

    if not task_id:
        # Cat workflow logs, local only.
        if options.filename is not None:
            raise UserInputError("The '-f' option is for job logs only.")

        logpath = get_workflow_run_log_name(workflow_name)
        if options.rotation_num:
            logs = glob('%s.*' % logpath)
            logs.sort(key=os.path.getmtime, reverse=True)
            try:
                logpath = logs[int(options.rotation_num)]
            except IndexError:
                raise UserInputError("max rotation %d" % (len(logs) - 1))
        tail_tmpl = os.path.expandvars(get_platform()["tail command template"])
        out = view_log(logpath, mode, tail_tmpl, color=color)
        if out == 1:
            sys.exit(1)
        if mode == 'edit':
            tmpfile_edit(out, options.geditor)
        return

    if task_id:
        # Cat task job logs, may be on workflow or job host.
        if options.rotation_num is not None:
            raise UserInputError("only workflow (not job) logs get rotated")
        try:
            task, point = TaskID.split(task_id)
        except ValueError:
            parser.error("Illegal task ID: %s" % task_id)
        if options.submit_num != NN:
            try:
                options.submit_num = "%02d" % int(options.submit_num)
            except ValueError:
                parser.error("Illegal submit number: %s" % options.submit_num)
        if options.filename is None:
            options.filename = JOB_LOG_OUT
        else:
            # Convert short filename args to long (e.g. 'o' to 'job.out').
            with suppress(KeyError):
                options.filename = JOB_LOG_OPTS[options.filename]
                # KeyError: Is already long form (standard log, or custom).
        platform_name, job_runner_name, live_job_id = get_task_job_attrs(
            workflow_name, point, task, options.submit_num)
        platform = get_platform(platform_name)
        batchview_cmd = None
        if live_job_id is not None:
            # Job is currently running. Get special job runner log view
            # command (e.g. qcat) if one exists, and the log is out or err.
            conf_key = None
            if options.filename == JOB_LOG_OUT:
                if mode == 'cat':
                    conf_key = "out viewer"
                elif mode == 'tail':
                    conf_key = "out tailer"
            elif options.filename == JOB_LOG_ERR:
                if mode == 'cat':
                    conf_key = "err viewer"
                elif mode == 'tail':
                    conf_key = "err tailer"
            if conf_key is not None:
                batchview_cmd_tmpl = None
                with suppress(KeyError):
                    batchview_cmd_tmpl = platform[conf_key]
                if batchview_cmd_tmpl is not None:
                    batchview_cmd = batchview_cmd_tmpl % {
                        "job_id": str(live_job_id)
                    }

        log_is_remote = (is_remote_platform(platform)
                         and (options.filename != JOB_LOG_ACTIVITY))
        log_is_retrieved = (platform['retrieve job logs']
                            and live_job_id is None)
        if log_is_remote and (not log_is_retrieved or options.force_remote):
            logpath = os.path.normpath(
                get_remote_workflow_run_job_dir(workflow_name, point, task,
                                                options.submit_num,
                                                options.filename))
            tail_tmpl = platform["tail command template"]
            # Reinvoke the cat-log command on the remote account.
            cmd = ['cat-log', *verbosity_to_opts(cylc.flow.flags.verbosity)]
            for item in [logpath, mode, tail_tmpl]:
                cmd.append('--remote-arg=%s' % shlex.quote(item))
            if batchview_cmd:
                cmd.append('--remote-arg=%s' % shlex.quote(batchview_cmd))
            cmd.append(workflow_name)
            is_edit_mode = (mode == 'edit')
            # TODO: Add Intelligent Host selection to this
            try:
                proc = remote_cylc_cmd(cmd,
                                       platform,
                                       capture_process=is_edit_mode,
                                       manage=(mode == 'tail'))
            except KeyboardInterrupt:
                # Ctrl-C while tailing.
                pass
            else:
                if is_edit_mode:
                    # Write remote stdout to a temp file for viewing in editor.
                    # Only BUFSIZE bytes at a time in case huge stdout volume.
                    out = NamedTemporaryFile()
                    data = proc.stdout.read(BUFSIZE)
                    while data:
                        out.write(data)
                        data = proc.stdout.read(BUFSIZE)
                    os.chmod(out.name, S_IRUSR)
                    out.seek(0, 0)
        else:
            # Local task job or local job log.
            logpath = os.path.normpath(
                get_workflow_run_job_dir(workflow_name, point, task,
                                         options.submit_num, options.filename))
            tail_tmpl = os.path.expandvars(platform["tail command template"])
            out = view_log(logpath,
                           mode,
                           tail_tmpl,
                           batchview_cmd,
                           color=color)
            if mode != 'edit':
                sys.exit(out)
        if mode == 'edit':
            tmpfile_edit(out, options.geditor)
Example #26
def main(_, options: 'Values', workflow: str) -> None:
    """Implement cylc broadcast."""
    workflow, _ = parse_reg(workflow)
    pclient = get_client(workflow, timeout=options.comms_timeout)

    mutation_kwargs: Dict[str, Any] = {
        'request_string': MUTATION,
        'variables': {
            'wFlows': [workflow],
            'bMode': 'Set',
            'cPoints': options.point_strings,
            'nSpaces': options.namespaces,
            'bSettings': options.settings,
            'bCutoff': options.expire,
        }
    }

    query_kwargs: Dict[str, Any] = {
        'request_string': QUERY,
        'variables': {
            'wFlows': [workflow],
            'nIds': []
        }
    }

    if options.show or options.showtask:
        if options.showtask:
            try:
                task, point = TaskID.split(options.showtask)
                query_kwargs['variables']['nIds'] = [
                    f'{point}{ID_DELIM}{task}'
                ]
            except ValueError:
                raise UserInputError("TASKID must be " + TaskID.SYNTAX)
        result = pclient('graphql', query_kwargs)
        for wflow in result['workflows']:
            settings = wflow['broadcasts']
            padding = get_padding(settings) * ' '
            if options.raw:
                print(str(settings))
            else:
                print_tree(settings, padding, options.unicode)
        sys.exit(0)

    report_cancel = True
    report_set = False
    if options.clear:
        mutation_kwargs['variables']['bMode'] = 'Clear'

    if options.expire:
        mutation_kwargs['variables']['bMode'] = 'Expire'

    # implement namespace and cycle point defaults here
    namespaces = options.namespaces
    if not namespaces:
        namespaces = ["root"]
    point_strings = options.point_strings
    if not point_strings:
        point_strings = ["*"]

    if options.cancel or options.cancel_files:
        settings = []
        for option_item in options.cancel:
            if "=" in option_item:
                raise UserInputError(
                    "--cancel=[SEC]ITEM does not take a value")
            option_item = option_item.strip()
            setting = get_rdict(option_item)
            settings.append(setting)
        files_to_settings(settings, options.cancel_files, options.cancel)
        mutation_kwargs['variables'].update({
            'bMode': 'Clear',
            'cPoints': point_strings,
            'nSpaces': namespaces,
            'bSettings': settings,
        })

    if options.settings or options.setting_files:
        settings = []
        for option_item in options.settings:
            if "=" not in option_item:
                raise UserInputError("--set=[SEC]ITEM=VALUE requires a value")
            lhs, rhs = [s.strip() for s in option_item.split("=", 1)]
            setting = get_rdict(lhs, rhs)
            settings.append(setting)
        files_to_settings(settings, options.setting_files)
        mutation_kwargs['variables'].update({
            'bMode': 'Set',
            'cPoints': point_strings,
            'nSpaces': namespaces,
            'bSettings': settings,
        })
        report_cancel = False
        report_set = True

    results = pclient('graphql', mutation_kwargs)
    for result in results['broadcast']['result']:
        modified_settings = result['response'][0]
        bad_options = result['response'][1]
        if modified_settings:
            print(
                get_broadcast_change_report(modified_settings,
                                            is_cancel=report_cancel))
    sys.exit(report_bad_options(bad_options, is_set=report_set))
Example #27
def main(parser: COP, options: 'Values', reg: str) -> None:
    workflow, flow_file = parse_reg(reg, src=True)

    if options.geditor:
        editor = glbl_cfg().get(['editors', 'gui'])
    else:
        editor = glbl_cfg().get(['editors', 'terminal'])

    # read in the flow.cylc file
    viewcfg = {
        'mark': options.mark,
        'single': options.single,
        'label': options.label,
        'empy': options.empy or options.process,
        'jinja2': options.jinja2 or options.process,
        'contin': options.cat or options.process,
        'inline': (options.inline or options.jinja2 or options.empy
                   or options.process),
    }
    lines = read_and_proc(flow_file,
                          load_template_vars(options.templatevars,
                                             options.templatevars_file),
                          viewcfg=viewcfg)

    if options.stdout:
        for line in lines:
            print(line)
        sys.exit(0)

    # write to a temporary file
    viewfile = NamedTemporaryFile(
        suffix=".flow.cylc",
        prefix=workflow.replace('/', '_') + '.',
    )
    for line in lines:
        viewfile.write((line + '\n').encode())
    viewfile.seek(0, 0)

    # set the file to be read only
    os.chmod(viewfile.name, 0o400)

    # capture the temp file's mod time in case the user edits it
    # and overrides the readonly mode.
    modtime1 = os.stat(viewfile.name).st_mtime

    # in case editor has options, e.g. 'emacs -nw':
    command_list = shlex.split(editor)
    command_list.append(viewfile.name)
    command = ' '.join(command_list)
    # THIS BLOCKS UNTIL THE COMMAND COMPLETES
    retcode = call(command_list)  # nosec (editor command is user configurable)
    if retcode != 0:
        # the command returned non-zero exit status
        raise CylcError(f'{command} failed: {retcode}')

    # !!!VIEWING FINISHED!!!

    # Did the user edit the file?
    modtime2 = os.stat(viewfile.name).st_mtime

    if modtime2 > modtime1:
        print(
            "\nWARNING: YOU HAVE EDITED A TEMPORARY READ-ONLY COPY "
            f"OF THE WORKFLOW:\n   {viewfile.name}\n",
            file=sys.stderr)
    # DONE
    viewfile.close()
Example #28
def main(parser: COP, options: 'Values', workflow: str) -> None:
    workflow, _ = parse_reg(workflow)

    if options.use_task_point and options.cycle:
        raise UserInputError(
            "cannot specify a cycle point and use environment variable")

    if options.use_task_point:
        if "CYLC_TASK_CYCLE_POINT" not in os.environ:
            raise UserInputError("CYLC_TASK_CYCLE_POINT is not defined")
        options.cycle = os.environ["CYLC_TASK_CYCLE_POINT"]

    if options.offset and not options.cycle:
        raise UserInputError(
            "You must target a cycle point to use an offset")

    # Attempt to apply specified offset to the targeted cycle
    if options.offset:
        options.cycle = str(add_offset(options.cycle, options.offset))

    # Exit if both task state and message are being polled
    if options.status and options.msg:
        raise UserInputError("cannot poll both status and custom output")

    if options.msg and not options.task and not options.cycle:
        raise UserInputError("need a taskname and cyclepoint")

    # Exit if an invalid status is requested
    if (options.status and
            options.status not in TASK_STATUSES_ORDERED and
            options.status not in CylcWorkflowDBChecker.STATE_ALIASES):
        raise UserInputError(f"invalid status '{options.status}'")

    # this only runs locally
    if options.run_dir:
        run_dir = expand_path(options.run_dir)
    else:
        run_dir = get_cylc_run_dir()

    pollargs = {
        'workflow': workflow,
        'run_dir': run_dir,
        'task': options.task,
        'cycle': options.cycle,
        'status': options.status,
        'message': options.msg,
    }

    spoller = WorkflowPoller("requested state",
                             options.interval,
                             options.max_polls,
                             args=pollargs)

    connected, formatted_pt = spoller.connect()

    if not connected:
        raise CylcError("cannot connect to the workflow DB")

    if options.status and options.task and options.cycle:
        # check a task status
        spoller.condition = options.status
        if not spoller.poll():
            sys.exit(1)
    elif options.msg:
        # Check for a custom task output
        spoller.condition = "output: %s" % options.msg
        if not spoller.poll():
            sys.exit(1)
    else:
        # just display query results
        spoller.checker.display_maps(
            spoller.checker.workflow_state_query(
                task=options.task,
                cycle=formatted_pt,
                status=options.status))
Example #29
def scheduler_cli(options: 'Values', reg: str) -> None:
    """Run the workflow.

    This function should contain all of the command line facing
    functionality of the Scheduler, exit codes, logging, etc.

    The Scheduler itself should be a Python object you can import and
    run in a regular Python session so cannot contain this kind of
    functionality.

    """
    # Parse workflow name but delay Cylc 7 suite.rc deprecation warning
    # until after the start-up splash is printed.
    reg, _ = parse_reg(reg, warn_depr=False)
    try:
        detect_old_contact_file(reg)
    except ServiceFileError as exc:
        print(f"Resuming already-running workflow\n\n{exc}")
        pclient = WorkflowRuntimeClient(reg, timeout=options.comms_timeout)
        mutation_kwargs = {
            'request_string': RESUME_MUTATION,
            'variables': {
                'wFlows': [reg]
            }
        }
        pclient('graphql', mutation_kwargs)
        sys.exit(0)

    # re-execute on another host if required
    _distribute(options.host)

    # print the start message
    if (cylc.flow.flags.verbosity > -1
            and (options.no_detach or options.format == 'plain')):
        print(cparse(cylc_header()))

    if cylc.flow.flags.cylc7_back_compat:
        LOG.warning(SUITERC_DEPR_MSG)

    # setup the scheduler
    # NOTE: asyncio.run opens an event loop, runs your coro,
    #       then shuts down async generators and closes the event loop
    scheduler = Scheduler(reg, options)
    asyncio.run(_setup(scheduler))

    # daemonize if requested
    # NOTE: asyncio event loops cannot persist across daemonization;
    #       ensure you have tidied up all threads etc. before daemonizing
    if not options.no_detach:
        from cylc.flow.daemonize import daemonize
        daemonize(scheduler)

    # setup loggers
    _open_logs(reg, options.no_detach)

    # run the workflow
    ret = asyncio.run(_run(scheduler))

    # exit
    # NOTE: we must clean up all asyncio / threading stuff before exiting
    # NOTE: any threads which include sleep statements could cause
    #       sys.exit to hang if not shut down properly
    LOG.info("DONE")
    _close_logs()
    sys.exit(ret)