Example #1
async def test_illegal_config_load(
    test_conf: dict,
    expected_msg: str,
    reload: bool,
    flow: Callable,
    one_conf: dict,
    start: Callable,
    run: Callable,
    scheduler: Callable,
    log_filter: Callable
):
    """Test that ParsecErrors (illegal config) - that occur during config load
    when running a workflow - are displayed without traceback.

    Params:
        test_conf: Dict to update one_conf with.
        expected_msg: Expected log message at error level.
        reload: If False, test a workflow start with invalid config.
            If True, test a workflow start with valid config followed by
            reload with invalid config.
    """
    if not reload:
        one_conf.update(test_conf)
    reg: str = flow(one_conf)
    schd: Scheduler = scheduler(reg)
    log: pytest.LogCaptureFixture

    if reload:
        one_conf.update(test_conf)
        run_dir = Path(get_workflow_run_dir(reg))
        async with run(schd) as log:
            # Shouldn't be any errors at this stage:
            assert not log_filter(log, level=logging.ERROR)
            # Modify flow.cylc:
            _make_flow(get_cylc_run_dir(), run_dir, one_conf, '')
            schd.queue_command('reload_workflow', {})
        assert log_filter(
            log, level=logging.ERROR,
            exact_match=f"Command failed: reload_workflow()\n{expected_msg}"
        )
    else:
        with pytest.raises(ParsecError):
            async with start(schd) as log:
                pass
        assert log_filter(
            log,
            level=logging.ERROR,
            exact_match=f"Workflow shutting down - {expected_msg}"
        )

    assert TRACEBACK_MSG not in log.text
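# The test above is driven by parametrization; a minimal, self-contained
# sketch of the pattern, assuming pytest. The config dict and expected
# message below are illustrative placeholders, not values from the Cylc
# test suite.
import pytest

@pytest.mark.parametrize(
    'test_conf, expected_msg, reload',
    [
        # start a workflow with an invalid config
        ({'bad section': {}}, 'IllegalItemError', False),
        # start with a valid config, then reload an invalid one
        ({'bad section': {}}, 'IllegalItemError', True),
    ],
)
def test_parametrize_sketch(test_conf, expected_msg, reload):
    assert isinstance(test_conf, dict)
    assert isinstance(reload, bool)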
Example #2
def main(parser: COP, opts: 'Values', reg: Optional[str] = None) -> None:
    run_dir: Optional[Path]
    if reg is None:
        try:
            reg = str(Path.cwd().relative_to(
                Path(get_cylc_run_dir()).resolve()))
        except ValueError:
            raise WorkflowFilesError(
                "The current working directory is not a workflow run directory"
            )
    else:
        reg, _ = parse_reg(reg)
    run_dir = Path(get_workflow_run_dir(reg))
    if not run_dir.is_dir():
        raise WorkflowFilesError(f'"{reg}" is not an installed workflow.')
    source, source_symlink = get_workflow_source_dir(run_dir)
    if not source:
        raise WorkflowFilesError(
            f'"{reg}" was not installed with cylc install.')
    if not Path(source).is_dir():
        raise WorkflowFilesError(
            f'Workflow source dir is not accessible: "{source}".\n'
            f'Restore the source or modify the "{source_symlink}"'
            ' symlink to continue.')
    for entry_point in iter_entry_points('cylc.pre_configure'):
        try:
            entry_point.resolve()(srcdir=source, opts=opts)
        except Exception as exc:
            # NOTE: 'except Exception' is deliberately broad here, to
            # separate plugin errors from core Cylc errors
            raise PluginError('cylc.pre_configure', entry_point.name,
                              exc) from None

    reinstall_workflow(
        named_run=reg,
        rundir=run_dir,
        source=source,
        dry_run=False  # TODO: ready for dry run implementation
    )

    for entry_point in iter_entry_points('cylc.post_install'):
        try:
            entry_point.resolve()(srcdir=source,
                                  opts=opts,
                                  rundir=str(run_dir))
        except Exception as exc:
            # NOTE: 'except Exception' is deliberately broad here, to
            # separate plugin errors from core Cylc errors
            raise PluginError('cylc.post_install', entry_point.name,
                              exc) from None
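# Sketch of the plugin-dispatch pattern used above, isolated so it runs
# standalone; 'my.pre_configure' is a placeholder entry-point group, not
# a real Cylc one.
from pkg_resources import iter_entry_points

def run_plugins(group: str, **kwargs) -> None:
    for entry_point in iter_entry_points(group):
        try:
            entry_point.resolve()(**kwargs)
        except Exception as exc:
            # attach the plugin name so failures are attributable
            raise RuntimeError(
                f'plugin {group}:{entry_point.name} failed') from exc

run_plugins('my.pre_configure')  # no plugins registered -> a no-op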
Example #3
async def harness(
    mod_run,
    mod_scheduler,
    mod_flow,
    mod_one_conf,
    mod_test_dir,
):
    """Create three workflows, two running, one stopped."""
    reg_prefix = mod_test_dir.relative_to(get_cylc_run_dir())
    # abc:running
    reg1 = mod_flow(mod_one_conf, name='abc')
    schd1 = mod_scheduler(reg1)
    # def:running
    reg2 = mod_flow(mod_one_conf, name='def')
    schd2 = mod_scheduler(reg2)
    # ghi:stopped
    reg3 = mod_flow(mod_one_conf, name='ghi')
    async with mod_run(schd1):
        async with mod_run(schd2):
            yield reg_prefix, reg1, reg2, reg3
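# A sketch of a test consuming the fixture above; pytest-asyncio (or the
# Cylc test framework's equivalent) is assumed to drive the coroutine.
async def test_harness_sketch(harness):
    reg_prefix, reg1, reg2, reg3 = harness
    # reg1 and reg2 have live schedulers here; reg3 is installed but stopped
    assert len({reg1, reg2, reg3}) == 3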
Example #4
async def scan(
    run_dir: Optional[Path] = None,
    scan_dir: Optional[Path] = None,
    max_depth: Optional[int] = None
) -> AsyncGenerator[Dict[str, Union[str, Path]], None]:
    """List flows installed on the filesystem.

    Args:
        run_dir:
            The run dir to look for workflows in, defaults to ~/cylc-run.

            All workflow registrations will be given relative to this path.
        scan_dir:
            The directory to scan for workflows in.

            Use in combination with run_dir if you want to scan a subdir
            within the run_dir.
        max_depth:
            The maximum number of levels to descend before bailing.

            * ``max_depth=1`` will pick up top-level workflows (e.g. ``foo``).
            * ``max_depth=2`` will pick up nested workflows (e.g. ``foo/bar``).

    Yields:
        dict - Dictionary containing information about the flow.

    """
    cylc_run_dir = Path(get_cylc_run_dir())
    if not run_dir:
        run_dir = cylc_run_dir
    if not scan_dir:
        scan_dir = run_dir
    if max_depth is None:
        max_depth = glbl_cfg().get(['install', 'max depth'])

    running: List[asyncio.tasks.Task] = []

    # wrapper for scandir to preserve context
    async def _scandir(path: Path, depth: int) -> Tuple[Path, int, List[Path]]:
        contents = await scandir(path)
        return path, depth, contents

    def _scan_subdirs(listing: List[Path], depth: int) -> None:
        for subdir in listing:
            if subdir.is_dir() and subdir.stem not in EXCLUDE_FILES:
                running.append(asyncio.create_task(_scandir(subdir,
                                                            depth + 1)))

    # perform the first directory listing
    scan_dir_listing = await scandir(scan_dir)
    if scan_dir != cylc_run_dir and dir_is_flow(scan_dir_listing):
        # If the scan_dir itself is a workflow run dir, yield nothing
        return

    _scan_subdirs(scan_dir_listing, depth=0)

    # perform all further directory listings
    while running:
        # wait here until there's something to do
        done, _ = await asyncio.wait(running,
                                     return_when=asyncio.FIRST_COMPLETED)
        for task in done:
            path, depth, contents = task.result()
            running.remove(task)
            if dir_is_flow(contents):
                # this is a flow directory
                yield {
                    'name': str(path.relative_to(run_dir)),
                    'path': path,
                }
            elif depth < max_depth:
                # we may have a nested flow, let's see...
                _scan_subdirs(contents, depth)
        # don't allow this to become blocking
        await asyncio.sleep(0)
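# Usage sketch: drive the async generator above from a plain script. What
# it prints depends entirely on the workflows installed under ~/cylc-run.
import asyncio

async def list_installed_flows() -> None:
    async for flow in scan(max_depth=2):
        # each yielded item is a dict with at least 'name' and 'path'
        print(flow['name'], '->', flow['path'])

asyncio.run(list_installed_flows())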
Example #5
def main(parser: COP, options: 'Values', workflow: str) -> None:
    workflow, _ = parse_reg(workflow)

    if options.use_task_point and options.cycle:
        raise UserInputError(
            "cannot specify a cycle point and use environment variable")

    if options.use_task_point:
        if "CYLC_TASK_CYCLE_POINT" not in os.environ:
            raise UserInputError("CYLC_TASK_CYCLE_POINT is not defined")
        options.cycle = os.environ["CYLC_TASK_CYCLE_POINT"]

    if options.offset and not options.cycle:
        raise UserInputError(
            "You must target a cycle point to use an offset")

    # Attempt to apply specified offset to the targeted cycle
    if options.offset:
        options.cycle = str(add_offset(options.cycle, options.offset))

    # Exit if both task state and message are to be polled
    if options.status and options.msg:
        raise UserInputError("cannot poll both status and custom output")

    if options.msg and not options.task and not options.cycle:
        raise UserInputError("need a taskname and cyclepoint")

    # Exit if an invalid status is requested
    if (options.status and
            options.status not in TASK_STATUSES_ORDERED and
            options.status not in CylcWorkflowDBChecker.STATE_ALIASES):
        raise UserInputError(f"invalid status '{options.status}'")

    # this only runs locally
    if options.run_dir:
        run_dir = expand_path(options.run_dir)
    else:
        run_dir = get_cylc_run_dir()

    pollargs = {
        'workflow': workflow,
        'run_dir': run_dir,
        'task': options.task,
        'cycle': options.cycle,
        'status': options.status,
        'message': options.msg,
    }

    spoller = WorkflowPoller("requested state",
                             options.interval,
                             options.max_polls,
                             args=pollargs)

    connected, formatted_pt = spoller.connect()

    if not connected:
        raise CylcError("cannot connect to the workflow DB")

    if options.status and options.task and options.cycle:
        # check a task status
        spoller.condition = options.status
        if not spoller.poll():
            sys.exit(1)
    elif options.msg:
        # Check for a custom task output
        spoller.condition = "output: %s" % options.msg
        if not spoller.poll():
            sys.exit(1)
    else:
        # just display query results
        spoller.checker.display_maps(
            spoller.checker.workflow_state_query(
                task=options.task,
                cycle=formatted_pt,
                status=options.status))
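# Illustrative invocations of the script above (the workflow name is a
# placeholder and the flag spellings are assumptions inferred from the
# option attributes in the code):
#
#   # poll until foo at 20210101T0000Z reaches the 'succeeded' status
#   cylc workflow-state my/flow --task=foo --point=20210101T0000Z \
#       --status=succeeded
#
#   # poll for a custom task output instead of a status
#   cylc workflow-state my/flow --task=foo --point=20210101T0000Z \
#       --message='file ready'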
Example #6
def run_dir():
    """The cylc run directory for this host."""
    path = Path(get_cylc_run_dir())
    path.mkdir(exist_ok=True)
    yield path

def workflow_state(workflow,
                   task,
                   point,
                   offset=None,
                   status='succeeded',
                   message=None,
                   cylc_run_dir=None):
    """Connect to a workflow DB and query the requested task state.

    * Reports satisfied only if the remote workflow state has been achieved.
    * Returns all workflow state args to pass on to triggering tasks.

    Arguments:
        workflow (str):
            The workflow to interrogate.
        task (str):
            The name of the task to query.
        point (str):
            The cycle point.
        offset (str):
            The offset between the cycle this xtrigger is used in and the one
            it is querying for as an ISO8601 time duration.
            e.g. PT1H (one hour).
        status (str):
            The task status required for this xtrigger to be satisfied.
        message (str):
            The custom task output required for this xtrigger to be satisfied.

            .. note::

               This cannot be specified in conjunction with ``status``.

        cylc_run_dir (str):
            The directory in which the workflow to interrogate is located.

            .. note::

               This only needs to be supplied if the workflow is running in a
               different location from the one specified in the global
               configuration (usually ``~/cylc-run``).

    Returns:
        tuple: (satisfied, results)

        satisfied (bool):
            ``True`` if the requested state has been achieved, else ``False``.
        results (dict):
            Dictionary containing the args / kwargs which were provided
            to this xtrigger.

    """
    if cylc_run_dir:
        cylc_run_dir = expand_path(cylc_run_dir)
    else:
        cylc_run_dir = get_cylc_run_dir()
    if offset is not None:
        point = str(add_offset(point, offset))
    try:
        checker = CylcWorkflowDBChecker(cylc_run_dir, workflow)
    except (OSError, sqlite3.Error):
        # Failed to connect to DB; the target workflow may not have started.
        return (False, None)
    fmt = checker.get_remote_point_format()
    if fmt:
        my_parser = TimePointParser()
        point = str(my_parser.parse(point, dump_format=fmt))
    if message is not None:
        satisfied = checker.task_state_met(task, point, message=message)
    else:
        satisfied = checker.task_state_met(task, point, status=status)
    results = {
        'workflow': workflow,
        'task': task,
        'point': point,
        'offset': offset,
        'status': status,
        'message': message,
        'cylc_run_dir': cylc_run_dir
    }
    return satisfied, results
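# Direct-call sketch: the xtrigger function above can be exercised outside
# a scheduler. 'other/flow' and 'foo' are placeholders; the target workflow
# must be installed with a database for this to report satisfied.
satisfied, results = workflow_state(
    workflow='other/flow',
    task='foo',
    point='20210101T0000Z',
    status='succeeded',
)
print(satisfied, results)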