Example 1
async def harness(mod_flow, mod_scheduler, mod_run):
    reg = mod_flow({
        'scheduling': {
            'graph': {
                'R1':
                '''
                    a
                    b
                ''',
            },
        },
        'runtime': {
            'A1': {
                'inherit': 'A2'
            },
            'A2': {},
            'a': {
                'inherit': 'A1'
            },
            'b': {},
        },
    })
    schd = mod_scheduler(reg)
    async with mod_run(schd):
        client = WorkflowRuntimeClient(reg)

        async def _query(query_string):
            nonlocal client
            return await client.async_request(
                'graphql', {
                    'request_string': 'query { %s } ' % query_string,
                })

        yield schd, client, _query
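A test consuming this fixture might use the yielded _query helper as in the following minimal sketch (assuming pytest-asyncio in auto mode; the test name and assertion are illustrative, not from the original test module):

# Minimal sketch, assuming the fixture above is registered as `harness`.
async def test_query_helper(harness):
    schd, client, _query = harness
    ret = await _query('workflows { id }')
    assert ret  # hypothetical: the server returned a non-empty response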
Example 2
async def _connect(self, wid, flow):
    """Open a connection to a running workflow."""
    self.active[wid] = flow
    flow['req_client'] = WorkflowRuntimeClient(flow['name'])
    await self.uiserver.data_store_mgr.sync_workflow(
        wid,
        flow
    )
Example 3
async def _connect(self, wid, flow):
    """Open a connection to a running workflow."""
    try:
        flow['req_client'] = WorkflowRuntimeClient(flow['name'])
    except ClientError as exc:
        self.log.debug(f'Could not connect to {wid}: {exc}')
        return False
    self.workflows[wid] = flow
    await self.uiserver.data_store_mgr.connect_workflow(wid, flow)
    return True
Example 4
async def run(options: 'Values', workflow_id: str) -> None:
    pclient = WorkflowRuntimeClient(workflow_id, timeout=options.comms_timeout)

    mutation_kwargs = {
        'request_string': MUTATION,
        'variables': {
            'wFlows': [workflow_id],
        }
    }

    await pclient.async_request('graphql', mutation_kwargs)
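MUTATION is defined elsewhere in the calling script. A plausible shape, assuming a pause mutation in the style of Cylc's CLI scripts (the mutation name depends on the script):

# Illustrative only: the mutation name (pause, stop, ...) depends on the
# script this `run` function belongs to.
MUTATION = '''
mutation ($wFlows: [WorkflowID]!) {
  pause (workflows: $wFlows) {
    result
  }
}
'''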
Example 5
def main(parser: COP, options: 'Values', workflow: str) -> None:
    workflow, _ = parse_reg(workflow)
    pclient = WorkflowRuntimeClient(workflow, timeout=options.comms_timeout)

    mutation_kwargs = {
        'request_string': MUTATION,
        'variables': {
            'wFlows': [workflow],
        }
    }

    pclient('graphql', mutation_kwargs)
Example 6
def main(parser, options, workflow):
    workflow = os.path.normpath(workflow)
    pclient = WorkflowRuntimeClient(workflow, timeout=options.comms_timeout)

    mutation_kwargs = {
        'request_string': MUTATION,
        'variables': {
            'wFlows': [workflow],
        }
    }

    pclient('graphql', mutation_kwargs)
Example 7
def main(_, options, workflow, func):
    pclient = WorkflowRuntimeClient(workflow, timeout=options.comms_timeout)
    if options.no_input:
        kwargs = {}
    else:
        kwargs = json.load(sys.stdin)
    sys.stdin.close()
    res = pclient(func, kwargs)
    if func in PB_METHOD_MAP:
        if 'element_type' in kwargs:
            pb_msg = PB_METHOD_MAP[func][kwargs['element_type']]()
        else:
            pb_msg = PB_METHOD_MAP[func]()
        pb_msg.ParseFromString(res)
        res_msg = MessageToDict(pb_msg)
    else:
        res_msg = res
    sys.stdout.write(json.dumps(res_msg, indent=4) + '\n')
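PB_METHOD_MAP (defined elsewhere) maps endpoint names to protobuf message classes, nested by element type where an endpoint returns per-element data. A sketch of that shape; the import path and the 'task_proxies' key below are assumptions for illustration:

# Sketch only: the real map in cylc-flow may use different classes
# per element type.
from cylc.flow.data_messages_pb2 import PbEntireWorkflow, PbTaskProxy

PB_METHOD_MAP = {
    'pb_entire_workflow': PbEntireWorkflow,
    'pb_data_elements': {
        'task_proxies': PbTaskProxy,  # hypothetical element_type key
    },
}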
Example 8
async def est_workflow(reg, host, port, pub_port, context=None, timeout=None):
    """Establish communication with workflow, instantiating REQ client."""
    if is_remote_host(host):
        try:
            host = get_host_ip_by_name(host)  # IP reduces DNS traffic
        except socket.error as exc:
            if flags.verbosity > 1:
                raise
            logger.error("ERROR: %s: %s\n", exc, host)
            return (reg, host, port, pub_port, None, None)

    # NOTE: Connect to the workflow by host:port. This way the
    #       WorkflowRuntimeClient will not attempt to check the contact file
    #       which would be unnecessary as we have already done so.
    # NOTE: This part of the scan *is* IO blocking.
    client = WorkflowRuntimeClient(reg, context=context, timeout=timeout)
    _, result = await workflow_request(client, 'identify')
    return (reg, host, port, pub_port, client, result)
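A minimal driver for est_workflow; the workflow name, host, and ports below are placeholders that would normally come from the contact file or a scan:

# Minimal sketch; all connection details here are placeholders.
import asyncio

async def scan_one():
    reg, host, port, pub_port, client, result = await est_workflow(
        'my_workflow', 'localhost', 43001, 43002, timeout=10)
    if client is None:
        return  # connection failed; est_workflow already logged it
    print(result)

asyncio.run(scan_one())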
Example 9
async def harness(mod_flow, mod_scheduler, mod_run):
    flow_def = {
        'scheduler': {
            'allow implicit tasks': True
        },
        'scheduling': {
            'graph': {
                'R1': 'a => b & c => d'
            }
        },
        'runtime': {
            'A': {},
            'B': {
                'inherit': 'A',
            },
            'b': {
                'inherit': 'B',
            },
        },
    }
    reg: str = mod_flow(flow_def)
    schd: 'Scheduler' = mod_scheduler(reg)
    async with mod_run(schd):
        client = WorkflowRuntimeClient(reg)
        schd.pool.hold_tasks('*')
        schd.resume_workflow()
        # This appears to be needed to save the data state at first start;
        # tests fail without it, and a test needs to overwrite schd data with:
        # data = schd.data_store_mgr.data[schd.data_store_mgr.workflow_id]

        workflow_tokens = Tokens(
            user=schd.owner,
            workflow=schd.workflow,
        )

        yield schd, client, workflow_tokens
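The yielded workflow_tokens can be extended to task level with Tokens.duplicate; the cycle point and task name below are illustrative values for the graph above:

# Sketch: derive a task-level ID from the workflow tokens.
task_tokens = workflow_tokens.duplicate(cycle='1', task='b')
print(task_tokens.id)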
Example 10
def scheduler_cli(options: 'Values', workflow_id: str) -> None:
    """Run the workflow.

    This function should contain all of the command line facing
    functionality of the Scheduler, exit codes, logging, etc.

    The Scheduler itself should be a Python object you can import and
    run in a regular Python session so cannot contain this kind of
    functionality.

    """
    # Parse workflow name but delay Cylc 7 suite.rc deprecation warning
    # until after the start-up splash is printed.
    # TODO: singleton
    (workflow_id, ), _ = parse_ids(
        workflow_id,
        constraint='workflows',
        max_workflows=1,
        # warn_depr=False,  # TODO
    )
    try:
        detect_old_contact_file(workflow_id)
    except ServiceFileError as exc:
        print(f"Resuming already-running workflow\n\n{exc}")
        pclient = WorkflowRuntimeClient(
            workflow_id,
            timeout=options.comms_timeout,
        )
        mutation_kwargs = {
            'request_string': RESUME_MUTATION,
            'variables': {
                'wFlows': [workflow_id]
            }
        }
        pclient('graphql', mutation_kwargs)
        sys.exit(0)

    # re-execute on another host if required
    _distribute(options.host)

    # print the start message
    if (cylc.flow.flags.verbosity > -1
            and (options.no_detach or options.format == 'plain')):
        print(cparse(cylc_header()))

    if cylc.flow.flags.cylc7_back_compat:
        LOG.warning(SUITERC_DEPR_MSG)

    # setup the scheduler
    # NOTE: asyncio.run opens an event loop, runs your coro,
    #       then shutdown async generators and closes the event loop
    scheduler = Scheduler(workflow_id, options)
    asyncio.run(_setup(scheduler))

    # daemonize if requested
    # NOTE: asyncio event loops cannot persist across daemonization
    #       ensure you have tidied up all threads etc before daemonizing
    if not options.no_detach:
        from cylc.flow.daemonize import daemonize
        daemonize(scheduler)

    # setup loggers
    _open_logs(workflow_id, options.no_detach)

    # run the workflow
    ret = asyncio.run(_run(scheduler))

    # exit
    # NOTE: we must clean up all asyncio / threading stuff before exiting
    # NOTE: any threads which include sleep statements could cause
    #       sys.exit to hang if not shutdown properly
    LOG.info("DONE")
    close_log(LOG)
    sys.exit(ret)
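RESUME_MUTATION is defined elsewhere in the module; a plausible shape, mirroring the mutation sketch under Example 4, is:

# Illustrative only; treat field names as assumptions.
RESUME_MUTATION = '''
mutation ($wFlows: [WorkflowID]!) {
  resume (workflows: $wFlows) {
    result
  }
}
'''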
Example 11
async def harness(mod_flow, mod_scheduler, mod_run, mod_one_conf):
    reg = mod_flow(mod_one_conf)
    schd = mod_scheduler(reg)
    async with mod_run(schd):
        client = WorkflowRuntimeClient(reg)
        yield schd, client
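A test built on this minimal fixture can issue requests directly through the client; the endpoint below is the protobuf one decoded in Example 7 (a sketch, assuming pytest-asyncio in auto mode):

# Sketch, assuming the fixture above is registered as `harness`.
async def test_entire_workflow(harness):
    schd, client = harness
    ret = await client.async_request('pb_entire_workflow')
    assert ret  # raw protobuf bytes; see Example 7 for decoding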
Example 12
def scheduler_cli(parser, options, reg):
    """Run the workflow.

    This function should contain all of the command line facing
    functionality of the Scheduler, exit codes, logging, etc.

    The Scheduler itself should be a Python object you can import and
    run in a regular Python session so cannot contain this kind of
    functionality.

    """
    workflow_files.validate_flow_name(reg)
    reg = os.path.normpath(reg)
    try:
        workflow_files.detect_old_contact_file(reg)
    except ServiceFileError as exc:
        print(f"Resuming already-running workflow\n\n{exc}")
        pclient = WorkflowRuntimeClient(reg, timeout=options.comms_timeout)
        mutation_kwargs = {
            'request_string': RESUME_MUTATION,
            'variables': {
                'wFlows': [reg]
            }
        }
        pclient('graphql', mutation_kwargs)
        sys.exit(0)

    # re-execute on another host if required
    _distribute(options.host)

    # print the start message
    if (cylc.flow.flags.verbosity > -1
            and (options.no_detach or options.format == 'plain')):
        print(cparse(cylc_header()))

    # setup the scheduler
    # NOTE: asyncio.run opens an event loop, runs your coro,
    #       then shutdown async generators and closes the event loop
    scheduler = Scheduler(reg, options)
    asyncio.run(_setup(scheduler))

    # daemonize if requested
    # NOTE: asyncio event loops cannot persist across daemonization
    #       ensure you have tidied up all threads etc before daemonizing
    if not options.no_detach:
        from cylc.flow.daemonize import daemonize
        daemonize(scheduler)

    # setup loggers
    _open_logs(reg, options.no_detach)

    # run the workflow
    ret = asyncio.run(_run(scheduler))

    # exit
    # NOTE: we must clean up all asyncio / threading stuff before exiting
    # NOTE: any threads which include sleep statements could cause
    #       sys.exit to hang if not shutdown properly
    LOG.info("DONE")
    _close_logs()
    sys.exit(ret)
Example 13
async def graphql_query(flow, fields, filters=None):
    """Obtain information from a GraphQL request to the flow.

    Requires:
        * is_active(True)
        * contact_info

    Args:
        flow (dict):
            Flow information dictionary, provided by scan through the pipe.
        fields (iterable):
            Iterable containing the fields to request e.g::

               ['id', 'name']

            One level of nesting is supported e.g::

               {'name': None, 'meta': ['title']}
        filters (list):
            Filter by the data returned from the query.
            List in the form ``[(key, ...), value]``, e.g::

               # state must be running
               [('state',), 'running']

               # state must be running or paused
               [('state',), ('running', 'paused')]

    """
    query = f'query {{ workflows(ids: ["{flow["name"]}"]) {{ {fields} }} }}'
    try:
        client = WorkflowRuntimeClient(
            flow['name'],
            # use contact_info data if present for efficiency
            host=flow.get('CYLC_WORKFLOW_HOST'),
            port=flow.get('CYLC_WORKFLOW_PORT'))
    except WorkflowStopped:
        LOG.warning(f'Workflow not running: {flow["name"]}')
        return False
    try:
        ret = await client.async_request('graphql', {
            'request_string': query,
            'variables': {}
        })
    except ClientTimeout:
        LOG.exception(f'Timeout: name: {flow["name"]}, '
                      f'host: {client.host}, '
                      f'port: {client.port}')
        return False
    except ClientError as exc:
        LOG.exception(exc)
        return False
    else:
        # stick the result into the flow object
        if 'error' in ret:
            LOG.exception(ret['error']['message'])
            return False
        for workflow in ret.get('workflows', []):
            flow.update(workflow)

        # process filters
        for field, value in filters or []:
            # walk the (possibly nested) key path down from the flow dict
            value_ = flow
            for field_ in field:
                value_ = value_[field_]
            if isinstance(value, Iterable):
                if value_ not in value:
                    return False
            else:
                if value_ != value:
                    return False

        return flow
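Following the docstring, a call filtering on workflow state might look like this sketch (the flow dict normally comes from scan; the field names mirror the docstring's examples):

# Usage sketch for graphql_query, following the docstring's filter form.
async def check(flow):
    # keep only workflows that are running or paused
    return await graphql_query(
        flow,
        fields='name state',
        filters=[[('state',), ('running', 'paused')]],
    )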