async def run(
    options: 'Values',
    workflow_id,
    *tokens_list,
) -> int:
    """Request a workflow shutdown, optionally polling until it stops.

    Args:
        options: Parsed command line options.
        workflow_id: The workflow to stop.
        *tokens_list: At most one tokens object naming a stop task or cycle.

    Returns:
        0 on success, 1 if polling for shutdown timed out.

    Raises:
        Exception: If more than one stop task/cycle is provided.
    """
    if len(tokens_list) > 1:
        # Fix: the original raised a placeholder message ('Multiple TODO').
        raise Exception(
            'Only a single stop task or cycle point may be provided.'
        )

    # parse the stop-task or stop-cycle if provided
    stop_task = stop_cycle = None
    if tokens_list:
        tokens = tokens_list[0]
        if tokens['task']:
            stop_task = tokens.relative_id
        elif tokens['cycle']:
            stop_cycle = tokens['cycle']

    _validate(options, stop_task, stop_cycle, *tokens_list)

    pclient = get_client(workflow_id, timeout=options.comms_timeout)

    if int(options.max_polls) > 0:
        # (test to avoid the "nothing to do" warning for
        # --max-polls=0)
        spoller = StopPoller(
            pclient,
            "workflow stopped",
            options.interval,
            options.max_polls,
        )

    # mode defaults to 'Clean'
    mode = None
    if stop_task or stop_cycle:
        # stopping at a task/cycle point implies clean mode
        pass
    elif options.kill:
        mode = WorkflowStopMode.Kill.name
    elif options.now > 1:
        mode = WorkflowStopMode.NowNow.name
    elif options.now:
        mode = WorkflowStopMode.Now.name

    mutation_kwargs = {
        'request_string': MUTATION,
        'variables': {
            'wFlows': [workflow_id],
            'stopMode': mode,
            'cyclePoint': stop_cycle,
            'clockTime': options.wall_clock,
            'task': stop_task,
            'flowNum': options.flow_num
        }
    }

    await pclient.async_request('graphql', mutation_kwargs)

    if int(options.max_polls) > 0 and not await spoller.poll():
        # (test to avoid the "nothing to do" warning for
        # --max-polls=0)
        return 1
    return 0
def main(parser, options, suite, event_msg, event_id):
    """Send an external-trigger event message to a running suite, with retries."""
    suite = os.path.normpath(suite)
    LOG.info('Send to suite %s: "%s" (%s)', suite, event_msg, event_id)
    pclient = get_client(suite, timeout=options.comms_timeout)

    n_tries = int(options.max_n_tries)
    retry_gap = float(options.retry_intvl_secs)

    request = {
        'request_string': MUTATION,
        'variables': {
            'wFlows': [suite],
            'eventMsg': event_msg,
            'eventId': event_id,
        }
    }

    for attempt in range(n_tries):
        try:
            pclient('graphql', request)
        except ClientError as exc:
            LOG.exception(exc)
            LOG.info(MSG_SEND_FAILED, attempt + 1, n_tries)
            if attempt == n_tries - 1:
                # no retries left
                raise CylcError('send failed')
            LOG.info(MSG_SEND_RETRY, retry_gap, options.comms_timeout)
            sleep(retry_gap)
        else:
            if attempt:
                # succeeded after at least one retry
                LOG.info(MSG_SEND_SUCCEED, attempt + 1, n_tries)
            break
def send_messages(workflow, task_job, messages, event_time):
    """Send task job messages to the workflow server, best-effort."""
    workflow = os.path.normpath(workflow)
    try:
        pclient = get_client(workflow)
    except WorkflowStopped:
        # on a remote host this means the contact file is not present
        # either the workflow is stopped or the contact file is not present
        # on the job host (i.e. comms method is polling)
        # eitherway don't try messaging
        pass
    except Exception:
        # Backward communication not possible
        if cylc.flow.flags.verbosity > 1:
            import traceback
            traceback.print_exc()
    else:
        pclient(
            'graphql',
            {
                'request_string': MUTATION,
                'variables': {
                    'wFlows': [workflow],
                    'taskJob': task_job,
                    'eventTime': event_time,
                    'messages': messages,
                }
            }
        )
def main(parser: COP, options: 'Values', workflow: str, event_msg: str,
         event_id: str) -> None:
    """Send an external-trigger event message to a running workflow, with retries."""
    workflow, _ = parse_reg(workflow)
    LOG.info('Send to workflow %s: "%s" (%s)', workflow, event_msg, event_id)
    pclient = get_client(workflow, timeout=options.comms_timeout)

    max_n_tries = int(options.max_n_tries)
    retry_intvl_secs = float(options.retry_intvl_secs)

    mutation_kwargs = {
        'request_string': MUTATION,
        'variables': {
            'wFlows': [workflow],
            'eventMsg': event_msg,
            'eventId': event_id,
        }
    }

    # 1-based attempt counter over the same number of tries
    for attempt in range(1, max_n_tries + 1):
        try:
            pclient('graphql', mutation_kwargs)
        except ClientError as exc:
            LOG.exception(exc)
            LOG.info(MSG_SEND_FAILED, attempt, max_n_tries)
            if attempt == max_n_tries:
                # final attempt
                raise CylcError('send failed')
            LOG.info(MSG_SEND_RETRY, retry_intvl_secs, options.comms_timeout)
            sleep(retry_intvl_secs)
        else:
            if attempt > 1:
                LOG.info(MSG_SEND_SUCCEED, attempt, max_n_tries)
            break
def main(parser: COP, options: 'Values', reg: str) -> None:
    """Print the Cylc version of a running workflow."""
    reg, _ = parse_reg(reg)
    pclient = get_client(reg, timeout=options.comms_timeout)
    result = pclient(
        'graphql',
        {'request_string': QUERY, 'variables': {'wFlows': [reg]}},
    )
    for flow in result['workflows']:
        print(flow['cylcVersion'])
def main(
    parser: COP,
    options: 'Values',
    reg: str,
    shutdown_arg: Optional[str] = None
) -> None:
    """Tell a workflow server program to shut down."""
    if shutdown_arg is not None and options.kill:
        parser.error("ERROR: --kill is not compatible with [STOP]")
    if options.kill and options.now:
        parser.error("ERROR: --kill is not compatible with --now")
    if options.flow_label and int(options.max_polls) > 0:
        parser.error("ERROR: --flow is not compatible with --max-polls")

    reg, _ = parse_reg(reg)
    pclient = get_client(reg, timeout=options.comms_timeout)

    polling = int(options.max_polls) > 0
    if polling:
        # (test to avoid the "nothing to do" warning for
        # --max-polls=0)
        spoller = StopPoller(pclient, "workflow stopped", options.interval,
                             options.max_polls)

    # mode defaults to 'Clean'
    mode = None
    task = None
    cycle_point = None
    if shutdown_arg is not None:
        if TaskID.is_valid_id(shutdown_arg):
            # STOP argument detected
            task = shutdown_arg
        else:
            # not a task ID, may be a cycle point
            cycle_point = shutdown_arg
    elif options.kill:
        mode = WorkflowStopMode.Kill.name
    elif options.now > 1:
        mode = WorkflowStopMode.NowNow.name
    elif options.now:
        mode = WorkflowStopMode.Now.name

    pclient(
        'graphql',
        {
            'request_string': MUTATION,
            'variables': {
                'wFlows': [reg],
                'stopMode': mode,
                'cyclePoint': cycle_point,
                'clockTime': options.wall_clock,
                'task': task,
                'flowLabel': options.flow_label,
            }
        }
    )

    if polling and not spoller.poll():
        # (test to avoid the "nothing to do" warning for
        # --max-polls=0)
        sys.exit(1)
async def run(options: 'Values', workflow_id: str) -> None:
    """Issue the mutation against the target workflow."""
    pclient = get_client(workflow_id, timeout=options.comms_timeout)
    await pclient.async_request(
        'graphql',
        {
            'request_string': MUTATION,
            'variables': {
                'wFlows': [workflow_id],
            }
        }
    )
def get_snapshot(self):
    """Contact the workflow, return a tree structure

    In the event of error contacting the workflow the message is written
    to this Widget's header.

    Returns:
        dict if successful, else False

    """
    try:
        if not self.client:
            self.client = get_client(self.reg, timeout=self.CLIENT_TIMEOUT)
        # only request the task states currently toggled on in the filter
        wanted_states = [
            state
            for state, is_on in self.filter_states.items()
            if is_on
        ]
        data = self.client(
            'graphql',
            {
                'request_string': QUERY,
                'variables': {'taskStates': wanted_states},
            }
        )
    except WorkflowStopped:
        # drop the client so a later call reconnects
        self.client = None
        return dummy_flow({
            'name': self.reg,
            'id': self.reg,
            'status': 'stopped',
            'stateTotals': {}
        })
    except (ClientError, ClientTimeout) as exc:
        # catch network / client errors
        self.set_header([('workflow_error', str(exc))])
        return False

    if isinstance(data, list):
        # catch GraphQL errors
        try:
            message = data[0]['error']['message']
        except (IndexError, KeyError):
            message = str(data)
        self.set_header([('workflow_error', message)])
        return False

    if len(data['workflows']) != 1:
        # multiple workflows in returned data - shouldn't happen
        raise ValueError()

    return compute_tree(data['workflows'][0])
def main(parser, options, suite):
    """Send the mutation to the given suite."""
    suite = os.path.normpath(suite)
    pclient = get_client(suite, timeout=options.comms_timeout)
    pclient(
        'graphql',
        {
            'request_string': MUTATION,
            'variables': {
                'wFlows': [suite],
            }
        }
    )
def main(parser, options, suite):
    """Print the Cylc version of a running suite."""
    pclient = get_client(suite, timeout=options.comms_timeout)
    result = pclient(
        'graphql',
        {
            'request_string': QUERY,
            'variables': {'wFlows': [suite]}
        }
    )
    for flow in result['workflows']:
        print(flow['cylcVersion'])
async def run(options: 'Values', workflow_id: str, *tokens_list):
    """Send the mutation for the given tasks to the workflow."""
    pclient = get_client(workflow_id, timeout=options.comms_timeout)
    task_ids = [tokens.relative_id for tokens in tokens_list]
    await pclient.async_request(
        'graphql',
        {
            'request_string': MUTATION,
            'variables': {
                'wFlows': [workflow_id],
                'tasks': task_ids,
            }
        }
    )
def main(parser: COP, options: 'Values', workflow: str) -> None:
    """Send the mutation to the given workflow."""
    workflow, _ = parse_reg(workflow)
    pclient = get_client(workflow, timeout=options.comms_timeout)
    pclient(
        'graphql',
        {
            'request_string': MUTATION,
            'variables': {
                'wFlows': [workflow],
            }
        }
    )
def main(parser, options, workflow):
    """Send the mutation to the given workflow."""
    workflow = os.path.normpath(workflow)
    pclient = get_client(workflow, timeout=options.comms_timeout)
    pclient(
        'graphql',
        {
            'request_string': MUTATION,
            'variables': {
                'wFlows': [workflow],
            }
        }
    )
async def run(options, workflow_id, *_):
    """Return the Cylc version of the first workflow in the query result.

    Returns None if the result contains no workflows.
    """
    pclient = get_client(workflow_id, timeout=options.comms_timeout)
    result = await pclient.async_request(
        'graphql',
        {
            'request_string': QUERY,
            'variables': {'wFlows': [workflow_id]}
        }
    )
    for flow in result['workflows']:
        return flow['cylcVersion']
def main(parser, options, workflow, shutdown_arg=None):
    """Tell a workflow server program to shut down."""
    if shutdown_arg is not None and options.kill:
        parser.error("ERROR: --kill is not compatible with [STOP]")
    if options.kill and options.now:
        parser.error("ERROR: --kill is not compatible with --now")
    if options.flow_label and int(options.max_polls) > 0:
        parser.error("ERROR: --flow is not compatible with --max-polls")

    workflow = os.path.normpath(workflow)
    pclient = get_client(workflow, timeout=options.comms_timeout)

    polling = int(options.max_polls) > 0
    if polling:
        # (test to avoid the "nothing to do" warning for
        # --max-polls=0)
        spoller = StopPoller(pclient, "workflow stopped", options.interval,
                             options.max_polls)

    # mode defaults to 'Clean'
    mode = None
    task = None
    cycle_point = None
    if shutdown_arg is not None:
        if TaskID.is_valid_id(shutdown_arg):
            # STOP argument detected
            task = shutdown_arg
        else:
            # not a task ID, may be a cycle point
            cycle_point = shutdown_arg
    elif options.kill:
        mode = 'Kill'
    elif options.now > 1:
        mode = 'NowNow'
    elif options.now:
        mode = 'Now'

    pclient(
        'graphql',
        {
            'request_string': MUTATION,
            'variables': {
                'wFlows': [workflow],
                'stopMode': mode,
                'cyclePoint': cycle_point,
                'clockTime': options.wall_clock,
                'task': task,
                'flowLabel': options.flow_label,
            }
        }
    )

    if polling and not spoller.poll():
        # (test to avoid the "nothing to do" warning for
        # --max-polls=0)
        sys.exit(1)
def record_messages(suite, task_job, messages):
    """Record task job messages.

    Print the messages according to their severity.
    Write the messages in the job status file.
    Send the messages to the suite, if possible.

    Arguments:
        suite (str): Suite name.
        task_job (str): Task job identifier "CYCLE/TASK_NAME/SUBMIT_NUM".
        messages (list): List of messages "[[severity, message], ...]".
    """
    # Record the event time, in case the message is delayed in some way.
    event_time = get_current_time_string(
        override_use_utc=(os.getenv('CYLC_UTC') == 'True'))

    # Print to stdout/stderr
    for severity, message in messages:
        stream = sys.stderr if severity in STDERR_LEVELS else sys.stdout
        stream.write('%s %s - %s\n' % (event_time, severity, message))
        stream.flush()

    # Write to job.status
    _append_job_status_file(suite, task_job, event_time, messages)

    # Send messages
    suite = os.path.normpath(suite)
    try:
        pclient = get_client(suite)
    except SuiteStopped:
        # on a remote host this means the contact file is not present
        # either the suite is stopped or the contact file is not present
        # on the job host (i.e. comms method is polling)
        # eitherway don't try messaging
        pass
    except Exception:
        # Backward communication not possible
        if cylc.flow.flags.debug:
            import traceback
            traceback.print_exc()
    else:
        pclient(
            'graphql',
            {
                'request_string': MUTATION,
                'variables': {
                    'wFlows': [suite],
                    'taskJob': task_job,
                    'eventTime': event_time,
                    'messages': messages,
                }
            }
        )
def main(parser: COP, options: 'Values', workflow: str, *task_globs: str):
    """CLI of "cylc kill"."""
    workflow, _ = parse_reg(workflow)
    pclient = get_client(workflow, timeout=options.comms_timeout)
    pclient(
        'graphql',
        {
            'request_string': MUTATION,
            'variables': {
                'wFlows': [workflow],
                'tasks': list(task_globs),
            }
        }
    )
def main(parser: COP, options: 'Values', reg: str, *task_globs: str) -> None:
    """Send the output-setting mutation for the matched tasks."""
    reg, _ = parse_reg(reg)
    pclient = get_client(reg, timeout=options.comms_timeout)
    pclient(
        'graphql',
        {
            'request_string': MUTATION,
            'variables': {
                'wFlows': [reg],
                'tasks': list(task_globs),
                'outputs': options.outputs,
            }
        }
    )
def main(parser, options, suite, *task_globs):
    """CLI of "cylc kill"."""
    suite = os.path.normpath(suite)
    pclient = get_client(suite, timeout=options.comms_timeout)
    pclient(
        'graphql',
        {
            'request_string': MUTATION,
            'variables': {
                'wFlows': [suite],
                'tasks': list(task_globs),
            }
        }
    )
async def run(
    options: 'Values',
    workflow_id: str,
    *tokens_list,
) -> Dict:
    """Ping a workflow, and optionally tasks within it.

    Returns:
        A report dict with 'stdout', 'stderr' and 'exit' keys.
    """
    pclient = get_client(workflow_id, timeout=options.comms_timeout)

    ret: Dict[str, Any] = {
        'stdout': [],
        'stderr': [],
        'exit': 0
    }
    flow_kwargs: Dict[str, Any] = {
        'request_string': FLOW_QUERY,
        'variables': {'wFlows': [workflow_id]}
    }
    task_kwargs: Dict[str, Any] = {
        'request_string': TASK_QUERY,
    }

    # ping called on the workflow
    result = await pclient.async_request('graphql', flow_kwargs)
    msg = ""
    for flow_info in result['workflows']:
        w_name = flow_info['name']
        w_port = flow_info['port']
        w_pub_port = flow_info['pubPort']
        if cylc.flow.flags.verbosity > 0:
            ret['stdout'].append(
                f'{w_name} running on '
                f'{pclient.host}:{w_port} {w_pub_port}\n'
            )

        # ping called with task-like objects
        for tokens in tokens_list:
            task_kwargs['variables'] = {
                'tProxy': tokens.relative_id
            }
            task_result = await pclient.async_request('graphql', task_kwargs)
            string_id = tokens.relative_id
            if not task_result.get('taskProxy'):
                msg = f"task not found: {string_id}"
            elif task_result['taskProxy']['state'] != TASK_STATUS_RUNNING:
                msg = f"task not {TASK_STATUS_RUNNING}: {string_id}"
            if msg:
                ret['stderr'].append(cparse(f'<red>{msg}</red>'))
                ret['exit'] = 1

    return ret
def main(parser, options, workflow, *task_globs):
    """CLI for "cylc trigger"."""
    workflow = os.path.normpath(workflow)
    pclient = get_client(workflow, timeout=options.comms_timeout)
    pclient(
        'graphql',
        {
            'request_string': MUTATION,
            'variables': {
                'wFlows': [workflow],
                'tasks': list(task_globs),
                'reflow': options.reflow,
            }
        }
    )
def main(
    parser: COP,
    options: 'Values',
    workflow: str,
    task_id: Optional[str] = None
) -> None:
    """Ping a running workflow, and optionally a task within it."""
    workflow, _ = parse_reg(workflow)
    pclient = get_client(workflow, timeout=options.comms_timeout)

    if task_id and not TaskID.is_valid_id(task_id):
        raise UserInputError("Invalid task ID: %s" % task_id)

    flow_kwargs = {
        'request_string': FLOW_QUERY,
        'variables': {'wFlows': [workflow]}
    }
    task_kwargs: Dict[str, Any] = {
        'request_string': TASK_QUERY,
    }

    # cylc ping WORKFLOW
    result = pclient('graphql', flow_kwargs)
    msg = ""
    for flow_info in result['workflows']:
        w_name = flow_info['name']
        w_port = flow_info['port']
        w_pub_port = flow_info['pubPort']
        if cylc.flow.flags.verbosity > 0:
            sys.stdout.write(
                f'{w_name} running on '
                f'{pclient.host}:{w_port} {w_pub_port}\n'
            )

        # cylc ping WORKFLOW TASKID
        if task_id:
            task, point = TaskID.split(task_id)
            w_id = flow_info['id']
            task_kwargs['variables'] = {
                'tProxy': f'{w_id}{ID_DELIM}{point}{ID_DELIM}{task}'
            }
            task_result = pclient('graphql', task_kwargs)
            if not task_result.get('taskProxy'):
                msg = "task not found"
            elif task_result['taskProxy']['state'] != TASK_STATUS_RUNNING:
                msg = f"task not {TASK_STATUS_RUNNING}"
            if msg:
                print(cparse(f'<red>{msg}</red>'))
                sys.exit(1)
def main(parser: COP, options: 'Values', reg: str, severity_str: str) -> None:
    """Set the logging severity level of a running workflow."""
    if severity_str not in LOG_LEVELS:
        parser.error("Illegal logging level, %s" % severity_str)
    severity = LOG_LEVELS[severity_str]

    reg, _ = parse_reg(reg)
    pclient = get_client(reg, timeout=options.comms_timeout)
    pclient(
        'graphql',
        {
            'request_string': MUTATION,
            'variables': {
                'wFlows': [reg],
                'level': severity,
            }
        }
    )
def main(parser: COP, options: 'Values', workflow: str, *task_globs: str):
    """CLI for "cylc trigger"."""
    if options.flow_descr and not options.reflow:
        parser.error("--meta requires --reflow")
    workflow, _ = parse_reg(workflow)
    pclient = get_client(workflow, timeout=options.comms_timeout)
    pclient(
        'graphql',
        {
            'request_string': MUTATION,
            'variables': {
                'wFlows': [workflow],
                'tasks': list(task_globs),
                'reflow': options.reflow,
                'flowDescr': options.flow_descr,
            }
        }
    )
def main(parser, options, workflow, severity_str):
    """Set the logging severity level of a running workflow."""
    if severity_str not in LOG_LEVELS:
        parser.error("Illegal logging level, %s" % severity_str)
    severity = LOG_LEVELS[severity_str]

    workflow = os.path.normpath(workflow)
    pclient = get_client(workflow, timeout=options.comms_timeout)
    pclient(
        'graphql',
        {
            'request_string': MUTATION,
            'variables': {
                'wFlows': [workflow],
                'level': severity,
            }
        }
    )
def main(_, options: 'Values', *ids) -> None:
    """Implement "cylc show" CLI."""
    workflow_args, _ = parse_ids(
        *ids,
        constraint='mixed',
        max_workflows=1,
    )
    workflow_id = list(workflow_args)[0]
    tokens_list = workflow_args[workflow_id]

    if tokens_list and options.task_defs:
        raise UserInputError(
            'Cannot query both live tasks and task definitions.')

    pclient = get_client(workflow_id, timeout=options.comms_timeout)
    json_filter: 'Dict' = {}

    # dispatch on what was requested: task definitions, the workflow
    # itself, or live task prerequisites/outputs
    if options.task_defs:
        ret = task_meta_query(
            workflow_id,
            options.task_defs,
            pclient,
            options,
            json_filter,
        )
    elif not tokens_list:
        ret = workflow_meta_query(workflow_id, pclient, options, json_filter)
    else:
        ret = prereqs_and_outputs_query(
            workflow_id,
            tokens_list,
            pclient,
            options,
            json_filter,
        )

    if options.json:
        print(json.dumps(json_filter, indent=4))

    sys.exit(ret)
async def run(options, workflow_id, *tokens_list):
    """Hold tasks, or set a workflow hold point."""
    _validate(options, *tokens_list)

    pclient = get_client(workflow_id, timeout=options.comms_timeout)

    if options.hold_point_string:
        request = SET_HOLD_POINT_MUTATION
        variables = {
            'wFlows': [workflow_id],
            'point': options.hold_point_string,
        }
    else:
        request = HOLD_MUTATION
        variables = {
            'wFlows': [workflow_id],
            'tasks': [id_.relative_id for id_ in tokens_list],
        }

    await pclient.async_request(
        'graphql',
        {'request_string': request, 'variables': variables},
    )
async def run(options: 'Values', workflow_id, *tokens_list):
    """Release held tasks, or remove the workflow hold point."""
    _validate(options, *tokens_list)

    pclient = get_client(workflow_id, timeout=options.comms_timeout)

    if options.release_all:
        request = RELEASE_HOLD_POINT_MUTATION
        variables = {
            'wFlows': [workflow_id],
            'tasks': ['*/*'],
        }
    else:
        request = RELEASE_MUTATION
        variables = {
            'wFlows': [workflow_id],
            'tasks': [tokens.relative_id for tokens in tokens_list],
        }

    await pclient.async_request(
        'graphql',
        {'request_string': request, 'variables': variables},
    )
def main(parser: COP, options: 'Options', workflow: str, *task_globs: str):
    """Hold tasks, or set a workflow hold point."""
    _validate(options, *task_globs)

    workflow = os.path.normpath(workflow)
    pclient = get_client(workflow, timeout=options.comms_timeout)

    if options.hold_point_string:
        request_string = SET_HOLD_POINT_MUTATION
        extra_args = {'point': options.hold_point_string}
    else:
        request_string = HOLD_MUTATION
        extra_args = {'tasks': list(task_globs)}

    pclient(
        'graphql',
        {
            'request_string': request_string,
            'variables': {'wFlows': [workflow], **extra_args},
        }
    )
def main(parser: COP, options: 'Values', workflow: str, *task_globs: str):
    """Release held tasks, or remove the workflow hold point."""
    _validate(options, *task_globs)

    workflow, _ = parse_reg(workflow)
    pclient = get_client(workflow, timeout=options.comms_timeout)

    if options.release_all:
        request_string = RELEASE_HOLD_POINT_MUTATION
        extra_args = {}
    else:
        request_string = RELEASE_MUTATION
        extra_args = {'tasks': list(task_globs)}

    pclient(
        'graphql',
        {
            'request_string': request_string,
            'variables': {'wFlows': [workflow], **extra_args},
        }
    )