def main(cmd_args, version, help_):
    with pycoverage(cmd_args):
        if not cmd_args:
            if version:
                cli_version()
            else:
                cli_help()
        else:
            cmd_args = list(cmd_args)
            command = cmd_args.pop(0)

            if command == "version":
                cli_version("--long" in cmd_args)

            if command == "help":
                help_ = True
                if not len(cmd_args):
                    cli_help()
                elif cmd_args == ['all']:
                    print_command_list()
                    sys.exit(0)
                else:
                    command = cmd_args.pop(0)

            if command in ALIASES:
                # this is an alias to a command
                command = ALIASES[command]

            if command in DEAD_ENDS:
                # this command has been removed but not aliased
                # display a helpful message and exit
                print(cparse(f'<red>{DEAD_ENDS[command]}</red>'))
                sys.exit(42)

            if command not in COMMANDS:
                # check if this is a command abbreviation or exit
                command = match_command(command)

            if command == "jobs-submit":
                if len(cmd_args) > 1:
                    for arg in cmd_args:
                        if not arg.startswith("-"):
                            cmd_args.insert(cmd_args.index(arg) + 1, "--")
                            break
            elif command == "message":
                if cmd_args:
                    if cmd_args[0] in ['-s', '--severity', '-p', '--priority']:
                        dd_index = 2
                    else:
                        dd_index = 0
                    cmd_args.insert(dd_index, "--")

            if help_:
                execute_cmd(command, "--help")
            else:
                if version:
                    cmd_args.append("--version")
                execute_cmd(command, *cmd_args)

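# Illustrative sketch (not from the source): the "message" special case
# above inserts a "--" separator so positional arguments are not mistaken
# for options; severity/priority flags stay ahead of the separator.
cmd_args = ['-s', 'WARNING', 'my message text']  # hypothetical input
dd_index = 2 if cmd_args[0] in ('-s', '--severity', '-p', '--priority') else 0
cmd_args.insert(dd_index, '--')
assert cmd_args == ['-s', 'WARNING', '--', 'my message text']
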
def format_shell_examples(string):
    """Put comments in the terminal "diminished" colour."""
    return cparse(
        re.sub(
            r'^(\s*(?:\$[^#]+)?)(#.*)$',
            r'\1<dim>\2</dim>',
            string,
            flags=re.M
        )
    )

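# Usage sketch (the example text is hypothetical): comments in a shell
# snippet get wrapped in <dim> tags, which cparse turns into ANSI codes.
example = format_shell_examples(
    '$ cylc play my-workflow  # start the workflow\n'
    '# a full-line comment'
)
print(example)
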
def scheduler_cli(parser, options, args, is_restart=False):
    """Implement cylc (run|restart).

    This function should contain all of the command line facing
    functionality of the Scheduler, exit codes, logging, etc.

    The Scheduler itself should be a Python object you can import and
    run in a regular Python session so cannot contain this kind of
    functionality.

    """
    reg = args[0]
    # Check suite is not already running before start of host selection.
    try:
        suite_files.detect_old_contact_file(reg)
    except SuiteServiceFileError as exc:
        sys.exit(exc)

    _check_registration(reg)

    # re-execute on another host if required
    _distribute(options.host, is_restart)

    # print the start message
    if options.no_detach or options.format == 'plain':
        print(cparse(cylc_header()))

    # setup the scheduler
    # NOTE: asyncio.run opens an event loop, runs your coro,
    #       then shuts down async generators and closes the event loop
    scheduler = Scheduler(reg, options, is_restart=is_restart)
    asyncio.run(
        _setup(parser, options, reg, is_restart, scheduler)
    )

    # daemonize if requested
    # NOTE: asyncio event loops cannot persist across daemonization
    #       ensure you have tidied up all threads etc before daemonizing
    if not options.no_detach:
        from cylc.flow.daemonize import daemonize
        daemonize(scheduler)

    # setup loggers
    _open_logs(reg, options.no_detach)

    # run the workflow
    ret = asyncio.run(
        _run(parser, options, reg, is_restart, scheduler)
    )

    # exit
    # NOTE: we must clean up all asyncio / threading stuff before exiting
    # NOTE: any threads which include sleep statements could cause
    #       sys.exit to hang if not shut down properly
    LOG.info("DONE")
    _close_logs()
    sys.exit(ret)

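# Minimal illustration of the asyncio.run behaviour the NOTEs above rely
# on: each call creates a fresh event loop, runs the coroutine to
# completion, shuts down async generators, then closes the loop.
import asyncio

async def _demo():
    return 42

assert asyncio.run(_demo()) == 42  # a new loop is created and torn down
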
def print_command_list(commands=None, indent=0):
    """Print list of Cylc commands.

    Args:
        commands (list):
            List of commands to display.
        indent (int):
            Number of spaces to put at the start of each line.

    """
    contents = [
        (cmd, desc)
        for cmd, desc, _ in iter_commands()
        if not commands or cmd in commands
    ]
    print_contents(contents, indent=indent, char=cparse('<dim>.</dim>'))

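# Hypothetical call (assuming 'play' and 'validate' are registered
# commands): restrict the listing to those two names, indented 4 spaces.
print_command_list(commands=['play', 'validate'], indent=4)
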
def main():
    opts, cmd_args = get_arg_parser().parse_known_args()
    with pycoverage(cmd_args):
        if not cmd_args:
            if opts.version:
                cli_version()
            else:
                cli_help()
        else:
            cmd_args = list(cmd_args)
            command = cmd_args.pop(0)

            if command == "version":
                cli_version("--long" in cmd_args)

            if command == "help":
                opts.help_ = True
                if not len(cmd_args):
                    cli_help()
                elif cmd_args == ['all']:
                    print_command_list()
                    sys.exit(0)
                elif cmd_args == ['id']:
                    print_id_help()
                    sys.exit(0)
                elif cmd_args in (['license'], ['licence']):
                    print_license()
                    sys.exit(0)
                else:
                    command = cmd_args.pop(0)

            if command in ALIASES:
                # this is an alias to a command
                command = ALIASES[command]

            if command in DEAD_ENDS:
                # this command has been removed but not aliased
                # display a helpful message and exit
                print(cparse(f'<red>{DEAD_ENDS[command]}</red>'))
                sys.exit(42)

            if command not in COMMANDS:
                # check if this is a command abbreviation or exit
                command = match_command(command)

            if opts.help_:
                execute_cmd(command, *cmd_args, "--help")
            else:
                if opts.version:
                    cmd_args.append("--version")
                execute_cmd(command, *cmd_args)

async def run(
    options: 'Values',
    workflow_id: str,
    *tokens_list,
) -> Dict:
    pclient = get_client(workflow_id, timeout=options.comms_timeout)

    ret: Dict[str, Any] = {
        'stdout': [],
        'stderr': [],
        'exit': 0
    }
    flow_kwargs: Dict[str, Any] = {
        'request_string': FLOW_QUERY,
        'variables': {'wFlows': [workflow_id]}
    }
    task_kwargs: Dict[str, Any] = {
        'request_string': TASK_QUERY,
    }

    # ping called on the workflow
    result = await pclient.async_request('graphql', flow_kwargs)
    msg = ""
    for flow in result['workflows']:
        w_name = flow['name']
        w_port = flow['port']
        w_pub_port = flow['pubPort']
        if cylc.flow.flags.verbosity > 0:
            ret['stdout'].append(
                f'{w_name} running on '
                f'{pclient.host}:{w_port} {w_pub_port}\n'
            )

        # ping called with task-like objects
        for tokens in tokens_list:
            task_kwargs['variables'] = {
                'tProxy': tokens.relative_id
            }
            task_result = await pclient.async_request('graphql', task_kwargs)
            string_id = tokens.relative_id
            if not task_result.get('taskProxy'):
                msg = f"task not found: {string_id}"
            elif task_result['taskProxy']['state'] != TASK_STATUS_RUNNING:
                msg = f"task not {TASK_STATUS_RUNNING}: {string_id}"
            if msg:
                ret['stderr'].append(cparse(f'<red>{msg}</red>'))
                ret['exit'] = 1

    return ret

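# Hypothetical invocation sketch: `options` and `tokens` stand in for the
# objects the CLI layer would normally construct before calling run().
result = asyncio.run(run(options, 'my-workflow', tokens))
if result['exit']:
    print('\n'.join(result['stderr']), file=sys.stderr)
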
def main(
    parser: COP,
    options: 'Values',
    workflow: str,
    task_id: Optional[str] = None
) -> None:
    workflow, _ = parse_reg(workflow)
    pclient = get_client(workflow, timeout=options.comms_timeout)

    if task_id and not TaskID.is_valid_id(task_id):
        raise UserInputError("Invalid task ID: %s" % task_id)

    flow_kwargs = {
        'request_string': FLOW_QUERY,
        'variables': {'wFlows': [workflow]}
    }
    task_kwargs: Dict[str, Any] = {
        'request_string': TASK_QUERY,
    }

    # cylc ping WORKFLOW
    result = pclient('graphql', flow_kwargs)
    msg = ""
    for flow in result['workflows']:
        w_name = flow['name']
        w_port = flow['port']
        w_pub_port = flow['pubPort']
        if cylc.flow.flags.verbosity > 0:
            sys.stdout.write(
                f'{w_name} running on '
                f'{pclient.host}:{w_port} {w_pub_port}\n'
            )

        # cylc ping WORKFLOW TASKID
        if task_id:
            task, point = TaskID.split(task_id)
            w_id = flow['id']
            task_kwargs['variables'] = {
                'tProxy': f'{w_id}{ID_DELIM}{point}{ID_DELIM}{task}'
            }
            task_result = pclient('graphql', task_kwargs)
            if not task_result.get('taskProxy'):
                msg = "task not found"
            elif task_result['taskProxy']['state'] != TASK_STATUS_RUNNING:
                msg = f"task not {TASK_STATUS_RUNNING}"
            if msg:
                print(cparse(f'<red>{msg}</red>'))
                sys.exit(1)

def main(parser, options, suite, task_id=None):
    pclient = SuiteRuntimeClient(suite, timeout=options.comms_timeout)

    if task_id and not TaskID.is_valid_id(task_id):
        raise UserInputError("Invalid task ID: %s" % task_id)

    flow_kwargs = {
        'request_string': FLOW_QUERY,
        'variables': {
            'wFlows': [suite]
        }
    }
    task_kwargs = {
        'request_string': TASK_QUERY,
    }

    # cylc ping SUITE
    result = pclient('graphql', flow_kwargs)
    msg = ""
    for flow in result['workflows']:
        w_name = flow['name']
        w_port = flow['port']
        w_pub_port = flow['pubPort']
        if cylc.flow.flags.verbose:
            sys.stdout.write(
                f'{w_name} running on '
                f'{pclient.host}:{w_port} {w_pub_port}\n')

        # cylc ping SUITE TASKID
        if task_id:
            task, point = TaskID.split(task_id)
            w_id = flow['id']
            task_kwargs['variables'] = {
                'tProxy': f'{w_id}{ID_DELIM}{point}{ID_DELIM}{task}'
            }
            task_result = pclient('graphql', task_kwargs)
            if not task_result.get('taskProxy'):
                msg = "task not found"
            elif task_result['taskProxy']['state'] != TASK_STATUS_RUNNING:
                msg = f"task not {TASK_STATUS_RUNNING}"
            if msg:
                print(cparse(f'<red>{msg}</red>'))
                sys.exit(1)

import sys
from textwrap import wrap
from functools import wraps
from subprocess import PIPE, Popen  # nosec

from ansimarkup import parse as cparse
from colorama import init as color_init

import cylc.flow.flags
from cylc.flow.exceptions import CylcError
from cylc.flow.loggingutil import CylcLogFormatter
from cylc.flow.parsec.exceptions import ParsecError


# CLI exception message format
EXC_EXIT = cparse('<red><bold>{name}: </bold>{exc}</red>')


def is_terminal():
    """Determine if running in a terminal."""
    return hasattr(sys.stderr, 'isatty') and sys.stderr.isatty()


def get_width(default=80):
    """Return the terminal width or `default` if it is not determinable."""
    # stty can have different install locs so don't use absolute path
    proc = Popen(['stty', 'size'], stdout=PIPE, stderr=PIPE)  # nosec
    if proc.wait():
        return default
    try:
        return int(proc.communicate()[0].split()[1])
    except (IndexError, ValueError):
        # fall back if the stty output cannot be parsed
        return default

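# Usage sketch: pick a wrap width from the terminal when attached to a
# tty, otherwise keep the 80-column default (the example text is made up).
if is_terminal():
    width = get_width()
else:
    width = 80
print('\n'.join(wrap('a long line of text to be wrapped...', width=width)))
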
def scheduler_cli(options: 'Values', workflow_id: str) -> None:
    """Run the workflow.

    This function should contain all of the command line facing
    functionality of the Scheduler, exit codes, logging, etc.

    The Scheduler itself should be a Python object you can import and
    run in a regular Python session so cannot contain this kind of
    functionality.

    """
    # Parse workflow name but delay Cylc 7 suite.rc deprecation warning
    # until after the start-up splash is printed.
    # TODO: singleton
    (workflow_id, ), _ = parse_ids(
        workflow_id,
        constraint='workflows',
        max_workflows=1,
        # warn_depr=False,  # TODO
    )
    try:
        detect_old_contact_file(workflow_id)
    except ServiceFileError as exc:
        print(f"Resuming already-running workflow\n\n{exc}")
        pclient = WorkflowRuntimeClient(
            workflow_id,
            timeout=options.comms_timeout,
        )
        mutation_kwargs = {
            'request_string': RESUME_MUTATION,
            'variables': {
                'wFlows': [workflow_id]
            }
        }
        pclient('graphql', mutation_kwargs)
        sys.exit(0)

    # re-execute on another host if required
    _distribute(options.host)

    # print the start message
    if (
        cylc.flow.flags.verbosity > -1
        and (options.no_detach or options.format == 'plain')
    ):
        print(cparse(cylc_header()))

    if cylc.flow.flags.cylc7_back_compat:
        LOG.warning(SUITERC_DEPR_MSG)

    # setup the scheduler
    # NOTE: asyncio.run opens an event loop, runs your coro,
    #       then shuts down async generators and closes the event loop
    scheduler = Scheduler(workflow_id, options)
    asyncio.run(
        _setup(scheduler)
    )

    # daemonize if requested
    # NOTE: asyncio event loops cannot persist across daemonization
    #       ensure you have tidied up all threads etc before daemonizing
    if not options.no_detach:
        from cylc.flow.daemonize import daemonize
        daemonize(scheduler)

    # setup loggers
    _open_logs(workflow_id, options.no_detach)

    # run the workflow
    ret = asyncio.run(
        _run(scheduler)
    )

    # exit
    # NOTE: we must clean up all asyncio / threading stuff before exiting
    # NOTE: any threads which include sleep statements could cause
    #       sys.exit to hang if not shut down properly
    LOG.info("DONE")
    close_log(LOG)
    sys.exit(ret)

class CylcOptionParser(OptionParser):

    """Common options for all cylc CLI commands."""

    MULTITASK_USAGE = cparse(dedent('''
        This command can operate on multiple tasks; globs and selectors may
        be used:
            Multiple Tasks:
                <dim># Operate on two tasks</dim>
                workflow //cycle-1/task-1 //cycle-2/task-2

            Globs (note: globs should be quoted and only match active tasks):
                <dim># Match the active task "foo" in all cycles</dim>
                '//*/foo'

                <dim># Match the tasks "foo-1" and "foo-2"</dim>
                '//*/foo-[12]'

            Selectors (note: selectors only match active tasks):
                <dim># match all failed tasks in cycle "1"</dim>
                //1:failed

            See `cylc help id` for more details.
    '''))
    MULTIWORKFLOW_USAGE = cparse(dedent('''
        This command can operate on multiple workflows; globs may also
        be used:
            Multiple Workflows:
                <dim># Operate on two workflows</dim>
                workflow-1 workflow-2

            Globs (note: globs should be quoted):
                <dim># Match all workflows</dim>
                '*'

                <dim># Match the workflows foo-1, foo-2</dim>
                'foo-[12]'

            See `cylc help id` for more details.
    '''))

    def __init__(
        self,
        usage: str,
        argdoc: Optional[List[Tuple[str, str]]] = None,
        comms: bool = False,
        jset: bool = False,
        multitask: bool = False,
        multiworkflow: bool = False,
        prep: bool = False,
        auto_add: bool = True,
        icp: bool = False,
        color: bool = True,
        segregated_log: bool = False
    ) -> None:
        self.auto_add = auto_add
        if argdoc is None:
            if prep:  # TODO
                argdoc = [('WORKFLOW | PATH', 'Workflow ID or path')]
            else:
                argdoc = [('WORKFLOW', 'Workflow ID')]

        if multiworkflow:
            usage += self.MULTIWORKFLOW_USAGE
        if multitask:
            usage += self.MULTITASK_USAGE

        args = ""
        self.n_compulsory_args = 0
        self.n_optional_args = 0
        self.unlimited_args = False
        self.comms = comms
        self.jset = jset
        self.prep = prep
        self.icp = icp
        self.color = color
        # Whether to log messages that are below warning level to stdout
        # instead of stderr:
        self.segregated_log = segregated_log

        maxlen = 0
        for arg in argdoc:
            if len(arg[0]) > maxlen:
                maxlen = len(arg[0])

        if argdoc:
            usage += "\n\nArguments:"
            for arg in argdoc:
                if arg[0].startswith('['):
                    self.n_optional_args += 1
                else:
                    self.n_compulsory_args += 1
                if arg[0].endswith('...]'):
                    self.unlimited_args = True

                args += arg[0] + " "

                pad = (maxlen - len(arg[0])) * ' ' + ' '
                usage += "\n " + arg[0] + pad + arg[1]
        usage = usage.replace('ARGS', args)

        OptionParser.__init__(
            self,
            usage,
            option_class=CylcOption,
            formatter=CylcHelpFormatter()
        )

    def add_std_option(self, *args, **kwargs):
        """Add a standard option, ignoring override."""
        with suppress(OptionConflictError):
            self.add_option(*args, **kwargs)

    def add_std_options(self):
        """Add standard options if they have not been overridden."""
        self.add_std_option(
            "-q", "--quiet",
            help="Decrease verbosity.",
            action='decrement',
            dest='verbosity',
        )
        self.add_std_option(
            "-v", "--verbose",
            help="Increase verbosity.",
            dest='verbosity',
            action='count',
            default=env_to_verbosity(os.environ)
        )
        self.add_std_option(
            "--debug",
            help="Equivalent to -v -v",
            dest="verbosity",
            action='store_const',
            const=2
        )
        self.add_std_option(
            "--no-timestamp",
            help="Don't timestamp logged messages.",
            action="store_false", dest="log_timestamp",
            default=True)

        if self.color:
            self.add_std_option(
                '--color', '--colour', metavar='WHEN', action='store',
                default='auto', choices=['never', 'auto', 'always'],
                help=(
                    "When to use color/bold text in terminal output."
                    " Options are 'never', 'auto' and 'always'."
                )
            )

        if self.comms:
            self.add_std_option(
                "--comms-timeout", metavar='SEC',
                help=(
                    "Set a timeout for network connections "
                    "to the running workflow. The default is no timeout. "
                    "For task messaging connections see "
                    "site/user config file documentation."
                ),
                action="store", default=None, dest="comms_timeout")

        if self.jset:
            self.add_std_option(
                "-s", "--set", metavar="NAME=VALUE",
                help=(
                    "Set the value of a Jinja2 template variable in the"
                    " workflow definition."
                    " Values should be valid Python literals so strings"
                    " must be quoted"
                    " e.g. 'STR=\"string\"', INT=43, BOOL=True."
                    " This option can be used multiple"
                    " times on the command line."
                    " NOTE: these settings persist across workflow restarts,"
                    " but can be set again on the \"cylc play\""
                    " command line if they need to be overridden."
                ),
                action="append", default=[], dest="templatevars")

            self.add_std_option(
                "--set-file", metavar="FILE",
                help=(
                    "Set the value of Jinja2 template variables in the "
                    "workflow definition from a file containing NAME=VALUE "
                    "pairs (one per line). "
                    "As with --set values should be valid Python literals "
                    "so strings must be quoted e.g. STR='string'. "
                    "NOTE: these settings persist across workflow restarts, "
                    "but can be set again on the \"cylc play\" "
                    "command line if they need to be overridden."
                ),
                action="store", default=None, dest="templatevars_file")

        if self.icp:
            self.add_option(
                "--initial-cycle-point", "--icp",
                metavar="CYCLE_POINT",
                help=(
                    "Set the initial cycle point. "
                    "Required if not defined in flow.cylc."
                ),
                action="store",
                dest="icp",
            )

    def add_cylc_rose_options(self) -> None:
        """Add extra options for cylc-rose plugin if it is installed."""
        try:
            __import__('cylc.rose')
        except ImportError:
            return
        self.add_option(
            "--opt-conf-key", "-O",
            help=(
                "Use optional Rose Config Setting "
                "(If Cylc-Rose is installed)"
            ),
            action="append",
            default=[],
            dest="opt_conf_keys"
        )
        self.add_option(
            "--define", '-D',
            help=(
                "Each of these overrides the `[SECTION]KEY` setting in a "
                "`rose-suite.conf` file. "
                "Can be used to disable a setting using the syntax "
                "`--define=[SECTION]!KEY` or even `--define=[!SECTION]`."
            ),
            action="append",
            default=[],
            dest="defines"
        )
        self.add_option(
            "--rose-template-variable", '-S',
            help=(
                "As `--define`, but with an implicit `[SECTION]` for "
                "workflow variables."
            ),
            action="append",
            default=[],
            dest="rose_template_vars"
        )

    def parse_args(self, api_args, remove_opts=None):
        """Parse options and arguments, overrides OptionParser.parse_args.

        Args:
            api_args (list):
                Command line options if passed via Python as opposed to
                sys.argv
            remove_opts (list):
                List of standard options to remove before parsing.

        """
        if self.auto_add:
            # Add common options after command-specific options.
            self.add_std_options()

        if remove_opts:
            for opt in remove_opts:
                with suppress(ValueError):
                    self.remove_option(opt)

        (options, args) = OptionParser.parse_args(self, api_args)

        if len(args) < self.n_compulsory_args:
            self.error("Wrong number of arguments (too few)")
        elif (
            not self.unlimited_args
            and len(args) > self.n_compulsory_args + self.n_optional_args
        ):
            self.error("Wrong number of arguments (too many)")

        if self.jset and options.templatevars_file:
            options.templatevars_file = os.path.abspath(
                os.path.expanduser(options.templatevars_file))

        cylc.flow.flags.verbosity = options.verbosity

        # Set up stream logging for CLI. Note:
        # 1. On choosing STDERR: Log messages are diagnostics, so STDERR is
        #    the better choice for the logging stream. This allows us to use
        #    STDOUT for verbosity agnostic outputs.
        # 2. Scheduler will remove this handler when it becomes a daemon.
        if options.verbosity < 0:
            LOG.setLevel(logging.WARNING)
        elif options.verbosity > 0:
            LOG.setLevel(logging.DEBUG)
        else:
            LOG.setLevel(logging.INFO)
        RSYNC_LOG.setLevel(logging.INFO)
        # Remove NullHandler before adding the StreamHandler
        for log in (LOG, RSYNC_LOG):
            while log.handlers:
                log.handlers[0].close()
                log.removeHandler(log.handlers[0])
        log_handler = logging.StreamHandler(sys.stderr)
        log_handler.setFormatter(CylcLogFormatter(
            timestamp=options.log_timestamp,
            dev_info=(options.verbosity > 2)
        ))
        LOG.addHandler(log_handler)

        if self.segregated_log:
            setup_segregated_log_streams(LOG, log_handler)

        return (options, args)

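# A minimal construction sketch (command name, usage string and workflow
# ID are hypothetical) showing how a cylc sub-command might build and use
# this parser.
parser = CylcOptionParser(
    usage='cylc my-cmd [OPTIONS] ARGS',
    argdoc=[('WORKFLOW', 'Workflow ID')],
    comms=True,
)
options, args = parser.parse_args(['my-workflow', '--comms-timeout=10'])
print(options.comms_timeout, args)
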
def main(_, options, reg):
    """cylc validate CLI."""
    profiler = Profiler(None, options.profile_mode)
    profiler.start()

    if not cylc.flow.flags.debug:
        # for readability omit timestamps from logging unless in debug mode
        for handler in LOG.handlers:
            if isinstance(handler.formatter, CylcLogFormatter):
                handler.formatter.configure(timestamp=False)

    suite, flow_file = parse_suite_arg(options, reg)
    cfg = SuiteConfig(
        suite,
        flow_file,
        options,
        load_template_vars(options.templatevars, options.templatevars_file),
        output_fname=options.output,
        mem_log_func=profiler.log_memory)

    # Check bounds of sequences
    out_of_bounds = [
        str(seq) for seq in cfg.sequences
        if seq.get_first_point(cfg.start_point) is None]
    if out_of_bounds:
        if len(out_of_bounds) > 1:
            # avoid spamming users with multiple warnings
            msg = (
                'multiple sequences out of bounds for initial cycle point '
                '%s:\n%s' % (
                    cfg.start_point,
                    '\n'.join(textwrap.wrap(', '.join(out_of_bounds), 70))))
        else:
            msg = '%s: sequence out of bounds for initial cycle point %s' % (
                out_of_bounds[0], cfg.start_point)
        if options.strict:
            LOG.warning(msg)
        elif cylc.flow.flags.verbose:
            sys.stderr.write(' + %s\n' % msg)

    # Instantiate tasks and force evaluation of trigger expressions.
    # (Taken from config.py to avoid circular import problems.)
    # TODO - This is not exhaustive, it only uses the initial cycle point.
    if cylc.flow.flags.verbose:
        print('Instantiating tasks to check trigger expressions')
    flow_label = FlowLabelMgr().get_new_label()
    for name, taskdef in cfg.taskdefs.items():
        try:
            itask = TaskProxy(taskdef, cfg.start_point, flow_label)
        except TaskProxySequenceBoundsError:
            # Should already have failed above in strict mode.
            mesg = 'Task out of bounds for %s: %s\n' % (cfg.start_point, name)
            if cylc.flow.flags.verbose:
                sys.stderr.write(' + %s\n' % mesg)
            continue
        except Exception as exc:
            raise SuiteConfigError(
                'failed to instantiate task %s: %s' % (name, exc))

        # force trigger evaluation now
        try:
            itask.state.prerequisites_eval_all()
        except TriggerExpressionError as exc:
            err = str(exc)
            if '@' in err:
                print(
                    f"ERROR, {name}: xtriggers can't be in conditional"
                    f" expressions: {err}",
                    file=sys.stderr)
            else:
                print('ERROR, %s: bad trigger: %s' % (name, err),
                      file=sys.stderr)
            raise SuiteConfigError("ERROR: bad trigger")
        except Exception as exc:
            print(str(exc), file=sys.stderr)
            raise SuiteConfigError(
                '%s: failed to evaluate triggers.' % name)
        if cylc.flow.flags.verbose:
            print(' + %s ok' % itask.identity)

    print(cparse('<green>Valid for cylc-%s</green>' % CYLC_VERSION))
    profiler.stop()

def report_bad_options(bad_options, is_set=False):
    bad_opts = get_broadcast_bad_options_report(bad_options, is_set=is_set)
    if bad_opts is not None:
        return cparse(f'<red>{bad_opts}</red>')
    return bad_opts

def main(parser: COP, options: 'Values', workflow_id: str) -> None:
    """cylc validate CLI."""
    profiler = Profiler(None, options.profile_mode)
    profiler.start()

    if cylc.flow.flags.verbosity < 2:
        disable_timestamps(LOG)

    workflow_id, _, flow_file = parse_id(
        workflow_id,
        src=True,
        constraint='workflows',
    )
    cfg = WorkflowConfig(
        workflow_id, flow_file, options, get_template_vars(options),
        output_fname=options.output, mem_log_func=profiler.log_memory)

    # Instantiate tasks and force evaluation of trigger expressions.
    # (Taken from config.py to avoid circular import problems.)
    # TODO - This is not exhaustive, it only uses the initial cycle point.
    if cylc.flow.flags.verbosity > 0:
        print('Instantiating tasks to check trigger expressions')
    for name, taskdef in cfg.taskdefs.items():
        try:
            itask = TaskProxy(taskdef, cfg.start_point)
        except TaskProxySequenceBoundsError:
            # Should already have failed above.
            mesg = 'Task out of bounds for %s: %s\n' % (cfg.start_point, name)
            if cylc.flow.flags.verbosity > 0:
                sys.stderr.write(' + %s\n' % mesg)
            continue
        except Exception as exc:
            raise WorkflowConfigError(
                'failed to instantiate task %s: %s' % (name, exc))

        # force trigger evaluation now
        try:
            itask.state.prerequisites_eval_all()
        except TriggerExpressionError as exc:
            err = str(exc)
            if '@' in err:
                print(
                    f"ERROR, {name}: xtriggers can't be in conditional"
                    f" expressions: {err}",
                    file=sys.stderr)
            else:
                print('ERROR, %s: bad trigger: %s' % (name, err),
                      file=sys.stderr)
            raise WorkflowConfigError("ERROR: bad trigger")
        except Exception as exc:
            print(str(exc), file=sys.stderr)
            raise WorkflowConfigError(
                '%s: failed to evaluate triggers.' % name)
        if cylc.flow.flags.verbosity > 0:
            print(' + %s ok' % itask.identity)

    print(cparse('<green>Valid for cylc-%s</green>' % CYLC_VERSION))
    profiler.stop()

class CylcLogFormatter(logging.Formatter):
    """Format log record in standard Cylc way.

    Message in '%(asctime)s %(levelname)-2s - %(message)s' format.
    Indent continuation in multi-line messages.
    Date time in ISO date time with correct time zone.
    """

    COLORS = {
        'CRITICAL': cparse('<red><bold>{0}</bold></red>'),
        'ERROR': cparse('<red>{0}</red>'),
        'WARNING': cparse('<yellow>{0}</yellow>'),
        'DEBUG': cparse('<fg #888888>{0}</fg #888888>')
    }

    # default hard-coded max width for log entries
    # NOTE: this should be sufficiently long that log entries read by the
    #       daemonise script (url, pid) are not wrapped
    MAX_WIDTH = 999

    def __init__(self, timestamp=True, color=False, max_width=None):
        self.timestamp = None
        self.color = None
        self.max_width = self.MAX_WIDTH
        self.wrapper = None
        self.configure(timestamp, color, max_width)
        # You may find adding %(filename)s %(lineno)d useful when debugging
        logging.Formatter.__init__(
            self,
            '%(asctime)s %(levelname)-2s - %(message)s',
            '%Y-%m-%dT%H:%M:%S%Z')

    def configure(self, timestamp=None, color=None, max_width=None):
        """Reconfigure the format settings."""
        if timestamp is not None:
            self.timestamp = timestamp
        if color is not None:
            self.color = color
        if max_width is not None:
            self.max_width = max_width
        if self.max_width is None:
            self.wrapper = lambda x: [x]
        else:
            self.wrapper = partial(textwrap.wrap, width=self.max_width)

    def format(self, record):
        """Indent continuation lines in multi-line messages."""
        text = logging.Formatter.format(self, record)
        if not self.timestamp:
            _, text = text.split(' ', 1)  # ISO8601 time points have no spaces
        if self.color and record.levelname in self.COLORS:
            text = self.COLORS[record.levelname].format(text)
        return '\n\t'.join(
            wrapped_line
            for line in text.splitlines()
            for wrapped_line in self.wrapper(line))

    def formatTime(self, record, datefmt=None):
        """Formats the record time as an ISO date time with correct time zone.

        Note: This should become redundant in Python 3, because
        "time.strftime" will handle time zone from "localtime" properly.
        """
        return get_time_string_from_unix_time(record.created)

class CylcLogFormatter(logging.Formatter):
    """Format log record in standard Cylc way.

    Message in '%(asctime)s %(levelname)-2s - %(message)s' format.
    Indent continuation in multi-line messages.
    Date time in ISO date time with correct time zone.
    """

    COLORS = {
        'CRITICAL': cparse('<red><bold>{0}</bold></red>'),
        'ERROR': cparse('<red>{0}</red>'),
        'WARNING': cparse('<yellow>{0}</yellow>'),
        'DEBUG': cparse('<fg #888888>{0}</fg #888888>')
    }

    # default hard-coded max width for log entries
    # NOTE: this should be sufficiently long that log entries read by the
    #       daemonise script (url, pid) are not wrapped
    MAX_WIDTH = 999

    def __init__(
        self,
        timestamp: bool = True,
        color: bool = False,
        max_width: Optional[int] = None,
        dev_info: bool = False
    ) -> None:
        self.timestamp = None
        self.color = None
        self.max_width = self.MAX_WIDTH
        self.configure(timestamp, color, max_width)
        prefix = '%(asctime)s %(levelname)-2s - '
        if dev_info is True:
            prefix += '[%(module)s:%(lineno)d] - '
        logging.Formatter.__init__(
            self,
            prefix + '%(message)s',
            '%Y-%m-%dT%H:%M:%S%Z')

    def configure(self, timestamp=None, color=None, max_width=None):
        """Reconfigure the format settings."""
        if timestamp is not None:
            self.timestamp = timestamp
        if color is not None:
            self.color = color
        if max_width is not None:
            self.max_width = max_width

    def format(self, record):  # noqa: A003 (method name not local)
        """Indent continuation lines in multi-line messages."""
        text = logging.Formatter.format(self, record)
        if not self.timestamp:
            _, text = text.split(' ', 1)  # ISO8601 time points have no spaces
        if self.color and record.levelname in self.COLORS:
            text = self.COLORS[record.levelname].format(text)
        if self.max_width:
            return '\n'.join(
                line
                for part_num, part in enumerate(text.splitlines())
                for line in textwrap.wrap(
                    part,
                    width=self.max_width,
                    initial_indent='' if part_num == 0 else '    ',
                    subsequent_indent='    ',
                )
            )
        else:
            return '\n    '.join(text.splitlines())

    def formatTime(self, record, datefmt=None):
        """Formats the record time as an ISO date time with correct time zone.

        Note: This should become redundant in Python 3, because
        "time.strftime" will handle time zone from "localtime" properly.
        """
        return get_time_string_from_unix_time(record.created)

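# A minimal wiring sketch (the logger name is hypothetical, and the
# module's own imports are assumed in scope): attach the formatter to a
# stderr handler for colourised, timestamped output.
import logging
import sys

handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(CylcLogFormatter(timestamp=True, color=True))
log = logging.getLogger('cylc-demo')
log.addHandler(handler)
log.setLevel(logging.INFO)
log.warning('renders yellow on a colour terminal')
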
Filters allow you to filter for specific states.
Filters are prefixed by a colon (:).

Examples:
  *:running                       # All running workflows
  workflow//*:running             # All running cycles in workflow
  workflow//cycle/*:running       # All running tasks in workflow//cycle
  workflow//cycle/task/*:running  # All running jobs in
                                  # workflow//cycle/task
'''

# because this command is not served from behind cli_function like the
# other cylc commands we have to manually patch in colour support
USAGE = format_shell_examples(USAGE)
USAGE = cparse(USAGE)

# all sub-commands
# {name: entry_point}
COMMANDS: dict = {
    entry_point.name: entry_point
    for entry_point in iter_entry_points('cylc.command')
}

# aliases for sub-commands
# {alias_name: command_name}
ALIASES = {
    'bcast': 'broadcast',
    'compare': 'diff',
    'cyclepoint': 'cycle-point',
    'cycletime': 'cycle-point',

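# Sketch of how a user-typed alias resolves through the table above
# (assuming the ALIASES dict is populated as in the source).
command = 'bcast'
if command in ALIASES:
    command = ALIASES[command]
assert command == 'broadcast'
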
def scheduler_cli(parser, options, reg):
    """Run the workflow.

    This function should contain all of the command line facing
    functionality of the Scheduler, exit codes, logging, etc.

    The Scheduler itself should be a Python object you can import and
    run in a regular Python session so cannot contain this kind of
    functionality.

    """
    workflow_files.validate_flow_name(reg)
    reg = os.path.normpath(reg)
    try:
        workflow_files.detect_old_contact_file(reg)
    except ServiceFileError as exc:
        print(f"Resuming already-running workflow\n\n{exc}")
        pclient = WorkflowRuntimeClient(reg, timeout=options.comms_timeout)
        mutation_kwargs = {
            'request_string': RESUME_MUTATION,
            'variables': {
                'wFlows': [reg]
            }
        }
        pclient('graphql', mutation_kwargs)
        sys.exit(0)

    # re-execute on another host if required
    _distribute(options.host)

    # print the start message
    if (
        cylc.flow.flags.verbosity > -1
        and (options.no_detach or options.format == 'plain')
    ):
        print(cparse(cylc_header()))

    # setup the scheduler
    # NOTE: asyncio.run opens an event loop, runs your coro,
    #       then shuts down async generators and closes the event loop
    scheduler = Scheduler(reg, options)
    asyncio.run(
        _setup(scheduler)
    )

    # daemonize if requested
    # NOTE: asyncio event loops cannot persist across daemonization
    #       ensure you have tidied up all threads etc before daemonizing
    if not options.no_detach:
        from cylc.flow.daemonize import daemonize
        daemonize(scheduler)

    # setup loggers
    _open_logs(reg, options.no_detach)

    # run the workflow
    ret = asyncio.run(
        _run(scheduler)
    )

    # exit
    # NOTE: we must clean up all asyncio / threading stuff before exiting
    # NOTE: any threads which include sleep statements could cause
    #       sys.exit to hang if not shut down properly
    LOG.info("DONE")
    _close_logs()
    sys.exit(ret)

def __init__(self, usage, argdoc=None, comms=False, jset=False,
             multitask=False, multitask_nocycles=False, prep=False,
             auto_add=True, icp=False, color=True):

    self.auto_add = auto_add
    if argdoc is None:
        if prep:
            argdoc = [('SUITE', 'Suite name or path')]
        else:
            argdoc = [('REG', 'Suite name')]

    # make comments grey in usage for readability
    usage = cparse(
        re.sub(
            r'^(\s*(?:\$[^#]+)?)(#.*)$',
            r'\1<dim>\2</dim>',
            usage,
            flags=re.M
        )
    )

    if multitask:
        usage += self.MULTITASKCYCLE_USAGE
    elif multitask_nocycles:
        # glob on task names but not cycle points
        usage += self.MULTITASK_USAGE

    args = ""
    self.n_compulsory_args = 0
    self.n_optional_args = 0
    self.unlimited_args = False
    self.comms = comms
    self.jset = jset
    self.prep = prep
    self.icp = icp
    self.suite_info = []
    self.color = color

    maxlen = 0
    for arg in argdoc:
        if len(arg[0]) > maxlen:
            maxlen = len(arg[0])

    if argdoc:
        usage += "\n\nArguments:"
        for arg in argdoc:
            if arg[0].startswith('['):
                self.n_optional_args += 1
            else:
                self.n_compulsory_args += 1
            if arg[0].endswith('...]'):
                self.unlimited_args = True

            args += arg[0] + " "

            pad = (maxlen - len(arg[0])) * ' ' + ' '
            usage += "\n " + arg[0] + pad + arg[1]
    usage = usage.replace('ARGS', args)

    OptionParser.__init__(self, usage)

def main(parser: COP, options: 'Values', reg: str) -> None:
    """cylc validate CLI."""
    profiler = Profiler(None, options.profile_mode)
    profiler.start()

    if cylc.flow.flags.verbosity < 2:
        disable_timestamps(LOG)

    workflow, flow_file = parse_reg(reg, src=True)
    cfg = WorkflowConfig(
        workflow, flow_file, options, get_template_vars(options),
        output_fname=options.output, mem_log_func=profiler.log_memory
    )

    # Check bounds of sequences
    out_of_bounds = [
        str(seq) for seq in cfg.sequences
        if seq.get_first_point(cfg.start_point) is None
    ]
    if out_of_bounds:
        if len(out_of_bounds) > 1:
            # avoid spamming users with multiple warnings
            out_of_bounds_str = '\n'.join(
                textwrap.wrap(', '.join(out_of_bounds), 70))
            msg = (
                "multiple sequences out of bounds for initial cycle point "
                f"{cfg.start_point}:\n{out_of_bounds_str}")
        else:
            msg = (
                f"{out_of_bounds[0]}: sequence out of bounds for "
                f"initial cycle point {cfg.start_point}")
        LOG.warning(msg)

    # Instantiate tasks and force evaluation of trigger expressions.
    # (Taken from config.py to avoid circular import problems.)
    # TODO - This is not exhaustive, it only uses the initial cycle point.
    if cylc.flow.flags.verbosity > 0:
        print('Instantiating tasks to check trigger expressions')
    for name, taskdef in cfg.taskdefs.items():
        try:
            itask = TaskProxy(taskdef, cfg.start_point)
        except TaskProxySequenceBoundsError:
            # Should already have failed above.
            mesg = 'Task out of bounds for %s: %s\n' % (cfg.start_point, name)
            if cylc.flow.flags.verbosity > 0:
                sys.stderr.write(' + %s\n' % mesg)
            continue
        except Exception as exc:
            raise WorkflowConfigError(
                'failed to instantiate task %s: %s' % (name, exc))

        # force trigger evaluation now
        try:
            itask.state.prerequisites_eval_all()
        except TriggerExpressionError as exc:
            err = str(exc)
            if '@' in err:
                print(
                    f"ERROR, {name}: xtriggers can't be in conditional"
                    f" expressions: {err}",
                    file=sys.stderr)
            else:
                print('ERROR, %s: bad trigger: %s' % (name, err),
                      file=sys.stderr)
            raise WorkflowConfigError("ERROR: bad trigger")
        except Exception as exc:
            print(str(exc), file=sys.stderr)
            raise WorkflowConfigError(
                '%s: failed to evaluate triggers.' % name)
        if cylc.flow.flags.verbosity > 0:
            print(' + %s ok' % itask.identity)

    print(cparse('<green>Valid for cylc-%s</green>' % CYLC_VERSION))
    profiler.stop()