def parse_args(self, api_args, remove_opts=None):
    """Parse options and arguments, overrides OptionParser.parse_args.

    Args:
        api_args (list):
            Command line options if passed via Python as opposed to
            sys.argv.
        remove_opts (list):
            List of standard options to remove before parsing.
    """
    if self.auto_add:
        # Add common options after command-specific options.
        self.add_std_options()

    if remove_opts:
        for opt in remove_opts:
            try:
                self.remove_option(opt)
            except ValueError:
                pass

    (options, args) = OptionParser.parse_args(self, api_args)

    if len(args) < self.n_compulsory_args:
        self.error("Wrong number of arguments (too few)")
    elif not self.unlimited_args and \
            len(args) > self.n_compulsory_args + self.n_optional_args:
        self.error("Wrong number of arguments (too many)")

    if self.jset:
        if options.templatevars_file:
            options.templatevars_file = os.path.abspath(
                os.path.expanduser(options.templatevars_file))

    cylc.flow.flags.verbose = options.verbose
    cylc.flow.flags.debug = options.debug

    # Set up stream logging for CLI. Note:
    # 1. On choosing STDERR: Log messages are diagnostics, so STDERR is
    #    the better choice for the logging stream. This allows us to use
    #    STDOUT for verbosity agnostic outputs.
    # 2. Suite server programs will remove this handler when they
    #    daemonise.
    if options.debug or options.verbose:
        LOG.setLevel(logging.DEBUG)
    else:
        LOG.setLevel(logging.INFO)
    RSYNC_LOG.setLevel(logging.INFO)
    # Remove the NullHandler before adding the StreamHandler.
    while LOG.handlers:
        LOG.handlers[0].close()
        LOG.removeHandler(LOG.handlers[0])
    errhandler = logging.StreamHandler(sys.stderr)
    errhandler.setFormatter(
        CylcLogFormatter(timestamp=options.log_timestamp))
    LOG.addHandler(errhandler)

    return (options, args)
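# The handler swap at the end of parse_args can be exercised in isolation.
# Below is a minimal stdlib-only sketch of the same pattern; the logger
# name, format string and the setup_cli_logging helper are illustrative
# assumptions, not Cylc code.
import logging
import sys


def setup_cli_logging(verbose=False, debug=False):
    """Replace a logger's handlers with a STDERR stream handler."""
    log = logging.getLogger("demo.cli")
    log.setLevel(logging.DEBUG if (debug or verbose) else logging.INFO)
    # Drop any existing handlers (e.g. a NullHandler) before adding ours.
    while log.handlers:
        log.handlers[0].close()
        log.removeHandler(log.handlers[0])
    # Diagnostics go to STDERR so STDOUT stays free for real output.
    handler = logging.StreamHandler(sys.stderr)
    handler.setFormatter(logging.Formatter("%(levelname)s - %(message)s"))
    log.addHandler(handler)
    return log


# e.g. setup_cli_logging(verbose=True).debug("visible because verbose=True")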
def test_value_error_raises_system_exit(
    self, mocked_glbl_cfg, mocked_get_suite_run_log_name,
):
    """Test that a ValueError raised while writing to a log stream does
    not result in multiple exceptions (which could lead to an infinite
    loop in some cases); instead it **must** raise SystemExit."""
    with tempfile.NamedTemporaryFile() as tf:
        # Mock objects used when creating the file handler.
        mocked = mock.MagicMock()
        mocked_glbl_cfg.return_value = mocked
        mocked.get.return_value = 100
        mocked_get_suite_run_log_name.return_value = tf.name
        file_handler = TimestampRotatingFileHandler("suiteA", False)
        # The next line is important as pytest can report a "Bad file
        # descriptor" due to a FileHandler with the default mode "a"
        # (pytest tries to r/w).
        file_handler.mode = "a+"

        # Enable the logger.
        LOG.setLevel(logging.INFO)
        LOG.addHandler(file_handler)

        # Disable raising uncaught exceptions in logging, due to the
        # file handler using stdin.fileno. See the following for more:
        # https://github.com/pytest-dev/pytest/issues/2276 &
        # https://github.com/pytest-dev/pytest/issues/1585
        logging.raiseExceptions = False

        # The first message will initialise the stream and the handler.
        LOG.info("What could go")

        # Swap the handler's stream for a mock whose seek raises
        # ValueError.
        old_stream = file_handler.stream
        file_handler.stream = mock.MagicMock()
        file_handler.stream.seek = mock.MagicMock()
        file_handler.stream.seek.side_effect = ValueError

        try:
            # The next call invokes the emit method, which uses the
            # mocked stream.
            LOG.info("wrong?!")
            self.fail("Exception SystemExit was not raised")
        except SystemExit:
            pass
        finally:
            # Clean up.
            file_handler.stream = old_stream
            file_handler.close()
            LOG.removeHandler(file_handler)
            logging.raiseExceptions = True
def test_value_error_raises_system_exit(self, mocked_glbl_cfg):
    """Test that a ValueError raised while writing to a log stream does
    not result in multiple exceptions (which could lead to an infinite
    loop in some cases); instead it **must** raise SystemExit."""
    with tempfile.NamedTemporaryFile() as tf:
        # Mock objects used when creating the file handler.
        mocked = mock.MagicMock()
        mocked_glbl_cfg.return_value = mocked
        mocked.get_derived_host_item.return_value = tf.name
        mocked.get.return_value = 100
        file_handler = TimestampRotatingFileHandler("suiteA", False)
        # The next line is important as pytest can report a "Bad file
        # descriptor" due to a FileHandler with the default mode "a"
        # (pytest tries to r/w).
        file_handler.mode = "a+"

        # Enable the logger.
        LOG.setLevel(logging.INFO)
        LOG.addHandler(file_handler)

        # Disable raising uncaught exceptions in logging, due to the
        # file handler using stdin.fileno. See the following for more:
        # https://github.com/pytest-dev/pytest/issues/2276 &
        # https://github.com/pytest-dev/pytest/issues/1585
        logging.raiseExceptions = False

        # The first message will initialise the stream and the handler.
        LOG.info("What could go")

        # Swap the handler's stream for a mock whose seek raises
        # ValueError.
        old_stream = file_handler.stream
        file_handler.stream = mock.MagicMock()
        file_handler.stream.seek = mock.MagicMock()
        file_handler.stream.seek.side_effect = ValueError

        try:
            # The next call invokes the emit method, which uses the
            # mocked stream.
            LOG.info("wrong?!")
            self.fail("Exception SystemExit was not raised")
        except SystemExit:
            pass
        finally:
            # Clean up.
            file_handler.stream = old_stream
            file_handler.close()
            LOG.removeHandler(file_handler)
            logging.raiseExceptions = True
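# Both test variants above assert the same contract: a ValueError raised
# while the handler writes must surface as SystemExit rather than feed
# back into the logging machinery and cascade. A hedged standalone
# analogue of that contract follows; StrictFileLikeHandler and
# BrokenStream are made-up names for illustration, not the actual
# TimestampRotatingFileHandler implementation.
import io
import logging
import sys


class StrictFileLikeHandler(logging.StreamHandler):
    def emit(self, record):
        try:
            msg = self.format(record)
            self.stream.write(msg + self.terminator)
            self.flush()
        except ValueError:
            # A broken stream is unrecoverable here: bail out instead of
            # letting logging retry and cascade.
            sys.exit(1)


class BrokenStream(io.StringIO):
    def write(self, s):
        raise ValueError("stream gone")


demo_log = logging.getLogger("demo.strict")
demo_log.addHandler(StrictFileLikeHandler(BrokenStream()))
try:
    demo_log.error("boom")
except SystemExit:
    print("SystemExit raised, as the tests expect")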
def parse_args(self, api_args, remove_opts=None):
    """Parse options and arguments, overrides OptionParser.parse_args.

    Args:
        api_args (list):
            Command line options if passed via Python as opposed to
            sys.argv.
        remove_opts (list):
            List of standard options to remove before parsing.
    """
    if self.auto_add:
        # Add common options after command-specific options.
        self.add_std_options()

    if remove_opts:
        for opt in remove_opts:
            with suppress(ValueError):
                self.remove_option(opt)

    (options, args) = OptionParser.parse_args(self, api_args)

    if len(args) < self.n_compulsory_args:
        self.error("Wrong number of arguments (too few)")
    elif (not self.unlimited_args
            and len(args) > self.n_compulsory_args + self.n_optional_args):
        self.error("Wrong number of arguments (too many)")

    if self.jset and options.templatevars_file:
        options.templatevars_file = os.path.abspath(
            os.path.expanduser(options.templatevars_file))

    cylc.flow.flags.verbosity = options.verbosity

    # Set up stream logging for CLI. Note:
    # 1. On choosing STDERR: Log messages are diagnostics, so STDERR is
    #    the better choice for the logging stream. This allows us to use
    #    STDOUT for verbosity agnostic outputs.
    # 2. Scheduler will remove this handler when it becomes a daemon.
    if options.verbosity < 0:
        LOG.setLevel(logging.WARNING)
    elif options.verbosity > 0:
        LOG.setLevel(logging.DEBUG)
    else:
        LOG.setLevel(logging.INFO)
    RSYNC_LOG.setLevel(logging.INFO)
    # Remove the NullHandler before adding the StreamHandler.
    for log in (LOG, RSYNC_LOG):
        while log.handlers:
            log.handlers[0].close()
            log.removeHandler(log.handlers[0])
    log_handler = logging.StreamHandler(sys.stderr)
    log_handler.setFormatter(CylcLogFormatter(
        timestamp=options.log_timestamp,
        dev_info=(options.verbosity > 2)))
    LOG.addHandler(log_handler)

    if self.segregated_log:
        setup_segregated_log_streams(LOG, log_handler)

    return (options, args)
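# The main change from the earlier parse_args is collapsing the separate
# --verbose/--debug flags into one signed verbosity count (-q decrements,
# -v increments). A minimal sketch of that mapping; level_for is an
# illustrative helper, not a Cylc function.
import logging


def level_for(verbosity):
    """Map a signed verbosity count to a stdlib logging level."""
    if verbosity < 0:
        return logging.WARNING    # quiet: warnings and errors only
    if verbosity > 0:
        return logging.DEBUG      # verbose or debug
    return logging.INFO           # default


assert level_for(-1) == logging.WARNING
assert level_for(0) == logging.INFO
assert level_for(2) == logging.DEBUG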
def main(parser, options, suite, *task_ids):
    """cylc submit CLI.

    No TASK EVENT HOOKS are set for the submit command because there is
    no scheduler instance watching for task failure etc.

    Note: a suite contact env file is not written by this command (it
    would overwrite the real one if the suite is running).
    """
    if not options.verbose and not options.debug:
        LOG.setLevel(WARNING)
    for task_id in task_ids:
        if not TaskID.is_valid_id(task_id):
            raise UserInputError("Invalid task ID %s" % task_id)
    suiterc = get_suite_rc(suite)
    suite_dir = os.path.dirname(suiterc)
    # For user-defined batch system handlers.
    sys.path.append(os.path.join(suite_dir, 'python'))

    # Load suite config and tasks.
    config = SuiteConfig(
        suite, suiterc, options,
        load_template_vars(options.templatevars, options.templatevars_file))
    itasks = []
    for task_id in task_ids:
        name_str, point_str = TaskID.split(task_id)
        taskdefs = config.find_taskdefs(name_str)
        if not taskdefs:
            raise UserInputError("No task found for %s" % task_id)
        for taskdef in taskdefs:
            itasks.append(TaskProxy(
                taskdef, get_point(point_str).standardise(),
                is_startup=True))

    # Initialise the job submit environment.
    make_suite_run_tree(suite)
    # Extract job.sh from the library, for use in job scripts.
    extract_resources(get_suite_srv_dir(suite), ['etc/job.sh'])
    pool = SubProcPool()
    owner = get_user()
    job_pool = JobPool(suite, owner)
    db_mgr = SuiteDatabaseManager()
    task_job_mgr = TaskJobManager(
        suite, pool, db_mgr,
        TaskEventsManager(suite, pool, db_mgr, BroadcastMgr(db_mgr),
                          job_pool),
        job_pool)
    task_job_mgr.task_remote_mgr.single_task_mode = True
    task_job_mgr.job_file_writer.set_suite_env({
        'CYLC_UTC': str(config.cfg['cylc']['UTC mode']),
        'CYLC_DEBUG': str(cylc.flow.flags.debug).lower(),
        'CYLC_VERBOSE': str(cylc.flow.flags.verbose).lower(),
        'CYLC_SUITE_NAME': suite,
        'CYLC_CYCLING_MODE': str(config.cfg['scheduling']['cycling mode']),
        'CYLC_SUITE_INITIAL_CYCLE_POINT': str(
            config.cfg['scheduling']['initial cycle point']),
        'CYLC_SUITE_FINAL_CYCLE_POINT': str(
            config.cfg['scheduling']['final cycle point']),
    })

    ret_code = 0
    waiting_tasks = list(itasks)
    if options.dry_run:
        while waiting_tasks:
            prep_tasks, bad_tasks = task_job_mgr.prep_submit_task_jobs(
                suite, waiting_tasks, dry_run=True)
            for itask in prep_tasks + bad_tasks:
                waiting_tasks.remove(itask)
            if waiting_tasks:
                task_job_mgr.proc_pool.process()
                sleep(1.0)
        for itask in itasks:
            if itask.local_job_file_path:
                print('JOB SCRIPT=%s' % itask.local_job_file_path)
            else:
                print('Unable to prepare job file for %s' % itask.identity,
                      file=sys.stderr)
                ret_code = 1
    else:
        while waiting_tasks:
            for itask in task_job_mgr.submit_task_jobs(suite, waiting_tasks):
                waiting_tasks.remove(itask)
            if waiting_tasks:
                task_job_mgr.proc_pool.process()
                sleep(1.0)
        while task_job_mgr.proc_pool.is_not_done():
            task_job_mgr.proc_pool.process()
        for itask in itasks:
            if itask.summary.get('submit_method_id') is not None:
                print('[%s] Job ID: %s' % (
                    itask.identity, itask.summary['submit_method_id']))
            if itask.state(TASK_STATUS_SUBMIT_FAILED):
                ret_code = 1
    sys.exit(ret_code)
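# The submit loops above share one shape: keep offering the remaining
# tasks to the manager, removing whichever it accepts, and poll the
# process pool between rounds. A schematic, standalone reduction of that
# loop; drain and submit_batch are illustrative stand-ins, not the
# task_job_mgr API.
import time


def drain(submit_batch, pending, poll=1.0):
    """Re-offer pending items until submit_batch has accepted them all."""
    pending = list(pending)
    while pending:
        for item in submit_batch(pending):
            pending.remove(item)
        if pending:
            time.sleep(poll)


# e.g. accept at most two items per round; the loop re-offers the rest:
drain(lambda batch: batch[:2], ["a", "b", "c", "d", "e"], poll=0.0)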