def _build_IOLogRecord(cls, record_repr):
    """
    Convert the representation of IOLogRecord back to the object.

    :param record_repr:
        A three-element list: [delay, stream_name, base64-data] where
        delay is a non-negative float, stream_name is 'stdout' or
        'stderr' and the data is an ASCII base64 string.
    :returns:
        The reconstructed IOLogRecord.
    :raises CorruptedSessionError:
        If any element fails validation or decoding.
    """
    _validate(record_repr, value_type=list)
    delay = _validate(record_repr, key=0, value_type=float)
    if delay < 0:
        # TRANSLATORS: please keep delay untranslated
        raise CorruptedSessionError(_("delay cannot be negative"))
    stream_name = _validate(
        record_repr, key=1, value_type=str,
        value_choice=['stdout', 'stderr'])
    data = _validate(record_repr, key=2, value_type=str)
    # Each data item is a base64 string created by encoding the bytes and
    # converting them to ASCII. To get the original we need to undo that
    # operation.
    try:
        data = data.encode("ASCII")
    except UnicodeEncodeError:
        raise CorruptedSessionError(
            _("record data {!r} is not ASCII").format(data))
    try:
        data = base64.standard_b64decode(data)
    except binascii.Error:
        raise CorruptedSessionError(
            _("record data {!r} is not correct base64").format(data))
    return IOLogRecord(delay, stream_name, data)
def _restore_SessionState_desired_job_list(cls, session, session_repr): """ Extract the representation of desired_job_list from the session and set it back to the session object. This method should be called after all the jobs are discovered. :raises CorruptedSessionError: if desired_job_list refers to unknown job """ # List of all the _ids_ of the jobs that were selected desired_job_list = [ _validate( job_id, value_type=str, value_type_msg=_("Each job id must be a string")) for job_id in _validate( session_repr, key='desired_job_list', value_type=list)] # Restore job selection logger.debug( _("calling update_desired_job_list(%r)"), desired_job_list) try: session.update_desired_job_list([ session.job_state_map[job_id].job for job_id in desired_job_list]) except KeyError as exc: raise CorruptedSessionError( _("'desired_job_list' refers to unknown job {!r}").format( exc.args[0]))
def register_parser(self, subparsers):
    """
    Overridden method of CommandBase.

    :param subparsers:
        The argparse subparsers objects in which command line argument
        specification should be created.

    This method is invoked by the command line handling code to register
    arguments specific to this sub-command. It must also register itself as
    the command class with the ``command`` default.
    """
    parser = self.add_subcommand(subparsers)
    parser.add_argument(
        "-n", "--dry-run", default=False, action="store_true",
        help=_("don't actually do anything"))
    group = parser.add_argument_group(title=_("actions to perform"))
    # All three skip-flags default to False and become True when passed.
    # BUG FIX: --dont-update-pot previously used action="store_false"
    # with default=False, which stored the value the option already had,
    # making the flag a silent no-op.
    group.add_argument(
        "--dont-update-pot", default=False, action="store_true",
        help=_("do not update the translation template"))
    group.add_argument(
        "--dont-merge-po", default=False, action="store_true",
        help=_("do not merge translation files with the template"))
    group.add_argument(
        "--dont-build-mo", default=False, action="store_true",
        help=_("do not build binary translation files"))
    # Register this command object as the handler (required by the
    # contract stated in the docstring; idempotent if add_subcommand
    # already does it).
    parser.set_defaults(command=self)
def manager(self):
    """
    Return the session manager, flushing any pending commit first.

    :raises AttributeError:
        If the session has not been opened yet.
    """
    logger.debug(_(".manager accessed"))
    # Apply any deferred commit before exposing the manager
    if self._commit_hint is not None:
        self._commit_manager()
    result = self._manager
    if result is None:
        raise AttributeError(_("Session not ready, did you call open()?"))
    return result
def main(self, argv=None): """ Run as if invoked from command line directly """ # Another try/catch block for catching KeyboardInterrupt # This one is really only meant for the early init abort # (when someone runs main but bails out before we really # get to the point when we do something useful and setup # all the exception handlers). try: self.early_init() early_ns = self._early_parser.parse_args(argv) self.late_init(early_ns) # Construct the full command line argument parser self._parser = self.construct_parser() logger.debug(_("parsed early namespace: %s"), early_ns) # parse the full command line arguments, this is also where we # do argcomplete-dictated exit if bash shell completion # is requested ns = self._parser.parse_args(argv) logger.debug(_("parsed full namespace: %s"), ns) self.final_init(ns) except KeyboardInterrupt: pass else: return self.dispatch_and_catch_exceptions(ns)
def register_parser(self, subparsers):
    """
    Overridden method of CommandBase.

    :param subparsers:
        The argparse subparsers objects in which command line argument
        specification should be created.

    This method is invoked by the command line handling code to register
    arguments specific to this sub-command. It must also register itself as
    the command class with the ``command`` default.
    """
    parser = self.add_subcommand(subparsers)
    parser.add_argument(
        "--prefix", default="/usr/local", help=_("installation prefix"))
    parser.add_argument(
        '--layout',
        default='flat',
        choices=sorted(self._INSTALL_LAYOUT.keys()),
        # TRANSLATORS: don't translate %(default)s
        help=_("installation directory layout (default: %(default)s)"))
    parser.add_argument(
        "--root", default="",
        help=_("install everything relative to this alternate root"
               " directory"))
    parser.set_defaults(command=self)
def configured_filesystem(self, job, config):
    """
    Context manager for handling filesystem aspects of job execution.

    :param job:
        The JobDefinition to execute
    :param config:
        A PlainBoxConfig instance which can be used to load missing
        environment definitions that apply to all jobs. It is used to
        provide values for missing environment variables that are
        required by the job (as expressed by the environ key in the job
        definition file).
    :returns:
        Pathname of the executable symlink nest directory (yielded; the
        directory is removed when the context exits).
    """
    # Create a nest for all the private executables needed for execution
    prefix = 'nest-'
    suffix = '.{}'.format(job.checksum)
    with tempfile.TemporaryDirectory(suffix, prefix) as nest_dir:
        logger.debug(_("Symlink nest for executables: %s"), nest_dir)
        nest = SymLinkNest(nest_dir)
        # Add all providers sharing namespace with the current job to PATH
        for provider in self._provider_list:
            if job.provider.namespace == provider.namespace:
                nest.add_provider(provider)
        # BUG FIX: the same debug message was previously logged twice in
        # a row; the duplicate has been removed.
        yield nest_dir
def _run_command(self, job, config): """ Run the shell command associated with the specified job. :returns: (return_code, record_path) where return_code is the number returned by the exiting child process while record_path is a pathname of a gzipped content readable with :class:`IOLogRecordReader` """ # Bail early if there is nothing do do if job.command is None: return None, () # Get an extcmd delegate for observing all the IO the way we need delegate, io_log_gen = self._prepare_io_handling(job, config) # Create a subprocess.Popen() like object that uses the delegate # system to observe all IO as it occurs in real time. extcmd_popen = extcmd.ExternalCommandWithDelegate(delegate) # Stream all IOLogRecord entries to disk record_path = os.path.join(self._jobs_io_log_dir, "{}.record.gz".format(slugify(job.id))) with gzip.open(record_path, mode="wb") as gzip_stream, io.TextIOWrapper( gzip_stream, encoding="UTF-8" ) as record_stream: writer = IOLogRecordWriter(record_stream) io_log_gen.on_new_record.connect(writer.write_record) # Start the process and wait for it to finish getting the # result code. This will actually call a number of callbacks # while the process is running. It will also spawn a few # threads although all callbacks will be fired from a single # thread (which is _not_ the main thread) logger.debug(_("job[%s] starting command: %s"), job.id, job.command) # Run the job command using extcmd return_code = self._run_extcmd(job, config, extcmd_popen) logger.debug(_("job[%s] command return code: %r"), job.id, return_code) return return_code, record_path
def gen_rfc822_records_from_io_log(job, result):
    """
    Convert io_log from a job result to a sequence of rfc822 records

    :param job:
        The job that produced the output (used for error attribution).
    :param result:
        A job result whose io_log will be scanned for stdout lines.
    :returns:
        A generator of parsed rfc822 records; stops (without raising) at
        the first syntax error, after logging a warning.
    """
    logger.debug(_("processing output from a job: %r"), job)
    # Select all stdout lines from the io log
    line_gen = (record[2].decode('UTF-8', errors='replace')
                for record in result.get_io_log()
                if record[1] == 'stdout')
    # Allow the generated records to be traced back to the job that defined
    # the command which produced (printed) them.
    source = JobOutputTextSource(job)
    try:
        # Parse rfc822 records from the subsequent lines
        for record in gen_rfc822_records(line_gen, source=source):
            yield record
    except RFC822SyntaxError as exc:
        # When this exception happens we will _still_ store all the
        # preceding records. This is worth testing
        logger.warning(
            # TRANSLATORS: keep the word "local" untranslated. It is a
            # special type of job that needs to be distinguished.
            _("local script %s returned invalid RFC822 data: %s"),
            job, exc)
def __init__(self, filename, text, provider):
    """
    Initialize the plug-in with the specified name text

    :param filename:
        Name of the file the definitions come from; used as the text
        source origin and in error messages.
    :param text:
        Full text of the job definition file.
    :param provider:
        The provider to associate with each loaded job.
    :raises PlugInError:
        If the text cannot be parsed as RFC822 records, or if a record
        cannot be converted into a job definition.
    """
    self._filename = filename
    self._job_list = []
    logger.debug(_("Loading jobs definitions from %r..."), filename)
    try:
        records = load_rfc822_records(
            text, source=FileTextSource(filename))
    except RFC822SyntaxError as exc:
        raise PlugInError(
            _("Cannot load job definitions from {!r}: {}").format(
                filename, exc))
    for record in records:
        try:
            job = JobDefinition.from_rfc822_record(record)
        except ValueError as exc:
            raise PlugInError(
                _("Cannot define job from record {!r}: {}").format(
                    record, exc))
        else:
            # NOTE(review): reaches into JobDefinition internals to
            # attach the provider -- presumably predates a public
            # setter; confirm before changing.
            job._provider = provider
            self._job_list.append(job)
            logger.debug(_("Loaded %r"), job)
def _print_dependency_report(self):
    """Print a banner, then each dependency problem (or an all-clear note)."""
    print(_("[Dependency Report]").center(80, '='))
    if not self.problem_list:
        print(_("Selected jobs have no dependency problems"))
        return
    for issue in self.problem_list:
        print(" * {}".format(issue))
def __init__(self, cause, related_job=None, related_expression=None):
    """
    Initialize a new inhibitor with the specified cause.

    If cause is other than UNDESIRED a related_job is necessary. If cause
    is either PENDING_RESOURCE or FAILED_RESOURCE related_expression is
    necessary as well. A ValueError is raised when this is violated.

    :param cause:
        One of the keys of _cause_display (e.g. UNDESIRED,
        PENDING_RESOURCE, FAILED_RESOURCE).
    :param related_job:
        The job this inhibitor refers to; required unless cause is
        UNDESIRED.
    :param related_expression:
        The resource expression involved; required for the two
        *_RESOURCE causes.
    :raises ValueError:
        When the argument combination rules above are violated.
    """
    if cause not in self._cause_display:
        raise ValueError(_("unsupported value for cause"))
    if cause != self.UNDESIRED and related_job is None:
        raise ValueError(
            # TRANSLATORS: please don't translate related_job, None and
            # cause
            _("related_job must not be None when cause is {}").format(
                self._cause_display[cause]))
    if cause in (self.PENDING_RESOURCE, self.FAILED_RESOURCE) \
            and related_expression is None:
        raise ValueError(_(
            # TRANSLATORS: please don't translate related_expression, None
            # and cause.
            "related_expression must not be None when cause is {}"
        ).format(self._cause_display[cause]))
    self.cause = cause
    self.related_job = related_job
    self.related_expression = related_expression
def _get_matching_job_list(self, ns, job_list):
    """
    Build a qualifier list from the parsed namespace (whitelists plus
    --include / --exclude patterns) and select the matching jobs.

    :param ns:
        Parsed argparse namespace with whitelist, include_pattern_list
        and exclude_pattern_list attributes.
    :param job_list:
        List of jobs to filter.
    :returns:
        The subset of job_list selected by the qualifiers.
    """
    logger.debug("_get_matching_job_list(%r, %r)", ns, job_list)
    qualifier_list = []
    # Add whitelists
    for whitelist_file in ns.whitelist:
        qualifier = self.get_whitelist_from_file(
            whitelist_file.name, whitelist_file)
        if qualifier is not None:
            qualifier_list.append(qualifier)
    # Add all the --include jobs, then all the --exclude jobs.
    # (De-duplicated: both loops previously repeated the same logic.)
    self._add_pattern_qualifiers(
        qualifier_list, ns.include_pattern_list, True)
    self._add_pattern_qualifiers(
        qualifier_list, ns.exclude_pattern_list, False)
    logger.debug("select_jobs(%r, %r)", job_list, qualifier_list)
    return select_jobs(job_list, qualifier_list)

def _add_pattern_qualifiers(self, qualifier_list, pattern_list, inclusive):
    """Append a RegExpJobQualifier per valid pattern; warn on bad ones."""
    for pattern in pattern_list:
        try:
            qualifier = RegExpJobQualifier(
                '^{}$'.format(pattern), inclusive=inclusive)
        except Exception as exc:
            logger.warning(
                _("Incorrect pattern %r: %s"), pattern, exc)
        else:
            qualifier_list.append(qualifier)
def get_storage_list(self):
    """
    Enumerate stored sessions in the repository.

    If the repository directory is not present then an empty list is
    returned.

    :returns:
        list of :class:`SessionStorage` representing discovered sessions
    """
    logger.debug(_("Enumerating sessions in %s"), self._location)
    try:
        # Try to enumerate the directory
        item_list = os.listdir(self._location)
    except OSError as exc:
        # If the directory does not exist,
        # silently return empty collection
        if exc.errno == errno.ENOENT:
            return []
        # Don't silence any other errors
        raise
    session_list = []
    # Check each item by looking for directories
    for item in item_list:
        # NOTE(review): mixes self.location here with self._location
        # above -- presumably `location` is a property over `_location`;
        # confirm they are equivalent.
        pathname = os.path.join(self.location, item)
        # Make sure not to follow any symlinks here
        stat_result = os.lstat(pathname)
        # Consider directories only
        if stat.S_ISDIR(stat_result.st_mode):
            logger.debug(_("Found possible session in %r"), pathname)
            session = SessionStorage(pathname)
            session_list.append(session)
    # Return the full list
    return session_list
def run(self):
    """Emit one message at each logging level, then return success (0)."""
    emitters = (
        (logger.debug, _("a debug message")),
        (logger.info, _("a info message")),
        (logger.warning, _("a warning message")),
        (logger.error, _("an error message")),
        (logger.critical, _("a critical message")),
    )
    for emit, message in emitters:
        emit(message)
    return 0
def create_parser_object(self):
    """Build and return the top-level argparse parser for manage.py."""
    # TRANSLATORS: please keep 'manage.py', '--help', '--version'
    # untranslated. Translate only '[options]'
    usage_text = _("manage.py [--help] [--version] [options]")
    parser = argparse.ArgumentParser(
        prog=self.get_exec_name(), usage=usage_text)
    parser.add_argument(
        "--version",
        action="version",
        version=self.get_exec_version(),
        help=_("show program's version number and exit"))
    return parser
def register_parser(self, subparsers):
    """Register the 'list' sub-command and its arguments with argparse."""
    list_parser = subparsers.add_parser(
        "list", help=_("list and describe various objects"))
    list_parser.add_argument(
        '-a', '--attrs',
        default=False,
        action="store_true",
        help=_("show object attributes"))
    list_parser.add_argument(
        'group',
        nargs='?',
        help=_("list objects from the specified group"))
    list_parser.set_defaults(command=self)
def KindValidator(variable, new_value):
    """
    A validator ensuring that values match the "kind" of the variable.

    Returns None when the value is an instance of the variable's kind,
    otherwise a translated error message for that kind.
    """
    if isinstance(new_value, variable.kind):
        return None
    messages = {
        bool: _("expected a boolean"),
        int: _("expected an integer"),
        float: _("expected a floating point number"),
        str: _("expected a string"),
    }
    return messages[variable.kind]
def register_parser(self, subparsers):
    """Register this sub-command; it takes one provider-name argument."""
    parser = self.add_subcommand(subparsers)
    # TRANSLATORS: please keep the YYYY.example... text unchanged or at
    # the very least translate only YYYY and some-name. In either case
    # some-name must be a reasonably-ASCII string (should be safe for a
    # portable directory name)
    name_help = _("provider name, eg: YYYY.example.org:some-name")
    parser.add_argument(
        'name', metavar=_("name"), type=IQN, help=name_help)
    parser.set_defaults(command=self)
def _save_results(self, output_file, input_stream):
    """Copy exporter output into the target file, translating for stdout."""
    if output_file is sys.stdout:
        print(_("[ Results ]").center(80, '='))
        # This requires a bit more finesse, as exporters output bytes
        # and stdout needs a string.
        translator = ByteStringStreamTranslator(output_file, "utf-8")
        copyfileobj(input_stream, translator)
    else:
        print(_("Saving results to {}").format(output_file.name))
        copyfileobj(input_stream, output_file)
        # stdout must stay open; anything else we opened gets closed.
        output_file.close()
def write_summary(self, data):
    """
    Write the failures-summary worksheet.

    Lays out pass/fail/skip counts with percentage rates and inserts a
    small pie chart visualising the three totals.
    """
    self.worksheet2.set_column(0, 0, 5)
    self.worksheet2.set_column(1, 1, 2)
    self.worksheet2.set_column(3, 3, 27)
    self.worksheet2.write(3, 1, _('Failures summary'), self.format03)
    self.worksheet2.write(4, 1, '✔', self.format10)
    self.worksheet2.write(
        4, 2,
        (ngettext('{} Test passed', '{} Tests passed',
                  self.total_pass).format(self.total_pass) +
         " - " + _('Success Rate: {:.2f}% ({}/{})').format(
             self.total_pass / self.total * 100,
             self.total_pass, self.total)),
        self.format02)
    self.worksheet2.write(5, 1, '✘', self.format11)
    self.worksheet2.write(
        5, 2,
        # BUG FIX: .format(self.total_fail) was missing, leaving a
        # literal '{}' in the spreadsheet instead of the failure count.
        (ngettext('{} Test failed', '{} Tests failed',
                  self.total_fail).format(self.total_fail) +
         ' - ' + _('Failure Rate: {:.2f}% ({}/{})').format(
             self.total_fail / self.total * 100,
             self.total_fail, self.total)),
        self.format02)
    self.worksheet2.write(6, 1, '-', self.format12)
    self.worksheet2.write(
        6, 2,
        # BUG FIX: .format(self.total_skip) was missing here as well.
        (ngettext('{} Test skipped', '{} Tests skipped',
                  self.total_skip).format(self.total_skip) +
         ' - ' + _('Skip Rate: {:.2f}% ({}/{})').format(
             self.total_skip / self.total * 100,
             self.total_skip, self.total)),
        self.format02)
    self.worksheet2.write_column(
        'L3', [_('Fail'), _('Skip'), _('Pass')], self.format14)
    self.worksheet2.write_column(
        'M3', [self.total_fail, self.total_skip, self.total_pass],
        self.format14)
    # Configure the series.
    chart = self.workbook.add_chart({'type': 'pie'})
    chart.set_legend({'position': 'none'})
    chart.add_series({
        'points': [
            {'fill': {'color': 'red'}},
            {'fill': {'color': 'gray'}},
            {'fill': {'color': 'lime'}},
        ],
        'categories': '=' + _("Summary") + '!$L$3:$L$5',
        'values': '=' + _("Summary") + '!$M$3:$M$5'}
    )
    # Insert the chart into the worksheet.
    self.worksheet2.insert_chart('F4', chart, {
        'x_offset': 0, 'y_offset': 10, 'x_scale': 0.25, 'y_scale': 0.25
    })
def _build_SessionState(self, session_repr, early_cb=None): """ Reconstruct the session state object. This method creates a fresh SessionState instance and restores jobs, results, meta-data and desired job list using helper methods. """ # Construct a fresh session object. session = SessionState(self.job_list) logger.debug(_("Constructed new session for resume %r"), session) # Give early_cb a chance to see the session before we start resuming. # This way applications can see, among other things, generated jobs # as they are added to the session, by registering appropriate signal # handlers on the freshly-constructed session instance. if early_cb is not None: logger.debug(_("Invoking early callback %r"), early_cb) new_session = early_cb(session) if new_session is not None: logger.debug( _("Using different session for resume: %r"), new_session) session = new_session # Restore bits and pieces of state logger.debug( _("Starting to restore jobs and results to %r..."), session) self._restore_SessionState_jobs_and_results(session, session_repr) logger.debug(_("Starting to restore metadata...")) self._restore_SessionState_metadata(session, session_repr) logger.debug(_("Starting to restore desired job list...")) self._restore_SessionState_desired_job_list(session, session_repr) logger.debug(_("Starting to restore job list...")) self._restore_SessionState_job_list(session, session_repr) # Return whatever we've got logger.debug(_("Resume complete!")) return session
def adjust_logging(self, level=None, trace_list=None, debug_console=False):
    """
    Adjust logging configuration.

    :param level:
        Optional level name applied to the root and 'plainbox' loggers.
    :param trace_list:
        Optional iterable of logger names to force to DEBUG.
    :param debug_console:
        When True (and DEBUG/tracing is active), switch on the debug
        console logging configuration.
    """
    # Bump logging on the root logger if requested
    if level is not None:
        logging.getLogger(None).setLevel(level)
        logger.debug(_("Enabled %r on root logger"), level)
        logging.getLogger("plainbox").setLevel(level)
    # Enable tracing on specified loggers
    for name in (trace_list if trace_list is not None else ()):
        logging.getLogger(name).setLevel(logging.DEBUG)
        logger.debug(_("Enabled debugging on logger %r"), name)
    # Enable DEBUG logging to console if explicitly requested
    if debug_console and (level == 'DEBUG' or trace_list):
        logging.config.dictConfig(self.DEBUG_CONSOLE_CONFIG)
def _run_extcmd(self, job, config, extcmd_popen):
    """Select the best-scoring execution controller and delegate to it."""
    # Score every controller for this job
    scored = [(ctrl, ctrl.get_score(job))
              for ctrl in self._execution_ctrl_list]
    # Sort ascending by score; the winner is the last entry.
    # (A stable sort keeps the original tie-breaking behavior: among
    # equal scores the controller listed last wins.)
    scored.sort(key=lambda pair: pair[1])
    best_ctrl, best_score = scored[-1]
    # A negative score means no controller is viable
    if best_score < 0:
        raise RuntimeError(
            _("No exec controller supports job {}").format(job))
    logger.debug(
        _("Selected execution controller %s (score %d) for job %r"),
        best_ctrl.__class__.__name__, best_score, job.id)
    # Delegate and execute
    return best_ctrl.execute_job(job, config, extcmd_popen)
def _process_job(self, session, jobs_repr, results_repr, job_id):
    """
    Process all representation details associated with a particular job

    This method takes a session object, representation of all the jobs
    and all the results (and a job id) and tries to reconstruct the
    state associated with that job in the session object.

    Jobs are verified to match existing (known) jobs. Results are
    rebuilt from their representation and presented back to the session
    for processing (this restores resources and generated jobs).

    This method can fail in normal operation, when the job that was
    being processed is a generated job and has not been reintroduced into
    the session. When that happens a KeyError is raised.

    .. note::
        Since the representation format for results can support storing
        and restoring a list of results (per job) but the SessionState
        cannot yet do that the implementation of this method restores
        the state of the _last_ result object only.
    """
    _validate(job_id, value_type=str)
    # Get the checksum from the representation
    checksum = _validate(
        jobs_repr, key=job_id, value_type=str)
    # Look up the actual job definition in the session.
    # This can raise KeyError but it is okay, callers expect that
    job = session.job_state_map[job_id].job
    # Check if job definition has not changed
    if job.checksum != checksum:
        raise IncompatibleJobError(
            _("Definition of job {!r} has changed").format(job_id))
    # Collect all of the result objects into result_list
    result_list = []
    # value_none=True: a missing/null result list is tolerated
    result_list_repr = _validate(
        results_repr, key=job_id, value_type=list, value_none=True)
    for result_repr in result_list_repr:
        _validate(result_repr, value_type=dict)
        result = self._build_JobResult(result_repr)
        result_list.append(result)
    # Show the _LAST_ result to the session. Currently we only store one
    # result but showing the most recent (last) result should be good
    # in general.
    if len(result_list) > 0:
        logger.debug(
            _("calling update_job_result(%r, %r)"),
            job, result_list[-1])
        session.update_job_result(job, result_list[-1])
def _commit_resume(self):
    """Load the most recently saved session and keep its manager."""
    logger.debug("_commit_resume()")
    storage = SessionStorageRepository().get_last_storage()
    assert storage is not None, "no saved session to resume"
    self._manager = SessionManager.load_session(
        self.job_list, storage, lambda session: self)
    logger.debug(_("_commit_resume() finished"))
def execute_job(self, job, config, extcmd_popen):
    """
    Execute the specified job using the specified subprocess-like object

    :param job:
        The JobDefinition to execute
    :param config:
        A PlainBoxConfig instance which can be used to load missing
        environment definitions that apply to all jobs. It is used to
        provide values for missing environment variables that are
        required by the job (as expressed by the environ key in the job
        definition file).
    :param extcmd_popen:
        A subprocess.Popen like object
    :returns:
        The return code of the command, as returned by subprocess.call()
    """
    # CHECKBOX_DATA is where jobs can share output.
    # It has to be a directory that scripts can assume exists.
    # exist_ok=True avoids the check-then-create race the previous
    # isdir()+makedirs() pair had.
    os.makedirs(self.CHECKBOX_DATA, exist_ok=True)
    # Setup the executable nest directory
    with self.configured_filesystem(job, config) as nest_dir:
        # Get the command and the environment.
        # of this execution controller
        cmd = self.get_execution_command(job, config, nest_dir)
        env = self.get_execution_environment(job, config, nest_dir)
        # run the command
        logger.debug(
            _("job[%s] executing %r with env %r"), job.id, cmd, env)
        return extcmd_popen.call(cmd, env=env)
def run_local_job(self, checksum, env):
    """
    Run a job and interpret its stdout as job definitions.

    :param checksum:
        The checksum of the job to execute
    :param env:
        Environment to execute the job in.
    :returns:
        A list of job definitions that were parsed out of the output.
        On RFC822 syntax errors the error is logged and the (possibly
        empty) list collected so far is returned.
    :raises LookupError:
        If the checksum does not match any known job
    """
    job = self.find_job(checksum)
    cmd = ["bash", "-c", job.command]
    output = subprocess.check_output(
        cmd, universal_newlines=True,
        env=self.modify_execution_environment(env))
    job_list = []
    source = JobOutputTextSource(job)
    try:
        record_list = load_rfc822_records(output, source=source)
    except RFC822SyntaxError as exc:
        logging.error(
            _("Syntax error in job generated from %s: %s"), job, exc)
    else:
        # NOTE: `job` is rebound here, shadowing the parent job above;
        # `source` already captured the parent so attribution is safe.
        for record in record_list:
            job = JobDefinition.from_rfc822_record(record)
            job_list.append(job)
    return job_list
def __init__(self, name=None, *, section='DEFAULT', kind=str,
             default=Unset, validator_list=None, help_text=None):
    """
    Initialize a configuration variable.

    :param name:
        Name of the variable (or None).
    :param section:
        Configuration file section the variable lives in.
    :param kind:
        Type of the value; must be one of _KIND_CHOICE.
    :param default:
        Default value (Unset by default).
    :param validator_list:
        Optional list of validators; a KindValidator is inserted
        automatically (after a leading NotUnsetValidator, if any).
    :param help_text:
        Optional help text, also exposed as __doc__.
    :raises ValueError:
        If kind is not one of the supported kinds.
    """
    # Ensure kind is correct
    if kind not in self._KIND_CHOICE:
        raise ValueError(_("unsupported kind"))
    # Ensure that we have a validator_list, even if empty.
    # BUG FIX: copy the caller's list so the insert() calls below do not
    # mutate a list the caller may share or reuse.
    if validator_list is None:
        validator_list = []
    else:
        validator_list = list(validator_list)
    if validator_list and isinstance(validator_list[0], NotUnsetValidator):
        # XXX: Kludge ahead, beware!
        # Insert a KindValidator as the second validator to run
        # just after the NotUnsetValidator
        # TODO: To properly handle this without any special-casing we
        # should drop the implicit insertion of the KindValidator and
        # convert all users to properly order KindValidator and
        # NotUnsetValidator instances so that the error message is helpful
        # to the user. The whole idea is to validate Unset before we try to
        # validate the type.
        validator_list.insert(1, KindValidator)
    else:
        # Insert a KindValidator as the first validator to run
        validator_list.insert(0, KindValidator)
    # Assign all the attributes
    self._name = name
    self._section = section
    self._kind = kind
    self._default = default
    self._validator_list = validator_list
    self._help_text = help_text
    # Workaround for Sphinx breaking if __doc__ is a property
    self.__doc__ = self.help_text or self.__class__.__doc__
def wrap_and_add_plugin(self, plugin_name, plugin_obj):
    """
    Internal method of PlugInCollectionBase.

    :param plugin_name:
        plugin name, some arbitrary string
    :param plugin_obj:
        plugin object, some arbitrary object.

    This method prepares a wrapper (PlugIn subclass instance) for the
    specified plugin name/object by attempting to instantiate the wrapper
    class. If a PlugInError exception is raised then it is added to the
    problem_list and the corresponding plugin is not added to the
    collection of plugins.
    """
    try:
        wrapper = self._wrapper(
            plugin_name, plugin_obj,
            *self._wrapper_args, **self._wrapper_kwargs)
    except PlugInError as exc:
        # Record the failure but keep loading other plugins
        logger.warning(
            _("Unable to prepare plugin %s: %s"), plugin_name, exc)
        self._problem_list.append(exc)
    else:
        self._plugins[plugin_name] = wrapper
def considering_job(self, job, job_state):
    """Print a header line plus the id and category of the current job."""
    banner = self.C.header(job.tr_summary(), fill='-')
    print(banner)
    print(_("ID: {0}").format(job.id))
    print(_("Category: {0}").format(job_state.effective_category_id))
def _too_many_device_context_objects(self):
    """Raise ValueError: multi-device sessions are not supported."""
    message = _(
        "session manager currently doesn't support sessions"
        " involving multiple devices (a.k.a multi-node testing)")
    raise ValueError(message)
def __str__(self):
    """Describe the expression and the resource it is missing."""
    template = _("expression {!r} needs unavailable resource {!r}")
    return template.format(self.expression.text, self.resource_id)
def __str__(self):
    """Return the fixed message for expressions with no resource refs."""
    return _("expression did not reference any resources")
def __str__(self):
    """Describe the disallowed AST node in a readable way."""
    node_dump = ast.dump(self.node)
    return _("this kind of python code is not allowed: {}").format(
        node_dump)
def parse(
    text: str,
    lineno: int=1,
    col_offset: int=0
) -> "Union[IncludeStmt, Error]":
    """
    Parse a single test plan include line

    Using correct syntax will result in a IncludeStmt node with
    appropriate data in the ``pattern`` and ``overrides`` fields. Note
    that ``pattern`` may be either a :class:`RePattern` or a
    :class:`ReFixed` or :class:`ReErr` which is not a valid pattern and
    cannot be used. Overrides are a list of :class:`OverrideExpression`.
    The list may contain incorrect, or duplicate values but that's up to
    higher-level analysis to check for.

    The whole overrides section is optional so a single pattern is a good
    include statement:

    >>> IncludeStmt.parse("usb.*")
    ... # doctest: +NORMALIZE_WHITESPACE
    IncludeStmt(pattern=RePattern(text='usb.*', re=re.compile('usb.*')),
                overrides=[])

    Any number of key=value override pairs can be used using commas in
    between each pair:

    >>> IncludeStmt.parse("usb.* f1=o1")
    ... # doctest: +NORMALIZE_WHITESPACE
    IncludeStmt(pattern=RePattern(text='usb.*', re=re.compile('usb.*')),
                overrides=[OverrideExpression(field=Text(text='f1'),
                                              value=Text(text='o1'))])
    >>> IncludeStmt.parse("usb.* f1=o1, f2=o2")
    ... # doctest: +NORMALIZE_WHITESPACE
    IncludeStmt(pattern=RePattern(text='usb.*', re=re.compile('usb.*')),
                overrides=[OverrideExpression(field=Text(text='f1'),
                                              value=Text(text='o1')),
                           OverrideExpression(field=Text(text='f2'),
                                              value=Text(text='o2'))])
    >>> IncludeStmt.parse("usb.* f1=o1, f2=o2, f3=o3")
    ... # doctest: +NORMALIZE_WHITESPACE
    IncludeStmt(pattern=RePattern(text='usb.*', re=re.compile('usb.*')),
                overrides=[OverrideExpression(field=Text(text='f1'),
                                              value=Text(text='o1')),
                           OverrideExpression(field=Text(text='f2'),
                                              value=Text(text='o2')),
                           OverrideExpression(field=Text(text='f3'),
                                              value=Text(text='o3'))])

    Obviously some things can fail, the following examples show various
    error states that are possible. In each state an Error node is
    returned instead of the whole statement.

    >>> IncludeStmt.parse("")
    Error(msg='expected pattern')
    >>> IncludeStmt.parse("pattern field")
    Error(msg="expected '='")
    >>> IncludeStmt.parse("pattern field=")
    Error(msg='expected override value')
    >>> IncludeStmt.parse("pattern field=override junk")
    Error(msg="expected ','")
    >>> IncludeStmt.parse("pattern field=override, ")
    Error(msg='expected override field')
    """
    scanner = WordScanner(text)
    # PATTERN ...
    token, lexeme = scanner.get_token()
    if token != scanner.TokenEnum.WORD:
        return Error(lineno, col_offset, _("expected pattern"))
    pattern = Re.parse(lexeme, lineno, col_offset)
    overrides = []
    # Each loop iteration consumes one FIELD=VALUE override (plus an
    # optional trailing comma).
    for i in itertools.count():
        # PATTERN FIELD ...
        token, lexeme = scanner.get_token()
        if token == scanner.TokenEnum.EOF and i == 0:
            # The whole override section is optional so the sequence may
            # end with EOF on the first iteration of the loop.
            break
        elif token != scanner.TokenEnum.WORD:
            return Error(lineno, col_offset, _("expected override field"))
        field = Text(lineno, col_offset, lexeme)
        # PATTERN FIELD = ...
        token, lexeme = scanner.get_token()
        if token != scanner.TokenEnum.EQUALS:
            return Error(lineno, col_offset, _("expected '='"))
        # PATTERN FIELD = VALUE ...
        token, lexeme = scanner.get_token()
        if token != scanner.TokenEnum.WORD:
            return Error(lineno, col_offset, _("expected override value"))
        value = Text(lineno, col_offset, lexeme)
        expr = OverrideExpression(lineno, col_offset, field, value)
        overrides.append(expr)
        # is there any more?
        # PATTERN FIELD = VALUE , ...
        token, lexeme = scanner.get_token()
        if token == scanner.TokenEnum.COMMA:
            # (and again)
            continue
        elif token == scanner.TokenEnum.EOF:
            break
        else:
            return Error(lineno, col_offset, _("expected ','"))
    return IncludeStmt(lineno, col_offset, pattern, overrides)
    class Meta:
        """
        Meta-data for the job unit type.

        ``fields`` enumerates every recognized record field as a symbol;
        ``field_validators`` maps each of those symbols to the list of
        validators that :class:`UnitValidator` runs over that field.
        """

        name = N_('job')

        class fields(SymbolDef):
            """
            Symbols for each field that a JobDefinition can have
            """
            name = 'name'
            summary = 'summary'
            plugin = 'plugin'
            command = 'command'
            description = 'description'
            # NOTE(review): every other symbol maps to its own field name;
            # '******' looks like an accidental redaction of 'user' --
            # confirm against upstream before relying on this symbol value.
            user = '******'
            environ = 'environ'
            estimated_duration = 'estimated_duration'
            depends = 'depends'
            after = 'after'
            requires = 'requires'
            shell = 'shell'
            imports = 'imports'
            flags = 'flags'
            category_id = 'category_id'
            purpose = 'purpose'
            steps = 'steps'
            verification = 'verification'
            qml_file = 'qml_file'
            certification_status = 'certification_status'

        field_validators = {
            fields.name: [
                UntranslatableFieldValidator,
                TemplateVariantFieldValidator,
                DeprecatedFieldValidator(
                    _("use 'id' and 'summary' instead of 'name'")),
            ],
            # NOTE: 'id' validators are "inherited" so we don't have it here
            fields.summary: [
                TranslatableFieldValidator,
                TemplateVariantFieldValidator,
                PresentFieldValidator(severity=Severity.advice),
                # We want the summary to be a single line
                CorrectFieldValueValidator(
                    lambda summary: summary.count("\n") == 0,
                    Problem.wrong, Severity.warning,
                    message=_("please use only one line"),
                    onlyif=lambda unit: unit.summary is not None),
                # We want the summary to be relatively short
                CorrectFieldValueValidator(
                    lambda summary: len(summary) <= 80,
                    Problem.wrong, Severity.warning,
                    message=_("please stay under 80 characters"),
                    onlyif=lambda unit: unit.summary is not None),
            ],
            fields.plugin: [
                UntranslatableFieldValidator,
                TemplateInvariantFieldValidator,
                PresentFieldValidator,
                # Plugin must be one of the known plugin symbols.
                CorrectFieldValueValidator(
                    lambda plugin: (
                        plugin in JobDefinition.plugin.get_all_symbols()),
                    message=_('valid values are: {}').format(
                        ', '.join(str(sym) for sym in sorted(
                            _PluginValues.get_all_symbols())))),
                CorrectFieldValueValidator(
                    lambda plugin: plugin != 'local',
                    Problem.deprecated, Severity.advice,
                    message=_("please migrate to job templates, "
                              "see plainbox-template-unit(7) for details")),
                CorrectFieldValueValidator(
                    lambda plugin: plugin != 'user-verify',
                    Problem.deprecated, Severity.advice,
                    message=_("please migrate to user-interact-verify")),
            ],
            fields.command: [
                UntranslatableFieldValidator,
                # All jobs except for manual must have a command
                PresentFieldValidator(
                    message=_("command is mandatory for non-manual jobs"),
                    onlyif=lambda unit: unit.plugin not in ('manual', 'qml')),
                # Manual jobs cannot have a command
                UselessFieldValidator(
                    message=_("command on a manual or qml job makes no sense"),
                    onlyif=lambda unit: unit.plugin in ('manual', 'qml')),
                # We don't want to refer to CHECKBOX_SHARE anymore
                CorrectFieldValueValidator(
                    lambda command: "CHECKBOX_SHARE" not in command,
                    Problem.deprecated, Severity.advice,
                    message=_("please use PLAINBOX_PROVIDER_DATA"
                              " instead of CHECKBOX_SHARE"),
                    onlyif=lambda unit: unit.command is not None),
                # We don't want to refer to CHECKBOX_DATA anymore
                CorrectFieldValueValidator(
                    lambda command: "CHECKBOX_DATA" not in command,
                    Problem.deprecated, Severity.advice,
                    message=_("please use PLAINBOX_SESSION_SHARE"
                              " instead of CHECKBOX_DATA"),
                    onlyif=lambda unit: unit.command is not None),
                # We want to catch silly mistakes that shlex can detect
                ShellProgramValidator,
            ],
            fields.description: [
                TranslatableFieldValidator,
                TemplateVariantFieldValidator,
                # Description is mandatory for manual jobs
                PresentFieldValidator(
                    message=_("manual jobs must have a description field, or a"
                              " set of purpose, steps, and verification "
                              "fields"),
                    onlyif=lambda unit: unit.plugin == 'manual'
                    and unit.purpose is None
                    and unit.steps is None
                    and unit.verification is None
                ),
                # Description or a set of purpose, steps and verification
                # fields is recommended for all other jobs
                PresentFieldValidator(
                    severity=Severity.advice,
                    message=_("all jobs should have a description field, or a "
                              "set of purpose, steps and verification fields"),
                    onlyif=lambda unit: (
                        'simple' not in unit.get_flag_set()
                        and unit.plugin != 'manual'
                        and (
                            unit.purpose is None
                            and unit.steps is None
                            and unit.verification is None))),
            ],
            fields.purpose: [
                TranslatableFieldValidator,
                # NOTE(review): this message (and the ones for steps and
                # verification below) is not wrapped in _() unlike the other
                # validator messages -- possibly an oversight; confirm.
                PresentFieldValidator(
                    severity=Severity.advice,
                    message=("please use purpose, steps, and verification"
                             " fields. See http://plainbox.readthedocs.org"
                             "/en/latest/author/faq.html#faq-2"),
                    onlyif=lambda unit:
                    unit.startup_user_interaction_required and
                    unit.get_record_value('summary') is None),
            ],
            fields.steps: [
                TranslatableFieldValidator,
                PresentFieldValidator(
                    severity=Severity.advice,
                    message=("please use purpose, steps, and verification"
                             " fields. See http://plainbox.readthedocs.org"
                             "/en/latest/author/faq.html#faq-2"),
                    onlyif=lambda unit:
                    unit.startup_user_interaction_required),
            ],
            fields.verification: [
                TranslatableFieldValidator,
                PresentFieldValidator(
                    severity=Severity.advice,
                    message=("please use purpose, steps, and verification"
                             " fields. See http://plainbox.readthedocs.org"
                             "/en/latest/author/faq.html#faq-2"),
                    onlyif=lambda unit: unit.plugin in (
                        'manual', 'user-verify', 'user-interact-verify')),
            ],
            fields.user: [
                UntranslatableFieldValidator,
                TemplateInvariantFieldValidator,
                # User should be either None or 'root'
                CorrectFieldValueValidator(
                    message=_("user can only be 'root'"),
                    correct_fn=lambda user: user in (None, 'root')),
                # User is useless without a command to run
                UselessFieldValidator(
                    message=_("user without a command makes no sense"),
                    onlyif=lambda unit: unit.command is None)
            ],
            fields.environ: [
                UntranslatableFieldValidator,
                TemplateInvariantFieldValidator,
                # Environ is useless without a command to run
                UselessFieldValidator(
                    message=_("environ without a command makes no sense"),
                    onlyif=lambda unit: unit.command is None),
            ],
            fields.estimated_duration: [
                UntranslatableFieldValidator,
                TemplateInvariantFieldValidator,
                # 'simple' jobs are exempt from providing a duration
                PresentFieldValidator(
                    severity=Severity.advice,
                    onlyif=lambda unit: 'simple' not in unit.get_flag_set()
                ),
                # NOTE(review): message not wrapped in _() -- see note above.
                CorrectFieldValueValidator(
                    lambda duration: float(duration) > 0,
                    message="value must be a positive number",
                    onlyif=lambda unit: (
                        unit.get_record_value('estimated_duration'))),
            ],
            fields.depends: [
                UntranslatableFieldValidator,
                CorrectFieldValueValidator(
                    lambda value, unit: (
                        unit.get_direct_dependencies() is not None)),
                UnitReferenceValidator(
                    lambda unit: unit.get_direct_dependencies(),
                    constraints=[
                        ReferenceConstraint(
                            lambda referrer, referee: referee.unit == 'job',
                            message=_("the referenced unit is not a job"))])
                # TODO: should not refer to deprecated jobs,
                # onlyif job itself is not deprecated
            ],
            fields.after: [
                UntranslatableFieldValidator,
                CorrectFieldValueValidator(
                    lambda value, unit: (
                        unit.get_after_dependencies() is not None)),
                UnitReferenceValidator(
                    lambda unit: unit.get_after_dependencies(),
                    constraints=[
                        ReferenceConstraint(
                            lambda referrer, referee: referee.unit == 'job',
                            message=_("the referenced unit is not a job"))])
            ],
            fields.requires: [
                UntranslatableFieldValidator,
                CorrectFieldValueValidator(
                    lambda value, unit: unit.get_resource_program(),
                    onlyif=lambda unit: unit.requires is not None),
                UnitReferenceValidator(
                    lambda unit: unit.get_resource_dependencies(),
                    constraints=[
                        ReferenceConstraint(
                            lambda referrer, referee: referee.unit == 'job',
                            message=_("the referenced unit is not a job")),
                        # Only applied when the referee is a job at all
                        ReferenceConstraint(
                            lambda referrer, referee: (
                                referee.plugin == 'resource'),
                            onlyif=lambda referrer, referee: (
                                referee.unit == 'job'),
                            message=_(
                                "the referenced job is not a resource job")),
                    ]),
                # TODO: should not refer to deprecated jobs,
                # onlyif job itself is not deprecated
            ],
            fields.shell: [
                UntranslatableFieldValidator,
                TemplateInvariantFieldValidator,
                # Shell should be only '/bin/sh', or None (which gives bash)
                # NOTE(review): the check also accepts the bare string 'bash'
                # although the message mentions only /bin/sh and /bin/bash --
                # confirm which set is intended.
                CorrectFieldValueValidator(
                    lambda shell: shell in ('/bin/sh', '/bin/bash', 'bash'),
                    message=_("only /bin/sh and /bin/bash are allowed")),
            ],
            fields.imports: [
                UntranslatableFieldValidator,
                TemplateInvariantFieldValidator,
                CorrectFieldValueValidator(
                    lambda value, unit: (
                        list(unit.get_imported_jobs()) is not None)),
                UnitReferenceValidator(
                    lambda unit: [
                        job_id
                        for job_id, identifier in unit.get_imported_jobs()],
                    constraints=[
                        ReferenceConstraint(
                            lambda referrer, referee: referee.unit == 'job',
                            message=_("the referenced unit is not a job"))]),
                # TODO: should not refer to deprecated jobs,
                # onlyif job itself is not deprecated
            ],
            fields.category_id: [
                UntranslatableFieldValidator,
                TemplateInvariantFieldValidator,
                UnitReferenceValidator(
                    lambda unit: (
                        [unit.get_category_id()] if unit.category_id else ()),
                    constraints=[
                        ReferenceConstraint(
                            lambda referrer, referee: (
                                referee.unit == 'category'),
                            message=_(
                                "the referenced unit is not a category"))]),
                # TODO: should not refer to deprecated categories,
                # onlyif job itself is not deprecated
            ],
            fields.flags: [
                UntranslatableFieldValidator,
                TemplateInvariantFieldValidator,
                # Jobs with a command should declare locale handling
                CorrectFieldValueValidator(
                    lambda value, unit: (
                        'simple' in unit.get_flag_set()
                        or 'preserve-locale' in unit.get_flag_set()),
                    Problem.expected_i18n, Severity.advice,
                    message=_(
                        'please ensure that the command supports'
                        ' non-C locale then set the preserve-locale flag'
                    ),
                    onlyif=lambda unit: unit.command),
                # explicit-fail only matters where the user picks the outcome
                CorrectFieldValueValidator(
                    lambda value, unit: (
                        not ('explicit-fail' in unit.get_flag_set()
                             and unit.plugin in {
                                 'shell', 'user-interact', 'attachment',
                                 'local', 'resource'})),
                    Problem.useless, Severity.advice,
                    message=_('explicit-fail makes no sense for job which '
                              'outcome is automatically determined.')
                ),
                # The has-leftovers flag is useless without a command
                CorrectFieldValueValidator(
                    lambda value, unit: (
                        'has-leftovers' not in unit.get_flag_set()),
                    Problem.useless, Severity.advice,
                    message=_(
                        'has-leftovers makes no sense without a command'
                    ),
                    onlyif=lambda unit: unit.command is None),
            ],
            fields.qml_file: [
                UntranslatableFieldValidator,
                TemplateInvariantFieldValidator,
                PresentFieldValidator(
                    onlyif=lambda unit: unit.plugin == 'qml'),
                CorrectFieldValueValidator(
                    lambda value: value.endswith('.qml'),
                    Problem.wrong, Severity.advice,
                    message=_('use the .qml extension for all QML files'),
                    onlyif=lambda unit: (unit.plugin == 'qml'
                                         and unit.qml_file)),
                CorrectFieldValueValidator(
                    lambda value, unit: os.path.isfile(unit.qml_file),
                    message=_('please point to an existing QML file'),
                    onlyif=lambda unit: (unit.plugin == 'qml'
                                         and unit.qml_file)),
            ],
            fields.certification_status: [
                UntranslatableFieldValidator,
                TemplateInvariantFieldValidator,
                CorrectFieldValueValidator(
                    lambda certification_status: (
                        certification_status in
                        _CertificationStatusValues.get_all_symbols()),
                    message=_('valid values are: {}').format(
                        ', '.join(str(sym) for sym in sorted(
                            _CertificationStatusValues.get_all_symbols())))),
            ],
        }
def __call__(self, variable, values): has_common_with_a = bool(self.a_set & set(values)) has_common_with_b = bool(self.b_set & set(values)) if has_common_with_a and has_common_with_b: return _('{} can only use values from {} or from {}'.format( variable.name, self.a_set, self.b_set))
from plainbox.impl.unit.validators import PresentFieldValidator
from plainbox.impl.unit.validators import TemplateInvariantFieldValidator
from plainbox.impl.unit.validators import TemplateVariantFieldValidator
from plainbox.impl.unit.validators import TranslatableFieldValidator
from plainbox.impl.unit.validators import UntranslatableFieldValidator

# Shared, stateless validator singletons.  Unit Meta.field_validators tables
# can reference these instead of instantiating a fresh validator per field.
# NOTE(review): CorrectFieldValueValidator, Problem, Severity and _ are not
# imported above, so they must come into scope elsewhere in this module --
# confirm when viewing the full file.
translatable = TranslatableFieldValidator()
templateVariant = TemplateVariantFieldValidator()
templateInvariant = TemplateInvariantFieldValidator()
untranslatable = UntranslatableFieldValidator()
present = PresentFieldValidator()

# Advise against the deprecated 'local' job plugin.
localDeprecated = CorrectFieldValueValidator(
    lambda plugin: plugin != 'local', Problem.deprecated, Severity.advice,
    message=_("please migrate to job templates, see plainbox-template-unit(7)"
              " for details"))

# Warn when a field that should fit on one line spans several lines.
oneLine = CorrectFieldValueValidator(
    lambda field: field is not None and field.count("\n") == 0,
    Problem.wrong, Severity.warning,
    message=_("please use only one line"))

# Warn when a one-line field exceeds 80 characters.
shortValue = CorrectFieldValueValidator(
    lambda field: field is not None and len(field) <= 80,
    Problem.wrong, Severity.warning,
    message=_("please stay under 80 characters"))
def __init__(self, msg=None): if msg is None: msg = _("must be set to something") self.msg = msg
    def get_session_data_subset(self, session_manager):
        """
        Compute a subset of session data.

        The subset of the data that should be saved may depend on a
        particular saver class and options selected by the user.

        Must return a collection that can be handled by :meth:`dump()`.
        Special care must be taken when processing io_log (and in the future,
        attachments) as those can be arbitrarily large.
        """
        # 'result_map' is always present; everything else is opt-in.
        data = {'result_map': {}}
        session = session_manager.state
        if self.OPTION_WITH_JOB_LIST in self._option_list:
            data['job_list'] = [job.id for job in session.job_list]
        if self.OPTION_WITH_RUN_LIST in self._option_list:
            data['run_list'] = [job.id for job in session.run_list]
        if self.OPTION_WITH_DESIRED_JOB_LIST in self._option_list:
            data['desired_job_list'] = [
                job.id for job in session.desired_job_list
            ]
        if self.OPTION_WITH_RESOURCE_MAP in self._option_list:
            data['resource_map'] = {
                # TODO: there is no method to get all data from a Resource
                # instance and there probably should be. Or just let there be
                # a way to promote _data to a less hidden-but-non-conflicting
                # property.
                resource_name: [
                    object.__getattribute__(resource, "_data")
                    for resource in resource_list
                ]
                # TODO: turn session._resource_map to a public property
                for resource_name, resource_list
                in session._resource_map.items()
            }
        if self.OPTION_WITH_ATTACHMENTS in self._option_list:
            # Filled in per-job in the loop below via _build_attachment_map()
            data['attachment_map'] = {}
        if self.OPTION_WITH_CATEGORY_MAP in self._option_list:
            # Only include categories that at least one job actually uses
            wanted_category_ids = frozenset({
                job_state.effective_category_id
                for job_state in session.job_state_map.values()
            })
            data['category_map'] = {
                unit.id: unit.tr_name()
                for unit in session.unit_list
                if unit.Meta.name == 'category'
                and unit.id in wanted_category_ids
            }
            # Inject the special, built-in 'uncategorized' category, if any
            # job needs it
            UNCATEGORISED = 'com.canonical.plainbox::uncategorised'
            if UNCATEGORISED in wanted_category_ids:
                data['category_map'][UNCATEGORISED] = _("Uncategorised")
        for job_id, job_state in session.job_state_map.items():
            # Jobs that never ran (no outcome) are not exported at all
            if job_state.result.outcome is None:
                continue
            data['result_map'][job_id] = OrderedDict()
            data['result_map'][job_id]['summary'] = job_state.job.tr_summary()
            data['result_map'][job_id]['category_id'] = \
                job_state.effective_category_id
            data['result_map'][job_id]['outcome'] = job_state.result.outcome
            if job_state.result.execution_duration:
                data['result_map'][job_id]['execution_duration'] = \
                    job_state.result.execution_duration
            if self.OPTION_WITH_COMMENTS in self._option_list:
                data['result_map'][job_id]['comments'] = \
                    job_state.result.comments
            # Add Job hash if requested
            if self.OPTION_WITH_JOB_HASH in self._option_list:
                data['result_map'][job_id]['hash'] = job_state.job.checksum
            # Add Job definitions if requested
            if self.OPTION_WITH_JOB_DEFS in self._option_list:
                for prop in ('plugin', 'requires', 'depends', 'command',
                             'description',):
                    # Skip empty/None properties so the export stays compact
                    if not getattr(job_state.job, prop):
                        continue
                    data['result_map'][job_id][prop] = getattr(
                        job_state.job, prop)
            # Add Attachments if requested
            if job_state.job.plugin == 'attachment':
                if self.OPTION_WITH_ATTACHMENTS in self._option_list:
                    self._build_attachment_map(data, job_id, job_state)
                continue  # Don't add attachments IO logs to the result_map
            # Add IO log if requested
            if self.OPTION_WITH_IO_LOG in self._option_list:
                # If requested, squash the IO log so that only textual data is
                # saved, discarding stream name and the relative timestamp.
                if self.OPTION_SQUASH_IO_LOG in self._option_list:
                    io_log_data = self._squash_io_log(
                        job_state.result.get_io_log())
                elif self.OPTION_FLATTEN_IO_LOG in self._option_list:
                    io_log_data = self._flatten_io_log(
                        job_state.result.get_io_log())
                else:
                    io_log_data = self._io_log(job_state.result.get_io_log())
                data['result_map'][job_id]['io_log'] = io_log_data
            # Add certification status if requested
            if self.OPTION_WITH_CERTIFICATION_STATUS in self._option_list:
                data['result_map'][job_id]['certification_status'] = (
                    job_state.effective_certification_status)
        return data
def __init__(self, msg=None): if msg is None: msg = _("cannot be empty") self.msg = msg
def __str__(self): if self.arg_name is not None: return _("command line argument {}={!a}").format( self.arg_name, self.arg_value) else: return _("command line argument {!a}").format(self.arg_value)
def __str__(self): return _("missing dependency: {!r} ({})").format( self.missing_job_id, self.dep_type)
def visit_Error_node(visitor, node: Error):
    """Log a warning for a depends entry that could not be parsed."""
    template = _("unable to parse depends: %s")
    logger.warning(template, node.msg)
def __str__(self): return _("duplicate job id: {!r}").format(self.affected_job.id)
    def parse(
        text: str, lineno: int=1, col_offset: int=0
    ) -> "Union[FieldOverride, Error]":
        """
        Parse a single test plan field override line

        Using correct syntax will result in a FieldOverride node with
        appropriate data in the ``value`` and ``pattern`` fields. Note that
        ``pattern`` may be either a :class:`RePattern` or a :class:`ReFixed`
        or :class:`ReErr` which is not a valid pattern and cannot be used.

        >>> FieldOverride.parse("apply new-value to pattern")
        ... # doctest: +NORMALIZE_WHITESPACE
        FieldOverride(value=Text(text='new-value'),
                      pattern=ReFixed(text='pattern'))
        >>> FieldOverride.parse("apply blocker to .*")
        ... # doctest: +NORMALIZE_WHITESPACE
        FieldOverride(value=Text(text='blocker'),
                      pattern=RePattern(text='.*', re=re.compile('.*')))

        Using incorrect syntax will result in a single Error node being
        returned. The message (``msg``) field contains useful information on
        the cause of the problem, as depicted below:

        >>> FieldOverride.parse("")
        Error(msg="expected 'apply' near ''")
        >>> FieldOverride.parse("apply")
        Error(msg='expected override value')
        >>> FieldOverride.parse("apply value")
        Error(msg="expected 'to' near ''")
        >>> FieldOverride.parse("apply value to")
        Error(msg='expected override pattern')
        >>> FieldOverride.parse("apply value to pattern junk")
        Error(msg="unexpected garbage: 'junk'")

        Lastly, shell-style comments are supported. They are discarded by the
        scanner code though.

        >>> FieldOverride.parse("apply value to pattern # comment")
        ... # doctest: +NORMALIZE_WHITESPACE
        FieldOverride(value=Text(text='value'),
                      pattern=ReFixed(text='pattern'))
        """
        # XXX Until our home-grown scanner is ready col_offset values below
        # are all dummy. This is not strictly critical but should be improved
        # upon later.
        scanner = WordScanner(text)
        # 'APPLY' ...
        # The line must start with the literal keyword 'apply'.
        token, lexeme = scanner.get_token()
        if token != scanner.TokenEnum.WORD or lexeme != 'apply':
            return Error(lineno, col_offset,
                         _("expected {!a} near {!r}").format('apply', lexeme))
        # 'APPLY' VALUE ...
        token, lexeme = scanner.get_token()
        if token != scanner.TokenEnum.WORD:
            return Error(lineno, col_offset, _("expected override value"))
        value = Text(lineno, col_offset, lexeme)
        # 'APPLY' VALUE 'TO' ...
        # The literal keyword 'to' separates the value from the pattern.
        token, lexeme = scanner.get_token()
        if token != scanner.TokenEnum.WORD or lexeme != 'to':
            return Error(lineno, col_offset,
                         _("expected {!a} near {!r}").format('to', lexeme))
        # 'APPLY' VALUE 'TO' PATTERN...
        token, lexeme = scanner.get_token()
        if token != scanner.TokenEnum.WORD:
            return Error(lineno, col_offset, _("expected override pattern"))
        # Re.parse() never raises; invalid regexes come back as ReErr nodes.
        pattern = Re.parse(lexeme, lineno, col_offset)
        # 'APPLY' VALUE 'TO' PATTERN <EOF>
        # Nothing but end-of-input may follow the pattern.
        token, lexeme = scanner.get_token()
        if token != scanner.TokenEnum.EOF:
            return Error(lineno, col_offset,
                         _("unexpected garbage: {!r}").format(lexeme))
        return FieldOverride(lineno, col_offset, value, pattern)
def __str__(self): return _("dependency cycle detected: {}").format(" -> ".join( [job.id for job in self.job_list]))
def add_early_parser_arguments(self, parser): group = parser.add_argument_group(title=_("logging and debugging")) # Add the --log-level argument group.add_argument("-l", "--log-level", action="store", choices=('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'), default=None, help=argparse.SUPPRESS) # Add the --verbose argument group.add_argument( "-v", "--verbose", dest="log_level", action="store_const", const="INFO", # TRANSLATORS: please keep --log-level=INFO untranslated help=_("be more verbose (same as --log-level=INFO)")) # Add the --debug flag group.add_argument( "-D", "--debug", dest="log_level", action="store_const", const="DEBUG", # TRANSLATORS: please keep DEBUG untranslated help=_("enable DEBUG messages on the root logger")) # Add the --debug flag group.add_argument( "-C", "--debug-console", action="store_true", # TRANSLATORS: please keep DEBUG untranslated help=_("display DEBUG messages in the console")) # Add the --trace flag group.add_argument( "-T", "--trace", metavar=_("LOGGER"), action="append", default=[], # TRANSLATORS: please keep DEBUG untranslated help=_("enable DEBUG messages on the specified logger " "(can be used multiple times)")) # Add the --pdb flag group.add_argument( "-P", "--pdb", action="store_true", default=False, # TRANSLATORS: please keep pdb untranslated help=_("jump into pdb (python debugger) when a command crashes")) # Add the --debug-interrupt flag group.add_argument( "-I", "--debug-interrupt", action="store_true", default=False, # TRANSLATORS: please keep SIGINT/KeyboardInterrupt and --pdb # untranslated help=_("crash on SIGINT/KeyboardInterrupt, useful with --pdb"))
def register_parser(self, subparsers): parser = self.add_subcommand(subparsers) parser.prog = 'plainbox session' parser.set_defaults(default_session_cmd='list') # Duplicate the default value of --only-ids This is only used when # we use the default command aka when 'plainbox session' runs. parser.set_defaults(only_ids=False) session_subparsers = parser.add_subparsers( title=_('available session subcommands')) list_parser = session_subparsers.add_parser( 'list', help=_('list available sessions')) list_parser.add_argument('--only-ids', help=_('print one id per line only'), action='store_true', default=False) list_parser.set_defaults(session_cmd='list') remove_parser = session_subparsers.add_parser( 'remove', help=_('remove one more more sessions')) remove_parser.add_argument( 'session_id_list', metavar=_('SESSION-ID'), nargs="+", help=_('Identifier of the session to remove')) remove_parser.set_defaults(session_cmd='remove') show_parser = session_subparsers.add_parser( 'show', help=_('show a single session')) show_parser.add_argument('session_id_list', metavar=_('SESSION-ID'), nargs="+", help=_('Identifier of the session to show')) show_parser.add_argument( '-r', '--resume', action='store_true', help=_("resume the session (useful for debugging)")) show_parser.add_argument( '-f', '--flag', action='append', metavar=_("FLAG"), help=_("pass this resume flag to the session resume code")) show_parser.set_defaults(session_cmd='show') archive_parser = session_subparsers.add_parser( 'archive', help=_('archive a single session')) archive_parser.add_argument( 'session_id', metavar=_('SESSION-ID'), help=_('Identifier of the session to archive')) archive_parser.add_argument('archive', metavar=_('ARCHIVE'), help=_('Name of the archive to create')) archive_parser.set_defaults(session_cmd='archive') export_parser = session_subparsers.add_parser( 'export', help=_('export a single session')) export_parser.add_argument( 'session_id', metavar=_('SESSION-ID'), help=_('Identifier of the 
session to export')) export_parser.add_argument( '--flag', action='append', metavar=_("FLAG"), help=_("pass this resume flag to the session resume code")) export_parser.set_defaults(session_cmd='export') group = export_parser.add_argument_group(_("output options")) group.add_argument('-f', '--output-format', default='text', metavar=_('FORMAT'), help=_('save test results in the specified FORMAT' ' (pass ? for a list of choices)')) group.add_argument( '-p', '--output-options', default='', metavar=_('OPTIONS'), help=_('comma-separated list of options for the export mechanism' ' (pass ? for a list of choices)')) group.add_argument('-o', '--output-file', default='-', metavar=_('FILE'), type=FileType("wb"), help=_('save test results to the specified FILE' ' (or to stdout if FILE is -)'))
def __str__(self): return _("expression {!r} evaluated to a non-true result").format( self.expression.text)
def __str__(self): return _("Problem with field {}: {}").format(self.field, self.problem)
def __str__(self): return _("syntax error in resource expression")
def report_issue(self, unit, field, kind, severity, message=None, *, offset=0, origin=None): """ Helper method that aids in adding issues :param unit: A :class:`Unit` that the issue refers to or a list of such objects :param field: Name of the field the issue is specific to :param kind: Type of the issue, this can be an arbitrary symbol. If it is not known to the :meth:`explain()` then a message must be provided or a ValueError will be raised. :param severity: A symbol that represents the severity of the issue. See :class:`plainbox.impl.validation.Severity`. :param message: An (optional) message to use instead of a stock message. This argument is required if :meth:`explain()` doesn't know about the specific value of ``kind`` used :param offset: An (optional, keyword-only) offset within the field itself. If specified it is used to point to a specific line in a multi-line field. :param origin: An (optional, keyword-only) origin to use to report the issue. If specified it totally overrides all implicit origin detection. The ``offset`` is not applied in this case. :returns: The reported issue :raises ValueError: if ``kind`` is not known to :meth:`explain()` and ``message`` is None. 
""" # compute the actual message message = self.explain(unit[0] if isinstance(unit, list) else unit, field, kind, message) if message is None: raise ValueError( _("unable to deduce message and no message provided")) # compute the origin if isinstance(unit, list): cls = MultiUnitFieldIssue if origin is None: origin = unit[0].origin if field in unit[0].field_offset_map: origin = origin.with_offset( unit[0].field_offset_map[field] + offset).just_line() elif '_{}'.format(field) in unit[0].field_offset_map: if origin is None: origin = origin.with_offset( unit[0].field_offset_map['_{}'.format(field)] + offset).just_line() else: cls = UnitFieldIssue if origin is None: origin = unit.origin if field in unit.field_offset_map: origin = origin.with_offset(unit.field_offset_map[field] + offset).just_line() elif '_{}'.format(field) in unit.field_offset_map: if origin is None: origin = origin.with_offset( unit.field_offset_map['_{}'.format(field)] + offset).just_line() issue = cls(message, severity, kind, origin, unit, field) self.issue_list.append(issue) return issue
    def register_parser(self, subparsers):
        """
        Register the 'run' command parser.

        :param subparsers:
            The argparse subparsers object in which command line argument
            specification should be created.

        Registers user-interface and output options and sets this command
        object as the ``command`` default.
        """
        parser = subparsers.add_parser("run", help=_("run a test job"),
                                       prog="plainbox run")
        parser.set_defaults(command=self)
        group = parser.add_argument_group(title=_("user interface options"))
        parser.set_defaults(color=None)
        # Hidden escape hatch to disable colored output
        group.add_argument('--no-color', dest='color', action='store_false',
                           help=SUPPRESS)
        group.add_argument('--non-interactive', action='store_true',
                           help=_("skip tests that require interactivity"))
        group.add_argument('-n', '--dry-run', action='store_true',
                           help=_("don't really run most jobs"))
        group.add_argument(
            '--dont-suppress-output', action="store_true", default=False,
            help=_("don't suppress the output of certain job plugin types"))
        group = parser.add_argument_group(_("output options"))
        group.add_argument('-f', '--output-format',
                           default='com.canonical.plainbox::text',
                           metavar=_('FORMAT'),
                           help=_('save test results in the specified FORMAT'
                                  ' (pass ? for a list of choices)'))
        group.add_argument(
            '-p', '--output-options', default='', metavar=_('OPTIONS'),
            help=_('comma-separated list of options for the export mechanism'
                   ' (pass ? for a list of choices)'))
        group.add_argument('-o', '--output-file', default='-',
                           metavar=_('FILE'), type=FileType("wb"),
                           help=_('save test results to the specified FILE'
                                  ' (or to stdout if FILE is -)'))
        # '?' is accepted as a pseudo-choice to list available transports
        group.add_argument('-t', '--transport', metavar=_('TRANSPORT'),
                           choices=[_('?')] + list(
                               get_all_transports().keys()),
                           help=_('use TRANSPORT to send results somewhere'
                                  ' (pass ? for a list of choices)'))
        group.add_argument(
            '--transport-where', metavar=_('WHERE'),
            help=_('where to send data using the selected transport'))
        group.add_argument(
            '--transport-options', metavar=_('OPTIONS'),
            help=_('comma-separated list of key-value options (k=v) to '
                   'be passed to the transport'))
        # Call enhance_parser from CheckBoxCommandMixIn
        self.enhance_parser(parser)
    def __new__(mcls, name, bases, ns):
        """
        Create a new unit class.

        Synthesizes the class' ``Meta`` by merging ``field_validators`` and
        ``fields`` from each base class' Meta with those declared on the new
        class, then exposes the merged fields as the ``fields`` attribute of
        the class itself.
        """
        # mro = super().__new__(mcls, name, bases, ns).__mro__
        base_meta_list = [base.Meta for base in bases if hasattr(base, 'Meta')]
        our_meta = ns.get('Meta')
        # Only synthesize a merged Meta when the class declares one AND at
        # least one base has a Meta to merge with; otherwise the namespace is
        # passed through unchanged.
        if our_meta is not None and base_meta_list:
            # Start from a copy of the declared Meta namespace
            new_meta_ns = dict(our_meta.__dict__)
            new_meta_ns['__doc__'] = """
            Collection of meta-data about :class:`{}`

            This class is partially automatically generated.
            It always inherits the Meta class of the base unit type.

            This class has (at most) three attributes:

                `field_validators`:
                    A dictionary mapping from each field to a list of
                    :class:`IFieldvalidator:` that check that particular
                    field for correctness.

                `fields`:
                    A :class`SymbolDef` with a symbol for each field that
                    this unit defines. This does not include dynamically
                    created fields that are not a part of the unit itself.

                `validator_cls`:
                    A :class:`UnitValidator` subclass that can be used to
                    check this unit for correctness
            """.format(name)
            new_meta_bases = tuple(base_meta_list)
            # Merge custom field_validators with base unit validators
            if 'field_validators' in our_meta.__dict__:
                merged_validators = dict()
                # Base validators first so the new class' entries win
                for base_meta in base_meta_list:
                    if hasattr(base_meta, 'field_validators'):
                        merged_validators.update(base_meta.field_validators)
                merged_validators.update(our_meta.field_validators)
                new_meta_ns['field_validators'] = merged_validators
            # Merge fields with base unit fields
            if 'fields' in our_meta.__dict__:
                # Look at all the base Meta classes and collect each
                # Meta.fields class as our (real) list of base classes.
                assert our_meta.fields.__bases__ == (SymbolDef, )
                merged_fields_bases = [
                    base_meta.fields
                    for base_meta in base_meta_list
                    if hasattr(base_meta, 'fields')
                ]
                # If there are no base classes then let's just inherit from
                # the base SymbolDef class (not that we're actually ignoring
                # any base classes on the our_meta.fields class as it can
                # only be SymbolDef and nothing else is supported or makes
                # sense.
                if not merged_fields_bases:
                    merged_fields_bases.append(SymbolDef)
                # The list of base fields needs to be a tuple
                merged_fields_bases = tuple(merged_fields_bases)
                # Copy all of the Symbol objects out of the our_meta.field
                # class that we're re-defining.
                merged_fields_ns = SymbolDefNs()
                for sym_name in dir(our_meta.fields):
                    sym = getattr(our_meta.fields, sym_name)
                    if isinstance(sym, Symbol):
                        merged_fields_ns[sym_name] = sym
                merged_fields_ns['__doc__'] = """
                A symbol definition containing all fields used by
                :class:`{}`

                This class is partially automatically generated. It always
                inherits from the Meta.fields class of the base unit class.
                """.format(name)
                # Create a new class in place of the 'fields' defined in
                # our_meta.fields.
                fields = SymbolDefMeta(
                    'fields', merged_fields_bases, merged_fields_ns)
                fields.__qualname__ = '{}.Meta.fields'.format(name)
                new_meta_ns['fields'] = fields
            # Ensure that Meta.name is explicitly defined
            if 'name' not in our_meta.__dict__:
                raise TypeError(
                    _("Please define 'name' in {}.Meta").format(name))
            ns['Meta'] = type('Meta', new_meta_bases, new_meta_ns)
            # Convenience alias so units can say SomeUnit.fields.<symbol>
            ns['fields'] = ns['Meta'].fields
        return super().__new__(mcls, name, bases, ns)
def _propagate_test_plans(self): logger.debug(_("Propagating test plans to all devices")) test_plans = self.test_plans for context in self.device_context_list: context.set_test_plan_list(test_plans)
class UnitValidator:
    """
    Validator class for basic :class:`Unit` type

    Typically validators are not used directly. Instead, please call
    :meth:`Unit.check()` and iterate over the returned issues.

    :attr issue_list:
        A list of :class`plainbox.impl.validate.Issue`
    """

    def __init__(self):
        """
        Initialize a new validator
        """
        # Issues reported so far, in order of discovery
        self.issue_list = []

    def check(self, unit):
        """
        Check a specific unit for correctness

        :param unit:
            The :class:`Unit` to check
        :returns:
            A generator yielding subsequent issues
        """
        for field_validator, field in self.make_field_validators(unit):
            for issue in field_validator.check(self, unit, field):
                yield issue

    def check_in_context(self, unit, context):
        """
        Check a specific unit for correctness in a broader context

        :param unit:
            The :class:`Unit` to check
        :param context:
            A :class:`UnitValidationContext` to use as context
        :returns:
            A generator yielding subsequent issues
        """
        for field_validator, field in self.make_field_validators(unit):
            for issue in field_validator.check_in_context(
                    self, unit, field, context):
                yield issue

    def make_field_validators(self, unit):
        """
        Convert unit meta-data to a sequence of validators

        :returns:
            A generator for pairs (field_validator, field) where
            field_validator is an instance of :class:`IFieldValidator` and
            field is a symbol with the field name.
        :raises TypeError:
            if any specification in ``unit.Meta.field_validators`` is
            neither an :class:`IFieldValidator` subclass, an instance of
            one, nor a list thereof
        """
        # Sort by field so that validators (and thus issues) come out in a
        # stable, deterministic order.
        for field, spec in sorted(unit.Meta.field_validators.items()):
            # A field may map to a single validator or to a list of them.
            if isinstance(spec, type):
                validator_list = [spec]
            elif isinstance(spec, list):
                validator_list = spec
            else:
                raise TypeError(
                    _("{}.Meta.fields[{!r}] is not a validator").format(
                        unit.__class__.__name__, field))
            for index, spec in enumerate(validator_list):
                # If it's a validator class, instantiate it
                if isinstance(spec, type) \
                        and issubclass(spec, IFieldValidator):
                    yield spec(), field
                # If it's a validator instance, just return it
                elif isinstance(spec, IFieldValidator):
                    yield spec, field
                else:
                    raise TypeError(
                        _("{}.Meta.fields[{!r}][{}] is not a validator").
                        format(unit.__class__.__name__, field, index))

    def advice(self, unit, field, kind, message=None, *, offset=0,
               origin=None):
        """
        Shortcut for :meth:`report_issue` with severity=Severity.advice
        """
        return self.report_issue(unit, field, kind, Severity.advice, message,
                                 offset=offset, origin=origin)

    def warning(self, unit, field, kind, message=None, *, offset=0,
                origin=None):
        """
        Shortcut for :meth:`report_issue` with severity=Severity.warning
        """
        return self.report_issue(unit, field, kind, Severity.warning, message,
                                 offset=offset, origin=origin)

    def error(self, unit, field, kind, message=None, *, offset=0,
              origin=None):
        """
        Shortcut for :meth:`report_issue` with severity=Severity.error
        """
        return self.report_issue(unit, field, kind, Severity.error, message,
                                 offset=offset, origin=origin)

    def report_issue(self, unit, field, kind, severity, message=None, *,
                     offset=0, origin=None):
        """
        Helper method that aids in adding issues

        :param unit:
            A :class:`Unit` that the issue refers to or a list of such objects
        :param field:
            Name of the field the issue is specific to
        :param kind:
            Type of the issue, this can be an arbitrary symbol. If it is not
            known to the :meth:`explain()` then a message must be provided
            or a ValueError will be raised.
        :param severity:
            A symbol that represents the severity of the issue. See
            :class:`plainbox.impl.validation.Severity`.
        :param message:
            An (optional) message to use instead of a stock message. This
            argument is required if :meth:`explain()` doesn't know about the
            specific value of ``kind`` used
        :param offset:
            An (optional, keyword-only) offset within the field itself. If
            specified it is used to point to a specific line in a multi-line
            field.
        :param origin:
            An (optional, keyword-only) origin to use to report the issue. If
            specified it totally overrides all implicit origin detection. The
            ``offset`` is not applied in this case.
        :returns:
            The reported issue
        :raises ValueError:
            if ``kind`` is not known to :meth:`explain()` and ``message`` is
            None.
        """
        # compute the actual message
        message = self.explain(
            unit[0] if isinstance(unit, list) else unit, field, kind, message)
        if message is None:
            raise ValueError(
                _("unable to deduce message and no message provided"))
        # compute the origin; an explicitly passed origin wins and is used
        # verbatim (the offset is not applied in that case, as documented)
        if isinstance(unit, list):
            cls = MultiUnitFieldIssue
            if origin is None:
                origin = unit[0].origin
                if field in unit[0].field_offset_map:
                    origin = origin.with_offset(
                        unit[0].field_offset_map[field]
                        + offset).just_line()
                elif '_{}'.format(field) in unit[0].field_offset_map:
                    # Fall back to the offset recorded for the raw '_field'
                    # entry.  BUG FIX: this branch used to be guarded by a
                    # second `if origin is None:` check, which is always
                    # false here (origin was assigned just above), so the
                    # offset was silently never applied.
                    origin = origin.with_offset(
                        unit[0].field_offset_map['_{}'.format(field)]
                        + offset).just_line()
        else:
            cls = UnitFieldIssue
            if origin is None:
                origin = unit.origin
                if field in unit.field_offset_map:
                    origin = origin.with_offset(
                        unit.field_offset_map[field]
                        + offset).just_line()
                elif '_{}'.format(field) in unit.field_offset_map:
                    # Same '_field' fallback (and the same dead-guard fix)
                    # as in the list branch above.
                    origin = origin.with_offset(
                        unit.field_offset_map['_{}'.format(field)]
                        + offset).just_line()
        issue = cls(message, severity, kind, origin, unit, field)
        self.issue_list.append(issue)
        return issue

    def explain(self, unit, field, kind, message):
        """
        Lookup an explanatory string for a given issue kind

        :returns:
            A string (explanation) or None if the issue kind
            is not known to this method.
        """
        stock_msg = self._explain_map.get(kind)
        if message or stock_msg:
            return _("field {field!a}, {message}").format(
                field=str(field), message=message or stock_msg)

    # Stock explanation messages, keyed by issue kind (Problem symbol)
    _explain_map = {
        Problem.missing: _("required field missing"),
        Problem.wrong: _("incorrect value supplied"),
        Problem.useless: _("definition useless in this context"),
        Problem.deprecated: _("deprecated field used"),
        Problem.constant: _("value must be variant (parametrized)"),
        Problem.variable: _("value must be invariant (unparametrized)"),
        Problem.unknown_param: _("field refers to unknown parameter"),
        Problem.not_unique: _("field value is not unique"),
        Problem.expected_i18n: _("field should be marked as translatable"),
        Problem.unexpected_i18n: (
            _("field should not be marked as translatable")),
        Problem.syntax_error: _("syntax error inside the field"),
        Problem.bad_reference: _("bad reference to another unit"),
    }
def about_to_execute_program(self, args, kwargs): if self.show_cmd_output: print(self.C.BLACK("... 8< -".ljust(80, '-'))) else: print(self.C.BLACK("(" + _("Command output hidden") + ")"))
def __str__(self): return _("???")