Ejemplo n.º 1
0
 def _build_IOLogRecord(cls, record_repr):
     """
     Convert the representation of IOLogRecord back into the object.

     :param record_repr:
         A three-element list of [delay, stream_name, base64_data] as
         produced by the serializer.
     :returns:
         An IOLogRecord with the decoded byte payload.
     :raises CorruptedSessionError:
         If any field has the wrong type, the delay is negative, the
         stream name is not 'stdout'/'stderr', or the data is not
         ASCII-clean, valid base64.
     """
     _validate(record_repr, value_type=list)
     delay = _validate(record_repr, key=0, value_type=float)
     if delay < 0:
         # TRANSLATORS: please keep delay untranslated
         raise CorruptedSessionError(_("delay cannot be negative"))
     stream_name = _validate(
         record_repr, key=1, value_type=str,
         value_choice=['stdout', 'stderr'])
     data = _validate(record_repr, key=2, value_type=str)
     # Each data item is a base64 string created by encoding the bytes and
     # converting them to ASCII. To get the original we need to undo that
     # operation.
     try:
         data = data.encode("ASCII")
     except UnicodeEncodeError:
         raise CorruptedSessionError(
             _("record data {!r} is not ASCII").format(data))
     try:
         data = base64.standard_b64decode(data)
     except binascii.Error:
         raise CorruptedSessionError(
             _("record data {!r} is not correct base64").format(data))
     return IOLogRecord(delay, stream_name, data)
Ejemplo n.º 2
0
    def _restore_SessionState_desired_job_list(cls, session, session_repr):
        """
        Extract the representation of desired_job_list from the session and
        set it back to the session object. This method should be called after
        all the jobs are discovered.

        :param session:
            The SessionState object being restored.
        :param session_repr:
            Dictionary representation of the saved session.
        :raises CorruptedSessionError:
            if desired_job_list refers to unknown job
        """
        # List of all the _ids_ of the jobs that were selected
        desired_job_list = [
            _validate(
                job_id, value_type=str,
                value_type_msg=_("Each job id must be a string"))
            for job_id in _validate(
                session_repr, key='desired_job_list', value_type=list)]
        # Restore job selection
        logger.debug(
            _("calling update_desired_job_list(%r)"), desired_job_list)
        try:
            session.update_desired_job_list([
                session.job_state_map[job_id].job
                for job_id in desired_job_list])
        except KeyError as exc:
            # Translate the low-level KeyError (missing job id) into a
            # session-level corruption error with the offending id.
            raise CorruptedSessionError(
                _("'desired_job_list' refers to unknown job {!r}").format(
                    exc.args[0]))
Ejemplo n.º 3
0
 def _print_dependency_report(self):
     """Print a banner followed by each dependency problem, if any."""
     print(_("[Dependency Report]").center(80, '='))
     if not self.problem_list:
         print(_("Selected jobs have no dependency problems"))
         return
     for issue in self.problem_list:
         print(" * {}".format(issue))
Ejemplo n.º 4
0
    def _run_command(self, job, config):
        """
        Run the shell command associated with the specified job.

        :param job:
            The JobDefinition whose command should be executed.
        :param config:
            Configuration object forwarded to IO handling and to the
            execution controller.
        :returns: (return_code, record_path) where return_code is the number
        returned by the exiting child process while record_path is a pathname
        of a gzipped content readable with :class:`IOLogRecordReader`
        """
        # Bail early if there is nothing do do
        # NOTE(review): the no-command case returns () for record_path while
        # the docstring promises a pathname -- confirm callers handle both.
        if job.command is None:
            return None, ()
        # Get an extcmd delegate for observing all the IO the way we need
        delegate, io_log_gen = self._prepare_io_handling(job, config)
        # Create a subprocess.Popen() like object that uses the delegate
        # system to observe all IO as it occurs in real time.
        extcmd_popen = extcmd.ExternalCommandWithDelegate(delegate)
        # Stream all IOLogRecord entries to disk
        record_path = os.path.join(self._jobs_io_log_dir, "{}.record.gz".format(slugify(job.id)))
        # Wrap the gzip stream in a UTF-8 text wrapper; both are closed when
        # the with-block exits, flushing all records to disk.
        with gzip.open(record_path, mode="wb") as gzip_stream, io.TextIOWrapper(
            gzip_stream, encoding="UTF-8"
        ) as record_stream:
            writer = IOLogRecordWriter(record_stream)
            io_log_gen.on_new_record.connect(writer.write_record)
            # Start the process and wait for it to finish getting the
            # result code. This will actually call a number of callbacks
            # while the process is running. It will also spawn a few
            # threads although all callbacks will be fired from a single
            # thread (which is _not_ the main thread)
            logger.debug(_("job[%s] starting command: %s"), job.id, job.command)
            # Run the job command using extcmd
            return_code = self._run_extcmd(job, config, extcmd_popen)
            logger.debug(_("job[%s] command return code: %r"), job.id, return_code)
        return return_code, record_path
Ejemplo n.º 5
0
    def __init__(self, cause, related_job=None, related_expression=None):
        """
        Initialize a new inhibitor with the specified cause.

        If cause is other than UNDESIRED a related_job is necessary. If cause
        is either PENDING_RESOURCE or FAILED_RESOURCE related_expression is
        necessary as well. A ValueError is raised when this is violated.
        """
        if cause not in self._cause_display:
            raise ValueError(_("unsupported value for cause"))
        # Any cause other than UNDESIRED must be tied to a concrete job
        job_required = cause != self.UNDESIRED
        if job_required and related_job is None:
            raise ValueError(
                # TRANSLATORS: please don't translate related_job, None and
                # cause
                _("related_job must not be None when cause is {}").format(
                    self._cause_display[cause]))
        # Resource-related causes must also carry the offending expression
        expression_required = cause in (
            self.PENDING_RESOURCE, self.FAILED_RESOURCE)
        if expression_required and related_expression is None:
            raise ValueError(_(
                # TRANSLATORS: please don't translate related_expression, None
                # and cause.
                "related_expression must not be None when cause is {}"
            ).format(self._cause_display[cause]))
        self.cause = cause
        self.related_job = related_job
        self.related_expression = related_expression
Ejemplo n.º 6
0
    def get_storage_list(self):
        """
        Enumerate stored sessions in the repository.

        If the repository directory is not present then an empty list is
        returned.

        :returns:
            list of :class:`SessionStorage` representing discovered sessions
        """
        logger.debug(_("Enumerating sessions in %s"), self._location)
        try:
            # Try to enumerate the directory
            item_list = os.listdir(self._location)
        except OSError as exc:
            # If the directory does not exist,
            # silently return empty collection
            if exc.errno == errno.ENOENT:
                return []
            # Don't silence any other errors
            raise
        session_list = []
        # Check each item by looking for directories
        for item in item_list:
            # Use self._location consistently (the original mixed
            # self.location here with self._location in the listdir() call)
            pathname = os.path.join(self._location, item)
            # Make sure not to follow any symlinks here
            stat_result = os.lstat(pathname)
            # Consider directories only
            if stat.S_ISDIR(stat_result.st_mode):
                logger.debug(_("Found possible session in %r"), pathname)
                session = SessionStorage(pathname)
                session_list.append(session)
        # Return the full list
        return session_list
Ejemplo n.º 7
0
 def run(self):
     """Emit one demonstration message at each logging level, then exit 0."""
     for emit, message in (
             (logger.debug, _("a debug message")),
             (logger.info, _("a info message")),
             (logger.warning, _("a warning message")),
             (logger.error, _("an error message")),
             (logger.critical, _("a critical message"))):
         emit(message)
     return 0
Ejemplo n.º 8
0
    def register_parser(self, subparsers):
        """
        Overridden method of CommandBase.

        :param subparsers:
            The argparse subparsers objects in which command line argument
            specification should be created.

        This method is invoked by the command line handling code to register
        arguments specific to this sub-command. It must also register itself as
        the command class with the ``command`` default.
        """
        parser = self.add_subcommand(subparsers)
        parser.add_argument(
            "--prefix", default="/usr/local", help=_("installation prefix"))
        parser.add_argument(
            '--layout',
            default='flat',
            choices=sorted(self._INSTALL_LAYOUT.keys()),
            # TRANSLATORS: don't translate %(default)s
            help=_("installation directory layout (default: %(default)s)"))
        parser.add_argument(
            "--root", default="",
            help=_("install everything relative to this alternate root"
                   " directory"))
        parser.set_defaults(command=self)
Ejemplo n.º 9
0
 def _get_matching_job_list(self, ns, job_list):
     """
     Compute the list of jobs selected by the command line.

     :param ns:
         Parsed argparse namespace carrying ``whitelist``,
         ``include_pattern_list`` and ``exclude_pattern_list``.
     :param job_list:
         List of all known job definitions.
     :returns:
         The result of select_jobs() over the accumulated qualifiers.
     """
     logger.debug("_get_matching_job_list(%r, %r)", ns, job_list)
     qualifier_list = []
     # Add whitelists
     for whitelist_file in ns.whitelist:
         qualifier = self.get_whitelist_from_file(
             whitelist_file.name, whitelist_file)
         if qualifier is not None:
             qualifier_list.append(qualifier)
     # Add all the --include and --exclude patterns.  The two original
     # loops were identical apart from the inclusive flag, so they are
     # folded into a single parameterized loop.
     for pattern_list, inclusive in (
             (ns.include_pattern_list, True),
             (ns.exclude_pattern_list, False)):
         for pattern in pattern_list:
             try:
                 qualifier = RegExpJobQualifier(
                     '^{}$'.format(pattern), inclusive=inclusive)
             except Exception as exc:
                 # A malformed regex should not abort the whole selection
                 logger.warning(
                     _("Incorrect pattern %r: %s"), pattern, exc)
             else:
                 qualifier_list.append(qualifier)
     logger.debug("select_jobs(%r, %r)", job_list, qualifier_list)
     return select_jobs(job_list, qualifier_list)
Ejemplo n.º 10
0
def gen_rfc822_records_from_io_log(job, result):
    """
    Convert io_log from a job result to a sequence of rfc822 records

    :param job:
        The job (definition) whose result is being processed; used as the
        origin for error reporting and record provenance.
    :param result:
        A job result offering get_io_log(); each record appears to be a
        (delay, stream_name, data_bytes) triple given the indexing below.
    :yields:
        Parsed rfc822 records until the log is exhausted or a syntax error
        occurs; on error, records yielded so far are kept and a warning is
        logged instead of raising.
    """
    logger.debug(_("processing output from a job: %r"), job)
    # Select all stdout lines from the io log
    line_gen = (record[2].decode('UTF-8', errors='replace')
                for record in result.get_io_log()
                if record[1] == 'stdout')
    # Allow the generated records to be traced back to the job that defined
    # the command which produced (printed) them.
    source = JobOutputTextSource(job)
    try:
        # Parse rfc822 records from the subsequent lines
        for record in gen_rfc822_records(line_gen, source=source):
            yield record
    except RFC822SyntaxError as exc:
        # When this exception happens we will _still_ store all the
        # preceding records. This is worth testing

        logger.warning(
            # TRANSLATORS: keep the word "local" untranslated. It is a
            # special type of job that needs to be distinguished.
            _("local script %s returned invalid RFC822 data: %s"),
            job, exc)
Ejemplo n.º 11
0
 def manager(self):
     """Return the session manager, committing any pending hint first."""
     logger.debug(_(".manager accessed"))
     if self._commit_hint is not None:
         self._commit_manager()
     manager = self._manager
     if manager is None:
         raise AttributeError(_("Session not ready, did you call open()?"))
     return manager
Ejemplo n.º 12
0
    def configured_filesystem(self, job, config):
        """
        Context manager for handling filesystem aspects of job execution.

        :param job:
            The JobDefinition to execute
        :param config:
            A PlainBoxConfig instance which can be used to load missing
            environment definitions that apply to all jobs. It is used to
            provide values for missing environment variables that are required
            by the job (as expressed by the environ key in the job definition
            file).
        :returns:
            Pathname of the executable symlink nest directory.
        """
        # NOTE(review): this is a generator -- presumably decorated with
        # @contextmanager outside this view; confirm at the definition site.
        # Create a nest for all the private executables needed for execution
        prefix = 'nest-'
        suffix = '.{}'.format(job.checksum)
        # The temporary directory (and symlinks within) is removed when the
        # with-block, and therefore the context manager, exits.
        with tempfile.TemporaryDirectory(suffix, prefix) as nest_dir:
            logger.debug(_("Symlink nest for executables: %s"), nest_dir)
            nest = SymLinkNest(nest_dir)
            # Add all providers sharing namespace with the current job to PATH
            for provider in self._provider_list:
                if job.provider.namespace == provider.namespace:
                    nest.add_provider(provider)
            # NOTE(review): this debug line duplicates the one above --
            # possibly a copy/paste leftover; confirm before removing.
            logger.debug(_("Symlink nest for executables: %s"), nest_dir)
            yield nest_dir
Ejemplo n.º 13
0
 def main(self, argv=None):
     """
     Run as if invoked from command line directly

     :param argv:
         Command line arguments to parse; argparse falls back to
         sys.argv[1:] when None.
     :returns:
         The value of dispatch_and_catch_exceptions(), or None when early
         initialization was aborted with KeyboardInterrupt.
     """
     # Another try/catch block for catching KeyboardInterrupt
     # This one is really only meant for the early init abort
     # (when someone runs main but bails out before we really
     # get to the point when we do something useful and setup
     # all the exception handlers).
     try:
         self.early_init()
         early_ns = self._early_parser.parse_args(argv)
         self.late_init(early_ns)
         # Construct the full command line argument parser
         self._parser = self.construct_parser()
         logger.debug(_("parsed early namespace: %s"), early_ns)
         # parse the full command line arguments, this is also where we
         # do argcomplete-dictated exit if bash shell completion
         # is requested
         ns = self._parser.parse_args(argv)
         logger.debug(_("parsed full namespace: %s"), ns)
         self.final_init(ns)
     except KeyboardInterrupt:
         pass
     else:
         # Only dispatch when init completed without interruption
         return self.dispatch_and_catch_exceptions(ns)
Ejemplo n.º 14
0
 def __init__(self, filename, text, provider):
     """
     Initialize the plug-in by parsing job definitions from ``text``.

     :raises PlugInError:
         If the text is not valid RFC822 or a record cannot be converted
         into a job definition.
     """
     self._filename = filename
     self._job_list = []
     logger.debug(_("Loading jobs definitions from %r..."), filename)
     try:
         records = load_rfc822_records(
             text, source=FileTextSource(filename))
     except RFC822SyntaxError as exc:
         raise PlugInError(
             _("Cannot load job definitions from {!r}: {}").format(
                 filename, exc))
     for record in records:
         try:
             job = JobDefinition.from_rfc822_record(record)
         except ValueError as exc:
             raise PlugInError(
                 _("Cannot define job from record {!r}: {}").format(
                     record, exc))
         # No exception: adopt the job into this provider's list
         job._provider = provider
         self._job_list.append(job)
         logger.debug(_("Loaded %r"), job)
Ejemplo n.º 15
0
    def register_parser(self, subparsers):
        """
        Overridden method of CommandBase.

        :param subparsers:
            The argparse subparsers objects in which command line argument
            specification should be created.

        This method is invoked by the command line handling code to register
        arguments specific to this sub-command. It must also register itself as
        the command class with the ``command`` default.
        """
        parser = self.add_subcommand(subparsers)
        parser.add_argument(
            "-n", "--dry-run", default=False, action="store_true",
            help=_("don't actually do anything"))
        group = parser.add_argument_group(title=_("actions to perform"))
        # Fixed: this flag used action="store_false" with default=False,
        # which made passing it a no-op; store_true matches its siblings.
        group.add_argument(
            "--dont-update-pot", default=False, action="store_true",
            help=_("do not update the translation template"))
        group.add_argument(
            "--dont-merge-po", default=False, action="store_true",
            help=_("do not merge translation files with the template"))
        group.add_argument(
            "--dont-build-mo", default=False, action="store_true",
            help=_("do not build binary translation files"))
Ejemplo n.º 16
0
 def create_parser_object(self):
     """Build and return the top-level argparse parser for manage.py."""
     # TRANSLATORS: please keep 'manage.py', '--help', '--version'
     # untranslated. Translate only '[options]'
     usage_text = _("manage.py [--help] [--version] [options]")
     parser = argparse.ArgumentParser(
         prog=self.get_exec_name(), usage=usage_text)
     parser.add_argument(
         "--version", action="version", version=self.get_exec_version(),
         help=_("show program's version number and exit"))
     return parser
Ejemplo n.º 17
0
 def register_parser(self, subparsers):
     """Register the 'list' sub-command and its arguments."""
     list_help = _("list and describe various objects")
     parser = subparsers.add_parser("list", help=list_help)
     parser.add_argument(
         '-a', '--attrs', default=False, action="store_true",
         help=_("show object attributes"))
     parser.add_argument(
         'group', nargs='?',
         help=_("list objects from the specified group"))
     parser.set_defaults(command=self)
Ejemplo n.º 18
0
def KindValidator(variable, new_value):
    """
    A validator ensuring that values match the "kind" of the variable.

    Returns None when the value is acceptable, otherwise a translated
    error message appropriate for the variable's kind.
    """
    if isinstance(new_value, variable.kind):
        return None
    error_by_kind = {
        bool: _("expected a boolean"),
        int: _("expected an integer"),
        float: _("expected a floating point number"),
        str: _("expected a string"),
    }
    return error_by_kind[variable.kind]
Ejemplo n.º 19
0
 def register_parser(self, subparsers):
     """Register this sub-command and its 'name' positional argument."""
     parser = self.add_subcommand(subparsers)
     # TRANSLATORS: please keep the YYYY.example... text unchanged or at
     # the very least translate only YYYY and some-name. In either case
     # some-name must be a reasonably-ASCII string (should be safe for a
     # portable directory name)
     name_help = _("provider name, eg: YYYY.example.org:some-name")
     parser.add_argument(
         'name', metavar=_("name"), type=IQN, help=name_help)
     parser.set_defaults(command=self)
Ejemplo n.º 20
0
 def _save_results(self, output_file, input_stream):
     """Copy exported results from input_stream to output_file.

     When writing to stdout a decoding translator is interposed (exporters
     emit bytes, stdout takes text) and the stream is left open; any other
     destination is closed after the copy.
     """
     if output_file is sys.stdout:
         print(_("[ Results ]").center(80, '='))
         # This requires a bit more finesse, as exporters output bytes
         # and stdout needs a string.
         translating_stream = ByteStringStreamTranslator(
             output_file, "utf-8")
         copyfileobj(input_stream, translating_stream)
     else:
         print(_("Saving results to {}").format(output_file.name))
         copyfileobj(input_stream, output_file)
         output_file.close()
Ejemplo n.º 21
0
 def adjust_logging(self, level=None, trace_list=None, debug_console=False):
     """Adjust log levels and console debugging per command-line options."""
     # Bump logging on the root logger if requested
     if level is not None:
         logging.getLogger(None).setLevel(level)
         logger.debug(_("Enabled %r on root logger"), level)
         logging.getLogger("plainbox").setLevel(level)
     # Enable tracing on specified loggers (no-op when trace_list is None
     # or empty, matching the original None check)
     for name in (trace_list or ()):
         logging.getLogger(name).setLevel(logging.DEBUG)
         logger.debug(_("Enabled debugging on logger %r"), name)
     if debug_console and (level == 'DEBUG' or trace_list):
         # Enable DEBUG logging to console if explicitly requested
         logging.config.dictConfig(self.DEBUG_CONSOLE_CONFIG)
Ejemplo n.º 22
0
 def write_summary(self, data):
     """
     Write the failures-summary worksheet (worksheet2).

     Renders pass/fail/skip counts and rates, writes the chart source data
     into columns L/M, and inserts a small pie chart. The ``data`` argument
     is not used by this method.
     """
     # Column widths: margin, icon column, and the summary text column
     self.worksheet2.set_column(0, 0, 5)
     self.worksheet2.set_column(1, 1, 2)
     self.worksheet2.set_column(3, 3, 27)
     self.worksheet2.write(3, 1, _('Failures summary'), self.format03)
     self.worksheet2.write(4, 1, '✔', self.format10)
     self.worksheet2.write(
         4, 2, (
             ngettext('{} Test passed', '{} Tests passed',
                      self.total_pass).format(self.total_pass)
             + " - "
             + _('Success Rate: {:.2f}% ({}/{})').format(
                 self.total_pass / self.total * 100,
                 self.total_pass, self.total)
         ), self.format02)
     self.worksheet2.write(5, 1, '✘', self.format11)
     self.worksheet2.write(
         5, 2, (
             # Fixed: the original omitted .format() here, so the literal
             # "{} Tests failed" placeholder was written to the sheet.
             ngettext('{} Test failed', '{} Tests failed',
                      self.total_fail).format(self.total_fail)
             + ' - '
             + _('Failure Rate: {:.2f}% ({}/{})').format(
                 self.total_fail / self.total * 100,
                 self.total_fail, self.total)
         ), self.format02)
     self.worksheet2.write(6, 1, '-', self.format12)
     self.worksheet2.write(
         6, 2, (
             # Fixed: same missing .format() as the failed-tests line above.
             ngettext('{} Test skipped', '{} Tests skipped',
                      self.total_skip).format(self.total_skip)
             + ' - '
             + _('Skip Rate: {:.2f}% ({}/{})').format(
                 self.total_skip / self.total * 100,
                 self.total_skip, self.total)
         ), self.format02)
     # Chart source data lives in columns L and M (rows 3-5)
     self.worksheet2.write_column(
         'L3', [_('Fail'), _('Skip'), _('Pass')], self.format14)
     self.worksheet2.write_column(
         'M3', [self.total_fail, self.total_skip, self.total_pass],
         self.format14)
     # Configure the series.
     chart = self.workbook.add_chart({'type': 'pie'})
     chart.set_legend({'position': 'none'})
     chart.add_series({
         'points': [
             {'fill': {'color': 'red'}},
             {'fill': {'color': 'gray'}},
             {'fill': {'color': 'lime'}},
         ],
         'categories': '=' + _("Summary") + '!$L$3:$L$5',
         'values': '=' + _("Summary") + '!$M$3:$M$5'}
     )
     # Insert the chart into the worksheet.
     self.worksheet2.insert_chart('F4', chart, {
         'x_offset': 0, 'y_offset': 10, 'x_scale': 0.25, 'y_scale': 0.25
     })
Ejemplo n.º 23
0
    def _build_SessionState(self, session_repr, early_cb=None):
        """
        Reconstruct the session state object.

        This method creates a fresh SessionState instance and restores
        jobs, results, meta-data and desired job list using helper methods.

        :param session_repr:
            Dictionary representation of the saved session.
        :param early_cb:
            Optional callable invoked with the fresh session before any
            state is restored; it may return a replacement session object.
        :returns:
            The restored SessionState (possibly the one early_cb returned).
        """
        # Construct a fresh session object.
        session = SessionState(self.job_list)
        logger.debug(_("Constructed new session for resume %r"), session)
        # Give early_cb a chance to see the session before we start resuming.
        # This way applications can see, among other things, generated jobs
        # as they are added to the session, by registering appropriate signal
        # handlers on the freshly-constructed session instance.
        if early_cb is not None:
            logger.debug(_("Invoking early callback %r"), early_cb)
            new_session = early_cb(session)
            if new_session is not None:
                logger.debug(
                    _("Using different session for resume: %r"), new_session)
                session = new_session
        # Restore bits and pieces of state
        logger.debug(
            _("Starting to restore jobs and results to %r..."), session)
        self._restore_SessionState_jobs_and_results(session, session_repr)
        logger.debug(_("Starting to restore metadata..."))
        self._restore_SessionState_metadata(session, session_repr)
        logger.debug(_("Starting to restore desired job list..."))
        self._restore_SessionState_desired_job_list(session, session_repr)
        logger.debug(_("Starting to restore job list..."))
        self._restore_SessionState_job_list(session, session_repr)
        # Return whatever we've got
        logger.debug(_("Resume complete!"))
        return session
Ejemplo n.º 24
0
 def _run_extcmd(self, job, config, extcmd_popen):
     """
     Select the best-scoring execution controller and run the job with it.

     :returns:
         Whatever the chosen controller's execute_job() returns.
     :raises RuntimeError:
         If no controller reports a non-negative score for the job.
     """
     # Compute the score of each controller
     ctrl_score = [(ctrl, ctrl.get_score(job)) for ctrl in self._execution_ctrl_list]
     # Sort scores
     ctrl_score.sort(key=lambda pair: pair[1])
     # Get the best score (list.sort is stable, so ties resolve to the
     # controller listed last in self._execution_ctrl_list)
     ctrl, score = ctrl_score[-1]
     # Ensure that the controller is viable
     if score < 0:
         raise RuntimeError(_("No exec controller supports job {}").format(job))
     logger.debug(
         _("Selected execution controller %s (score %d) for job %r"), ctrl.__class__.__name__, score, job.id
     )
     # Delegate and execute
     return ctrl.execute_job(job, config, extcmd_popen)
Ejemplo n.º 25
0
    def _process_job(self, session, jobs_repr, results_repr, job_id):
        """
        Process all representation details associated with a particular job

        :param session:
            The SessionState being reconstructed.
        :param jobs_repr:
            Mapping of job id to job definition checksum.
        :param results_repr:
            Mapping of job id to a (possibly None) list of serialized
            results.
        :param job_id:
            Identifier of the job to process.

        This method takes a session object, representation of all the jobs
        and all the results (and a job id) and tries to reconstruct the
        state associated with that job in the session object.

        Jobs are verified to match existing (known) jobs. Results are
        rebuilt from their representation and presented back to the session
        for processing (this restores resources and generated jobs).

        This method can fail in normal operation, when the job that was
        being processed is a generated job and has not been reintroduced into
        the session. When that happens a KeyError is raised.

        .. note::
            Since the representation format for results can support storing
            and restoring a list of results (per job) but the SessionState
            cannot yet do that the implementation of this method restores
            the state of the _last_ result object only.
        """
        _validate(job_id, value_type=str)
        # Get the checksum from the representation
        checksum = _validate(
            jobs_repr, key=job_id, value_type=str)
        # Look up the actual job definition in the session.
        # This can raise KeyError but it is okay, callers expect that
        job = session.job_state_map[job_id].job
        # Check if job definition has not changed
        if job.checksum != checksum:
            raise IncompatibleJobError(
                _("Definition of job {!r} has changed").format(job_id))
        # Collect all of the result objects into result_list
        result_list = []
        result_list_repr = _validate(
            results_repr, key=job_id, value_type=list, value_none=True)
        for result_repr in result_list_repr:
            _validate(result_repr, value_type=dict)
            result = self._build_JobResult(result_repr)
            result_list.append(result)
        # Show the _LAST_ result to the session. Currently we only store one
        # result but showing the most recent (last) result should be good
        # in general.
        if len(result_list) > 0:
            logger.debug(
                _("calling update_job_result(%r, %r)"), job, result_list[-1])
            session.update_job_result(job, result_list[-1])
Ejemplo n.º 26
0
    def wrap_and_add_plugin(self, plugin_name, plugin_obj):
        """
        Internal method of PlugInCollectionBase.

        :param plugin_name:
            plugin name, some arbitrary string
        :param plugin_obj:
            plugin object, some arbitrary object.

        Attempt to wrap (plugin_name, plugin_obj) with this collection's
        wrapper class. On PlugInError the failure is logged and recorded in
        the problem list rather than propagated, so one broken plugin does
        not prevent the rest from loading; otherwise the wrapper is stored
        in the plugin map.
        """
        try:
            plugin_wrapper = self._wrapper(
                plugin_name, plugin_obj,
                *self._wrapper_args, **self._wrapper_kwargs)
        except PlugInError as exc:
            logger.warning(
                _("Unable to prepare plugin %s: %s"), plugin_name, exc)
            self._problem_list.append(exc)
        else:
            self._plugins[plugin_name] = plugin_wrapper
Ejemplo n.º 27
0
 def __init__(self, name=None, *, section='DEFAULT', kind=str,
              default=Unset, validator_list=None, help_text=None):
     """
     Initialize a configuration variable.

     :param name: variable name (None for anonymous variables)
     :param section: configuration file section, 'DEFAULT' by default
     :param kind: value type, one of self._KIND_CHOICE
     :param default: default value, Unset when absent
     :param validator_list: optional list of validators; a KindValidator is
         inserted implicitly (see below) -- the caller's list is copied and
         never mutated
     :param help_text: optional help text, also exposed as __doc__
     :raises ValueError: if kind is not a supported kind
     """
     # Ensure kind is correct
     if kind not in self._KIND_CHOICE:
         raise ValueError(_("unsupported kind"))
     # Ensure that we have a validator_list, even if empty.  Copy the
     # caller's list: the original implementation inserted into it in
     # place, mutating the argument passed by the caller.
     if validator_list is None:
         validator_list = []
     else:
         validator_list = list(validator_list)
     if validator_list and isinstance(validator_list[0], NotUnsetValidator):
         # XXX: Kludge ahead, beware!
         # Insert a KindValidator as the second validator to run
         # just after the NotUnsetValidator
         # TODO: To properly handle this without any special-casing we
         # should drop the implicit insertion of the KindValidator and
         # convert all users to properly order KindValidator and
         # NotUnsetValidator instances so that the error message is helpful
         # to the user. The whole idea is to validate Unset before we try to
         # validate the type.
         validator_list.insert(1, KindValidator)
     else:
         # Insert a KindValidator as the first validator to run
         validator_list.insert(0, KindValidator)
     # Assign all the attributes
     self._name = name
     self._section = section
     self._kind = kind
     self._default = default
     self._validator_list = validator_list
     self._help_text = help_text
     # Workaround for Sphinx breaking if __doc__ is a property
     self.__doc__ = self.help_text or self.__class__.__doc__
Ejemplo n.º 28
0
    def run_local_job(self, checksum, env):
        """
        Run a local job and interpret its stdout as job definitions.

        :param checksum:
            The checksum of the job to execute
        :param env:
            Environment to execute the job in.
        :returns:
            A list of job definitions that were parsed out of the output.
        :raises LookupError:
            If the checksum does not match any known job
        """
        job = self.find_job(checksum)
        cmd = ["bash", "-c", job.command]
        output = subprocess.check_output(cmd, universal_newlines=True, env=self.modify_execution_environment(env))
        job_list = []
        source = JobOutputTextSource(job)
        try:
            record_list = load_rfc822_records(output, source=source)
        except RFC822SyntaxError as exc:
            # NOTE(review): this uses the root 'logging' module while sibling
            # code uses a module-level 'logger' -- confirm this is deliberate.
            # On syntax error an empty list is returned (error is logged).
            logging.error(_("Syntax error in job generated from %s: %s"), job, exc)
        else:
            for record in record_list:
                job = JobDefinition.from_rfc822_record(record)
                job_list.append(job)
        return job_list
Ejemplo n.º 29
0
 def _commit_resume(self):
     """Load the most recently saved session and keep its manager."""
     logger.debug("_commit_resume()")
     repository = SessionStorageRepository()
     storage = repository.get_last_storage()
     assert storage is not None, "no saved session to resume"
     self._manager = SessionManager.load_session(
         self.job_list, storage, lambda session: self)
     logger.debug(_("_commit_resume() finished"))
Ejemplo n.º 30
0
    def execute_job(self, job, config, extcmd_popen):
        """
        Execute the specified job using the specified subprocess-like object

        :param job:
            The JobDefinition to execute
        :param config:
            A PlainBoxConfig instance which can be used to load missing
            environment definitions that apply to all jobs. It is used to
            provide values for missing environment variables that are required
            by the job (as expressed by the environ key in the job definition
            file).
        :param extcmd_popen:
            A subprocess.Popen like object
        :returns:
            The return code of the command, as returned by subprocess.call()
        """
        # CHECKBOX_DATA is where jobs can share output.
        # It has to be a directory that scripts can assume exists.
        # exist_ok avoids the check-then-create race present in the original
        # isdir()/makedirs() sequence; it still raises if the path exists
        # but is not a directory, matching the original failure mode.
        os.makedirs(self.CHECKBOX_DATA, exist_ok=True)
        # Setup the executable nest directory
        with self.configured_filesystem(job, config) as nest_dir:
            # Get the command and the environment.
            # of this execution controller
            cmd = self.get_execution_command(job, config, nest_dir)
            env = self.get_execution_environment(job, config, nest_dir)
            # run the command
            logger.debug(_("job[%s] executing %r with env %r"),
                         job.id, cmd, env)
            return extcmd_popen.call(cmd, env=env)