Example #1
0
 def _run_command(self, session, job, parent):
     """
     Run a Job command in a separate thread.

     Builds a JobRunner that reports command IO through a UIOutputPrinter
     and hands the exit code and IO-log path back to the parent's
     command callback.
     """
     printer = UIOutputPrinter(self)
     job_runner = JobRunner(
         session.session_dir,
         session.jobs_io_log_dir,
         command_io_delegate=printer)
     exit_code, log_path = job_runner._run_command(job, None)
     parent._command_callback(exit_code, log_path)
Example #2
0
 def _run_local_jobs(self):
     """
     Run all 'local' jobs in the session until none are left pending.

     Local jobs can generate additional jobs, so after each executed job
     the run list is re-scanned from the start; the loop ends once a full
     pass finds no local job without an outcome.
     """
     print(_("[Running Local Jobs]").center(80, '='))
     manager = SessionManager.create_with_state(self.session)
     try:
         # Flag the session as incomplete and checkpoint before running so
         # an interrupted run leaves a resumable state behind.
         manager.state.metadata.title = "plainbox dev analyze session"
         manager.state.metadata.flags = [SessionMetaData.FLAG_INCOMPLETE]
         manager.checkpoint()
         runner = JobRunner(manager.storage.location,
                            self.provider_list,
                            os.path.join(manager.storage.location,
                                         'io-logs'),
                            command_io_delegate=self)
         again = True
         while again:
             for job in self.session.run_list:
                 if job.plugin == 'local':
                     if self.session.job_state_map[
                             job.id].result.outcome is None:
                         self._run_local_job(manager, runner, job)
                         # Restart the scan: the run list may have grown.
                         break
             else:
                 # A full pass executed nothing -- all local jobs done.
                 again = False
         # Clear the incomplete flag now that all local jobs have run.
         manager.state.metadata.flags = []
         manager.checkpoint()
     finally:
         manager.destroy()
Example #3
0
 def run(self):
     """
     Create a session, select jobs and run them all.

     :returns:
         0 on completion (FIXME below notes the value is not meaningful).
     :raises SystemExit:
         When the job database contains two jobs with the same name.
     """
     # Compute the run list, this can give us notification about problems in
     # the selected jobs. Currently we just display each problem
     # Create a session that handles most of the stuff needed to run jobs
     try:
         self.session = SessionState(self.job_list)
     except DependencyDuplicateError as exc:
         # Handle possible DependencyDuplicateError that can happen if
         # someone is using plainbox for job development.
         print("The job database you are currently using is broken")
         print("At least two jobs contend for the name {0}".format(
             exc.job.id))
         print("First job defined in: {0}".format(exc.job.origin))
         print("Second job defined in: {0}".format(
             exc.duplicate_job.origin))
         raise SystemExit(exc)
     with self.session.open():
         self._set_job_selection()
         # The command IO delegate is this object itself.
         self.runner = JobRunner(self.session.session_dir,
                                 self.provider_list,
                                 self.session.jobs_io_log_dir,
                                 command_io_delegate=self,
                                 dry_run=self.ns.dry_run)
         self._run_all_jobs()
         # Only save locally when a fallback file was configured.
         if self.config.fallback_file is not Unset:
             self._save_results()
         self._submit_results()
     # FIXME: sensible return value
     return 0
Example #4
0
 def _run_jobs(self, ns, manager):
     """
     Execute the selected jobs within *manager*'s session.

     A JobRunner is rooted at the session storage location; unless the
     command runs in local-only mode the results are saved afterwards.
     """
     storage_dir = manager.storage.location
     io_log_dir = os.path.join(storage_dir, 'io-logs')
     job_runner = JobRunner(
         storage_dir, self.provider_list, io_log_dir,
         command_io_delegate=self)
     self._run_jobs_with_session(ns, manager, job_runner)
     if self._local_only:
         return
     self.save_results(manager)
Example #5
0
 def run(self):
     """
     Run the selected special job inside a scratch directory.

     Returns 126 when the job does not exist, 125 when it has no
     command, otherwise the exit code of the job's command.
     """
     job = self._get_job()
     if job is None:
         print(_("There is no job called {!a}").format(self.job_id))
         print(_("See `plainbox special --list-jobs` for a list of choices"))
         return 126
     if job.command is None:
         print(_("Selected job does not have a command"))
         return 125
     with TemporaryDirectory() as scratch:
         with TemporaryDirectory() as iologs:
             runner = JobRunner(scratch, self.provider_list, iologs)
             # Run inside a bait directory so that files the job drops in
             # its current directory can be spotted afterwards.
             bait_dir = os.path.join(scratch, "files-created-in-current-dir")
             os.mkdir(bait_dir)
             with TestCwd(bait_dir):
                 return_code, record_path = runner._run_command(
                     job, self.config)
             self._display_side_effects(scratch)
             self._display_script_outcome(job, return_code)
     return return_code
Example #6
0
 def run(self):
     """
     Run the selected special job in a temporary scratch directory.

     Returns 126 when the job cannot be found, 125 when it has no
     command, otherwise the exit code of the job's command.
     """
     job = self._get_job()
     if job is None:
         print("There is no job called {!a}".format(self.job_name))
         print("See `plainbox special --list-jobs` for a list of choices")
         return 126
     if job.command is None:
         print("Selected job does not have a command")
         return 125
     with TemporaryDirectory() as scratch:
         with TemporaryDirectory() as iologs:
             runner = JobRunner(scratch, iologs)
             # Run the command from a bait directory so stray files it
             # creates in the current directory can be detected.
             bait_dir = os.path.join(scratch, 'files-created-in-current-dir')
             os.mkdir(bait_dir)
             with TestCwd(bait_dir):
                 return_code, record_path = runner._run_command(
                     job, self.config)
             self._display_side_effects(scratch)
             self._display_script_outcome(job, return_code)
     return return_code
Example #7
0
 def run(self):
     """
     Run the selected special job and display its outcome.

     :returns:
         126 when the job cannot be found, 125 when it has no command,
         otherwise the exit code of the job's command.
     """
     job = self._get_job()
     if job is None:
         print(_("There is no job called {!a}").format(self.job_id))
         print(
             _("See `plainbox special --list-jobs` for a list of choices"))
         return 126
     elif job.command is None:
         print(_("Selected job does not have a command"))
         return 125
     with TemporaryDirectory() as scratch, TemporaryDirectory() as iologs:
         runner = JobRunner(scratch, self.provider_list, iologs)
         job_state = JobState(job)
         # Pick the execution controller the runner would use for this job.
         ctrl = runner._get_ctrl_for_job(job)
         # Collect leftover files via our own handler instead of the
         # runner's default logging.
         runner.log_leftovers = False
         runner.on_leftover_files.connect(self._on_leftover_files)
         return_code, record_path = runner._run_command(
             job, job_state, self.config, ctrl)
         self._display_script_outcome(job, return_code)
     return return_code
Example #8
0
 def _run(self, session, job, running_job_wrapper):
     """
     Start a JobRunner in a separate thread.

     Runs *job* when its readiness state allows it; otherwise a
     'not supported' result carrying the readiness description is
     synthesized. Any non-None result is reported through the wrapper's
     result callback.
     """
     wrapper = running_job_wrapper
     job_runner = JobRunner(
         session.session_dir, session.jobs_io_log_dir,
         command_io_delegate=wrapper.ui_io_delegate,
         interaction_callback=wrapper.emitAskForOutcomeSignal)
     state = session.job_state_map[job.name]
     if not state.can_start():
         result = MemoryJobResult({
             'outcome': IJobResult.OUTCOME_NOT_SUPPORTED,
             'comments': state.get_readiness_description(),
         })
     else:
         result = job_runner.run_job(job)
     if result is not None:
         wrapper.update_job_result_callback(job, result)
Example #9
0
 def test_user_env_without_environ_keys(self):
     """Variables from os.environ must survive when only_changes=False."""
     with patch.dict('os.environ', {'foo': 'bar'}):
         job = JobDefinition({'name': 'name', 'plugin': 'plugin'})
         # The provider contributes no extra PYTHONPATH / PATH entries.
         provider = Mock()
         provider.extra_PYTHONPATH = None
         provider.extra_PATH = ""
         job._provider = provider
         env = JobRunner._get_script_env(Mock(), job, only_changes=False)
         self.assertIn("foo", env)
Example #10
0
 def _run(self, session, job, running_job_wrapper):
     """
     Start a JobRunner in a separate thread
     """
     # Route command IO and interaction prompts through the wrapper so the
     # UI can display output and ask for the test outcome.
     runner = JobRunner(
         session.session_dir,
         session.jobs_io_log_dir,
         command_io_delegate=running_job_wrapper.ui_io_delegate,
         interaction_callback=running_job_wrapper.emitAskForOutcomeSignal)
     job_state = session.job_state_map[job.name]
     if job_state.can_start():
         job_result = runner.run_job(job)
     else:
         # Readiness not met: synthesize a 'not supported' result that
         # carries the human-readable reason.
         job_result = MemoryJobResult({
             'outcome':
             IJobResult.OUTCOME_NOT_SUPPORTED,
             'comments':
             job_state.get_readiness_description()
         })
     if job_result is not None:
         running_job_wrapper.update_job_result_callback(job, job_result)
Example #11
0
 def test_user_env_without_environ_keys(self):
     """Variables from os.environ must survive when only_changes=False."""
     with patch.dict('os.environ', {'foo': 'bar'}):
         job = JobDefinition({
             'name': 'name',
             'plugin': 'plugin',
         })
         # The provider contributes no extra PYTHONPATH / PATH entries.
         job._provider = Mock()
         job._provider.extra_PYTHONPATH = None
         job._provider.extra_PATH = ""
         self.assertIn(
             "foo",
             JobRunner._get_script_env(Mock(), job, only_changes=False))
Example #12
0
 def test_root_env_without_environ_keys(self):
     """Variables from os.environ must be dropped when only_changes=True."""
     with patch.dict('os.environ', {'foo': 'bar'}):
         job = JobDefinition({
             'name': 'name',
             'plugin': 'plugin',
             'user': '******',
         })
         # The checkbox provider contributes no extra path entries.
         checkbox = Mock()
         checkbox.extra_PYTHONPATH = None
         checkbox.extra_PATH = ""
         job._checkbox = checkbox
         env = JobRunner._get_script_env(Mock(), job, only_changes=True)
         self.assertNotIn("foo", env)
 def test_get_warm_up_sequence(self):
     """A warm-up function shared by several jobs appears exactly once."""
     # create a mocked execution controller
     ctrl = Mock(spec_set=IExecutionController, name='ctrl')
     # create a fake warm up function
     warm_up_func = Mock(name='warm_up_func')
     # make the execution controller accept any job
     ctrl.get_score.return_value = 1
     # make the execution controller return warm_up_func as warm-up
     ctrl.get_warm_up_for_job.return_value = warm_up_func
     # make a pair of mock jobs for our controller to see
     job1 = Mock(spec_set=IJobDefinition, name='job1')
     job2 = Mock(spec_set=IJobDefinition, name='job2')
     with TemporaryDirectory() as session_dir:
         # Create a real runner with a fake execution controller, empty list
         # of providers and fake io-log directory.
         runner = JobRunner(session_dir,
                            provider_list=[],
                            jobs_io_log_dir=os.path.join(
                                session_dir, 'io-log'),
                            execution_ctrl_list=[ctrl])
         # Ensure that we got the warm up function we expected
         self.assertEqual(runner.get_warm_up_sequence([job1, job2]),
                          [warm_up_func])
    def create_runner(self):
        """
        Create a job runner.

        This sets the :attr:`_runner` attribute which enables the
        :meth:`runner` property.

        Requires the manager to be created (we need the storage object)
        """
        self._runner = JobRunner(
            self.storage.location,
            self.provider_list,
            # TODO: tie this with well-known-dirs helper
            os.path.join(self.storage.location, 'io-logs'),
            command_io_delegate=self,
            dry_run=self.ns.dry_run)
Example #15
0
 def _run_local_jobs(self):
     """
     Run every 'local' job in the session that has no outcome yet.

     Local jobs can generate additional jobs, so the run list is
     re-scanned from the beginning after each executed job until a full
     pass finds nothing left to run.
     """
     print("[Running Local Jobs]".center(80, '='))
     with self.session.open():
         runner = JobRunner(
             self.session.session_dir, self.session.jobs_io_log_dir,
             command_io_delegate=self, interaction_callback=None)
         again = True
         while again:
             for job in self.session.run_list:
                 if job.plugin == 'local':
                     if self.session.job_state_map[job.name].result.outcome is None:
                         self._run_local_job(runner, job)
                         # Restart the scan: the run list may have grown.
                         break
             else:
                 # A full pass executed nothing -- all local jobs done.
                 again = False
Example #16
0
    def __init__(self, service, session, provider_list, job):
        """
        Initialize a primed job.

        This should not be called by applications.
        Please call :meth:`Service.prime_job()` instead.
        """
        self._service = service
        self._session = session
        self._provider_list = provider_list
        self._job = job
        # Runner used later to actually execute the primed job.
        self._runner = JobRunner(
            session.session_dir,
            self._provider_list,
            session.jobs_io_log_dir,
            # Pass a dummy IO delegate, we don't want to get any tracing here
            # Later on this could be configurable but it's better if it's
            # simple and limited rather than complete but broken somehow.
            command_io_delegate=self)
Example #17
0
class PlainBox:
    """
    High-level plainbox object
    """

    def __init__(self):
        # Collaborators: job provider, resource evaluation context, scratch
        # storage and the runner that ties them together.
        self._checkbox = CheckBox()
        self._context = ResourceContext()
        self._scratch = Scratch()
        self._runner = JobRunner(self._checkbox, self._context, self._scratch)

    def main(self, argv=None):
        """
        Command line entry point: parse *argv*, load job definitions and
        either list them or run the ones matching the requested patterns.
        """
        basicConfig(level="WARNING")
        # TODO: setup sane logging system that works just as well for Joe user
        # that runs checkbox from the CD as well as for checkbox developers and
        # custom debugging needs.  It would be perfect^Hdesirable not to create
        # another broken, never-rotated, uncapped logging crap that kills my
        # SSD by writing junk to ~/.cache/
        parser = ArgumentParser(prog="plainbox")
        parser.add_argument(
            "-v", "--version", action="version",
            version="{}.{}.{}".format(*version[:3]))
        parser.add_argument(
            "-l", "--log-level", action="store",
            choices=('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'),
            help="Set logging level")
        group = parser.add_argument_group(title="user interface options")
        group.add_argument(
            "-u", "--ui", action="store",
            default=None, choices=('headless', 'text', 'graphics'),
            help="select the UI front-end (defaults to auto)")
        group = parser.add_argument_group(title="job definition options")
        group.add_argument(
            "--load-extra", action="append",
            metavar="FILE", default=[],
            help="Load extra job definitions from FILE",
            type=FileType("rt"))
        group.add_argument(
            '-r', '--run-pattern', action="append",
            metavar='PATTERN', default=[],
            help="Run jobs matching the given pattern")
        group.add_argument(
            '--list-jobs', help="List all jobs",
            action="store_true")
        ns = parser.parse_args(argv)
        # Set the desired log level
        if ns.log_level:
            getLogger("").setLevel(ns.log_level)
        # Load built-in job definitions
        job_list = self.get_builtin_jobs()
        # Load additional job definitions
        job_list.extend(self._load_jobs(ns.load_extra))
        if ns.list_jobs:
            print("Available jobs:")
            for job in job_list:
                print(" - {}".format(job))
        else:
            # And run them
            with self._scratch:
                return self.run(
                    run_pattern_list=ns.run_pattern,
                    job_list=job_list,
                    ui=ns.ui)

    def run(self, run_pattern_list, job_list, **kwargs):
        """
        Run all jobs from *job_list* whose name matches any pattern in
        *run_pattern_list*.

        Resource jobs required by the matching jobs are gathered and run
        first; jobs whose resources are missing or misdeclared are dropped
        with a diagnostic message.
        """
        job_map = {job.name: job for job in job_list}
        matching_job_list = []
        # Find jobs that matched patterns
        print("[ Searching for Matching Jobs ]".center(80, '='))
        for job in job_list:
            for pattern in run_pattern_list:
                if fnmatch(job.name, pattern):
                    matching_job_list.append(job)
                    break
        print("Matching jobs: {}".format(
            ', '.join((job.name for job in matching_job_list))))
        # Compute required resources
        print("[ Analyzing Jobs ]".center(80, '='))
        needed_resource_jobs = set()
        resource_job_list = []
        # Iterate over a snapshot: jobs with resource problems are removed
        # from matching_job_list while we scan it.
        for job in list(matching_job_list):
            prog = job.get_resource_program()
            if prog is None:
                continue
            for resource_name in prog.required_resources:
                if resource_name in needed_resource_jobs:
                    continue
                needed_resource_jobs.add(resource_name)
                try:
                    required_job = job_map[resource_name]
                except KeyError:
                    print("Unable to find resource {!r} required by job"
                          " {}".format(resource_name, job))
                    print("Job {} will not run".format(job))
                    # Guard against removing the same job twice when it has
                    # several broken resource references.
                    if job in matching_job_list:
                        matching_job_list.remove(job)
                    # required_job is unbound on this path; skip the plugin
                    # check below (previously a NameError).
                    continue
                if required_job.plugin != "resource":
                    print("Job {} references resource {!r} but job {} uses"
                          " non-resource plugin {!r}".format(
                              job, resource_name, required_job,
                              required_job.plugin))
                    print("Job {} will not run".format(job))
                    if job in matching_job_list:
                        matching_job_list.remove(job)
                else:
                    resource_job_list.append(required_job)
        # Resolve dependencies in resource jobs
        # XXX: not implemented
        print("Required resource jobs: {}".format(
            ', '.join((job.name for job in resource_job_list))))
        # Run resource jobs
        print("[ Gathering Resources ]".center(80, '='))
        if not resource_job_list:
            print("No resource jobs required")
        else:
            self._run_jobs(resource_job_list)
        # Run non-resource jobs
        result_list = []
        other_job_list = [
            job
            for job in matching_job_list
            if job.plugin != "resource"]
        print("[ Testing ]".center(80, '='))
        if not other_job_list:
            print("No jobs selected")
        else:
            result_list = self._run_jobs(other_job_list)
            print("[ Results ]".center(80, '='))
            for result in result_list:
                print(" * {}: {}".format(
                    result.job.name, result.outcome))

    def get_builtin_jobs(self):
        """Return the list of job definitions built into CheckBox."""
        logger.debug("Loading built-in jobs...")
        return self._load_builtin_jobs()

    def save(self, something, somewhere):
        """Persist session state (not implemented)."""
        raise NotImplementedError()

    def load(self, somewhere):
        """
        Load job definitions from *somewhere* -- a filename or an open
        text stream -- and return them as a list.

        :raises TypeError: for unsupported *somewhere* types.
        """
        if isinstance(somewhere, str):
            # Load data from a file with the given name
            filename = somewhere
            with open(filename, 'rt', encoding='UTF-8') as stream:
                # NOTE(review): this calls a module-level ``load`` -- it looks
                # like it was meant to recurse via self.load(stream); confirm
                # against the module's imports before changing.
                return load(stream)
        if isinstance(somewhere, TextIOWrapper):
            stream = somewhere
            logger.debug("Loading jobs definitions from %r...", stream.name)
            record_list = load_rfc822_records(stream)
            job_list = []
            for record in record_list:
                job = JobDefinition.from_rfc822_record(record)
                logger.debug("Loaded %r", job)
                job_list.append(job)
            return job_list
        else:
            raise TypeError(
                "Unsupported type of 'somewhere': {!r}".format(
                    type(somewhere)))

    def _run_jobs(self, job_list):
        """
        Run each job in *job_list*, printing a summary before each one
        starts; return the list of collected results.
        """
        result_list = []
        for job in job_list:
            print("[ {} ]".format(job.name).center(80, '-'))
            if job.description:
                print()
                print(job.description)
                print()
                print("_" * 80)
            print(" * job attributes set: {}".format(
                ", ".join((attr for attr in job._data))))
            print(" * job type: {}".format(job.plugin))
            if job.command:
                print(" * job command: {!r}".format(job.command))
            if job.depends is not None:
                print(" * job dependencies: {}".format(', '.join(job.depends)))
            prog = job.get_resource_program()
            if prog:
                met = prog.evaluate(self._context.resources)
                print(" * job requirements: {}".format(
                    "met" if met else "not met"))
                for expression in prog.expression_list:
                    print("   - {}".format(expression.text))
            try:
                print(" * starting job... ", end="")
                result = self._runner.run_job(job)
            except NotImplementedError:
                print("error")
                logger.exception("Something was not implemented fully")
            else:
                print("done")
                if result is not None:
                    result_list.append(result)
                elif job.plugin == "resource":
                    # Resource jobs legitimately produce no result object.
                    pass
                else:
                    logger.warning("Job %s did not return a result", job)
        return result_list

    def _load_jobs(self, source_list):
        """
        Load jobs from the list of sources
        """
        job_list = []
        for source in source_list:
            job_list.extend(self.load(source))
        return job_list

    def _load_builtin_jobs(self):
        """
        Load jobs from built into CheckBox
        """
        return self._load_jobs([
            join(self._checkbox.jobs_dir, name)
            for name in listdir(self._checkbox.jobs_dir)
            if name.endswith(".txt") or name.endswith(".txt.in")])
Example #18
0
 def __init__(self):
     """Create the provider, resource context, scratch area and runner."""
     checkbox = CheckBox()
     context = ResourceContext()
     scratch = Scratch()
     self._checkbox = checkbox
     self._context = context
     self._scratch = scratch
     self._runner = JobRunner(checkbox, context, scratch)
Example #19
0
 def __init__(self):
     """Create the provider, resource context, scratch area and runner."""
     self._checkbox = CheckBox()
     self._context = ResourceContext()
     self._scratch = Scratch()
     # The runner wires the three collaborators together.
     self._runner = JobRunner(self._checkbox, self._context, self._scratch)
Example #20
0
class PlainBox:
    """
    High-level plainbox object
    """
    def __init__(self):
        # Collaborators: job provider, resource evaluation context, scratch
        # storage and the runner that ties them together.
        self._checkbox = CheckBox()
        self._context = ResourceContext()
        self._scratch = Scratch()
        self._runner = JobRunner(self._checkbox, self._context, self._scratch)

    def main(self, argv=None):
        """
        Command line entry point: parse *argv*, load job definitions and
        either list them or run the ones matching the requested patterns.
        """
        basicConfig(level="WARNING")
        # TODO: setup sane logging system that works just as well for Joe user
        # that runs checkbox from the CD as well as for checkbox developers and
        # custom debugging needs.  It would be perfect^Hdesirable not to create
        # another broken, never-rotated, uncapped logging crap that kills my
        # SSD by writing junk to ~/.cache/
        parser = ArgumentParser(prog="plainbox")
        parser.add_argument("-v",
                            "--version",
                            action="version",
                            version="{}.{}.{}".format(*version[:3]))
        parser.add_argument("-l",
                            "--log-level",
                            action="store",
                            choices=('DEBUG', 'INFO', 'WARNING', 'ERROR',
                                     'CRITICAL'),
                            help="Set logging level")
        group = parser.add_argument_group(title="user interface options")
        group.add_argument("-u",
                           "--ui",
                           action="store",
                           default=None,
                           choices=('headless', 'text', 'graphics'),
                           help="select the UI front-end (defaults to auto)")
        group = parser.add_argument_group(title="job definition options")
        group.add_argument("--load-extra",
                           action="append",
                           metavar="FILE",
                           default=[],
                           help="Load extra job definitions from FILE",
                           type=FileType("rt"))
        group.add_argument('-r',
                           '--run-pattern',
                           action="append",
                           metavar='PATTERN',
                           default=[],
                           help="Run jobs matching the given pattern")
        group.add_argument('--list-jobs',
                           help="List all jobs",
                           action="store_true")
        ns = parser.parse_args(argv)
        # Set the desired log level
        if ns.log_level:
            getLogger("").setLevel(ns.log_level)
        # Load built-in job definitions
        job_list = self.get_builtin_jobs()
        # Load additional job definitions
        job_list.extend(self._load_jobs(ns.load_extra))
        if ns.list_jobs:
            print("Available jobs:")
            for job in job_list:
                print(" - {}".format(job))
        else:
            # And run them
            with self._scratch:
                return self.run(run_pattern_list=ns.run_pattern,
                                job_list=job_list,
                                ui=ns.ui)

    def run(self, run_pattern_list, job_list, **kwargs):
        """
        Run all jobs from *job_list* whose name matches any pattern in
        *run_pattern_list*.

        Resource jobs required by the matching jobs are gathered and run
        first; jobs whose resources are missing or misdeclared are dropped
        with a diagnostic message.
        """
        job_map = {job.name: job for job in job_list}
        matching_job_list = []
        # Find jobs that matched patterns
        print("[ Searching for Matching Jobs ]".center(80, '='))
        for job in job_list:
            for pattern in run_pattern_list:
                if fnmatch(job.name, pattern):
                    matching_job_list.append(job)
                    break
        print("Matching jobs: {}".format(', '.join(
            (job.name for job in matching_job_list))))
        # Compute required resources
        print("[ Analyzing Jobs ]".center(80, '='))
        needed_resource_jobs = set()
        resource_job_list = []
        # Iterate over a snapshot: jobs with resource problems are removed
        # from matching_job_list while we scan it.
        for job in list(matching_job_list):
            prog = job.get_resource_program()
            if prog is None:
                continue
            for resource_name in prog.required_resources:
                if resource_name in needed_resource_jobs:
                    continue
                needed_resource_jobs.add(resource_name)
                try:
                    required_job = job_map[resource_name]
                except KeyError:
                    print("Unable to find resource {!r} required by job"
                          " {}".format(resource_name, job))
                    print("Job {} will not run".format(job))
                    # Guard against removing the same job twice when it has
                    # several broken resource references.
                    if job in matching_job_list:
                        matching_job_list.remove(job)
                    # required_job is unbound on this path; skip the plugin
                    # check below (previously a NameError).
                    continue
                if required_job.plugin != "resource":
                    print("Job {} references resource {!r} but job {} uses"
                          " non-resource plugin {!r}".format(
                              job, resource_name, required_job,
                              required_job.plugin))
                    print("Job {} will not run".format(job))
                    if job in matching_job_list:
                        matching_job_list.remove(job)
                else:
                    resource_job_list.append(required_job)
        # Resolve dependencies in resource jobs
        # XXX: not implemented
        print("Required resource jobs: {}".format(', '.join(
            (job.name for job in resource_job_list))))
        # Run resource jobs
        print("[ Gathering Resources ]".center(80, '='))
        if not resource_job_list:
            print("No resource jobs required")
        else:
            self._run_jobs(resource_job_list)
        # Run non-resource jobs
        result_list = []
        other_job_list = [
            job for job in matching_job_list if job.plugin != "resource"
        ]
        print("[ Testing ]".center(80, '='))
        if not other_job_list:
            print("No jobs selected")
        else:
            result_list = self._run_jobs(other_job_list)
            print("[ Results ]".center(80, '='))
            for result in result_list:
                print(" * {}: {}".format(result.job.name, result.outcome))

    def get_builtin_jobs(self):
        """Return the list of job definitions built into CheckBox."""
        logger.debug("Loading built-in jobs...")
        return self._load_builtin_jobs()

    def save(self, something, somewhere):
        """Persist session state (not implemented)."""
        raise NotImplementedError()

    def load(self, somewhere):
        """
        Load job definitions from *somewhere* -- a filename or an open
        text stream -- and return them as a list.

        :raises TypeError: for unsupported *somewhere* types.
        """
        if isinstance(somewhere, str):
            # Load data from a file with the given name
            filename = somewhere
            with open(filename, 'rt', encoding='UTF-8') as stream:
                # NOTE(review): this calls a module-level ``load`` -- it looks
                # like it was meant to recurse via self.load(stream); confirm
                # against the module's imports before changing.
                return load(stream)
        if isinstance(somewhere, TextIOWrapper):
            stream = somewhere
            logger.debug("Loading jobs definitions from %r...", stream.name)
            record_list = load_rfc822_records(stream)
            job_list = []
            for record in record_list:
                job = JobDefinition.from_rfc822_record(record)
                logger.debug("Loaded %r", job)
                job_list.append(job)
            return job_list
        else:
            raise TypeError("Unsupported type of 'somewhere': {!r}".format(
                type(somewhere)))

    def _run_jobs(self, job_list):
        """
        Run each job in *job_list*, printing a summary before each one
        starts; return the list of collected results.
        """
        result_list = []
        for job in job_list:
            print("[ {} ]".format(job.name).center(80, '-'))
            if job.description:
                print()
                print(job.description)
                print()
                print("_" * 80)
            print(" * job attributes set: {}".format(", ".join(
                (attr for attr in job._data))))
            print(" * job type: {}".format(job.plugin))
            if job.command:
                print(" * job command: {!r}".format(job.command))
            if job.depends is not None:
                print(" * job dependencies: {}".format(', '.join(job.depends)))
            prog = job.get_resource_program()
            if prog:
                met = prog.evaluate(self._context.resources)
                print(" * job requirements: {}".format(
                    "met" if met else "not met"))
                for expression in prog.expression_list:
                    print("   - {}".format(expression.text))
            try:
                print(" * starting job... ", end="")
                result = self._runner.run_job(job)
            except NotImplementedError:
                print("error")
                logger.exception("Something was not implemented fully")
            else:
                print("done")
                if result is not None:
                    result_list.append(result)
                elif job.plugin == "resource":
                    # Resource jobs legitimately produce no result object.
                    pass
                else:
                    logger.warning("Job %s did not return a result", job)
        return result_list

    def _load_jobs(self, source_list):
        """
        Load jobs from the list of sources
        """
        job_list = []
        for source in source_list:
            job_list.extend(self.load(source))
        return job_list

    def _load_builtin_jobs(self):
        """
        Load jobs from built into CheckBox
        """
        return self._load_jobs([
            join(self._checkbox.jobs_dir, name)
            for name in listdir(self._checkbox.jobs_dir)
            if name.endswith(".txt") or name.endswith(".txt.in")
        ])
Example #21
0
    def _run_jobs(self, ns, job_list, exporter, transport=None):
        """
        Run the jobs matching *ns*, export the session and optionally
        send the exported data via *transport*.

        :returns:
            0 on completion (FIXME below notes the value is not
            meaningful).
        :raises SystemExit:
            On duplicate job ids in the database, or when the privilege
            warm-up fails.
        """
        # Compute the run list, this can give us notification about problems in
        # the selected jobs. Currently we just display each problem
        matching_job_list = self._get_matching_job_list(ns, job_list)
        print(_("[ Analyzing Jobs ]").center(80, '='))
        # Create a session that handles most of the stuff needed to run jobs
        try:
            session = SessionState(job_list)
        except DependencyDuplicateError as exc:
            # Handle possible DependencyDuplicateError that can happen if
            # someone is using plainbox for job development.
            print(_("The job database you are currently using is broken"))
            print(_("At least two jobs contend for the id {0}").format(
                exc.job.id))
            print(_("First job defined in: {0}").format(exc.job.origin))
            print(_("Second job defined in: {0}").format(
                exc.duplicate_job.origin))
            raise SystemExit(exc)
        with session.open():
            # Offer to resume an earlier interrupted session, if one exists.
            if session.previous_session_file():
                if self.ask_for_resume():
                    session.resume()
                    self._maybe_skip_last_job_after_resume(session)
                else:
                    session.clean()
            session.metadata.title = " ".join(sys.argv)
            session.persistent_save()
            self._update_desired_job_list(session, matching_job_list)
            # Ask the password before anything else in order to run jobs
            # requiring privileges
            if self._auth_warmup_needed(session):
                print(_("[ Authentication ]").center(80, '='))
                return_code = authenticate_warmup()
                if return_code:
                    raise SystemExit(return_code)
            runner = JobRunner(
                session.session_dir, self.provider_list,
                session.jobs_io_log_dir, dry_run=ns.dry_run)
            self._run_jobs_with_session(ns, session, runner)
            # Get a stream with exported session data.
            exported_stream = io.BytesIO()
            data_subset = exporter.get_session_data_subset(session)
            exporter.dump(data_subset, exported_stream)
            exported_stream.seek(0)  # Need to rewind the file, puagh
            # Write the stream to file if requested
            self._save_results(ns.output_file, exported_stream)
            # Invoke the transport?
            if transport:
                exported_stream.seek(0)
                try:
                    transport.send(exported_stream.read())
                except InvalidSchema as exc:
                    print(_("Invalid destination URL: {0}").format(exc))
                except ConnectionError as exc:
                    print(_("Unable to connect "
                            "to destination URL: {0}").format(exc))
                except HTTPError as exc:
                    print(_("Server returned an error when "
                            "receiving or processing: {0}").format(exc))

        # FIXME: sensible return value
        return 0