Example #1
0
 def run(self):
     """
     Build a session from :attr:`job_list`, run every job, then save and
     submit the results.

     Raises SystemExit (wrapping the original exception) when two jobs in
     the job list share the same name.

     :returns: 0 (see FIXME below -- no meaningful value yet)
     """
     # Compute the run list, this can give us notification about problems in
     # the selected jobs. Currently we just display each problem
     # Create a session that handles most of the stuff needed to run jobs
     try:
         self.session = SessionState(self.job_list)
     except DependencyDuplicateError as exc:
         # Handle possible DependencyDuplicateError that can happen if
         # someone is using plainbox for job development.
         print("The job database you are currently using is broken")
         print("At least two jobs contend for the name {0}".format(
             exc.job.id))
         print("First job defined in: {0}".format(exc.job.origin))
         print("Second job defined in: {0}".format(
             exc.duplicate_job.origin))
         raise SystemExit(exc)
     with self.session.open():
         self._set_job_selection()
         self.runner = JobRunner(self.session.session_dir,
                                 self.provider_list,
                                 self.session.jobs_io_log_dir,
                                 command_io_delegate=self,
                                 dry_run=self.ns.dry_run)
         self._run_all_jobs()
         # Persist results to disk only when a fallback file is configured.
         if self.config.fallback_file is not Unset:
             self._save_results()
         self._submit_results()
     # FIXME: sensible return value
     return 0
Example #2
0
 def _run_local_jobs(self):
     """
     Run every job with plugin type 'local' from the session run list.

     Jobs are executed one at a time; after each one the run list is
     re-scanned from the start (presumably because running a local job can
     alter the run list -- TODO confirm).  The session is checkpointed
     before and after the loop and the manager is destroyed on the way out
     even if a job raises.
     """
     print(_("[Running Local Jobs]").center(80, '='))
     manager = SessionManager.create_with_state(self.session)
     try:
         manager.state.metadata.title = "plainbox dev analyze session"
         manager.state.metadata.flags = [SessionMetaData.FLAG_INCOMPLETE]
         manager.checkpoint()
         runner = JobRunner(manager.storage.location,
                            self.provider_list,
                            os.path.join(manager.storage.location,
                                         'io-logs'),
                            command_io_delegate=self)
         again = True
         while again:
             for job in self.session.run_list:
                 if job.plugin == 'local':
                     # Only run jobs that have no result yet.
                     if self.session.job_state_map[
                             job.id].result.outcome is None:
                         self._run_local_job(manager, runner, job)
                         # Re-scan the (possibly changed) run list.
                         break
             # for-else: no local job was started, so nothing is left.
             else:
                 again = False
         manager.state.metadata.flags = []
         manager.checkpoint()
     finally:
         manager.destroy()
Example #3
0
 def _run_jobs(self, ns, manager):
     """
     Create a runner bound to *manager*'s storage and execute the jobs.

     Results are saved afterwards unless this instance is local-only.
     """
     io_log_dir = os.path.join(manager.storage.location, 'io-logs')
     runner = JobRunner(manager.storage.location, self.provider_list,
                        io_log_dir, command_io_delegate=self)
     self._run_jobs_with_session(ns, manager, runner)
     if self._local_only:
         return
     self.save_results(manager)
    def create_runner(self):
        """
        Create a job runner.

        This sets the :attr:`_runner` which enables :meth:`runner` property.

        Requires the manager to be created (we need the storage object).
        The runner honours the ``--dry-run`` command line switch.
        """
        self._runner = JobRunner(
            self.storage.location,
            self.provider_list,
            # TODO: tie this with well-known-dirs helper
            os.path.join(self.storage.location, 'io-logs'),
            command_io_delegate=self,
            dry_run=self.ns.dry_run)
Example #5
0
 def _run_local_jobs(self):
     """
     Run every job with plugin type 'local' from the session run list.

     After each job the run list is re-scanned from the start (presumably
     because running a local job can alter the run list -- TODO confirm)
     until no unstarted local job remains.
     """
     print("[Running Local Jobs]".center(80, '='))
     with self.session.open():
         runner = JobRunner(
             self.session.session_dir, self.session.jobs_io_log_dir,
             command_io_delegate=self, interaction_callback=None)
         again = True
         while again:
             for job in self.session.run_list:
                 if job.plugin == 'local':
                     # Only run jobs that have no result yet.
                     if self.session.job_state_map[job.name].result.outcome is None:
                         self._run_local_job(runner, job)
                         # Re-scan the (possibly changed) run list.
                         break
             # for-else: no local job was started, so nothing is left.
             else:
                 again = False
Example #6
0
    def __init__(self, service, session, provider_list, job):
        """
        Initialize a primed job.

        This should not be called by applications.
        Please call :meth:`Service.prime_job()` instead.

        :param service:
            The service this primed job belongs to.
        :param session:
            Session object; its ``session_dir`` and ``jobs_io_log_dir``
            are handed to the :class:`JobRunner`.
        :param provider_list:
            List of providers passed on to the :class:`JobRunner`.
        :param job:
            The job to run.
        """
        self._service = service
        self._session = session
        self._provider_list = provider_list
        self._job = job
        self._runner = JobRunner(
            session.session_dir,
            self._provider_list,
            session.jobs_io_log_dir,
            # Pass a dummy IO delegate, we don't want to get any tracing here
            # Later on this could be configurable but it's better if it's
            # simple and limited rather than complete but broken somehow.
            command_io_delegate=self)
Example #7
0
 def run(self):
     """
     Run the selected job in throw-away directories and report on it.

     Returns 126 when the job does not exist, 125 when it has no command,
     otherwise the return code of the job's command.
     """
     job = self._get_job()
     # Guard clauses: the job must exist and must have something to run.
     if job is None:
         print("There is no job called {!a}".format(self.job_name))
         print("See `plainbox special --list-jobs` for a list of choices")
         return 126
     if job.command is None:
         print("Selected job does not have a command")
         return 125
     with TemporaryDirectory() as scratch, TemporaryDirectory() as iologs:
         job_runner = JobRunner(scratch, iologs)
         # Execute from a decoy working directory so that files the
         # command drops into the current directory can be shown later.
         decoy_cwd = os.path.join(scratch, 'files-created-in-current-dir')
         os.mkdir(decoy_cwd)
         with TestCwd(decoy_cwd):
             return_code, record_path = job_runner._run_command(
                 job, self.config)
         self._display_side_effects(scratch)
         self._display_script_outcome(job, return_code)
     return return_code
Example #8
0
 def run(self):
     """
     Run the selected job in throw-away directories and report on it.

     :returns:
         126 when the job does not exist, 125 when it has no command,
         otherwise the return code of the job's command.
     """
     job = self._get_job()
     if job is None:
         print(_("There is no job called {!a}").format(self.job_id))
         print(
             _("See `plainbox special --list-jobs` for a list of choices"))
         return 126
     elif job.command is None:
         print(_("Selected job does not have a command"))
         return 125
     with TemporaryDirectory() as scratch, TemporaryDirectory() as iologs:
         runner = JobRunner(scratch, self.provider_list, iologs)
         job_state = JobState(job)
         ctrl = runner._get_ctrl_for_job(job)
         # Report leftover files through our own handler instead of the
         # runner's default logging.
         runner.log_leftovers = False
         runner.on_leftover_files.connect(self._on_leftover_files)
         return_code, record_path = runner._run_command(
             job, job_state, self.config, ctrl)
         self._display_script_outcome(job, return_code)
     return return_code
Example #9
0
 def _run(self, session, job, running_job_wrapper):
     """
     Start a JobRunner in a separate thread

     NOTE(review): nothing in this body spawns a thread -- presumably the
     caller invokes this method on a worker thread; confirm.
     """
     runner = JobRunner(
         session.session_dir,
         session.jobs_io_log_dir,
         command_io_delegate=running_job_wrapper.ui_io_delegate,
         interaction_callback=running_job_wrapper.emitAskForOutcomeSignal)
     job_state = session.job_state_map[job.name]
     if job_state.can_start():
         job_result = runner.run_job(job)
     else:
         # The job cannot start; record a not-supported result carrying
         # the readiness explanation instead of running it.
         job_result = MemoryJobResult({
             'outcome':
             IJobResult.OUTCOME_NOT_SUPPORTED,
             'comments':
             job_state.get_readiness_description()
         })
     # run_job() can apparently yield None (no immediate result); only
     # propagate concrete results. NOTE(review): confirm with JobRunner.
     if job_result is not None:
         running_job_wrapper.update_job_result_callback(job, job_result)
 def test_get_warm_up_sequence(self):
     """
     get_warm_up_sequence() collects the warm-up function advertised by
     the execution controller once, even across multiple jobs.
     """
     # create a mocked execution controller
     ctrl = Mock(spec_set=IExecutionController, name='ctrl')
     # create a fake warm up function
     warm_up_func = Mock(name='warm_up_func')
     # make the execution controller accept any job
     ctrl.get_score.return_value = 1
     # make the execution controller return warm_up_func as warm-up
     ctrl.get_warm_up_for_job.return_value = warm_up_func
     # make a pair of mock jobs for our controller to see
     job1 = Mock(spec_set=IJobDefinition, name='job1')
     job2 = Mock(spec_set=IJobDefinition, name='job2')
     with TemporaryDirectory() as session_dir:
         # Create a real runner with a fake execution controller, empty list
         # of providers and fake io-log directory.
         runner = JobRunner(session_dir,
                            provider_list=[],
                            jobs_io_log_dir=os.path.join(
                                session_dir, 'io-log'),
                            execution_ctrl_list=[ctrl])
         # Ensure that we got the warm up function we expected
         self.assertEqual(runner.get_warm_up_sequence([job1, job2]),
                          [warm_up_func])
Example #11
0
 def __init__(self):
     """Set up the checkbox context and a runner wired to all three."""
     checkbox = CheckBox()
     context = ResourceContext()
     scratch = Scratch()
     # Keep references to the collaborators, then hand the very same
     # objects to the runner.
     self._checkbox = checkbox
     self._context = context
     self._scratch = scratch
     self._runner = JobRunner(checkbox, context, scratch)
Example #12
0
    def _run_jobs(self, ns, job_list, exporter, transport=None):
        """
        Run the jobs matching *ns* from *job_list*, export and deliver the
        results.

        Creates a session (offering to resume a previous one), warms up
        authentication when needed, runs the jobs, then exports the session
        with *exporter* and optionally sends it via *transport*.

        :param ns: parsed command line namespace (dry_run, output_file, ...)
        :param job_list: full list of known jobs
        :param exporter: session exporter used to serialize the results
        :param transport: optional transport used to send the exported data
        :returns: 0 (see FIXME below); exits via SystemExit on duplicate
            jobs or failed authentication warm-up
        """
        # Compute the run list, this can give us notification about problems in
        # the selected jobs. Currently we just display each problem
        matching_job_list = self._get_matching_job_list(ns, job_list)
        print(_("[ Analyzing Jobs ]").center(80, '='))
        # Create a session that handles most of the stuff needed to run jobs
        try:
            session = SessionState(job_list)
        except DependencyDuplicateError as exc:
            # Handle possible DependencyDuplicateError that can happen if
            # someone is using plainbox for job development.
            print(_("The job database you are currently using is broken"))
            print(_("At least two jobs contend for the id {0}").format(
                exc.job.id))
            print(_("First job defined in: {0}").format(exc.job.origin))
            print(_("Second job defined in: {0}").format(
                exc.duplicate_job.origin))
            raise SystemExit(exc)
        with session.open():
            # Offer to resume an earlier session if one is on disk.
            if session.previous_session_file():
                if self.ask_for_resume():
                    session.resume()
                    self._maybe_skip_last_job_after_resume(session)
                else:
                    session.clean()
            session.metadata.title = " ".join(sys.argv)
            session.persistent_save()
            self._update_desired_job_list(session, matching_job_list)
            # Ask the password before anything else in order to run jobs
            # requiring privileges
            if self._auth_warmup_needed(session):
                print(_("[ Authentication ]").center(80, '='))
                return_code = authenticate_warmup()
                if return_code:
                    raise SystemExit(return_code)
            runner = JobRunner(
                session.session_dir, self.provider_list,
                session.jobs_io_log_dir, dry_run=ns.dry_run)
            self._run_jobs_with_session(ns, session, runner)
            # Get a stream with exported session data.
            exported_stream = io.BytesIO()
            data_subset = exporter.get_session_data_subset(session)
            exporter.dump(data_subset, exported_stream)
            exported_stream.seek(0)  # Need to rewind the file, puagh
            # Write the stream to file if requested
            self._save_results(ns.output_file, exported_stream)
            # Invoke the transport?
            if transport:
                exported_stream.seek(0)
                try:
                    transport.send(exported_stream.read())
                except InvalidSchema as exc:
                    print(_("Invalid destination URL: {0}").format(exc))
                except ConnectionError as exc:
                    print(_("Unable to connect "
                            "to destination URL: {0}").format(exc))
                except HTTPError as exc:
                    print(_("Server returned an error when "
                            "receiving or processing: {0}").format(exc))

        # FIXME: sensible return value
        return 0