def __init__(self, ns, config):
    """
    Prepare one SRU invocation: remember arguments, load the built-in
    jobs and the sru whitelist, and create the XML exporter.
    """
    self.ns = ns
    self.config = config
    self.checkbox = CheckBox()
    # The sru whitelist shipped with checkbox narrows the built-in job set.
    whitelist_path = os.path.join(
        self.checkbox.whitelists_dir, "sru.whitelist")
    self.whitelist = WhiteList.from_file(whitelist_path)
    self.job_list = self.checkbox.get_builtin_jobs()
    # XXX: maybe allow specifying system_id from command line?
    self.exporter = XMLSessionStateExporter(system_id=None)
    # Session and runner are created later, once a run actually starts.
    self.session = None
    self.runner = None
def __init__(self):
    """Create the helper objects and a job runner wired to all of them."""
    checkbox = CheckBox()
    context = ResourceContext()
    scratch = Scratch()
    self._checkbox = checkbox
    self._context = context
    self._scratch = scratch
    # The runner shares the exact same helpers we keep as attributes.
    self._runner = JobRunner(checkbox, context, scratch)
class _SRUInvocation:
    """
    Helper class instantiated to perform a particular invocation of the sru
    command.

    Unlike the SRU command itself, this class is instantiated each time.
    """

    def __init__(self, ns, config):
        # ns: parsed command-line namespace (only .dry_run is read here);
        # config: configuration object providing fallback_file, c3_url and
        # secure_id (read by _save_results / _submit_results below).
        self.ns = ns
        self.checkbox = CheckBox()
        self.config = config
        # Built-in jobs filtered through the shipped sru whitelist drive the
        # desired-job selection (see _set_job_selection).
        self.whitelist = WhiteList.from_file(os.path.join(
            self.checkbox.whitelists_dir, "sru.whitelist"))
        self.job_list = self.checkbox.get_builtin_jobs()
        # XXX: maybe allow specifying system_id from command line?
        self.exporter = XMLSessionStateExporter(system_id=None)
        # Created lazily in run(); None until then.
        self.session = None
        self.runner = None

    def run(self):
        """
        Run all selected jobs, save results to the fallback file if one is
        configured, then submit them to the certification website.

        Returns 0 (currently unconditionally — see FIXME below).
        """
        # Compute the run list, this can give us notification about problems in
        # the selected jobs. Currently we just display each problem
        # Create a session that handles most of the stuff needed to run jobs
        try:
            self.session = SessionState(self.job_list)
        except DependencyDuplicateError as exc:
            # Handle possible DependencyDuplicateError that can happen if
            # someone is using plainbox for job development.
            print("The job database you are currently using is broken")
            print("At least two jobs contend for the name {0}".format(
                exc.job.name))
            print("First job defined in: {0}".format(exc.job.origin))
            print("Second job defined in: {0}".format(
                exc.duplicate_job.origin))
            raise SystemExit(exc)
        with self.session.open():
            self._set_job_selection()
            # NOTE(review): runner is built only after the session is open,
            # presumably because session_dir/jobs_io_log_dir exist only
            # then — confirm against SessionState.open().
            self.runner = JobRunner(
                self.session.session_dir,
                self.session.jobs_io_log_dir,
                command_io_delegate=self,
                outcome_callback=None,  # SRU runs are never interactive
                dry_run=self.ns.dry_run
            )
            self._run_all_jobs()
            # Results are only written to disk when a fallback file was
            # configured; submission to the server happens regardless.
            if self.config.fallback_file is not Unset:
                self._save_results()
            self._submit_results()
        # FIXME: sensible return value
        return 0

    def _set_job_selection(self):
        """
        Update the session's desired job list to the whitelist-matching
        subset of self.job_list, warning about (and skipping) any jobs the
        session reports as problematic.
        """
        desired_job_list = get_matching_job_list(self.job_list, self.whitelist)
        problem_list = self.session.update_desired_job_list(desired_job_list)
        if problem_list:
            logger.warning("There were some problems with the selected jobs")
            for problem in problem_list:
                logger.warning("- %s", problem)
            logger.warning("Problematic jobs will not be considered")

    def _save_results(self):
        """Export the session and write it to the configured fallback file."""
        print("Saving results to {0}".format(self.config.fallback_file))
        data = self.exporter.get_session_data_subset(self.session)
        with open(self.config.fallback_file, "wt", encoding="UTF-8") as stream:
            # The exporter emits bytes; the translator decodes them so they
            # can be written to the text-mode stream opened above.
            translating_stream = ByteStringStreamTranslator(stream, "UTF-8")
            self.exporter.dump(data, translating_stream)

    def _submit_results(self):
        """
        Export the session and upload it to the certification website
        (config.c3_url) using the configured secure_id.

        Returns False when the secure_id is invalid; all transport errors
        are reported to stdout and otherwise swallowed (best-effort).
        """
        print("Submitting results to {0} for secure_id {1}".format(
            self.config.c3_url, self.config.secure_id))
        options_string = "secure_id={0}".format(self.config.secure_id)
        # Create the transport object
        try:
            transport = CertificationTransport(
                self.config.c3_url, options_string, self.config)
        except InvalidSecureIDError as exc:
            print(exc)
            return False
        # Prepare the data for submission
        data = self.exporter.get_session_data_subset(self.session)
        # Binary temp file: the exporter writes bytes and the transport
        # re-reads them from the start after the rewind below.
        with tempfile.NamedTemporaryFile(mode='w+b') as stream:
            # Dump the data to the temporary file
            self.exporter.dump(data, stream)
            # Flush and rewind
            stream.flush()
            stream.seek(0)
            try:
                # Send the data, reading from the temporary file
                result = transport.send(stream)
                if 'url' in result:
                    print("Successfully sent, submission status at {0}".format(
                        result['url']))
                else:
                    print("Successfully sent, server response: {0}".format(
                        result))
            except InvalidSchema as exc:
                print("Invalid destination URL: {0}".format(exc))
            except ConnectionError as exc:
                print("Unable to connect to destination URL: {0}".format(exc))
            except HTTPError as exc:
                print(("Server returned an error when "
                       "receiving or processing: {0}").format(exc))
            except IOError as exc:
                print("Problem reading a file: {0}".format(exc))

    def _run_all_jobs(self):
        """
        Run every job in the session's run list, re-computing the selection
        and restarting the loop each time a "local" job finishes (local jobs
        can generate new jobs, so the run list may grow).
        """
        again = True
        while again:
            again = False
            for job in self.session.run_list:
                # Skip jobs that already have result, this is only needed when
                # we run over the list of jobs again, after discovering new
                # jobs via the local job output
                result = self.session.job_state_map[job.name].result
                if result.outcome is not None:
                    continue
                self._run_single_job(job)
                # Persist after every job so a crash loses at most one result.
                self.session.persistent_save()
                if job.plugin == "local":
                    # After each local job runs rebuild the list of matching
                    # jobs and run everything again
                    self._set_job_selection()
                    again = True
                    break

    def _run_single_job(self, job):
        """
        Run one job (if its dependencies allow), print its outcome, comments
        and any readiness inhibitors, and record the result in the session.
        """
        print("- {}:".format(job.name), end=' ')
        job_state, job_result = run_job_if_possible(
            self.session, self.runner, self.config, job)
        print("{0}".format(job_result.outcome))
        if job_result.comments is not None:
            print("comments: {0}".format(job_result.comments))
        if job_state.readiness_inhibitor_list:
            print("inhibitors:")
            for inhibitor in job_state.readiness_inhibitor_list:
                print(" * {}".format(inhibitor))
        self.session.update_job_result(job, job_result)
def get_parameter_values(cls):
    """Yield a one-element tuple for every built-in CheckBox job."""
    checkbox = CheckBox()
    for builtin_job in checkbox.get_builtin_jobs():
        yield (builtin_job,)