def run(self):
    """
    Run the invocation: build a session, select jobs, run them all and
    save/submit the results.

    Returns 0 (see FIXME below); raises SystemExit when the job database
    contains duplicate job definitions.
    """
    # Compute the run list, this can give us notification about problems in
    # the selected jobs. Currently we just display each problem
    # Create a session that handles most of the stuff needed to run jobs
    try:
        self.session = SessionState(self.job_list)
    except DependencyDuplicateError as exc:
        # Handle possible DependencyDuplicateError that can happen if
        # someone is using plainbox for job development.
        print("The job database you are currently using is broken")
        print("At least two jobs contend for the name {0}".format(
            exc.job.id))
        print("First job defined in: {0}".format(exc.job.origin))
        print("Second job defined in: {0}".format(
            exc.duplicate_job.origin))
        raise SystemExit(exc)
    # The session context manager owns the session directory for the
    # duration of the run.
    with self.session.open():
        self._set_job_selection()
        self.runner = JobRunner(self.session.session_dir,
                                self.provider_list,
                                self.session.jobs_io_log_dir,
                                command_io_delegate=self,
                                dry_run=self.ns.dry_run)
        self._run_all_jobs()
        # Only write a local fallback copy when one was configured.
        if self.config.fallback_file is not Unset:
            self._save_results()
        self._submit_results()
    # FIXME: sensible return value
    return 0
def run(self):
    """
    Run the invocation: build a session, select jobs, run them all and
    save/submit the results.

    Returns 0 (see FIXME below); raises SystemExit when the job database
    contains duplicate job definitions.
    """
    # Compute the run list, this can give us notification about problems in
    # the selected jobs. Currently we just display each problem
    # Create a session that handles most of the stuff needed to run jobs
    try:
        self.session = SessionState(self.job_list)
    except DependencyDuplicateError as exc:
        # Handle possible DependencyDuplicateError that can happen if
        # someone is using plainbox for job development.
        print("The job database you are currently using is broken")
        print("At least two jobs contend for the name {0}".format(
            exc.job.id))
        print("First job defined in: {0}".format(exc.job.origin))
        print("Second job defined in: {0}".format(
            exc.duplicate_job.origin))
        raise SystemExit(exc)
    # The session context manager owns the session directory for the
    # duration of the run.
    with self.session.open():
        self._set_job_selection()
        self.runner = JobRunner(
            self.session.session_dir, self.provider_list,
            self.session.jobs_io_log_dir,
            command_io_delegate=self, dry_run=self.ns.dry_run)
        self._run_all_jobs()
        # Only write a local fallback copy when one was configured.
        if self.config.fallback_file is not Unset:
            self._save_results()
        self._submit_results()
    # FIXME: sensible return value
    return 0
def __init__(self, provider, ns):
    """
    Initialize the invocation from a provider and parsed command-line
    namespace *ns*.

    Builds the full job list, the desired (matching) job list, a fresh
    SessionState, and records any problems reported while updating the
    desired job list.
    """
    super(AnalyzeInvocation, self).__init__(provider)
    self.ns = ns
    # All jobs known to the provider(s) for this namespace.
    self.job_list = self.get_job_list(ns)
    # Subset of jobs selected by the command-line patterns/whitelists.
    self.desired_job_list = self._get_matching_job_list(ns, self.job_list)
    self.session = SessionState(self.job_list)
    # Problems (e.g. dependency issues) discovered while computing the
    # run list for the desired jobs.
    self.problem_list = self.session.update_desired_job_list(
        self.desired_job_list)
class AnalyzeInvocation(CheckBoxInvocationMixIn):
    """
    Invocation of the ``analyze`` command.

    Optionally runs local jobs (which can generate more jobs) and then
    prints a number of reports about the selected job set, depending on
    the command-line flags in ``ns``.
    """

    def __init__(self, provider, ns):
        """Build job list, desired job list, session and problem list."""
        super(AnalyzeInvocation, self).__init__(provider)
        self.ns = ns
        self.job_list = self.get_job_list(ns)
        self.desired_job_list = self._get_matching_job_list(ns, self.job_list)
        self.session = SessionState(self.job_list)
        # Problems discovered while computing the run list.
        self.problem_list = self.session.update_desired_job_list(
            self.desired_job_list)

    def run(self):
        """Dispatch to the report/run helpers selected by command flags."""
        if self.ns.run_local:
            self._run_local_jobs()
        if self.ns.print_stats:
            self._print_general_stats()
        if self.ns.print_dependency_report:
            self._print_dependency_report()
        if self.ns.print_interactivity_report:
            self._print_interactivity_report()

    def _run_local_jobs(self):
        """
        Run every 'local' job in the run list until none are left without
        a result. Local jobs can add new jobs, so the run list is
        re-scanned from the start after each one completes.
        """
        print("[Running Local Jobs]".center(80, '='))
        with self.session.open():
            runner = JobRunner(
                self.session.session_dir, self.session.jobs_io_log_dir,
                command_io_delegate=self, interaction_callback=None)
            again = True
            while again:
                for job in self.session.run_list:
                    if job.plugin == 'local':
                        # Only run jobs that don't have a result yet.
                        if self.session.job_state_map[
                                job.name].result.outcome is None:
                            self._run_local_job(runner, job)
                            # Restart the scan: running a local job may
                            # have changed the run list.
                            break
                else:
                    # for-else: no local job was started this pass — done.
                    again = False

    def _run_local_job(self, runner, job):
        """Run one local job and refresh the desired job list with any
        jobs it generated; accumulate new problems."""
        print("{job}".format(job=job.name))
        result = runner.run_job(job)
        self.session.update_job_result(job, result)
        new_desired_job_list = self._get_matching_job_list(
            self.ns, self.session.job_list)
        new_problem_list = self.session.update_desired_job_list(
            new_desired_job_list)
        if new_problem_list:
            print("Problem list", new_problem_list)
            self.problem_list.extend(new_problem_list)

    def _print_general_stats(self):
        """Print counts of known and selected jobs."""
        print("[General Statistics]".center(80, '='))
        print("Known jobs: {}".format(len(self.job_list)))
        print("Selected jobs: {}".format(len(self.desired_job_list)))

    def _print_dependency_report(self):
        """Print each accumulated dependency problem, if any."""
        print("[Dependency Report]".center(80, '='))
        if self.problem_list:
            for problem in self.problem_list:
                print(" * {}".format(problem))
        else:
            print("Selected jobs have no dependency problems")

    def _print_interactivity_report(self):
        """Print, for each job in the run list, whether its plugin type
        requires operator interaction."""
        print("[Interactivity Report]".center(80, '='))
        # Static mapping from job plugin type to "needs a human" flag.
        is_interactive = {
            'shell': False,
            'local': False,
            'resource': False,
            'attachment': False,
            'user-verify': True,
            'user-interact': True,
            'manual': True
        }
        if not self.session.run_list:
            return
        # Pad job names to the longest one so the report lines up.
        max_job_len = max(len(job.name) for job in self.session.run_list)
        fmt = "{{job:{}}}: {{interactive}}".format(max_job_len)
        for job in self.session.run_list:
            print(fmt.format(
                job=job.name,
                interactive=(
                    "interactive" if is_interactive[job.plugin]
                    else "automatic")))
def _run_jobs(self, ns, job_list):
    """
    Create a session for *job_list*, select jobs from the configured
    whitelists, optionally resume a previous session, run the jobs and
    save the results.

    Returns 0 (see FIXME below); raises SystemExit on a broken job
    database or failed privilege warm-up.
    """
    # Create a session that handles most of the stuff needed to run jobs
    try:
        session = SessionState(job_list)
    except DependencyDuplicateError as exc:
        # Handle possible DependencyDuplicateError that can happen if
        # someone is using plainbox for job development.
        print("The job database you are currently using is broken")
        print("At least two jobs contend for the name {0}".format(
            exc.job.name))
        print("First job defined in: {0}".format(exc.job.origin))
        print("Second job defined in: {0}".format(
            exc.duplicate_job.origin))
        raise SystemExit(exc)
    with session.open():
        # Union of all jobs matched by every configured whitelist.
        desired_job_list = []
        for whitelist in self.whitelists:
            desired_job_list.extend(
                get_matching_job_list(job_list, whitelist))
        self._update_desired_job_list(session, desired_job_list)
        if session.previous_session_file():
            # Interactive users may resume the previous session;
            # otherwise it is discarded.
            if self.is_interactive and self.ask_for_resume():
                session.resume()
                self._maybe_skip_last_job_after_resume(session)
            else:
                session.clean()
        session.metadata.title = " ".join(sys.argv)
        session.persistent_save()
        # Ask the password before anything else in order to run jobs
        # requiring privileges
        if self.is_interactive and self._auth_warmup_needed(session):
            print("[ Authentication ]".center(80, '='))
            return_code = authenticate_warmup()
            if return_code:
                raise SystemExit(return_code)
        runner = JobRunner(
            session.session_dir, self.provider_list,
            session.jobs_io_log_dir)
        self._run_jobs_with_session(ns, session, runner)
        self.save_results(session)
    # NOTE(review): session.remove() placed after the context exits —
    # confirm against the original formatting; it could also have been
    # the last statement inside the `with` block.
    session.remove()
    # FIXME: sensible return value
    return 0
class _SRUInvocation(CheckBoxInvocationMixIn):
    """
    Helper class instantiated to perform a particular invocation of the sru
    command. Unlike the SRU command itself, this class is instantiated each
    time.
    """

    def __init__(self, provider_list, config, ns):
        """
        Initialize the invocation.

        Picks the whitelist from (in priority order) the command line,
        the configuration file, or the built-in 'sru' whitelist, then
        builds the job list and the XML exporter.
        """
        # BUG FIX: was ``super().__init__(provider_list, conifg)`` — the
        # misspelled name raised NameError on every instantiation.
        super().__init__(provider_list, config)
        self.ns = ns
        if self.ns.whitelist:
            self.whitelist = self.get_whitelist_from_file(
                self.ns.whitelist[0].name, self.ns.whitelist)
        elif self.config.whitelist is not Unset:
            self.whitelist = self.get_whitelist_from_file(
                self.config.whitelist)
        else:
            self.whitelist = get_whitelist_by_name(provider_list, 'sru')
        self.job_list = self.get_job_list(ns)
        # XXX: maybe allow specifying system_id from command line?
        self.exporter = XMLSessionStateExporter(system_id=None)
        self.session = None
        self.runner = None

    def run(self):
        """
        Build a session, select jobs, run them all and save/submit the
        results. Returns 0; raises SystemExit on a broken job database.
        """
        # Compute the run list, this can give us notification about
        # problems in the selected jobs. Currently we just display each
        # problem. Create a session that handles most of the stuff needed
        # to run jobs.
        try:
            self.session = SessionState(self.job_list)
        except DependencyDuplicateError as exc:
            # Handle possible DependencyDuplicateError that can happen if
            # someone is using plainbox for job development.
            print("The job database you are currently using is broken")
            print("At least two jobs contend for the name {0}".format(
                exc.job.id))
            print("First job defined in: {0}".format(exc.job.origin))
            print("Second job defined in: {0}".format(
                exc.duplicate_job.origin))
            raise SystemExit(exc)
        with self.session.open():
            self._set_job_selection()
            self.runner = JobRunner(self.session.session_dir,
                                    self.provider_list,
                                    self.session.jobs_io_log_dir,
                                    command_io_delegate=self,
                                    dry_run=self.ns.dry_run)
            self._run_all_jobs()
            # Only write a local fallback copy when one was configured.
            if self.config.fallback_file is not Unset:
                self._save_results()
            self._submit_results()
        # FIXME: sensible return value
        return 0

    def _set_job_selection(self):
        """Select whitelist-matching jobs; warn about problematic ones."""
        desired_job_list = get_matching_job_list(
            self.job_list, self.whitelist)
        problem_list = self.session.update_desired_job_list(
            desired_job_list)
        if problem_list:
            logger.warning("There were some problems with the selected jobs")
            for problem in problem_list:
                logger.warning("- %s", problem)
            logger.warning("Problematic jobs will not be considered")

    def _save_results(self):
        """Export the session and write it to the configured fallback file."""
        print("Saving results to {0}".format(self.config.fallback_file))
        data = self.exporter.get_session_data_subset(self.session)
        with open(self.config.fallback_file, "wt",
                  encoding="UTF-8") as stream:
            # The exporter emits bytes; translate them into the text stream.
            translating_stream = ByteStringStreamTranslator(stream, "UTF-8")
            self.exporter.dump(data, translating_stream)

    def _submit_results(self):
        """
        Export the session to a temporary file and send it to the
        certification website. Returns False when the secure_id is
        invalid; network/HTTP errors are reported but not raised.
        """
        print("Submitting results to {0} for secure_id {1}".format(
            self.config.c3_url, self.config.secure_id))
        options_string = "secure_id={0}".format(self.config.secure_id)
        # Create the transport object
        try:
            transport = CertificationTransport(
                self.config.c3_url, options_string, self.config)
        except InvalidSecureIDError as exc:
            print(exc)
            return False
        # Prepare the data for submission
        data = self.exporter.get_session_data_subset(self.session)
        with tempfile.NamedTemporaryFile(mode='w+b') as stream:
            # Dump the data to the temporary file
            self.exporter.dump(data, stream)
            # Flush and rewind
            stream.flush()
            stream.seek(0)
            try:
                # Send the data, reading from the temporary file
                result = transport.send(stream)
                if 'url' in result:
                    print("Successfully sent, submission status at "
                          "{0}".format(result['url']))
                else:
                    print("Successfully sent, server response: {0}".format(
                        result))
            except InvalidSchema as exc:
                print("Invalid destination URL: {0}".format(exc))
            except ConnectionError as exc:
                print("Unable to connect to destination URL: {0}".format(exc))
            except HTTPError as exc:
                print(("Server returned an error when "
                       "receiving or processing: {0}").format(exc))
            except IOError as exc:
                print("Problem reading a file: {0}".format(exc))

    def _run_all_jobs(self):
        """Run every job without a result; re-select after local jobs."""
        again = True
        while again:
            again = False
            for job in self.session.run_list:
                # Skip jobs that already have result, this is only needed
                # when we run over the list of jobs again, after
                # discovering new jobs via the local job output
                result = self.session.job_state_map[job.id].result
                if result.outcome is not None:
                    continue
                self._run_single_job(job)
                self.session.persistent_save()
                if job.plugin == "local":
                    # After each local job runs rebuild the list of
                    # matching jobs and run everything again
                    self._set_job_selection()
                    again = True
                    break

    def _run_single_job(self, job):
        """Run one job, print its outcome/comments/inhibitors and record
        the result in the session."""
        print("- {}:".format(job.id), end=' ')
        sys.stdout.flush()
        job_state, job_result = run_job_if_possible(
            self.session, self.runner, self.config, job)
        print("{0}".format(job_result.outcome))
        sys.stdout.flush()
        if job_result.comments is not None:
            print("comments: {0}".format(job_result.comments))
        if job_state.readiness_inhibitor_list:
            print("inhibitors:")
            for inhibitor in job_state.readiness_inhibitor_list:
                print(" * {}".format(inhibitor))
        self.session.update_job_result(job, job_result)
class AnalyzeInvocation(CheckBoxInvocationMixIn):
    """
    Invocation of the ``analyze`` command.

    Optionally runs local jobs and prints the reports selected by the
    command-line flags: desired-job/run lists, statistics, dependency,
    interactivity, estimated-duration, validation and requirement
    reports.
    """

    def __init__(self, provider_list, ns):
        """Build job list, desired job list, session and problem list."""
        super(AnalyzeInvocation, self).__init__(provider_list)
        self.ns = ns
        self.job_list = self.get_job_list(ns)
        self.desired_job_list = self._get_matching_job_list(ns, self.job_list)
        self.session = SessionState(self.job_list)
        # Problems discovered while computing the run list.
        self.problem_list = self.session.update_desired_job_list(
            self.desired_job_list)

    def run(self):
        """Dispatch to the report/run helpers selected by command flags."""
        if self.ns.run_local:
            # When running local jobs, optionally show the lists *before*
            # the run; they are (re)printed after all reports below.
            if self.ns.print_desired_job_list:
                self._print_desired_job_list()
            if self.ns.print_run_list:
                self._print_run_list()
            self._run_local_jobs()
        if self.ns.print_stats:
            self._print_general_stats()
        if self.ns.print_dependency_report:
            self._print_dependency_report()
        if self.ns.print_interactivity_report:
            self._print_interactivity_report()
        if self.ns.print_estimated_duration_report:
            self._print_estimated_duration_report()
        if self.ns.print_validation_report:
            self._print_validation_report(self.ns.only_errors)
        if self.ns.print_requirement_report:
            self._print_requirement_report()
        if self.ns.print_desired_job_list:
            self._print_desired_job_list()
        if self.ns.print_run_list:
            self._print_run_list()

    def _print_desired_job_list(self):
        """Print the name of every desired job."""
        print("[Desired Job List]".center(80, '='))
        for job in self.session.desired_job_list:
            print("{}".format(job.name))

    def _print_run_list(self):
        """Print the name of every job in the computed run list."""
        print("[Run List]".center(80, '='))
        for job in self.session.run_list:
            print("{}".format(job.name))

    def _run_local_jobs(self):
        """
        Run every 'local' job in the run list until none are left without
        a result. Local jobs can add new jobs, so the run list is
        re-scanned from the start after each one completes.
        """
        print("[Running Local Jobs]".center(80, '='))
        with self.session.open():
            runner = JobRunner(
                self.session.session_dir, self.provider_list,
                self.session.jobs_io_log_dir, command_io_delegate=self)
            again = True
            while again:
                for job in self.session.run_list:
                    if job.plugin == 'local':
                        # Only run jobs that don't have a result yet.
                        if self.session.job_state_map[
                                job.name].result.outcome is None:
                            self._run_local_job(runner, job)
                            # Restart the scan: running a local job may
                            # have changed the run list.
                            break
                else:
                    # for-else: no local job was started this pass — done.
                    again = False

    def _run_local_job(self, runner, job):
        """Run one local job and refresh the desired job list with any
        jobs it generated; accumulate new problems."""
        print("{job}".format(job=job.name))
        result = runner.run_job(job)
        self.session.update_job_result(job, result)
        new_desired_job_list = self._get_matching_job_list(
            self.ns, self.session.job_list)
        new_problem_list = self.session.update_desired_job_list(
            new_desired_job_list)
        if new_problem_list:
            print("Problem list", new_problem_list)
            self.problem_list.extend(new_problem_list)

    def _print_general_stats(self):
        """Print counts of known and selected jobs."""
        print("[General Statistics]".center(80, '='))
        print("Known jobs: {}".format(len(self.job_list)))
        print("Selected jobs: {}".format(len(self.desired_job_list)))

    def _print_dependency_report(self):
        """Print each accumulated dependency problem, if any."""
        print("[Dependency Report]".center(80, '='))
        if self.problem_list:
            for problem in self.problem_list:
                print(" * {}".format(problem))
        else:
            print("Selected jobs have no dependency problems")

    def _print_interactivity_report(self):
        """Print interactivity and estimated duration for each job in
        the run list."""
        print("[Interactivity Report]".center(80, '='))
        if not self.session.run_list:
            return
        # Pad job names to the longest one so the report lines up.
        max_job_len = max(len(job.name) for job in self.session.run_list)
        fmt = "{{job:{}}} : {{interactive:11}} : {{duration}}".format(
            max_job_len)
        for job in self.session.run_list:
            print(
                fmt.format(
                    job=job.name,
                    interactive=(
                        "automatic" if job.automated else "interactive"),
                    duration=(
                        timedelta(seconds=job.estimated_duration)
                        if job.estimated_duration is not None
                        else "unknown")
                )
            )

    def _print_estimated_duration_report(self):
        """Print estimated automated/manual/total test duration; any
        component without an estimate prints "cannot estimate"."""
        print("[Estimated Duration Report]".center(80, '='))
        print("Estimated test duration:")
        automated, manual = self.session.get_estimated_duration()
        print("   automated tests: {}".format(
            timedelta(seconds=automated) if automated is not None
            else "cannot estimate"))
        print("      manual tests: {}".format(
            timedelta(seconds=manual) if manual is not None
            else "cannot estimate"))
        print("             total: {}".format(
            timedelta(seconds=manual + automated)
            if manual is not None and automated is not None
            else "cannot estimate"))

    def _print_validation_report(self, only_errors):
        """Validate each job in the run list and print any problem; with
        *only_errors*, valid jobs are skipped entirely."""
        print("[Validation Report]".center(80, '='))
        if not self.session.run_list:
            return
        max_job_len = max(len(job.name) for job in self.session.run_list)
        fmt = "{{job:{}}} : {{problem}}".format(max_job_len)
        problem = None
        for job in self.session.run_list:
            try:
                job.validate()
            except ValueError as exc:
                problem = str(exc)
            else:
                if only_errors:
                    continue
                problem = ""
            print(fmt.format(job=job.name, problem=problem))
            if problem:
                print("Job defined in {}".format(job.origin))
        # `problem` stays None only when no job raised at all.
        if only_errors and problem is None:
            print("No problems found")

    def _print_requirement_report(self):
        """Collect and print the distinct package requirements expressed
        by the run list's resource programs."""
        print("[Requirement Report]".center(80, '='))
        if not self.session.run_list:
            return
        requirements = set()
        for job in self.session.run_list:
            if job.requires:
                resource_program = job.get_resource_program()
                if 'package' in resource_program.required_resources:
                    # Only look at expressions on the 'package' resource.
                    for packages in [
                            resource.text
                            for resource in resource_program.expression_list
                            if resource.resource_name == 'package']:
                        # Parse the requirement expression to extract the
                        # package names it mentions.
                        node = ast.parse(packages)
                        visitor = RequirementNodeVisitor()
                        visitor.visit(node)
                        requirements.add(
                            (' | ').join(visitor.packages_seen))
        if requirements:
            print(',\n'.join(sorted(requirements)))
class _SRUInvocation(CheckBoxInvocationMixIn):
    """
    Helper class instantiated to perform a particular invocation of the sru
    command. Unlike the SRU command itself, this class is instantiated each
    time.
    """

    def __init__(self, provider_list, config, ns):
        """
        Initialize the invocation.

        Picks the whitelist from (in priority order) the command line,
        the configuration file, or the built-in 'sru' whitelist, then
        builds the job list and the XML exporter.
        """
        # BUG FIX: was ``super().__init__(provider_list, conifg)`` — the
        # misspelled name raised NameError on every instantiation.
        super().__init__(provider_list, config)
        self.ns = ns
        if self.ns.whitelist:
            self.whitelist = self.get_whitelist_from_file(
                self.ns.whitelist[0].name, self.ns.whitelist)
        elif self.config.whitelist is not Unset:
            self.whitelist = self.get_whitelist_from_file(
                self.config.whitelist)
        else:
            self.whitelist = get_whitelist_by_name(provider_list, 'sru')
        self.job_list = self.get_job_list(ns)
        # XXX: maybe allow specifying system_id from command line?
        self.exporter = XMLSessionStateExporter(system_id=None)
        self.session = None
        self.runner = None

    def run(self):
        """
        Build a session, select jobs, run them all and save/submit the
        results. Returns 0; raises SystemExit on a broken job database.
        """
        # Compute the run list, this can give us notification about
        # problems in the selected jobs. Currently we just display each
        # problem. Create a session that handles most of the stuff needed
        # to run jobs.
        try:
            self.session = SessionState(self.job_list)
        except DependencyDuplicateError as exc:
            # Handle possible DependencyDuplicateError that can happen if
            # someone is using plainbox for job development.
            print("The job database you are currently using is broken")
            print("At least two jobs contend for the name {0}".format(
                exc.job.id))
            print("First job defined in: {0}".format(exc.job.origin))
            print("Second job defined in: {0}".format(
                exc.duplicate_job.origin))
            raise SystemExit(exc)
        with self.session.open():
            self._set_job_selection()
            self.runner = JobRunner(
                self.session.session_dir, self.provider_list,
                self.session.jobs_io_log_dir,
                command_io_delegate=self, dry_run=self.ns.dry_run)
            self._run_all_jobs()
            # Only write a local fallback copy when one was configured.
            if self.config.fallback_file is not Unset:
                self._save_results()
            self._submit_results()
        # FIXME: sensible return value
        return 0

    def _set_job_selection(self):
        """Select whitelist-matching jobs; warn about problematic ones."""
        desired_job_list = get_matching_job_list(
            self.job_list, self.whitelist)
        problem_list = self.session.update_desired_job_list(
            desired_job_list)
        if problem_list:
            logger.warning("There were some problems with the selected jobs")
            for problem in problem_list:
                logger.warning("- %s", problem)
            logger.warning("Problematic jobs will not be considered")

    def _save_results(self):
        """Export the session and write it to the configured fallback file."""
        print("Saving results to {0}".format(self.config.fallback_file))
        data = self.exporter.get_session_data_subset(self.session)
        with open(self.config.fallback_file, "wt",
                  encoding="UTF-8") as stream:
            # The exporter emits bytes; translate them into the text stream.
            translating_stream = ByteStringStreamTranslator(stream, "UTF-8")
            self.exporter.dump(data, translating_stream)

    def _submit_results(self):
        """
        Export the session to a temporary file and send it to the
        certification website. Returns False when the secure_id is
        invalid; network/HTTP errors are reported but not raised.
        """
        print("Submitting results to {0} for secure_id {1}".format(
            self.config.c3_url, self.config.secure_id))
        options_string = "secure_id={0}".format(self.config.secure_id)
        # Create the transport object
        try:
            transport = CertificationTransport(
                self.config.c3_url, options_string, self.config)
        except InvalidSecureIDError as exc:
            print(exc)
            return False
        # Prepare the data for submission
        data = self.exporter.get_session_data_subset(self.session)
        with tempfile.NamedTemporaryFile(mode='w+b') as stream:
            # Dump the data to the temporary file
            self.exporter.dump(data, stream)
            # Flush and rewind
            stream.flush()
            stream.seek(0)
            try:
                # Send the data, reading from the temporary file
                result = transport.send(stream)
                if 'url' in result:
                    print("Successfully sent, submission status at "
                          "{0}".format(result['url']))
                else:
                    print("Successfully sent, server response: {0}".format(
                        result))
            except InvalidSchema as exc:
                print("Invalid destination URL: {0}".format(exc))
            except ConnectionError as exc:
                print("Unable to connect to destination URL: {0}".format(exc))
            except HTTPError as exc:
                print(("Server returned an error when "
                       "receiving or processing: {0}").format(exc))
            except IOError as exc:
                print("Problem reading a file: {0}".format(exc))

    def _run_all_jobs(self):
        """Run every job without a result; re-select after local jobs."""
        again = True
        while again:
            again = False
            for job in self.session.run_list:
                # Skip jobs that already have result, this is only needed
                # when we run over the list of jobs again, after
                # discovering new jobs via the local job output
                result = self.session.job_state_map[job.id].result
                if result.outcome is not None:
                    continue
                self._run_single_job(job)
                self.session.persistent_save()
                if job.plugin == "local":
                    # After each local job runs rebuild the list of
                    # matching jobs and run everything again
                    self._set_job_selection()
                    again = True
                    break

    def _run_single_job(self, job):
        """Run one job, print its outcome/comments/inhibitors and record
        the result in the session."""
        print("- {}:".format(job.id), end=' ')
        sys.stdout.flush()
        job_state, job_result = run_job_if_possible(
            self.session, self.runner, self.config, job)
        print("{0}".format(job_result.outcome))
        sys.stdout.flush()
        if job_result.comments is not None:
            print("comments: {0}".format(job_result.comments))
        if job_state.readiness_inhibitor_list:
            print("inhibitors:")
            for inhibitor in job_state.readiness_inhibitor_list:
                print(" * {}".format(inhibitor))
        self.session.update_job_result(job, job_result)
def _run_jobs(self, ns, job_list, exporter, transport=None):
    """
    Create a session for *job_list*, optionally resume a previous one,
    run the matching jobs, export the session with *exporter*, save the
    exported data to ``ns.output_file`` and optionally send it via
    *transport*.

    Returns 0 (see FIXME below); raises SystemExit on a broken job
    database or failed privilege warm-up.
    """
    # Compute the run list, this can give us notification about problems in
    # the selected jobs. Currently we just display each problem
    matching_job_list = self._get_matching_job_list(ns, job_list)
    print(_("[ Analyzing Jobs ]").center(80, '='))
    # Create a session that handles most of the stuff needed to run jobs
    try:
        session = SessionState(job_list)
    except DependencyDuplicateError as exc:
        # Handle possible DependencyDuplicateError that can happen if
        # someone is using plainbox for job development.
        print(_("The job database you are currently using is broken"))
        print(_("At least two jobs contend for the id {0}").format(
            exc.job.id))
        print(_("First job defined in: {0}").format(exc.job.origin))
        print(_("Second job defined in: {0}").format(
            exc.duplicate_job.origin))
        raise SystemExit(exc)
    with session.open():
        if session.previous_session_file():
            # Let the user resume the previous session; otherwise discard it.
            if self.ask_for_resume():
                session.resume()
                self._maybe_skip_last_job_after_resume(session)
            else:
                session.clean()
        session.metadata.title = " ".join(sys.argv)
        session.persistent_save()
        self._update_desired_job_list(session, matching_job_list)
        # Ask the password before anything else in order to run jobs
        # requiring privileges
        if self._auth_warmup_needed(session):
            print(_("[ Authentication ]").center(80, '='))
            return_code = authenticate_warmup()
            if return_code:
                raise SystemExit(return_code)
        runner = JobRunner(
            session.session_dir, self.provider_list,
            session.jobs_io_log_dir, dry_run=ns.dry_run)
        self._run_jobs_with_session(ns, session, runner)
        # Get a stream with exported session data.
        exported_stream = io.BytesIO()
        data_subset = exporter.get_session_data_subset(session)
        exporter.dump(data_subset, exported_stream)
        exported_stream.seek(0)  # Need to rewind the file, puagh
        # Write the stream to file if requested
        self._save_results(ns.output_file, exported_stream)
        # Invoke the transport?
        if transport:
            # Rewind again: _save_results consumed the stream.
            exported_stream.seek(0)
            try:
                transport.send(exported_stream.read())
            except InvalidSchema as exc:
                print(_("Invalid destination URL: {0}").format(exc))
            except ConnectionError as exc:
                print(_("Unable to connect "
                        "to destination URL: {0}").format(exc))
            except HTTPError as exc:
                print(_("Server returned an error when "
                        "receiving or processing: {0}").format(exc))
    # FIXME: sensible return value
    return 0