class ScriptsInfo(Plugin):
    """Make the bundled test scripts and system sbin directories visible
    on PATH before any gathering job runs."""

    # Executable path for running scripts.
    scripts_path = Path(default="%(checkbox_share)s/scripts")

    def register(self, manager):
        super(ScriptsInfo, self).register(manager)
        # Very low priority (-1000) so PATH is adjusted before any other
        # gather handler executes a script.
        self._manager.reactor.call_on("gather", self.gather, -1000)

    def gather(self):
        # The scripts directory takes precedence over anything on PATH.
        prepend_path(self.scripts_path)
        # System administration binaries are appended as a fallback.
        for sbin_directory in ("/sbin", "/usr/sbin"):
            append_path(sbin_directory)
class LockPrompt(Plugin):
    """Hold an exclusive application lock so only one checkbox runs at a
    time, and shut down if the lock file disappears while running."""

    # Filename where the application lock is stored.
    filename = Path(default="%(checkbox_data)s/lock")

    # Timeout after which to show an error prompt.
    timeout = Int(default=0)

    # Root logger handed to GlobalLock for its own diagnostics.
    logger = logging.getLogger()

    def register(self, manager):
        super(LockPrompt, self).register(manager)
        self._lock = None
        self._fd = None
        # Acquire the lock before any other prompt-begin handler (-1000),
        # release it after everything else on stop (1000).
        self._manager.reactor.call_on(
            "prompt-begin", self.prompt_begin, -1000)
        self._manager.reactor.call_on("stop", self.release, 1000)

    def prompt_begin(self, interface):
        """Acquire the global lock and watch its directory for deletion.

        If another instance already holds the lock, optionally show an
        error (when the lock looks stale past `timeout`) and stop.
        """
        directory = posixpath.dirname(self.filename)
        safe_make_directory(directory)

        # Try to lock the process
        self._lock = GlobalLock(self.filename, logger=self.logger)
        try:
            self._lock.acquire()
        except LockAlreadyAcquired:
            # st_atime is used as "last activity" of the other instance;
            # only nag the user once it is older than the timeout.
            if time() - os.stat(self.filename).st_atime > self.timeout:
                self._manager.reactor.fire("prompt-error", interface,
                    _("There is another checkbox running. Please close it first."))
            self._manager.reactor.stop_all()

        # Stop the process if the lock is deleted
        def handler(signum, frame):
            if not posixpath.exists(self.filename):
                self._manager.reactor.stop_all()

        signal.signal(signal.SIGIO, handler)
        # Watch the lock's directory: F_SETSIG 0 routes notification to
        # SIGIO, DN_DELETE|DN_MULTISHOT keeps firing on every deletion.
        self._fd = os.open(directory, os.O_RDONLY)
        fcntl.fcntl(self._fd, fcntl.F_SETSIG, 0)
        fcntl.fcntl(self._fd, fcntl.F_NOTIFY,
            fcntl.DN_DELETE | fcntl.DN_MULTISHOT)

    def release(self):
        """Release the lock, close the watch fd and remove the lock file."""
        # Properly release to the lock
        self._lock.release(skip_delete=True)
        os.close(self._fd)
        os.unlink(self.filename)
class PersistInfo(Plugin):
    """Own the shared on-disk Persist object and flush it at the key
    lifecycle events of a checkbox run."""

    # Filename where to persist information
    filename = Path(default="%(checkbox_data)s/plugins.bpickle")

    def register(self, manager):
        super(PersistInfo, self).register(manager)
        self.persist = None
        event_handlers = (
            ("begin", self.begin),
            ("prompt-begin", self.begin),
            ("prompt-job", self.save),
            ("report-jobs", self.save),
        )
        for event, handler in event_handlers:
            self._manager.reactor.call_on(event, handler, -100)
        # Save persist data last
        self._manager.reactor.call_on("stop", self.save, 900)
        # This should fire first thing during the gathering phase.
        self._manager.reactor.call_on("gather", self.begin_gather, -900)
        # This should fire last during gathering (i.e. after
        # all other gathering callbacks are finished).
        self._manager.reactor.call_on("gather", self.end_gather, 900)

    def begin(self, interface=None):
        """Lazily create the Persist object and announce it exactly once."""
        if self.persist is not None:
            return
        self.persist = Persist(self.filename)
        self._manager.reactor.fire("begin-persist", self.persist)

    def save(self, *args):
        # Flush data to disk (no-op until begin() has created the object).
        if self.persist:
            self.persist.save()

    def begin_gather(self):
        # Speed boost during the gathering phase.  Not critical data anyway.
        self.persist._backend.safe_file_closing = False

    def end_gather(self):
        # Back to saving data very carefully once gathering is done.
        self.persist._backend.safe_file_closing = True
class SubunitReport(Plugin):
    """Append each completed test to a subunit v1 formatted log file."""

    # Filename where to store the subunit report
    filename = Path(default="%(checkbox_data)s/subunit.log")

    def register(self, manager):
        super(SubunitReport, self).register(manager)
        self._manager.reactor.call_on("gather", self.gather)
        self._manager.reactor.call_on("prompt-test", self.prompt_test)

    def gather(self):
        # Open the log for writing; undecodable characters are dropped.
        logging.debug("Opening filename: %s", self.filename)
        self.file = open(self.filename, "w", errors="ignore")

    def prompt_test(self, interface, test):
        """Write one subunit record (test line, status line, details)."""
        stream = self.file
        # A test that belongs to a suite is qualified with the suite name.
        name = ("%s %s" % (test["suite"], test["name"])
                if "suite" in test else test["name"])
        stream.write("test: %s\n" % name)
        # TODO: determine where to handle requires
        # Map the job status onto a subunit status keyword.
        status = STATUS_TO_SUBUNIT[test["status"]]
        stream.write("%s: %s" % (status, name))
        data = test.get("data")
        if data:
            # Prepend whitespace to the data so it forms a bracketed
            # multi-line details block.
            data = data.replace("\n", "\n ").strip()
            stream.write(" [\n %s\n]" % data)
        stream.write("\n")
class JobsInfo(Plugin):
    """Load job definitions from the configured directories, apply the
    whitelist/blacklist filters and report the resulting ordered job list.

    Fix vs. original: get_patterns() used to leak the open whitelist /
    blacklist file handle; it now closes it with a context manager.
    """

    # Domain for internationalization
    domain = String(default="checkbox")

    # Space separated list of directories where job files are stored.
    directories = List(Path(),
        default_factory=lambda: "%(checkbox_share)s/jobs")

    # List of jobs to blacklist
    blacklist = List(String(), default_factory=lambda: "")

    # Path to blacklist file
    blacklist_file = Path(required=False)

    # List of jobs to whitelist
    whitelist = List(String(), default_factory=lambda: "")

    # Path to whitelist file
    whitelist_file = Path(required=False)

    def register(self, manager):
        super(JobsInfo, self).register(manager)

        self.whitelist_patterns = self.get_patterns(
            self.whitelist, self.whitelist_file)
        self.blacklist_patterns = self.get_patterns(
            self.blacklist, self.blacklist_file)
        self.selected_jobs = defaultdict(list)

        self._missing_dependencies_report = ""

        self._manager.reactor.call_on("prompt-begin", self.prompt_begin)
        self._manager.reactor.call_on("gather", self.gather)
        # The unused-pattern audit is only useful when debugging.
        if logging.getLogger().getEffectiveLevel() <= logging.DEBUG:
            self._manager.reactor.call_on(
                "prompt-gather", self.post_gather, 90)
        self._manager.reactor.call_on("report-job", self.report_job, -100)

    def prompt_begin(self, interface):
        """
        Capture interface object to use it later
        to display errors
        """
        self.interface = interface
        self.unused_patterns = (
            self.whitelist_patterns + self.blacklist_patterns)

    def check_ordered_messages(self, messages):
        """Return whether the list of messages are ordered or not.

        Also populates a _missing_dependencies_report string variable
        with a report of any jobs that are required but not present in
        the whitelist.
        """
        names_so_far = set()
        all_names = set([message["name"] for message in messages])
        messages_ordered = True
        missing_dependencies = defaultdict(set)

        for message in messages:
            name = message["name"]
            for dependency in message.get("depends", []):
                if dependency not in names_so_far:
                    messages_ordered = False
                # Two separate checks :) we *could* save a negligible
                # bit of time by putting this inside the previous "if"
                # but we're not in *that* big a hurry.
                if dependency not in all_names:
                    missing_dependencies[name].add(dependency)
            names_so_far.add(name)

        # Now assemble the list of missing deps into a nice report
        # NOTE(review): the wording reads "<job> required by <deps>" but the
        # deps are what's missing -- possibly inverted; kept as-is, verify.
        jobs_and_missing_deps = [
            "{} required by {}".format(
                job_name, ", ".join(missing_dependencies[job_name]))
            for job_name in missing_dependencies]
        self._missing_dependencies_report = "\n".join(jobs_and_missing_deps)

        return messages_ordered

    def get_patterns(self, strings, filename=None):
        """Return the list of strings as compiled regular expressions."""
        if filename:
            try:
                # Close the file deterministically (the original version
                # leaked this handle).
                with open(filename) as pattern_file:
                    strings.extend(
                        [line.strip() for line in pattern_file.readlines()])
            except IOError as e:
                error_message = (_("Failed to open file '%s': %s")
                                 % (filename, e.strerror))
                logging.critical(error_message)
                sys.stderr.write("%s\n" % error_message)
                sys.exit(os.EX_NOINPUT)

        # Blank lines and '#' comments in pattern files are ignored;
        # each remaining pattern must match the whole job name.
        return [
            re.compile(r"^%s$" % s) for s in strings
            if s and not s.startswith("#")]

    def get_unique_messages(self, messages):
        """Return the list of messages without any duplicates, giving
        precedence to messages that are the longest.
        """
        unique_messages = []
        unique_indexes = {}
        for message in messages:
            name = message["name"]
            index = unique_indexes.get(name)
            if index is None:
                unique_indexes[name] = len(unique_messages)
                unique_messages.append(message)
            elif len(message) > len(unique_messages[index]):
                # A message with more fields wins over a sparser duplicate.
                unique_messages[index] = message

        return unique_messages

    def gather(self):
        """Collect, filter, deduplicate and order all job messages, then
        fire report-jobs with the final list."""
        # Register temporary handler for report-message events
        messages = []

        def report_message(message):
            if self.whitelist_patterns:
                name = message["name"]
                names = [name for p in self.whitelist_patterns
                         if p.match(name)]
                if not names:
                    return

            messages.append(message)

        # Set domain and message event handler
        old_domain = gettext.textdomain()
        gettext.textdomain(self.domain)
        event_id = self._manager.reactor.call_on(
            "report-message", report_message, 100)

        for directory in self.directories:
            self._manager.reactor.fire("message-directory", directory)

        for message in messages:
            self._manager.reactor.fire("report-job", message)

        # Unset domain and event handler
        self._manager.reactor.cancel_call(event_id)
        gettext.textdomain(old_domain)

        # Get unique messages from the now complete list
        messages = self.get_unique_messages(messages)

        # Apply whitelist ordering
        if self.whitelist_patterns:
            def key_function(obj):
                name = obj["name"]
                for pattern in self.whitelist_patterns:
                    if pattern.match(name):
                        return self.whitelist_patterns.index(pattern)

            messages = sorted(messages, key=key_function)

        if not self.check_ordered_messages(messages):
            # One of two things may have happened if we enter this code path.
            # Either the jobs are not in topological ordering,
            # or they are in topological ordering but a dependency is
            # missing.
            old_message_names = [
                message["name"] + "\n" for message in messages]
            resolver = Resolver(key_func=lambda m: m["name"])
            for message in messages:
                resolver.add(message, *message.get("depends", []))
            messages = resolver.get_dependents()

            if (self.whitelist_patterns and
                    logging.getLogger().getEffectiveLevel() <=
                    logging.DEBUG):
                new_message_names = [
                    message["name"] + "\n" for message in messages]
                # This will contain a report of out-of-order jobs.
                detailed_text = "".join(
                    difflib.unified_diff(
                        old_message_names, new_message_names,
                        "old whitelist", "new whitelist"))
                # First, we report missing dependencies, if any.
                if self._missing_dependencies_report:
                    primary = _("Dependencies are missing so some jobs "
                                "will not run.")
                    secondary = _("To fix this, close checkbox and add "
                                  "the missing dependencies to the "
                                  "whitelist.")
                    self._manager.reactor.fire(
                        "prompt-warning", self.interface, primary,
                        secondary, self._missing_dependencies_report)
                # If detailed_text is empty, it means the problem
                # was missing dependencies, which we already reported.
                # Otherwise, we also need to report reordered jobs here.
                if detailed_text:
                    primary = _("Whitelist not topologically ordered")
                    secondary = _("Jobs will be reordered to fix broken "
                                  "dependencies")
                    self._manager.reactor.fire(
                        "prompt-warning", self.interface, primary,
                        secondary, detailed_text)

        self._manager.reactor.fire("report-jobs", messages)

    def post_gather(self, interface):
        """
        Verify that all patterns were used
        """
        if logging.getLogger().getEffectiveLevel() > logging.DEBUG:
            return

        # Test cases that matched a pattern but belong to no suite.
        orphan_test_cases = []
        for name, jobs in self.selected_jobs.items():
            is_test = any(job.get('type') == 'test' for job in jobs)
            has_suite = any(job.get('suite') for job in jobs)
            if is_test and not has_suite:
                orphan_test_cases.append(name)

        if orphan_test_cases:
            detailed_error = \
                ('Test cases not included in any test suite:\n'
                 '{0}\n\n'
                 'This might cause problems '
                 'when uploading test cases results.\n'
                 'Please make sure that the patterns you used are up-to-date\n'
                 .format('\n'.join(['- {0}'.format(tc)
                                    for tc in orphan_test_cases])))
            self._manager.reactor.fire(
                'prompt-warning', self.interface,
                'Orphan test cases detected',
                "Some test cases aren't included "
                'in any test suite', detailed_error)

        if self.unused_patterns:
            # p.pattern[1:-1] strips the "^...$" anchors added by
            # get_patterns so the user sees the original pattern text.
            detailed_error = \
                ('Unused patterns:\n'
                 '{0}\n\n'
                 "Please make sure that the patterns you used are up-to-date\n"
                 .format('\n'.join(['- {0}'.format(p.pattern[1:-1])
                                    for p in self.unused_patterns])))
            self._manager.reactor.fire(
                'prompt-warning', self.interface,
                'Unused patterns',
                'Please make sure that the patterns '
                'you used are up-to-date', detailed_error)

    @coerce_arguments(job=job_schema)
    def report_job(self, job):
        """Track pattern usage for each reported job; stop propagation of
        jobs that match no pattern."""
        name = job["name"]

        # NOTE(review): whitelist takes precedence; when only blacklist
        # patterns exist a *match* selects the job and a non-match stops
        # it, which looks inverted for a blacklist -- kept as-is, verify.
        patterns = self.whitelist_patterns or self.blacklist_patterns
        if patterns:
            match = next((p for p in patterns if p.match(name)), None)
            if match:
                # Keep track of which patterns didn't match any job
                if match in self.unused_patterns:
                    self.unused_patterns.remove(match)
                self.selected_jobs[name].append(job)
            else:
                # Stop if job not in whitelist or in blacklist
                self._manager.reactor.stop()
class JobsPrompt(Plugin):
    """Drive the interactive job-running loop: store pending jobs, prompt
    for each one in turn, honor dependencies and recovery answers, and
    publish the collected results.

    Fix vs. original: the "marking job failed" warning was built from two
    adjacent string literals with no separating space, producing the
    garbled message "...as failedat user request".
    """

    # Directory where messages are stored
    store_directory = Path(default="%(checkbox_data)s/store")

    # Maximum number of messages per directory
    store_directory_size = Int(default=1000)

    @property
    def persist(self):
        # Falls back to a throwaway in-memory backend until the shared
        # persist arrives via begin-persist.
        if self._persist is None:
            self._persist = Persist(backend=MemoryBackend())

        return self._persist.root_at("jobs_prompt")

    @property
    def store(self):
        # Lazily created job store rooted at the persist above.
        if self._store is None:
            self._store = JobStore(self.persist, self.store_directory,
                self.store_directory_size)

        return self._store

    def register(self, manager):
        super(JobsPrompt, self).register(manager)
        self._ignore = []
        self._persist = None
        self._store = None
        self._fail_current = False

        for (rt, rh) in [("expose-msgstore", self.expose_msgstore),
                         ("begin-persist", self.begin_persist),
                         ("begin-recover", self.begin_recover),
                         ("ignore-jobs", self.ignore_jobs),
                         ("prompt-job", self.prompt_job),
                         ("prompt-jobs", self.prompt_jobs),
                         ("prompt-finish", self.prompt_finish),
                         ("report", self.report),
                         ("report-job", self.report_job),
                         ("report-jobs", self.report_jobs)]:
            self._manager.reactor.call_on(rt, rh)

        # This should fire first thing during the gathering phase.
        self._manager.reactor.call_on("gather", self.begin_gather, -900)
        # This should fire last during gathering (i.e. after
        # all other gathering callbacks are finished).
        self._manager.reactor.call_on("gather", self.end_gather, 900)

    def expose_msgstore(self):
        self._manager.reactor.fire("store-access", self.store)

    def begin_persist(self, persist):
        self._persist = persist

    def begin_recover(self, recover):
        """Apply the user's crash-recovery answer: rerun the last job,
        mark it failed and continue, or start over."""
        if recover == RERUN_ANSWER:
            logging.debug("Recovering from last job")
        elif recover == CONTINUE_ANSWER:
            logging.debug("Marking last job failed, starting from next job")
            self._fail_current = True
        else:
            self.store.delete_all_messages()

    def begin_gather(self):
        # Speed boost during the gathering phase.  Not critical data anyway.
        self.store.safe_file_closing = False

    def end_gather(self):
        # Back to saving data very carefully once gathering is done.
        self.store.safe_file_closing = True

    def ignore_jobs(self, jobs):
        self._ignore = jobs

    def report_job(self, job):
        # Update job
        job.setdefault("status", UNINITIATED)
        self._manager.reactor.fire("report-%s" % job["plugin"], job)

    def report_jobs(self, jobs):
        for job in jobs:
            self.store.add(job)

    def prompt_job(self, interface, job):
        """Prompt a single job unless it is ignored or a dependency of it
        did not pass."""
        # Suites are identified by description, plain jobs by name.
        attribute = "description" if job.get("type") == "suite" else "name"
        if job[attribute] in self._ignore:
            job["status"] = UNTESTED
        else:
            if "depends" in job:
                # Peek at every stored message without disturbing the
                # current pending offset.
                offset = self.store.get_pending_offset()
                self.store.set_pending_offset(0)
                messages = self.store.get_pending_messages()
                self.store.set_pending_offset(offset)

                # Skip if any message in the depends doesn't pass
                depends = job["depends"]
                for message in messages:
                    if (message["name"] in depends
                            and message["status"] != PASS):
                        return

            self._manager.reactor.fire("prompt-%s" % job["plugin"],
                interface, job)

    def prompt_jobs(self, interface):
        """Main prompting loop: walk pending messages forwards/backwards
        according to the interface direction."""
        while True:
            if interface.direction == PREV:
                if not self.store.remove_pending_offset():
                    break

            if self._fail_current:
                # Crash recovery: fail the job that was running and move on.
                msg_to_fail = self.store.get_pending_messages(1)
                job_to_fail = msg_to_fail[0]
                job_to_fail["status"] = "fail"
                # FIX: the original concatenated literals without a space
                # ("...as failedat user request"); also use lazy log args.
                logging.warning("Marking job %s as failed "
                                "at user request", job_to_fail["name"])
                self.store.update(job_to_fail)
                self.store.add_pending_offset()
                self._fail_current = False

            messages = self.store.get_pending_messages(1)
            if not messages:
                break

            done_count = self.store.get_pending_offset()
            pending_count = self.store.count_pending_messages()
            progress = (done_count, done_count + pending_count)
            self._manager.reactor.fire("set-progress", progress)

            job = messages[0]
            self._manager.reactor.fire("prompt-job", interface, job)
            self.store.update(job)

            if interface.direction == NEXT:
                self.store.add_pending_offset()

    def prompt_finish(self, interface):
        if interface.direction == NEXT:
            self.store.delete_all_messages()

    def report(self):
        """Publish all stored messages grouped as tests, suites and
        attachments."""
        self.store.set_pending_offset(0)
        messages = self.store.get_pending_messages()
        self.store.add_pending_offset(len(messages))

        tests = [m for m in messages
                 if m.get("type") in ("test", "metric")]
        self._manager.reactor.fire("report-tests", tests)

        suites = [m for m in messages if m.get("type") == "suite"]
        self._manager.reactor.fire("report-suites", suites)

        attachments = [m for m in messages
                       if m.get("type") == "attachment" and "data" in m]
        self._manager.reactor.fire("report-attachments", attachments)
class LaunchpadReport(Plugin):
    """Accumulate hardware/software/test information from report events
    and render it as a Launchpad submission XML file.

    Fixes vs. original: report() leaked three file handles (stylesheet
    read, stylesheet write, submission write) -- now closed via context
    managers -- and wrote the stylesheet into the submission directory
    before ensuring that directory existed.
    """

    # Filename where submission information is cached.
    filename = Path(default="%(checkbox_data)s/submission.xml")

    # Prompt for place to save the submission file
    submission_path_prompt = String(default="")

    # XML Schema
    schema = Path(default="%(checkbox_share)s/report/hardware-1_0.rng")

    # XSL Stylesheet
    stylesheet = Path(default="%(checkbox_share)s/report/checkbox.xsl")

    def register(self, manager):
        super(LaunchpadReport, self).register(manager)
        self._report = {
            "summary": {
                "private": False,
                "contactable": False,
                "live_cd": False},
            "hardware": {},
            "software": {
                "packages": []},
            "questions": [],
            "context": []}

        for (rt, rh) in [
             ("report-attachments", self.report_attachments),
             ("report-client", self.report_client),
             ("report-cpuinfo", self.report_cpuinfo),
             ("report-datetime", self.report_datetime),
             ("report-dpkg", self.report_dpkg),
             ("report-lsb", self.report_lsb),
             ("report-package", self.report_package),
             ("report-uname", self.report_uname),
             ("report-system_id", self.report_system_id),
             ("report-suites", self.report_suites),
             ("report-review", self.report_review),
             ("report-tests", self.report_tests)]:
            self._manager.reactor.call_on(rt, rh)

        # Launchpad report should be generated last.
        self._manager.reactor.call_on("report", self.report, 100)
        # Ask where to put submission file
        self._manager.reactor.call_on("prompt-begin", self.prompt_begin, 110)

    def prompt_begin(self, interface):
        """Optionally let the user choose where the submission is saved."""
        if self.submission_path_prompt:
            # Ignore whether to submit to HEXR
            new_filename = interface.show_entry(
                self.submission_path_prompt,
                self.filename)[0]
            if new_filename != "":
                self.filename = new_filename

    def report_attachments(self, attachments):
        """Route well-known attachments into the hardware section and keep
        any other printable attachment as context."""
        for attachment in attachments:
            name = attachment["name"]
            if "sysfs_attachment" in name:
                self._report["hardware"]["sysfs-attributes"] = \
                    attachment["data"]
            elif "dmi_attachment" in name:
                self._report["hardware"]["dmi"] = attachment["data"]
            elif "udev_attachment" in name:
                self._report["hardware"]["udev"] = attachment["data"]
            elif (all(c in printable for c in attachment["data"])
                    and attachment['status'] != 'unsupported'):
                self._report["context"].append({
                    "command": attachment["command"],
                    "data": attachment["data"]})

    def report_client(self, client):
        self._report["summary"]["client"] = client

    def report_cpuinfo(self, resources):
        # Expand the single cpuinfo resource into one entry per processor.
        cpuinfo = resources[0]
        processors = []
        for i in range(int(cpuinfo["count"])):
            cpuinfo = dict(cpuinfo)
            cpuinfo["name"] = i
            processors.append(cpuinfo)

        self._report["hardware"]["processors"] = processors

    def report_datetime(self, datetime):
        self._report["summary"]["date_created"] = datetime

    def report_dpkg(self, resources):
        dpkg = resources[0]
        self._report["summary"]["architecture"] = dpkg["architecture"]

    def report_lsb(self, resources):
        lsb = resources[0]
        self._report["software"]["lsbrelease"] = dict(lsb)
        self._report["summary"]["distribution"] = lsb["distributor_id"]
        self._report["summary"]["distroseries"] = lsb["release"]

    def report_package(self, resources):
        self._report["software"]["packages"] = resources

    def report_uname(self, resources):
        uname = resources[0]
        self._report["summary"]["kernel-release"] = (
            "{release} {version}".format(release=uname["release"],
                                         version=uname["version"]))

    def report_system_id(self, system_id):
        self._report["summary"]["system_id"] = system_id

    def report_tests(self, tests):
        # Kept for report_review; each test also becomes a "question".
        self.tests = tests
        for test in tests:
            question = {
                "name": test["name"],
                "answer": test["status"],
                "comment": test.get("data", "")}
            self._report["questions"].append(question)

    def report(self):
        """Render, write and validate the submission XML plus its local
        stylesheet copy."""
        # Prepare the payload and attach it to the form
        stylesheet_path = os.path.join(
            os.path.dirname(self.filename),
            os.path.basename(self.stylesheet))
        report_manager = LaunchpadReportManager(
            "system", "1.0", stylesheet_path, self.schema)
        payload = report_manager.dumps(self._report).toprettyxml("")

        # Create the output directory *before* writing anything into it
        # (the original wrote the stylesheet first, which could fail).
        directory = os.path.dirname(self.filename)
        safe_make_directory(directory)

        # Write the report; the stylesheet is templated on the environment.
        with open(self.stylesheet) as stylesheet_file:
            stylesheet_data = stylesheet_file.read() % os.environ
        with open(stylesheet_path, "w") as stylesheet_copy:
            stylesheet_copy.write(stylesheet_data)
        with open(self.filename, "w") as report_file:
            report_file.write(payload)

        # Validate the report
        if not report_manager.validate(payload):
            self._manager.reactor.fire("report-error", _("""\
The generated report seems to have validation errors,
so it might not be processed by Launchpad."""))

        self._manager.reactor.fire("launchpad-report", self.filename)

    def report_review(self, interface):
        """
        Show test report in the interface
        """
        report = {}

        def add_job(job):
            # Recursively attach a job under its suite hierarchy; returns
            # the dict node created for suites.
            is_suite = 'type' in job and job['type'] == 'suite'
            if 'suite' in job:
                suite_name = job['suite']
                parent_node = add_job(self.suites[suite_name])
                if is_suite:
                    if job['description'] in parent_node:
                        return parent_node[job['description']]
                    node = {}
                    parent_node[job['description']] = node
                    return node
                parent_node[job['name']] = job
            else:
                # Top-level entry: suites keyed by description, tests by
                # name.
                if is_suite:
                    field = 'description'
                else:
                    field = 'name'
                if job[field] in report:
                    return report[job[field]]
                node = {}
                report[job[field]] = node
                return node

        for test in self.tests:
            add_job(test)

        try:
            interface.show_report("Test case results report", report)
        except NotImplementedError:
            # Silently ignore the interfaces that don't implement the method
            pass

    def report_suites(self, suites):
        """
        Get tests results and store it to display them later
        """
        self.suites = dict([(suite['name'], suite) for suite in suites])
class BackendInfo(Plugin):
    """Fork a privileged backend process connected through a pair of
    FIFOs and relay messages that require root to it, matching replies
    to requests with sequence numbers.

    Fix vs. original: the body of get_root_command() was garbled in the
    source (apparently redacted); the uid/pkexec/sudo branch has been
    reconstructed from the surviving comments and MUST be verified
    against the upstream history.
    """

    # how long to wait for I/O from/to the backend before the call returns.
    # How we behave if I/O times out is dependent on the situation.
    timeout = Float(default=60.0)

    command = Path(default="%(checkbox_share)s/backend")

    # Monotonically increasing id attached to each outgoing message and
    # the id we expect the next reply to carry.
    next_sequence = 0
    expected_sequence = 0

    def write_to_parent(self, object):
        """Send (sequence, object) to the backend and bump the counters."""
        message = (self.next_sequence, object,)
        logging.debug("Sending message with sequence number %s to backend"
                      % self.next_sequence)
        self.parent_writer.write_object(message)
        self.expected_sequence = self.next_sequence
        self.next_sequence += 1

    def read_from_parent(self):
        """Read replies until one with the expected sequence arrives;
        returns None/falsy on timeout."""
        correct_sequence = False
        while not correct_sequence:
            ro = self.parent_reader.read_object()
            if ro:
                sequence, result = ro
                logging.debug("Expecting sequence number %s from backend, "
                              "got sequence number %s"
                              % (self.expected_sequence, sequence))
                if (self.expected_sequence == sequence):
                    correct_sequence = True
                else:
                    logging.warning("Backend sent wrong sequence number, "
                                    "Discarding message and re-reading")
            else:
                # If we timed out, just return nothing, the rest of
                # the code knows how to handle this.
                return ro
        return result

    def register(self, manager):
        super(BackendInfo, self).register(manager)
        for (rt, rh) in [("message-exec", self.message_exec),
                         ("stop", self.stop)]:
            self._manager.reactor.call_on(rt, rh)

        # Backend should run as early as possible
        self._manager.reactor.call_on("gather", self.gather, -100)

    def get_command(self, *args):
        # Propagate our PATH so the backend can find the same tools.
        command = [self.command, "--path=%s" % os.environ["PATH"]]
        return command + list(args)

    def get_root_command(self, *args):
        """Return the backend command wrapped in a privilege-elevation
        prefix appropriate for the current environment."""
        uid = os.getuid()
        password_text = _("SYSTEM TESTING: Please enter your password. "
                          "Some tests require root access to run properly. "
                          "Your password will never be stored and will never "
                          "be submitted with test results.")
        password_prompt = _("PASSWORD: ")
        # NOTE(review): this branch was unreadable (redacted) in the
        # source and was reconstructed from the comments below -- verify.
        if uid == 0:
            # Already root, no prefix needed.
            prefix = []
        elif call(["which", "pkexec"],
                  stdout=PIPE, stderr=PIPE) == 0:
            prefix = ["pkexec"]
        else:
            # We fall back to good old sudo if pkexec is not present. Sudo
            # *should* be present in any Ubuntu installation.
            prefix = ["sudo", "-p", password_text + " " + password_prompt]
        return prefix + self.get_command(*args)

    def spawn_backend(self, input_fifo, output_fifo):
        """Fork; the child execs the privileged backend and never returns."""
        self.pid = os.fork()
        if self.pid == 0:
            root_command = self.get_root_command(input_fifo, output_fifo)
            os.execvp(root_command[0], root_command)
            # Should never get here

    def ping_backend(self):
        """Return True when the backend answers "ping" with "pong"."""
        if not self.parent_reader or not self.parent_writer:
            return False
        self.write_to_parent("ping")
        result = self.read_from_parent()
        return result == "pong"

    def gather(self):
        """Create the FIFO pair and spawn the backend, retrying up to
        three times before giving up."""
        self.directory = mkdtemp(prefix="checkbox")
        child_input = create_fifo(os.path.join(self.directory, "input"),
                                  0o600)
        child_output = create_fifo(os.path.join(self.directory, "output"),
                                   0o600)

        self.backend_is_alive = False
        for attempt in range(1, 4):
            self.spawn_backend(child_input, child_output)
            # Only returns if I'm still the parent,
            # so I can do parent stuff here
            self.parent_writer = FifoWriter(child_input,
                                            timeout=self.timeout)
            self.parent_reader = FifoReader(child_output,
                                            timeout=self.timeout)
            if self.ping_backend():
                logging.debug("Backend responded, continuing execution.")
                self.backend_is_alive = True
                break
            else:
                logging.debug("Backend didn't respond, "
                              "trying to create again.")

        if not self.backend_is_alive:
            logging.warning("Privileged backend not responding. " +
                            "jobs specifying user will not be run")

    def message_exec(self, message):
        """Forward a job that needs a different user to the backend and
        fire its result; fail it if the backend is unavailable."""
        if "user" in message:
            if "environ" in message:
                # Prepare variables to be "exported" from my environment
                # to the backend's.
                backend_environ = ["%s=%s" % (key, os.environ[key])
                                   for key in message["environ"]
                                   if key in os.environ]
                message = dict(message)  # so as to not wreck the
                                         # original message
                message["environ"] = backend_environ

            if (self.backend_is_alive and not self.ping_backend()):
                self.backend_is_alive = False

            if self.backend_is_alive:
                self.write_to_parent(message)
                while True:
                    result = self.read_from_parent()
                    if result:
                        break
                    else:
                        logging.info("Waiting for result...")
            else:
                result = (FAIL, "Unable to test. Privileges are "
                                "required for this job.", 0,)
            if result:
                self._manager.reactor.fire("message-result", *result)

    def stop(self):
        """Tell the backend to exit, close the FIFOs and clean up."""
        self.write_to_parent("stop")
        self.parent_writer.close()
        self.parent_reader.close()
        shutil.rmtree(self.directory)
        if self.backend_is_alive:
            os.waitpid(self.pid, 0)
class UserInterface(Plugin):
    """Instantiate the configured UI class and step it through the fixed
    sequence of prompt events, honoring back/forward navigation."""

    # Module where the user interface implementation is defined.
    interface_module = String(default="checkbox.user_interface")

    # Class implementing the UserInterface interface.
    interface_class = String(default="UserInterface")

    # HACK: this is only a temporary workaround to internationalize the
    # user interface title and should be eventually removed.
    gettext.textdomain("checkbox")

    # Title of the user interface
    title = String(default=_("System Testing"))

    # Path where data files are stored.
    data_path = Path(required=False)

    @property
    def persist(self):
        # Fall back to a volatile in-memory backend until begin-persist
        # supplies the shared persist object.
        if self._persist is None:
            self._persist = Persist(backend=MemoryBackend())
        return self._persist.root_at("user_interface")

    def register(self, manager):
        super(UserInterface, self).register(manager)
        self._persist = None
        handlers = (
            ("prompt-begin", self.prompt_begin),
            ("stop", self.save_persist),
            ("begin-persist", self.begin_persist),
            ("run", self.run),
            ("launchpad-report", self.launchpad_report),
            ("set-progress", self.set_progress),
        )
        for event, handler in handlers:
            self._manager.reactor.call_on(event, handler)
        # Runs after the default prompt-job handlers (priority 101).
        self._manager.reactor.call_on("prompt-job", self.update_status, 101)

    def update_status(self, interface, job):
        # The UI can choose to implement this method to get
        # information about each job that completes.
        interface.update_status(job)

    def begin_persist(self, persist):
        self._persist = persist

    def prompt_begin(self, interface):
        # Restore any UI flags saved by a previous run.
        self._interface.ui_flags = self.persist.get("ui_flags", {})

    def save_persist(self, *args):
        self.persist.set("ui_flags", self._interface.ui_flags)
        self.persist.save()

    def set_progress(self, progress):
        self._interface.progress = progress

    def run(self):
        """Create the UI instance and walk the prompt event sequence."""
        interface_module = __import__(self.interface_module,
                                      None, None, [''])
        interface_class = getattr(interface_module, self.interface_class)
        interface = interface_class(self.title, self.data_path)
        self._interface = interface

        event_types = (
            "prompt-begin",
            "prompt-gather",
            "prompt-jobs",
            "prompt-report",
            "prompt-exchange",
            "prompt-finish",
        )

        index = 0
        while index < len(event_types):
            self._manager.reactor.fire(event_types[index], interface)
            # Going back below the first event just re-fires it.
            if interface.direction == PREV:
                index = max(index - 1, 0)
            else:
                index += 1

    def launchpad_report(self, launchpad_report):
        self._interface.report_url = "file://%s" % posixpath.abspath(
            launchpad_report)