def find_solution_ureport(self, db, ureport, osr=None):
    """
    Find a "probable fix" solution for the problem matching *ureport*.

    The uReport is converted to the current format and validated, then the
    matching report and its problem are looked up.  For the first matching
    OpSysRelease (all of them when *osr* is None, otherwise only the one
    with *osr*'s id) that has a probable-fix build recorded, the fix is
    returned only if its build is newer than the affected package named in
    the report.

    Returns the solution object from self._posr_to_solution(), or None
    when no report/problem/fix is found or the fix is not newer.
    """
    ureport = ureport2(ureport)
    validate(ureport)
    db_report = self._get_db_report(db, ureport)
    if db_report is None:
        return None
    if db_report.problem is None:
        return None
    for posr in db_report.problem.opsysreleases:
        if osr is None or posr.opsysrelease_id == osr.id:
            if posr.probable_fix_build is not None:
                db_build = posr.probable_fix_build
                # Locate the package the crash was reported against.
                # (The original scanned with a bare for/break and then
                # re-tested the loop variable, which raised NameError when
                # ureport["packages"] was empty.)
                pkg = next((p for p in ureport["packages"]
                            if p.get("package_role", "") == "affected"),
                           None)
                if pkg is None:
                    # No affected package in the report — nothing to
                    # compare the fix against.
                    return None
                # Fixing version must be greater than affected version
                if cmp_evr((pkg["epoch"], pkg["version"], pkg["release"]),
                           (db_build.epoch, db_build.version,
                            db_build.release)) < 0:
                    return self._posr_to_solution(posr)
                return None
    return None
def find_solution_ureport(self, db, ureport, osr=None):
    """
    Find a "probable fix" solution for the problem matching *ureport*.

    The uReport is converted to the current format and validated, then the
    matching report and its problem are looked up.  For the first matching
    OpSysRelease (all of them when *osr* is None, otherwise only the one
    with *osr*'s id) that has a probable-fix build recorded, the fix is
    returned only when that build is newer than the affected package
    named in the report; otherwise None is returned.
    """
    ureport = ureport2(ureport)
    validate(ureport)
    db_report = self._get_db_report(db, ureport)
    if db_report is None:
        # No report stored for this uReport's hash.
        return None
    if db_report.problem is None:
        # Report exists but was never clustered into a problem.
        return None
    for posr in db_report.problem.opsysreleases:
        if osr is None or posr.opsysrelease_id == osr.id:
            if posr.probable_fix_build is not None:
                db_build = posr.probable_fix_build
                # Scan for the package with the "affected" role; the loop
                # variable is re-tested below to detect a missing match.
                # NOTE(review): if ureport["packages"] is empty, ``pkg``
                # is unbound here and the next line raises NameError —
                # confirm whether an empty list can pass validation.
                for pkg in ureport["packages"]:
                    if pkg.get("package_role", "") == "affected":
                        break
                if pkg.get("package_role", "") != "affected":
                    return None
                # Fixing version must be greater than affected version
                if cmp_evr((pkg["epoch"], pkg["version"], pkg["release"]),
                           (db_build.epoch, db_build.version,
                            db_build.release)) < 0:
                    return self._posr_to_solution(posr)
                return None
    return None
def text_overview(self, cmdline, db, opsys, release):
    """
    Build a plain-text overview of the top crash problems for the given
    operating system *release* over the last N days (7 by default, or
    ``cmdline.last``).

    Only problems of the configured problem types are listed; low-quality
    problems are skipped unless ``cmdline.include_low_quality`` is set.
    Returns the whole overview as one formatted string.
    """
    # Python 3: sorted() has no ``cmp`` argument — key functions are used
    # below instead (the original Python 2 ``cmp=`` lambdas raised
    # TypeError on Python 3).
    from functools import cmp_to_key

    release_ids = get_release_ids(db, opsys, release)

    num_days = 7
    if cmdline.last:
        num_days = int(cmdline.last)
    since = datetime.datetime.now() - datetime.timedelta(days=num_days)

    hot = query_hot_problems(db, release_ids,
                             history=self.history_type,
                             last_date=since)
    if not cmdline.include_low_quality:
        hot = [x for x in hot if x.quality >= 0]

    ptypes = ""
    if len(self.ptypes) != len(problemtypes):
        ptypes = " " + ", ".join(self.ptypes)
    out = "Overview of the top {0}{1} crashes over the last {2} days:\n".format(
        cmdline.count, ptypes, num_days)

    hot = [p for p in hot if p.type in self.ptypes]

    for (rank, problem) in enumerate(hot[:cmdline.count]):
        out += "#{0} {1} - {2}x\n".format(
            rank + 1,
            ', '.join(problem.unique_component_names),
            problem.count)

        # Reports with bugzillas for this OpSysRelease go first
        reports = sorted(
            problem.reports,
            key=lambda x: len([b for b in x.bugs
                               if b.opsysrelease_id in release_ids]),
            reverse=True)

        if webfaf_installed():
            for report in reports[:3]:
                out += "{0}\n".format(
                    reverse("reports.bthash_forward",
                            bthash=report.hashes[0].hash))
                for bug in report.bugs:
                    out += " {0}\n".format(bug.url)
        else:
            for report in reports[:3]:
                out += "Report BT hash: {0}\n".format(
                    report.hashes[0].hash)
        if len(problem.reports) > 3:
            out += "... and {0} more.\n".format(len(problem.reports) - 3)

        if problem.tainted:
            out += "Kernel tainted.\n"

        crash_function = problem.crash_function
        if crash_function:
            out += "Crash function: {0}\n".format(crash_function)

        affected_all = []
        for report in problem.reports:
            affected_known = [
                (affected.build.base_package_name,
                 affected.build.epoch,
                 affected.build.version,
                 affected.build.release)
                for affected in get_crashed_package_for_report(
                    db, report.id)
            ]
            affected_unknown = \
                get_crashed_unknown_package_nevr_for_report(db, report.id)
            affected_all += affected_known + affected_unknown
        # Newest builds first; cmp_evr compares (epoch, version, release).
        affected_all = sorted(
            set(affected_all),
            key=cmp_to_key(lambda a, b: cmp_evr(a[1:], b[1:])),
            reverse=True)
        if affected_all:
            out += "Affected builds: {0}".format(", ".join([
                "{0}-{1}:{2}-{3}".format(n, e, v, r)
                for (n, e, v, r) in affected_all[:5]
            ]))
            # Fix: count the remaining *builds*; the original counted
            # problem.reports, which is a different collection.
            if len(affected_all) > 5:
                out += " and {0} more.".format(len(affected_all) - 5)
            out += "\n"

        pfix = problem.probable_fix_for_opsysrelease_ids(release_ids)
        if pfix:
            out += ("Problem seems to be fixed since the release of {0}\n"
                    .format(pfix))

        out += "\n"

    return out
def run(self, cmdline, db):
    """
    Mark a problem probably fixed if there is a new build of the problem's
    affected package, for which no crash reports have come in.

    Iterates over the (osplugin, release) tasks from the command line; for
    every problem it finds the newest affected package across the
    problem's reports and compares it against the newest sufficiently-aged
    released build of the same package.  Results are stored via
    self._save_probable_fix().  Returns 0 on success, 1 when the command
    line arguments cannot be processed.
    """
    try:
        tasks = self._get_tasks(cmdline, db)
    except FafError as ex:
        self.log_error(
            "Unable to process command line arguments: {0}".format(
                str(ex)))
        return 1

    problems = get_problems(db)

    task_i = 0
    for osplugin, db_release in tasks:
        task_i += 1
        self.log_info("[{0} / {1}] Processing '{2} {3}'".format(
            task_i, len(tasks), osplugin.nice_name, db_release.version))

        self.log_debug("Getting builds...")
        opsys_builds = osplugin.get_released_builds(db_release.version)

        # newest_builds: first aged build seen per package name
        # (NOTE(review): assumes get_released_builds yields newest-first —
        # confirm in the plugin).  all_builds: every aged build per name.
        newest_builds = {}
        all_builds = {}
        now = datetime.now()
        for build in opsys_builds:
            age = now - build["completion_time"]
            # If a hot new build comes out, we need to wait a certain
            # period of time for people to use it before we can make
            # conclusions about it being a probable fix.
            if age.days >= osplugin.build_aging_days:
                if build["name"] not in newest_builds:
                    newest_builds[build["name"]] = build

                if build["name"] not in all_builds:
                    all_builds[build["name"]] = [
                        build,
                    ]
                else:
                    all_builds[build["name"]].append(build)

        probably_fixed_total = 0
        problems_in_release = 0
        problem_counter = 0
        for problem in problems:
            problem_counter += 1
            self.log_debug("Processing problem ID:{0} {1}/{2}:".format(
                problem.id, problem_counter, len(problems)))
            # Maps package name -> {'reports': [...], 'nevr': (n, e, v, r)}
            affected_newest = {}
            affected_not_found = False

            reports_for_release = \
                get_reports_for_opsysrelease(db, problem.id, db_release.id)

            # For all the reports, we need the affected packages and their
            # newest versions.
            if reports_for_release:
                problems_in_release += 1
            else:
                self.log_debug(
                    " This problem doesn't appear in this release.")
                self._save_probable_fix(db, problem, db_release, None)
                # Next problem
                continue

            for report in reports_for_release:
                # First we try to find the affected package among the known
                # packages.
                affected_known = [
                    (affected.build.base_package_name,
                     affected.build.epoch,
                     affected.build.version,
                     affected.build.release)
                    for affected in get_crashed_package_for_report(
                        db, report.id)
                ]

                # Then among the unknown packages.
                affected_unknown = \
                    get_crashed_unknown_package_nevr_for_report(db, report.id)
                # We get the base package name directly from the report
                affected_unknown = [(report.component.name,
                                     affected[1],
                                     affected[2],
                                     affected[3])
                                    for affected in affected_unknown]

                affected_all = affected_known + affected_unknown
                if not affected_all:
                    affected_not_found = True
                    break

                for affected in affected_all:
                    if affected[0] in affected_newest:
                        # If a problem contains multiple reports with the same
                        # affected package, we only want the newest version of
                        # it.
                        affected_newest[affected[0]]['reports'].append(
                            report)
                        if cmp_evr(
                                affected[1:],
                                affected_newest[affected[0]]
                                ['nevr'][1:]) > 0:
                            affected_newest[affected[0]]['nevr'] = affected
                    else:
                        affected_newest[affected[0]] = {
                            'reports': [
                                report,
                            ],
                            'nevr': affected
                        }

            if affected_not_found or not affected_newest:
                # Affected package of one of the reports was not found.
                # We can't make any conclusions.
                self.log_debug(" Affected package not found.")
                self._save_probable_fix(db, problem, db_release, None)
                # Next problem
                continue

            if len(affected_newest) > 1:
                # Multiple different affected packages => cannot be fixed
                # by a single package update
                self.log_debug(
                    " Multiple affected packages. No simple fix.")
                self._save_probable_fix(db, problem, db_release, None)
                # Next problem
                continue

            probably_fixed_since = datetime.fromtimestamp(0)
            # Exactly one affected package remains at this point.
            pkg = list(affected_newest.values())[0]

            name = pkg['nevr'][0]
            newest_build = newest_builds.get(name, False)
            if newest_build:
                newest_evr = (newest_build["epoch"] or 0,
                              newest_build["version"],
                              newest_build["release"])
            # newest_evr is only read when newest_build is truthy, so the
            # conditional binding above is safe.
            if newest_build and cmp_evr(newest_evr, pkg['nevr'][1:]) > 0:
                # Newest available build is newer than the newest version
                # of the affected package. Now find the oldest such
                # probable fix.
                i = 0
                while i < len(all_builds[name]) and cmp_evr(
                        (all_builds[name][i]["epoch"] or 0,
                         all_builds[name][i]["version"],
                         all_builds[name][i]["release"]),
                        pkg['nevr'][1:]) > 0:
                    i += 1
                # i-1 is the last build still newer than the affected one.
                completion_time = all_builds[name][i - 1]["completion_time"]
                probably_fixed_since = max(completion_time,
                                           probably_fixed_since)
                pkg["probable_fix"] = (name,
                                       all_builds[name][i - 1]["epoch"] or 0,
                                       all_builds[name][i - 1]["version"],
                                       all_builds[name][i - 1]["release"])

                self._save_probable_fix(db, problem, db_release,
                                        pkg["probable_fix"],
                                        probably_fixed_since)
                self.log_debug(" Probably fixed for {0} days.".format(
                    (datetime.now() - probably_fixed_since).days))
                probably_fixed_total += 1
            else:
                self._save_probable_fix(db, problem, db_release, None)
                self.log_debug(" Not fixed.")

            db.session.flush()

        if problems_in_release > 0:
            self.log_info(
                "{0}% of problems in this release probably fixed.".format(
                    (probably_fixed_total * 100) // problems_in_release))
        else:
            self.log_info("No problems found in this release.")

    return 0
def run(self, cmdline, db):
    """
    Mark a problem probably fixed if there is a new build of the problem's
    affected package, for which no crash reports have come in.

    Iterates over the (osplugin, release) tasks from the command line; for
    every problem it finds the newest affected package across the
    problem's reports and compares it against the newest sufficiently-aged
    released build of the same package.  Results are stored via
    self._save_probable_fix().  Returns 0 on success, 1 when the command
    line arguments cannot be processed.
    """
    try:
        tasks = self._get_tasks(cmdline, db)
    except FafError as ex:
        self.log_error("Unable to process command line arguments: {0}"
                       .format(str(ex)))
        return 1

    problems = get_problems(db)

    task_i = 0
    for osplugin, db_release in tasks:
        task_i += 1
        self.log_info("[{0} / {1}] Processing '{2} {3}'"
                      .format(task_i, len(tasks), osplugin.nice_name,
                              db_release.version))

        self.log_debug("Getting builds...")
        opsys_builds = osplugin.get_released_builds(db_release.version)

        # newest_builds: first aged build seen per package name.
        # all_builds: every aged build per name.
        newest_builds = {}
        all_builds = {}
        now = datetime.now()
        for build in opsys_builds:
            age = now - build["completion_time"]
            # If a hot new build comes out, we need to wait a certain
            # period of time for people to use it before we can make
            # conclusions about it being a probable fix.
            if age.days >= osplugin.build_aging_days:
                if build["name"] not in newest_builds:
                    newest_builds[build["name"]] = build

                if build["name"] not in all_builds:
                    all_builds[build["name"]] = [build, ]
                else:
                    all_builds[build["name"]].append(build)

        probably_fixed_total = 0
        problems_in_release = 0
        problem_counter = 0
        for problem in problems:
            problem_counter += 1
            self.log_debug("Processing problem ID:{0} {1}/{2}:"
                           .format(problem.id, problem_counter,
                                   len(problems)))
            # Maps package name -> {'reports': [...], 'nevr': (n, e, v, r)}
            affected_newest = {}
            affected_not_found = False

            reports_for_release = \
                get_reports_for_opsysrelease(db, problem.id, db_release.id)

            # For all the reports, we need the affected packages and their
            # newest versions.
            if reports_for_release:
                problems_in_release += 1
            else:
                self.log_debug(" This problem doesn't appear in this release.")
                self._save_probable_fix(db, problem, db_release, None)
                # Next problem
                continue

            for report in reports_for_release:
                # First we try to find the affected package among the known
                # packages.
                affected_known = [
                    (affected.build.base_package_name,
                     affected.build.epoch,
                     affected.build.version,
                     affected.build.release) for affected in
                    get_crashed_package_for_report(db, report.id)]

                # Then among the unknown packages.
                affected_unknown = \
                    get_crashed_unknown_package_nevr_for_report(db, report.id)
                # We get the base package name directly from the report
                affected_unknown = [(report.component.name,
                                     affected[1],
                                     affected[2],
                                     affected[3])
                                    for affected in affected_unknown]

                affected_all = affected_known + affected_unknown
                if not affected_all:
                    affected_not_found = True
                    break

                for affected in affected_all:
                    if affected[0] in affected_newest:
                        # If a problem contains multiple reports with the same
                        # affected package, we only want the newest version of
                        # it.
                        affected_newest[affected[0]]['reports'].append(report)
                        if cmp_evr(affected[1:],
                                   affected_newest[affected[0]]['nevr'][1:]) > 0:
                            affected_newest[affected[0]]['nevr'] = affected
                    else:
                        affected_newest[affected[0]] = {
                            'reports': [report, ],
                            'nevr': affected
                        }

            if affected_not_found or not affected_newest:
                # Affected package of one of the reports was not found.
                # We can't make any conclusions.
                self.log_debug(" Affected package not found.")
                self._save_probable_fix(db, problem, db_release, None)
                # Next problem
                continue

            if len(affected_newest) > 1:
                # Multiple different affected packages => cannot be fixed
                # by a single package update
                self.log_debug(" Multiple affected packages. No simple fix.")
                self._save_probable_fix(db, problem, db_release, None)
                # Next problem
                continue

            probably_fixed_since = datetime.fromtimestamp(0)
            # Exactly one affected package remains at this point.
            pkg = list(affected_newest.values())[0]

            name = pkg['nevr'][0]
            newest_build = newest_builds.get(name, False)
            if newest_build:
                newest_evr = (newest_build["epoch"] or 0,
                              newest_build["version"],
                              newest_build["release"])
            # newest_evr is only read when newest_build is truthy, so the
            # conditional binding above is safe.
            if newest_build and cmp_evr(newest_evr, pkg['nevr'][1:]) > 0:
                # Newest available build is newer than the newest version
                # of the affected package. Now find the oldest such
                # probable fix.
                i = 0
                while i < len(all_builds[name]) and cmp_evr(
                        (all_builds[name][i]["epoch"] or 0,
                         all_builds[name][i]["version"],
                         all_builds[name][i]["release"]),
                        pkg['nevr'][1:]) > 0:
                    i += 1
                # i-1 is the last build still newer than the affected one.
                completion_time = all_builds[name][i-1]["completion_time"]
                probably_fixed_since = max(completion_time,
                                           probably_fixed_since)
                pkg["probable_fix"] = (name,
                                       all_builds[name][i-1]["epoch"] or 0,
                                       all_builds[name][i-1]["version"],
                                       all_builds[name][i-1]["release"])

                self._save_probable_fix(db, problem, db_release,
                                        pkg["probable_fix"],
                                        probably_fixed_since)
                self.log_debug(" Probably fixed for {0} days.".format(
                    (datetime.now() - probably_fixed_since).days))
                probably_fixed_total += 1
            else:
                self._save_probable_fix(db, problem, db_release, None)
                self.log_debug(" Not fixed.")

            db.session.flush()

        if problems_in_release > 0:
            self.log_info("{0}% of problems in this release probably fixed."
                          .format((probably_fixed_total * 100)
                                  // problems_in_release))
        else:
            self.log_info("No problems found in this release.")

    # Fix: explicitly return a success exit code — the error path above
    # returns 1, but the original fell off the end (implicit None).
    return 0
def text_overview(self, cmdline, db, opsys, release):
    """
    Build a plain-text overview of the top crash problems for the given
    operating system *release* over the last N days (7 by default, or
    ``cmdline.last``).

    Only problems of the configured problem types are listed; low-quality
    problems are skipped unless ``cmdline.include_low_quality`` is set.
    Returns the whole overview as one formatted string.
    """
    # Python 3: sorted() has no ``cmp`` argument — key functions are used
    # below instead (the original Python 2 ``cmp=`` lambdas raised
    # TypeError on Python 3).
    from functools import cmp_to_key

    release_ids = get_release_ids(db, opsys, release)

    num_days = 7
    if cmdline.last:
        num_days = int(cmdline.last)
    since = datetime.datetime.now() - datetime.timedelta(days=num_days)

    hot = query_hot_problems(db, release_ids,
                             history=self.history_type,
                             last_date=since)
    if not cmdline.include_low_quality:
        hot = [x for x in hot if x.quality >= 0]

    ptypes = ""
    if len(self.ptypes) != len(problemtypes):
        ptypes = " "+", ".join(self.ptypes)
    out = "Overview of the top {0}{1} crashes over the last {2} days:\n".format(
        cmdline.count, ptypes, num_days)

    hot = [p for p in hot if p.type in self.ptypes]

    for (rank, problem) in enumerate(hot[:cmdline.count]):
        out += "#{0} {1} - {2}x\n".format(
            rank+1, ', '.join(problem.unique_component_names),
            problem.count)

        # Reports with bugzillas for this OpSysRelease go first
        reports = sorted(problem.reports,
                         key=lambda x: len([b for b in x.bugs
                                            if b.opsysrelease_id
                                            in release_ids]),
                         reverse=True)

        if webfaf_installed():
            for report in reports[:3]:
                out += "{0}\n".format(reverse("reports.bthash_forward",
                                              bthash=report.hashes[0].hash))
                for bug in report.bugs:
                    out += " {0}\n".format(bug.url)
        else:
            for report in reports[:3]:
                out += "Report BT hash: {0}\n".format(report.hashes[0].hash)
        if len(problem.reports) > 3:
            out += "... and {0} more.\n".format(len(problem.reports)-3)

        if problem.tainted:
            out += "Kernel tainted.\n"

        crash_function = problem.crash_function
        if crash_function:
            out += "Crash function: {0}\n".format(crash_function)

        affected_all = []
        for report in problem.reports:
            affected_known = [
                (affected.build.base_package_name,
                 affected.build.epoch,
                 affected.build.version,
                 affected.build.release) for affected in
                get_crashed_package_for_report(db, report.id)]
            affected_unknown = \
                get_crashed_unknown_package_nevr_for_report(db, report.id)
            affected_all += affected_known + affected_unknown
        # Newest builds first; cmp_evr compares (epoch, version, release).
        affected_all = sorted(set(affected_all),
                              key=cmp_to_key(
                                  lambda a, b: cmp_evr(a[1:], b[1:])),
                              reverse=True)
        if affected_all:
            out += "Affected builds: {0}".format(", ".join(
                ["{0}-{1}:{2}-{3}".format(n, e, v, r)
                 for (n, e, v, r) in affected_all[:5]]))
            # Fix: count the remaining *builds*; the original counted
            # problem.reports, which is a different collection.
            if len(affected_all) > 5:
                out += " and {0} more.".format(len(affected_all)-5)
            out += "\n"

        pfix = problem.probable_fix_for_opsysrelease_ids(release_ids)
        if pfix:
            out += ("Problem seems to be fixed since the release of {0}\n"
                    .format(pfix))

        out += "\n"

    return out
def text_overview(self, cmdline, db, opsys, release):
    """
    Build a plain-text overview of the top crash problems for the given
    operating system *release* over the last N days (7 by default, or
    ``cmdline.last``).

    Low-quality problems are skipped unless
    ``cmdline.include_low_quality`` is set.  Returns the whole overview
    as one formatted string.
    """
    # Python 3: sorted() has no ``cmp`` argument — a key function is used
    # below instead.
    from functools import cmp_to_key

    release_ids = get_release_ids(db, opsys, release)

    num_days = 7
    if cmdline.last:
        num_days = int(cmdline.last)
    since = datetime.datetime.now() - datetime.timedelta(days=num_days)

    hot = query_hot_problems(db, release_ids,
                             history=self.history_type,
                             last_date=since)
    if not cmdline.include_low_quality:
        # Fix: filter() returns a lazy iterator on Python 3 and cannot be
        # sliced by hot[:cmdline.count] below — build a list instead.
        hot = [x for x in hot if x.quality >= 0]

    out = "Overview of the top {0} crashes over the last {1} days:\n".format(
        cmdline.count, num_days)

    for (rank, problem) in enumerate(hot[:cmdline.count]):
        out += "#{0} {1} - {2}x\n".format(
            rank+1, ', '.join(problem.unique_component_names),
            problem.count)
        if webfaf_installed():
            for report in problem.reports:
                out += "{0}\n".format(
                    reverse("webfaf.reports.views.bthash_forward",
                            args=[report.hashes[0].hash]))
        else:
            for report in problem.reports:
                out += "Report BT hash: {0}\n".format(report.hashes[0].hash)
        if problem.tainted:
            out += "Kernel tainted.\n"
        crash_function = problem.crash_function
        if crash_function:
            out += "Crash function: {0}\n".format(crash_function)

        affected_all = []
        for report in problem.reports:
            affected_known = [
                (affected.build.base_package_name,
                 affected.build.epoch,
                 affected.build.version,
                 affected.build.release) for affected in
                get_crashed_package_for_report(db, report.id)]
            affected_unknown = \
                get_crashed_unknown_package_nevr_for_report(db, report.id)
            affected_all += affected_known + affected_unknown
        # Oldest builds first (ascending EVR order); cmp_evr compares
        # (epoch, version, release).
        affected_all = sorted(set(affected_all),
                              key=cmp_to_key(
                                  lambda a, b: cmp_evr(a[1:], b[1:])))
        if affected_all:
            out += "Affected builds: {0}\n".format(", ".join(
                ["{0}-{1}:{2}-{3}".format(n, e, v, r)
                 for (n, e, v, r) in affected_all]))

        pfix = problem.probable_fix_for_opsysrelease_ids(release_ids)
        if len(pfix) > 0:
            out += ("Problem seems to be fixed since the release of {0}\n"
                    .format(pfix))
        out += "\n"

    return out