def _GetCLDataForTryJob(date_start, date_end):
  """Collects triaged suspected CLs that the try-job approach identified.

  Args:
    date_start (datetime): Inclusive lower bound on WfSuspectedCL.updated_time.
    date_end (datetime): Exclusive upper bound on WfSuspectedCL.updated_time.

  Returns:
    A pair (try_job_cls_dict, both_cls_dict). Each maps
    failure_type -> triage status -> count of builds. both_cls_dict counts
    only builds where the heuristic approach also found the CL.
  """
  query = WfSuspectedCL.query(
      ndb.AND(WfSuspectedCL.status.IN(TRIAGED_STATUS),
              WfSuspectedCL.approaches == analysis_approach_type.TRY_JOB,
              WfSuspectedCL.updated_time >= date_start,
              WfSuspectedCL.updated_time < date_end))

  try_job_cls_dict = defaultdict(lambda: defaultdict(int))
  both_cls_dict = defaultdict(lambda: defaultdict(int))

  for suspected_cl in query.fetch():
    if not suspected_cl.builds:
      continue
    seen_failures = []
    for build in suspected_cl.builds.values():
      # Deduplicate identical failure sets and skip builds that were never
      # triaged (status is None).
      if (build['failures'] in seen_failures or
          build['status'] is None):  # pragma: no cover
        continue
      seen_failures.append(build['failures'])
      try_job_cls_dict[build['failure_type']][build['status']] += 1
      if analysis_approach_type.HEURISTIC in build['approaches']:
        # Both heuristic and try job found this CL on this build.
        both_cls_dict[build['failure_type']][build['status']] += 1

  return try_job_cls_dict, both_cls_dict
def _GetCLDataForHeuristic(date_start, date_end):
  """Collects triaged suspected CLs that the heuristic approach identified.

  Args:
    date_start (datetime): Inclusive lower bound on WfSuspectedCL.updated_time.
    date_end (datetime): Exclusive upper bound on WfSuspectedCL.updated_time.

  Returns:
    A nested dict mapping failure_type -> top_score -> triage status -> count.
  """
  query = WfSuspectedCL.query(
      ndb.AND(WfSuspectedCL.status.IN(TRIAGED_STATUS),
              WfSuspectedCL.approaches == analysis_approach_type.HEURISTIC,
              WfSuspectedCL.updated_time >= date_start,
              WfSuspectedCL.updated_time < date_end))

  cl_by_top_score_dict = defaultdict(
      lambda: defaultdict(lambda: defaultdict(int)))

  for suspected_cl in query.fetch():
    if not suspected_cl.builds:
      continue
    seen_failures = []
    for build in suspected_cl.builds.values():
      # Skip duplicates, builds the heuristic approach did not find (no
      # top_score), and builds that were never triaged (status is None).
      if (build['failures'] in seen_failures or not build['top_score'] or
          build['status'] is None):  # pragma: no cover
        continue
      seen_failures.append(build['failures'])
      cl_by_top_score_dict[build['failure_type']][build['top_score']][
          build['status']] += 1

  return cl_by_top_score_dict
def _GetDailyNumberOfRevertedCulprits(limit):
  """Counts compile-failure culprits whose reverts were created in the last day.

  Args:
    limit (int): Upper bound passed to the datastore count() call.

  Returns:
    The number (capped at limit) of WfSuspectedCL entities with
    failure_type COMPILE and a revert created within the past 24 hours.
  """
  one_day_ago = time_util.GetUTCNow() - timedelta(days=1)
  # TODO(chanli): improve the check for a rare case when two pipelines revert
  # at the same time.
  query = WfSuspectedCL.query(
      ndb.AND(WfSuspectedCL.failure_type == failure_type.COMPILE,
              WfSuspectedCL.revert_created_time >= one_day_ago))
  return query.count(limit)
def _GetAnalysesWithinDateRange(start_date,
                                end_date,
                                page_size=_DEFAULT_PAGE_SIZE):
  """Fetches all suspected CLs identified within [start_date, end_date).

  Pages through the datastore with a cursor so arbitrarily many entities can
  be retrieved without a single oversized RPC.

  Args:
    start_date (datetime): Inclusive lower bound on identified_time.
    end_date (datetime): Exclusive upper bound on identified_time.
    page_size (int): Number of entities fetched per page.

  Returns:
    A list of all matching WfSuspectedCL entities.
  """
  # Build the query once; it is loop-invariant, so reconstructing it on every
  # page (as before) was wasted work.
  query = WfSuspectedCL.query(WfSuspectedCL.identified_time >= start_date,
                              WfSuspectedCL.identified_time < end_date)
  all_suspected_cls = []
  cursor = None
  more = True
  while more:
    suspected_cls, cursor, more = query.fetch_page(
        page_size, start_cursor=cursor)
    all_suspected_cls.extend(suspected_cls)
  return all_suspected_cls
def _GetCLDataForHeuristic(failure_args, date_start, date_end):
  """Tallies triaged heuristic-found CLs, filtered by requested failure types.

  Args:
    failure_args: Container of requested failure categories
      ('compile' and/or 'test').
    date_start (datetime): Start of the date range constraint.
    date_end (datetime): End of the date range constraint.

  Returns:
    A nested dict mapping failure_type -> top_score -> triage status -> count.
  """
  query = WfSuspectedCL.query(
      remote_api.ndb.AND(
          WfSuspectedCL.status.IN(TRIAGED_STATUS),
          WfSuspectedCL.approaches == analysis_approach_type.HEURISTIC))
  query = _AddMoreConstrainsToQuery(query, failure_args, date_start, date_end)

  cl_by_top_score_dict = defaultdict(
      lambda: defaultdict(lambda: defaultdict(int)))

  for suspected_cl in query.fetch():
    if not suspected_cl.builds:
      continue
    seen_failures = []
    for build in suspected_cl.builds.values():
      # Skip duplicates, builds without a heuristic top_score, and builds
      # that were never triaged (status is None).
      if (build['failures'] in seen_failures or not build['top_score'] or
          build['status'] is None):
        continue
      # Honor the requested categories: drop test failures when only compile
      # was asked for, and compile failures when only test was asked for.
      wrong_category = (
          ('compile' in failure_args and
           build['failure_type'] == failure_type.TEST) or
          ('test' in failure_args and
           build['failure_type'] == failure_type.COMPILE))
      if wrong_category:
        continue
      seen_failures.append(build['failures'])
      cl_by_top_score_dict[build['failure_type']][build['top_score']][
          build['status']] += 1

  return cl_by_top_score_dict
def _GetRevertCLData(start_date, end_date):
  """Summarizes revert outcomes for suspected CLs identified in a date range.

  Args:
    start_date (datetime): Inclusive lower bound on identified_time.
    end_date (datetime): Exclusive upper bound on identified_time.

  Returns:
    A dict with the formatted date range plus 'processed' and 'undetermined'
    lists of per-CL result dicts (notification time, outcome, review URL).
  """
  data = {
      'start_date': time_util.FormatDatetime(start_date),
      'end_date': time_util.FormatDatetime(end_date),
      'processed': [],
      'undetermined': []
  }

  query = WfSuspectedCL.query(WfSuspectedCL.identified_time >= start_date,
                              WfSuspectedCL.identified_time < end_date)

  cursor = None
  more = True
  while more:
    page, cursor, more = query.fetch_page(
        _DEFAULT_PAGE_SIZE, start_cursor=cursor)
    for suspected_cl in page:
      processed, review_url, outcome = _CheckRevertStatusOfSuspectedCL(
          suspected_cl)
      result = {
          'cr_notification_time':
              time_util.FormatDatetime(suspected_cl.cr_notification_time or
                                       suspected_cl.updated_time),
          'outcome': revert_cl_status.STATUS_TO_DESCRIPTION.get(outcome),
          'url': review_url,
      }
      # processed is True -> counted; None -> undetermined; False -> dropped.
      if processed:
        data['processed'].append(result)
      elif processed is None:  # pragma: no branch
        data['undetermined'].append(result)

  return data
def _GetCLDataForTryJob(failure_args, date_start, date_end):
  """Tallies triaged try-job-found CLs, filtered by requested failure types.

  Args:
    failure_args: Container of requested failure categories
      ('compile' and/or 'test').
    date_start (datetime): Start of the date range constraint.
    date_end (datetime): End of the date range constraint.

  Returns:
    A pair (try_job_cls_dict, both_cls_dict). Each maps
    failure_type -> triage status -> count of builds. both_cls_dict counts
    only builds where the heuristic approach also found the CL.
  """
  query = WfSuspectedCL.query(
      remote_api.ndb.AND(
          WfSuspectedCL.status.IN(TRIAGED_STATUS),
          WfSuspectedCL.approaches == analysis_approach_type.TRY_JOB))
  query = _AddMoreConstrainsToQuery(query, failure_args, date_start, date_end)

  try_job_cls_dict = defaultdict(lambda: defaultdict(int))
  both_cls_dict = defaultdict(lambda: defaultdict(int))

  for suspected_cl in query.fetch():
    if not suspected_cl.builds:
      continue
    seen_failures = []
    for build in suspected_cl.builds.values():
      # Skip duplicates and builds that were never triaged (status is None).
      if build['failures'] in seen_failures or build['status'] is None:
        continue
      # Honor the requested categories: drop test failures when only compile
      # was asked for, and compile failures when only test was asked for.
      wrong_category = (
          ('compile' in failure_args and
           build['failure_type'] == failure_type.TEST) or
          ('test' in failure_args and
           build['failure_type'] == failure_type.COMPILE))
      if wrong_category:
        continue
      seen_failures.append(build['failures'])
      try_job_cls_dict[build['failure_type']][build['status']] += 1
      if analysis_approach_type.HEURISTIC in build['approaches']:
        # Both heuristic and try job found this CL on this build.
        both_cls_dict[build['failure_type']][build['status']] += 1

  return try_job_cls_dict, both_cls_dict