def test_base_stats():
    """
    Test simple stat management
    """
    # Start from a clean metrics buffer
    stats.metrics = []
    stats.add_metric("test.a.b.c", 12)
    assert len(stats.metrics) == 1

    recorded = stats.metrics[0]
    assert recorded["measurement"] == "code-review.test.a.b.c"
    assert recorded["fields"] == {"value": 12}
    assert recorded["tags"] == {"app": "code-review-bot", "channel": "test"}
    # The timestamp must be a parseable ISO-like datetime string
    assert datetime.strptime(recorded["time"], "%Y-%m-%dT%H:%M:%S.%f")

    # Flushing with no client attached is a harmless no-op
    stats.flush()
    assert len(stats.metrics) == 1

    # Once a client is attached, flush sends the points and clears the buffer
    stats.client = MockInflux()
    assert len(stats.client.points) == 0
    stats.flush()
    assert len(stats.metrics) == 0
    assert len(stats.client.points) == 1
def publish_comment(self, revision, issues, patches):
    """
    Publish issues through Phabricator comment
    """
    # Load existing comments for this revision
    existing_comments = self.api.list_comments(revision.phid)
    logger.info("Found {} existing comments on review".format(len(existing_comments)))

    # Split coverage issues from the rest: they get a dedicated comment
    coverage_issues = []
    non_coverage_issues = []
    for issue in issues:
        if isinstance(issue, CoverageIssue):
            coverage_issues.append(issue)
        else:
            non_coverage_issues.append(issue)

    patches_analyzers = {patch.analyzer for patch in patches}

    # First publish inlines as drafts
    # * skipping coverage issues as they get a dedicated comment
    # * skipping issues reported in a patch
    inlines = []
    for issue in non_coverage_issues:
        if issue.analyzer in patches_analyzers:
            continue
        inline = self.comment_inline(revision, issue, existing_comments)
        if inline:
            inlines.append(inline)

    if not (inlines or patches or coverage_issues):
        logger.info("No new comments found, skipping Phabricator publication")
        return
    logger.info("Added inline comments", ids=[inline["id"] for inline in inlines])

    # Then publish top comment
    if non_coverage_issues:
        self.api.comment(
            revision.id,
            self.build_comment(
                revision=revision,
                issues=non_coverage_issues,
                patches=patches,
                bug_report_url=BUG_REPORT_URL,
            ),
        )

    # Then publish top coverage comment
    if coverage_issues:
        self.api.comment(
            revision.id,
            self.build_coverage_comment(
                issues=coverage_issues, bug_report_url=BUG_REPORT_URL
            ),
        )

    stats.add_metric("report.phabricator.issues", len(inlines))
    stats.add_metric("report.phabricator")
    logger.info("Published phabricator comment")
def publish(self, revision, issues, task_failures, links):
    """
    Publish issues on selected reporters
    """
    # Publish patches on Taskcluster
    # or write locally for local development
    for patch in revision.improvement_patches:
        if settings.taskcluster.local:
            patch.write()
        else:
            patch.publish()

    # Report issues publication stats
    nb_issues = len(issues)
    publishable = [issue for issue in issues if issue.is_publishable()]
    nb_publishable = len(publishable)
    nb_publishable_errors = sum(
        1 for issue in publishable if issue.level == Level.Error
    )

    self.index(
        revision,
        state="analyzed",
        issues=nb_issues,
        issues_publishable=nb_publishable,
    )
    stats.add_metric("analysis.issues.publishable", nb_publishable)

    # Publish reports about these issues
    with stats.timer("runtime.reports"):
        for reporter in self.reporters.values():
            reporter.publish(issues, revision, task_failures, links)

    self.index(
        revision, state="done", issues=nb_issues, issues_publishable=nb_publishable
    )

    # Publish final HarborMaster state
    build_failed = nb_publishable_errors > 0 or task_failures
    self.update_status(revision, BuildState.Fail if build_failed else BuildState.Pass)
def publish(self, issues, revision, task_failures, links):
    """
    Publish issues on Phabricator:
    * publishable issues use lint results
    * build errors are displayed as unit test results
    """
    # Use only publishable issues and patches
    # and avoid publishing a patch from a de-activated analyzer
    skipped = self.analyzers_skipped
    issues = [
        issue
        for issue in issues
        if issue.is_publishable() and issue.analyzer.name not in skipped
    ]
    patches = [
        patch
        for patch in revision.improvement_patches
        if patch.analyzer.name not in skipped
    ]

    # Guard clause: nothing at all to report
    if not (issues or task_failures or links):
        logger.info("No issues to publish on phabricator")
        return issues, patches

    if issues:
        # Publish on Harbormaster all at once
        # * All non coverage publishable issues as lint issues
        # * All build errors as unit test results
        self.publish_harbormaster(revision, issues)

    if issues or patches or task_failures or links:
        # Publish comment summarizing issues
        self.publish_summary(revision, issues, patches, task_failures, links)

    # Publish statistics
    stats.add_metric("report.phabricator.issues", len(issues))
    stats.add_metric("report.phabricator")

    return issues, patches
def analyze_patch(self):
    """
    Analyze loaded patch to extract modified lines
    and statistics
    """
    assert self.patch is not None, "Missing patch"
    assert isinstance(self.patch, str), "Invalid patch type"

    # List all modified lines from current revision changes
    patch_stats = rs_parsepatch.get_lines(self.patch)
    assert len(patch_stats) > 0, "Empty patch"
    self.lines = {}
    for stat in patch_stats:
        self.lines[stat["filename"]] = stat["added_lines"]

    # Shortcut to files modified
    self.files = self.lines.keys()

    # Report nb of files and lines analyzed
    stats.add_metric("analysis.files", len(self.files))
    total_lines = sum(len(added) for added in self.lines.values())
    stats.add_metric("analysis.lines", total_lines)
def analyze_patch(self):
    """
    Analyze loaded patch to extract modified lines
    and statistics
    """
    assert self.patch is not None, "Missing patch"
    assert isinstance(self.patch, str), "Invalid patch type"

    # List all modified lines from current revision changes
    patch = Patch.parse_patch(self.patch, skip_comments=False)
    assert patch != {}, "Empty patch"

    # Use all changes in new files
    self.lines = {}
    for filename, diff in patch.items():
        self.lines[filename] = diff.get("touched", []) + diff.get("added", [])

    # Shortcut to files modified
    self.files = self.lines.keys()

    # Report nb of files and lines analyzed
    stats.add_metric("analysis.files", len(self.files))
    stats.add_metric(
        "analysis.lines", sum(len(line) for line in self.lines.values())
    )
def publish(self, revision, issues):
    """
    Publish issues on selected reporters

    Writes improvement patches (locally in development mode, otherwise
    through Taskcluster), indexes publication statistics, runs every
    configured reporter, then updates the revision's HarborMaster
    build state.
    """
    # Publish patches on Taskcluster
    # or write locally for local development
    for patch in revision.improvement_patches:
        if settings.taskcluster.local:
            patch.write()
        else:
            patch.publish(self.queue_service)

    # Report issues publication stats
    nb_issues = len(issues)
    nb_publishable = sum(1 for i in issues if i.is_publishable())
    self.index(
        revision,
        state="analyzed",
        issues=nb_issues,
        issues_publishable=nb_publishable,
    )
    stats.add_metric("analysis.issues.publishable", nb_publishable)

    # Publish reports about these issues
    with stats.timer("runtime.reports"):
        for reporter in self.reporters.values():
            reporter.publish(issues, revision)

    self.index(
        revision, state="done", issues=nb_issues, issues_publishable=nb_publishable
    )

    # Publish final HarborMaster state
    # Fixed: the previous `cond and BuildState.Fail or BuildState.Pass`
    # idiom silently yields the wrong value whenever the middle operand is
    # falsy; an explicit conditional expression has no such trap and
    # matches the sibling publish() implementation.
    revision.update_status(
        BuildState.Fail if nb_publishable > 0 else BuildState.Pass
    )
def publish_comment(self, revision, issues, patches, task_failures):
    """
    Publish issues through Phabricator comment
    """
    # Load existing comments for this revision
    existing_comments = self.api.list_comments(revision.phid)
    logger.info("Found {} existing comments on review".format(len(existing_comments)))

    coverage_issues = [i for i in issues if isinstance(i, CoverageIssue)]
    non_coverage_issues = [i for i in issues if not isinstance(i, CoverageIssue)]
    errors = [
        i for i in issues if i.level == Level.Error and self.publish_errors
    ]
    patches_analyzers = {patch.analyzer for patch in patches}

    # First publish inlines as drafts
    # * skipping coverage issues as they get a dedicated comment
    # * skipping issues reported in a patch
    # * skipping issues not in the current patch
    # * skipping errors as they are reported as lint (when enabled)
    def _is_inline(issue):
        # Do not publish errors as inline when we are already
        # publishing them as lint
        if self.publish_errors and issue.level == Level.Error:
            return False
        if issue not in non_coverage_issues:
            return False
        if issue.analyzer in patches_analyzers:
            return False
        return revision.contains(issue)

    inlines = []
    for issue in issues:
        if not _is_inline(issue):
            continue
        inline = self.comment_inline(revision, issue, existing_comments)
        if inline:
            inlines.append(inline)

    if not (inlines or patches or coverage_issues or task_failures or errors):
        logger.info("No new comments found, skipping Phabricator publication")
        return
    logger.info("Added inline comments", ids=[inline["id"] for inline in inlines])

    # Then publish top comment
    if non_coverage_issues or task_failures:
        self.api.comment(
            revision.id,
            self.build_comment(
                revision=revision,
                issues=non_coverage_issues,
                patches=patches,
                bug_report_url=BUG_REPORT_URL,
                task_failures=task_failures,
            ),
        )

    # Then publish top coverage comment
    if coverage_issues:
        self.api.comment(
            revision.id,
            self.build_coverage_comment(
                issues=coverage_issues, bug_report_url=BUG_REPORT_URL
            ),
        )

    stats.add_metric("report.phabricator.issues", len(inlines))
    stats.add_metric("report.phabricator")
    logger.info("Published phabricator comment")