def critique(ui, repo, entire=False, node=None, **kwargs):
    """Perform a critique of a changeset."""
    # Static analysis runs into weird import issues when the demand
    # importer is active, so the whole check runs with it deactivated.
    with demandimport.deactivated():
        from flake8.engine import get_style_guide
        from pep8 import DiffReport, parse_udiff

        checker = get_style_guide(parse_argv=False, ignore='E128')
        ctx = repo[node]

        if not entire:
            # Restrict the reporter to lines this changeset touched.
            udiff = ''.join(ctx.diff())
            selected = {}
            for path, touched in parse_udiff(udiff).items():
                if path.startswith('./'):
                    path = path[2:]
                selected[path] = touched
            checker.options.selected_lines = selected
            checker.options.report = DiffReport(checker.options)

        removed = repo.status(ctx.p1().node(), ctx.node()).deleted
        candidates = [name for name in ctx.files()
                      if name.endswith('.py') and name not in removed]
        checker.check_files(candidates)
def critique(ui, repo, entire=False, node=None, **kwargs):
    """Perform a critique of a changeset.

    The demand importer interferes with the static-analysis imports, so
    it is disabled for the duration of the check and always re-enabled
    afterwards.
    """
    demandimport.disable()
    try:
        from flake8.engine import get_style_guide
        from pep8 import DiffReport, parse_udiff

        style = get_style_guide(parse_argv=False, ignore='E128')
        ctx = repo[node]

        # Tell the reporter to ignore lines we didn't touch as part of
        # this change.
        if not entire:
            diff = ''.join(ctx.diff())
            style.options.selected_lines = {}
            for k, v in parse_udiff(diff).items():
                if k.startswith('./'):
                    k = k[2:]
                style.options.selected_lines[k] = v
            style.options.report = DiffReport(style.options)

        # status() tuple index 2 is the list of deleted files (legacy
        # tuple-style status API).
        deleted = repo.status(ctx.p1().node(), ctx.node())[2]
        files = [f for f in ctx.files()
                 if f.endswith('.py') and f not in deleted]
        style.check_files(files)
    finally:
        # BUG FIX: previously enable() was only reached on success, so
        # any exception above left the demand importer disabled for the
        # rest of the process. Restore it unconditionally.
        demandimport.enable()
def critique(ui, repo, entire=False, node=None, **kwargs):
    """Perform a critique of a changeset."""
    # The demand importer confuses flake8/pep8 imports during static
    # analysis, so it stays deactivated for the whole run.
    with demandimport.deactivated():
        from flake8.engine import get_style_guide
        from pep8 import DiffReport, parse_udiff

        guide = get_style_guide(parse_argv=False, ignore='E128')
        changectx = repo[node]

        if not entire:
            # Only report on lines modified by this changeset.
            patch_text = ''.join(changectx.diff())
            guide.options.selected_lines = {}
            for filename, touched in parse_udiff(patch_text).items():
                key = filename[2:] if filename.startswith('./') else filename
                guide.options.selected_lines[key] = touched
            guide.options.report = DiffReport(guide.options)

        removed = repo.status(changectx.p1().node(), changectx.node()).deleted
        targets = [
            path
            for path in changectx.files()
            if path.endswith('.py') and path not in removed
        ]
        guide.check_files(targets)
def pep8_parse_diff(self, diffContent):
    """Run pep8's unified-diff parser over *diffContent*.

    Returns a (diffResult, stderr_output, errorcode) tuple. pep8 may
    call sys.exit() on malformed input; that SystemExit is caught and
    its exit code is returned instead of propagating.
    """
    # BUG FIX: initialize up front. If parse_udiff() raises SystemExit,
    # diffResult was previously left unbound and the return statement
    # raised UnboundLocalError, masking the real failure.
    diffResult = None
    try:
        diffResult = pep8.parse_udiff(diffContent)
        errorcode = None
    except SystemExit:
        errorcode = sys.exc_info()[1].code
    # NOTE(review): sys.stderr.getvalue() assumes stderr has been
    # replaced with a StringIO by the caller -- confirm.
    return diffResult, sys.stderr.getvalue(), errorcode
def process_commit(self, review, landing_repo_url, repo_url, commit):
    """Run Python static analysis over a commit and publish a review."""
    revision = commit['rev']
    self.logger.info('reviewing revision: %s (review request: %d)' %
                     (revision[:12], commit['review_request_id']))

    repo_path = self.ensure_hg_repo_exists(landing_repo_url, repo_url,
                                           revision)

    # BUG FIX: hg_commit_changes() was previously invoked twice with
    # identical arguments, the first result being discarded; one call
    # suffices.
    adds, dels, mods, copies, diff = self.hg_commit_changes(
        repo_path, revision, diff_context=0)

    # Only Python files that were added or modified are relevant.
    rel_adds = set(f for f in adds if f.endswith('.py'))
    rel_mods = set(f for f in mods if f.endswith('.py'))
    relevant = rel_adds | rel_mods

    if not relevant:
        self.logger.info('not reviewing revision: %s no relevant '
                         'python changes in commit' % revision)
        return

    # flake8's multiprocessing default doesn't work synchronously for
    # some reason. Probably because our saved state isn't being
    # transferred across process boundaries. Specify jobs=0 to get
    # results.
    style = get_style_guide(parse_argv=False, jobs=0)

    # Restrict reporting to lines touched by this commit.
    style.options.selected_lines = {}
    for k, v in parse_udiff(diff).items():
        if k.startswith('./'):
            k = k[2:]
        style.options.selected_lines[k] = v

    style.options.report = CapturingDiffReport(style.options)

    # flake8 resolves file paths relative to the working directory;
    # restore the old cwd even if checking raises.
    oldcwd = os.getcwd()
    try:
        os.chdir(repo_path)
        results = style.check_files(relevant)
    finally:
        os.chdir(oldcwd)

    error_count = 0
    for filename, errors in sorted(results.file_results.items()):
        if not errors:
            continue

        errors = sorted(errors, cmp=_cmp_errors)

        for line, offset, code, text, doc in errors:
            error_count += 1
            num_lines = 1
            comment = '%s: %s' % (code, text)

            # Some checks are reported on a different line than the
            # offending one; shift per-code.
            if code in LINE_ADJUSTMENTS:
                line_offset, num_lines = LINE_ADJUSTMENTS[code]
                line += line_offset

            review.comment(filename, line, num_lines, comment)

    commentlines = []

    if error_count:
        commentlines.extend([
            random.choice(ERRORS_QUIPS),
            '',
            'I analyzed your Python changes and found %d errors.'
                % (error_count),
        ])
    else:
        commentlines.extend([
            random.choice(NO_ERRORS_QUIPS),
            '',
            'Congratulations, there were no Python static analysis '
            'issues with this patch!',
        ])

    commentlines.extend([
        '',
        'The following files were examined:',
        '',
    ])
    commentlines.extend(' %s' % f for f in sorted(relevant))

    review.publish(body_top='\n'.join(commentlines),
                   ship_it=error_count == 0)

    self.strip_nonpublic_changesets(repo_path)
def process_commit(self, review, landing_repo_url, repo_url, commit):
    """Statically analyze a commit's Python changes and publish a review."""
    revision = commit['rev']
    self.logger.info('reviewing revision: %s (review request: %d)' %
                     (revision[:12], commit['review_request_id']))

    repo_path = self.ensure_hg_repo_exists(landing_repo_url, repo_url,
                                           revision)

    # BUG FIX: the duplicated hg_commit_changes() call (whose first
    # result was thrown away) has been removed.
    adds, dels, mods, copies, diff = self.hg_commit_changes(
        repo_path, revision, diff_context=0)

    # Added or modified Python files are the only relevant targets.
    rel_adds = set(f for f in adds if f.endswith('.py'))
    rel_mods = set(f for f in mods if f.endswith('.py'))
    relevant = rel_adds | rel_mods

    if not relevant:
        self.logger.info('not reviewing revision: %s no relevant '
                         'python changes in commit' % revision)
        return

    # flake8's multiprocessing default doesn't work synchronously for
    # some reason. Probably because our saved state isn't being
    # transferred across process boundaries. Specify jobs=0 to get
    # results.
    style = get_style_guide(parse_argv=False, jobs=0)

    # Only report on lines actually touched by this commit.
    style.options.selected_lines = {}
    for k, v in parse_udiff(diff).items():
        if k.startswith('./'):
            k = k[2:]
        style.options.selected_lines[k] = v

    style.options.report = CapturingDiffReport(style.options)

    # flake8 works relative to the cwd; guarantee it is restored.
    oldcwd = os.getcwd()
    try:
        os.chdir(repo_path)
        results = style.check_files(relevant)
    finally:
        os.chdir(oldcwd)

    error_count = 0
    for filename, errors in sorted(results.file_results.items()):
        if not errors:
            continue

        errors = sorted(errors, cmp=_cmp_errors)

        for line, offset, code, text, doc in errors:
            error_count += 1
            num_lines = 1
            comment = '%s: %s' % (code, text)

            # Certain codes are reported against a neighboring line;
            # apply the per-code adjustment.
            if code in LINE_ADJUSTMENTS:
                line_offset, num_lines = LINE_ADJUSTMENTS[code]
                line += line_offset

            review.comment(filename, line, num_lines, comment)

    commentlines = []

    if error_count:
        commentlines.extend([
            random.choice(ERRORS_QUIPS),
            '',
            'I analyzed your Python changes and found %d errors.'
                % (error_count),
        ])
    else:
        commentlines.extend([
            random.choice(NO_ERRORS_QUIPS),
            '',
            'Congratulations, there were no Python static analysis '
            'issues with this patch!',
        ])

    commentlines.extend([
        '',
        'The following files were examined:',
        '',
    ])
    commentlines.extend(' %s' % f for f in sorted(relevant))

    review.publish(body_top='\n'.join(commentlines),
                   ship_it=error_count == 0)

    self.strip_nonpublic_changesets(repo_path)