def test_parent_and_new_failures(self, get_options, post, get_base_failures):
    # Failures should be split into "new" vs "in parent revision" when one
    # of the failing tests also failed in the base commit.
    def get_test_desc(build, testcase, test_name):
        test_link = build_web_uri('/build_test/{0}/{1}/'.format(
            build.id.hex,
            testcase.id.hex,
        ))
        return "[%s](%s)" % (test_name, test_link)

    get_options.return_value = {'phabricator.notify': '1'}
    project = self.create_project(name='Server', slug='project-slug')
    self.assertEquals(post.call_count, 0)

    patch = self.create_patch()
    source = self.create_source(project, revision_sha='1235', patch=patch)
    build = self.create_build(project, result=Result.failed, target='D1',
                              source=source, status=Status.finished)
    job = self.create_job(build=build)
    testcase = self.create_test(
        package='test.group.ClassName',
        name='test.group.ClassName.test_foo',
        job=job,
        duration=134,
        result=Result.failed,
    )
    testcase2 = self.create_test(
        package='test.group.ClassName',
        name='test.group.ClassName.test_foo2',
        job=job,
        duration=134,
        result=Result.failed,
    )
    # test_foo was already failing in the parent revision.
    get_base_failures.return_value = {testcase.name}

    build_finished_handler(build_id=build.id.hex)

    get_options.assert_called_once_with(project.id)
    build_link = build_web_uri('/find_build/{0}/'.format(build.id.hex))
    failure_link = build_web_uri('/build_tests/{0}/'.format(build.id.hex))
    test_desc = get_test_desc(build, testcase, 'test_foo')
    test_desc2 = get_test_desc(build, testcase2, 'test_foo2')
    expected_msg = """(IMPORTANT) Failing builds!\n\n - [Server]({0}). There were 1 new [test failures]({1})

**New failures (1):**
|Test Name | Package|
|--|--|
|{2}|test.group.ClassName|

**Failures in parent revision (1):**
|Test Name | Package|
|--|--|
|{3}|test.group.ClassName|"""
    post.assert_called_once_with(
        '1',
        expected_msg.format(build_link, failure_link, test_desc2, test_desc),
        mock.ANY)
def test_multiple_builds(self, get_options, post):
    # A failing and a passing build in the same collection should be
    # reported together in a single phabricator comment.
    get_options.return_value = {'phabricator.notify': '1'}
    project1 = self.create_project(name='Server', slug='project-slug')
    project2 = self.create_project(name='Server2', slug='project-slug2')
    self.assertEquals(post.call_count, 0)
    collection_id = uuid.uuid4()

    def create_build(result, project):
        base_source = self.create_source(project, revision_sha='1235')
        base_build = self.create_build(project, result=Result.passed,
                                       source=base_source,
                                       status=Status.finished)
        self.create_job(build=base_build)

        patch = self.create_patch()
        source = self.create_source(project, revision_sha='1235', patch=patch)
        build = self.create_build(project, result=result, target='D1',
                                  source=source, status=Status.finished,
                                  collection_id=collection_id)
        job = self.create_job(build=build)
        testcase = self.create_test(
            package='test.group.ClassName',
            name='test.group.ClassName.test_foo',
            job=job,
            duration=134,
            result=result,
        )
        return build, testcase

    build1, testcase1 = create_build(Result.failed, project1)
    build2, testcase2 = create_build(Result.passed, project2)

    build_finished_handler(build_id=build1.id.hex)

    build_link = build_web_uri('/find_build/{0}/'.format(build1.id.hex))
    build2_link = build_web_uri('/find_build/{0}/'.format(build2.id.hex))
    failure_link = build_web_uri('/build_tests/{0}/'.format(build1.id.hex))
    test_link = build_web_uri('/build_test/{0}/{1}/'.format(
        build1.id.hex,
        testcase1.id.hex,
    ))
    test_desc = "[test_foo](%s)" % test_link
    expected_msg = """(IMPORTANT) Failing builds!\n\n - [Server]({0}). There were 1 new [test failures]({1})

**New failures (1):**
|Test Name | Package|
|--|--|
|{2}|test.group.ClassName|

(NOTE) Passing builds:

 - [Server2]({3})."""
    post.assert_called_once_with(
        '1',
        expected_msg.format(build_link, failure_link, test_desc, build2_link),
        mock.ANY)
def test_parent_and_new_failures(self, get_options, post, get_base_failures):
    # One failure is new, the other already failed in the base commit;
    # the comment must list them under separate headings.
    def get_test_desc(build, testcase, test_name):
        test_link = build_web_uri('/build_test/{0}/{1}/'.format(
            build.id.hex,
            testcase.id.hex,
        ))
        return "[%s](%s)" % (test_name, test_link)

    get_options.return_value = {'phabricator.notify': '1'}
    project = self.create_project(name='Server', slug='project-slug')
    self.assertEquals(post.call_count, 0)

    patch = self.create_patch()
    source = self.create_source(project, revision_sha='1235', patch=patch)
    build = self.create_build(project, result=Result.failed, target='D1',
                              source=source, status=Status.finished)
    job = self.create_job(build=build)
    testcase = self.create_test(
        package='test.group.ClassName',
        name='test.group.ClassName.test_foo',
        job=job,
        duration=134,
        result=Result.failed,
    )
    testcase2 = self.create_test(
        package='test.group.ClassName',
        name='test.group.ClassName.test_foo2',
        job=job,
        duration=134,
        result=Result.failed,
    )
    # test_foo was already failing in the parent revision.
    get_base_failures.return_value = {testcase.name}

    build_finished_handler(build_id=build.id.hex)

    get_options.assert_called_once_with(project.id)
    build_link = build_web_uri('/find_build/{0}/'.format(build.id.hex))
    failure_link = build_web_uri('/build_tests/{0}/'.format(build.id.hex))
    test_desc = get_test_desc(build, testcase, 'test_foo')
    test_desc2 = get_test_desc(build, testcase2, 'test_foo2')
    expected_msg = """Server build Failed {{icon times, color=red}} ([results]({0})). There were 1 new [test failures]({1})

**New failures (1):**
|Test Name | Package|
|--|--|
|{2}|test.group.ClassName|

**Failures in parent revision (1):**
|Test Name | Package|
|--|--|
|{3}|test.group.ClassName|"""
    post.assert_called_once_with(
        '1',
        expected_msg.format(build_link, failure_link, test_desc2, test_desc),
        mock.ANY)
def _generate_remarkup_table_for_tests(build, tests):
    """Render a remarkup table of failed tests for *build*.

    The table is truncated to the configured
    MAX_SHOWN_ITEMS_PER_BUILD_PHABRICATOR rows; a trailing
    '|...more...|...|' row marks truncation.
    """
    num_failures = len(tests)
    did_truncate = False
    max_shown = current_app.config.get('MAX_SHOWN_ITEMS_PER_BUILD_PHABRICATOR', 10)
    if num_failures > max_shown:
        tests = tests[:max_shown]
        did_truncate = True

    table = ['|Test Name | Package|', '|--|--|']
    for test in tests:
        pkg = test.package
        name = test.name
        # Show the short test name when it is prefixed by its package.
        if pkg and name.startswith(pkg):
            name = name[len(pkg) + 1:]
        test_link = build_web_uri('/build_test/{0}/{1}/'.format(
            build.id.hex,
            test.id.hex,
        ))
        # append() instead of `table = table + [...]`, which rebuilt the
        # whole list on every iteration.
        table.append('|[%s](%s)|%s|' % (name, test_link, pkg))

    if did_truncate:
        table.append('|...more...|...|')
    return '\n'.join(table)
def _generate_remarkup_table_for_tests(build, tests):
    """Render a remarkup table of failed tests for *build*.

    The table is truncated to the configured
    MAX_SHOWN_ITEMS_PER_BUILD_PHABRICATOR rows; a trailing
    '|...more...|...|' row marks truncation.
    """
    num_failures = len(tests)
    did_truncate = False
    max_shown = current_app.config.get('MAX_SHOWN_ITEMS_PER_BUILD_PHABRICATOR', 10)
    if num_failures > max_shown:
        tests = tests[:max_shown]
        did_truncate = True

    table = ['|Test Name | Package|', '|--|--|']
    for test in tests:
        pkg = test.package
        name = test.name
        # Show the short test name when it is prefixed by its package.
        if pkg and name.startswith(pkg):
            name = name[len(pkg) + 1:]
        test_link = build_web_uri('/build_test/{0}/{1}/'.format(
            build.id.hex,
            test.id.hex,
        ))
        # append() instead of `table = table + [...]`, which rebuilt the
        # whole list on every iteration.
        table.append('|[%s](%s)|%s|' % (name, test_link, pkg))

    if did_truncate:
        table.append('|...more...|...|')
    return '\n'.join(table)
def test_slug_escape(self, get_options, post):
    # A slug containing characters that need escaping must not break
    # the posted comment.
    get_options.return_value = {'phabricator.notify': '1'}
    project = self.create_project(name='Server', slug='project-(slug)')
    self.assertEquals(post.call_count, 0)

    patch = self.create_patch()
    source = self.create_source(project, revision_sha='1235', patch=patch)
    build = self.create_build(project, result=Result.passed, target='D1',
                              source=source, status=Status.finished)
    job = self.create_job(build=build)
    testcase = self.create_test(
        package='test.group.ClassName',
        name='test.group.ClassName.test_foo',
        job=job,
        duration=134,
        result=Result.passed,
    )

    build_finished_handler(build_id=build.id.hex)

    get_options.assert_called_once_with(project.id)
    build_link = build_web_uri('/find_build/{0}/'.format(build.id.hex))
    expected_msg = '(NOTE) Passing builds:\n\n - [Server]({0}).'
    post.assert_called_once_with('1', expected_msg.format(build_link), mock.ANY)
def test_slug_escape(self, get_options, post):
    # A slug containing characters that need escaping must not break
    # the posted comment.
    get_options.return_value = {'phabricator.notify': '1'}
    project = self.create_project(name='Server', slug='project-(slug)')
    self.assertEquals(post.call_count, 0)

    patch = self.create_patch()
    source = self.create_source(project, revision_sha='1235', patch=patch)
    build = self.create_build(project, result=Result.passed, target='D1',
                              source=source, status=Status.finished)
    job = self.create_job(build=build)
    testcase = self.create_test(
        package='test.group.ClassName',
        name='test.group.ClassName.test_foo',
        job=job,
        duration=134,
        result=Result.passed,
    )

    build_finished_handler(build_id=build.id.hex)

    get_options.assert_called_once_with(project.id)
    build_link = build_web_uri('/find_build/{0}/'.format(build.id.hex))
    expected_msg = 'Server build Passed {{icon check, color=green}} ([results]({0})).'
    post.assert_called_once_with('1', expected_msg.format(build_link), mock.ANY)
def test_build_failure_with_tests_and_no_base_job(self, get_options, post):
    # A base build without a job means parent failures are unknown, so
    # all failures are listed under "All failures".
    get_options.return_value = {'phabricator.notify': '1'}
    project = self.create_project(name='Server', slug='project-slug')
    base_source = self.create_source(project, revision_sha='1235')
    base_build = self.create_build(project, result=Result.passed,
                                   source=base_source,
                                   status=Status.finished)
    self.assertEquals(post.call_count, 0)

    patch = self.create_patch()
    source = self.create_source(project, revision_sha='1235', patch=patch)
    build = self.create_build(project, result=Result.failed, target='D1',
                              source=source, status=Status.finished)
    job = self.create_job(build=build)
    testcase = self.create_test(
        package='test.group.ClassName',
        name='test.group.ClassName.test_foo',
        job=job,
        duration=134,
        result=Result.failed,
    )

    build_finished_handler(build_id=build.id.hex)

    get_options.assert_called_once_with(project.id)
    build_link = build_web_uri('/find_build/{0}/'.format(build.id.hex))
    failure_link = build_web_uri('/build_tests/{0}/'.format(build.id.hex))
    test_link = build_web_uri('/build_test/{0}/{1}/'.format(
        build.id.hex,
        testcase.id.hex,
    ))
    test_desc = "[test_foo](%s)" % test_link
    expected_msg = """(IMPORTANT) Failing builds!\n\n - [Server]({0}). There were a total of 1 [test failures]({1}), but we could not determine if any of these tests were previously failing.

**All failures (1):**
|Test Name | Package|
|--|--|
|{2}|test.group.ClassName|"""
    post.assert_called_once_with(
        '1',
        expected_msg.format(build_link, failure_link, test_desc),
        mock.ANY)
def test_max_shown_build_failures(self, get_options, post):
    # With one more failure than the display limit, exactly max_shown
    # test links appear plus a truncation marker row.
    get_options.return_value = {'phabricator.notify': '1'}
    project = self.create_project(name='Server', slug='project-slug')
    self.assertEquals(post.call_count, 0)

    patch = self.create_patch()
    source = self.create_source(project, revision_sha='1235', patch=patch)
    build = self.create_build(project, result=Result.failed, target='D1',
                              source=source, status=Status.finished)
    job = self.create_job(build=build)
    max_shown = current_app.config.get(
        'MAX_SHOWN_ITEMS_PER_BUILD_PHABRICATOR', 10)
    total_test_count = max_shown + 1
    testcases = []
    for i in range(total_test_count):
        testcases.append(
            self.create_test(
                package='test.group.ClassName',
                name='test.group.ClassName.test_foo{}'.format(i),
                job=job,
                duration=134,
                result=Result.failed,
            ))

    build_finished_handler(build_id=build.id.hex)

    get_options.assert_called_once_with(project.id)
    build_link = build_web_uri('/find_build/{0}/'.format(build.id.hex))
    failure_link = build_web_uri('/build_tests/{0}/'.format(build.id.hex))

    assert post.call_count == 1
    (diff_id, comment, phab), _ = post.call_args
    assert diff_id == '1'

    shown_test_count = 0
    for testcase in testcases:
        test_link = build_web_uri('/build_test/{0}/{1}/'.format(
            build.id.hex,
            testcase.id.hex,
        ))
        if test_link in comment:
            shown_test_count += 1
    assert shown_test_count == max_shown
    assert '[Server]({0}). There were a total of {2} [test failures]({1})'.format(
        build_link, failure_link, total_test_count) in comment
    assert '|...more...|...|' in comment
def test_build_failure_with_tests_and_no_base_job(self, get_options, post):
    # A base build without a job means parent failures are unknown, so
    # all failures are listed under "All failures".
    get_options.return_value = {'phabricator.notify': '1'}
    project = self.create_project(name='Server', slug='project-slug')
    base_source = self.create_source(project, revision_sha='1235')
    base_build = self.create_build(project, result=Result.passed,
                                   source=base_source,
                                   status=Status.finished)
    self.assertEquals(post.call_count, 0)

    patch = self.create_patch()
    source = self.create_source(project, revision_sha='1235', patch=patch)
    build = self.create_build(project, result=Result.failed, target='D1',
                              source=source, status=Status.finished)
    job = self.create_job(build=build)
    testcase = self.create_test(
        package='test.group.ClassName',
        name='test.group.ClassName.test_foo',
        job=job,
        duration=134,
        result=Result.failed,
    )

    build_finished_handler(build_id=build.id.hex)

    get_options.assert_called_once_with(project.id)
    build_link = build_web_uri('/find_build/{0}/'.format(build.id.hex))
    failure_link = build_web_uri('/build_tests/{0}/'.format(build.id.hex))
    test_link = build_web_uri('/build_test/{0}/{1}/'.format(
        build.id.hex,
        testcase.id.hex,
    ))
    test_desc = "[test_foo](%s)" % test_link
    expected_msg = """Server build Failed {{icon times, color=red}} ([results]({0})). There were a total of 1 [test failures]({1}), but we could not determine if any of these tests were previously failing.

**All failures (1):**
|Test Name | Package|
|--|--|
|{2}|test.group.ClassName|"""
    post.assert_called_once_with(
        '1',
        expected_msg.format(build_link, failure_link, test_desc),
        mock.ANY)
def crumble(self, instance, attrs):
    """Serialize a patch instance into its API dictionary form."""
    return {
        'id': instance.id.hex,
        'diff': instance.diff,
        'link': build_web_uri('/patches/{0}/'.format(instance.id.hex)),
        'parentRevision': {
            'sha': instance.parent_revision_sha,
        },
        'dateCreated': instance.date_created,
    }
def test_max_shown_build_failures(self, get_options, post):
    # With one more failure than the display limit, exactly max_shown
    # test links appear plus a truncation marker row.
    get_options.return_value = {'phabricator.notify': '1'}
    project = self.create_project(name='Server', slug='project-slug')
    self.assertEquals(post.call_count, 0)

    patch = self.create_patch()
    source = self.create_source(project, revision_sha='1235', patch=patch)
    build = self.create_build(project, result=Result.failed, target='D1',
                              source=source, status=Status.finished)
    job = self.create_job(build=build)
    max_shown = current_app.config.get(
        'MAX_SHOWN_ITEMS_PER_BUILD_PHABRICATOR', 10)
    total_test_count = max_shown + 1
    testcases = []
    for i in range(total_test_count):
        testcases.append(self.create_test(
            package='test.group.ClassName',
            name='test.group.ClassName.test_foo{}'.format(i),
            job=job,
            duration=134,
            result=Result.failed,
        ))

    build_finished_handler(build_id=build.id.hex)

    get_options.assert_called_once_with(project.id)
    build_link = build_web_uri('/find_build/{0}/'.format(build.id.hex))
    failure_link = build_web_uri('/build_tests/{0}/'.format(build.id.hex))

    assert post.call_count == 1
    (diff_id, comment, phab), _ = post.call_args
    assert diff_id == '1'

    shown_test_count = 0
    for testcase in testcases:
        test_link = build_web_uri('/build_test/{0}/{1}/'.format(
            build.id.hex,
            testcase.id.hex,
        ))
        if test_link in comment:
            shown_test_count += 1
    assert shown_test_count == max_shown
    # BUGFIX: the assertion below was missing `in comment`, so it merely
    # asserted a non-empty string and could never fail. With no base job
    # the handler reports "a total of N" rather than "N new" failures.
    assert 'Server build Failed {{icon times, color=red}} ([results]({0})). There were a total of {2} [test failures]({1})'.format(
        build_link, failure_link, total_test_count) in comment
    assert '|...more...|...|' in comment
def test_build_failure_with_tests(self, get_options, post):
    # With a base job present, a failing test is reported as new.
    get_options.return_value = {'phabricator.notify': '1'}
    project = self.create_project(name='Server', slug='project-slug')
    base_source = self.create_source(project, revision_sha='1235')
    base_build = self.create_build(project, result=Result.passed,
                                   source=base_source,
                                   status=Status.finished)
    self.create_job(build=base_build)
    self.assertEquals(post.call_count, 0)

    patch = self.create_patch()
    source = self.create_source(project, revision_sha='1235', patch=patch)
    build = self.create_build(project, result=Result.failed, target='D1',
                              source=source, status=Status.finished)
    job = self.create_job(build=build)
    testcase = self.create_test(
        package='test.group.ClassName',
        name='test.group.ClassName.test_foo',
        job=job,
        duration=134,
        result=Result.failed,
    )

    build_finished_handler(build_id=build.id.hex)

    get_options.assert_called_once_with(project.id)
    build_link = build_web_uri('/find_build/{0}/'.format(build.id.hex))
    failure_link = build_web_uri('/build_tests/{0}/'.format(build.id.hex))
    test_link = build_web_uri('/build_test/{0}/{1}/'.format(
        build.id.hex,
        testcase.id.hex,
    ))
    test_desc = "[test_foo](%s)" % test_link
    expected_msg = """(IMPORTANT) Failing builds!\n\n - [Server]({0}). There were 1 new [test failures]({1})

**New failures (1):**
|Test Name | Package|
|--|--|
|{2}|test.group.ClassName|"""
    post.assert_called_once_with(
        '1',
        expected_msg.format(build_link, failure_link, test_desc),
        mock.ANY)
def _get_job_context(job):
    # type: (Job) -> Dict[str, Any]
    """Collect failing tests and failing log excerpts for *job*."""
    def get_job_failing_tests(job, limit=500):
        # Bounded query: at most `limit` failing tests, ordered by name.
        failing_tests = TestCase.query.options(
            subqueryload_all('messages')
        ).filter(
            TestCase.job_id == job.id,
            TestCase.result == Result.failed,
        ).order_by(TestCase.name.asc())
        failing_tests = [
            {
                'test_case': test_case,
                'uri': build_web_uri(_get_test_case_uri(test_case)),
                'message': xunit.get_testcase_messages(test_case),
            } for test_case in failing_tests[:limit]
        ]
        failing_tests_count = len(failing_tests)
        return failing_tests, failing_tests_count

    def get_job_failing_log_sources(job):
        # Clipped logs of failed steps; infrastructural sources excluded.
        failing_log_sources = LogSource.query.join(
            JobStep, LogSource.step_id == JobStep.id,
        ).filter(
            JobStep.result == Result.failed,
            JobStep.job_id == job.id,
        ).order_by(JobStep.date_created)
        failing_logs = [
            {
                'text': _get_log_clipping(
                    log_source, max_size=5000, max_lines=25),
                'name': log_source.name,
                'uri': build_web_uri(_get_log_uri(log_source)),
            } for log_source in failing_log_sources
            if not log_source.is_infrastructural()
        ]
        failing_log_sources_count = len(failing_logs)
        return failing_logs, failing_log_sources_count

    failing_tests, failing_tests_count = get_job_failing_tests(job)
    failing_logs, failing_logs_count = get_job_failing_log_sources(job)
    context = {
        'job': job,
        'uri': build_web_uri(_get_job_uri(job)),
        'failing_tests': failing_tests,
        'failing_tests_count': len(failing_tests),
        'failing_logs': failing_logs,
        'failing_logs_count': len(failing_logs),
    }
    return context
def crumble(self, instance, attrs):
    """Serialize a project instance into its API dictionary form."""
    return {
        'id': instance.id.hex,
        'slug': instance.slug,
        'name': instance.name,
        'repository': {
            'id': instance.repository_id,
        },
        'status': instance.status,
        'dateCreated': instance.date_created,
        'link': build_web_uri('/projects/{0}/'.format(instance.slug)),
    }
def _get_job_context(job):
    """Collect failing tests and failing log excerpts for *job*."""
    def get_job_failing_tests(job, limit=500):
        # Cap the number of rows pulled back so jobs with very large
        # failure counts cannot produce unbounded result sets.
        failing_tests = TestCase.query.options(
            subqueryload_all('messages')
        ).filter(
            TestCase.job_id == job.id,
            TestCase.result == Result.failed,
        ).order_by(TestCase.name.asc())
        failing_tests = [
            {
                'test_case': test_case,
                'uri': build_web_uri(_get_test_case_uri(test_case)),
                'message': xunit.get_testcase_messages(test_case),
            } for test_case in failing_tests[:limit]
        ]
        failing_tests_count = len(failing_tests)
        return failing_tests, failing_tests_count

    def get_job_failing_log_sources(job):
        # Clipped logs of failed steps; infrastructural sources excluded.
        failing_log_sources = LogSource.query.join(
            JobStep, LogSource.step_id == JobStep.id,
        ).filter(
            JobStep.result == Result.failed,
            JobStep.job_id == job.id,
        ).order_by(JobStep.date_created)
        failing_logs = [
            {
                'text': _get_log_clipping(
                    log_source, max_size=5000, max_lines=25),
                'name': log_source.name,
                'uri': build_web_uri(_get_log_uri(log_source)),
            } for log_source in failing_log_sources
            if not log_source.is_infrastructural()
        ]
        failing_log_sources_count = len(failing_logs)
        return failing_logs, failing_log_sources_count

    failing_tests, failing_tests_count = get_job_failing_tests(job)
    failing_logs, failing_logs_count = get_job_failing_log_sources(job)
    context = {
        'job': job,
        'uri': build_web_uri(_get_job_uri(job)),
        'failing_tests': failing_tests,
        'failing_tests_count': len(failing_tests),
        'failing_logs': failing_logs,
        'failing_logs_count': len(failing_logs),
    }
    return context
def _get_message_for_build_context(build_context):
    """Return the remarkup bullet line for one build, with failing-test
    details appended when the build has failing tests.
    """
    build = build_context['build']
    # NOTE: removed unused local `safe_slug` (urllib.quote_plus of the
    # project slug); it was computed but never referenced.
    message = u' - [{project}]({link}).'.format(
        project=build.project.name,
        link=build_web_uri('/find_build/{0}/'.format(build.id.hex)))

    test_failures = [t['test_case'] for t in build_context['failing_tests']]
    if build_context['failing_tests_count'] > 0:
        message += get_test_failure_remarkup(build, test_failures)
    return message
def get_project_stats(self, start_period, end_period):
    """Return per-project build statistics for the reporting window."""
    projects_by_id = dict((p.id, p) for p in self.projects)
    project_ids = projects_by_id.keys()

    # fetch overall build statistics per project
    query = db.session.query(
        Build.project_id,
        Build.result,
        func.count(Build.id).label('num'),
        func.avg(Build.duration).label('duration'),
    ).join(
        Source, Source.id == Build.source_id,
    ).filter(
        Build.project_id.in_(project_ids),
        Build.status == Status.finished,
        Build.result.in_([Result.failed, Result.passed]),
        Build.date_created >= start_period,
        Build.date_created < end_period,
        *build_type.get_any_commit_build_filters()
    ).group_by(Build.project_id, Build.result)

    project_results = {}
    for project in self.projects:
        project_results[project] = {
            'total_builds': 0,
            'green_builds': 0,
            'green_percent': None,
            'avg_duration': 0,
            'link': build_web_uri('/project/{0}/'.format(project.slug)),
        }

    for project_id, result, num_builds, duration in query:
        if duration is None:
            duration = 0
        project = projects_by_id[project_id]
        # Only passing builds contribute to the average duration.
        if result == Result.passed:
            project_results[project]['avg_duration'] = duration
        project_results[project]['total_builds'] += num_builds
        if result == Result.passed:
            project_results[project]['green_builds'] += num_builds

    for project, stats in project_results.iteritems():
        if stats['total_builds']:
            stats['green_percent'] = percent(stats['green_builds'],
                                             stats['total_builds'])
        else:
            stats['green_percent'] = None
    return project_results
def process(self, fp, artifact):
    """Parse a jobs-expansion artifact and expand the step's jobs.

    Malformed JSON or artifact content is reported as malformed; any
    other expansion error marks the step as an infrastructure failure.
    """
    try:
        phase_config = json.load(fp)
    except ValueError:
        uri = build_web_uri('/find_build/{0}/'.format(
            self.step.job.build_id.hex))
        self.logger.warning('Failed to parse json; (step=%s, build=%s)',
                            self.step.id.hex, uri, exc_info=True)
        self.report_malformed()
    else:
        _, implementation = JobPlan.get_build_step_for_job(
            job_id=self.step.job_id)
        try:
            implementation.expand_jobs(self.step, phase_config)
        except ArtifactParseError:
            uri = build_web_uri('/find_build/{0}/'.format(
                self.step.job.build_id.hex))
            self.logger.warning(
                'malformed %s artifact (step=%s, build=%s)',
                self.FILENAMES[0], self.step.id.hex, uri, exc_info=True)
            self.report_malformed()
        except Exception:
            uri = build_web_uri('/find_build/{0}/'.format(
                self.step.job.build_id.hex))
            self.logger.warning('expand_jobs failed (step=%s, build=%s)',
                                self.step.id.hex, uri, exc_info=True)
            self.step.result = Result.infra_failed
            db.session.add(self.step)
            db.session.commit()
def _get_message_for_build_context(build_context):
    """Return the remarkup bullet line for one build, with failing-test
    details appended when the build has failing tests.
    """
    build = build_context['build']
    # NOTE: removed unused local `safe_slug` (urllib.quote_plus of the
    # project slug); it was computed but never referenced.
    message = u' - [{project}]({link}).'.format(
        project=build.project.name,
        link=build_web_uri('/find_build/{0}/'.format(build.id.hex))
    )
    test_failures = [t['test_case'] for t in build_context['failing_tests']]
    if build_context['failing_tests_count'] > 0:
        message += get_test_failure_remarkup(build, test_failures)
    return message
def get_test_failure_remarkup(build, tests):
    """Build the remarkup snippet describing a build's test failures.

    When the failing tests of the base commit are unknown, every failure
    is listed under "All failures"; otherwise the failures are split into
    new ones and ones already failing in the parent revision.
    """
    # NOTE: removed unused local `safe_slug`; it was computed but never used.
    base_commit_failures = get_test_failures_in_base_commit(build)
    if base_commit_failures is None:
        total_failures = [t for t in tests]
        failures_in_parent = []
        message = ' There were a total of ' \
            '{num_failures} [test failures]({link}), but we could not ' \
            'determine if any of these tests were previously failing.'.format(
                num_failures=len(tests),
                link=build_web_uri('/build_tests/{0}/'.format(build.id.hex))
            )
        message += '\n\n**All failures ({failure_count}):**\n'.format(
            failure_count=len(total_failures)
        )
        message += _generate_remarkup_table_for_tests(build, total_failures)
    else:
        new_failures = [t for t in tests if t.name not in base_commit_failures]
        failures_in_parent = [t for t in tests if t.name in base_commit_failures]
        message = ' There were {new_failures} new [test failures]({link})'.format(
            new_failures=len(new_failures),
            link=build_web_uri('/build_tests/{0}/'.format(build.id.hex))
        )
        if new_failures:
            message += '\n\n**New failures ({new_failure_count}):**\n'.format(
                new_failure_count=len(new_failures)
            )
            message += _generate_remarkup_table_for_tests(build, new_failures)
        if failures_in_parent:
            message += '\n\n**Failures in parent revision ({parent_failure_count}):**\n'.format(
                parent_failure_count=len(failures_in_parent)
            )
            message += _generate_remarkup_table_for_tests(build, failures_in_parent)
    return message
def get_project_stats(self, start_period, end_period):
    """Return per-project build statistics for the reporting window."""
    projects_by_id = dict((p.id, p) for p in self.projects)
    project_ids = projects_by_id.keys()

    # fetch overall build statistics per project
    query = db.session.query(
        Build.project_id,
        Build.result,
        func.count(Build.id).label('num'),
        func.avg(Build.duration).label('duration'),
    ).join(
        Source, Source.id == Build.source_id,
    ).filter(
        Build.project_id.in_(project_ids),
        Build.status == Status.finished,
        Build.result.in_([Result.failed, Result.passed]),
        Build.date_created >= start_period,
        Build.date_created < end_period,
        *build_type.get_any_commit_build_filters()
    ).group_by(Build.project_id, Build.result)

    project_results = {}
    for project in self.projects:
        project_results[project] = {
            'total_builds': 0,
            'green_builds': 0,
            'green_percent': None,
            'avg_duration': 0,
            'link': build_web_uri('/project/{0}/'.format(project.slug)),
        }

    for project_id, result, num_builds, duration in query:
        if duration is None:
            duration = 0
        project = projects_by_id[project_id]
        # Only passing builds contribute to the average duration.
        if result == Result.passed:
            project_results[project]['avg_duration'] = duration
        project_results[project]['total_builds'] += num_builds
        if result == Result.passed:
            project_results[project]['green_builds'] += num_builds

    for project, stats in project_results.iteritems():
        if stats['total_builds']:
            stats['green_percent'] = percent(stats['green_builds'],
                                             stats['total_builds'])
        else:
            stats['green_percent'] = None
    return project_results
def crumble(self, instance, attrs):
    """Serialize a change instance into its API dictionary form."""
    result = {
        'id': instance.id.hex,
        'name': instance.label,
        'project': instance.project,
        'author': instance.author,
        'message': instance.message,
        'link': build_web_uri('/changes/%s/' % (instance.id.hex, )),
        'dateCreated': instance.date_created.isoformat(),
        'dateModified': instance.date_modified.isoformat(),
    }
    if hasattr(instance, 'last_job'):
        result['lastBuild'] = instance.last_job
    return result
def crumble(self, instance, attrs):
    """Serialize a change instance into its API dictionary form."""
    result = {
        'id': instance.id.hex,
        'name': instance.label,
        'project': instance.project,
        'author': instance.author,
        'message': instance.message,
        'link': build_web_uri('/changes/%s/' % (instance.id.hex,)),
        'dateCreated': instance.date_created.isoformat(),
        'dateModified': instance.date_modified.isoformat(),
    }
    if hasattr(instance, 'last_job'):
        result['lastBuild'] = instance.last_job
    return result
def get_test_failure_remarkup(build, tests):
    """Build the remarkup snippet describing a build's test failures.

    When the failing tests of the base commit are unknown, every failure
    is listed under "All failures"; otherwise the failures are split into
    new ones and ones already failing in the parent revision.
    """
    # NOTE: removed unused local `safe_slug`; it was computed but never used.
    base_commit_failures = get_test_failures_in_base_commit(build)
    if base_commit_failures is None:
        total_failures = [t for t in tests]
        failures_in_parent = []
        message = ' There were a total of ' \
            '{num_failures} [test failures]({link}), but we could not ' \
            'determine if any of these tests were previously failing.'.format(
                num_failures=len(tests),
                link=build_web_uri('/build_tests/{0}/'.format(build.id.hex))
            )
        message += '\n\n**All failures ({failure_count}):**\n'.format(
            failure_count=len(total_failures))
        message += _generate_remarkup_table_for_tests(build, total_failures)
    else:
        new_failures = [t for t in tests if t.name not in base_commit_failures]
        failures_in_parent = [
            t for t in tests if t.name in base_commit_failures
        ]
        message = ' There were {new_failures} new [test failures]({link})'.format(
            new_failures=len(new_failures),
            link=build_web_uri('/build_tests/{0}/'.format(build.id.hex)))
        if new_failures:
            message += '\n\n**New failures ({new_failure_count}):**\n'.format(
                new_failure_count=len(new_failures))
            message += _generate_remarkup_table_for_tests(build, new_failures)
        if failures_in_parent:
            message += '\n\n**Failures in parent revision ({parent_failure_count}):**\n'.format(
                parent_failure_count=len(failures_in_parent))
            message += _generate_remarkup_table_for_tests(build, failures_in_parent)
    return message
def process(self, fp, artifact):
    """Parse a jobs-expansion artifact and expand the step's jobs.

    Malformed JSON or artifact content is reported as malformed; any
    other expansion error marks the step as an infrastructure failure.
    """
    try:
        phase_config = json.load(fp)
    except ValueError:
        uri = build_web_uri('/find_build/{0}/'.format(
            self.step.job.build_id.hex))
        self.logger.warning('Failed to parse json; (step=%s, build=%s)',
                            self.step.id.hex, uri, exc_info=True)
        self.report_malformed()
    else:
        _, implementation = JobPlan.get_build_step_for_job(
            job_id=self.step.job_id)
        try:
            implementation.expand_jobs(self.step, phase_config)
        except ArtifactParseError:
            uri = build_web_uri('/find_build/{0}/'.format(
                self.step.job.build_id.hex))
            self.logger.warning(
                'malformed %s artifact (step=%s, build=%s)',
                self.FILENAMES[0], self.step.id.hex, uri, exc_info=True)
            self.report_malformed()
        except Exception:
            uri = build_web_uri('/find_build/{0}/'.format(
                self.step.job.build_id.hex))
            self.logger.warning('expand_jobs failed (step=%s, build=%s)',
                                self.step.id.hex, uri, exc_info=True)
            self.step.result = Result.infra_failed
            db.session.add(self.step)
            db.session.commit()
def test_whitelisted_project(self, get_options, post):
    # A failed build with no tests still posts a failure comment.
    get_options.return_value = {'phabricator.notify': '1'}
    project = self.create_project(name='test', slug='project-slug')
    self.assertEquals(post.call_count, 0)

    build = self.create_build(project, result=Result.failed, target='D1',
                              status=Status.finished)
    build_finished_handler(build_id=build.id.hex)

    get_options.assert_called_once_with(project.id)
    build_link = build_web_uri('/find_build/{0}/'.format(build.id.hex))
    expected_msg = "test build Failed {{icon times, color=red}} ([results]({0})).".format(
        build_link
    )
    post.assert_called_once_with('1', expected_msg, mock.ANY)
def get_html_label(self, build):
    """Return an HTML snippet linking to the build's failing tests."""
    link = build_web_uri(
        '/projects/{0}/builds/{1}/tests/?result=failed'.format(
            build.project.slug, build.id.hex))
    try:
        # First 'test_failures' stat wins; StopIteration means none exist.
        test_failures = next(
            s.value for s in build.stats if s.name == 'test_failures')
    except StopIteration:
        return Markup(
            'There were an <a href="{link}">unknown number of test failures</a>.'.format(
                link=link,
            ))
    return Markup(
        'There were <a href="{link}">{count} failing tests</a>.'.format(
            link=link,
            count=test_failures,
        ))
def test_whitelisted_project(self, get_options, post):
    # A failed build with no tests still posts a failure comment.
    get_options.return_value = {'phabricator.notify': '1'}
    project = self.create_project(name='test', slug='project-slug')
    self.assertEquals(post.call_count, 0)

    build = self.create_build(project, result=Result.failed, target='D1',
                              status=Status.finished)
    build_finished_handler(build_id=build.id.hex)

    get_options.assert_called_once_with(project.id)
    build_link = build_web_uri('/find_build/{0}/'.format(build.id.hex))
    expected_msg = "(IMPORTANT) Failing builds!\n\n - [test]({0}).".format(
        build_link)
    post.assert_called_once_with('1', expected_msg, mock.ANY)
def get_test_suites(self, fp):
    """Parse an xunit XML stream into test suites.

    Retries once with UTF-8 when the declared encoding is unknown;
    any other parse failure is reported as malformed and yields [].
    """
    try:
        start = fp.tell()
        try:
            return XunitDelegate(self.step).parse(fp)
        except expat.ExpatError as e:
            if e.message == expat.errors.XML_ERROR_UNKNOWN_ENCODING:
                # If the encoding is not known, assume it's UTF-8
                fp.seek(start)
                return XunitDelegate(self.step, 'UTF-8').parse(fp)
            else:
                raise e
    except Exception as e:
        uri = build_web_uri('/find_build/{0}/'.format(
            self.step.job.build_id.hex))
        self.logger.warning(
            'Failed to parse XML; (step=%s, build=%s); exception %s',
            self.step.id.hex, uri, e.message, exc_info=True)
        self.report_malformed()
        return []
def get_job_failing_tests(job, limit=500):
    """Return (failing_tests, count) for *job*, bounded by *limit*.

    The new `limit` parameter (default 500) caps the rows pulled back so
    jobs with very large failure counts cannot produce unbounded result
    sets; existing callers are unaffected below the cap.
    """
    failing_tests = TestCase.query.options(
        subqueryload_all('messages')
    ).filter(
        TestCase.job_id == job.id,
        TestCase.result == Result.failed,
    ).order_by(TestCase.name.asc())
    failing_tests = [
        {
            'test_case': test_case,
            'uri': build_web_uri(_get_test_case_uri(test_case)),
            'message': xunit.get_testcase_messages(test_case),
        } for test_case in failing_tests[:limit]
    ]
    failing_tests_count = len(failing_tests)
    return failing_tests, failing_tests_count
def _get_build_target(build):
    """
    Returns the build's target and target uri (normally a phabricator
    revision and diff url).
    """
    source_data = build.source.data or {}
    phabricator_rev_id = source_data.get('phabricator.revisionID')
    phabricator_uri = source_data.get('phabricator.revisionURL')

    if phabricator_rev_id and phabricator_uri:
        target = 'D{}'.format(phabricator_rev_id)
        target_uri = phabricator_uri
    else:
        # TODO: Make sure that the phabricator source data is present to
        # make this obsolete.
        target = None
        target_uri = build_web_uri(_get_source_uri(build, build.source))
    return target, target_uri
def get_job_failing_tests(job, limit=500):
    """Collect up to *limit* failing test cases for *job*, ordered by name.

    Returns a (failures, count) tuple; each failure dict carries the
    test case row, its web URI and its xunit messages.
    """
    query = TestCase.query.options(
        subqueryload_all('messages')
    ).filter(
        TestCase.job_id == job.id,
        TestCase.result == Result.failed,
    ).order_by(TestCase.name.asc())

    failing = []
    # Slicing the query applies LIMIT in SQL before hydration.
    for tc in query[:limit]:
        failing.append({
            'test_case': tc,
            'uri': build_web_uri(_get_test_case_uri(tc)),
            'message': xunit.get_testcase_messages(tc),
        })
    return failing, len(failing)
def get_job_failing_log_sources(job):
    """Gather log clippings for the failed steps of *job*.

    Infrastructural log sources are skipped. Returns (logs, count);
    each log dict carries a bounded text clipping, the source name and
    its web URI.
    """
    sources = LogSource.query.join(
        JobStep, LogSource.step_id == JobStep.id,
    ).filter(
        JobStep.result == Result.failed,
        JobStep.job_id == job.id,
    ).order_by(JobStep.date_created)

    failing_logs = []
    for source in sources:
        if source.is_infrastructural():
            # Infra logs are noise for end users; leave them out.
            continue
        failing_logs.append({
            'text': _get_log_clipping(source, max_size=5000, max_lines=25),
            'name': source.name,
            'uri': build_web_uri(_get_log_uri(source)),
        })
    return failing_logs, len(failing_logs)
def get_job_failing_log_sources(job):
    """Gather log clippings for the failed steps of *job*.

    Returns:
        Tuple of (list of log dicts, count). Each dict has a clipped
        ``text`` (at most 5000 chars / 25 lines), the log source
        ``name`` and its web ``uri``. Infrastructural log sources are
        excluded from the result.
    """
    failing_log_sources = LogSource.query.join(
        JobStep, LogSource.step_id == JobStep.id,
    ).filter(
        JobStep.result == Result.failed,
        JobStep.job_id == job.id,
    ).order_by(JobStep.date_created)
    failing_logs = [
        {
            'text': _get_log_clipping(
                log_source, max_size=5000, max_lines=25),
            'name': log_source.name,
            'uri': build_web_uri(_get_log_uri(log_source)),
        }
        for log_source in failing_log_sources
        if not log_source.is_infrastructural()
    ]
    failing_log_sources_count = len(failing_logs)
    return failing_logs, failing_log_sources_count
def _get_build_target(build):
    # type: (Build) -> Tuple[Optional[str], str]
    """
    Returns the build's target and target uri (normally a phabricator
    revision and diff url). When the Phabricator revision id/url are
    absent from the source data, falls back to (None, source page URI).
    """
    source_data = build.source.data or {}
    phabricator_rev_id = source_data.get('phabricator.revisionID')
    phabricator_uri = source_data.get('phabricator.revisionURL')
    if phabricator_rev_id and phabricator_uri:
        target = 'D{}'.format(phabricator_rev_id)
        target_uri = phabricator_uri
    else:
        # TODO: Make sure that the phabricator source data is present to
        # make this obsolete.
        target = None
        target_uri = build_web_uri(_get_source_uri(build, build.source))
    return target, target_uri
def get_slow_tests_for_project(self, project, start_period, end_period):
    """Return the slowest passing tests from *project*'s most recent
    passing build inside the reporting window.

    Finds the latest finished, passing build created in
    [start_period, end_period); ranks that build's passing test cases by
    duration, keeping at most MAX_SLOW_TESTS entries.
    """
    latest_build = Build.query.filter(
        Build.project == project,
        Build.status == Status.finished,
        Build.result == Result.passed,
        Build.date_created >= start_period,
        Build.date_created < end_period,
    ).order_by(Build.date_created.desc(),
    ).limit(1).first()
    if not latest_build:
        return []
    job_list = list(latest_build.jobs)
    if not job_list:
        return []
    # NOTE(review): the test-case window here is (start, end] while the
    # build window above is [start, end) — confirm the asymmetry is intended.
    queryset = TestCase.query.filter(
        TestCase.job_id.in_(j.id for j in job_list),
        TestCase.result == Result.passed,
        TestCase.date_created > start_period,
        TestCase.date_created <= end_period,
    ).order_by(TestCase.duration.desc()).limit(MAX_SLOW_TESTS)
    slow_list = []
    for test in queryset:
        slow_list.append({
            'project': project,
            'name': test.short_name,
            'package': test.package,
            # duration is divided by 1000 and labelled seconds, i.e. the
            # stored value is milliseconds.
            'duration': '%.2f s' % (test.duration / 1000.0, ),
            'duration_raw': test.duration,
            'link': build_web_uri('/project_test/{0}/{1}/'.format(
                project.id.hex, test.name_sha)),
        })
    return slow_list
def get_slow_tests_for_project(self, project, start_period, end_period):
    """List the slowest passing tests for *project* in the given window.

    Uses the latest finished, passing build created within
    [start_period, end_period); its passing test cases are ordered by
    descending duration and capped at MAX_SLOW_TESTS. Returns an empty
    list when no qualifying build (or no jobs) exists.
    """
    latest_build = Build.query.filter(
        Build.project == project,
        Build.status == Status.finished,
        Build.result == Result.passed,
        Build.date_created >= start_period,
        Build.date_created < end_period,
    ).order_by(
        Build.date_created.desc(),
    ).limit(1).first()
    if not latest_build:
        return []
    job_list = list(latest_build.jobs)
    if not job_list:
        return []
    # NOTE(review): test-case bounds are (start, end] while the build
    # bounds above are [start, end) — verify this asymmetry is deliberate.
    queryset = TestCase.query.filter(
        TestCase.job_id.in_(j.id for j in job_list),
        TestCase.result == Result.passed,
        TestCase.date_created > start_period,
        TestCase.date_created <= end_period,
    ).order_by(
        TestCase.duration.desc()
    ).limit(MAX_SLOW_TESTS)
    slow_list = []
    for test in queryset:
        slow_list.append({
            'project': project,
            'name': test.short_name,
            'package': test.package,
            # Stored duration is milliseconds; rendered as seconds.
            'duration': '%.2f s' % (test.duration / 1000.0,),
            'duration_raw': test.duration,
            'link': build_web_uri('/project_test/{0}/{1}/'.format(
                project.id.hex, test.name_sha)),
        })
    return slow_list
def _get_message_for_build_context(build_context):
    """Render the remarkup summary line for a single build context.

    Chooses an icon for the build result, links to the build's find_build
    page, and appends a failing-test table when the context reports any
    test failures.

    Args:
        build_context: dict as produced by _get_build_context; must
            contain 'build', 'failing_tests' and 'failing_tests_count'.

    Returns:
        A unicode remarkup string.
    """
    build = build_context['build']
    result = build.result
    if result == Result.passed:
        result_image = '{icon check, color=green}'
    elif result == Result.failed:
        result_image = '{icon times, color=red}'
    else:
        # Any non-terminal / unknown result gets a neutral marker.
        result_image = '{icon question, color=orange}'
    # Removed dead local: `safe_slug = urllib.quote_plus(build.project.slug)`
    # was computed but never used.
    message = u'{project} build {result} {image} ([results]({link})).'.format(
        project=build.project.name,
        image=result_image,
        result=unicode(build.result),
        link=build_web_uri('/find_build/{0}/'.format(build.id.hex))
    )
    test_failures = [t['test_case'] for t in build_context['failing_tests']]
    if build_context['failing_tests_count'] > 0:
        message += get_test_failure_remarkup(build, test_failures)
    return message
def _get_build_context(build, get_parent=True):
    """Assemble a template context dict for *build*.

    Collects per-job contexts, aggregates their failing tests and log
    counts, and (optionally) attaches the context of the most recent
    parent build.

    Args:
        build: the Build to describe.
        get_parent: when True, also build a context for the latest
            parent build, if one exists.
    """
    jobs = list(Job.query.filter(Job.build_id == build.id))
    jobs_context = map(_get_job_context, jobs)
    parent_build_context = None
    if get_parent:
        parent_build = get_parents_last_builds(build)
        if parent_build:
            # Recurse only one level: the parent's context skips its own parent.
            parent_build_context = _get_build_context(
                parent_build[0], get_parent=False)
    return {
        'build': build,
        'parent_build': parent_build_context,
        'jobs': jobs_context,
        'uri': build_web_uri(_get_build_uri(build)),
        'is_passing': build.result == Result.passed,
        'is_failing': build.result == Result.failed,
        'result_string': str(build.result).lower(),
        # Flatten each job's failing tests into one list for the template.
        'failing_tests': list(chain(*[j['failing_tests'] for j in jobs_context])),
        'failing_tests_count': _aggregate_count(jobs_context, 'failing_tests_count'),
        'failing_logs_count': _aggregate_count(jobs_context, 'failing_logs_count'),
    }
def _get_build_context(build, get_parent=True):
    # type: (Build, bool) -> Dict[str, Any]
    """Build the notification/template context for *build*.

    Gathers job contexts, rolls up failing-test and failing-log counts,
    and optionally includes a one-level-deep context for the most recent
    parent build.
    """
    jobs = list(Job.query.filter(Job.build_id == build.id))
    jobs_context = map(_get_job_context, jobs)
    parent_build_context = None
    if get_parent:
        parent_build = get_parents_last_builds(build)
        if parent_build:
            # get_parent=False prevents unbounded recursion up the chain.
            parent_build_context = _get_build_context(
                parent_build[0], get_parent=False)
    return {
        'build': build,
        'parent_build': parent_build_context,
        'jobs': jobs_context,
        'uri': build_web_uri(_get_build_uri(build)),
        'is_passing': build.result == Result.passed,
        'is_failing': build.result == Result.failed,
        'result_string': str(build.result).lower(),
        # One flat list of failing tests across all jobs.
        'failing_tests': list(chain(*[j['failing_tests'] for j in jobs_context])),
        'failing_tests_count': _aggregate_count(jobs_context, 'failing_tests_count'),
        'failing_logs_count': _aggregate_count(jobs_context, 'failing_logs_count'),
    }
def crumble(self, item, attrs):
    """Serialize a Build (*item*) into an API-friendly dict.

    Args:
        item: the Build being serialized.
        attrs: precomputed attributes; must contain 'stats' and 'failures'.
    """
    if item.project_id:
        avg_build_time = item.project.avg_build_time
    else:
        avg_build_time = None
    target = item.target
    if target is None and item.source and item.source.revision_sha:
        # No explicit target: fall back to an abbreviated revision SHA.
        target = item.source.revision_sha[:12]
    return {
        'id': item.id.hex,
        'collection_id': item.collection_id,
        'number': item.number,
        'name': item.label,
        'target': target,
        'result': item.result,
        'status': item.status,
        'project': item.project,
        'cause': item.cause,
        'author': item.author,
        'source': item.source,
        'message': item.message,
        'tags': item.tags or [],
        'duration': item.duration,
        'estimatedDuration': avg_build_time,
        # Timestamps are ISO-8601 strings; optional ones fall back to None.
        'dateCreated': item.date_created.isoformat(),
        'dateModified': item.date_modified.isoformat() if item.date_modified else None,
        'dateStarted': item.date_started.isoformat() if item.date_started else None,
        'dateFinished': item.date_finished.isoformat() if item.date_finished else None,
        'dateDecided': item.date_decided.isoformat() if item.date_decided else None,
        'stats': attrs['stats'],
        'failures': attrs['failures'],
        'link': build_web_uri('/projects/{0}/builds/{1}/'.format(
            item.project.slug, item.id.hex)),
    }
def get_flaky_tests(start_period, end_period, projects, maxFlakyTests):
    """Find tests that passed only after reruns ("flaky") in the window.

    Considers commit builds only (sources without a patch) created in
    [start_period, end_period). Passing test cases are grouped by
    (name_sha, project) and ranked by total reruns; groups with no
    reruns are dropped. For each flaky hash, the most recent rerun
    occurrence supplies display metadata and the link.

    Returns:
        A list of at most maxFlakyTests dicts with rerun counts,
        identifying info and a link to a recent run.
    """
    test_queryset = TestCase.query.filter(
        TestCase.project_id.in_(p.id for p in projects),
        TestCase.result == Result.passed,
        TestCase.date_created >= start_period,
        TestCase.date_created < end_period,
    ).join(
        Job, Job.id == TestCase.job_id,
    ).join(
        Build, Build.id == Job.build_id,
    ).join(
        Source, Source.id == Build.source_id,
    ).filter(
        # Commit builds only; patch (diff) builds are excluded.
        Source.patch_id == None,  # NOQA
    )
    flaky_test_queryset = test_queryset.with_entities(
        TestCase.name_sha,
        TestCase.project_id,
        # Count of runs that needed at least one rerun / at least two.
        func.sum(case([(TestCase.reruns > 0, 1)], else_=0)).label('reruns'),
        func.sum(case([(TestCase.reruns > 1, 1)], else_=0)).label('double_reruns'),
        func.count('*').label('count')
    ).group_by(
        TestCase.name_sha,
        TestCase.project_id
    ).order_by(
        func.sum(TestCase.reruns).desc()
    ).limit(maxFlakyTests)
    project_names = {p.id: p.name for p in projects}
    flaky_list = []
    for name_sha, project_id, reruns, double_reruns, count in flaky_test_queryset:
        if reruns == 0:
            # Never rerun in this window; not flaky.
            continue
        # Most recent occurrence that actually had reruns.
        rerun = test_queryset.filter(
            TestCase.name_sha == name_sha,
            TestCase.project_id == project_id,
            TestCase.reruns > 0,
        ).order_by(
            TestCase.date_created.desc()
        ).first()
        flaky_list.append({
            'id': rerun.id,
            'name': rerun.name,
            'short_name': rerun.short_name,
            'package': rerun.package,
            'hash': name_sha,
            'project_id': rerun.project_id,
            'project_name': project_names[rerun.project_id],
            'flaky_runs': reruns,
            'double_reruns': double_reruns,
            'passing_runs': count,
            'link': build_web_uri('/projects/{0}/builds/{1}/jobs/{2}/tests/{3}/'.format(
                rerun.project.slug,
                rerun.job.build.id.hex,
                rerun.job.id.hex,
                rerun.id.hex)),
        })
    return flaky_list
def crumble(self, item, attrs):
    """Serialize a Build (*item*) into an API-friendly dict.

    Variant that also exposes the build's selective testing policy.

    Args:
        item: the Build being serialized.
        attrs: precomputed attributes; must contain 'stats' and 'failures'.
    """
    if item.project_id:
        avg_build_time = item.project.avg_build_time
    else:
        avg_build_time = None
    target = item.target
    if target is None and item.source and item.source.revision_sha:
        # No explicit target: fall back to an abbreviated revision SHA.
        target = item.source.revision_sha[:12]
    # Builds without a stored policy are reported as 'disabled'.
    selective_testing_policy = item.selective_testing_policy if item.selective_testing_policy else SelectiveTestingPolicy.disabled
    return {
        'id': item.id.hex,
        'collection_id': item.collection_id,
        'number': item.number,
        'name': item.label,
        'target': target,
        'result': item.result,
        'status': item.status,
        'selectiveTestingPolicy': selective_testing_policy,
        'project': item.project,
        'cause': item.cause,
        'author': item.author,
        'source': item.source,
        'message': item.message,
        'tags': item.tags or [],
        'duration': item.duration,
        'estimatedDuration': avg_build_time,
        # Timestamps are ISO-8601 strings; optional ones fall back to None.
        'dateCreated': item.date_created.isoformat(),
        'dateModified': item.date_modified.isoformat() if item.date_modified else None,
        'dateStarted': item.date_started.isoformat() if item.date_started else None,
        'dateFinished': item.date_finished.isoformat() if item.date_finished else None,
        'dateDecided': item.date_decided.isoformat() if item.date_decided else None,
        'stats': attrs['stats'],
        'failures': attrs['failures'],
        'link': build_web_uri('/projects/{0}/builds/{1}/'.format(
            item.project.slug, item.id.hex)),
    }
def get_test_desc(build, testcase, test_name):
    """Return a markdown link labelled *test_name* pointing at the test page."""
    path = '/build_test/{0}/{1}/'.format(build.id.hex, testcase.id.hex)
    return "[%s](%s)" % (test_name, build_web_uri(path))
def revision_result_updated_handler(revision_result_id, **kwargs):
    """Update the latest green build the revision result is green.

    Also, send a green build notification if the project is configured
    to do so.
    """
    revision_result = RevisionResult.query.get(revision_result_id)
    if revision_result is None:
        return
    build = revision_result.build
    if build is None:
        return
    # Only green (passed) results are of interest here.
    if revision_result.result != Result.passed:
        return
    source = build.source
    vcs = source.repository.get_vcs()
    if vcs is None:
        logger.info('Repository has no VCS set: %s', source.repository.id)
        return
    # ensure we have the latest changes
    if vcs.exists():
        try:
            vcs.update()
        except ConcurrentUpdateError:
            # Retry once if it was already updating.
            vcs.update()
    else:
        vcs.clone()
    # set latest_green_build if latest for each branch:
    _set_latest_green_build_for_each_branch(build, source, vcs)
    # Send green build notifications if configured to do so.
    url = current_app.config.get('GREEN_BUILD_URL')
    if not url:
        logger.info('GREEN_BUILD_URL not set')
        return
    auth = current_app.config['GREEN_BUILD_AUTH']
    if not auth:
        logger.info('GREEN_BUILD_AUTH not set')
        return
    options = get_options(build.project_id)
    if options.get('green-build.notify', '1') != '1':
        logger.info('green-build.notify disabled for project: %s', build.project_id)
        return
    # '*' (the default) means all branches are eligible.
    branch_names = filter(bool, options.get('build.branch-names', '*').split(' '))
    if not source.revision.should_build_branch(branch_names):
        return
    release_id = get_release_id(source, vcs)
    project = options.get('green-build.project') or build.project.slug
    # Commit time as a POSIX timestamp derived from the UTC time tuple.
    committed_timestamp_sec = calendar.timegm(source.revision.date_committed.utctimetuple())
    logging.info('Making green_build request to %s', url)
    try:
        requests.post(url, auth=auth, timeout=10, data={
            'project': project,
            'id': release_id,
            'build_url': build_web_uri('/projects/{0}/builds/{1}/'.format(
                build.project.slug, build.id.hex)),
            'build_server': 'changes',
            'author_name': source.revision.author.name,
            'author_email': source.revision.author.email,
            'commit_timestamp': committed_timestamp_sec,
            'revision_message': source.revision.message,
        }).raise_for_status()
    except HTTPError as ex:
        # Conflicts aren't necessarily failures; some green build receivers
        # report conflict if they see out-of-order results (not uncommon in Changes).
        # We want to track those situations independently of other non-success responses.
        # NOTE: We compare `ex.response` to None explicitly because any non-200 response
        # evaluates to `False`.
        if ex.response is not None and ex.response.status_code == 409:
            # NOTE(review): this branch appears to leave `status` unassigned,
            # which would raise NameError at create_or_update below — confirm
            # against the original indentation of `status = 'fail'`.
            logger.warning("Conflict when reporting green build", extra={
                'data': {
                    'project': project,
                    'release_id': release_id,
                    'build_id': build.id.hex,
                }
            })
        else:
            logger.exception('Failed to report green build')
            status = 'fail'
    except Exception:
        logger.exception('Failed to report green build')
        status = 'fail'
    else:
        status = 'success'
    # Record the outcome of the notification attempt on the build.
    create_or_update(Event, where={
        'type': EventType.green_build,
        'item_id': build.id,
    }, values={
        'data': {
            'status': status,
        },
        'date_modified': datetime.utcnow(),
    })
def revision_result_updated_handler(revision_result_id, **kwargs):
    """Update the latest green build the revision result is green.

    Also, send a green build notification if the project is configured
    to do so.
    """
    revision_result = RevisionResult.query.get(revision_result_id)
    if revision_result is None:
        return
    build = revision_result.build
    if build is None:
        return
    # Only passed results trigger green-build processing.
    if revision_result.result != Result.passed:
        return
    source = build.source
    vcs = source.repository.get_vcs()
    if vcs is None:
        logger.info('Repository has no VCS set: %s', source.repository.id)
        return
    # ensure we have the latest changes
    if vcs.exists():
        try:
            vcs.update()
        except ConcurrentUpdateError:
            # Retry once if it was already updating.
            vcs.update()
    else:
        vcs.clone()
    # set latest_green_build if latest for each branch:
    _set_latest_green_build_for_each_branch(build, source, vcs)
    # Send green build notifications if configured to do so.
    url = current_app.config.get('GREEN_BUILD_URL')
    if not url:
        logger.info('GREEN_BUILD_URL not set')
        return
    auth = current_app.config['GREEN_BUILD_AUTH']
    if not auth:
        logger.info('GREEN_BUILD_AUTH not set')
        return
    options = get_options(build.project_id)
    if options.get('green-build.notify', '1') != '1':
        logger.info('green-build.notify disabled for project: %s', build.project_id)
        return
    # '*' (the default) makes every branch eligible.
    branch_names = filter(bool, options.get('build.branch-names', '*').split(' '))
    if not source.revision.should_build_branch(branch_names):
        return
    release_id = get_release_id(source, vcs)
    project = options.get('green-build.project') or build.project.slug
    # Commit time as a POSIX timestamp derived from the UTC time tuple.
    committed_timestamp_sec = calendar.timegm(
        source.revision.date_committed.utctimetuple())
    logging.info('Making green_build request to %s', url)
    try:
        requests.post(url, auth=auth, timeout=10, data={
            'project': project,
            'id': release_id,
            'build_url': build_web_uri('/projects/{0}/builds/{1}/'.format(
                build.project.slug, build.id.hex)),
            'build_server': 'changes',
            'author_name': source.revision.author.name,
            'author_email': source.revision.author.email,
            'commit_timestamp': committed_timestamp_sec,
            'revision_message': source.revision.message,
        }).raise_for_status()
    except HTTPError as ex:
        # Conflicts aren't necessarily failures; some green build receivers
        # report conflict if they see out-of-order results (not uncommon in Changes).
        # We want to track those situations independently of other non-success responses.
        # NOTE: We compare `ex.response` to None explicitly because any non-200 response
        # evaluates to `False`.
        if ex.response is not None and ex.response.status_code == 409:
            # NOTE(review): this branch appears to leave `status` unassigned,
            # which would raise NameError at create_or_update below — confirm
            # against the original indentation of `status = 'fail'`.
            logger.warning("Conflict when reporting green build", extra={
                'data': {
                    'project': project,
                    'release_id': release_id,
                    'build_id': build.id.hex,
                }
            })
        else:
            logger.exception('Failed to report green build')
            status = 'fail'
    except Exception:
        logger.exception('Failed to report green build')
        status = 'fail'
    else:
        status = 'success'
    # Persist the notification outcome against the build.
    create_or_update(Event, where={
        'type': EventType.green_build,
        'item_id': build.id,
    }, values={
        'data': {
            'status': status,
        },
        'date_modified': datetime.utcnow(),
    })
def get_test_desc(build, testcase, test_name):
    """Markdown-format a link to the given test case's detail page."""
    link = build_web_uri(
        '/build_test/{0}/{1}/'.format(build.id.hex, testcase.id.hex))
    return "[{0}]({1})".format(test_name, link)
def test_multiple_builds(self, get_options, post):
    """One diff, two projects: the failing build's new failures are posted,
    with a passing-builds footer for the sibling build in the collection."""
    get_options.return_value = {'phabricator.notify': '1'}
    project1 = self.create_project(name='Server', slug='project-slug')
    project2 = self.create_project(name='Server2', slug='project-slug2')
    self.assertEquals(post.call_count, 0)
    # Builds sharing a collection_id are reported together in one comment.
    collection_id = uuid.uuid4()

    def create_build(result, project):
        # A finished passing parent build establishes the failure baseline.
        base_source = self.create_source(project, revision_sha='1235')
        base_build = self.create_build(project, result=Result.passed,
                                       source=base_source,
                                       status=Status.finished)
        self.create_job(build=base_build)
        patch = self.create_patch()
        source = self.create_source(project, revision_sha='1235', patch=patch)
        build = self.create_build(project, result=result, target='D1',
                                  source=source, status=Status.finished,
                                  collection_id=collection_id)
        job = self.create_job(build=build)
        testcase = self.create_test(
            package='test.group.ClassName',
            name='test.group.ClassName.test_foo',
            job=job,
            duration=134,
            result=result,
        )
        return build, testcase

    build1, testcase1 = create_build(Result.failed, project1)
    build2, testcase2 = create_build(Result.passed, project2)
    build_finished_handler(build_id=build1.id.hex)
    build_link = build_web_uri('/find_build/{0}/'.format(build1.id.hex))
    build2_link = build_web_uri('/find_build/{0}/'.format(build2.id.hex))
    failure_link = build_web_uri('/build_tests/{0}/'.format(build1.id.hex))
    test_link = build_web_uri('/build_test/{0}/{1}/'.format(
        build1.id.hex,
        testcase1.id.hex,
    ))
    test_desc = "[test_foo](%s)" % test_link
    # NOTE(review): this literal is a multi-line remarkup block whose
    # internal line breaks could not be recovered from the collapsed
    # source; verify the exact whitespace against the handler's output.
    expected_msg = """(IMPORTANT) Failing builds!\n\n - [Server]({0}). There were 1 new [test failures]({1}) **New failures (1):** |Test Name | Package| |--|--| |{2}|test.group.ClassName| (NOTE) Passing builds: - [Server2]({3})."""
    post.assert_called_once_with(
        '1',
        expected_msg.format(build_link, failure_link, test_desc, build2_link),
        mock.ANY)