def testUpdateTryJobMetadataForBuildError(self):
    """A buildbucket error should be recorded on the try job metadata."""
    build_error = buildbucket_client.BuildbucketError({
        'reason': 'BUILD_NOT_FOUND',
        'message': 'message'
    })
    metadata = WfTryJobData.Create('1')

    MonitorTryJobPipeline._UpdateTryJobMetadataForBuildError(
        metadata, build_error)

    # The raw error dict is stored verbatim on the metadata entity.
    self.assertEqual(
        metadata.error, {'reason': 'BUILD_NOT_FOUND', 'message': 'message'})
  def testUpdateTryJobResultAnalyzing(self):
    """A STARTED buildbucket build should mark the try job as RUNNING."""
    master_name = 'm'
    builder_name = 'b'
    build_number = 1
    try_job_id = '3'

    # Bug fix: the original bound the ndb.Key returned by put() to
    # ``try_job`` (put() returns a Key, not the entity), which was
    # misleading; just persist the entity without the binding.
    WfTryJob.Create(master_name, builder_name, build_number).put()

    pipeline = MonitorTryJobPipeline()
    pipeline._UpdateTryJobResult(
        buildbucket_client.BuildbucketBuild.STARTED, master_name, builder_name,
        build_number, TryJobType.TEST, try_job_id, 'url')

    # Reload the stored entity and verify its status transitioned.
    try_job = WfTryJob.Get(master_name, builder_name, build_number)
    self.assertEqual(analysis_status.RUNNING, try_job.status)
  def testGetTryJobsForTestSuccess(self):
    """run() should fill in the buildbucket report for a test try job."""
    master_name = 'm'
    builder_name = 'b'
    build_number = 1
    try_job_id = '3'

    # Seed a RUNNING try job whose latest test result has no report yet.
    try_job = WfTryJob.Create(master_name, builder_name, build_number)
    try_job.test_results = [
        {
            'report': None,
            'url': 'url',
            'try_job_id': '3',
        }
    ]
    try_job.status = analysis_status.RUNNING
    try_job.put()
    # Helper defined elsewhere in this test case; presumably mocks the
    # buildbucket responses for |try_job_id| -- confirm against the class.
    self._MockGetTryJobs(try_job_id)

    pipeline = MonitorTryJobPipeline()
    test_result = pipeline.run(
        master_name, builder_name, build_number, TryJobType.TEST,
        try_job_id)

    # Per-revision test report expected from the mocked buildbucket build.
    expected_test_result = {
        'report': {
            'result': {
                'rev1': {
                    'a_test': {
                        'status': 'passed',
                        'valid': True
                    }
                },
                'rev2': {
                    'a_test': {
                        'status': 'failed',
                        'valid': True,
                        'failures': ['test1', 'test2']
                    }
                }
            }
        },
        'url': 'url',
        'try_job_id': '3',
    }
    self.assertEqual(expected_test_result, test_result)

    # The stored entity is updated in place and its status stays RUNNING.
    try_job = WfTryJob.Get(master_name, builder_name, build_number)
    self.assertEqual(expected_test_result, try_job.test_results[-1])
    self.assertEqual(analysis_status.RUNNING, try_job.status)
    def testUpdateTryJobResultAnalyzing(self):
        """_UpdateTryJobResult marks a started test try job as RUNNING."""
        job = WfTryJob.Create('m', 'b', 1)
        job.put()

        monitor = MonitorTryJobPipeline()
        monitor._UpdateTryJobResult(
            job.key.urlsafe(), failure_type.TEST, '3', 'url',
            buildbucket_client.BuildbucketBuild.STARTED)

        # Reload the entity and check the status transition.
        refreshed = WfTryJob.Get('m', 'b', 1)
        self.assertEqual(analysis_status.RUNNING, refreshed.status)
  def testGetTryJobsForCompileSuccess(self):
    """run() should record report/metadata for a completed compile try job."""
    master_name = 'm'
    builder_name = 'b'
    build_number = 1
    try_job_id = '1'
    regression_range_size = 2

    # Seed a RUNNING try job plus its metadata entity; the latest compile
    # result has no report yet, so run() must fill it in.
    try_job = WfTryJob.Create(master_name, builder_name, build_number)
    try_job_data = WfTryJobData.Create(try_job_id)
    try_job_data.put()
    try_job.compile_results = [
        {
            'report': None,
            'url': 'url',
            'try_job_id': '1',
        }
    ]
    try_job.status = analysis_status.RUNNING
    try_job.put()
    # Helper defined elsewhere in this test case; presumably mocks the
    # buildbucket responses for |try_job_id| -- confirm against the class.
    self._MockGetTryJobs(try_job_id)

    pipeline = MonitorTryJobPipeline()
    compile_result = pipeline.run(
        master_name, builder_name, build_number, TryJobType.COMPILE,
        try_job_id)

    expected_compile_result = {
        'report': {
            'result': {
                'rev1': 'passed',
                'rev2': 'failed'
            },
            'metadata': {
                'regression_range_size': regression_range_size
            }
        },
        'url': 'url',
        'try_job_id': '1',
    }

    self.assertEqual(expected_compile_result, compile_result)

    # The stored entity is updated in place and its status stays RUNNING.
    try_job = WfTryJob.Get(master_name, builder_name, build_number)
    self.assertEqual(expected_compile_result, try_job.compile_results[-1])
    self.assertEqual(analysis_status.RUNNING, try_job.status)

    # The metadata entity picks up the regression range size from the
    # report's metadata.
    try_job_data = WfTryJobData.Get(try_job_id)
    self.assertEqual(try_job_data.regression_range_size, regression_range_size)
    def testReturnNoneIfNoTryJobId(self):
        """run() should produce a None output when no try job id is given."""
        monitor = MonitorTryJobPipeline()
        monitor.start_test()
        wf_try_job = WfTryJob.Create('m', 'b', 1)
        monitor.run(wf_try_job.key.urlsafe(), failure_type.TEST, None)

        # Reload from ID to get all internal properties in sync.
        monitor = MonitorTryJobPipeline.from_id(monitor.pipeline_id)
        monitor.finalized()
        self.assertIsNone(monitor.outputs.default.value)
    def testUpdateFlakeTryJobResult(self):
        """A started flake try job should transition to RUNNING."""
        job = FlakeTryJob.Create('m', 'b', 's', 't', 'a1b2c3d4')
        job.put()

        monitor = MonitorTryJobPipeline()
        monitor._UpdateTryJobResult(
            job.key.urlsafe(), failure_type.FLAKY_TEST, '2', 'url',
            buildbucket_client.BuildbucketBuild.STARTED)

        # Reload the entity and check the status transition.
        refreshed = FlakeTryJob.Get('m', 'b', 's', 't', 'a1b2c3d4')
        self.assertEqual(analysis_status.RUNNING, refreshed.status)
  def testUpdateTryJobMetadataForCompletedBuild(self):
    """Completed builds populate timing and report-derived metadata."""
    try_job_id = '1'
    url = 'url'
    build_data = {
        'id': try_job_id,
        'url': url,
        'status': 'COMPLETED',
        # Timestamps are microseconds since epoch (see datetime asserts below).
        'completed_ts': '1454367574000000',
        'created_ts': '1454367570000000',
        'result_details_json': json.dumps({
            'properties': {
                'report': {
                    'result': {
                        'rev1': 'passed',
                        'rev2': 'failed'
                    },
                    'metadata': {
                        'regression_range_size': 2
                    }
                }
            }
        })
    }
    build = buildbucket_client.BuildbucketBuild(build_data)
    try_job_data = WfTryJobData.Create(try_job_id)

    # Normal completion: no error, metadata derived from the build report.
    MonitorTryJobPipeline._UpdateTryJobMetadataForCompletedBuild(
        try_job_data, build, None, timed_out=False)
    try_job_data = WfTryJobData.Get(try_job_id)
    self.assertIsNone(try_job_data.error)
    self.assertEqual(try_job_data.regression_range_size, 2)
    self.assertEqual(try_job_data.number_of_commits_analyzed, 2)
    self.assertEqual(try_job_data.end_time, datetime(2016, 2, 1, 22, 59, 34))
    self.assertEqual(try_job_data.request_time,
                     datetime(2016, 2, 1, 22, 59, 30))
    self.assertEqual(try_job_data.try_job_url, url)

    # Timed-out completion records a TIMEOUT error instead.
    MonitorTryJobPipeline._UpdateTryJobMetadataForCompletedBuild(
        try_job_data, build, None, timed_out=True)
    self.assertEqual(try_job_data.error,
                     {'message': MonitorTryJobPipeline.TIMEOUT,
                      'reason': MonitorTryJobPipeline.TIMEOUT})
 def testMicrosecondsToDatetime(self):
   """_MicrosecondsToDatetime converts usec-since-epoch; None passes through."""
   self.assertEqual(
       datetime(2016, 2, 1, 22, 59, 34),
       MonitorTryJobPipeline._MicrosecondsToDatetime(1454367574000000))
   self.assertIsNone(MonitorTryJobPipeline._MicrosecondsToDatetime(None))
    def testGetTryJobsForCompileSuccessSwarming(self, mock_buildbucket,
                                                mock_report):
        """A swarming-hosted compile try job completes and stores its report.

        NOTE(review): mock_buildbucket/mock_report are presumably injected by
        @mock.patch decorators not visible in this chunk -- confirm upstream.
        """
        master_name = 'm'
        builder_name = 'b'
        build_number = 1
        try_job_id = '1'
        try_job_url = 'https://luci-milo.appspot.com/swarming/task/3595be5002f4bc10'
        regression_range_size = 2

        # Seed a RUNNING try job plus its metadata entity; the latest
        # compile result has no report yet, so the pipeline must fill it in.
        try_job = WfTryJob.Create(master_name, builder_name, build_number)
        try_job_data = WfTryJobData.Create(try_job_id)
        try_job_data.try_job_key = try_job.key
        try_job_data.try_job_url = try_job_url
        try_job_data.put()
        try_job.compile_results = [{
            'report': None,
            'url': try_job_url,
            'try_job_id': '1',
        }]
        try_job.status = analysis_status.RUNNING
        try_job.put()

        # Buildbucket reports the build as already COMPLETED on first poll.
        build_response = {
            'id': '1',
            'url': try_job_url,
            'status': 'COMPLETED',
        }
        report = {
            'result': {
                'rev1': 'passed',
                'rev2': 'failed'
            },
            'metadata': {
                'regression_range_size': 2
            }
        }
        mock_buildbucket.GetTryJobs.return_value = [
            (None, buildbucket_client.BuildbucketBuild(build_response))
        ]
        mock_report.return_value = json.dumps(report)

        # Drive the asynchronous pipeline: run() then one callback poll.
        pipeline = MonitorTryJobPipeline()
        pipeline.start_test()
        pipeline.run(try_job.key.urlsafe(), failure_type.COMPILE, try_job_id)
        pipeline.callback(callback_params=pipeline.last_params)

        # Reload from ID to get all internal properties in sync.
        pipeline = MonitorTryJobPipeline.from_id(pipeline.pipeline_id)
        pipeline.finalized()
        compile_result = pipeline.outputs.default.value

        expected_compile_result = {
            'report': {
                'result': {
                    'rev1': 'passed',
                    'rev2': 'failed'
                },
                'metadata': {
                    'regression_range_size': regression_range_size
                }
            },
            'url': try_job_url,
            'try_job_id': '1',
        }

        self.assertEqual(expected_compile_result, compile_result)

        # The stored entity is updated in place and stays RUNNING.
        try_job = WfTryJob.Get(master_name, builder_name, build_number)
        self.assertEqual(expected_compile_result, try_job.compile_results[-1])
        self.assertEqual(analysis_status.RUNNING, try_job.status)

        # Metadata picks up the regression range size from the report.
        try_job_data = WfTryJobData.Get(try_job_id)
        self.assertEqual(try_job_data.regression_range_size,
                         regression_range_size)
    def testGetTryJobsForCompileSuccessBackwardCompatibleCallback(
            self, mock_buildbucket, mock_report):
        """callback() invoked with legacy **kwargs still completes the job.

        NOTE(review): mock_buildbucket/mock_report are presumably injected by
        @mock.patch decorators not visible in this chunk -- confirm upstream.
        """
        master_name = 'm'
        builder_name = 'b'
        build_number = 1
        try_job_id = '1'
        regression_range_size = 2

        # Seed a RUNNING try job plus its metadata entity.
        try_job = WfTryJob.Create(master_name, builder_name, build_number)
        try_job_data = WfTryJobData.Create(try_job_id)
        try_job_data.try_job_key = try_job.key
        try_job_data.try_job_url = (
            'https://build.chromium.org/p/m/builders/b/builds/1234')
        try_job_data.put()
        try_job.compile_results = [{
            'report': None,
            'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
            'try_job_id': '1',
        }]
        try_job.status = analysis_status.RUNNING
        try_job.put()

        report = {
            'result': {
                'rev1': 'passed',
                'rev2': 'failed'
            },
            'metadata': {
                'regression_range_size': 2
            }
        }

        # Timestamps are microseconds since epoch.
        build_response = {
            'id': '1',
            'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
            'status': 'COMPLETED',
            'completed_ts': '1454367574000000',
            'created_ts': '1454367570000000',
            'updated_ts': '1454367574000000',
        }
        mock_buildbucket.GetTryJobs.return_value = [
            (None, buildbucket_client.BuildbucketBuild(build_response))
        ]
        mock_report.return_value = json.dumps(report)

        pipeline = MonitorTryJobPipeline(try_job.key.urlsafe(),
                                         failure_type.COMPILE, try_job_id)
        pipeline.start_test()
        pipeline.run(try_job.key.urlsafe(), failure_type.COMPILE, try_job_id)
        # Legacy calling convention: params expanded as keyword arguments
        # instead of the callback_params=... dict used by newer callers.
        pipeline.callback(**pipeline.last_params)

        # Reload from ID to get all internal properties in sync.
        pipeline = MonitorTryJobPipeline.from_id(pipeline.pipeline_id)
        pipeline.finalized()
        compile_result = pipeline.outputs.default.value

        expected_compile_result = {
            'report': {
                'result': {
                    'rev1': 'passed',
                    'rev2': 'failed'
                },
                'metadata': {
                    'regression_range_size': regression_range_size
                }
            },
            'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
            'try_job_id': '1',
        }

        self.assertEqual(expected_compile_result, compile_result)

        # The stored entity is updated in place and stays RUNNING.
        try_job = WfTryJob.Get(master_name, builder_name, build_number)
        self.assertEqual(expected_compile_result, try_job.compile_results[-1])
        self.assertEqual(analysis_status.RUNNING, try_job.status)

        try_job_data = WfTryJobData.Get(try_job_id)
        self.assertEqual(try_job_data.regression_range_size,
                         regression_range_size)
        self.assertIsInstance(try_job_data.start_time, datetime)
    def testGetTryJobsForFlakeSuccess(self, mock_buildbucket, mock_report):
        """A completed flake try job stores its report and raw response.

        NOTE(review): mock_buildbucket/mock_report are presumably injected by
        @mock.patch decorators not visible in this chunk -- confirm upstream.
        """
        master_name = 'm'
        builder_name = 'b'
        step_name = 's'
        test_name = 't'
        git_hash = 'a1b2c3d4'
        try_job_id = '1'

        # Seed a RUNNING flake try job whose latest result has no report yet.
        try_job = FlakeTryJob.Create(master_name, builder_name, step_name,
                                     test_name, git_hash)
        try_job.flake_results = [{
            'report': None,
            'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
            'try_job_id': '1',
        }]
        try_job.status = analysis_status.RUNNING
        try_job.put()

        try_job_data = FlakeTryJobData.Create(try_job_id)
        try_job_data.try_job_key = try_job.key
        try_job_data.try_job_url = (
            'https://build.chromium.org/p/m/builders/b/builds/1234')
        try_job_data.put()

        build_response = {
            'id': '1',
            'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
            'status': 'COMPLETED',
        }
        # Flake reports carry per-test pass/fail counts per revision.
        report = {
            'result': {
                'r0': {
                    'gl_tests': {
                        'status': 'passed',
                        'valid': True,
                        'pass_fail_counts': {
                            'Test.One': {
                                'pass_count': 100,
                                'fail_count': 0
                            }
                        }
                    }
                }
            }
        }
        mock_buildbucket.GetTryJobs.return_value = [
            (None, buildbucket_client.BuildbucketBuild(build_response))
        ]
        mock_report.return_value = json.dumps(report)

        # Drive the asynchronous pipeline: run() then one callback poll.
        pipeline = MonitorTryJobPipeline()
        pipeline.start_test()
        pipeline.run(try_job.key.urlsafe(), failure_type.FLAKY_TEST,
                     try_job_id)
        pipeline.callback(callback_params=pipeline.last_params)

        # Reload from ID to get all internal properties in sync.
        pipeline = MonitorTryJobPipeline.from_id(pipeline.pipeline_id)
        pipeline.finalized()
        flake_result = pipeline.outputs.default.value

        expected_flake_result = {
            'report': {
                'result': {
                    'r0': {
                        'gl_tests': {
                            'status': 'passed',
                            'valid': True,
                            'pass_fail_counts': {
                                'Test.One': {
                                    'pass_count': 100,
                                    'fail_count': 0
                                }
                            }
                        }
                    }
                }
            },
            'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
            'try_job_id': '1',
        }

        self.assertEqual(expected_flake_result, flake_result)

        # The stored entity is updated in place and stays RUNNING.
        try_job = FlakeTryJob.Get(master_name, builder_name, step_name,
                                  test_name, git_hash)
        self.assertEqual(expected_flake_result, try_job.flake_results[-1])
        self.assertEqual(analysis_status.RUNNING, try_job.status)

        # The last raw buildbucket response is cached on the metadata.
        try_job_data = FlakeTryJobData.Get(try_job_id)
        self.assertEqual(try_job_data.last_buildbucket_response,
                         build_response)
    def testGetTryJobsForTestSuccess(self, mock_buildbucket, mock_report):
        """The pipeline keeps polling through transient buildbucket errors.

        NOTE(review): mock_buildbucket/mock_report are presumably injected by
        @mock.patch decorators not visible in this chunk -- confirm upstream.
        """
        master_name = 'm'
        builder_name = 'b'
        build_number = 1
        try_job_id = '3'

        # Seed a RUNNING try job whose latest test result has no report yet.
        try_job = WfTryJob.Create(master_name, builder_name, build_number)
        try_job.test_results = [{
            'report': None,
            'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
            'try_job_id': try_job_id,
        }]
        try_job.status = analysis_status.RUNNING
        try_job.put()

        try_job_data = WfTryJobData.Create(try_job_id)
        try_job_data.try_job_key = try_job.key
        try_job_data.try_job_url = (
            'https://build.chromium.org/p/m/builders/b/builds/1234')
        try_job_data.put()

        # Alternating STARTED builds and BUILD_NOT_FOUND errors, ending with
        # a COMPLETED build -- one entry per poll.
        data = [{
            'build': {
                'id': '3',
                'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
                'status': 'STARTED'
            }
        }, {
            'error': {
                'reason': 'BUILD_NOT_FOUND',
                'message': 'message',
            }
        }, {
            'build': {
                'id': '3',
                'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
                'status': 'STARTED'
            }
        }, {
            'error': {
                'reason': 'BUILD_NOT_FOUND',
                'message': 'message',
            }
        }, {
            'build': {
                'id': '3',
                'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
                'status': 'COMPLETED',
            }
        }]

        report = {
            'result': {
                'rev1': {
                    'a_test': {
                        'status': 'passed',
                        'valid': True
                    }
                },
                'rev2': {
                    'a_test': {
                        'status': 'failed',
                        'valid': True,
                        'failures': ['test1', 'test2']
                    }
                }
            }
        }

        # Each GetTryJobs call consumes one (error, build) response in order.
        get_tryjobs_responses = [
            [(None, buildbucket_client.BuildbucketBuild(data[0]['build']))],
            [(buildbucket_client.BuildbucketError(data[1]['error']), None)],
            [(None, buildbucket_client.BuildbucketBuild(data[2]['build']))],
            [(buildbucket_client.BuildbucketError(data[3]['error']), None)],
            [(None, buildbucket_client.BuildbucketBuild(data[4]['build']))],
        ]
        mock_buildbucket.GetTryJobs.side_effect = get_tryjobs_responses
        mock_report.return_value = json.dumps(report)

        pipeline = MonitorTryJobPipeline()
        pipeline.start_test()
        # run() is invoked twice on purpose: the second call exercises the
        # idempotent re-entry path.
        pipeline.run(try_job.key.urlsafe(), failure_type.TEST, try_job_id)
        pipeline.run(try_job.key.urlsafe(), failure_type.TEST, try_job_id)
        # Since run() calls callback() immediately, we use -1.
        for _ in range(len(get_tryjobs_responses) - 1):
            pipeline.callback(callback_params=pipeline.last_params)

        # Reload from ID to get all internal properties in sync.
        pipeline = MonitorTryJobPipeline.from_id(pipeline.pipeline_id)
        pipeline.finalized()
        test_result = pipeline.outputs.default.value

        expected_test_result = {
            'report': {
                'result': {
                    'rev1': {
                        'a_test': {
                            'status': 'passed',
                            'valid': True
                        }
                    },
                    'rev2': {
                        'a_test': {
                            'status': 'failed',
                            'valid': True,
                            'failures': ['test1', 'test2']
                        }
                    }
                }
            },
            'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
            'try_job_id': '3',
        }
        self.assertEqual(expected_test_result, test_result)

        # The stored entity is updated in place and stays RUNNING.
        try_job = WfTryJob.Get(master_name, builder_name, build_number)
        self.assertEqual(expected_test_result, try_job.test_results[-1])
        self.assertEqual(analysis_status.RUNNING, try_job.status)
    def run(self, master_name, builder_name, build_number, failure_info,
            signals, heuristic_result, build_completed, force_try_job):
        """Starts a try job if one is needed for the given failure.

        Args:
          master_name (str): Master of the failed build.
          builder_name (str): Builder of the failed build.
          build_number (int): Number of the failed build.
          failure_info (dict): Failure info; this method reads
              'failure_type', 'builds', 'failed_steps' and the optional
              'parent_mastername'/'parent_buildername' keys.
          signals (dict): Failure signals, passed through to try_job_util.
          heuristic_result: Heuristic analysis result, passed through to
              try_job_util and _GetSuspectsFromHeuristicResult.
          build_completed (bool): Whether the build has completed.
          force_try_job (bool): Passed through to NeedANewWaterfallTryJob.
        """

        if not build_completed:  # Only start try-jobs for completed builds.
            return

        need_try_job, try_job_key = try_job_util.NeedANewWaterfallTryJob(
            master_name, builder_name, build_number, failure_info, signals,
            heuristic_result, force_try_job)

        if not need_try_job:
            return

        try_job_type = failure_info['failure_type']
        last_pass = _GetLastPass(build_number, failure_info, try_job_type)
        if last_pass is None:  # pragma: no cover
            # Bug fix: the message previously read 'Couldn"t' (garbled
            # apostrophe); use a properly escaped single quote.
            logging.warning(
                'Couldn\'t start try job for build %s, %s, %d because'
                ' last_pass is not found.', master_name, builder_name,
                build_number)
            return

        # Good/bad revisions bound the bisection range for the try job.
        good_revision = failure_info['builds'][str(
            last_pass)]['chromium_revision']
        bad_revision = failure_info['builds'][str(
            build_number)]['chromium_revision']
        suspected_revisions = _GetSuspectsFromHeuristicResult(heuristic_result)

        if try_job_type == failure_type.COMPILE:
            compile_targets = try_job_util.GetFailedTargetsFromSignals(
                signals, master_name, builder_name)
            dimensions = waterfall_config.GetTrybotDimensions(
                master_name, builder_name)
            cache_name = swarming_util.GetCacheName(master_name, builder_name)
            try_job_id = yield ScheduleCompileTryJobPipeline(
                master_name, builder_name, build_number, good_revision,
                bad_revision, try_job_type, compile_targets,
                suspected_revisions, cache_name, dimensions)
        else:
            # If try_job_type is other type, the pipeline has returned.
            # So here the try_job_type is failure_type.TEST.

            # Waits and gets the swarming tasks' results.
            task_results = []
            for step_name, step_failure in failure_info[
                    'failed_steps'].iteritems():
                # Only steps with a first-time failure need a swarming rerun.
                step_has_first_time_failure = _HasFirstTimeFailure(
                    step_failure.get('tests', {}), build_number)
                if not step_has_first_time_failure:
                    continue
                task_result = yield ProcessSwarmingTaskResultPipeline(
                    master_name, builder_name, build_number, step_name)
                task_results.append(task_result)

            yield UpdateAnalysisWithFlakeInfoPipeline(master_name,
                                                      builder_name,
                                                      build_number,
                                                      *task_results)

            # Test try jobs run on the parent (tester's builder) when one
            # is configured; fall back to this build's master/builder.
            parent_mastername = failure_info.get(
                'parent_mastername') or master_name
            parent_buildername = failure_info.get('parent_buildername') or (
                builder_name)
            dimensions = waterfall_config.GetTrybotDimensions(
                parent_mastername, parent_buildername)
            cache_name = swarming_util.GetCacheName(parent_mastername,
                                                    parent_buildername)

            try_job_id = yield ScheduleTestTryJobPipeline(
                master_name, builder_name, build_number, good_revision,
                bad_revision, try_job_type, suspected_revisions, cache_name,
                dimensions, *task_results)

        # Monitor the scheduled try job, then identify the culprit from
        # its result.
        try_job_result = yield MonitorTryJobPipeline(try_job_key.urlsafe(),
                                                     try_job_type, try_job_id)

        yield IdentifyTryJobCulpritPipeline(master_name, builder_name,
                                            build_number, try_job_type,
                                            try_job_id, try_job_result)
# --- Example #15 --- (scraper artifact: the original lines were the Russian
# snippet marker "Пример #15" and a stray "0", which are not Python code)
    def run(self, urlsafe_flake_analysis_key, commit_position, revision,
            lower_boundary_commit_position, cache_name, dimensions):
        """Runs a try job at a revision to determine its flakiness.

    Args:
      urlsafe_flake_analysis_key (str): The urlsafe-key of the flake analysis
          for which the try jobs are to analyze.
      commit_position (int): The commit position corresponding to |revision| to
          analyze.
      revision (str): The revision to run the try job against corresponding to
          |commit_position|.
      lower_boundary_commit_position (int): The lower bound of commit position
          that can run a try job.
      cache_name (str): A string to identify separate directories for different
          waterfall bots on the trybots.
      dimensions (list): A list of strings in the format
          ["key1:value1", "key2:value2"].
    """
        analysis = ndb.Key(urlsafe=urlsafe_flake_analysis_key).get()
        assert analysis

        if analysis.error or analysis.status != analysis_status.COMPLETED:
            # Don't start try-jobs if analysis at the build level did not complete
            # successfully.
            return

        # TODO(lijeffrey): support force/rerun.

        try_job = _CreateTryJobEntity(analysis.master_name,
                                      analysis.builder_name,
                                      analysis.canonical_step_name,
                                      analysis.test_name, revision)

        if analysis.try_job_status != analysis_status.RUNNING:  # pragma: no branch
            # Set try_job_status as RUNNING to indicate the analysis is in try-job
            # mode.
            analysis.try_job_status = analysis_status.RUNNING
        # Record the revision being attempted before scheduling anything, so
        # a failure can be attributed to it.
        analysis.last_attempted_revision = revision
        analysis.put()

        # Schedule -> monitor -> process -> pick next commit, strictly in
        # this order.
        with pipeline.InOrder():
            iterations_to_rerun = analysis.algorithm_parameters.get(
                'try_job_rerun', {}).get('iterations_to_rerun')
            try_job_id = yield ScheduleFlakeTryJobPipeline(
                analysis.master_name, analysis.builder_name,
                analysis.canonical_step_name, analysis.test_name, revision,
                analysis.key.urlsafe(), cache_name, dimensions,
                iterations_to_rerun)

            try_job_result = yield MonitorTryJobPipeline(
                try_job.key.urlsafe(), failure_type.FLAKY_TEST, try_job_id)

            yield ProcessFlakeTryJobResultPipeline(revision, commit_position,
                                                   try_job_result,
                                                   try_job.key.urlsafe(),
                                                   urlsafe_flake_analysis_key)

            yield NextCommitPositionPipeline(urlsafe_flake_analysis_key,
                                             try_job.key.urlsafe(),
                                             lower_boundary_commit_position,
                                             cache_name, dimensions)