def update_histograms():
  """Refreshes the occurrence histogram for each supported time window."""
  windows = (
      (Flake.last_day, 'count_day', occurrences_per_flake_day),
      (Flake.last_week, 'count_week', occurrences_per_flake_week),
      (Flake.last_month, 'count_month', occurrences_per_flake_month),
  )
  for window_prop, counter_name, histogram in windows:
    update_histogram(Flake.query().filter(window_prop == True),
                     counter_name, histogram)
def test_normalizes_step_name(self):
  """Searching with a full step name redirects to the matching flake page."""
  flake_key = Flake(name='my_unittests (with patch)').put()
  response = self.test_app.get(
      '/search?q=my_unittests+(with+patch)+on+NVIDIA+GPU', status=302)
  self.assertIn('location', response.headers)
  expected_suffix = '/all_flake_occurrences?key=%s' % flake_key.urlsafe()
  self.assertTrue(response.headers['location'].endswith(expected_suffix))
def update_flake_month_counter():  # pragma: no cover
  """Recomputes counters for flakes seen only in the month-sized window.

  Flakes also seen in the last week/day/hour are handled by the more
  frequent counter jobs, so they are excluded here.
  """
  month_only = Flake.query().filter(
      Flake.last_month == True,
      Flake.last_week == False,
      Flake.last_day == False,
      Flake.last_hour == False)
  for flake in month_only:
    update_flake_counters(flake)
def get(self):
  """One-off migration: builds FlakeType and Issue entities from Flakes.

  Refuses to run (HTTP 400) if any Issue or FlakeType entities already
  exist, so the migration cannot double-write data.
  """
  # Guard: a previous (possibly partial) migration must be cleaned up first.
  if Issue.query().count(1) > 0 or FlakeType.query().count(1) > 0:
    self.response.out.write(
        'Found Issue or FlakeType entities in datastore. '
        'Please remove them before trying to migrate '
        'data again.')
    self.response.set_status(400)
    return

  # Only flakes already associated with a bug (issue_id > 0) are migrated.
  flakes = Flake.query(Flake.issue_id > 0).fetch()

  # Group the derived FlakeType keys by the bug they are attached to.
  flake_types_by_issue = collections.defaultdict(list)
  for flake_number, flake in enumerate(flakes, 1):
    flake_types_by_issue[flake.issue_id].extend(
        _get_flake_types_from_flake(flake))
    if flake_number % 500 == 0:  # pragma: no cover
      logging.info('Processed %d flakes so far.' % flake_number)

  logging.info('Done processing FlakeTypes. Starting to process Issues')
  for issue_id, flake_type_keys in flake_types_by_issue.iteritems():
    # We might have found the same flake_type more than once.
    flake_type_keys = list(set(flake_type_keys))
    Issue(project='chromium',
          issue_id=issue_id,
          flake_type_keys=flake_type_keys).put()
  logging.info('Done processing Issues. Migration completed.')
def test_converts_step_flake(self):
  """A step-level Flake migrates to one step-only FlakeType plus an Issue."""
  updated_at = datetime.datetime.now()
  Flake(issue_id=1, is_step=True, name='fake_step',
        issue_last_updated=updated_at).put()

  self.test_app.get('/migrate')

  all_flake_types = FlakeType.query().fetch()
  self.assertEqual(len(all_flake_types), 1)
  migrated = all_flake_types[0]
  self.assertEqual(migrated.project, 'chromium')
  self.assertEqual(migrated.step_name, 'fake_step')
  self.assertIsNone(migrated.test_name)
  self.assertIsNone(migrated.config)
  self.assertEqual(migrated.last_updated, updated_at)

  all_issues = Issue.query().fetch()
  self.assertEqual(len(all_issues), 1)
  migrated_issue = all_issues[0]
  self.assertEqual(migrated_issue.issue_id, 1)
  self.assertEqual(migrated_issue.project, 'chromium')
  self.assertEqual(sorted(migrated_issue.flake_type_keys),
                   sorted(ft.key for ft in all_flake_types))
def update_issue_tracker():
  """File/update issues for flakes on issue_tracker."""
  # Only process flakes that happened at least MIN_REQUIRED_FLAKY_RUNS times in
  # the last 24 hours.
  for flake in Flake.query(Flake.count_day >= MIN_REQUIRED_FLAKY_RUNS,
                           projection=[Flake.count_day]):
    # Log each enqueued task for debuggability; keeps this function consistent
    # with the sibling implementation that already logs task creation.
    logging.info('Created processing task for %s' % flake.key)
    taskqueue.add(queue_name='issue-updates',
                  url='/issues/process/%s' % flake.key.urlsafe())
def update_issue_tracker():
  """File/update issues for flakes on issue_tracker."""
  # Restrict to flakes seen at least MIN_REQUIRED_FLAKY_RUNS times in the
  # last 24 hours; the projection keeps the query cheap.
  frequent_flakes = Flake.query(Flake.count_day >= MIN_REQUIRED_FLAKY_RUNS,
                                projection=[Flake.count_day])
  for flake in frequent_flakes:
    logging.info('Created processing task for %s' % flake.key)
    taskqueue.add(queue_name='issue-updates',
                  url='/issues/process/%s' % flake.key.urlsafe())
def get(self):
  """Renders all occurrences of the flake whose name exactly matches ?q=."""
  query_text = self.request.get('q')
  match = Flake.query().filter(Flake.name == query_text).get()
  if match is None:
    self.response.write('No flake entry found for ' + query_text)
    return
  self.response.write(show_all_flakes(match, 0))
def index(time_range, cursor=None):
  """Builds the template values for one page of the flake index.

  Args:
    time_range: one of 'hour', 'day', 'week', 'month'; anything else means
        "all time". Controls both the filter and the ranking counter.
    cursor: optional datastore cursor for pagination.

  Returns:
    Dict with 'range', 'flakes' (page of 10), 'more' flag and the urlsafe
    'cursor' for the next page ('' when exhausted).
  """
  flakes_query = Flake.query()
  # Restrict to the selected window and rank by that window's counter;
  # the fallback ('all') ranks by the all-time counter instead.
  if time_range == 'hour':
    flakes_query = flakes_query.filter(Flake.last_hour == True)
    flakes_query = flakes_query.order(-Flake.count_hour)
  elif time_range == 'day':
    flakes_query = flakes_query.filter(Flake.last_day == True)
    flakes_query = flakes_query.order(-Flake.count_day)
  elif time_range == 'week':
    flakes_query = flakes_query.filter(Flake.last_week == True)
    flakes_query = flakes_query.order(-Flake.count_week)
  elif time_range == 'month':
    flakes_query = flakes_query.filter(Flake.last_month == True)
    flakes_query = flakes_query.order(-Flake.count_month)
  else:
    flakes_query = flakes_query.order(-Flake.count_all)
  # Tie-break equal counts by recency.
  flakes_query = flakes_query.order(-Flake.last_time_seen)
  flakes, next_cursor, more = flakes_query.fetch_page(10, start_cursor=cursor)

  # Filter out occurrences so that we only show ones for the selected time
  # range. This is less confusing to read, and also less cluttered and renders
  # faster when not viewing all range.
  def filter_by_range(t):
    if time_range == 'hour':
      return is_last_hour(t)
    elif time_range == 'day':
      return is_last_day(t)
    elif time_range == 'week':
      return is_last_week(t)
    elif time_range == 'month':
      return is_last_month(t)
    else:
      return True

  # Short timestamps for the hour view; full date elsewhere.
  time_format = ''
  if time_range == 'hour':
    time_format = '%H:%M:%S'
  else:
    time_format = '%Y-%m-%d %H:%M:%S'

  def time_formatter(t):
    return t.strftime(time_format)

  for f in flakes:
    f.filtered_occurrences = GetFilteredOccurences(
        f, time_formatter, filter_by_range)
    # Flag flakes whose occurrence list was truncated for display.
    if len(f.occurrences) > MAX_OCCURRENCES_PER_FLAKE_ON_INDEX_PAGE:
      f.more_occurrences = True

  return {
      'range': time_range,
      'flakes': flakes,
      'more': more,
      'cursor': next_cursor.urlsafe() if next_cursor else '',
  }
def test_ignores_if_issue_id_is_0(self):
  """Flakes without an attached issue (issue_id == 0) are not migrated."""
  updated_at = datetime.datetime.now()
  Flake(issue_id=0, is_step=True, name='fake_step',
        issue_last_updated=updated_at).put()

  self.test_app.get('/migrate')

  self.assertEqual(len(FlakeType.query().fetch()), 0)
def test_ignores_null_flaky_runs(self):
  """Occurrence keys that resolve to no entity are skipped by the migration.

  A Flake referencing one real FlakyRun (with two step occurrences) and one
  dangling key should still migrate into exactly two FlakeTypes and one Issue.
  """
  last_updated = datetime.datetime.now()
  fake_build_key = BuildRun(buildnumber=1,
                            result=1,
                            time_finished=last_updated).put()
  # A real FlakyRun with two flake occurrences in different steps.
  flake_run_key = FlakyRun(failure_run=fake_build_key,
                           success_run=fake_build_key,
                           failure_run_time_finished=last_updated,
                           flakes=[
                               FlakeOccurrence(name='fake_step',
                                               failure='fake_test_name'),
                               FlakeOccurrence(name='fake_step2',
                                               failure='fake_test_name')
                           ]).put()
  # Key that points at no datastore entity — must be ignored.
  null_flake_run_key = ndb.Key('FlakyRun', 'fake-key')
  Flake(issue_id=1,
        is_step=False,
        name='fake_test_name',
        issue_last_updated=last_updated,
        occurrences=[
            flake_run_key,
            null_flake_run_key,
        ]).put()

  self.test_app.get('/migrate')

  # Only the two real occurrences become FlakeTypes.
  flake_types = FlakeType.query().fetch()
  self.assertEqual(len(flake_types), 2)

  flake_type_1 = flake_types[0]
  self.assertEqual(flake_type_1.project, 'chromium')
  self.assertEqual(flake_type_1.step_name, 'fake_step')
  self.assertEqual(flake_type_1.test_name, 'fake_test_name')
  self.assertIsNone(flake_type_1.config)
  self.assertEqual(flake_type_1.last_updated, last_updated)

  flake_type_2 = flake_types[1]
  self.assertEqual(flake_type_2.project, 'chromium')
  self.assertEqual(flake_type_2.step_name, 'fake_step2')
  self.assertEqual(flake_type_2.test_name, 'fake_test_name')
  self.assertIsNone(flake_type_2.config)
  self.assertEqual(flake_type_2.last_updated, last_updated)

  # Both FlakeTypes hang off a single migrated Issue.
  issues = Issue.query().fetch()
  self.assertEqual(len(issues), 1)
  issue = issues[0]
  self.assertEqual(issue.issue_id, 1)
  self.assertEqual(issue.project, 'chromium')
  self.assertEqual(sorted(issue.flake_type_keys),
                   sorted(flake_type.key for flake_type in flake_types))
def add_failure_to_flake(name, flaky_run_key, failure_time, is_step):
  """Records |flaky_run_key| as an occurrence of the flake named |name|.

  Creates the Flake entity (keyed by name) on first sight, then appends the
  occurrence and refreshes the time-window counters.
  """
  entry = Flake.get_by_id(name)
  if entry is None:
    entry = Flake(name=name, id=name, last_time_seen=datetime.datetime.min,
                  is_step=is_step)
    entry.put()
  # TODO(sergiyb): This is necessary to update existing flakes. Remove in July
  # 2016 or later.
  entry.is_step = is_step
  entry.occurrences.append(flaky_run_key)
  util.add_occurrence_time_to_flake(entry, failure_time)
  entry.put()
def test_does_not_create_too_many_issues(self):
  """At most MAX_UPDATED_ISSUES_PER_DAY issues are filed per day."""
  with mock.patch('handlers.flake_issues.MAX_UPDATED_ISSUES_PER_DAY', 5):
    with mock.patch('handlers.flake_issues.MIN_REQUIRED_FLAKY_RUNS', 2):
      for _ in range(10):
        flake_key = self._create_flake().put()
        response = self.test_app.post(
            '/issues/process/%s' % flake_key.urlsafe())
        self.assertEqual(200, response.status_int)

  filed_issue_ids = [f.issue_id for f in Flake.query() if f.issue_id]
  self.assertEqual(len(filed_issue_ids), 5)
  self.assertEqual(len(self.mock_api.issues), 5)
def get(self):
  """Redirects to the page of the flake matching the search query, if any."""
  search = self.request.get('q')
  target = Flake.query().filter(Flake.name == search).get()
  if target is None:
    # Users might search using full step name. Try normalizing it before
    # searching. Note that this won't find flakes in a step where
    # chromium-try-flakes was able to determine which test has failed.
    # Instead, users should search using the test name.
    normalized_step_name = normalize_test_type(search)
    target = Flake.query().filter(Flake.name == normalized_step_name).get()
  if target is not None:
    self.redirect('/all_flake_occurrences?key=%s' % target.key.urlsafe())
    return
  self.response.write('No flake entry found for ' + search)
def add_failure_to_flake(name, flaky_run):
  """Appends |flaky_run| as an occurrence of the flake named |name|.

  Creates the Flake entity (keyed by name) on first sight; the occurrence
  time is taken from when the failing build run finished.
  """
  flake = Flake.get_by_id(name)
  if flake is None:
    flake = Flake(name=name, id=name, last_time_seen=datetime.datetime.min)
    flake.put()
  flake.occurrences.append(flaky_run.key)
  finished_at = flaky_run.failure_run.get().time_finished
  add_occurance_time_to_flake(flake, finished_at)
  flake.put()
def add_failure_to_flake(name, flaky_run_key, failure_time):
  """Records |flaky_run_key| at |failure_time| on the flake named |name|.

  Creates the Flake entity (keyed by name) on first sight, then appends the
  occurrence and refreshes the time-window counters.
  """
  entry = Flake.get_by_id(name)
  if entry is None:
    entry = Flake(name=name, id=name, last_time_seen=datetime.datetime.min)
    entry.put()
  entry.occurrences.append(flaky_run_key)
  util.add_occurrence_time_to_flake(entry, failure_time)
  entry.put()
def test_adds_occurrence_time_to_flake(self):
  """A current occurrence bumps every window counter and flag."""
  flake = Flake(name='foo.bar', last_time_seen=datetime.datetime.min)
  now = datetime.datetime.utcnow()
  util.add_occurrence_time_to_flake(flake, now)
  self.assertEqual(flake.last_time_seen, now)
  for window in ('hour', 'day', 'week', 'month'):
    self.assertEqual(getattr(flake, 'count_%s' % window), 1)
    self.assertEqual(getattr(flake, 'last_%s' % window), True)
def test_delete_old_flakes(self):
  """Old/invalid occurrences are pruned only when enough recent ones remain."""
  # Create old FlakyRuns (finished 100+ days ago — past the 90-day cutoff).
  now = datetime.datetime.utcnow()
  old_flakes = []
  for i in range(1, 101):
    # id can not be 0
    key = FlakyRun(key=ndb.Key('FlakyRun', i),
                   failure_run_time_finished=now -
                   datetime.timedelta(days=100 + i),
                   failure_run=ndb.Key('BuildRun', 1),
                   success_run=ndb.Key('BuildRun', 2)).put()
    old_flakes.append(key)

  # Create new FlakyRuns (finished within the last ~12 days — recent).
  new_flakes = []
  for i in range(101, 201):
    key = FlakyRun(key=ndb.Key('FlakyRun', i),
                   failure_run_time_finished=now -
                   datetime.timedelta(hours=100 + i),
                   failure_run=ndb.Key('BuildRun', 1),
                   success_run=ndb.Key('BuildRun', 2)).put()
    new_flakes.append(key)

  # Key that resolves to no entity — an "invalid" occurrence.
  non_existant_flake = [ndb.Key('FlakyRun', '201')]

  # Create Flakes.
  Flake(key=ndb.Key('Flake', 'foo'), name='foo',
        occurrences=old_flakes + new_flakes, last_time_seen=now).put()
  Flake(key=ndb.Key('Flake', 'bar'), name='bar',
        occurrences=old_flakes + new_flakes[:50], last_time_seen=now).put()
  Flake(key=ndb.Key('Flake', 'baz'), name='baz',
        occurrences=non_existant_flake + new_flakes, last_time_seen=now).put()

  path = '/cron/delete_old_flake_occurrences'
  response = self.test_app.get(path, headers={'X-AppEngine-Cron': 'true'})
  self.assertEqual(200, response.status_int)

  # Removed old flakes.
  self.assertEqual(set(Flake.get_by_id('foo').occurrences), set(new_flakes))
  # Kept old flakes since there are just 50 new flakes.
  self.assertEqual(len(Flake.get_by_id('bar').occurrences), 150)
  # Make sure that non existant flake got removed.
  self.assertNotIn(non_existant_flake, Flake.get_by_id('bar').occurrences)
  # Make sure that we do not delete any FlakyRun entities.
  self.assertEqual(FlakyRun.query().count(limit=300), 200)
def test_does_not_modify_flake(self):
  """An occurrence older than every window leaves the flake untouched."""
  little_time_ago = datetime.datetime.utcnow() - datetime.timedelta(hours=2)
  flake = Flake(name='foo.bar', last_time_seen=little_time_ago)
  long_time_ago = datetime.datetime.utcnow() - datetime.timedelta(days=60)
  util.add_occurrence_time_to_flake(flake, long_time_ago)
  self.assertEqual(flake.last_time_seen, little_time_ago)
  for window in ('hour', 'day', 'week', 'month'):
    self.assertEqual(getattr(flake, 'count_%s' % window), 0)
    self.assertEqual(getattr(flake, 'last_%s' % window), False)
def delete_old_flake_occurrences():
  """Deletes old and invalid flake occurrences from flakes.

  Old occurrences (FlakyRuns) are those which have finished more than 3 months
  ago. Invalid ones are those that do not exist in datastore, but are listed
  in the Flake.occurrences field. We do not remove any occurrences from a
  given flake unless there are at least 100 recent valid occurrences in the
  occurrences field.
  """
  old_occ_cutoff = datetime.datetime.utcnow() - datetime.timedelta(days=90)
  for flake in Flake.query(Flake.last_time_seen > old_occ_cutoff):
    flaky_runs = ndb.get_multi(flake.occurrences)
    # Map each occurrence key to its entity (None if it no longer exists).
    # dict(zip(...)) replaces the previous map(__setitem__, ...) side-effect
    # idiom, which did the same thing less readably.
    occurrence_map = dict(zip(flake.occurrences, flaky_runs))

    # Sort flake occurrences into recent, old and invalid buckets.
    new_occurrences = []
    old_occurrences = []
    invalid_occurrences = []
    for key, flaky_run in occurrence_map.iteritems():
      if not flaky_run:
        invalid_occurrences.append(key)
      elif flaky_run.failure_run_time_finished < old_occ_cutoff:
        old_occurrences.append(key)
      else:
        new_occurrences.append(key)

    # We use a nested function with transaction enabled to make sure that we
    # do not lose any new flakes being added to the Flake at the same time.
    @ndb.transactional
    def remove_occurrences(flake_key, occurrences_to_remove):
      flake = flake_key.get()
      flake.occurrences = [
          occ for occ in flake.occurrences if occ not in occurrences_to_remove
      ]
      flake.put()

    occurrences_to_remove = old_occurrences + invalid_occurrences
    if len(new_occurrences) >= 100 and occurrences_to_remove:
      remove_occurrences(flake.key, occurrences_to_remove)
      logging.info(
          'Removed %d old and %d invalid occurrences from flake %s',
          len(old_occurrences), len(invalid_occurrences), flake.name)
def _create_flake():
  """Builds a Flake named 'testX' backed by two persisted flaky runs.

  Returns:
    (flake, [occ1, occ2]) — the un-persisted Flake and its FlakyRun entities.
  """
  finished = datetime.datetime.utcnow()
  started = finished - datetime.timedelta(hours=1)
  parent = PatchsetBuilderRuns(issue=1, patchset=1, master='tryserver.bar',
                               builder='baz').put()
  failed_a = BuildRun(parent=parent, buildnumber=10, result=2,
                      time_started=started, time_finished=finished).put()
  failed_b = BuildRun(parent=parent, buildnumber=20, result=2,
                      time_started=started, time_finished=finished).put()
  succeeded = BuildRun(parent=parent, buildnumber=30, result=0,
                       time_started=started, time_finished=finished).put()
  run_a = FlakyRun(failure_run=failed_a, success_run=succeeded,
                   failure_run_time_started=started,
                   failure_run_time_finished=finished,
                   flakes=[
                       FlakeOccurrence(name='step1', failure='testX'),
                   ])
  run_b = FlakyRun(failure_run=failed_b, success_run=succeeded,
                   failure_run_time_started=started,
                   failure_run_time_finished=finished,
                   flakes=[
                       FlakeOccurrence(name='step2', failure='testX'),
                       FlakeOccurrence(name='step3', failure='step3'),
                   ])
  flake = Flake(name='testX', count_day=10,
                occurrences=[run_a.put(), run_b.put()],
                is_step=True, issue_id=123456)
  return flake, [run_a, run_b]
def test_create_tasks_to_update_issue_tracker(self):
  """Only flakes meeting the daily-count threshold get a processing task."""
  Flake(name='foo1', count_day=1).put()
  key2 = Flake(name='foo2', count_day=3).put()
  key3 = Flake(name='foo3', count_day=5).put()
  Flake(name='foo4', count_day=2).put()
  key5 = Flake(name='foo5', count_day=200).put()

  response = self.test_app.get('/cron/update_issue_tracker',
                               headers={'X-AppEngine-Cron': 'true'})
  self.assertEqual(200, response.status_int)

  tasks = self.taskqueue_stub.get_filtered_tasks(queue_names='issue-updates')
  self.assertEqual(len(tasks), 3)
  expected_urls = ['/issues/process/%s' % k.urlsafe()
                   for k in (key2, key3, key5)]
  self.assertEqual([t.url for t in tasks], expected_urls)
def is_duplicate_occurrence(flake_id, flaky_run):
  """Returns true if the given flaky run has already been reported."""
  flake = Flake.get_by_id(flake_id)
  if not flake:
    return False

  # Identify the changelist and builder of the candidate run.
  candidate_runs = flaky_run.failure_run.parent().get()
  candidate_issue = candidate_runs.issue
  candidate_builder = candidate_runs.builder

  # The run is a duplicate if any known occurrence shares both the
  # changelist and the builder.
  for occurrence in ndb.get_multi(flake.occurrences):
    # Skip null occurrences or occurrences without a failure run.
    if not occurrence or not occurrence.failure_run:  # pragma: no cover
      continue
    known_runs = occurrence.failure_run.parent().get()
    if (known_runs.issue == candidate_issue and
        known_runs.builder == candidate_builder):
      return True
  return False
def test_create_tasks_to_update_stale_issues(self):
  """Exactly one staleness-check task is enqueued per distinct issue id."""
  for name, issue_id in (('foo1', 123), ('foo2', 234), ('foo3', 234),
                         ('foo4', 345), ('foo5', 345), ('foo6', 234)):
    Flake(name=name, issue_id=issue_id).put()

  response = self.test_app.get('/cron/update_stale_issues',
                               headers={'X-AppEngine-Cron': 'true'})
  self.assertEqual(200, response.status_int)

  tasks = self.taskqueue_stub.get_filtered_tasks(queue_names='issue-updates')
  self.assertEqual(len(tasks), 3)
  self.assertEqual([t.url for t in tasks],
                   ['/issues/update-if-stale/123',
                    '/issues/update-if-stale/234',
                    '/issues/update-if-stale/345'])
def _create_flake(self):
  """Builds an un-persisted test Flake 'foo.bar' with three flaky runs.

  Fixture layout: one run finished 5 days ago (occ_key1), two finished at a
  fixed recent time (occ_key2, occ_key3); occ_key3's occurrences carry
  issue ids. All FlakyRuns and BuildRuns ARE persisted; only the returned
  Flake is not.
  """
  # Fixed timestamps keep the fixture deterministic across test runs.
  tf = datetime.datetime(2016, 8, 6, 10, 20, 30)
  ts = tf - datetime.timedelta(hours=1)
  tf2 = tf - datetime.timedelta(days=5)
  ts2 = tf2 - datetime.timedelta(hours=1)
  p = PatchsetBuilderRuns(issue=123456, patchset=1, master='tryserver.test',
                          builder='test-builder').put()
  # Failing (result=2/4) and succeeding (result=0) build runs.
  br_f0 = BuildRun(parent=p, buildnumber=0, result=2, time_started=ts2,
                   time_finished=tf2).put()
  br_f1 = BuildRun(parent=p, buildnumber=1, result=2, time_started=ts,
                   time_finished=tf).put()
  br_s1 = BuildRun(parent=p, buildnumber=2, result=0, time_started=ts,
                   time_finished=tf).put()
  br_f2 = BuildRun(parent=p, buildnumber=3, result=4, time_started=ts,
                   time_finished=tf).put()
  br_s2 = BuildRun(parent=p, buildnumber=4, result=0, time_started=ts,
                   time_finished=tf).put()
  # Old run: two occurrences in the same step, different failures.
  occ_key1 = FlakyRun(failure_run=br_f0, success_run=br_s2, flakes=[
      FlakeOccurrence(name='foo (x)', failure='foo.bar'),
      FlakeOccurrence(name='foo (x)', failure='other')],
      failure_run_time_started=ts2, failure_run_time_finished=tf2).put()
  # Recent run with a single occurrence.
  occ_key2 = FlakyRun(failure_run=br_f1, success_run=br_s1, flakes=[
      FlakeOccurrence(name='bar (y)', failure='foo.bar')],
      failure_run_time_started=ts, failure_run_time_finished=tf).put()
  # Recent run whose occurrences already have issues attached.
  occ_key3 = FlakyRun(failure_run=br_f2, success_run=br_s2, flakes=[
      FlakeOccurrence(
          name='foo (x)', failure='foo.bar', issue_id=100),
      FlakeOccurrence(
          name='bar (y)', failure='foo.bar', issue_id=200)],
      failure_run_time_started=ts, failure_run_time_finished=tf).put()
  return Flake(name='foo.bar', count_day=10, is_step=False,
               occurrences=[occ_key1, occ_key2, occ_key3])
def _create_flakes(ts, tf, ts2, tf2):
  """Persists four test Flakes with varying windows and occurrences.

  Args:
    ts/tf: start/finish timestamps for "recent" build runs.
    ts2/tf2: start/finish timestamps for the "older" build runs.

  Creates: 'foo' (no occurrences), 'bar' (two occurrences), 'baz' (one
  occurrence), 'zee' (last_week only, no occurrences). All entities are put.
  """
  p = PatchsetBuilderRuns(issue=123456, patchset=1, master='tryserver.test',
                          builder='test-builder').put()
  # Failing (result=2/4) and succeeding (result=0) build runs.
  br_f0 = BuildRun(parent=p, buildnumber=0, result=2, time_started=ts2,
                   time_finished=tf2).put()
  br_f1 = BuildRun(parent=p, buildnumber=1, result=2, time_started=ts,
                   time_finished=tf).put()
  br_s1 = BuildRun(parent=p, buildnumber=2, result=0, time_started=ts,
                   time_finished=tf).put()
  br_f2 = BuildRun(parent=p, buildnumber=3, result=4, time_started=ts,
                   time_finished=tf2).put()
  br_s2 = BuildRun(parent=p, buildnumber=4, result=0, time_started=ts,
                   time_finished=tf2).put()
  occ_key1 = FlakyRun(failure_run=br_f0, success_run=br_s2,
                      failure_run_time_started=ts2,
                      failure_run_time_finished=tf2).put()
  occ_key2 = FlakyRun(failure_run=br_f1, success_run=br_s1,
                      failure_run_time_started=ts,
                      failure_run_time_finished=tf).put()
  occ_key3 = FlakyRun(failure_run=br_f2, success_run=br_s2,
                      failure_run_time_started=ts,
                      failure_run_time_finished=tf).put()
  Flake(name='foo', last_hour=True, last_day=True, last_week=True,
        last_month=True).put()
  Flake(name='bar', last_hour=True, last_day=True, last_week=True,
        last_month=True, occurrences=[occ_key1, occ_key2]).put()
  Flake(name='baz', last_hour=True, last_day=True, last_week=True,
        last_month=True, occurrences=[occ_key3]).put()
  # Seen only within the week window.
  Flake(name='zee', last_hour=False, last_day=False, last_week=True,
        last_month=False).put()
def update_flake_hour_counter():  # pragma: no cover
  """Recomputes counters for every flake seen within the last hour."""
  recent = Flake.query().filter(Flake.last_hour == True)
  for flake in recent:
    update_flake_counters(flake)
def update_stale_issues():
  """Enqueues a staleness check for each distinct issue attached to a flake."""
  # distinct projection yields one result per unique issue_id.
  distinct_issue_flakes = Flake.query(Flake.issue_id > 0,
                                      projection=[Flake.issue_id],
                                      distinct=True)
  for flake in distinct_issue_flakes:
    taskqueue.add(queue_name='issue-updates',
                  url='/issues/update-if-stale/%s' % flake.issue_id)
def test_get_flaky_run_reason(self):
  """End-to-end test of /issues/create_flaky_run.

  Posts a failure/success build-run pair with mocked buildbot and
  test-results fetches, then checks the created FlakyRun, the URLs fetched,
  the flake occurrences recorded and the resulting Flake entities.
  """
  now = datetime.datetime.utcnow()
  br_f, br_s = self._create_build_runs(now - datetime.timedelta(hours=1), now)

  # The side_effect sequence must match the order of urlfetch calls made by
  # the handler.
  urlfetch_mock = mock.Mock(side_effect = [
      # JSON results for the build.
      mock.Mock(status_code=200, content=TEST_BUILDBOT_JSON_REPLY),
      # JSON results for step "foo1".
      mock.Mock(status_code=200, content=TEST_TEST_RESULTS_REPLY),
      # For step "foo8 xx (with patch)", something failed while parsing JSON,
      # step text ("bar13") should be reported as flake.
      Exception(),
  ])

  # We also create one Flake to test that it is correctly updated. Other Flake
  # entities will be created automatically.
  Flake(id='foo2', name='foo2', occurrences=[],
        last_time_seen=datetime.datetime.min).put()

  with mock.patch('google.appengine.api.urlfetch.fetch', urlfetch_mock):
    self.test_app.post('/issues/create_flaky_run',
                       {'failure_run_key': br_f.urlsafe(),
                        'success_run_key': br_s.urlsafe()})

  # Exactly one FlakyRun should be created, linking the two build runs.
  flaky_runs = FlakyRun.query().fetch(100)
  self.assertEqual(len(flaky_runs), 1)
  flaky_run = flaky_runs[0]
  self.assertEqual(flaky_run.failure_run, br_f)
  self.assertEqual(flaky_run.success_run, br_s)
  self.assertEqual(flaky_run.failure_run_time_finished, now)
  self.assertEqual(flaky_run.failure_run_time_started,
                   now - datetime.timedelta(hours=1))

  urlfetch_mock.assert_has_calls([
      # Verify that we've used correct URL to access buildbot JSON endpoint.
      mock.call(
          'http://build.chromium.org/p/test.master/json/builders/'
          'test-builder/builds/100'),
      # Verify that we've used correct URLs to retrieve test-results GTest
      # JSON.
      mock.call(
          'http://test-results.appspot.com/testfile?builder=test-builder&'
          'name=full_results.json&master=test.master&testtype=foo1&'
          'buildnumber=100'),
      mock.call(
          'http://test-results.appspot.com/testfile?builder=test-builder&'
          'name=full_results.json&master=test.master&'
          'testtype=foo8%20%28with%20patch%29&buildnumber=100')])

  # Expected flakes to be found: list of (step_name, test_name).
  expected_flakes = [
      ('foo1', 'test2.a'),
      ('foo1', 'test2.d'),
      ('foo2', 'foo2'),
      ('foo8 xx (with patch)', 'foo8 (with patch)'),
  ]
  flake_occurrences = flaky_run.flakes
  self.assertEqual(len(flake_occurrences), len(expected_flakes))
  actual_flake_occurrences = [
      (fo.name, fo.failure) for fo in flake_occurrences]
  self.assertEqual(expected_flakes, actual_flake_occurrences)

  # We compare sets below, because order of flakes returned by datastore
  # doesn't have to be same as steps above.
  flakes = Flake.query().fetch()
  self.assertEqual(len(flakes), len(expected_flakes))
  expected_flake_names = set([ef[1] for ef in expected_flakes])
  actual_flake_names = set([f.name for f in flakes])
  self.assertEqual(expected_flake_names, actual_flake_names)
  # Every flake should reference exactly the one FlakyRun created above.
  for flake in flakes:
    self.assertEqual(flake.occurrences, [flaky_run.key])
def get(self):
  """Renders one page of the flake index for the requested time range.

  Query params: 'range' ('hour'/'day'/'week'/'month', anything else = all
  time; default 'day') and an optional datastore 'cursor' for pagination.
  """
  time_range = self.request.get('range', default_value='day')
  cursor = Cursor(urlsafe=self.request.get('cursor'))
  flakes_query = Flake.query()
  # Restrict to the selected window and rank by that window's counter;
  # the fallback ('all') ranks by the all-time counter instead.
  if time_range == 'hour':
    flakes_query = flakes_query.filter(Flake.last_hour == True)
    flakes_query = flakes_query.order(-Flake.count_hour)
  elif time_range == 'day':
    flakes_query = flakes_query.filter(Flake.last_day == True)
    flakes_query = flakes_query.order(-Flake.count_day)
  elif time_range == 'week':
    flakes_query = flakes_query.filter(Flake.last_week == True)
    flakes_query = flakes_query.order(-Flake.count_week)
  elif time_range == 'month':
    flakes_query = flakes_query.filter(Flake.last_month == True)
    flakes_query = flakes_query.order(-Flake.count_month)
  else:
    flakes_query = flakes_query.order(-Flake.count_all)
  # Tie-break equal counts by recency.
  flakes_query = flakes_query.order(-Flake.last_time_seen)
  flakes, next_cursor, more = flakes_query.fetch_page(10, start_cursor=cursor)

  # Filter out occurrences so that we only show ones for the selected time
  # range. This is less confusing to read, and also less cluttered and renders
  # faster when not viewing all range.
  for f in flakes:
    # get_multi is much faster than calling .get for each f.occurrences
    occurrences = ndb.get_multi(f.occurrences)

    failure_run_keys = []
    patchsets_keys = []
    for o in occurrences:
      failure_run_keys.append(o.failure_run)
      patchsets_keys.append(o.failure_run.parent())

    failure_runs = ndb.get_multi(failure_run_keys)
    patchsets = ndb.get_multi(patchsets_keys)

    f.filtered_occurrences = []
    # tryserver pages show PST time so do so here as well for easy comparison.
    pst_timezone = pytz.timezone("US/Pacific")
    for index, r in enumerate(failure_runs):
      # Keep only runs that finished within the selected window.
      if (time_range == 'hour' and is_last_hour(r.time_finished)) or \
         (time_range == 'day' and is_last_day(r.time_finished)) or \
         (time_range == 'week' and is_last_week(r.time_finished)) or \
         (time_range == 'month' and is_last_month(r.time_finished)) or \
         time_range == 'all':
        # Annotate the run with template-facing attributes.
        r.patchset_url = patchsets[index].getURL()
        r.builder = patchsets[index].builder
        # Shorter timestamps for shorter ranges.
        time_format = ''
        if time_range == 'hour':
          time_format = '%I:%M %p'
        elif (time_range == 'day' or time_range == 'week' or
              time_range == 'month'):
          time_format = '%m/%d %I:%M %p'
        else:
          time_format = '%m/%d/%y %I:%M %p'
        r.formatted_time = r.time_finished.replace(tzinfo=pytz.utc). \
            astimezone(pst_timezone).strftime(time_format)
        f.filtered_occurrences.append(r)

    # Do simple sorting of occurrences by builder to make reading easier.
    f.filtered_occurrences = sorted(f.filtered_occurrences,
                                    key=FlakeSortFunction)

  values = {
      'range': time_range,
      'flakes': flakes,
      'more': more,
      'cursor': next_cursor.urlsafe() if next_cursor else '',
  }
  self.response.write(template.render('templates/index.html', values))
def _remove_issue_from_flakes(self, issue_id):
  """Detaches |issue_id| from every flake referencing it.

  The previous association is preserved in old_issue_id for later auditing.
  """
  affected_flakes = Flake.query(Flake.issue_id == issue_id)
  for affected in affected_flakes:
    logging.info('Removing issue_id %s from %s', issue_id, affected.key)
    affected.old_issue_id = issue_id
    affected.issue_id = 0
    affected.put()
def _update_all_flakes_with_new_issue_id(self, old_issue_id, new_issue_id):
  """Re-points every flake referencing |old_issue_id| at |new_issue_id|."""
  affected_flakes = Flake.query(Flake.issue_id == old_issue_id)
  for affected in affected_flakes:
    logging.info(
        'Updating issue_id from %s to %s', old_issue_id, new_issue_id)
    affected.issue_id = new_issue_id
    affected.put()
def put_flake(self, attr, count):
  """Persists a Flake seen in the given window ('hour'/'day'/...) with the
  given occurrence count."""
  entity = Flake(name='foo')
  setattr(entity, 'last_%s' % attr, True)
  setattr(entity, 'count_%s' % attr, count)
  entity.put()
def test_no_occurrences(self):
  """The occurrences page renders for a flake that has none recorded."""
  key = Flake(name='foo.bar').put()
  self.test_app.get('/all_flake_occurrences?key=%s' % key.urlsafe())
def update_flake_week_counter():
  """Recomputes counters for flakes seen only in the week-sized window.

  Flakes also seen in the last day/hour are handled by the more frequent
  counter jobs, so they are excluded here.
  """
  week_only = Flake.query().filter(
      Flake.last_week == True,
      Flake.last_day == False,
      Flake.last_hour == False)
  for flake in week_only:
    update_flake_counters(flake)
def get(self):
  """Renders one page of the flake index for the requested time range.

  Query params: 'range' ('hour'/'day'/'week'/'month', anything else = all
  time; default 'day') and an optional datastore 'cursor' for pagination.
  """
  time_range = self.request.get('range', default_value='day')
  cursor = Cursor(urlsafe=self.request.get('cursor'))
  flakes_query = Flake.query()
  # Restrict to the selected window and rank by that window's counter;
  # the fallback ('all') ranks by the all-time counter instead.
  if time_range == 'hour':
    flakes_query = flakes_query.filter(Flake.last_hour == True)
    flakes_query = flakes_query.order(-Flake.count_hour)
  elif time_range == 'day':
    flakes_query = flakes_query.filter(Flake.last_day == True)
    flakes_query = flakes_query.order(-Flake.count_day)
  elif time_range == 'week':
    flakes_query = flakes_query.filter(Flake.last_week == True)
    flakes_query = flakes_query.order(-Flake.count_week)
  elif time_range == 'month':
    flakes_query = flakes_query.filter(Flake.last_month == True)
    flakes_query = flakes_query.order(-Flake.count_month)
  else:
    flakes_query = flakes_query.order(-Flake.count_all)
  # Tie-break equal counts by recency.
  flakes_query = flakes_query.order(-Flake.last_time_seen)
  flakes, next_cursor, more = flakes_query.fetch_page(
      10, start_cursor=cursor)

  # Filter out occurrences so that we only show ones for the selected time
  # range. This is less confusing to read, and also less cluttered and renders
  # faster when not viewing all range.
  for f in flakes:
    # get_multi is much faster than calling .get for each f.occurrences
    occurrences = ndb.get_multi(f.occurrences)

    failure_run_keys = []
    patchsets_keys = []
    for o in occurrences:
      failure_run_keys.append(o.failure_run)
      patchsets_keys.append(o.failure_run.parent())

    failure_runs = ndb.get_multi(failure_run_keys)
    patchsets = ndb.get_multi(patchsets_keys)

    f.filtered_occurrences = []
    # tryserver pages show PST time so do so here as well for easy comparison.
    pst_timezone = pytz.timezone("US/Pacific")
    for index, r in enumerate(failure_runs):
      # Keep only runs that finished within the selected window.
      if (time_range == 'hour' and is_last_hour(r.time_finished)) or \
         (time_range == 'day' and is_last_day(r.time_finished)) or \
         (time_range == 'week' and is_last_week(r.time_finished)) or \
         (time_range == 'month' and is_last_month(r.time_finished)) or \
         time_range == 'all':
        # Annotate the run with template-facing attributes.
        r.patchset_url = patchsets[index].getURL()
        r.builder = patchsets[index].builder
        # Shorter timestamps for shorter ranges.
        time_format = ''
        if time_range == 'hour':
          time_format = '%I:%M %p'
        elif (time_range == 'day' or time_range == 'week' or
              time_range == 'month'):
          time_format = '%m/%d %I:%M %p'
        else:
          time_format = '%m/%d/%y %I:%M %p'
        r.formatted_time = r.time_finished.replace(tzinfo=pytz.utc). \
            astimezone(pst_timezone).strftime(time_format)
        f.filtered_occurrences.append(r)

    # Do simple sorting of occurrences by builder to make reading easier.
    f.filtered_occurrences = sorted(f.filtered_occurrences,
                                    key=FlakeSortFunction)

  values = {
      'range': time_range,
      'flakes': flakes,
      'more': more,
      'cursor': next_cursor.urlsafe() if next_cursor else '',
  }
  self.response.write(template.render('templates/index.html', values))