Example #1
def update_histograms():
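  # Update the day, week and month histograms using only the flakes still
  # flagged as seen in the corresponding time window.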
  update_histogram(Flake.query().filter(Flake.last_day == True),
                   'count_day', occurrences_per_flake_day)
  update_histogram(Flake.query().filter(Flake.last_week == True),
                   'count_week', occurrences_per_flake_week)
  update_histogram(Flake.query().filter(Flake.last_month == True),
                   'count_month', occurrences_per_flake_month)
Example #2
    def get(self):
        if Issue.query().count(1) > 0 or FlakeType.query().count(1) > 0:
            self.response.out.write(
                'Found Issue or FlakeType entities in datastore. '
                'Please remove them before trying to migrate '
                'data again.')
            self.response.set_status(400)
            return

        flakes = Flake.query(Flake.issue_id > 0).fetch()
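        # Group the FlakeType keys extracted from each flake by the issue
        # they belong to.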
        flake_types_by_issue = collections.defaultdict(list)
        for flake_number, flake in enumerate(flakes, 1):
            flake_types_by_issue[flake.issue_id].extend(
                _get_flake_types_from_flake(flake))
            if flake_number % 500 == 0:  # pragma: no cover
                logging.info('Processed %d flakes so far.' % flake_number)

        logging.info('Done processing FlakeTypes. Starting to process Issues')

        for issue_id, flake_type_keys in flake_types_by_issue.iteritems():
            # We might have found the same flake_type more than once.
            flake_type_keys = list(set(flake_type_keys))
            Issue(project='chromium',
                  issue_id=issue_id,
                  flake_type_keys=flake_type_keys).put()

        logging.info('Done processing Issues. Migration completed.')
Example #3
def update_flake_month_counter():  # pragma: no cover
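  # Only flakes last seen within the past month but not within any shorter
  # window; fresher flakes are covered by the more frequent counter jobs.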
  query = Flake.query().filter(Flake.last_month == True,
                               Flake.last_week == False,
                               Flake.last_day == False,
                               Flake.last_hour == False)
  for flake in query:
    update_flake_counters(flake)
Example #4
def update_issue_tracker():
  """File/update issues for flakes on issue_tracker."""
  # Only process flakes that happened at least MIN_REQUIRED_FLAKY_RUNS times in
  # the last 24 hours.
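  # The projection query fetches only count_day, keeping this scan cheap; the
  # actual processing happens in the task queue handler.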
  for flake in Flake.query(Flake.count_day >= MIN_REQUIRED_FLAKY_RUNS,
                           projection=[Flake.count_day]):
    taskqueue.add(queue_name='issue-updates',
                  url='/issues/process/%s' % flake.key.urlsafe())
Example #5
def update_issue_tracker():
    """File/update issues for flakes on issue_tracker."""
    # Only process flakes that happened at least MIN_REQUIRED_FLAKY_RUNS times in
    # the last 24 hours.
    for flake in Flake.query(Flake.count_day >= MIN_REQUIRED_FLAKY_RUNS,
                             projection=[Flake.count_day]):
        logging.info('Created processing task for %s' % flake.key)
        taskqueue.add(queue_name='issue-updates',
                      url='/issues/process/%s' % flake.key.urlsafe())
Example #6
  def get(self):
    search = self.request.get('q')

    flake = Flake.query().filter(Flake.name == search).get()
    if not flake:
      self.response.write('No flake entry found for ' + search)
      return

    self.response.write(show_all_flakes(flake, 0))
Example #7
  def index(time_range, cursor=None):
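    # Pick the filter and sort order for the requested time range, fetch one
    # page of flakes, and attach only the occurrences that fall in that range.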
    flakes_query = Flake.query()
    if time_range == 'hour':
      flakes_query = flakes_query.filter(Flake.last_hour == True)
      flakes_query = flakes_query.order(-Flake.count_hour)
    elif time_range == 'day':
      flakes_query = flakes_query.filter(Flake.last_day == True)
      flakes_query = flakes_query.order(-Flake.count_day)
    elif time_range == 'week':
      flakes_query = flakes_query.filter(Flake.last_week == True)
      flakes_query = flakes_query.order(-Flake.count_week)
    elif time_range == 'month':
      flakes_query = flakes_query.filter(Flake.last_month == True)
      flakes_query = flakes_query.order(-Flake.count_month)
    else:
      flakes_query = flakes_query.order(-Flake.count_all)

    flakes_query = flakes_query.order(-Flake.last_time_seen)
    flakes, next_cursor, more = flakes_query.fetch_page(10, start_cursor=cursor)

    # Filter occurrences so that we only show the ones for the selected time
    # range. This is less confusing to read, less cluttered, and renders
    # faster than rendering occurrences for all ranges.
    def filter_by_range(t):
      if time_range == 'hour':
        return is_last_hour(t)
      elif time_range == 'day':
        return is_last_day(t)
      elif time_range == 'week':
        return is_last_week(t)
      elif time_range == 'month':
        return is_last_month(t)
      else:
        return True

    time_format = ''
    if time_range == 'hour':
      time_format = '%H:%M:%S'
    else:
      time_format = '%Y-%m-%d %H:%M:%S'

    def time_formatter(t):
      return t.strftime(time_format)

    for f in flakes:
      f.filtered_occurrences = GetFilteredOccurences(
          f, time_formatter, filter_by_range)
      if len(f.occurrences) > MAX_OCCURRENCES_PER_FLAKE_ON_INDEX_PAGE:
        f.more_occurrences = True

    return {
      'range': time_range,
      'flakes': flakes,
      'more': more,
      'cursor': next_cursor.urlsafe() if next_cursor else '',
    }
Example #8
  def test_does_not_create_too_many_issues(self):
    with mock.patch('handlers.flake_issues.MAX_UPDATED_ISSUES_PER_DAY', 5):
      with mock.patch('handlers.flake_issues.MIN_REQUIRED_FLAKY_RUNS', 2):
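        # Ten flakes are processed, but the daily cap mocked above should
        # stop new issues from being filed after the first five.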
        for _ in range(10):
          key = self._create_flake().put()
          response = self.test_app.post('/issues/process/%s' % key.urlsafe())
          self.assertEqual(200, response.status_int)

    issue_ids = [flake.issue_id for flake in Flake.query() if flake.issue_id]
    self.assertEqual(len(issue_ids), 5)
    self.assertEqual(len(self.mock_api.issues), 5)
Example #9
    def get(self):
        search = self.request.get('q')

        flake = Flake.query().filter(Flake.name == search).get()
        if flake:
            self.redirect('/all_flake_occurrences?key=%s' %
                          flake.key.urlsafe())
            return

        # Users might search using the full step name. Try normalizing it
        # before searching. Note that this won't find flakes in a step where
        # chromium-try-flakes was able to determine which test failed; in
        # that case, users should search using the test name.
        normalized_step_name = normalize_test_type(search)
        flake = Flake.query().filter(Flake.name == normalized_step_name).get()
        if flake:
            self.redirect('/all_flake_occurrences?key=%s' %
                          flake.key.urlsafe())
            return

        self.response.write('No flake entry found for ' + search)
Example #10
def delete_old_flake_occurrences():
    """Delete old and invalid flake occurrences from flakes.

  Old occurrences (FlakyRuns) are those which have finished more than 3 months
  ago. Invalid ones are those that that do not exist in datastore, but are
  listed in Flake.occurrences field. We do not remove any occurrences from a
  given flake unless there is there at least 100 recent valid occurrences in the
  occurrences field.
  """
    old_occ_cutoff = datetime.datetime.utcnow() - datetime.timedelta(days=90)
    for flake in Flake.query(Flake.last_time_seen > old_occ_cutoff):
        flaky_runs = ndb.get_multi(flake.occurrences)
        occurrence_map = {}
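        # Pairs each occurrence key with its FlakyRun entity; get_multi
        # returns None for keys that no longer exist in the datastore.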
        map(occurrence_map.__setitem__, flake.occurrences, flaky_runs)

        # Sort flake occurrences.
        new_occurrences = []
        old_occurrences = []
        invalid_occurrences = []
        for key, flaky_run in occurrence_map.iteritems():
            if not flaky_run:
                invalid_occurrences.append(key)
            elif flaky_run.failure_run_time_finished < old_occ_cutoff:
                old_occurrences.append(key)
            else:
                new_occurrences.append(key)

        # We use a nested function with a transaction to make sure that we do
        # not lose any new occurrences being added to the Flake at the same
        # time.
        @ndb.transactional
        def remove_occurrences(flake_key, occurrences_to_remove):
            flake = flake_key.get()
            flake.occurrences = [
                occ for occ in flake.occurrences
                if occ not in occurrences_to_remove
            ]
            flake.put()

        occurrences_to_remove = old_occurrences + invalid_occurrences
        if len(new_occurrences) >= 100 and occurrences_to_remove:
            remove_occurrences(flake.key, occurrences_to_remove)
            logging.info(
                'Removed %d old and %d invalid occurrences from flake %s',
                len(old_occurrences), len(invalid_occurrences), flake.name)
Example #11
  def get(self):
    time_range = self.request.get('range', default_value='day')
    cursor = Cursor(urlsafe=self.request.get('cursor'))
    flakes_query = Flake.query()
    if time_range == 'hour':
      flakes_query = flakes_query.filter(Flake.last_hour == True)
      flakes_query = flakes_query.order(-Flake.count_hour)
    elif time_range == 'day':
      flakes_query = flakes_query.filter(Flake.last_day == True)
      flakes_query = flakes_query.order(-Flake.count_day)
    elif time_range == 'week':
      flakes_query = flakes_query.filter(Flake.last_week == True)
      flakes_query = flakes_query.order(-Flake.count_week)
    elif time_range == 'month':
      flakes_query = flakes_query.filter(Flake.last_month == True)
      flakes_query = flakes_query.order(-Flake.count_month)
    else:
      flakes_query = flakes_query.order(-Flake.count_all)

    flakes_query = flakes_query.order(-Flake.last_time_seen)
    flakes, next_cursor, more = flakes_query.fetch_page(10, start_cursor=cursor)

    # Filter occurrences so that we only show the ones for the selected time
    # range. This is less confusing to read, less cluttered, and renders
    # faster than rendering occurrences for all ranges.
    for f in flakes:
      # get_multi is much faster than calling .get for each f.occurrences
      occurrences = ndb.get_multi(f.occurrences)

      failure_run_keys = []
      patchsets_keys = []
      for o in occurrences:
        failure_run_keys.append(o.failure_run)
        patchsets_keys.append(o.failure_run.parent())

      failure_runs = ndb.get_multi(failure_run_keys)
      patchsets = ndb.get_multi(patchsets_keys)

      f.filtered_occurrences = []
      # Tryserver pages show PST time, so do the same here for easy comparison.
      pst_timezone = pytz.timezone("US/Pacific")
      for index, r in enumerate(failure_runs):
        if (time_range == 'hour' and is_last_hour(r.time_finished)) or \
           (time_range == 'day' and is_last_day(r.time_finished)) or \
           (time_range == 'week' and is_last_week(r.time_finished)) or \
           (time_range == 'month' and is_last_month(r.time_finished)) or \
           time_range == 'all':
          r.patchset_url = patchsets[index].getURL()
          r.builder = patchsets[index].builder

          time_format = ''
          if time_range == 'hour':
            time_format = '%I:%M %p'
          elif (time_range == 'day' or time_range == 'week' or
                time_range == 'month'):
            time_format = '%m/%d %I:%M %p'
          else:
            time_format = '%m/%d/%y %I:%M %p'
          r.formatted_time = r.time_finished.replace(tzinfo=pytz.utc). \
              astimezone(pst_timezone).strftime(time_format)
          f.filtered_occurrences.append(r)

      # Do simple sorting of occurrences by builder to make reading easier.
      f.filtered_occurrences = sorted(f.filtered_occurrences,
                                      key=FlakeSortFunction)

    values = {
      'range': time_range,
      'flakes': flakes,
      'more': more,
      'cursor': next_cursor.urlsafe() if next_cursor else '',
    }
    self.response.write(template.render('templates/index.html', values))
Example #12
  def _remove_issue_from_flakes(self, issue_id):
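    # Detach the given issue from every flake that references it, keeping the
    # old id around in old_issue_id.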
    for flake in Flake.query(Flake.issue_id == issue_id):
      logging.info('Removing issue_id %s from %s', issue_id, flake.key)
      flake.old_issue_id = issue_id
      flake.issue_id = 0
      flake.put()
Example #13
  def test_get_flaky_run_reason(self):
    now = datetime.datetime.utcnow()
    br_f, br_s = self._create_build_runs(now - datetime.timedelta(hours=1), now)

    urlfetch_mock = mock.Mock(side_effect=[
        # JSON results for the build.
        mock.Mock(status_code=200, content=TEST_BUILDBOT_JSON_REPLY),
        # JSON results for step "foo1".
        mock.Mock(status_code=200, content=TEST_TEST_RESULTS_REPLY),
        # For step "foo8 xx (with patch)", something failed while parsing JSON,
        # step text ("bar13") should be reported as flake.
        Exception(),
    ])

    # We also create one Flake to test that it is correctly updated. Other Flake
    # entities will be created automatically.
    Flake(id='foo2', name='foo2', occurrences=[],
          last_time_seen=datetime.datetime.min).put()

    with mock.patch('google.appengine.api.urlfetch.fetch', urlfetch_mock):
      self.test_app.post('/issues/create_flaky_run',
                         {'failure_run_key': br_f.urlsafe(),
                          'success_run_key': br_s.urlsafe()})

    flaky_runs = FlakyRun.query().fetch(100)
    self.assertEqual(len(flaky_runs), 1)
    flaky_run = flaky_runs[0]
    self.assertEqual(flaky_run.failure_run, br_f)
    self.assertEqual(flaky_run.success_run, br_s)
    self.assertEqual(flaky_run.failure_run_time_finished, now)
    self.assertEqual(flaky_run.failure_run_time_started,
                     now - datetime.timedelta(hours=1))

    urlfetch_mock.assert_has_calls([
      # Verify that we've used correct URL to access buildbot JSON endpoint.
      mock.call(
        'http://build.chromium.org/p/test.master/json/builders/test-builder/'
        'builds/100'),
      # Verify that we've used correct URLs to retrieve test-results GTest JSON.
      mock.call(
        'http://test-results.appspot.com/testfile?builder=test-builder&'
        'name=full_results.json&master=test.master&testtype=foo1&'
        'buildnumber=100'),
      mock.call(
        'http://test-results.appspot.com/testfile?builder=test-builder&'
        'name=full_results.json&master=test.master&'
        'testtype=foo8%20%28with%20patch%29&buildnumber=100')])

    # Expected flakes to be found: list of (step_name, test_name).
    expected_flakes = [
        ('foo1', 'test2.a'),
        ('foo1', 'test2.d'),
        ('foo2', 'foo2'),
        ('foo8 xx (with patch)', 'foo8 (with patch)'),
    ]

    flake_occurrences = flaky_run.flakes
    self.assertEqual(len(flake_occurrences), len(expected_flakes))
    actual_flake_occurrences = [
        (fo.name, fo.failure) for fo in flake_occurrences]
    self.assertEqual(expected_flakes, actual_flake_occurrences)

    # We compare sets below, because the order of flakes returned by the
    # datastore doesn't have to match the order of the steps above.
    flakes = Flake.query().fetch()
    self.assertEqual(len(flakes), len(expected_flakes))
    expected_flake_names = set([ef[1] for ef in expected_flakes])
    actual_flake_names = set([f.name for f in flakes])
    self.assertEqual(expected_flake_names, actual_flake_names)

    for flake in flakes:
      self.assertEqual(flake.occurrences, [flaky_run.key])
Example #14
  def _update_all_flakes_with_new_issue_id(self, old_issue_id, new_issue_id):
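    # Re-point every flake that referenced the old issue at the new one.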
    for flake in Flake.query(Flake.issue_id == old_issue_id):
      logging.info(
          'Updating issue_id from %s to %s', old_issue_id, new_issue_id)
      flake.issue_id = new_issue_id
      flake.put()
Example #15
def update_flake_hour_counter():  # pragma: no cover
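  # Recompute counters for every flake seen within the last hour.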
  query = Flake.query().filter(Flake.last_hour == True)
  for flake in query:
    update_flake_counters(flake)
Example #16
def update_stale_issues():
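  # The distinct projection query yields one result per unique issue_id, so
  # each issue gets exactly one update task.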
  for flake in Flake.query(Flake.issue_id > 0, projection=[Flake.issue_id],
                           distinct=True):
    taskqueue.add(queue_name='issue-updates',
                  url='/issues/update-if-stale/%s' % flake.issue_id)
Example #17
def update_flake_week_counter():
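  # Only flakes last seen within the past week but not within the past day or
  # hour; fresher flakes are covered by the more frequent counter jobs.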
  query = Flake.query().filter(Flake.last_week == True,
                               Flake.last_day == False,
                               Flake.last_hour == False)
  for flake in query:
    update_flake_counters(flake)
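
A note on update_flake_counters: the counter examples above (Examples #3, #15
and #17) call update_flake_counters without showing its body. Below is a
minimal illustrative sketch of what such a helper could look like, not the
actual implementation. It assumes the is_last_* helpers used in Example #7,
and that Flake.occurrences holds keys of FlakyRun entities carrying a
failure_run_time_finished field, as in Example #10.

from google.appengine.ext import ndb


def update_flake_counters(flake):  # hypothetical sketch, not the real helper
  """Recomputes the rolling time-window counters for a single flake."""
  occurrences = ndb.get_multi(flake.occurrences)
  flake.count_hour = flake.count_day = 0
  flake.count_week = flake.count_month = 0
  for occurrence in occurrences:
    if not occurrence:  # the FlakyRun may have been deleted since
      continue
    finished = occurrence.failure_run_time_finished
    if is_last_hour(finished):
      flake.count_hour += 1
    if is_last_day(finished):
      flake.count_day += 1
    if is_last_week(finished):
      flake.count_week += 1
    if is_last_month(finished):
      flake.count_month += 1
  flake.last_hour = bool(flake.count_hour)
  flake.last_day = bool(flake.count_day)
  flake.last_week = bool(flake.count_week)
  flake.last_month = bool(flake.count_month)
  flake.put()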