Example 1
def ParseGroupingLabelChartNameAndTraceName(test_path):
    """Returns grouping_label, chart_name, trace_name from a test path."""
    parts = test_path.split('/')
    # Non-Chrome targets carry no Telemetry metric structure to extract.
    if parts[2] in _NON_CHROME_TARGETS:
        return '', '', ''

    entity = ndb.Key('TestMetadata', '/'.join(parts)).get()
    grouping_label, chart_name, trace_name = utils.ParseTelemetryMetricParts(
        test_path)
    # Prefer the recorded human-readable story name over the path-derived one.
    if trace_name and entity.unescaped_story_name:
        trace_name = entity.unescaped_story_name
    return grouping_label, chart_name, trace_name
Example 2
 def testParseTelemetryMetricParts_3Part(self):
     """A full 3-part path yields (grouping_label, measurement, story)."""
     parsed = utils.ParseTelemetryMetricParts(
         'M/B/Suite/Measurement/TIRLabel/Story')
     self.assertEqual(('TIRLabel', 'Measurement', 'Story'), parsed)
Example 3
 def testParseTelemetryMetricParts_1Part(self):
     """A measurement-only path yields empty grouping label and story."""
     parsed = utils.ParseTelemetryMetricParts('M/B/Suite/Measurement')
     self.assertEqual(('', 'Measurement', ''), parsed)
Example 4
 def testParseTelemetryMetricParts_TooLong(self):
     """A path with too many trailing parts raises ParseTelemetryMetricFailed."""
     too_long = 'M/B/S/1/2/3/4'
     with self.assertRaises(utils.ParseTelemetryMetricFailed):
         utils.ParseTelemetryMetricParts(too_long)
Example 5
 def testParseTelemetryMetricParts_TooShort(self):
     """A path with no measurement part raises ParseTelemetryMetricFailed."""
     too_short = 'M/B/S'
     with self.assertRaises(utils.ParseTelemetryMetricFailed):
         utils.ParseTelemetryMetricParts(too_short)
Example 6
def _UpdateDescriptor(test_suite,
                      namespace,
                      start_cursor=None,
                      measurements=(),
                      bots=(),
                      cases=(),
                      case_tags=None):
    """Incrementally builds and caches the descriptor for a test suite.

    Pages through the suite's TestMetadata entities, accumulating
    measurement names, bot names, story (case) names, and case tags.
    While more pages remain it re-enqueues itself via deferred.defer,
    carrying the accumulated state forward; on the final page it writes
    the completed descriptor to the namespaced cache.

    Args:
      test_suite: Name of the test suite being described.
      namespace: datastore_hooks namespace the descriptor is stored under.
      start_cursor: Datastore cursor to resume paging from, or None.
      measurements: Measurement names accumulated by earlier iterations.
      bots: Bot names accumulated by earlier iterations.
      cases: Story names accumulated by earlier iterations.
      case_tags: Dict mapping tag -> story names, or None on the first call.
    """
    logging.info('%s %s %d %d %d', test_suite, namespace, len(measurements),
                 len(bots), len(cases))
    # This function always runs in the taskqueue as an anonymous user.
    if namespace == datastore_hooks.INTERNAL:
        datastore_hooks.SetPrivilegedRequest()

    measurements = set(measurements)
    bots = set(bots)
    cases = set(cases)
    case_tags = case_tags or {}
    tags_futures = []

    tests, next_cursor, more = _QueryTestSuite(test_suite).fetch_page(
        TESTS_TO_FETCH,
        start_cursor=start_cursor,
        use_cache=False,
        use_memcache=False)

    for test in tests:
        bots.add(test.bot_name)

        try:
            _, measurement, story = utils.ParseTelemetryMetricParts(
                test.test_path)
        except utils.ParseTelemetryMetricFailed as e:
            # Log the error and process the rest of the test suite.
            logging.error('Parsing error encountered: %s', e)
            continue

        if test.unescaped_story_name:
            story = test.unescaped_story_name

        if measurement:
            measurements.add(measurement)

        # Query tags only once per distinct story.
        if story and story not in cases:
            cases.add(story)
            tags_futures.append(_QueryCaseTags(test.test_path, story))

    _CollectCaseTags(tags_futures, case_tags)

    logging.info('%d keys, %d measurements, %d bots, %d cases, %d tags',
                 len(tests), len(measurements), len(bots), len(cases),
                 len(case_tags))

    if more:
        logging.info('continuing')
        deferred.defer(_UpdateDescriptor, test_suite, namespace, next_cursor,
                       measurements, bots, cases, case_tags)
        return

    # sorted() already returns a list, so no list() wrapper is needed.
    # The comprehension variable is named `stories` to avoid shadowing
    # the `cases` set above.
    desc = {
        'measurements': sorted(measurements),
        'bots': sorted(bots),
        'cases': sorted(cases),
        'caseTags': {tag: sorted(stories)
                     for tag, stories in case_tags.items()},
    }

    key = namespaced_stored_object.NamespaceKey(CacheKey(test_suite),
                                                namespace)
    stored_object.Set(key, desc)
Example 7
def MakeBisectionRequest(test,
                         commit_range,
                         issue,
                         comparison_mode,
                         target,
                         comparison_magnitude=None,
                         user=None,
                         name=None,
                         story_filter=None,
                         priority=0,
                         pin=None,
                         tags=None):
    """Generate a valid pinpoint bisection request.

  Args:
    test: TestMetadata entity
    commit_range: CommitRange including git hash start and end
    issue: Related issue id
    comparison_mode: performance or functional
    target: Isolation target
    comparison_magnitude: Magnitude used in bisection
    user: User email that triggered the request
    name: Pinpoint job name
    story_filter: Test story
    priority: Job priority
    pin: Pin a base version
    tags: Extra tags

  Returns:
    Pinpoint request to start a new bisection job
  """

    story = story_filter or test.unescaped_story_name

    grouping_label, chart, trace = '', '', ''
    # Telemetry metric parts only apply to performance bisections; v8
    # suites do not follow the Telemetry test-path structure.
    if comparison_mode == 'performance' and test.suite_name != 'v8':
        grouping_label, chart, trace = utils.ParseTelemetryMetricParts(
            test.test_path)
        if trace and test.unescaped_story_name:
            trace = test.unescaped_story_name
    chart, statistic = utils.ParseStatisticNameFromChart(chart)

    # Required fields are always present in the request.
    pinpoint_params = {
        'configuration': test.bot_name,
        'benchmark': test.suite_name,
        'chart': chart,
        'start_git_hash': commit_range.start,
        'end_git_hash': commit_range.end,
        'comparison_mode': comparison_mode,
        'target': target,
        'priority': priority,
        'tags': json.dumps(tags or {}),
    }

    # Optional fields are attached only when they carry a truthy value.
    optional_params = (
        ('user', user),
        ('name', name),
        ('bug_id', issue and issue.issue_id),
        ('project', issue and issue.project_id),
        ('comparison_magnitude', comparison_magnitude),
        ('pin', pin),
        ('statistic', statistic),
        ('story', story),
        ('grouping_label', grouping_label),
        ('trace', trace),
    )
    for param_name, value in optional_params:
        if value:
            pinpoint_params[param_name] = value

    return pinpoint_params