Example No. 1
def post_collection(project, th_collection, status=None, expect_errors=False, consumer_key=None, consumer_secret=None):

    # Set the credentials
    OAuthCredentials.set_credentials(SampleData.get_credentials())

    credentials = OAuthCredentials.get_credentials(project)

    # The only time the credentials should be overridden is when
    # a client needs to test authentication failure confirmation
    if consumer_key:
        credentials["consumer_key"] = consumer_key

    if consumer_secret:
        credentials["consumer_secret"] = consumer_secret

    tr = TreeherderRequest(
        protocol="http",
        host="localhost",
        project=project,
        oauth_key=credentials["consumer_key"],
        oauth_secret=credentials["consumer_secret"],
    )

    signed_uri = tr.oauth_client.get_signed_uri(
        th_collection.to_json(), tr.get_uri(th_collection.endpoint_base), "POST"
    )

    response = TestApp(application).post_json(
        str(signed_uri), params=th_collection.get_collection_data(), status=status
    )

    return response
Example No. 2
    def _post_json_data(url, data):

        th_collection = data[jm.project]

        OAuthCredentials.set_credentials(SampleData.get_credentials())
        credentials = OAuthCredentials.get_credentials(jm.project)

        tr = TreeherderRequest(
            protocol='http',
            host='localhost',
            project=jm.project,
            oauth_key=credentials['consumer_key'],
            oauth_secret=credentials['consumer_secret']
            )
        signed_uri = tr.oauth_client.get_signed_uri(
            th_collection.to_json(),
            tr.get_uri(th_collection.endpoint_base),
            "POST"
            )

        response = TestApp(application).post_json(
            str(signed_uri), params=th_collection.get_collection_data()
            )

        response.getcode = lambda: response.status_int
        return response
Example No. 3
def post_job_data(
        project, uri, data, status=None, expect_errors=False):

    # Since the uri is passed in, it's not generated by the
    # treeherder request or collection and is missing the protocol
    # and host. Add those missing elements here.
    uri = 'http://localhost{0}'.format(uri)

    # Set the credentials
    OAuthCredentials.set_credentials(SampleData.get_credentials())

    credentials = OAuthCredentials.get_credentials(project)

    tr = TreeherderRequest(
        protocol='http',
        host='localhost',
        project=project,
        oauth_key=credentials['consumer_key'],
        oauth_secret=credentials['consumer_secret']
    )

    signed_uri = tr.get_signed_uri(
        json.dumps(data), uri
    )

    response = TestApp(application).post_json(
        str(signed_uri), params=data, status=status,
        expect_errors=expect_errors
    )

    return response
Example No. 4
    def check_json(self, filename, expected_timestamps):
        """Parse JSON produced by http://graphs.mozilla.org/api/test/runs"""
        # Configuration for TalosAnalyzer
        FORE_WINDOW = 12
        BACK_WINDOW = 12
        THRESHOLD = 7
        MACHINE_THRESHOLD = 15
        MACHINE_HISTORY_SIZE = 5

        payload = SampleData.get_perf_data(os.path.join('graphs', filename))
        runs = payload['test_runs']
        data = [
            PerfDatum(r[2],
                      r[3],
                      testrun_id=r[0],
                      machine_id=r[6],
                      testrun_timestamp=r[2],
                      buildid=r[1][1],
                      revision=r[1][2]) for r in runs
        ]

        a = TalosAnalyzer()
        a.addData(data)
        results = a.analyze_t(BACK_WINDOW, FORE_WINDOW, THRESHOLD,
                              MACHINE_THRESHOLD, MACHINE_HISTORY_SIZE)
        regression_timestamps = [
            d.testrun_timestamp for d in results if d.state == 'regression'
        ]
        self.assertEqual(regression_timestamps, expected_timestamps)
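A hedged sketch (synthetic data, not part of the original test; the helper name is hypothetical) showing the same TalosAnalyzer flow on a hand-built step series, using only the PerfDatum constructor and analyze_t arguments visible above:

def analyze_step_example():
    # synthetic series: 30 points at 1.0 followed by 30 points at 2.0;
    # constructor arguments mirror the call in check_json above
    data = [PerfDatum(t, 1.0 if t < 30 else 2.0,
                      testrun_id=t, machine_id=1, testrun_timestamp=t,
                      buildid='build-%d' % t, revision='rev-%d' % t)
            for t in range(60)]

    a = TalosAnalyzer()
    a.addData(data)
    # positional arguments correspond to BACK_WINDOW, FORE_WINDOW,
    # THRESHOLD, MACHINE_THRESHOLD and MACHINE_HISTORY_SIZE above
    results = a.analyze_t(12, 12, 7, 15, 5)
    return [d.testrun_timestamp for d in results if d.state == 'regression']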
Example No. 5
def post_collection(
        project, th_collection, status=None, expect_errors=False,
        consumer_key=None, consumer_secret=None):

    # Set the credentials
    OAuthCredentials.set_credentials(SampleData.get_credentials())

    credentials = OAuthCredentials.get_credentials(project)

    # The only time the credentials should be overridden is when
    # a client needs to test authentication failure confirmation
    consumer_key = consumer_key or credentials['consumer_key']
    consumer_secret = consumer_secret or credentials['consumer_secret']

    auth = TreeherderAuth(consumer_key, consumer_secret, project)
    client = TreeherderClient(protocol='http', host='localhost', auth=auth)
    uri = client._get_project_uri(project, th_collection.endpoint_base)

    req = Request('POST', uri,
                  json=th_collection.get_collection_data(),
                  auth=auth)
    prepped_request = req.prepare()

    response = TestApp(application).post_json(
        prepped_request.url,
        params=th_collection.get_collection_data(),
        status=status
    )

    return response
Example No. 6
def post_collection(
        project, th_collection, status=None, expect_errors=False,
        consumer_key=None, consumer_secret=None):

    # Set the credentials
    OAuthCredentials.set_credentials(SampleData.get_credentials())

    credentials = OAuthCredentials.get_credentials(project)

    # The only time the credentials should be overridden is when
    # a client needs to test authentication failure confirmation
    if consumer_key:
        credentials['consumer_key'] = consumer_key

    if consumer_secret:
        credentials['consumer_secret'] = consumer_secret

    cli = TreeherderClient(
        protocol='http',
        host='localhost',
    )

    jsondata = th_collection.to_json()
    signed_uri = cli._get_uri(project, th_collection.endpoint_base,
                              data=jsondata,
                              oauth_key=credentials['consumer_key'],
                              oauth_secret=credentials['consumer_secret'],
                              method='POST')

    response = TestApp(application).post_json(
        str(signed_uri), params=th_collection.get_collection_data(),
        status=status
    )

    return response
Example No. 7
def post_job_data(project, uri, data, status=None, expect_errors=False):

    # Since the uri is passed in, it's not generated by the
    # treeherder request or collection and is missing the protocol
    # and host. Add those missing elements here.
    uri = 'http://localhost{0}'.format(uri)

    # Set the credentials
    OAuthCredentials.set_credentials(SampleData.get_credentials())

    credentials = OAuthCredentials.get_credentials(project)

    tr = TreeherderRequest(protocol='http',
                           host='localhost',
                           project=project,
                           oauth_key=credentials['consumer_key'],
                           oauth_secret=credentials['consumer_secret'])

    signed_uri = tr.get_signed_uri(json.dumps(data), uri)

    response = TestApp(application).post_json(str(signed_uri),
                                              params=data,
                                              status=status,
                                              expect_errors=expect_errors)

    return response
Example No. 8
def load_exp(filename):
    """
    Load in an expected result json and return as an obj.
    """
    path = SampleData().get_log_path(filename)
    with open(path) as f:
        return json.load(f)
Example No. 9
def test_post_talos_artifact(test_project, test_repository, result_set_stored,
                             mock_post_json):
    test_repository.save()

    tjc = client.TreeherderJobCollection()
    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
    tj = client.TreeherderJob({
        'project': test_repository.name,
        'revision_hash': result_set_stored[0]['revision_hash'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
            'project': test_repository.name,
            'option_collection': {'opt': True},
            'artifacts': [{
                'blob': {'talos_data': SampleData.get_minimal_talos_perf_data()},
                'type': 'json',
                'name': 'talos_data',
                'job_guid': job_guid
            }]
        }
    })

    tjc.add(tj)

    post_collection(test_project, tjc)

    # we'll just validate that we got the expected number of results for
    # talos (we have validation elsewhere for the actual data adapters)
    assert PerformanceSignature.objects.count() == 1
    assert PerformanceDatum.objects.count() == 1
Example No. 10
def add_log_response(filename):
    """
    Set up responses for a local gzipped log and return the url for it.
    """
    log_path = SampleData().get_log_path(filename)
    log_url = "http://my-log.mozilla.org/{}".format(filename)

    with open(log_path, 'rb') as log_file:
        responses.add(responses.GET,
                      log_url,
                      body=log_file.read(),
                      adding_headers={
                          "Content-Encoding": "gzip",
                      })
    return log_url
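A minimal usage sketch (not part of the original example; the test name and log filename are hypothetical) of how a test might consume add_log_response, assuming the responses library's activate decorator and the requests package:

import requests
import responses


@responses.activate
def test_fetch_registered_log():
    # register a hypothetical gzipped sample log, then fetch it back;
    # responses intercepts the GET and serves the body registered above
    log_url = add_log_response("example-log.txt.gz")
    assert requests.get(log_url).status_code == 200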
Example No. 11
def load_exp(filename):
    """
    Load in an expected result json and return as an obj.

    If the file doesn't exist, it will be created, but the test will
    fail, due to no content.  This is to make it easier during test
    development.
    """
    path = SampleData().get_log_path(filename)
    with open(path, "a+") as f:
        try:
            return json.loads(f.read())
        except ValueError:
            # if it's not parse-able, return an empty dict
            return {}
Example No. 12
    def check_json(self, filename, expected_timestamps):
        """Parse JSON produced by http://graphs.mozilla.org/api/test/runs"""
        # Configuration for Analyzer
        FORE_WINDOW = 12
        BACK_WINDOW = 12
        THRESHOLD = 7

        payload = SampleData.get_perf_data(os.path.join('graphs', filename))
        runs = payload['test_runs']
        a = Analyzer()
        for r in runs:
            a.add_data(r[2], r[3], testrun_id=r[0],
                       revision_id=r[1][2])

        results = a.analyze_t(BACK_WINDOW, FORE_WINDOW, THRESHOLD)
        regression_timestamps = [d.push_timestamp for d in results if
                                 d.state == 'regression']
        self.assertEqual(regression_timestamps, expected_timestamps)
Example No. 13
    def _send(th_request, th_collection):

        OAuthCredentials.set_credentials(SampleData.get_credentials())
        credentials = OAuthCredentials.get_credentials(jm.project)

        th_request.oauth_key = credentials['consumer_key']
        th_request.oauth_secret = credentials['consumer_secret']

        signed_uri = th_request.get_signed_uri(
            th_collection.to_json(), th_request.get_uri(th_collection)
        )

        response = TestApp(application).post_json(
            str(signed_uri), params=th_collection.get_collection_data()
        )

        response.getcode = lambda: response.status_int
        return response
Example No. 14
def test_adapt_and_load():

    talos_perf_data = SampleData.get_talos_perf_data()

    tda = TalosDataAdapter()

    result_count = 0
    for datum in talos_perf_data:

        datum = {
            "job_guid": 'oqiwy0q847365qiu',
            "name": "test",
            "type": "test",
            "blob": datum
        }

        job_data = {
            "oqiwy0q847365qiu": {
                "id": 1,
                "result_set_id": 1,
                "push_timestamp": 1402692388
            }
        }

        reference_data = {
            "property1": "value1",
            "property2": "value2",
            "property3": "value3"
        }

        # one extra result for the summary series
        result_count += len(datum['blob']["results"]) + 1

        # we create one performance series per counter
        if 'talos_counters' in datum['blob']:
            result_count += len(datum['blob']["talos_counters"])

        # Mimic production environment, the blobs are serialized
        # when the web service receives them
        datum['blob'] = json.dumps({'talos_data': [datum['blob']]})
        tda.adapt_and_load(reference_data, job_data, datum)

    assert result_count == len(tda.performance_artifact_placeholders)
Example No. 15
def add_log_response(filename):
    """
    Set up responses for a local gzipped log and return the url for it.
    """
    log_path = SampleData().get_log_path(filename)
    log_url = "http://my-log.mozilla.org/{}".format(filename)

    with open(log_path, 'rb') as log_file:
        content = log_file.read()
        responses.add(
            responses.GET,
            log_url,
            body=content,
            adding_headers={
                'Content-Encoding': 'gzip',
                'Content-Length': str(len(content)),
            },
        )
    return log_url
Example No. 16
def test_detect_changes_historical_data(filename, expected_timestamps):
    """Parse JSON produced by http://graphs.mozilla.org/api/test/runs"""
    # Configuration for Analyzer
    FORE_WINDOW = 12
    MIN_BACK_WINDOW = 12
    MAX_BACK_WINDOW = 24
    THRESHOLD = 7

    payload = SampleData.get_perf_data(os.path.join('graphs', filename))
    runs = payload['test_runs']
    data = [RevisionDatum(r[2], r[2], [r[3]]) for r in runs]

    results = detect_changes(data,
                             min_back_window=MIN_BACK_WINDOW,
                             max_back_window=MAX_BACK_WINDOW,
                             fore_window=FORE_WINDOW,
                             t_threshold=THRESHOLD)
    regression_timestamps = [d.push_timestamp for d in results if
                             d.change_detected]
    assert regression_timestamps == expected_timestamps
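A hedged sketch (synthetic data, not part of the original test; the helper name is hypothetical) applying the same detect_changes call to a hand-built step series, using only the RevisionDatum constructor and keyword arguments visible above:

def detect_step_change_example():
    # 30 points at 1.0 followed by 30 points at 2.0; as in the test above,
    # the push timestamp is reused as the second constructor argument
    data = [RevisionDatum(t, t, [1.0]) for t in range(30)]
    data += [RevisionDatum(t, t, [2.0]) for t in range(30, 60)]
    results = detect_changes(data,
                             min_back_window=12,
                             max_back_window=24,
                             fore_window=12,
                             t_threshold=7)
    # timestamps flagged as changes; expected to land near t == 30
    return [d.push_timestamp for d in results if d.change_detected]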
Example No. 17
    def _send(th_request, endpoint, method=None, data=None):

        OAuthCredentials.set_credentials(SampleData.get_credentials())
        credentials = OAuthCredentials.get_credentials(jm.project)

        th_request.oauth_key = credentials['consumer_key']
        th_request.oauth_secret = credentials['consumer_secret']

        if data and not isinstance(data, str):
            data = json.dumps(data)

        signed_uri = th_request.oauth_client.get_signed_uri(
            data, th_request.get_uri(endpoint), method)

        response = getattr(TestApp(application),
                           method.lower())(str(signed_uri),
                                           params=data,
                                           content_type='application/json')

        response.getcode = lambda: response.status_int
        return response
Example No. 18
def test_post_talos_artifact(test_project, test_repository, result_set_stored,
                             mock_post_json):
    test_repository.save()

    tjc = client.TreeherderJobCollection()
    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
    tj = client.TreeherderJob({
        'project': test_repository.name,
        'revision_hash': result_set_stored[0]['revision_hash'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
            'project': test_repository.name,
            'option_collection': {'opt': True},
            'artifacts': [{
                'blob': {
                    'talos_data': SampleData.get_minimal_talos_perf_data()
                },
                'type': 'json',
                'name': 'talos_data',
                'job_guid': job_guid
            }]
        }
    })

    tjc.add(tj)

    post_collection(test_project, tjc)

    # we'll just validate that we got the expected number of results for
    # talos (we have validation elsewhere for the actual data adapters)
    assert PerformanceSignature.objects.count() == 1
    assert PerformanceDatum.objects.count() == 1
Example No. 19
def post_collection(project,
                    th_collection,
                    status=None,
                    expect_errors=False,
                    consumer_key=None,
                    consumer_secret=None):

    # Set the credentials
    OAuthCredentials.set_credentials(SampleData.get_credentials())

    credentials = OAuthCredentials.get_credentials(project)

    # The only time the credentials should be overridden is when
    # a client needs to test authentication failure confirmation
    if consumer_key:
        credentials['consumer_key'] = consumer_key

    if consumer_secret:
        credentials['consumer_secret'] = consumer_secret

    cli = TreeherderClient(
        protocol='http',
        host='localhost',
    )

    jsondata = th_collection.to_json()
    signed_uri = cli._get_project_uri(
        project,
        th_collection.endpoint_base,
        data=jsondata,
        oauth_key=credentials['consumer_key'],
        oauth_secret=credentials['consumer_secret'],
        method='POST')

    response = TestApp(application).post_json(
        str(signed_uri),
        params=th_collection.get_collection_data(),
        status=status)

    return response
Example No. 20
    def check_json(self, filename, expected_timestamps):
        """Parse JSON produced by http://graphs.mozilla.org/api/test/runs"""
        # Configuration for TalosAnalyzer
        FORE_WINDOW = 12
        BACK_WINDOW = 12
        THRESHOLD = 7
        MACHINE_THRESHOLD = 15
        MACHINE_HISTORY_SIZE = 5

        payload = SampleData.get_perf_data(os.path.join('graphs', filename))
        runs = payload['test_runs']
        data = [PerfDatum(r[2], r[3], testrun_id=r[0], machine_id=r[6],
                          testrun_timestamp=r[2], buildid=r[1][1],
                          revision=r[1][2]) for r in runs]

        a = TalosAnalyzer()
        a.addData(data)
        results = a.analyze_t(BACK_WINDOW, FORE_WINDOW, THRESHOLD,
                              MACHINE_THRESHOLD, MACHINE_HISTORY_SIZE)
        regression_timestamps = [d.testrun_timestamp for d in results if
                                 d.state == 'regression']
        self.assertEqual(regression_timestamps, expected_timestamps)
Example No. 21
    def check_json(self, filename, expected_timestamps):
        """Parse JSON produced by http://graphs.mozilla.org/api/test/runs"""
        # Configuration for Analyzer
        FORE_WINDOW = 12
        MIN_BACK_WINDOW = 12
        MAX_BACK_WINDOW = 24
        THRESHOLD = 7

        payload = SampleData.get_perf_data(os.path.join('graphs', filename))
        runs = payload['test_runs']
        data = []
        for r in runs:
            data.append(Datum(r[2], r[3], testrun_id=r[0],
                              revision_id=r[1][2]))

        results = detect_changes(data, min_back_window=MIN_BACK_WINDOW,
                                 max_back_window=MAX_BACK_WINDOW,
                                 fore_window=FORE_WINDOW,
                                 t_threshold=THRESHOLD)
        regression_timestamps = [d.push_timestamp for d in results if
                                 d.state == 'regression']
        self.assertEqual(regression_timestamps, expected_timestamps)
Example No. 22
    def _send(th_request, endpoint, method=None, data=None):

        OAuthCredentials.set_credentials(SampleData.get_credentials())
        credentials = OAuthCredentials.get_credentials(jm.project)

        th_request.oauth_key = credentials['consumer_key']
        th_request.oauth_secret = credentials['consumer_secret']

        if data and not isinstance(data, str):
            data = json.dumps(data)

        signed_uri = th_request.oauth_client.get_signed_uri(
            data, th_request.get_uri(endpoint), method
        )

        response = getattr(TestApp(application), method.lower())(
            str(signed_uri),
            params=data,
            content_type='application/json'
        )

        response.getcode = lambda: response.status_int
        return response
Example No. 23
def test_post_talos_artifact(test_project, test_repository, result_set_stored,
                             mock_post_json):
    test_repository.save()

    # delete any previously-created perf objects until bug 1133273 is fixed
    # https://bugzilla.mozilla.org/show_bug.cgi?id=1133273 (this can be really
    # slow if the local database has a lot of objects in it)
    PerformanceSignature.objects.all().delete()
    PerformanceDatum.objects.all().delete()

    tjc = client.TreeherderJobCollection()
    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
    tj = client.TreeherderJob({
        'project': test_repository.name,
        'revision_hash': result_set_stored[0]['revision_hash'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
            'project': test_repository.name,
            'option_collection': {'opt': True},
            'artifacts': [{
                'blob': {'talos_data': SampleData.get_minimal_talos_perf_data()},
                'type': 'json',
                'name': 'talos_data',
                'job_guid': job_guid
            }]
        }
    })

    tjc.add(tj)

    do_post_collection(test_project, tjc)

    # we'll just validate that we got the expected number of results for
    # talos (we have validation elsewhere for the actual data adapters)
    assert PerformanceSignature.objects.count() == 2
    assert PerformanceDatum.objects.count() == 2
Example No. 24
    def check_json(self, filename, expected_timestamps):
        """Parse JSON produced by http://graphs.mozilla.org/api/test/runs"""
        # Configuration for Analyzer
        FORE_WINDOW = 12
        MIN_BACK_WINDOW = 12
        MAX_BACK_WINDOW = 24
        THRESHOLD = 7

        payload = SampleData.get_perf_data(os.path.join('graphs', filename))
        runs = payload['test_runs']
        data = []
        for r in runs:
            data.append(Datum(r[2], r[3], testrun_id=r[0],
                              revision_id=r[1][2]))

        results = detect_changes(data,
                                 min_back_window=MIN_BACK_WINDOW,
                                 max_back_window=MAX_BACK_WINDOW,
                                 fore_window=FORE_WINDOW,
                                 t_threshold=THRESHOLD)
        regression_timestamps = [
            d.push_timestamp for d in results if d.state == 'regression'
        ]
        self.assertEqual(regression_timestamps, expected_timestamps)
Example No. 25
def test_load_talos_data(test_project, test_repository,
                         perf_option_collection, perf_platform,
                         perf_job_data, perf_reference_data):

    PerformanceFramework.objects.create(name='talos')

    talos_perf_data = SampleData.get_talos_perf_data()
    for talos_datum in talos_perf_data:
        datum = {
            "job_guid": "fake_job_guid",
            "name": "test",
            "type": "test",
            "blob": talos_datum
        }

        # Mimic production environment, the blobs are serialized
        # when the web service receives them
        datum['blob'] = json.dumps({'talos_data': [datum['blob']]})
        load_talos_artifacts(test_repository.name, perf_reference_data,
                             perf_job_data, datum)

        # base: subtests + one extra result for the summary series
        expected_result_count = len(talos_datum["results"]) + 1

        # we create one performance series per counter
        if 'talos_counters' in talos_datum:
            expected_result_count += len(talos_datum["talos_counters"])

        # result count == number of signatures
        assert expected_result_count == PerformanceSignature.objects.all().count()

        expected_push_timestamp = datetime.datetime.fromtimestamp(
            perf_job_data['fake_job_guid']['push_timestamp'])

        # verify that we have signatures for the subtests
        for (testname, results) in talos_datum["results"].iteritems():
            signature = PerformanceSignature.objects.get(test=testname)

            datum = PerformanceDatum.objects.get(signature=signature)
            if talos_datum.get('summary'):
                # if we have a summary, ensure the subtest summary values made
                # it in and that we ingested lowerIsBetter ok (if it was there)
                subtest = talos_datum['summary']['subtests'][testname]
                assert round(subtest['filtered'], 2) == datum.value
                assert signature.lower_is_better == subtest.get('lowerIsBetter', True)
            else:
                # this is an old style talos blob without a summary. these are
                # going away, so I'm not going to bother testing the
                # correctness. however let's at least verify that some values
                # are being generated here
                assert datum.value
            assert datum.push_timestamp == expected_push_timestamp
        # if we have counters, verify that the series for them is as expected
        for (counter, results) in talos_datum.get('talos_counters',
                                                  {}).iteritems():
            signature = PerformanceSignature.objects.get(test=counter)
            datum = PerformanceDatum.objects.get(signature=signature)
            assert round(float(results['mean']), 2) == datum.value
            assert datum.push_timestamp == expected_push_timestamp

        # we should be left with just the summary series
        signature = PerformanceSignature.objects.get(
            test='',
            suite=talos_datum['testrun']['suite'])
        datum = PerformanceDatum.objects.get(signature=signature)
        if talos_datum.get('summary'):
            assert round(talos_datum['summary']['suite'], 2) == datum.value
        else:
            # old style talos blob without summary. again, going away,
            # but let's at least test that we have the value
            assert datum.value

        assert datum.push_timestamp == expected_push_timestamp

        # delete perf objects for next iteration
        PerformanceSignature.objects.all().delete()
        PerformanceDatum.objects.all().delete()
Example No. 26
def set_oauth_credentials():
    OAuthCredentials.set_credentials(SampleData.get_credentials())
Example No. 27
def set_oauth_credentials():
    OAuthCredentials.set_credentials(SampleData.get_credentials())
Example No. 28
def sample_data():
    """Returns a SampleData() object"""
    from sampledata import SampleData
    return SampleData()
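A minimal sketch (hypothetical test, not part of the original fixture) showing how pytest would inject the sample_data fixture by name; the isinstance check relies only on the import shown above:

from sampledata import SampleData


def test_sample_data_fixture(sample_data):
    # the fixture defined above returns a plain SampleData instance
    assert isinstance(sample_data, SampleData)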
Example No. 29
    def test_adapt_and_load(self):

        talos_perf_data = SampleData.get_talos_perf_data()

        for talos_datum in talos_perf_data:

            datum = {
                "job_guid": 'oqiwy0q847365qiu',
                "name": "test",
                "type": "test",
                "blob": talos_datum
            }

            job_data = {
                "oqiwy0q847365qiu": {
                    "id": 1,
                    "result_set_id": 1,
                    "push_timestamp": 1402692388
                }
            }

            reference_data = {
                "property1": "value1",
                "property2": "value2",
                "property3": "value3"
            }

            # Mimic production environment, the blobs are serialized
            # when the web service receives them
            datum['blob'] = json.dumps({'talos_data': [datum['blob']]})
            tda = TalosDataAdapter()
            tda.adapt_and_load(reference_data, job_data, datum)

            # base: subtests + one extra result for the summary series
            expected_result_count = len(talos_datum["results"]) + 1

            # we create one performance series per counter
            if 'talos_counters' in talos_datum:
                expected_result_count += len(talos_datum["talos_counters"])

            # result count == number of signatures
            self.assertEqual(expected_result_count, len(tda.signatures.keys()))

            # verify that we have signatures for the subtests
            signature_placeholders = copy.copy(
                tda.signature_property_placeholders)
            for (testname, results) in talos_datum["results"].iteritems():
                signature_placeholder = filter(
                    lambda p: p[2] == testname, signature_placeholders)
                self.assertEqual(len(signature_placeholder), 1)
                signature_hash = signature_placeholder[0][0]
                perfdata = tda.signatures[signature_hash][0]
                if talos_datum.get('summary'):
                    # if we have a summary, ensure the subtest summary values made
                    # it in
                    for measure in ['min', 'max', 'std', 'mean', 'median']:
                        self.assertEqual(
                            round(talos_datum['summary']['subtests'][testname][measure], 2),
                            perfdata[measure])
                else:
                    # this is an old style talos blob without a summary. these are going
                    # away, so I'm not going to bother testing the correctness. however
                    # let's at least verify that some values are being generated here
                    for measure in ['min', 'max', 'std', 'mean', 'median']:
                        self.assertTrue(perfdata[measure])

                # filter out this signature from data to process
                signature_placeholders = filter(
                    lambda p: p[0] != signature_hash, signature_placeholders)

            # if we have counters, verify that the series for them is as expected
            for (counter, results) in talos_datum.get('talos_counters',
                                                      {}).iteritems():
                signature_placeholder = filter(
                    lambda p: p[2] == counter, signature_placeholders)
                self.assertEqual(len(signature_placeholder), 1)
                signature_hash = signature_placeholder[0][0]
                perfdata = tda.signatures[signature_hash][0]
                for measure in ['max', 'mean']:
                    self.assertEqual(round(float(results[measure]), 2),
                                     perfdata[measure])
                # filter out this signature from data to process
                signature_placeholders = filter(
                    lambda p: p[0] != signature_hash, signature_placeholders)

            # we should be left with just summary signature placeholders
            self.assertEqual(len(signature_placeholders), 2)
            perfdata = tda.signatures[signature_placeholders[0][0]][0]
            if talos_datum.get('summary'):
                self.assertEqual(round(talos_datum['summary']['suite'], 2),
                                 perfdata['geomean'])
            else:
                # old style talos blob without summary. again, going away,
                # but let's at least test that we have the 'geomean' value
                # generated
                self.assertTrue(perfdata['geomean'])
Example No. 30
    def test_adapt_and_load(self):

        talos_perf_data = SampleData.get_talos_perf_data()
        for talos_datum in talos_perf_data:
            # delete any previously-created perf objects
            # FIXME: because of https://bugzilla.mozilla.org/show_bug.cgi?id=1133273
            # this can be really slow if we have a dev database with lots of
            # performance data in it (if the test succeeds, the transaction
            # will be rolled back so at least it won't pollute the production
            # database)
            PerformanceSignature.objects.all().delete()
            PerformanceDatum.objects.all().delete()

            datum = {
                "job_guid": 'oqiwy0q847365qiu',
                "name": "test",
                "type": "test",
                "blob": talos_datum
            }

            job_data = {
                "oqiwy0q847365qiu": {
                    "id": 1,
                    "result_set_id": 1,
                    "push_timestamp": 1402692388
                }
            }

            reference_data = {
                "option_collection_hash": self.OPTION_HASH,
                "machine_platform": self.MACHINE_PLATFORM,
                "property1": "value1",
                "property2": "value2",
                "property3": "value3"
            }

            # Mimic production environment, the blobs are serialized
            # when the web service receives them
            datum['blob'] = json.dumps({'talos_data': [datum['blob']]})
            tda = TalosDataAdapter()
            tda.adapt_and_load(self.REPO_NAME, reference_data, job_data, datum)

            # base: subtests + one extra result for the summary series
            expected_result_count = len(talos_datum["results"]) + 1

            # we create one performance series per counter
            if 'talos_counters' in talos_datum:
                expected_result_count += len(talos_datum["talos_counters"])

            # result count == number of signatures
            self.assertEqual(expected_result_count,
                             PerformanceSignature.objects.all().count())

            # verify that we have signatures for the subtests
            for (testname, results) in talos_datum["results"].iteritems():
                signature = PerformanceSignature.objects.get(test=testname)
                datum = PerformanceDatum.objects.get(signature=signature)
                if talos_datum.get('summary'):
                    # if we have a summary, ensure the subtest summary values made
                    # it in
                    self.assertEqual(
                        round(talos_datum['summary']['subtests'][testname]['filtered'], 2),
                        datum.value)
                else:
                    # this is an old style talos blob without a summary. these are going
                    # away, so I'm not going to bother testing the correctness. however
                    # let's at least verify that some values are being generated here
                    self.assertTrue(datum.value)

            # if we have counters, verify that the series for them is as expected
            for (counter, results) in talos_datum.get('talos_counters',
                                                      {}).iteritems():
                signature = PerformanceSignature.objects.get(test=counter)
                datum = PerformanceDatum.objects.get(signature=signature)
                self.assertEqual(round(float(results['mean']), 2),
                                 datum.value)

            # we should be left with just the summary series
            signature = PerformanceSignature.objects.get(
                test='',
                suite=talos_datum['testrun']['suite'])
            datum = PerformanceDatum.objects.get(signature=signature)
            if talos_datum.get('summary'):
                self.assertEqual(round(talos_datum['summary']['suite'], 2),
                                 datum.value)
            else:
                # old style talos blob without summary. again, going away,
                # but let's at least test that we have the value
                self.assertTrue(datum.value)
Example No. 31
def test_adapt_and_load():

    talos_perf_data = SampleData.get_talos_perf_data()

    tda = TalosDataAdapter()

    result_count = 0
    for datum in talos_perf_data:

        datum = {
            "job_guid": 'oqiwy0q847365qiu',
            "name": "test",
            "type": "test",
            "blob": datum
        }

        job_data = {
            "oqiwy0q847365qiu": {
                "id": 1,
                "result_set_id": 1,
                "push_timestamp": 1402692388
            }
        }

        reference_data = {
            "property1": "value1",
            "property2": "value2",
            "property3": "value3"
        }

        # one extra result for the summary series
        result_count += len(datum['blob']["results"]) + 1

        # we create one performance series per counter
        if 'talos_counters' in datum['blob']:
            result_count += len(datum['blob']["talos_counters"])

        # Mimic production environment, the blobs are serialized
        # when the web service receives them
        datum['blob'] = json.dumps({'talos_data': [datum['blob']]})
        tda.adapt_and_load(reference_data, job_data, datum)

        # we upload a summary with a suite and subtest values, +1 for suite
        if 'summary' in datum['blob']:
            results = json.loads(zlib.decompress(tda.performance_artifact_placeholders[-1][4]))
            data = json.loads(datum['blob'])['talos_data'][0]
            assert results["blob"]["performance_series"]["geomean"] == data['summary']['suite']

            # deal with the subtests now
            for i in range(0, len(data['summary']['subtests'])):
                subresults = json.loads(zlib.decompress(tda.performance_artifact_placeholders[-1 - i][4]))
                if 'subtest_signatures' in subresults["blob"]['signature_properties']:
                    # ignore summary signatures
                    continue

                subdata = data['summary']['subtests'][subresults["blob"]['signature_properties']['test']]
                for datatype in ['min', 'max', 'mean', 'median', 'std']:
                    assert subdata[datatype] == subresults["blob"]["performance_series"][datatype]
                if 'value' in subdata.keys():
                    assert subdata['value'] == subresults["blob"]["performance_series"]['value']
        else:
            # FIXME: the talos data blob we're currently using contains datums with summaries and those without
            # we should probably test non-summarized data as well
            pass

    assert result_count == len(tda.performance_artifact_placeholders)
Example No. 32
    def test_adapt_and_load(self):

        talos_perf_data = SampleData.get_talos_perf_data()

        for talos_datum in talos_perf_data:

            datum = {
                "job_guid": 'oqiwy0q847365qiu',
                "name": "test",
                "type": "test",
                "blob": talos_datum
            }

            job_data = {
                "oqiwy0q847365qiu": {
                    "id": 1,
                    "result_set_id": 1,
                    "push_timestamp": 1402692388
                }
            }

            reference_data = {
                "property1": "value1",
                "property2": "value2",
                "property3": "value3"
            }

            # Mimic production environment, the blobs are serialized
            # when the web service receives them
            datum['blob'] = json.dumps({'talos_data': [datum['blob']]})
            tda = TalosDataAdapter()
            tda.adapt_and_load(reference_data, job_data, datum)

            # base: subtests + one extra result for the summary series
            expected_result_count = len(talos_datum["results"]) + 1

            # we create one performance series per counter
            if 'talos_counters' in talos_datum:
                expected_result_count += len(talos_datum["talos_counters"])

            # result count == number of signatures
            self.assertEqual(expected_result_count, len(tda.signatures.keys()))

            # verify that we have signatures for the subtests
            signature_placeholders = copy.copy(
                tda.signature_property_placeholders)
            for (testname, results) in talos_datum["results"].iteritems():
                signature_placeholder = filter(lambda p: p[2] == testname,
                                               signature_placeholders)
                self.assertEqual(len(signature_placeholder), 1)
                signature_hash = signature_placeholder[0][0]
                perfdata = tda.signatures[signature_hash][0]
                if talos_datum.get('summary'):
                    # if we have a summary, ensure the subtest summary values made
                    # it in
                    for measure in ['min', 'max', 'std', 'mean', 'median']:
                        self.assertEqual(
                            round(talos_datum['summary']['subtests'][testname][measure], 2),
                            perfdata[measure])
                else:
                    # this is an old style talos blob without a summary. these are going
                    # away, so I'm not going to bother testing the correctness. however
                    # let's at least verify that some values are being generated here
                    for measure in ['min', 'max', 'std', 'mean', 'median']:
                        self.assertTrue(perfdata[measure])

                # filter out this signature from data to process
                signature_placeholders = filter(
                    lambda p: p[0] != signature_hash, signature_placeholders)

            # if we have counters, verify that the series for them is as expected
            for (counter, results) in talos_datum.get('talos_counters',
                                                      {}).iteritems():
                signature_placeholder = filter(lambda p: p[2] == counter,
                                               signature_placeholders)
                self.assertEqual(len(signature_placeholder), 1)
                signature_hash = signature_placeholder[0][0]
                perfdata = tda.signatures[signature_hash][0]
                for measure in ['max', 'mean']:
                    self.assertEqual(round(float(results[measure]), 2),
                                     perfdata[measure])
                # filter out this signature from data to process
                signature_placeholders = filter(
                    lambda p: p[0] != signature_hash, signature_placeholders)

            # we should be left with just summary signature placeholders
            self.assertEqual(len(signature_placeholders), 2)
            perfdata = tda.signatures[signature_placeholders[0][0]][0]
            if talos_datum.get('summary'):
                self.assertEqual(round(talos_datum['summary']['suite'], 2),
                                 perfdata['geomean'])
            else:
                # old style talos blob without summary. again, going away,
                # but let's at least test that we have the 'geomean' value
                # generated
                self.assertTrue(perfdata['geomean'])
Example No. 33
def test_load_talos_data(test_project, test_repository, perf_option_collection,
                         perf_platform, perf_job_data, perf_reference_data):

    PerformanceFramework.objects.create(name='talos')

    talos_perf_data = SampleData.get_talos_perf_data()
    for talos_datum in talos_perf_data:
        datum = {
            "job_guid": "fake_job_guid",
            "name": "test",
            "type": "test",
            "blob": talos_datum
        }

        # Mimic production environment, the blobs are serialized
        # when the web service receives them
        datum['blob'] = json.dumps({'talos_data': [datum['blob']]})
        load_talos_artifacts(test_repository.name, perf_reference_data,
                             perf_job_data, datum)

        # base: subtests + one extra result for the summary series
        expected_result_count = len(talos_datum["results"]) + 1

        # we create one performance series per counter
        if 'talos_counters' in talos_datum:
            expected_result_count += len(talos_datum["talos_counters"])

        # result count == number of signatures
        assert expected_result_count == PerformanceSignature.objects.all().count()

        expected_push_timestamp = datetime.datetime.fromtimestamp(
            perf_job_data['fake_job_guid']['push_timestamp'])

        # verify that we have signatures for the subtests
        for (testname, results) in talos_datum["results"].iteritems():
            signature = PerformanceSignature.objects.get(test=testname)

            datum = PerformanceDatum.objects.get(signature=signature)
            if talos_datum.get('summary'):
                # if we have a summary, ensure the subtest summary values made
                # it in and that we ingested lowerIsBetter ok (if it was there)
                subtest = talos_datum['summary']['subtests'][testname]
                assert round(subtest['filtered'], 2) == datum.value
                assert signature.lower_is_better == subtest.get(
                    'lowerIsBetter', True)
            else:
                # this is an old style talos blob without a summary. these are
                # going away, so I'm not going to bother testing the
                # correctness. however let's at least verify that some values
                # are being generated here
                assert datum.value
            assert datum.push_timestamp == expected_push_timestamp
        # if we have counters, verify that the series for them is as expected
        for (counter, results) in talos_datum.get('talos_counters',
                                                  {}).iteritems():
            signature = PerformanceSignature.objects.get(test=counter)
            datum = PerformanceDatum.objects.get(signature=signature)
            assert round(float(results['mean']), 2) == datum.value
            assert datum.push_timestamp == expected_push_timestamp

        # we should be left with just the summary series
        signature = PerformanceSignature.objects.get(
            test='', suite=talos_datum['testrun']['suite'])
        datum = PerformanceDatum.objects.get(signature=signature)
        if talos_datum.get('summary'):
            assert round(talos_datum['summary']['suite'], 2) == datum.value
        else:
            # old style talos blob without summary. again, going away,
            # but let's at least test that we have the value
            assert datum.value

        assert datum.push_timestamp == expected_push_timestamp

        # delete perf objects for next iteration
        PerformanceSignature.objects.all().delete()
        PerformanceDatum.objects.all().delete()
Example No. 34
    def test_load_talos_data(self):

        PerformanceFramework.objects.get_or_create(name='talos')

        talos_perf_data = SampleData.get_talos_perf_data()
        for talos_datum in talos_perf_data:
            (job_data, reference_data) = self._get_job_and_reference_data()

            datum = {
                "job_guid": self.JOB_GUID,
                "name": "test",
                "type": "test",
                "blob": talos_datum
            }

            # Mimic production environment, the blobs are serialized
            # when the web service receives them
            datum['blob'] = json.dumps({'talos_data': [datum['blob']]})
            load_talos_artifacts(self.REPO_NAME, reference_data, job_data, datum)

            # base: subtests + one extra result for the summary series
            expected_result_count = len(talos_datum["results"]) + 1

            # we create one performance series per counter
            if 'talos_counters' in talos_datum:
                expected_result_count += len(talos_datum["talos_counters"])

            # result count == number of signatures
            self.assertEqual(expected_result_count,
                             PerformanceSignature.objects.all().count())

            # verify that we have signatures for the subtests
            for (testname, results) in talos_datum["results"].iteritems():
                signature = PerformanceSignature.objects.get(test=testname)
                datum = PerformanceDatum.objects.get(signature=signature)
                if talos_datum.get('summary'):
                    # if we have a summary, ensure the subtest summary values made
                    # it in and that we ingested lowerIsBetter ok (if it was there)
                    subtest = talos_datum['summary']['subtests'][testname]
                    self.assertEqual(
                        round(subtest['filtered'], 2), datum.value)
                    self.assertEqual(signature.lower_is_better,
                                     subtest.get('lowerIsBetter', True))
                else:
                    # this is an old style talos blob without a summary. these are going
                    # away, so I'm not going to bother testing the correctness. however
                    # let's at least verify that some values are being generated here
                    self.assertTrue(datum.value)
                self.assertEqual(datum.push_timestamp,
                                 datetime.datetime.fromtimestamp(
                                     self.PUSH_TIMESTAMP))

            # if we have counters, verify that the series for them is as expected
            for (counter, results) in talos_datum.get('talos_counters',
                                                      {}).iteritems():
                signature = PerformanceSignature.objects.get(test=counter)
                datum = PerformanceDatum.objects.get(signature=signature)
                self.assertEqual(round(float(results['mean']), 2),
                                 datum.value)
                self.assertEqual(datum.push_timestamp,
                                 datetime.datetime.fromtimestamp(
                                     self.PUSH_TIMESTAMP))

            # we should be left with just the summary series
            signature = PerformanceSignature.objects.get(
                test='',
                suite=talos_datum['testrun']['suite'])
            datum = PerformanceDatum.objects.get(signature=signature)
            if talos_datum.get('summary'):
                self.assertEqual(round(talos_datum['summary']['suite'], 2),
                                 datum.value)
            else:
                # old style talos blob without summary. again, going away,
                # but let's at least test that we have the value
                self.assertTrue(datum.value)
            self.assertEqual(datum.push_timestamp,
                             datetime.datetime.fromtimestamp(
                                 self.PUSH_TIMESTAMP))

            # delete perf objects for next iteration
            PerformanceSignature.objects.all().delete()
            PerformanceDatum.objects.all().delete()