Code Example #1
File: process_objects.py  Project: hfeeki/datazilla
    def handle_project(self, project, **options):
        self.stdout.write("Processing project {0}\n".format(project))

        pushlog_project = options.get("pushlog_project", 'pushlog')
        loadlimit = int(options.get("loadlimit", 1))
        debug = options.get("debug", None)

        test_run_ids = []
        ptm = PerformanceTestModel(project)
        test_run_ids = ptm.process_objects(loadlimit)
        ptm.disconnect()

        metrics_exclude_projects = set(['b2g', 'stoneridge'])

        if project not in metrics_exclude_projects:
            #minimum required number of replicates for
            #metrics processing
            replicate_min = 5
            compute_test_run_metrics(
                project, pushlog_project, debug, replicate_min, test_run_ids
                )
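
The call at the end of this example hands the freshly processed test run ids straight to compute_test_run_metrics. Below is a minimal sketch of that same flow outside the management command, assuming PerformanceTestModel and compute_test_run_metrics are imported as in the file above and using the positional order shown there (project, pushlog_project, debug, replicate_min, test_run_ids); the project name is a hypothetical placeholder.

    # Sketch only: "talos" is a hypothetical project name, not taken from the example.
    project = "talos"
    pushlog_project = "pushlog"    # default used by handle_project above
    replicate_min = 5              # minimum replicates required for metrics processing

    ptm = PerformanceTestModel(project)
    test_run_ids = ptm.process_objects(1)   # process at most one stored object
    ptm.disconnect()

    if test_run_ids:
        compute_test_run_metrics(
            project, pushlog_project, False, replicate_min, test_run_ids
        )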
Code Example #2
def setup_pushlog_walk_tests(
    mtm, ptm, plm, monkeypatch, load_objects=False
    ):
    """
    Builds the sample pushlog, iterates through each push storing
    a modified version of perftest_data where the testrun.date is
    set to the associated push date and test_build.revision is set to
    the push node.  In addition, two specialized sample data structures
    are created to test missing perftest data and abnormally high test
    values that should cause t-test failure.

    The setup_data structure returned can be used to test metric values,
    thresholds, summary data, and push log walking logic.
    """

    setup_data = {}
    now = int( time.time() )

    #monkey patch in sample pushlog
    def mock_urlopen(nuttin_honey):
        return get_pushlog_json_readable(get_pushlog_json_set())
    monkeypatch.setattr(urllib, 'urlopen', mock_urlopen)

    branch = 'Firefox'
    result = plm.store_pushlogs("test_host", 1, branch=branch)

    #load perftest data that corresponds to the pushlog data
    #store parent chain for tests
    setup_data['branch'] = branch
    setup_data['testsuite_name'] = ""
    setup_data['skip_revision'] = ""
    setup_data['skip_index'] = 2
    setup_data['fail_revision'] = ""
    setup_data['test_fail_index'] = 4
    setup_data['sample_revisions'] = []
    setup_data['sample_dates'] = {}

    #This is only populated if caller requests load_objects
    setup_data['test_run_ids'] = {}

    setup_data['branch_pushlog'] = plm.get_branch_pushlog(1)

    #Build list of revisions to operate on
    for index, node in enumerate( setup_data['branch_pushlog'] ):
        revision = mtm.truncate_revision(node['node'])

        setup_data['sample_dates'][revision] = node['date']

        setup_data['sample_revisions'].append(revision)
        if index == setup_data['skip_index']:
            setup_data['skip_revision'] = revision
            continue

    #Load sample data for all of the revisions
    for index, revision in enumerate( setup_data['sample_revisions'] ):

        #if revision == setup_data['skip_revision']:
        if index == setup_data['skip_index']:
            continue

        sample_data = {}

        if index == setup_data['test_fail_index']:
            #Set up test run values to fail ttest
            data = [50000, 60000, 70000]

            sample_data = TestData( perftest_data(
                testrun={ 'date':setup_data['sample_dates'][revision] },
                test_build={ 'revision': revision, 'branch':branch },
                results={'one.com':data,
                         'two.com':data,
                         'three.com':data}
                )
            )
            setup_data['fail_revision'] = revision

        else:
            sample_data = TestData( perftest_data(
                testrun={ 'date':setup_data['sample_dates'][revision] },
                test_build={ 'revision': revision, 'branch':branch },
                )
            )

        if not setup_data['testsuite_name']:
            setup_data['testsuite_name'] = sample_data['testrun']['suite']

        if load_objects:
            ptm.store_test_data( json.dumps( sample_data ) )
            test_run_ids = ptm.process_objects(2)
            compute_test_run_metrics(
                ptm.project, plm.project, False, 3, test_run_ids
            )

            setup_data['test_run_ids'][revision] = test_run_ids

        else:
            #Load sample data
            ptm.load_test_data(sample_data)

    revision_count = len( setup_data['sample_revisions'] )

    setup_data['target_revision_index'] = revision_count - 1

    return setup_data
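
As the docstring notes, the returned setup_data dict carries the revisions, indexes, and (when load_objects is requested) test run ids that a test needs. A sketch of how a pytest test might consume it, assuming the mtm, ptm, plm, and monkeypatch fixtures this helper expects are provided by the surrounding test suite; the test name is hypothetical.

    # Sketch only: fixture names mirror the helper's parameters.
    def test_pushlog_walk_skips_missing_data(mtm, ptm, plm, monkeypatch):
        setup_data = setup_pushlog_walk_tests(
            mtm, ptm, plm, monkeypatch, load_objects=True
        )

        # No perftest data is loaded for the revision at skip_index,
        # so no test run ids are recorded for it
        assert setup_data['skip_revision'] not in setup_data['test_run_ids']

        # The revision at test_fail_index carries abnormally high values
        # intended to fail the t-test, but its data is still loaded
        assert setup_data['fail_revision'] in setup_data['test_run_ids']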
Code Example #3
def setup_pushlog_walk_tests(mtm, ptm, plm, monkeypatch, load_objects=False):
    """
    Builds the sample pushlog, iterates through each push storing
    a modified version of perftest_data where the testrun.date is
    set to the associated push date and test_build.revision is set to
    the push node.  In addition, two specialized sample data structures
    are created to test missing perftest data and abnormally high test
    values that should cause t-test failure.

    The setup_data structure returned can be used to test metric values,
    thresholds, summary data, and push log walking logic.
    """

    setup_data = {}
    now = int(time.time())

    #monkey patch in sample pushlog
    def mock_urlopen(nuttin_honey):
        return get_pushlog_json_readable(get_pushlog_json_set())

    monkeypatch.setattr(urllib, 'urlopen', mock_urlopen)

    branch = 'Firefox'
    result = plm.store_pushlogs("test_host", 1, branch=branch)

    #load perftest data that corresponds to the pushlog data
    #store parent chain for tests
    setup_data['branch'] = branch
    setup_data['testsuite_name'] = ""
    setup_data['skip_revision'] = ""
    setup_data['skip_index'] = 2
    setup_data['fail_revision'] = ""
    setup_data['test_fail_index'] = 4
    setup_data['sample_revisions'] = []
    setup_data['sample_dates'] = {}

    #This is only populated if caller requests load_objects
    setup_data['test_run_ids'] = {}

    setup_data['branch_pushlog'] = plm.get_branch_pushlog(1)

    #Build list of revisions to operate on
    for index, node in enumerate(setup_data['branch_pushlog']):
        revision = mtm.truncate_revision(node['node'])

        setup_data['sample_dates'][revision] = node['date']

        setup_data['sample_revisions'].append(revision)
        if index == setup_data['skip_index']:
            setup_data['skip_revision'] = revision
            continue

    #Load sample data for all of the revisions
    for index, revision in enumerate(setup_data['sample_revisions']):

        #if revision == setup_data['skip_revision']:
        if index == setup_data['skip_index']:
            continue

        sample_data = {}

        if index == setup_data['test_fail_index']:
            #Set up test run values to fail ttest
            data = [50000, 60000, 70000]

            sample_data = TestData(
                perftest_data(
                    testrun={'date': setup_data['sample_dates'][revision]},
                    test_build={
                        'revision': revision,
                        'branch': branch
                    },
                    results={
                        'one.com': data,
                        'two.com': data,
                        'three.com': data
                    }))
            setup_data['fail_revision'] = revision

        else:
            sample_data = TestData(
                perftest_data(
                    testrun={'date': setup_data['sample_dates'][revision]},
                    test_build={
                        'revision': revision,
                        'branch': branch
                    },
                ))

        if not setup_data['testsuite_name']:
            setup_data['testsuite_name'] = sample_data['testrun']['suite']

        if load_objects:
            ptm.store_test_data(json.dumps(sample_data))
            test_run_ids = ptm.process_objects(2)
            compute_test_run_metrics(ptm.project, plm.project, False, 3,
                                     test_run_ids)

            setup_data['test_run_ids'][revision] = test_run_ids

        else:
            #Load sample data
            ptm.load_test_data(sample_data)

    revision_count = len(setup_data['sample_revisions'])

    setup_data['target_revision_index'] = revision_count - 1

    return setup_data
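
Both versions of this helper monkeypatch urllib.urlopen, which only exists under Python 2; Python 3 moved that function to urllib.request.urlopen. A sketch of the equivalent patch for Python 3, leaving the rest of the helper unchanged:

    # Sketch only: Python 3 variant of the monkeypatching shown above.
    import urllib.request

    def mock_urlopen(nuttin_honey):
        return get_pushlog_json_readable(get_pushlog_json_set())

    monkeypatch.setattr(urllib.request, 'urlopen', mock_urlopen)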