def test_claim_objects(ptm):
    """``claim_objects`` claims & returns unclaimed rows up to a limit."""
    blobs = [
        perftest_json(testrun={"date": "1330454755"}),
        perftest_json(testrun={"date": "1330454756"}),
        perftest_json(testrun={"date": "1330454757"}),
    ]

    for blob in blobs:
        ptm.store_test_data(blob)

    rows1 = ptm.claim_objects(2)

    # a separate worker with a separate connection
    from datazilla.model import PerformanceTestModel
    dm2 = PerformanceTestModel(ptm.project)

    rows2 = dm2.claim_objects(2)

    loading_rows = ptm.sources["objectstore"].dhub.execute(
        proc="objectstore_test.counts.loading")[0]["loading_count"]

    assert len(rows1) == 2
    # second worker asked for two rows but only got one that was left
    assert len(rows2) == 1

    # all three blobs were fetched by one of the workers
    assert set([r["json_blob"] for r in rows1 + rows2]) == set(blobs)

    # the blobs are all marked as "loading" in the database
    assert loading_rows == 3
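The two-worker behavior above depends on ``claim_objects`` atomically marking rows before returning them, so concurrent connections never receive the same blob. A minimal sketch of that claim pattern, with hypothetical table and column names (the real statements live in the objectstore proc files):

import uuid

def claim_objects_sketch(cursor, limit):
    # Tag unclaimed rows with a worker-unique token; the single UPDATE is
    # atomic, so two workers can never tag the same row.
    token = uuid.uuid4().hex
    cursor.execute(
        "UPDATE objectstore"
        " SET processed_flag = 'loading', worker_id = %s"
        " WHERE processed_flag = 'ready'"
        " LIMIT %s",
        (token, limit))
    # Read back only the rows this worker just tagged.
    cursor.execute(
        "SELECT id, json_blob FROM objectstore WHERE worker_id = %s",
        (token,))
    return cursor.fetchall()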
Example 2
def set_test_data(request, project=""):
    """
    Post a JSON blob of data for the specified project.

    Store the JSON in the objectstore where it will be held for
    later processing.

    """
    ####
    # This conditional provides backwards compatibility with
    # the talos production environment. It should be removed
    # once the production environment is uniformly using the
    # new url format.
    ####
    if project == 'views':
        project = 'talos'

    # default to bad request if the JSON is malformed or not present
    status = 400

    try:
        json_data = request.POST['data']
    except KeyError:
        result = {"status":"No POST data found"}
    else:
        unquoted_json_data = urllib.unquote(json_data)

        error = None

        try:
            json.loads(unquoted_json_data)
        except ValueError as e:
            error = "Malformed JSON: {0}".format(e.message)
            result = {"status": "Malformed JSON", "message": error}
        else:
            result = {
                "status": "well-formed JSON stored",
                "size": len(unquoted_json_data),
            }

        try:
            dm = PerformanceTestModel(project)
            id = dm.store_test_data(unquoted_json_data, error)
            dm.disconnect()
        except Exception as e:
            status = 500
            result = {"status": "Unknown error", "message": str(e)}
        else:

            location = "/{0}/refdata/objectstore/json_blob/{1}".format(
                project, str(id)
                )

            result['url'] = request.build_absolute_uri(location)

            if not error:
                status = 200


    return HttpResponse(json.dumps(result), mimetype=APP_JS, status=status)
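As a client-side usage sketch, data is submitted to this view as a url-encoded JSON string under the ``data`` POST field (the host and endpoint path here are placeholders):

import json
import urllib
import urllib2

payload = {"testrun": {"date": "1330454755"}}
post_body = urllib.urlencode({"data": json.dumps(payload)})
response = urllib2.urlopen(
    "https://datazilla.example.com/talos/api/load_test", post_body)
print response.read()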
Example 3
def graphs(request, project=""):

    ####
    #Load any signals provided in the page
    ####
    signals = []
    time_ranges = utils.get_time_ranges()

    for s in SIGNALS:
        if s in request.POST:
            signals.append({'value': urllib.unquote(request.POST[s]),
                            'name': s})
    ###
    #Get reference data
    ###
    ptm = PerformanceTestModel(project)
    json_data = ptm.get_test_reference_data()
    ptm.disconnect()

    time_key = 'days_30'

    data = {'time_key': time_key,
            'reference_json': json_data,
            'signals': signals}

    ####
    # Caller has provided the view parent of the signals; load it in
    # the page. This occurs when a data view is in its Pane form and
    # is detached to exist on its own page.
    ####
    parent_index_key = 'dv_parent_dview_index'
    if parent_index_key in request.POST:
        data[parent_index_key] = request.POST[parent_index_key]

    return render_to_response('graphs.views.html', data)
Example 4
    def handle_project(self, project, **options):

        debug = options.get("debug", None)
        max_iterations = int(options.get("iterations", 50))

        ptm = PerformanceTestModel(project)

        sql_targets = {}

        cycle_iterations = max_iterations

        while cycle_iterations > 0:

            sql_targets = ptm.cycle_data(sql_targets)

            if debug:
                print "Iterations: {0}".format(str(cycle_iterations))
                print "sql_targets"
                print sql_targets

            cycle_iterations -= 1

            if sql_targets['total_count'] == 0:
                cycle_iterations = 0

        ptm.disconnect()
Example 7
def load_test_collection(project):

    ptm = PerformanceTestModel(project)

    products = ptm.get_products('id')

    product_names = {}

    for id in products:

        if products[id]['product'] and \
           products[id]['version'] and \
           products[id]['branch']:

            name = get_test_collection_name(
                products[id]['product'],
                products[id]['version'],
                products[id]['branch'],
                )

            product_names[name] = id

    test_collection_names = ptm.get_test_collection_set()

    new_name_set = set(product_names.keys()).difference(test_collection_names)

    for new_name in new_name_set:
        id = ptm.set_test_collection(new_name, "")
        ptm.set_test_collection_map(id, product_names[new_name])

    ptm.cache_ref_data()

    ptm.disconnect()
Example 8
    def handle_project(self, project, options):
        self.stdout.write("Processing project {0}\n".format(project))

        loadlimit = int(options.get("loadlimit", 1))

        dm = PerformanceTestModel(project)
        dm.process_objects(loadlimit)
        dm.disconnect()
Example 9
def cache_test_summaries(project):

    ptm = PerformanceTestModel(project)

    ###
    # New reference data could be found in the cached summary
    # structures, so update the cached reference data every
    # time the summary data is cached.
    ###
    ptm.cache_ref_data()
    ptm.cache_default_project()

    data_iter = ptm.get_all_summary_cache()

    for d in data_iter:
        for data in d:
            key = utils.get_summary_cache_key(
                project,
                data['item_id'],
                data['item_data'],
            )
            rv = cache.set(key, zlib.compress(data['value']))
            if not rv:
                msg = "ERROR: Failed to store object in memcache: %s, %s\n" % \
                        ( str(data['item_id']), data['item_data'] )
                sys.stderr.write(msg)

    ptm.disconnect()
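The read side of this cache is not shown here, but it would rebuild the same key and decompress the stored value; a sketch, assuming the same module-level ``cache``, ``utils``, and ``zlib`` imports used above:

def get_summary_cache_sketch(project, item_id, item_data):
    # Mirror of the cache.set() call above: same key derivation,
    # inverse compression.
    key = utils.get_summary_cache_key(project, item_id, item_data)
    compressed = cache.get(key)
    if compressed is None:
        return None
    return zlib.decompress(compressed)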
Example 10
def dataview(request, project="", method=""):

    proc_path = "perftest.views."

    ## Full proc name, including the base path in the json file ##
    full_proc_path = "%s%s" % (proc_path, method)

    if settings.DEBUG:
        ###
        #Write IP address and datetime to log
        ###
        print "Client IP:%s" % (request.META['REMOTE_ADDR'])
        print "Request Datetime:%s" % (str(datetime.datetime.now()))

    json = ""
    if method in DATAVIEW_ADAPTERS:
        dm = PerformanceTestModel(project)
        pt_dhub = dm.sources["perftest"].dhub

        if 'adapter' in DATAVIEW_ADAPTERS[method]:
            json = DATAVIEW_ADAPTERS[method]['adapter'](project,
                                                        method,
                                                        request,
                                                        dm)
        else:
            if 'fields' in DATAVIEW_ADAPTERS[method]:
                fields = []
                for f in DATAVIEW_ADAPTERS[method]['fields']:
                    if f in request.GET:
                        fields.append( int( request.GET[f] ) )

                if len(fields) == len(DATAVIEW_ADAPTERS[method]['fields']):
                    json = pt_dhub.execute(proc=full_proc_path,
                                           debug_show=settings.DEBUG,
                                           placeholders=fields,
                                           return_type='table_json')

                else:
                    json = '{ "error":"{0} fields required, {1} provided" }'.format(
                        (str(len(DATAVIEW_ADAPTERS[method]['fields'])),
                        str(len(fields))) )

            else:

                json = pt_dhub.execute(proc=full_proc_path,
                                       debug_show=settings.DEBUG,
                                       return_type='table_json')

        dm.disconnect()

    else:
        json = '{ "error":"Data view name %s not recognized" }' % method

    return HttpResponse(json, mimetype=APP_JS)
Example 11
    def _wrap_oauth(request, *args, **kwargs):
        project = kwargs.get('project', None)

        ###
        # Until the production environment for talos can make use of
        # OAuth or API keys we need to bypass OAuth to ingest data.
        # This needs to be removed as soon as talos can support OAuth.
        ###
        if project in ['talos', 'views']:
            return func(request, *args, **kwargs)

        dm = PerformanceTestModel(project)

        #Get the consumer key
        key = request.REQUEST.get('oauth_consumer_key', None)

        if key is None:
            result = {"status": "No OAuth credentials provided."}
            return HttpResponse(json.dumps(result),
                                content_type=APP_JS,
                                status=403)

        try:
            #Get the consumer secret stored with this key
            ds_consumer_secret = dm.get_oauth_consumer_secret(key)
        except DatasetNotFoundError:
            result = {"status": "Unknown project '%s'" % project}
            return HttpResponse(json.dumps(result),
                                content_type=APP_JS,
                                status=404)

        #Construct the OAuth request based on the django request object
        req_obj = oauth.Request(request.method, request.build_absolute_uri(),
                                request.REQUEST, '', False)

        server = oauth.Server()

        #Get the consumer object
        cons_obj = oauth.Consumer(key, ds_consumer_secret)

        #Set the signature method
        server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())

        try:
            #verify oauth django request and consumer object match
            server.verify_request(req_obj, cons_obj, None)
        except oauth.Error:
            status = 403
            result = {"status": "Oauth verification error."}
            return HttpResponse(json.dumps(result),
                                content_type=APP_JS,
                                status=status)

        return func(request, *args, **kwargs)
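For a request to pass this verification, the client must sign it with the same consumer key and secret using HMAC-SHA1. A sketch using the ``oauth2`` library (key, secret, and URL are placeholders):

import time
import oauth2 as oauth

consumer = oauth.Consumer(key="my-consumer-key", secret="my-consumer-secret")
params = {
    'oauth_version': '1.0',
    'oauth_nonce': oauth.generate_nonce(),
    'oauth_timestamp': str(int(time.time())),
    'oauth_consumer_key': consumer.key,
    'data': '%7B%22testrun%22%3A%7B%7D%7D',  # url-quoted JSON payload
}
req = oauth.Request(
    method='POST',
    url='https://datazilla.example.com/talos/api/load_test',
    parameters=params)
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), consumer, None)
# req now carries oauth_signature; send req.to_postdata() as the POST body.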
Example 15
def set_default_products(project):

    ptm = PerformanceTestModel(project)

    products = ptm.get_products('id')

    versions = [products[id]['version'] for id in products]

    #sort version strings
    versions.sort(key=lambda s: map(numeric_prefix, s.split('.')),
                  reverse=True)

    if versions:

        current_version = versions[0]

        default_count = 0
        for id in products:
            default = 0
            if current_version == products[id]['version']:
                default_count += 1
                #Don't load more than 10 datasets by default
                if default_count <= 10:
                    default = 1

            ptm.set_default_product(id, default)

        ptm.cache_default_project()
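``numeric_prefix`` is a helper defined elsewhere in this module; judging by its use in the sort key, it presumably extracts the leading integer of each dotted version component so that, e.g., "14.0a1" sorts as [14, 0]. A plausible sketch:

import re

def numeric_prefix(s):
    # Leading digits of one version component, or 0 when there are none,
    # e.g. "14" -> 14, "0a1" -> 0.
    match = re.match(r"(\d+)", s)
    return int(match.group(1)) if match else 0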
Example 16
def pytest_sessionstart(session):
    """
    Set up the test environment.

    Sets DJANGO_SETTINGS_MODULE, adds the vendor lib, and sets up a test
    database.

    """
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "datazilla.settings.base")
    add_vendor_lib()

    from django.conf import settings
    from django.test.simple import DjangoTestSuiteRunner
    # we don't actually let Django run the tests, but we need to use some
    # methods of its runner for setup/teardown of dbs and some other things
    session.django_runner = DjangoTestSuiteRunner()
    # this provides templates-rendered debugging info and locmem mail storage
    session.django_runner.setup_test_environment()
    # support custom db prefix for tests for the main datazilla datasource
    # as well as for the testproj and testpushlog dbs
    prefix = getattr(settings, "TEST_DB_PREFIX", "")
    settings.DATABASES["default"]["TEST_NAME"] = "{0}test_datazilla".format(prefix)
    # this sets up a clean test-only database
    session.django_db_config = session.django_runner.setup_databases()
    # store the name of the test project/pushlog based on user custom settings
    session.perftest_name = "{0}testproj".format(prefix)
    session.pushlog_name = "{0}testpushlog".format(prefix)

    increment_cache_key_prefix()

    from datazilla.model import PerformanceTestModel, PushLogModel
    ptm = PerformanceTestModel.create(
        session.perftest_name,
        cron_batch="small",
        )
    PushLogModel.create(project=session.pushlog_name)

    # patch in additional test-only procs on the datasources
    objstore = ptm.sources["objectstore"]
    del objstore.dhub.procs[objstore.datasource.key]
    objstore.dhub.data_sources[objstore.datasource.key]["procs"].append(
        os.path.join(
            os.path.abspath(os.path.dirname(__file__)),
            "objectstore_test.json",
            )
        )
    objstore.dhub.load_procs(objstore.datasource.key)

    perftest = ptm.sources["perftest"]
    del perftest.dhub.procs[perftest.datasource.key]
    perftest.dhub.data_sources[perftest.datasource.key]["procs"].append(
        os.path.join(
            os.path.abspath(os.path.dirname(__file__)),
            "perftest_test.json",
            )
        )
    perftest.dhub.load_procs(perftest.datasource.key)
Example 17
    def handle_project(self, project, **options):

        self.stdout.write("Processing project {0}\n".format(project))

        pushlog_project = options.get("pushlog_project", 'pushlog')
        loadlimit = int(options.get("loadlimit", 1))
        debug = options.get("debug", None)

        test_run_ids = []
        ptm = PerformanceTestModel(project)
        test_run_ids = ptm.process_objects(loadlimit)
        ptm.disconnect()

        """
        metrics_exclude_projects = set(['b2g', 'games', 'jetperf', 'marketapps', 'microperf', 'stoneridge', 'test', 'webpagetest'])
        if project not in metrics_exclude_projects:
            #minimum required number of replicates for
            #metrics processing
            replicate_min = 5
            compute_test_run_metrics(
                project, pushlog_project, debug, replicate_min, test_run_ids
                )
        """

        mtm = MetricsTestModel(project)
        revisions_without_push_data = mtm.load_test_data_all_dimensions(
            test_run_ids)

        if revisions_without_push_data:

            revision_nodes = {}
            plm = PushLogModel(pushlog_project)

            for revision in revisions_without_push_data:

                node = plm.get_node_from_revision(
                    revision, revisions_without_push_data[revision])

                revision_nodes[revision] = node

            plm.disconnect()
            mtm.set_push_data_all_dimensions(revision_nodes)

        mtm.disconnect()
Example 19
def pytest_funcarg__ptm(request):
    """
    Give a test access to a PerformanceTestModel instance.

    """
    from datazilla.model import PerformanceTestModel

    ptm = PerformanceTestModel(request._pyfuncitem.session.perftest_name)
    request.addfinalizer(partial(truncate, ptm, ["metric", "metric_value"]))

    return ptm
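Any test that declares a ``ptm`` argument receives this instance (that is how ``test_claim_objects`` above gets its model), and the finalizer truncates the ``metric`` and ``metric_value`` tables after each test. A hypothetical test using the funcarg:

def test_ptm_funcarg(ptm):
    # ptm points at the session-wide test project created in
    # pytest_sessionstart
    assert ptm.project.endswith("testproj")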
Example 20
    def handle_project(self, project, **options):
        self.stdout.write("Processing project {0}\n".format(project))

        pushlog_project = options.get("pushlog_project", 'pushlog')
        loadlimit = int(options.get("loadlimit", 1))
        debug = options.get("debug", None)

        test_run_ids = []
        ptm = PerformanceTestModel(project)
        test_run_ids = ptm.process_objects(loadlimit)
        ptm.disconnect()

        metrics_exclude_projects = set(['b2g', 'stoneridge'])

        if project not in metrics_exclude_projects:
            #minimum required number of replicates for
            #metrics processing
            replicate_min = 5
            compute_test_run_metrics(
                project, pushlog_project, debug, replicate_min, test_run_ids
                )
Example 22
def pytest_sessionfinish(session):
    """Tear down the test environment, including databases."""
    print("\n")

    from django.conf import settings
    from datazilla.model import PerformanceTestModel, PushLogModel
    import MySQLdb

    source_list = PerformanceTestModel(session.perftest_name).sources.values()
    source_list.extend(
        PushLogModel(project=session.pushlog_name).sources.values())
    for sds in source_list:
        conn = MySQLdb.connect(
            host=sds.datasource.host,
            user=settings.DATAZILLA_DATABASE_USER,
            passwd=settings.DATAZILLA_DATABASE_PASSWORD,
            )
        cur = conn.cursor()
        cur.execute("DROP DATABASE {0}".format(sds.datasource.name))
        conn.close()

    session.django_runner.teardown_databases(session.django_db_config)
    session.django_runner.teardown_test_environment()
Example 25
def pytest_runtest_teardown(item):
    """
    Per-test teardown.

    Rolls back the Django ORM transaction and truncates tables in the
    test PerformanceTestModel database.

    """
    from django.test.testcases import restore_transaction_methods
    from django.db import transaction
    from datazilla.model import PerformanceTestModel

    restore_transaction_methods()
    transaction.rollback()
    transaction.leave_transaction_management()

    ptm = PerformanceTestModel(item.session.perftest_name)
    truncate(ptm, set(['metric', 'metric_value']))
Example 26
def build_test_summaries(project):

    ptm = PerformanceTestModel(project)

    time_ranges = utils.get_time_ranges()

    products = ptm.get_products()

    for product_name in products:

        for tr in ['days_7', 'days_30']:

            table = ptm.get_test_run_summary(str(time_ranges[tr]['start']),
                                             str(time_ranges[tr]['stop']),
                                             [products[product_name]], [], [])

            json_data = json.dumps(table)
            ptm.set_summary_cache(products[product_name], tr, json_data)

    ptm.disconnect()
Example 27
    def handle_project(self, project, **options):
        """ Create databases for a new project based on the options value. """

        cron_batch = options.get("cron_batch")

        hosts = dict(
            perftest=options.get("perftest_host"),
            objectstore=options.get("objectstore_host"),
        )

        types = dict(
            perftest=options.get("perftest_type"),
            objectstore=options.get("objectstore_type"),
        )

        dm = PerformanceTestModel.create(
            project,
            hosts=hosts,
            types=types,
            cron_batch=cron_batch,
        )
        self.stdout.write("Perftest project created: {0}\n".format(project))
        dm.disconnect()
Example 30
    def get_ptm(self, product):
        product = product.lower()
        if product not in self.models:
            self.models[product] = PerformanceTestModel(product)
        return self.models[product]
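This method memoizes one model per product, so repeated lookups share a connection instead of opening a new one per call; it assumes ``self.models`` was initialized to an empty dict. A hypothetical caller (``registry`` stands in for whatever object owns ``self.models``):

ptm = registry.get_ptm("Firefox")   # creates PerformanceTestModel("firefox")
same = registry.get_ptm("FIREFOX")  # lookup is case-insensitive and cached
assert ptm is same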
Example 31
    def handle(self, *args, **options):
        """ Transfer data to a development project based on the args value. """

        host = options.get("host")
        dev_project = options.get("dev_project")
        prod_project = options.get("prod_project")
        branch = options.get("branch")
        days_ago = options.get("days_ago")
        logfile = options.get("logfile")

        if not host:
            self.println("You must supply a host name to retrieve data from " +
                         "--host hostname")
            return

        if not dev_project:
            self.println("You must supply a dev_project name to load data in.")
            return

        if not branch:
            self.println("You must supply a branch name to retrieve data for.")
            return

        #Set timeout so we don't hang
        timeout = 120
        socket.setdefaulttimeout(timeout)

        revisions_uri = 'refdata/pushlog/list'
        params = 'days_ago={0}&branches={1}'.format(days_ago, branch)
        url = "https://{0}/{1}?{2}".format(host, revisions_uri, params)

        json_data = ""

        #Retrieve revisions to iterate over
        try:
            response = urllib.urlopen(url)
            json_data = response.read()
        except socket.timeout:
            self.stdout.write("URL: {0}\nTimedout {1} seconds\n".format(
                url, timeout))
            sys.exit(0)

        data = json.loads(json_data)
        all_keys = data.keys()
        all_keys.sort()

        ## Keep track of revisions already loaded ##
        # Open in append mode so the existing log survives; seek to the
        # start to read back revisions recorded by previous runs ('w+'
        # would truncate the file and forget them).
        file_obj = open(logfile, 'a+')
        file_obj.seek(0)
        revisions_seen = set()
        for line in file_obj.readlines():
            revisions_seen.add(line.strip())

        revisions = []

        for key in all_keys:
            for revision in data[key]['revisions']:
                if revision not in revisions_seen:
                    revisions.append(revision)

        dm = PerformanceTestModel(dev_project)

        for revision in revisions:

            rawdata_uri = '{0}/testdata/raw/{1}/{2}/'.format(
                prod_project, branch, revision)
            rawdata_url = "https://{0}/{1}".format(host, rawdata_uri)

            raw_json_data = ""

            try:
                rawdata_response = urllib.urlopen(rawdata_url)
                raw_json_data = rawdata_response.read()
            except socket.timeout:
                self.stdout.write("URL: {0}\nTimedout {1} seconds\n".format(
                    rawdata_url, timeout))
                sys.exit(0)

            test_object_data = json.loads(raw_json_data)

            for test_object in test_object_data:
                id = dm.store_test_data(json.dumps(test_object), "")
                self.stdout.write("Revision:{0} Id:{1}\n".format(
                    revision, str(id)))

            #Record the revision as loaded
            file_obj.write(revision + "\n")

        file_obj.close()
        dm.disconnect()
Example 34
def set_test_data(request, project=""):
    """
    Post a JSON blob of data for the specified project.

    Store the JSON in the objectstore where it will be held for
    later processing.

    """
    ####
    # This conditional provides backwards compatibility with
    # the talos production environment. It should be removed
    # once the production environment is uniformly using the
    # new url format.
    ####
    if project == 'views':
        project = 'talos'

    # default to bad request if the JSON is malformed or not present
    status = 400

    try:
        json_data = request.POST['data']
    except KeyError:
        result = {"status": "No POST data found"}
    else:
        unquoted_json_data = urllib.unquote(json_data)

        error = None
        deserialized_json = {}

        try:
            deserialized_json = json.loads(unquoted_json_data)
        except ValueError as e:
            error = "Malformed JSON: {0}".format(e.message)
            result = {"status": "Malformed JSON", "message": error}
        else:
            result = {
                "status": "well-formed JSON stored",
                "size": len(unquoted_json_data),
            }

        try:
            dm = PerformanceTestModel(project)

            dm.pre_process_data(unquoted_json_data, deserialized_json)

            id = dm.store_test_data(unquoted_json_data, error)

            dm.disconnect()
        except Exception as e:
            status = 500
            result = {"status": "Unknown error", "message": str(e)}
        else:

            location = "/{0}/refdata/objectstore/json_blob/{1}".format(
                project, str(id))

            result['url'] = request.build_absolute_uri(location)

            if not error:
                status = 200

    return HttpResponse(json.dumps(result), mimetype=APP_JS, status=status)