Example #1
    def handle_project(self, project, **options):

        debug = options.get("debug", None)
        max_iterations = int(options.get("iterations", 50))

        ptm = PerformanceTestModel(project)

        sql_targets = {}

        cycle_iterations = max_iterations

        while cycle_iterations > 0:

            sql_targets = ptm.cycle_data(sql_targets)

            if debug:
                print "Iterations: {0}".format(str(cycle_iterations))
                print "sql_targets"
                print sql_targets

            cycle_iterations -= 1

            if sql_targets['total_count'] == 0:
                cycle_iterations = 0

        ptm.disconnect()
Example #2
def cache_test_summaries(project):

    ptm = PerformanceTestModel(project)

    ###
    #New reference data could be found in the cached data
    #summary structures. Update the reference data cache
    #every time the summary data is cached.
    ###
    ptm.cache_ref_data()
    ptm.cache_default_project()

    data_iter = ptm.get_all_summary_cache()

    key_test = []
    for d in data_iter:
        for data in d:
            key = utils.get_summary_cache_key(
                project,
                data['item_id'],
                data['item_data'],
            )
            rv = cache.set(key, zlib.compress(data['value']))
            if not rv:
                msg = "ERROR: Failed to store object in memcache: %s, %s\n" % \
                        ( str(data['item_id']), data['item_data'] )
                sys.stderr.write(msg)

    ptm.disconnect()
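The entries written above are zlib-compressed JSON blobs keyed by `utils.get_summary_cache_key`. A minimal sketch of reading one back, assuming the same Django `cache` backend and `utils` helper imported by the example; `get_cached_summary` and its arguments are illustrative names, not part of the original code:

import zlib

def get_cached_summary(project, item_id, item_data):
    # Rebuild the key exactly as cache_test_summaries stored it
    key = utils.get_summary_cache_key(project, item_id, item_data)

    compressed = cache.get(key)
    if compressed is None:
        # Nothing has been cached yet for this combination
        return None

    # Values are stored with zlib.compress, so decompress on the way out
    return zlib.decompress(compressed)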
Example #3
def set_default_products(project):

    ptm = PerformanceTestModel(project)

    products = ptm.get_products('id')

    versions = [products[id]['version'] for id in products]

    #sort version strings
    versions.sort(key=lambda s: map(numeric_prefix, s.split('.')),
                  reverse=True)

    if versions:

        current_version = versions[0]

        default_count = 0
        for id in products:
            default = 0
            if current_version == products[id]['version']:
                default_count += 1
                #Don't load more than 10 datasets by default
                if default_count <= 10:
                    default = 1

            ptm.set_default_product(id, default)

        ptm.cache_default_project()
Example #4
def test_claim_objects(ptm):
    """``claim_objects`` claims & returns unclaimed rows up to a limit."""
    blobs = [
        perftest_json(testrun={"date": "1330454755"}),
        perftest_json(testrun={"date": "1330454756"}),
        perftest_json(testrun={"date": "1330454757"}),
    ]

    for blob in blobs:
        ptm.store_test_data(blob)

    rows1 = ptm.claim_objects(2)

    # a separate worker with a separate connection
    from datazilla.model import PerformanceTestModel
    dm2 = PerformanceTestModel(ptm.project)

    rows2 = dm2.claim_objects(2)

    loading_rows = ptm.sources["objectstore"].dhub.execute(
        proc="objectstore_test.counts.loading")[0]["loading_count"]

    assert len(rows1) == 2
    # second worker asked for two rows but only got one that was left
    assert len(rows2) == 1

    # all three blobs were fetched by one of the workers
    assert set([r["json_blob"] for r in rows1 + rows2]) == set(blobs)

    # the blobs are all marked as "loading" in the database
    assert loading_rows == 3
Example #5
def load_test_collection(project):

    ptm = PerformanceTestModel(project)

    products = ptm.get_products('id')

    product_names = {}

    for id in products:

        if products[id]['product'] and \
           products[id]['version'] and \
           products[id]['branch']:

            name = get_test_collection_name(products[id]['product'],
                                            products[id]['version'],
                                            products[id]['branch'])

            product_names[name] = id

    test_collection_names = ptm.get_test_collection_set()

    new_name_set = set(product_names.keys()).difference(test_collection_names)

    for new_name in new_name_set:
        id = ptm.set_test_collection(new_name, "")
        ptm.set_test_collection_map(id, product_names[new_name])

    ptm.cache_ref_data()

    ptm.disconnect()
Example #6
    def _wrap_oauth(request, *args, **kwargs):
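        # `func` used below is the view being wrapped, presumably defined in the
        # enclosing decorator (not shown in this snippet).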
        project = kwargs.get('project', None)

        ###
        # Until the production environment for talos can make use of
        # OAuth or API keys, we need to bypass OAuth to ingest data.
        # This needs to be removed as soon as talos can support OAuth.
        ###
        if project in ['talos', 'views']:
            return func(request, *args, **kwargs)

        dm = PerformanceTestModel(project)

        #Get the consumer key
        key = request.REQUEST.get('oauth_consumer_key', None)

        if key is None:
            result = {"status": "No OAuth credentials provided."}
            return HttpResponse(json.dumps(result),
                                content_type=APP_JS,
                                status=403)

        try:
            #Get the consumer secret stored with this key
            ds_consumer_secret = dm.get_oauth_consumer_secret(key)
        except DatasetNotFoundError:
            result = {"status": "Unknown project '%s'" % project}
            return HttpResponse(json.dumps(result),
                                content_type=APP_JS,
                                status=404)

        #Construct the OAuth request based on the django request object
        req_obj = oauth.Request(request.method, request.build_absolute_uri(),
                                request.REQUEST, '', False)

        server = oauth.Server()

        #Get the consumer object
        cons_obj = oauth.Consumer(key, ds_consumer_secret)

        #Set the signature method
        server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())

        try:
            #verify oauth django request and consumer object match
            server.verify_request(req_obj, cons_obj, None)
        except oauth.Error:
            status = 403
            result = {"status": "Oauth verification error."}
            return HttpResponse(json.dumps(result),
                                content_type=APP_JS,
                                status=status)

        return func(request, *args, **kwargs)
Example #7
def pytest_funcarg__ptm(request):
    """
    Give a test access to a PerformanceTestModel instance.

    """
    from datazilla.model import PerformanceTestModel

    ptm = PerformanceTestModel(request._pyfuncitem.session.perftest_name)
    request.addfinalizer(partial(truncate, ptm, ["metric", "metric_value"]))

    return ptm
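On newer pytest versions the same setup can be expressed with the fixture decorator instead of the legacy `pytest_funcarg__` prefix; a minimal sketch, assuming the same `truncate` helper and `perftest_name` session attribute used above:

import pytest
from functools import partial

@pytest.fixture
def ptm(request):
    """Give a test access to a PerformanceTestModel instance."""
    from datazilla.model import PerformanceTestModel

    model = PerformanceTestModel(request.session.perftest_name)
    # Truncate the metrics tables once the test using this fixture finishes
    request.addfinalizer(partial(truncate, model, ["metric", "metric_value"]))
    return model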
Example #8
def dataview(request, project="", method=""):

    proc_path = "perftest.views."

    ##Full proc name including base path in json file##
    full_proc_path = "%s%s" % (proc_path, method)

    if settings.DEBUG:
        ###
        #Write IP address and datetime to log
        ###
        print "Client IP:%s" % (request.META['REMOTE_ADDR'])
        print "Request Datetime:%s" % (str(datetime.datetime.now()))

    json = ""
    if method in DATAVIEW_ADAPTERS:
        dm = PerformanceTestModel(project)
        pt_dhub = dm.sources["perftest"].dhub

        if 'adapter' in DATAVIEW_ADAPTERS[method]:
            json = DATAVIEW_ADAPTERS[method]['adapter'](project, method,
                                                        request, dm)
        else:
            if 'fields' in DATAVIEW_ADAPTERS[method]:
                fields = []
                for f in DATAVIEW_ADAPTERS[method]['fields']:
                    if f in request.GET:
                        fields.append(int(request.GET[f]))

                if len(fields) == len(DATAVIEW_ADAPTERS[method]['fields']):
                    json = pt_dhub.execute(proc=full_proc_path,
                                           debug_show=settings.DEBUG,
                                           placeholders=fields,
                                           return_type='table_json')

                else:
                    json = '{ "error":"{0} fields required, {1} provided" }'.format(
                        (str(len(DATAVIEW_ADAPTERS[method]['fields'])),
                         str(len(fields))))

            else:

                json = pt_dhub.execute(proc=full_proc_path,
                                       debug_show=settings.DEBUG,
                                       return_type='table_json')

        dm.disconnect()

    else:
        json = '{ "error":"Data view name %s not recognized" }' % method

    return HttpResponse(json, mimetype=APP_JS)
Example #9
def pytest_runtest_teardown(item):
    """
    Per-test teardown.

    Roll back the Django ORM transaction and truncate tables in the
    test PerformanceTestModel database.

    """
    from django.test.testcases import restore_transaction_methods
    from django.db import transaction
    from datazilla.model import PerformanceTestModel

    restore_transaction_methods()
    transaction.rollback()
    transaction.leave_transaction_management()

    ptm = PerformanceTestModel(item.session.perftest_name)
    truncate(ptm, set(['metric', 'metric_value']))
Example #10
    def handle_project(self, project, **options):

        self.stdout.write("Processing project {0}\n".format(project))

        pushlog_project = options.get("pushlog_project", 'pushlog')
        loadlimit = int(options.get("loadlimit", 1))
        debug = options.get("debug", None)

        test_run_ids = []
        ptm = PerformanceTestModel(project)
        test_run_ids = ptm.process_objects(loadlimit)
        ptm.disconnect()

        """
        metrics_exclude_projects = set(['b2g', 'games', 'jetperf', 'marketapps', 'microperf', 'stoneridge', 'test', 'webpagetest'])
        if project not in metrics_exclude_projects:
            #minimum required number of replicates for
            #metrics processing
            replicate_min = 5
            compute_test_run_metrics(
                project, pushlog_project, debug, replicate_min, test_run_ids
                )
        """

        mtm = MetricsTestModel(project)
        revisions_without_push_data = mtm.load_test_data_all_dimensions(
            test_run_ids)

        if revisions_without_push_data:

            revision_nodes = {}
            plm = PushLogModel(pushlog_project)

            for revision in revisions_without_push_data:

                node = plm.get_node_from_revision(
                    revision, revisions_without_push_data[revision])

                revision_nodes[revision] = node

            plm.disconnect()
            mtm.set_push_data_all_dimensions(revision_nodes)

        mtm.disconnect()
Example #11
def build_test_summaries(project):

    ptm = PerformanceTestModel(project)

    time_ranges = utils.get_time_ranges()

    products = ptm.get_products()

    for product_name in products:

        for tr in ['days_7', 'days_30']:

            table = ptm.get_test_run_summary(str(time_ranges[tr]['start']),
                                             str(time_ranges[tr]['stop']),
                                             [products[product_name]], [], [])

            json_data = json.dumps(table)
            ptm.set_summary_cache(products[product_name], tr, json_data)

    ptm.disconnect()
Example #12
def graphs(request, project=""):

    ####
    #Load any signals provided in the page
    ####
    signals = []
    time_ranges = utils.get_time_ranges()

    for s in SIGNALS:
        if s in request.POST:
            signals.append({
                'value': urllib.unquote(request.POST[s]),
                'name': s
            })
    ###
    #Get reference data
    ###
    ptm = PerformanceTestModel(project)
    json_data = ptm.get_test_reference_data()
    ptm.disconnect()

    time_key = 'days_30'

    data = {
        'time_key': time_key,
        'reference_json': json_data,
        'signals': signals
    }

    ####
    #The caller has provided the view parent of the signals to load in the page.
    #This occurs when a data view in its Pane form is detached
    #to exist on its own page.
    ####
    parent_index_key = 'dv_parent_dview_index'
    if parent_index_key in request.POST:
        data[parent_index_key] = request.POST[parent_index_key]

    return render_to_response('graphs.views.html', data)
Example #13
def pytest_sessionfinish(session):
    """Tear down the test environment, including databases."""
    print("\n")

    from django.conf import settings
    from datazilla.model import PerformanceTestModel, PushLogModel
    import MySQLdb

    source_list = PerformanceTestModel(session.perftest_name).sources.values()
    source_list.extend(
        PushLogModel(project=session.pushlog_name).sources.values())

    for sds in source_list:
        conn = MySQLdb.connect(
            host=sds.datasource.host,
            user=settings.DATAZILLA_DATABASE_USER,
            passwd=settings.DATAZILLA_DATABASE_PASSWORD,
        )
        cur = conn.cursor()
        cur.execute("DROP DATABASE {0}".format(sds.datasource.name))
        conn.close()

    session.django_runner.teardown_databases(session.django_db_config)
    session.django_runner.teardown_test_environment()
Example #14
def set_test_data(request, project=""):
    """
    Post a JSON blob of data for the specified project.

    Store the JSON in the objectstore where it will be held for
    later processing.

    """
    ####
    #This conditional provides backwards compatibility with
    #the talos production environment.  It should
    #be removed after the production environment
    #is uniformly using the new url format.
    ####
    if project == 'views':
        project = 'talos'

    # default to bad request if the JSON is malformed or not present
    status = 400

    try:
        json_data = request.POST['data']
    except KeyError:
        result = {"status": "No POST data found"}
    else:
        unquoted_json_data = urllib.unquote(json_data)

        error = None
        deserialized_json = {}

        try:
            deserialized_json = json.loads(unquoted_json_data)
        except ValueError as e:
            error = "Malformed JSON: {0}".format(e.message)
            result = {"status": "Malformed JSON", "message": error}
        else:
            result = {
                "status": "well-formed JSON stored",
                "size": len(unquoted_json_data),
            }

        try:
            dm = PerformanceTestModel(project)

            dm.pre_process_data(unquoted_json_data, deserialized_json)

            id = dm.store_test_data(unquoted_json_data, error)

            dm.disconnect()
        except Exception as e:
            status = 500
            result = {"status": "Unknown error", "message": str(e)}
        else:

            location = "/{0}/refdata/objectstore/json_blob/{1}".format(
                project, str(id))

            result['url'] = request.build_absolute_uri(location)

            if not error:
                status = 200

    return HttpResponse(json.dumps(result), mimetype=APP_JS, status=status)
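The view above reads a form field named `data` and applies `urllib.unquote` before deserializing it. A minimal client-side sketch of submitting a blob, written in the same Python 2 style as the examples; the URL path and `post_test_data` helper are hypothetical, not part of the original project:

import json
import urllib
import urllib2

def post_test_data(host, project, payload):
    # The view URL-unquotes request.POST['data'], so send the serialized
    # JSON quoted inside a form-encoded body.
    blob = urllib.quote(json.dumps(payload))
    body = urllib.urlencode({'data': blob})

    # Placeholder path; use whatever route maps to set_test_data.
    url = "https://{0}/{1}/api/load_test".format(host, project)

    response = urllib2.urlopen(url, body)
    return json.loads(response.read())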
Example #15
    def handle(self, *args, **options):
        """ Transfer data to a development project based on the args value. """

        host = options.get("host")
        dev_project = options.get("dev_project")
        prod_project = options.get("prod_project")
        branch = options.get("branch")
        days_ago = options.get("days_ago")
        logfile = options.get("logfile")

        if not host:
            self.println("You must supply a host name to retrieve data from " +
                         "--host hostname")
            return

        if not dev_project:
            self.println("You must supply a dev_project name to load data in.")
            return

        if not branch:
            self.println("You must supply a branch name to retrieve data for.")
            return

        #Set timeout so we don't hang
        timeout = 120
        socket.setdefaulttimeout(timeout)

        revisions_uri = 'refdata/pushlog/list'
        params = 'days_ago={0}&branches={1}'.format(days_ago, branch)
        url = "https://{0}/{1}?{2}".format(host, revisions_uri, params)

        json_data = ""

        #Retrieve revisions to iterate over
        try:
            response = urllib.urlopen(url)
            json_data = response.read()
        except socket.timeout:
            self.stdout.write("URL: {0}\nTimedout {1} seconds\n".format(
                url, timeout))
            sys.exit(0)

        data = json.loads(json_data)
        all_keys = data.keys()
        all_keys.sort()

        ##Keep track of revisions already loaded##
        file_obj = open(logfile, 'a+')
        file_obj.seek(0)
        revisions_seen = set()
        for line in file_obj.readlines():
            revisions_seen.add(line.strip())

        revisions = []

        for key in all_keys:
            for revision in data[key]['revisions']:
                if revision not in revisions_seen:
                    revisions.append(revision)

        dm = PerformanceTestModel(dev_project)

        for revision in revisions:

            rawdata_uri = '{0}/testdata/raw/{1}/{2}/'.format(
                prod_project, branch, revision)
            rawdata_url = "https://{0}/{1}".format(host, rawdata_uri)

            raw_json_data = ""

            try:
                rawdata_response = urllib.urlopen(rawdata_url)
                raw_json_data = rawdata_response.read()
            except socket.timeout:
                self.stdout.write("URL: {0}\nTimedout {1} seconds\n".format(
                    rawdata_url, timeout))
                sys.exit(0)

            test_object_data = json.loads(raw_json_data)

            for test_object in test_object_data:
                id = dm.store_test_data(json.dumps(test_object), "")
                self.stdout.write("Revision:{0} Id:{1}\n".format(
                    revision, str(id)))

            #Record the revision as loaded
            file_obj.write(revision + "\n")

        file_obj.close()
        dm.disconnect()
Example #16
    def get_ptm(self, product):
        product = product.lower()
        if product not in self.models:
            self.models[product] = PerformanceTestModel(product)
        return self.models[product]
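The method above lazily creates one model per product and reuses it on later calls. The class that owns `self.models` is not shown in the example, so the container below is a hypothetical sketch of how it could be hosted and torn down:

from datazilla.model import PerformanceTestModel

class ModelCache(object):
    """Hypothetical container illustrating how get_ptm might be hosted."""

    def __init__(self):
        # One PerformanceTestModel per product, created on first use
        self.models = {}

    def get_ptm(self, product):
        product = product.lower()
        if product not in self.models:
            self.models[product] = PerformanceTestModel(product)
        return self.models[product]

    def disconnect_all(self):
        # Close every cached connection, mirroring the ptm.disconnect()
        # calls seen in the other examples.
        for model in self.models.values():
            model.disconnect()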