def getTestData(series, start_time, data_type):
    if not data_type:
        data_type = 'average'

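    # Fetch every run of this test on the series' branch/OS newer than
    # start_time, joining test_runs to their builds and machines.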
    q = sa.select(
        [db.test_runs.id, db.test_runs.machine_id, db.builds.ref_build_id,
            db.test_runs.date_run, db.test_runs.average, db.test_runs.geomean,
            db.builds.ref_changeset, db.test_runs.run_number,
            db.builds.branch_id],
        sa.and_(
            db.test_runs.test_id == series.test_id,
            db.builds.branch_id == series.branch_id,
            db.machines.os_id == series.os_id,
            db.test_runs.machine_id == db.machines.id,
            db.test_runs.build_id == db.builds.id,
            db.test_runs.date_run > start_time,
            goodNameClause,
        ))

    data = []
    for row in q.execute():
        if row[data_type] is None:
            continue
        t = row.date_run
        d = PerfDatum(row.id, row.machine_id, row.date_run, row[data_type], row.ref_build_id, t, row.ref_changeset)
        d.run_number = row.run_number
        data.append(d)
    return data
Example #2
def getTestData(series, start_time):
    q = sa.select(
        [db.test_runs.id, db.test_runs.machine_id, db.builds.ref_build_id,
         db.test_runs.date_run, db.test_runs.average, db.builds.ref_changeset,
         db.test_runs.run_number, db.builds.branch_id],
        sa.and_(
            db.test_runs.test_id == series.test_id,
            db.builds.branch_id == series.branch_id,
            db.machines.os_id == series.os_id,
            db.test_runs.machine_id == db.machines.id,
            db.test_runs.build_id == db.builds.id,
            db.test_runs.date_run > start_time,
            goodNameClause,
            sa.not_(db.machines.name.like("%stage%")),
        ))

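    # Skip runs with no recorded average; wrap the rest in PerfDatum objects.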
    data = []
    for row in q.execute():
        if row.average is None:
            continue
        t = row.date_run
        d = PerfDatum(row.id, row.machine_id, row.date_run, row.average,
                      row.ref_build_id, t, row.ref_changeset)
        d.run_number = row.run_number
        data.append(d)
    return data
    def get_data(self):
        # Synthetic series: four 0.0 values followed by four 1.0 values,
        # with the first 1.0 point marked as the regression.
        return [
            PerfDatum(0, 0, time() + 0, 0.0, 0, 0, state='good'),
            PerfDatum(1, 1, time() + 1, 0.0, 1, 1, state='good'),
            PerfDatum(2, 2, time() + 2, 0.0, 2, 2, state='good'),
            PerfDatum(3, 3, time() + 3, 0.0, 3, 3, state='good'),
            PerfDatum(4, 4, time() + 4, 1.0, 4, 4, state='regression'),
            PerfDatum(5, 5, time() + 5, 1.0, 5, 5, state='good'),
            PerfDatum(6, 6, time() + 6, 1.0, 6, 6, state='good'),
            PerfDatum(7, 7, time() + 7, 1.0, 7, 7, state='good'),
        ]
Example #5
    def getTestData(self, series):
        base = self.baseurl
        retval = []
        seen = {}
        test_id = series.test_id
        branch_id = series.branch_id
        os_id = series.os_id
        url = "%(base)s/test/runs?id=%(test_id)s&branchid=%(branch_id)s&platformid=%(os_id)s" % locals()
        try:
            log.debug("Getting %s", url)
            req = urllib.urlopen(url)
            results = json.load(req)
        except KeyboardInterrupt:
            raise
        except:
            log.exception("Couldn't load or parse %s", url)
            return []

        if 'test_runs' not in results:
            log.debug("No data from %s", url)
            return []

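        # Each test_runs entry is a flat list:
        # [testrunid, build, date, average, run_number, annotations, machine_id, row_geo]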
        for item in results['test_runs']:
            testrunid, build, date, average, run_number, annotations, machine_id, row_geo = item
            if average is None:
                continue

            d = PerfDatum(testrunid, machine_id, date, average, build[1], date, build[2])
            d.run_number = run_number
            retval.append(d)
            t = (d.buildid, date, average, machine_id)
            #if t in seen:
                #if seen[t].run_number == run_number:
                    #continue
                #log.error("%s %s %s", seen[t], seen[t].machine_id, seen[t].run_number)
                #log.error("%s %s %s", d, d.machine_id, d.run_number)
                #log.error(url)
            #else:
                #seen[t] = d

        return retval
    def test_shouldSendWarning(self):
        runner = self.create_runner()
        d = PerfDatum(0, 0, time() + 0, 0.0, 0, 0)
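        # shouldSendWarning compares forward_stats['avg'] against
        # historical_stats['avg']: 1% shifts are ignored and 10% shifts warn,
        # unless the test is exempt from the percentage check (last case below).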
        d.historical_stats = { 'avg': 100.0 }

        # 1% increase
        d.forward_stats = { 'avg': 101.0 }
        self.assertFalse(runner.shouldSendWarning(d, 'some test'))

        # 10% increase
        d.forward_stats = { 'avg': 110.0 }
        self.assertTrue(runner.shouldSendWarning(d, 'some test'))

        # 1% decrease
        d.forward_stats = { 'avg': 99.0 }
        self.assertFalse(runner.shouldSendWarning(d, 'some test'))

        # 10% decrease
        d.forward_stats = { 'avg': 90.0 }
        self.assertTrue(runner.shouldSendWarning(d, 'some test'))

        # 1% increase, ignore percentage
        d.forward_stats = { 'avg': 101.0 }
        self.assertTrue(runner.shouldSendWarning(d, 'LibXUL Memory during link'))
Example #8
    "select time, sequence_number, text from events where device='flame-kk' and branch='v2.2' and memory='319' and time > now() - 4w;"
)
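# Map each event timestamp to the revision text returned by the events query above.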
sequence_rev_map = {}
for (timestamp, sequence_number, text) in revinfo[0]['points']:
    sequence_rev_map[timestamp] = text
perf_data = []
prev_timestamp = None
values = []
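# Samples that share a push timestamp are collapsed into a single averaged PerfDatum.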
for (timestamp, sequence_number, value) in clocknumbers[0]['points']:
    if prev_timestamp and prev_timestamp == timestamp:
        values.append(value)
    elif prev_timestamp:
        # add everything to perf data
        avg = float(sum(values)) / len(values)
        perf_data.append(
            PerfDatum(prev_timestamp, 0, prev_timestamp, avg, None,
                      prev_timestamp))
        # start again
        values = [value]
        prev_timestamp = timestamp
    else:
        # first value
        prev_timestamp = timestamp
        values = [value]

ta = TalosAnalyzer()
ta.addData(perf_data)
for r in ta.analyze_t(5, 5, 3):
    #  if r.state == 'regression':
    if sequence_rev_map.get(r.timestamp):
        print "date: %s" % datetime.datetime.fromtimestamp(
            r.timestamp).strftime('%Y-%m-%d %H:%M:%S')
Example #10
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.

import phclient
import sys

from analyze import PerfDatum, TalosAnalyzer

pc = phclient.Client()

(projectname, signature) = (sys.argv[1], sys.argv[2])

s = pc.get_series(projectname,
                  signature,
                  time_interval=phclient.TimeInterval.NINETY_DAYS)

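# Wrap each (result_set_id, push_timestamp, geomean) triple in a PerfDatum.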
perf_data = []
for (result_set_id, timestamp, geomean) in zip(s['result_set_id'],
                                               s['push_timestamp'],
                                               s['geomean']):
    perf_data.append(
        PerfDatum(result_set_id, 0, timestamp, geomean, None, timestamp))

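# Feed the series to the analyzer and print the test run id, t value, and
# abbreviated revision for every point flagged as a regression.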
ta = TalosAnalyzer()
ta.addData(perf_data)
for r in ta.analyze_t(5, 5, 2):
    if r.state == 'regression':
        print(r.testrun_id, r.t,
              pc.get_revision(projectname, r.testrun_id)[0:12])
Example #11
    def __init__(self, push_timestamp, value, gaia_revision=None, **kwargs):
        # Same as PerfDatum, but also records the gaia revision for this datum.
        PerfDatum.__init__(self, push_timestamp, value, **kwargs)
        self.gaia_revision = gaia_revision