Example #1
def get_alerts(client, revinfo, appname, context):
    numbers = client.query("select time, percentile(value, 95) from coldlaunch.visuallyLoaded where appName = '%s' and context = '%s' and device='%s' and branch='%s' and memory='%s' and time > now() - 7d group by time(1000u) order asc;" % (appname, context, DEVICE, BRANCH, MEMORY))

    perf_data = []
    ret = []

    if not numbers:
        return ret

    for (timestamp, value) in numbers[0]['points']:
        if timestamp not in revinfo:
            continue
        gaia_rev = revinfo[timestamp][0]
        gecko_rev = revinfo[timestamp][1]
        perf_data.append(B2GPerfDatum(timestamp, value, gaia_revision=gaia_rev, revision=gecko_rev))

    ta = TalosAnalyzer()
    ta.addData(perf_data)
    vals = ta.analyze_t()
    for (i, r) in enumerate(vals):
        if r.state == 'regression':
            prevr = vals[i-1]
            ret.append({ 'push_timestamp': r.push_timestamp,
                         'confidence': r.t,
                         'gecko_revision': r.revision,
                         'gaia_revision': r.gaia_revision,
                         'prev_gecko_revision': prevr.revision,
                         'prev_gaia_revision': prevr.gaia_revision,
                         'oldavg': r.historical_stats['avg'],
                         'newavg': r.forward_stats['avg'] })

    return ret
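
Example #1 takes a prebuilt revinfo mapping of timestamp -> (gaia_revision, gecko_revision). Below is a minimal sketch of constructing that mapping, assuming the events series format that Example #3 parses; get_revinfo is a hypothetical helper, and DEVICE, BRANCH, and MEMORY are the same module-level constants the query above interpolates.

import re

def get_revinfo(client):
    # Hypothetical helper: build {timestamp: (gaia_revision, gecko_revision)}
    # from the events series, mirroring the query style used above.
    revinfo = {}
    events = client.query(
        "select time, text from events where device='%s' and branch='%s' and memory='%s' and time > now() - 7d;"
        % (DEVICE, BRANCH, MEMORY))
    if not events:
        return revinfo
    for point in events[0]['points']:
        # InfluxDB 0.8-style points are (time, sequence_number, text)
        timestamp, text = point[0], point[-1]
        m = re.match("^Gaia: (.*)<br/>Gecko: (.*)$", text)
        if m:
            revinfo[timestamp] = m.groups()
    return revinfo
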
Example #2
    def handleSeries(self, s):
        if self.config.has_option('os', s.os_name):
            s.os_name = self.config.get('os', s.os_name)

        # Check if we should skip this test
        ignore_tests = []
        if self.config.has_option('main', 'ignore_tests'):
            for i in self.config.get('main', 'ignore_tests').split(','):
                i = i.strip()
                if i:
                    ignore_tests.append(i)

        if self.config.has_option(s.branch_name, 'ignore_tests'):
            for i in self.config.get(s.branch_name, 'ignore_tests').split(','):
                i = i.strip()
                if i:
                    ignore_tests.append(i)

        for i in ignore_tests:
            if re.search(i, s.test_name):
                log.debug("Skipping %s %s %s", s.branch_name, s.os_name, s.test_name)
                return

        log.info("Processing %s %s %s", s.branch_name, s.os_name, s.test_name)

        # Get all the test data for all machines running this combination
        t = time.time()
        data = self.source.getTestData(s, options.start_time, self.data_type)
        log.debug("%.2f to fetch data", time.time() - t)

        if data:
            m = max(d.testrun_id for d in data)
            if self.last_run < m:
                log.debug("Setting last_run to %s", m)
                self.last_run = m

        self.updateTimes(s.branch_name, data)

        a = TalosAnalyzer()
        a.addData(data)

        analysis_gen = a.analyze_t(self.back_window, self.fore_window,
                self.threshold, machine_threshold=self.machine_threshold,
                machine_history_size=self.machine_history_size)

        if s.branch_name not in self.warning_history:
            self.warning_history[s.branch_name] = {}
        if s.os_name not in self.warning_history[s.branch_name]:
            self.warning_history[s.branch_name][s.os_name] = {}
        if s.test_name not in self.warning_history[s.branch_name][s.os_name]:
            self.warning_history[s.branch_name][s.os_name][s.test_name] = []
        warnings = self.warning_history[s.branch_name][s.os_name][s.test_name]

        series_data = self.processSeries(analysis_gen, warnings)
        for d, skip, last_good in series_data:
            self.handleData(s, d, d.state, skip, last_good)

        if self.config.has_option('main', 'graph_dir'):
            self.outputGraphs(s, series_data)
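
This variant delegates the warning bookkeeping to self.processSeries, which Examples #5 and #9 inline. A minimal sketch of that method, reconstructed from the inlined loop under the assumption that analyze_t here yields datums carrying their analysis state as d.state (as the caller above implies); the per-machine warning throttling shown in Example #5 is omitted.

    def processSeries(self, analysis_gen, warnings):
        # Sketch reconstructed from the inlined loop in Examples #5 and #9.
        series_data = []
        last_good = None
        last_err = None
        last_err_good = None
        cutoff = time.time() - 7 * 24 * 3600
        for d in analysis_gen:
            skip = False
            if d.timestamp < cutoff:
                continue
            if d.state != "good":
                # Don't re-warn about (buildid, timestamp) pairs we've seen
                if (d.buildid, d.timestamp) in warnings:
                    skip = True
                else:
                    warnings.append((d.buildid, d.timestamp))
                if not last_err:
                    last_err = d
                    last_err_good = last_good
                elif last_err_good == last_good:
                    skip = True
            else:
                last_err = None
                last_good = d
            series_data.append((d, skip, last_good))
        return series_data
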
Example #3
def get_alerts(client, appname, context):
    numbers = client.query(
        "select time, mean(value) from coldlaunch.visuallyLoaded where appName = '%s' and context = '%s' and device='flame-kk' and branch='master' and memory='319' and time > '2015-03-31' group by time(1s) order ASC;"
        % (appname, context))
    revinfo = client.query(
        "select time, text from events where device='flame-kk' and branch='master' and memory='319' and time > '2015-03-31' group by time(1s) order ASC;"
    )

    sequence_rev_list = []
    for (timestamp, sequence_number, text) in revinfo[0]['points']:
        m = re.match("^Gaia: (.*)<br/>Gecko: (.*)$", text)
        if m is None:
            # malformed event text; skip rather than crash on .groups()
            continue
        (gaia_revision, gecko_revision) = m.groups()
        sequence_rev_list.append((timestamp, gaia_revision, gecko_revision))

    perf_data = []
    for (timestamp, value) in numbers[0]['points']:
        # we sometimes have duplicate test runs for the same timestamp? or
        # the revision info is otherwise missing. this is probably bad, and
        # resulting in inaccurate data...
        current_rev = None
        for (sequence_timestamp, gaia_revision,
             gecko_revision) in sequence_rev_list:
            if sequence_timestamp <= timestamp:
                current_rev = (sequence_timestamp, gaia_revision,
                               gecko_revision)
            else:
                break
        if current_rev is None:
            # no revision info at or before this point; skip it
            continue
        perf_data.append(
            B2GPerfDatum(current_rev[0],
                         value,
                         gaia_revision=current_rev[1],
                         revision=current_rev[2]))

    ta = TalosAnalyzer()
    ta.addData(perf_data)
    vals = ta.analyze_t()
    ret = []
    for (i, r) in enumerate(vals):
        if r.state == 'regression':
            prevr = vals[i - 1]
            ret.append({
                'push_timestamp': r.push_timestamp,
                'confidence': r.t,
                'gecko_revision': r.revision,
                'gaia_revision': r.gaia_revision,
                'prev_gecko_revision': prevr.revision,
                'prev_gaia_revision': prevr.gaia_revision,
                'oldavg': r.historical_stats['avg'],
                'newavg': r.forward_stats['avg']
            })

    return ret
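
The inner loop above rescans sequence_rev_list once per data point. A minimal sketch of the same latest-revision-at-or-before lookup using bisect, assuming the list is sorted by timestamp ascending (which the order ASC query provides); revision_at is a hypothetical helper.

import bisect

def revision_at(sequence_rev_list, timestamps, timestamp):
    # timestamps is [entry[0] for entry in sequence_rev_list], precomputed
    # once so that each lookup is O(log n) instead of O(n).
    i = bisect.bisect_right(timestamps, timestamp)
    if i == 0:
        return None  # no revision info at or before this point
    return sequence_rev_list[i - 1]
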
Example #4
def get_alerts(client, revinfo, appname, context):
    numbers = client.query(
        "select time, percentile(value, 95) from coldlaunch.visuallyLoaded where appName = '%s' and context = '%s' and device='%s' and branch='%s' and memory='%s' and time > now() - 7d group by time(1000u) order asc;"
        % (appname, context, DEVICE, BRANCH, MEMORY))

    perf_data = []
    ret = []

    if not numbers:
        return ret

    for (timestamp, value) in numbers[0]['points']:
        if timestamp not in revinfo:
            continue
        gaia_rev = revinfo[timestamp][0]
        gecko_rev = revinfo[timestamp][1]
        perf_data.append(
            B2GPerfDatum(timestamp,
                         value,
                         gaia_revision=gaia_rev,
                         revision=gecko_rev))

    ta = TalosAnalyzer()
    ta.addData(perf_data)
    vals = ta.analyze_t()
    for (i, r) in enumerate(vals):
        if r.state == 'regression':
            prevr = vals[i - 1]
            ret.append({
                'push_timestamp': r.push_timestamp,
                'confidence': r.t,
                'gecko_revision': r.revision,
                'gaia_revision': r.gaia_revision,
                'prev_gecko_revision': prevr.revision,
                'prev_gaia_revision': prevr.gaia_revision,
                'oldavg': r.historical_stats['avg'],
                'newavg': r.forward_stats['avg']
            })

    return ret
Example #5
    def handleSeries(self, s):
        if self.config.has_option('os', s.os_name):
            s.os_name = self.config.get('os', s.os_name)

        # Check if we should skip this test
        ignore_tests = []
        if self.config.has_option('main', 'ignore_tests'):
            for i in self.config.get('main', 'ignore_tests').split(','):
                i = i.strip()
                if i:
                    ignore_tests.append(i)

        if self.config.has_option(s.branch_name, 'ignore_tests'):
            for i in self.config.get(s.branch_name, 'ignore_tests').split(','):
                i = i.strip()
                if i:
                    ignore_tests.append(i)

        for i in ignore_tests:
            if re.search(i, s.test_name):
                log.debug("Skipping %s %s %s", s.branch_name, s.os_name, s.test_name)
                return

        log.info("Processing %s %s %s", s.branch_name, s.os_name, s.test_name)

        # Get all the test data for all machines running this combination
        t = time.time()
        data = self.source.getTestData(s, options.start_time)
        log.debug("%.2f to fetch data", time.time() - t)

        if data:
            m = max(d.testrun_id for d in data)
            if self.last_run < m:
                log.debug("Setting last_run to %s", m)
                self.last_run = m

        self.updateTimes(s.branch_name, data)

        a = TalosAnalyzer()
        a.addData(data)

        analysis_gen = a.analyze_t(self.back_window, self.fore_window,
                self.threshold, self.machine_threshold,
                self.machine_history_size)

        if s.branch_name not in self.warning_history:
            self.warning_history[s.branch_name] = {}
        if s.os_name not in self.warning_history[s.branch_name]:
            self.warning_history[s.branch_name][s.os_name] = {}
        if s.test_name not in self.warning_history[s.branch_name][s.os_name]:
            self.warning_history[s.branch_name][s.os_name][s.test_name] = []
        warnings = self.warning_history[s.branch_name][s.os_name][s.test_name]

        last_good = None
        last_err = None
        last_err_good = None
        # Uncomment this for debugging!
        #cutoff = self.options.start_time
        cutoff = time.time() - 7*24*3600
        series_data = []
        for d, state in analysis_gen:
            skip = False
            if d.timestamp < cutoff:
                continue

            if state != "good":
                # Skip warnings about regressions we've already
                # warned people about
                if (d.buildid, d.timestamp) in warnings:
                    skip = True
                else:
                    warnings.append((d.buildid, d.timestamp))
                    if state == "machine":
                        machine_name = self.source.getMachineName(d.machine_id)
                        if 'bad_machines' not in self.warning_history:
                            self.warning_history['bad_machines'] = {}
                        # When did we last warn about this machine?
                        if self.warning_history['bad_machines'].get(machine_name, 0) > time.time() - 7*24*3600:
                            skip = True
                        else:
                            # If it was over a week ago, then send another warning
                            self.warning_history['bad_machines'][machine_name] = time.time()

                if not last_err:
                    last_err = d
                    last_err_good = last_good
                elif last_err_good == last_good:
                    skip = True

            else:
                last_err = None
                last_good = d

            series_data.append((s, d, state, skip, last_good))
            self.handleData(s, d, state, skip, last_good)

        if self.config.has_option('main', 'graph_dir'):
            self.outputGraphs(s, series_data)
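
The three-level warning_history initialization above recurs verbatim in every handleSeries variant in this listing; dict.setdefault collapses it to a single expression with identical behavior:

        warnings = self.warning_history \
            .setdefault(s.branch_name, {}) \
            .setdefault(s.os_name, {}) \
            .setdefault(s.test_name, [])
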
Example #6
# initialize accumulators; prev_timestamp is checked on the first iteration
perf_data = []
prev_timestamp = None
values = []
for (timestamp, sequence_number, value) in clocknumbers[0]['points']:
    if prev_timestamp and prev_timestamp == timestamp:
        values.append(value)
    elif prev_timestamp:
        # add everything to perf data
        avg = float(sum(values)) / len(values)
        perf_data.append(
            PerfDatum(prev_timestamp, 0, prev_timestamp, avg, None,
                      prev_timestamp))
        # start again
        values = [value]
        prev_timestamp = timestamp
    else:
        # first value
        prev_timestamp = timestamp
        values = [value]

# the loop above leaves the final batch in `values`; flush it too
if values:
    avg = float(sum(values)) / len(values)
    perf_data.append(
        PerfDatum(prev_timestamp, 0, prev_timestamp, avg, None,
                  prev_timestamp))

ta = TalosAnalyzer()
ta.addData(perf_data)
for r in ta.analyze_t(5, 5, 3):
    #  if r.state == 'regression':
    if sequence_rev_map.get(r.timestamp):
        print "date: %s" % datetime.datetime.fromtimestamp(
            r.timestamp).strftime('%Y-%m-%d %H:%M:%S')
        print "confidence (higher is more confident): %s" % r.t
        print "revision: %s" % sequence_rev_map[r.timestamp]
        print "old average: %s" % r.historical_stats['avg']
        print "new average: %s" % r.forward_stats['avg']
        print "================================="
Example #7
    def handleSeries(self, s):
        if self.config.has_option('os', s.os_name):
            s.os_name = self.config.get('os', s.os_name)

        # Check if we should skip this test
        ignore_tests = []
        if self.config.has_option('main', 'ignore_tests'):
            for i in self.config.get('main', 'ignore_tests').split(','):
                i = i.strip()
                if i:
                    ignore_tests.append(i)

        if self.config.has_option(s.branch_name, 'ignore_tests'):
            for i in self.config.get(s.branch_name, 'ignore_tests').split(','):
                i = i.strip()
                if i:
                    ignore_tests.append(i)

        for i in ignore_tests:
            if re.search(i, s.test_name):
                log.debug("Skipping %s %s %s", s.branch_name, s.os_name, s.test_name)
                return

        log.info("Processing %s %s %s", s.branch_name, s.os_name, s.test_name)

        # Get all the test data for all machines running this combination
        t = time.time()
        data = self.source.getTestData(s, options.start_time)
        log.debug("%.2f to fetch data", time.time() - t)

        # Add it to our dashboard data
        sevenDaysAgo = time.time() - 7*24*60*60
        importantTests = []
        for test in re.split(r"(?<!\\),", self.config.get("dashboard", "tests")):
            test = test.replace("\\,", ",").strip()
            importantTests.append(test)

        if data:
            m = max(d.testrun_id for d in data)
            if self.last_run < m:
                log.debug("Setting last_run to %s", m)
                self.last_run = m

        if s.test_name in importantTests and len(data) > 0:
            # We want to merge the Tp3 (Memset) and Tp3 (RSS) results together
            # for the dashboard, since they're just different names for the
            # same thing on different platforms
            test_name = s.test_name
            if test_name == "Tp3 (Memset)":
                test_name = "Tp3 (RSS)"
            elif test_name == "Tp4 (Memset)":
                test_name = "Tp4 (RSS)"
            self.dashboard_data.setdefault(s.branch_name, {})
            self.dashboard_data[s.branch_name].setdefault(test_name, {'_testid': s.test_id})
            self.dashboard_data[s.branch_name][test_name].setdefault(s.os_name, {'_platformid': s.os_id, '_graphURL': self.makeChartUrl(s)})
            _d = self.dashboard_data[s.branch_name][test_name][s.os_name]

            for d in data:
                if d.timestamp < sevenDaysAgo:
                    continue
                machine_name = self.source.getMachineName(d.machine_id)
                if machine_name not in _d:
                    _d[machine_name] = {
                            'results': [],
                            'stats': [],
                            }
                results = _d[machine_name]['results']
                results.append(d.timestamp)
                results.append(d.value)

            for machine_name in _d:
                if machine_name.startswith("_"):
                    continue
                results = _d[machine_name]['results']
                values = [results[i+1] for i in range(0, len(results), 2)]
                _d[machine_name]['stats'] = [avg(values), max(values), min(values)]

        self.updateTimes(s.branch_name, data)

        a = TalosAnalyzer()
        a.addData(data)

        analysis_gen = a.analyze_t(self.back_window, self.fore_window,
                self.threshold, self.machine_threshold,
                self.machine_history_size)

        if s.branch_name not in self.warning_history:
            self.warning_history[s.branch_name] = {}
        if s.os_name not in self.warning_history[s.branch_name]:
            self.warning_history[s.branch_name][s.os_name] = {}
        if s.test_name not in self.warning_history[s.branch_name][s.os_name]:
            self.warning_history[s.branch_name][s.os_name][s.test_name] = []
        warnings = self.warning_history[s.branch_name][s.os_name][s.test_name]

        last_good = None
        last_err = None
        last_err_good = None
        # Uncomment this for debugging!
        #cutoff = self.options.start_time
        cutoff = time.time() - 7*24*3600
        series_data = []
        for d, state in analysis_gen:
            skip = False
            if d.timestamp < cutoff:
                continue

            if state != "good":
                # Skip warnings about regressions we've already
                # warned people about
                if (d.buildid, d.timestamp) in warnings:
                    skip = True
                else:
                    warnings.append((d.buildid, d.timestamp))
                    if state == "machine":
                        machine_name = self.source.getMachineName(d.machine_id)
                        if 'bad_machines' not in self.warning_history:
                            self.warning_history['bad_machines'] = {}
                        # When did we last warn about this machine?
                        if self.warning_history['bad_machines'].get(machine_name, 0) > time.time() - 7*24*3600:
                            skip = True
                        else:
                            # If it was over a week ago, then send another warning
                            self.warning_history['bad_machines'][machine_name] = time.time()

                if not last_err:
                    last_err = d
                    last_err_good = last_good
                elif last_err_good == last_good:
                    skip = True

            else:
                last_err = None
                last_good = d

            series_data.append((s, d, state, skip, last_good))
            self.handleData(s, d, state, skip, last_good)

        if self.config.has_option('main', 'graph_dir'):
            self.outputGraphs(s, series_data)
Example #8
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.

import phclient
import sys

from analyze import PerfDatum, TalosAnalyzer

pc = phclient.Client()

(projectname, signature) = (sys.argv[1], sys.argv[2])

s = pc.get_series(projectname, signature, time_interval=phclient.TimeInterval.NINETY_DAYS)

perf_data = []
for (result_set_id, timestamp, geomean) in zip(
        s['result_set_id'], s['push_timestamp'], s['geomean']):
    perf_data.append(PerfDatum(result_set_id, 0, timestamp, geomean, None,
                               timestamp))

ta = TalosAnalyzer()
ta.addData(perf_data)
for r in ta.analyze_t(5, 5, 2):
    if r.state == 'regression':
        print (r.testrun_id, r.t, pc.get_revision(projectname, r.testrun_id)[0:12])
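
For reference: this script takes a project name and series signature as command-line arguments (sys.argv[1] and sys.argv[2]), pulls ninety days of the series via phclient, and prints a (testrun_id, t-value, short revision) tuple for every point analyze_t flags as a regression.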
Example #9
    def handleSeries(self, s):
        if self.config.has_option('os', s.os_name):
            s.os_name = self.config.get('os', s.os_name)

        # Check if we should skip this test
        ignore_tests = []
        if self.config.has_option('main', 'ignore_tests'):
            for i in self.config.get('main', 'ignore_tests').split(','):
                i = i.strip()
                if i:
                    ignore_tests.append(i)

        if self.config.has_option(s.branch_name, 'ignore_tests'):
            for i in self.config.get(s.branch_name, 'ignore_tests').split(','):
                i = i.strip()
                if i:
                    ignore_tests.append(i)

        for i in ignore_tests:
            if re.search(i, s.test_name):
                log.debug("Skipping %s %s %s", s.branch_name, s.os_name,
                          s.test_name)
                return

        log.info("Processing %s %s %s", s.branch_name, s.os_name, s.test_name)

        # Get all the test data for all machines running this combination
        t = time.time()
        data = self.source.getTestData(s, options.start_time)
        log.debug("%.2f to fetch data", time.time() - t)

        if data:
            m = max(d.testrun_id for d in data)
            if self.last_run < m:
                log.debug("Setting last_run to %s", m)
                self.last_run = m

        self.updateTimes(s.branch_name, data)

        a = TalosAnalyzer()
        a.addData(data)

        analysis_gen = a.analyze_t(self.back_window, self.fore_window,
                                   self.threshold, self.machine_threshold,
                                   self.machine_history_size)

        if s.branch_name not in self.warning_history:
            self.warning_history[s.branch_name] = {}
        if s.os_name not in self.warning_history[s.branch_name]:
            self.warning_history[s.branch_name][s.os_name] = {}
        if s.test_name not in self.warning_history[s.branch_name][s.os_name]:
            self.warning_history[s.branch_name][s.os_name][s.test_name] = []
        warnings = self.warning_history[s.branch_name][s.os_name][s.test_name]

        last_good = None
        last_err = None
        last_err_good = None
        # Uncomment this for debugging!
        #cutoff = self.options.start_time
        cutoff = time.time() - 7 * 24 * 3600
        series_data = []
        for d, state in analysis_gen:
            skip = False
            if d.timestamp < cutoff:
                continue

            if state != "good":
                # Skip warnings about regressions we've already
                # warned people about
                if (d.buildid, d.timestamp) in warnings:
                    skip = True
                else:
                    warnings.append((d.buildid, d.timestamp))
                    if state == "machine":
                        machine_name = self.source.getMachineName(d.machine_id)
                        if 'bad_machines' not in self.warning_history:
                            self.warning_history['bad_machines'] = {}
                        # When did we last warn about this machine?
                        if self.warning_history['bad_machines'].get(
                                machine_name, 0) > time.time() - 7 * 24 * 3600:
                            skip = True
                        else:
                            # If it was over a week ago, then send another warning
                            self.warning_history['bad_machines'][
                                machine_name] = time.time()

                if not last_err:
                    last_err = d
                    last_err_good = last_good
                elif last_err_good == last_good:
                    skip = True

            else:
                last_err = None
                last_good = d

            series_data.append((s, d, state, skip, last_good))
            self.handleData(s, d, state, skip, last_good)

        if self.config.has_option('main', 'graph_dir'):
            self.outputGraphs(s, series_data)
Example #10
    def handleSeries(self, s):
        if self.config.has_option('os', s.os_name):
            s.os_name = self.config.get('os', s.os_name)

        # Check if we should skip this test
        ignore_tests = []
        if self.config.has_option('main', 'ignore_tests'):
            for i in self.config.get('main', 'ignore_tests').split(','):
                i = i.strip()
                if i:
                    ignore_tests.append(i)

        if self.config.has_option(s.branch_name, 'ignore_tests'):
            for i in self.config.get(s.branch_name, 'ignore_tests').split(','):
                i = i.strip()
                if i:
                    ignore_tests.append(i)

        for i in ignore_tests:
            if re.search(i, s.test_name):
                log.debug("Skipping %s %s %s", s.branch_name, s.os_name,
                          s.test_name)
                return

        log.info("Processing %s %s %s", s.branch_name, s.os_name, s.test_name)

        # Get all the test data for all machines running this combination
        t = time.time()
        data = self.source.getTestData(s, options.start_time, self.data_type)
        log.debug("%.2f to fetch data", time.time() - t)

        if data:
            m = max(d.testrun_id for d in data)
            if self.last_run < m:
                log.debug("Setting last_run to %s", m)
                self.last_run = m

        self.updateTimes(s.branch_name, data)

        a = TalosAnalyzer()
        a.addData(data)

        analysis_gen = a.analyze_t(
            self.back_window,
            self.fore_window,
            self.threshold,
            machine_threshold=self.machine_threshold,
            machine_history_size=self.machine_history_size)

        if s.branch_name not in self.warning_history:
            self.warning_history[s.branch_name] = {}
        if s.os_name not in self.warning_history[s.branch_name]:
            self.warning_history[s.branch_name][s.os_name] = {}
        if s.test_name not in self.warning_history[s.branch_name][s.os_name]:
            self.warning_history[s.branch_name][s.os_name][s.test_name] = []
        warnings = self.warning_history[s.branch_name][s.os_name][s.test_name]

        series_data = self.processSeries(analysis_gen, warnings)
        for d, skip, last_good in series_data:
            self.handleData(s, d, d.state, skip, last_good)

        if self.config.has_option('main', 'graph_dir'):
            self.outputGraphs(s, series_data)