Example #1
def FetchAlertsData(args):
    api = dashboard_api.PerfDashboardCommunicator(args)
    with tables.DbSession(args.database_file) as con:
        # Get alerts.
        num_alerts = 0
        bug_ids = set()
        # TODO: This loop may be slow when fetching thousands of alerts, needs a
        # better progress indicator.
        for data in api.IterAlertData(args.benchmark, args.sheriff, args.days):
            alerts = tables.alerts.DataFrameFromJson(data)
            pandas_sqlite.InsertOrReplaceRecords(con, 'alerts', alerts)
            num_alerts += len(alerts)
            bug_ids.update(alerts['bug_id'].unique())
        print '%d alerts found!' % num_alerts

        # Get set of bugs associated with those alerts.
        bug_ids.discard(0)  # A bug_id of 0 means untriaged.
        print '%d bugs found!' % len(bug_ids)

        # Filter out bugs already in cache.
        if args.use_cache:
            known_bugs = set(b for b in bug_ids
                             if tables.bugs.Get(con, b) is not None)
            if known_bugs:
                print '(skipping %d bugs already in the database)' % len(
                    known_bugs)
                bug_ids.difference_update(known_bugs)

    # Use worker pool to fetch bug data.
    total_seconds = worker_pool.Run(
        'Fetching data of %d bugs: ' % len(bug_ids), _FetchBugsWorker, args,
        bug_ids)
    print '[%.1f bugs per second]' % (len(bug_ids) / total_seconds)
Example #2
def FetchTimeseriesData(args):
    def _MatchesAllFilters(test_path):
        return all(f in test_path for f in args.filters)

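    # Get test_paths.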
    if args.benchmark is not None:
        api = dashboard_api.PerfDashboardCommunicator(args)
        test_paths = api.ListTestPaths(args.benchmark, sheriff=args.sheriff)
    elif args.input_file is not None:
        test_paths = list(_ReadTestPathsFromFile(args.input_file))
    else:
        raise NotImplementedError('Expected --benchmark or --input-file')

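    # Apply --filter's to test_paths.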
    if args.filters:
        test_paths = filter(_MatchesAllFilters, test_paths)
    num_found = len(test_paths)
    print '%d test paths found!' % num_found

    con = sqlite3.connect(args.database_file)
    try:
        tables.CreateIfNeeded(con)
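        # Filter out test_paths already in cache.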
        if args.use_cache:
            test_paths = list(_IterStaleTestPaths(con, test_paths))
            num_skipped = num_found - len(test_paths)
            if num_skipped:
                print '(skipping %d test paths already in the database)' % num_skipped
    finally:
        con.close()

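    # Use worker pool to fetch test path data.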
    total_seconds = worker_pool.Run(
        'Fetching data of %d timeseries: ' % len(test_paths),
        _FetchTimeseriesWorker, args, test_paths)
    print '[%.1f test paths per second]' % (len(test_paths) / total_seconds)
Example #3
def FetchAlertsData(args):
    api = dashboard_api.PerfDashboardCommunicator(args)
    con = sqlite3.connect(args.database_file)
    try:
        tables.CreateIfNeeded(con)
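        # Get alerts.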
        alerts = tables.alerts.DataFrameFromJson(
            api.GetAlertData(args.benchmark, args.sheriff, args.days))
        print '%d alerts found!' % len(alerts)
        pandas_sqlite.InsertOrReplaceRecords(con, 'alerts', alerts)

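        # Get set of bugs associated with those alerts.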
        bug_ids = set(alerts['bug_id'].unique())
        bug_ids.discard(0)  # A bug_id of 0 means untriaged.
        print '%d bugs found!' % len(bug_ids)
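        # Filter out bugs already in cache.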
        if args.use_cache:
            known_bugs = set(b for b in bug_ids
                             if tables.bugs.Get(con, b) is not None)
            if known_bugs:
                print '(skipping %d bugs already in the database)' % len(
                    known_bugs)
                bug_ids.difference_update(known_bugs)
    finally:
        con.close()

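    # Use worker pool to fetch bug data.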
    total_seconds = worker_pool.Run(
        'Fetching data of %d bugs: ' % len(bug_ids), _FetchBugsWorker, args,
        bug_ids)
    print '[%.1f bugs per second]' % (len(bug_ids) / total_seconds)
Example #4
def FetchTimeseriesData(args):
    def _MatchesAllFilters(test_path):
        return all(f in test_path for f in args.filters)

    with _ApiAndDbSession(args) as (api, con):
        # Get test_paths.
        if args.benchmark is not None:
            test_paths = api.ListTestPaths(args.benchmark,
                                           sheriff=args.sheriff)
        elif args.input_file is not None:
            test_paths = list(_ReadTestPathsFromFile(args.input_file))
        else:
            raise NotImplementedError('Expected --benchmark or --input-file')

        # Apply --filter's to test_paths.
        if args.filters:
            test_paths = filter(_MatchesAllFilters, test_paths)
        num_found = len(test_paths)
        print '%d test paths found!' % num_found

        # Filter out test_paths already in cache.
        if args.use_cache:
            test_paths = list(_IterStaleTestPaths(con, test_paths))
            num_skipped = num_found - len(test_paths)
            if num_skipped:
                print '(skipping %d test paths already in the database)' % num_skipped

    # Use worker pool to fetch test path data.
    total_seconds = worker_pool.Run(
        'Fetching data of %d timeseries: ' % len(test_paths),
        _FetchTimeseriesWorker, args, test_paths)
    print '[%.1f test paths per second]' % (len(test_paths) / total_seconds)
Example #5
def FetchTimeseriesData(args):
    def _MatchesAllFilters(test_path):
        return all(f in test_path for f in args.filters)

    api = dashboard_api.PerfDashboardCommunicator(args)
    with tables.DbSession(args.database_file) as con:
        # Get test_paths.
        if args.benchmark is not None:
            test_paths = api.dashboard.ListTestPaths(args.benchmark,
                                                     sheriff=args.sheriff)
        elif args.input_file is not None:
            test_paths = list(_ReadTestPathsFromFile(args.input_file))
        elif args.study is not None:
            test_paths = list(args.study.IterTestPaths(api))
        else:
            raise ValueError('No source for test paths specified')

        # Apply --filter's to test_paths.
        if args.filters:
            test_paths = filter(_MatchesAllFilters, test_paths)
        num_found = len(test_paths)
        print '%d test paths found!' % num_found

        # Filter out test_paths already in cache.
        if args.use_cache:
            test_paths = list(_IterStaleTestPaths(con, test_paths))
            num_skipped = num_found - len(test_paths)
            if num_skipped:
                print '(skipping %d test paths already in the database)' % num_skipped

    # Use worker pool to fetch test path data.
    total_seconds = worker_pool.Run(
        'Fetching data of %d timeseries: ' % len(test_paths),
        _FetchTimeseriesWorker, args, test_paths)
    print '[%.1f test paths per second]' % (len(test_paths) / total_seconds)

    if args.output_csv is not None:
        print
        print 'Post-processing data for study ...'
        dfs = []
        with tables.DbSession(args.database_file) as con:
            for test_path in test_paths:
                df = tables.timeseries.GetTimeSeries(con, test_path)
                dfs.append(df)
        df = studies.PostProcess(pandas.concat(dfs, ignore_index=True))
        with utils.OpenWrite(args.output_csv) as f:
            df.to_csv(f, index=False)
        print 'Wrote timeseries data to:', args.output_csv
Example #6
def testWorkerPoolRun(self):
    tempdir = tempfile.mkdtemp()
    try:
        args = argparse.Namespace()
        args.database_file = os.path.join(tempdir, 'test.db')
        args.processes = 3
        items = range(20)  # We'll write these in the database.
        con = sqlite3.connect(args.database_file)
        pandas_sqlite.CreateTableIfNotExists(con, 'items', [('item', int)])
        with open(os.devnull, 'w') as devnull:
            worker_pool.Run('Processing:',
                            TestWorker,
                            args,
                            items,
                            stream=devnull)
        df = pandas.read_sql('SELECT * FROM items', con)
        # Check all of our items were written.
        self.assertItemsEqual(df['item'], items)
    finally:
        shutil.rmtree(tempdir)