import contextlib
import sqlite3

# Project-local modules; import paths assumed from usage in this file.
from soundwave import dashboard_api
from soundwave import pandas_sqlite
from soundwave import tables
from soundwave import worker_pool


def FetchTimeseriesData(args):
  def _MatchesAllFilters(test_path):
    return all(f in test_path for f in args.filters)

  # Collect the test paths to fetch, either from the dashboard API or from a
  # local file with one test path per line.
  if args.benchmark is not None:
    api = dashboard_api.PerfDashboardCommunicator(args)
    test_paths = api.ListTestPaths(args.benchmark, sheriff=args.sheriff)
  elif args.input_file is not None:
    test_paths = list(_ReadTestPathsFromFile(args.input_file))
  else:
    raise NotImplementedError('Expected --benchmark or --input-file')

  if args.filters:
    test_paths = filter(_MatchesAllFilters, test_paths)

  num_found = len(test_paths)
  print '%d test paths found!' % num_found

  con = sqlite3.connect(args.database_file)
  try:
    tables.CreateIfNeeded(con)
    if args.use_cache:
      # Only fetch test paths whose data is stale or missing.
      test_paths = list(_IterStaleTestPaths(con, test_paths))
      num_skipped = num_found - len(test_paths)
      if num_skipped:
        print '(skipping %d test paths already in the database)' % num_skipped
  finally:
    con.close()

  # Fan the fetches out to a pool of workers; Run returns the elapsed time.
  total_seconds = worker_pool.Run(
      'Fetching data of %d timeseries: ' % len(test_paths),
      _FetchTimeseriesWorker, args, test_paths)
  print '[%.1f test paths per second]' % (len(test_paths) / total_seconds)
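# A minimal sketch of the command-line flags FetchTimeseriesData reads off
# `args`. The tool's real parser lives elsewhere; the helper name and help
# strings below are assumptions for illustration only (argparse maps
# `--input-file` to `args.input_file`, and so on).
def _ExampleArgumentParser():  # Hypothetical, not part of the tool.
  import argparse
  parser = argparse.ArgumentParser()
  parser.add_argument('--benchmark', help='Benchmark to fetch timeseries for.')
  parser.add_argument('--sheriff',
                      help='Sheriff rotation to restrict test paths to.')
  parser.add_argument('--input-file', help='File with one test path per line.')
  parser.add_argument('--filters', action='append', default=[],
                      help='Substrings that test paths must all contain.')
  parser.add_argument('--database-file', required=True,
                      help='Path of the sqlite database to write to.')
  parser.add_argument('--use-cache', action='store_true',
                      help='Skip items already present in the database.')
  parser.add_argument('--days', type=int,
                      help='Number of days of data to fetch.')
  return parser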
def FetchAlertsData(args):
  api = dashboard_api.PerfDashboardCommunicator(args)
  con = sqlite3.connect(args.database_file)
  try:
    tables.CreateIfNeeded(con)
    # Fetch and store the alerts for this benchmark.
    alerts = tables.alerts.DataFrameFromJson(
        api.GetAlertData(args.benchmark, args.sheriff, args.days))
    print '%d alerts found!' % len(alerts)
    pandas_sqlite.InsertOrReplaceRecords(con, 'alerts', alerts)

    # Collect the set of bugs referenced by those alerts.
    bug_ids = set(alerts['bug_id'].unique())
    bug_ids.discard(0)  # A bug_id of 0 means untriaged.
    print '%d bugs found!' % len(bug_ids)
    if args.use_cache:
      known_bugs = set(
          b for b in bug_ids if tables.bugs.Get(con, b) is not None)
      if known_bugs:
        print '(skipping %d bugs already in the database)' % len(known_bugs)
        bug_ids.difference_update(known_bugs)
  finally:
    con.close()

  # Fan the bug fetches out to a pool of workers.
  total_seconds = worker_pool.Run(
      'Fetching data of %d bugs: ' % len(bug_ids),
      _FetchBugsWorker, args, bug_ids)
  print '[%.1f bugs per second]' % (len(bug_ids) / total_seconds)
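# The worker_pool.Run calls above imply a worker contract: the worker factory
# receives the shared `args` once (e.g. to open its own API and database
# session) and produces a callback invoked once per item. The sketch below is
# one plausible shape for the bugs worker; the factory name, the
# api.GetBugData method, and the return-the-callback convention are all
# assumptions based on usage in this file, not the tool's actual code.
def _ExampleFetchBugsWorker(args):  # Hypothetical stand-in for _FetchBugsWorker.
  api = dashboard_api.PerfDashboardCommunicator(args)
  con = sqlite3.connect(args.database_file)

  def Process(bug_id):
    # Fetch a single bug and store it; InsertOrReplaceRecords keeps
    # re-fetching idempotent.
    bugs = tables.bugs.DataFrameFromJson(api.GetBugData(bug_id))
    pandas_sqlite.InsertOrReplaceRecords(con, 'bugs', bugs)

  return Process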
@contextlib.contextmanager
def _ApiAndDbSession(args):
  """Context manage a session with API and DB connections.

  Ensures API has necessary credentials and DB tables have been initialized.
  """
  api = dashboard_api.PerfDashboardCommunicator(args)
  con = sqlite3.connect(args.database_file)
  # Tell sqlite to use a write-ahead log, which drastically increases its
  # concurrency capabilities. This helps prevent 'database is locked'
  # exceptions when we have many workers writing to a single database. This
  # mode is sticky, so we only need to set it once and future connections
  # will automatically use the log. More details are available at
  # https://www.sqlite.org/wal.html.
  con.execute('PRAGMA journal_mode=WAL')
  try:
    tables.CreateIfNeeded(con)
    yield api, con
  finally:
    con.close()
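# With the session helper above, the fetch commands no longer need their own
# connect/try/finally plumbing. A minimal sketch of the intended usage, with
# the function body abbreviated:
#
#   def FetchAlertsData(args):
#     with _ApiAndDbSession(args) as (api, con):
#       alerts = tables.alerts.DataFrameFromJson(
#           api.GetAlertData(args.benchmark, args.sheriff, args.days))
#       pandas_sqlite.InsertOrReplaceRecords(con, 'alerts', alerts)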