def testInsertOrReplaceRecords_existingRecords(self):
  """Records sharing the index are replaced in place; new ones are appended."""
  schema = (('bug_id', int), ('summary', str), ('status', str))
  initial = pandas_sqlite.DataFrame(
      schema, index='bug_id',
      rows=[(123, 'Some bug', 'Started'), (456, 'Another bug', 'Assigned')])
  update = pandas_sqlite.DataFrame(
      schema, index='bug_id',
      rows=[(123, 'Some bug', 'Fixed'), (789, 'A new bug', 'Untriaged')])
  con = sqlite3.connect(':memory:')
  try:
    pandas_sqlite.CreateTableIfNotExists(con, 'bugs', initial)
    # First write: both initial rows should land in the table.
    pandas_sqlite.InsertOrReplaceRecords(con, 'bugs', initial)
    stored = pandas.read_sql('SELECT * FROM bugs', con, index_col='bug_id')
    self.assertEqual(len(stored), 2)
    self.assertEqual(stored.loc[123]['status'], 'Started')
    # Second write: bug 123 is replaced in place and bug 789 is appended,
    # so only one extra record shows up.
    pandas_sqlite.InsertOrReplaceRecords(con, 'bugs', update)
    stored = pandas.read_sql('SELECT * FROM bugs', con, index_col='bug_id')
    self.assertEqual(len(stored), 3)
    self.assertEqual(stored.loc[123]['status'], 'Fixed')  # Bug is now fixed.
    self.assertItemsEqual(stored.index, (123, 456, 789))
  finally:
    con.close()
def testGetTimeSeries_withSummaryMetric(self):
  """A summary metric (empty test_case) round-trips through the database."""
  test_path = tables.timeseries.Key(
      test_suite='loading.mobile',
      measurement='timeToFirstInteractive',
      bot='ChromiumPerf:android-nexus5',
      test_case='')
  points = [
      SamplePoint(547397, 2300.3),
      SamplePoint(547398, 2750.9),
      SamplePoint(547423, 2342.2),
  ]
  data = {'improvement_direction': 'down', 'units': 'ms', 'data': points}
  timeseries_in = tables.timeseries.DataFrameFromJson(test_path, data)
  with tables.DbSession(':memory:') as con:
    pandas_sqlite.InsertOrReplaceRecords(con, 'timeseries', timeseries_in)
    timeseries_out = tables.timeseries.GetTimeSeries(con, test_path)
    # Both DataFrame's should be equal, except the one we get out of the db
    # does not have an index defined.
    timeseries_in = timeseries_in.reset_index()
    self.assertTrue(timeseries_in.equals(timeseries_out))
def testInsertOrReplaceRecords_tableNotExistsRaises(self):
  """Writing records without first creating the table is an error."""
  schema = (('bug_id', int), ('summary', str), ('status', str))
  df = pandas_sqlite.DataFrame(
      schema, index='bug_id',
      rows=[(123, 'Some bug', 'Started'), (456, 'Another bug', 'Assigned')])
  con = sqlite3.connect(':memory:')
  try:
    # No CreateTableIfNotExists call, so the insert must fail.
    with self.assertRaises(AssertionError):
      pandas_sqlite.InsertOrReplaceRecords(con, 'bugs', df)
  finally:
    con.close()
def Process(test_path):
  """Fetch one timeseries from the dashboard and store it in the database.

  Closure over `con`, `args` and `min_timestamp` from the enclosing scope.
  Missing timeseries are logged and skipped rather than treated as errors.
  """
  try:
    if isinstance(test_path, tables.timeseries.Key):
      # New-style keys go through the Timeseries2 API.
      params = test_path.AsApiParams()
      params['min_timestamp'] = min_timestamp
      data = dashboard_service.Timeseries2(**params)
    else:
      # Plain string test paths use the legacy endpoint.
      data = dashboard_service.Timeseries(test_path, days=args.days)
  except KeyError:
    logging.info('Timeseries not found: %s', test_path)
    return
  timeseries = tables.timeseries.DataFrameFromJson(test_path, data)
  pandas_sqlite.InsertOrReplaceRecords(con, 'timeseries', timeseries)
def testGetMostRecentPoint_success(self):
  """GetMostRecentPoint returns the stored point with the highest id."""
  test_path = tables.timeseries.Key(
      test_suite='loading.mobile',
      measurement='timeToFirstInteractive',
      bot='ChromiumPerf:android-nexus5',
      test_case='Wikipedia')
  points = [
      SamplePoint(547397, 2300.3),
      SamplePoint(547398, 2750.9),
      SamplePoint(547423, 2342.2),
  ]
  data = {'improvement_direction': 'down', 'units': 'ms', 'data': points}
  timeseries = tables.timeseries.DataFrameFromJson(test_path, data)
  with tables.DbSession(':memory:') as con:
    pandas_sqlite.InsertOrReplaceRecords(con, 'timeseries', timeseries)
    point = tables.timeseries.GetMostRecentPoint(con, test_path)
    self.assertEqual(point['point_id'], 547423)
def FetchAlertsData(args): params = { 'test_suite': args.benchmark, 'min_timestamp': cli_utils.DaysAgoToTimestamp(args.days) } if args.sheriff != 'all': params['sheriff'] = args.sheriff with tables.DbSession(args.database_file) as con: # Get alerts. num_alerts = 0 bug_ids = set() # TODO: This loop may be slow when fetching thousands of alerts, needs a # better progress indicator. for data in dashboard_service.IterAlerts(**params): alerts = tables.alerts.DataFrameFromJson(data) pandas_sqlite.InsertOrReplaceRecords(con, 'alerts', alerts) num_alerts += len(alerts) bug_ids.update(alerts['bug_id'].unique()) print '%d alerts found!' % num_alerts # Get set of bugs associated with those alerts. bug_ids.discard(0) # A bug_id of 0 means untriaged. print '%d bugs found!' % len(bug_ids) # Filter out bugs already in cache. if args.use_cache: known_bugs = set(b for b in bug_ids if tables.bugs.Get(con, b) is not None) if known_bugs: print '(skipping %d bugs already in the database)' % len( known_bugs) bug_ids.difference_update(known_bugs) # Use worker pool to fetch bug data. total_seconds = worker_pool.Run( 'Fetching data of %d bugs: ' % len(bug_ids), _FetchBugsWorker, args, bug_ids) print '[%.1f bugs per second]' % (len(bug_ids) / total_seconds)
def Process(bug_id):
  """Fetch a single bug from the dashboard and store it in the database.

  Closure over `con` from the enclosing scope.
  """
  bug_data = dashboard_service.Bugs(bug_id)
  bugs = tables.bugs.DataFrameFromJson([bug_data])
  pandas_sqlite.InsertOrReplaceRecords(con, 'bugs', bugs)