def testAddRowsToCache(self):
  """Checks that AddRowsToCache merges new rows into the cached triplets."""
  self._AddMockData()
  # Pre-populate the cache with three existing [revision, value, ts] triplets.
  stored_object.Set(
      'externally_visible__num_revisions_ChromiumPerf/win7/dromaeo/dom',
      [[10, 2, 3], [15, 4, 5], [100, 6, 7]])
  test_key = utils.TestKey('ChromiumPerf/win7/dromaeo/dom')
  container_key = utils.GetTestContainerKey(test_key)
  timestamps = [
      datetime.datetime(2013, 1, 1),
      datetime.datetime(2013, 1, 2),
      datetime.datetime(2013, 1, 3),
  ]
  # Row ids interleave with the pre-cached revisions (1 < 10, 12 < 15, ...).
  rows = [
      graph_data.Row(parent=container_key, id=row_id, value=value,
                     timestamp=ts)
      for row_id, value, ts in zip([1, 12, 102], [9, 90, 99], timestamps)
  ]
  graph_revisions.AddRowsToCache(rows)
  # The merged cache should be sorted by revision, with new rows carrying
  # millisecond timestamps.
  expected = [
      [1, 9, utils.TimestampMilliseconds(timestamps[0])],
      [10, 2, 3],
      [12, 90, utils.TimestampMilliseconds(timestamps[1])],
      [15, 4, 5],
      [100, 6, 7],
      [102, 99, utils.TimestampMilliseconds(timestamps[2])],
  ]
  self.assertEqual(
      expected,
      stored_object.Get('externally_visible__num_revisions_'
                        'ChromiumPerf/win7/dromaeo/dom'))
def testPost_ReturnsAndCachesCorrectRevisions(self):
  """Posting should return revision triplets and also store them in cache."""
  self._AddMockData()
  response = self.testapp.post(
      '/graph_revisions', {'test_path': 'ChromiumPerf/win7/dromaeo/dom'})
  cached_rows = stored_object.Get(
      'externally_visible__num_revisions_ChromiumPerf/win7/dromaeo/dom')
  # All mock rows share the same timestamp, so compute it once.
  expected_timestamp = utils.TimestampMilliseconds(
      datetime.datetime(2013, 8, 1))
  for index, row in enumerate(json.loads(response.body)):
    revision = 15000 + index * 5
    expected = [revision, revision * 2.5, expected_timestamp]
    # The response row and the cached row should be identical triplets.
    self.assertEqual(expected, row)
    self.assertEqual(expected, cached_rows[index])
def _GetFlotJson(revision_map, tests):
  """Constructs JSON in the format expected by Flot.

  Args:
    revision_map: A dict which maps revision numbers to data point info.
    tests: A list of Test entities.

  Returns:
    JSON serialization of a dict with line data, annotations, error range
    data. (This data may not be passed exactly as-is to the Flot plot
    function, but it will all be used when plotting.)
  """
  # TODO(qyearsley): Break this function into smaller functions.

  # Each entry in the following dict is one Flot series object. The actual
  # x-y values will be put into the 'data' properties for each object.
  cols = {i: _FlotSeries(i) for i in range(len(tests))}

  flot_annotations = {}
  flot_annotations['series'] = _GetSeriesAnnotations(tests)

  # For each Test (which corresponds to a trace line), the shaded error
  # region is specified by two series objects. For a demo, see:
  # http://www.flotcharts.org/flot/examples/percentiles/index.html
  error_bars = {
      x: [{
          'id': 'bottom_%d' % x,
          'data': [],
          'color': x,
          'clickable': False,
          'hoverable': False,
          'lines': {
              'show': True,
              'lineWidth': 0,
              'fill': 0.2,
          },
          'fillBetween': 'line_%d' % x,
      }, {
          'id': 'top_%d' % x,
          'data': [],
          'color': x,
          'clickable': False,
          'hoverable': False,
          'lines': {
              'show': True,
              'lineWidth': 0,
              'fill': 0.2,
          },
          'fillBetween': 'line_%d' % x,
      }] for x, _ in enumerate(tests)
  }
  test_keys = [t.key.urlsafe() for t in tests]
  for revision in sorted(revision_map.keys()):
    for series_index, key in enumerate(test_keys):
      point_info = revision_map[revision].get(key, None)
      if not point_info:
        continue

      # Convert datetime timestamps to milliseconds so they are
      # JSON-serializable. (Exact-type check, not isinstance; presumably to
      # skip values that are already numeric milliseconds.)
      timestamp = point_info.get('timestamp')
      if timestamp and type(timestamp) is datetime.datetime:
        point_info['timestamp'] = utils.TimestampMilliseconds(
            timestamp)

      point_list = [revision, point_info['value']]
      if 'error' in point_info:
        # The shaded error band is drawn by filling between two extra
        # series at value +/- error.
        error = point_info['error']
        error_bars[series_index][0]['data'].append(
            [revision, point_info['value'] - error])
        error_bars[series_index][1]['data'].append(
            [revision, point_info['value'] + error])
      cols[series_index]['data'].append(point_list)
      # Everything except the y-value is kept as a per-point annotation,
      # keyed by the point's index within its series.
      data_index = len(cols[series_index]['data']) - 1
      series_dict = flot_annotations.setdefault(series_index, {})
      data_dict = copy.deepcopy(point_info)
      del data_dict['value']
      series_dict.setdefault(data_index, data_dict)
  # allow_nan=False: fail loudly rather than emit invalid JSON for NaN.
  return json.dumps(
      {
          'data': cols,
          'annotations': flot_annotations,
          'error_bars': error_bars,
      },
      allow_nan=False)
def _MakeTriplet(row):
  """Returns a [revision, value, timestamp-in-ms] list for the given Row."""
  return [
      row.revision,
      row.value,
      utils.TimestampMilliseconds(row.timestamp),
  ]
def _GetFlotJson(revision_map, tests, show_old_data_warning):
  """Constructs JSON in the format expected by Flot.

  Args:
    revision_map: A dict which maps revision numbers to data point info.
    tests: A list of Test entities.
    show_old_data_warning: Whether to show a warning to the user that the
        graph data is out of date.

  Returns:
    JSON serialization of a dict with line data, annotations, error range
    data, and possibly warning information. (This data may not be passed
    exactly as-is to the Flot plot function, but it will all be used when
    plotting.)
  """
  # TODO(qyearsley): Break this function into smaller functions.

  # Each entry in the following dict is one Flot series object. The actual
  # x-y values will be put into the 'data' properties for each object.
  cols = {i: _FlotSeries(i) for i in range(len(tests))}

  flot_annotations = {}
  flot_annotations['series'] = _GetSeriesAnnotations(tests)

  # For each Test (which corresponds to a trace line), the shaded error
  # region is specified by two series objects. For a demo, see:
  # http://www.flotcharts.org/flot/examples/percentiles/index.html
  error_bars = {
      x: [{
          'id': 'bottom_%d' % x,
          'data': [],
          'color': x,
          'clickable': False,
          'hoverable': False,
          'lines': {
              'show': True,
              'lineWidth': 0,
              'fill': 0.2,
          },
          'fillBetween': 'line_%d' % x,
      }, {
          'id': 'top_%d' % x,
          'data': [],
          'color': x,
          'clickable': False,
          'hoverable': False,
          'lines': {
              'show': True,
              'lineWidth': 0,
              'fill': 0.2,
          },
          'fillBetween': 'line_%d' % x,
      }] for x, _ in enumerate(tests)
  }
  test_keys = [t.key.urlsafe() for t in tests]
  # Track the newest point timestamp (in ms) across all series so we can
  # warn when the graph data is stale.
  last_timestamp = None
  has_points = False
  for revision in sorted(revision_map.keys()):
    for series_index, key in enumerate(test_keys):
      point_info = revision_map[revision].get(key, None)
      if not point_info:
        continue
      has_points = True
      timestamp = point_info.get('timestamp')
      if timestamp:
        # Convert datetime timestamps to milliseconds so they are
        # JSON-serializable. (Exact-type check, not isinstance; presumably
        # to skip values that are already numeric milliseconds.)
        if type(timestamp) is datetime.datetime:
          point_info['timestamp'] = utils.TimestampMilliseconds(
              timestamp)
        if not last_timestamp or point_info[
            'timestamp'] > last_timestamp:
          last_timestamp = point_info['timestamp']
      point_list = [revision, point_info['value']]
      if 'error' in point_info:
        # The shaded error band is drawn by filling between two extra
        # series at value +/- error.
        error = point_info['error']
        error_bars[series_index][0]['data'].append(
            [revision, point_info['value'] - error])
        error_bars[series_index][1]['data'].append(
            [revision, point_info['value'] + error])
      cols[series_index]['data'].append(point_list)
      # Everything except the y-value is kept as a per-point annotation,
      # keyed by the point's index within its series.
      data_index = len(cols[series_index]['data']) - 1
      series_dict = flot_annotations.setdefault(series_index, {})
      data_dict = copy.deepcopy(point_info)
      del data_dict['value']
      series_dict.setdefault(data_index, data_dict)
  warning = None
  if show_old_data_warning and last_timestamp:
    # last_timestamp is in milliseconds; fromtimestamp expects seconds.
    last_timestamp = datetime.datetime.fromtimestamp(last_timestamp / 1000)
    if last_timestamp < datetime.datetime.now() - _STALE_DATA_DELTA:
      warning = ('Graph out of date! Last data received: %s' %
                 last_timestamp.strftime('%Y/%m/%d %H:%M'))
  elif not has_points:
    warning = 'No data available.'
    # NOTE(review): the login hint is taken to be nested under the
    # no-data branch (a top-level placement would add to a None warning).
    if not utils.IsInternalUser():
      warning += ' Note that some data is only available when logged in.'
  # allow_nan=False: fail loudly rather than emit invalid JSON for NaN.
  return json.dumps(
      {
          'data': cols,
          'annotations': flot_annotations,
          'error_bars': error_bars,
          'warning': warning
      },
      allow_nan=False)
def _GetLastMondayTimestamp():
  """Get timestamp of 00:00 last Monday in milliseconds as an integer."""
  today = datetime.date.today()
  # weekday() is 0 for Monday, so subtracting it lands on this week's Monday.
  last_monday = today - datetime.timedelta(days=today.weekday())
  return utils.TimestampMilliseconds(last_monday)