def get(self): """Handles getting page states.""" state_id = self.request.get('sid') if not state_id: self.ReportError('Missing required parameters.', status=400) return state = ndb.Key(page_state.PageState, state_id).get() if not state: self.ReportError('Invalid sid.', status=400) return if self.request.get('v2', None) is None: self.response.out.write(state.value) return if state.value_v2 is None: state.value_v2 = _Upgrade(state.value) # If the user is not signed in, then they won't be able to see # internal_only TestMetadata, so value_v2 will be incomplete. # If the user is signed in, then value_v2 is complete, so it's safe to # store it. if datastore_hooks.IsUnalteredQueryPermitted(): state.put() self.response.out.write(state.value_v2)
def _AuthorizeAppEngineUser():
  user = users.get_current_user()
  if not user:
    raise NotLoggedInError

  # For now we only allow internal users access to the API.
  if not datastore_hooks.IsUnalteredQueryPermitted():
    raise InternalOnlyError
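
# A hedged sketch of using _AuthorizeAppEngineUser as a guard in a handler.
# The handler class, the webapp2 import, and the exception-to-status
# mapping are illustrative assumptions, not taken from the original module.
import webapp2


class ExampleApiHandler(webapp2.RequestHandler):

  def post(self):
    try:
      _AuthorizeAppEngineUser()
    except NotLoggedInError:
      self.abort(401)  # No signed-in user at all.
    except InternalOnlyError:
      self.abort(403)  # Signed in, but not an internal user.
    self.response.out.write('{"ok": true}')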
def _Request(endpoint, params):
  """Sends a request to an endpoint and returns JSON data."""
  assert datastore_hooks.IsUnalteredQueryPermitted()

  return request.RequestJson(
      endpoint, method='POST', use_cache=False, use_auth=True, **params)
def _post_get_hook(cls, key, future):  # pylint: disable=unused-argument
  """Throws an exception when external users try to get() internal data."""
  entity = future.get_result()
  if entity is None:
    return

  # Internal-only objects should never be accessed by non-internal
  # accounts!
  if (getattr(entity, 'internal_only', False)
      and not datastore_hooks.IsUnalteredQueryPermitted()):
    # Keep info about the fact that we're doing an access check out of the
    # callstack in case app engine shows it to the user.
    assert False
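
# ndb discovers model hooks by name: a classmethod called _post_get_hook is
# invoked after every get(). A sketch of wiring the function above onto a
# model; the model class here is a hypothetical example.
from google.appengine.ext import ndb


class InternalOnlyExample(ndb.Model):
  internal_only = ndb.BooleanProperty(default=False, indexed=True)

  # Reuse the module-level hook as the classmethod ndb looks for.
  _post_get_hook = classmethod(_post_get_hook)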
def _Request(endpoint, params):
  """Sends a request to an endpoint and returns JSON data."""
  assert datastore_hooks.IsUnalteredQueryPermitted()

  http_auth = utils.ServiceAccountHttp()
  _, content = http_auth.request(
      _PINPOINT_URL + endpoint,
      method='POST',
      body=urllib.urlencode(params),
      headers={'Content-length': 0})
  return json.loads(content)
def _Request(endpoint, params):
  """Sends a request to an endpoint and returns JSON data."""
  assert datastore_hooks.IsUnalteredQueryPermitted()

  try:
    return request.RequestJson(
        endpoint, method='POST', use_cache=False, use_auth=True, **params)
  except request.RequestError as e:
    return json.loads(e.content)
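
# Hedged usage sketch: unlike the stricter _Request variants above, this
# one catches request.RequestError and returns the parsed error body, so a
# caller inspects the payload instead of handling an exception. The
# endpoint and response keys here are hypothetical.
def GetJobOrNone(job_id):
  results = _Request('/api/job', {'job_id': job_id})
  if 'error' in results:
    logging.error('Endpoint returned an error: %s', results['error'])
    return None
  return results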
# Assumed decorator: the trailing `yield futures` is ndb tasklet style and
# needs @ndb.tasklet to run both writes asynchronously.
@ndb.tasklet
def SetCacheAsync(test_path, rows):
  # This first set generally only sets the internal-only cache.
  futures = [namespaced_stored_object.SetAsync(_CACHE_KEY % test_path, rows)]

  # If this is an internal_only query for externally available data,
  # set the cache for that too.
  if datastore_hooks.IsUnalteredQueryPermitted():
    test = utils.TestKey(test_path).get()
    if test and not test.internal_only:
      futures.append(
          namespaced_stored_object.SetExternalAsync(
              _CACHE_KEY % test_path, rows))
  yield futures
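
# Under @ndb.tasklet, SetCacheAsync returns an ndb.Future immediately. A
# hedged call-site sketch; the test path is invented and `rows` stands for
# the [revision, value, timestamp] triplets computed elsewhere.
future = SetCacheAsync('ChromiumPerf/linux/sunspider/Total', rows)
# ... kick off other datastore work here so the writes overlap ...
future.get_result()  # Block until both cache writes have completed.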
def AuthorizedPost(self, *args): """Returns timeseries data in response to API requests. Argument: test_path: Full path of test timeseries Outputs: JSON timeseries data for the test_path, see README.md. """ try: days = int(self.request.get('num_days', 30)) except ValueError: raise api_request_handler.BadRequestError( 'Invalid num_days parameter %s' % self.request.get('num_days')) if days <= 0: raise api_request_handler.BadRequestError( 'num_days cannot be negative (%s)' % days) before = datetime.datetime.now() - datetime.timedelta(days=days) test_path = args[0] test_key = utils.TestKey(test_path) test = test_key.get() if not test: raise api_request_handler.BadRequestError('Invalid test_path %s' % test_path) assert (datastore_hooks.IsUnalteredQueryPermitted() or not test.internal_only) datastore_hooks.SetSinglePrivilegedRequest() q = graph_data.Row.query() q = q.filter( graph_data.Row.parent_test == utils.OldStyleTestKey(test_key)) q = q.filter(graph_data.Row.timestamp > before) rows = q.fetch() if not rows: return [] revisions = [rev for rev in rows[0].to_dict() if rev.startswith('r_')] header = ['revision', 'value', 'timestamp'] + revisions timeseries = [header] for row in sorted(rows, key=lambda r: r.revision): timeseries.append([self._GetValue(row, a) for a in header]) return { 'timeseries': timeseries, 'test_path': test_path, 'revision_logs': namespaced_stored_object.Get('revision_info'), 'improvement_direction': test.improvement_direction, }
def _UpdateRevisionMap(revision_map, parent_test, rev, num_points,
                       start_rev=None, end_rev=None):
  """Updates a dict of revisions to data point information for one test.

  Depending on which arguments are given, there are several ways that this
  function can update the dict of revisions:
    1. If start_rev and end_rev are given, then revisions in this range
       are used. The num_points argument is ignored.
    2. Otherwise, if rev is given, then revisions before and after the
       specified revision are used.
    3. Otherwise, the latest revisions are used.

  Args:
    revision_map: A dict mapping revision numbers to dicts of point info.
        Each point info dict contains information from a Row entity.
    parent_test: A TestMetadata entity with Row children.
    rev: The middle revision in the revision map (could be None).
    num_points: The number of points to include in the revision map.
    start_rev: Start revision number (optional).
    end_rev: End revision number (optional).
  """
  anomaly_annotation_map = _GetAnomalyAnnotationMap(parent_test.key)
  assert (datastore_hooks.IsUnalteredQueryPermitted()
          or not parent_test.internal_only)

  if start_rev and end_rev:
    rows = graph_data.GetRowsForTestInRange(
        parent_test.key, start_rev, end_rev, True)
  elif rev:
    assert num_points
    rows = graph_data.GetRowsForTestAroundRev(
        parent_test.key, rev, num_points, True)
  else:
    assert num_points
    rows = graph_data.GetLatestRowsForTest(
        parent_test.key, num_points, privileged=True)

  parent_test_key = parent_test.key.urlsafe()
  for row in rows:
    if row.revision not in revision_map:
      revision_map[row.revision] = {}
    revision_map[row.revision][parent_test_key] = _PointInfoDict(
        row, anomaly_annotation_map)
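
# A hedged sketch of a caller aggregating points for several tests into one
# shared map; the tests list and the point count are illustrative.
revision_map = {}
for test in tests:  # TestMetadata entities fetched by the caller.
  _UpdateRevisionMap(revision_map, test, rev=None, num_points=150)
# revision_map now maps revision -> {urlsafe test key -> point info dict}.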
def SetCache(test_path, rows):
  """Sets the saved graph revisions data for a test.

  Args:
    test_path: A test path string.
    rows: A list of [revision, value, timestamp] triplets.
  """
  # This first set generally only sets the internal-only cache.
  namespaced_stored_object.Set(_CACHE_KEY % test_path, rows)

  # If this is an internal_only query for externally available data,
  # set the cache for that too.
  if datastore_hooks.IsUnalteredQueryPermitted():
    test = utils.TestKey(test_path).get()
    if test and not test.internal_only:
      namespaced_stored_object.SetExternal(_CACHE_KEY % test_path, rows)
def get(self): """Gets CSV from data store and outputs it. Request parameters: test_path: Full test path of one trace. rev: End revision number; if not given, latest revision is used. num_points: Number of Rows to get data for. attr: Comma-separated list of attributes (columns) to return. Outputs: CSV file contents. """ test_path = self.request.get('test_path') rev = self.request.get('rev') num_points = int(self.request.get('num_points', 500)) attributes = self.request.get('attr', 'revision,value').split(',') if not test_path: self.ReportError('No test path given.', status=400) return logging.info('Got request to /graph_csv for test: "%s".', test_path) test_key = utils.TestKey(test_path) test = test_key.get() assert (datastore_hooks.IsUnalteredQueryPermitted() or not test.internal_only) datastore_hooks.SetSinglePrivilegedRequest() q = graph_data.Row.query() q = q.filter( graph_data.Row.parent_test == utils.OldStyleTestKey(test_key)) if rev: q = q.filter(graph_data.Row.revision <= int(rev)) q = q.order(-graph_data.Row.revision) points = reversed(q.fetch(limit=num_points)) rows = self._GenerateRows(points, attributes) output = StringIO.StringIO() csv.writer(output).writerows(rows) self.response.headers['Content-Type'] = 'text/csv' self.response.headers['Content-Disposition'] = ( 'attachment; filename=%s.csv' % test.test_name) self.response.out.write(output.getvalue())
def _GetBug(self, *args):
  # Users must log in with privileged access to see all bugs.
  if not datastore_hooks.IsUnalteredQueryPermitted():
    raise BadRequestError('No access.')

  try:
    bug_id = int(args[0])
  except ValueError:
    raise BadRequestError('Invalid bug ID "%s".' % args[0])

  service = issue_tracker_service.IssueTrackerService(
      utils.ServiceAccountHttp())
  issue = service.GetIssue(bug_id)
  comments = service.GetIssueComments(bug_id)
  bisects = try_job.TryJob.query(try_job.TryJob.bug_id == bug_id).fetch()

  return {
      'author': issue.get('author', {}).get('name'),
      'legacy_bisects': [{
          'status': b.status,
          'bot': b.bot,
          'bug_id': b.bug_id,
          'buildbucket_link': (
              'https://chromeperf.appspot.com/buildbucket_job_status/%s' %
              b.buildbucket_job_id),
          'command': b.GetConfigDict()['command'],
          'culprit': self._GetCulpritInfo(b),
          'metric': (b.results_data or {}).get('metric'),
      } for b in bisects],
      'cc': [cc.get('name') for cc in issue.get('cc', [])],
      'comments': [{
          'content': comment.get('content'),
          'author': comment.get('author'),
          'published': self._FormatTimestampMilliseconds(
              comment.get('published')),
      } for comment in comments],
      'components': issue.get('components', []),
      'id': bug_id,
      'labels': issue.get('labels', []),
      'published': self._FormatTimestampMilliseconds(issue.get('published')),
      'state': issue.get('state'),
      'status': issue.get('status'),
      'summary': issue.get('summary'),
  }
def _InequalityFilters(cls, query, equality_properties, inequality_property,
                       bug_id, min_end_revision, max_end_revision,
                       min_start_revision, max_start_revision,
                       min_timestamp, max_timestamp):
  # A query cannot have more than one inequality filter.
  # inequality_property allows users to decide which property to filter in
  # the query, which can significantly affect performance. If other
  # inequalities are specified, they will be handled by post_filters.

  # If callers set inequality_property without actually specifying a
  # corresponding inequality filter, then reset the inequality_property
  # and compute it automatically as if it were not specified.
  if inequality_property == 'start_revision':
    if min_start_revision is None and max_start_revision is None:
      inequality_property = None
  elif inequality_property == 'end_revision':
    if min_end_revision is None and max_end_revision is None:
      inequality_property = None
  elif inequality_property == 'timestamp':
    if min_timestamp is None and max_timestamp is None:
      inequality_property = None
  elif inequality_property == 'bug_id':
    if bug_id != '*':
      inequality_property = None
  elif inequality_property == 'key':
    if equality_properties == [
        'subscription_names'
    ] and (min_start_revision or max_start_revision):
      # Use the composite index (subscription_names, start_revision,
      # -timestamp). See index.yaml.
      inequality_property = 'start_revision'
  else:
    inequality_property = None

  if inequality_property is None:
    # Compute a default inequality_property.
    # We prioritise the 'min' filters first because that lets us limit the
    # amount of data the Datastore instances might handle.
    if min_start_revision:
      inequality_property = 'start_revision'
    elif min_end_revision:
      inequality_property = 'end_revision'
    elif min_timestamp:
      inequality_property = 'timestamp'
    elif max_start_revision:
      inequality_property = 'start_revision'
    elif max_end_revision:
      inequality_property = 'end_revision'
    elif max_timestamp:
      inequality_property = 'timestamp'
    elif bug_id == '*':
      inequality_property = 'bug_id'

  post_filters = []
  if not inequality_property:
    return query, post_filters

  if not datastore_hooks.IsUnalteredQueryPermitted():
    # _DatastorePreHook will filter internal_only=False. index.yaml does not
    # specify indexes for `internal_only, $inequality_property, -timestamp`.
    # Use post_filters for all inequality properties.
    inequality_property = ''

  if bug_id == '*':
    if inequality_property == 'bug_id':
      logging.info('filter:bug_id!=None')
      query = query.filter(cls.bug_id != None).order(cls.bug_id)
    else:
      logging.info('post_filter:bug_id!=None')
      post_filters.append(lambda a: a.bug_id != None)

  # Apply the min filters before the max filters, because that lets us
  # optimise the query application for more recent data, reducing the
  # amount of data post-processing.
  if min_start_revision:
    min_start_revision = int(min_start_revision)
    if inequality_property == 'start_revision':
      logging.info('filter:min_start_revision=%d', min_start_revision)
      query = query.filter(cls.start_revision >= min_start_revision)
      query = query.order(cls.start_revision)
    else:
      logging.info('post_filter:min_start_revision=%d', min_start_revision)
      post_filters.append(lambda a: a.start_revision >= min_start_revision)

  if min_end_revision:
    min_end_revision = int(min_end_revision)
    if inequality_property == 'end_revision':
      logging.info('filter:min_end_revision=%d', min_end_revision)
      query = query.filter(cls.end_revision >= min_end_revision)
      query = query.order(cls.end_revision)
    else:
      logging.info('post_filter:min_end_revision=%d', min_end_revision)
      post_filters.append(lambda a: a.end_revision >= min_end_revision)

  if min_timestamp:
    if inequality_property == 'timestamp':
      logging.info('filter:min_timestamp=%d',
                   time.mktime(min_timestamp.utctimetuple()))
      query = query.filter(cls.timestamp >= min_timestamp)
    else:
      logging.info('post_filter:min_timestamp=%d',
                   time.mktime(min_timestamp.utctimetuple()))
      post_filters.append(lambda a: a.timestamp >= min_timestamp)

  if max_start_revision:
    max_start_revision = int(max_start_revision)
    if inequality_property == 'start_revision':
      logging.info('filter:max_start_revision=%d', max_start_revision)
      query = query.filter(cls.start_revision <= max_start_revision)
      query = query.order(-cls.start_revision)
    else:
      logging.info('post_filter:max_start_revision=%d', max_start_revision)
      post_filters.append(lambda a: a.start_revision <= max_start_revision)

  if max_end_revision:
    max_end_revision = int(max_end_revision)
    if inequality_property == 'end_revision':
      logging.info('filter:max_end_revision=%d', max_end_revision)
      query = query.filter(cls.end_revision <= max_end_revision)
      query = query.order(-cls.end_revision)
    else:
      logging.info('post_filter:max_end_revision=%d', max_end_revision)
      post_filters.append(lambda a: a.end_revision <= max_end_revision)

  if max_timestamp:
    if inequality_property == 'timestamp':
      logging.info('filter:max_timestamp=%d',
                   time.mktime(max_timestamp.utctimetuple()))
      query = query.filter(cls.timestamp <= max_timestamp)
    else:
      logging.info('post_filter:max_timestamp=%d',
                   time.mktime(max_timestamp.utctimetuple()))
      post_filters.append(lambda a: a.timestamp <= max_timestamp)

  return query, post_filters
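
# Because Datastore allows a single inequality property per query, every
# other bound comes back as a plain Python predicate. A hedged sketch of a
# call site applying both halves (the fetch limit is illustrative):
query, post_filters = cls._InequalityFilters(
    query, equality_properties, inequality_property, bug_id,
    min_end_revision, max_end_revision, min_start_revision,
    max_start_revision, min_timestamp, max_timestamp)
alerts = [a for a in query.fetch(limit=1000)
          if all(f(a) for f in post_filters)]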
def AuthorizedPost(self, *args): """Returns alert data in response to API requests. Argument: bug_id: issue id on the chromium issue tracker Outputs: JSON data for the bug, see README.md. """ # Users must log in with privileged access to see all bugs. if not datastore_hooks.IsUnalteredQueryPermitted(): raise api_request_handler.BadRequestError('No access.') try: bug_id = int(args[0]) except ValueError: raise api_request_handler.BadRequestError('Invalid bug ID "%s".' % args[0]) try: include_comments = api_utils.ParseBool( self.request.get('include_comments', None)) except ValueError: raise api_request_handler.BadRequestError( "value of |with_comments| should be 'true' or 'false'") service = issue_tracker_service.IssueTrackerService( utils.ServiceAccountHttp()) issue = service.GetIssue(bug_id) bisects = try_job.TryJob.query(try_job.TryJob.bug_id == bug_id).fetch() def _FormatDate(d): if not d: return '' return d.isoformat() response = { 'bug': { 'author': issue.get('author', {}).get('name'), 'owner': issue.get('owner', {}).get('name'), 'legacy_bisects': [{ 'status': b.status, 'bot': b.bot, 'bug_id': b.bug_id, 'buildbucket_link': ('https://chromeperf.appspot.com/buildbucket_job_status/%s' % b.buildbucket_job_id), 'command': b.GetConfigDict()['command'], 'culprit': self._GetCulpritInfo(b), 'metric': (b.results_data or {}).get('metric'), 'started_timestamp': _FormatDate(b.last_ran_timestamp), } for b in bisects], 'cc': [cc.get('name') for cc in issue.get('cc', [])], 'components': issue.get('components', []), 'id': bug_id, 'labels': issue.get('labels', []), 'published': issue.get('published'), 'updated': issue.get('updated'), 'state': issue.get('state'), 'status': issue.get('status'), 'summary': issue.get('summary'), } } if include_comments: comments = service.GetIssueComments(bug_id) response['bug']['comments'] = [{ 'content': comment.get('content'), 'author': comment.get('author'), 'published': comment.get('published'), } for comment in comments] return response
def _CheckUser(self):
  self._CheckIsLoggedIn()
  if not datastore_hooks.IsUnalteredQueryPermitted():
    raise api_request_handler.ForbiddenError()
def _InequalityFilters(cls, query, inequality_property, bug_id,
                       min_end_revision, max_end_revision,
                       min_start_revision, max_start_revision,
                       min_timestamp, max_timestamp):
  # A query cannot have more than one inequality filter.
  # inequality_property allows users to decide which property to filter in
  # the query, which can significantly affect performance. If other
  # inequalities are specified, they will be handled by post_filters.

  # If callers set inequality_property without actually specifying a
  # corresponding inequality filter, then reset the inequality_property
  # and compute it automatically as if it were not specified.
  if inequality_property == 'start_revision':
    if min_start_revision is None and max_start_revision is None:
      inequality_property = None
  elif inequality_property == 'end_revision':
    if min_end_revision is None and max_end_revision is None:
      inequality_property = None
  elif inequality_property == 'timestamp':
    if min_timestamp is None and max_timestamp is None:
      inequality_property = None
  elif inequality_property == 'bug_id':
    if bug_id != '*':
      inequality_property = None
  elif inequality_property != 'key':
    inequality_property = None

  if inequality_property is None:
    # Compute a default inequality_property.
    if min_start_revision or max_start_revision:
      inequality_property = 'start_revision'
    elif min_end_revision or max_end_revision:
      inequality_property = 'end_revision'
    elif min_timestamp or max_timestamp:
      inequality_property = 'timestamp'
    elif bug_id == '*':
      inequality_property = 'bug_id'

  post_filters = []
  if not inequality_property:
    return query, post_filters

  if not datastore_hooks.IsUnalteredQueryPermitted():
    # _DatastorePreHook will filter internal_only=False. index.yaml does not
    # specify indexes for `internal_only, $inequality_property, -timestamp`.
    # Use post_filters for all inequality properties.
    inequality_property = ''

  if bug_id == '*':
    if inequality_property == 'bug_id':
      query = query.filter(cls.bug_id != None).order(cls.bug_id)
    else:
      post_filters.append(lambda a: a.bug_id != None)

  if min_start_revision:
    min_start_revision = int(min_start_revision)
    if inequality_property == 'start_revision':
      logging.info('filter:min_start_revision=%d', min_start_revision)
      query = query.filter(cls.start_revision >= min_start_revision)
      query = query.order(cls.start_revision)
    else:
      post_filters.append(lambda a: a.start_revision >= min_start_revision)

  if max_start_revision:
    max_start_revision = int(max_start_revision)
    if inequality_property == 'start_revision':
      logging.info('filter:max_start_revision=%d', max_start_revision)
      query = query.filter(cls.start_revision <= max_start_revision)
      query = query.order(-cls.start_revision)
    else:
      post_filters.append(lambda a: a.start_revision <= max_start_revision)

  if min_end_revision:
    min_end_revision = int(min_end_revision)
    if inequality_property == 'end_revision':
      logging.info('filter:min_end_revision=%d', min_end_revision)
      query = query.filter(cls.end_revision >= min_end_revision)
      query = query.order(cls.end_revision)
    else:
      post_filters.append(lambda a: a.end_revision >= min_end_revision)

  if max_end_revision:
    max_end_revision = int(max_end_revision)
    if inequality_property == 'end_revision':
      logging.info('filter:max_end_revision=%d', max_end_revision)
      query = query.filter(cls.end_revision <= max_end_revision)
      query = query.order(-cls.end_revision)
    else:
      post_filters.append(lambda a: a.end_revision <= max_end_revision)

  if min_timestamp:
    if inequality_property == 'timestamp':
      logging.info('filter:min_timestamp=%d',
                   time.mktime(min_timestamp.utctimetuple()))
      query = query.filter(cls.timestamp >= min_timestamp)
    else:
      post_filters.append(lambda a: a.timestamp >= min_timestamp)

  if max_timestamp:
    if inequality_property == 'timestamp':
      logging.info('filter:max_timestamp=%d',
                   time.mktime(max_timestamp.utctimetuple()))
      query = query.filter(cls.timestamp <= max_timestamp)
    else:
      post_filters.append(lambda a: a.timestamp <= max_timestamp)

  return query, post_filters
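
# For reference, the constraint both _InequalityFilters variants work
# around: Datastore rejects a query that mixes inequality filters on two
# different properties. A hedged sketch of the failing shape, assuming cls
# is the Anomaly model:
q = Anomaly.query(Anomaly.start_revision >= 100,
                  Anomaly.timestamp <= max_timestamp)
q.fetch()  # Raises BadRequestError: one inequality property per query.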