def PinpointParamsFromPerfTryParams(params):
  """Takes parameters from Dashboard's pinpoint-perf-job-dialog and returns
  a dict with parameters for a new Pinpoint job.

  Args:
    params: A dict in the following format:
      {
        'test_path': Test path for the metric being tried.
        'start_commit': Git hash or commit position of earlier revision.
        'end_commit': Git hash or commit position of later revision.
        'extra_test_args': Extra args for the swarming job.
        'story_filter': Story name to run, or a falsy value to run all
            stories.
      }

  Returns:
    A dict of params for passing to Pinpoint to start a job.

  Raises:
    InvalidParamsError: The logged-in user is not an authorized sheriff.
  """
  if not utils.IsValidSheriffUser():
    user = utils.GetEmail()
    raise InvalidParamsError('User "%s" not authorized.' % user)

  test_path = params['test_path']
  test_path_parts = test_path.split('/')
  bot_name = test_path_parts[1]
  suite = test_path_parts[2]

  start_commit = params['start_commit']
  end_commit = params['end_commit']
  start_git_hash = ResolveToGitHash(start_commit, suite)
  end_git_hash = ResolveToGitHash(end_commit, suite)
  story_filter = params['story_filter']

  # Pinpoint also requires you specify which isolate target to run the
  # test, so we derive that from the suite name. Eventually, this would
  # ideally be stored in a SparseDiagnostic but for now we can guess.
  target = _GetIsolateTarget(bot_name, suite, start_commit, end_commit,
                             only_telemetry=True)
  extra_test_args = params['extra_test_args']

  email = utils.GetEmail()
  job_name = 'Try job on %s/%s' % (bot_name, suite)

  pinpoint_params = {
      'comparison_mode': 'try',
      'configuration': bot_name,
      'benchmark': suite,
      'start_git_hash': start_git_hash,
      'end_git_hash': end_git_hash,
      'extra_test_args': extra_test_args,
      'target': target,
      'user': email,
      'name': job_name
  }

  if story_filter:
    pinpoint_params['story'] = story_filter

  return pinpoint_params
def Authorize():
  """Verifies the OAuth credentials of the requesting user.

  Raises:
    OAuthError: Fetching credentials failed, or the OAuth client ID of a
        non-service account is not whitelisted.
    NotLoggedInError: The request has no associated email.
  """
  try:
    email = utils.GetEmail()
  except oauth.OAuthRequestError:
    raise OAuthError

  if not email:
    raise NotLoggedInError

  try:
    is_service_account = email.endswith('.gserviceaccount.com')
    if not is_service_account:
      # For non-service account, need to verify that the OAuth client ID
      # is in our whitelist.
      client_id = oauth.get_client_id(utils.OAUTH_SCOPES)
      if client_id not in OAUTH_CLIENT_ID_WHITELIST:
        logging.error('OAuth client id %s for user %s not in whitelist',
                      client_id, email)
        email = None
        raise OAuthError
  except oauth.OAuthRequestError:
    # Transient errors when checking the token result should result in
    # HTTP 500, so catch oauth.OAuthRequestError here, not oauth.Error
    # (which would catch both fatal and transient errors).
    raise OAuthError

  logging.info('OAuth user logged in as: %s', email)
  if utils.IsInternalUser():
    datastore_hooks.SetPrivilegedRequest()
def Authorize():
  """Verifies the OAuth credentials of the requesting user.

  Raises:
    OAuthError: Fetching credentials failed, or the OAuth client ID of a
        non-service account is not allowlisted.
    NotLoggedInError: The request has no associated email.
  """
  try:
    email = utils.GetEmail()
  except oauth.OAuthRequestError:
    raise OAuthError

  if not email:
    raise NotLoggedInError

  try:
    # TODO(dberris): Migrate to using Cloud IAM and checking roles instead, to
    # allow for dynamic management of the accounts.
    if not email.endswith('.gserviceaccount.com'):
      # For non-service accounts, need to verify that the OAuth client ID
      # is in our allowlist.
      client_id = oauth.get_client_id(utils.OAUTH_SCOPES)
      allowed = client_id in OAUTH_CLIENT_ID_ALLOWLIST
      if not allowed:
        logging.error('OAuth client id %s for user %s not in allowlist',
                      client_id, email)
        raise OAuthError
  except oauth.OAuthRequestError:
    # Transient errors when checking the token result should result in
    # HTTP 500, so catch oauth.OAuthRequestError here, not oauth.Error
    # (which would catch both fatal and transient errors).
    raise OAuthError

  logging.info('OAuth user logged in as: %s', email)
  if utils.IsInternalUser():
    datastore_hooks.SetPrivilegedRequest()
def Post(self):
  """Cancels a Pinpoint job on behalf of its owner or an administrator.

  Reads 'job_id' and 'reason' from the request parameters. Responds with a
  dict containing the job id plus either the new state ('Cancelled') or, on
  a CancelError, an error message with HTTP status 400.
  """
  # Pull out the Job ID and reason in the request.
  args = self.request.params.mixed()
  job_id = args.get('job_id')
  reason = args.get('reason')
  if not job_id or not reason:
    raise api_request_handler.BadRequestError()
  job = job_module.JobFromId(job_id)
  if not job:
    raise api_request_handler.NotFoundError()

  # Enforce first that only the users that started the job and administrators
  # can cancel jobs.
  email = utils.GetEmail()
  if not utils.IsAdministrator() and email != job.user:
    raise api_request_handler.ForbiddenError()

  # Truncate the reason down to 255 characters including ellipses.
  try:
    job.Cancel(email, reason[:252] + '...' if len(reason) > 255 else reason)
    return {'job_id': job.job_id, 'state': 'Cancelled'}
  except errors.CancelError as e:
    self.response.set_status(400)
    return {'job_id': job.job_id, 'message': e.message}
def GetReport(template_id, revisions):
  """Fetches a report dict for a template id, stored or static.

  Args:
    template_id: Id of a ReportTemplate entity or a static template.
    revisions: Revisions to fetch the report for.

  Returns:
    A dict with 'report', 'id', 'name', 'internal', 'editable' (and
    'owners' for stored templates), or None if the template is missing or
    inaccessible.
  """
  with timing.WallTimeLogger('GetReport'), timing.CpuTimeLogger('GetReport'):
    try:
      template = ndb.Key('ReportTemplate', template_id).get()
    except AssertionError:
      # InternalOnlyModel._post_get_hook asserts that the user can access the
      # entity.
      return None

    result = {'editable': False}

    if template:
      # A stored template: only its owners may edit it.
      result['owners'] = template.owners
      result['editable'] = utils.GetEmail() in template.owners
      result['report'] = report_query.ReportQuery(
          template.template, revisions).FetchSync()
    else:
      # Fall back to the static templates registered in code.
      for static_handler in ListStaticTemplates():
        if static_handler.template.key.id() != template_id:
          continue
        template = static_handler.template
        fetched = static_handler(revisions)
        if isinstance(fetched, report_query.ReportQuery):
          fetched = fetched.FetchSync()
        result['report'] = fetched
        break

    if template is None:
      return None

    result['id'] = template.key.id()
    result['name'] = template.name
    result['internal'] = template.internal_only
    return result
def PutTemplate(template_id, name, owners, template):
  """Creates or updates a ReportTemplate entity.

  Args:
    template_id: Id of an existing ReportTemplate, or None to create one.
    name: Display name; must be unique among templates.
    owners: List of owner emails.
    template: The template body.

  Raises:
    ValueError: The user is not logged in, the name collides with another
        template, the id belongs to a static template, the entity is
        missing/inaccessible, or the user is not an owner.
  """
  email = utils.GetEmail()
  if email is None:
    raise ValueError

  if template_id is None:
    # Creating a new template: its name must not already be taken.
    for existing in List():
      if existing['name'] == name:
        raise ValueError
    entity = ReportTemplate()
  else:
    # Static templates are defined in code and cannot be overwritten.
    for static_handler in STATIC_TEMPLATES:
      if static_handler.template.key.id() == template_id:
        raise ValueError
    try:
      entity = ndb.Key('ReportTemplate', template_id).get()
    except AssertionError:
      # InternalOnlyModel._post_get_hook asserts that the user can access
      # the entity.
      raise ValueError
    if not entity or email not in entity.owners:
      raise ValueError
    # The new name must not collide with any other template.
    for existing in List():
      if existing['id'] != template_id and existing['name'] == name:
        raise ValueError

  entity.internal_only = _GetInternalOnly(template)
  entity.name = name
  entity.owners = owners
  entity.template = template
  entity.put()
def _PrefillInfo(test_path):
  """Pre-fills some best guesses config form based on the test path.

  Args:
    test_path: Test path string.

  Returns:
    A dictionary indicating the result. If successful, this should contain
    the fields "suite", "email", "all_metrics", and "default_metric". If
    not successful this will contain the field "error".
  """
  if not test_path:
    return {'error': 'No test specified'}

  suite_path = '/'.join(test_path.split('/')[:3])
  suite = utils.TestKey(suite_path).get()
  if not suite:
    return {'error': 'Invalid test %s' % test_path}

  graph_path = '/'.join(test_path.split('/')[:4])
  graph_key = utils.TestKey(graph_path)

  info = {'suite': suite.test_name}
  info['master'] = suite.master_name
  info['internal_only'] = suite.internal_only
  info['all_bots'] = _GetAvailableBisectBots(suite.master_name)
  info['bisect_bot'] = GuessBisectBot(suite.master_name, suite.bot_name)

  user = utils.GetEmail()
  if not user:
    return {'error': 'User not logged in.'}

  # Secondary check for bisecting internal only tests.
  if suite.internal_only and not utils.IsInternalUser():
    return {'error': 'Unauthorized access, please use corp account to login.'}

  info['is_admin'] = users.is_current_user_admin()
  info['email'] = user

  info['all_metrics'] = []
  metric_keys = list_tests.GetTestDescendants(graph_key, has_rows=True)
  for metric_key in metric_keys:
    metric_path = utils.TestPath(metric_key)
    # Skip reference-build series; they are not bisectable metrics.
    if metric_path.endswith(('/ref', '_ref')):
      continue
    info['all_metrics'].append(GuessMetric(metric_path))
  info['default_metric'] = GuessMetric(test_path)
  info['story_filter'] = GuessStoryFilter(test_path)

  return info
def _GetJobs(options):
  """Lists recent Pinpoint jobs, restricted to the current user if logged in.

  Args:
    options: Options forwarded to Job.AsDict for each job.

  Returns:
    A dict with 'jobs' (list of job dicts), 'count', and 'max_count'.
  """
  user = utils.GetEmail()
  if user:
    query = job_module.Job.query(job_module.Job.user == user)
  else:
    query = job_module.Job.query()
  query = query.order(-job_module.Job.created)

  # Kick off both async operations before blocking on either result.
  job_future = query.fetch_async(limit=_MAX_JOBS_TO_FETCH)
  count_future = query.count_async(limit=_MAX_JOBS_TO_COUNT)

  return {
      'jobs': [job.AsDict(options) for job in job_future.get_result()],
      'count': count_future.get_result(),
      'max_count': _MAX_JOBS_TO_COUNT,
  }
def post(self):
  """Performs one of several bisect-related actions depending on parameters.

  The only required parameter is "step", which indicates what to do.

  This end-point should always output valid JSON with different contents
  depending on the value of "step".
  """
  user = utils.GetEmail()
  if not utils.IsValidSheriffUser():
    self.response.out.write(
        json.dumps({'error': 'User "%s" not authorized.' % user}))
    return

  step = self.request.get('step')
  if step == 'prefill-info':
    result = _PrefillInfo(self.request.get('test_path'))
  elif step == 'perform-bisect':
    result = self._PerformBisectStep(user)
  else:
    result = {'error': 'Invalid parameters.'}

  self.response.write(json.dumps(result))
def _ValidateUser(user):
  """Returns the given user if truthy, otherwise the current OAuth email."""
  if user:
    return user
  return utils.GetEmail()
def testEmail(self):
  """GetEmail returns the OAuth user's email inside an API request."""
  self.SetCurrentUserOAuth(testing_common.EXTERNAL_USER)
  with self.PatchEnviron('/api/fake'):
    actual = utils.GetEmail()
    self.assertEqual(actual, testing_common.EXTERNAL_USER.email())
def testEmail_NoUser(self):
  """GetEmail returns None when no OAuth user is set."""
  self.SetCurrentUserOAuth(None)
  with self.PatchEnviron('/api/fake'):
    result = utils.GetEmail()
    self.assertIsNone(result)
def IsInternalUser():
  """Returns True iff the current user is cached as an internal user."""
  email = utils.GetEmail()
  return bool(utils.GetCachedIsInternalUser(email))
def IsAdministrator():
  """Returns True iff the current user is cached as an administrator."""
  email = utils.GetEmail()
  return bool(utils.GetCachedIsAdministrator(email))
def PinpointParamsFromBisectParams(params):
  """Takes parameters from Dashboard's pinpoint-job-dialog and returns
  a dict with parameters for a new Pinpoint job.

  Args:
    params: A dict in the following format:
      {
        'test_path': Test path for the metric being bisected.
        'start_commit': Git hash or commit position of earlier revision.
        'end_commit': Git hash or commit position of later revision.
        'bug_id': Associated bug.
        'bisect_mode': Either 'performance' or 'functional'.
        'story_filter': Story name to pass to the job.
        'pin': Optional patch to apply to every build (optional key).
        'alerts': JSON-encoded list of alert keys (optional key).
      }

  Returns:
    A dict of params for passing to Pinpoint to start a job.

  Raises:
    InvalidParamsError: The user is not authorized, or bisect_mode is
        neither 'performance' nor 'functional'.
  """
  if not utils.IsValidSheriffUser():
    user = utils.GetEmail()
    raise InvalidParamsError('User "%s" not authorized.' % user)

  test_path = params['test_path']
  test_path_parts = test_path.split('/')
  bot_name = test_path_parts[1]
  suite = test_path_parts[2]
  story_filter = params['story_filter']
  pin = params.get('pin')

  # If functional bisects are specified, Pinpoint expects these parameters to
  # be empty.
  bisect_mode = params['bisect_mode']
  if bisect_mode not in ('performance', 'functional'):
    raise InvalidParamsError('Invalid bisect mode %s specified.' % bisect_mode)

  tir_label = ''
  chart_name = ''
  trace_name = ''
  if bisect_mode == 'performance':
    tir_label, chart_name, trace_name = ParseTIRLabelChartNameAndTraceName(
        test_path_parts)

  start_commit = params['start_commit']
  end_commit = params['end_commit']
  start_git_hash = ResolveToGitHash(start_commit, suite)
  end_git_hash = ResolveToGitHash(end_commit, suite)

  # Pinpoint also requires you specify which isolate target to run the
  # test, so we derive that from the suite name. Eventually, this would
  # ideally be stored in a SparseDiagnostic but for now we can guess.
  target = _GetIsolateTarget(bot_name, suite, start_commit, end_commit)
  email = utils.GetEmail()
  job_name = '%s bisect on %s/%s' % (bisect_mode.capitalize(), bot_name, suite)

  # Histogram names don't include the statistic, so split these.
  chart_name, statistic_name = ParseStatisticNameFromChart(chart_name)

  alert_key = ''
  if params.get('alerts'):
    alert_keys = json.loads(params.get('alerts'))
    if alert_keys:
      alert_key = alert_keys[0]

  alert_magnitude = None
  if alert_key:
    alert = ndb.Key(urlsafe=alert_key).get()
    alert_magnitude = alert.median_after_anomaly - alert.median_before_anomaly
  if not alert_magnitude:
    alert_magnitude = FindMagnitudeBetweenCommits(utils.TestKey(test_path),
                                                 start_commit, end_commit)

  pinpoint_params = {
      'configuration': bot_name,
      'benchmark': suite,
      'chart': chart_name,
      'start_git_hash': start_git_hash,
      'end_git_hash': end_git_hash,
      'bug_id': params['bug_id'],
      'comparison_mode': bisect_mode,
      'target': target,
      'user': email,
      'name': job_name,
      'tags': json.dumps({
          'test_path': test_path,
          'alert': alert_key
      }),
  }

  # Optional fields are only included when they carry a value.
  if alert_magnitude:
    pinpoint_params['comparison_magnitude'] = alert_magnitude
  if pin:
    pinpoint_params['pin'] = pin
  if statistic_name:
    pinpoint_params['statistic'] = statistic_name
  if story_filter:
    pinpoint_params['story'] = story_filter
  if tir_label:
    pinpoint_params['tir_label'] = tir_label
  if trace_name:
    pinpoint_params['trace'] = trace_name

  return pinpoint_params
def PinpointParamsFromBisectParams(params):
  """Takes parameters from Dashboard's pinpoint-job-dialog and returns
  a dict with parameters for a new Pinpoint job.

  Args:
    params: A dict in the following format:
      {
        'test_path': Test path for the metric being bisected.
        'start_commit': Git hash or commit position of earlier revision.
        'end_commit': Git hash or commit position of later revision.
        'bug_id': Associated bug.
        'project_id': Associated Monorail project.
        'bisect_mode': Either 'performance' or 'functional'.
        'story_filter': Story name to pass to the job.
        'pin': Optional patch to apply to every build (optional key).
        'alerts': JSON-encoded list of alert keys (optional key).
      }

  Returns:
    The result of pinpoint_service.MakeBisectionRequest for the new job.

  Raises:
    InvalidParamsError: The user is not authorized, or bisect_mode is
        neither 'performance' nor 'functional'.
  """
  if not utils.IsValidSheriffUser():
    user = utils.GetEmail()
    raise InvalidParamsError('User "%s" not authorized.' % user)

  test_path = params['test_path']
  test_path_parts = test_path.split('/')
  bot_name = test_path_parts[1]
  suite = test_path_parts[2]

  # If functional bisects are specified, Pinpoint expects these parameters to
  # be empty.
  bisect_mode = params['bisect_mode']
  if bisect_mode not in ('performance', 'functional'):
    raise InvalidParamsError('Invalid bisect mode %s specified.' % bisect_mode)

  start_commit = params['start_commit']
  end_commit = params['end_commit']
  start_git_hash = ResolveToGitHash(start_commit, suite)
  end_git_hash = ResolveToGitHash(end_commit, suite)

  # Pinpoint also requires you specify which isolate target to run the
  # test, so we derive that from the suite name. Eventually, this would
  # ideally be stored in a SparseDiagnostic but for now we can guess.
  target = GetIsolateTarget(bot_name, suite)
  email = utils.GetEmail()
  job_name = '%s bisect on %s/%s' % (bisect_mode.capitalize(), bot_name, suite)

  alert_key = ''
  if params.get('alerts'):
    alert_keys = json.loads(params.get('alerts'))
    if alert_keys:
      alert_key = alert_keys[0]

  alert_magnitude = None
  if alert_key:
    alert = ndb.Key(urlsafe=alert_key).get()
    alert_magnitude = alert.median_after_anomaly - alert.median_before_anomaly
  if not alert_magnitude:
    alert_magnitude = FindMagnitudeBetweenCommits(utils.TestKey(test_path),
                                                 start_commit, end_commit)

  # bug_id may arrive as an int or as a (possibly non-numeric) string;
  # non-positive and non-numeric values mean "no associated issue".
  if isinstance(params['bug_id'], int):
    issue_id = params['bug_id'] if params['bug_id'] > 0 else None
  else:
    issue_id = int(params['bug_id']) if params['bug_id'].isdigit() else None
  issue = anomaly.Issue(
      project_id=params.get('project_id', 'chromium'), issue_id=issue_id)

  return pinpoint_service.MakeBisectionRequest(
      test=utils.TestKey(test_path).get(),
      commit_range=pinpoint_service.CommitRange(
          start=start_git_hash, end=end_git_hash),
      issue=issue,
      comparison_mode=bisect_mode,
      target=target,
      comparison_magnitude=alert_magnitude,
      user=email,
      name=job_name,
      story_filter=params['story_filter'],
      pin=params.get('pin'),
      tags={
          'test_path': test_path,
          'alert': alert_key,
      },
  )
def CheckToken(self, *args, **kwargs):
  """Aborts with HTTP 403 unless the request carries a valid XSRF token."""
  token = str(self.request.get('xsrf_token'))
  email = utils.GetEmail()
  if not (email and _ValidateToken(token, email)):
    self.abort(403)
  handler_method(self, *args, **kwargs)