def receive(self, bounce_message):
  """Record one inbound bounce notification on the bounced user's account."""
  bounced_addr = bounce_message.original.get('to')
  logging.info('Bounce was sent to: %r', bounced_addr)

  # TODO(jrobbins): Remove this work-around after issue 5804 is
  # fully resolved.
  # Disable bounce detection for @intel.com. Some of their emails
  # will still bounce, but we will keep trying and not triggering
  # issue reassignment.
  if '@intel' in bounced_addr:  # both intel.com and intel-partner.
    return

  app_config = webapp2.WSGIApplication.app.config
  services = app_config['services']
  cnxn = sql.MonorailConnection()

  try:
    user_id = services.user.LookupUserID(cnxn, bounced_addr)
    user = services.user.GetUser(cnxn, user_id)
    user.email_bounce_timestamp = int(time.time())
    services.user.UpdateUser(cnxn, user_id, user)
  except exceptions.NoSuchUserException:
    logging.info('User %r not found, ignoring', bounced_addr)

  logging.info('Received bounce post ... [%s]', self.request)
  logging.info('Bounce original: %s', bounce_message.original)
  logging.info('Bounce notification: %s', bounce_message.notification)
def __init__(
    self, services, cnxn=None, requester=None, auth=None, perms=None,
    autocreate=True):
  """Construct a MonorailContext.

  Args:
    services: Connection to backends.
    cnxn: Optional connection to SQL database.
    requester: String email address of user making the request or None.
    auth: AuthData object used during testing.
    perms: PermissionSet used during testing.
    autocreate: Set to False to require that a row in the User table
        already exists for this user, otherwise raise NoSuchUserException.
  """
  self.cnxn = cnxn or sql.MonorailConnection()
  self.auth = auth or authdata.AuthData.FromEmail(
      self.cnxn, requester, services, autocreate=autocreate)
  # Usually None until LookupLoggedInUserPerms() is called.
  self.perms = perms
  self.profiler = profiler.Profiler()
  self.warnings = []
  # TODO(jrobbins): make self.errors not be UI-centric.
  self.errors = template_helpers.EZTError()
def receive(self, bounce_message):
  """Handle a bounce notification by timestamping the bounced account."""
  bounced_addr = bounce_message.original.get('to')
  logging.info('Bounce was sent to: %r', bounced_addr)

  app_config = webapp2.WSGIApplication.app.config
  services = app_config['services']
  cnxn = sql.MonorailConnection()

  try:
    user_id = services.user.LookupUserID(cnxn, bounced_addr)
    user = services.user.GetUser(cnxn, user_id)
    user.email_bounce_timestamp = int(time.time())
    services.user.UpdateUser(cnxn, user_id, user)
  except exceptions.NoSuchUserException:
    logging.info('User %r not found, ignoring', bounced_addr)

  logging.info('Received bounce post ... [%s]', self.request)
  logging.info('Bounce original: %s', bounce_message.original)
  logging.info('Bounce notification: %s', bounce_message.notification)
def testExecute_Shard_Unavailable(self):
  """If a shard is unavailable, we try the next one."""
  shard_id = 1
  sql_cnxn_1 = self.cnxn.GetConnectionForShard(shard_id)
  sql_cnxn_2 = self.cnxn.GetConnectionForShard(shard_id + 1)

  # Simulate a recent failure on shard 1: Execute() should use shard 2.
  self.cnxn.unavailable_shards[1] = int(time.time()) - 3
  with mock.patch.object(
      self.cnxn, '_ExecuteWithSQLConnection') as mock_execute:
    mock_execute.return_value = 'db result'
    actual_result = self.cnxn.Execute('statement', [], shard_id=shard_id)
    self.assertEqual('db result', actual_result)
    mock_execute.assert_called_once_with(
        sql_cnxn_2, 'statement', [], commit=True)

  # Even a new MonorailConnection instance shares the same state.
  other_cnxn = sql.MonorailConnection()
  other_sql_cnxn_2 = other_cnxn.GetConnectionForShard(shard_id + 1)
  with mock.patch.object(
      other_cnxn, '_ExecuteWithSQLConnection') as mock_execute:
    mock_execute.return_value = 'db result'
    actual_result = other_cnxn.Execute('statement', [], shard_id=shard_id)
    self.assertEqual('db result', actual_result)
    mock_execute.assert_called_once_with(
        other_sql_cnxn_2, 'statement', [], commit=True)

  # Simulate an old failure on shard 1, allowing us to try using it again.
  self.cnxn.unavailable_shards[1] = (
      int(time.time()) - sql.BAD_SHARD_AVOIDANCE_SEC - 2)
  with mock.patch.object(
      self.cnxn, '_ExecuteWithSQLConnection') as mock_execute:
    mock_execute.return_value = 'db result'
    actual_result = self.cnxn.Execute('statement', [], shard_id=shard_id)
    self.assertEqual('db result', actual_result)
    mock_execute.assert_called_once_with(
        sql_cnxn_1, 'statement', [], commit=True)
def xsrf_is_valid(self, body):
  """Return True if the request body carries a valid XSRF token.

  This method expects the body dictionary to include two fields:
  `token` and `user_id`.
  """
  current_user = users.get_current_user()
  email = current_user.email() if current_user else None

  cnxn = sql.MonorailConnection()
  services = self.app.config.get('services')
  auth = authdata.AuthData.FromEmail(cnxn, email, services, autocreate=False)

  try:
    xsrf.ValidateToken(body.get('token'), auth.user_id, xsrf.XHR_SERVLET_PATH)
  except xsrf.TokenIncorrect:
    return False
  return True
def GetNonviewableIIDs(
    self, cnxn, user, effective_ids, project, perms, shard_id):
  """Return a list of IIDs that the user cannot view in the project shard."""
  # Project owners and site admins can see all issues.
  if not perms.consider_restrictions:
    return []

  # There are two main parts to the computation that we do in parallel:
  # getting at-risk IIDs and getting OK-iids.
  promise_cnxn = sql.MonorailConnection()
  at_risk_promise = framework_helpers.Promise(
      self.GetAtRiskIIDs, promise_cnxn, user, effective_ids, project, perms,
      shard_id)
  ok_iids = self.GetViewableIIDs(
      cnxn, effective_ids, project.project_id, shard_id)
  at_risk_iids = at_risk_promise.WaitAndGetValue()

  # The set of non-viewable issues is the at-risk ones minus the ones where
  # the user is the reporter, owner, CC'd, or granted "View" permission.
  return list(set(at_risk_iids) - set(ok_iids))
def __init__(self, params=None):
  """Initialize the MonorailRequest object."""
  # Explicit values that take precedence over parsed form values.
  self.form_overrides = {}
  if params:
    self.form_overrides.update(params)

  self.warnings = []
  self.errors = template_helpers.EZTError()
  self.debug_enabled = False
  self.use_cached_searches = True
  self.cnxn = sql.MonorailConnection()

  # Authentication info for logged-in user.
  self.auth = AuthData()

  self.project_name = None
  self.project = None
  self.config = None

  self.hotlist_id = None
  self.hotlist = None
  self.hotlist_name = None

  self.viewed_username = None
  self.viewed_user_auth = AuthData()
def testConstructor_AsUsedInApp(self):
  """We can make an mc like it is done in the app or a test."""
  self.mox.StubOutClassWithMocks(sql, 'MonorailConnection')
  mock_cnxn = sql.MonorailConnection()
  mock_cnxn.Close()
  requester = '*****@*****.**'
  self.mox.ReplayAll()

  mc = monorailcontext.MonorailContext(self.services, requester=requester)
  mc.LookupLoggedInUserPerms(self.project)

  self.assertEqual(mock_cnxn, mc.cnxn)
  self.assertEqual(requester, mc.auth.email)
  self.assertEqual(permissions.USER_PERMISSIONSET, mc.perms)
  self.assertTrue(isinstance(mc.profiler, profiler.Profiler))
  self.assertEqual([], mc.warnings)
  self.assertTrue(isinstance(mc.errors, template_helpers.EZTError))

  mc.CleanUp()
  self.assertIsNone(mc.cnxn)

  # Double Cleanup or Cleanup with no cnxn is not a crash.
  mc.CleanUp()
  self.assertIsNone(mc.cnxn)
def Run(self, handler, request, prpc_context, cnxn=None, auth=None, perms=None, start_time=None, end_time=None): """Run a Do* method in an API context. Args: handler: API handler method to call with MonorailContext and request. request: API Request proto object. prpc_context: pRPC context object with status code. cnxn: Optional connection to SQL database. auth: AuthData passed in during testing. perms: PermissionSet passed in during testing. start_time: Int timestamp passed in during testing. end_time: Int timestamp passed in during testing. Returns: The response proto returned from the handler or None if that method raised an exception that we handle. Raises: Only programming errors should be raised as exceptions. All execptions for permission checks and input validation that are raised in the Do* method are converted into pRPC status codes. """ start_time = start_time or time.time() cnxn = cnxn or sql.MonorailConnection() if self.services.cache_manager: self.services.cache_manager.DoDistributedInvalidation(cnxn) response = None client_id = None # TODO(jrobbins): consider using client ID. 
requester = None metadata = dict(prpc_context.invocation_metadata()) mc = None try: requester = auth.email if auth else self.GetRequester(metadata) logging.info('request proto is:\n%r\n', request) logging.info('requester is %r', requester) if self.rate_limiter: self.rate_limiter.CheckStart(client_id, requester, start_time) mc = monorailcontext.MonorailContext(self.services, cnxn=cnxn, requester=requester, auth=auth, perms=perms) if not perms: mc.LookupLoggedInUserPerms( self.GetRequestProject(mc.cnxn, request)) self.AssertBaseChecks(mc, request, metadata) response = handler(self, mc, request) except Exception as e: if not self.ProcessException(e, prpc_context, mc): raise e.__class__, e, sys.exc_info()[2] finally: if mc: mc.CleanUp() if self.rate_limiter and requester: end_time = end_time or time.time() self.rate_limiter.CheckEnd(client_id, requester, end_time, start_time) self.RecordMonitoringStats(start_time, request, response, prpc_context) return response
def ProcessMail(self, msg, project_addr):
  """Process an inbound email message."""
  # TODO(jrobbins): If the message is HUGE, don't even try to parse
  # it. Silently give up.
  (from_addr, to_addrs, cc_addrs, references, incident_id, subject,
   body) = emailfmt.ParseEmailMessage(msg)

  logging.info('Proj addr: %r', project_addr)
  logging.info('From addr: %r', from_addr)
  logging.info('Subject: %r', subject)
  logging.info('To: %r', to_addrs)
  logging.info('Cc: %r', cc_addrs)
  logging.info('References: %r', references)
  logging.info('Incident Id: %r', incident_id)
  logging.info('Body: %r', body)

  # If message body is very large, reject it and send an error email.
  if emailfmt.IsBodyTooBigToParse(body):
    return _MakeErrorMessageReplyTask(
        project_addr, from_addr, self._templates['body_too_long'])

  # Make sure that the project reply-to address is in the To: line.
  if not emailfmt.IsProjectAddressOnToLine(project_addr, to_addrs):
    return None

  project_name, verb, trooper_queue = emailfmt.IdentifyProjectVerbAndLabel(
      project_addr)
  is_alert = bool(verb and verb.lower() == 'alert')

  error_addr = from_addr
  local_id = None
  author_addr = from_addr
  if is_alert:
    error_addr = settings.alert_escalation_email
    author_addr = settings.alert_service_account
  else:
    local_id = emailfmt.IdentifyIssue(project_name, subject)
    if not local_id:
      logging.info('Could not identify issue: %s %s', project_addr, subject)
      # No error message, because message was probably not intended for us.
      return None

  cnxn = sql.MonorailConnection()
  if self.services.cache_manager:
    self.services.cache_manager.DoDistributedInvalidation(cnxn)
  project = self.services.project.GetProjectByName(cnxn, project_name)

  # Authenticate the author_addr and perm check.
  try:
    mc = monorailcontext.MonorailContext(
        self.services, cnxn=cnxn, requester=author_addr,
        autocreate=is_alert)
    mc.LookupLoggedInUserPerms(project)
  except exceptions.NoSuchUserException:
    return _MakeErrorMessageReplyTask(
        project_addr, error_addr, self._templates['no_account'])

  # TODO(zhangtiff): Add separate email templates for alert error cases.
  if not project or project.state != project_pb2.ProjectState.LIVE:
    return _MakeErrorMessageReplyTask(
        project_addr, error_addr, self._templates['project_not_found'])

  if not project.process_inbound_email:
    return _MakeErrorMessageReplyTask(
        project_addr, error_addr, self._templates['replies_disabled'],
        project_name=project_name)

  # Verify that this is a reply to a notification that we could have sent.
  is_development = os.environ['SERVER_SOFTWARE'].startswith('Development')
  if not (is_alert or is_development):
    for ref in references:
      if emailfmt.ValidateReferencesHeader(ref, project, from_addr, subject):
        break  # Found a message ID that we could have sent.
      if emailfmt.ValidateReferencesHeader(
          ref, project, from_addr.lower(), subject):
        break  # Also match all-lowercase from-address.
    else:  # for-else: if loop completes with no valid reference found.
      return _MakeErrorMessageReplyTask(
          project_addr, from_addr, self._templates['not_a_reply'])

  # Note: If the issue summary line is changed, a new thread is created,
  # and replies to the old thread will no longer work because the subject
  # line hash will not match, which seems reasonable.

  if mc.auth.user_pb.banned:
    logging.info(
        'Banned user %s tried to post to %s', from_addr, project_addr)
    return _MakeErrorMessageReplyTask(
        project_addr, error_addr, self._templates['banned'])

  # If the email is an alert, switch to the alert handling path.
  if is_alert:
    alert2issue.ProcessEmailNotification(
        self.services, cnxn, project, project_addr, from_addr, mc.auth,
        subject, body, incident_id, msg, trooper_queue)
    return None

  # This email is a response to an email about a comment.
  self.ProcessIssueReply(mc, project, local_id, project_addr, body)
  return None
def GatherPageData(self, mr):
  """Build up a dictionary of data values to use when rendering the page.

  Args:
    mr: commonly used info parsed from the request.

  Returns:
    Dict of values used by EZT for rendering the page.
  """
  if mr.local_id is None:
    self.abort(404, 'no issue specified')

  with work_env.WorkEnv(mr, self.services) as we:
    # Signed in users could edit the issue, so it must be fresh.
    use_cache = not mr.auth.user_id
    issue = we.GetIssueByLocalID(
        mr.project_id, mr.local_id, use_cache=use_cache)

    # We give no explanation of missing issues on the peek page.
    if issue.deleted:
      self.abort(404, 'issue not found')

    # Kick off the starring lookup on its own connection so it can run
    # in parallel with the rest of the page data gathering.
    star_cnxn = sql.MonorailConnection()
    star_promise = framework_helpers.Promise(
        we.IsIssueStarred, issue, cnxn=star_cnxn)

    config = we.GetProjectConfig(mr.project_id)
    comments = we.ListIssueComments(issue)

  descriptions, visible_comments, cmnt_pagination = PaginateComments(
      mr, issue, comments, config, self.services)

  with mr.profiler.Phase('making user proxies'):
    involved_user_ids = tracker_bizobj.UsersInvolvedInIssues([issue])
    group_ids = self.services.usergroup.DetermineWhichUserIDsAreGroups(
        mr.cnxn, involved_user_ids)
    comment_user_ids = tracker_bizobj.UsersInvolvedInCommentList(
        descriptions + visible_comments)
    users_by_id = framework_views.MakeAllUserViews(
        mr.cnxn, self.services.user, involved_user_ids,
        comment_user_ids, group_ids=group_ids)
    framework_views.RevealAllEmailsToMembers(mr.auth, mr.project, users_by_id)

  (issue_view, description_views,
   comment_views) = self._MakeIssueAndCommentViews(
       mr, issue, users_by_id, descriptions, visible_comments, config,
       issue_reporters=[], comment_reporters=[])

  with mr.profiler.Phase('getting starring info'):
    starred = star_promise.WaitAndGetValue()
    star_cnxn.Close()

  permit_edit = permissions.CanEditIssue(
      mr.auth.effective_ids, mr.perms, mr.project, issue)
  mr.ComputeColSpec(config)
  restrict_to_known = config.restrict_to_known

  page_perms = self.MakePagePerms(
      mr, issue,
      permissions.CREATE_ISSUE,
      permissions.SET_STAR,
      permissions.EDIT_ISSUE,
      permissions.EDIT_ISSUE_SUMMARY,
      permissions.EDIT_ISSUE_STATUS,
      permissions.EDIT_ISSUE_OWNER,
      permissions.EDIT_ISSUE_CC,
      permissions.DELETE_ISSUE,
      permissions.ADD_ISSUE_COMMENT,
      permissions.DELETE_OWN,
      permissions.DELETE_ANY,
      permissions.VIEW_INBOUND_MESSAGES)
  page_perms.EditIssue = ezt.boolean(permit_edit)

  prevent_restriction_removal = (
      mr.project.only_owners_remove_restrictions and
      not framework_bizobj.UserOwnsProject(
          mr.project, mr.auth.effective_ids))

  cmd_slots, default_slot_num = self.services.features.GetRecentCommands(
      mr.cnxn, mr.auth.user_id, mr.project_id)
  cmd_slot_views = [
      template_helpers.EZTItem(
          slot_num=slot_num, command=command, comment=comment)
      for slot_num, command, comment in cmd_slots]

  previous_locations = self.GetPreviousLocations(mr, issue)

  return {
      'issue_tab_mode': 'issueDetail',
      'issue': issue_view,
      'description': description_views,
      'comments': comment_views,
      'labels': issue.labels,
      'num_detail_rows': len(comment_views) + 4,
      'noisy': ezt.boolean(tracker_helpers.IsNoisy(
          len(comment_views), issue.star_count)),
      'cmnt_pagination': cmnt_pagination,
      'colspec': mr.col_spec,
      'searchtip': 'You can jump to any issue by number',
      'starred': ezt.boolean(starred),
      'pagegen': str(int(time.time() * 1000000)),
      'restrict_to_known': ezt.boolean(restrict_to_known),
      'prevent_restriction_removal': ezt.boolean(
          prevent_restriction_removal),
      'statuses_offer_merge': config.statuses_offer_merge,
      'page_perms': page_perms,
      'cmd_slots': cmd_slot_views,
      'default_slot_num': default_slot_num,
      'quick_edit_submit_url': tracker_helpers.FormatRelativeIssueURL(
          issue.project_name, urls.ISSUE_PEEK + '.do', id=issue.local_id),
      'previous_locations': previous_locations,

      # for template issue-meta-part shared by issuedetail servlet
      'user_remaining_hotlists': [],
      'user_issue_hotlists': [],
      'involved_users_issue_hotlists': [],
      'remaining_issue_hotlists': [],
  }
def __init__(self, request, services):
  """Initialize an API request context from an Endpoints request.

  Args:
    request: Endpoints request message; optional attributes (projectId,
        issueId, userId, groupName, and the query filters) are read
        with hasattr() because they vary by API method.
    services: Connections to backend services.
  """
  requester = (
      endpoints.get_current_user() or
      oauth.get_current_user(framework_constants.OAUTH_SCOPE))
  requester_email = requester.email().lower()
  self.cnxn = sql.MonorailConnection()
  self.auth = AuthData.FromEmail(self.cnxn, requester_email, services)
  self.me_user_id = self.auth.user_id
  self.viewed_username = None
  self.viewed_user_auth = None
  self.project_name = None
  self.project = None
  self.issue = None
  self.config = None
  self.granted_perms = set()

  # query parameters
  self.params = {
      'can': 1,
      'start': 0,
      'num': 100,
      'q': '',
      'sort': '',
      'groupby': '',
      'projects': [],
      'hotlists': [],
  }
  self.use_cached_searches = True
  self.warnings = []
  self.errors = template_helpers.EZTError()
  self.mode = None

  if hasattr(request, 'projectId'):
    self.project_name = request.projectId
    self.project = services.project.GetProjectByName(
        self.cnxn, self.project_name)
    self.params['projects'].append(self.project_name)
    self.config = services.config.GetProjectConfig(
        self.cnxn, self.project_id)
    if hasattr(request, 'additionalProject'):
      self.params['projects'].extend(request.additionalProject)
      self.params['projects'] = list(set(self.params['projects']))
    if hasattr(request, 'issueId'):
      self.issue = services.issue.GetIssueByLocalID(
          self.cnxn, self.project_id, request.issueId)
      self.granted_perms = tracker_bizobj.GetGrantedPerms(
          self.issue, self.auth.effective_ids, self.config)
  if hasattr(request, 'userId'):
    self.viewed_username = request.userId.lower()
    if self.viewed_username == 'me':
      self.viewed_username = requester_email
    self.viewed_user_auth = AuthData.FromEmail(
        self.cnxn, self.viewed_username, services)
  elif hasattr(request, 'groupName'):
    self.viewed_username = request.groupName.lower()
    try:
      self.viewed_user_auth = AuthData.FromEmail(
          self.cnxn, self.viewed_username, services)
    except user_svc.NoSuchUserException:
      self.viewed_user_auth = None
  self.perms = permissions.GetPermissions(
      self.auth.user_pb, self.auth.effective_ids, self.project)

  # Build q.
  if hasattr(request, 'q') and request.q:
    self.params['q'] = request.q
  if hasattr(request, 'publishedMax') and request.publishedMax:
    self.params['q'] += ' opened<=%d' % request.publishedMax
  if hasattr(request, 'publishedMin') and request.publishedMin:
    self.params['q'] += ' opened>=%d' % request.publishedMin
  if hasattr(request, 'updatedMax') and request.updatedMax:
    self.params['q'] += ' modified<=%d' % request.updatedMax
  if hasattr(request, 'updatedMin') and request.updatedMin:
    self.params['q'] += ' modified>=%d' % request.updatedMin
  if hasattr(request, 'owner') and request.owner:
    self.params['q'] += ' owner:%s' % request.owner
  if hasattr(request, 'status') and request.status:
    self.params['q'] += ' status:%s' % request.status
  if hasattr(request, 'label') and request.label:
    self.params['q'] += ' label:%s' % request.label

  if hasattr(request, 'can') and request.can:
    # Map each API canned query enum value to its internal canned query ID.
    canned_query_ids = {
        api_pb2_v1.CannedQuery.all: 1,
        api_pb2_v1.CannedQuery.new: 6,
        api_pb2_v1.CannedQuery.open: 2,
        api_pb2_v1.CannedQuery.owned: 3,
        api_pb2_v1.CannedQuery.reported: 4,
        api_pb2_v1.CannedQuery.starred: 5,
        api_pb2_v1.CannedQuery.to_verify: 7,
    }
    try:
      self.params['can'] = canned_query_ids[request.can]
    except KeyError:
      # Endpoints should have caught this.
      # Fixed: the message is now actually %-formatted; previously the
      # value was passed as a second exception argument, so str(e) was a
      # tuple repr rather than the intended message.
      raise InputException(
          'Canned query %s is not supported.' % request.can)
  if hasattr(request, 'startIndex') and request.startIndex:
    self.params['start'] = request.startIndex
  if hasattr(request, 'maxResults') and request.maxResults:
    self.params['num'] = request.maxResults
  if hasattr(request, 'sort') and request.sort:
    self.params['sort'] = request.sort

  self.query_project_names = self.GetParam('projects')
  self.group_by_spec = self.GetParam('groupby')
  self.sort_spec = self.GetParam('sort')
  self.query = self.GetParam('q')
  self.can = self.GetParam('can')
  self.start = self.GetParam('start')
  self.num = self.GetParam('num')
def setUp(self):
  """Create a fresh connection and table manager for each test case."""
  self.cnxn = sql.MonorailConnection()
  self.emp_tbl = sql.SQLTableManager('Employee')
  self.master_cnxn = self.cnxn.GetMasterConnection()
def setUp(self):
  """Create a connection and turn off local mode for each test case."""
  self.cnxn = sql.MonorailConnection()
  # Remember the original settings so tearDown can restore them.
  self.orig_local_mode = settings.local_mode
  self.orig_num_logical_shards = settings.num_logical_shards
  settings.local_mode = False
def wrapper(self, *args, **kwargs):
  """Wrap an Endpoints method with auth checks, rate limiting, and metrics."""
  method_identifier = (
      ENDPOINTS_API_NAME + '.' +
      (method_name or func.__name__) + '/' +
      (method_path or func.__name__))
  start_time = time_fn()
  approximate_http_status = 200
  request = args[0]
  ret = None
  c_id = None
  c_email = None
  try:
    if settings.read_only and http_method.lower() != 'get':
      raise permissions.PermissionException(
          'This request is not allowed in read-only mode')
    requester = endpoints.get_current_user()
    auth_client_ids, auth_emails = (
        client_config_svc.GetClientConfigSvc().GetClientIDEmails())
    auth_client_ids.append(endpoints.API_EXPLORER_CLIENT_ID)
    if self._services is None:
      self._set_services(service_manager.set_up_services())
    c_id, c_email = api_base_checks(
        request, requester, self._services, sql.MonorailConnection(),
        auth_client_ids, auth_emails)
    self.ratelimiter.CheckStart(c_id, c_email, start_time)
    self.increment_request_limit(request, c_id, c_email)
    ret = func(self, *args, **kwargs)
  except user_svc.NoSuchUserException as e:
    approximate_http_status = 404
    raise endpoints.NotFoundException(
        'The user does not exist: %s' % str(e))
  except (project_svc.NoSuchProjectException,
          issue_svc.NoSuchIssueException,
          config_svc.NoSuchComponentException) as e:
    approximate_http_status = 404
    raise endpoints.NotFoundException(str(e))
  except (permissions.BannedUserException,
          permissions.PermissionException) as e:
    approximate_http_status = 403
    logging.info('Whitelist ID %r email %r', auth_client_ids, auth_emails)
    raise endpoints.ForbiddenException(str(e))
  except endpoints.BadRequestException:
    approximate_http_status = 400
    raise
  except endpoints.UnauthorizedException:
    approximate_http_status = 401
    raise
  except actionlimit.ExcessiveActivityException:
    approximate_http_status = 403
    raise endpoints.ForbiddenException(
        'The requester has exceeded API quotas limit')
  except (usergroup_svc.GroupExistsException,
          config_svc.InvalidComponentNameException,
          ratelimiter.ApiRateLimitExceeded) as e:
    approximate_http_status = 400
    raise endpoints.BadRequestException(str(e))
  except Exception:
    approximate_http_status = 500
    logging.exception('Unexpected error in monorail API')
    raise
  finally:
    now = time_fn()
    elapsed_ms = int((now - start_time) * 1000)
    if c_id and c_email:
      self.ratelimiter.CheckEnd(c_id, c_email, now, start_time)
    fields = {
        # Endpoints APIs don't return the full set of http status values.
        'status': approximate_http_status,
        # Use the api name, not the request path, to prevent an
        # explosion in possible field values.
        'name': method_identifier,
        'is_robot': False,
    }
    ts_mon.common.http_metrics.server_durations.add(
        elapsed_ms, fields=fields)
    ts_mon.common.http_metrics.server_response_status.increment(
        fields=fields)
    ts_mon.common.http_metrics.server_request_bytes.add(
        len(protojson.encode_message(request)), fields=fields)
    response_size = 0
    if ret:
      response_size = len(protojson.encode_message(ret))
    ts_mon.common.http_metrics.server_response_bytes.add(
        response_size, fields=fields)
  return ret