def test_add(self):
  builder_id = build_pb2.BuilderID(
      project='chromium',
      bucket='try',
      builder='linux',
  )
  build = self.add(dict(builder=builder_id))
  self.assertIsNotNone(build.key)
  self.assertIsNotNone(build.key.id())

  build = build.key.get()
  self.assertEqual(build.proto.id, build.key.id())
  self.assertEqual(build.proto.builder, builder_id)
  self.assertEqual(
      build.proto.created_by, auth.get_current_identity().to_bytes())
  self.assertEqual(build.proto.builder.project, 'chromium')
  self.assertEqual(build.proto.builder.bucket, 'try')
  self.assertEqual(build.proto.builder.builder, 'linux')
  self.assertEqual(build.created_by, auth.get_current_identity())

  infra = model.BuildInfra.key_for(build.key).get().parse()
  self.assertEqual(infra.logdog.hostname, 'logs.example.com')
  self.assertIn(
      build_pb2.BuildInfra.Swarming.CacheEntry(
          path='git',
          name='git',
          wait_for_warm_cache=dict(),
      ),
      infra.swarming.caches,
  )

def post(self):
  # Forbid usage of delegation tokens for this particular call. Using
  # delegation when creating delegation tokens is too deep. Redelegation will
  # be done as a separate explicit API call that accepts an existing
  # delegation token via the request body, not via headers.
  if auth.get_current_identity() != auth.get_peer_identity():
    raise auth.AuthorizationError(
        'This API call must not be used with active delegation token')

  # Convert request body to proto (with validation).
  try:
    body = self.parse_body()
    subtoken = subtoken_from_jsonish(body)
    intent = body.get('intent') or ''
    if not isinstance(intent, basestring):
      raise TypeError('"intent" must be string')
  except (TypeError, ValueError) as exc:
    self.abort_with_error(400, text=str(exc))

  # Fill in defaults.
  assert not subtoken.requestor_identity
  user_id = auth.get_current_identity().to_bytes()
  subtoken.requestor_identity = user_id
  if not subtoken.delegated_identity:
    subtoken.delegated_identity = user_id
  subtoken.creation_time = int(utils.time_time())
  if not subtoken.validity_duration:
    subtoken.validity_duration = DEF_VALIDITY_DURATION_SEC
  if '*' in subtoken.services:
    subtoken.services[:] = get_default_allowed_services(user_id)

  # Check ACL (raises auth.AuthorizationError on errors).
  rule = check_can_create_token(user_id, subtoken)

  # Register the token in the datastore, generate its ID.
  subtoken.subtoken_id = register_subtoken(
      subtoken, rule, intent, auth.get_peer_ip())

  # Create and sign the token.
  try:
    token = delegation.serialize_token(delegation.seal_token(subtoken))
  except delegation.BadTokenError as exc:
    # This happens if the resulting token is too large.
    self.abort_with_error(400, text=str(exc))

  self.send_response(
      response={
          'delegation_token': token,
          'subtoken_id': str(subtoken.subtoken_id),
          'validity_duration': subtoken.validity_duration,
      },
      http_code=201)

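# Example (not part of the original source): a hypothetical client for the
# handler above. The endpoint URL is an assumption; the JSON fields mirror
# what the handler reads ('intent' plus the subtoken fields it defaults).
import json
import urllib2

def create_delegation_token(host, oauth_token):
  body = {
      'validity_duration': 3600,   # seconds; server default used if omitted
      'services': ['*'],           # '*' expands to the default allowed set
      'intent': 'manual testing',  # free-form audit string
  }
  req = urllib2.Request(
      'https://%s/auth_service/api/v1/delegation/token/create' % host,
      data=json.dumps(body),
      headers={
          'Content-Type': 'application/json; charset=utf-8',
          'Authorization': 'Bearer %s' % oauth_token,
      })
  return json.load(urllib2.urlopen(req))['delegation_token']
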
def log(**kwargs):
  """Adds an error. This will indirectly notify the admins.

  Returns the entity id for the report.
  """
  identity = None
  if not auth.get_current_identity().is_anonymous:
    identity = auth.get_current_identity().to_bytes()
  try:
    # Trim all the messages to 4kb to reduce spam.
    LIMIT = 4096
    for key, value in kwargs.items():
      if key not in VALID_ERROR_KEYS:
        logging.error('Dropping unknown detail %s: %s', key, value)
        kwargs.pop(key)
      elif isinstance(value, basestring) and len(value) > LIMIT:
        value = value[:LIMIT-1] + u'\u2026'
        kwargs[key] = value
    if kwargs.get('source') == 'server':
      # Automatically use the version of the server code.
      kwargs.setdefault('version', utils.get_app_version())
      kwargs.setdefault('python_version', platform.python_version())
    error = models.Error(identity=identity, **kwargs)
    error.put()
    key_id = error.key.integer_id()
    logging.error(
        'Got a %s error\nhttps://%s/restricted/ereporter2/errors/%s\n%s',
        error.source,
        app_identity.get_default_version_hostname(),
        key_id,
        error.message)
    return key_id
  except (datastore_errors.BadValueError, TypeError) as e:
    stack = formatter._reformat_stack(traceback.format_exc())
    # That's the error about the error.
    error = models.Error(
        source='server',
        category='exception',
        message='log(%s) caused: %s' % (kwargs, str(e)),
        exception_type=str(type(e)),
        stack=stack)
    error.put()
    key_id = error.key.integer_id()
    logging.error(
        'Failed to log a %s error\n%s\n%s',
        error.source, key_id, error.message)
    return key_id

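# Example (not from the original source): a minimal log() call. The keys used
# here ('source', 'category', 'message') are assumed to be in VALID_ERROR_KEYS,
# since log() silently drops anything outside that whitelist.
report_id = log(
    source='server',
    category='exception',
    message='Task queue item failed to process')
logging.info('Filed ereporter2 report %s', report_id)
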
def get_graphs(self, request):
  logging.debug('Got %s', request)
  project_id = request.project_id
  revision, project_cfg = config.get_project_config(
      project_id, 'project.cfg', project_config_pb2.ProjectCfg)
  if revision is None:
    logging.warning('Project %s does not have project.cfg', project_id)
    return TimeSeriesPacket()
  graph_list = []
  if not _has_access(project_cfg.access):
    logging.warning(
        'Access to %s is denied for user %s',
        project_id, auth.get_current_identity())
    return TimeSeriesPacket()
  namespace_manager.set_namespace('projects.%s' % project_id)
  for graph in TimeSeriesModel.query():
    field_list = [
        Field(key=field.field_key, value=field.value)
        for field in graph.fields
    ]
    point_list = [
        Point(time=point.time, value=point.value)
        for point in graph.points
    ]
    graph_list.append(
        TimeSeries(points=point_list, fields=field_list, metric=graph.metric))
  return TimeSeriesPacket(
      timeseries=graph_list, project_name=project_cfg.name)

def can_edit_task(task):
  """Can 'edit' tasks, like cancelling.

  The account that created a task can cancel it.
  """
  return (
      _is_privileged_user() or
      auth.get_current_identity() == task.authenticated)

def cancel(self, build_id):
  """Cancels build. Does not require a lease key.

  The current user must have permission to cancel builds in the bucket.

  Returns:
    Canceled Build.
  """
  build = model.Build.get_by_id(build_id)
  if build is None:
    raise errors.BuildNotFoundError()
  if not acl.can_cancel_build(build):
    raise current_identity_cannot('cancel build %s', build.key.id())
  if build.status == model.BuildStatus.COMPLETED:
    if build.result == model.BuildResult.CANCELED:
      return build
    raise errors.BuildIsCompletedError('Cannot cancel a completed build')
  build.status = model.BuildStatus.COMPLETED
  build.status_changed_time = utils.utcnow()
  build.result = model.BuildResult.CANCELED
  build.cancelation_reason = model.CancelationReason.CANCELED_EXPLICITLY
  self._clear_lease(build)
  build.put()
  logging.info(
      'Build %s was cancelled by %s', build.key.id(),
      auth.get_current_identity().to_bytes())
  return build

def add_machines(self, request):
  """Handles an incoming CatalogMachineBatchAdditionRequest.

  Batches are intended to save on RPCs only. The batched requests will not
  execute transactionally.
  """
  user = auth.get_current_identity().to_bytes()
  logging.info(
      'Received CatalogMachineBatchAdditionRequest:\nUser: %s\n%s',
      user,
      request,
  )
  responses = []
  for request in request.requests:
    logging.info('Processing CatalogMachineAdditionRequest:\n%s', request)
    error = self.check_backend(request) or self.check_hostname(request)
    if error:
      responses.append(rpc_messages.CatalogManipulationResponse(
          error=error,
          machine_addition_request=request,
      ))
    else:
      responses.append(self._add_machine(request))
  return rpc_messages.CatalogBatchManipulationResponse(responses=responses)

def get_task_def(self, request):
  """Returns a swarming task definition for a build request."""
  try:
    build_request = api.put_request_message_to_build_request(
        request.build_request)
    build_request = build_request.normalize()

    identity = auth.get_current_identity()
    if not acl.can_add_build(build_request.bucket):
      raise endpoints.ForbiddenException(
          '%s cannot schedule builds in bucket %s' %
          (identity, build_request.bucket))

    build = build_request.create_build(1, identity)
    bucket_cfg, _, task_def = swarming.prepare_task_def_async(
        build, fake_build=True).get_result()
    task_def_json = json.dumps(task_def)

    res = GetTaskDefinitionResponseMessage(task_definition=task_def_json)
    if request.api_explorer_link != False:  # pragma: no branch
      res.api_explorer_link = shorten_url(
          ('https://%s/_ah/api/explorer'
           '#p/swarming/v1/swarming.tasks.new?resource=%s') %
          (bucket_cfg.swarming.hostname, urllib.quote(task_def_json)))
    return res
  except errors.InvalidInputError as ex:
    raise endpoints.BadRequestException('invalid build request: %s' % ex.message)

def store(self, updated_by=None):
  """Stores a new version of the config entity."""
  # Create an incomplete key, to be completed by 'store_new_version'.
  self.key = ndb.Key(self.__class__, None, parent=self._get_root_key())
  self.updated_by = updated_by or auth.get_current_identity()
  self.updated_ts = utils.utcnow()
  return datastore_utils.store_new_version(self, self._get_root_model())

def get_available_buckets():
  """Returns buckets available to the current identity.

  Results are memcached for 10 minutes per identity.

  Returns:
    Set of bucket names, or None if all buckets are available.
  """
  if auth.is_admin():
    return None

  identity = auth.get_current_identity().to_bytes()
  cache_key = 'available_buckets/%s' % identity
  available_buckets = memcache.get(cache_key)
  if available_buckets is not None:
    return available_buckets
  logging.info('Computing a list of available buckets for %s', identity)
  group_buckets_map = collections.defaultdict(set)
  available_buckets = set()
  all_buckets = config.get_buckets_async().get_result()

  for bucket in all_buckets:
    for rule in bucket.acls:
      if rule.identity == identity:
        available_buckets.add(bucket.name)
      if rule.group:
        group_buckets_map[rule.group].add(bucket.name)

  for group, buckets in group_buckets_map.iteritems():
    if available_buckets.issuperset(buckets):
      continue
    if auth.is_group_member(group):
      available_buckets.update(buckets)

  # Cache for 10 min.
  memcache.set(cache_key, available_buckets, 10 * 60)
  return available_buckets

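# Example (not from the original source): how a caller is expected to treat
# the return value, where None means "no restriction" rather than "no access".
available = get_available_buckets()
if available is not None and 'try' not in available:
  raise auth.AuthorizationError('Bucket "try" is not accessible')
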
def store_async(self, updated_by=None):
  """Stores a new version of the config entity."""
  # Create an incomplete key, to be completed by 'store_new_version'.
  self.key = ndb.Key(self.__class__, None, parent=self._get_root_key())
  self.updated_by = updated_by or auth.get_current_identity()
  self.updated_ts = utils.utcnow()
  return datastore_utils.store_new_version_async(self, self._get_root_model())

def get_config(self, request):
  """Gets a config file."""
  try:
    validation.validate_config_set(request.config_set)
    validation.validate_path(request.path)
  except ValueError as ex:
    raise endpoints.BadRequestException(ex.message)
  res = self.GetConfigResponseMessage()

  if not self.can_read_config_set(request.config_set):
    logging.warning(
        '%s does not have access to %s',
        auth.get_current_identity().to_bytes(),
        request.config_set)
    raise_config_not_found()

  res.revision, res.content_hash = storage.get_config_hash_async(
      request.config_set, request.path,
      revision=request.revision).get_result()
  if not res.content_hash:
    raise_config_not_found()

  if not request.hash_only:
    res.content = storage.get_config_by_hash_async(
        res.content_hash).get_result()
    if not res.content:
      logging.warning(
          'Config hash is found, but the blob is not.\n'
          'File: "%s:%s:%s". Hash: %s',
          request.config_set, request.revision, request.path,
          res.content_hash)
      raise_config_not_found()
  return res

def fetch_client_binary(self, request):
  """Returns signed URL that can be used to fetch CIPD client binary."""
  package_name = validate_package_name(request.package_name)
  if not client.is_cipd_client_package(package_name):
    raise ValidationError('Not a CIPD client package')
  instance_id = validate_instance_id(request.instance_id)

  caller = auth.get_current_identity()
  if not acl.can_fetch_instance(package_name, caller):
    raise auth.AuthorizationError()

  # Grab the location of the extracted binary.
  instance = self.get_instance(package_name, instance_id)
  client_info, error_message = self.service.get_client_binary_info(instance)
  if error_message:
    raise Error(error_message)
  if client_info is None:
    return FetchClientBinaryResponse(
        status=Status.NOT_EXTRACTED_YET,
        instance=instance_to_proto(instance))

  return FetchClientBinaryResponse(
      instance=instance_to_proto(instance),
      client_binary=FetchClientBinaryResponse.ClientBinary(
          sha1=client_info.sha1,
          size=client_info.size,
          fetch_url=client_info.fetch_url,
          file_name=client.get_cipd_client_filename(package_name)))

def register_instance(self, request):
  """Registers a new package instance in the repository."""
  package_name = validate_package_name(request.package_name)
  instance_id = validate_instance_id(request.instance_id)

  caller = auth.get_current_identity()
  if not acl.can_register_instance(package_name, caller):
    raise auth.AuthorizationError()

  instance = self.service.get_instance(package_name, instance_id)
  if instance is not None:
    return RegisterInstanceResponse(
        status=Status.ALREADY_REGISTERED,
        instance=instance_to_proto(instance))

  # Need to upload to CAS first? Open an upload session. Caller must use
  # CASServiceApi to finish the upload and then call registerInstance again.
  if not self.service.is_instance_file_uploaded(package_name, instance_id):
    upload_url, upload_session_id = self.service.create_upload_session(
        package_name, instance_id, caller)
    return RegisterInstanceResponse(
        status=Status.UPLOAD_FIRST,
        upload_session_id=upload_session_id,
        upload_url=upload_url)

  # Package data is in the store. Make an entity.
  instance, registered = self.service.register_instance(
      package_name=package_name,
      instance_id=instance_id,
      caller=caller,
      now=utils.utcnow())
  return RegisterInstanceResponse(
      status=Status.REGISTERED if registered else Status.ALREADY_REGISTERED,
      instance=instance_to_proto(instance))

def get_config(self, request):
  """Gets a config file."""
  res = self.GetConfigResponseMessage()
  try:
    has_access = acl.can_read_config_set(
        request.config_set, headers=self.request_state.headers)
  except ValueError:
    raise endpoints.BadRequestException(
        'Invalid config set: %s' % request.config_set)
  if not has_access:
    logging.warning(
        '%s does not have access to %s',
        auth.get_current_identity().to_bytes(),
        request.config_set)
    raise_config_not_found()

  res.revision, res.content_hash = storage.get_config_hash(
      request.config_set, request.path, revision=request.revision)
  if not res.content_hash:
    raise_config_not_found()

  if not request.hash_only:
    res.content = storage.get_config_by_hash(res.content_hash)
    if not res.content:
      logging.warning(
          'Config hash is found, but the blob is not.\n'
          'File: "%s:%s:%s". Hash: %s',
          request.config_set, request.revision, request.path,
          res.content_hash)
      raise_config_not_found()
  return res

def search_instances(self, request):
  """Returns package instances with given tag (in no particular order)."""
  tag = validate_instance_tag(request.tag)
  if request.package_name:
    package_name = validate_package_name(request.package_name)
  else:
    package_name = None

  caller = auth.get_current_identity()
  callback = None
  if package_name:
    # If search is limited to one package, check its ACL only once.
    if not acl.can_fetch_instance(package_name, caller):
      raise auth.AuthorizationError()
  else:
    # Filter out packages not allowed by ACL.
    acl_cache = {}
    def check_readable(package_name, _instance_id):
      if package_name not in acl_cache:
        acl_cache[package_name] = acl.can_fetch_instance(package_name, caller)
      return acl_cache[package_name]
    callback = check_readable

  found = self.service.search_by_tag(tag, package_name, callback)
  return SearchResponse(instances=[instance_to_proto(i) for i in found])

def impl():
  if auth.is_admin():
    raise ndb.Return(None)

  identity = auth.get_current_identity().to_bytes()
  cache_key = 'accessible_buckets_v2/%s' % identity
  ctx = ndb.get_context()
  available_buckets = yield ctx.memcache_get(cache_key)
  if available_buckets is not None:
    raise ndb.Return(available_buckets)
  logging.info('Computing a list of available buckets for %s', identity)
  group_buckets_map = collections.defaultdict(set)
  available_buckets = set()
  all_buckets = yield config.get_buckets_async()

  for bucket_id, cfg in all_buckets.iteritems():
    for rule in cfg.acls:
      if rule.identity == identity:
        available_buckets.add(bucket_id)
      elif rule.group:  # pragma: no branch
        group_buckets_map[rule.group].add(bucket_id)

  for group, buckets in group_buckets_map.iteritems():
    if available_buckets.issuperset(buckets):
      continue
    if auth.is_group_member(group):
      available_buckets.update(buckets)

  # Cache for 10 min.
  yield ctx.memcache_set(cache_key, available_buckets, 10 * 60)
  raise ndb.Return(available_buckets)

def make_request(request, is_bot_or_admin):
  """Registers the request in the DB.

  Fills up some values. If parent_task_id is set, properties for the parent
  are used:
  - priority: defaults to parent.priority - 1
  - user: overridden by parent.user
  """
  assert request.__class__ is TaskRequest
  if request.parent_task_id:
    run_result_key = task_pack.unpack_run_result_key(request.parent_task_id)
    result_summary_key = task_pack.run_result_key_to_result_summary_key(
        run_result_key)
    request_key = task_pack.result_summary_key_to_request_key(
        result_summary_key)
    parent = request_key.get()
    if not parent:
      raise ValueError('parent_task_id is not a valid task')
    request.priority = max(min(request.priority, parent.priority - 1), 0)
    # Drop the previous user.
    request.user = parent.user

  # If the priority is below 100, make sure the user has the right to do so.
  if request.priority < 100 and not is_bot_or_admin:
    # Silently drop the priority of normal users.
    request.priority = 100

  request.authenticated = auth.get_current_identity()
  if (not request.properties.is_terminate and
      request.properties.grace_period_secs is None):
    request.properties.grace_period_secs = 30
  if request.properties.idempotent is None:
    request.properties.idempotent = False
  _put_request(request)
  return request

def fetch_client_binary(self, request):
  """Returns signed URL that can be used to fetch CIPD client binary."""
  package_name = validate_package_name(request.package_name)
  if not client.is_cipd_client_package(package_name):
    raise ValidationError('Not a CIPD client package')
  instance_id = validate_instance_id(request.instance_id)

  caller = auth.get_current_identity()
  if not acl.can_fetch_instance(package_name, caller):
    raise auth.AuthorizationError()

  # Grab the location of the extracted binary.
  instance = self.get_instance(package_name, instance_id)
  client_info, error_message = self.service.get_client_binary_info(instance)
  if error_message:
    raise Error(error_message)
  if client_info is None:
    return FetchClientBinaryResponse(
        status=Status.NOT_EXTRACTED_YET,
        instance=instance_to_proto(instance))

  return FetchClientBinaryResponse(
      instance=instance_to_proto(instance),
      client_binary=FetchClientBinaryResponse.ClientBinary(
          sha1=client_info.sha1,
          size=client_info.size,
          fetch_url=client_info.fetch_url))

def finish_upload(self, request):
  """Finishes a pending upload or queries its status.

  The client should finalize the Google Storage upload session first. Once
  the GS upload is finalized and 'finishUpload' is called, the server starts
  hash verification. The uploading client will get a 'VERIFYING' status
  response. It can continue polling this method until the server returns
  'PUBLISHED' status.

  upload_session_id implicitly authorizes the request.
  """
  service = impl.get_cas_service()
  if service is None:
    raise endpoints.InternalServerErrorException('Service is not configured')

  # Verify the signature of upload_session_id and grab the session. Broken
  # or expired signatures are treated in the same way as missing upload
  # sessions. No need to provide more hints to the malicious caller.
  upload_session = service.fetch_upload_session(
      request.upload_session_id, auth.get_current_identity())
  if upload_session is None:
    return FinishUploadResponse(status=FinishUploadResponse.Status.MISSING)

  # Start the object verification task if necessary; returns an updated copy
  # of the |upload_session| entity.
  upload_session = service.maybe_finish_upload(upload_session)

  response = FinishUploadResponse(
      status=_UPLOAD_STATUS_MAPPING[upload_session.status])
  if upload_session.status == impl.UploadSession.STATUS_ERROR:
    response.error_message = upload_session.error_message or 'Unknown error'
  return response

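# Example (not from the original source): a client-side polling loop for the
# protocol described in the docstring above. 'cas_api' and its finish_upload
# method are assumptions standing in for a generated endpoints client.
import time

def wait_until_published(cas_api, upload_session_id, delay_sec=2):
  while True:
    resp = cas_api.finish_upload(upload_session_id=upload_session_id)
    if resp.status == 'PUBLISHED':
      return resp
    if resp.status in ('ERROR', 'MISSING'):
      raise RuntimeError('Upload failed: %s' % resp.status)
    # UPLOADING or VERIFYING: keep polling.
    time.sleep(delay_sec)
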
class AuthDBRevisionsHandler(auth.ApiHandler):
  """Serves deflated AuthDB proto message with snapshot of all groups.

  Args:
    rev: version of the snapshot to get ('latest' or a concrete revision
        number). Not all versions may be available (i.e. there may be gaps
        in revision numbers).
    skip_body: if '1', will not return the actual snapshot, just its SHA256
        hash, revision number and timestamp.
  """

  @auth.require(
      lambda: (
          auth.is_admin() or
          acl.is_trusted_service() or
          replication.is_replica(auth.get_current_identity())))
  def get(self, rev):
    skip_body = self.request.get('skip_body') == '1'
    if rev == 'latest':
      snapshot = replication.get_latest_auth_db_snapshot(skip_body)
    else:
      try:
        rev = int(rev)
      except ValueError:
        self.abort_with_error(400, text='Bad revision number, not an integer')
      snapshot = replication.get_auth_db_snapshot(rev, skip_body)
    if not snapshot:
      self.abort_with_error(404, text='No such snapshot: %s' % rev)
    resp = {
        'auth_db_rev': snapshot.key.integer_id(),
        'created_ts': utils.datetime_to_timestamp(snapshot.created_ts),
        'sha256': snapshot.auth_db_sha256,
    }
    if not skip_body:
      assert snapshot.auth_db_deflated
      resp['deflated_body'] = base64.b64encode(snapshot.auth_db_deflated)
    self.send_response({'snapshot': resp})

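# Example (not from the original source): fetching and inflating the latest
# snapshot served by the handler above. The route is an assumption based on
# the handler's parameters; the body is zlib-deflated and base64-encoded.
import base64
import json
import urllib2
import zlib

def fetch_auth_db(host, oauth_token):
  req = urllib2.Request(
      'https://%s/auth_service/api/v1/authdb/revisions/latest' % host,
      headers={'Authorization': 'Bearer %s' % oauth_token})
  snapshot = json.load(urllib2.urlopen(req))['snapshot']
  return zlib.decompress(base64.b64decode(snapshot['deflated_body']))
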
def current_identity_cannot(action_format, *args):  # pragma: no cover
  """Returns AuthorizationError."""
  action = action_format % args
  msg = 'User %s cannot %s' % (auth.get_current_identity().to_bytes(), action)
  logging.warning(msg)
  return auth.AuthorizationError(msg)

def post(self):
  """Responds with access token and server version."""
  try:
    request = json.loads(self.request.body)
    client_protocol = str(request['protocol_version'])
    client_app_version = str(request['client_app_version'])
    pusher = request.get('pusher', True)
    fetcher = request.get('fetcher', True)
  except (ValueError, KeyError) as exc:
    return self.send_error(
        'Invalid body of /handshake call.\nError: %s.' % exc)

  # This access token will be used to validate each subsequent request.
  access_token = self.generate_xsrf_token({'v': client_protocol})

  # Log details of the handshake to the server log.
  logging_info = {
      'Access Id': auth.get_current_identity().to_bytes(),
      'Client app version': client_app_version,
      'Client is fetcher': fetcher,
      'Client is pusher': pusher,
      'Client protocol version': client_protocol,
      'Token': access_token,
  }
  logging.info(
      '\n'.join('%s: %s' % (k, logging_info[k]) for k in sorted(logging_info)))

  # Send back the response.
  self.send_json({
      'access_token': access_token,
      'protocol_version': ISOLATE_PROTOCOL_VERSION,
      'server_app_version': utils.get_app_version(),
  })

class ImporterIngestTarballHandler(auth.ApiHandler):
  """Accepts PUT with a tarball containing a bunch of groups to import.

  The request body is expected to be the tarball as a raw byte stream.

  See proto/config.proto, GroupImporterConfig for more details.
  """

  # For some reason webapp2 attempts to deserialize the body as form data when
  # searching for an XSRF token (which doesn't work when the body is a
  # tarball). Disable this (along with cookie-based auth; we want only
  # OAuth2).
  xsrf_token_request_param = None
  xsrf_token_enforce_on = ()

  @classmethod
  def get_auth_methods(cls, conf):
    return [auth.oauth_authentication]

  # The real authorization check is inside 'ingest_tarball'. This one just
  # rejects anonymous calls earlier.
  @auth.require(lambda: not auth.get_current_identity().is_anonymous)
  def put(self, name):
    try:
      groups, auth_db_rev = importer.ingest_tarball(name, self.request.body)
      self.send_response({
          'groups': groups,
          'auth_db_rev': auth_db_rev,
      })
    except importer.BundleImportError as e:
      self.abort_with_error(400, error=str(e))

def get(self, request):
  """Handles an incoming CatalogMachineRetrievalRequest."""
  user = auth.get_current_identity().to_bytes()
  logging.info(
      'Received CatalogMachineRetrievalRequest:\nUser: %s\n%s',
      user,
      request,
  )
  if acl.is_catalog_admin():
    if not request.backend:
      raise endpoints.BadRequestException(
          'Backend unspecified by administrator')
  elif acl.is_backend_service():
    current_backend = acl.get_current_backend()
    if request.backend is None:
      request.backend = current_backend
    if request.backend != current_backend:
      raise endpoints.ForbiddenException('Mismatched backend')

  entry = models.CatalogMachineEntry.get(request.backend, request.hostname)
  if not entry:
    raise endpoints.NotFoundException('CatalogMachineEntry not found')

  response = rpc_messages.CatalogMachineRetrievalResponse(
      dimensions=entry.dimensions,
      policies=entry.policies,
      state=entry.state,
  )
  if entry.lease_expiration_ts:
    # datetime_to_timestamp returns microseconds; convert to seconds.
    response.lease_expiration_ts = utils.datetime_to_timestamp(
        entry.lease_expiration_ts) / 1000 / 1000
  return response

def get(self):
  platform = self.request.get('platform')
  if not platform:
    self.abort(400, 'No "platform" specified.')
  version = self.request.get('version')
  if not version:
    self.abort(400, 'No "version" specified.')

  # Make sure params look okay syntactically. Don't touch datastore yet.
  pkg = client.CIPD_CLIENT_PREFIX + platform
  if not impl.is_valid_package_path(pkg):
    self.abort(400, 'Invalid platform name.')
  if not client.is_cipd_client_package(pkg):
    self.abort(400, 'Unrecognized platform name.')
  if not impl.is_valid_instance_version(version):
    self.abort(400, 'Invalid version identifier.')

  # Client packages are usually public, but this is not hardcoded; check ACL.
  caller = auth.get_current_identity()
  if not acl.can_fetch_instance(pkg, caller):
    self.abort(403, 'Not allowed.')

  # The rest of the calls touch datastore and Google Storage, and need a
  # configured Repo implementation.
  repo = impl.get_repo_service()
  if repo is None or not repo.is_fetch_configured():
    self.abort(500, 'The service is not configured.')

  # Resolve a version to a concrete instance ID, if necessary.
  instance_id = version
  if not impl.is_valid_instance_id(version):
    ids = repo.resolve_version(pkg, version, limit=2)
    if not ids:
      self.abort(404, 'No such package.')
    if len(ids) > 1:
      self.abort(
          409,
          'The provided tag points to multiple instances, can\'t use it '
          'as a version identifier.')
    instance_id = ids[0]

  # Fetch metadata of the instance and make sure it exists (it may not if
  # the version identifier was given as an instance ID).
  instance = repo.get_instance(pkg, instance_id)
  if not instance:
    self.abort(404, 'No such package.')

  # This is "cipd.exe" on Windows or just "cipd" on other platforms, to use
  # in the Content-Disposition header.
  exe_name = client.get_cipd_client_filename(pkg)

  # The client binary is extracted via a separate process that could have
  # failed or may still be running.
  client_info, err = repo.get_client_binary_info(instance, filename=exe_name)
  if err:
    self.abort(404, 'The client binary is not available. Error: %s.' % err)
  if not client_info:
    self.abort(404, 'The client binary is not extracted yet, try later.')

  # Success!
  self.redirect(client_info.fetch_url)

def _check_permission(perm, realms, identity=None):
  """Checks if the caller has the realm permission.

  Args:
    perm: An instance of auth.Permission.
    realms: List of realms.
    identity: An instance of auth.Identity to check permission.
        Defaults to auth.get_current_identity().

  Returns:
    None

  Raises:
    auth.AuthorizationError: if the caller is not allowed or the realm is
        missing.
  """
  # Remove None from the list.
  realms = [r for r in realms if r]

  if not identity:
    identity = auth.get_current_identity()
  if not realms:
    raise auth.AuthorizationError('Realm is missing')

  if not auth.has_permission(perm, realms, identity=identity):
    logging.warning(
        '[realms] %s "%s" does not have permission "%s" in any realms %s',
        identity.kind, identity.name, perm.name, realms)
    raise auth.AuthorizationError(
        '%s "%s" does not have permission "%s"' %
        (identity.kind, identity.name, perm.name))
  logging.info(
      '[realms] %s "%s" has permission "%s" in any realms %s',
      identity.kind, identity.name, perm.name, realms)

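# Example (not from the original source): a typical call site. The permission
# name and realm are made up for illustration; auth.Permission is part of the
# components.auth API used throughout these snippets.
_check_permission(
    auth.Permission('swarming.pools.createTask'), ['myproject:mypool'])
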
def lease(self, request):
  """Handles an incoming LeaseRequest."""
  # Hash the combination of client + client-generated request ID in order to
  # deduplicate responses on a per-client basis.
  user = auth.get_current_identity().to_bytes()
  request_hash = models.LeaseRequest.generate_key(user, request).id()
  logging.info(
      'Received LeaseRequest:\nUser: %s\nRequest hash: %s\n%s',
      user,
      request_hash,
      request,
  )
  duplicate = models.LeaseRequest.get_by_id(request_hash)
  deduplication_checksum = models.LeaseRequest.compute_deduplication_checksum(
      request,
  )
  if duplicate:
    # Found a duplicate request ID from the same user. Attempt deduplication.
    if deduplication_checksum == duplicate.deduplication_checksum:
      # The LeaseRequest RPC we just received matches the original.
      # We're safe to dedupe.
      logging.info('Dropped duplicate LeaseRequest:\n%s', duplicate.response)
      return duplicate.response
    else:
      logging.warning(
          'Request ID reuse:\nOriginally used for:\n%s',
          duplicate.request,
      )
      return rpc_messages.LeaseResponse(
          error=rpc_messages.LeaseRequestError.REQUEST_ID_REUSE)
  else:
    logging.info('Storing LeaseRequest')
    response = rpc_messages.LeaseResponse()
    response.request_hash = request_hash
    models.LeaseRequest(
        deduplication_checksum=deduplication_checksum,
        id=request_hash,
        owner=auth.get_current_identity(),
        request=request,
        response=response,
        state=models.LeaseRequestStates.UNTRIAGED,
    ).put()
    logging.info('Sending LeaseResponse:\n%s', response)
    return response

def init_new_request(request, allow_high_priority):
  """Initializes a new TaskRequest but doesn't store it.

  Fills up some values and does minimal checks.

  If parent_task_id is set, properties for the parent are used:
  - priority: defaults to parent.priority - 1
  - user: overridden by parent.user
  """
  assert request.__class__ is TaskRequest, request
  if request.parent_task_id:
    run_result_key = task_pack.unpack_run_result_key(request.parent_task_id)
    result_summary_key = task_pack.run_result_key_to_result_summary_key(
        run_result_key)
    request_key = task_pack.result_summary_key_to_request_key(
        result_summary_key)
    parent = request_key.get()
    if not parent:
      raise ValueError('parent_task_id is not a valid task')
    request.priority = max(min(request.priority, parent.priority - 1), 0)
    # Drop the previous user.
    request.user = parent.user

  # If the priority is below 100, make sure the user has the right to do so.
  if request.priority < 100 and not allow_high_priority:
    # Special case for terminate request.
    if not request.properties.is_terminate:
      # Silently drop the priority of normal users.
      request.priority = 100

  request.authenticated = auth.get_current_identity()
  if (not request.properties.is_terminate and
      request.properties.grace_period_secs is None):
    request.properties.grace_period_secs = 30
  if request.properties.idempotent is None:
    request.properties.idempotent = False

  request.service_account = 'none'
  if request.service_account_token and request.service_account_token != 'none':
    if request.service_account_token == 'bot':
      request.service_account = 'bot'
    else:
      # TODO(vadimsh): Check the token signature, verify it can be used by the
      # current user, extract the service account email.
      raise auth.AuthorizationError('service_account_token is not implemented')

  request.tags.append('priority:%s' % request.priority)
  request.tags.append('user:%s' % request.user)
  request.tags.append('service_account:%s' % request.service_account)
  for key, value in request.properties.dimensions.iteritems():
    request.tags.append('%s:%s' % (key, value))
  request.tags = sorted(set(request.tags))

  if request.properties.idempotent:
    request.properties_hash = request.HASHING_ALGO(
        utils.encode_to_json(request.properties)).digest()
  else:
    request.properties_hash = None

def store(self, name):
  """Stores a new version of the instance."""
  # Create an incomplete key.
  self.key = ndb.Key(self.__class__, None, parent=self._gen_root_key(name))
  self.who = auth.get_current_identity()
  return datastore_utils.store_new_version(self, self.ROOT_MODEL)

def test_check_pools_create_task_enforced_not_allowed(self):
  pool_cfg = self._mock_get_pool_config()
  self._has_permission_mock.return_value = False
  with self.assertRaises(auth.AuthorizationError):
    realms.check_pools_create_task(pool_cfg, True)
  self._has_permission_mock.assert_called_once_with(
      _PERM_POOLS_CREATE_TASK, [u'test:pool'],
      identity=auth.get_current_identity())

def can_edit_task(task):
  """Can 'edit' tasks, like cancelling.

  Since bots can create tasks, they can also cancel them. This may change in
  the future.
  """
  return (
      is_ip_whitelisted_machine() or
      _is_privileged_user() or
      auth.get_current_identity() == task.authenticated)

def test_check_pools_create_task_enforced_allowed(self):
  pool_cfg = self._mock_get_pool_config()
  self._has_permission_mock.return_value = True
  used_realms = realms.check_pools_create_task(pool_cfg, True)
  self.assertTrue(used_realms)
  self._has_permission_mock.assert_called_once_with(
      _PERM_POOLS_CREATE_TASK, [u'test:pool'],
      identity=auth.get_current_identity())

def post(self):
  # Forbid usage of delegation tokens for this particular call. Using
  # delegation when creating delegation tokens is too deep. Redelegation will
  # be done as a separate explicit API call that accepts an existing
  # delegation token via the request body, not via headers.
  if auth.get_current_identity() != auth.get_peer_identity():
    raise auth.AuthorizationError(
        'This API call must not be used with active delegation token')

  # Convert request body to proto (with validation).
  try:
    subtoken = subtoken_from_jsonish(self.parse_body())
  except (TypeError, ValueError) as exc:
    self.abort_with_error(400, text=str(exc))

  # Fill in defaults.
  assert not subtoken.impersonator_id
  user_id = auth.get_current_identity().to_bytes()
  if not subtoken.issuer_id:
    subtoken.issuer_id = user_id
  if subtoken.issuer_id != user_id:
    subtoken.impersonator_id = user_id
  subtoken.creation_time = int(utils.time_time())
  if not subtoken.validity_duration:
    subtoken.validity_duration = DEF_VALIDITY_DURATION_SEC
  if not subtoken.services or '*' in subtoken.services:
    subtoken.services[:] = get_default_allowed_services(user_id)

  # Check ACL (raises auth.AuthorizationError on errors).
  check_can_create_token(user_id, subtoken)

  # Create and sign the token.
  try:
    token = delegation.serialize_token(
        delegation.seal_token(
            delegation_pb2.SubtokenList(subtokens=[subtoken])))
  except delegation.BadTokenError as exc:
    # This happens if the resulting token is too large.
    self.abort_with_error(400, text=str(exc))

  self.send_response(
      response={
          'delegation_token': token,
          'validity_duration': subtoken.validity_duration,
      },
      http_code=201)

def validate_config(self, request):
  logging.debug(
      'requester: %s, config_set: %s, paths: %s',
      auth.get_current_identity().to_bytes(),
      request.config_set,
      [f.path for f in request.files])
  if not request.config_set:
    raise endpoints.BadRequestException('Must specify a config_set')
  if not request.files:
    raise endpoints.BadRequestException('Must specify files to validate')
  for f in request.files:
    if not f.path:
      raise endpoints.BadRequestException('Must specify the path of a file')
  if not acl.has_validation_access():
    logging.warning(
        '%s does not have validation access',
        auth.get_current_identity().to_bytes())
    raise endpoints.ForbiddenException()
  if not can_read_config_set(request.config_set):
    logging.warning(
        '%s does not have access to %s',
        auth.get_current_identity().to_bytes(),
        request.config_set)
    raise endpoints.ForbiddenException()

  futs = []
  for f in request.files:
    ctx = cfg_validation.Context()
    with ctx.prefix(f.path + ': '):
      futs.append(validation.validate_config_async(
          request.config_set, f.path, f.content, ctx=ctx))
  ndb.Future.wait_all(futs)

  # Return the severities and the texts.
  msgs = []
  for f, fut in zip(request.files, futs):
    for msg in fut.get_result().messages:
      msgs.append(cfg_endpoint.ValidationMessage(
          path=f.path,
          severity=common.Severity.lookup_by_number(msg.severity),
          text=msg.text))
  return self.ValidateConfigResponseMessage(messages=msgs)

def write_config(config):
  """Updates stored configuration."""
  if not is_valid_config(config):
    raise ValueError('Invalid config')
  e = GroupImporterConfig(
      key=config_key(),
      config=config,
      modified_by=auth.get_current_identity())
  e.put()

def post(self):
  if config.is_remote_configured():
    self.abort_with_error(409, text='The configuration is managed elsewhere')
  try:
    importer.write_config(
        text=self.parse_body().get('config'),
        modified_by=auth.get_current_identity())
  except ValueError as ex:
    self.abort_with_error(400, text=str(ex))
  self.send_response({'ok': True})

def subscriber_email(self):
  """Validates the caller is using email-based auth and returns the email.

  Raises HTTP 400 if some other kind of authentication is used. Only emails
  are supported by PubSub.
  """
  caller = auth.get_current_identity()
  if not caller.is_user:
    self.abort_with_error(400, text='Caller must use email-based auth')
  return caller.name

def list_packages(self, request):
  """Returns packages in the given directory and possibly subdirectories."""
  path = request.path or ''
  recursive = request.recursive or False

  pkgs, dirs = self.service.list_packages(path, recursive)
  caller = auth.get_current_identity()
  visible_pkgs = [p for p in pkgs if acl.can_fetch_package(p, caller)]
  visible_dirs = [d for d in dirs if acl.can_fetch_package(d, caller)]

  return ListPackagesResponse(packages=visible_pkgs, directories=visible_dirs)

def test_add(self):
  params = {'buildername': 'linux_rel'}
  build = self.service.add(
      bucket='chromium',
      parameters=params,
  )
  self.assertIsNotNone(build.key)
  self.assertIsNotNone(build.key.id())
  self.assertEqual(build.bucket, 'chromium')
  self.assertEqual(build.parameters, params)
  self.assertEqual(build.created_by, auth.get_current_identity())

def modify_capacity(self, request):
  """Handles an incoming CatalogCapacityModificationRequest."""
  user = auth.get_current_identity().to_bytes()
  logging.info(
      'Received CatalogCapacityModificationRequest:\nUser: %s\n%s',
      user,
      request,
  )
  error = self.check_backend(request)
  if error:
    return rpc_messages.CatalogManipulationResponse(error=error)
  return self._modify_capacity(request)

def try_lease():
  build = _get_leasable_build(build_id)

  if build.status != model.BuildStatus.SCHEDULED or build.is_leased:
    return False, build

  build.lease_expiration_date = lease_expiration_date
  build.regenerate_lease_key()
  build.leasee = auth.get_current_identity()
  build.never_leased = False
  build.put()
  return True, build

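# Example (not from the original source): try_lease() closes over build_id and
# lease_expiration_date and is meant to run transactionally; the surrounding
# code might wrap it roughly like this.
success, build = ndb.transaction(try_lease)
if not success:
  logging.info('Build %s could not be leased', build.key.id())
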
def add_machine(self, request):
  """Handles an incoming CatalogMachineAdditionRequest."""
  user = auth.get_current_identity().to_bytes()
  logging.info(
      'Received CatalogMachineAdditionRequest:\nUser: %s\n%s',
      user,
      request,
  )
  error = self.check_backend(request) or self.check_hostname(request)
  if error:
    return rpc_messages.CatalogManipulationResponse(error=error)
  return self._add_machine(request)

def fetch_package(self, request):
  """Returns information about a package."""
  package_name = validate_package_name(request.package_name)

  caller = auth.get_current_identity()
  if not acl.can_fetch_package(package_name, caller):
    raise auth.AuthorizationError()

  pkg = self.service.get_package(package_name)
  if pkg is None:
    raise PackageNotFoundError()
  return FetchPackageResponse(package=package_to_proto(pkg))

def fetch_acl(self, request):
  """Returns access control list for a given package path."""
  package_path = validate_package_path(request.package_path)

  caller = auth.get_current_identity()
  if not acl.can_fetch_acl(package_path, caller):
    raise auth.AuthorizationError()

  return FetchACLResponse(
      acls=package_acls_to_proto({
          role: acl.get_package_acls(package_path, role)
          for role in acl.ROLES
      }))

def try_lease():
  build = self._get_leasable_build(build_id)

  if build.status != model.BuildStatus.SCHEDULED or build.is_leased:
    return False, build

  build.lease_expiration_date = lease_expiration_date
  build.regenerate_lease_key()
  build.leasee = auth.get_current_identity()
  build.put()
  logging.info(
      'Build %s was leased by %s', build.key.id(), build.leasee.to_bytes())
  return True, build

def _has_access(access_list):
  cur_ident = auth.get_current_identity().to_bytes()
  for ac in access_list:
    if ac.startswith('group:'):
      if auth.is_group_member(ac.split(':', 2)[1]):
        return True
    else:
      identity_str = ac
      if ':' not in identity_str:
        identity_str = 'user:%s' % identity_str
      if cur_ident == identity_str:
        return True
  return False

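# Example (not from the original source): the entry formats _has_access()
# accepts are 'group:<name>' references and identity strings, with bare
# values treated as 'user:<value>'. The entries below are made up.
acl_entries = ['group:committers', 'user:bot@example.com', 'someone@example.com']
if not _has_access(acl_entries):
  raise endpoints.ForbiddenException()
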
def can_async(bucket, action):
  errors.validate_bucket_name(bucket)
  assert isinstance(action, Action)

  identity = auth.get_current_identity()
  cache_key = 'acl_can/%s/%s/%s' % (bucket, identity.to_bytes(), action.name)
  ctx = ndb.get_context()
  result = yield ctx.memcache_get(cache_key)
  if result is not None:
    raise ndb.Return(result)

  result = yield has_any_of_roles_async(bucket, ROLES_FOR_ACTION[action])
  yield ctx.memcache_set(cache_key, result, time=60)
  raise ndb.Return(result)

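# Example (not from the original source): consuming can_async() from an ndb
# tasklet. 'Action.ADD_BUILD' is an assumed member of the Action enum used
# above.
@ndb.tasklet
def can_add_build_async(bucket):
  allowed = yield can_async(bucket, Action.ADD_BUILD)
  raise ndb.Return(allowed)

print can_add_build_async('try').get_result()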