def test_invalid_transitions(self, bg_request, start, end):
    """Transitioning a request from ``start`` to ``end`` must be rejected.

    The request is first persisted in the ``start`` status (with no output),
    then an update to the ``end`` status is attempted; ``db.update`` is
    expected to raise ``RequestStatusTransitionError``.
    """
    bg_request.status = start
    bg_request.output = None
    db.create(bg_request)

    with pytest.raises(RequestStatusTransitionError):
        bg_request.status = end
        db.update(bg_request)
def handle_event(event):
    """Handle REQUEST and GARDEN events for this process.

    - REQUEST_COMPLETED: wake any local thread waiting on that request.
    - GARDEN_STOPPED (local garden only): wake every waiting thread so blocked
      connections/threads can return the request's current status.
    - REQUEST_CREATED/STARTED/COMPLETED (downstream gardens): mirror the child
      garden's request state into the local database.

    Args:
        event: The event to handle
    """
    # Whenever a request is completed check to see if this process is waiting for it
    if event.name == Events.REQUEST_COMPLETED.name:
        completion_event = request_map.pop(event.payload.id, None)
        if completion_event:
            completion_event.set()

    local_garden_name = config.get("garden.name")

    # Only care about local garden
    if event.garden == local_garden_name:
        if event.name == Events.GARDEN_STOPPED.name:
            # When shutting down we need to close all hanging connections/threads
            # waiting for a response. This will invoke each connection/thread to be
            # returned the current status of the Request.
            #
            # Iterate over a snapshot of the values: waiters (and the completion
            # branch above, running for other events) pop entries out of
            # request_map, and mutating a dict while iterating it raises
            # RuntimeError.
            for completion_event in list(request_map.values()):
                completion_event.set()

    # Anything that isn't the local garden is a downstream garden
    else:
        if event.name in (
            Events.REQUEST_CREATED.name,
            Events.REQUEST_STARTED.name,
            Events.REQUEST_COMPLETED.name,
        ):
            # When we send child requests to child gardens where the parent was on
            # the local garden we remove the parent before sending them. Only setting
            # the subset of fields that change "corrects" the parent
            existing_request = db.query_unique(Request, id=event.payload.id)
            if existing_request:
                for field in ("status", "output", "error_class"):
                    setattr(existing_request, field, getattr(event.payload, field))

                try:
                    db.update(existing_request)
                except RequestStatusTransitionError:
                    # The child reported a transition the local model rejects
                    # (e.g. an out-of-order event); keep the local state
                    pass
            else:
                # Attempt to create the request, if it already exists then continue on
                try:
                    db.create(event.payload)
                except NotUniqueException:
                    pass

        # Required if the main process spawns a wait Request.
        # NOTE(review): this branch keys request_map by str(event.payload.id)
        # while the pop at the top of this function uses the raw id — confirm
        # which key type request_map actually holds.
        if event.name == Events.REQUEST_COMPLETED.name:
            # Single .get() avoids the check-then-index race on request_map
            waiter = request_map.get(str(event.payload.id))
            if waiter:
                waiter.set()
def _pre_forward(operation: Operation) -> Operation: """Called before forwarding an operation""" # Validate that the operation can be forwarded if operation.operation_type not in routable_operations: raise RoutingRequestException( f"Operation type '{operation.operation_type}' can not be forwarded" ) if operation.operation_type == "REQUEST_CREATE": operation.model = ( beer_garden.requests.RequestValidator.instance().validate_request( operation.model)) # Save the request so it'll have an ID and we'll have something to update operation.model = db.create(operation.model) # Clear parent before forwarding so the child doesn't freak out about an # unknown request operation.model.parent = None operation.model.has_parent = False beer_garden.files.forward_file(operation) # Pull out and store the wait event, if it exists wait_event = operation.kwargs.pop("wait_event", None) if wait_event: beer_garden.requests.request_map[operation.model.id] = wait_event return operation
def create_job(job: Job) -> Job:
    """Create a new Job and add it to the scheduler

    Args:
        job: The Job to be added

    Returns:
        The added Job
    """
    # Persist first so the scheduler is handed a Job that already has an ID
    return db.create(job)
def create_garden(garden: Garden) -> Garden:
    """Persist a new Garden.

    Args:
        garden: The Garden to create

    Returns:
        The created Garden
    """
    # Stamp "now" as the initial heartbeat before saving
    garden.status_info["heartbeat"] = datetime.utcnow()

    created_garden = db.create(garden)
    return created_garden
def handle_event(event):
    """Mirror downstream gardens' request events into the local database.

    Only non-error events from other gardens are considered. A request we
    have never seen is created; one we already have is updated with just the
    fields that change over a request's lifetime (skipping REQUEST_CREATED,
    which carries no new information for an existing request).
    """
    # Only care about downstream garden
    if event.garden != config.get("garden.name") and not event.error:
        if event.name in (
            Events.REQUEST_CREATED.name,
            Events.REQUEST_STARTED.name,
            Events.REQUEST_COMPLETED.name,
            Events.REQUEST_UPDATED.name,
            Events.REQUEST_CANCELED.name,
        ):
            # When we send child requests to child gardens where the parent was on
            # the local garden we remove the parent before sending them. Only setting
            # the subset of fields that change "corrects" the parent
            existing_request = db.query_unique(Request, id=event.payload.id)

            if existing_request is None:
                # Attempt to create the request, if it already exists then continue on
                try:
                    db.create(event.payload)
                except NotUniqueException:
                    pass
            elif event.name != Events.REQUEST_CREATED.name:
                # Copy over only the lifecycle fields, tracking whether anything
                # actually differs so we can skip a no-op database update
                request_changed = False
                for field in ("status", "output", "error_class", "status_updated_at"):
                    new_value = getattr(event.payload, field)
                    if getattr(existing_request, field) != new_value:
                        request_changed = True
                        setattr(existing_request, field, new_value)

                if request_changed:
                    # An out-of-order event may describe an invalid status
                    # transition; in that case keep the local state as-is
                    try:
                        update_request(existing_request, _publish_error=False)
                    except RequestStatusTransitionError:
                        pass
def handle_event(event: Event) -> None:
    """Handle SYSTEM events coming from other gardens.

    Remote systems are always stored as non-local. A SYSTEM_UPDATED event can
    arrive for a system we have never seen (created while the child garden ran
    in standalone mode); in that case the system is created instead of updated.

    Args:
        event: The event to handle
    """
    # Events from this garden are not our concern here
    if event.garden == config.get("garden.name"):
        return

    if event.name == Events.SYSTEM_REMOVED.name:
        db.delete(event.payload)
    elif event.name in (Events.SYSTEM_CREATED.name, Events.SYSTEM_UPDATED.name):
        # Anything coming from another garden is, by definition, not local
        event.payload.local = False

        if db.count(System, id=event.payload.id):
            db.update(event.payload)
        else:
            db.create(event.payload)
def create_request(request: Request) -> Request:
    """Persist a brewtils Request, leaving the caller's object untouched.

    A copy of the supplied Request is transformed (its "bytes"-type parameter
    data is processed) before being written to the database; the returned
    Request reflects that transformed copy, so its parameters may differ from
    the input's.

    Args:
        request: The brewtils Request object from which a database entry will
            be created

    Returns:
        Request: A brewtils Request model based on the newly created database
        entry.
    """
    # TODO: This deepcopy could be very memory intensive if the request contains
    # large file parameters. This should be revisited to see if there is a way
    # to persist remote requests locally without the base64 encoded data while
    # avoiding this copy.
    working_copy = deepcopy(request)

    # Raw-file replacement only applies to requests targeting the local garden
    is_local_namespace = working_copy.namespace == config.get("garden.name")
    remove_bytes_parameter_base64(working_copy.parameters, is_local_namespace)

    return db.create(working_copy)
def create_system(system: System) -> System:
    """Create a new System

    Args:
        system: The System to create

    Returns:
        The created System
    """
    # Default to the local garden's namespace when none was provided
    if system.namespace is None:
        system.namespace = config.get("garden.name")

    created = db.create(system)

    # Also need to let the routing module know
    from beer_garden.router import add_routing_system

    add_routing_system(system=created)

    return created
def create_chunk(
    file_id: str, offset: int = None, data: str = None, **kwargs
) -> FileStatus:
    """Saves provided chunk to the DB, updates the parent document with the chunk id

    Args:
        file_id: This should be a valid file id.
        offset: The offset index. (e.g. 0, 1, 2, ...)
        data: The base64 encoded data
        kwargs: The other parameters for FileChunk that we don't need to check

    Returns:
        FileStatus: operation_complete=False with a message if the chunk is too
        large, otherwise the stored file/chunk metadata

    Raises:
        NotFoundError: File with the requested ID doesn't exist and is expected to
    """
    # NOTE(review): data defaults to None but len(None) raises TypeError —
    # confirm all callers always supply data
    if len(data) > MAX_CHUNK_SIZE:
        return FileStatus(
            operation_complete=False,
            message=(
                "Chunk data length exceeds the maximum "
                f"allowable length of {MAX_CHUNK_SIZE}."
            ),
            file_id=file_id,
            offset=offset,
            data=data,
        )

    # Resolve (or, with upsert, create) the parent file document
    file = check_file(file_id, upsert=kwargs.get("upsert", False))
    chunk = FileChunk(
        file_id=file.id, offset=offset, data=data, owner=kwargs.get("owner", None)
    )

    # This is starting to get DB-specific, but we want to be sure this is an
    # atomic operation.
    chunk = db.create(chunk)
    # Record the new chunk's id at its offset in the parent's chunk map
    modify = {f"set__chunks__{offset}": chunk.id}
    file = db.modify(file, **modify)
    # Point the chunk back at its parent file
    chunk = db.modify(chunk, owner=file.id)

    return _safe_build_object(FileStatus, file, chunk, operation_complete=True)
def create_garden(garden: Garden) -> Garden:
    """Create a new Garden

    Any HTTP connection parameters not explicitly supplied are filled in from
    the brewtils connection spec defaults before the Garden is persisted.

    Args:
        garden: The Garden to create

    Returns:
        The created Garden
    """
    # Explicitly load default config options into garden params
    spec = YapconfSpec(_CONNECTION_SPEC)
    # bg_host is required to load brewtils garden spec
    defaults = spec.load_config({"bg_host": ""})

    # Maps brewtils spec keys -> garden connection parameter names
    config_map = {
        "bg_host": "host",
        "bg_port": "port",
        "ssl_enabled": "ssl",
        "bg_url_prefix": "url_prefix",
        "ca_cert": "ca_cert",
        "ca_verify": "ca_verify",
        "client_cert": "client_cert",
    }

    if garden.connection_params is None:
        garden.connection_params = {}
    http_params = garden.connection_params.setdefault("http", {})

    # Only fill in what's missing; explicitly provided values win
    for spec_key, param_name in config_map.items():
        http_params.setdefault(param_name, defaults[spec_key])

    garden.status_info["heartbeat"] = datetime.utcnow()

    return db.create(garden)
def create_file(
    file_name: str,
    file_size: int,
    chunk_size: int,
    file_id: str = None,
    upsert: bool = False,
    **kwargs,
) -> FileStatus:
    """Creates a top-level File object to track chunks

    Args:
        file_name: The name of the file to be uploaded.
        file_size: The size of the file to be uploaded (in bytes).
        chunk_size: The size of the chunks that the file is broken into (in bytes).
        file_id: (Optional) The original file id
        upsert: (Optional) If a file ID is given, the function will modify the file
            metadata if it already exists
        kwargs: (Optional) Any other valid file fields that can be populated

    Returns:
        A FileStatus carrying the new (or updated) file's metadata

    Raises:
        ValueError: Chunk size provided exceeds the size allowed
        ModelValidationError: File id (if provided) is not a valid ObjectId string
        NotUniqueException: File with the requested ID already exists
    """
    if chunk_size > MAX_CHUNK_SIZE:
        raise ValueError(
            f"Cannot create a file with chunk size greater than {MAX_CHUNK_SIZE}."
        )

    file = File(
        file_name=file_name,
        file_size=file_size,
        chunk_size=chunk_size,
        **kwargs,
    )

    # Override the file id if passed in
    if file_id is not None:
        try:
            file.id = ObjectId(file_id)
        except (InvalidId, TypeError) as exc:
            # Chain the original error so the root cause isn't lost (B904)
            raise ModelValidationError(
                f"Cannot create a file id with the string {file_id}. "
                "Requires 24-character hex string."
            ) from exc

    if not upsert:
        # Normal creation process, checks for uniqueness
        try:
            file = db.create(file)
        except NotUniqueException as exc:
            raise NotUniqueException(
                f"Cannot create a file with id {file_id}; file with id already exists."
            ) from exc
    else:
        # Safe creation process, handles out-of-order file uploads but may
        # combine existing data with collision
        res = db.query_unique(File, id=file.id)
        if res is None:
            file = db.create(file)
        else:
            # Keep identity/relationship fields; overwrite only the metadata
            file = db.modify(
                res,
                **_unroll_object(
                    file,
                    ignore=["id", "chunks", "owner", "job", "request", "updated_at"],
                ),
            )

    return _safe_build_object(FileStatus, file, operation_complete=True)
def create_request(request: Request) -> Request:
    """Persist the given Request to the database.

    Args:
        request: The Request to store

    Returns:
        The created Request
    """
    created = db.create(request)
    return created