def check_file(file_id: str, upsert: bool = False) -> File:
    """Checks that the file with file_id exists in the DB

    Args:
        file_id: The id for the requested file.
        upsert: If the file doesn't exist, create a placeholder file

    Returns:
        The file object

    Raises:
        NotFoundError: File with the requested ID doesn't exist and is expected to
        ModelValidationError: Incorrectly formatted ID is given
    """
    try:
        ObjectId(file_id)
    except (InvalidId, TypeError):
        raise ModelValidationError(
            f"Cannot create a file id with the string {file_id}. "
            "Requires 24-character hex string."
        )

    res = db.query_unique(File, id=file_id)
    if res is None:
        if upsert:
            create_file("BG_placeholder", 0, 0, file_id)
            res = db.query_unique(File, id=file_id)
        else:
            raise NotFoundError(f"Tried to fetch an unsaved file {file_id}")

    db.modify(res, updated_at=datetime.utcnow())

    return res
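
# Usage sketch (not part of the original module; the 24-character hex id is
# hypothetical): demonstrates the two lookup modes of check_file.
def _example_check_file():
    # Strict lookup: raises NotFoundError if the file was never saved
    try:
        file = check_file("0123456789abcdef01234567")
    except NotFoundError:
        file = None

    # Upsert lookup: creates a placeholder File document if one doesn't
    # exist yet, so chunks can arrive before their parent metadata
    return check_file("0123456789abcdef01234567", upsert=True)
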
def set_owner(
    file_id: str, owner_id: str = None, owner_type: str = None
) -> FileStatus:
    """Sets the owner field of the file. This is used for DB pruning.

    Args:
        file_id: The id of the file.
        owner_id: The id of the owner.
        owner_type: The type of the owner (job/request).

    Returns:
        A FileStatus describing whether the ownership change was applied
    """
    if (owner_id is not None) and (owner_type is not None):
        file = check_file(file_id)
        old_owner_priority = OWNERSHIP_PRIORITY.get(file.owner_type, 1_000_000)
        new_owner_priority = OWNERSHIP_PRIORITY.get(owner_type, 1_000_000)

        # Case 1: The new owner has equal or higher priority.
        # Case 2: The old owner has higher priority than the new one,
        #         but it was deleted.
        if new_owner_priority <= old_owner_priority or (
            file.owner_type in OWNERSHIP_PRIORITY
            and (file.job is None and file.request is None)
        ):
            if owner_type in OWNERSHIP_MAP:
                owner = db.query_unique(OWNERSHIP_MAP[owner_type], id=owner_id)
                file = db.modify(
                    file,
                    owner_id=owner_id,
                    owner_type=owner_type,
                    job=(
                        owner.id
                        if owner is not None and owner_type == "JOB"
                        else None
                    ),
                    request=(
                        owner.id
                        if owner is not None and owner_type == "REQUEST"
                        else None
                    ),
                )
            else:
                file = db.modify(file, owner_id=owner_id, owner_type=owner_type)
            return _safe_build_object(FileStatus, file, operation_complete=True)

        return _safe_build_object(
            FileStatus,
            operation_complete=False,
            message=(
                f"Owner type {owner_type} has lower priority than {file.owner_type}"
            ),
        )

    return _safe_build_object(
        FileStatus,
        operation_complete=False,
        message=(
            "Operation FILE_OWN requires an owner type "
            f"and id. Got {owner_type} and {owner_id}"
        ),
    )
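
# The priority comparison above relies on module-level OWNERSHIP_PRIORITY and
# OWNERSHIP_MAP constants that aren't shown in this excerpt. A plausible shape,
# given how they're used (lower number == higher priority; unknown owner types
# default to 1_000_000), is sketched here:
#
#     OWNERSHIP_PRIORITY = {"JOB": 1, "REQUEST": 2}
#     OWNERSHIP_MAP = {"JOB": Job, "REQUEST": Request}
#
# With that shape, a "JOB" owner would displace a "REQUEST" owner, but not the
# reverse unless the old owner was deleted.
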
def remove_instance(
    *_, system: System = None, instance: Instance = None, **__
) -> Instance:
    """Removes an Instance

    Args:
        system: The System
        instance: The Instance

    Returns:
        The deleted Instance
    """
    db.modify(system, pull__instances=instance)

    return instance
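
# Note: "pull__instances=instance" maps to MongoDB's $pull operator, removing
# the matching embedded Instance from the System's instance list atomically.
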
def heartbeat(
    instance_id: str = None,
    instance: Instance = None,
    system: System = None,
    **_,
) -> Instance:
    """Instance heartbeat

    Args:
        instance_id: The Instance ID
        instance: The Instance
        system: The System

    Returns:
        The updated Instance
    """
    system, instance = _from_kwargs(
        system=system, instance=instance, instance_id=instance_id
    )

    system = db.modify(
        system,
        query={"instances__name": instance.name},
        set__instances__S__status_info__heartbeat=datetime.utcnow(),
    )

    return system.get_instance_by_name(instance.name)
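
# Note on the query/update pair above: "instances__name" selects the embedded
# instance and "instances__S__..." is mongoengine's positional-$ update syntax,
# so only the matched instance's fields are written. A direct mongoengine
# equivalent (sketch; assumes System here is the mongoengine document class):
#
#     System.objects(id=system.id, instances__name=instance.name).update(
#         set__instances__S__status_info__heartbeat=datetime.utcnow()
#     )
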
def update(
    instance_id: str = None,
    instance: Instance = None,
    system: System = None,
    new_status: str = None,
    metadata: dict = None,
    update_heartbeat: bool = True,
    **_,
) -> Instance:
    """Update an Instance status.

    Will also update the status_info heartbeat.

    Args:
        instance_id: The Instance ID
        instance: The Instance
        system: The System
        new_status: The new status
        metadata: New metadata
        update_heartbeat: Set the heartbeat to the current time

    Returns:
        The updated Instance
    """
    system, instance = _from_kwargs(
        system=system, instance=instance, instance_id=instance_id
    )

    logger.debug(f"Updating instance {system}[{instance}]")

    updates = {}

    if new_status:
        updates["set__instances__S__status"] = new_status

        if new_status == "STOPPED":
            lpm.update(instance_id=instance_id, restart=False, stopped=True)

    if update_heartbeat:
        updates["set__instances__S__status_info__heartbeat"] = datetime.utcnow()

    if metadata:
        metadata_update = dict(instance.metadata)
        metadata_update.update(metadata)

        updates["set__instances__S__metadata"] = metadata_update

    system = db.modify(system, query={"instances__name": instance.name}, **updates)

    return system.get_instance_by_name(instance.name)
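
# Usage sketch (hypothetical id and metadata; not part of the original module):
# a status transition that also merges plugin metadata. The heartbeat refresh
# happens implicitly via update_heartbeat's default.
def _example_mark_running():
    return update(
        instance_id="5f4d3c2b1a0f0e0d0c0b0a09",
        new_status="RUNNING",
        metadata={"pid": 1234},
    )
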
def create_chunk(
    file_id: str, offset: int = None, data: str = None, **kwargs
) -> FileStatus:
    """Saves provided chunk to the DB, updates the parent document with the chunk id

    Args:
        file_id: This should be a valid file id.
        offset: The offset index. (e.g. 0, 1, 2, ...)
        data: The base64 encoded data
        kwargs: The other parameters for FileChunk that we don't need to check

    Returns:
        A FileStatus describing the outcome

    Raises:
        NotFoundError: File with the requested ID doesn't exist and is expected to
    """
    if len(data) > MAX_CHUNK_SIZE:
        return FileStatus(
            operation_complete=False,
            message=(
                "Chunk data length exceeds the maximum "
                f"allowable length of {MAX_CHUNK_SIZE}."
            ),
            file_id=file_id,
            offset=offset,
            data=data,
        )

    file = check_file(file_id, upsert=kwargs.get("upsert", False))
    chunk = FileChunk(
        file_id=file.id, offset=offset, data=data, owner=kwargs.get("owner", None)
    )

    # This is starting to get DB-specific, but we want to be sure this is
    # an atomic operation.
    chunk = db.create(chunk)
    modify = {f"set__chunks__{offset}": chunk.id}
    file = db.modify(file, **modify)
    chunk = db.modify(chunk, owner=file.id)

    return _safe_build_object(FileStatus, file, chunk, operation_complete=True)
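
# Note on the write ordering above: the chunk document is created first, then
# the parent File's chunk map gains an entry at the offset key, and only then
# is the chunk's owner back-reference set to the file. A failure midway
# therefore leaves an orphaned (prunable) chunk rather than a File pointing at
# a chunk that doesn't exist.
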
def initialize(
    instance_id: str = None,
    instance: Instance = None,
    system: System = None,
    runner_id: str = None,
    **_,
) -> Instance:
    """Initializes an instance.

    Args:
        instance_id: The Instance ID
        instance: The Instance
        system: The System
        runner_id: The runner id to associate with this plugin, if any

    Returns:
        The updated Instance
    """
    system, instance = _from_kwargs(
        system=system, instance=instance, instance_id=instance_id
    )

    logger.debug(f"Initializing instance {system}[{instance}]")

    queue_spec = queue.create(instance, system)

    system = db.modify(
        system,
        query={"instances__name": instance.name},
        **{
            "set__instances__S__status": "INITIALIZING",
            "set__instances__S__status_info__heartbeat": datetime.utcnow(),
            "set__instances__S__metadata__runner_id": runner_id,
            "set__instances__S__queue_type": queue_spec["queue_type"],
            "set__instances__S__queue_info": queue_spec["queue_info"],
        },
    )

    start(instance=instance, system=system)

    return system.get_instance_by_name(instance.name)
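
# Lifecycle note: initialize() appears to be the entry point for an instance
# coming online. It provisions the message queue, stamps the instance
# INITIALIZING with a fresh heartbeat, records the runner id, and then
# delegates to start() (defined elsewhere) to request the actual startup.
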
def update_system(
    system_id: str = None,
    system: System = None,
    new_commands: Sequence[Command] = None,
    add_instances: Sequence[Instance] = None,
    description: str = None,
    display_name: str = None,
    icon_name: str = None,
    metadata: dict = None,
    template: str = None,
) -> System:
    """Update an already existing System

    Args:
        system_id: The ID of the System to be updated
        system: The System to be updated
        new_commands: List of commands to overwrite existing commands
        add_instances: List of new instances that will be added to the current list
        description: Replacement description
        display_name: Replacement display_name
        icon_name: Replacement icon_name
        metadata: Dictionary that will be incorporated into current metadata
        template: Replacement template

    Returns:
        The updated System
    """
    updates = {}

    system = system or db.query_unique(System, id=system_id)

    if new_commands is not None:
        # Convert these to DB form and back to make sure all defaults are correct
        mongo_commands = [db.from_brewtils(command) for command in new_commands]
        brew_commands = db.to_brewtils(mongo_commands)

        if (
            system.commands
            and not config.get("plugin.allow_command_updates")
            and "dev" not in system.version
            and system.has_different_commands(brew_commands)
        ):
            raise ModelValidationError(
                f"System {system} already exists with different commands"
            )

        updates["commands"] = mongo_commands

    # If we set an attribute to None mongoengine marks that attribute for deletion
    # That's why we explicitly test each of these
    if description is not None:
        updates["description"] = description

    if display_name is not None:
        updates["display_name"] = display_name

    if icon_name is not None:
        updates["icon_name"] = icon_name

    if template is not None:
        updates["template"] = template

    if metadata:
        metadata_update = copy.deepcopy(system.metadata)
        metadata_update.update(metadata)

        updates["metadata"] = metadata_update

    if add_instances:
        if -1 < system.max_instances < len(system.instances) + len(add_instances):
            raise ModelValidationError(
                f"Unable to add instance(s) to {system} - would exceed "
                f"the system instance limit of {system.max_instances}"
            )

        updates["push_all__instances"] = []
        instance_names = system.instance_names

        for instance in add_instances:
            if instance.name in instance_names:
                raise ModelValidationError(
                    f"Unable to add Instance {instance} to System {system}: "
                    "Duplicate instance names"
                )

            updates["push_all__instances"].append(db.from_brewtils(instance))

    system = db.modify(system, **updates)

    # Also need to let the routing module know
    from beer_garden.router import add_routing_system

    add_routing_system(system=system)

    return system
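
# Usage sketch (hypothetical id and values; not part of the original module):
# partial updates pass only the fields to change, since explicit None values
# are treated as "leave alone" rather than "delete".
def _example_rename_system():
    return update_system(
        system_id="5f4d3c2b1a0f0e0d0c0b0a09",
        display_name="Echo (production)",
        metadata={"team": "platform"},
    )
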
def create_file(
    file_name: str,
    file_size: int,
    chunk_size: int,
    file_id: str = None,
    upsert: bool = False,
    **kwargs,
) -> FileStatus:
    """Creates a top-level File object to track chunks

    Args:
        file_name: The name of the file to be uploaded.
        file_size: The size of the file to be uploaded (in bytes).
        chunk_size: The size of the chunks that the file is broken into (in bytes).
        file_id: (Optional) The original file id
        upsert: (Optional) If a file ID is given, the function will modify the file
            metadata if it already exists
        kwargs: (Optional) Any other valid file fields that can be populated

    Returns:
        A FileStatus containing the file id

    Raises:
        ValueError: Chunk size provided exceeds the size allowed
        ModelValidationError: File id (if provided) is not a valid ObjectId string
        NotUniqueException: File with the requested ID already exists
    """
    if chunk_size > MAX_CHUNK_SIZE:
        raise ValueError(
            f"Cannot create a file with chunk size greater than {MAX_CHUNK_SIZE}."
        )

    file = File(
        file_name=file_name,
        file_size=file_size,
        chunk_size=chunk_size,
        **kwargs,
    )

    # Override the file id if passed in
    if file_id is not None:
        try:
            file.id = ObjectId(file_id)
        except (InvalidId, TypeError):
            raise ModelValidationError(
                f"Cannot create a file id with the string {file_id}. "
                "Requires 24-character hex string."
            )

    # Normal creation process, checks for uniqueness
    if not upsert:
        try:
            file = db.create(file)
        except NotUniqueException:
            raise NotUniqueException(
                f"Cannot create a file with id {file_id}; "
                "file with id already exists."
            )
        return _safe_build_object(FileStatus, file, operation_complete=True)

    # Safe creation process, handles out-of-order file uploads but may
    # combine existing data with collision
    else:
        res = db.query_unique(File, id=file.id)
        if res is None:
            file = db.create(file)
        else:
            file = db.modify(
                res,
                **_unroll_object(
                    file,
                    ignore=["id", "chunks", "owner", "job", "request", "updated_at"],
                ),
            )
        return _safe_build_object(FileStatus, file, operation_complete=True)
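
# End-to-end upload sketch (hypothetical helper, not part of the original
# module): create the parent File, then write each base64 chunk at its offset.
# Assumes chunk_size bounds the *encoded* chunk length (per create_chunk's
# length check) and that the returned FileStatus exposes the new file's id as
# file_id, as create_chunk's error path suggests.
def _example_upload(file_name: str, raw_bytes: bytes, chunk_size: int = 1024):
    import base64

    encoded = base64.b64encode(raw_bytes).decode("ascii")
    status = create_file(file_name, file_size=len(raw_bytes), chunk_size=chunk_size)

    for offset, start in enumerate(range(0, len(encoded), chunk_size)):
        create_chunk(
            status.file_id, offset=offset, data=encoded[start : start + chunk_size]
        )

    return status
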
def run_job(job_id, request_template, **kwargs):
    """Spawned by the scheduler, this will kick off a new request.

    This method is meant to be run in a separate process.

    Args:
        job_id: The Beer-Garden job ID that triggered this event.
        request_template: Request template specified by the job.
    """
    import beer_garden.router

    request_template.metadata["_bg_job_id"] = job_id

    # Attempt to inject information into the request template
    if "event" in kwargs and kwargs["event"] is not None:
        try:
            # This overloads the __missing__ function to allow partial injections
            injection_dict = InjectionDict()
            build_injection_dict(injection_dict, kwargs["event"], prefix="event")

            try:
                db_job = db.query_unique(Job, id=job_id)
                if db_job:
                    build_injection_dict(
                        injection_dict, db_job.trigger, prefix="trigger"
                    )
            except Exception as ex:
                logger.exception(f"Could not fetch job for parameter injection: {ex}")

            inject_values(request_template.parameters, injection_dict)
        except Exception as ex:
            logger.exception(f"Could not inject parameters: {ex}")

    db_job = db.query_unique(Job, id=job_id)
    wait_event = threading.Event()

    # I'm not sure what would cause this, but just be safe
    if not db_job:
        logger.error(f"Could not find job {job_id} in database, job will not be run")
        return

    try:
        logger.debug(f"About to execute {db_job!r}")

        request = beer_garden.router.route(
            Operation(
                operation_type="REQUEST_CREATE",
                model=Request.from_template(request_template),
                model_type="Request",
                kwargs={"wait_event": wait_event},
            )
        )

        # Wait for the request to complete
        timeout = db_job.timeout or None
        if not wait_event.wait(timeout=timeout):
            logger.warning(f"Execution of job {db_job} timed out.")
            return

        request = get_request(request.id)

        updates = {}
        if request.status == "ERROR":
            updates["inc__error_count"] = 1
            logger.debug(f"{db_job!r} request completed with ERROR status")
        elif request.status == "SUCCESS":
            logger.debug(f"{db_job!r} request completed with SUCCESS status")
            updates["inc__success_count"] = 1

        if updates:
            db.modify(db_job, **updates)
    except Exception as ex:
        logger.error(f"Error executing {db_job}: {ex}")
        db.modify(db_job, inc__error_count=1)

    # Be a little careful here as the job could have been removed or paused
    job = beer_garden.application.scheduler.get_job(job_id)
    if (
        job
        and job.next_run_time is not None
        and getattr(job.trigger, "reschedule_on_finish", False)
    ):
        # This essentially resets the timer on this job, which has the effect of
        # making the wait time start whenever the job finishes
        beer_garden.application.scheduler.reschedule_job(job_id, trigger=job.trigger)
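
# The InjectionDict used in run_job isn't shown in this excerpt. Judging by
# the "__missing__" comment above, the idea is that unresolved keys survive as
# literal "{key}" placeholders instead of raising KeyError, so templates can be
# partially injected. A minimal sketch of that behavior:
#
#     class InjectionDict(dict):
#         def __missing__(self, key):
#             return "{" + key + "}"
#
#     "{a} and {b}".format_map(InjectionDict(a="x"))  # -> "x and {b}"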