def delete_file(file_id: str) -> FileStatus:
    """Delete a file and everything stored alongside it.

    Args:
        file_id: The id of the file.

    Returns:
        A FileStatus with ``operation_complete=True`` and the file id.
    """
    target = check_file(file_id)

    # Deleting the file record removes the associated chunks as well.
    db.delete(target)

    return FileStatus(operation_complete=True, file_id=file_id)
def handle_event(event: Event) -> None:
    """Handle JOB events.

    When creating or updating a job, make sure to mark as local first. BG should
    only handle events that are designated for the local environment. If BG
    triggers off a non-local JOB, then the JOB could be ran twice.

    Args:
        event: The event to handle
    """
    if event.garden != config.get("garden.name"):
        # Event is for some other garden - nothing to do locally.
        return

    if event.name in (Events.JOB_CREATED.name, Events.JOB_UPDATED.name):
        try:
            beer_garden.application.scheduler.add_job(
                run_job,
                trigger=event.payload.trigger,
                trigger_type=event.payload.trigger_type,
                kwargs={
                    "request_template": event.payload.request_template,
                    "job_id": str(event.payload.id),
                },
                name=event.payload.name,
                misfire_grace_time=event.payload.misfire_grace_time,
                coalesce=event.payload.coalesce,
                max_instances=event.payload.max_instances,
                jobstore="beer_garden",
                replace_existing=True,
                id=event.payload.id,
            )
        except Exception:
            # The scheduler rejected the job, so drop the stored Job too
            # before letting the error propagate.
            db.delete(event.payload)
            raise
    elif event.name == Events.JOB_PAUSED.name:
        beer_garden.application.scheduler.pause_job(
            event.payload.id, jobstore="beer_garden"
        )
    elif event.name == Events.JOB_RESUMED.name:
        beer_garden.application.scheduler.resume_job(
            event.payload.id, jobstore="beer_garden"
        )
    elif event.name == Events.JOB_DELETED.name:
        beer_garden.application.scheduler.remove_job(
            event.payload.id, jobstore="beer_garden"
        )
    elif event.name == Events.JOB_EXECUTED.name:
        beer_garden.application.scheduler.execute_job(
            event.payload.id,
            jobstore="beer_garden",
            reset_interval=event.payload.reset_interval,
        )
def remove_job(self, job_id, **kwargs):
    """Remove the job from whichever scheduler currently owns it.

    Args:
        job_id: The job id to lookup
        kwargs: Any other scheduler-specific arguments
    """
    if job_id not in self._async_jobs:
        # Not an async job - delegate to the synchronous scheduler.
        self._sync_scheduler.remove_job(job_id, **kwargs)
        return

    self._async_jobs.pop(job_id)

    # Clean up the paused-job bookkeeping as well, if present.
    if job_id in self._async_paused_jobs:
        self._async_paused_jobs.remove(job_id)

    # Drop the persisted Job record.
    db.delete(db.query_unique(Job, id=job_id))
def remove_garden(garden_name: str) -> Garden:
    """Remove a garden and all of its systems.

    Args:
        garden_name: The Garden name

    Returns:
        The removed Garden
    """
    garden = db.query_unique(Garden, name=garden_name)

    # Remove each system first so per-system cleanup (e.g. routing) runs
    # before the garden itself disappears.
    for system in garden.systems:
        remove_system(system.id)

    db.delete(garden)

    # Fixed: the annotation/docstring previously claimed None, but the
    # function has always returned the removed garden.
    return garden
def remove_system(system_id: str = None, system: System = None) -> System:
    """Remove a system.

    Args:
        system_id: The System ID
        system: The System

    Returns:
        The removed System
    """
    if not system:
        system = db.query_unique(System, id=system_id)

    db.delete(system)

    # Let the routing module know the system is gone (local import,
    # presumably to avoid a circular import - confirm if restructuring).
    from beer_garden.router import remove_routing_system

    remove_routing_system(system=system)

    return system
def remove_garden(garden_name: str) -> Garden:
    """Remove a garden.

    Args:
        garden_name: The Garden name

    Returns:
        The deleted garden
    """
    garden = get_garden(garden_name)

    # TODO: Switch to lookup by garden_name rather than namespace
    systems = get_systems(filter_params={"namespace": garden_name})
    for system in systems:
        remove_system(system.id)

    # Cleanup any RemoteUser entries
    RemoteUser.objects.filter(garden=garden_name).delete()

    db.delete(garden)

    # Fixed: return annotation was `-> None` even though the docstring and
    # the code both return the deleted garden.
    return garden
def handle_event(event: Event) -> None:
    """Handle SYSTEM events.

    When creating or updating a system, make sure to mark as non-local first.

    It's possible that we see SYSTEM_UPDATED events for systems that we don't
    currently know about. This will happen if a new system is created on the
    child while the child is operating in standalone mode. To handle that,
    just create the system.

    Args:
        event: The event to handle
    """
    if event.garden == config.get("garden.name"):
        # Events for the local garden are not handled here.
        return

    if event.name in (Events.SYSTEM_CREATED.name, Events.SYSTEM_UPDATED.name):
        # Anything arriving from another garden is, by definition, non-local.
        event.payload.local = False

        if db.count(System, id=event.payload.id):
            db.update(event.payload)
        else:
            # Unknown system (created while the child ran standalone) - create it.
            db.create(event.payload)
    elif event.name == Events.SYSTEM_REMOVED.name:
        db.delete(event.payload)
def process_request(
    new_request: Union[Request, RequestTemplate],
    wait_event: threading.Event = None,
    is_admin: bool = False,
    priority: int = 0,
) -> Request:
    """Validates and publishes a Request.

    Args:
        new_request: The Request
        wait_event: Event that will be added to the local event_map. Event will
            be set when the request completes.
        is_admin: Flag indicating this request should be published on the admin
            queue
        priority: Number between 0 and 1, inclusive. High numbers equal higher
            priority

    Returns:
        The processed Request

    Raises:
        TypeError: new_request is neither a Request nor a RequestTemplate
        RequestPublishException: The request could not be published to the
            message broker
    """
    # isinstance instead of exact `type() ==` comparison so subclasses are
    # accepted too. Request is tested first so a Request is never treated as
    # a bare template.
    if isinstance(new_request, Request):
        request = new_request
    elif isinstance(new_request, RequestTemplate):
        request = Request.from_template(new_request)
    else:
        raise TypeError(
            f"new_request type is {type(new_request)}, expected "
            "brewtils.models.Request or brewtils.models.RequestTemplate"
        )

    # Validation is only required for non Admin commands because Admin commands
    # are hard coded to map Plugin functions
    if not is_admin:
        try:
            request = _validate_request(request)
        except ModelValidationError:
            invalid_request(request)
            raise

    if request.command_type == "EPHEMERAL":
        logger.debug(f"Publishing {request!r}")
    else:
        # Save after validation since validate can modify the request
        request = create_request(request)
        logger.info(f"Publishing {request!r}")

    if wait_event:
        request_map[request.id] = wait_event

    try:
        _publish_request(request, is_admin=is_admin, priority=priority)
    except Exception as ex:
        # An error publishing means this request will never complete, so remove
        # the persisted request and any completion event we registered.
        if not request.command_type == "EPHEMERAL":
            db.delete(request)
        if wait_event:
            request_map.pop(request.id, None)

        raise RequestPublishException(
            f"Error while publishing {request!r} to message broker"
        ) from ex

    # Metrics
    request_created(request)

    return request
def process_request(
    new_request: Union[Request, RequestTemplate],
    wait_event: threading.Event = None,
    is_admin: bool = False,
    priority: int = 0,
) -> Request:
    """Validates and publishes a Request.

    Args:
        new_request: The Request
        wait_event: Event that will be added to the local event_map. Event will
            be set when the request completes.
        is_admin: Flag indicating this request should be published on the admin
            queue
        priority: Number between 0 and 1, inclusive. High numbers equal higher
            priority

    Returns:
        The processed Request

    Raises:
        TypeError: new_request is neither a Request nor a RequestTemplate
        RequestPublishException: The request could not be published to the
            message broker
    """
    # isinstance instead of exact `type() ==` comparison so subclasses are
    # accepted too. Request is tested first so a Request is never treated as
    # a bare template. (Also dropped a stray `f` prefix and trailing comma
    # from the error message.)
    if isinstance(new_request, Request):
        request = new_request
    elif isinstance(new_request, RequestTemplate):
        request = Request.from_template(new_request)
    else:
        raise TypeError(
            f"new_request type is {type(new_request)}, expected "
            "brewtils.models.Request or brewtils.models.RequestTemplate"
        )

    # Validates the request based on what is in the database.
    # This includes the validation of the request parameters,
    # systems are there, commands are there etc.
    # Validation is only required for non Admin commands because Admin commands
    # are hard coded to map Plugin functions
    if not is_admin:
        request = RequestValidator.instance().validate_request(request)

    # Save after validation since validate can modify the request
    if not request.command_type == "EPHEMERAL":
        request = create_request(request)

    if wait_event:
        request_map[request.id] = wait_event

    try:
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug(f"Publishing {request!r}")
        else:
            if not request.command_type == "EPHEMERAL":
                logger.info(f"Publishing {request!r}")

        queue.put(
            request,
            is_admin=is_admin,
            priority=priority,
            confirm=True,
            mandatory=True,
            delivery_mode=pika.spec.PERSISTENT_DELIVERY_MODE,
        )
    except Exception as ex:
        # An error publishing means this request will never complete, so remove
        # the persisted request and any completion event we registered.
        if not request.command_type == "EPHEMERAL":
            db.delete(request)
        if wait_event:
            request_map.pop(request.id, None)

        raise RequestPublishException(
            f"Error while publishing {request!r} to message broker"
        ) from ex

    # Metrics
    request_created(request)

    return request