def test_invalid_transitions(self, bg_request, start, end):
    """A request persisted in `start` status must raise
    RequestStatusTransitionError when updated to `end` status.

    `start` / `end` are expected to be parametrized status pairs that are
    NOT legal transitions (presumably via pytest.mark.parametrize on the
    class or method — not visible here).
    """
    # Seed the database with a request already in the starting status.
    bg_request.status = start
    bg_request.output = None
    db.create(bg_request)

    # The attribute assignment itself does not validate; the transition
    # check fires inside db.update(), which is why both statements live
    # in the raises-block.
    with pytest.raises(RequestStatusTransitionError):
        bg_request.status = end
        db.update(bg_request)
def handle_event(event):
    """Handle REQUEST and garden lifecycle events for this process.

    Responsibilities visible in this function:
    - Wake any local thread blocked on a completed request (request_map
      maps request ids to threading.Event-like objects that are set here).
    - On local GARDEN_STOPPED, wake every waiter so blocked
      connections/threads can return the request's current status.
    - Mirror downstream-garden request events into the local database.

    Args:
        event: The event to handle
    """
    # Whenever a request is completed check to see if this process is waiting for it
    if event.name == Events.REQUEST_COMPLETED.name:
        completion_event = request_map.pop(event.payload.id, None)
        if completion_event:
            completion_event.set()

    # Only care about local garden
    if event.garden == config.get("garden.name"):
        if event.name == Events.GARDEN_STOPPED.name:
            # When shutting down we need to close all hanging connections/threads
            # waiting for a response. This will invoke each connection/thread to be
            # returned the current status of the Request.
            for request_event in request_map:
                request_map[request_event].set()

    # Only care about downstream garden
    elif event.garden != config.get("garden.name"):
        if event.name in (
            Events.REQUEST_CREATED.name,
            Events.REQUEST_STARTED.name,
            Events.REQUEST_COMPLETED.name,
        ):
            # When we send child requests to child gardens where the parent was on
            # the local garden we remove the parent before sending them. Only setting
            # the subset of fields that change "corrects" the parent
            existing_request = db.query_unique(Request, id=event.payload.id)

            if existing_request:
                for field in ("status", "output", "error_class"):
                    setattr(existing_request, field, getattr(event.payload, field))

                try:
                    db.update(existing_request)
                except RequestStatusTransitionError:
                    # A stale/out-of-order event tried an illegal status
                    # transition; ignore it and keep the current state.
                    pass
            else:
                # Attempt to create the request, if it already exists then continue on
                try:
                    db.create(event.payload)
                except NotUniqueException:
                    pass

            # Required if the main process spawns a wait Request
            # NOTE(review): this lookup keys on str(event.payload.id) while the
            # pop at the top of this function keys on event.payload.id — one of
            # the two presumably matches how waiters register themselves; worth
            # confirming the map's key type is consistent.
            if event.name == Events.REQUEST_COMPLETED.name:
                if str(event.payload.id) in request_map:
                    request_map[str(event.payload.id)].set()
def update_garden(garden: Garden) -> Garden:
    """Persist changes to an existing Garden.

    Args:
        garden: The Garden to update

    Returns:
        The updated Garden
    """
    updated_garden = db.update(garden)
    return updated_garden
def update_job(job: Job) -> Job:
    """Persist changes to an existing Job.

    Args:
        job: The Job to be updated

    Returns:
        The updated Job
    """
    updated_job = db.update(job)
    return updated_job
def handle_event(event: Event) -> None:
    """Handle SYSTEM events

    When creating or updating a system, make sure to mark as non-local first.

    It's possible that we see SYSTEM_UPDATED events for systems that we don't
    currently know about. This will happen if a new system is created on the
    child while the child is operating in standalone mode. To handle that,
    just create the system.

    Args:
        event: The event to handle
    """
    # Events originating from this (local) garden need no mirroring
    if event.garden == config.get("garden.name"):
        return

    if event.name in (Events.SYSTEM_CREATED.name, Events.SYSTEM_UPDATED.name):
        # Anything arriving from another garden is by definition non-local
        event.payload.local = False

        already_known = db.count(System, id=event.payload.id)
        if already_known:
            db.update(event.payload)
        else:
            db.create(event.payload)
    elif event.name == Events.SYSTEM_REMOVED.name:
        db.delete(event.payload)
def resume_job(job_id: str) -> Job:
    """Resume a Job

    Marks the stored Job as RUNNING and persists the change.

    Args:
        job_id: The Job ID

    Returns:
        The Job definition
    """
    stored_job = db.query_unique(Job, id=job_id)
    stored_job.status = "RUNNING"
    return db.update(stored_job)
def pause_job(job_id: str) -> Job:
    """Pause a Job

    Marks the stored Job as PAUSED and persists the change.

    Args:
        job_id: The Job ID

    Returns:
        The Job definition
    """
    stored_job = db.query_unique(Job, id=job_id)
    stored_job.status = "PAUSED"
    return db.update(stored_job)
def cancel_request(request_id: str = None, request: Request = None) -> Request:
    """Mark a Request as CANCELED

    Either the Request itself or its ID may be supplied; when only the ID is
    given the Request is loaded from the database.

    Args:
        request_id: The Request ID to cancel
        request: The Request to cancel

    Returns:
        The modified Request

    Raises:
        ModelValidationError: The Request is already completed
    """
    if not request:
        request = db.query_unique(Request, raise_missing=True, id=request_id)

    request.status = "CANCELED"

    # TODO - Metrics here?
    return db.update(request)
def complete_request(
    request_id: str = None,
    request: Request = None,
    status: str = None,
    output: str = None,
    error_class: str = None,
) -> Request:
    """Mark a Request as completed

    Either the Request itself or its ID may be supplied; when only the ID is
    given the Request is loaded from the database.

    Args:
        request_id: The Request ID to complete
        request: The Request to complete
        status: The status to apply to the Request
        output: The output to apply to the Request
        error_class: The error class to apply to the Request

    Returns:
        The modified Request

    Raises:
        ModelValidationError: The Request is already completed
    """
    if not request:
        request = db.query_unique(Request, raise_missing=True, id=request_id)

    request.status = status
    request.output = output
    request.error_class = error_class

    completed = db.update(request)

    # Metrics
    request_completed(completed)

    return completed
def start_request(request_id: str = None, request: Request = None) -> Request:
    """Mark a Request as IN PROGRESS

    Either the Request itself or its ID may be supplied; when only the ID is
    given the Request is loaded from the database.

    Args:
        request_id: The Request ID to start
        request: The Request to start

    Returns:
        The modified Request

    Raises:
        ModelValidationError: The Request is already completed
    """
    if not request:
        request = db.query_unique(Request, raise_missing=True, id=request_id)

    request.status = "IN_PROGRESS"
    started = db.update(request)

    # Metrics
    request_started(started)

    return started
def update_request(request: Request):
    """Persist the given Request's current state.

    Args:
        request: The Request to save

    Returns:
        The same Request instance that was passed in (note: intentionally
        not the document produced by ``db.update``)
    """
    db.update(request)

    return request
def run_job(job_id, request_template, **kwargs):
    """Spawned by the scheduler, this will kick off a new request.

    This method is meant to be run in a separate process.

    Args:
        job_id: The Beer-Garden job ID that triggered this event.
        request_template: Request template specified by the job.
    """
    # Tag the request so its completion can be traced back to this job
    request_template.metadata["_bg_job_id"] = job_id

    # Attempt to inject information into the request template
    if "event" in kwargs and kwargs["event"] is not None:
        try:
            # This overloads the __missing__ function to allow partial injections
            injection_dict = InjectionDict()
            build_injection_dict(injection_dict, kwargs["event"], prefix="event")

            try:
                db_job = db.query_unique(Job, id=job_id)
                if db_job:
                    build_injection_dict(
                        injection_dict, db_job.trigger, prefix="trigger"
                    )
            except Exception as ex:
                # Best-effort: injection proceeds with event data only
                logger.exception(f"Could not fetch job for parameter injection: {ex}")

            inject_values(request_template.parameters, injection_dict)
        except Exception as ex:
            # Best-effort: the request is still created without injections
            logger.exception(f"Could not inject parameters: {ex}")

    # TODO - Possibly allow specifying blocking timeout on the job definition
    # wait_event is set when the request completes (presumably by the
    # REQUEST_COMPLETED handling elsewhere in this process — confirm)
    wait_event = threading.Event()
    request = beer_garden.router.route(
        Operation(
            operation_type="REQUEST_CREATE",
            model=request_template,
            model_type="RequestTemplate",
            kwargs={"wait_event": wait_event},
        )
    )

    # Block until the spawned request finishes (no timeout — see TODO above)
    wait_event.wait()

    try:
        db_job = db.query_unique(Job, id=job_id)
        if db_job:
            # Re-fetch for the final status before bumping success/error counts
            request = get_request(request.id)

            if request.status == "ERROR":
                db_job.error_count += 1
            elif request.status == "SUCCESS":
                db_job.success_count += 1

            db.update(db_job)
        else:
            # If the job is not in the database, don't proceed to update scheduler
            return
    except Exception as ex:
        logger.exception(f"Could not update job counts: {ex}")

    # Be a little careful here as the job could have been removed or paused
    job = beer_garden.application.scheduler.get_job(job_id)
    if (
        job
        and job.next_run_time is not None
        and getattr(job.trigger, "reschedule_on_finish", False)
    ):
        # This essentially resets the timer on this job, which has the effect of
        # making the wait time start whenever the job finishes
        beer_garden.application.scheduler.reschedule_job(job_id, trigger=job.trigger)