def _collateral(
    self,
    project_uuid: str,
    task_id: str,
    pipeline: Pipeline,
    run_config: Dict[str, Any],
    env_variables: Dict[str, Any],
    **kwargs,
):
    # Get the docker ids of the images to use and make sure the
    # images will not be deleted in case they become outdated by an
    # environment rebuild.
    try:
        env_uuid_docker_id_mappings = lock_environment_images_for_run(
            task_id,
            project_uuid,
            pipeline.get_environments(),
        )
    except errors.ImageNotFound as e:
        msg = (
            "Pipeline references environments that do not exist in the"
            f" project; the following environments do not exist: [{e}].\n\n"
            "Please make sure all pipeline steps are assigned an"
            " environment that exists in the project."
        )
        raise errors.ImageNotFound(msg)

    # Create the Celery object with the Flask context and construct
    # the kwargs for the job.
    celery = make_celery(current_app)
    run_config["env_uuid_docker_id_mappings"] = env_uuid_docker_id_mappings
    run_config["user_env_variables"] = env_variables
    celery_job_kwargs = {
        "pipeline_definition": pipeline.to_dict(),
        "project_uuid": project_uuid,
        "run_config": run_config,
    }

    # Start the run as a background task on Celery. Due to circular
    # imports we send the task by name instead of importing the
    # function directly.
    res = celery.send_task(
        "app.core.tasks.run_pipeline",
        kwargs=celery_job_kwargs,
        task_id=task_id,
    )

    # NOTE: this is only relevant if a backend is configured. The
    # task does not return anything, so we can forget its result to
    # make sure the Celery backend releases the resources (for
    # storing and transmitting results) associated with the task.
    res.forget()
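
# For reference, a minimal sketch of the consumer side of the
# send_task call above. This is NOT the actual implementation of
# app.core.tasks.run_pipeline; it is a hypothetical stub showing
# only the contract that celery_job_kwargs implies (the kwarg names
# are taken from this module, everything else is an assumption):
#
#   @celery.task(bind=True, name="app.core.tasks.run_pipeline")
#   def run_pipeline(self, pipeline_definition, project_uuid, run_config):
#       # self.request.id equals the task_id passed to send_task, so
#       # the worker can report status against the same run records
#       # created in _transaction.
#       ...
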
def _transaction(
    self,
    project_uuid: str,
    run_config: Dict[str, Any],
    pipeline: Pipeline,
):
    # Specify the task_id beforehand to avoid race conditions
    # between the task and its presence in the db.
    task_id = str(uuid.uuid4())

    # NOTE: we are setting the status of the run ourselves without
    # using the option of celery to get the status of tasks. This
    # way we do not have to configure a backend (where the default
    # of "rpc://" does not give the results we would want).
    run = {
        "uuid": task_id,
        "pipeline_uuid": pipeline.properties["uuid"],
        "project_uuid": project_uuid,
        "status": "PENDING",
    }
    db.session.add(models.InteractivePipelineRun(**run))
    # Need to flush, otherwise the bulk insertion of pipeline steps
    # below will lead to foreign key errors:
    # https://docs.sqlalchemy.org/en/13/orm/persistence_techniques.html#bulk-operations-caveats
    db.session.flush()

    # Set an initial value for the status of the pipeline steps that
    # will be run.
    step_uuids = [s.properties["uuid"] for s in pipeline.steps]
    pipeline_steps = []
    for step_uuid in step_uuids:
        pipeline_steps.append(
            models.PipelineRunStep(
                **{
                    "run_uuid": task_id,
                    "step_uuid": step_uuid,
                    "status": "PENDING",
                }
            )
        )
    db.session.bulk_save_objects(pipeline_steps)
    run["pipeline_steps"] = pipeline_steps

    try:
        env_uuid_to_image = environments.lock_environment_images_for_run(
            task_id,
            project_uuid,
            pipeline.get_environments(),
        )
    except self_errors.PipelineDefinitionNotValid:
        msg = "Please make sure every pipeline step is assigned an environment."
        raise self_errors.PipelineDefinitionNotValid(msg)

    self.collateral_kwargs["project_uuid"] = project_uuid
    self.collateral_kwargs["task_id"] = task_id
    self.collateral_kwargs["pipeline"] = pipeline
    self.collateral_kwargs["run_config"] = run_config
    self.collateral_kwargs["env_variables"] = get_proj_pip_env_variables(
        project_uuid, pipeline.properties["uuid"]
    )
    self.collateral_kwargs["env_uuid_to_image"] = env_uuid_to_image

    return run
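
# How these two halves are typically driven (a sketch, assuming a
# two-phase base class wires them together; the method name "do" and
# its body below are hypothetical, not part of this module):
#
#   def do(self, project_uuid, run_config, pipeline):
#       try:
#           run = self._transaction(project_uuid, run_config, pipeline)
#           db.session.commit()
#       except Exception:
#           db.session.rollback()
#           raise
#       # The Celery task is dispatched only after the run and step
#       # rows are committed, so the worker can never observe a
#       # missing run in the db.
#       self._collateral(**self.collateral_kwargs)
#       return run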