Example 1
 def new(db: "MephistoDB", task_run: TaskRun,
         assignment_data: Optional[Dict[str, Any]]) -> "Assignment":
     """
     Create a new assignment under the given task run.

     Records the assignment in the database, sets up the on-disk folder
     that will hold its results, and — when ``assignment_data`` is given —
     persists it there as JSON so it can be reloaded for this particular
     assignment later.
     """
     # TODO(101) consider offloading this state management to the MephistoDB
     # as it is data handling and can theoretically be done differently
     # in different implementations
     new_id = db.new_assignment(
         task_run.db_id,
         task_run.requester_id,
         task_run.task_type,
         task_run.provider_type,
         task_run.sandbox,
     )
     # Each assignment gets its own directory under the task run's dir,
     # keyed by its database id
     target_dir = os.path.join(task_run.get_run_dir(), new_id)
     os.makedirs(target_dir)
     if assignment_data is not None:
         data_path = os.path.join(target_dir, ASSIGNMENT_DATA_FILE)
         with open(data_path, "w+") as json_file:
             json.dump(assignment_data, json_file)
     assignment = Assignment(db, new_id)
     logger.debug(f"{assignment} created for {task_run}")
     return assignment
Example 2
    def validate_and_run_config_or_die(
            self,
            run_config: DictConfig,
            shared_state: Optional[SharedTaskState] = None) -> str:
        """
        Parse the given arguments and launch a job.

        Resolves the requester named in ``run_config``, validates the
        blueprint, architect, and crowd provider arguments before touching
        the database, then creates a TaskRun record, builds and deploys the
        task server, registers the job with the supervisor, and launches
        the task units.

        Returns the db_id of the created TaskRun.

        Raises EntryDoesNotExistException when the named requester does not
        exist (other than the special "MOCK_REQUESTER" name), and
        AssertionError when the found requester's provider type does not
        match ``run_config.provider._provider_type``. Any failure during
        launch triggers a best-effort architect shutdown before the
        original exception is re-raised.
        """
        if shared_state is None:
            shared_state = SharedTaskState()

        # First try to find the requester:
        requester_name = run_config.provider.requester_name
        requesters = self.db.find_requesters(requester_name=requester_name)
        if len(requesters) == 0:
            # Special-case name that falls back to a mock requester,
            # used for local testing without a registered account
            if run_config.provider.requester_name == "MOCK_REQUESTER":
                requesters = [get_mock_requester(self.db)]
            else:
                raise EntryDoesNotExistException(
                    f"No requester found with name {requester_name}")
        # NOTE(review): multiple matches are possible; the first is used —
        # confirm find_requesters ordering if this matters
        requester = requesters[0]
        requester_id = requester.db_id
        provider_type = requester.provider_type
        assert provider_type == run_config.provider._provider_type, (
            f"Found requester for name {requester_name} is not "
            f"of the specified type {run_config.provider._provider_type}, "
            f"but is instead {provider_type}.")

        # Next get the abstraction classes, and run validation
        # before anything is actually created in the database
        blueprint_type = run_config.blueprint._blueprint_type
        architect_type = run_config.architect._architect_type
        BlueprintClass = get_blueprint_from_type(blueprint_type)
        ArchitectClass = get_architect_from_type(architect_type)
        CrowdProviderClass = get_crowd_provider_from_type(provider_type)

        BlueprintClass.assert_task_args(run_config, shared_state)
        ArchitectClass.assert_task_args(run_config, shared_state)
        CrowdProviderClass.assert_task_args(run_config, shared_state)

        # Find an existing task or create a new one
        task_name = run_config.task.get("task_name", None)
        if task_name is None:
            # Fall back to the blueprint type as the task name
            task_name = blueprint_type
            logger.warning(
                f"Task is using the default blueprint name {task_name} as a name, "
                "as no task_name is provided")
        tasks = self.db.find_tasks(task_name=task_name)
        task_id = None
        if len(tasks) == 0:
            task_id = self.db.new_task(task_name, blueprint_type)
        else:
            task_id = tasks[0].db_id

        logger.info(f"Creating a task run under task name: {task_name}")

        # Create a new task run, storing the fully-resolved config as JSON
        new_run_id = self.db.new_task_run(
            task_id,
            requester_id,
            json.dumps(OmegaConf.to_container(run_config, resolve=True)),
            provider_type,
            blueprint_type,
            requester.is_sandbox(),
        )
        task_run = TaskRun(self.db, new_run_id)

        try:
            # Register the blueprint with args to the task run,
            # ensure cached
            blueprint = task_run.get_blueprint(args=run_config,
                                               shared_state=shared_state)

            # If anything fails after here, we have to cleanup the architect
            build_dir = os.path.join(task_run.get_run_dir(), "build")
            os.makedirs(build_dir, exist_ok=True)
            architect = ArchitectClass(self.db, run_config, shared_state,
                                       task_run, build_dir)

            # Setup and deploy the server
            built_dir = architect.prepare()
            task_url = architect.deploy()

            # TODO(#102) maybe the cleanup (destruction of the server configuration?) should only
            # happen after everything has already been reviewed, this way it's possible to
            # retrieve the exact build directory to review a task for real
            architect.cleanup()

            # Create the backend runner
            task_runner = BlueprintClass.TaskRunnerClass(
                task_run, run_config, shared_state)

            # Small hack for auto appending block qualification
            existing_qualifications = shared_state.qualifications
            if run_config.blueprint.get("block_qualification",
                                        None) is not None:
                existing_qualifications.append(
                    make_qualification_dict(
                        run_config.blueprint.block_qualification,
                        QUAL_NOT_EXIST, None))
            if run_config.blueprint.get("onboarding_qualification",
                                        None) is not None:
                # Workers who failed onboarding are excluded via the
                # derived "failed" qualification name
                existing_qualifications.append(
                    make_qualification_dict(
                        OnboardingRequired.get_failed_qual(
                            run_config.blueprint.onboarding_qualification),
                        QUAL_NOT_EXIST,
                        None,
                    ))
            shared_state.qualifications = existing_qualifications

            # Register the task with the provider
            provider = CrowdProviderClass(self.db)
            provider.setup_resources_for_task_run(task_run, run_config,
                                                  shared_state, task_url)

            initialization_data_array = blueprint.get_initialization_data()

            # Link the job together
            job = self.supervisor.register_job(architect, task_runner,
                                               provider,
                                               existing_qualifications)
            if self.supervisor.sending_thread is None:
                self.supervisor.launch_sending_thread()
        except (KeyboardInterrupt, Exception) as e:
            logger.error(
                "Encountered error while launching run, shutting down",
                exc_info=True)
            # Best-effort teardown of any deployed server resources; the
            # original exception is re-raised below regardless
            try:
                architect.shutdown()
            except (KeyboardInterrupt, Exception) as architect_exception:
                logger.exception(
                    f"Could not shut down architect: {architect_exception}",
                    exc_info=True,
                )
            raise e

        # Launch is only reached when every step above succeeded, so the
        # variables assigned inside the try block are guaranteed bound here
        launcher = TaskLauncher(self.db, task_run, initialization_data_array)
        launcher.create_assignments()
        launcher.launch_units(task_url)

        self._task_runs_tracked[task_run.db_id] = TrackedRun(
            task_run=task_run,
            task_launcher=launcher,
            task_runner=task_runner,
            architect=architect,
            job=job,
        )
        # NOTE(review): presumably marks the run as not-yet-complete —
        # confirm against TaskRun.update_completion_progress
        task_run.update_completion_progress(status=False)

        return task_run.db_id
Example 3
    def _create_live_task_run(
        self,
        run_config: DictConfig,
        shared_state: SharedTaskState,
        task_run: TaskRun,
        architect_class: Type["Architect"],
        blueprint_class: Type["Blueprint"],
        provider_class: Type["CrowdProvider"],
    ) -> LiveTaskRun:
        """
        Initialize all of the members of a live task run object
        """
        # Register the blueprint with args to the task run to ensure cached
        blueprint = task_run.get_blueprint(args=run_config,
                                           shared_state=shared_state)

        # prepare the architect
        server_build_dir = os.path.join(task_run.get_run_dir(), "build")
        os.makedirs(server_build_dir, exist_ok=True)
        architect = architect_class(self.db, run_config, shared_state,
                                    task_run, server_build_dir)

        # Create the backend runner
        task_runner = blueprint_class.TaskRunnerClass(task_run, run_config,
                                                      shared_state)

        # Small hack for auto appending block qualification
        # TODO(OWN) we can use blueprint.mro() to discover BlueprintMixins and extract from there
        quals = shared_state.qualifications
        blueprint_args = run_config.blueprint
        block_qual = blueprint_args.get("block_qualification", None)
        if block_qual is not None:
            quals.append(
                make_qualification_dict(block_qual, QUAL_NOT_EXIST, None))
        onboarding_qual = blueprint_args.get("onboarding_qualification", None)
        if onboarding_qual is not None:
            failed_qual = OnboardingRequired.get_failed_qual(onboarding_qual)
            quals.append(
                make_qualification_dict(failed_qual, QUAL_NOT_EXIST, None))
        shared_state.qualifications = quals

        # Create provider
        provider = provider_class(self.db)

        # Create the launcher, fed by the blueprint's initialization data
        launcher = TaskLauncher(
            self.db,
            task_run,
            blueprint.get_initialization_data(),
            max_num_concurrent_units=run_config.task.max_num_concurrent_units,
        )

        # Assemble the live run and register its messaging components
        worker_pool = WorkerPool(self.db)
        client_io = ClientIOHandler(self.db)
        live_run = LiveTaskRun(
            task_run=task_run,
            architect=architect,
            blueprint=blueprint,
            provider=provider,
            qualifications=shared_state.qualifications,
            task_runner=task_runner,
            task_launcher=launcher,
            client_io=client_io,
            worker_pool=worker_pool,
            loop_wrap=self._loop_wrapper,
        )
        worker_pool.register_run(live_run)
        client_io.register_run(live_run)

        return live_run