def evaluate_pipeline_request(
        self, problem_description: Problem, pipeline: Pipeline,
        input_data: typing.Sequence[ContainerType], *,
        metrics: typing.Sequence[typing.Dict],
        data_preparation_pipeline: Pipeline = None,
        scoring_pipeline: Pipeline = None,
        data_preparation_params: typing.Dict[str, str] = None,
        scoring_params: typing.Dict[str, str] = None,
        timeout: float = None
) -> str:
    # Register this evaluation under a fresh request id so callers can
    # look up its result later via `self.request_results`.
    request_id = str(uuid.uuid4())

    pipeline_result = PipelineResult(pipeline=pipeline)
    pipeline_result.status = "RUNNING"
    pipeline_result.method_called = "evaluate"

    # Run the full evaluation (data preparation, fit, and scoring across
    # folds) through the d3m runtime.
    scores, results = runtime_module.evaluate(
        pipeline=pipeline,
        inputs=input_data,
        data_pipeline=data_preparation_pipeline,
        scoring_pipeline=scoring_pipeline,
        problem_description=problem_description,
        data_params=data_preparation_params,
        metrics=metrics,
        context=Context.TESTING,
        scoring_params=scoring_params,
        hyperparams=None,
        random_seed=self.random_seed,
        data_random_seed=self.random_seed,
        scoring_random_seed=self.random_seed,
        volumes_dir=self.volumes_dir,
        scratch_dir=self.scratch_dir,
        runtime_environment=self.runtime_environment,
    )

    if results.has_error():
        pipeline_result.status = "ERRORED"
        pipeline_result.error = [result.error for result in results]
    else:
        pipeline_result.status = "COMPLETED"
        # Merge the per-fold score frames into a single scores dataframe.
        pipeline_result.scores = runtime_module.combine_folds(scores)
        pipeline_result.outputs = [result.values for result in results]

    self.request_results[request_id] = pipeline_result
    return request_id
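# Usage sketch (illustrative, not part of this module): submit an evaluation
# and read its stored result back by request id. Assumes `backend` is an
# instance of this class and that `problem`, `pipeline`, `dataset`, and
# `accuracy_metric` were loaded/constructed elsewhere.
#
#     request_id = backend.evaluate_pipeline_request(
#         problem, pipeline, [dataset],
#         metrics=[accuracy_metric],
#     )
#     pipeline_result = backend.request_results[request_id]
#     print(pipeline_result.status, pipeline_result.scores)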
def evaluate_pipeline(
        self, data_handler, problem_description: Problem, pipeline: Pipeline,
        input_data_id: str, *,
        metrics: typing.Sequence[typing.Dict],
        data_preparation_pipeline: Pipeline = None,
        scoring_pipeline: Pipeline = None,
        data_preparation_params: typing.Dict[str, str] = None,
        scoring_params: typing.Dict[str, str] = None,
        timeout: float = None
) -> PipelineResult:
    # Suppress d3m logging noise while constructing the result container.
    with d3m_utils.silence():
        pipeline_result = PipelineResult(pipeline=pipeline)
    pipeline_result.status = "RUNNING"
    pipeline_result.method_called = "evaluate"

    # Fetch the shared input data from the Ray data-handler actor by id,
    # rather than shipping the data itself with every request.
    request_id = data_handler.get_data.remote(input_data_id)
    input_data = ray.get(request_id)

    # Run the full evaluation (data preparation, fit, and scoring across
    # folds) through the d3m runtime.
    with d3m_utils.silence():
        scores, results = runtime_module.evaluate(
            pipeline=pipeline,
            inputs=input_data,
            data_pipeline=data_preparation_pipeline,
            scoring_pipeline=scoring_pipeline,
            problem_description=problem_description,
            data_params=data_preparation_params,
            metrics=metrics,
            context=Context.TESTING,
            scoring_params=scoring_params,
            hyperparams=None,
            random_seed=self.random_seed,
            data_random_seed=self.random_seed,
            scoring_random_seed=self.random_seed,
            volumes_dir=self.volumes_dir,
            scratch_dir=self.scratch_dir,
            runtime_environment=self.runtime_environment,
        )

    if results.has_error():
        pipeline_result.status = "ERRORED"
        pipeline_result.error = [result.error for result in results]
    else:
        pipeline_result.status = "COMPLETED"
        # Merge the per-fold score frames into a single scores dataframe.
        pipeline_result.scores = runtime_module.combine_folds(scores)

    if self.store_results:
        # Persist the pipeline run document to the scratch directory for
        # later inspection.
        pipeline_result.pipeline_run = save_pipeline_run(results.pipeline_runs, self.scratch_dir)

    return pipeline_result
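# Usage sketch (illustrative): when this method lives on a Ray actor, the
# caller first registers the input data with the shared data handler and then
# invokes the method remotely. The `executor` handle and the handler's
# `add_data` method are assumptions for illustration, not part of this module.
#
#     input_data_id = ray.get(data_handler.add_data.remote([dataset]))
#     future = executor.evaluate_pipeline.remote(
#         data_handler, problem, pipeline, input_data_id,
#         metrics=[accuracy_metric],
#     )
#     pipeline_result = ray.get(future)
#     print(pipeline_result.status, pipeline_result.scores)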