Example #1
    def GetProduceSolutionResults(self, request, context):
        request_id = request.request_id

        info_dict = self.get_from_stage_outputs("ProduceSolution", request_id)

        problem_doc, pipeline_json, train_datasets, eval_datasets = (
            info_dict["problem_doc"],
            info_dict["pipeline_json"],
            info_dict["train_datasets"],
            info_dict["eval_datasets"],
        )

        producer = executors.ScoreFitProduceExecutor.ScoreFitProduceExecutor(
            None, None)
        progress = 0  # counted per (train, test) pair below, but never reported back

        for dataset_train in train_datasets:
            for dataset_test in eval_datasets:
                csv_uri = producer.process_item(
                    ("fitproduce", problem_doc, pipeline_json, dataset_train,
                     dataset_test))
                outputs = {"outputs.0": value_pb2.Value(csv_uri=csv_uri)}
                yield core_pb2.GetProduceSolutionResultsResponse(
                    progress=core_pb2.Progress(
                        state=core_pb2.COMPLETED),  # For NYU integration
                    exposed_outputs=outputs)
                progress += 1
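
For context, a minimal client-side sketch of how a TA3 might consume this server-streaming method. The Core stub, the port, and the placeholder identifiers are assumptions; the message types are the standard TA3-TA2 core API ones used above.

import grpc
import core_pb2
import core_pb2_grpc
import value_pb2

channel = grpc.insecure_channel("localhost:45042")  # assumed TA2 endpoint
stub = core_pb2_grpc.CoreStub(channel)

# fitted_solution_id and dataset_uri are placeholders for values obtained
# from an earlier FitSolution call and dataset discovery.
produce_response = stub.ProduceSolution(core_pb2.ProduceSolutionRequest(
    fitted_solution_id=fitted_solution_id,
    inputs=[value_pb2.Value(dataset_uri=dataset_uri)]))

results = stub.GetProduceSolutionResults(
    core_pb2.GetProduceSolutionResultsRequest(
        request_id=produce_response.request_id))
for response in results:
    if response.progress.state == core_pb2.COMPLETED:
        print(response.exposed_outputs["outputs.0"].csv_uri)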
Example #2
def GetProduceSolutionRequest(fitted_solution_id, dataset_uri):
    value = value_pb2.Value(
        dataset_uri=dataset_uri)

    msg = core_pb2.ProduceSolutionRequest(
        fitted_solution_id=fitted_solution_id,
        inputs=[value])
    return msg
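
A hypothetical call site for this builder, reusing the stub from the sketch above; the id and URI are placeholders:

produce_msg = GetProduceSolutionRequest(
    fitted_solution_id="fitted-solution-id",  # placeholder
    dataset_uri="file:///data/TEST/datasetDoc.json")  # placeholder
produce_response = stub.ProduceSolution(produce_msg)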
Example #3
def GetFitSolutionRequest(solution_id, dataset_uri):
    value = value_pb2.Value(
        dataset_uri=dataset_uri)

    msg = core_pb2.FitSolutionRequest(
        solution_id=solution_id,
        inputs=[value])
    return msg
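
Chaining the two builders gives the usual fit-then-produce sequence. A sketch under the same stub assumption; streaming GetFitSolutionResults until COMPLETED to pick up fitted_solution_id follows the standard core API flow:

fit_response = stub.FitSolution(GetFitSolutionRequest(solution_id, dataset_uri))

fitted_solution_id = None
for result in stub.GetFitSolutionResults(
        core_pb2.GetFitSolutionResultsRequest(
            request_id=fit_response.request_id)):
    if result.progress.state == core_pb2.COMPLETED:
        fitted_solution_id = result.fitted_solution_id

produce_response = stub.ProduceSolution(
    GetProduceSolutionRequest(fitted_solution_id, dataset_uri))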
Example #4
def GetSearchSolutionsRequest(dataset_uri, problem):
    value = value_pb2.Value(
        dataset_uri=dataset_uri)

    msg = core_pb2.SearchSolutionsRequest(
        user_agent='FakeTA3TestBot',
        problem=problem,
        inputs=[value, value])  # note: the same dataset Value is passed twice
    return msg
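
The search entry point ties the flow together. A sketch assuming the stub above and a problem message already built elsewhere (parsing problemDoc.json into problem_pb2 messages is outside this snippet):

search_response = stub.SearchSolutions(
    GetSearchSolutionsRequest(dataset_uri, problem))

solution_ids = []
for result in stub.GetSearchSolutionsResults(
        core_pb2.GetSearchSolutionsResultsRequest(
            search_id=search_response.search_id)):
    if result.solution_id:
        solution_ids.append(result.solution_id)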
Example #5
    def GetScoreSolutionResults(self, request, context):
        request_id = request.request_id
        info_dict = self.get_from_stage_outputs("ScoreSolution", request_id)
        score = info_dict["score"]
        metrics = info_dict["metrics"]
        problem_doc = info_dict["problem_doc"]
        pipeline = info_dict["pipeline_json"]
        dataset_train = info_dict["dataset"]
        eval_datasets = [
            utils.utils.convert_dataset_uri_to_dataset(problem_doc,
                                                       uri,
                                                       mode="score")
            for uri in info_dict["eval_uris"]
        ]

        # Instantiate scorer evaluator
        scorer = executors.ScoreFitProduceExecutor.ScoreFitProduceExecutor(
            None, None)

        # todo(maxlam): actually use metrics
        for dataset_eval in eval_datasets:
            eval_scores = []
            for metric in metrics:
                eval_score = scorer.process_item(
                    ("score", problem_doc,
                     ta3ta2utils.decode_performance_metric(metric)["metric"],
                     pipeline, dataset_train, dataset_eval))
                eval_scores.append(eval_score)

            eval_score_proto = core_pb2.GetScoreSolutionResultsResponse(
                scores=[
                    core_pb2.Score(value=value_pb2.Value(
                        raw=value_pb2.ValueRaw(double=escore)),
                                   metric=metric)
                    for metric, escore in zip(metrics, eval_scores)
                ],
                #progress=core_pb2.Progress(state=core_pb2.RUNNING))
                progress=core_pb2.Progress(
                    state=core_pb2.COMPLETED))  # For NYU integration
            yield eval_score_proto
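
Client side, this stream reduces to plain floats. A sketch assuming the stub above and a score_response from a prior ScoreSolution call (see the usage after Example #6); score.value.raw.double mirrors what the server sets here:

for result in stub.GetScoreSolutionResults(
        core_pb2.GetScoreSolutionResultsRequest(
            request_id=score_response.request_id)):
    if result.progress.state == core_pb2.COMPLETED:
        for score in result.scores:
            print(score.metric.metric, score.value.raw.double)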
Example #6
def GetScoreSolutionRequest(solution_id, eval_datasets, metrics):
    msg = core_pb2.ScoreSolutionRequest(
        solution_id=solution_id,
        inputs=[value_pb2.Value(dataset_uri=x) for x in eval_datasets],
        performance_metrics=[
            problem_pb2.ProblemPerformanceMetric(k=0, metric=m)
            for m in metrics
        ])
    return msg
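
A hypothetical call site; ACCURACY and F1_MACRO are standard PerformanceMetric enum values from problem_pb2, and the id and URI are placeholders:

score_msg = GetScoreSolutionRequest(
    solution_id="solution-id",  # placeholder
    eval_datasets=["file:///data/SCORE/datasetDoc.json"],  # placeholder
    metrics=[problem_pb2.ACCURACY, problem_pb2.F1_MACRO])
score_response = stub.ScoreSolution(score_msg)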