Example #1
    def TrainEvaluate(self, request, context):
        # Unpack the request protos into plain Python containers; the
        # worker id is prepended to the hyperparameters before they are
        # passed to the blackbox object.
        current_input = list(request.current_input.values)
        core_hyperparameters = list(request.hyperparameters)
        hyperparameters = [self.worker_id] + core_hyperparameters
        current_input_reshaped = np.array(current_input)
        tag = request.tag

        proposed_perturbations = [
            list(p.values) for p in request.perturbations
        ]

        perturbations = []
        function_values = []
        evaluation_stats = []

        # Evaluate the blackbox at each perturbed point on a freshly
        # sampled batch of tasks.
        for proposed_perturbation in proposed_perturbations:
            perturbation = np.array(proposed_perturbation)
            perturbations.append(
                zero_order_pb2.Vector(values=perturbation.tolist()))
            # Sample task ids without replacement for this evaluation.
            task_id_list = np.random.choice(self.task_ids,
                                            size=self.task_batch_size,
                                            replace=False)

            task_list = [
                self.blackbox_object.config.make_task_fn(task_id=task_id)
                for task_id in task_id_list
            ]

            function_value, evaluation_stat = self.blackbox_object.execute(
                current_input_reshaped + perturbation,
                task_list,
                hyperparameters=hyperparameters)

            evaluation_stats.append(evaluation_stat)
            function_values.append(function_value)

        # Element-wise sum of the per-evaluation stats.
        evaluation_stats_reduced = [sum(x) for x in zip(*evaluation_stats)]
        # Echo the perturbations back only when some were proposed.
        if proposed_perturbations:
            results = zero_order_pb2.EvaluationResponse(
                perturbations=perturbations,
                function_values=function_values,
                evaluation_stats=evaluation_stats_reduced,
                tag=tag)
        else:
            results = zero_order_pb2.EvaluationResponse(
                perturbations=[],
                function_values=function_values,
                evaluation_stats=evaluation_stats_reduced,
                tag=tag)
        return results
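
Both examples assume numpy is imported as np and that zero_order_pb2 contains the protobuf bindings generated from the service definition. Below is a minimal client-side sketch of calling TrainEvaluate; the generated module name zero_order_pb2_grpc, the stub class EvaluationStub, and the request message EvaluationRequest with these fields are assumptions inferred from how the servicer reads the request, not taken from the source.

import grpc
import numpy as np

import zero_order_pb2
import zero_order_pb2_grpc  # assumed name of the generated gRPC module

channel = grpc.insecure_channel("localhost:50051")
stub = zero_order_pb2_grpc.EvaluationStub(channel)  # assumed service name

current_input = np.zeros(10)
perturbations = np.random.normal(size=(4, 10))

request = zero_order_pb2.EvaluationRequest(  # assumed message name
    current_input=zero_order_pb2.Vector(values=current_input.tolist()),
    hyperparameters=[0.1, 0.01],
    perturbations=[
        zero_order_pb2.Vector(values=p.tolist()) for p in perturbations
    ],
    tag=0)

response = stub.TrainEvaluate(request)
print(list(response.function_values))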
Example #2
    def TestEvaluate(self, request, context):
        # Unpack the request; the tag field carries the current training
        # iteration, which gates how often test evaluation runs.
        current_input = list(request.current_input.values)
        core_hyperparameters = list(request.hyperparameters)
        hyperparameters = [self.worker_id] + core_hyperparameters
        current_input = np.array(current_input)
        iteration = request.tag

        if iteration % self.blackbox_object.config.test_frequency == 0:
            task = self.blackbox_object.config.make_task_fn(
                task_id=self.task_ids[self.worker_id])

            # Evaluate the unadapted parameters test_parallel_evals
            # times to average out evaluation noise.
            mamlpt_value_list = [
                self.blackbox_object.task_value(
                    params=current_input,
                    task=task,
                    hyperparameters=hyperparameters,
                    test_mode=True,
                    horizon=self.blackbox_object.config.horizon)
                for _ in range(
                    self.blackbox_object.config.test_parallel_evals)
            ]

            # Take one adaptation step on the task, then evaluate the
            # adapted parameters the same number of times.
            adaptation_param = self.blackbox_object.adaptation_step(
                params=current_input,
                task=task,
                hyperparameters=hyperparameters,
                test_mode=True,
                horizon=self.blackbox_object.config.horizon)

            adaptation_value_list = [
                self.blackbox_object.task_value(
                    params=adaptation_param,
                    task=task,
                    hyperparameters=hyperparameters,
                    test_mode=True,
                    horizon=self.blackbox_object.config.horizon)
                for _ in range(
                    self.blackbox_object.config.test_parallel_evals)
            ]

            # Pre-adaptation values followed by post-adaptation values.
            test_vals = mamlpt_value_list + adaptation_value_list

            results = zero_order_pb2.EvaluationResponse(
                perturbations=[],
                function_values=test_vals,
                evaluation_stats=[],
                tag=0)
        else:
            # Not a test iteration: return an empty response.
            results = zero_order_pb2.EvaluationResponse(
                perturbations=[],
                function_values=[],
                evaluation_stats=[],
                tag=0)

        return results
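
The source does not show how the driver consumes these responses, but in evolution-strategies training the perturbations and function_values returned by TrainEvaluate are typically combined into a zero-order gradient estimate. Below is a minimal sketch of a vanilla ES estimator; the function name, the sigma parameter, and the usage lines are illustrative assumptions, not part of the source.

import numpy as np

def es_gradient_estimate(perturbations, function_values, sigma):
    # Vanilla ES estimate: g ~ (1 / (n * sigma)) * sum_i f_i * eps_i,
    # where eps_i is the i-th perturbation and f_i its function value.
    eps = np.array(perturbations)   # shape (n, d)
    f = np.array(function_values)   # shape (n,)
    return (f[:, None] * eps).sum(axis=0) / (len(f) * sigma)

# Hypothetical usage with a response from Example #1:
# eps_list = [list(p.values) for p in response.perturbations]
# grad = es_gradient_estimate(eps_list, response.function_values, sigma=0.1)
# current_input = current_input + learning_rate * grad  # ascent step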