Example #1
    def testOptionalObjectiveName(self) -> None:
        experiment = SimpleExperiment(
            name="test_branin",
            search_space=get_branin_search_space(),
            evaluation_function=sum_evaluation_function_v2,
        )

        for i in range(len(self.arms)):
            experiment.new_trial(generator_run=GeneratorRun(arms=[self.arms[i]]))
        self.assertFalse(experiment.eval().df.empty)
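Examples #1, #2, and #4 rely on sum-style evaluation functions (sum_evaluation_function, its _v2/_v3/_v4 variants, and their numpy counterparts) defined elsewhere in the test module. A minimal sketch of what such helpers might look like is given below, assuming only the behaviour implied by the assertions in Example #4 (the metric is simply x1 + x2) and the convention that SimpleExperiment accepts evaluation functions returning a metric-name dict, a bare (mean, SEM) tuple, or a plain float; the exact bodies and signatures of the original helpers are assumptions.

from typing import Dict, Tuple

import numpy as np

def sum_evaluation_function(parameterization) -> Dict[str, Tuple[float, float]]:
    # Full form: metric name mapped to (mean, SEM); matches the "sum" objective name.
    total = parameterization["x1"] + parameterization["x2"]
    return {"sum": (total, 0.0)}

def sum_evaluation_function_v2(parameterization) -> Tuple[float, float]:
    # Shorter form: a bare (mean, SEM) tuple for the single objective.
    return (parameterization["x1"] + parameterization["x2"], 0.0)

def sum_evaluation_function_v2_numpy(parameterization) -> Tuple[np.float64, np.float64]:
    # Same as the v2 variant, but returning numpy scalars.
    total = np.float64(parameterization["x1"]) + np.float64(parameterization["x2"])
    return (total, np.float64(0.0))

The remaining variants differ only in how the result is packaged (plain float, dict of means, and so on), not in the underlying computation.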
Example #2
    def testEvaluationFunctionV4Numpy(self) -> None:
        experiment = SimpleExperiment(
            name="test_branin",
            search_space=get_branin_search_space(),
            objective_name="sum",
            evaluation_function=sum_evaluation_function_v4_numpy,
        )

        for i in range(len(self.arms)):
            experiment.new_trial(generator_run=GeneratorRun(arms=[self.arms[i]]))
        self.assertFalse(experiment.eval().df.empty)
Example #3
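Example #3 is shown without its module preamble. A plausible set of module-level imports for the standard-library and third-party names it uses is sketched below, assuming an Ax release that still ships SimpleExperiment; parse_args, get_function_by_name, get_search_space, get_groundtruth_function, and get_evaluation_function are project-local helpers that are not shown here.

import copy
import json
import os
import time
from pathlib import Path

import pandas as pd
from pandas import DataFrame

from ax import SimpleExperiment
from ax.modelbridge.registry import Models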
def main():

    args = parse_args()

    function_list = [
        get_function_by_name[name]
        for name in args.function_name_list.split(',')
    ]
    weight_list = list(map(float, args.weight_list.split(',')))
    covariance_matrix = json.loads(args.covariance_matrix)
    evaluate_covariance = args.evaluate_covariance

    init_iter = args.init_iter
    # if init_iter > 1:
    #     raise ValueError("init_iter should be 1.")
    init_batch_size = args.init_batch_size
    update_iter = args.update_iter
    batch_size = args.batch_size
    var_coef = args.var_coef

    var_compute_type = args.var_compute_type
    num_random = args.num_random
    num_bucket = args.num_bucket

    save_path = args.save_path

    # num_control = args.num_control

    minimize = True

    groundtruth_function = get_groundtruth_function(function_list, weight_list)
    #evaluation_function = get_evaluation_function(
    #        function_list, weight_list, covariance_matrix,
    #        evaluate_covariance, var_coef)
    evaluation_function = get_evaluation_function(
        function_list, weight_list, covariance_matrix,
        var_compute_type, num_random, num_bucket)

    exp = SimpleExperiment(
        name=args.function_name_list + args.weight_list,
        search_space=get_search_space(function_list),
        evaluation_function=evaluation_function,
        objective_name="objective_name",
        minimize=minimize,
    )
    t_start = time.time()
    print(f"Start time: {t_start}")
    print(f"Sobol iteration begin...{time.time() - t_start}")
    # Phase 1: quasi-random initialization using a Sobol sequence.
    sobol = Models.SOBOL(exp.search_space)
    for i in range(init_iter):
        if init_batch_size == 1:
            exp.new_trial(generator_run=sobol.gen(init_batch_size))
        else:
            exp.new_batch_trial(generator_run=sobol.gen(init_batch_size))
        print(f"Running sobol optimization trial {i+1}/{init_iter}..."
              f"{time.time() - t_start}")
    print(f"GPEI iteration begin...{time.time() - t_start}")
    # Phase 2: Bayesian optimization; the BoTorch GP/EI model is refit on all data each iteration.
    for i in range(update_iter):
        gpei = Models.BOTORCH(experiment=exp, data=exp.eval())
        if batch_size == 1:
            exp.new_trial(generator_run=gpei.gen(batch_size))
        else:
            exp.new_batch_trial(generator_run=gpei.gen(batch_size))
        print(f"Running GPEI optimization trial {i+1}/{update_iter}..."
              f"{time.time() - t_start}")

    # Construct the result table.
    ## Original evaluation data from the experiment.
    data_df = copy.deepcopy(exp.eval().df)
    compare_func = min if minimize else max
    
    arm_name2mean = {}
    for _, row in data_df.iterrows():
        arm_name2mean[row["arm_name"]] = row["mean"]
    ## Per-arm parameters, ground-truth means, and best-so-far columns.
    other_columns = {
        "arm_name": [], "parameters": [], "true_mean": [],
        "cur_trial_best_mean": [], "accum_trials_best_mean": []}
    atbm = None # accum_trial_best_mean
    for trial in exp.trials.values():
        ctbm = None # cur_trial_best_mean
        for arm in trial.arms:
            other_columns['arm_name'].append(arm.name)
            other_columns['parameters'].append(json.dumps(arm.parameters))
            other_columns['true_mean'].append(
                    groundtruth_function(arm.parameters))
            if ctbm is None:
                ctbm = arm_name2mean[arm.name]
            ctbm = compare_func(ctbm, arm_name2mean[arm.name])
        if atbm is None:
            atbm = ctbm
        atbm = compare_func(atbm, ctbm)
        other_columns['cur_trial_best_mean'].extend([ctbm] * len(trial.arms))
        other_columns['accum_trials_best_mean'].extend([atbm] * len(trial.arms))
    other_df = DataFrame(other_columns)

    result_df = data_df.set_index('arm_name').join(
            other_df.set_index('arm_name')).reset_index()
    
    # Save to file.
    print("Save to file.")
    sub_dir_name = "_".join([
        "ax", args.function_name_list.replace(",", "_"),
        args.weight_list.replace(",", "_"), args.covariance_matrix.replace(
            "[", "_").replace("]", "_").replace(",", "_").replace(" ", ""),
        str(args.evaluate_covariance), str(args.init_iter), str(init_batch_size),
        str(args.update_iter), str(args.batch_size), str(args.var_coef),
        str(minimize), str(var_compute_type), str(num_random), str(num_bucket)
        ])
    abs_dir_path = os.path.join(save_path, sub_dir_name)
    Path(abs_dir_path).mkdir(parents=True, exist_ok=True)
    task_id = os.environ.get('TASK_INDEX')
    cur_time = pd.Timestamp.now().strftime('%Y%m%d%H%M%S')
    filename = cur_time + "_" + str(task_id) + ".csv"
    print(os.path.join(abs_dir_path, filename))
    result_df.to_csv(os.path.join(abs_dir_path, filename))
    print("2021-01-19 19:48:00")
    print("Done...")
Example #4
class SimpleExperimentTest(TestCase):
    def setUp(self) -> None:
        self.experiment = SimpleExperiment(
            name="test_branin",
            search_space=get_branin_search_space(),
            evaluation_function=sum_evaluation_function,
            objective_name="sum",
        )
        self.arms = [
            Arm(parameters={"x1": 0.75, "x2": 1}),
            Arm(parameters={"x1": 2, "x2": 7}),
            Arm(parameters={"x1": 10, "x2": 8}),
            Arm(parameters={"x1": -2, "x2": 10}),
        ]

    def testBasic(self) -> None:
        self.assertTrue(self.experiment.is_simple_experiment)
        trial = self.experiment.new_trial()
        with self.assertRaises(NotImplementedError):
            trial.runner = SyntheticRunner()
        with self.assertRaises(NotImplementedError):
            self.experiment.add_tracking_metric(Metric(name="test"))
        with self.assertRaises(NotImplementedError):
            self.experiment.update_tracking_metric(Metric(name="test"))
        self.assertTrue(self.experiment.eval_trial(trial).df.empty)
        batch = self.experiment.new_batch_trial()
        batch.add_arm(Arm(parameters={"x1": 5, "x2": 10}))
        self.assertEqual(self.experiment.eval_trial(batch).df["mean"][0], 15)
        self.experiment.new_batch_trial().add_arm(Arm(parameters={"x1": 15, "x2": 25}))
        self.assertAlmostEqual(self.experiment.eval().df["mean"][1], 40)
        self.assertEqual(batch.fetch_data().df["mean"][0], 15)
        self.assertAlmostEqual(self.experiment.fetch_data().df["mean"][1], 40)

    def testTrial(self) -> None:
        for i in range(len(self.arms)):
            self.experiment.new_trial(generator_run=GeneratorRun(arms=[self.arms[i]]))
        self.assertFalse(self.experiment.eval().df.empty)

    def testUnimplementedEvaluationFunction(self) -> None:
        experiment = SimpleExperiment(
            name="test_branin",
            search_space=get_branin_search_space(),
            objective_name="sum",
        )
        with self.assertRaises(Exception):
            experiment.evaluation_function(parameterization={})

        experiment.evaluation_function = sum_evaluation_function

    def testEvaluationFunctionNumpy(self) -> None:
        experiment = SimpleExperiment(
            name="test_branin",
            search_space=get_branin_search_space(),
            objective_name="sum",
            evaluation_function=sum_evaluation_function_numpy,
        )

        for i in range(len(self.arms)):
            experiment.new_trial(generator_run=GeneratorRun(arms=[self.arms[i]]))
        self.assertFalse(experiment.eval().df.empty)

    def testEvaluationFunctionV2(self) -> None:
        experiment = SimpleExperiment(
            name="test_branin",
            search_space=get_branin_search_space(),
            objective_name="sum",
            evaluation_function=sum_evaluation_function_v2,
        )

        for i in range(len(self.arms)):
            experiment.new_trial(generator_run=GeneratorRun(arms=[self.arms[i]]))
        self.assertFalse(experiment.eval().df.empty)

    def testEvaluationFunctionV2Numpy(self) -> None:
        experiment = SimpleExperiment(
            name="test_branin",
            search_space=get_branin_search_space(),
            objective_name="sum",
            evaluation_function=sum_evaluation_function_v2_numpy,
        )

        for i in range(len(self.arms)):
            experiment.new_trial(generator_run=GeneratorRun(arms=[self.arms[i]]))
        self.assertFalse(experiment.eval().df.empty)

    def testEvaluationFunctionV3(self) -> None:
        experiment = SimpleExperiment(
            name="test_branin",
            search_space=get_branin_search_space(),
            objective_name="sum",
            evaluation_function=sum_evaluation_function_v3,
        )

        for i in range(len(self.arms)):
            experiment.new_trial(generator_run=GeneratorRun(arms=[self.arms[i]]))
        self.assertFalse(experiment.eval().df.empty)

    def testEvaluationFunctionV3Numpy(self) -> None:
        experiment = SimpleExperiment(
            name="test_branin",
            search_space=get_branin_search_space(),
            objective_name="sum",
            evaluation_function=sum_evaluation_function_v3_numpy,
        )

        for i in range(len(self.arms)):
            experiment.new_trial(generator_run=GeneratorRun(arms=[self.arms[i]]))
        self.assertFalse(experiment.eval().df.empty)

    def testEvaluationFunctionV4(self) -> None:
        experiment = SimpleExperiment(
            name="test_branin",
            search_space=get_branin_search_space(),
            objective_name="sum",
            evaluation_function=sum_evaluation_function_v4,
        )

        for i in range(len(self.arms)):
            experiment.new_trial(generator_run=GeneratorRun(arms=[self.arms[i]]))
        self.assertFalse(experiment.eval().df.empty)

    def testEvaluationFunctionV4Numpy(self) -> None:
        experiment = SimpleExperiment(
            name="test_branin",
            search_space=get_branin_search_space(),
            objective_name="sum",
            evaluation_function=sum_evaluation_function_v4_numpy,
        )

        for i in range(len(self.arms)):
            experiment.new_trial(generator_run=GeneratorRun(arms=[self.arms[i]]))
        self.assertFalse(experiment.eval().df.empty)

    def testOptionalObjectiveName(self) -> None:
        experiment = SimpleExperiment(
            name="test_branin",
            search_space=get_branin_search_space(),
            evaluation_function=sum_evaluation_function_v2,
        )

        for i in range(len(self.arms)):
            experiment.new_trial(generator_run=GeneratorRun(arms=[self.arms[i]]))
        self.assertFalse(experiment.eval().df.empty)
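All four examples construct their search space with get_branin_search_space, which Ax provides among its testing utilities. For a self-contained run, an equivalent search space over the standard Branin domain (x1 in [-5, 10], x2 in [0, 15]) could be built as in the sketch below; note that one arm used in testBasic (x1=15, x2=25) lies outside this domain.

from ax.core.parameter import ParameterType, RangeParameter
from ax.core.search_space import SearchSpace

def get_branin_search_space() -> SearchSpace:
    # Standard Branin domain: x1 in [-5, 10], x2 in [0, 15].
    return SearchSpace(
        parameters=[
            RangeParameter(
                name="x1", parameter_type=ParameterType.FLOAT, lower=-5.0, upper=10.0
            ),
            RangeParameter(
                name="x2", parameter_type=ParameterType.FLOAT, lower=0.0, upper=15.0
            ),
        ]
    )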