Example #1
 def test_optimize_graceful_exit_on_exception(self) -> None:
     """Tests optimization as a single call, with exception during
     candidate generation.
     """
     best, vals, exp, model = optimize(
         parameters=[  # pyre-fixme[6]
             {
                 "name": "x1",
                 "type": "range",
                 "bounds": [-10.0, 10.0]
             },
             {
                 "name": "x2",
                 "type": "range",
                 "bounds": [-10.0, 10.0]
             },
         ],
         # Booth function.
         evaluation_function=lambda p: (
             (p["x1"] + 2 * p["x2"] - 7)**2 +
             (2 * p["x1"] + p["x2"] - 5)**2,
             None,
         ),
         minimize=True,
         total_trials=6,
         generation_strategy=GenerationStrategy(
             name="Sobol",
             steps=[GenerationStep(model=Models.SOBOL, num_trials=3)]),
     )
     self.assertEqual(len(exp.trials),
                      3)  # Check that we stopped at 3 trials.
     # All the regular return values should still be present.
     self.assertIn("x1", best)
     self.assertIn("x2", best)
     self.assertIsNotNone(vals)
     self.assertIn("objective", vals[0])
     self.assertIn("objective", vals[1])
     self.assertIn("objective", vals[1]["objective"])
Example #2
 def test_optimize_propagates_random_seed(self) -> None:
     """Tests optimization as a single call."""
     _, _, _, model = optimize(
         parameters=[  # pyre-fixme[6]
             {
                 "name": "x1",
                 "type": "range",
                 "bounds": [-10.0, 10.0]
             },
             {
                 "name": "x2",
                 "type": "range",
                 "bounds": [-10.0, 10.0]
             },
         ],
         # Booth function.
         evaluation_function=lambda p: (p["x1"] + 2 * p["x2"] - 7)**2 +
         (2 * p["x1"] + p["x2"] - 5)**2,
         minimize=True,
         total_trials=5,
         random_seed=12345,
     )
     self.assertEqual(12345, model.model.seed)
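Note the two evaluation_function shapes used so far: Example #1 returns a (mean, SEM) tuple (with None for an unknown SEM), while Example #2 returns a bare float. Ax accepts both, plus a dict mapping metric names to (mean, SEM) tuples, as Examples #3 and #10 do. A sketch of the three equivalent forms for the noiseless Booth objective:

def booth(p):
    return (p["x1"] + 2 * p["x2"] - 7) ** 2 + (2 * p["x1"] + p["x2"] - 5) ** 2

def eval_bare(p):
    return booth(p)                        # bare float: the objective mean

def eval_tuple(p):
    return (booth(p), 0.0)                 # (mean, SEM); SEM of 0.0 marks a noiseless metric

def eval_dict(p):
    return {"objective": (booth(p), 0.0)}  # metric name -> (mean, SEM)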
Example #3
def hyperopt(model, params, opt_params, trials=10):
    def loss_function(p):
        m = model(params['policy'],
                  params['train_env'],
                  **p,
                  verbose=2)
        m.learn(total_timesteps=params['timesteps'])
        reward = np.mean([evaluate(m, params['eval_env']) for _ in range(100)])
        return {'-reward': (-reward, 0.0)}
    best_params, best_vals, experiment, exp_model = optimize(
        parameters=[{'name': name, 'type': 'range', 'bounds': bounds}
                    for name, bounds in opt_params.items()],
        evaluation_function=loss_function,
        objective_name="-reward",
        minimize=True,
        total_trials=trials)
    
    m = model(params['policy'],
              params['train_env'],
              **best_params)
    m.learn(total_timesteps=params['timesteps'],
                callback=params['eval_callback'])
    return m, best_params, best_vals, experiment, exp_model
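Since optimize defaults to minimize=False, the sign flip on the reward above is optional. A sketch of the equivalent maximizing objective, reusing the snippet's model/params/evaluate helpers:

def reward_function(p):
    m = model(params['policy'], params['train_env'], **p, verbose=2)
    m.learn(total_timesteps=params['timesteps'])
    reward = np.mean([evaluate(m, params['eval_env']) for _ in range(100)])
    return {'reward': (reward, 0.0)}

# ...passed as evaluation_function=reward_function with objective_name="reward"
# and no minimize argument.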
Example #4
best_parameters, values, experiment, model = optimize(
    parameters=[
        {
            "name": "input_size",
            "type": "fixed",
            "value": 1
        },
        {
            "name": "embedding_size",
            "type": "range",
            "bounds": [1, 64]
        },
        {
            "name": "hidden_size",
            "type": "range",
            "bounds": [8, 512]
        },
        {
            "name": "output_size",
            "type": "fixed",
            "value": len(dataset._vocabulary)
        },
        {
            "name": "lr",
            "type": "range",
            "bounds": [1e-6, 0.4],
            "log_scale": True
        },
        {
            "name": "epochs",
            "type": "fixed",
            "value": 5
        },
    ],
    evaluation_function=train_evaluate,
    objective_name='cross entropy loss',
    minimize=True)
Example #5
best_parameters, values, experiment, model = optimize(
    parameters=[
        {
            "name": "batch_size",
            "type": "choice",
            "values": [4, 8, 16, 32, 64, 128, 256],
        },
        {
            "name": "dropout",
            "type": "range",
            "bounds": [0.05, 0.5],
            "log_scale": True,
        },
        {
            "name": "rnn_hidden_size",
            "type": "choice",
            "values": [16, 64, 128, 256, 512, 1024]
        },
        {
            "name": "rnn_num_layers",
            "type": "choice",
            "values": [1, 2, 3]
        },
        {
            "name": "floor_hidden_size",
            "type": "choice",
            "values": [16, 64, 128, 256, 512, 1024]
        },
        {
            "name": "floor_num_layers",
            "type": "choice",
            "values": [1, 2, 3]
        },
        {
            "name": "coordinates_hidden_size",
            "type": "choice",
            "values": [16, 64, 128, 256, 512, 1024]
        },
        {
            "name": "coordinates_num_layers",
            "type": "choice",
            "values": [1, 2, 3]
        },
    ],
    evaluation_function=train_evaluate,
    objective_name='mean_3d_error',
    total_trials=100  # default is 20
)
Example #6
def hyperparameter_optimization(a: Namespace, c: connection, t: str):
    dtype = torch.float
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    global cur
    cur = c.cursor()
    global conn
    conn = c
    global args
    args = a
    global task
    task = t

    global ss
    global data_composition_key
    global model_key
    _, ss, data_composition_key, model_key, ntrails, epochs = task.split(":")
    args.epochs = int(epochs)

    make_sure_table_exist(args, conn, cur, args.train_results_ax_table_name)
    make_sure_table_exist(args, conn, cur,
                          args.validation_results_ax_table_name)
    make_sure_table_exist(args, conn, cur, args.test_results_ax_table_name)

    objective_wrapper({})  #initial run config

    best_parameters, values, experiment, model = optimize(
        parameters=[{
            "name": "lr",
            "type": "range",
            "bounds": [1e-7, 0.5],
            "log_scale": True
        }, {
            "name": "weight_decay",
            "type": "range",
            "bounds": [1e-8, .5],
            "log_scale": True
        }, {
            "name":
            "optimizer",
            "type":
            "choice",
            "values": [
                "Adadelta", "Adagrad", "Adam", "AdamW", "Adamax", "ASGD",
                "RMSprop", "SGD"
            ]
        }, {
            "name": "criterion",
            "type": "choice",
            "values": ["BCELoss", "MSELoss"]
        }, {
            "name": "feature_extraction",
            "type": "choice",
            "values": [True, False]
        }],
        evaluation_function=objective_wrapper,
        objective_name='accuracy',
        minimize=False,
        arms_per_trial=1,
        total_trials=int(ntrails)  # adjust per task
    )

    save(experiment, os.path.join(res_path, "experiment.json"))

    return True
Example #7
 best_parameters, values, experiment, model = optimize(
     parameters=[
         {
             "name": "mom_range",
             "type": "choice",
             "values": [0, 0]
         },
         {
             "name": "niter",
             "type": "choice",
             "values": [1000, 1000]
         },
         {
             "name": "n_res",
             "type": "range",
             "bounds": [0, 1]
         },
         {
             "name": "scheduler",
             "type": "choice",
             "values": ['ReduceLROnPlateau', 'ReduceLROnPlateau']
         },
         {
             "name": "optimizer",
             "type": "choice",
             "values": ['sgd', 'sgd']
         },
         {
             "name": "weight_decay",
             "type": "range",
             "bounds": [1e-14, 1e-1],
             "log_scale": True
         },
         {
             "name": "momentum",
             "type": "range",
             "bounds": [0.9, 1.]
         },
         {
             "name": "learning_rate",
             "type": "range",
             "bounds": [1e-4, 1e-3],
             "log_scale": True
         },
     ],
     evaluation_function=training.train,
     objective_name='loss',
     minimize=True,
     total_trials=100)
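The two-element choice lists above ([0, 0], [1000, 1000], ['sgd', 'sgd'], ...) are a workaround that pins a parameter to one value while keeping it in the search-space list. Ax's "fixed" parameter type (used in Example #4) expresses the same thing directly; a sketch of the equivalent entries:

pinned_parameters = [
    {"name": "mom_range", "type": "fixed", "value": 0},
    {"name": "niter", "type": "fixed", "value": 1000},
    {"name": "scheduler", "type": "fixed", "value": "ReduceLROnPlateau"},
    {"name": "optimizer", "type": "fixed", "value": "sgd"},
    # ...the genuine "range" parameters stay unchanged.
]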
Example #8
 best_parameters, values, experiment, model = optimize(
     parameters=[
         {
             "name": "n_heads",
             "type": "range",
             "bounds": [0, 3],
             "value_type":
             "int",  # Optional, defaults to inference from type of "bounds".
         },
         {
             "name": "l2_req",
             "type": "range",
             "bounds": [1e-10, 0.001],
             "log_scale": True
         },
         {
             "name": "lr",
             "type": "range",
             "bounds": [1e-10, 0.001],
             "log_scale": True
         },
         {
             "name": "epochs_max",
             "type": "range",
             "bounds": [0, 500],
         }
     ],
     experiment_name="test",
     objective_name="f_score_sum",
     evaluation_function=eval_s,
     total_trials=50)
Example #9
train_dataset = LuxorDataset(args.train_dataset_path, 1, 201)
train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=10 * 6,
                                           shuffle=True)

if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")


def train_wrapper(parameterization):
    net = Net()
    parameterization["num_epochs"] = 1
    last_epoch_loss = train(net=net,
                            data_loader=train_loader,
                            parameters=parameterization,
                            device=device)
    if math.isnan(last_epoch_loss):
        last_epoch_loss = 10**6
    return -last_epoch_loss


best_parameters, values, experiment, model = optimize(
    parameters=[
        #{"name": "lr", "type": "range", "bounds": [1e-5, 0.1], "log_scale": True},
    ],
    evaluation_function=train_wrapper)

print(best_parameters)
Example #10
    svc_c = config["svc_c"]
    classifier_obj = sklearn.svm.SVC(C=svc_c, gamma="auto")

    score = sklearn.model_selection.cross_val_score(classifier_obj,
                                                    x,
                                                    y,
                                                    n_jobs=-1,
                                                    cv=3)
    accuracy = score.mean()
    return {'error_rate': (1 - accuracy, 0)}


best_parameters, values, experiment, model = optimize(
    parameters=[{
        "name": "svc_c",
        "type": "range",
        "bounds": [0.001, 0.1],
        "value_type":
        "float",  # Optional, defaults to inference from type of "bounds".
        "log_scale": True,  # Optional, defaults to False.
    }],
    experiment_name="test",
    objective_name="error_rate",
    evaluation_function=objective,
    minimize=True,  # Optional, defaults to False.
    total_trials=30,  # Optional.
)

print(best_parameters)
Example #11
    """ Train and evaluate the network to find the best parameters
    Args:
        parameterization: The hyperparameters that should be evaluated
    Returns:
        float: classification accuracy """
    net = Net()
    net, _, _ = train_bayesian_optimization(net=net, input_picture=DATA['x_train'],
                                            label_picture=DATA['y_train'],
                                            parameters=parameterization)

    return eval_bayesian_optimization(net=net, input_picture=DATA['x_valid'],
                                      label_picture=DATA['y_valid'])


# Optimizing the network with Bayesian optimization
BEST_PARAMETERS, VALUES, EXPERIMENT, MODEL = optimize(
    parameters=[{"name": "lr", "type": "range", "bounds": [1e-6, 0.4], "log_scale": True}],
    evaluation_function=evaluate_hyperparameters,
    objective_name='accuracy',
)

# Saving the results from the optimization
MEANS, COVARIANCES = VALUES

# Printing the results of the hyperparameter optimization
print(BEST_PARAMETERS)
print(MEANS, COVARIANCES)

# Finding the best hyperparameters for training the network
DATA1 = EXPERIMENT.fetch_data()
DF = DATA1.df
BEST_ARM_NAME = DF.arm_name[DF['mean'] == DF['mean'].max()].values[0]
BEST_ARM = EXPERIMENT.arms_by_name[BEST_ARM_NAME]
Example #12
    SRC_DIR = Path(args.srcdir)
    TMP_DIR = Path(args.datadir)
    DATA_DIR = TMP_DIR / "data"
    RESULTS_DIR = Path(SRC_DIR / "output")
    RESULTS_DIR.mkdir(exist_ok=True)
    RUNSCRIPTS_DIR = RESULTS_DIR / "run_scripts"
    RUNSCRIPTS_DIR.mkdir(exist_ok=True)
    EXP_RESULTS = RESULTS_DIR / "results"
    EXP_RESULTS.mkdir(exist_ok=True)

    run_script_name = f"runscript-{START_TIME}.py"

    print("Ax HPT Experiment runscript written to:",
          RUNSCRIPTS_DIR / run_script_name)
    copyfile(os.path.realpath(__file__), RUNSCRIPTS_DIR / run_script_name)

    best_parameters, values, experiment, model = optimize(
        parameters=parametrization,
        evaluation_function=train_evaluation,
        objective_name="Accuracy",
        total_trials=args.trials,
    )

    print(f"Means: {values[0]}\
        Covariances: {values[1]}")

    print(best_parameters)

    save_name = str(EXP_RESULTS / f"exp-hpt-sgd-{START_TIME}.json")
    save(experiment, save_name)
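Examples #6 and #12 both persist the finished experiment with save(experiment, path). Assuming the matching load helper from the same Ax JSON storage API, the experiment can be restored later for analysis:

from ax import load

experiment = load(save_name)     # re-read the JSON written by save(...)
df = experiment.fetch_data().df  # per-arm means, as used in Example #11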
Example #13
best_parameters, values, experiment, model = optimize(
    parameters=[
        {
            'name': 'n_conv_layers',
            'type': 'range',
            'bounds': [2, 12],
        },
        {
            'name': 'learning_rate',
            'type': 'choice',
            'values': [1 * 10**-x for x in list(range(2, 4))],  # [0.01, 0.001]
            'is_ordered': True,
        },
        {
            'name': 'conv_filter_size',
            'type': 'choice',
            'values': [2**x for x in range(6, 10)],  # [64, 128, 256, 512]
            'is_ordered': True,
        },
        {
            'name': 'conv_kernel_size',
            'type': 'choice',
            'values': list(range(1, 6)),  # [1, 2, 3, 4, 5]
            'is_ordered': True,
        },
        {
            'name': 'target_update',
            'type': 'choice',
            'values': [2**x for x in range(7, 10, 1)],  # [128, 256, 512]
            'is_ordered': True,
        },
        {
            'name': 'neutral_cost',
            'type': 'choice',
            'values': list(range(-2000, -400, 500)),  # [-2000, -1500, -1000, -500]
            'is_ordered': True,
        },
        {
            'name': 'stop_loss',
            'type': 'choice',
            'values': list(range(50, 200, 50)),  # [50, 100, 150]
            'is_ordered': True,
        },
        {
            'name': 'aggregation',
            'type': 'choice',
            'values': ['1 min', '5 min', '15 min', '30 min', '1 hour'],
            'is_ordered': True,
        },
        {
            'name': 'n_dense_layers',
            'type': 'range',
            'bounds': [2, 12],
        },
        {
            'name': 'n_nodes_dense_layers',
            'type': 'choice',
            'values': [2**x for x in range(9, 13)],  # [512, 1024, 2048, 4096]
            'is_ordered': True,
        },
    ],
    evaluation_function=forex_eval,
    objective_name='total',
    total_trials=20,
    # parameter_constraints=["n_dense_layers + n_conv_layers <= 16"],
)
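The commented-out constraint above shows the parameter_constraints syntax: plain strings containing linear inequalities over parameter names (Example #18 below uses the same mechanism). Enabled, it would read:

parameter_constraints=["n_dense_layers + n_conv_layers <= 16"],  # cap total network depth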
Example #14
def training_pipeline(dataset_path):
    start_training = perf_counter()
    # train on the GPU or on the CPU if a GPU is not available
    device = torch.device(
        "cuda") if torch.cuda.is_available() else torch.device("cpu")
    print("Training on the", device)

    # Load the dataset. Remove any unwanted images
    full_dataset = LISADataset(dataset_path,
                               transforms=get_transform(),
                               bad_images=bad_images)

    # split data into training, testing and validation sets
    data_loader_train, data_loader_test, data_loader_val = split_data(
        full_dataset)

    validation_ground_truth = create_bounding_boxes(data_loader_val,
                                                    BBType.GroundTruth,
                                                    model=None,
                                                    device=device)

    num_classes = len(CLASS_LABEL_MAP) + 1  # add 1 for background class

    # hyperparameter tuning
    best_parameters, values, experiment, ax_model = optimize(
        parameters=[
            {
                "name": "lr",
                "type": "range",
                "bounds": [1e-6, 0.01],
                "log_scale": True
            },
            {
                "name": "num_epochs",
                "type": "choice",
                "values": list(range(1, 3))
            },
        ],
        evaluation_function=lambda params: train(
            params,
            num_classes,
            device,
            data_loader_train,
            data_loader_val,
            validation_ground_truth,
        ),
        objective_name="mean_average_precision",
        total_trials=1,
    )
    end_training = perf_counter()
    training_time = end_training - start_training
    print("Training took", training_time / 60.0, "minutes")

    # evaluation
    start_eval = perf_counter()
    path_to_best_model = "model_lr_{}_epochs_{}.pth".format(
        best_parameters["lr"], best_parameters["num_epochs"])
    print("Best model saved at", path_to_best_model)
    model = torch.load(path_to_best_model, map_location=torch.device(device))

    testing_ground_truth = create_bounding_boxes(data_loader_test,
                                                 BBType.GroundTruth, model,
                                                 device)

    print("getting evaluation scores...")
    evaluation = evaluate(model, data_loader_test, device,
                          testing_ground_truth)

    # TODO: move map calculation into evaluation, just return m_a_p
    precisions = [
        0 if np.isnan(metric["AP"]) else metric["AP"] for metric in evaluation
    ]
    mean_average_precision = np.sum(precisions) / len(CLASS_LABEL_MAP)
    print("Mean average precision:", mean_average_precision)

    end_eval = perf_counter()
    eval_time = end_eval - start_eval
    print("Evaluation took", eval_time / 60.0, "minutes")
Example #15
 best_parameters, values, experiment, model = optimize(
     parameters=[
         {
             "name": "lr",
             "type": "range",
             "bounds": [1e-6, 0.4],
             "log_scale": True
         },
         # {"name": "dropout", "type": "range", "bounds": [0.01, 0.5], "log_scale": True},
         {
             "name": "training_split",
             "type": "range",
             "bounds": [0.7, 0.9],
             "log_scale": True
         },
         {
             "name": "intrinsic_dimensions",
             "type": "range",
             "bounds": [256, 2048],
             "log_scale": False
         },
         {
             "name": "batch_size",
             "type": "choice",
             "values": [32, 64, 128, 256, 512]
         },
     ],
     evaluation_function=train_evaluate,
     objective_name='accuracy',
     # generation_strategy=ax.models.random.sobol.SobolGenerator,
 )
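The commented-out generation_strategy hint would not work as written: optimize expects a GenerationStrategy instance (as in Example #1), not a generator class. A sketch of a typical two-step strategy, assuming the import paths of recent Ax releases, where Models.GPEI is the standard Gaussian-process/expected-improvement model and num_trials=-1 means "all remaining trials":

from ax.modelbridge.generation_strategy import GenerationStrategy, GenerationStep
from ax.modelbridge.registry import Models

generation_strategy = GenerationStrategy(steps=[
    GenerationStep(model=Models.SOBOL, num_trials=5),  # quasi-random exploration
    GenerationStep(model=Models.GPEI, num_trials=-1),  # Bayesian optimization afterwards
])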
Example #16
    app = MainApp()
    status, message, result = app.run(tunable_params=parametrization)
    if status:
        print("EC: ", result)
        return {"ec": (result, 0.0)}
    else:
        print(message)
        raise RuntimeError("Experiment error! Check the robot or the values!")


with open("parameters.json", "r") as f:
    params = json.load(f)

start = time.time()
best_parameters, values, experiment, model = optimize(
    parameters=params["exp_params"]["tunable_params"],
    experiment_name="bayesian_eco",
    evaluation_function=evaluation_function,
    objective_name="ec",
    minimize=True,
    total_trials=params["exp_params"]["cycles"],
)
print(time.time() - start)

exp_out = best_parameters, values, experiment
with open(f"data/{params['experiment_name']}/experiment.pkl", "wb") as f:
    pickle.dump(exp_out, f)

print(values)
print(best_parameters)
Example #17
    {
        "name": "linear_size0",
        "type": "range",
        "bounds": [1, 256]
    },
    {
        "name": "output_size",
        "type": "fixed",
        "value": dataset.vocab_len()
    },
    {
        "name": "lr",
        "type": "range",
        "bounds": [0.0005, 0.05],
        "log_scale": True
    },
    {
        "name": "epochs",
        "type": "fixed",
        "value": 3
    },
]

best_parameters, values, experiment, model = optimize(
    parameters=parameters2,
    evaluation_function=train_evaluate,
    objective_name='cross entropy loss',
    minimize=True)

print(best_parameters)
Example #18
def tune_model_weights():
    parser = generate.get_parser_with_args()
    parser = add_tune_args(parser)
    args = options.parse_args_and_arch(parser)
    n_models = len(args.path.split(":"))
    print(n_models)
    print(args.weight_lower_bound)
    print(args.weight_upper_bound)
    print(args.output_json_best_parameters)
    print(args.output_json_best_value)
    print(args.num_trails_ax_opt)

    def evaluation_function(parameterization):
        w1 = parameterization.get("w1")
        w2 = parameterization.get("w2")
        w3 = parameterization.get("w3")
        weight = str(w1) + "," + str(w2) + "," + str(w3)
        args.model_weights = weight
        generate.validate_args(args)
        score = generate.generate(args)
        return {"bleu_score": (score, 0.0)}

    lower_bound = args.weight_lower_bound
    upper_bound = args.weight_upper_bound
    best_parameters, values, experiment, model = optimize(
        parameters=[
            {
                "name": "w1",
                "type": "range",
                "bounds": [lower_bound, upper_bound],
                "value_type": "float",
            },
            {
                "name": "w2",
                "type": "range",
                "bounds": [lower_bound, upper_bound]
            },
            {
                "name": "w3",
                "type": "range",
                "bounds": [lower_bound, upper_bound]
            },
        ],
        experiment_name="tune_model_weights",
        objective_name="bleu_score",
        evaluation_function=evaluation_function,
        minimize=True,  # Optional, defaults to False.
        parameter_constraints=[
            "w1 + w2 + w3 <= 1",
            "w1 + w2 + w3 >= 0.99",
        ],  # Optional.
        total_trials=args.num_trails_ax_opt,  # Optional.
    )

    json_file = json.dumps(best_parameters)
    with open(args.output_json_best_parameters, "w") as f:
        f.write(json_file)

    json_file = json.dumps(values)
    with open(args.output_json_best_value, "w") as f:
        f.write(json_file)
    return best_parameters, values
        if args.focus == "map":
            print("returning map")
            return mAP
        else:
            print("returning re-rank")
            return re_rank_mAP


best_parameters, values, experiment, model = optimize(
    parameters=[
        {"name": "sigma", "type": "range", "bounds": [1e-1, 1.0]},
        {"name": "alpha", "type": "range", "bounds": [0.5, 3.0]},
        {"name": "l", "type": "range", "bounds": [1e-1, 1.0]},
        {"name": "margin", "type": "range", "bounds": [1e-6, 1.0], "log_scale": True},
        {"name": "beta_ratio", "type": "range", "bounds": [1e-6, 1.0]},
        {"name": "gamma", "type": "range", "bounds": [1e-6, 1.0]},
        {"name": "weight_decay", "type": "range", "bounds": [1e-6, 1.0]},
        # {"name": "batch_size", "type": "range", "bounds": [10, 80]},
        ],
    evaluation_function=train,
    objective_name='ranking',
    minimize=False,
    total_trials = 60,
)

print("===========")
print(best_parameters)
print("===========")
print(values)

Example #20
                     load=False,
                     cv=5
                     )

    best_parameters, values, experiment, model = optimize(
        parameters=[
            {"name": "warmup", "type": "choice", "values": [0, 0]},
            {"name": "mom_range", "type": "range", "bounds": [0., 0.1]},
            {"name": "num_elements", "type": "range", "bounds": [1, 8]},
            {"name": "niter", "type": "choice", "values": [100, 1000]},
            {"name": "n_res", "type": "range", "bounds": [1, 20]},
            {"name": "z_dim", "type": "range", "bounds": [100, 256]},
            {"name": "n_flows", "type": "range", "bounds": [2, 10]},
            {"name": "scheduler", "type": "choice", "values":
                ['CycleScheduler', 'CycleScheduler']},
            {"name": "optimizer", "type": "choice", "values": ['adamw', 'adamw']},
            {"name": "l1", "type": "range", "bounds": [1e-14, 1e-1], "log_scale": True},
            {"name": "l2", "type": "range", "bounds": [1e-14, 1e-1], "log_scale": True},
            {"name": "weight_decay", "type": "range", "bounds": [1e-14, 1e-1], "log_scale": True},
            {"name": "momentum", "type": "range", "bounds": [0.9, 1.]},
            {"name": "learning_rate", "type": "range", "bounds": [1e-5, 1e-4], "log_scale": True},
        ],
        evaluation_function=training.train,
        objective_name='loss',
        minimize=True,
        total_trials=3
    )
    from matplotlib import pyplot as plt

    fig = plt.figure()
    # render(plot_contour(model=model, param_x="learning_rate", param_y="weight_decay", metric_name='Loss'))
Example #21
def find_super_params():
    best_parameters, values, experiment, model = optimize(
        parameters=[
            {
                "name": "patch_size",
                "type": "range",
                "bounds": [4, 16],
                "value_type":
                "int",  # Optional, defaults to inference from type of "bounds".
                "log_scale": False,  # Optional, defaults to False.
            },
            {
                "name": "num_patches",
                "type": "range",
                "bounds": [1, 2],
            },
            {
                "name": "loc_hidden",
                "type": "range",
                "bounds": [128, 1024],
            },
            {
                "name": "glimpse_hidden",
                "type": "range",
                "bounds": [128, 1024],
            },
            {
                "name": "num_glimpses",
                "type": "range",
                "bounds": [5, 16],
            },
            {
                "name": "batch_size",
                "type": "range",
                "bounds": [256, 1024],
            },
            {
                "name": "batchnorm_flag_phi",
                "type": "range",
                "bounds": [0, 1],
            },
            {
                "name": "batchnorm_flag_l",
                "type": "range",
                "bounds": [0, 1],
            },
            {
                "name": "batchnorm_flag_g",
                "type": "range",
                "bounds": [0, 1],
            },
            {
                "name": "batchnorm_flag_h",
                "type": "range",
                "bounds": [0, 1],
            },
            {
                "name": "glimpse_scale",
                "type": "range",
                "bounds": [1, 2],
            },
        ],
        experiment_name="test",
        objective_name="valid_accu",
        evaluation_function=call_rva,
        minimize=True,  # Optional, defaults to False.
        total_trials=30,  # Optional.
        #parameter_constraints=[" = "],  # Optional.
        #outcome_constraints=["l2norm <= 1.25"],  # Optional.
    )
Example #22
    return trainEval


if len(sys.argv) != 2:
    print("Use {} configName".format(sys.argv[0]))
else:
    conf = getattr(sys.modules['configurations'], sys.argv[1])

    print("====================")
    print("RUN USING {}".format(sys.argv[1]))
    print("====================")

    opt = {}
    #opt['best_parameters'], opt['values'], opt['experiment'], opt['model'] = optimize(
    opt['best_parameters'], opt['values'], _, _ = optimize(
        parameters=conf["param"],
        evaluation_function=createTrainEval(conf["conf"]),
        objective_name=conf["checkMetric"],
        total_trials=conf["trials"],
        arms_per_trial=1,
    )

    print("====================")
    print("BEST PARAMETERS")
    print(opt['best_parameters'])
    pickle.dump(opt, open(conf["saveFile"], 'wb'))

    notifier.sendMessage("Training of {} finished on {}".format(
        sys.argv[1], socket.gethostname()))
    "bounds": [0.0, 1.0]
}, {
    "name": "num_epochs",
    "fixed": 50
}]

# Run the experiment with control-group hyperparameters
# to compare performance before and after optimization.
train_evaluate({"num_epochs": 100})  # control-group default

# Run Bayesian optimization to find the best hyperparameters.
best_parameters, values, experiment, model = optimize(
    parameters=[
        {
            "name": "lr",
            "type": "range",
            "bounds": [1e-6, 0.4],
            "log_scale": True
        },
        {
            "name": "momentum",
            "type": "range",
            "bounds": [0.0, 1.0]
        },
    ],
    evaluation_function=train_evaluate,
    objective_name='accuracy',
)

# Retrain using the values we obtained
train_evaluate(best_parameters)
Example #24
 best_parameters, values, experiment, model = optimize(
     parameters=[
         {
             "name": "x1",
             "type": "range",
             "bounds": [-15, 15],
             "value_type":
             "float",  # Optional, defaults to inference from type of "bounds".
             "log_scale": False,  # Optional, defaults to False.
         },
         {
             "name": "x2",
             "type": "range",
             "bounds": [-15, 15],
         },
         {
             "name": "x3",
             "type": "range",
             "bounds": [-15, 15],
         },
         {
             "name": "x4",
             "type": "range",
             "bounds": [-15, 15],
         },
         {
             "name": "x5",
             "type": "range",
             "bounds": [-15, 15],
         },
         {
             "name": "x6",
             "type": "range",
             "bounds": [-15, 15],
         },
     ],
     experiment_name="test",
     objective_name="hartmann6",
     evaluation_function=hartmann_evaluation_function,
     minimize=True,  # Optional, defaults to False.
     #parameter_constraints=["x1 + x2 <= 20"],  # Optional.
     total_trials=50,  # Optional.
 )
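This last snippet leaves hartmann_evaluation_function undefined. Ax ships the Hartmann6 synthetic function, so a plausible stand-in is the hypothetical wrapper below (note that the canonical Hartmann6 domain is [0, 1]^6, much narrower than the [-15, 15] bounds searched above):

import numpy as np
from ax.utils.measurement.synthetic_functions import hartmann6

def hartmann_evaluation_function(parameterization):
    x = np.array([parameterization[f"x{i}"] for i in range(1, 7)])
    return {"hartmann6": (hartmann6(x), 0.0)}  # noiseless (mean, SEM) pair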