def bbbp_4_layers_100_neurons():
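    # Bayes-by-Backprop (BBBP) trainer on the 1-D toy regression task.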
    trainer_obj = trainers.BBBP
    model_generator = models.get_correct_model(dataset="toy",
                                               trainer=trainer_obj)
    model, opts = model_generator.create(input_shape=1,
                                         num_neurons=100,
                                         num_layers=4)
    trainer = trainer_obj(model, opts, learning_rate=1e-3)
    model, rmse, nll = trainer.train(x_train,
                                     y_train,
                                     x_train,
                                     y_train,
                                     np.array([[1.]]),
                                     iters=iterations,
                                     batch_size=batch_size,
                                     verbose=True)
    plot_bbbp(model, os.path.join(save_fig_dir, "bbbp_4_layers_100_neurons"))
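Both toy snippets in this example read module-level globals (x_train, y_train, iterations, batch_size, save_fig_dir) defined elsewhere in the script. A minimal sketch of what they might look like; the cubic toy problem and the exact values are illustrative assumptions, not taken from the source:

import os
import numpy as np

# Hypothetical 1-D toy regression set; the real script builds its own data.
x_train = np.linspace(-4, 4, 1000).reshape(-1, 1).astype(np.float32)
y_train = (x_train ** 3 + np.random.normal(0, 3, x_train.shape)).astype(np.float32)

iterations = 5000      # assumed training budget
batch_size = 128       # assumed batch size
save_fig_dir = "figs"  # directory that plot_bbbp / plot_ng write into
os.makedirs(save_fig_dir, exist_ok=True)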
def evidence_noreg_4_layers_100_neurons():
    trainer_obj = trainers.Evidential
    model_generator = models.get_correct_model(dataset="toy",
                                               trainer=trainer_obj)
    model, opts = model_generator.create(input_shape=1,
                                         num_neurons=100,
                                         num_layers=4)
    trainer = trainer_obj(model,
                          opts,
                          learning_rate=5e-3,
                          lam=0.,
                          maxi_rate=0.)
    model, rmse, nll = trainer.train(x_train,
                                     y_train,
                                     x_train,
                                     y_train,
                                     np.array([[1.]]),
                                     iters=iterations,
                                     batch_size=batch_size,
                                     verbose=True)
    plot_ng(model,
            os.path.join(save_fig_dir, "evidence_noreg_4_layers_100_neurons"))
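The "noreg" suffix comes from lam=0., which switches off the evidential regularization term, while maxi_rate=0. keeps that coefficient from being adapted during training. For contrast, a regularized variant would presumably look like the sketch below; the lam=1e-2 and maxi_rate=1e-5 values are assumptions for illustration, not taken from the source:

def evidence_reg_4_layers_100_neurons():
    # Same architecture as above, but with a nonzero evidential regularizer.
    trainer_obj = trainers.Evidential
    model_generator = models.get_correct_model(dataset="toy",
                                               trainer=trainer_obj)
    model, opts = model_generator.create(input_shape=1,
                                         num_neurons=100,
                                         num_layers=4)
    trainer = trainer_obj(model, opts, learning_rate=5e-3,
                          lam=1e-2, maxi_rate=1e-5)  # assumed values
    model, rmse, nll = trainer.train(x_train, y_train, x_train, y_train,
                                     np.array([[1.]]), iters=iterations,
                                     batch_size=batch_size, verbose=True)
    plot_ng(model,
            os.path.join(save_fig_dir, "evidence_reg_4_layers_100_neurons"))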
Example #3
def compute_predictions():
    RMSE = np.zeros((len(datasets), len(training_schemes), num_trials))
    NLL = np.zeros((len(datasets), len(training_schemes), num_trials))
    df_pred_uci = pd.DataFrame(
        columns=["Dataset", "Method", "Target", "Mu", "Sigma"])
    for di, dataset in enumerate(datasets):
        for ti, trainer_obj in enumerate(training_schemes):
            for n in range(num_trials):
                (x_train,
                 y_train), (x_test,
                            y_test), y_scale = data_loader.load_dataset(
                                dataset, return_as_tensor=False)
                batch_size = h_params[dataset]["batch_size"]
                num_iterations = num_epochs * x_train.shape[0] // batch_size
                print("Num of iterations :", num_iterations)
                done = False
                while not done:
                    with tf.device(dev):
                        model_generator = models.get_correct_model(
                            dataset="toy", trainer=trainer_obj)
                        model, opts = model_generator.create(
                            input_shape=x_train.shape[1:])
                        # The Laplace-likelihood training scheme (second in
                        # the method_names list).
                        if method_names[ti] == "Laplace":
                            print("Training with Laplace likelihood")
                            trainer = trainer_obj(
                                model,
                                opts,
                                "laplace",
                                dataset,
                                learning_rate=h_params[dataset]
                                ["learning_rate"])
                        elif method_names[ti] == "Gaussian":
                            print("Trainienr Gaussian likelihood")
                            trainer = trainer_obj(
                                model,
                                opts,
                                "gaussian",
                                dataset,
                                learning_rate=h_params[dataset]
                                ["learning_rate"])
                        else:
                            trainer = trainer_obj(
                                model,
                                opts,
                                dataset,
                                learning_rate=h_params[dataset]
                                ["learning_rate"])
                        model, rmse, nll = trainer.train(x_train,
                                                         y_train,
                                                         x_test,
                                                         y_test,
                                                         y_scale,
                                                         iters=num_iterations,
                                                         batch_size=batch_size,
                                                         verbose=True)

                        #Compute on validation data and save predictions
                        summary_to_add = get_prediction_summary(
                            dataset, method_names[ti], model, x_test, y_test)
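                        # NOTE: DataFrame.append was removed in pandas 2.0;
                        # newer pandas would need pd.concat here instead.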
                        df_pred_uci = df_pred_uci.append(summary_to_add,
                                                         ignore_index=True)

                        del model
                        tf.keras.backend.clear_session()
                        # Retry if training diverged to a non-finite NLL.
                        done = np.isfinite(nll)
                print("saving {} {}".format(rmse, nll))
                RMSE[di, ti, n] = rmse
                NLL[di, ti, n] = nll

    RESULTS = np.hstack((RMSE, NLL))
    mu = RESULTS.mean(axis=-1)
    error = np.std(RESULTS, axis=-1)

    print("==========================")
    print("[{}]: {} pm {}".format(dataset, mu, error))
    print("==========================")

    print("TRAINERS: {}\nDATASETS: {}".format(
        [trainer.__name__ for trainer in training_schemes], datasets))
    print("MEAN: \n{}".format(mu))
    print("ERROR: \n{}".format(error))

    return df_pred_uci
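compute_predictions closes over several module-level names (datasets, training_schemes, method_names, num_trials, h_params, plus num_epochs and dev, which are set just below). A hedged sketch of the configuration it expects; the dataset list and hyperparameter values are illustrative assumptions, not taken from the source:

# Hypothetical configuration; the real script supplies its own values.
datasets = ["boston", "concrete", "energy-efficiency"]  # assumed UCI subset
training_schemes = [trainers.Evidential]                # trainers under test
method_names = ["Evidential"]                           # parallel to training_schemes
num_trials = 5
h_params = {d: {"learning_rate": 5e-3, "batch_size": 128} for d in datasets}

df_pred_uci = compute_predictions()
df_pred_uci.to_csv("uci_predictions.csv", index=False)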
num_epochs = args.num_epochs
dev = "/cpu:0" # for small datasets/models cpu is faster than gpu
"""" ================================================"""

RMSE = np.zeros((len(datasets), len(training_schemes), num_trials))
NLL = np.zeros((len(datasets), len(training_schemes), num_trials))
for di, dataset in enumerate(datasets):
    for ti, trainer_obj in enumerate(training_schemes):
        for n in range(num_trials):
            (x_train, y_train), (x_test, y_test), y_scale = data_loader.load_dataset(dataset, return_as_tensor=False)
            batch_size = h_params[dataset]["batch_size"]
            num_iterations = num_epochs * x_train.shape[0]//batch_size
            done = False
            while not done:
                with tf.device(dev):
                    model_generator = models.get_correct_model(dataset="toy", trainer=trainer_obj)
                    model, opts = model_generator.create(input_shape=x_train.shape[1:])
                    trainer = trainer_obj(model, opts, dataset, learning_rate=h_params[dataset]["learning_rate"])
                    model, rmse, nll = trainer.train(x_train, y_train, x_test, y_test, y_scale, iters=num_iterations, batch_size=batch_size, verbose=True)
                    del model
                    tf.keras.backend.clear_session()
                    # Retry if training diverged to a non-finite NLL.
                    done = np.isfinite(nll)
            print("saving {} {}".format(rmse, nll))
            RMSE[di, ti, n] = rmse
            NLL[di, ti, n] = nll

RESULTS = np.hstack((RMSE, NLL))
mu = RESULTS.mean(axis=-1)
error = np.std(RESULTS, axis=-1)

print("==========================")