Example #1
    def V_setup(self):
        self.explicit_gradient = False
        self.need_higherorderderiv = True
        data_dict = get_data_dict("pima_indian")
        X_np = data_dict["input"]
        y_np = data_dict["target"]
        self.dim = X_np.shape[1]
        self.num_ob = X_np.shape[0]

        self.beta = nn.Parameter(torch.zeros(self.dim), requires_grad=True)
        if self.gibbs:
            self.sigma2 = Variable(torch.zeros(1), requires_grad=False)
            self.list_hyperparam = [self.sigma2]
            self.list_param = [self.beta]
        else:
            self.log_sigma2 = nn.Parameter(torch.zeros(1), requires_grad=True)
        # log_sigma2 lives in log space because we want sigma2 unconstrained
        self.y = Variable(torch.from_numpy(y_np),
                          requires_grad=False).type(precision_type)
        self.X = Variable(torch.from_numpy(X_np),
                          requires_grad=False).type(precision_type)
        # parameter for hyperprior distribution

        self.lamb = 1
        return ()
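# Hedged usage sketch: Example #25 below shows the enclosing class,
# V_hierarchical_logistic_gibbs, whose constructor takes precision_type and
# gibbs, so this V_setup would be exercised via:
# v_obj = V_hierarchical_logistic_gibbs(precision_type="torch.DoubleTensor", gibbs=True)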
Example #2
    def V_setup(self):
        input_npdata = get_data_dict("boston")
        self.y_np = input_npdata["target"]
        self.X_np = input_npdata["input"]
        self.dim = self.X_np.shape[1]
        self.num_ob = self.X_np.shape[0]
        self.explicit_gradient = False
        self.need_higherorderderiv = True
        self.beta = nn.Parameter(torch.zeros(self.dim), requires_grad=True)
        self.y = Variable(torch.from_numpy(self.y_np),
                          requires_grad=False).type(self.precision_type)
        self.X = Variable(torch.from_numpy(self.X_np),
                          requires_grad=False).type(self.precision_type)
        self.sigma = 1

        return ()
Example #3
import os, numpy, pickle, pystan
from input_data.convert_data_to_dict import get_data_dict


def compute_store_logit_mean_cov(data_name, standardize_predictor):

    dataset = get_data_dict(data_name, standardize_predictor)

    X = dataset["input"]
    y = dataset["target"]
    # y = y.astype(int)
    N = X.shape[0]
    p = X.shape[1]

    data_dict = {"X": X, "y": y, "N": N, "p": p}
    address = os.environ["PYTHONPATH"] + "/stan_code/log_reg_density.stan"
    dump_address = os.environ[
        "PYTHONPATH"] + "/stan_code/log_reg_density_model.pkl"

    recompile = not os.path.isfile(dump_address)
    if recompile:
        mod = pystan.StanModel(file=address)
        with open(dump_address, 'wb') as f:
            pickle.dump(mod, f)
    else:
        with open(dump_address, 'rb') as f:
            mod = pickle.load(f)

    fit = mod.sampling(data=data_dict, seed=20)
    print(fit)
    la = fit.extract(permuted=True)
    out = la["beta"]
    mean = numpy.mean(out, axis=0)
    cov = numpy.cov(out, rowvar=False)

    if standardize_predictor:
        qualify = "standardized"
    else:
        qualify = "not_standardized"
    result_dump_address = os.environ[
        "PYTHONPATH"] + "/input_data/" + data_name + "_" + qualify + ".npz"
    # numpy.savez has no allow_pickle flag; stray keywords would be stored as arrays
    numpy.savez(result_dump_address, mean=mean, cov=cov)
    return ()
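# Hedged usage sketch: run the helper for one dataset and read the stored
# summaries back with numpy.load (the file name follows the address built above).
if __name__ == "__main__":
    compute_store_logit_mean_cov("pima_indian", standardize_predictor=True)
    summary = numpy.load(os.environ["PYTHONPATH"] +
                         "/input_data/pima_indian_standardized.npz")
    print(summary["mean"])
    print(summary["cov"])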
Example #4
from input_data.convert_data_to_dict import get_data_dict

permissible_vals = [
    "pima_indian", "boston", "subset_mnist", "subset_cifar10",
    "logistic_mnist", "logistic_cifar10", "logistic_8x8mnist"
]
permissible_vals += [
    "australian", "german", "heart", "diabetes", "breast", "8x8mnist"
]

for name in permissible_vals:
    out = get_data_dict(name)
    print("input shape {}".format(out["input"].shape))
    print("target shape {}".format(out["target"].shape))
    print("file {} success".format(name))
Example #5
from distributions.neural_nets.priors.prior_util import prior_generator
import os, numpy, torch
import dill as pickle
from abstract.mcmc_sampler import mcmc_sampler, mcmc_sampler_settings_dict
from abstract.util import wrap_V_class_with_input_data
from adapt_util.tune_param_classes.tune_param_setting_util import *
from distributions.neural_nets.fc_V_model_1 import V_fc_model_1  # module path as in Example #22
from experiments.experiment_obj import tuneinput_class
from experiments.correctdist_experiments.prototype import check_mean_var_stan
from post_processing.ESS_nuts import ess_stan, diagnostics_stan
from post_processing.get_diagnostics import energy_diagnostics, process_diagnostics, get_params_mcmc_tensor, get_short_diagnostics
from input_data.convert_data_to_dict import get_data_dict
from post_processing.test_error import test_error
seed = 1
numpy.random.seed(seed)
torch.manual_seed(seed)

input_data = get_data_dict("8x8mnist",standardize_predictor=True)
input_data = {"input":input_data["input"][:500,],"target":input_data["target"][:500]}

prior_dict = {"name":"gaussian_inv_gamma_1"}
model_dict = {"num_units":25}

v_generator = wrap_V_class_with_input_data(class_constructor=V_fc_model_1, input_data=input_data, prior_dict=prior_dict, model_dict=model_dict)

mcmc_meta = mcmc_sampler_settings_dict(mcmc_id=0, samples_per_chain=2000, num_chains=4, num_cpu=4, thin=1, tune_l_per_chain=1000,
                                       warmup_per_chain=1100, is_float=False, isstore_to_disk=False, allow_restart=False)


input_dict = {"v_fun":[v_generator],"epsilon":["dual"],"second_order":[False],"cov":["adapt"],"max_tree_depth":[8],
               "metric_name":["diag_e"],"dynamic":[True],"windowed":[False],"criterion":["gnuts"]}
Example #6
from experiments.correctdist_experiments.result_from_long_chain.logistic.util import result_from_long_chain
from input_data.convert_data_to_dict import get_data_dict
import numpy

names_list = ["pima_indian", "australian", "german", "heart", "breast"]

for name in names_list:
    input_data = get_data_dict(name)

    out = result_from_long_chain(input_data=input_data,
                                 data_name=name,
                                 recompile=False)

    file = numpy.load(out)

    print(file["correct_mean"])
    print(file["correct_cov"])
Example #7
import os, pickle
from abstract.mcmc_sampler import mcmc_sampler_settings_dict
from distributions.logistic_regressions.logistic_regression import V_logistic_regression
from input_data.convert_data_to_dict import get_data_dict
from experiments.correctdist_experiments.prototype import check_mean_var_stan
from abstract.util import wrap_V_class_with_input_data

mcmc_meta = mcmc_sampler_settings_dict(mcmc_id=0,
                                       samples_per_chain=1000,
                                       num_chains=4,
                                       num_cpu=4,
                                       thin=1,
                                       tune_l_per_chain=500,
                                       warmup_per_chain=600,
                                       is_float=False,
                                       isstore_to_disk=False,
                                       allow_restart=False,
                                       seed=25)
input_data = get_data_dict("pima_indian")
V_pima_indian_logit = wrap_V_class_with_input_data(
    class_constructor=V_logistic_regression, input_data=input_data)

address = os.environ[
    "PYTHONPATH"] + "/experiments/correctdist_experiments/result_from_long_chain.pkl"
with open(address, 'rb') as f:
    correct = pickle.load(f)
correct_mean = correct["correct_mean"]
correct_cov = correct["correct_cov"]
correct_diag_cov = correct_cov.diagonal()

input_dict = {
    "v_fun": [V_pima_indian_logit],
    "epsilon": ["dual"],
    "second_order": [False],
    "evolve_t": [1.],
Example #8
from input_data.convert_data_to_dict import get_data_dict
import numpy, pystan, pickle, os
dataset = get_data_dict("boston", standardize_predictor=False)

N = dataset["input"].shape[0]
K = dataset["input"].shape[1]
X_mean = numpy.mean(dataset["input"], axis=0)

X_cov = numpy.cov(dataset["input"], rowvar=False)

print(X_mean)

print(numpy.diag(X_cov))

data = {"X": dataset["input"], "y": dataset["target"], "N": N, "K": K}

stan_sampling = True
if stan_sampling:
    recompile = False
    if recompile:
        address = os.environ["PYTHONPATH"] + "/stan_code/linear_regression.stan"
        mod = pystan.StanModel(file=address)
        with open('lr_model.pkl', 'wb') as f:
            pickle.dump(mod, f)
    else:
        with open('lr_model.pkl', 'rb') as f:
            mod = pickle.load(f)

fit = mod.sampling(data=data)
# alternative: fit = mod.sampling(data=data, control={"metric": "unit_e"})
print(fit)
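# A hedged follow-up mirroring Examples #3 and #27 (assumes the Stan program
# exposes a coefficient vector named "beta"; adjust to the actual parameter block):
la = fit.extract(permuted=True)
print(numpy.mean(la["beta"], axis=0))
print(numpy.cov(la["beta"], rowvar=False))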
Example #9
from distributions.neural_nets.priors.prior_util import prior_generator
import os, numpy, torch
import dill as pickle
from abstract.mcmc_sampler import mcmc_sampler, mcmc_sampler_settings_dict
from abstract.util import wrap_V_class_with_input_data
from adapt_util.tune_param_classes.tune_param_setting_util import *
from experiments.experiment_obj import tuneinput_class
from experiments.correctdist_experiments.prototype import check_mean_var_stan
from post_processing.ESS_nuts import ess_stan, diagnostics_stan
from post_processing.get_diagnostics import energy_diagnostics, process_diagnostics, get_params_mcmc_tensor, get_short_diagnostics
from input_data.convert_data_to_dict import get_data_dict
from post_processing.test_error import test_error
# V_fc_model_4 is used below; its module path is not shown in this excerpt
seed = 1
numpy.random.seed(seed)
torch.manual_seed(seed)

input_data = get_data_dict("australian",standardize_predictor=True)
test_set = {"input":input_data["input"][:-500,],"target":input_data["target"][:-500]}
input_data = {"input":input_data["input"][:500,],"target":input_data["target"][:500]}

prior_dict = {"name":"normal"}
model_dict = {"num_units":40}

v_generator = wrap_V_class_with_input_data(class_constructor=V_fc_model_4, input_data=input_data, prior_dict=prior_dict, model_dict=model_dict)

mcmc_meta = mcmc_sampler_settings_dict(mcmc_id=0, samples_per_chain=2000, num_chains=1, num_cpu=1, thin=1, tune_l_per_chain=1000,
                                       warmup_per_chain=1100, is_float=False, isstore_to_disk=False, allow_restart=False)


input_dict = {"v_fun":[v_generator],"epsilon":["dual"],"second_order":[False],"cov":["adapt"],"max_tree_depth":[8],
Example #10
from abstract.mcmc_sampler import mcmc_sampler_settings_dict
from abstract.util import wrap_V_class_with_input_data
# tuning_settings and other_default_arguments are assumed to come from this
# star import, as in the other examples on this page
from adapt_util.tune_param_classes.tune_param_setting_util import *
from distributions.logistic_regressions.logistic_regression import V_logistic_regression
from input_data.convert_data_to_dict import get_data_dict
from post_processing.get_diagnostics import energy_diagnostics, process_diagnostics, get_params_mcmc_tensor, get_short_diagnostics

mcmc_meta = mcmc_sampler_settings_dict(mcmc_id=0,
                                       samples_per_chain=1000,
                                       num_chains=4,
                                       num_cpu=4,
                                       thin=1,
                                       tune_l_per_chain=0,
                                       warmup_per_chain=100,
                                       is_float=False,
                                       isstore_to_disk=False,
                                       allow_restart=False)

pima_indian_data = get_data_dict("breast")
V_generator = wrap_V_class_with_input_data(
    class_constructor=V_logistic_regression, input_data=pima_indian_data)

input_dict = {
    "v_fun": [V_generator],
    "epsilon": [0.01],
    "second_order": [False],
    "metric_name": ["unit_e"],
    "dynamic": [True],
    "windowed": [False],
    "criterion": ["gnuts"]
}

other_arguments = other_default_arguments()
tune_settings_dict = tuning_settings([], [], [], other_arguments)
Example #11
from post_processing.diagnostics import WAIC, convert_mcmc_tensor_to_list_points, psis
import pickle, torch
from distributions.logistic_regressions.pima_indian_logisitic_regression import V_pima_inidan_logit
from input_data.convert_data_to_dict import get_data_dict

with open("debug_test_error_mcmc.pkl", 'rb') as f:
    sampler = pickle.load(f)

#print(mcmc_samples["samples"].shape)
#print(mcmc_samples["samples"])
#exit()
train_data = get_data_dict("pima_indian")

v_obj = V_pima_inidan_logit(precision_type="torch.DoubleTensor")

Example #12
from abstract.mcmc_sampler import mcmc_sampler_settings_dict
from abstract.util import wrap_V_class_with_input_data
# tuning_settings and other_default_arguments are assumed to come from this
# star import, as in the other examples on this page
from adapt_util.tune_param_classes.tune_param_setting_util import *
from distributions.logistic_regressions.logistic_regression import V_logistic_regression
from input_data.convert_data_to_dict import get_data_dict
from post_processing.get_diagnostics import energy_diagnostics, process_diagnostics, get_params_mcmc_tensor, get_short_diagnostics

mcmc_meta = mcmc_sampler_settings_dict(mcmc_id=0,
                                       samples_per_chain=1000,
                                       num_chains=4,
                                       num_cpu=4,
                                       thin=1,
                                       tune_l_per_chain=0,
                                       warmup_per_chain=100,
                                       is_float=False,
                                       isstore_to_disk=False,
                                       allow_restart=False)

pima_indian_data = get_data_dict("german")
V_generator = wrap_V_class_with_input_data(
    class_constructor=V_logistic_regression, input_data=pima_indian_data)

input_dict = {
    "v_fun": [V_generator],
    "epsilon": [0.01],
    "second_order": [False],
    "metric_name": ["unit_e"],
    "dynamic": [True],
    "windowed": [False],
    "criterion": ["gnuts"]
}

other_arguments = other_default_arguments()
tune_settings_dict = tuning_settings([], [], [], other_arguments)
Example #13
import numpy, torch
import dill as pickle
from abstract.mcmc_sampler import mcmc_sampler, mcmc_sampler_settings_dict
from abstract.util import wrap_V_class_with_input_data
from adapt_util.tune_param_classes.tune_param_setting_util import *
from distributions.neural_nets.fc_V_model_1 import V_fc_model_1  # module path as in Example #22
from experiments.experiment_obj import tuneinput_class
from experiments.correctdist_experiments.prototype import check_mean_var_stan
from post_processing.ESS_nuts import ess_stan, diagnostics_stan
from post_processing.get_diagnostics import energy_diagnostics, process_diagnostics, get_params_mcmc_tensor, get_short_diagnostics
from input_data.convert_data_to_dict import get_data_dict, subset_data_dict
from post_processing.test_error import test_error
from post_processing.test_error import test_error
seed = 0
numpy.random.seed(seed)
torch.manual_seed(seed)

#input_data = get_data_dict("pima_indian",standardize_predictor=True) #val = 1.2
#input_data = get_data_dict("german",standardize_predictor=True) # val = 1.3
input_data = get_data_dict("8x8mnist",standardize_predictor=True) # val = 1.2
input_data = subset_data_dict(dataset_dict=input_data, subset_size=500)


prior_dict = {"name":"normal"}
model_dict = {"num_units":15}

v_generator = wrap_V_class_with_input_data(class_constructor=V_fc_model_1, input_data=input_data, prior_dict=prior_dict, model_dict=model_dict)

mcmc_meta = mcmc_sampler_settings_dict(mcmc_id=0, samples_per_chain=2000, num_chains=4, num_cpu=4, thin=1, tune_l_per_chain=1000,
                                       warmup_per_chain=1100, is_float=False, isstore_to_disk=False, allow_restart=False)


input_dict = {"v_fun":[v_generator],"epsilon":["dual"],"second_order":[False],"cov":["adapt"],"max_tree_depth":[8],
Example #14
from input_data.convert_data_to_dict import get_data_dict
from post_processing.test_error import map_prediction,test_error,posterior_predictive_dist
from distributions.logistic_regressions.pima_indian_logisitic_regression import V_pima_inidan_logit
import pickle, torch
with open("debug_test_error_mcmc.pkl", 'rb') as f:
    mcmc_samples = pickle.load(f)

target_dataset = get_data_dict("pima_indian")

out_dist = posterior_predictive_dist(target_dataset, V_pima_inidan_logit(), mcmc_samples, "classification")

print(out_dist.shape)

out_dist2 = torch.zeros(out_dist.shape)
v_nn_obj = V_pima_inidan_logit()
for i in range(out_dist.shape[0]):
    test_samples = target_dataset["input"][i:i+1, :]
    for j in range(out_dist.shape[2]):
        v_nn_obj.flattened_tensor.copy_(torch.from_numpy(mcmc_samples[j, :]))
        v_nn_obj.load_flattened_tensor_to_param()
        out_prob = v_nn_obj.predict(test_samples)
        out_dist2[i, :, j] = out_prob

diff_out_dist = ((out_dist - out_dist2)**2).sum()

print("diff dist {}".format(diff_out_dist))
Example #15
from abstract.mcmc_sampler import mcmc_sampler_settings_dict
from abstract.util import wrap_V_class_with_input_data
# tuning_settings and other_default_arguments are assumed to come from this
# star import, as in the other examples on this page
from adapt_util.tune_param_classes.tune_param_setting_util import *
from distributions.logistic_regressions.logistic_regression import V_logistic_regression
from input_data.convert_data_to_dict import get_data_dict
from post_processing.get_diagnostics import energy_diagnostics, process_diagnostics, get_params_mcmc_tensor, get_short_diagnostics

mcmc_meta = mcmc_sampler_settings_dict(mcmc_id=0,
                                       samples_per_chain=1000,
                                       num_chains=4,
                                       num_cpu=4,
                                       thin=1,
                                       tune_l_per_chain=0,
                                       warmup_per_chain=100,
                                       is_float=False,
                                       isstore_to_disk=False,
                                       allow_restart=False)

pima_indian_data = get_data_dict("heart")
V_generator = wrap_V_class_with_input_data(
    class_constructor=V_logistic_regression, input_data=pima_indian_data)

input_dict = {
    "v_fun": [V_generator],
    "epsilon": [0.01],
    "second_order": [False],
    "metric_name": ["unit_e"],
    "dynamic": [True],
    "windowed": [False],
    "criterion": ["gnuts"]
}

other_arguments = other_default_arguments()
tune_settings_dict = tuning_settings([], [], [], other_arguments)
Example #16
import numpy
from experiments.float_vs_double.convergence.util import convert_convergence_output_to_numpy
from experiments.float_vs_double.convergence.float_vs_double_convergence import convergence_diagnostics
from input_data.convert_data_to_dict import get_data_dict
from distributions.logistic_regressions.logistic_regression import V_logistic_regression
from abstract.util import wrap_V_class_with_input_data
# V_mvn and wishart_for_cov are used below; their modules are not shown in this excerpt

#####################################################################################################################################
numpy.random.seed(1)
input_data = {"input": wishart_for_cov(dim=10)}
V_mvn1 = wrap_V_class_with_input_data(class_constructor=V_mvn,
                                      input_data=input_data)
V_mvn2 = wrap_V_class_with_input_data(class_constructor=V_mvn,
                                      input_data=input_data)
####################################################################################################################################
# logistic regressions
input_data_pima_indian = get_data_dict("pima_indian")
V_pima_indian = wrap_V_class_with_input_data(
    class_constructor=V_logistic_regression, input_data=input_data_pima_indian)

input_data_australian = get_data_dict("australian")
V_australian = wrap_V_class_with_input_data(
    class_constructor=V_logistic_regression, input_data=input_data_australian)

input_data_heart = get_data_dict("heart")
V_heart = wrap_V_class_with_input_data(class_constructor=V_logistic_regression,
                                       input_data=input_data_heart)

input_data_breast = get_data_dict("breast")
V_breast = wrap_V_class_with_input_data(
    class_constructor=V_logistic_regression, input_data=input_data_breast)
Example #17
from abstract.mcmc_sampler import mcmc_sampler_settings_dict
from abstract.util import wrap_V_class_with_input_data
# tuning_settings and other_default_arguments are assumed to come from this
# star import, as in the other examples on this page
from adapt_util.tune_param_classes.tune_param_setting_util import *
from distributions.logistic_regressions.logistic_regression import V_logistic_regression
from input_data.convert_data_to_dict import get_data_dict
from post_processing.get_diagnostics import energy_diagnostics, process_diagnostics, get_params_mcmc_tensor, get_short_diagnostics

mcmc_meta = mcmc_sampler_settings_dict(mcmc_id=0,
                                       samples_per_chain=1000,
                                       num_chains=4,
                                       num_cpu=4,
                                       thin=1,
                                       tune_l_per_chain=0,
                                       warmup_per_chain=100,
                                       is_float=False,
                                       isstore_to_disk=False,
                                       allow_restart=False)

pima_indian_data = get_data_dict("australian")
V_generator = wrap_V_class_with_input_data(
    class_constructor=V_logistic_regression, input_data=pima_indian_data)

input_dict = {
    "v_fun": [V_generator],
    "epsilon": [0.01],
    "second_order": [False],
    "metric_name": ["unit_e"],
    "dynamic": [True],
    "windowed": [False],
    "criterion": ["gnuts"]
}

other_arguments = other_default_arguments()
tune_settings_dict = tuning_settings([], [], [], other_arguments)
Example #18
from abstract.util import wrap_V_class_with_input_data
from distributions.neural_nets.priors.prior_util import prior_generator
import os, numpy, torch
import dill as pickle
from abstract.mcmc_sampler import mcmc_sampler, mcmc_sampler_settings_dict
from adapt_util.tune_param_classes.tune_param_setting_util import *
from distributions.neural_nets.fc_V_model_1 import V_fc_model_1  # module path as in Example #22
from experiments.experiment_obj import tuneinput_class
from experiments.correctdist_experiments.prototype import check_mean_var_stan
from post_processing.ESS_nuts import ess_stan
from post_processing.get_diagnostics import energy_diagnostics, process_diagnostics
from input_data.convert_data_to_dict import get_data_dict
seed = 1
numpy.random.seed(seed)
torch.manual_seed(seed)

input_data = get_data_dict("boston",standardize_predictor=True)


prior_dict = {"name":"horseshoe_2"}
model_dict = {"num_units":10}

v_generator = wrap_V_class_with_input_data(class_constructor=V_fc_model_1, input_data=input_data, prior_dict=prior_dict, model_dict=model_dict)

mcmc_meta = mcmc_sampler_settings_dict(mcmc_id=0, samples_per_chain=2000, num_chains=2, num_cpu=1, thin=1, tune_l_per_chain=1000,
                                       warmup_per_chain=1100, is_float=False, isstore_to_disk=False, allow_restart=False)


input_dict = {"v_fun":[v_generator],"epsilon":["dual"],"second_order":[False],"cov":["adapt"],"max_tree_depth":[8],
               "metric_name":["diag_e"],"dynamic":[True],"windowed":[False],"criterion":["gnuts"]}
Example #19
from abstract.util import wrap_V_class_with_input_data
from input_data.convert_data_to_dict import get_data_dict
from experiments.neural_net_experiments.sghmc_vs_batch_hmc.model import V_fc_model_1
from distributions.neural_nets.priors.prior_util import prior_generator
import os, numpy, torch
import dill as pickle
from abstract.mcmc_sampler import mcmc_sampler, mcmc_sampler_settings_dict
from adapt_util.tune_param_classes.tune_param_setting_util import *
from experiments.experiment_obj import tuneinput_class
from experiments.correctdist_experiments.prototype import check_mean_var_stan
from post_processing.ESS_nuts import ess_stan, diagnostics_stan
from post_processing.get_diagnostics import energy_diagnostics, process_diagnostics, get_params_mcmc_tensor, get_short_diagnostics
from post_processing.test_error import test_error
input_data = get_data_dict("8x8mnist")

prior_dict = {"name": "normal"}
model_dict = {"num_units": 25}

v_generator = wrap_V_class_with_input_data(class_constructor=V_fc_model_1,
                                           input_data=input_data,
                                           prior_dict=prior_dict,
                                           model_dict=model_dict)

mcmc_meta = mcmc_sampler_settings_dict(mcmc_id=0,
                                       samples_per_chain=2000,
                                       num_chains=4,
                                       num_cpu=4,
                                       thin=1,
                                       tune_l_per_chain=1000,
                                       warmup_per_chain=1100,
                                       is_float=False,
                                       isstore_to_disk=False,
                                       allow_restart=False)
Example #20
from abstract.mcmc_sampler import mcmc_sampler_settings_dict
from abstract.util import wrap_V_class_with_input_data
from adapt_util.tune_param_classes.tune_param_setting_util import *
from input_data.convert_data_to_dict import get_data_dict
from post_processing.get_diagnostics import energy_diagnostics, process_diagnostics, get_params_mcmc_tensor, get_short_diagnostics
# V_response_model is used below; its module is not shown in this excerpt

mcmc_meta = mcmc_sampler_settings_dict(mcmc_id=0,
                                       samples_per_chain=1000,
                                       num_chains=4,
                                       num_cpu=4,
                                       thin=1,
                                       tune_l_per_chain=0,
                                       warmup_per_chain=100,
                                       is_float=False,
                                       isstore_to_disk=False,
                                       allow_restart=False)

input_data = get_data_dict("1-PL", standardize_predictor=False)
V_generator = wrap_V_class_with_input_data(class_constructor=V_response_model,
                                           input_data=input_data)

input_dict = {
    "v_fun": [V_generator],
    "epsilon": [0.1],
    "second_order": [False],
    "metric_name": ["unit_e"],
    "dynamic": [True],
    "windowed": [False],
    "criterion": ["gnuts"]
}

other_arguments = other_default_arguments()
tune_settings_dict = tuning_settings([], [], [], other_arguments)
Example #21
from experiments.neural_net_experiments.sghmc_vs_batch_hmc.sghmc_batchhmc import sghmc_sampler
from distributions.logistic_regressions.pima_indian_logisitic_regression import V_pima_inidan_logit
from abstract.metric import metric
from input_data.convert_data_to_dict import get_data_dict
import numpy
from abstract.abstract_class_point import point
from abstract.abstract_class_Ham import Hamiltonian
v_obj = V_pima_inidan_logit(precision_type="torch.DoubleTensor")
metric_obj = metric(name="unit_e", V_instance=v_obj)  # avoid shadowing the imported class
Ham = Hamiltonian(V=v_obj, metric=metric_obj)

full_data = get_data_dict("pima_indian")
init_q_point = point(V=v_obj)
out = sghmc_sampler(init_q_point=init_q_point,
                    epsilon=0.01,
                    L=10,
                    Ham=Ham,
                    alpha=0.01,
                    eta=0.01,
                    betahat=0,
                    full_data=full_data,
                    num_samples=1000,
                    thin=0,
                    burn_in=200,
                    batch_size=50)

store = out[0]

print(store.shape)

print(numpy.mean(store.numpy(), axis=0))
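# Small hedged addition: posterior spread alongside the mean above (store holds
# the kept draws as a torch tensor, per the shape print).
print(numpy.std(store.numpy(), axis=0))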
Example #22
from abstract.util import wrap_V_class_with_input_data
from distributions.neural_nets.priors.prior_util import prior_generator
import os, numpy, torch
import dill as pickle
from abstract.mcmc_sampler import mcmc_sampler, mcmc_sampler_settings_dict
from adapt_util.tune_param_classes.tune_param_setting_util import *
from experiments.experiment_obj import tuneinput_class
from experiments.correctdist_experiments.prototype import check_mean_var_stan
from post_processing.ESS_nuts import ess_stan, diagnostics_stan
from post_processing.get_diagnostics import energy_diagnostics, process_diagnostics, get_params_mcmc_tensor, get_short_diagnostics
from input_data.convert_data_to_dict import get_data_dict
#from experiments.neural_net_experiments.gibbs_vs_joint_sampling.V_hierarchical_fc1 import V_fc_gibbs_model_1
from distributions.neural_nets.fc_V_model_1 import V_fc_model_1
from post_processing.test_error import test_error
input_data = get_data_dict("8x8mnist")
input_data = {
    "input": input_data["input"][:500, ],
    "target": input_data["target"][:500]
}
model_dict = {"num_units": 20}

prior_dict = {"name": "gaussian_inv_gamma_2"}
V_fun = wrap_V_class_with_input_data(class_constructor=V_fc_model_1,
                                     input_data=input_data,
                                     prior_dict=prior_dict,
                                     model_dict=model_dict)

mcmc_meta = mcmc_sampler_settings_dict(mcmc_id=0,
                                       samples_per_chain=2000,
                                       num_chains=4,
                                       num_cpu=4,
                                       thin=1,
                                       tune_l_per_chain=1000,
                                       # remaining arguments follow the pattern
                                       # of the other examples on this page
                                       warmup_per_chain=1100,
                                       is_float=False,
                                       isstore_to_disk=False,
                                       allow_restart=False)
Example #23
from input_data.convert_data_to_dict import get_data_dict
from post_processing.test_error import map_prediction, test_error
from distributions.linear_regressions.linear_regression import V_linear_regression
import pickle
with open("debug_test_error_mcmc_regression.pkl", 'rb') as f:
    mcmc_samples = pickle.load(f)


target_dataset = get_data_dict("boston")

te1, predicted1 = test_error(target_dataset,
                             v_obj=V_linear_regression(),
                             mcmc_samples=mcmc_samples,
                             type="regression",
                             memory_efficient=False)
te2, predicted2 = test_error(target_dataset,
                             v_obj=V_linear_regression(),
                             mcmc_samples=mcmc_samples,
                             type="regression",
                             memory_efficient=True)

print(te1)
print(te2)

Example #24
from distributions.neural_nets.priors.prior_util import prior_generator
import os, numpy, torch
import dill as pickle
from abstract.mcmc_sampler import mcmc_sampler, mcmc_sampler_settings_dict
from abstract.util import wrap_V_class_with_input_data
from adapt_util.tune_param_classes.tune_param_setting_util import *
from experiments.experiment_obj import tuneinput_class
from experiments.correctdist_experiments.prototype import check_mean_var_stan
from post_processing.ESS_nuts import ess_stan, diagnostics_stan
from post_processing.get_diagnostics import energy_diagnostics, process_diagnostics, get_params_mcmc_tensor, get_short_diagnostics
from input_data.convert_data_to_dict import get_data_dict
from post_processing.test_error import test_error
# V_fc_model_1_regression is used below; its module is not shown in this excerpt
seed = 100
numpy.random.seed(seed)
torch.manual_seed(seed)

input_data = get_data_dict("boston")

test_set = {
    "input": input_data["input"][-100:, ],
    "target": input_data["target"][-100:]
}
train_set = {
    "input": input_data["input"][:100, ],
    "target": input_data["target"][:100]
}

prior_dict = {"name": "normal"}
model_dict = {"num_units": 25}

v_generator = wrap_V_class_with_input_data(
    class_constructor=V_fc_model_1_regression,
    # completion follows the other wrap_V_class_with_input_data calls on this
    # page; train_set is the natural input for the truncated excerpt
    input_data=train_set,
    prior_dict=prior_dict,
    model_dict=model_dict)
Example #25
import torch
from abstract.metric import metric
from abstract.abstract_class_Ham import Hamiltonian
from abstract.abstract_class_point import point
from explicit.general_util import logsumexp_torch
from experiments.neural_net_experiments.gibbs_vs_joint_sampling.gibbs_vs_together_hyperparam import update_param_and_hyperparam_one_step
from abstract.mcmc_sampler import log_class
from input_data.convert_data_to_dict import get_data_dict
from post_processing.test_error import test_error
from abstract.abstract_nuts_util import abstract_GNUTS
from general_util.pytorch_random import log_inv_gamma_density
from post_processing.ESS_nuts import diagnostics_stan
precision_type = 'torch.DoubleTensor'
#precision_type = 'torch.FloatTensor'
torch.set_default_tensor_type(precision_type)

data_dict = get_data_dict("pima_indian")


# V is the base potential-energy class; its import is not shown in this excerpt
class V_hierarchical_logistic_gibbs(V):
    def __init__(self, precision_type, gibbs):
        self.gibbs = gibbs
        super(V_hierarchical_logistic_gibbs,
              self).__init__(precision_type=precision_type)

    # def V_setup(self,y,X,lamb)
    def V_setup(self):
        self.explicit_gradient = False
        self.need_higherorderderiv = True
        data_dict = get_data_dict("pima_indian")
        X_np = data_dict["input"]
        y_np = data_dict["target"]
Example #26
from distributions.neural_nets.priors.prior_util import prior_generator
import os, numpy, torch
import dill as pickle
from abstract.mcmc_sampler import mcmc_sampler, mcmc_sampler_settings_dict
from abstract.util import wrap_V_class_with_input_data
from adapt_util.tune_param_classes.tune_param_setting_util import *
from distributions.neural_nets.fc_V_model_1 import V_fc_model_1  # module path as in Example #22
from experiments.experiment_obj import tuneinput_class
from experiments.correctdist_experiments.prototype import check_mean_var_stan
from post_processing.ESS_nuts import ess_stan, diagnostics_stan
from post_processing.get_diagnostics import energy_diagnostics, process_diagnostics, get_params_mcmc_tensor, get_short_diagnostics
from input_data.convert_data_to_dict import get_data_dict
from post_processing.test_error import test_error
seed = 1
numpy.random.seed(seed)
torch.manual_seed(seed)

input_data = get_data_dict("pima_indian", standardize_predictor=True)

prior_dict = {"name": "horseshoe_ard"}
model_dict = {"num_units": 10}

v_generator = wrap_V_class_with_input_data(class_constructor=V_fc_model_1,
                                           input_data=input_data,
                                           prior_dict=prior_dict,
                                           model_dict=model_dict)

mcmc_meta = mcmc_sampler_settings_dict(mcmc_id=0,
                                       samples_per_chain=2000,
                                       num_chains=4,
                                       num_cpu=4,
                                       thin=1,
                                       tune_l_per_chain=1000,
                                       warmup_per_chain=1100,
                                       is_float=False,
                                       isstore_to_disk=False,
                                       allow_restart=False)
Example #27
import os, numpy, pickle, pystan
from input_data.convert_data_to_dict import get_data_dict

# recompile only when no cached model exists, as in Example #3
recompile = not os.path.isfile('model.pkl')
if recompile:
    address = os.environ["PYTHONPATH"] + "/stan_code/alt_log_reg.stan"
    mod = pystan.StanModel(file=address)
    with open('model.pkl', 'wb') as f:
        pickle.dump(mod, f)
else:
    with open('model.pkl', 'rb') as f:
        mod = pickle.load(f)

#full_data = get_data_dict("pima_indian") # val = 1.3
#full_data = get_data_dict("breast") # val = 2.106
#full_data = get_data_dict("heart") # val = 3.26
#full_data = get_data_dict("german") # val = 8.6

#full_data = get_data_dict("australian") # val = 4.8
#full_data = get_data_dict("logistic_8x8mnist") #val = 2.4
full_data = get_data_dict("logistic_mnist")  # val = 1.12
y = full_data["target"].astype(numpy.int64)
X = full_data["input"]
N = X.shape[0]
p = X.shape[1]
data = {"y": y, "X": X, "N": N, "p": p}
fit = mod.sampling(data=data, seed=20)
print(fit)
correct_samples = fit.extract(permuted=True)["beta"]

correct_mean = numpy.mean(correct_samples, axis=0)

correct_cov = numpy.cov(correct_samples, rowvar=False)

sd_vec = numpy.sqrt(numpy.diagonal(correct_cov))
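# A hedged follow-up mirroring Example #3: persist the long-chain summaries for
# reuse (the file name here is illustrative, not from the original script);
# Example #6 loads results under these same keys.
numpy.savez("logistic_mnist_correct_summary.npz",
            correct_mean=correct_mean, correct_cov=correct_cov)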
Example #28
from post_processing.test_error import test_error
from abstract.abstract_class_point import point
from input_data.convert_data_to_dict import get_data_dict
from abstract.util import wrap_V_class_with_input_data
from distributions.neural_nets.fc_V_model_1 import V_fc_model_1
from unit_tests.debug_optimization.debug_optimization import gradient_descent
import torch

input_data = get_data_dict("mnist")  # 0.866 when num_units = 10
input_data = {
    "input": input_data["input"][:5000, :],
    "target": input_data["target"][:5000]
}
#input_data = get_data_dict("mnist")
prior_dict = {"name": "normal"}
model_dict = {"num_units": 300}

v_generator = wrap_V_class_with_input_data(class_constructor=V_fc_model_1,
                                           input_data=input_data,
                                           prior_dict=prior_dict,
                                           model_dict=model_dict)

out, explode_grad = gradient_descent(
    number_of_iter=5000,
    lr=0.01,
    v_obj=v_generator(precision_type="torch.DoubleTensor"))


mcmc_samples = torch.zeros(1, len(out.flattened_tensor))
mcmc_samples[0, :] = out.flattened_tensor
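# A hedged sketch using the test_error already imported above (its signature
# appears in Example #23; type="classification" and passing the torch tensor
# built above are assumptions):
te, predicted = test_error(input_data,
                           v_obj=v_generator(precision_type="torch.DoubleTensor"),
                           mcmc_samples=mcmc_samples,
                           type="classification",
                           memory_efficient=False)
print(te)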
Example #29
from abstract.mcmc_sampler import mcmc_sampler_settings_dict
from abstract.util import wrap_V_class_with_input_data
from adapt_util.tune_param_classes.tune_param_setting_util import *
from input_data.convert_data_to_dict import get_data_dict
from post_processing.get_diagnostics import energy_diagnostics, process_diagnostics, get_params_mcmc_tensor, get_short_diagnostics
# V_stochastic_volatility is used below; its module is not shown in this excerpt

mcmc_meta = mcmc_sampler_settings_dict(mcmc_id=0,
                                       samples_per_chain=1000,
                                       num_chains=1,
                                       num_cpu=1,
                                       thin=1,
                                       tune_l_per_chain=0,
                                       warmup_per_chain=100,
                                       is_float=False,
                                       isstore_to_disk=False,
                                       allow_restart=False)

input_data = get_data_dict("sp500")
V_generator = wrap_V_class_with_input_data(
    class_constructor=V_stochastic_volatility, input_data=input_data)

input_dict = {
    "v_fun": [V_generator],
    "epsilon": [0.001],
    "second_order": [False],
    "metric_name": ["unit_e"],
    "dynamic": [True],
    "windowed": [False],
    "criterion": ["gnuts"]
}

other_arguments = other_default_arguments()
tune_settings_dict = tuning_settings([], [], [], other_arguments)