Example #1
def match_convergence_test(v_fun, seed, correct_mean, correct_cov):
    mcmc_meta_double = mcmc_sampler_settings_dict(mcmc_id=0, samples_per_chain=2000, num_chains=4, num_cpu=4, thin=1,
                                                  tune_l_per_chain=1000,
                                                  warmup_per_chain=1100, is_float=False, isstore_to_disk=False,
                                                  allow_restart=True, seed=seed)
    mcmc_meta_float = mcmc_sampler_settings_dict(mcmc_id=0, samples_per_chain=2000, num_chains=4, num_cpu=4, thin=1,
                                                 tune_l_per_chain=1000,
                                                 warmup_per_chain=1100, is_float=True, isstore_to_disk=False,
                                                 allow_restart=True, seed=seed)

    input_dict = {"v_fun": [v_fun], "epsilon": ["dual"], "second_order": [False], "cov": ["adapt"],
                  "metric_name": ["diag_e"], "dynamic": [True], "windowed": [False],
                  "criterion": ["gnuts"]}
    ep_dual_metadata_argument = {"name": "epsilon", "target": 0.8, "gamma": 0.05, "t_0": 10,
                                 "kappa": 0.75, "obj_fun": "accept_rate", "par_type": "fast"}

    dim = len(v_fun(precision_type="torch.DoubleTensor").flattened_tensor)
    adapt_cov_arguments = [adapt_cov_default_arguments(par_type="slow", dim=dim)]
    dual_args_list = [ep_dual_metadata_argument]
    other_arguments = other_default_arguments()

    tune_settings_dict = tuning_settings(dual_args_list, [], adapt_cov_arguments, other_arguments)

    tune_dict = tuneinput_class(input_dict).singleton_tune_dict()

    sampler_double = mcmc_sampler(tune_dict=tune_dict, mcmc_settings_dict=mcmc_meta_double,
                                  tune_settings_dict=tune_settings_dict)

    sampler_float = mcmc_sampler(tune_dict=tune_dict, mcmc_settings_dict=mcmc_meta_float,
                                 tune_settings_dict=tune_settings_dict)

    sampler_double.start_sampling()

    sampler_float.start_sampling()

    sampler_double.remove_failed_chains()

    sampler_float.remove_failed_chains()

    float_samples = sampler_float.get_samples(permuted=False)
    double_samples = sampler_double.get_samples(permuted=False)
    short_diagnostics_float = check_mean_var_stan(mcmc_samples=float_samples,
                                                  correct_mean=correct_mean.astype(numpy.float32),
                                                  correct_cov=correct_cov.astype(numpy.float32))
    short_diagnostics_double = check_mean_var_stan(mcmc_samples=double_samples, correct_mean=correct_mean,
                                                   correct_cov=correct_cov)

    samples_double_cast_to_float = double_samples.astype(numpy.float32)
    # samples_float = output_float["samples"]

    # combined_samples = torch.cat([samples_double_cast_to_float,float_samples],dim=0)

    combined_samples = numpy.concatenate([samples_double_cast_to_float, float_samples], axis=0)
    short_diagnostics_combined = check_mean_var_stan(mcmc_samples=combined_samples,
                                                     correct_mean=correct_mean.astype(numpy.float32),
                                                     correct_cov=correct_cov.astype(numpy.float32))

    out = {"diag_combined_mean": short_diagnostics_combined["pc_of_mean"], "diag_float_mean": short_diagnostics_float["pc_of_mean"],
           "diag_double_mean": short_diagnostics_double["pc_of_mean"],"diag_combined_cov":short_diagnostics_combined["pc_of_cov"],
           "diag_float_cov":short_diagnostics_float["pc_of_cov"],"diag_double_cov":short_diagnostics_double["pc_of_cov"]}
    return(out)
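
A minimal usage sketch (not part of the original example): it assumes V_pima_indian_logit from Example #11 is importable and that reference moments correct_mean and correct_cov were computed elsewhere, e.g. from a long trusted run.

# hedged sketch: V_pima_indian_logit, correct_mean and correct_cov come from the surrounding project
diag = match_convergence_test(v_fun=V_pima_indian_logit, seed=1,
                              correct_mean=correct_mean, correct_cov=correct_cov)
print(diag["diag_float_mean"], diag["diag_double_mean"], diag["diag_combined_mean"])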
Example #2
def min_ess_gnuts(v_fun, ep):
    mcmc_meta = mcmc_sampler_settings_dict(mcmc_id=0,
                                           samples_per_chain=10000,
                                           num_chains=4,
                                           num_cpu=1,
                                           thin=1,
                                           tune_l_per_chain=0,
                                           warmup_per_chain=1000,
                                           is_float=False,
                                           isstore_to_disk=False,
                                           allow_restart=True,
                                           max_num_restarts=5)
    tune_settings_dict = tuning_settings([], [], [], [])
    input_dict = {
        "v_fun": [v_fun],
        "epsilon": [ep],
        "second_order": [False],
        "metric_name": ["unit_e"],
        "dynamic": [True],
        "windowed": [None],
        "criterion": ["gnuts"]
    }

    tune_dict = tuneinput_class(input_dict).singleton_tune_dict()
    sampler = mcmc_sampler(tune_dict=tune_dict,
                           mcmc_settings_dict=mcmc_meta,
                           tune_settings_dict=tune_settings_dict)
    sampler.start_sampling()
    out = get_min_ess_and_esjds(ran_sampler=sampler)
    return (out)
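
To pick a fixed step size, this helper might be swept over a small grid; a hedged sketch, assuming V_pima_indian_logit (used in the later examples) is importable and the grid values are hypothetical:

for ep in [0.05, 0.1, 0.2]:
    result = min_ess_gnuts(V_pima_indian_logit, ep)
    print("ep = {}: {}".format(ep, result))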
Example #3
def run_nn_experiment(xhmc_delta_list, input_data, v_fun, test_set, type_problem):
    out_list = [None]*(len(xhmc_delta_list)+1)
    for i in range(len(out_list)):
        model_dict = {"num_units": 50}

        v_generator = wrap_V_class_with_input_data(class_constructor=v_fun, input_data=input_data,
                                                   model_dict=model_dict)

        mcmc_meta = mcmc_sampler_settings_dict(mcmc_id=0, samples_per_chain=2000, num_chains=4, num_cpu=4, thin=1,
                                               tune_l_per_chain=1000,
                                               warmup_per_chain=1100, is_float=False, isstore_to_disk=False,
                                               allow_restart=False)


        if i<len(out_list)-1:
            input_dict = {"v_fun": [v_generator], "epsilon": ["dual"], "second_order": [False], "cov": ["adapt"],
                          "max_tree_depth": [8],"xhmc_delta":[xhmc_delta_list[i]],
                          "metric_name": ["diag_e"], "dynamic": [True], "windowed": [False], "criterion": ["xhmc"]}
        else:
            input_dict = {"v_fun": [v_generator], "epsilon": ["dual"], "second_order": [False], "cov": ["adapt"],
                          "max_tree_depth": [8],
                          "metric_name": ["diag_e"], "dynamic": [True], "windowed": [False], "criterion": ["gnuts"]}

        ep_dual_metadata_argument = {"name": "epsilon", "target": 0.9, "gamma": 0.05, "t_0": 10,
                                     "kappa": 0.75, "obj_fun": "accept_rate", "par_type": "fast"}
        adapt_cov_arguments = [adapt_cov_default_arguments(par_type="slow", dim=v_generator(
            precision_type="torch.DoubleTensor").get_model_dim())]
        dual_args_list = [ep_dual_metadata_argument]
        other_arguments = other_default_arguments()
        # tune_settings_dict = tuning_settings([],[],[],[])
        tune_settings_dict = tuning_settings(dual_args_list, [], adapt_cov_arguments, other_arguments)
        tune_dict = tuneinput_class(input_dict).singleton_tune_dict()

        sampler1 = mcmc_sampler(tune_dict=tune_dict, mcmc_settings_dict=mcmc_meta,
                                tune_settings_dict=tune_settings_dict)

        sampler1.start_sampling()

        diagnostics_np = sampler1.np_diagnostics()
        samples_mixed = sampler1.get_samples(permuted=True)
        te = test_error(target_dataset=test_set, v_obj=v_generator("torch.DoubleTensor"),
                        mcmc_samples=samples_mixed, type=type_problem)

        out = {"test_error": te, "diagnostics": diagnostics_np}
        # assign into the pre-allocated slot; append() would leave the None placeholders in place
        out_list[i] = out


    te_store = numpy.zeros(len(out_list))
    diagnostics_store = numpy.zeros(shape=[len(out_list)]+list(diagnostics_np.shape))
    for i in range(len(out_list)):
        te_store[i] = out_list[i]["test_error"]
        diagnostics_store[i,...] = out_list[i]["diagnostics"]

    output_store = {"test_error":te_store,"diagnostics":diagnostics_store}
    save_name = "xhmc_v_gnuts_8x8mnist.npz"
    numpy.savez(save_name,**output_store)
    return(save_name)
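
The saved archive can be read back with numpy.load; the keys match the output_store dict built above:

import numpy
results = numpy.load("xhmc_v_gnuts_8x8mnist.npz")
te_store = results["test_error"]        # one entry per xhmc delta, plus the gnuts baseline
diagnostics = results["diagnostics"]    # stacked np_diagnostics arrays
print(te_store)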
Example #4
    def __init__(self,
                 input_object=None,
                 experiment_setting=None,
                 fun_per_sampler=None):
        # fun_per_sampler processes each sampler to extract the desired quantities
        self.fun_per_sampler = fun_per_sampler
        self.experiment_setting = experiment_setting
        self.input_object = input_object
        self.tune_param_grid = self.input_object.tune_param_grid
        self.store_grid_obj = numpy.empty(self.input_object.grid_shape,
                                          dtype=object)
        self.experiment_result_grid_obj = numpy.empty(
            self.input_object.grid_shape, dtype=object)
        self.input_name_list = self.input_object.param_name_list
        # loop through each point in the grid and initialize a sampling object
        it = numpy.nditer(self.store_grid_obj,
                          flags=['multi_index', "refs_ok"])
        cur = 0
        self.id_to_multi_index = []
        self.multi_index_to_id = {}
        while not it.finished:

            self.id_to_multi_index.append(it.multi_index)
            self.multi_index_to_id.update({it.multi_index: cur})
            tune_dict = self.tune_param_grid[it.multi_index]
            sampling_metaobj = mcmc_sampler_settings_dict(
                mcmc_id=cur,
                samples_per_chain=self.experiment_setting["chain_length"],
                num_chains=self.experiment_setting["num_chains_per_sampler"],
                num_cpu=self.experiment_setting["num_cpu_per_sampler"],
                thin=self.experiment_setting["thin"],
                tune_l_per_chain=self.experiment_setting["tune_l"],
                warmup_per_chain=self.experiment_setting["warm_up"],
                is_float=self.experiment_setting["is_float"],
                allow_restart=self.experiment_setting["allow_restart"],
                max_num_restarts=self.experiment_setting["max_num_restarts"])
            tune_settings_dict = get_tune_settings_dict(tune_dict)
            grid_pt_metadict = {
                "mcmc_id": cur,
                "started": False,
                "completed": False,
                "saved": False
            }
            self.store_grid_obj[it.multi_index] = {
                "sampler": mcmc_sampler(tune_dict=tune_dict,
                                        mcmc_settings_dict=sampling_metaobj,
                                        tune_settings_dict=tune_settings_dict),
                "metadata": grid_pt_metadict
            }
            self.experiment_result_grid_obj[it.multi_index] = {}
            it.iternext()
            cur += 1
Example #5
def opt_experiment_ep_t(v_fun_list, ep_list, evolve_L_list, num_of_opt_steps,
                        objective, input_dict, max_L):
    # given lists of v_fun's, epsilons and evolve_L's, a number of Bayesian optimization steps, an objective
    # function and an input dict, find the optimal (ep, L) by repeatedly trying different combinations,
    # sampling a chain for each and comparing performance under the chosen objective
    assert objective in ("median_ess_normalized", "max_ess_normalized",
                         "min_ess_normalized", "median_ess", "max_ess",
                         "min_ess", "esjd", "esjd_normalized")

    num_grid_divides = len(ep_list)

    ep_bounds = [ep_list[0], ep_list[-1]]
    evolve_L_bounds = [evolve_L_list[0], evolve_L_list[-1]]

    # size=None makes numpy.random.choice return a plain scalar index
    # (numpy.asscalar was removed in recent NumPy versions)
    chosen_init = [
        ep_list[numpy.random.choice(num_grid_divides)],
        evolve_L_list[numpy.random.choice(num_grid_divides)]
    ]

    this_opt_state = opt_state(bounds=[ep_bounds, evolve_L_bounds],
                               init=chosen_init)
    cur_ep = chosen_init[0]
    cur_evolve_L = chosen_init[1]
    input_dict.update({"epsilon": [cur_ep], "evolve_L": [cur_evolve_L]})
    for j in range(num_of_opt_steps):
        mcmc_meta = mcmc_sampler_settings_dict(mcmc_id=0,
                                               samples_per_chain=300,
                                               num_chains=4,
                                               num_cpu=4,
                                               thin=1,
                                               tune_l_per_chain=0,
                                               warmup_per_chain=150,
                                               is_float=False,
                                               isstore_to_disk=False,
                                               allow_restart=True,
                                               max_num_restarts=5)
        tune_settings_dict = tuning_settings([], [], [], [])
        input_dict.update({"epsilon": [cur_ep], "evolve_L": [cur_evolve_L]})
        tune_dict = tuneinput_class(input_dict).singleton_tune_dict()

        sampler = mcmc_sampler(tune_dict=tune_dict,
                               mcmc_settings_dict=mcmc_meta,
                               tune_settings_dict=tune_settings_dict)
        sampler.start_sampling()
        out = get_ess_and_esjds(ran_sampler=sampler)

        #L = max(1,round(cur_evolve_t/cur_ep))
        # need to use actual number of transitions
        this_opt_state.update(new_y=-out[objective])
        cur_ep = this_opt_state.X_step[-1][0]
        cur_evolve_L = round(this_opt_state.X_step[-1][1])

    return (this_opt_state)
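
A hedged invocation sketch: the grids and the base input_dict are hypothetical, mirroring the static (non-dynamic) configurations used in the other examples, and V_pima_indian_logit is assumed importable. The function fills in "epsilon" and "evolve_L" itself.

base_input_dict = {"v_fun": [V_pima_indian_logit], "second_order": [False],
                   "metric_name": ["unit_e"], "dynamic": [False],
                   "windowed": [False], "criterion": [None]}
opt_result = opt_experiment_ep_t(v_fun_list=[V_pima_indian_logit],
                                 ep_list=[0.01, 0.05, 0.1, 0.15, 0.2],
                                 evolve_L_list=[5, 10, 20, 40, 80],
                                 num_of_opt_steps=10, objective="min_ess",
                                 input_dict=base_input_dict, max_L=100)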
Example #6
def choose_optimal_L(v_fun, fixed_ep, L_list, windowed):

    store_median_ess = [None] * len(L_list)

    for i in range(len(L_list)):
        L = L_list[i]
        mcmc_meta = mcmc_sampler_settings_dict(mcmc_id=0,
                                               samples_per_chain=2000,
                                               num_chains=4,
                                               num_cpu=4,
                                               thin=1,
                                               tune_l_per_chain=0,
                                               warmup_per_chain=1000,
                                               is_float=False,
                                               isstore_to_disk=False,
                                               allow_restart=True,
                                               max_num_restarts=5)

        tune_settings_dict = tuning_settings([], [], [], [])
        input_dict = {
            "v_fun": [v_fun],
            "epsilon": [fixed_ep],
            "second_order": [False],
            "evolve_L": [L],
            "metric_name": ["unit_e"],
            "dynamic": [False],
            "windowed": [windowed],
            "criterion": [None]
        }

        tune_dict = tuneinput_class(input_dict).singleton_tune_dict()
        sampler = mcmc_sampler(tune_dict=tune_dict,
                               mcmc_settings_dict=mcmc_meta,
                               tune_settings_dict=tune_settings_dict)
        sampler.start_sampling()
        out = get_ess_and_esjds(ran_sampler=sampler)
        #samples = sampler.get_samples(permuted=False)

        store_median_ess[i] = out["median_ess"]

    best_median_ess = max(store_median_ess)
    best_L = L_list[numpy.argmax(store_median_ess)]

    out = {"best_median_ess": best_median_ess, "best_L": best_L}
    return (out)
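
A usage sketch with a hypothetical grid of trajectory lengths at a fixed step size; V_pima_indian_logit is again assumed to be importable:

result = choose_optimal_L(v_fun=V_pima_indian_logit, fixed_ep=0.1,
                          L_list=[5, 10, 20, 40], windowed=False)
print(result["best_L"], result["best_median_ess"])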
Example #7
    def __init__(self, input_object=None, experiment_setting=None):


        self.input_object = input_object
        self.tune_param_grid = self.input_object.tune_param_grid
        self.store_grid_obj = numpy.empty(self.input_object.grid_shape,dtype=object)
        # loop through each point in the grid and initialize a sampling object
        it = numpy.nditer(self.store_grid_obj, flags=['multi_index',"refs_ok"])
        cur = 0
        self.id_to_multi_index = []
        self.multi_index_to_id = {}
        while not it.finished:

            self.id_to_multi_index.append(it.multi_index)
            self.multi_index_to_id.update({it.multi_index: cur})
            tune_dict = self.tune_param_grid[it.multi_index]
            sampling_metaobj = mcmc_sampler_settings(mcmc_id=cur)
            grid_pt_metadict = {"mcmc_id":cur,"started":False,"completed":False,"saved":False}
            self.store_grid_obj[it.multi_index] = {"sampler":mcmc_sampler(tune_dict,sampling_metaobj),"metadata":grid_pt_metadict}
            it.iternext()
            cur += 1
Example #8
def setup_xhmc_gnuts_experiment(xhmc_delta_list, train_set, test_set, save_name, seed=1):
    xhmc_delta_list.append(0)  # sentinel: the final iteration runs the gnuts criterion instead of xhmc
    output_names = ["train_error", "test_error","train_error_sd","test_error_sd","min_ess","median_ess"]
    output_store = numpy.zeros((len(xhmc_delta_list), len(output_names)))

    diagnostics_store = numpy.zeros(shape=[len(xhmc_delta_list)]+[4,13])
    prior_dict = {"name": "normal"}
    model_dict = {"num_units": 35}
    time_list = []
    for i in range(len(xhmc_delta_list)):
        start_time = time.time()
        v_fun = V_fc_model_4


        v_generator = wrap_V_class_with_input_data(class_constructor=v_fun, input_data=train_set,prior_dict=prior_dict,
                                                   model_dict=model_dict)
        mcmc_meta = mcmc_sampler_settings_dict(mcmc_id=0, samples_per_chain=2000, num_chains=4, num_cpu=4, thin=1,
                                               tune_l_per_chain=900,
                                               warmup_per_chain=1000, is_float=False, isstore_to_disk=False,
                                               allow_restart=True,seed=seed+i+1)

        if i == len(xhmc_delta_list)-1:
            input_dict = {"v_fun": [v_generator], "epsilon": ["dual"], "second_order": [False], "cov": ["adapt"],
                          "max_tree_depth": [8],
                          "metric_name": ["diag_e"], "dynamic": [True], "windowed": [False], "criterion": ["gnuts"]}
        else:
            input_dict = {"v_fun": [v_generator], "epsilon": ["dual"], "second_order": [False], "cov": ["adapt"],
                          "max_tree_depth": [8],
                          "metric_name": ["diag_e"], "dynamic": [True], "windowed": [False], "criterion": ["xhmc"],"xhmc_delta":[xhmc_delta_list[i]]}

        ep_dual_metadata_argument = {"name": "epsilon", "target": 0.9, "gamma": 0.05, "t_0": 10,
                                     "kappa": 0.75, "obj_fun": "accept_rate", "par_type": "fast"}
        adapt_cov_arguments = [adapt_cov_default_arguments(par_type="slow", dim=v_generator(
            precision_type="torch.DoubleTensor").get_model_dim())]
        dual_args_list = [ep_dual_metadata_argument]
        other_arguments = other_default_arguments()
        tune_settings_dict = tuning_settings(dual_args_list, [], adapt_cov_arguments, other_arguments)
        tune_dict = tuneinput_class(input_dict).singleton_tune_dict()

        sampler1 = mcmc_sampler(tune_dict=tune_dict, mcmc_settings_dict=mcmc_meta, tune_settings_dict=tune_settings_dict)

        sampler1.start_sampling()
        total_time = time.time() - start_time
        np_diagnostics,feature_names = sampler1.np_diagnostics()

        mcmc_samples_mixed = sampler1.get_samples(permuted=True)
        te, predicted, te_sd = test_error(test_set, v_obj=v_generator(precision_type="torch.DoubleTensor"),
                                          mcmc_samples=mcmc_samples_mixed, type="classification",
                                          memory_efficient=False)
        train_error, _, train_error_sd = test_error(train_set, v_obj=v_generator(precision_type="torch.DoubleTensor"),
                                                    mcmc_samples=mcmc_samples_mixed, type="classification",
                                                    memory_efficient=False)

        output_store[i, 0] = train_error
        output_store[i, 1] = te
        output_store[i, 2] = train_error_sd
        output_store[i, 3] = te_sd

        diagnostics_store[i, :, :] = np_diagnostics
        output_store[i, 4] = np_diagnostics[0, 10]  # min_ess entry of the diagnostics array
        output_store[i, 5] = np_diagnostics[0, 11]  # median_ess entry
        time_list.append(total_time)



    to_store = {"diagnostics":diagnostics_store,"output":output_store,"diagnostics_names":feature_names,
                "output_names":output_names,"seed":seed,"xhmc_delta_list":xhmc_delta_list,"prior":prior_dict["name"],
                "num_units":model_dict["num_units"],"time_list":time_list}

    numpy.savez(save_name,**to_store)


    return()
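
A sketch of driving the experiment, assuming train_set and test_set are datasets in the format expected by V_fc_model_4; the delta grid and file name are hypothetical. Note that the function appends the gnuts sentinel to xhmc_delta_list in place.

setup_xhmc_gnuts_experiment(xhmc_delta_list=[0.05, 0.1, 0.2],
                            train_set=train_set, test_set=test_set,
                            save_name="xhmc_v_gnuts_fc35.npz", seed=1)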
Example #9
input_dict = {"v_fun":[v_generator],"epsilon":["dual"],"second_order":[False],"cov":["adapt"],"max_tree_depth":[8],
               "metric_name":["diag_e"],"dynamic":[True],"windowed":[False],"criterion":["gnuts"]}
# input_dict = {"v_fun":[v_generator],"epsilon":[0.1],"second_order":[False],"evolve_L":[10],
#               "metric_name":["unit_e"],"dynamic":[False],"windowed":[False],"criterion":[None]}
ep_dual_metadata_argument = {"name":"epsilon","target":0.8,"gamma":0.05,"t_0":10,
                         "kappa":0.75,"obj_fun":"accept_rate","par_type":"fast"}
adapt_cov_arguments = [adapt_cov_default_arguments(par_type="slow",dim=v_generator(precision_type="torch.DoubleTensor").get_model_dim())]
dual_args_list = [ep_dual_metadata_argument]
other_arguments = other_default_arguments()
#tune_settings_dict = tuning_settings([],[],[],[])
tune_settings_dict = tuning_settings(dual_args_list,[],adapt_cov_arguments,other_arguments)
tune_dict  = tuneinput_class(input_dict).singleton_tune_dict()

sampler1 = mcmc_sampler(tune_dict=tune_dict,mcmc_settings_dict=mcmc_meta,tune_settings_dict=tune_settings_dict)


store_name = 'student2_fc1_cp_sampler.pkl'
sampled = False  # set to True to reload the pickled sampler instead of re-sampling
if sampled:
    sampler1 = pickle.load(open(store_name, 'rb'))
else:
    sampler1.start_sampling()
    with open(store_name, 'wb') as f:
        pickle.dump(sampler1, f)
#out = sampler1.start_sampling()


mcmc_samples_hidden_in = sampler1.get_samples_alt(prior_obj_name="hidden_in",permuted=False)
print(mcmc_samples_hidden_in["samples"].shape)
Example #10
def setup_gibbs_v_joint_experiment(num_units_list,
                                   train_set,
                                   test_set,
                                   num_samples,
                                   save_name,
                                   seed=1):
    output_names = [
        "train_error", "test_error", "train_error_sd", "test_error_sd",
        "sigma_2_ess", "mean_sigma2", "median_sigma2", "min_ess", "median_ess"
    ]
    output_store = numpy.zeros((len(num_units_list), 3, len(output_names)))

    diagnostics_store = numpy.zeros(shape=[len(num_units_list), 3] + [4, 13])
    time_store = numpy.zeros(shape=[len(num_units_list), 3])
    for i in range(len(num_units_list)):
        for j in range(3):
            start_time = time.time()
            v_fun = V_fc_model_4
            model_dict = {"num_units": num_units_list[i]}

            mcmc_meta = mcmc_sampler_settings_dict(mcmc_id=0,
                                                   samples_per_chain=1000 +
                                                   num_samples,
                                                   num_chains=4,
                                                   num_cpu=4,
                                                   thin=1,
                                                   tune_l_per_chain=900,
                                                   warmup_per_chain=1000,
                                                   is_float=False,
                                                   isstore_to_disk=False,
                                                   allow_restart=True,
                                                   seed=seed + i + 1)
            if j == 2:
                v_generator = wrap_V_class_with_input_data(
                    class_constructor=V_fc_gibbs_model_1,
                    input_data=train_set,
                    model_dict=model_dict)
                v_obj = v_generator(precision_type="torch.DoubleTensor",
                                    gibbs=True)
                metric_obj = metric(name="unit_e", V_instance=v_obj)
                Ham = Hamiltonian(v_obj, metric_obj)

                init_q_point = point(V=v_obj)
                init_hyperparam = torch.abs(torch.randn(1)) + 3
                log_obj = log_class()

                dim = len(init_q_point.flattened_tensor)
                mcmc_samples_weight = torch.zeros(1, num_samples + 1000, dim)
                mcmc_samples_hyper = torch.zeros(1, num_samples + 1000, 1)
                for step in range(num_samples + 1000):
                    print("step {}".format(step))
                    outq, out_hyperparam = update_param_and_hyperparam_dynamic_one_step(
                        init_q_point, init_hyperparam, Ham, 0.01, log_obj)
                    init_q_point.flattened_tensor.copy_(outq.flattened_tensor)
                    init_q_point.load_flatten()
                    init_hyperparam = out_hyperparam
                    mcmc_samples_weight[0, step, :] = outq.flattened_tensor.clone()
                    mcmc_samples_hyper[0, step, 0] = out_hyperparam

                mcmc_samples_weight = mcmc_samples_weight[:, 1000:, :].numpy()
                mcmc_samples_hyper = mcmc_samples_hyper[:, 1000:, :].numpy()

                te, predicted, te_sd = test_error(
                    test_set,
                    v_obj=v_generator(precision_type="torch.DoubleTensor"),
                    mcmc_samples=mcmc_samples_weight[0, :, :],
                    type="classification",
                    memory_efficient=False)
                train_error, _, train_error_sd = test_error(
                    train_set,
                    v_obj=v_generator(precision_type="torch.DoubleTensor"),
                    mcmc_samples=mcmc_samples_weight[0, :, :],
                    type="classification",
                    memory_efficient=False)
                sigma2_diagnostics = diagnostics_stan(mcmc_samples_hyper)
                sigma2_ess = sigma2_diagnostics["ess"]
                posterior_mean_hidden_in_sigma2 = numpy.mean(
                    mcmc_samples_hyper)
                posterior_median_hidden_in_sigma2 = numpy.median(
                    mcmc_samples_hyper)
                weight_ess = diagnostics_stan(mcmc_samples_weight)["ess"]

                min_ess = min(sigma2_ess, min(weight_ess))
                median_ess = numpy.median([sigma2_ess] + list(weight_ess))

                output_store[i, j, 0] = train_error
                output_store[i, j, 1] = te
                output_store[i, j, 2] = train_error_sd
                output_store[i, j, 3] = te_sd
                output_store[i, j, 4] = sigma2_ess
                output_store[i, j, 5] = posterior_mean_hidden_in_sigma2
                output_store[i, j, 6] = posterior_median_hidden_in_sigma2
                output_store[i, j, 7] = min_ess
                output_store[i, j, 8] = median_ess

            elif j == 0:
                prior_dict = {"name": "gaussian_inv_gamma_1"}
                v_generator = wrap_V_class_with_input_data(
                    class_constructor=v_fun,
                    input_data=train_set,
                    prior_dict=prior_dict,
                    model_dict=model_dict)

            elif j == 1:
                prior_dict = {"name": "gaussian_inv_gamma_2"}
                v_generator = wrap_V_class_with_input_data(
                    class_constructor=v_fun,
                    input_data=train_set,
                    prior_dict=prior_dict,
                    model_dict=model_dict)

            if j == 0 or j == 1:
                input_dict = {
                    "v_fun": [v_generator],
                    "epsilon": ["dual"],
                    "second_order": [False],
                    "max_tree_depth": [8],
                    "metric_name": ["unit_e"],
                    "dynamic": [True],
                    "windowed": [False],
                    "criterion": ["xhmc"],
                    "xhmc_delta": [0.1]
                }
                ep_dual_metadata_argument = {
                    "name": "epsilon",
                    "target": 0.9,
                    "gamma": 0.05,
                    "t_0": 10,
                    "kappa": 0.75,
                    "obj_fun": "accept_rate",
                    "par_type": "fast"
                }

                dual_args_list = [ep_dual_metadata_argument]
                other_arguments = other_default_arguments()
                tune_settings_dict = tuning_settings(dual_args_list, [], [],
                                                     other_arguments)
                tune_dict = tuneinput_class(input_dict).singleton_tune_dict()

                sampler1 = mcmc_sampler(tune_dict=tune_dict,
                                        mcmc_settings_dict=mcmc_meta,
                                        tune_settings_dict=tune_settings_dict)

                sampler1.start_sampling()

                np_diagnostics, feature_names = sampler1.np_diagnostics()

                mcmc_samples_hidden_in = sampler1.get_samples_alt(
                    prior_obj_name="hidden_in", permuted=False)
                samples = mcmc_samples_hidden_in["samples"]
                hidden_in_sigma2_indices = mcmc_samples_hidden_in[
                    "indices_dict"]["sigma2"]
                sigma2_diagnostics = diagnostics_stan(
                    samples[:, :, hidden_in_sigma2_indices])
                sigma2_ess = sigma2_diagnostics["ess"]

                posterior_mean_hidden_in_sigma2 = numpy.mean(
                    samples[:, :, hidden_in_sigma2_indices].reshape(
                        -1, len(hidden_in_sigma2_indices)),
                    axis=0)
                posterior_median_hidden_in_sigma2 = numpy.median(
                    samples[:, :, hidden_in_sigma2_indices].reshape(
                        -1, len(hidden_in_sigma2_indices)),
                    axis=0)

                mcmc_samples_mixed = sampler1.get_samples(permuted=True)
                te, predicted, te_sd = test_error(
                    test_set,
                    v_obj=v_generator(precision_type="torch.DoubleTensor"),
                    mcmc_samples=mcmc_samples_mixed,
                    type="classification",
                    memory_efficient=False)
                train_error, _, train_error_sd = test_error(
                    train_set,
                    v_obj=v_generator(precision_type="torch.DoubleTensor"),
                    mcmc_samples=mcmc_samples_mixed,
                    type="classification",
                    memory_efficient=False)

                output_store[i, j, 0] = train_error
                output_store[i, j, 1] = te
                output_store[i, j, 2] = train_error_sd
                output_store[i, j, 3] = te_sd
                output_store[i, j, 4] = sigma2_ess
                output_store[i, j, 5] = posterior_mean_hidden_in_sigma2
                output_store[i, j, 6] = posterior_median_hidden_in_sigma2

                diagnostics_store[i, j, :, :] = np_diagnostics
                output_store[i, j, 7] = np_diagnostics[0, 10]
                output_store[i, j, 8] = np_diagnostics[0, 11]

            total_time = time.time() - start_time
            time_store[i, j] = total_time

    to_store = {
        "diagnostics": diagnostics_store,
        "output": output_store,
        "diagnostics_names": feature_names,
        "output_names": output_names,
        "seed": seed,
        "num_units_list": num_units_list,
        "time_store": time_store
    }

    numpy.savez(save_name, **to_store)

    return ()
Example #11
ideal_diag_cov = 1/(torch.from_numpy(correct_diag_cov).type("torch.FloatTensor"))

input_dict = {"v_fun":[V_pima_indian_logit],"epsilon":[0.1],"second_order":[False],
              "evolve_L":[10],"metric_name":["diag_e"],"dynamic":[False],"windowed":[False],"criterion":[None]}
input_dict2 = {"v_fun":[V_pima_indian_logit],"epsilon":[0.1],"second_order":[False],
              "evolve_L":[10],"metric_name":["diag_e"],"dynamic":[False],"windowed":[True],"criterion":[None]}

input_dict3 = {"v_fun":[V_pima_indian_logit],"epsilon":[0.1],"second_order":[False],"cov":[ideal_diag_cov],
              "evolve_L":[10],"metric_name":["diag_e"],"dynamic":[False],"windowed":[False],"criterion":[None]}
tune_settings_dict = tuning_settings([],[],[],[])

tune_dict  = tuneinput_class(input_dict).singleton_tune_dict()
tune_dict2  = tuneinput_class(input_dict2).singleton_tune_dict()
tune_dict3  = tuneinput_class(input_dict3).singleton_tune_dict()

sampler1 = mcmc_sampler(tune_dict=tune_dict,mcmc_settings_dict=mcmc_meta,tune_settings_dict=tune_settings_dict)
sampler2 = mcmc_sampler(tune_dict=tune_dict2,mcmc_settings_dict=mcmc_meta,tune_settings_dict=tune_settings_dict)
sampler3 = mcmc_sampler(tune_dict=tune_dict3,mcmc_settings_dict=mcmc_meta,tune_settings_dict=tune_settings_dict)

sampler1.start_sampling()
sampler2.start_sampling()
sampler3.start_sampling()


mcmc_samples1 = sampler1.get_samples(permuted=False)

out = check_mean_var_stan(mcmc_samples=mcmc_samples1,correct_mean=correct_mean,correct_cov=correct_cov,diag_only=False)
mean_check,cov_check = out["mcmc_mean"],out["mcmc_Cov"]
pc_mean,pc_cov = out["pc_of_mean"],out["pc_of_cov"]
print(mean_check)
print(cov_check)
Example #12
    "evolve_L": [10],
    "metric_name": ["unit_e"],
    "dynamic": [False],
    "windowed": [False],
    "criterion": [None]
}

tune_dict = tuneinput_class(input_dict).singleton_tune_dict()
#print(tuneable_obj.tune_param_grid.size)
#print(tuneable_obj.singleton_tune_dict())

#tune_dict = tuneable_obj.tune_param_grid
#print(tuneable_obj.tune_param_grid.shape)
#exit()
#initialization_obj = initialization(v_obj1)
sampler1 = mcmc_sampler(tune_dict, mcmc_meta1)

#sampler1.prepare_chains()

out = sampler1.start_sampling()
print(out[0])
print(out[1])

print(sampler1.store_chains[0]["chain_obj"].store_samples)
exit()
#print(sampler1.store_chains[0]["chain_obj"].run())

out1 = sampler1.run(0)
#out2= sampler1.run(1)

print(out1)
Example #13
def generate_q_list(v_fun, num_of_pts):
    # draw num_of_pts position (q) points from a short MCMC run of v_fun
    mcmc_meta = mcmc_sampler_settings_dict(mcmc_id=0, samples_per_chain=200, num_chains=4, num_cpu=4, thin=1,
                                           tune_l_per_chain=100,
                                           warmup_per_chain=110, is_float=False, isstore_to_disk=False,allow_restart=True)
    input_dict = {"v_fun": [v_fun], "epsilon": ["dual"], "second_order": [False],
                  "metric_name": ["unit_e"], "dynamic": [True], "windowed": [False],"max_tree_depth":[6],
                  "criterion": ["xhmc"],"xhmc_delta":[0.1]}

    ep_dual_metadata_argument = {"name": "epsilon", "target": 0.65, "gamma": 0.05, "t_0": 10,
                                 "kappa": 0.75, "obj_fun": "accept_rate", "par_type": "fast"}

    dim = len(v_fun(precision_type="torch.DoubleTensor").flattened_tensor)
    #adapt_cov_arguments = [adapt_cov_default_arguments(par_type="slow", dim=dim)]
    dual_args_list = [ep_dual_metadata_argument]
    other_arguments = other_default_arguments()

    tune_settings_dict = tuning_settings(dual_args_list, [], [], other_arguments)

    tune_dict = tuneinput_class(input_dict).singleton_tune_dict()

    sampler1 = mcmc_sampler(tune_dict=tune_dict, mcmc_settings_dict=mcmc_meta, tune_settings_dict=tune_settings_dict)

    out = sampler1.start_sampling()
    sampler1.remove_failed_chains()

    print("num chains removed {}".format(sampler1.metadata.num_chains_removed))
    print("num restarts {}".format(sampler1.metadata.num_restarts))

    samples = sampler1.get_samples(permuted=True)

    num_mcmc_samples = samples.shape[0]
    indices = numpy.random.choice(a=num_mcmc_samples,size=num_of_pts,replace=False)

    chosen_samples = samples[indices,:]
    list_q_double = [None]*num_of_pts
    #list_p_double = [None]*num_of_pts

    for i in range(num_of_pts):
        q_point = point(V=v_fun(precision_type="torch.DoubleTensor"))
        flattened_tensor = torch.from_numpy(chosen_samples[i,:]).type("torch.DoubleTensor")
        q_point.flattened_tensor.copy_(flattened_tensor)
        q_point.load_flatten()
        #p_point = point(list_tensor=q_point.list_tensor,pointtype="p",need_flatten=q_point.need_flatten)
        # p_point.flattened_tensor.normal_()
        # p_point.load_flatten()
        list_q_double[i] = q_point
        #list_p_double[i] = p_point

    list_q_float = [None] * num_of_pts
    #list_p_float = [None] * num_of_pts

    for i in range(num_of_pts):
        q_point = point(V=v_fun(precision_type="torch.FloatTensor"))
        flattened_tensor = torch.from_numpy(chosen_samples[i, :]).type("torch.FloatTensor")
        q_point.flattened_tensor.copy_(flattened_tensor)
        q_point.load_flatten()
        # p_point = point(list_tensor=q_point.list_tensor,pointtype="p",need_flatten=q_point.need_flatten)
        # p_point.flattened_tensor.normal_()
        # p_point.load_flatten()
        list_q_float[i] = q_point


    out = {"list_q_double":list_q_double,"list_q_float":list_q_float}
    return(out)
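
Usage sketch: draw a handful of starting points from a short pilot run; v_fun must be a constructor taking precision_type, as elsewhere in these examples, and V_pima_indian_logit is assumed importable.

pts = generate_q_list(V_pima_indian_logit, num_of_pts=10)
q0 = pts["list_q_double"][0]
print(q0.flattened_tensor.shape)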
Example #14
    "gamma": 0.05,
    "t_0": 10,
    "kappa": 0.75,
    "obj_fun": "accept_rate",
    "par_type": "fast"
}

dim = len(v_fun(precision_type="torch.DoubleTensor").flattened_tensor)
adapt_cov_arguments = [adapt_cov_default_arguments(par_type="slow", dim=dim)]
dual_args_list = [ep_dual_metadata_argument]
other_arguments = other_default_arguments()

tune_settings_dict = tuning_settings(dual_args_list, [], adapt_cov_arguments,
                                     other_arguments)

tune_dict = tuneinput_class(input_dict).singleton_tune_dict()

sampler_double = mcmc_sampler(tune_dict=tune_dict,
                              mcmc_settings_dict=mcmc_meta_double,
                              tune_settings_dict=tune_settings_dict)

sampler_double.start_sampling()

double_samples = sampler_double.get_samples(permuted=False)

print(double_samples[0, 100, 2])
print(double_samples[1, 100, 2])

short_diagnostics_double = get_short_diagnostics(double_samples)

print(short_diagnostics_double)