Example 1
def min_ess_gnuts(v_fun, ep):
    # run dynamic GNUTS at a fixed step size ep and return min-ESS / ESJD summaries
    mcmc_meta = mcmc_sampler_settings_dict(mcmc_id=0,
                                           samples_per_chain=10000,
                                           num_chains=4,
                                           num_cpu=1,
                                           thin=1,
                                           tune_l_per_chain=0,
                                           warmup_per_chain=1000,
                                           is_float=False,
                                           isstore_to_disk=False,
                                           allow_restart=True,
                                           max_num_restarts=5)
    tune_settings_dict = tuning_settings([], [], [], [])
    input_dict = {
        "v_fun": [v_fun],
        "epsilon": [ep],
        "second_order": [False],
        "metric_name": ["unit_e"],
        "dynamic": [True],
        "windowed": [None],
        "criterion": ["gnuts"]
    }

    tune_dict = tuneinput_class(input_dict).singleton_tune_dict()
    sampler = mcmc_sampler(tune_dict=tune_dict,
                           mcmc_settings_dict=mcmc_meta,
                           tune_settings_dict=tune_settings_dict)
    sampler.start_sampling()
    out = get_min_ess_and_esjds(ran_sampler=sampler)
    return out
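
A minimal usage sketch, assuming a potential class such as V_pima_indian_logit (used in later examples) and an illustrative fixed step size; both are assumptions, not part of the original example:

ess_out = min_ess_gnuts(v_fun=V_pima_indian_logit, ep=0.1)  # hypothetical v_fun and step size
print(ess_out)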
Example 2
def match_convergence_test(v_fun, seed, correct_mean, correct_cov):
    # run the same sampler in double and in float precision and compare moment diagnostics
    mcmc_meta_double = mcmc_sampler_settings_dict(mcmc_id=0, samples_per_chain=2000, num_chains=4, num_cpu=4, thin=1,
                                                  tune_l_per_chain=1000,
                                                  warmup_per_chain=1100, is_float=False, isstore_to_disk=False,
                                                  allow_restart=True, seed=seed)
    mcmc_meta_float = mcmc_sampler_settings_dict(mcmc_id=0, samples_per_chain=2000, num_chains=4, num_cpu=4, thin=1,
                                                 tune_l_per_chain=1000,
                                                 warmup_per_chain=1100, is_float=True, isstore_to_disk=False,
                                                 allow_restart=True, seed=seed)

    input_dict = {"v_fun": [v_fun], "epsilon": ["dual"], "second_order": [False], "cov": ["adapt"],
                  "metric_name": ["diag_e"], "dynamic": [True], "windowed": [False],
                  "criterion": ["gnuts"]}
    ep_dual_metadata_argument = {"name": "epsilon", "target": 0.8, "gamma": 0.05, "t_0": 10,
                                 "kappa": 0.75, "obj_fun": "accept_rate", "par_type": "fast"}

    dim = len(v_fun(precision_type="torch.DoubleTensor").flattened_tensor)
    adapt_cov_arguments = [adapt_cov_default_arguments(par_type="slow", dim=dim)]
    dual_args_list = [ep_dual_metadata_argument]
    other_arguments = other_default_arguments()

    tune_settings_dict = tuning_settings(dual_args_list, [], adapt_cov_arguments, other_arguments)

    tune_dict = tuneinput_class(input_dict).singleton_tune_dict()

    sampler_double = mcmc_sampler(tune_dict=tune_dict, mcmc_settings_dict=mcmc_meta_double,
                                  tune_settings_dict=tune_settings_dict)

    sampler_float = mcmc_sampler(tune_dict=tune_dict, mcmc_settings_dict=mcmc_meta_float,
                                 tune_settings_dict=tune_settings_dict)

    sampler_double.start_sampling()

    sampler_float.start_sampling()

    sampler_double.remove_failed_chains()

    sampler_float.remove_failed_chains()

    float_samples = sampler_float.get_samples(permuted=False)
    double_samples = sampler_double.get_samples(permuted=False)
    short_diagnostics_float = check_mean_var_stan(mcmc_samples=float_samples,
                                                  correct_mean=correct_mean.astype(numpy.float32),
                                                  correct_cov=correct_cov.astype(numpy.float32))
    short_diagnostics_double = check_mean_var_stan(mcmc_samples=double_samples, correct_mean=correct_mean,
                                                   correct_cov=correct_cov)

    samples_double_cast_to_float = double_samples.astype(numpy.float32)
    combined_samples = numpy.concatenate([samples_double_cast_to_float, float_samples], axis=0)
    short_diagnostics_combined = check_mean_var_stan(mcmc_samples=combined_samples,
                                                     correct_mean=correct_mean.astype(numpy.float32),
                                                     correct_cov=correct_cov.astype(numpy.float32))

    out = {"diag_combined_mean": short_diagnostics_combined["pc_of_mean"], "diag_float_mean": short_diagnostics_float["pc_of_mean"],
           "diag_double_mean": short_diagnostics_double["pc_of_mean"],"diag_combined_cov":short_diagnostics_combined["pc_of_cov"],
           "diag_float_cov":short_diagnostics_float["pc_of_cov"],"diag_double_cov":short_diagnostics_double["pc_of_cov"]}
    return(out)
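
A hedged usage sketch; the reference moments are assumed to come from a long-run pickle such as result_from_long_chain.pkl loaded in Example 15, and V_pima_indian_logit is an assumed target:

correct = pickle.load(open("result_from_long_chain.pkl", "rb"))  # assumed reference file
diag = match_convergence_test(v_fun=V_pima_indian_logit, seed=1,
                              correct_mean=correct["correct_mean"],
                              correct_cov=correct["correct_cov"])
print(diag["diag_float_mean"], diag["diag_double_mean"])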
Example 3
def run_nn_experiment(xhmc_delta_list, input_data, v_fun, test_set, type_problem):
    # compare XHMC at each delta against a GNUTS baseline (the final iteration) on a neural-net posterior
    out_list = [None]*(len(xhmc_delta_list)+1)
    for i in range(len(out_list)):
        model_dict = {"num_units": 50}

        v_generator = wrap_V_class_with_input_data(class_constructor=v_fun, input_data=input_data,
                                                   model_dict=model_dict)

        mcmc_meta = mcmc_sampler_settings_dict(mcmc_id=0, samples_per_chain=2000, num_chains=4, num_cpu=4, thin=1,
                                               tune_l_per_chain=1000,
                                               warmup_per_chain=1100, is_float=False, isstore_to_disk=False,
                                               allow_restart=False)


        if i<len(out_list)-1:
            input_dict = {"v_fun": [v_generator], "epsilon": ["dual"], "second_order": [False], "cov": ["adapt"],
                          "max_tree_depth": [8],"xhmc_delta":[xhmc_delta_list[i]],
                          "metric_name": ["diag_e"], "dynamic": [True], "windowed": [False], "criterion": ["xhmc"]}
        else:
            input_dict = {"v_fun": [v_generator], "epsilon": ["dual"], "second_order": [False], "cov": ["adapt"],
                          "max_tree_depth": [8],
                          "metric_name": ["diag_e"], "dynamic": [True], "windowed": [False], "criterion": ["gnuts"]}

        ep_dual_metadata_argument = {"name": "epsilon", "target": 0.9, "gamma": 0.05, "t_0": 10,
                                     "kappa": 0.75, "obj_fun": "accept_rate", "par_type": "fast"}
        adapt_cov_arguments = [adapt_cov_default_arguments(par_type="slow", dim=v_generator(
            precision_type="torch.DoubleTensor").get_model_dim())]
        dual_args_list = [ep_dual_metadata_argument]
        other_arguments = other_default_arguments()
        # tune_settings_dict = tuning_settings([],[],[],[])
        tune_settings_dict = tuning_settings(dual_args_list, [], adapt_cov_arguments, other_arguments)
        tune_dict = tuneinput_class(input_dict).singleton_tune_dict()

        sampler1 = mcmc_sampler(tune_dict=tune_dict, mcmc_settings_dict=mcmc_meta,
                                tune_settings_dict=tune_settings_dict)
        sampler1.start_sampling()

        diagnostics_np = sampler1.np_diagnostics()
        samples_mixed = sampler1.get_samples(permuted=True)
        te = test_error(target_dataset=test_set, v_obj=v_generator(precision_type="torch.DoubleTensor"),
                        mcmc_samples=samples_mixed, type=type_problem)

        out = {"test_error":te,"diagnostics":diagnostics_np}
        out_list[i] = out


    te_store = numpy.zeros(len(out_list))
    diagnostics_store = numpy.zeros(shape=[len(out_list)]+list(diagnostics_np.shape))
    for i in range(len(out_list)):
        te_store[i] = out_list[i]["test_error"]
        diagnostics_store[i,...] = out_list[i]["diagnostics"]

    output_store = {"test_error":te_store,"diagnostics":diagnostics_store}
    save_name = "xhmc_v_gnuts_8x8mnist.npz"
    numpy.savez(save_name,**output_store)
    return save_name
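
A hypothetical invocation; train_set/test_set and their format are assumptions, with V_fc_model_4 borrowed from later examples:

saved = run_nn_experiment(xhmc_delta_list=[0.05, 0.1, 0.2], input_data=train_set,
                          v_fun=V_fc_model_4, test_set=test_set, type_problem="classification")
print("results written to", saved)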
Example 4
def opt_experiment_ep_t(v_fun_list, ep_list, evolve_L_list, num_of_opt_steps,
                        objective, input_dict, max_L):
    # Given lists of v_fun values, step sizes (epsilon), and trajectory lengths (evolve_L), a number of
    # Bayesian-optimization steps, an objective-function name, and an input dict, find an optimal
    # (ep, L) pair by repeatedly sampling with different combinations and comparing the objective.
    assert objective in ("median_ess_normalized", "max_ess_normalized",
                         "min_ess_normalized", "median_ess", "max_ess",
                         "min_ess", "esjd", "esjd_normalized")

    num_grid_divides = len(ep_list)

    ep_bounds = [ep_list[0], ep_list[-1]]
    evolve_L_bounds = [evolve_L_list[0], evolve_L_list[-1]]

    chosen_init = [
        ep_list[int(numpy.random.choice(num_grid_divides))],
        evolve_L_list[int(numpy.random.choice(num_grid_divides))]
    ]

    this_opt_state = opt_state(bounds=[ep_bounds, evolve_L_bounds],
                               init=chosen_init)
    cur_ep = chosen_init[0]
    cur_evolve_L = chosen_init[1]
    for j in range(num_of_opt_steps):
        mcmc_meta = mcmc_sampler_settings_dict(mcmc_id=0,
                                               samples_per_chain=300,
                                               num_chains=4,
                                               num_cpu=4,
                                               thin=1,
                                               tune_l_per_chain=0,
                                               warmup_per_chain=150,
                                               is_float=False,
                                               isstore_to_disk=False,
                                               allow_restart=True,
                                               max_num_restarts=5)
        tune_settings_dict = tuning_settings([], [], [], [])
        input_dict.update({"epsilon": [cur_ep], "evolve_L": [cur_evolve_L]})
        tune_dict = tuneinput_class(input_dict).singleton_tune_dict()

        sampler = mcmc_sampler(tune_dict=tune_dict,
                               mcmc_settings_dict=mcmc_meta,
                               tune_settings_dict=tune_settings_dict)
        sampler.start_sampling()
        out = get_ess_and_esjds(ran_sampler=sampler)

        #L = max(1,round(cur_evolve_t/cur_ep))
        # need to use actual number of transitions
        this_opt_state.update(new_y=-out[objective])
        cur_ep = this_opt_state.X_step[-1][0]
        cur_evolve_L = round(this_opt_state.X_step[-1][1])

    return this_opt_state
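
A sketch of how this optimizer might be driven; the grids, target function, and max_L below are illustrative assumptions (the epsilon and evolve_L entries of input_dict are overwritten inside the function):

ep_grid = list(numpy.linspace(1e-2, 0.1, 5))
L_grid = list(numpy.linspace(5, 50, 5))  # same length as ep_grid, as the init sampling assumes
base_input_dict = {"v_fun": [V_pima_indian_logit], "second_order": [False],
                   "metric_name": ["unit_e"], "dynamic": [False],
                   "windowed": [False], "criterion": [None]}
final_state = opt_experiment_ep_t(v_fun_list=[V_pima_indian_logit], ep_list=ep_grid,
                                  evolve_L_list=L_grid, num_of_opt_steps=10,
                                  objective="median_ess", input_dict=base_input_dict, max_L=100)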
Example 5
def choose_optimal_L(v_fun, fixed_ep, L_list, windowed):
    # grid-search over trajectory lengths L at a fixed step size; pick the best by median ESS

    store_median_ess = [None] * len(L_list)

    for i in range(len(L_list)):
        L = L_list[i]
        mcmc_meta = mcmc_sampler_settings_dict(mcmc_id=0,
                                               samples_per_chain=2000,
                                               num_chains=4,
                                               num_cpu=4,
                                               thin=1,
                                               tune_l_per_chain=0,
                                               warmup_per_chain=1000,
                                               is_float=False,
                                               isstore_to_disk=False,
                                               allow_restart=True,
                                               max_num_restarts=5)

        tune_settings_dict = tuning_settings([], [], [], [])
        input_dict = {
            "v_fun": [v_fun],
            "epsilon": [fixed_ep],
            "second_order": [False],
            "evolve_L": [L],
            "metric_name": ["unit_e"],
            "dynamic": [False],
            "windowed": [windowed],
            "criterion": [None]
        }

        tune_dict = tuneinput_class(input_dict).singleton_tune_dict()
        sampler = mcmc_sampler(tune_dict=tune_dict,
                               mcmc_settings_dict=mcmc_meta,
                               tune_settings_dict=tune_settings_dict)
        sampler.start_sampling()
        out = get_ess_and_esjds(ran_sampler=sampler)
        #samples = sampler.get_samples(permuted=False)

        store_median_ess[i] = out["median_ess"]

    best_median_ess = max(store_median_ess)
    best_L = L_list[numpy.argmax(store_median_ess)]

    out = {"best_median_ess": best_median_ess, "best_L": best_L}
    return out
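
A short hypothetical usage; the grid of trajectory lengths, step size, and target are assumptions:

L_grid = [5, 10, 20, 40]  # illustrative trajectory lengths
best = choose_optimal_L(v_fun=V_pima_indian_logit, fixed_ep=0.1, L_list=L_grid, windowed=False)
print(best["best_L"], best["best_median_ess"])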
Example 6
def setup_xhmc_gnuts_experiment(xhmc_delta_list, train_set, test_set, save_name, seed=1):
    # append 0 as the GNUTS baseline, then benchmark XHMC at each delta against it
    xhmc_delta_list.append(0)
    output_names = ["train_error", "test_error","train_error_sd","test_error_sd","min_ess","median_ess"]
    output_store = numpy.zeros((len(xhmc_delta_list), len(output_names)))

    diagnostics_store = numpy.zeros(shape=[len(xhmc_delta_list)]+[4,13])
    prior_dict = {"name": "normal"}
    model_dict = {"num_units": 35}
    time_list = []
    for i in range(len(xhmc_delta_list)):
        start_time = time.time()
        v_fun = V_fc_model_4


        v_generator = wrap_V_class_with_input_data(class_constructor=v_fun, input_data=train_set,prior_dict=prior_dict,
                                                   model_dict=model_dict)
        mcmc_meta = mcmc_sampler_settings_dict(mcmc_id=0, samples_per_chain=2000, num_chains=4, num_cpu=4, thin=1,
                                               tune_l_per_chain=900,
                                               warmup_per_chain=1000, is_float=False, isstore_to_disk=False,
                                               allow_restart=True,seed=seed+i+1)

        if i == len(xhmc_delta_list)-1:
            input_dict = {"v_fun": [v_generator], "epsilon": ["dual"], "second_order": [False], "cov": ["adapt"],
                          "max_tree_depth": [8],
                          "metric_name": ["diag_e"], "dynamic": [True], "windowed": [False], "criterion": ["gnuts"]}
        else:
            input_dict = {"v_fun": [v_generator], "epsilon": ["dual"], "second_order": [False], "cov": ["adapt"],
                          "max_tree_depth": [8],
                          "metric_name": ["diag_e"], "dynamic": [True], "windowed": [False], "criterion": ["xhmc"],"xhmc_delta":[xhmc_delta_list[i]]}

        ep_dual_metadata_argument = {"name": "epsilon", "target": 0.9, "gamma": 0.05, "t_0": 10,
                                     "kappa": 0.75, "obj_fun": "accept_rate", "par_type": "fast"}
        adapt_cov_arguments = [adapt_cov_default_arguments(par_type="slow", dim=v_generator(
            precision_type="torch.DoubleTensor").get_model_dim())]
        dual_args_list = [ep_dual_metadata_argument]
        other_arguments = other_default_arguments()
        tune_settings_dict = tuning_settings(dual_args_list, [], adapt_cov_arguments, other_arguments)
        tune_dict = tuneinput_class(input_dict).singleton_tune_dict()

        sampler1 = mcmc_sampler(tune_dict=tune_dict, mcmc_settings_dict=mcmc_meta, tune_settings_dict=tune_settings_dict)

        sampler1.start_sampling()
        total_time = time.time() - start_time
        np_diagnostics,feature_names = sampler1.np_diagnostics()

        mcmc_samples_mixed = sampler1.get_samples(permuted=True)
        te, predicted,te_sd = test_error(test_set, v_obj=v_generator(precision_type="torch.DoubleTensor"),
                                     mcmc_samples=mcmc_samples_mixed, type="classification", memory_efficient=False)
        train_error,_,train_error_sd = test_error(train_set, v_obj=v_generator(precision_type="torch.DoubleTensor"),
                                     mcmc_samples=mcmc_samples_mixed, type="classification", memory_efficient=False)

        output_store[i,0] = train_error
        output_store[i,1] = te
        output_store[i,2] = train_error_sd
        output_store[i,3] = te_sd



        diagnostics_store[i,:,:] = np_diagnostics
        output_store[i,4] = np_diagnostics[0,10]  # min_ess column of the diagnostics
        output_store[i,5] = np_diagnostics[0,11]  # median_ess column
        time_list.append(total_time)



    to_store = {"diagnostics":diagnostics_store,"output":output_store,"diagnostics_names":feature_names,
                "output_names":output_names,"seed":seed,"xhmc_delta_list":xhmc_delta_list,"prior":prior_dict["name"],
                "num_units":model_dict["num_units"],"time_list":time_list}

    numpy.savez(save_name,**to_store)


    return
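
A hedged call sketch; the delta grid, datasets, and file name are assumptions (a 0 delta is appended internally as the GNUTS baseline):

setup_xhmc_gnuts_experiment(xhmc_delta_list=[0.01, 0.05, 0.1],
                            train_set=train_set, test_set=test_set,
                            save_name="xhmc_vs_gnuts_fc.npz", seed=1)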
Example 7
# input_dict = {"v_fun":[V_pima_inidan_logit],"epsilon":[0.1],"second_order":[False],
#                "evolve_L":[10],"metric_name":["unit_e"],"dynamic":[False],"windowed":[False],"criterion":[None]}

input_dict = {"v_fun":[v_generator],"epsilon":["dual"],"second_order":[False],"cov":["adapt"],"max_tree_depth":[8],
               "metric_name":["diag_e"],"dynamic":[True],"windowed":[False],"criterion":["gnuts"]}
# input_dict = {"v_fun":[v_generator],"epsilon":[0.1],"second_order":[False],"evolve_L":[10],
#               "metric_name":["unit_e"],"dynamic":[False],"windowed":[False],"criterion":[None]}
ep_dual_metadata_argument = {"name":"epsilon","target":0.8,"gamma":0.05,"t_0":10,
                         "kappa":0.75,"obj_fun":"accept_rate","par_type":"fast"}
adapt_cov_arguments = [adapt_cov_default_arguments(par_type="slow",dim=v_generator(precision_type="torch.DoubleTensor").get_model_dim())]
dual_args_list = [ep_dual_metadata_argument]
other_arguments = other_default_arguments()
#tune_settings_dict = tuning_settings([],[],[],[])
tune_settings_dict = tuning_settings(dual_args_list,[],adapt_cov_arguments,other_arguments)
tune_dict = tuneinput_class(input_dict).singleton_tune_dict()

sampler1 = mcmc_sampler(tune_dict=tune_dict,mcmc_settings_dict=mcmc_meta,tune_settings_dict=tune_settings_dict)


store_name = 'student2_fc1_cp_sampler.pkl'
sampled = False  # set True to reload a previously pickled sampler instead of re-sampling
if sampled:
    sampler1 = pickle.load(open(store_name, 'rb'))
else:
    sampler1.start_sampling()
    with open(store_name, 'wb') as f:
        pickle.dump(sampler1, f)
#out = sampler1.start_sampling()

Example 8
def setup_gibbs_v_joint_experiment(num_units_list,
                                   train_set,
                                   test_set,
                                   num_samples,
                                   save_name,
                                   seed=1):
    output_names = [
        "train_error", "test_error", "train_error_sd", "test_error_sd",
        "sigma_2_ess", "mean_sigma2", "median_sigma2", "min_ess", "median_ess"
    ]
    output_store = numpy.zeros((len(num_units_list), 3, len(output_names)))

    diagnostics_store = numpy.zeros(shape=[len(num_units_list), 3] + [4, 13])
    time_store = numpy.zeros(shape=[len(num_units_list), 3])
    for i in range(len(num_units_list)):
        # j == 0/1: HMC with gaussian_inv_gamma priors; j == 2: Gibbs alternation of weights and sigma2
        for j in range(3):
            start_time = time.time()
            v_fun = V_fc_model_4
            model_dict = {"num_units": num_units_list[i]}

            mcmc_meta = mcmc_sampler_settings_dict(mcmc_id=0,
                                                   samples_per_chain=1000 +
                                                   num_samples,
                                                   num_chains=4,
                                                   num_cpu=4,
                                                   thin=1,
                                                   tune_l_per_chain=900,
                                                   warmup_per_chain=1000,
                                                   is_float=False,
                                                   isstore_to_disk=False,
                                                   allow_restart=True,
                                                   seed=seed + i + 1)
            if j == 2:
                v_generator = wrap_V_class_with_input_data(
                    class_constructor=V_fc_gibbs_model_1,
                    input_data=train_set,
                    model_dict=model_dict)
                v_obj = v_generator(precision_type="torch.DoubleTensor",
                                    gibbs=True)
                metric_obj = metric(name="unit_e", V_instance=v_obj)
                Ham = Hamiltonian(v_obj, metric_obj)

                init_q_point = point(V=v_obj)
                init_hyperparam = torch.abs(torch.randn(1)) + 3
                log_obj = log_class()

                dim = len(init_q_point.flattened_tensor)
                mcmc_samples_weight = torch.zeros(1, num_samples + 1000, dim)
                mcmc_samples_hyper = torch.zeros(1, num_samples + 1000, 1)
                for it in range(num_samples + 1000):  # avoid shadowing the builtin iter
                    print("iter {}".format(it))
                    outq, out_hyperparam = update_param_and_hyperparam_dynamic_one_step(
                        init_q_point, init_hyperparam, Ham, 0.01, log_obj)
                    init_q_point.flattened_tensor.copy_(outq.flattened_tensor)
                    init_q_point.load_flatten()
                    init_hyperparam = out_hyperparam
                    mcmc_samples_weight[0, it, :] = outq.flattened_tensor.clone()
                    mcmc_samples_hyper[0, it, 0] = out_hyperparam

                mcmc_samples_weight = mcmc_samples_weight[:, 1000:, :].numpy()
                mcmc_samples_hyper = mcmc_samples_hyper[:, 1000:, :].numpy()

                te, predicted, te_sd = test_error(
                    test_set,
                    v_obj=v_generator(precision_type="torch.DoubleTensor"),
                    mcmc_samples=mcmc_samples_weight[0, :, :],
                    type="classification",
                    memory_efficient=False)
                train_error, _, train_error_sd = test_error(
                    train_set,
                    v_obj=v_generator(precision_type="torch.DoubleTensor"),
                    mcmc_samples=mcmc_samples_weight[0, :, :],
                    type="classification",
                    memory_efficient=False)
                sigma2_diagnostics = diagnostics_stan(mcmc_samples_hyper)
                sigma2_ess = sigma2_diagnostics["ess"]
                posterior_mean_hidden_in_sigma2 = numpy.mean(
                    mcmc_samples_hyper)
                posterior_median_hidden_in_sigma2 = numpy.median(
                    mcmc_samples_hyper)
                weight_ess = diagnostics_stan(mcmc_samples_weight)["ess"]

                min_ess = min(sigma2_ess, min(weight_ess))
                median_ess = numpy.median([sigma2_ess] + list(weight_ess))

                output_store[i, j, 0] = train_error
                output_store[i, j, 1] = te
                output_store[i, j, 2] = train_error_sd
                output_store[i, j, 3] = te_sd
                output_store[i, j, 4] = sigma2_ess
                output_store[i, j, 5] = posterior_mean_hidden_in_sigma2
                output_store[i, j, 6] = posterior_median_hidden_in_sigma2
                output_store[i, j, 7] = min_ess
                output_store[i, j, 8] = median_ess

            elif j == 0:
                prior_dict = {"name": "gaussian_inv_gamma_1"}
                v_generator = wrap_V_class_with_input_data(
                    class_constructor=v_fun,
                    input_data=train_set,
                    prior_dict=prior_dict,
                    model_dict=model_dict)

            elif j == 1:
                prior_dict = {"name": "gaussian_inv_gamma_2"}
                v_generator = wrap_V_class_with_input_data(
                    class_constructor=v_fun,
                    input_data=train_set,
                    prior_dict=prior_dict,
                    model_dict=model_dict)

            if j == 0 or j == 1:
                input_dict = {
                    "v_fun": [v_generator],
                    "epsilon": ["dual"],
                    "second_order": [False],
                    "max_tree_depth": [8],
                    "metric_name": ["unit_e"],
                    "dynamic": [True],
                    "windowed": [False],
                    "criterion": ["xhmc"],
                    "xhmc_delta": [0.1]
                }
                ep_dual_metadata_argument = {
                    "name": "epsilon",
                    "target": 0.9,
                    "gamma": 0.05,
                    "t_0": 10,
                    "kappa": 0.75,
                    "obj_fun": "accept_rate",
                    "par_type": "fast"
                }

                dual_args_list = [ep_dual_metadata_argument]
                other_arguments = other_default_arguments()
                tune_settings_dict = tuning_settings(dual_args_list, [], [],
                                                     other_arguments)
                tune_dict = tuneinput_class(input_dict).singleton_tune_dict()

                sampler1 = mcmc_sampler(tune_dict=tune_dict,
                                        mcmc_settings_dict=mcmc_meta,
                                        tune_settings_dict=tune_settings_dict)

                sampler1.start_sampling()

                np_diagnostics, feature_names = sampler1.np_diagnostics()

                mcmc_samples_hidden_in = sampler1.get_samples_alt(
                    prior_obj_name="hidden_in", permuted=False)
                samples = mcmc_samples_hidden_in["samples"]
                hidden_in_sigma2_indices = mcmc_samples_hidden_in[
                    "indices_dict"]["sigma2"]
                sigma2_diagnostics = diagnostics_stan(
                    samples[:, :, hidden_in_sigma2_indices])
                sigma2_ess = sigma2_diagnostics["ess"]

                posterior_mean_hidden_in_sigma2 = numpy.mean(
                    samples[:, :, hidden_in_sigma2_indices].reshape(
                        -1, len(hidden_in_sigma2_indices)),
                    axis=0)
                posterior_median_hidden_in_sigma2 = numpy.median(
                    samples[:, :, hidden_in_sigma2_indices].reshape(
                        -1, len(hidden_in_sigma2_indices)),
                    axis=0)

                mcmc_samples_mixed = sampler1.get_samples(permuted=True)
                te, predicted, te_sd = test_error(
                    test_set,
                    v_obj=v_generator(precision_type="torch.DoubleTensor"),
                    mcmc_samples=mcmc_samples_mixed,
                    type="classification",
                    memory_efficient=False)
                train_error, _, train_error_sd = test_error(
                    train_set,
                    v_obj=v_generator(precision_type="torch.DoubleTensor"),
                    mcmc_samples=mcmc_samples_mixed,
                    type="classification",
                    memory_efficient=False)

                output_store[i, j, 0] = train_error
                output_store[i, j, 1] = te
                output_store[i, j, 2] = train_error_sd
                output_store[i, j, 3] = te_sd
                output_store[i, j, 4] = sigma2_ess
                output_store[i, j, 5] = posterior_mean_hidden_in_sigma2
                output_store[i, j, 6] = posterior_median_hidden_in_sigma2

                diagnostics_store[i, j, :, :] = np_diagnostics
                output_store[i, j, 7] = np_diagnostics[0, 10]
                output_store[i, j, 8] = np_diagnostics[0, 11]

            total_time = time.time() - start_time
            time_store[i, j] = total_time

    to_store = {
        "diagnostics": diagnostics_store,
        "output": output_store,
        "diagnostics_names": feature_names,
        "output_names": output_names,
        "seed": seed,
        "num_units_list": num_units_list,
        "time_store": time_store
    }

    numpy.savez(save_name, **to_store)

    return
Example 9
correct_mean = correct["correct_mean"]
correct_cov = correct["correct_cov"]
correct_diag_cov = correct_cov.diagonal()

ideal_diag_cov = 1/(torch.from_numpy(correct_diag_cov).type("torch.FloatTensor"))

input_dict = {"v_fun":[V_pima_indian_logit],"epsilon":[0.1],"second_order":[False],
              "evolve_L":[10],"metric_name":["diag_e"],"dynamic":[False],"windowed":[False],"criterion":[None]}
input_dict2 = {"v_fun":[V_pima_indian_logit],"epsilon":[0.1],"second_order":[False],
              "evolve_L":[10],"metric_name":["diag_e"],"dynamic":[False],"windowed":[True],"criterion":[None]}

input_dict3 = {"v_fun":[V_pima_indian_logit],"epsilon":[0.1],"second_order":[False],"cov":[ideal_diag_cov],
              "evolve_L":[10],"metric_name":["diag_e"],"dynamic":[False],"windowed":[False],"criterion":[None]}
tune_settings_dict = tuning_settings([],[],[],[])

tune_dict = tuneinput_class(input_dict).singleton_tune_dict()
tune_dict2 = tuneinput_class(input_dict2).singleton_tune_dict()
tune_dict3 = tuneinput_class(input_dict3).singleton_tune_dict()

sampler1 = mcmc_sampler(tune_dict=tune_dict,mcmc_settings_dict=mcmc_meta,tune_settings_dict=tune_settings_dict)
sampler2 = mcmc_sampler(tune_dict=tune_dict2,mcmc_settings_dict=mcmc_meta,tune_settings_dict=tune_settings_dict)
sampler3 = mcmc_sampler(tune_dict=tune_dict3,mcmc_settings_dict=mcmc_meta,tune_settings_dict=tune_settings_dict)

sampler1.start_sampling()
sampler2.start_sampling()
sampler3.start_sampling()


mcmc_samples1 = sampler1.get_samples(permuted=False)

out = check_mean_var_stan(mcmc_samples=mcmc_samples1,correct_mean=correct_mean,correct_cov=correct_cov,diag_only=False)
Example 10
        max_num_restarts=5,
        num_cpu_per_sampler=4)

    input_dict_gnuts = {
        "v_fun": v_fun_list,
        "epsilon": ["dual"],
        "second_order": [False],
        "cov": ["adapt"],
        "metric_name": ["diag_e"],
        "dynamic": [True],
        "windowed": [False],
        "criterion": ["gnuts"],
        "max_tree_depth": [8]
    }

    input_object_gnuts = tuneinput_class(input_dict_gnuts)
    experiment_instance_gnuts = experiment(
        input_object=input_object_gnuts,
        experiment_setting=experiment_setting_gnuts,
        fun_per_sampler=target_fun)

    experiment_instance_gnuts.run()

    np_store, col_names, output_names = experiment_instance_gnuts.np_output()
    np_store_diagnostics, diagnostics_names = experiment_instance_gnuts.np_diagnostics()
    np_diagnostics_gnuts[i] = np_store_diagnostics
    np_store_gnuts[i] = np_store

np_store_gnuts = numpy.stack(np_store_gnuts, axis=0)
np_diagnostics_gnuts = numpy.stack(np_diagnostics_gnuts, axis=0)
Example 11
converted_t_bounds = (min(L_bounds)*min(ep_bounds),max(L_bounds)*max(ep_bounds))

ep_list = list(numpy.linspace(ep_bounds[0],ep_bounds[1],num_grid_divides))
evolve_L_list = list(numpy.linspace(L_bounds[0],L_bounds[1],num_grid_divides))
evolve_t_list = list(numpy.linspace(converted_t_bounds[0],converted_t_bounds[1],num_grid_divides))

#print(converted_t_bounds)

#####################################################################################################################################
experiment_setting_ep_L = experiment_setting_dict(chain_length=10000,num_chains_per_sampler=4,warm_up=1000,
                                             tune_l=0,allow_restart=True,max_num_restarts=5)

input_dict_ep_L = {"v_fun":v_fun_list,"epsilon":ep_list,"second_order":[False],
              "evolve_L":evolve_L_list,"metric_name":["unit_e"],"dynamic":[False],"windowed":[False],"criterion":[None]}

input_object_ep_L = tuneinput_class(input_dict_ep_L)
experiment_instance_ep_L = experiment(input_object=input_object_ep_L,experiment_setting=experiment_setting_ep_L,fun_per_sampler=function)

experiment_instance_ep_L.run()

result_grid_ep_L = experiment_instance_ep_L.experiment_result_grid_obj

##########################################################################################################################################
experiment_setting_ep_t = experiment_setting_dict(chain_length=10000,num_chains_per_sampler=4,warm_up=1000,
                                             tune_l=0,allow_restart=True,max_num_restarts=5)

input_dict_ep_t = {"v_fun":v_fun_list,"epsilon":ep_list,"second_order":[False],
              "evolve_t":evolve_t_list,"metric_name":["unit_e"],"dynamic":[False],"windowed":[False],"criterion":[None]}

input_object_ep_t = tuneinput_class(input_dict_ep_t)
experiment_instance_ep_t = experiment(input_object=input_object_ep_t,experiment_setting=experiment_setting_ep_t,fun_per_sampler=function)
Example 12
num_grid_divides = 2
ep_list = list(numpy.linspace(1e-2, 0.1, num_grid_divides))
evolve_t_list = list(numpy.linspace(0.15, 5.0, num_grid_divides))

v_fun_list = []  # populate with V-function constructors before running
input_dict = {
    "v_fun": v_fun_list,
    "epsilon": ep_list,
    "second_order": [False],
    "evolve_t": evolve_t_list,
    "metric_name": ["unit_e"],
    "dynamic": [False],
    "windowed": [False],
    "criterion": [None]
}

experiment_setting = experiment_setting_dict(chain_length=10000,
                                             num_repeat=20,
                                             num_chains_per_sampler=4,
                                             warm_up=1000,
                                             tune_l=0,
                                             save_name="temp_experiment.pkl")

input_object = tuneinput_class(input_dict)
experiment_instance = experiment(input_object=input_object,
                                 experiment_setting=experiment_setting,
                                 fun_per_sampler=function)

experiment_instance.run()
Example 13
def generate_q_list(v_fun, num_of_pts):
    # draw num_of_pts starting points (in double and float precision) from a short chain run on v_fun
    mcmc_meta = mcmc_sampler_settings_dict(mcmc_id=0, samples_per_chain=200, num_chains=4, num_cpu=4, thin=1,
                                           tune_l_per_chain=100,
                                           warmup_per_chain=110, is_float=False, isstore_to_disk=False,allow_restart=True)
    input_dict = {"v_fun": [v_fun], "epsilon": ["dual"], "second_order": [False],
                  "metric_name": ["unit_e"], "dynamic": [True], "windowed": [False],"max_tree_depth":[6],
                  "criterion": ["xhmc"],"xhmc_delta":[0.1]}

    ep_dual_metadata_argument = {"name": "epsilon", "target": 0.65, "gamma": 0.05, "t_0": 10,
                                 "kappa": 0.75, "obj_fun": "accept_rate", "par_type": "fast"}

    dim = len(v_fun(precision_type="torch.DoubleTensor").flattened_tensor)
    #adapt_cov_arguments = [adapt_cov_default_arguments(par_type="slow", dim=dim)]
    dual_args_list = [ep_dual_metadata_argument]
    other_arguments = other_default_arguments()

    tune_settings_dict = tuning_settings(dual_args_list, [], [], other_arguments)

    tune_dict = tuneinput_class(input_dict).singleton_tune_dict()

    sampler1 = mcmc_sampler(tune_dict=tune_dict, mcmc_settings_dict=mcmc_meta, tune_settings_dict=tune_settings_dict)

    sampler1.start_sampling()
    sampler1.remove_failed_chains()

    print("num chains removed {}".format(sampler1.metadata.num_chains_removed))
    print("num restarts {}".format(sampler1.metadata.num_restarts))

    samples = sampler1.get_samples(permuted=True)

    num_mcmc_samples = samples.shape[0]
    indices = numpy.random.choice(a=num_mcmc_samples,size=num_of_pts,replace=False)

    chosen_samples = samples[indices,:]
    list_q_double = [None]*num_of_pts
    #list_p_double = [None]*num_of_pts

    for i in range(num_of_pts):
        q_point = point(V=v_fun(precision_type="torch.DoubleTensor"))
        flattened_tensor = torch.from_numpy(chosen_samples[i,:]).type("torch.DoubleTensor")
        q_point.flattened_tensor.copy_(flattened_tensor)
        q_point.load_flatten()
        #p_point = point(list_tensor=q_point.list_tensor,pointtype="p",need_flatten=q_point.need_flatten)
        # p_point.flattened_tensor.normal_()
        # p_point.load_flatten()
        list_q_double[i] = q_point
        #list_p_double[i] = p_point

    list_q_float = [None] * num_of_pts
    #list_p_float = [None] * num_of_pts

    for i in range(num_of_pts):
        q_point = point(V=v_fun(precision_type="torch.FloatTensor"))
        flattened_tensor = torch.from_numpy(chosen_samples[i, :]).type("torch.FloatTensor")
        q_point.flattened_tensor.copy_(flattened_tensor)
        q_point.load_flatten()
        # p_point = point(list_tensor=q_point.list_tensor,pointtype="p",need_flatten=q_point.need_flatten)
        # p_point.flattened_tensor.normal_()
        # p_point.load_flatten()
        list_q_float[i] = q_point


    out = {"list_q_double":list_q_double,"list_q_float":list_q_float}
    return(out)
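
A brief usage sketch, again with an assumed potential class:

pts = generate_q_list(v_fun=V_pima_indian_logit, num_of_pts=10)  # hypothetical arguments
q0_double = pts["list_q_double"][0]  # point object in double precision
q0_float = pts["list_q_float"][0]    # the same draw in float precision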
Example 14
from experiments.experiment_obj import tuneinput_class, experiment

#input_dict = {"v_fun":[V_logistic_regression],"alpha":[0],"epsilon":[0.1,0.2,0.3],"second_order":[False],"Cov":[torch.zeros(2)]}

input_dict = {
    "v_fun": [V_logistic_regression],
    "epsilon": [0.1, 0.05],
    "second_order": [False],
    "evolve_L": [10],
    "metric_name": ["unit_e"],
    "dynamic": [False],
    "windowed": [False],
    "criterion": [None]
}

input_obj = tuneinput_class(input_dict)
#print(input_obj.__dict__["grid_shape"])
#exit()
exper_obj = experiment(input_object=input_obj)

exper_obj.run()
#print(len(exper_obj.id_to_multi_index))

print(exper_obj.id_to_multi_index[0])
print(exper_obj.id_to_multi_index[1])

print(exper_obj.store_grid_obj[exper_obj.id_to_multi_index[0]])
print(exper_obj.store_grid_obj[exper_obj.id_to_multi_index[1]])

#exit()
#print(exper_obj.store_grid_obj[0,0,0,0,0])
Example 15
from abstract.mcmc_sampler import mcmc_sampler, mcmc_sampler_settings_dict
from adapt_util.tune_param_classes.tune_param_setting_util import *
from experiments.experiment_obj import tuneinput_class
from experiments.experiment_obj import experiment,experiment_setting_dict

from experiments.correctdist_experiments.prototype import check_mean_var

num_per_model = 20
mcmc_meta = mcmc_sampler_settings_dict(mcmc_id=0,samples_per_chain=500,num_chains=1,num_cpu=1,thin=1,tune_l_per_chain=0,
                                   warmup_per_chain=100,is_float=False,isstore_to_disk=False)

input_dict = {"v_fun":[V_funnel_cp],"epsilon":[0.1],"alpha":[1e6,1e2],"second_order":[True],
              "evolve_L":[10],"metric_name":["softabs"],"dynamic":[False],"windowed":[False],"criterion":[None]}

input_dict2 = {"v_fun":[V_funnel_ncp],"epsilon":[0.1],"second_order":[False],
              "evolve_L":[10],"metric_name":["unit_e"],"dynamic":[False],"windowed":[False],"criterion":[None]}

input_obj  = tuneinput_class(input_dict)

input_obj2 = tuneinput_class(input_dict2)

experiment_setting = experiment_setting_dict(chain_length=10000,num_repeat=num_per_model)
experiment_obj = experiment(input_object=input_obj,experiment_setting=experiment_setting)

experiment_obj.run()

experiment_obj2 = experiment(input_object=input_obj2,experiment_setting=experiment_setting)

experiment_obj2.run()

    "second_order": [True],
    "xhmc_delta": [0.1],
    "metric_name": [
        "softabs", "softabs_diag", "softabs_outer_product",
        "softabs_diag_outer_product"
    ],
    "dynamic": [False],
    "windowed": [False],
    "criterion": ["xhmc"]
}

tune_settings_dict = tuning_settings([], [], [], [])

#tune_dict_fo  = tuneinput_class(input_dict_fo).singleton_tune_dict()
#tune_dict_so  = tuneinput_class(input_dict_so).singleton_tune_dict()
tune_dict_fo = tuneinput_class(input_dict_fo)
tune_dict_so = tuneinput_class(input_dict_so)

#sampler_fo = mcmc_sampler(tune_dict=tune_dict_fo,mcmc_settings_dict=mcmc_meta,tune_settings_dict=tune_settings_dict)
#out = sampler1.start_sampling()

#####################################################################################################################
mcmc_samples = sampler1.get_samples(permuted=True)
correct = pickle.load(open("result_from_long_chain.pkl", 'rb'))
correct_mean = correct["correct_mean"]
correct_cov = correct["correct_cov"]
correct_diag_cov = correct_cov.diagonal()

out = check_mean_var(mcmc_samples=mcmc_samples,
                     correct_mean=correct_mean,
                     correct_cov=correct_cov)