# Example #1 (0)
def blr(npx, npy):
    """Fit a Bayesian ridge regression on (npx, npy) and return the fitted
    model together with the log marginal likelihood of the training data.

    Returns
    -------
    (model, log_ml) : tuple
        The fitted ``BayesianRidge`` instance and the value of
        ``log_marginalized_likelihood`` evaluated at its learned
        precisions ``lambda_`` and ``alpha_``.
    """
    # fit_intercept=False: the underlying system is assumed linear, not affine
    model = BayesianRidge(fit_intercept=False, copy_X=True)
    model.fit(npx, npy)
    log_ml = log_marginalized_likelihood(model.lambda_, model.alpha_, npx, npy)
    return model, log_ml
# Example #2 (0)
    def log_evidence_of_new_data(self, X, Y):
        """Return the log evidence of new data (X, Y) under the fitted model.

        The evidence is the sum of a Gaussian residual term for the first
        output (with fixed noise variance ``self.noise_var1``) and the BLR
        marginal likelihood of the second output under ``self.blr_m``.
        """
        # First output: residual y1 = Y[:,0] - x2, scored under a zero-mean
        # Gaussian with variance self.noise_var1.
        resid = Y[:, 0] - X[:, 1]
        n = resid.shape[0]
        log_ev_y1 = -0.5 * (n * np.log(2. * np.pi * self.noise_var1)
                            + np.dot(resid, resid) / self.noise_var1)

        # Second output via the BLR model: x' = -sin(theta), y' = y2 - alpha3*u
        features = -np.sin(X[:, 0]).reshape(n, 1)
        targets = Y[:, 1] - alpha3 * X[:, 2]
        log_ev_y2 = log_marginalized_likelihood(
            self.blr_m.lambda_, self.blr_m.alpha_, features, targets)

        return log_ev_y1 + log_ev_y2
# Example #3 (0)
    def k_fold_cv(self, X, Y, k=10):
        """Estimate generalization quality by k-fold cross validation.

        For each fold the model is trained on the remaining k-1 folds, and
        the difference between the full-data log evidence and the
        training-data log evidence is accumulated; the average over the
        folds (the "PM_CV" score) is returned.

        Parameters
        ----------
        X, Y : ndarray
            Input/output data, one sample per row.
        k : int, optional
            Number of folds (default 10). Unlike the previous
            ``np.split``-based version, the sample count no longer has to
            be divisible by k.

        Returns
        -------
        float
            The averaged log-evidence difference.
        """
        n = X.shape[0]
        perm = np.random.permutation(n)
        # np.array_split tolerates n % k != 0, whereas np.split raises.
        folds = np.array_split(perm, k)
        ret_val = 0.

        for fold in folds:
            # Training indices = complement of the held-out fold. A boolean
            # mask is O(n); the previous per-element `i not in fold` test
            # against an ndarray was O(n^2).
            train_mask = np.ones(n, dtype=bool)
            train_mask[fold] = False
            temp_X = X[train_mask]
            temp_Y = Y[train_mask]

            # Log evidence of the first output on the training folds, with
            # the maximum-likelihood noise variance plugged in.
            temp_y1 = temp_Y[:, 0] - temp_X[:, 1]
            temp_noise_var1 = np.dot(temp_y1, temp_y1) / temp_y1.shape[0]
            temp_log_evidence_y1 = -0.5 * temp_y1.shape[0] * (
                1. + np.log(2. * np.pi * temp_noise_var1))

            # BLR part: x' = -sin(theta), y' = y2 - alpha3*u
            temp_blr_x = -np.sin(temp_X[:, 0]).reshape(temp_Y.shape[0], 1)
            temp_blr_y = temp_Y[:, 1] - alpha3 * temp_X[:, 2]
            temp_blr_m, temp_log_evidence_y2 = blr(temp_blr_x, temp_blr_y)

            train_ml = temp_log_evidence_y1 + temp_log_evidence_y2

            # Score the fold-trained model on the complete data set.
            y1 = Y[:, 0] - X[:, 1]
            total_log_evidence_y1 = -0.5 * (
                y1.shape[0] * np.log(2. * np.pi * temp_noise_var1) +
                np.dot(y1, y1) / temp_noise_var1)

            blr_x = -np.sin(X[:, 0]).reshape(Y.shape[0], 1)
            blr_y = Y[:, 1] - alpha3 * X[:, 2]
            total_log_evidence_y2 = log_marginalized_likelihood(
                temp_blr_m.lambda_, temp_blr_m.alpha_, blr_x, blr_y)

            total_ml = total_log_evidence_y1 + total_log_evidence_y2
            ret_val += total_ml - train_ml
            print(total_ml - train_ml, total_ml, train_ml)
        logger.log("k=", k)
        logger.log("PM_CV =", ret_val / (1. * k))
        return ret_val / (1. * k)
# test 1 (MAP estimation marginal likelihood)
print("\n\n###  test 1  ###")
w_map = test_la.map_estimator(y, X, mo_model, lam_true, prior_alpha, w_true)
print("approx_w_map  =", w_map)
#print("test_mo_ML =",test_la.approximate_log_marginal_likelihood(y,X,w_map,mo_model,lam_true,prior_alpha))
if key == 0:
    import marginal_likelihood_blr
    # Duplicate the data so the analytical single-output BLR reference sees
    # the same likelihood as the multi-output model.
    X_double = np.vstack([X, X])
    y_double = np.hstack([y.T[0], y.T[0]]).reshape(2 * n, 1)
    w_map_analytical = marginal_likelihood_blr.map_estimator(
        prior_alpha, lam_true[0], X_double, y_double).T[0]
    print("analytical_w_map=", w_map_analytical)
print("approx_log_ML      =",
      test_la.approximate_log_marginal_likelihood(y, X, w_map, mo_model,
                                                  lam_true, prior_alpha),
      ", approximated around w_map")
if key == 0:
    print("approx_log_ML      =",
          test_la.approximate_log_marginal_likelihood(
              y, X, w_map_analytical, mo_model, lam_true, prior_alpha),
          ", approximated around w_map_analytical")
    print("analytical_log_ML  =",
          marginal_likelihood_blr.log_marginalized_likelihood(
              prior_alpha, lam_true[0], X_double, y_double)[0])


# test 2 (empirical Bayes under Laplace approximation around the previously
# obtained MAP estimate)
print("\n\n###  test 2  ###")
lam_init = 0.1 * lam_true
alpha_init = 0.1 * prior_alpha
eb_lam, eb_alpha = test_la.empirical_bayes(y, X, w_map, mo_model,
                                           lam_init, alpha_init)
print("empirical_bayes_alpha =", eb_alpha)
print("empirical_bayes_lam =", eb_lam)
print("lam_true            =", lam_true)
# Example #5 (0)
prior_alpha = np.array([1.e-4])  # weight precision (hyperparameter)

# test 1 (MAP estimation marginal likelihood)
print("\n\n###  test 1  ###")
w_map = test_la.map_estimator(y, X, so_model, lam_true, prior_alpha, w_true)
print("approx_w_map  =", w_map)
#print("test_mo_ML =",test_la.approximate_log_marginal_likelihood(y,X,w_map,mo_model,lam_true,prior_alpha))
if key == 0:
    import marginal_likelihood_blr
    w_map_analytical = marginal_likelihood_blr.map_estimator(
        prior_alpha, lam_true, X, y).T[0]
    print("analytical_w_map=", w_map_analytical)

# Laplace approximation of the log marginal likelihood around w_map.
approx_ml = test_la.approximate_log_marginal_likelihood(
    y, X, w_map, so_model, lam_true, prior_alpha)
print("approx_log_ML      =", approx_ml, ", approximated around w_map")
if key == 0:
    # Same approximation around the analytical MAP estimate, compared with
    # the exact BLR marginal likelihood.
    approx_ml_analytical = test_la.approximate_log_marginal_likelihood(
        y, X, w_map_analytical, so_model, lam_true, prior_alpha)
    print("approx_log_ML      =", approx_ml_analytical,
          ", approximated around w_map_analytical")
    analytical_ml = marginal_likelihood_blr.log_marginalized_likelihood(
        prior_alpha, lam_true, X, y)[0]
    print("analytical_log_ML  =", analytical_ml)

print("test finish!")