Example #1
import numpy as np

def estimate_precision(XX):
    p = XX.shape[1]

    # Find the neighbors (i.e. the non-zero entries of the precision matrix)
    # by iterating over the columns of the data matrix (of dimension n x p)
    A = np.zeros([p,p])
    for i in range(p):
        # The i-th column is the response; the remaining columns are predictors
        X = np.delete(XX, i, 1)
        b = XX[:, i].reshape(-1, 1)       # response as an (n, 1) column vector
        grad = lasso_problem(X, b)
        theta0 = np.zeros([p-1, 1])       # initial point
        theta = subgradient_steepest_descent(None, grad, theta0,
                                             method='diminishing',
                                             max_iter=200)

        # Put the regression coefficients into row i, skipping the diagonal entry
        for j in range(theta.size):
            if j < i:
                A[i, j] = theta[j]
            else:
                A[i, j+1] = theta[j]   # shift by one to skip column i

    return A
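
lasso_problem and subgradient_steepest_descent are helpers from the surrounding module and are not shown here. The sketch below (with the hypothetical name lasso_problem_sketch and an assumed fixed penalty lam) illustrates one way such a subgradient oracle for the nodewise lasso regression could look:

# Minimal sketch, not the original implementation: a subgradient oracle for
#   f(theta) = 0.5 * ||X theta - b||^2 + lam * ||theta||_1
import numpy as np

def lasso_problem_sketch(X, b, lam=0.1):
    def grad(theta):
        # sign(theta) is a valid subgradient of ||theta||_1 (it is 0 at theta = 0);
        # the squared-error term is differentiable.
        return X.T.dot(X.dot(theta) - b) + lam * np.sign(theta)
    return grad

Using sign(theta) gives a subgradient that is valid everywhere, which is all the subgradient descent routine needs; the non-zero coefficients found for column i then mark the neighbors of variable i, matching the comment at the top of estimate_precision.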
Example #2
import numpy as np

def problem_2():
    n_observations, n_variables = 1000, 100
    X = generate_well_conditioned(n_observations, n_variables)

    for non_zero_count in [10, 25]:
        beta = generate_sparse_sample(n_variables, non_zero_count)

        penalty1, penalty2 = 0.1, 0.1

        func, grad = generate_subgradient_elastic_net(X, beta, penalty1, penalty2)

        x0 = np.ones([beta.shape[0], 1])

        print "non_zero_count:%d, with penalty1 %f, penalty2 %f" % (
                non_zero_count, penalty1, penalty2)

        estimated_beta, history = subgradient_steepest_descent(func, grad, x0, beta,
                method='diminishing', max_iter=1000)
        np.savetxt('problem_2_convergence'
                + '_%d_%.1f_%.1f' % (non_zero_count, penalty1, penalty2), history)
        np.savetxt('problem_2_beta'
                + '_%d_%.1f_%.1f' % (non_zero_count, penalty1, penalty2), estimated_beta)
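
generate_subgradient_elastic_net, generate_well_conditioned, and generate_sparse_sample are defined elsewhere in the codebase. A minimal sketch of what the generator could return, assuming a noiseless response y = X beta and the hypothetical name generate_subgradient_elastic_net_sketch (the real generator may construct the response differently, e.g. with added noise):

import numpy as np

def generate_subgradient_elastic_net_sketch(X, beta, penalty1, penalty2):
    # Assumed noiseless response built from the true coefficients.
    y = X.dot(beta)

    def func(w):
        # Elastic net objective:
        #   0.5*||Xw - y||^2 + penalty1*||w||_1 + 0.5*penalty2*||w||^2
        r = X.dot(w) - y
        return (0.5 * np.sum(r ** 2)
                + penalty1 * np.sum(np.abs(w))
                + 0.5 * penalty2 * np.sum(w ** 2))

    def grad(w):
        # sign(w) is a valid subgradient of the l1 term (0 at w = 0);
        # the least-squares and ridge terms are smooth.
        return X.T.dot(X.dot(w) - y) + penalty1 * np.sign(w) + penalty2 * w

    return func, grad

With such a (func, grad) pair, the call in problem_2 only needs a valid subgradient at each iterate; the 'diminishing' option presumably refers to a diminishing step size, the standard choice for subgradient methods on non-smooth objectives.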