Ejemplo n.º 1
0
def test_calibration():
    """Smoke-test calib.calibrate on a 1-D translation problem.

    Builds a structured vector field on a 1-D grid, translates it by 1.0,
    adds RKHS-correlated noise, then checks (by printing) how close the
    calibrated translation comes to the true one.
    """
    space = odl.uniform_discr(min_pt=[-1],
                              max_pt=[1],
                              shape=[128],
                              dtype='float32',
                              interp='linear')

    # Gaussian kernel with scale 0.2 defines the RKHS inner products below.
    kernel = get_kernel_gauss(space, 0.2)

    def product(f, g):
        return struct.scalar_product_structured(f, g, kernel)

    # Hand-picked control points and attached vectors (1-D, 4 points).
    points = np.array([[-0.75, 0.0, 0.2, 0.5]])
    vectors = np.array([[0.3, 0.0, 0, 1]])
    original = struct.create_structured(points, vectors)
    g = group.Translation(space)

    translation = np.array([1.0])
    translated = action.apply_element_to_field(g, translation, original)

    # RKHS-correlated noise: apply the Cholesky factor of (K + eps*I)
    # to white noise; eps regularizes the factorization.
    covariance_matrix = struct.make_covariance_matrix(space.points().T, kernel)
    noise_l2 = odl.phantom.noise.white_noise(space) * 0.05
    decomp = np.linalg.cholesky(covariance_matrix +
                                1e-5 * np.identity(len(covariance_matrix)))
    noise_rkhs = np.dot(decomp, noise_l2)

    get_unstructured = struct.get_from_structured_to_unstructured(
        space, kernel)
    noisy = space.tangent_bundle.element(
        get_unstructured(translated) + noise_rkhs)

    # FIX: parameter was named `struct`, shadowing the module alias.
    def act(element, structured_field):
        return action.apply_element_to_field(g, element, structured_field)

    result_calibration = calib.calibrate(original, noisy, g, act, product,
                                         struct.scalar_product_unstructured)

    print('real = {}, computed ={} , log diff = {}'.format(
        translation, result_calibration.x,
        np.log10(np.abs(translation[0] - result_calibration.x[0]))))
Ejemplo n.º 2
0
def test_make_covariance_matrix():
    """Compare make_covariance_matrix with a brute-force double loop."""

    def kernel(x, y):
        return np.sum(x + y, axis=0)

    dim, nb_points = 2, 3
    points = np.random.randn(dim, nb_points)

    # Reference Gram matrix built entry by entry from the kernel.
    expected = np.array(
        [[kernel(points[:, row], points[:, col])
          for col in range(nb_points)]
         for row in range(nb_points)])

    computed = structured_vector_fields.make_covariance_matrix(points, kernel)

    npt.assert_allclose(computed, expected)
Ejemplo n.º 3
0
def iterative_scheme(solve_regression, calibration, action, g, kernel,
                     field_list, sigma0, sigma1, points, nb_iteration):
    """Alternate calibration and regression to estimate a template field.

    Parameters follow the project's conventions: `solve_regression` and
    `calibration` are callables, `g` a group, `kernel` the RKHS kernel,
    `field_list` the observed vector fields, `sigma0`/`sigma1` regression
    weights, `points` a (dim, nb_points) array and `nb_iteration` the
    number of alternation steps.

    Returns ``[original, group_element_list]`` — the estimated structured
    field and the last list of calibrated group elements (empty when
    ``nb_iteration == 0``).
    """
    # NOTE(review): the `action` parameter is not used in this body;
    # kept for interface compatibility with callers.
    nb_data = len(field_list)
    eval_kernel = struct.make_covariance_matrix(points, kernel)
    dim, nb_points = points.shape

    # Initialization with a structured version of the first vector field
    # only (acknowledged in the original as "NOT GOOD").
    group_element_init = g.identity
    vectors_original = solve_regression(g, [group_element_init],
                                        [field_list[0]], sigma0, sigma1,
                                        points, eval_kernel)
    vectors_original_struct = struct.get_structured_vectors_from_concatenated(
        vectors_original, nb_points, dim)
    original = struct.create_structured(points, vectors_original_struct)
    get_unstructured_op = struct.get_from_structured_to_unstructured(
        field_list[0].space[0], kernel)
    get_unstructured_op(original).show('initialisation')

    # FIX: previously only bound inside the loop, so nb_iteration == 0
    # raised NameError at the return statement.
    group_element_list = []

    for k in range(nb_iteration):
        # Calibrate each datum against the current template, then map the
        # velocities to group elements via the exponential.
        velocity_list = calibrate_list(original, field_list, calibration)
        group_element_list = [
            g.exponential(velocity_list[i]) for i in range(nb_data)
        ]
        vectors_original = solve_regression(g, group_element_list, field_list,
                                            sigma0, sigma1, points,
                                            eval_kernel)
        vectors_original_struct = struct.get_structured_vectors_from_concatenated(
            vectors_original, nb_points, dim)
        original = struct.create_structured(points, vectors_original_struct)
        print('iteration {}'.format(k))
        get_unstructured_op(original).show('iteration {}'.format(k))

    return [original, group_element_list]
Ejemplo n.º 4
0
    # NOTE(review): fragment of a per-dataset loop body — the enclosing
    # `for i in ...` header and the definitions of vectors_list,
    # vector_fields_list, space, points, dim, nb_points, kernel, struct,
    # get_unstructured_op, structured_list and unstructured_list all live
    # outside this chunk.
    vectors = vectors_list[i].copy()
    #points, vectors = cmp.compute_pointsvectors_2articulations_nb(a_list[i], b_list[i], c_list[i], width, sigma, nb_ab, nb_ab_orth, nb_bc, nb_bc_orth)
    # Sample each component u of the i-th vector field at `points`
    # (one row per space dimension) — assumes vector_fields_list[i] is
    # indexed [x, y, component]; TODO confirm against its producer.
    eval_field = np.array([
        space.element(vector_fields_list[i][:, :, u]).interpolation(points)
        for u in range(dim)
    ]).copy()

    # Right-hand side: projection of the sampled field onto each canonical
    # basis vector at each point, flattened as dim*k0 + l0.
    vector_syst = np.zeros(dim * nb_points)
    basis = np.identity(dim)

    for k0 in range(nb_points):
        for l0 in range(dim):
            vector_syst[dim * k0 + l0] += np.dot(eval_field.T[k0], basis[:,
                                                                         l0])

    # Kernel Gram matrix K(points, points); system matrix is K ⊗ I_dim.
    eval_kernel = struct.make_covariance_matrix(points, kernel)

    matrix_syst = np.kron(eval_kernel, basis)

    # Solve for the momenta and rebuild the structured field from them.
    alpha_concatenated = np.linalg.solve(matrix_syst, vector_syst)
    alpha = struct.get_structured_vectors_from_concatenated(
        alpha_concatenated, nb_points, dim)
    structured = struct.create_structured(points, alpha)

    structured_list.append(structured.copy())
    unstructured_list.append(get_unstructured_op(structured).copy())
#
#%% See projection

#plt.plot(points[0] , points[1], 'xb')
get_unstructured_op_generate = struct.get_from_structured_to_unstructured(
    space, kernel_generate)

#%% define data

dim = 1
nb_pt_generate = 3
# Ground-truth structured field: random points and vectors in [-1, 1).
points_truth = np.random.uniform(low=-1.0, high=1.0, size=nb_pt_generate)
vectors_truth = np.random.uniform(low=-1.0, high=1.0, size=nb_pt_generate)
original = struct.create_structured(points_truth, vectors_truth)
original_unstructured = get_unstructured_op_generate(original)
data_list = []
nb_data = 10
translation_list = np.random.uniform(low=-1.0, high=1.0, size=nb_data)
# RKHS-correlated noise: apply the Cholesky factor of (K + eps*I)
# to white noise on the product space (one component per datum).
covariance_matrix = struct.make_covariance_matrix(space.points().T,
                                                  kernel_generate)
noise_l2 = odl.phantom.noise.white_noise(odl.ProductSpace(space,
                                                          nb_data)) * 0.1
decomp = np.linalg.cholesky(covariance_matrix +
                            1e-4 * np.identity(len(covariance_matrix)))
noise_rkhs = [np.dot(decomp, noise_l2[i]) for i in range(nb_data)]
pts_space = space.points().T
#data_list=[]
#for i in range(nb_data):
#    pts_displaced = g.apply(np.array([-translation_list[i]]), pts_space)
#    data_list.append(space.tangent_bundle.element([original_unstructured[u].interpolation(pts_displaced) for u in range(dim)]))

# FIX: this comprehension was left unterminated (missing its `for` clause
# and closing bracket — a SyntaxError). Completed per the commented-out
# loop above: one noisy translated datum per i in range(nb_data).
data_list_noisy = [
    space.tangent_bundle.element(
        get_unstructured_op_generate(
            action(np.array([translation_list[i]]), original)) + noise_rkhs[i])
    for i in range(nb_data)
]
# Accumulators for the per-dataset quantities loaded below.
points_list = []
vectors_list = []
cov_mat_list = []
param_list = []
A_inner_prod_list = []

# NOTE(review): `structured_list`, `unstructured_list`, `name`, `dim` and
# `kernel_np` must be defined earlier in the file — not visible in this chunk.
nbdatamax = 10
for i in range(nbdatamax):
    structured_list.append(np.loadtxt(name + 'structured' + str(i)))
    unstructured_list.append(np.loadtxt(name + 'unstructured' + str(i)))
    # Rows dim..2*dim of the structured array are presumably the vector
    # part — TODO confirm against struct.create_structured's layout.
    vectors_i = structured_list[i][dim:2 * dim]
    points_list.append(np.loadtxt(name + 'points' + str(i)))
    vectors_list.append(np.loadtxt(name + 'vectors' + str(i)))
    param_list.append(np.loadtxt(name + 'param' + str(i)))
    # Kernel Gram matrix for this dataset's points.
    cov_mat_list.append(
        struct.make_covariance_matrix(points_list[i], kernel_np))
    # Inner-product matrix A_i = (K_i (V_i^T W_i))^T between the stored
    # structured vectors and the loaded ones.
    A_inner_prod_list.append(
        np.dot(cov_mat_list[i], np.dot(vectors_i.T, vectors_list[i])).T)
#

# One column per dataset after transposing.
param_list = np.array(param_list).T

nb_points = len(points_list[0][0])
nb_vectors = len(vectors_list[0][0])
##%% Graph 1 : projection of data on zeta(o, 1) and diff with data
#
##inp = tf.placeholder(shape=(nb_vectors, nb_points), dtype=tf.float64)
#inp = tf.Variable(np.ones([nb_vectors, nb_points]), name="alpha")
#
## List of zeta_(o_i) (1)
#structured_list_computed = [create_structured(points_list[i], tf.matmul(vectors_list[i], inp)) for i in range(nbdatamax)]