def compute_vectorfield_pointsvectorcoeff(points, vectors, alpha):
    # Weight the given vectors by the coefficients alpha, attach them to the
    # points, and return the corresponding unstructured field.
    #nb_vectors = vectors.shape[1]
    vector_translations = np.dot(np.array(vectors), np.array(alpha))
    structured = struct.create_structured(points, vector_translations)
    unstructured = gen_unstructured(structured)
    return unstructured.copy()
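# A minimal usage sketch (hypothetical values; it assumes `vectors` is stacked
# so that np.dot(vectors, alpha) yields a dim x nb_points array matching
# `points`, and that `gen_unstructured` is in scope as above):
#
#   points = np.array([[-0.5, 0.0, 0.5]])
#   vectors = np.stack([v1, v2], axis=-1)  # v1, v2: dim x nb_points fields
#   alpha = np.array([0.7, 0.3])
#   field = compute_vectorfield_pointsvectorcoeff(points, vectors, alpha)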
def test_translation_action():
    space = odl.uniform_discr(min_pt=[-1], max_pt=[1], shape=[128],
                              dtype='float32', interp='linear')
    points = np.array([[-0.75, 0.0, 0.2, 0.5]])
    vectors = np.array([[0.3, 0.0, 0.0, 1.0]])
    original = struct.create_structured(points, vectors)
    translation_action = get_action(space)
    translation = np.array([1.0])
    expected = translation_action(translation, original)
    g = group.Translation(space)
    computed = action.apply_element_to_field(g, translation, original)
    npt.assert_allclose(computed, expected)
def test_calibration():
    space = odl.uniform_discr(min_pt=[-1], max_pt=[1], shape=[128],
                              dtype='float32', interp='linear')
    cell_side = space.cell_sides
    #kernel = get_kernel(space)
    kernel = get_kernel_gauss(space, 0.2)

    def product(f, g):
        return struct.scalar_product_structured(f, g, kernel)

    #points = space.points()[::2].T
    points = np.array([[-0.75, 0.0, 0.2, 0.5]])
    vectors = np.array([[0.3, 0.0, 0.0, 1.0]])
    original = struct.create_structured(points, vectors)
    g = group.Translation(space)
    translation = np.array([1.0])
    translated = action.apply_element_to_field(g, translation, original)

    # RKHS-correlated noise: multiply white noise by a Cholesky factor of the
    # (regularized) kernel covariance matrix.
    covariance_matrix = struct.make_covariance_matrix(space.points().T, kernel)
    noise_l2 = odl.phantom.noise.white_noise(space) * 0.05
    decomp = np.linalg.cholesky(
        covariance_matrix + 1e-5 * np.identity(len(covariance_matrix)))
    noise_rkhs = np.dot(decomp, noise_l2)
    get_unstructured = struct.get_from_structured_to_unstructured(space, kernel)
    noisy = space.tangent_bundle.element(
        get_unstructured(translated) + noise_rkhs)

    # Second argument renamed (was `struct`) so it does not shadow the
    # `struct` module used throughout this function.
    def act(element, structured_field):
        return action.apply_element_to_field(g, element, structured_field)

    result_calibration = calib.calibrate(original, noisy, g, act, product,
                                         struct.scalar_product_unstructured)
    estimated_translated = get_unstructured(act(result_calibration.x, original))
    print('real = {}, computed = {}, log diff = {}'.format(
        translation, result_calibration.x,
        np.log10(np.abs(translation[0] - result_calibration.x[0]))))
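# Why the Cholesky factor above produces RKHS-like noise: if z is white noise
# and K = L L^T, then L z has covariance L L^T = K, i.e. it is a draw from the
# Gaussian whose covariance is the kernel matrix. A self-contained numpy check
# (standalone sketch, independent of odl):
#
#   pts = np.linspace(-1, 1, 64)
#   K = np.exp(-(pts[:, None] - pts[None, :])**2 / 0.2**2)
#   L = np.linalg.cholesky(K + 1e-5 * np.identity(len(K)))
#   smooth_noise = L @ np.random.randn(len(K))  # one correlated realization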
def act(translation, f):
    # Translate the support points of a structured field, project them back
    # onto the domain, and keep the attached vectors unchanged.
    points = struct.get_points(f)
    vectors = struct.get_vectors(f)
    # Broadcasting replaces the original double loop over (dim, nb_points).
    points_translated = points + np.asarray(translation)[:, np.newaxis]
    points_translated_projected = proj(points_translated)
    return struct.create_structured(points_translated_projected, vectors)
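# Example call (hypothetical values; assumes `proj` and `struct` are defined
# as in this module):
#
#   f = struct.create_structured(np.array([[-0.5, 0.5]]),
#                                np.array([[1.0, -1.0]]))
#   shifted = act(np.array([0.25]), f)  # points moved by +0.25, vectors kept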
def iterative_scheme(solve_regression, calibration, action, g, kernel,
                     field_list, sigma0, sigma1, points, nb_iteration):
    nb_data = len(field_list)
    eval_kernel = struct.make_covariance_matrix(points, kernel)
    dim, nb_points = points.shape

    def product(vect0, vect1):
        return struct.scalar_product_structured(vect0, vect1, kernel)

    # initialization with a structured version of first vector field (NOT GOOD)
    group_element_init = g.identity
    vectors_original = solve_regression(g, [group_element_init],
                                        [field_list[0]], sigma0, sigma1,
                                        points, eval_kernel)
    vectors_original_struct = struct.get_structured_vectors_from_concatenated(
        vectors_original, nb_points, dim)
    original = struct.create_structured(points, vectors_original_struct)
    get_unstructured_op = struct.get_from_structured_to_unstructured(
        field_list[0].space[0], kernel)
    get_unstructured_op(original).show('initialisation')

    # Alternate between calibrating the group elements and re-estimating the
    # template field; assumes nb_iteration >= 1 so that group_element_list is
    # defined at the return statement.
    for k in range(nb_iteration):
        velocity_list = calibrate_list(original, field_list, calibration)
        group_element_list = [g.exponential(velocity_list[i])
                              for i in range(nb_data)]
        vectors_original = solve_regression(g, group_element_list, field_list,
                                            sigma0, sigma1, points, eval_kernel)
        vectors_original_struct = struct.get_structured_vectors_from_concatenated(
            vectors_original, nb_points, dim)
        original = struct.create_structured(points, vectors_original_struct)
        print('iteration {}'.format(k))
        get_unstructured_op(original).show('iteration {}'.format(k))

    return [original, group_element_list]
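# Sketch of a call (names are placeholders, not a fixed API; the callables
# must match the signatures used inside iterative_scheme above):
#
#   template, group_elements = iterative_scheme(
#       regression.solve_regression, calib.calibrate, action, g, kernel,
#       field_list, sigma0=1.0, sigma1=0.1, points=points, nb_iteration=5)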
vector_syst = np.zeros(dim * nb_points)
basis = np.identity(dim)
for k0 in range(nb_points):
    for l0 in range(dim):
        vector_syst[dim * k0 + l0] += np.dot(eval_field.T[k0], basis[:, l0])
eval_kernel = struct.make_covariance_matrix(points, kernel)
matrix_syst = np.kron(eval_kernel, basis)
alpha_concatenated = np.linalg.solve(matrix_syst, vector_syst)
alpha = struct.get_structured_vectors_from_concatenated(
    alpha_concatenated, nb_points, dim)
structured = struct.create_structured(points, alpha)
structured_list.append(structured.copy())
unstructured_list.append(get_unstructured_op(structured).copy())

#%% See projection
#plt.plot(points[0], points[1], 'xb')
for i in range(nbdata):
    space.tangent_bundle.element([
        vector_fields_list[i][:, :, u] for u in range(2)
    ])[0].show('truth' + str(i), clim=[-5, 5])
    plt.plot(param.T[i][0::2], param.T[i][1::2], 'xb')
    unstructured_list[i][0].show('projected' + str(i), clim=[-5, 5])
    plt.plot(param.T[i][0::2], param.T[i][1::2], 'xb')
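# The solve above inverts the block system (K ⊗ I_dim) a = b: K is the kernel
# (covariance) matrix on the points and a stacks the coefficients alpha_k of
# the kernel expansion v(x) = sum_k K(x, x_k) alpha_k that reproduces the
# evaluated field. Tiny standalone illustration with made-up numbers:
#
#   K = np.array([[1.0, 0.5], [0.5, 1.0]])  # kernel matrix, 2 points
#   A = np.kron(K, np.identity(2))          # dim = 2 -> 4 x 4 system
#   b = np.array([1.0, 0.0, 0.0, 1.0])      # field values, concatenated
#   a = np.linalg.solve(A, b)               # stacked alpha coefficients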
#    kern_discr = kern_app_point(mg)
#
#    unstructured += space.tangent_bundle.element(
#        [kern_discr * vect for vect in vectors[:, k]]).copy()
#
#    return unstructured

get_unstructured_op_generate = struct.get_from_structured_to_unstructured(
    space, kernel_generate)

#%% define data
dim = 1
nb_pt_generate = 3
points_truth = np.random.uniform(low=-1.0, high=1.0, size=nb_pt_generate)
vectors_truth = np.random.uniform(low=-1.0, high=1.0, size=nb_pt_generate)
original = struct.create_structured(points_truth, vectors_truth)
original_unstructured = get_unstructured_op_generate(original)

data_list = []
nb_data = 10
translation_list = np.random.uniform(low=-1.0, high=1.0, size=nb_data)

covariance_matrix = struct.make_covariance_matrix(space.points().T,
                                                  kernel_generate)
noise_l2 = odl.phantom.noise.white_noise(
    odl.ProductSpace(space, nb_data)) * 0.1
decomp = np.linalg.cholesky(
    covariance_matrix + 1e-4 * np.identity(len(covariance_matrix)))
noise_rkhs = [np.dot(decomp, noise_l2[i]) for i in range(nb_data)]
pts_space = space.points().T

#data_list = []
#for i in range(nb_data):
#    pts_displaced = g.apply(np.array([-translation_list[i]]), pts_space)
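# One plausible completion of the commented loop above, following the pattern
# used in test_calibration (hedged sketch only: translate the truth, add the
# i-th RKHS noise realization, and collect the result):
#
#   for i in range(nb_data):
#       translated = action.apply_element_to_field(
#           g, np.array([translation_list[i]]), original)
#       data_list.append(space.tangent_bundle.element(
#           get_unstructured_op_generate(translated) + noise_rkhs[i]))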
def mult_scalar_structured_np(scal, structured):
    # Scale only the vector part of a structured field; the support points
    # (first `dim` rows, with `dim` taken from the enclosing scope) are kept.
    points = structured[0:dim]
    vectors = structured[dim:2 * dim]
    vectors_mult = scal * vectors
    return struct.create_structured(points, vectors_mult)
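# Example (dim = 1, made-up values): only the vectors are scaled,
#
#   f = struct.create_structured(np.array([[0.0, 0.5]]),
#                                np.array([[1.0, 2.0]]))
#   h = mult_scalar_structured_np(3.0, f)  # vectors become [[3.0, 6.0]]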
#%% Visualize results
def kernel_np(x, y):
    #si = tf.shape(x)[0]
    return np.exp(-sum([(x[i] - y[i])**2 for i in range(dim)]) / (sigma**2))

get_unstructured_op = struct.get_from_structured_to_unstructured(
    space, kernel_np)
structured_computed = []
unstructured_computed = []
fac = []
for i in range(nbdatamax):
    structured_temp = struct.create_structured(
        points_list[i], np.dot(vectors_list[i], alpha))
    prod = struct.scalar_product_structured(structured_temp,
                                            structured_list[i], kernel_np)
    squared_norm = struct.scalar_product_structured(structured_temp,
                                                    structured_temp, kernel_np)
    # Least-squares scale of the data onto the computed field.
    fac.append(prod / squared_norm)
    structured_computed.append(structured_temp.copy())
    unstructured_computed.append(get_unstructured_op(structured_temp).copy())

for i in range(nbdatamax):
    get_unstructured_op(structured_list[i]).show(
        'unstructured data {}'.format(i))
    (fac[i] * unstructured_computed[i]).show(
        'unstructured computed {}'.format(i))
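# fac = <v, w> / ||v||^2 makes fac * v the orthogonal projection of the data
# w onto span{v} in the RKHS inner product. Quick numpy analogue (made-up
# vectors, Euclidean inner product):
#
#   v = np.array([1.0, 0.0])
#   w = np.array([2.0, 1.0])
#   fac_euclid = np.dot(v, w) / np.dot(v, v)  # = 2.0; fac_euclid * v is the
#                                             # projection of w onto span{v}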
# int(...) guards the grid sizes: round() may return a float (e.g. on
# Python 2), and range() needs integers.
dx = int(round((xmax - xmin) / (fac * sigma_kernel)))
ymin = -3.0
ymax = 3.0
dy = int(round((ymax - ymin) / (fac * sigma_kernel)))
points_list_gen = []
for i in range(dx + 1):
    for j in range(dy + 1):
        points_list_gen.append([xmin + fac * sigma_kernel * i,
                                ymin + fac * sigma_kernel * j])
points_list_gen = np.array(points_list_gen)
nb_pts_gen = len(points_list_gen)

vectors_truth = np.random.uniform(low=-1.0, high=1.0, size=[2, nb_pts_gen])
original = struct.create_structured(points_list_gen.T, vectors_truth)
original_unstructured = get_unstructured_op(original)

data_list = []
nb_data = 10
translation_list = np.random.uniform(low=-1.0, high=1.0, size=[2, nb_data])
scaling_list = np.abs(np.random.normal(1, 1, nb_data))
#scaling_list = 1. + np.zeros(nb_data)
theta_list = np.random.uniform(low=-np.pi, high=np.pi, size=nb_data)
param_transfor_list = np.array([
    g.exponential(np.array([scaling_list[i], theta_list[i],
                            translation_list[0, i], translation_list[1, i]]))
    for i in range(nb_data)
])
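# Equivalent vectorized construction of the same grid (same points, same
# ordering, spacing fac * sigma_kernel):
#
#   xs = xmin + fac * sigma_kernel * np.arange(dx + 1)
#   ys = ymin + fac * sigma_kernel * np.arange(dy + 1)
#   X, Y = np.meshgrid(xs, ys, indexing='ij')
#   grid = np.stack([X.ravel(), Y.ravel()], axis=1)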