svf1 = np.load(pfi_svf1) flow1_ground = np.load(pfi_flow) if methods[method_name][6]: raise IOError( 'TODO for point-wise methods differentiate vode, lsoda' ) # compute exponetial with time: start = time.time() disp_computed = exp_method(svf1, input_num_steps=st) stop = (time.time() - start) # compute error: error = qr.norm(disp_computed - flow1_ground, passe_partout_size=params['passepartout'], normalized=True) df_time_error['subject'][sj] = 'sj{}'.format(sj) df_time_error['time (sec)'][sj] = stop df_time_error['error (mm)'][sj] = error # save pandas df in csv pfi_df_time_error = jph( pfo_output_A4_AD, 'ad-{}-steps-{}.csv'.format(method_name, st)) df_time_error.to_csv(pfi_df_time_error) # print something if you fancy: if verbose == 2: print(df_time_error)
start = time.time() l_exp = lie_exp.LieExp() sdisp_scipy = l_exp.scipy_pointwise( svf_0, integrator='vode', method=method, max_steps=max_step, interpolation_method=interp_method, verbose=verbose_exp, passepartout=passepartout, return_integral_curves=False) operation_time = (time.time() - start) error = qr.norm(sdisp_scipy - sdisp_0, passe_partout_size=passepartout) print('---------- Error and Computational Time ----') print('|vode - disp| = {} voxel'.format(str(error))) print('Comp Time = {} sec.'.format( str(operation_time))) print('-----------------------------------------------') # store errors and computational time in appropriate matrices: # 4d matrix x: steps, y: integrators and methods, z: interpolation, t: error/computational time main_errors[max_step_i, method_i, interp_method_i, i] = error main_computational_time[max_step_i, method_i, interp_method_i, i] = operation_time
sdisp_mid_p = l_exp.midpoint(svf_0) sdisp_euler_m = l_exp.euler_mod(svf_0) sdisp_rk4 = l_exp.rk4(svf_0) sdisp_vode = l_exp.scipy_pointwise(svf_0, verbose=True, passepartout=passepartout) print(type(sdisp_ss)) print(type(sdisp_ss_pa)) print(type(sdisp_euler)) print(type(sdisp_euler_m)) print(type(sdisp_rk4)) print('--------------------') print("Norm of the svf:") print(qr.norm(svf_0, passe_partout_size=4)) print('--------------------') print("Norm of the displacement field:") print(qr.norm(sdisp_0, passe_partout_size=4)) print('--------------------') print("Norm of the errors:") print('--------------------') print('|ss - disp| = {} '.format( str(qr.norm(sdisp_ss - sdisp_0, passe_partout_size=passepartout)))) print('|ss_pa - disp| = {} '.format( str(qr.norm(sdisp_ss_pa - sdisp_0, passe_partout_size=passepartout)))) print('|euler - disp| = {} '.format( str(qr.norm(sdisp_euler - sdisp_0, passe_partout_size=passepartout)))) print('|midpoint - disp| = {} '.format(
def test_vf_norm_ones_normalised():
    """Normalized norm of a constant all-ones 3-vector field.

    With ``normalized=True`` the norm is averaged per voxel, so for a field
    whose every voxel is (1, 1, 1) it must equal |(1, 1, 1)| = sqrt(3).
    """
    field = np.ones([10, 10, 10, 1, 3])
    expected = np.sqrt(3)
    computed = qr.norm(field, passe_partout_size=0, normalized=True)
    assert_almost_equal(computed, expected)
h_a, h_g = pgl2.get_random_hom_matrices(d=hom_attributes[0], scale_factor=hom_attributes[1], sigma=hom_attributes[2], special=hom_attributes[3], projective_center=np.array( [25, 25])) print(h_a) print(h_g) svf1 = gen.generate_from_projective_matrix(domain, h_a, structure='algebra') flow = gen.generate_from_projective_matrix(domain, h_g, structure='group') l_exp = lie_exp.LieExp() flow_ss = l_exp.scaling_and_squaring(svf1, input_num_steps=10) print(qr.norm(flow - flow_ss, passe_partout_size=4)) fields_at_the_window.see_field(svf1, input_color='r') fields_at_the_window.see_field(flow, input_color='b') fields_at_the_window.see_field(flow_ss, input_color='g', width=0.01) plt.show() for tp in [1, 2, 3, 4, 6, 7, 10, 12, 20]: flow_ss = l_exp.scaling_and_squaring(svf1, input_num_steps=tp) print(qr.norm(flow - flow_ss, passe_partout_size=4))
def test_vf_norm_zeros():
    """The norm of the identically-zero vector field is exactly 0."""
    zero_field = np.zeros([10, 10, 10, 1, 3])
    assert_equal(qr.norm(zero_field), 0)
def test_vf_norm_ones():
    """Un-normalized norm of a constant all-ones 3-vector field.

    Each of the 10**3 voxels contributes |(1, 1, 1)| = sqrt(3); without
    per-voxel normalization the total is 10**3 * sqrt(3).
    """
    field = np.ones([10, 10, 10, 1, 3])
    expected = 10 ** 3 * np.sqrt(3)
    computed = qr.norm(field, passe_partout_size=0, normalized=False)
    assert_almost_equal(computed, expected)
# get integral of the SVF with the matrix exponential m = scipy.linalg.expm(dm) sdisp_expm = alpha * gen.generate_from_matrix( (40, 40), m, structure='group') see_field(svf, subtract_id=False, input_color='r', fig_tag=2) see_field(sdisp_expm, subtract_id=False, input_color='b', title_input='Unstable node expm', fig_tag=2) pyplot.show(block=False) print('difference in "improper" norm of the flow fields {}'.format( qr.norm(sdisp - sdisp_expm, passe_partout_size=5))) # ---------------------------- # ---- See all tastes -------- # ---------------------------- vfs = [] titles = [] for taste in range(1, 7): dm = linear.randomgen_linear_by_taste(1, taste, (20, 20)) svf = beta * gen.generate_from_matrix( (40, 40), dm, structure='algebra') vfs.append(svf) titles.append('Kind {}'.format(taste)) extra_titles = { 'Kind 1': 'Unstable node',
l_exp.trapeziod_euler, l_exp.gss_trapezoid_euler, l_exp.trapezoid_midpoint, l_exp.gss_trapezoid_midpoint] res_time = np.zeros(len(methods)) res_err = np.zeros(len(methods)) fields_list = [] for met_id, met in enumerate(methods): start = time.time() sdisp_num = met(svf_0) res_time[met_id] = (time.time() - start) res_err[met_id] = qr.norm(sdisp_num - sdisp_0, passe_partout_size=passepartout) print(res_err[met_id]) fields_list.append(sdisp_num) print('--------------------') print("Norm of the svf:") print(qr.norm(svf_0, passe_partout_size=passepartout)) print('--------------------') print("Norm of the displacement field:") print(qr.norm(sdisp_0, passe_partout_size=passepartout)) print('--------------------') print("Norm of the errors:") print('--------------------')
def test_visual_assessment_method_one_se2(show=False):
    """
    :param show: to add the visualisation of a figure.

    This test is for visual assessment. Please put show to True.

    Aimed to test the prototyping of the computation
    of the exponential map with some methods.

    (Nothing is saved in external folder.)
    """

    ##############
    # controller #
    ##############

    domain = (20, 20)

    # rotation centre and angle of the rigid (SE(2)) transformation under test
    x_c = 10
    y_c = 10
    theta = np.pi / 8

    # translation that makes (x_c, y_c) the centre of the rotation
    tx = (1 - np.cos(theta)) * x_c + np.sin(theta) * y_c
    ty = -np.sin(theta) * x_c + (1 - np.cos(theta)) * y_c

    passepartout = 5
    spline_interpolation_order = 3

    l_exp = lie_exp.LieExp()
    l_exp.s_i_o = spline_interpolation_order

    # every numerical exponential-integrator exposed by LieExp that we compare
    # (note: 'trapeziod_euler' spelling matches the library attribute name)
    methods_list = [
        l_exp.scaling_and_squaring,
        l_exp.gss_ei,
        l_exp.gss_ei_mod,
        l_exp.gss_aei,
        l_exp.midpoint,
        l_exp.series,
        l_exp.euler,
        l_exp.euler_aei,
        l_exp.euler_mod,
        l_exp.heun,
        l_exp.heun_mod,
        l_exp.rk4,
        l_exp.gss_rk4,
        l_exp.trapeziod_euler,
        l_exp.trapezoid_midpoint,
        l_exp.gss_trapezoid_euler,
        l_exp.gss_trapezoid_midpoint
    ]

    # -----
    # model
    # -----

    # group element and its log (Lie algebra element) for the same rigid motion
    m_0 = se2.Se2G(theta, tx, ty)
    dm_0 = se2.se2g_log(m_0)

    # -- generate subsequent vector fields
    # svf_0: stationary velocity field (algebra); sdisp_0: ground-truth flow (group)
    svf_0 = gen.generate_from_matrix(domain, dm_0.get_matrix, structure='algebra')
    sdisp_0 = gen.generate_from_matrix(domain, m_0.get_matrix, structure='group')

    # -- compute exponential with different available methods:

    sdisp_list = []
    res_time = np.zeros(len(methods_list))

    for num_met, met in enumerate(methods_list):
        start = time.time()
        sdisp_list.append(met(svf_0, input_num_steps=10))
        res_time[num_met] = (time.time() - start)

    # ----
    # view
    # ----

    print('--------------------')
    print('Norm of the svf: ')
    print(qr.norm(svf_0, passe_partout_size=4))

    print('--------------------')
    print("Norm of the displacement field:")
    print(qr.norm(sdisp_0, passe_partout_size=4))

    print('--------------------')
    print('Norm of the errors: ')
    print('--------------------')

    for num_met in range(len(methods_list)):
        err = qr.norm(sdisp_list[num_met] - sdisp_0, passe_partout_size=passepartout)
        print('|{0:>22} - disp| = {1}'.format(methods_list[num_met].__name__, err))

        # plain forward Euler is the least accurate scheme, so it gets a
        # looser acceptance bound than all the other integrators
        if methods_list[num_met].__name__ == 'euler':
            assert err < 3
        else:
            assert err < 0.5

    print('---------------------')
    print('Computational Times: ')
    print('---------------------')

    if show:
        # NOTE(review): method objects (not their names) are appended as
        # titles here; presumably see_n_fields_special stringifies them.
        title_input_l = ['Sfv Input', 'Ground Output'] + methods_list
        fields_list = [svf_0, sdisp_0] + sdisp_list

        # each comparison panel shows svf (red), ground truth (blue) and
        # one numerically-computed flow (magenta)
        list_fields_of_field = [[svf_0], [sdisp_0]]
        list_colors = ['r', 'b']
        for third_field in fields_list[2:]:
            list_fields_of_field += [[svf_0, sdisp_0, third_field]]
            list_colors += ['r', 'b', 'm']

        fields_comparisons.see_n_fields_special(
            list_fields_of_field,
            fig_tag=50,
            row_fig=5,
            col_fig=5,
            input_figsize=(14, 7),
            colors_input=list_colors,
            titles_input=title_input_l,
            sample=(1, 1),
            zoom_input=[0, 20, 0, 20],
            window_title_input='matrix generated svf')

        plt.show()
print('Number of steps for scipy method : ' + str(steps_scipy)) print('--------------------') disp_scipy_out = l_exp.scipy_pointwise(svf_0, method=methods_vode[1], max_steps=steps_scipy, verbose=False, passepartout=passepartout, return_integral_curves=True) disp_scipy = disp_scipy_out[0] integral_curves = disp_scipy_out[1] print(type(integral_curves)) print(type(integral_curves[0])) error = qr.norm(disp_scipy - sdisp_0, passe_partout_size=3) print(type(sdisp_ss)) print(type(sdisp_gss_ei)) print(type(sdisp_euler)) print(type(sdisp_euler_m)) print(type(sdisp_rk4)) print('--------------------') print("Norm of the svf:") print(qr.norm(svf_0, passe_partout_size=4)) print('--------------------') print("Norm of the displacement field:") print(qr.norm(sdisp_0, passe_partout_size=4))
def three_assessments_collector(control):
    """Run one of the three numerical assessments and/or its post-processing.

    :param control: dict with keys 'svf_dataset' (which SVF data set to load),
        'computation' ('IC' inverse-consistency, 'SA' scalar-associativity,
        'SE' stepwise-error), and booleans 'collect', 'get_statistics',
        'show_graphs' selecting which phase(s) to (re-)run; when a phase flag
        is False its previously saved output is asserted to exist instead.
    :raises IOError: if the data set name or the computation kind is unknown.

    Relies on module-level globals: `methods`, `num_samples`, the
    `pfo_output_*` path constants, `bw_subjects`, `ad_subjects`, `jph`.
    """
    # ----------------------- #
    # Retrieve data set paths #
    # ----------------------- #

    if control['svf_dataset'].lower() in {'rotation', 'rotations'}:
        pfi_svf_list = [jph(pfo_output_A4_SE2, 'se2-{}-algebra.npy'.format(s + 1)) for s in range(num_samples)]
    elif control['svf_dataset'].lower() in {'linear'}:
        pfi_svf_list = [jph(pfo_output_A4_GL2, 'gl2-{}-algebra.npy'.format(s + 1)) for s in range(num_samples)]
    elif control['svf_dataset'].lower() in {'homography', 'homographies'}:
        # NOTE(review): hard-coded 12 instead of num_samples — confirm intended.
        pfi_svf_list = [jph(pfo_output_A4_HOM, 'hom-{}-algebra.npy'.format(s + 1)) for s in range(12)]  # TODO
    elif control['svf_dataset'].lower() in {'gauss'}:
        pfi_svf_list = [jph(pfo_output_A4_GAU, 'gau-{}-algebra.npy'.format(s + 1)) for s in range(num_samples)]
    elif control['svf_dataset'].lower() in {'brainweb'}:
        pfi_svf_list = [jph(pfo_output_A4_BW, 'bw-{}-algebra.npy'.format(sj)) for sj in bw_subjects[1:]]
    elif control['svf_dataset'].lower() in {'adni'}:
        pfi_svf_list = [jph(pfo_output_A4_AD, 'ad-{}-algebra.npy'.format(sj)) for sj in ad_subjects]
    else:
        # NOTE(review): the message has no '{}' placeholder, so .format() is a
        # no-op and the data-set name is never shown.
        raise IOError('Svf data set not given'.format(control['svf_dataset']))

    for pfi in pfi_svf_list:
        assert os.path.exists(pfi), pfi

    # --------------------------------------- #
    # Select number of steps for each method #
    # --------------------------------------- #

    if control['computation'] == 'IC':
        steps = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20, 30]
    elif control['computation'] == 'SA':
        steps = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20, 30]
    elif control['computation'] == 'SE':
        steps = range(1, 30)
    else:
        raise IOError('Input control computation {} not defined.'.format(control['computation']))

    if control['collect']:

        print('---------------------------------------------------------------------------')
        print('Test {} for dataset {} '.format(control['computation'], control['svf_dataset']))
        print('---------------------------------------------------------------------------')

        for pfi_svf in pfi_svf_list:
            # subject id is the first two dash-separated tokens of the filename
            sj_id = os.path.basename(pfi_svf).split('-')[:2]
            sj_id = sj_id[0] + '-' + sj_id[1]

            print('Computation for subject {}.'.format(sj_id))

            # only methods whose flag methods[k][1] is truthy are benchmarked
            method_names = [k for k in methods.keys() if methods[k][1]]

            # one error value per (method, number-of-steps) pair
            df_steps_measures = pd.DataFrame(columns=method_names, index=steps)

            svf1 = np.load(pfi_svf)

            for met in method_names:
                print(' --> Computing method {}.'.format(met))
                exp_method = methods[met][0]  # the callable exponential integrator

                for st in steps:
                    print(' ---> step {}'.format(st))

                    if control['computation'] == 'IC':
                        # inverse consistency: exp(v) o exp(-v) should be identity;
                        # the error is symmetrised over the two composition orders
                        exp_st_svf1 = exp_method(svf1, input_num_steps=st)
                        exp_st_neg_svf1 = exp_method(-1 * svf1, input_num_steps=st)
                        error = 0.5 * (qr.norm(cp.lagrangian_dot_lagrangian(exp_st_svf1, exp_st_neg_svf1), normalized=True) +
                                       qr.norm(cp.lagrangian_dot_lagrangian(exp_st_neg_svf1, exp_st_svf1), normalized=True))
                    elif control['computation'] == 'SA':
                        # scalar associativity: exp(a v) o exp(b v) o exp(c v)
                        # with a + b + c = 1 should equal exp(v)
                        a, b, c = 0.3, 0.3, 0.4
                        exp_st_a_svf1 = exp_method(a * svf1, input_num_steps=st)
                        exp_st_b_svf1 = exp_method(b * svf1, input_num_steps=st)
                        exp_st_c_svf1 = exp_method(c * svf1, input_num_steps=st)
                        error = qr.norm(cp.lagrangian_dot_lagrangian(cp.lagrangian_dot_lagrangian(exp_st_a_svf1, exp_st_b_svf1), exp_st_c_svf1), normalized=True)
                    elif control['computation'] == 'SE':
                        # stepwise error: difference between st and st+1 steps
                        exp_st_svf1 = exp_method(svf1, input_num_steps=st)
                        exp_st_plus_one_svf1 = exp_method(svf1, input_num_steps=st + 1)
                        error = qr.norm(exp_st_svf1 - exp_st_plus_one_svf1, normalized=True)
                    else:
                        raise IOError('Input control computation {} not defined.'.format(control['computation']))

                    df_steps_measures[met][st] = error

            print(df_steps_measures)

            fin_output = 'test_{}_{}.csv'.format(control['computation'], sj_id)
            df_steps_measures.to_csv(jph(pfo_output_A5_3T, fin_output))

            print('Test result saved in:')
            print(fin_output)
            print('\n')

    else:
        # assert pandas data-frame exists.
        for pfi_svf in pfi_svf_list:
            sj_id = os.path.basename(pfi_svf).split('-')[:2]
            sj_id = sj_id[0] + '-' + sj_id[1]
            fin_output = 'test_{}_{}.csv'.format(control['computation'], sj_id)
            assert os.path.exists(jph(pfo_output_A5_3T, fin_output)), jph(pfo_output_A5_3T, fin_output)

    ##################
    # get statistics #
    ##################

    if control['get_statistics']:

        print('---------------------------------------------------------------------------')
        print('Get statistics for {}, dataset {}. '.format(control['computation'], control['svf_dataset']))
        print('---------------------------------------------------------------------------')

        # for each method get mean and std indexed by num-steps.
        # | steps | mu_error | sigma_error | mu_time | sigma_error |
        # in a file called stats-<computation>-<method>.csv
        for method_name in [k for k in methods.keys() if methods[k][1]]:

            print('\n Statistics for method {} \n'.format(method_name))

            # for each method stack all the measurements in a single matrix STEPS x SVFs
            steps_times_subjects = np.nan * np.ones([len(steps), len(pfi_svf_list)])

            for pfi_svf_index, pfi_svf in enumerate(pfi_svf_list):
                sj_id = os.path.basename(pfi_svf).split('-')[:2]
                sj_id = sj_id[0] + '-' + sj_id[1]
                fin_test = 'test_{}_{}.csv'.format(control['computation'], sj_id)
                df_steps_measures = pd.read_csv(jph(pfo_output_A5_3T, fin_test))
                # NOTE(review): .as_matrix() was removed in pandas 1.0 —
                # .values (or .to_numpy()) is the current equivalent.
                steps_times_subjects[:, pfi_svf_index] = df_steps_measures[method_name].as_matrix()

            # mean/std across subjects, one row per step count
            df_mean_std = pd.DataFrame(columns=['steps', 'mu_error', 'std_error'], index=range(len(steps)))
            df_mean_std['steps'] = steps
            df_mean_std['mu_error'] = np.mean(steps_times_subjects, axis=1)
            df_mean_std['std_error'] = np.std(steps_times_subjects, axis=1)

            print(df_mean_std)

            pfi_df_mean_std = jph(pfo_output_A5_3T, 'stats-3T-{}-{}-{}.csv'.format(control['svf_dataset'], control['computation'], method_name))
            df_mean_std.to_csv(jph(pfi_df_mean_std))

    else:
        # only check the first selected method's stats file for existence
        for method_name in [k for k in methods.keys() if methods[k][1]][:1]:
            pfi_df_mean_std = jph(pfo_output_A5_3T, 'stats-3T-{}-{}-{}.csv'.format(control['svf_dataset'], control['computation'], method_name))
            assert os.path.exists(pfi_df_mean_std), pfi_df_mean_std

    ###############
    # show graphs #
    ###############

    if control['show_graphs']:

        print('---------------------------------------------------------------------------')
        print('Showing graphs for {}, dataset {}. '.format(control['computation'], control['svf_dataset']))
        print('---------------------------------------------------------------------------')

        font_top = {'family': 'serif', 'color': 'darkblue', 'weight': 'normal', 'size': 14}
        font_bl = {'family': 'serif', 'color': 'black', 'weight': 'normal', 'size': 12}
        legend_prop = {'size': 11}

        sns.set_style()

        fig, ax = plt.subplots(figsize=(11, 6))

        # NOTE(review): fig.canvas.set_window_title is deprecated since
        # matplotlib 3.4 in favour of fig.canvas.manager.set_window_title.
        fig.canvas.set_window_title('{}_{}.pdf'.format(control['svf_dataset'], control['computation']))

        for method_name in [k for k in methods.keys() if methods[k][1]]:

            pfi_df_mean_std = jph(pfo_output_A5_3T, 'stats-3T-{}-{}-{}.csv'.format(control['svf_dataset'], control['computation'], method_name))

            df_mean_std = pd.read_csv(pfi_df_mean_std)

            # legend label tweaks: '*' marks the generalised-scaling methods,
            # scaling-and-squaring is anonymised as '******'
            if method_name in ['gss_ei', 'gss_ei_mod', 'gss_aei', 'gss_rk4', 'euler_aei']:
                method_name_bypass = method_name + ' *'
            elif method_name in ['scaling_and_squaring']:
                method_name_bypass = '******'
            else:
                method_name_bypass = method_name

            # colour/linestyle/marker per method come from methods[name][3:6]
            ax.plot(df_mean_std['steps'].values,
                    df_mean_std['mu_error'].values,
                    label=method_name_bypass,
                    color=methods[method_name][3],
                    linestyle=methods[method_name][4],
                    marker=methods[method_name][5])

            plt.errorbar(df_mean_std['steps'].values,
                         df_mean_std['mu_error'].values,
                         df_mean_std['std_error'].values,
                         linestyle='None',
                         marker='None',
                         color=methods[method_name][3],
                         alpha=0.5,
                         elinewidth=0.8)

        ax.set_title('Experiment {} for {}'.format(control['computation'], control['svf_dataset']), fontdict=font_top)
        ax.legend(loc='upper right', shadow=True, prop=legend_prop)

        ax.xaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
        ax.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
        ax.set_axisbelow(True)

        ax.set_xlabel('Steps', fontdict=font_bl, labelpad=5)
        ax.set_ylabel('Error (mm)', fontdict=font_bl, labelpad=5)
        # ax.set_xscale('log', nonposx="mask")
        # ax.set_yscale('log', nonposy="mask")

        pfi_figure_time_vs_error = jph(pfo_output_A5_3T, 'three_experiments_{}_{}.pdf'.format(control['computation'], control['svf_dataset']))

        plt.savefig(pfi_figure_time_vs_error, dpi=150)

        plt.show(block=True)