def main():
    """Parse CLI arguments and launch the interactive MP2RAGE plot."""
    # :: handle program parameters
    arg_parser = handle_arg()
    args = arg_parser.parse_args()
    # 'quiet' overrides any requested verbosity level
    if args.quiet:
        args.verbose = VERB_LVL['none']
    # :: dump parser help and the parsed arguments when debugging
    if args.verbose >= VERB_LVL['debug']:
        arg_parser.print_help()
        msg('\nARGS: ' + str(vars(args)), args.verbose, VERB_LVL['debug'])

    # select the plotting callback and its interactive controls,
    # then launch a single shared plotting call
    if args.indirect:
        plot_func, interactives = plot_rho_b1t_mp2rage_acq, ACQ_INTERACTIVES
    else:
        plot_func, interactives = plot_rho_b1t_mp2rage_seq, SEQ_INTERACTIVES
    numex.interactive_tk_mpl.plotting(
        plot_func, interactives,
        resources_path=PATH['resources'], title=TITLE, about=__doc__)

    elapsed(__file__[len(PATH['base']) + 1:])
    msg(report())
def main():
    """Parse CLI arguments and launch the selected interactive playground."""
    # :: handle program parameters
    arg_parser = handle_arg()
    args = arg_parser.parse_args()
    # 'quiet' overrides any requested verbosity level
    if args.quiet:
        args.verbose = VERB_LVL['none']
    # :: dump parser help and the parsed arguments when debugging
    if args.verbose >= VERB_LVL['debug']:
        arg_parser.print_help()
        msg('\nARGS: ' + str(vars(args)), args.verbose, VERB_LVL['debug'])

    # prompt interactively when no playground was given on the command line
    if not args.name:
        msg('Choose your playground:')
        msg('Available playgrounds: {AVAILABLES}'.format_map(globals()))
        args.name = input(': ')

    # guard clause: reject unknown playground names
    if args.name not in AVAILABLES:
        msg(fmtm('Plot `{args.name}` not valid.'))
        return

    numex.interactive_tk_mpl.plotting(
        globals()[PREFIX + args.name], PARAMS[args.name],
        title=TITLE_BASE + ' - ' + TITLE[args.name], about=__doc__)
    elapsed(__file__[len(PATH['base']) + 1:])
    msg(report())
def _to_be_checked():
    """Ad-hoc smoke test for the LCModel I/O routines.

    Reads a hard-coded LCModel output file and an input file, prints
    their parsed contents, and plots the spectrum data when present.
    NOTE(review): paths are site-specific; this only runs where
    '/scr/beryllium1/...' exists.
    """
    msg(__doc__.strip())
    test()

    # todo: move this test to unittest
    # parse an LCModel output (.txt) file and dump its sections
    s = '/scr/beryllium1/mr16/RM/lcmLONGc_128avg' \
        '/LONGc_128avg160419_RL6T_MET20_Step01_WAT.txt'
    d = read_output(s)
    for k, v in sorted(d['metabolites'].items()):
        print('{} : {}'.format(k, v))
    print()
    for k, v in sorted(d['extra'].items()):
        print('{} : {}'.format(k, v))
    print()
    # plot spectrum, fit and background only if spectral data was parsed
    if 'data_cs' in d:
        import matplotlib.pyplot as plt
        plt.figure()
        plt.plot(d['data_cs'], d['data_s'], '-b')
        plt.plot(d['data_cs'], d['data_fit'], '-r')
        plt.plot(d['data_cs'], d['data_bg'], '-y')
        plt.show()

    # parse an LCModel input file and show the data shape
    s = '/scr/beryllium1/mr16/RM/lcmLONGc_112avg' \
        '/LONGc_112avg160419_RL6T_MET20_Step01'
    c = read_input(s)
    print(c['data'].shape, c)

    elapsed('test lcmodel i/o')
    msg(report())
    print()
def main():
    """Parse CLI arguments and run the Bruker-to-NIfTI extraction."""
    # :: handle program parameters
    arg_parser = handle_arg()
    args = arg_parser.parse_args()
    # fix verbosity in case of 'quiet'
    if args.quiet:
        args.verbose = VERB_LVL['none']
    # :: print debug info
    if args.verbose >= VERB_LVL['debug']:
        arg_parser.print_help()
        msg('\nARGS: ' + str(vars(args)), args.verbose, VERB_LVL['debug'])

    msg(__doc__.strip())
    extract_nifti(args.dir, args.extradir, args.force, args.verbose)

    # fix: timer label was misspelled 'extract_nifit_bruker'
    elapsed('extract_nifti_bruker')
    msg(report())
def main():
    """Parse CLI arguments and run the correlation check."""
    # :: handle program parameters
    arg_parser = handle_arg()
    args = arg_parser.parse_args()
    # 'quiet' overrides any requested verbosity level
    if args.quiet:
        args.verbose = VERB_LVL['none']
    # :: dump parser help and the parsed arguments when debugging
    if args.verbose >= VERB_LVL['debug']:
        arg_parser.print_help()
        msg('\nARGS: ' + str(vars(args)), args.verbose, VERB_LVL['debug'])

    msg(__doc__.strip())
    # forward all parsed options except 'quiet' (already folded into verbose)
    options = vars(args)
    del options['quiet']
    pml.check_correlation(**options)

    elapsed(__file__[len(PATH['base']) + 1:])
    msg(report())
def main():
    """Parse CLI arguments and launch the FLASH Ernst-angle plot.

    The pair of variables named in ``args.mode`` selects which
    interactive plot is shown; the interactive controls are filtered
    to those matching the selected variables.
    """
    # :: handle program parameters
    arg_parser = handle_arg()
    args = arg_parser.parse_args()
    # fix verbosity in case of 'quiet'
    if args.quiet:
        args.verbose = VERB_LVL['none']
    # :: print debug info
    if args.verbose >= VERB_LVL['debug']:
        arg_parser.print_help()
        msg('\nARGS: ' + str(vars(args)), args.verbose, VERB_LVL['debug'])

    x_vars = {x.lower() for x in args.mode}
    # keep only the interactive controls whose 2-char key prefix matches
    # one of the selected variables (mapping type preserved via .copy())
    filtered_interactives = INTERACTIVES.copy()
    for k in list(filtered_interactives.keys()):
        if k[:2] not in x_vars:
            filtered_interactives.pop(k)

    if x_vars == {'t1', 'tr'}:
        numex.interactive_tk_mpl.plotting(
            plot_flash_ernst_angle_t1_tr, filtered_interactives,
            resources_path=PATH['resources'], title=TITLE, about=__doc__)
    elif x_vars == {'fa', 't1'}:
        numex.interactive_tk_mpl.plotting(
            plot_flash_ernst_angle_fa_t1, filtered_interactives,
            resources_path=PATH['resources'], title=TITLE, about=__doc__)
    elif x_vars == {'fa', 'tr'}:
        # fix: this branch duplicated the {'fa', 't1'} test above
        # (making it unreachable) and omitted the plotting callback.
        # NOTE(review): helper name inferred from the naming pattern of
        # the two branches above — confirm `plot_flash_ernst_angle_fa_tr`
        # exists in this module.
        numex.interactive_tk_mpl.plotting(
            plot_flash_ernst_angle_fa_tr, filtered_interactives,
            resources_path=PATH['resources'], title=TITLE, about=__doc__)

    elapsed(__file__[len(PATH['base']) + 1:])
    msg(report())
noise_arr = np.random.normal(0, noise_std_val, raw_arr.shape) noised_reco_arr = reco_func(raw_arr + noise_arr, mask, *reco_args, **reco_kws) # new uncorrelated noise noise_arr = np.random.normal(0, noise_std_val, reco_arr.shape) noised_optim_arr = reco_func(noise_arr, None, *reco_args, **reco_kws) mean_noised_reco_arr, sosd_noised_reco_arr, _ = \ fc.next_mean_and_sosd( np.real(noised_reco_arr), mean_noised_reco_arr, sosd_noised_reco_arr, i) mean_noised_optim_arr, sosd_noised_optim_arr, _ = \ fc.next_mean_and_sosd( np.real(noised_optim_arr), mean_noised_optim_arr, sosd_noised_optim_arr, i) noise_reco_arr = fc.sosd2stdev(sosd_noised_reco_arr, num) noise_optim_arr = fc.sosd2stdev(sosd_noised_optim_arr, num) snr_arr = np.abs(reco_arr) / noise_reco_arr g_factor_arr = g_factor(noise_optim_arr, noise_reco_arr, sampling_ratio) return snr_arr, g_factor_arr # ====================================================================== elapsed(__file__[len(PATH['base']) + 1:]) # ====================================================================== if __name__ == '__main__': run_doctests(__doc__)
# check_dynamics_operator() # fc.elapsed'check_dynamics_operator') # check_mt_sequence() # fc.elapsed'check_mt_sequence') # check_approx_propagator() # fc.elapsed'check_approx_propagator') # check_z_spectrum( # SpinModel(100.0, (0.5, 0.3, 0.1, 0.1), (GAMMA['1H'] * B0,) * 4, # (0.25, 0.8, 0.001, 1.0), (20.0, 60.0, 8e4, 5e4), # (1.0, 0.3, 0.0, 1.0, 0.5, 1.0), # (None, None, 'superlorenz_approx', 'superlorenz_approx'))) x1 = check_z_spectrum() elapsed('check_z_spectrum') x2 = check_z_spectrum2() elapsed('check_z_spectrum2') x3 = check_z_spectrum_sparse() elapsed('check_z_spectrum_sparse') # print(x2[0].ravel() / x1[0].ravel()) # print(x3[0].ravel() / x1[0].ravel()) # check_fit_spin_model() # fc.elapsed('check_fit_spin_model') msg(report()) # profile.run('check_z_spectrum()', sort=1) plt.show()
def _test(use_cache: bool = True):
    """Benchmark the exponential-decay fitters on a synthetic phantom.

    Builds (or loads from a cached .npz) a grid of noiseless exponential
    decays over a (tau, amplitude, echo-time) parameter grid, adds
    uniform noise, then prints mean bias/spread of the recovered `tau`
    for each fitting variant, timing each one.

    Args:
        use_cache: if True and the cached phantom file exists, load it
            instead of recomputing.
    """
    # x = np.linspace(1, 40, 5)
    # sampling points (echo times); tau/amplitude grids for the phantom
    x = np.array([2, 5, 7, 20, 40])
    tau_arr = np.linspace(2, 1000, 4000)
    a_arr = np.linspace(500, 4000, 4000)
    import pymrt.util  # NOTE(review): appears unused here — confirm
    import os
    base_dir = fc.realpath('~/hd1/TEMP')
    filepath = os.path.join(base_dir, 'tau_arr.npz')
    if os.path.isfile(filepath) and use_cache:
        y = np.load(filepath)['y']
    else:
        # y has shape (len(tau_arr), len(a_arr), len(x))
        y = np.zeros((len(tau_arr), len(a_arr), len(x)))
        for i, a in enumerate(a_arr):
            for j, tau in enumerate(tau_arr):
                y[j, i] = func_exp_decay(x, tau, a)
        np.savez(filepath, y=y)

    def eval_dist(a, b, axis=-1):
        # mean bias and mean spread of estimates `a` against truth `b`
        mu = np.nanmean(a, axis) - b
        std = np.nanstd(a, axis)
        return np.mean(mu), np.mean(std)

    elapsed('gen_tau_phantom')

    # uniform noise scaled to the largest amplitude over the given SNR
    snr = 20
    p = 1 / snr
    n = np.max(a_arr) * p * (np.random.random(y.shape) - 0.5)
    # presumably a per-echo selection mask passed to the fitters — confirm
    m = [True, True, False, False, False]
    # print(fit_exp_loglin(y + n, x)['tau'])
    # print(fit_exp_loglin(y + n, x, weighted=False)['tau'])
    # print(fit_exp_tau_quadr(y + n, x))
    print('quad', eval_dist(fit_exp_quad(y + n, x, m)['tau'], tau_arr))
    elapsed('quad')
    print('diff', eval_dist(fit_exp_diff(y + n, x, m)['tau'], tau_arr))
    elapsed('diff')
    print('quadr', eval_dist(fit_exp_quadr(y + n, x, m)['tau'], tau_arr))
    elapsed('quadr')
    print('quadr_w2',
          eval_dist(fit_exp_quadr(y + n, x, m, window_size=2)['tau'],
                    tau_arr))
    elapsed('quadr_w2')
    print('quadr_w3',
          eval_dist(fit_exp_quadr(y + n, x, m, window_size=3)['tau'],
                    tau_arr))
    elapsed('quadr_w3')
    print('arlo', eval_dist(fit_exp_arlo(y + n, x, m)['tau'], tau_arr))
    elapsed('arlo')
    print('loglin', eval_dist(fit_exp_loglin(y + n, x, m)['tau'], tau_arr))
    elapsed('loglin')
    print(
        'loglin_w',
        eval_dist(
            fit_exp_loglin(y + n, x, m, variant='weighted_reverse')['tau'],
            tau_arr))
    elapsed('loglin_w')
    # print('leasq',
    #       eval_dist(fit_exp_curve_fit(y + n, x, init=[5, 4000])['tau'],
    #                 tau_arr))
    # elapsed('leasq')
    msg(report())