def main():
    """Entry point: produce a tractogram for a trained experiment, unless the
    output file already exists (use --force to regenerate it)."""
    parser = build_args_parser()
    args = parser.parse_args()
    print(args)

    # Locate the experiment folder: accept a direct path, or a name living
    # under ./experiments.
    experiment_path = args.name
    if not os.path.isdir(experiment_path):
        # If not a directory, it must be the name of the experiment.
        experiment_path = pjoin(".", "experiments", args.name)

    if not os.path.isdir(experiment_path):
        parser.error('Cannot find experiment: {0}!'.format(args.name))

    # Load the experiment's hyperparameters; fall back to the parent folder
    # when hyperparams.json is kept one level up.
    try:
        hyperparams = smartutils.load_dict_from_json_file(pjoin(experiment_path, "hyperparams.json"))
    except FileNotFoundError:
        hyperparams = smartutils.load_dict_from_json_file(pjoin(experiment_path, "..", "hyperparams.json"))

    with Timer("Loading dataset", newline=True):
        vm = VolumeManager()
        dataset = datasets.load_tractography_dataset([args.streamlines], vm, name="dataset",
                                                     use_sh_coeffs=hyperparams['use_sh_coeffs'])
        print("Dataset size:", len(dataset))

    with Timer("Loading model"):
        model_name = hyperparams['model']
        model = None
        if model_name == 'gru_regression':
            from learn2track.models import GRU_Regression
            model = GRU_Regression.create(experiment_path, volume_manager=vm)
        elif model_name == 'gru_mixture':
            from learn2track.models import GRU_Mixture
            model = GRU_Mixture.create(experiment_path, volume_manager=vm)
        elif model_name == 'gru_multistep':
            from learn2track.models import GRU_Multistep_Gaussian
            model = GRU_Multistep_Gaussian.create(experiment_path, volume_manager=vm)
            # NOTE(review): k/m look like sample/step counts forced to 1 at
            # test time -- confirm against GRU_Multistep_Gaussian.
            model.k = 1
            model.m = 1
        elif model_name == 'ffnn_regression':
            from learn2track.models import FFNN_Regression
            model = FFNN_Regression.create(experiment_path, volume_manager=vm)
        else:
            raise NameError("Unknown model: {}".format(model_name))

        print(str(model))

    tractogram_file = pjoin(experiment_path, args.out)
    if not os.path.isfile(tractogram_file) or args.force:
        if args.method == 'prediction':
            tractogram = prediction_tractogram(hyperparams, model, dataset, args.batch_size, args.prediction)
        elif args.method == 'evaluation':
            tractogram = evaluation_tractogram(hyperparams, model, dataset, args.batch_size, args.metric)
        else:
            raise ValueError("Unrecognized method: {}".format(args.method))

        # Attach the first subject's signal affine before saving.
        tractogram.affine_to_rasmm = dataset.subjects[0].signal.affine
        nib.streamlines.save(tractogram, tractogram_file)
    else:
        print("Tractogram already exists. (use --force to generate it again)")
def main():
    """Generate a tractogram for a trained experiment.

    Resolves the experiment folder from ``args.name``, loads its
    hyperparameters and dataset, re-creates the trained model, then builds a
    tractogram with the selected method ('prediction' or 'evaluation') and
    saves it to ``args.out``. Work is skipped if the output file already
    exists, unless ``--force`` is given.

    Raises
    ------
    ValueError
        If the hyperparameters name an unknown model, or ``args.method`` is
        not 'prediction'/'evaluation'.
    """
    parser = build_args_parser()
    args = parser.parse_args()
    print(args)

    # Get experiment folder: accept a direct path or a name under ./experiments.
    experiment_path = args.name
    if not os.path.isdir(experiment_path):
        # If not a directory, it must be the name of the experiment.
        experiment_path = pjoin(".", "experiments", args.name)

    if not os.path.isdir(experiment_path):
        parser.error('Cannot find experiment: {0}!'.format(args.name))

    # Load experiment hyperparameters; fall back to the parent folder
    # (some layouts keep hyperparams.json one level up).
    try:
        hyperparams = smartutils.load_dict_from_json_file(
            pjoin(experiment_path, "hyperparams.json"))
    except FileNotFoundError:
        hyperparams = smartutils.load_dict_from_json_file(
            pjoin(experiment_path, "..", "hyperparams.json"))

    with Timer("Loading dataset", newline=True):
        volume_manager = VolumeManager()
        dataset = datasets.load_tractography_dataset(
            [args.streamlines], volume_manager, name="dataset",
            use_sh_coeffs=hyperparams['use_sh_coeffs'])
        print("Dataset size:", len(dataset))

    with Timer("Loading model"):
        model = None
        if hyperparams['model'] == 'gru_regression':
            from learn2track.models import GRU_Regression
            model = GRU_Regression.create(experiment_path, volume_manager=volume_manager)
        elif hyperparams['model'] == 'gru_mixture':
            from learn2track.models import GRU_Mixture
            model = GRU_Mixture.create(experiment_path, volume_manager=volume_manager)
        elif hyperparams['model'] == 'gru_multistep':
            from learn2track.models import GRU_Multistep_Gaussian
            model = GRU_Multistep_Gaussian.create(
                experiment_path, volume_manager=volume_manager)
            # NOTE(review): k/m look like sample/step counts forced to 1 at
            # test time -- confirm against GRU_Multistep_Gaussian.
            model.k = 1
            model.m = 1
        elif hyperparams['model'] == 'ffnn_regression':
            from learn2track.models import FFNN_Regression
            model = FFNN_Regression.create(experiment_path, volume_manager=volume_manager)
        else:
            # Fix: was NameError, which Python reserves for unresolved names;
            # ValueError matches the unknown-method check below.
            raise ValueError("Unknown model: {}".format(hyperparams['model']))

        print(str(model))

    tractogram_file = pjoin(experiment_path, args.out)
    if not os.path.isfile(tractogram_file) or args.force:
        if args.method == 'prediction':
            tractogram = prediction_tractogram(hyperparams, model, dataset, args.batch_size, args.prediction)
        elif args.method == 'evaluation':
            tractogram = evaluation_tractogram(hyperparams, model, dataset, args.batch_size, args.metric)
        else:
            raise ValueError("Unrecognized method: {}".format(args.method))

        # Attach the first subject's signal affine before saving.
        tractogram.affine_to_rasmm = dataset.subjects[0].signal.affine
        nib.streamlines.save(tractogram, tractogram_file)
    else:
        print("Tractogram already exists. (use --force to generate it again)")
def main():
    """Evaluate a trained model's loss on a tractography dataset, caching the
    numbers into the experiment's results.json (use --force to recompute)."""
    parser = build_parser()
    args = parser.parse_args()
    print(args)

    # Locate the experiment folder: a direct path or a name under ./experiments.
    experiment_path = args.name
    if not os.path.isdir(experiment_path):
        # If not a directory, it must be the name of the experiment.
        experiment_path = pjoin(".", "experiments", args.name)

    if not os.path.isdir(experiment_path):
        parser.error('Cannot find experiment: {0}!'.format(args.name))

    # Load hyperparameters; fall back to the parent folder when
    # hyperparams.json is kept one level up.
    try:
        hyperparams = smartutils.load_dict_from_json_file(pjoin(experiment_path, "hyperparams.json"))
    except FileNotFoundError:
        hyperparams = smartutils.load_dict_from_json_file(pjoin(experiment_path, "..", "hyperparams.json"))

    with Timer("Loading dataset", newline=True):
        vm = VolumeManager()
        dataset = datasets.load_tractography_dataset(args.subjects, vm, name="dataset",
                                                     use_sh_coeffs=hyperparams['use_sh_coeffs'])
        print("Dataset size:", len(dataset))

    with Timer("Loading model"):
        model_name = hyperparams['model']
        model = None
        if model_name == 'gru_regression':
            from learn2track.models import GRU_Regression
            model = GRU_Regression.create(experiment_path, volume_manager=vm)
        elif model_name == 'gru_mixture':
            from learn2track.models import GRU_Mixture
            model = GRU_Mixture.create(experiment_path, volume_manager=vm)
        elif model_name == 'gru_multistep':
            from learn2track.models import GRU_Multistep_Gaussian
            model = GRU_Multistep_Gaussian.create(experiment_path, volume_manager=vm)
            model.k = 1
            model.m = 1
        elif model_name == 'ffnn_regression':
            from learn2track.models import FFNN_Regression
            model = FFNN_Regression.create(experiment_path, volume_manager=vm)
        else:
            raise NameError("Unknown model: {}".format(model_name))

    with Timer("Building evaluation function"):
        # Override K for gru_multistep
        if 'k' in hyperparams:
            hyperparams['k'] = 1

        batch_scheduler = batch_scheduler_factory(hyperparams, dataset, train_mode=False,
                                                  batch_size_override=args.batch_size)
        loss = loss_factory(hyperparams, model, dataset, loss_type=args.loss_type)
        l2_error = views.LossView(loss=loss, batch_scheduler=batch_scheduler)

    with Timer("Evaluating...", newline=True):
        results_file = pjoin(experiment_path, "results.json")
        results = {}
        if os.path.isfile(results_file) and not args.force:
            print("Loading saved results... (use --force to re-run evaluation)")
            results = smartutils.load_dict_from_json_file(results_file)

        # Suffix the entry name with the kind of loss that was computed.
        if args.loss_type == 'expected_value' or hyperparams['model'] == 'gru_regression':
            tag = "_EV_L2_error"
        elif args.loss_type == 'maximum_component':
            tag = "_MC_L2_error"
        elif hyperparams['model'] == 'gru_mixture' or hyperparams['model'] == 'gru_multistep':
            tag = "_NLL"
        else:
            tag = ""

        entry = args.dataset_name + tag
        if entry not in results or args.force:
            with Timer("Evaluating {}".format(entry)):
                dummy_status = Status()  # Forces recomputing results
                results[entry] = {'mean': float(l2_error.mean.view(dummy_status)),
                                  'stderror': float(l2_error.stderror.view(dummy_status))}
                smartutils.save_dict_to_json_file(results_file, results)  # Update results file.

        print("{}: {:.4f} ± {:.4f}".format(entry, results[entry]['mean'], results[entry]['stderror']))
def main():
    """Evaluate a trained model's loss on a tractography dataset, caching the
    numbers into the experiment's results.json (use --force to recompute)."""
    parser = build_parser()
    args = parser.parse_args()
    print(args)

    # Locate the experiment folder: a direct path or a name under ./experiments.
    experiment_path = args.name
    if not os.path.isdir(experiment_path):
        # If not a directory, it must be the name of the experiment.
        experiment_path = pjoin(".", "experiments", args.name)

    if not os.path.isdir(experiment_path):
        parser.error('Cannot find experiment: {0}!'.format(args.name))

    # Load hyperparameters; fall back to the parent folder when
    # hyperparams.json is kept one level up.
    try:
        hyperparams = smartutils.load_dict_from_json_file(
            pjoin(experiment_path, "hyperparams.json"))
    except FileNotFoundError:
        hyperparams = smartutils.load_dict_from_json_file(
            pjoin(experiment_path, "..", "hyperparams.json"))

    # Use this for hyperparams added in a new version, but nonexistent from
    # older versions: missing keys get a backward-compatible default.
    retrocompatibility_defaults = {'feed_previous_direction': False,
                                   'predict_offset': False,
                                   'normalize': False,
                                   'keep_step_size': False,
                                   'sort_streamlines': False,
                                   'use_layer_normalization': False,
                                   'drop_prob': 0.,
                                   'use_zoneout': False}
    for key, default in retrocompatibility_defaults.items():
        hyperparams.setdefault(key, default)

    with Timer("Loading dataset", newline=True):
        vm = VolumeManager()
        dataset = datasets.load_tractography_dataset(args.subjects, vm, name="dataset",
                                                     use_sh_coeffs=hyperparams['use_sh_coeffs'])
        print("Dataset size:", len(dataset))

    with Timer("Loading model"):
        model_name = hyperparams['model']
        model = None
        if model_name == 'gru_regression':
            from learn2track.models import GRU_Regression
            model = GRU_Regression.create(experiment_path, volume_manager=vm)
        elif model_name == 'gru_gaussian':
            from learn2track.models import GRU_Gaussian
            model = GRU_Gaussian.create(experiment_path, volume_manager=vm)
        elif model_name == 'gru_mixture':
            from learn2track.models import GRU_Mixture
            model = GRU_Mixture.create(experiment_path, volume_manager=vm)
        elif model_name == 'gru_multistep':
            from learn2track.models import GRU_Multistep_Gaussian
            model = GRU_Multistep_Gaussian.create(experiment_path, volume_manager=vm)
            model.k = 1
            model.m = 1
        elif model_name == 'ffnn_regression':
            from learn2track.models import FFNN_Regression
            model = FFNN_Regression.create(experiment_path, volume_manager=vm)
        else:
            raise NameError("Unknown model: {}".format(model_name))

        model.drop_prob = 0.  # Make sure dropout/zoneout is not used when testing

    with Timer("Building evaluation function"):
        # Override K for gru_multistep
        if 'k' in hyperparams:
            hyperparams['k'] = 1

        batch_scheduler = batch_scheduler_factory(hyperparams, dataset, train_mode=False,
                                                  batch_size_override=args.batch_size)
        loss = loss_factory(hyperparams, model, dataset, loss_type=args.loss_type)
        l2_error = views.LossView(loss=loss, batch_scheduler=batch_scheduler)

    with Timer("Evaluating...", newline=True):
        results_file = pjoin(experiment_path, "results.json")
        results = {}
        if os.path.isfile(results_file) and not args.force:
            print("Loading saved results... (use --force to re-run evaluation)")
            results = smartutils.load_dict_from_json_file(results_file)

        # Suffix the entry name with the kind of loss that was computed.
        if args.loss_type == 'expected_value' or hyperparams['model'] == 'gru_regression':
            tag = "_EV_L2_error"
        elif args.loss_type == 'maximum_component':
            tag = "_MC_L2_error"
        elif hyperparams['model'] in ('gru_gaussian', 'gru_mixture', 'gru_multistep'):
            tag = "_NLL"
        else:
            tag = ""

        entry = args.dataset_name + tag
        if entry not in results or args.force:
            with Timer("Evaluating {}".format(entry)):
                dummy_status = Status()  # Forces recomputing results
                results[entry] = {'mean': float(l2_error.mean.view(dummy_status)),
                                  'stderror': float(l2_error.stderror.view(dummy_status))}
                smartutils.save_dict_to_json_file(results_file, results)  # Update results file.

        print("{}: {:.4f} ± {:.4f}".format(entry, results[entry]['mean'], results[entry]['stderror']))
def model_factory(hyperparams, input_size, output_size, volume_manager):
    """Instantiate the model named by ``hyperparams['model']``.

    Parameters
    ----------
    hyperparams : dict
        Experiment hyperparameters; must contain the keys required by the
        selected model ('hidden_sizes', 'seed', ...).
    input_size : int
        Size of the model's input vectors.
    output_size : int
        Size of the model's outputs (passed as ``target_dims`` for
        'gru_multistep').
    volume_manager : VolumeManager
        Volume manager shared with the model.

    Returns
    -------
    The freshly constructed model instance.

    Raises
    ------
    NotImplementedError
        For 'gru_regression' with 'learn_to_stop' enabled (the old
        GRU_RegressionAndBinaryClassification model is no longer available).
    ValueError
        If ``hyperparams['model']`` is not a known model name.
    """
    model_name = hyperparams['model']

    # Fix: use .get() so older configs without the 'learn_to_stop' key don't
    # crash with KeyError (mirrors the retrocompatibility defaults used by
    # the evaluation script).
    if model_name == 'gru_regression' and hyperparams.get('learn_to_stop'):
        raise NotImplementedError()

    if model_name == 'gru_regression':
        from learn2track.models import GRU_Regression
        return GRU_Regression(volume_manager=volume_manager,
                              input_size=input_size,
                              hidden_sizes=hyperparams['hidden_sizes'],
                              output_size=output_size,
                              activation=hyperparams['activation'],
                              use_previous_direction=hyperparams['feed_previous_direction'],
                              predict_offset=hyperparams['predict_offset'],
                              use_layer_normalization=hyperparams['use_layer_normalization'],
                              drop_prob=hyperparams['drop_prob'],
                              use_zoneout=hyperparams['use_zoneout'],
                              use_skip_connections=hyperparams['skip_connections'],
                              seed=hyperparams['seed'])
    elif model_name == 'gru_multistep':
        from learn2track.models import GRU_Multistep_Gaussian
        return GRU_Multistep_Gaussian(volume_manager=volume_manager,
                                      input_size=input_size,
                                      hidden_sizes=hyperparams['hidden_sizes'],
                                      target_dims=output_size,
                                      k=hyperparams['k'],
                                      m=hyperparams['m'],
                                      seed=hyperparams['seed'],
                                      use_previous_direction=hyperparams['feed_previous_direction'],
                                      use_layer_normalization=hyperparams['use_layer_normalization'],
                                      drop_prob=hyperparams['drop_prob'],
                                      use_zoneout=hyperparams['use_zoneout'])
    elif model_name == 'gru_mixture':
        from learn2track.models import GRU_Mixture
        return GRU_Mixture(volume_manager=volume_manager,
                           input_size=input_size,
                           hidden_sizes=hyperparams['hidden_sizes'],
                           output_size=output_size,
                           n_gaussians=hyperparams['n_gaussians'],
                           activation=hyperparams['activation'],
                           use_previous_direction=hyperparams['feed_previous_direction'],
                           use_layer_normalization=hyperparams['use_layer_normalization'],
                           drop_prob=hyperparams['drop_prob'],
                           use_zoneout=hyperparams['use_zoneout'],
                           use_skip_connections=hyperparams['skip_connections'],
                           seed=hyperparams['seed'])
    elif model_name == 'gru_gaussian':
        from learn2track.models import GRU_Gaussian
        return GRU_Gaussian(volume_manager=volume_manager,
                            input_size=input_size,
                            hidden_sizes=hyperparams['hidden_sizes'],
                            output_size=output_size,
                            use_previous_direction=hyperparams['feed_previous_direction'],
                            use_layer_normalization=hyperparams['use_layer_normalization'],
                            drop_prob=hyperparams['drop_prob'],
                            use_zoneout=hyperparams['use_zoneout'],
                            use_skip_connections=hyperparams['skip_connections'],
                            seed=hyperparams['seed'])
    elif model_name == 'ffnn_regression':
        from learn2track.models import FFNN_Regression
        # NOTE: FFNN_Regression takes 'dropout_prob' (not 'drop_prob') and no
        # zoneout, matching its constructor signature.
        return FFNN_Regression(volume_manager=volume_manager,
                               input_size=input_size,
                               hidden_sizes=hyperparams['hidden_sizes'],
                               output_size=output_size,
                               activation=hyperparams['activation'],
                               use_previous_direction=hyperparams['feed_previous_direction'],
                               predict_offset=hyperparams['predict_offset'],
                               use_layer_normalization=hyperparams['use_layer_normalization'],
                               dropout_prob=hyperparams['dropout_prob'],
                               use_skip_connections=hyperparams['skip_connections'],
                               seed=hyperparams['seed'])
    else:
        # Fix: include the offending name, consistent with the
        # "Unknown model: {}" messages used elsewhere in this project.
        raise ValueError("Unknown model: {}".format(model_name))