Example #1
    def save(self, path):
        savedir = smartutils.create_folder(pjoin(path, type(self).__name__))
        smartutils.save_dict_to_json_file(pjoin(savedir, "hyperparams.json"), self.hyperparameters)

        params = {param.name: param.get_value() for param in self.parameters}
        assert len(self.parameters) == len(params)  # Implies names are all unique.
        np.savez(pjoin(savedir, "params.npz"), **params)
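This example saves but never loads. The sketch below is a hypothetical load() counterpart (not from the source): it assumes smartutils also exposes load_dict_from_json_file (it does in Example #3) and that the saved hyperparameters map onto the class constructor's keyword arguments.

    @classmethod
    def load(cls, path):
        loaddir = pjoin(path, cls.__name__)
        hyperparameters = smartutils.load_dict_from_json_file(pjoin(loaddir, "hyperparams.json"))
        model = cls(**hyperparameters)  # Assumption: hyperparams are constructor kwargs.
        parameters = np.load(pjoin(loaddir, "params.npz"))
        for param in model.parameters:
            param.set_value(parameters[param.name])  # Restores each shared variable in place.
        return model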
Example #2
    def save(self, path):
        savedir = utils.create_folder(pjoin(path, "model"))
        hyperparameters = {'input_size': self.input_size,
                           'output_size': self.output_size}
        utils.save_dict_to_json_file(pjoin(savedir, "meta.json"), {"name": self.__class__.__name__})
        utils.save_dict_to_json_file(pjoin(savedir, "hyperparams.json"), hyperparameters)

        params = {param.name: param.get_value() for param in self.parameters}
        np.savez(pjoin(savedir, "params.npz"), **params)
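Here meta.json records the class name separately from the hyperparameters, which lets a generic loader dispatch to the right class. A minimal sketch of such a loader, assuming a hypothetical MODEL_REGISTRY name-to-class map and a load_dict_from_json_file counterpart in utils:

    meta = utils.load_dict_from_json_file(pjoin(path, "model", "meta.json"))
    model_cls = MODEL_REGISTRY[meta["name"]]  # MODEL_REGISTRY is an assumed name -> class map.
    model = model_cls.load(path)              # Assumes each class implements a matching load().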
Example #3
def maybe_create_experiment_folder(args,
                                   exclude=[],
                                   retrocompatibility_defaults={}):
    # Extract experiment hyperparameters
    hyperparams = OrderedDict(sorted(vars(args).items()))

    # Remove hyperparams that should not be part of the hash
    for name in exclude:
        if name in hyperparams:
            del hyperparams[name]

    # Get/generate experiment name
    experiment_name = args.name
    if experiment_name is None:
        experiment_name = generate_uid_from_string(repr(hyperparams))

    # Create experiment folder
    experiment_path = pjoin(".", "experiments", experiment_name)
    resuming = False
    if os.path.isdir(experiment_path) and not args.force:
        resuming = True
        print("### Resuming experiment ({0}). ###\n".format(experiment_name))
        # Check if provided hyperparams match those in the experiment folder
        hyperparams_loaded = smartutils.load_dict_from_json_file(
            pjoin(experiment_path, "hyperparams.json"))

        for name in exclude:
            if name in hyperparams_loaded:
                del hyperparams_loaded[name]

        for new_hyperparam, default_value in retrocompatibility_defaults.items():
            if new_hyperparam in hyperparams and new_hyperparam not in hyperparams_loaded:
                hyperparams_loaded[new_hyperparam] = default_value

        if hyperparams != hyperparams_loaded:
            print("{\n" + "\n".join([
                "{}: {}".format(k, hyperparams[k])
                for k in sorted(hyperparams.keys())
            ]) + "\n}")
            print("{\n" + "\n".join([
                "{}: {}".format(k, hyperparams_loaded[k])
                for k in sorted(hyperparams_loaded.keys())
            ]) + "\n}")
            print(
                "The arguments provided are different from the ones saved. Use --force if you are certain.\nQuitting."
            )
            sys.exit(1)
    else:
        if os.path.isdir(experiment_path):
            shutil.rmtree(experiment_path)

        os.makedirs(experiment_path)
        smartutils.save_dict_to_json_file(
            pjoin(experiment_path, "hyperparams.json"), hyperparams)

    return experiment_path, hyperparams, resuming
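A typical call site for maybe_create_experiment_folder, sketched with illustrative argparse flags (--name and --force match the attributes the function reads; --lr stands in for real hyperparameters):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--name', default=None, help='Experiment name; hashed from hyperparams if omitted.')
parser.add_argument('--force', action='store_true', help='Restart instead of resuming.')
parser.add_argument('--lr', type=float, default=1e-3)  # Illustrative hyperparameter.
args = parser.parse_args()

# Keep bookkeeping flags out of the hyperparameter hash.
experiment_path, hyperparams, resuming = maybe_create_experiment_folder(
    args, exclude=['name', 'force'])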
Example #4
    def save(self, path):
        savedir = utils.create_folder(pjoin(path, "model"))
        hyperparameters = {
            'input_size': self.input_size,
            'output_size': self.output_size
        }
        utils.save_dict_to_json_file(pjoin(savedir, "meta.json"),
                                     {"name": self.__class__.__name__})
        utils.save_dict_to_json_file(pjoin(savedir, "hyperparams.json"),
                                     hyperparameters)

        params = {param.name: param.get_value() for param in self.parameters}
        np.savez(pjoin(savedir, "params.npz"), **params)
Example #5
def maybe_create_experiment_folder(args, exclude=[], retrocompatibility_defaults={}):
    # Extract experiment hyperparameters
    hyperparams = OrderedDict(sorted(vars(args).items()))

    # Remove hyperparams that should not be part of the hash
    for name in exclude:
        if name in hyperparams:
            del hyperparams[name]

    # Get/generate experiment name
    experiment_name = args.name
    if experiment_name is None:
        experiment_name = generate_uid_from_string(repr(hyperparams))

    # Create experiment folder
    experiment_path = pjoin(".", "experiments", experiment_name)
    resuming = False
    if os.path.isdir(experiment_path) and not args.force:
        resuming = True
        print("### Resuming experiment ({0}). ###\n".format(experiment_name))
        # Check if provided hyperparams match those in the experiment folder
        hyperparams_loaded = smartutils.load_dict_from_json_file(pjoin(experiment_path, "hyperparams.json"))

        for name in exclude:
            if name in hyperparams_loaded:
                del hyperparams_loaded[name]

        for new_hyperparams, default_value in retrocompatibility_defaults.items():
            if new_hyperparams not in hyperparams_loaded:
                hyperparams_loaded[new_hyperparams] = default_value

        if hyperparams != hyperparams_loaded:
            print("{\n" + "\n".join(["{}: {}".format(k, hyperparams[k]) for k in sorted(hyperparams.keys())]) + "\n}")
            print("{\n" + "\n".join(["{}: {}".format(k, hyperparams_loaded[k]) for k in sorted(hyperparams_loaded.keys())]) + "\n}")
            print("The arguments provided are different than the one saved. Use --force if you are certain.\nQuitting.")
            sys.exit(1)
    else:
        if os.path.isdir(experiment_path):
            shutil.rmtree(experiment_path)

        os.makedirs(experiment_path)
        smartutils.save_dict_to_json_file(pjoin(experiment_path, "hyperparams.json"), hyperparams)

    return experiment_path, hyperparams, resuming
Example #6
    def save(self, path):
        savedir = smartutils.create_folder(pjoin(path, type(self).__name__))
        smartutils.save_dict_to_json_file(pjoin(savedir, "hyperparams.json"),
                                          self.hyperparameters)

        params = {param.name: param.get_value() for param in self.parameters}
        assert len(self.parameters) == len(params)  # Implies names are all unique.
        np.savez(pjoin(savedir, "params.npz"), **params)

        state = {
            "version": 1,
            "_srng_rstate": self.srng.rstate,
            "_srng_state_updates": [state_update[0].get_value()
                                    for state_update in self.srng.state_updates]
        }
        np.savez(pjoin(savedir, "state.npz"), **state)
Example #7
def main():
    parser = build_argparser()
    args = parser.parse_args()

    # Get experiment folder
    experiment_path = args.name
    if not os.path.isdir(experiment_path):
        # If not a directory, it must be the name of the experiment.
        experiment_path = pjoin(".", "experiments", args.name)

    if not os.path.isdir(experiment_path):
        parser.error('Cannot find experiment: {0}!'.format(args.name))

    if not os.path.isdir(pjoin(experiment_path, "evaluation")):
        parser.error('Cannot find evaluations for experiment: {0}!'.format(
            experiment_path))

    results_file = pjoin(experiment_path, "results.json")

    if not os.path.isfile(results_file) or args.force:
        with Timer("Merging NLL evaluations"):
            results = merge_NLL_evaluations(
                evaluation_dir=pjoin(experiment_path, "evaluation"))
            smartutils.save_dict_to_json_file(results_file, {"NLL": results})

    else:
        print("Loading saved losses... (use --force to re-run evaluation)")
        results = smartutils.load_dict_from_json_file(results_file)["NLL"]

    nb_orderings = results['nb_orderings']
    for dataset in ['validset', 'testset']:
        print("NLL estimate on {} ({} orderings): {:.2f} ± {:.2f}".format(
            dataset, nb_orderings, results[dataset]['mean'],
            results[dataset]['stderror']))

        if results[dataset]['incomplete']:
            print(
                "** Warning **: {} evaluation is incomplete. Missing some orderings or batches."
                .format(dataset))
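Based on the keys accessed above, the merged results dictionary stored under "NLL" in results.json would have roughly this shape (values are illustrative, not from the source):

results = {
    "nb_orderings": 8,  # illustrative
    "validset": {"mean": 84.20, "stderror": 0.31, "incomplete": False},
    "testset": {"mean": 83.75, "stderror": 0.29, "incomplete": False},
}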
Example #8
    def save(self, path):
        state = {"version": 1,
                 "var": self.var.get_value()}
        smartutils.save_dict_to_json_file(pjoin(path, type(self).__name__ + ".json"), state)
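One caveat: if self.var is a shared variable, get_value() typically returns a NumPy array or scalar, which the standard json module cannot serialize, so whether this works as-is depends on what save_dict_to_json_file does internally. A defensive variant (assuming NumPy values) converts to plain Python types first:

        state = {"version": 1,
                 "var": np.asarray(self.var.get_value()).tolist()}  # JSON-safe built-in types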
Example #9
def main():
    parser = build_parser()
    args = parser.parse_args()
    print(args)

    # Get experiment folder
    experiment_path = args.name
    if not os.path.isdir(experiment_path):
        # If not a directory, it must be the name of the experiment.
        experiment_path = pjoin(".", "experiments", args.name)

    if not os.path.isdir(experiment_path):
        parser.error('Cannot find experiment: {0}!'.format(args.name))

    # Load experiment hyperparameters
    try:
        hyperparams = smartutils.load_dict_from_json_file(pjoin(experiment_path, "hyperparams.json"))
    except FileNotFoundError:
        hyperparams = smartutils.load_dict_from_json_file(pjoin(experiment_path, "..", "hyperparams.json"))

    with Timer("Loading dataset", newline=True):
        volume_manager = VolumeManager()
        dataset = datasets.load_tractography_dataset(args.subjects, volume_manager, name="dataset", use_sh_coeffs=hyperparams['use_sh_coeffs'])
        print("Dataset size:", len(dataset))

    with Timer("Loading model"):
        model = None
        if hyperparams['model'] == 'gru_regression':
            from learn2track.models import GRU_Regression
            model = GRU_Regression.create(experiment_path, volume_manager=volume_manager)
        elif hyperparams['model'] == 'gru_mixture':
            from learn2track.models import GRU_Mixture
            model = GRU_Mixture.create(experiment_path, volume_manager=volume_manager)
        elif hyperparams['model'] == 'gru_multistep':
            from learn2track.models import GRU_Multistep_Gaussian
            model = GRU_Multistep_Gaussian.create(experiment_path, volume_manager=volume_manager)
            model.k = 1
            model.m = 1
        elif hyperparams['model'] == 'ffnn_regression':
            from learn2track.models import FFNN_Regression
            model = FFNN_Regression.create(experiment_path, volume_manager=volume_manager)
        else:
            raise NameError("Unknown model: {}".format(hyperparams['model']))

    with Timer("Building evaluation function"):
        # Override K for gru_multistep
        if 'k' in hyperparams:
            hyperparams['k'] = 1

        batch_scheduler = batch_scheduler_factory(hyperparams, dataset, train_mode=False, batch_size_override=args.batch_size)
        loss = loss_factory(hyperparams, model, dataset, loss_type=args.loss_type)
        l2_error = views.LossView(loss=loss, batch_scheduler=batch_scheduler)

    with Timer("Evaluating...", newline=True):
        results_file = pjoin(experiment_path, "results.json")
        results = {}
        if os.path.isfile(results_file) and not args.force:
            print("Loading saved results... (use --force to re-run evaluation)")
            results = smartutils.load_dict_from_json_file(results_file)

        tag = ""
        if args.loss_type == 'expected_value' or hyperparams['model'] == 'gru_regression':
            tag = "_EV_L2_error"
        elif args.loss_type == 'maximum_component':
            tag = "_MC_L2_error"
        elif hyperparams['model'] == 'gru_mixture' or hyperparams['model'] == 'gru_multistep':
            tag = "_NLL"

        entry = args.dataset_name + tag

        if entry not in results or args.force:
            with Timer("Evaluating {}".format(entry)):
                dummy_status = Status()  # Forces recomputing results
                results[entry] = {'mean': float(l2_error.mean.view(dummy_status)), 'stderror': float(l2_error.stderror.view(dummy_status))}
                smartutils.save_dict_to_json_file(results_file, results)  # Update results file.

        print("{}: {:.4f} ± {:.4f}".format(entry, results[entry]['mean'], results[entry]['stderror']))
Example #10
def main():
    parser = build_parser()
    args = parser.parse_args()
    print(args)

    # Get experiment folder
    experiment_path = args.name
    if not os.path.isdir(experiment_path):
        # If not a directory, it must be the name of the experiment.
        experiment_path = pjoin(".", "experiments", args.name)

    if not os.path.isdir(experiment_path):
        parser.error('Cannot find experiment: {0}!'.format(args.name))

    # Load experiment hyperparameters
    try:
        hyperparams = smartutils.load_dict_from_json_file(
            pjoin(experiment_path, "hyperparams.json"))
    except FileNotFoundError:
        hyperparams = smartutils.load_dict_from_json_file(
            pjoin(experiment_path, "..", "hyperparams.json"))

    # Defaults for hyperparams added in newer versions but absent from older experiments
    retrocompatibility_defaults = {
        'feed_previous_direction': False,
        'predict_offset': False,
        'normalize': False,
        'keep_step_size': False,
        'sort_streamlines': False,
        'use_layer_normalization': False,
        'drop_prob': 0.,
        'use_zoneout': False
    }
    for new_hyperparams, default_value in retrocompatibility_defaults.items():
        if new_hyperparams not in hyperparams:
            hyperparams[new_hyperparams] = default_value

    with Timer("Loading dataset", newline=True):
        volume_manager = VolumeManager()
        dataset = datasets.load_tractography_dataset(
            args.subjects,
            volume_manager,
            name="dataset",
            use_sh_coeffs=hyperparams['use_sh_coeffs'])
        print("Dataset size:", len(dataset))

    with Timer("Loading model"):
        model = None
        if hyperparams['model'] == 'gru_regression':
            from learn2track.models import GRU_Regression
            model = GRU_Regression.create(experiment_path,
                                          volume_manager=volume_manager)
        elif hyperparams['model'] == 'gru_gaussian':
            from learn2track.models import GRU_Gaussian
            model = GRU_Gaussian.create(experiment_path,
                                        volume_manager=volume_manager)
        elif hyperparams['model'] == 'gru_mixture':
            from learn2track.models import GRU_Mixture
            model = GRU_Mixture.create(experiment_path,
                                       volume_manager=volume_manager)
        elif hyperparams['model'] == 'gru_multistep':
            from learn2track.models import GRU_Multistep_Gaussian
            model = GRU_Multistep_Gaussian.create(
                experiment_path, volume_manager=volume_manager)
            model.k = 1
            model.m = 1
        elif hyperparams['model'] == 'ffnn_regression':
            from learn2track.models import FFNN_Regression
            model = FFNN_Regression.create(experiment_path,
                                           volume_manager=volume_manager)
        else:
            raise NameError("Unknown model: {}".format(hyperparams['model']))
        model.drop_prob = 0.  # Make sure dropout/zoneout is not used when testing

    with Timer("Building evaluation function"):
        # Override K for gru_multistep
        if 'k' in hyperparams:
            hyperparams['k'] = 1

        batch_scheduler = batch_scheduler_factory(
            hyperparams,
            dataset,
            train_mode=False,
            batch_size_override=args.batch_size)
        loss = loss_factory(hyperparams,
                            model,
                            dataset,
                            loss_type=args.loss_type)
        l2_error = views.LossView(loss=loss, batch_scheduler=batch_scheduler)

    with Timer("Evaluating...", newline=True):
        results_file = pjoin(experiment_path, "results.json")
        results = {}
        if os.path.isfile(results_file) and not args.force:
            print(
                "Loading saved results... (use --force to re-run evaluation)")
            results = smartutils.load_dict_from_json_file(results_file)

        tag = ""
        if args.loss_type == 'expected_value' or hyperparams['model'] == 'gru_regression':
            tag = "_EV_L2_error"
        elif args.loss_type == 'maximum_component':
            tag = "_MC_L2_error"
        elif hyperparams['model'] in ['gru_gaussian', 'gru_mixture', 'gru_multistep']:
            tag = "_NLL"

        entry = args.dataset_name + tag

        if entry not in results or args.force:
            with Timer("Evaluating {}".format(entry)):
                dummy_status = Status()  # Forces recomputing results
                results[entry] = {
                    'mean': float(l2_error.mean.view(dummy_status)),
                    'stderror': float(l2_error.stderror.view(dummy_status))
                }
                smartutils.save_dict_to_json_file(
                    results_file, results)  # Update results file.

        print("{}: {:.4f} ± {:.4f}".format(entry, results[entry]['mean'],
                                           results[entry]['stderror']))
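Over several runs with different datasets and loss types, results.json accumulates one entry per dataset-plus-tag key. Illustrative contents (values are made up):

results = {
    "testset_EV_L2_error": {"mean": 0.4312, "stderror": 0.0021},
    "validset_NLL": {"mean": 84.2034, "stderror": 0.3109},
}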