    def run(self):
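        """Compare non-stratified and stratified evaluation of each
        normalized measure: the non-stratified variant uses 10% of the
        train subset, the stratified one the full train subset, and both
        curves are plotted together for every (model, dataset,
        transformation) combination."""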
        measures = normalized_measures

        model_names = simple_models_generators
        transformations = common_transformations

        combinations = itertools.product(model_names, dataset_names,
                                         transformations, measures)
        for (model_config_generator, dataset, transformation,
             measure) in combinations:
            # train
            model_config = model_config_generator.for_dataset(dataset)
            epochs = config.get_epochs(model_config, dataset, transformation)
            p_training = training.Parameters(model_config, dataset,
                                             transformation, epochs)
            self.experiment_training(p_training)
            # generate variance params
            # use a fixed 10% of the train subset for the non-stratified
            # evaluation (roughly 1/number of classes for these datasets)
            p_dataset = measure_package.DatasetParameters(
                dataset, datasets.DatasetSubset.train, 0.1)
            p_variance = measure_package.Parameters(p_training.id(), p_dataset,
                                                    transformation, measure)

            p_dataset_variance = measure_package.DatasetParameters(
                dataset, datasets.DatasetSubset.train, 1.0)
            p_variance_stratified = measure_package.Parameters(
                p_training.id(),
                p_dataset_variance,
                transformation,
                measure,
                stratified=True)

            # evaluate variance
            model_path = self.model_path(p_training)
            self.experiment_measure(p_variance)
            self.experiment_measure(p_variance_stratified)
            variance_parameters = [p_variance, p_variance_stratified]
            # plot results
            experiment_name = f"{model_config.name}_{dataset}_{transformation.id()}_{measure.id()}"
            plot_filepath = self.folderpath / f"{experiment_name}.jpg"
            results = self.load_measure_results(
                self.results_paths(variance_parameters))

            labels = [l.non_stratified, l.stratified]
            visualization.plot_collapsing_layers_same_model(
                results,
                plot_filepath,
                labels=labels,
                ylim=get_ylim_normalized(measure))
# Example 2
    def run(self):
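        """Compare a TIPoolingSimpleConv model (trained on untransformed
        data, since the transformations are built into its siamese paths)
        against a plain SimpleConv trained with data augmentation. The
        TIPooling result is averaged over its transformation paths before
        plotting, and the layers before the pooling stage are marked."""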
        measures = normalized_measures

        transformations = common_transformations

        combinations = itertools.product(dataset_names, transformations,
                                         measures)
        for (dataset, transformation, measure) in combinations:

            siamese = config.TIPoolingSimpleConvConfig.for_dataset(
                dataset, bn=False, t=transformation)
            siamese_epochs = config.get_epochs(siamese, dataset,
                                               transformation)
            p_training_siamese = training.Parameters(siamese, dataset,
                                                     identity_transformation,
                                                     siamese_epochs, 0)

            normal = config.SimpleConvConfig.for_dataset(dataset, bn=False)
            normal_epochs = config.get_epochs(normal, dataset, transformation)
            p_training_normal = training.Parameters(normal, dataset,
                                                    transformation,
                                                    normal_epochs, 0)

            p_training_parameters = [p_training_siamese, p_training_normal]
            variance_parameters = []
            for p_training in p_training_parameters:
                # train
                self.experiment_training(p_training)
                # generate variance params
                p = config.dataset_size_for_measure(measure)
                p_dataset = measure_package.DatasetParameters(
                    dataset, datasets.DatasetSubset.test, p)
                p_variance = measure_package.Parameters(
                    p_training.id(), p_dataset, transformation, measure)
                variance_parameters.append(p_variance)
                # evaluate variance
                model_path = self.model_path(p_training)
                self.experiment_measure(p_variance)

            model, _, _, _ = self.load_model(p_training_siamese,
                                             use_cuda=False,
                                             load_state=False)
            model: models.TIPoolingSimpleConv = model
            results = self.load_measure_results(
                self.results_paths(variance_parameters))
            results[0] = self.average_paths_tipooling(model, results[0])
            # plot results
            # print("simpleconv",len(results[1].measure_result.layer_names),results[1].measure_result.layer_names)
            labels = ["TIPooling SimpleConv", "SimpleConv"]
            experiment_name = f"{dataset}_{transformation.id()}_{measure.id()}"
            plot_filepath = self.folderpath / f"{experiment_name}.jpg"
            mark_layers = range(
                model.layer_before_pooling_each_transformation())
            visualization.plot_collapsing_layers_same_model(
                results,
                plot_filepath,
                labels=labels,
                mark_layers=mark_layers,
                ylim=get_ylim_normalized(measure))
# Example 3
    def run(self):
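        """Evaluate how sensitive each measure is to the number of
        transformation samples: train with the default transformation
        counts (n_r, n_s, n_t) and measure against test sets whose sample
        counts vary around those defaults."""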
        measures = normalized_measures
        combinations = itertools.product(simple_models_generators,
                                         dataset_names, measures)
        nr, ns, nt = config.transformations.n_r, config.transformations.n_s, config.transformations.n_t
        n_rotations = [nr - 20, nr - 10, nr, nr + 10, nr + 20, nr + 30]
        n_scales = [ns - 2, ns - 1, ns, ns + 2, ns + 4, ns + 6]
        n_translations = [nt - 2, nt - 1, nt, nt + 1, nt + 2, nt + 3]
        r = config.transformations.rotation_max_degrees
        down, up = config.transformations.scale_min_downscale, config.transformations.scale_max_upscale
        t = config.transformations.translation_max
        test_sets = [
            [AffineGenerator(r=UniformRotation(i, r)) for i in n_rotations],
            [AffineGenerator(s=ScaleUniform(i, down, up)) for i in n_scales],
            [
                AffineGenerator(t=TranslationUniform(i, t))
                for i in n_translations
            ],
        ]

        labels = [[f"{len(s)} {l.transformations}" for s in set]
                  for set in test_sets]

        train_transformations = common_transformations

        for model_config_generator, dataset, measure in combinations:
            model_config = model_config_generator.for_dataset(dataset)

            for train_transformation, transformation_set, set_labels in zip(
                    train_transformations, test_sets, labels):
                # TRAIN
                epochs = config.get_epochs(model_config, dataset,
                                           train_transformation)
                p_training = training.Parameters(model_config, dataset,
                                                 train_transformation, epochs,
                                                 0)
                model_path = self.model_path(p_training)
                self.experiment_training(p_training)
                # MEASURE
                variance_parameters = []
                for test_transformation in transformation_set:
                    p_dataset = measure_package.DatasetParameters(
                        dataset, datasets.DatasetSubset.test,
                        default_dataset_percentage)
                    p_variance = measure_package.Parameters(
                        model_config.id(), p_dataset, test_transformation,
                        measure)
                    self.experiment_measure(p_variance, model_path=model_path)
                    variance_parameters.append(p_variance)

                # PLOT
                experiment_name = f"{model_config.name}_{dataset}_{measure.id()}_{train_transformation.id()}"
                plot_filepath = self.folderpath / f"{experiment_name}.jpg"
                results = self.load_measure_results(
                    self.results_paths(variance_parameters))
                visualization.plot_collapsing_layers_same_model(
                    results, plot_filepath, labels=set_labels, ylim=1.4)
# Example 4
    def run(self):
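        """Assess the effect of batch normalization: train each model with
        and without BN, plot the BN variant alone with its BN layers
        marked, then plot both variants together after removing the BN
        layers from the BN result so the remaining layers align."""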
        measures = normalized_measures
        models = simple_models_generators
        combinations = itertools.product(models, dataset_names,
                                         common_transformations, measures)
        for (model_config_generator, dataset, transformation,
             measure) in combinations:
            variance_parameters = []
            for bn in [True, False]:
                model_config = model_config_generator.for_dataset(dataset,
                                                                  bn=bn)
                epochs = config.get_epochs(model_config, dataset,
                                           transformation)
                p_training = training.Parameters(model_config, dataset,
                                                 transformation, epochs)
                self.experiment_training(p_training)

                p = config.dataset_size_for_measure(measure)
                p_dataset = measure_package.DatasetParameters(
                    dataset, datasets.DatasetSubset.test, p)
                p_variance = measure_package.Parameters(
                    p_training.id(), p_dataset, transformation, measure)
                model_path = self.model_path(p_training)
                batch_size = 64
                if model_config.name.startswith("ResNet"):
                    batch_size = 32
                self.experiment_measure(p_variance, batch_size=batch_size)
                variance_parameters.append(p_variance)

            # plot results
            bn_result, result = self.load_measure_results(
                self.results_paths(variance_parameters))
            layer_names = bn_result.layer_names
            bn_indices = [
                i for i, n in enumerate(layer_names) if n.endswith("bn")
            ]
            # single
            experiment_name = f"{model_config.name}_{dataset}_{transformation.id()}_{measure.id()}"
            plot_filepath = self.folderpath / f"{experiment_name}.jpg"
            visualization.plot_collapsing_layers_same_model(
                [bn_result], plot_filepath, mark_layers=bn_indices)

            # comparison
            experiment_name = f"{model_config.name}_{dataset}_{transformation.id()}_{measure.id()}_comparison"
            plot_filepath = self.folderpath / f"{experiment_name}.jpg"
            bn_result = bn_result.remove_layers(bn_indices)
            labels = [l.with_bn, l.without_bn]
            visualization.plot_collapsing_layers_same_model(
                [bn_result, result],
                plot_filepath,
                labels=labels,
                ylim=get_ylim_normalized(measure))
    def run(self):
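        """For each trained model, visualize the most and least invariant
        feature maps per layer (4 each, for 2 sample images). A `finished`
        sentinel file in each plot folder marks completed experiments so
        they are skipped on re-runs."""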
        measures = normalized_measures
        conv_model_names = simple_models_generators

        transformations = common_transformations

        combinations = itertools.product(conv_model_names, dataset_names,
                                         transformations, measures)
        for (model_config_generator, dataset_name, transformation,
             measure) in combinations:
            model_config = model_config_generator.for_dataset(dataset_name)
            # train
            epochs = config.get_epochs(model_config, dataset_name,
                                       transformation)
            p_training = training.Parameters(model_config, dataset_name,
                                             transformation, epochs)

            experiment_name = f"{model_config.name}_{dataset_name}_{transformation.id()}_{measure.id()}"
            plot_folderpath = self.folderpath / experiment_name
            finished = plot_folderpath / "finished"
            if finished.exists():
                continue
            # train
            self.experiment_training(p_training)
            p = config.dataset_size_for_measure(measure)
            p_dataset = measure_package.DatasetParameters(
                dataset_name, datasets.DatasetSubset.test, p)
            p_variance = measure_package.Parameters(p_training.id(), p_dataset,
                                                    transformation, measure)
            self.experiment_measure(p_variance)

            model_filepath = self.model_path(p_training)
            model, _, _, _ = training.load_model(
                model_filepath, use_cuda=torch.cuda.is_available())
            result_filepath = self.results_path(p_variance)
            result = self.load_experiment_result(
                result_filepath).measure_result
            dataset = datasets.get_classification(dataset_name)

            plot_folderpath.mkdir(parents=True, exist_ok=True)

            self.plot(plot_folderpath,
                      model,
                      dataset,
                      transformation,
                      result,
                      images=2,
                      most_invariant_k=4,
                      least_invariant_k=4,
                      conv_aggregation=ca_mean)
            finished.touch()
    def run(self):
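        """Estimate the variability of each measure across training runs:
        train several repetitions of the same configuration (distinguished
        only by a `rep` suffix) and plot all measure curves together with
        their mean."""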
        measures = normalized_measures_validation
        repetitions = 8

        model_generators = simple_models_generators
        transformations = common_transformations

        combinations = itertools.product(model_generators, dataset_names,
                                         transformations, measures)
        for (model_generator, dataset, transformation,
             measure) in combinations:
            # train
            model_config = model_generator.for_dataset(dataset)
            epochs = config.get_epochs(model_config, dataset, transformation)
            training_parameters = []
            for r in range(repetitions):
                p_training = training.Parameters(model_config,
                                                 dataset,
                                                 transformation,
                                                 epochs,
                                                 0,
                                                 suffix=f"rep{r:02}")
                self.experiment_training(p_training)
                training_parameters.append(p_training)
            # generate variance params
            variance_parameters = []
            for p_training in training_parameters:
                model_path = self.model_path(p_training)
                p = config.dataset_size_for_measure(measure)
                p_dataset = measure_package.DatasetParameters(
                    dataset, datasets.DatasetSubset.test, p)
                p_variance = measure_package.Parameters(
                    p_training.id(), p_dataset, transformation, measure)
                variance_parameters.append(p_variance)
                # evaluate variance
                self.experiment_measure(p_variance)

            # plot results
            experiment_name = f"{model_config.name}_{dataset}_{transformation.id()}_{measure.id()}"
            plot_filepath = self.folderpath / f"{experiment_name}.jpg"
            results = self.load_measure_results(
                self.results_paths(variance_parameters))
            visualization.plot_collapsing_layers_same_model(
                results,
                plot_filepath,
                plot_mean=True,
                ylim=get_ylim_normalized(measure))
    def run(self):
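        """Evaluate how the size of the evaluation set affects each
        measure: compute the same measure on 1%, 5%, 10%, 50% and 100% of
        the test subset and plot the curves with a sequential colormap."""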
        dataset_sizes = [0.01, 0.05, 0.1, 0.5, 1.0]
        model_names = simple_models_generators
        measures = normalized_measures_validation
        combinations = list(
            itertools.product(model_names, dataset_names,
                              common_transformations_combined, measures))
        for (model, dataset, transformation, measure) in combinations:
            model_config = model.for_dataset(dataset)
            epochs = config.get_epochs(model_config, dataset, transformation)
            p_training = training.Parameters(model_config, dataset,
                                             transformation, epochs)
            self.experiment_training(p_training)
            p_datasets = [
                measure_package.DatasetParameters(dataset,
                                                  datasets.DatasetSubset.test,
                                                  p) for p in dataset_sizes
            ]
            experiment_name = f"{model_config}_{dataset}_{transformation.id()}_{measure.id()}"
            plot_filepath = self.folderpath / f"{experiment_name}.jpg"
            variance_parameters = [
                measure_package.Parameters(p_training.id(), p_dataset,
                                           transformation, measure)
                for p_dataset in p_datasets
            ]
            model_path = self.model_path(p_training)
            for p_variance in variance_parameters:
                self.experiment_measure(p_variance)
            results = self.load_measure_results(
                self.results_paths(variance_parameters))

            labels = [f"{d * 100:2}%" for d in dataset_sizes]
            n = len(dataset_sizes)
            values = list(range(n))
            values.reverse()
            colors = visualization.get_sequential_colors(values)

            visualization.plot_collapsing_layers_same_model(
                results,
                plot_filepath,
                labels=labels,
                colors=colors,
                ylim=get_ylim_normalized(measure))
    def run(self):
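        """Compare measure values computed on the train subset versus the
        test subset of each dataset, using the subset-specific dataset
        size returned by config.dataset_size_for_measure."""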
        dataset_subsets = [
            datasets.DatasetSubset.test, datasets.DatasetSubset.train
        ]

        model_names = simple_models_generators
        measures = normalized_measures_validation
        combinations = list(
            itertools.product(model_names, dataset_names,
                              common_transformations_combined, measures))

        for (model_config_generator, dataset, transformation,
             measure) in combinations:

            model_config = model_config_generator.for_dataset(dataset)
            epochs = config.get_epochs(model_config, dataset, transformation)

            p_training = training.Parameters(model_config, dataset,
                                             transformation, epochs)
            self.experiment_training(p_training)

            p_datasets = []
            for subset in dataset_subsets:
                p = config.dataset_size_for_measure(measure, subset)
                p_datasets.append(
                    measure_package.DatasetParameters(dataset, subset, p))
            experiment_name = f"{model_config.name}_{dataset}_{transformation.id()}_{measure.id()}"
            plot_filepath = self.folderpath / f"{experiment_name}.jpg"
            variance_parameters = [
                measure_package.Parameters(p_training.id(), p_dataset,
                                           transformation, measure)
                for p_dataset in p_datasets
            ]
            model_path = self.model_path(p_training)
            for p_variance in variance_parameters:
                self.experiment_measure(p_variance)
            results = self.load_measure_results(
                self.results_paths(variance_parameters))
            labels = [f"{l.format_subset(d.subset)}" for d in p_datasets]
            visualization.plot_collapsing_layers_same_model(
                results,
                plot_filepath,
                labels=labels,
                ylim=get_ylim_normalized(measure))
    def measure(self, p_training, config, dataset, measure, transformation,
                savepoints):
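        """Compute `measure` for the model saved at each training
        savepoint: build measure parameters keyed by the savepoint-specific
        model id, run the measure for each savepoint, and return the
        parameters together with the corresponding model paths."""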
        variance_parameters = []
        model_paths = []
        p = config.dataset_size_for_measure(measure)
        p_dataset = measure_package.DatasetParameters(
            dataset, datasets.DatasetSubset.test, p)
        for sp in savepoints:
            model_path = self.model_path(p_training, savepoint=sp)
            model_id = p_training.id(savepoint=sp)
            p_variance = measure_package.Parameters(model_id, p_dataset,
                                                    transformation, measure)
            variance_parameters.append(p_variance)
            model_paths.append(model_path)

        for p_variance, model_path in zip(variance_parameters, model_paths):
            self.experiment_measure(p_variance, model_path=model_path)
        return variance_parameters, model_paths
# Example 10
    def run(self):
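        """Cross-transformation analysis: train a model on each
        transformation, then measure its invariance to every
        transformation (rotation, scale and translation) and plot the
        three curves per trained model."""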
        measures = normalized_measures

        combinations = itertools.product(simple_models_generators,
                                         dataset_names, measures)
        transformations = common_transformations

        labels = [l.rotation, l.scale, l.translation]
        for model_config_generator, dataset, measure in combinations:
            for train_transformation in transformations:
                model_config = model_config_generator.for_dataset(dataset)

                variance_parameters = []
                print(f"{l.train}: {train_transformation}")
                epochs = config.get_epochs(model_config, dataset,
                                           train_transformation)

                p_training = training.Parameters(model_config, dataset,
                                                 train_transformation, epochs,
                                                 0)
                self.experiment_training(p_training)
                for i, test_transformation in enumerate(transformations):
                    print(f"{i}, ", end="")
                    p_dataset = measure_package.DatasetParameters(
                        dataset, datasets.DatasetSubset.test,
                        default_dataset_percentage)
                    p_variance = measure_package.Parameters(
                        p_training.id(), p_dataset, test_transformation,
                        measure)
                    model_path = self.model_path(p_training)
                    self.experiment_measure(p_variance)
                    variance_parameters.append(p_variance)
                results = self.load_measure_results(
                    self.results_paths(variance_parameters))
                experiment_name = f"{model_config.name}_{dataset}_{train_transformation}_{measure.id()}"
                plot_filepath = self.folderpath / f"{experiment_name}.jpg"
                # title = f"Train transformation: {train_transformation.id()}"
                visualization.plot_collapsing_layers_same_model(results,
                                                                plot_filepath,
                                                                labels=labels)
# Example 11
    def run(self):
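        """Compare the same measure across architectures (SimpleConv,
        AllConvolutional, VGG16D, ResNet): train each on the same dataset
        and transformation, then plot all models' curves together, marking
        the fully connected layers."""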
        measures = normalized_measures

        models = [
            config.SimpleConvConfig,
            config.AllConvolutionalConfig,
            config.VGG16DConfig,
            config.ResNetConfig,
        ]
        transformations = common_transformations
        model_names = [m.for_dataset("mnist").name for m in models]
        combinations = itertools.product(dataset_names, transformations, measures)
        for (dataset, transformation, measure) in combinations:
            variance_parameters = []
            model_configs = []
            for model_config_generator in models:
                model_config = model_config_generator.for_dataset(dataset)
                model_configs.append(model_config)

                # train
                epochs = config.get_epochs(model_config, dataset, transformation)
                p_training = training.Parameters(model_config, dataset, transformation, epochs)
                self.experiment_training(p_training)
                # generate variance params
                p = config.dataset_size_for_measure(measure)
                p_dataset = measure_package.DatasetParameters(dataset, datasets.DatasetSubset.test, p)
                p_variance = measure_package.Parameters(p_training.id(), p_dataset, transformation, measure)
                variance_parameters.append(p_variance)
                # evaluate variance
                self.experiment_measure(p_variance)

            # plot results
            experiment_name = f"{dataset}_{transformation.id()}_{measure.id()}"
            plot_filepath = self.folderpath / f"{experiment_name}.jpg"
            results = self.load_measure_results(self.results_paths(variance_parameters))
            visualization.plot_collapsing_layers_different_models(
                results,
                plot_filepath,
                labels=model_names,
                markers=self.fc_layers_indices(results),
                ylim=get_ylim_normalized(measure))
    def run(self):
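        """Cross-dataset evaluation: train on one dataset, then compute
        the measure on the test subset of every dataset, passing
        adapt_dataset=True so the model can be evaluated on datasets other
        than the one it was trained on."""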
        measures = normalized_measures_validation

        combinations = itertools.product(simple_models_generators,
                                         dataset_names,
                                         common_transformations_combined,
                                         measures)
        for (model_config_generator, dataset, transformation,
             measure) in combinations:
            model_config = model_config_generator.for_dataset(dataset)

            # train
            epochs = config.get_epochs(model_config, dataset, transformation)
            p_training = training.Parameters(model_config, dataset,
                                             transformation, epochs)
            self.experiment_training(p_training)

            variance_parameters = []
            for dataset_test in dataset_names:
                p = (0.5 if isinstance(measure, tm.ANOVAInvariance)
                     else default_dataset_percentage)
                p_dataset = measure_package.DatasetParameters(
                    dataset_test, datasets.DatasetSubset.test, p)
                p_variance = measure_package.Parameters(
                    p_training.id(), p_dataset, transformation, measure)
                model_path = self.model_path(p_training)
                self.experiment_measure(p_variance, adapt_dataset=True)
                variance_parameters.append(p_variance)

            # plot results
            experiment_name = f"{model_config.name}_{dataset}_{transformation.id()}_{measure.id()}"
            plot_filepath = self.folderpath / f"{experiment_name}.jpg"
            results = self.load_measure_results(
                self.results_paths(variance_parameters))
            labels = dataset_names
            visualization.plot_collapsing_layers_same_model(
                results,
                plot_filepath,
                labels=labels,
                ylim=get_ylim_normalized(measure))
# Example 13
    def train_measure(self,
                      model_config: config.ModelConfig,
                      dataset: str,
                      transformation: tm.TransformationSet,
                      m: tm.NumpyMeasure,
                      task: Task,
                      p=None):
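        """Train a model and compute measure `m` on it: a convenience
        helper that builds the training and measure parameters, runs both
        experiments, and returns the parameter objects."""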

        epochs = config.get_epochs(model_config, dataset, transformation)
        p_training = training.Parameters(
            model_config,
            dataset,
            transformation,
            epochs,
        )
        self.experiment_training(p_training)
        if p is None:
            p = config.dataset_size_for_measure(m)
        p_dataset = measure.DatasetParameters(dataset,
                                              datasets.DatasetSubset.test, p)
        p_variance = measure.Parameters(p_training.id(), p_dataset,
                                        transformation, m)
        self.experiment_measure(p_variance)
        return p_training, p_variance, p_dataset
    def run(self):
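        """Compare each family of measures on models trained with and
        without data augmentation: for every measure set, train one model
        on untransformed data and one on transformed data, measure both
        against the transformation, and plot all curves (dashed for the
        model trained without augmentation)."""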

        measure_sets = {
            "Variance": [
                tm.TransformationVarianceInvariance(),
                tm.SampleVarianceInvariance(),
            ],
            "ANOVA": [
                tm.ANOVAInvariance(0.99, bonferroni=True),
            ],
            "NormalizedVariance": [
                tm.NormalizedVarianceInvariance(ca_sum),
            ],
            "Goodfellow": [
                tm.GoodfellowNormalInvariance(alpha=0.99),
            ],
        }

        model_generators = simple_models_generators
        transformations = common_transformations_combined

        combinations = itertools.product(model_generators, dataset_names,
                                         transformations, measure_sets.items())
        for (model_config_generator, dataset, transformation,
             measure_set) in combinations:
            # train model with data augmentation and without
            measure_set_name, measures = measure_set
            variance_parameters_both = []
            for t in [identity_transformation, transformation]:
                model_config = model_config_generator.for_dataset(dataset)
                epochs = config.get_epochs(model_config, dataset, t)
                p_training = training.Parameters(model_config, dataset, t,
                                                 epochs, 0)
                self.experiment_training(p_training)

                # generate variance params
                variance_parameters = []
                for measure in measures:
                    p = config.dataset_size_for_measure(measure)
                    p_dataset = measure_package.DatasetParameters(
                        dataset, datasets.DatasetSubset.test, p)
                    p_variance = measure_package.Parameters(
                        p_training.id(), p_dataset, transformation, measure)
                    variance_parameters.append(p_variance)
                # evaluate variance

                for p_variance in variance_parameters:
                    self.experiment_measure(p_variance)
                variance_parameters_both.append(variance_parameters)

            variance_parameters_id = variance_parameters_both[0]
            variance_parameters_data_augmentation = variance_parameters_both[1]
            variance_parameters_all = variance_parameters_id + variance_parameters_data_augmentation
            # plot results
            experiment_name = f"{measure_set_name}_{model_config.name}_{dataset}_{transformation.id()}"
            plot_filepath = self.folderpath / f"{experiment_name}.jpg"

            results = self.load_measure_results(
                self.results_paths(variance_parameters_all))
            labels = [
                l.measure_name(m) + f" ({l.no_data_augmentation})"
                for m in measures
            ] + [l.measure_name(m) for m in measures]
            n = len(measures)

            cmap = tmv.default_discrete_colormap()
            color = cmap(range(n))
            colors = np.vstack([color, color])
            linestyles = ["--" for i in range(n)] + ["-" for i in range(n)]
            # ylim = self.get_ylim(measure_set_name, dataset)
            tmv.plot_collapsing_layers_same_model(results,
                                                  plot_filepath,
                                                  labels=labels,
                                                  linestyles=linestyles,
                                                  colors=colors)
    def run(self):
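        """Study the effect of batch size on the distance-based measure:
        compute the normalized variance invariance once as a reference,
        then the normalized distance invariance at several batch sizes,
        and plot all curves (the reference dashed)."""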

        batch_sizes = [32, 128, 512, 1024]
        model_generators = simple_models_generators
        transformations = [common_transformations[0]]

        dataset_names = ["mnist", "cifar10"]
        combinations = itertools.product(model_generators, dataset_names,
                                         transformations)
        for (model_config_generator, dataset, transformation) in combinations:
            # train model with data augmentation and without

            model_config = model_config_generator.for_dataset(dataset)
            epochs = config.get_epochs(model_config, dataset, transformation)
            p_training = training.Parameters(model_config, dataset,
                                             transformation, epochs, 0)
            self.experiment_training(p_training)

            variance_measure = tm.NormalizedVarianceInvariance()
            distance_measure = tm.NormalizedDistanceInvariance(
                tm.DistanceAggregation(True, False))
            percentage_dataset = config.dataset_size_for_measure(
                variance_measure)
            p_dataset = measure_package.DatasetParameters(
                dataset, datasets.DatasetSubset.test, percentage_dataset)
            p_variance = measure_package.Parameters(p_training.id(), p_dataset,
                                                    transformation,
                                                    variance_measure)

            # evaluate variance
            results = [self.experiment_measure(p_variance)]
            for b in batch_sizes:
                p = measure_package.Parameters(p_training.id(),
                                               p_dataset,
                                               transformation,
                                               distance_measure,
                                               suffix=f"batch_size={b}")
                variance_result = self.experiment_measure(p, batch_size=b)
                results.append(variance_result)
            # plot results
            experiment_name = f"{model_config.name}_{dataset}_{transformation.id()}"
            plot_filepath = self.folderpath / f"{experiment_name}.jpg"

            labels = [l.measure_name(variance_measure)] + [
                l.measure_name(distance_measure) + f"(b={b})"
                for b in batch_sizes
            ]
            n = len(results)

            cmap = tmv.default_discrete_colormap()
            color = cmap(range(n))
            colors = color
            linestyles = ["--"] + ["-" for b in batch_sizes]

            tmv.plot_collapsing_layers_same_model(results,
                                                  plot_filepath,
                                                  labels=labels,
                                                  linestyles=linestyles,
                                                  colors=colors)
    def run(self):
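        """Compare the measures within each measure set ("Variance" and
        "Normalized") on the same trained model: compute every measure in
        the set and plot them together with a per-set y-axis limit."""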

        measure_sets = {
            "Variance": [
                tm.TransformationVarianceInvariance(),
                tm.SampleVarianceInvariance(),
            ],
            # "Distance": [
            #    tm.TransformationDistanceInvariance(da),
            #    tm.SampleDistanceInvariance(da),
            # ],
            "Normalized": [
                tm.ANOVAInvariance(),
                tm.NormalizedVarianceInvariance(ca_sum),
                tm.GoodfellowNormalInvariance(alpha=0.99),
            ],
            # "Equivariance": [
            #     tm.NormalizedVarianceSameEquivariance(ca_mean),
            #     tm.NormalizedDistanceSameEquivariance(da_normalize_keep),
            #     tm.DistanceSameEquivarianceSimple(df_normalize),
            # ]
        }

        model_generators = simple_models_generators
        transformations = common_transformations

        combinations = itertools.product(model_generators, dataset_names,
                                         transformations, measure_sets.items())
        for (model_config_generator, dataset, transformation,
             measure_set) in combinations:
            # train model with data augmentation and without

            model_config = model_config_generator.for_dataset(dataset)
            epochs = config.get_epochs(model_config, dataset, transformation)
            p_training = training.Parameters(model_config, dataset,
                                             transformation, epochs, 0)
            self.experiment_training(p_training)

            # generate variance params
            variance_parameters = []
            measure_set_name, measures = measure_set
            for measure in measures:
                p = config.dataset_size_for_measure(measure)
                p_dataset = measure_package.DatasetParameters(
                    dataset, datasets.DatasetSubset.test, p)
                p_variance = measure_package.Parameters(
                    p_training.id(), p_dataset, transformation, measure)
                variance_parameters.append(p_variance)
            # evaluate variance
            model_path = self.model_path(p_training)

            for p_variance in variance_parameters:
                self.experiment_measure(p_variance, model_path=model_path)

            # plot results
            experiment_name = f"{measure_set_name}_{model_config.name}_{dataset}_{transformation.id()}"
            plot_filepath = self.folderpath / f"{experiment_name}.jpg"

            results = self.load_measure_results(
                self.results_paths(variance_parameters))
            labels = [l.measure_name(m) for m in measures]
            ylim = self.get_ylim(measure_set_name, dataset)
            tmv.plot_collapsing_layers_same_model(results,
                                                  plot_filepath,
                                                  labels=labels,
                                                  ylim=ylim)
    def run(self):
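        """Train-set complexity analysis: train models on transformations
        of increasing intensity (wider rotation ranges, stronger scalings,
        larger translations) and measure each against the corresponding
        default test transformation."""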
        measures = [nvi, gf, anova, svi, tvi]
        combinations = itertools.product(simple_models_generators,
                                         dataset_names, measures)

        n = 4
        rotations = np.linspace(0, ct.rotation_max_degrees, n)
        upscale = np.linspace(1, ct.scale_max_upscale, n)
        downscale = np.flip(np.linspace(ct.scale_min_downscale, 1, n))
        scaling = list(zip(downscale, upscale))
        translation = np.linspace(0, ct.translation_max, n)
        n_r, n_s, n_t = ct.n_r, ct.n_s, ct.n_t
        train_sets = [
            [AffineGenerator(r=UniformRotation(n_r, r)) for r in rotations],
            [AffineGenerator(s=ScaleUniform(n_s, *s)) for s in scaling],
            [
                AffineGenerator(t=TranslationUniform(n_t, t))
                for t in translation
            ],
        ]
        labels = [
            [f"0° {l.to} {d:.0f}°" for d in rotations],
            [f"{d:.2f} {l.to} {u:.2f}" for (d, u) in scaling],
            [f"0 {l.to} {i:.2f}" for i in translation],
        ]
        test_transformations = common_transformations

        for model_config_generator, dataset, measure in combinations:
            model_config = model_config_generator.for_dataset(dataset)

            for train_set, test_transformation, set_labels in zip(
                    train_sets, test_transformations, labels):
                # TRAIN
                variance_parameters = []
                for train_transformation in train_set:
                    epochs = config.get_epochs(model_config, dataset,
                                               train_transformation)
                    p_training = training.Parameters(model_config, dataset,
                                                     train_transformation,
                                                     epochs, 0)
                    self.experiment_training(p_training)
                    # MEASURE
                    p_dataset = measure_package.DatasetParameters(
                        dataset, datasets.DatasetSubset.test,
                        default_dataset_percentage)

                    p_variance = measure_package.Parameters(
                        p_training.id(), p_dataset, test_transformation,
                        measure)
                    model_path = self.model_path(p_training)
                    self.experiment_measure(p_variance)
                    variance_parameters.append(p_variance)
                # PLOT
                experiment_name = f"{model_config.name}_{dataset}_{measure.id()}_{test_transformation.id()}"
                plot_filepath = self.folderpath / f"{experiment_name}.jpg"
                #title = f" transformation: {train_transformation.id()}"

                results = self.load_measure_results(
                    self.results_paths(variance_parameters))
                visualization.plot_collapsing_layers_same_model(
                    results, plot_filepath, labels=set_labels, ylim=1.4)
    def run(self):
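        """Baseline with untrained models: save several randomly
        initialized models per configuration (epochs=0, no training),
        compute the measure on each, and plot all curves with their mean
        to show the measure's behavior at initialization."""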
        random_models_folderpath = self.models_folder() / "random"
        random_models_folderpath.mkdir(exist_ok=True, parents=True)
        o = training.Options(False, 32, 4, torch.cuda.is_available(), False, 0)
        measures = normalized_measures

        # number of random models to generate
        random_model_n = 10

        combinations = itertools.product(simple_models_generators,
                                         dataset_names, common_transformations,
                                         measures)
        for model_config_generator, dataset_name, transformation, measure in combinations:
            model_config = model_config_generator.for_dataset(dataset_name)
            p = config.dataset_size_for_measure(measure)
            # generate `random_model_n` models and save them without training
            models_paths = []
            p_training = training.Parameters(model_config, dataset_name,
                                             transformation, 0)
            dataset = datasets.get_classification(dataset_name)
            for i in range(random_model_n):

                model_path = self.model_path(
                    p_training,
                    custom_models_folderpath=random_models_folderpath)

                # append index to model name
                name, ext = os.path.splitext(str(model_path))
                name += f"_random{i:03}"
                model_path = Path(f"{name}{ext}")
                if not model_path.exists():
                    model, optimizer = model_config.make_model_and_optimizer(
                        dataset.input_shape, dataset.num_classes, o.use_cuda)
                    scores = training.eval_scores(
                        model, dataset, p_training.transformations,
                        TransformationStrategy.random_sample,
                        o.get_eval_options())
                    training.save_model(p_training, o, model, scores,
                                        model_path)
                    del model
                models_paths.append(model_path)

            # generate variance params
            variance_parameters = []
            p_dataset = measure_package.DatasetParameters(
                dataset_name, datasets.DatasetSubset.test, p)

            for model_path in models_paths:
                model_id, ext = os.path.splitext(os.path.basename(model_path))
                p_variance = measure_package.Parameters(
                    model_id, p_dataset, transformation, measure)
                self.experiment_measure(p_variance, model_path=model_path)
                variance_parameters.append(p_variance)

            # plot results
            experiment_name = f"{model_config.name}_{dataset_name}_{transformation.id()}_{measure}"
            plot_filepath = self.folderpath / f"{experiment_name}.jpg"
            results = self.load_measure_results(
                self.results_paths(variance_parameters))
            n = len(results)
            labels = [f"{l.random_models} ({n} {l.samples})."] + ([None] *
                                                                  (n - 1))
            # one hsv color per random model, at 50% alpha
            import matplotlib.pyplot as plt
            color = plt.cm.hsv(np.linspace(0.1, 0.9, n))
            color[:, 3] = 0.5
            visualization.plot_collapsing_layers_same_model(results,
                                                            plot_filepath,
                                                            plot_mean=True,
                                                            labels=labels,
                                                            colors=color)
# Example 19
    def run(self):
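        """Test-set complexity analysis: train with the default
        transformations and measure against test transformations of
        increasing intensity, labeling each curve with its transformation
        range."""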
        measures = normalized_measures
        combinations = itertools.product(simple_models_generators,
                                         dataset_names, measures)

        n_complexities = 4
        rotations = np.linspace(90, 360, n_complexities)
        upscale = np.linspace(1, 1.25, n_complexities + 1)[1:]
        downscale = np.flip(np.linspace(0.5, 1, n_complexities,
                                        endpoint=False))
        scaling = list(zip(downscale, upscale))
        translation = np.linspace(0.05, 0.2, n_complexities)

        test_sets = [
            [AffineGenerator(r=UniformRotation(25, r)) for r in rotations],
            [AffineGenerator(s=ScaleUniform(4, *s)) for s in scaling],
            [AffineGenerator(t=TranslationUniform(3, t)) for t in translation],
        ]
        labels = [
            [f"0° {l.to} {d:.0f}°" for d in rotations],
            [f"{d:.2f} {l.to} {u:.2f}" for (d, u) in scaling],
            [f"0 {l.to} {i:.2f}" for i in translation],
        ]

        train_transformations = common_transformations

        for model_config_generator, dataset, measure in combinations:
            model_config = model_config_generator.for_dataset(dataset)

            for train_transformation, transformation_set, set_labels in zip(
                    train_transformations, test_sets, labels):
                # TRAIN
                epochs = config.get_epochs(model_config, dataset,
                                           train_transformation)
                p_training = training.Parameters(model_config, dataset,
                                                 train_transformation, epochs,
                                                 0)
                self.experiment_training(p_training)
                # MEASURE
                variance_parameters = []
                for test_transformation in transformation_set:
                    p_dataset = measure_package.DatasetParameters(
                        dataset, datasets.DatasetSubset.test,
                        default_dataset_percentage)
                    p_variance = measure_package.Parameters(
                        p_training.id(), p_dataset, test_transformation,
                        measure)
                    model_path = self.model_path(p_training)
                    self.experiment_measure(p_variance)
                    variance_parameters.append(p_variance)
                # PLOT
                experiment_name = f"{model_config.name}_{dataset}_{measure.id()}_{train_transformation.id()}"
                plot_filepath = self.folderpath / f"{experiment_name}.jpg"
                #title = f" transformation: {train_transformation.id()}"

                results = self.load_measure_results(
                    self.results_paths(variance_parameters))

                visualization.plot_collapsing_layers_same_model(
                    results, plot_filepath, labels=set_labels, ylim=1.4)