Example #1
 def test_normal_weight_initializer(self):
     model_name = 'mesh_classifier'
     network_params = {
         'in_channels_primal': 1,
         'in_channels_dual': 4,
         'norm_layer_type': 'group_norm',
         'num_groups_norm_layer': 16,
         'conv_primal_out_res': [64, 128, 256, 256],
         'conv_dual_out_res': [64, 128, 256, 256],
         'num_classes': 30,
         'num_output_units_fc': 100,
         'num_res_blocks': 1,
         'weight_initialization_type': 'normal',
         'weight_initialization_gain': 0.02,
         'single_dual_nodes': False,
         'undirected_dual_edges': True
     }
     model = create_model(model_name=model_name,
                          should_initialize_weights=True,
                          **network_params)
     # Print the weights of a fully-connected layer.
     print(
         "\nThe following weights from a fully-connected layer should look "
         "like samples drawn from a Gaussian distribution with mean 0. and "
         f"variance {network_params['weight_initialization_gain']**2}:")
     print(model.fc1.weight.data)
     # Print the weights of a batch-normalization layer.
     print(
         "\nThe following weights from a batch-normalization layer should "
         "look like samples drawn from a Gaussian distribution with mean "
         "1.0 and variance "
         f"{network_params['weight_initialization_gain']**2}:")
     print(model.block0.conv.bn1_primal.weight.data)
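
The test above only prints the initialized tensors for visual inspection. For reference, a minimal sketch of a 'normal' initializer with the behaviour described in the printed messages is shown below; the helper name and the exact module types covered are assumptions, not the project's actual implementation.

import torch.nn as nn

def init_weights_normal(module, gain=0.02):
    # Hypothetical helper (not the project's actual initializer): weights of
    # fully-connected layers are drawn from N(0, gain^2), weights of affine
    # normalization layers from N(1, gain^2), matching the printed messages.
    if isinstance(module, nn.Linear):
        nn.init.normal_(module.weight, mean=0.0, std=gain)
        if module.bias is not None:
            nn.init.constant_(module.bias, 0.0)
    elif isinstance(module, (nn.BatchNorm1d, nn.BatchNorm2d, nn.GroupNorm)):
        if module.weight is not None:  # affine normalization layers only.
            nn.init.normal_(module.weight, mean=1.0, std=gain)
            nn.init.constant_(module.bias, 0.0)

# Applied recursively with model.apply(init_weights_normal); a non-default
# gain requires wrapping the helper, e.g., with functools.partial.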
Example #2
 def test_create_nonexistent_optimizer(self):
     # Create a model.
     model_name = 'mesh_classifier'
     network_params = {
         'in_channels_primal': 1,
         'in_channels_dual': 4,
         'norm_layer_type': 'group_norm',
         'num_groups_norm_layer': 16,
         'conv_primal_out_res': [64, 128, 256, 256],
         'conv_dual_out_res': [64, 128, 256, 256],
         'num_classes': 30,
         'num_output_units_fc': 100,
         'num_res_blocks': 1,
         'single_dual_nodes': False,
         'undirected_dual_edges': True
     }
     model = create_model(model_name=model_name,
                          should_initialize_weights=True,
                          **network_params)
     # Create the optimizer.
     optimizer_params = {'optimizer_type': 'nonexistent_optimizer'}
     self.assertRaises(KeyError,
                       create_optimizer,
                       network_parameters=model.parameters(),
                       **optimizer_params)
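
The expected KeyError suggests that create_optimizer resolves the optimizer type through a dictionary lookup. A hedged sketch of that dispatch pattern follows; the factory name and the registered types are illustrative assumptions, not the project's implementation.

from torch.optim import Adam, SGD

# Hypothetical registry: the optimizer-type string is used as a dictionary
# key, so an unregistered type raises KeyError, which is what the test expects.
_OPTIMIZERS = {'adam': Adam, 'sgd': SGD}

def create_optimizer_sketch(network_parameters, optimizer_type,
                            **optimizer_params):
    # Indexing the registry with an unknown key raises KeyError.
    return _OPTIMIZERS[optimizer_type](network_parameters, **optimizer_params)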
Example #3
    def test_create_step_scheduler(self):
        # Create a model.
        model_name = 'mesh_classifier'
        network_params = {
            'in_channels_primal': 1,
            'in_channels_dual': 4,
            'norm_layer_type': 'group_norm',
            'num_groups_norm_layer': 16,
            'conv_primal_out_res': [64, 128, 256, 256],
            'conv_dual_out_res': [64, 128, 256, 256],
            'num_classes': 30,
            'num_output_units_fc': 100,
            'num_res_blocks': 1,
            'single_dual_nodes': False,
            'undirected_dual_edges': True
        }
        model = create_model(model_name=model_name,
                             should_initialize_weights=True,
                             **network_params)
        # Create an optimizer.
        optimizer_params = {
            'optimizer_type': 'adam',
            'betas': (0.9, 0.999),
            'lr': 0.0001
        }
        optimizer = create_optimizer(network_parameters=model.parameters(),
                                     **optimizer_params)
        # Create the scheduler.
        scheduler_params = {
            'scheduler_type': 'step',
            'step_size': 20,
            'gamma': 0.2,
        }
        lr_scheduler = create_lr_scheduler(optimizer=optimizer,
                                           **scheduler_params)
        self.assertTrue(isinstance(lr_scheduler, StepLR))

        # Verify that the learning rate is multiplied by 'gamma' every
        # 'step_size' steps of the learning-rate scheduler.
        num_epochs = 300
        last_lr = optimizer_params['lr']
        for epoch in range(1, num_epochs + 1):
            if (epoch != 1
                    and (epoch - 1) % scheduler_params['step_size'] == 0):
                # Update the learning rate.
                last_lr *= scheduler_params['gamma']
            # Verify the learning rate.
            self.assertAlmostEqual(lr_scheduler.get_last_lr()[0], last_lr, 5)
            # Verify that the learning-rate scheduler is considering the right
            # epoch. Since at the first epoch the learning-rate scheduler is
            # internally initialized to have epoch 0, the epoch "counter" in the
            # scheduler should always lag the actual epoch number by 1.
            self.assertEqual(lr_scheduler.last_epoch, epoch - 1)
            # Update optimizer and learning-rate scheduler.
            optimizer.step()
            lr_scheduler.step()
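
The decay rule verified above can also be reproduced directly with torch.optim.lr_scheduler.StepLR, outside the project's factory functions; a self-contained sketch:

import torch
from torch.optim import Adam
from torch.optim.lr_scheduler import StepLR

# Standalone illustration (not part of the test suite): the learning rate is
# multiplied by gamma every step_size scheduler steps.
param = torch.nn.Parameter(torch.zeros(1))
optimizer = Adam([param], lr=1e-4)
scheduler = StepLR(optimizer, step_size=20, gamma=0.2)
for _ in range(60):
    optimizer.step()
    scheduler.step()
# After 60 steps the rate has been reduced three times: 1e-4 * 0.2**3.
print(scheduler.get_last_lr())  # ~[8e-07]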
Example #4
 def test_create_mesh_classifier(self):
     model_name = 'mesh_classifier'
     network_params = {
         'in_channels_primal': 1,
         'in_channels_dual': 4,
         'norm_layer_type': 'group_norm',
         'num_groups_norm_layer': 16,
         'conv_primal_out_res': [64, 128, 256, 256],
         'conv_dual_out_res': [64, 128, 256, 256],
         'num_classes': 30,
         'num_output_units_fc': 100,
         'num_res_blocks': 1,
         'single_dual_nodes': False,
         'undirected_dual_edges': True
     }
     model = create_model(model_name=model_name,
                          should_initialize_weights=True,
                          **network_params)
     self.assertTrue(isinstance(model, DualPrimalMeshClassifier))
Example #5
 def test_create_plateau_scheduler(self):
     # Create a model.
     model_name = 'mesh_classifier'
     network_params = {
         'in_channels_primal': 1,
         'in_channels_dual': 4,
         'norm_layer_type': 'group_norm',
         'num_groups_norm_layer': 16,
         'conv_primal_out_res': [64, 128, 256, 256],
         'conv_dual_out_res': [64, 128, 256, 256],
         'num_classes': 30,
         'num_output_units_fc': 100,
         'num_res_blocks': 1,
         'single_dual_nodes': False,
         'undirected_dual_edges': True
     }
     model = create_model(model_name=model_name,
                          should_initialize_weights=True,
                          **network_params)
     # Create an optimizer.
     optimizer_params = {
         'optimizer_type': 'adam',
         'betas': (0.9, 0.999),
         'lr': 0.0001
     }
     optimizer = create_optimizer(network_parameters=model.parameters(),
                                  **optimizer_params)
     # Create the scheduler.
     scheduler_params = {
         'scheduler_type': 'plateau',
         'mode': 'min',
         'factor': 0.2,
         'threshold': 0.01,
         'patience': 5
     }
     lr_scheduler = create_lr_scheduler(optimizer=optimizer,
                                        **scheduler_params)
     self.assertTrue(isinstance(lr_scheduler, ReduceLROnPlateau))
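
Unlike the step and lambda schedulers, a plateau scheduler is stepped with the monitored metric. A self-contained sketch of how the scheduler created above would be driven follows; the training loop and the stagnating metric are purely illustrative.

import torch
from torch.optim import Adam
from torch.optim.lr_scheduler import ReduceLROnPlateau

# ReduceLROnPlateau.step() takes the monitored metric (e.g., a validation
# loss) and multiplies the learning rate by 'factor' once the metric has not
# improved by more than 'threshold' for 'patience' epochs.
param = torch.nn.Parameter(torch.zeros(1))
optimizer = Adam([param], lr=1e-4)
scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.2,
                              threshold=0.01, patience=5)
for _ in range(20):
    validation_loss = 1.0  # A stagnating metric eventually triggers the decay.
    optimizer.step()
    scheduler.step(validation_loss)
print(optimizer.param_groups[0]['lr'])  # Reduced below the initial 1e-4.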
Example #6
 def test_create_adam_optimizer(self):
     # Create a model.
     model_name = 'mesh_classifier'
     network_params = {
         'in_channels_primal': 1,
         'in_channels_dual': 4,
         'norm_layer_type': 'group_norm',
         'num_groups_norm_layer': 16,
         'conv_primal_out_res': [64, 128, 256, 256],
         'conv_dual_out_res': [64, 128, 256, 256],
         'num_classes': 30,
         'num_output_units_fc': 100,
         'num_res_blocks': 1,
         'single_dual_nodes': False,
         'undirected_dual_edges': True
     }
     model = create_model(model_name=model_name,
                          should_initialize_weights=True,
                          **network_params)
     # Create the optimizer.
     optimizer_params = {'optimizer_type': 'adam', 'betas': (0.9, 0.999)}
     optimizer = create_optimizer(network_parameters=model.parameters(),
                                  **optimizer_params)
     self.assertTrue(isinstance(optimizer, Adam))
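
Assuming create_optimizer forwards its keyword arguments unchanged to the PyTorch constructor, the call above is equivalent to the direct construction below; with no 'lr' given, Adam falls back to its default learning rate of 1e-3.

import torch
from torch.optim import Adam

# Direct equivalent of the factory call above, under the assumption that the
# keyword arguments are forwarded unchanged.
params = [torch.nn.Parameter(torch.zeros(1))]  # Stands in for model.parameters().
optimizer = Adam(params, betas=(0.9, 0.999))
print(optimizer.defaults['lr'])  # 0.001 (PyTorch's default).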
Example #7
    def test_create_lambda_scheduler(self):
        # Create a model.
        model_name = 'mesh_classifier'
        network_params = {
            'in_channels_primal': 1,
            'in_channels_dual': 4,
            'norm_layer_type': 'group_norm',
            'num_groups_norm_layer': 16,
            'conv_primal_out_res': [64, 128, 256, 256],
            'conv_dual_out_res': [64, 128, 256, 256],
            'num_classes': 30,
            'num_output_units_fc': 100,
            'num_res_blocks': 1,
            'single_dual_nodes': False,
            'undirected_dual_edges': True
        }
        model = create_model(model_name=model_name,
                             should_initialize_weights=True,
                             **network_params)
        # Create an optimizer.
        optimizer_params = {
            'optimizer_type': 'adam',
            'betas': (0.9, 0.999),
            'lr': 0.0001
        }
        optimizer = create_optimizer(network_parameters=model.parameters(),
                                     **optimizer_params)
        # Create the scheduler.
        scheduler_params = {
            'scheduler_type': 'lambda',
            'last_epoch_constant_lr': 100,
            'last_epoch': 300
        }
        lr_scheduler = create_lr_scheduler(optimizer=optimizer,
                                           **scheduler_params)
        self.assertTrue(isinstance(lr_scheduler, LambdaLR))

        # Verify that the learning rate decays linearly over the epochs after
        # epoch 100.
        for epoch in range(1, scheduler_params['last_epoch'] + 1):
            if (epoch <= scheduler_params['last_epoch_constant_lr']):
                self.assertEqual(lr_scheduler.get_last_lr()[0],
                                 optimizer_params['lr'])
            else:
                expected_lr = optimizer_params['lr'] * (
                    ((epoch + 1) - scheduler_params['last_epoch'] - 1) /
                    (scheduler_params['last_epoch_constant_lr'] -
                     scheduler_params['last_epoch'] - 1))
                self.assertAlmostEqual(lr_scheduler.get_last_lr()[0],
                                       expected_lr, 5)
            # Verify that the learning-rate scheduler is considering the right
            # epoch. Since at the first epoch the learning-rate scheduler is
            # internally initialized to have epoch 0, the epoch "counter" in the
            # scheduler should always lag the actual epoch number by 1.
            # However, note that the lambda function used by this scheduler
            # internally adjusts the epoch number by adding 1 to it when
            # computing the learning rate.
            self.assertEqual(lr_scheduler.last_epoch, epoch - 1)
            # Update optimizer and learning-rate scheduler.
            optimizer.step()
            lr_scheduler.step()
        # Look at two more epochs and verify that the learning rate stays at
        # zero.
        self.assertEqual(lr_scheduler.get_last_lr()[0], 0.)
        self.assertEqual(lr_scheduler.last_epoch, 300)
        optimizer.step()
        lr_scheduler.step()
        self.assertEqual(lr_scheduler.get_last_lr()[0], 0.)
        self.assertEqual(lr_scheduler.last_epoch, 301)
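
One lambda that reproduces the values checked above (constant learning rate up to epoch 100, linear decay to zero at epoch 300, clamped at zero afterwards) is sketched below. It is reconstructed from the test's expected values and is not necessarily how create_lr_scheduler builds it internally.

import torch
from torch.optim import Adam
from torch.optim.lr_scheduler import LambdaLR

last_epoch_constant_lr = 100  # Last epoch with the initial learning rate.
last_epoch_total = 300        # Epoch at which the learning rate reaches zero.

def linear_decay_lambda(scheduler_epoch):
    # The scheduler's internal counter starts at 0, hence the '+ 1' to obtain
    # the 1-based training epoch used in the test above.
    epoch = scheduler_epoch + 1
    if epoch <= last_epoch_constant_lr:
        return 1.0
    return max(0., ((epoch + 1) - last_epoch_total - 1) /
               (last_epoch_constant_lr - last_epoch_total - 1))

optimizer = Adam([torch.nn.Parameter(torch.zeros(1))], lr=1e-4)
lr_scheduler = LambdaLR(optimizer, lr_lambda=linear_decay_lambda)
print(lr_scheduler.get_last_lr())  # [1e-4] while the rate is still constant.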
Example #8
    def _initialize_components(self, network_parameters, dataset_parameters,
                               data_loader_parameters, loss_parameters):
        r"""Instantiates and initializes: network, dataset, data loader and
        loss.

        Args:
            network_parameters, dataset_parameters, data_loader_parameters,
            loss_parameters (dict): Input parameters used to construct and
                initialize the network, the dataset, the data loader and the
                loss.
        
        Returns:
            None.
        """
        # Initialize model.
        assert ('should_initialize_weights' not in network_parameters), (
            "Network parameters should not contain the parameter "
            "'should_initialize_weights', as weights will be automatically "
            "initialized or not, depending on whether training is resumed "
            "from a previous job or not.")
        if (self.__verbose):
            print("Initializing network...")
        if (self.__save_clusterized_meshes):
            network_contains_at_least_one_pooling_layer = False
            if ('num_primal_edges_to_keep' in network_parameters
                    and network_parameters['num_primal_edges_to_keep']
                    is not None):
                num_pooling_layers = len([
                    threshold for threshold in
                    network_parameters['num_primal_edges_to_keep']
                    if threshold is not None
                ])
                network_contains_at_least_one_pooling_layer |= (
                    num_pooling_layers >= 1)
            elif ('fractions_primal_edges_to_keep' in network_parameters
                  and network_parameters['fractions_primal_edges_to_keep']
                  is not None):
                num_pooling_layers = len([
                    threshold for threshold in
                    network_parameters['fractions_primal_edges_to_keep']
                    if threshold is not None
                ])
                network_contains_at_least_one_pooling_layer |= (
                    num_pooling_layers >= 1)
            elif ('primal_attention_coeffs_thresholds' in network_parameters
                  and network_parameters['primal_attention_coeffs_thresholds']
                  is not None):
                num_pooling_layers = len([
                    threshold for threshold in
                    network_parameters['primal_attention_coeffs_thresholds']
                    if threshold is not None
                ])
                network_contains_at_least_one_pooling_layer |= (
                    num_pooling_layers >= 1)
            assert (network_contains_at_least_one_pooling_layer), (
                "Please use at least one pooling layer in the test model to "
                "save the clusterized meshes.")
            # Add to the network's input parameters the flag specifying that
            # the node-to-cluster correspondences should be returned.
            network_parameters['return_node_to_cluster'] = True

        self.__net = create_model(should_initialize_weights=False,
                                  **network_parameters)
        if ('log_ratios_new_old_primal_nodes' in network_parameters and
                network_parameters['log_ratios_new_old_primal_nodes'] is True):
            self.__are_ratios_new_old_primal_nodes_logged = True
        else:
            self.__are_ratios_new_old_primal_nodes_logged = False
        # Move network to GPU if necessary.
        if (self.__use_gpu):
            self.__net.to("cuda")
        else:
            self.__net.to("cpu")
        # Initialize dataset.
        if (self.__verbose):
            print("Initializing dataset...")
        if (dataset_parameters['train']):
            print(
                "\033[93mNote: running evaluation on a 'train' split! If you "
                "instead want to use the 'test' split of the dataset, please "
                "set the dataset parameter 'train' as False.\033[0m")
            self.__split = 'train'
        else:
            self.__split = 'test'
        if (self.__standardize_features_using_training_set):
            assert (
                'compute_node_feature_stats' not in dataset_parameters
                or not dataset_parameters['compute_node_feature_stats']
            ), ("Setting argument 'standardize_features_using_training_set' of "
                "the test job to True is incompatible with dataset parameter "
                "'compute_node_feature_stats' = True.")
            # Perform input-feature standardization using the statistics from
            # the training set.
            print("\033[92mWill perform input-feature standardization using "
                  "the provided mean and standard deviation of the "
                  "primal-graph-/dual-graph- node features of the training "
                  f"set (file '{self.__training_params_filename}').\033[0m")
            primal_mean = dataset_parameters.pop('primal_mean_train')
            primal_std = dataset_parameters.pop('primal_std_train')
            dual_mean = dataset_parameters.pop('dual_mean_train')
            dual_std = dataset_parameters.pop('dual_std_train')
            dataset_parameters['compute_node_feature_stats'] = False
            dataset, _ = create_dataset(**dataset_parameters)
        else:
            if ('compute_node_feature_stats' in dataset_parameters
                    and not dataset_parameters['compute_node_feature_stats']):
                # No feature standardization.
                dataset, _ = create_dataset(**dataset_parameters)
                primal_mean = primal_std = dual_mean = dual_std = None
                print("\033[93mNote: no input-feature standardization will be "
                      "performed! If you wish to use standardization instead, "
                      "please set the argument "
                      "'standardize_features_using_training_set' of the test "
                      "job to True or set the dataset-parameter "
                      "`compute_node_feature_stats` to True.\033[0m")
            else:
                print("\033[93mNote: input-feature standardization will be "
                      "performed using the mean and standard deviation of the "
                      "primal-graph-/dual-graph- node features of the test "
                      "set! If you wish to use those of the training set "
                      "instead, please set the argument "
                      "'standardize_features_using_training_set' of the test "
                      "job to True.\033[0m")
                dataset, (primal_mean, primal_std, dual_mean,
                          dual_std) = create_dataset(**dataset_parameters)
        # Initialize data loader.
        assert (len(
            set(['primal_mean', 'primal_std', 'dual_mean', 'dual_std'])
            & set(data_loader_parameters)) == 0), (
                "Data-loader parameters should not contain any of the "
                "following parameters, as they will be automatically computed "
                "from the dataset or restored from the previous training job, "
                "if set to do so: 'primal_mean', "
                "'primal_std', 'dual_mean', 'dual_std'.")
        if (self.__verbose):
            print("Initializing data loader...")
        # Add to the data loader's input parameters the flag specifying that
        # the indices of the samples in the dataset should be returned when
        # iterating over it.
        data_loader_parameters['return_sample_indices'] = True

        self.__data_loader = DualPrimalDataLoader(dataset=dataset,
                                                  primal_mean=primal_mean,
                                                  primal_std=primal_std,
                                                  dual_mean=dual_mean,
                                                  dual_std=dual_std,
                                                  **data_loader_parameters)
        # Initialize loss.
        if (loss_parameters is not None):
            if (self.__verbose):
                print("Initializing loss...")
            self.__loss = create_loss(**loss_parameters)
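
As a side note, the three if/elif branches in _initialize_components that decide whether the network contains a pooling layer apply the same counting logic to three different parameter names; the check can be written compactly as in the sketch below, which is functionally equivalent to the chain above.

_POOLING_THRESHOLD_KEYS = ('num_primal_edges_to_keep',
                           'fractions_primal_edges_to_keep',
                           'primal_attention_coeffs_thresholds')

def _network_has_pooling_layer(network_parameters):
    # The first of the three threshold lists that is present and not None
    # determines whether the network contains at least one pooling layer.
    for key in _POOLING_THRESHOLD_KEYS:
        thresholds = network_parameters.get(key)
        if thresholds is not None:
            return any(threshold is not None for threshold in thresholds)
    return False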