Example #1
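The test methods in these examples come from a unittest.TestCase and are shown without their module preamble. A minimal set of imports along the following lines is assumed; the trojai.modelgen module paths reflect the usual layout of that package and may differ between versions:

    import os
    import tempfile
    import unittest
    from unittest.mock import Mock, patch

    import torch.nn as nn
    from torchvision import models

    from trojai.modelgen.architecture_factory import ArchitectureFactory
    from trojai.modelgen.config import RunnerConfig, TrainingConfig
    from trojai.modelgen.data_manager import DataManager
    from trojai.modelgen.default_optimizer import DefaultOptimizer
    from trojai.modelgen.optimizer_interface import OptimizerInterface
    from trojai.modelgen.runner import Runner
    from trojai.modelgen.training_statistics import (BatchStatistics,
                                                     EpochStatistics,
                                                     TrainingRunStatistics)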
    def test_run(self):
        # set up the functions and values that will be called and returned in the run function
        mock_runner_config = Mock(spec=RunnerConfig)
        train_mock = Mock()

        train = (train_mock for _ in range(1))
        ctest, ttest = Mock(), Mock()
        dd1, dd2, dd3 = Mock(), Mock(), Mock()  # the data description classes
        mock_runner_config.data = Mock(spec=DataManager)
        mock_runner_config.data.load_data = Mock()
        mock_runner_config.data.load_data.return_value = (train, ctest, ttest,
                                                          dd1, dd2, dd3)
        mock_runner_config.data.torch_dataloader_kwargs = None
        mock_runner_config.optimizer = Mock(spec=OptimizerInterface)
        mock_optimizer1 = Mock(spec=DefaultOptimizer)
        mock_optimizer1.train = Mock()
        mock_runner_config.optimizer_generator = (opt
                                                  for opt in [mock_optimizer1])
        mock_runner_config.arch_factory = Mock(spec=ArchitectureFactory)
        mock_runner_config.arch_factory.new_architecture = Mock()
        arch = Mock(spec=nn.Module)
        mock_runner_config.arch_factory.new_architecture.return_value = arch
        mock_runner_config.arch_factory_kwargs = None
        mock_runner_config.arch_factory_kwargs_generator = None
        mock_runner_config.parallel = False

        test_batch_stats = BatchStatistics(1, 1, 1, 1, 1)
        e = EpochStatistics(1)
        e.add_batch(test_batch_stats)

        mock_optimizer1.train.return_value = (arch, [e], 1)
        mock_optimizer1.test = Mock()
        mock_training_cfg1 = Mock(spec=TrainingConfig)
        mock_optimizer1.get_cfg_as_dict.return_value = mock_training_cfg1

        test_return_dict = {
            'clean_accuracy': 1,
            'clean_n_total': 1,
            'triggered_accuracy': 1,
            'triggered_n_total': 1,
        }
        mock_optimizer1.test.return_value = test_return_dict

        # create the runner and replace _save_model_and_stats with a mock
        runner = Runner(mock_runner_config)
        mock_save_model = Mock()
        runner._save_model_and_stats = mock_save_model

        # run function
        with patch("trojai.modelgen.runner.TrainingRunStatistics") as p:
            runner.run()

            # check that the correct functions were called with the correct arguments and the correct number of times
            mock_runner_config.data.load_data.assert_called_once_with()
            mock_optimizer1.train.assert_called_once_with(
                arch, train_mock, False, {})
            mock_optimizer1.test.assert_called_once_with(
                arch, ctest, ttest, False, {})
            mock_save_model.assert_called_once_with(arch, p(),
                                                    [mock_training_cfg1])
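The final assertion leans on a unittest.mock detail: a patched callable returns the same cached child mock on every call, so the `p()` evaluated inside the assertion is the very object `runner.run()` passed to `_save_model_and_stats`. A standalone illustration, patching `json.dumps` purely as a stand-in target:

    from unittest.mock import patch

    with patch("json.dumps") as p:
        first, second = p(), p()
        assert first is second  # return_value is one cached Mock, not a fresh one per call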
Example #2
    def test_save_model(self):
        tmp_1 = tempfile.TemporaryDirectory()
        stats_tmp_1 = tempfile.TemporaryDirectory()

        # path and file names to test to ensure correct file name saving
        path1 = tmp_1.name
        path1_stats_dir = stats_tmp_1.name
        path2 = path1 + '/'
        path2_stats_dir = path1_stats_dir + '/'
        pt_filename = 'model.pt'

        m_model = models.alexnet()
        ts = TrainingRunStatistics()

        mock_runner_config = Mock(spec=RunnerConfig)
        mock_runner_config.parallel = False
        runner = Runner(mock_runner_config)

        mock_runner_config.model_save_dir = path1
        mock_runner_config.stats_save_dir = path1_stats_dir
        mock_runner_config.filename = pt_filename
        mock_runner_config.run_id = None
        mock_runner_config.save_with_hash = False
        mock_runner_config.model_save_format = 'pt'

        mock_runner_config.optimizer = DefaultOptimizer()
        runner._save_model_and_stats(m_model, ts, [])
        self.assertTrue(os.path.isfile(path2 + pt_filename + '.1'))

        mock_runner_config.model_save_dir = path2
        mock_runner_config.stats_save_dir = path2_stats_dir
        runner._save_model_and_stats(m_model, ts, [])
        self.assertTrue(os.path.isfile(path2 + 'model.pt.2'))

        mock_runner_config.filename = None
        mock_runner_config.run_id = 50
        runner._save_model_and_stats(m_model, ts, [])
        self.assertTrue(os.path.isfile(path2 + 'AlexNet_id50.pt.1'))

        tmp_1.cleanup()
        stats_tmp_1.cleanup()
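Note that the trailing `cleanup()` calls only run when every assertion passes; `TemporaryDirectory` does eventually remove the directory when it is garbage-collected, but that implicit cleanup emits a ResourceWarning. A sketch of the context-manager form, which guarantees removal even on a failed assert (not from the source tests):

    import os
    import tempfile

    with tempfile.TemporaryDirectory() as tmp_dir:
        model_path = os.path.join(tmp_dir, 'model.pt')
        open(model_path, 'w').close()      # stand-in for the model file the runner writes
        assert os.path.isfile(model_path)
    # on exit the directory and its contents are removed, even if the assert raised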
Example #3
    def test_save_model(self):
        # path and file names to test to ensure correct file name saving
        path1 = './test_dir'
        path1_stats_dir = './test_dir_stats'
        path2 = './test_dir/'
        path2_stats_dir = './test_dir_stats/'
        pt_filename = 'model.pt'

        m_model = models.alexnet()
        ts = TrainingRunStatistics()

        mock_runner_config = Mock(spec=RunnerConfig)
        mock_runner_config.parallel = False
        runner = Runner(mock_runner_config)

        mock_runner_config.model_save_dir = path1
        mock_runner_config.stats_save_dir = path1_stats_dir
        mock_runner_config.filename = pt_filename
        mock_runner_config.run_id = None
        mock_runner_config.save_with_hash = False

        mock_runner_config.optimizer = DefaultOptimizer()
        runner._save_model_and_stats(m_model, ts, [])
        self.assertTrue(os.path.isfile(path2 + pt_filename + '.1'))

        mock_runner_config.model_save_dir = path2
        mock_runner_config.stats_save_dir = path2_stats_dir
        runner._save_model_and_stats(m_model, ts, [])
        self.assertTrue(os.path.isfile(path2 + 'model.pt.2'))

        mock_runner_config.filename = None
        mock_runner_config.run_id = 50
        runner._save_model_and_stats(m_model, ts, [])
        self.assertTrue(os.path.isfile(path2 + 'AlexNet_id50.pt.1'))

        os.remove(path2 + 'model.pt.1')
        os.remove(path2 + 'model.pt.2')
        os.remove(path2 + 'AlexNet_id50.pt.1')
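This older variant of `test_save_model` writes into hardcoded directories relative to the working directory and removes only the files, so `./test_dir` and `./test_dir_stats` are left behind after the run (and nothing is removed at all if an assertion fails first). A hedged cleanup sketch using `shutil`, not taken from the source:

    import shutil

    # remove the scratch directories and everything inside them; ignore_errors
    # covers the case where a failed run never created them
    for scratch_dir in ('./test_dir', './test_dir_stats'):
        shutil.rmtree(scratch_dir, ignore_errors=True)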
Example #4
    def test_run_with_iterable_data(self):
        # set up the functions and values that will be called and returned in the run function
        mock_runner_config = Mock(spec=RunnerConfig)
        train1, train2, train3 = Mock(), Mock(), Mock()

        train = (t for t in [train1, train2, train3])
        ctest, ttest, cttest = Mock(), Mock(), Mock()
        # the data descriptor classes
        dd1, dd2, dd3, dd4 = Mock(), Mock(), Mock(), Mock()
        mock_runner_config.data = Mock(spec=DataManager)
        mock_runner_config.data.torch_dataloader_kwargs = None
        mock_runner_config.data.iterable_training = True
        mock_runner_config.data.load_data = Mock()
        mock_runner_config.data.load_data.return_value = (train, ctest, ttest,
                                                          cttest, dd1, dd2,
                                                          dd3, dd4)
        mock_runner_config.data.train_dataloader_kwargs = None
        mock_runner_config.data.test_dataloader_kwargs = None
        mock_runner_config.arch_factory = Mock(spec=ArchitectureFactory)
        mock_runner_config.arch_factory.new_architecture = Mock()
        arch = Mock(spec=nn.Module)
        mock_runner_config.arch_factory.new_architecture.return_value = arch
        mock_runner_config.arch_factory_kwargs = None
        mock_runner_config.arch_factory_kwargs_generator = None
        mock_runner_config.optimizer = Mock(spec=OptimizerInterface)
        mock_optimizer1 = Mock(spec=DefaultOptimizer)
        mock_optimizer1.train = Mock()
        mock_training_cfg1 = Mock(spec=TrainingConfig)
        mock_optimizer1.get_cfg_as_dict.return_value = mock_training_cfg1
        mock_runner_config.optimizer_generator = (mock_optimizer1
                                                  for _ in range(3))
        mock_runner_config.parallel = False

        test_batch_stats = BatchStatistics(1, 1, 1)
        e = EpochStatistics(1)
        e.add_batch(test_batch_stats)

        mock_optimizer1.train.return_value = (arch, [e], 1)
        mock_optimizer1.test = Mock()

        test_return_dict = {
            'clean_accuracy': 1,
            'clean_n_total': 1,
            'triggered_accuracy': 1,
            'triggered_n_total': 1,
        }
        mock_optimizer1.test.return_value = test_return_dict

        # create the runner and replace _save_model_and_stats with a mock
        runner = Runner(mock_runner_config)
        mock_save_model = Mock()
        runner._save_model_and_stats = mock_save_model

        calls = [
            unittest.mock.call(arch, train1, False, None),
            unittest.mock.call(arch, train2, False, None),
            unittest.mock.call(arch, train3, False, None)
        ]

        # run function
        with patch("trojai.modelgen.runner.TrainingRunStatistics") as p:
            runner.run()

            # check that the correct functions were called with the correct arguments and the correct number of times
            mock_runner_config.data.load_data.assert_called_once_with()
            mock_optimizer1.train.assert_has_calls(calls, any_order=False)
            mock_optimizer1.test.assert_called_once_with(
                arch, ctest, ttest, cttest, False, None)
            mock_save_model.assert_called_once_with(
                arch, p(),
                [mock_training_cfg1, mock_training_cfg1, mock_training_cfg1])

        # again with multiple optimizers
        mock_runner_config = Mock(spec=RunnerConfig)
        train1, train2, train3 = Mock(), Mock(), Mock()

        train = (t for t in [train1, train2, train3])
        ctest, ttest, cttest = Mock(), Mock(), Mock()
        # the data descriptor classes
        dd1, dd2, dd3, dd4 = Mock(), Mock(), Mock(), Mock()
        mock_runner_config.data = Mock(spec=DataManager)
        mock_runner_config.data.torch_dataloader_kwargs = None
        mock_runner_config.data.iterable_training = True
        mock_runner_config.data.load_data = Mock()
        mock_runner_config.data.load_data.return_value = (train, ctest, ttest,
                                                          cttest, dd1, dd2,
                                                          dd3, dd4)
        mock_runner_config.data.train_dataloader_kwargs = None
        mock_runner_config.data.test_dataloader_kwargs = None
        mock_runner_config.arch_factory = Mock(spec=ArchitectureFactory)
        mock_runner_config.arch_factory.new_architecture = Mock()
        arch = Mock(spec=nn.Module)
        mock_runner_config.arch_factory.new_architecture.return_value = arch
        mock_runner_config.arch_factory_kwargs = None
        mock_runner_config.arch_factory_kwargs_generator = None
        mock_runner_config.parallel = False

        mock_runner_config.optimizer = Mock(spec=OptimizerInterface)
        mock_optimizer1 = Mock(spec=DefaultOptimizer)
        mock_optimizer1.train = Mock()
        mock_training_cfg1 = Mock(spec=TrainingConfig)
        mock_optimizer1.get_cfg_as_dict.return_value = mock_training_cfg1
        mock_optimizer2 = Mock(spec=DefaultOptimizer)
        mock_optimizer2.train = Mock()
        mock_training_cfg2 = Mock(spec=TrainingConfig)
        mock_optimizer2.get_cfg_as_dict.return_value = mock_training_cfg2
        mock_optimizer3 = Mock(spec=DefaultOptimizer)
        mock_optimizer3.train = Mock()
        mock_training_cfg3 = Mock(spec=TrainingConfig)
        mock_optimizer3.get_cfg_as_dict.return_value = mock_training_cfg3
        mock_runner_config.optimizer_generator = (
            mo for mo in [mock_optimizer1, mock_optimizer2, mock_optimizer3])

        test_batch_stats = BatchStatistics(1, 1, 1)
        e = EpochStatistics(1)
        e.add_batch(test_batch_stats)

        mock_optimizer1.train.return_value = (arch, [e], 1)
        mock_optimizer1.test = Mock()
        mock_optimizer2.train.return_value = (arch, [e], 1)
        mock_optimizer2.test = Mock()
        mock_optimizer3.train.return_value = (arch, [e], 1)
        mock_optimizer3.test = Mock()

        test_return_dict = {
            'clean_accuracy': 1,
            'clean_n_total': 1,
            'triggered_accuracy': 1,
            'triggered_n_total': 1,
        }
        mock_optimizer3.test.return_value = test_return_dict

        # create the runner and replace _save_model_and_stats with a mock
        runner = Runner(mock_runner_config)
        mock_save_model = Mock()
        runner._save_model_and_stats = mock_save_model

        # run function
        with patch("trojai.modelgen.runner.TrainingRunStatistics") as p:
            runner.run()

            mock_optimizer1.train.assert_called_once_with(
                arch, train1, False, None)
            mock_optimizer1.test.assert_not_called()
            mock_optimizer2.train.assert_called_once_with(
                arch, train2, False, None)
            mock_optimizer2.test.assert_not_called()
            mock_optimizer3.train.assert_called_once_with(
                arch, train3, False, None)
            mock_optimizer3.test.assert_called_once_with(
                arch, ctest, ttest, cttest, False, None)
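Both halves of this test depend on how `assert_has_calls` treats order: with `any_order=False` the listed calls must appear as a consecutive, in-order run within the mock's call history. Reusing one `mock_optimizer1` for all three training sets therefore still checks that the datasets were consumed in sequence. A standalone illustration:

    from unittest.mock import Mock, call

    m = Mock()
    m('a')
    m('b')
    m('c')
    m.assert_has_calls([call('a'), call('b'), call('c')])  # consecutive and in order: passes
    m.assert_has_calls([call('b'), call('c')])             # any consecutive window passes too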