def test_logged_lr(self):
        # Mock LR scheduler
        def scheduler_mock(where):
            return where

        mock_lr_scheduler = mock.Mock(side_effect=scheduler_mock)
        mock_lr_scheduler.update_interval = UpdateInterval.STEP
        config = get_test_mlp_task_config()
        config["num_epochs"] = 3
        config["dataset"]["train"]["batchsize_per_replica"] = 5
        config["dataset"]["test"]["batchsize_per_replica"] = 5
        task = build_task(config)
        task.optimizer.lr_scheduler = mock_lr_scheduler
        trainer = LocalTrainer()

        # 2 LR updates per epoch x 3 epochs = 6 step-level updates; at the end of
        # each train epoch the LR is logged once more, giving 9 logged values
        lr_order = [
            0.0, 1 / 6, 1 / 6, 2 / 6, 3 / 6, 3 / 6, 4 / 6, 5 / 6, 5 / 6
        ]
        lr_list = []

        def mock_log_lr(task: ClassyTask, local_variables) -> None:
            lr_list.append(task.optimizer.lr)

        with mock.patch.object(LossLrMeterLoggingHook,
                               "_log_lr",
                               side_effect=mock_log_lr):
            hook = LossLrMeterLoggingHook(1)
            task.set_hooks([hook])
            trainer.train(task)
            self.assertEqual(lr_list, lr_order)
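These snippets appear to be test methods excerpted from Classy Vision's test suite, so their imports are not shown. A rough sketch of the imports they rely on follows; the exact module paths are my assumption and have moved between Classy Vision releases.

# Hedged sketch: imports the snippets in this section appear to assume.
# Module paths are an assumption and differ between Classy Vision releases.
from unittest import mock

from classy_vision.hooks import (
    ClassyHook,
    LossLrMeterLoggingHook,
    TensorboardPlotHook,
)
from classy_vision.optim.param_scheduler import ClassyParamScheduler, UpdateInterval
from classy_vision.tasks import ClassyTask, build_task
from classy_vision.trainer import LocalTrainer

# get_test_mlp_task_config comes from the repo's test utilities, e.g.
# from test.generic.config_utils import get_test_mlp_task_config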
Example #2
    def test_logged_lr(self):
        # Mock LR scheduler
        def scheduler_mock(where):
            return where

        mock_lr_scheduler = mock.Mock(side_effect=scheduler_mock)
        mock_lr_scheduler.update_interval = UpdateInterval.STEP
        config = get_test_mlp_task_config()
        config["num_epochs"] = 3
        config["dataset"]["train"]["batchsize_per_replica"] = 10
        config["dataset"]["test"]["batchsize_per_replica"] = 5
        task = build_task(config)
        task.optimizer.param_schedulers["lr"] = mock_lr_scheduler
        trainer = LocalTrainer()

        # 2 LR updates per epoch x 3 epochs = 6 updates in total
        lr_order = [0.0, 1 / 6, 2 / 6, 3 / 6, 4 / 6, 5 / 6]
        lr_list = []

        class LRLoggingHook(ClassyHook):
            on_end = ClassyHook._noop
            on_phase_end = ClassyHook._noop
            on_phase_start = ClassyHook._noop
            on_start = ClassyHook._noop

            def on_step(self, task):
                if task.train:
                    lr_list.append(task.optimizer.parameters.lr)

        hook = LRLoggingHook()
        task.set_hooks([hook])
        trainer.train(task)
        self.assertEqual(lr_list, lr_order)
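The expected sequence follows directly from the setup: the mock scheduler returns `where` (the fraction of training completed), and 3 epochs with 2 train steps each give 6 step-level updates, so each logged value is step / 6. A quick check of that arithmetic:

# Sanity check of the expected LR sequence: the mock scheduler returns `where`,
# i.e. the fraction of training completed at each step-level update.
num_epochs, steps_per_epoch = 3, 2
total_steps = num_epochs * steps_per_epoch  # 6
expected = [step / total_steps for step in range(total_steps)]
assert expected == [0.0, 1 / 6, 2 / 6, 3 / 6, 4 / 6, 5 / 6]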
Example #3
    def test_logged_lr(self):
        class SchedulerMock(ClassyParamScheduler):
            def __call__(self, where):
                return where

        mock_lr_scheduler = SchedulerMock(UpdateInterval.STEP)
        config = get_test_mlp_task_config()
        config["num_epochs"] = 3
        config["dataset"]["train"]["batchsize_per_replica"] = 10
        config["dataset"]["test"]["batchsize_per_replica"] = 5
        task = build_task(config)
        task.set_optimizer_schedulers({"lr": mock_lr_scheduler})
        trainer = LocalTrainer()

        # 2 LR updates per epoch x 3 epochs = 6 updates in total
        lr_order = [0.0, 1 / 6, 2 / 6, 3 / 6, 4 / 6, 5 / 6]
        lr_list = []

        class LRLoggingHook(ClassyHook):
            on_end = ClassyHook._noop
            on_phase_end = ClassyHook._noop
            on_phase_start = ClassyHook._noop
            on_start = ClassyHook._noop

            def on_step(self, task):
                if task.train:
                    lr_list.append(task.optimizer.options_view.lr)

        hook = LRLoggingHook()
        task.set_hooks([hook])
        trainer.train(task)
        self.assertEqual(lr_list, lr_order)
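Note that the three variants above read the current learning rate through different attributes (`task.optimizer.lr`, `task.optimizer.parameters.lr`, `task.optimizer.options_view.lr`), reflecting API changes between Classy Vision versions. A hypothetical helper (not part of the library) that tolerates any of the three might look like this:

# Hypothetical helper (not provided by Classy Vision): read the current LR
# across API revisions, where newer optimizers expose `options_view` and
# older ones expose `parameters` or a plain `lr` attribute.
def current_lr(optimizer):
    for attr in ("options_view", "parameters"):
        view = getattr(optimizer, attr, None)
        if view is not None and hasattr(view, "lr"):
            return view.lr
    return optimizer.lr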
Example #4
    def test_train(self):
        config = get_test_mlp_task_config()
        task = build_task(config)
        num_samples = 10
        precise_batch_norm_hook = PreciseBatchNormHook(num_samples)
        task.set_hooks([precise_batch_norm_hook])
        task.prepare()
        trainer = ClassyTrainer()
        trainer.train(task)
Example #5
    def test_logged_lr(self):
        # Mock LR scheduler
        class SchedulerMock(ClassyParamScheduler):
            def __call__(self, where):
                return where

        mock_lr_scheduler = SchedulerMock(UpdateInterval.STEP)

        # Mock Logging
        class DummySummaryWriter(object):
            def __init__(self):
                self.scalar_logs = {}

            def add_scalar(self,
                           key,
                           value,
                           global_step=None,
                           walltime=None) -> None:
                self.scalar_logs[key] = self.scalar_logs.get(key, []) + [value]

            def add_histogram(self,
                              key,
                              value,
                              global_step=None,
                              walltime=None) -> None:
                return

            def add_text(self, *args, **kwargs):
                pass

            def flush(self):
                return

        config = get_test_mlp_task_config()
        config["num_epochs"] = 3
        config["dataset"]["train"]["batchsize_per_replica"] = 10
        config["dataset"]["test"]["batchsize_per_replica"] = 5
        task = build_task(config)

        writer = DummySummaryWriter()
        hook = TensorboardPlotHook(writer)
        hook.log_period = 1
        task.set_hooks([hook])
        task.set_optimizer_schedulers({"lr": mock_lr_scheduler})

        trainer = LocalTrainer()
        trainer.train(task)

        # We have 20 samples, batch size is 10. Each epoch is done in two steps.
        self.assertEqual(
            writer.scalar_logs["Learning Rate/train"],
            [0, 1 / 6, 2 / 6, 3 / 6, 4 / 6, 5 / 6],
        )
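Outside of a test, the `DummySummaryWriter` stand-in would be replaced with a real TensorBoard writer; `TensorboardPlotHook` only needs the `add_scalar`/`add_histogram`/`flush` interface shown above. A minimal sketch, assuming the `tensorboard` package is installed and `task` is built as in the test:

# Minimal sketch (assumes the tensorboard package is installed): pass a real
# SummaryWriter to TensorboardPlotHook instead of the dummy writer above.
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter(log_dir="/tmp/classy_tb_logs")
task.set_hooks([TensorboardPlotHook(writer)])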
Example #6
    def setUp(self):
        config = get_test_mlp_task_config()
        invalid_config = copy.deepcopy(config)
        invalid_config["name"] = "invalid_task"
        self.config_files = {}
        for config_key, config in [
            ("config", config),
            ("invalid_config", invalid_config),
        ]:
            with tempfile.NamedTemporaryFile(mode="w", delete=False) as f:
                json.dump(config, f)
                f.flush()
                self.config_files[config_key] = f.name
        self.path = Path(__file__).parent.absolute()
Example #7
    def test_training(self):
        """Checks we can train a small MLP model."""
        config = get_test_mlp_task_config()
        task = (
            ClassificationTask()
            .set_num_epochs(10)
            .set_loss(build_loss(config["loss"]))
            .set_model(build_model(config["model"]))
            .set_optimizer(build_optimizer(config["optimizer"]))
            .set_meters([AccuracyMeter(topk=[1])])
            .set_hooks([LossLrMeterLoggingHook()])
        )
        for split in ["train", "test"]:
            dataset = build_dataset(config["dataset"][split])
            task.set_dataset(dataset, split)

        self.assertTrue(task is not None)

        trainer = LocalTrainer()
        trainer.train(task)
        accuracy = task.meters[0].value["top_1"]
        self.assertAlmostEqual(accuracy, 1.0)
Example #8
    def test_logged_lr(self):
        # Mock LR scheduler
        def scheduler_mock(where):
            return where

        mock_lr_scheduler = mock.Mock(side_effect=scheduler_mock)
        mock_lr_scheduler.update_interval = UpdateInterval.STEP

        # Mock Logging
        class DummySummaryWriter(object):
            def __init__(self):
                self.scalar_logs = {}

            def add_scalar(self,
                           key,
                           value,
                           global_step=None,
                           walltime=None) -> None:
                self.scalar_logs[key] = self.scalar_logs.get(key, []) + [value]

            def flush(self):
                return

        config = get_test_mlp_task_config()
        config["num_epochs"] = 3
        config["dataset"]["train"]["batchsize_per_replica"] = 5
        config["dataset"]["test"]["batchsize_per_replica"] = 5
        task = build_task(config)

        writer = DummySummaryWriter()
        hook = TensorboardPlotHook(writer)
        task.set_hooks([hook])
        task.optimizer.param_schedulers["lr"] = mock_lr_scheduler

        trainer = LocalTrainer()
        trainer.train(task)

        # We have 10 samples, batch size is 5. Each epoch is done in two steps.
        self.assertEqual(
            writer.scalar_logs["train_learning_rate_updates"],
            [0, 1 / 6, 2 / 6, 3 / 6, 4 / 6, 5 / 6],
        )
Example #9
    def test_bn_stats(self):
        base_self = self

        class TestHook(ClassyHook):
            on_start = ClassyHook._noop
            on_phase_start = ClassyHook._noop
            on_phase_end = ClassyHook._noop
            on_end = ClassyHook._noop

            def __init__(self):
                super().__init__()
                self.train_bn_stats = None
                self.test_bn_stats = None

            def on_step(self, task):
                if task.train:
                    self.train_bn_stats = base_self._get_bn_stats(
                        task.base_model)
                else:
                    self.test_bn_stats = base_self._get_bn_stats(
                        task.base_model)

        config = get_test_mlp_task_config()
        task = build_task(config)
        num_samples = 10
        precise_batch_norm_hook = PreciseBatchNormHook(num_samples)
        test_hook = TestHook()
        task.set_hooks([precise_batch_norm_hook, test_hook])
        trainer = ClassyTrainer()
        trainer.train(task)

        updated_bn_stats = self._get_bn_stats(task.base_model)

        # BN running stats are recomputed by the hook at the end of the train
        # phase, so stats captured during train steps should differ from the
        # final stats, while stats captured during test steps should match them
        self.assertFalse(
            self._compare_bn_stats(test_hook.train_bn_stats, updated_bn_stats))
        self.assertTrue(
            self._compare_bn_stats(test_hook.test_bn_stats, updated_bn_stats))
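The `_get_bn_stats` and `_compare_bn_stats` helpers are defined elsewhere in the test class and are not part of this excerpt. A hypothetical sketch of what they could look like (assuming `torch` is imported), snapshotting and comparing the running statistics of every BatchNorm layer:

    # Hypothetical helpers (the real ones live elsewhere in the test class):
    # snapshot and compare the running statistics of all BatchNorm layers.
    def _get_bn_stats(self, model):
        bn_types = (torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d)
        return {
            name: {
                "running_mean": module.running_mean.clone(),
                "running_var": module.running_var.clone(),
            }
            for name, module in model.named_modules()
            if isinstance(module, bn_types)
        }

    def _compare_bn_stats(self, stats_1, stats_2):
        # True if every layer's running mean and variance are numerically equal
        return all(
            torch.allclose(stats_1[name]["running_mean"], stats_2[name]["running_mean"])
            and torch.allclose(stats_1[name]["running_var"], stats_2[name]["running_var"])
            for name in stats_1
        )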