Example #1
    def test_get_optimizer(self):
        model = TempModel()

        for opt_name in AVAILABLE_OPTIMIZERS.keys():
            opt_cls = optim.get_optimizer(opt_name)
            opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)

            assert isinstance(opt, AVAILABLE_OPTIMIZERS[opt_name])
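All of the examples on this page rely on the same small test fixture, which the listing omits: a throwaway model, a few learning-rate constants, and the NeMo imports. Below is a minimal sketch of what that fixture might look like. Only the names (TempModel, AVAILABLE_OPTIMIZERS, INITIAL_LR, MIN_LR, MAX_STEPS) come from the examples themselves; the import paths, layer size, constant values, and class name are assumptions.

# Assumed fixture: import paths, layer size, and constant values are guesses;
# only the names are taken from the examples on this page.
import torch
import torch.nn as nn
import omegaconf

from nemo.core import config, optim
from nemo.core.optim.optimizers import AVAILABLE_OPTIMIZERS


class TempModel(nn.Module):
    """Tiny model whose only job is to provide trainable parameters."""

    def __init__(self):
        super().__init__()
        self.layer = nn.Linear(5, 1)

    def forward(self, x):
        return self.layer(x)


class TestOptimizersSchedulers:
    INITIAL_LR = 0.1   # starting learning rate used by every example
    MIN_LR = 1e-5      # floor that the annealing schedulers decay towards
    MAX_STEPS = 10     # number of scheduler steps per test

    # the test_* methods shown in the examples live inside this class
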
Example #2
    def test_get_optimizer(self):
        model = TempModel()

        for opt_name in AVAILABLE_OPTIMIZERS.keys():
            if opt_name == 'fused_adam':
                if not torch.cuda.is_available():
                    continue
            opt_cls = optim.get_optimizer(opt_name)
            opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)

            assert isinstance(opt, AVAILABLE_OPTIMIZERS[opt_name])
Example #3
    def test_sched_config_parse_simple(self):
        model = TempModel()
        opt_cls = optim.get_optimizer('novograd')
        opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)

        basic_sched_config = {'name': 'CosineAnnealing', 'max_steps': 10}
        scheduler_setup = optim.lr_scheduler.prepare_lr_scheduler(opt, basic_sched_config)
        assert isinstance(scheduler_setup['scheduler'], optim.lr_scheduler.CosineAnnealing)

        dict_config = omegaconf.OmegaConf.create(basic_sched_config)
        scheduler_setup = optim.lr_scheduler.prepare_lr_scheduler(opt, dict_config)
        assert isinstance(scheduler_setup['scheduler'], optim.lr_scheduler.CosineAnnealing)
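Example #3 only checks the type of the object handed back under the 'scheduler' key. As a follow-up, the sketch below drives that scheduler through a plain loop, the same way Examples #8 to #10 step schedulers they build directly. The loop is illustrative, continues the variables from the example above, and is not part of the original test.

        # Illustrative continuation of Example #3: step the returned scheduler
        # alongside the optimizer, as a training loop would.
        scheduler = scheduler_setup['scheduler']
        for _ in range(basic_sched_config['max_steps']):
            opt.step()        # would normally follow a backward pass
            scheduler.step()  # advance the CosineAnnealing policy one step
        assert scheduler.get_last_lr()[0] <= self.INITIAL_LR
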
Example #4
    def test_register_optimizer(self):
        class TempOpt(torch.optim.SGD):
            pass

        class TempOptParams(config.optimizers.SGDParams):
            pass

        optim.register_optimizer('TempOpt', TempOpt, TempOptParams)

        model = TempModel()
        opt_cls = optim.get_optimizer('TempOpt')
        opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)

        assert isinstance(opt, TempOpt)
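After registration, the class returned by get_optimizer('TempOpt') behaves like any other torch.optim.Optimizer. A short illustrative continuation of Example #4 with one dummy update step; the input shape assumes the TempModel sketch near the top of this page (a Linear(5, 1) layer).

        # Illustrative continuation of Example #4: one dummy update step.
        x = torch.randn(4, 5)      # batch shape assumes TempModel wraps Linear(5, 1)
        loss = model(x).sum()      # any scalar stands in for a real loss
        loss.backward()
        opt.step()                 # the registered TempOpt updates the parameters
        opt.zero_grad()
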
Example #5
    def test_get_optimizer(self):
        model = TempModel()

        for opt_name in AVAILABLE_OPTIMIZERS.keys():
            if opt_name == 'fused_adam':
                if not torch.cuda.is_available():
                    continue
            opt_cls = optim.get_optimizer(opt_name)
            if opt_name == 'adafactor':
                # Adafactor's default mode uses relative_step without any lr.
                opt = opt_cls(model.parameters())
            else:
                opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)

            assert isinstance(opt, AVAILABLE_OPTIMIZERS[opt_name])
Example #6
    def test_sched_config_parse_from_cls(self):
        model = TempModel()
        opt_cls = optim.get_optimizer('novograd')
        opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)

        basic_sched_config = {
            'target': 'nemo.core.config.CosineAnnealingParams',
            'params': {'min_lr': 0.1},
            'max_steps': self.MAX_STEPS,
        }
        scheduler_setup = optim.lr_scheduler.prepare_lr_scheduler(opt, basic_sched_config)
        assert isinstance(scheduler_setup['scheduler'], optim.lr_scheduler.CosineAnnealing)

        dict_config = omegaconf.OmegaConf.create(basic_sched_config)
        scheduler_setup = optim.lr_scheduler.prepare_lr_scheduler(opt, dict_config)
        assert isinstance(scheduler_setup['scheduler'], optim.lr_scheduler.CosineAnnealing)
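Since prepare_lr_scheduler accepts an OmegaConf object, the same 'target'-style section can be written as YAML text and loaded with omegaconf.OmegaConf.create. A minimal sketch reusing only the keys from Example #6; the hard-coded max_steps of 10 is illustrative.

        # Illustrative continuation of Example #6: the same config as YAML text.
        yaml_sched_config = (
            "target: nemo.core.config.CosineAnnealingParams\n"
            "params:\n"
            "  min_lr: 0.1\n"
            "max_steps: 10\n"
        )
        dict_config = omegaconf.OmegaConf.create(yaml_sched_config)
        scheduler_setup = optim.lr_scheduler.prepare_lr_scheduler(opt, dict_config)
        assert isinstance(scheduler_setup['scheduler'], optim.lr_scheduler.CosineAnnealing)
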
Example #7
    def test_register_scheduler(self):
        class TempSched(optim.lr_scheduler.CosineAnnealing):
            pass

        class TempSchedParams(config.schedulers.CosineAnnealingParams):
            pass

        optim.lr_scheduler.register_scheduler('TempSched', TempSched, TempSchedParams)

        model = TempModel()
        opt_cls = optim.get_optimizer('novograd')
        opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
        sched_cls = optim.lr_scheduler.get_scheduler('TempSched')
        sched = sched_cls(opt, max_steps=self.MAX_STEPS)

        assert isinstance(sched, TempSched)
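Example #7 fetches the registered scheduler directly with get_scheduler. It seems reasonable to expect the registered name to work in the config-driven path from Example #3 as well; the sketch below is written under that assumption and is not taken from the original tests.

        # Assumption: a name registered via register_scheduler is also visible
        # to prepare_lr_scheduler, just like 'CosineAnnealing' in Example #3.
        temp_sched_config = {'name': 'TempSched', 'max_steps': self.MAX_STEPS}
        scheduler_setup = optim.lr_scheduler.prepare_lr_scheduler(opt, temp_sched_config)
        assert isinstance(scheduler_setup['scheduler'], TempSched)
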
Example #8
    def test_SquareAnnealing(self):
        model = TempModel()
        opt_cls = optim.get_optimizer('novograd')
        opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)

        # No warmup case
        policy = optim.lr_scheduler.SquareAnnealing(opt,
                                                    max_steps=self.MAX_STEPS,
                                                    min_lr=self.MIN_LR)
        initial_lr = policy.get_last_lr()[0]

        assert initial_lr == self.INITIAL_LR

        for i in range(self.MAX_STEPS):
            assert policy.get_last_lr()[0] <= self.INITIAL_LR
            opt.step()
            policy.step()

        policy.step()
        final_lr = policy.get_last_lr()[0]

        assert final_lr == self.MIN_LR

        # Warmup steps available
        policy = optim.lr_scheduler.SquareAnnealing(opt,
                                                    warmup_steps=5,
                                                    max_steps=self.MAX_STEPS,
                                                    min_lr=self.MIN_LR)
        initial_lr = policy.get_last_lr()[0]

        assert initial_lr < self.INITIAL_LR

        for i in range(self.MAX_STEPS):
            if i <= 5:
                assert policy.get_last_lr()[0] <= self.INITIAL_LR
            else:
                assert policy.get_last_lr()[0] < self.INITIAL_LR

            opt.step()
            policy.step()

        policy.step()
        final_lr = policy.get_last_lr()[0]

        assert final_lr == self.MIN_LR
Example #9
    def test_CosineAnnealing_with_noop_steps(self):
        model = TempModel()
        opt_cls = optim.get_optimizer('novograd')
        opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)

        # No warmup case
        policy = optim.lr_scheduler.CosineAnnealing(opt,
                                                    max_steps=self.MAX_STEPS,
                                                    min_lr=self.MIN_LR)
        initial_lr = policy.get_last_lr()[0]

        assert initial_lr == self.INITIAL_LR

        update_steps = 0
        for i in range(self.MAX_STEPS):
            assert policy.get_last_lr()[0] <= self.INITIAL_LR
            opt.step()
            policy.step()

            # Make every other scheduler step a no-op by rolling last_epoch back one
            if i % 2 == 0:
                policy.last_epoch -= 1
            else:
                update_steps += 1

        policy.step()
        update_steps += 1

        assert update_steps < self.MAX_STEPS

        final_lr = policy.get_last_lr()[0]
        assert final_lr > self.MIN_LR

        # update_steps = true number of scheduler updates performed, excluding the skipped (no-op) steps
        true_end_lr = policy._get_lr(step=update_steps)[0]
        assert final_lr == true_end_lr
Example #10
    def test_PolynomialHoldDecayAnnealing(self):
        model = TempModel()
        opt_cls = optim.get_optimizer('novograd')
        opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)

        # No warmup case
        policy = optim.lr_scheduler.PolynomialHoldDecayAnnealing(
            opt, power=2, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)
        initial_lr = policy.get_last_lr()[0]

        assert initial_lr == self.INITIAL_LR

        for i in range(self.MAX_STEPS):
            assert policy.get_last_lr()[0] <= self.INITIAL_LR
            opt.step()
            policy.step()

        policy.step()
        final_lr = policy.get_last_lr()[0]

        assert final_lr == self.MIN_LR

        # Warmup steps available
        policy = optim.lr_scheduler.PolynomialHoldDecayAnnealing(
            opt,
            power=2,
            warmup_steps=5,
            max_steps=self.MAX_STEPS,
            min_lr=self.MIN_LR)
        initial_lr = policy.get_last_lr()[0]

        assert initial_lr < self.INITIAL_LR

        for i in range(self.MAX_STEPS):
            if i <= 5:
                assert policy.get_last_lr()[0] <= self.INITIAL_LR
            else:
                assert policy.get_last_lr()[0] < self.INITIAL_LR

            opt.step()
            policy.step()

        policy.step()
        final_lr = policy.get_last_lr()[0]

        assert final_lr == self.MIN_LR

        # Warmup + Hold steps available
        policy = optim.lr_scheduler.PolynomialHoldDecayAnnealing(
            opt,
            warmup_steps=5,
            hold_steps=3,
            max_steps=self.MAX_STEPS,
            min_lr=self.MIN_LR,
            power=2)
        initial_lr = policy.get_last_lr()[0]

        assert initial_lr < self.INITIAL_LR

        for i in range(self.MAX_STEPS):
            if i <= 4:
                assert policy.get_last_lr()[0] <= self.INITIAL_LR
            elif i <= 8:
                assert policy.get_last_lr()[0] == self.INITIAL_LR
            else:
                assert policy.get_last_lr()[0] < self.INITIAL_LR
            opt.step()
            policy.step()

        policy.step()
        final_lr = policy.get_last_lr()[0]

        assert final_lr == self.MIN_LR
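The three branches in the last loop mirror the three phases of PolynomialHoldDecayAnnealing: a linear warmup for warmup_steps, a hold at the full learning rate for hold_steps, then polynomial decay (power=2) down to min_lr. The short illustrative sketch below prints the schedule on a fresh optimizer so the phases are easy to see; it reuses only the constructor call from the example and is not part of the original test.

        # Illustrative: print the LR at each step to see warmup -> hold -> decay.
        opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
        policy = optim.lr_scheduler.PolynomialHoldDecayAnnealing(
            opt, power=2, warmup_steps=5, hold_steps=3,
            max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)
        for step in range(self.MAX_STEPS):
            print(step, policy.get_last_lr()[0])
            opt.step()
            policy.step()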