def _create_optimizer(self):
        self._log.info(self.id, '=> setting Adam solver')
        param_groups = [
            {'params': bias_parameters(self.model.module),
             'weight_decay': self.cfg.train.bias_decay},
            {'params': weight_parameters(self.model.module),
             'weight_decay': self.cfg.train.weight_decay}]

        if self.cfg.train.optim == 'adamw':
            optimizer = AdamW(param_groups, self.cfg.train.lr,
                              betas=(self.cfg.train.momentum, self.cfg.train.beta))
        elif self.cfg.train.optim == 'adam':
            # optimizer = torch.optim.Adam(param_groups, self.cfg.train.lr,
            #                              betas=(self.cfg.train.momentum, self.cfg.train.beta),
            #                              eps=1e-7)
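            # NOTE: this branch passes self.model.parameters() directly, so the
            # per-group weight decay defined in param_groups above is not applied here.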
            optimizer = torch.optim.Adam(self.model.parameters(), lr=self.cfg.train.lr,
                                         betas=(self.cfg.train.momentum, self.cfg.train.beta))
        else:
            raise NotImplementedError(self.cfg.train.optim)

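        # Halve the learning rate once for each configured milestone epoch
        # that the current epoch has already reached.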
        for item in self.cfg.train.halflr:
            if self.i_epoch >= item:
                self._log.info(self.id, 'Halving LR')
                for g in optimizer.param_groups:
                    g['lr'] /= 2

        return optimizer
Example #2
    def _create_optimizer(self):
        self._log.info('=> setting Adam solver')

        param_groups = [
            {
                'params': bias_parameters(self.model[0].module),
                'weight_decay': self.cfg.bias_decay,
                'lr': self.cfg.lr_flow
            },
            {
                'params': weight_parameters(self.model[0].module),
                'weight_decay': self.cfg.wd_flow,
                'lr': self.cfg.lr_flow
            },
        ]
        if self.cfg.train_depth:
            param_groups += [{
                'params': bias_parameters(self.model[1].module),
                'weight_decay': self.cfg.bias_decay,
                'lr': self.cfg.lr_depth
            }, {
                'params': weight_parameters(self.model[1].module),
                'weight_decay': self.cfg.wd_depth,
                'lr': self.cfg.lr_depth
            }]
        else:
            for param in self.model[1].parameters():
                param.requires_grad = False

        if self.cfg.optim == 'adamw':
            optimizer = AdamW(param_groups,
                              betas=(self.cfg.momentum, self.cfg.beta))
        elif self.cfg.optim == 'adam':
            optimizer = torch.optim.Adam(param_groups,
                                         betas=(self.cfg.momentum,
                                                self.cfg.beta),
                                         eps=1e-7)
        else:
            raise NotImplementedError(self.cfg.optim)
        return optimizer
Example #3
    def _create_optimizer(self):
        self._log.info('=> setting Adam solver')
        param_groups = [
            {'params': bias_parameters(self.model.module),
             'weight_decay': self.cfg.bias_decay},
            {'params': weight_parameters(self.model.module),
             'weight_decay': self.cfg.weight_decay}]

        if self.cfg.optim == 'adamw':
            optimizer = AdamW(param_groups, self.cfg.lr,
                              betas=(self.cfg.momentum, self.cfg.beta))
        elif self.cfg.optim == 'adam':
            optimizer = torch.optim.Adam(param_groups, self.cfg.lr,
                                         betas=(self.cfg.momentum, self.cfg.beta),
                                         eps=1e-7)
        else:
            raise NotImplementedError(self.cfg.optim)
        return optimizer
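
All three snippets rely on bias_parameters and weight_parameters helpers that are not shown here. A minimal sketch, assuming they simply split a module's parameters by name so that biases and weights can receive different weight decay; the AdamW class is likewise assumed to come from torch.optim or a local implementation:

import torch.nn as nn

def bias_parameters(module: nn.Module):
    # assumed helper: parameters whose name contains 'bias' (conv/linear biases)
    return [p for name, p in module.named_parameters() if 'bias' in name]

def weight_parameters(module: nn.Module):
    # assumed helper: parameters whose name contains 'weight' (conv/linear weights, norm scales)
    return [p for name, p in module.named_parameters() if 'weight' in name]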