# lr_mock is injected by unittest.mock's @patch; the target shown here is assumed
# from context (the torchbearer callback wraps torch.optim.lr_scheduler.CosineAnnealingLR).
@patch('torch.optim.lr_scheduler.CosineAnnealingLR')
def test_lambda_lr(self, lr_mock):
    state = {torchbearer.OPTIMIZER: 'optimizer'}

    scheduler = CosineAnnealingLR(4, eta_min=10, last_epoch=-4, step_on_batch='batch')
    scheduler.on_start(state)

    lr_mock.assert_called_once_with('optimizer', 4, eta_min=10, last_epoch=-4)
    self.assertEqual(scheduler._step_on_batch, 'batch')
Example #2

@patch('torch.optim.lr_scheduler.CosineAnnealingLR')
def test_lambda_lr(self, lr_mock):
    state = {torchbearer.OPTIMIZER: 'optimizer', torchbearer.EPOCH: 0}

    scheduler = CosineAnnealingLR(T_max=4, eta_min=10, step_on_batch=True)
    scheduler.on_start(state)

    lr_mock.assert_called_once_with('optimizer', T_max=4, eta_min=10, last_epoch=-1)
    self.assertTrue(scheduler._step_on_batch)
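For orientation, a minimal usage sketch of the callback these tests exercise (assumed, not taken from the test file; the toy model and optimizer are illustrative only):

import torch
import torch.nn as nn
from torchbearer import Trial
from torchbearer.callbacks import CosineAnnealingLR

model = nn.Linear(10, 2)  # toy model, illustrative only
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

# step_on_batch=False (the default) steps the wrapped torch scheduler once per
# epoch; step_on_batch=True steps it once per training batch instead.
annealer = CosineAnnealingLR(T_max=4, eta_min=0.)

trial = Trial(model, optimizer, nn.CrossEntropyLoss(), metrics=['loss'],
              callbacks=[annealer])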
Example #3
# Pointcloud fmix converts voxel grids back into point clouds after mixing
mode = 'pointcloud_fmix' if (args.msda_mode == 'fmix' and args.dataset == 'modelnet') else args.msda_mode
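# (`modes` is defined earlier in the file and, by assumption here, maps each msda
# mode name such as 'fmix' or 'cutmix' to the callback that implements it.)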

# CutMix callback returns mixed and original targets. We mix in the loss function instead
@torchbearer.callbacks.on_sample
def cutmix_reformat(state):
    state[torchbearer.Y_TRUE] = state[torchbearer.Y_TRUE][0]
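# (torchbearer.callbacks.on_sample registers the function on the on_sample hook,
# which fires after each batch is loaded and before the forward pass, so the
# mixed/original target pair is collapsed before the loss ever sees it.)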

cb = [tboard, tboardtext, write_params, torchbearer.callbacks.MostRecent(args.model_file)]
# Toxic helper needs to go before the msda to reshape the input
if args.dataset in ['toxic', 'imdb', 'yelp_2', 'yelp_5']:
    cb.append(ToxicHelper(to_float=args.dataset != 'yelp_5'))
if args.msda_mode not in [None, 'None']:
    cb.append(modes[mode])
if args.cutout:
    cb.append(Cutout(1, args.cutout_l))
if args.random_erase:
    cb.append(RandomErase(1, args.cutout_l))
# WARNING: Schedulers appear to be broken (wrong lr output) in some versions of PyTorch, including 1.4. We used 1.3.1
if args.cosine_scheduler:
    cb.append(CosineAnnealingLR(args.epoch, eta_min=0.))
else:
    cb.append(MultiStepLR(args.schedule))
if args.lr_warmup:
    cb.append(WarmupLR(0.1, args.lr))
if args.msda_mode == 'cutmix':
    cb.append(cutmix_reformat)

# FMix loss is equivalent to mixup loss and works for all msda in torchbearer
if args.msda_mode not in [None, 'None']:
    bce = args.dataset in ['toxic', 'toxic_bert', 'imdb', 'yelp_2']
    criterion = modes['fmix'].loss(bce)
elif args.dataset in ['toxic', 'toxic_bert', 'imdb', 'yelp_2']:
    criterion = nn.BCEWithLogitsLoss()
else:
    criterion = nn.CrossEntropyLoss()
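
# A minimal sketch of a mixup-style mixed loss of the kind described above; the
# function name and the (index, lam) arguments are assumptions for illustration,
# not the exact signature of modes['fmix'].loss:
import torch.nn.functional as F

def mixed_loss(pred, y, index, lam, bce=False):
    # Combine the loss against the original targets y with the loss against the
    # shuffled targets y[index], weighted by the mixing coefficient lam.
    loss_fn = F.binary_cross_entropy_with_logits if bce else F.cross_entropy
    return lam * loss_fn(pred, y) + (1 - lam) * loss_fn(pred, y[index])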

metrics_append = []
if 'bengali' in args.dataset:
    from utils.macro_recall import MacroRecall
Example #4
File: trainer.py Project: zymale/FMix
mode = 'pointcloud_fmix' if (args.msda_mode == 'fmix' and args.dataset == 'modelnet') else args.msda_mode

cb = [tboard, tboardtext, write_params, torchbearer.callbacks.MostRecent(args.model_file)]
# Toxic helper needs to go before the msda to reshape the input
if args.dataset == 'toxic':
    cb.append(ToxicHelper())
if args.msda_mode not in [None, 'None']:
    cb.append(modes[mode])
if args.cutout:
    cb.append(Cutout(1, args.cutout_l))
if args.random_erase:
    cb.append(RandomErase(1, args.cutout_l))
# WARNING: Schedulers appear to be broken (wrong lr output) in some versions of PyTorch, including 1.4. We used 1.3.1
if args.cosine_scheduler:
    cb.append(CosineAnnealingLR(args.epoch, eta_min=0.))
else:
    cb.append(MultiStepLR(args.schedule))
if args.lr_warmup:
    cb.append(WarmupLR(0.1, args.lr))

# FMix loss is equivalent to mixup loss and works for all msda in torchbearer
if args.msda_mode not in [None, 'None']:
    bce = args.dataset == 'toxic'
    criterion = modes['fmix'].loss(bce)
elif args.dataset == 'toxic':
    criterion = nn.BCEWithLogitsLoss()
else:
    criterion = nn.CrossEntropyLoss()

# from torchbearer.metrics.roc_auc_score import RocAucScore
print('==> Training model..')
trial = Trial(net,
              optimizer,