def train_sequence_si():
    # Sweep SiNet training over the sampling sizes k and the fec loss settings.
    ks = [10, 20, 30]
    fecs = [[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]]
    logdir = 'modeldir/stage_all/seq_si/'
    from loss import FECLoss
    for i, k in enumerate(ks):
        for fec in fecs[i]:
            # Only even fec settings are trained; odd ones are skipped.
            if fec % 2 == 1:
                continue
            cfg = _allrun_config_si(k)
            cfg['lr'] = 0.00001
            cfg['batch'] = 64
            cfg['epochs'] = 200
            cfg['scheduler'] = True
            cfg['patience'] = 30
            cfg['step'] = 0
            if fec == 0:
                # fec == 0 falls back to plain BCE; otherwise FECLoss is
                # weighted proportionally to the batch size.
                cfg['criterion'] = torch.nn.BCELoss()
                cfg['model'] = 'resnet18b4_si_k%d' % (k)
            else:
                cfg['criterion'] = FECLoss(alpha=cfg['batch'] * fec)
                cfg['model'] = 'resnet18b4_si_k%d_fec%d' % (k, fec)
            cfg['model_dir'] = '%s/%s' % (logdir, cfg['model'])
            model = nn.DataParallel(sinet.SiNet(nblock=4, k=k).cuda())
            train.run_train(model, cfg)
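# A minimal sketch, not part of the original training code: it only enumerates
# the (k, fec) pairs that train_sequence_si() actually trains, given that odd
# fec values are skipped in the loop above. The helper name is hypothetical.
def _list_sequence_si_runs(ks=(10, 20, 30), fecs=(0, 1, 2, 3, 4, 5)):
    return [(k, fec) for k in ks for fec in fecs if fec % 2 == 0]
    # -> [(10, 0), (10, 2), (10, 4), (20, 0), (20, 2), (20, 4), (30, 0), (30, 2), (30, 4)]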
def train_smallnet_si_boost_fec(s=2, k=10):
    cfg = util.default_cfg()
    # Stratified SI datasets for the given stage and k.
    train_dataset = dataset.StratifySIDataset(mode='train', stage=s, k=k)
    val_dataset = dataset.StratifySIDataset(mode='val', stage=s, k=k)
    test_dataset = dataset.StratifySIDataset(mode='test', stage=s, k=k)
    cfg['train'] = train_dataset
    cfg['val'] = val_dataset
    cfg['test'] = test_dataset
    cfg['batch'] = 64
    cfg['epochs'] = 500
    cfg['scheduler'] = True
    cfg['decay'] = 0.01
    cfg['lr'] = 0.0001
    cfg['patience'] = 20
    cfg['collate'] = default_collate
    cfg['instance'] = _train_si
    from loss import FECLoss
    # Per-sample losses (reduction='none') so the boosting scheme can reweight them.
    cfg['criterion'] = FECLoss(alpha=64, reduction='none')
    cfg['model'] = 'smallnet_si_k%d_boost_fec1' % (k)
    cfg['model_dir'] = 'modeldir/stage%d/smallnet_si_k%d_boost_fec1' % (s, k)
    model_pth = os.path.join(cfg['model_dir'], 'model.pth')
    model = nn.DataParallel(sinet.SmallNet(k=k).cuda())
    # Resume from an existing checkpoint if one is present in the model directory.
    if os.path.exists(model_pth):
        ckp = torch.load(model_pth)
        model.load_state_dict(ckp['model'])
        cfg['step'] = ckp['epoch'] + 1
        print("load pretrained model", model_pth, "start epoch:", cfg['step'])
    run_train(model, cfg)
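# A hypothetical helper, not present in the original file: it factors out the
# checkpoint-resume pattern used in train_smallnet_si_boost_fec above and again
# in train_resnet_si below, relying only on the ckp['model'] / ckp['epoch']
# layout those functions already assume.
def _maybe_resume(model, cfg):
    model_pth = os.path.join(cfg['model_dir'], 'model.pth')
    if os.path.exists(model_pth):
        ckp = torch.load(model_pth)
        model.load_state_dict(ckp['model'])
        cfg['step'] = ckp['epoch'] + 1
        print("load pretrained model", model_pth, "start epoch:", cfg['step'])
    return cfg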
def train_smallnet_stratify_pj_fecq(s=2, k=10):
    cfg = util.default_cfg()
    cfg = train._config_stratify_pj_dataset(cfg, s, k)
    model = nn.DataParallel(sinet.SmallNet(k=k).cuda())
    from loss import FECLoss
    cfg['criterion'] = FECLoss(alpha=8)
    cfg['model'] = 'smallnet_pj_k%d_fec0.25' % (k)
    cfg['model_dir'] = 'modeldir/stage%d/smallnet_pj_k%d_fec0.25' % (s, k)
    cfg = train._train_config_pj(model, cfg)
    cfg['scheduler'] = False
    cfg['lr'] = 0.0001
    cfg['epochs'] = 1000
    train.run_train(model, cfg)
def train_tinynet_stratify_si(s=2, k=10):
    cfg = util.default_cfg()
    cfg = train._config_stratify_si_dataset(cfg, s, k)
    from loss import FECLoss
    cfg['criterion'] = FECLoss(alpha=32)
    cfg['model'] = 'tinynet_si_k%d_fec0.5' % (k)
    cfg['model_dir'] = 'modeldir/stage%d/tinynet_si_k%d_fec0.5' % (s, k)
    cfg['collate'] = default_collate
    cfg['instance'] = train._train_si
    model = nn.DataParallel(sinet.TinyNet(k=k).cuda())
    cfg = train._train_config_si(model, cfg)
    train.run_train(model, cfg)
def train_resnet_si(k=10):
    cfg = _allrun_config_si(k)
    from loss import FECLoss
    cfg['criterion'] = FECLoss(alpha=64)
    model = nn.DataParallel(sinet.SiNet(nblock=4, k=k).cuda())
    cfg['model'] = 'resnet18b4_si_k%d_fec1' % (k)
    cfg['model_dir'] = 'modeldir/stage_all/resnet18b4_si_k%d_fec1' % (k)
    cfg['lr'] = 0.0001
    # Resume from an existing checkpoint if one is present in the model directory.
    model_pth = os.path.join(cfg['model_dir'], 'model.pth')
    if os.path.exists(model_pth):
        ckp = torch.load(model_pth)
        model.load_state_dict(ckp['model'])
        cfg['step'] = ckp['epoch'] + 1
        print("load pretrained model", model_pth, "start epoch:", cfg['step'])
    train.run_train(model, cfg)
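# A minimal entry-point sketch, an assumption rather than part of the original
# file: it dispatches one of the training routines defined above by name, e.g.
# `python <this file> train_resnet_si`, defaulting to train_resnet_si.
if __name__ == '__main__':
    import sys
    entry = globals()[sys.argv[1]] if len(sys.argv) > 1 else train_resnet_si
    entry()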