import os

import numpy as np
import torch
import torch.nn as nn
from torch.utils.data.dataloader import default_collate

# Project-local modules referenced below; adjust the import paths if they live
# inside a package in this repo. `loss` is imported lazily inside the functions,
# as in the original code.
import dataset
import sinet
import train
import util


def train_sequence_si():
    """Sweep FEC-loss weights over several k values on the combined-stage SI task."""
    ks = [10, 20, 30]
    fecs = [[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]]
    logdir = 'modeldir/stage_all/seq_si/'
    from loss import FECLoss
    for i, k in enumerate(ks):
        for fec in fecs[i]:
            cfg = _allrun_config_si(k)
            cfg['lr'] = 0.00001
            cfg['batch'] = 64
            cfg['epochs'] = 200
            cfg['scheduler'] = True
            cfg['patience'] = 30
            cfg['step'] = 0
            # Only even FEC weights are trained; odd values are skipped.
            if fec % 2 == 1:
                continue
            if fec == 0:
                # fec == 0 falls back to plain BCE.
                cfg['criterion'] = torch.nn.BCELoss()
                cfg['model'] = 'resnet18b4_si_k%d' % (k)
            else:
                # The FEC loss weight scales with the batch size.
                cfg['criterion'] = FECLoss(alpha=cfg['batch'] * fec)
                cfg['model'] = 'resnet18b4_si_k%d_fec%d' % (k, fec)
            cfg['model_dir'] = '%s/%s' % (logdir, cfg['model'])
            model = nn.DataParallel(sinet.SiNet(nblock=4, k=k).cuda())
            train.run_train(model, cfg)
def train_resnet_pj(s=2, k=10):
    cfg = util.default_cfg()
    cfg = train._config_pj_dataset(cfg, s, k)
    cfg['model'] = 'resnet_pj_k%d' % (k)
    cfg['model_dir'] = 'modeldir/stage%d/resnet_pj_k%d' % (s, k)
    model = nn.DataParallel(sinet.SiNet(nblock=4, k=k).cuda())
    cfg = train._train_config_pj(model, cfg)
    train.run_train(model, cfg)
def train_resnet_si(s=2, k=10, val_index=4):
    """Train one fold of the stage-s SI model.

    Note: within this function val_index only affects the checkpoint name and
    directory; it is not passed to train._config_si_dataset.
    """
    cfg = util.default_cfg()
    cfg = train._config_si_dataset(cfg, s, k)
    cfg['model'] = 'resnet_si_k%d_val%d' % (k, val_index)
    cfg['model_dir'] = 'modeldir/stage%d/resnet_si_k%d_val%d' % (s, k, val_index)
    model = nn.DataParallel(sinet.SiNet(nblock=4, k=k).cuda())
    cfg = train._train_config_si(model, cfg)
    train.run_train(model, cfg)
def train_resnet_stratify_si(s=2, k=10):
    cfg = util.default_cfg()
    cfg = train._config_stratify_si_dataset(cfg, s, k)
    # from loss import FECLoss
    # cfg['criterion'] = FECLoss(alpha=48)
    # from loss import SFocalLoss
    # cfg['criterion'] = SFocalLoss(gamma=1)
    model = nn.DataParallel(sinet.SiNet(nblock=2, k=k).cuda())
    cfg['model'] = 'resnet18b2_si_k%d' % (k)
    cfg['model_dir'] = 'modeldir/stage%d/resnet18b2_si_k%d' % (s, k)
    cfg = train._train_config_si(model, cfg)
    cfg['scheduler'] = False
    train.run_train(model, cfg)
def train_resnet_si(k=10):
    """Train the 4-block SiNet with FEC loss on all stages, resuming if a checkpoint exists.

    Note: this definition shadows the fold-wise train_resnet_si(s, k, val_index)
    above when both live in the same module; rename one of them if both entry
    points are needed.
    """
    cfg = _allrun_config_si(k)
    from loss import FECLoss
    cfg['criterion'] = FECLoss(alpha=64)
    model = nn.DataParallel(sinet.SiNet(nblock=4, k=k).cuda())
    cfg['model'] = 'resnet18b4_si_k%d_fec1' % (k)
    cfg['model_dir'] = 'modeldir/stage_all/resnet18b4_si_k%d_fec1' % (k)
    cfg['lr'] = 0.0001
    # Resume from an existing checkpoint in the model directory, if any.
    model_pth = os.path.join(cfg['model_dir'], 'model.pth')
    if os.path.exists(model_pth):
        ckp = torch.load(model_pth)
        model.load_state_dict(ckp['model'])
        cfg['step'] = ckp['epoch'] + 1
        print("load pretrained model", model_pth, "start epoch:", cfg['step'])
    train.run_train(model, cfg)
def run_kfold_test(k=10):
    """Ensemble the five fold models per stage and write mean-score metrics."""
    for s in [6, 5, 4, 3, 2]:
        test_dataset = dataset.SIDataset(mode='test', stage=s, k=k)
        s_dir = 'modeldir/stage%d' % s
        s_score = []
        s_label = []
        for val_index in [4, 3, 2, 1, 0]:
            m_dir = '%s/resnet_si_k%d_val%d' % (s_dir, k, val_index)
            model_pth = os.path.join(m_dir, 'model.pth')
            model = nn.DataParallel(sinet.SiNet(nblock=4, k=k).cuda())
            ckp = torch.load(model_pth)
            model.load_state_dict(ckp['model'])
            cfg = util.default_cfg()
            cfg['test'] = test_dataset
            cfg['batch'] = 128
            cfg['collate'] = default_collate
            cfg['instance'] = train._train_si
            cfg['model'] = m_dir
            np_score, np_label = train.run_test_score(model, cfg)
            s_score.append(np_score)
            s_label.append(np_label)
        # Average the per-fold scores and threshold at 0.5 for hard predictions.
        m_score = np.mean(np.stack(s_score, axis=0), axis=0)
        print("m_score", m_score.shape, 'np_label', np_label.shape)
        np_pd = (m_score > 0.5).astype(int)  # np.int is removed in recent NumPy
        mean_dir = '%s/resnet_si_k%d_mean' % (s_dir, k)
        if not os.path.exists(mean_dir):
            os.mkdir(mean_dir)
        pth = os.path.join(mean_dir, 'metrics.csv')
        util.write_metrics(pth, np_label, np_pd, np_score)
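
# Optional, illustrative command-line driver (not part of the original entry points):
# a minimal sketch assuming this file is executed directly as a script. The task
# names and argparse flags below are made up for illustration. The fold-wise
# train_resnet_si(s, k, val_index) is not wired in here because it is shadowed by
# the single-argument variant defined above.
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(
        description='Entry points for SI/PJ training and k-fold evaluation')
    parser.add_argument('task',
                        choices=['seq_si', 'pj', 'stratify_si', 'si_all', 'kfold_test'])
    parser.add_argument('--stage', type=int, default=2)
    parser.add_argument('--k', type=int, default=10)
    args = parser.parse_args()

    if args.task == 'seq_si':
        train_sequence_si()
    elif args.task == 'pj':
        train_resnet_pj(s=args.stage, k=args.k)
    elif args.task == 'stratify_si':
        train_resnet_stratify_si(s=args.stage, k=args.k)
    elif args.task == 'si_all':
        train_resnet_si(k=args.k)
    elif args.task == 'kfold_test':
        run_kfold_test(k=args.k)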