コード例 #1
0
ファイル: Train.py プロジェクト: Pseudo-Lab/20_MoA
            # Wrap the validation fold in the project's MoA dataset. Column
            # groups passed positionally: signature ids, g_cols, c_cols,
            # categorical, scored-target, and non-scored-target columns.
            # NOTE(review): semantics of the column groups inferred from their
            # names — confirm against the dataset definition upstream.
            valid_dataset = moaDataset(fold_valid.sig_id.values,
                                       fold_valid[g_cols].values,
                                       fold_valid[c_cols].values,
                                       fold_valid[cate_cols].values,
                                       fold_valid[score_cols].values,
                                       fold_valid[nonscore_cols].values)

            # drop_last=True keeps every training batch full-sized; the
            # validation loader keeps all rows in a fixed order.
            train_loader = DataLoader(train_dataset, batch_size=config.batch_size,
                                      shuffle=True, num_workers=8, drop_last=True)
            valid_loader = DataLoader(valid_dataset, batch_size=config.batch_size, shuffle=False, num_workers=8)

            model = moaModel(config, num_g, num_c, num_cate,
                             classes=config.num_scored,
                             non_classes=config.num_nonscored)
            train_module = Learner(model, train_loader, valid_loader, fold, config, seed)
            history = train_module.fit(config.n_epochs)

            # Accumulate this fold's training history, then record the fold's
            # best (minimum) validation value and the checkpoint path that
            # corresponds to this seed/fold combination.
            for key in hists.keys():
                hists[key].append(history[key])
            saved_models['cv'].append(min(history['valid']))
            saved_models['path'].append(os.path.join(config.log_dir, f"{config.name}_seed{seed}_fold{fold}.pth"))

        # Inference
        # Per-seed CV score: mean over folds of each fold's best validation value.
        cv_seed = np.mean([min(hist_valid) for hist_valid in hists['valid']])
        print(f'SEED {seed}: CV {cv_seed:.6f}')

    # Overall CV across all recorded folds/seeds; then build the test-set
    # dataset (this moaDataset call continues past the end of this chunk).
    cv_score = np.mean(saved_models['cv'])
    dataset = moaDataset(test_features.sig_id.values,
                         test_features[g_cols].values,
                         test_features[c_cols].values,
                         test_features[cate_cols].values,
コード例 #2
0
ファイル: Train.py プロジェクト: tacalvin/scribble2label
    # NOTE(review): the enclosing definition starts above this chunk. These
    # look like configuration constants — thr_conf presumably a confidence
    # threshold and alpha a mixing/weighting coefficient — confirm against
    # the full file before relying on this description.
    thr_conf = 0.8
    alpha = 0.2


if __name__ == '__main__':
    # Script entry point: train a scribble-supervised segmentation model on
    # one cross-validation fold, resuming from a checkpoint when available.
    seed_everything(config.seed)

    # U-Net with a ResNet-50 ImageNet-pretrained encoder and scSE attention;
    # 2 classes, activation=None so the model emits raw logits.
    model = Unet(encoder_name='resnet50', encoder_weights='imagenet', decoder_use_batchnorm=True,
                 decoder_attention_type='scse', classes=2, activation=None)

    # Hold out the configured fold for validation; train on the rest.
    df = pd.read_csv(config.df_path)
    train_df = df[df.fold != config.fold].reset_index(drop=True)
    valid_df = df[df.fold == config.fold].reset_index(drop=True)
    transforms = get_transforms(config.input_size, need=('train', 'val'))

    train_dataset = dsbDataset(config.data_dir, config.scr_dir, config.mask_dir, train_df,
                               tfms=transforms['train'], return_id=False)
    valid_dataset = dsbDataset(config.data_dir, config.scr_dir, config.mask_dir, valid_df,
                               tfms=transforms['val'], return_id=True)
    train_loader = DataLoader(dataset=train_dataset, batch_size=config.batch_size, num_workers=config.num_workers,
                              shuffle=True)
    # Validation runs one image at a time, in a stable order.
    valid_loader = DataLoader(dataset=valid_dataset, batch_size=1, num_workers=config.num_workers,
                              shuffle=False)

    # BUG FIX: the original bound the instance to the name `Learner`, shadowing
    # the Learner class itself (PEP 8: instances are snake_case). Renamed to
    # `learner` so the class remains usable after this line.
    learner = Learner(model, train_loader, valid_loader, config)
    # Resume from the best checkpoint in the log dir, if one exists.
    pretrained_path = os.path.join(config.log_dir, 'best_model.pth')
    if os.path.isfile(pretrained_path):
        learner.load(pretrained_path)
        learner.log(f"Checkpoint Loaded: {pretrained_path}")
    learner.fit(config.n_epochs)
コード例 #3
0
ファイル: main.py プロジェクト: melugi/mnist
        # Tail of a function whose definition starts above this chunk:
        # returns the fraction of correct predictions in the batch —
        # presumably the `batch_accuracy` metric used below; confirm upstream.
        return correct/len(batch_images)

def _mnist_loader(train, batch_size):
    """Build a DataLoader over MNIST normalized to [-1, 1] (mean=std=0.5).

    Factors out the transform + dataset + loader construction that the
    original duplicated verbatim for the train and validation splits.
    """
    tfm = torchvision.transforms.Compose([
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize((0.5,), (0.5,)),
    ])
    dataset = torchvision.datasets.MNIST('./', train=train, download=True, transform=tfm)
    # shuffle=True matches the original for BOTH splits.
    # NOTE(review): shuffling the validation loader is unnecessary (though
    # harmless for accuracy); kept to preserve original behavior.
    return torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True)


val_set = _mnist_loader(train=False, batch_size=128)
train_set = _mnist_loader(train=True, batch_size=512)

# 784 -> 128 -> 64 -> 10 MLP; LogSoftmax pairs with the NLLLoss passed below.
model = nn.Sequential(nn.Linear(28*28, 2**7),
                      nn.ReLU(),
                      nn.Linear(2**7, 2**6),
                      nn.ReLU(),
                      nn.Linear(2**6, 10),
                      nn.LogSoftmax(dim=1))

# (Removed the commented-out hand-rolled SgdOptimizer in favor of torch's SGD.)
optimizer = optim.SGD(model.parameters(), lr=0.003, momentum=0.9)

learner = Learner(train_set, val_set, model, optimizer, nn.NLLLoss(), batch_accuracy)
learner.fit(50)