# Exemplo n.º 1 (0 votes)
def train(epochs, batch_size, learning_rate):
    """Train a UNet segmentation model on the SegThor dataset.

    Args:
        epochs: number of training epochs.
        batch_size: mini-batch size for the training DataLoader.
        learning_rate: currently unused — the SGD learning rate is
            hard-coded to 0.001 below (see the commented-out Adam line);
            kept for interface compatibility with callers.

    Side effects:
        Prints per-epoch dice statistics and saves the trained model to
        models/model.pt.
    """
    train_loader = torch.utils.data.DataLoader(SegThorDataset(
        "data",
        phase='train',
        transform=transforms.Compose([Rescale(0.25),
                                      Normalize(),
                                      ToTensor()]),
        target_transform=transforms.Compose([Rescale(0.25),
                                             ToTensor()])),
                                               batch_size=batch_size,
                                               shuffle=True)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = UNet().to(device)
    model.apply(weight_init)
    #optimizer = optim.Adam(model.parameters(), lr=learning_rate)    #learning rate to 0.001 for initial stage
    optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.95)
    #optimizer = adabound.AdaBound(params = model.parameters(), lr = 0.001, final_lr = 0.1)

    for epoch in range(epochs):
        print('Epoch {}/{}'.format(epoch + 1, epochs))
        print('-' * 10)

        running_loss = 0.0
        # Per-class dice accumulator: background, oesophagus, heart,
        # trachea, aorta.
        loss_seg = np.zeros(5)

        for batch_idx, (train_data, labels) in enumerate(train_loader):
            train_data, labels = train_data.to(
                device, dtype=torch.float), labels.to(device,
                                                      dtype=torch.uint8)

            print("train data size", train_data.size())
            print("label size", labels.size())
            optimizer.zero_grad()
            output = model(train_data)

            print("output: {} and target: {}".format(output.size(),
                                                     labels.size()))
            loss_label, loss = dice_loss(output, labels)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()

            # BUG FIX: was range(4), which left loss_seg[4] (Aorta) stuck at
            # 0 even though it is reported below.  Assumes dice_loss returns
            # 5 per-class entries like the sibling dice_loss2 — TODO confirm.
            for i in range(5):
                loss_seg[i] += loss_label[i]

        print("Length: ", len(train_loader))
        epoch_loss = running_loss / len(train_loader)
        epoch_loss_class = np.true_divide(loss_seg, len(train_loader))
        print(
            "Dice per class: Background = {:.4f} Eusophagus = {:.4f}  Heart = {:.4f}  Trachea = {:.4f}  Aorta = {:.4f}\n"
            .format(epoch_loss_class[0], epoch_loss_class[1],
                    epoch_loss_class[2], epoch_loss_class[3],
                    epoch_loss_class[4]))
        print("Total Dice Loss: {:.4f}\n".format(epoch_loss))

    os.makedirs("models", exist_ok=True)
    torch.save(model, "models/model.pt")
def define_G(input_nc,
             output_nc,
             ngf,
             norm='batch',
             use_dropout=False,
             gpu_ids=None):
    """Build and initialize the generator network (currently a UNet).

    Args:
        input_nc: number of input channels (unused by the current UNet
            choice; kept for compatibility with the alternative generators
            commented out below).
        output_nc: number of output channels (unused, see above).
        ngf: base number of generator filters (unused, see above).
        norm: normalization layer type forwarded to get_norm_layer.
        use_dropout: dropout flag (unused by the current generator choice).
        gpu_ids: optional list of GPU ids; the model is moved to
            gpu_ids[0] when non-empty.  The previous mutable ``[]`` default
            is replaced by the ``None`` sentinel to avoid the shared
            mutable-default-argument pitfall; behavior is unchanged.

    Returns:
        The initialized generator module (on the GPU).
    """
    if gpu_ids is None:
        gpu_ids = []
    netG = None
    use_gpu = len(gpu_ids) > 0
    # Built for the commented-out ResNet generator; unused by the UNet path.
    norm_layer = get_norm_layer(norm_type=norm)

    if use_gpu:
        assert (torch.cuda.is_available())

    #netG = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9, gpu_ids=gpu_ids)
    #netG = GeneratorUNet(in_channels=2, out_channels=1).cuda()
    # NOTE(review): .cuda() is called unconditionally, so this raises on a
    # CPU-only machine even when gpu_ids is empty — confirm that is intended.
    netG = UNet(n_classes=1).cuda()
    if len(gpu_ids) > 0:
        netG.cuda(gpu_ids[0])
    netG.apply(weights_init)
    return netG
# Exemplo n.º 3 (0 votes)
def main():
    """Entry point: train a 3D UNet on BraTS19 with a fixed train/val split.

    Reads CLI options from the module-level ``parser``, builds the model and
    Trainer, constructs train/val loaders, then runs training.  The seeding,
    DataParallel wrapping and sampler setup below are order-sensitive.
    """
    opt = parser.parse_args()
    print(opt)
    print(torch.__version__)
    # Force a fixed seed across all RNG sources for reproducibility.
    opt.seed = 1337
    random.seed(opt.seed)
    torch.manual_seed(opt.seed)
    np.random.seed(opt.seed)
    torch.cuda.manual_seed(opt.seed)

    # Every subdirectory of the first train path is one patient series.
    series = [
        f for f in os.listdir(opt.train_path[0])
        if os.path.isdir(os.path.join(opt.train_path[0], f))
    ]
    series.sort()

    print("===> Building model")
    # Residual blocks per encoder / decoder stage.
    enc_layers = [1, 2, 2, 4]
    dec_layers = [1, 1, 1, 1]
    # Channel widths double per stage: 16, 32, 64, 128.
    number_of_channels = [int(8 * 2**i) for i in range(1, 1 + len(enc_layers))]
    model = UNet(depth=len(enc_layers),
                 encoder_layers=enc_layers,
                 decoder_layers=dec_layers,
                 number_of_channels=number_of_channels,
                 number_of_outputs=3)
    model.apply(weight_init.weight_init)
    model = torch.nn.DataParallel(module=model, device_ids=range(opt.gpus))

    # rewrite=False: refuse to clobber an existing model directory.
    trainer = train.Trainer(model=model,
                            name=opt.name,
                            models_root=opt.models_path,
                            rewrite=False)
    trainer.cuda()

    gc.collect()

    # Re-seed after model construction (construction consumes RNG state).
    opt.seed = 1337  # random.randint(1, 10000)
    random.seed(opt.seed)
    torch.manual_seed(opt.seed)
    np.random.seed(opt.seed)
    torch.cuda.manual_seed(opt.seed)

    cudnn.benchmark = True

    print("===> Loading datasets")
    print('Train data:', opt.train_path)

    # Hand-picked, fixed validation series; everything else goes to training.
    series_val = [
        'BraTS19_2013_0_1', 'BraTS19_2013_12_1', 'BraTS19_2013_16_1',
        'BraTS19_2013_2_1', 'BraTS19_2013_23_1', 'BraTS19_2013_26_1',
        'BraTS19_2013_29_1', 'BraTS19_CBICA_AAB_1', 'BraTS19_CBICA_AAP_1',
        'BraTS19_CBICA_AMH_1', 'BraTS19_CBICA_AQD_1', 'BraTS19_CBICA_ATX_1',
        'BraTS19_CBICA_AZH_1', 'BraTS19_CBICA_BHB_1', 'BraTS19_TCIA12_101_1',
        'BraTS19_TCIA01_150_1', 'BraTS19_TCIA10_152_1', 'BraTS19_TCIA04_192_1',
        'BraTS19_TCIA08_205_1', 'BraTS19_TCIA06_211_1', 'BraTS19_TCIA02_222_1',
        'BraTS19_TCIA12_298_1', 'BraTS19_TCIA13_623_1', 'BraTS19_CBICA_ANV_1',
        'BraTS19_CBICA_BBG_1', 'BraTS19_TMC_15477_1'
    ]

    series_train = [f for f in series if f not in series_val]
    print('Train {}'.format(series_train))
    print('Val {}'.format(series_val))

    # Only the first train path gets the explicit series filter; extra paths
    # (if any) pass None, i.e. "use everything".
    train_set = dataloader.SimpleReader(
        paths=opt.train_path,
        patch_size=(144, 144, 128),
        series=[
            series_train,
        ] + [None for i in range(len(opt.train_path) - 1)],
        annotation_path=opt.annotation_path,
        images_in_epoch=8000,
        patches_from_single_image=1)
    val_set = dataloader.FullReader(path=opt.train_path[0], series=series_val)

    training_data_loader = DataLoader(dataset=train_set,
                                      num_workers=opt.threads,
                                      batch_size=opt.batchSize,
                                      shuffle=True,
                                      drop_last=True,
                                      worker_init_fn=worker_init_fn)

    # Validation iterates sequentially, one volume at a time.
    batch_sampler = Data.BatchSampler(sampler=Data.SequentialSampler(val_set),
                                      batch_size=1,
                                      drop_last=True)

    evaluation_data_loader = DataLoader(dataset=val_set,
                                        num_workers=0,
                                        batch_sampler=batch_sampler)

    # Combined loss: dice plus background-down-weighted BCE on output 0.
    criterion = [
        loss.Dice_loss_joint(index=0, priority=1).cuda(),
        loss.BCE_Loss(index=0, bg_weight=1e-2).cuda(),
    ]
    print("===> Building model")

    print("===> Training")

    # comparator: a candidate beats the incumbent when min+mean of its
    # per-class Dice scores is larger.
    trainer.train(criterion=criterion,
                  optimizer=optim.Adam,
                  optimizer_params={"lr": 2e-5,
                                    "weight_decay": 1e-6,
                                    "amsgrad": True,
                                    },
                  scheduler=torch.optim.lr_scheduler.StepLR,
                  scheduler_params={"step_size": 16000,
                                    "gamma": 0.5,
                                    },
                  training_data_loader=training_data_loader,
                  evaluation_data_loader=evaluation_data_loader,
                  split_into_tiles=False,
                  pretrained_weights=None,
                  train_metrics=[metrics.Dice(name='Dice', input_index=0, target_index=0, classes=4), \
                                 ],
                  val_metrics=[metrics.Dice(name='Dice', input_index=0, target_index=0, classes=4),
                               metrics.Hausdorff_ITK(name='Hausdorff_ITK', input_index=0, target_index=0, classes=4),
                               ],
                  track_metric='Dice',
                  epoches=opt.nEpochs,
                  default_val=np.array([0, 0, 0, 0, 0]),
                  comparator=lambda x, y: np.min(x) + np.mean(x) > np.min(y) + np.mean(y),
                  eval_cpu=False,
                  continue_form_pretraining=False
                  )
# Exemplo n.º 4 (0 votes)
def train(epochs, batch_size, learning_rate):
    """Train a UNet on the full-resolution SegThor set, logging to a file.

    Args:
        epochs: number of passes over the training set.
        batch_size: DataLoader batch size.
        learning_rate: currently unused — the SGD learning rate is
            hard-coded to 0.01 below (see the commented-out Adam line);
            kept for interface compatibility with callers.

    Side effects:
        Appends per-epoch statistics to train_output.log, logs the epoch
        loss to the module-level TensorBoard ``writer``, and checkpoints the
        model to models/model.pt (every 4th epoch and once at the end).
    """
    torch.manual_seed(1234)  # fixed seed for reproducible init/shuffling

    train_loader = torch.utils.data.DataLoader(SegThorDataset(
        "/home/WIN-UNI-DUE/smnemada/Master_Thesis/SegThor/data/train",
        phase='train',
        transform=transforms.Compose([Rescale(1.0),
                                      Normalize(),
                                      ToTensor()])),
                                               batch_size=batch_size,
                                               shuffle=True)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = UNet().to(device)
    model.apply(weight_init)
    #optimizer = optim.Adam(model.parameters(), lr=learning_rate)    #learning rate to 0.001 for initial stage
    optimizer = optim.SGD(model.parameters(),
                          lr=0.01,
                          momentum=0.9,
                          weight_decay=0.00001)
    #optimizer = adabound.AdaBound(params = model.parameters(), lr = 0.001, final_lr = 0.1)

    for epoch in range(epochs):
        # FIX: use a context manager so the log handle is closed even if an
        # exception is raised mid-epoch (the original open()/close() pair
        # leaked the handle on error).
        with open('train_output.log', 'a') as f:
            f.write('Epoch {}/{}\n'.format(epoch + 1, epochs))
            f.write('-' * 10)

            running_loss = 0.0
            # Per-class dice accumulator: background, oesophagus, heart,
            # trachea, aorta.
            running_loss_label = np.zeros(5)
            for batch_idx, sample in enumerate(train_loader):
                train_data, labels = sample['image'].to(
                    device,
                    dtype=torch.float), sample['label'].to(device,
                                                           dtype=torch.uint8)

                optimizer.zero_grad()
                output = model(train_data)

                loss_label, loss = dice_loss2(output, labels)
                loss.backward()
                optimizer.step()

                running_loss += loss.item()
                for i in range(5):
                    running_loss_label[i] += loss_label[i]

            epoch_loss = running_loss / len(train_loader)
            writer.add_scalar('Train/Loss', epoch_loss, epoch)
            f.write("\n Total Dice Loss: {:.4f}\n".format(epoch_loss))
            epoch_loss_class = np.true_divide(running_loss_label,
                                              len(train_loader))
            f.write(
                "Dice per class: Background = {:.4f} Eusophagus = {:.4f}  Heart = {:.4f}  Trachea = {:.4f}  Aorta = {:.4f}\n"
                .format(epoch_loss_class[0], epoch_loss_class[1],
                        epoch_loss_class[2], epoch_loss_class[3],
                        epoch_loss_class[4]))
            #f.write("Dice per class: Background = {:.4f} Eusophagus = {:.4f}\n".format(epoch_loss_class[0], epoch_loss_class[1]))

        # Periodic checkpoint so a crash does not lose the whole run.
        if epoch % 4 == 0:
            os.makedirs("models", exist_ok=True)
            torch.save(model, "models/model.pt")

    # export scalar data to JSON for external processing
    writer.export_scalars_to_json("./all_scalars.json")
    writer.close()
    os.makedirs("models", exist_ok=True)
    torch.save(model, "models/model.pt")
# Exemplo n.º 5 (0 votes)
def main(epochs, batch_size, learning_rate):
    """Run 4-fold cross-validated training of a UNet on SegThor.

    For each fold a fresh model/optimizer is built, trained for ``epochs``
    epochs via the module-level ``train``/``validation`` helpers, and the
    losses are accumulated across all folds, then reported.

    Args:
        epochs: epochs per fold.
        batch_size: forwarded to train/validation (loaders here use their
            own batch sizes).
        learning_rate: unused — SGD lr is hard-coded to 0.01 below.
    """
    total_train_loss = 0
    total_val_loss = 0
    # Per-class accumulators: background, oesophagus, heart, trachea, aorta.
    train_loss_seg = np.zeros(5)
    val_loss_seg = np.zeros(5)

    for train_list, test_list in k_folds(n_splits=4, subjects=41):
        # Loading train data
        train_loader = torch.utils.data.DataLoader(SegThorDataset(
            "/home/WIN-UNI-DUE/smnemada/Master_Thesis/SegThor/data_sub/train_cv",
            phase='train',
            transform=transforms.Compose(
                [Rescale(1.0), Normalize(),
                 ToTensor2()]),
            file_list=train_list),
                                                   batch_size=batch_size,
                                                   shuffle=False)

        # Loading validation data
        val_set = SegThorDataset(
            "/home/WIN-UNI-DUE/smnemada/Master_Thesis/SegThor/data_sub/train_cv",
            phase='val',
            transform=transforms.Compose(
                [Rescale(1.0), Normalize(),
                 ToTensor2()]),
            file_list=test_list)

        val_loader = torch.utils.data.DataLoader(dataset=val_set,
                                                 batch_size=1,
                                                 shuffle=False)
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # Fresh model + optimizer per fold so folds do not share weights.
        model = UNet().to(device)
        model.apply(weight_init)
        #optimizer = optim.Adam(model.parameters(), lr=learning_rate)    #learning rate to 0.001 for initial stage
        optimizer = optim.SGD(model.parameters(),
                              lr=0.01,
                              momentum=0.9,
                              weight_decay=0.00001)
        #optimizer = adabound.AdaBound(params = model.parameters(), lr = 0.001, final_lr = 0.1)

        for epoch in range(epochs):
            print('Epoch {}/{}'.format(epoch + 1, epochs))
            print('-' * 10)

            train_loss, train_loss_label = train(train_loader, model,
                                                 optimizer, epoch, device,
                                                 batch_size, train_list)
            val_loss, val_loss_label = validation(val_loader, model, epoch,
                                                  device, batch_size,
                                                  test_list)

            # Checkpoint whenever validation beats training (rough
            # underfitting/overfitting heuristic).
            if val_loss < train_loss:
                os.makedirs("models", exist_ok=True)
                torch.save(model, "models/model.pt")

                # Save model output
                #save_results(epoch, device)

            if epoch % 4 == 0:
                os.makedirs("models", exist_ok=True)
                torch.save(model, "models/model.pt")

            # Accumulated over every epoch of every fold.
            total_train_loss = total_train_loss + train_loss
            total_val_loss = total_val_loss + val_loss
            train_loss_seg = train_loss_seg + train_loss_label
            val_loss_seg = val_loss_seg + val_loss_label
            #evaluate_model(epoch, device)

    # NOTE(review): per-class sums span epochs*4 folds but are divided only
    # by the 4 folds, while the totals below use floor division by epochs
    # only (`//` truncates the float average) — confirm both are intended.
    train_loss_seg = np.true_divide(train_loss_seg, 4)
    val_loss_seg = np.true_divide(val_loss_seg, 4)

    print(" Training Loss: ")
    print(total_train_loss // epochs)
    print(
        "Background = {:.4f} Eusophagus = {:.4f}  Heart = {:.4f}  Trachea = {:.4f}  Aorta = {:.4f}\n"
        .format(train_loss_seg[0], train_loss_seg[1], train_loss_seg[2],
                train_loss_seg[3], train_loss_seg[4]))

    print(" Validation Loss: ")
    print(total_val_loss // epochs)
    print(
        "Background = {:.4f} Eusophagus = {:.4f}  Heart = {:.4f}  Trachea = {:.4f}  Aorta = {:.4f}\n"
        .format(val_loss_seg[0], val_loss_seg[1], val_loss_seg[2],
                val_loss_seg[3], val_loss_seg[4]))