def train():
    args = brats2019_arguments()
    seed = 1777777  # reproducibility seed used by the other entry points below

    utils.reproducibility(args, seed)
    utils.make_dirs(args.save)

    (
        training_generator,
        val_generator,
        full_volume,
        affine,
    ) = medical_loaders.generate_datasets(args)
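    # Patch-level training/validation loaders plus the held-out full volume and its
    # affine; other entry points below reuse full_volume/affine for whole-volume
    # inference and visualization.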
    model, optimizer = medzoo.create_model(args)
    val_criterion = DiceLoss(classes=11, skip_index_after=args.classes)

    # criterion = DiceLoss(classes=3, skip_index_after=args.classes)
    # criterion = DiceLoss(classes=args.classes)
    criterion = torch.nn.CrossEntropyLoss()

    if args.cuda:
        model = model.cuda()
        print("Model transferred in GPU.....")

    trainer = train_module.Trainer(
        args,
        model,
        criterion,
        optimizer,
        val_criterion=val_criterion,
        train_data_loader=training_generator,
        valid_data_loader=val_generator,
        lr_scheduler=None,
    )
    print("START TRAINING...")
    trainer.training()
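
# For reference, a minimal sketch of what a multi-class soft Dice loss such as the
# DiceLoss used above typically computes. This is an illustration under assumed tensor
# shapes, not the library's implementation; class weights and skip_index_after are
# omitted.
import torch
import torch.nn.functional as F


def soft_dice_loss(logits, target, num_classes, eps=1e-6):
    """logits: (N, C, D, H, W) raw scores; target: (N, D, H, W) integer labels."""
    probs = F.softmax(logits, dim=1)
    one_hot = F.one_hot(target.long(), num_classes).permute(0, 4, 1, 2, 3).float()
    dims = (0, 2, 3, 4)  # reduce over batch and spatial dims, keep the class dim
    intersection = torch.sum(probs * one_hot, dims)
    cardinality = torch.sum(probs + one_hot, dims)
    dice_per_class = (2.0 * intersection + eps) / (cardinality + eps)
    return 1.0 - dice_per_class.mean()
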
def main():
    args = get_arguments()

    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    ## FOR REPRODUCIBILITY OF RESULTS
    seed = 1777777
    utils.reproducibility(args, seed)
    utils.make_dirs(args.save)
    name_model = args.model + "_" + args.dataset_name + "_" + utils.datestr()

    # TODO visual3D_temp.Basewriter package
    writer = SummaryWriter(log_dir='./runs/' + name_model, comment=name_model)

    training_generator, val_generator, full_volume, affine = medical_loaders.generate_datasets(
        args, path='.././datasets')
    model, optimizer = medzoo.create_model(args)

    if args.cuda:
        model = model.cuda()
        print("Model transferred in GPU.....")

    print("START TRAINING...")
    for epoch in range(1, args.nEpochs + 1):
        train(args, model, training_generator, optimizer, epoch, writer)
        val_metrics, confusion_matrix = validation(args, model, val_generator,
                                                   epoch, writer)
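    # This variant drives the epoch loop manually (unlike the Trainer-based entry
    # points elsewhere in this file) and passes the TensorBoard writer to the
    # per-epoch train/validation helpers for logging.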
def main():
    args = get_arguments()
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    ## FOR REPRODUCIBILITY OF RESULTS
    seed = 1777777
    utils.reproducibility(args, seed)

    utils.make_dirs(args.save)
    utils.save_arguments(args, args.save)

    training_generator, val_generator, full_volume, affine = medical_loaders.generate_datasets(
        args, path='.././datasets')
    model, optimizer = medzoo.create_model(args)
    # criterion = create_loss('CrossEntropyLoss')  # unused: immediately overridden below
    criterion = DiceLoss(classes=args.classes,
                         weight=torch.tensor([0.1, 1, 1, 1]).cuda())
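    # Class 0 (background) is down-weighted relative to the foreground classes; the
    # weight tensor is placed on the GPU here, so this path assumes CUDA is available.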

    if args.cuda:
        model = model.cuda()
        print("Model transferred in GPU.....")

    trainer = Trainer(args,
                      model,
                      criterion,
                      optimizer,
                      train_data_loader=training_generator,
                      valid_data_loader=val_generator,
                      lr_scheduler=None)
    print("START TRAINING...")
    trainer.training()
def main():
    args = get_arguments()
    #os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    ## FOR REPRODUCIBILITY OF RESULTS
    seed = 1777777
    utils.reproducibility(args, seed)

    training_generator, val_generator, full_volume, affine = medical_loaders.generate_datasets(
        args, path='./datasets')
    model, optimizer = medzoo.create_model(args)
    #
    criterion = DiceLoss(classes=args.classes)
    #
    # ## TODO LOAD PRETRAINED MODEL
    print(affine.shape)
    model.restore_checkpoint(args.pretrained)
    if args.cuda:
        model = model.cuda()
        full_volume = full_volume.cuda()
        print("Model transferred in GPU.....")
    # x = torch.randn(3, 156, 240, 240).cuda()  # unused dummy input
    print(full_volume.shape)
    output = non_overlap_padding(args,
                                 full_volume,
                                 model,
                                 criterion,
                                 kernel_dim=(32, 32, 32))
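
# A minimal sketch of the idea behind patch-wise (non-overlapping) inference such as
# non_overlap_padding above: tile the volume into kernel-sized blocks, run the model on
# each block, and stitch the per-block predictions back together. Illustrative only --
# it assumes spatial dims divisible by the kernel and a plain forward pass, while the
# real helper presumably also handles padding and the criterion-based evaluation.
import torch


def tiled_inference(model, volume, kernel=(32, 32, 32)):
    """volume: (C, D, H, W) tensor whose spatial dims are multiples of `kernel`."""
    channels, depth, height, width = volume.shape
    kd, kh, kw = kernel
    output = None
    with torch.no_grad():
        for d in range(0, depth, kd):
            for h in range(0, height, kh):
                for w in range(0, width, kw):
                    patch = volume[:, d:d + kd, h:h + kh, w:w + kw].unsqueeze(0)
                    pred = model(patch)  # (1, num_classes, kd, kh, kw)
                    if output is None:
                        output = torch.zeros(pred.shape[1], depth, height, width,
                                             device=pred.device)
                    output[:, d:d + kd, h:h + kh, w:w + kw] = pred[0]
    return output
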
def main():
    args = get_arguments()
    seed = 1777777

    utils.reproducibility(args, seed)
    utils.make_dirs(args.save)

    training_generator, val_generator, full_volume, affine = medical_loaders.generate_datasets(
        args, path='.././datasets')
    model, optimizer = medzoo.create_model(args)
    criterion = DiceLoss(classes=args.classes)

    if args.cuda:
        model = model.cuda()
        print("Model transferred in GPU.....")

    trainer = train.Trainer(args,
                            model,
                            criterion,
                            optimizer,
                            train_data_loader=training_generator,
                            valid_data_loader=val_generator,
                            lr_scheduler=None)
    print("START TRAINING...")
    trainer.training()

    visualize_3D_no_overlap_new(args, full_volume, affine, model, 10, args.dim)
def main():
    args = get_arguments()
    seed = 1777777
    utils.reproducibility(args, seed)
    utils.make_dirs(args.save)

    training_generator, val_generator, full_volume, affine = medical_loaders.generate_datasets(args,
                                                                                               path='.././datasets')
    model, optimizer = medzoo.create_model(args)
    criterion = DiceLoss(classes=args.classes)

    if args.cuda:
        model = model.cuda()

    trainer = train.Trainer(args, model, criterion, optimizer, train_data_loader=training_generator,
                            valid_data_loader=val_generator)
    trainer.training()
def main():
    args = get_arguments()
    seed = 1777777
    utils.reproducibility(args, seed)
    utils.make_dirs(args.save)

    training_generator, val_generator, full_volume, affine = medical_loaders.generate_datasets(
        args, path='.././datasets')
    model, optimizer = medzoo.create_model(args)
    # Alternative: DiceLoss(classes=args.classes, skip_index_after=2,
    #                       weight=torch.tensor([0.00001, 1, 1, 1]).cuda())
    criterion = DiceLoss(classes=args.classes)

    if args.cuda:
        model = model.cuda()
        print("Model transferred in GPU.....")

    trainer = train.Trainer(args,
                            model,
                            criterion,
                            optimizer,
                            train_data_loader=training_generator,
                            valid_data_loader=val_generator)
    print("START TRAINING...")
    trainer.training()
def test():
    args = mrbrains9_arguments(loadData=True)
    seed = 1777777

    utils.reproducibility(args, seed)
    utils.make_dirs(args.save)

    params = {"batch_size": args.batchSz, "shuffle": True, "num_workers": 2}
    samples_train = args.samples_train
    samples_val = args.samples_val
    test_loader = MRIDatasetMRBRAINS2018(
        args,
        "test",
        dataset_path=dataset_dir,
        dim=args.dim,
        split_id=0,
        samples=samples_train,
        load=args.loadData,
    )

    model_name = args.model
    lr = args.lr
    in_channels = args.inChannels
    num_classes = args.classes
    weight_decay = 0.0000000001
    print("Building Model . . . . . . . ." + model_name)
    model = UNet3D(in_channels=in_channels, n_classes=num_classes, base_n_filter=8)
    print(
        model_name,
        "Number of params: {}".format(
            sum([p.data.nelement() for p in model.parameters()])
        ),
    )

    model.restore_checkpoint(
        "/home/kyle/results/UNET3D/mrbrains9_148_09-08_17-46/mrbrains9_148_09-08_17-46_BEST.pth"
    )

    # model = model.cuda()
    # print("Model transferred in GPU.....")

    print("TESTING...")

    model.eval()

    confusion_matrix = [[0] * (num_classes * 2) for i in range(num_classes * 2)]
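    # The matrix is over-allocated (num_classes * 2 per side), presumably so that any
    # label id outside [0, num_classes) cannot index out of range.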

    for batch_idx, input_tuple in enumerate(test_loader):
        with torch.no_grad():
            img_t1, img_t2, img_t3, target = input_tuple

            target = torch.reshape(torch.from_numpy(target), (-1, 1, 48, 48, 48))
            img_t1 = torch.reshape(torch.from_numpy(img_t1), (-1, 1, 48, 48, 48))
            img_t2 = torch.reshape(torch.from_numpy(img_t2), (-1, 1, 48, 48, 48))
            img_t3 = torch.reshape(torch.from_numpy(img_t3), (-1, 1, 48, 48, 48))

            input_tensor = torch.cat((img_t1, img_t2, img_t3), dim=1)
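            # The three MRBrainS modalities are stacked along the channel axis to form
            # a single multi-channel input volume.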

            input_tensor.requires_grad = False

            output = model(input_tensor)

            output = torch.argmax(output, dim=1)
            output = torch.reshape(output, (-1, 1, 48, 48, 48))

            assert target.size() == output.size()

            output = torch.reshape(output, (-1,)).tolist()
            target = torch.reshape(target, (-1,)).tolist()

            assert len(output) == len(target)

            for gt, pred in zip(target, output):
                confusion_matrix[int(gt)][int(pred)] += 1

    pprint(confusion_matrix)
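
# From a confusion matrix like the one printed above, per-class Dice can be derived as
# 2*TP / (2*TP + FP + FN). A small helper (illustrative sketch, not part of the repo):
def dice_from_confusion(confusion_matrix):
    num_rows = len(confusion_matrix)
    dice_scores = []
    for c in range(num_rows):
        tp = confusion_matrix[c][c]
        fp = sum(confusion_matrix[r][c] for r in range(num_rows)) - tp
        fn = sum(confusion_matrix[c]) - tp
        denom = 2 * tp + fp + fn
        dice_scores.append(2 * tp / denom if denom > 0 else 0.0)
    return dice_scores
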
def test():
    args = iseg2019_arguments()
    seed = 1777777
    print(args)

    utils.reproducibility(args, seed)
    utils.make_dirs(args.save)

    params = {"batch_size": args.batchSz, "shuffle": True, "num_workers": 2}
    print(params)
    samples_train = args.samples_train
    samples_val = args.samples_val
    test_loader = MRIDatasetISEG2019(
        args,
        "test",
        dataset_path=dataset_dir,
        crop_dim=args.dim,
        split_id=0,
        samples=samples_train,
        load=args.loadData,
    )

    model_name = args.model
    lr = args.lr
    in_channels = args.inChannels
    num_classes = args.classes
    weight_decay = 0.0000000001
    print("Building Model . . . . . . . ." + model_name)
    model = UNet3D(in_channels=in_channels, n_classes=num_classes, base_n_filter=8)
    print(
        model_name,
        "Number of params: {}".format(
            sum([p.data.nelement() for p in model.parameters()])
        ),
    )

    model.restore_checkpoint(
        "/home/kyle/results/UNET3D/iseg2019_9_06-08_21-25/iseg2019_9_06-08_21-25_BEST.pth"
    )
    criterion = DiceLoss(classes=args.classes)  # not used below; evaluation relies on the confusion matrix

    # model = model.cuda()
    # print("Model transferred in GPU.....")

    print("TESTING...")

    model.eval()

    confusion_matrix = [[0] * 4 for i in range(4)]

    for batch_idx, input_tuple in enumerate(test_loader):
        with torch.no_grad():
            img_t1, img_t2, target = input_tuple

            target = torch.reshape(target, (-1, 1, 64, 64, 64))
            img_t1 = torch.reshape(img_t1, (-1, 1, 64, 64, 64))
            img_t2 = torch.reshape(img_t2, (-1, 1, 64, 64, 64))

            input_tensor = torch.cat((img_t1, img_t2), dim=1)
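            # T1 and T2 are stacked along the channel axis to form a two-channel input.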
            # print(input_tensor.size())

            input_tensor.requires_grad = False

            output = model(input_tensor)

            output = torch.argmax(output, dim=1)
            output = torch.reshape(output, (-1, 1, 64, 64, 64))

            assert target.size() == output.size()

            output = torch.reshape(output, (-1,)).tolist()
            target = torch.reshape(target, (-1,)).tolist()

            assert len(output) == len(target)

            for gt, pred in zip(target, output):
                confusion_matrix[int(gt)][int(pred)] += 1

    pprint(confusion_matrix)