Example 1
def main(mode='train', args=None, writer=None):
    # create dataset and dataloader
    dataset_path = os.path.join(args.data, args.dataset)
    dataset_train = CNV_2d5(dataset_path, scale=(
        args.crop_height, args.crop_width), mode='train')
    dataloader_train = DataLoader(dataset_train, batch_size=args.batch_size,
                                  shuffle=True, num_workers=args.num_workers, pin_memory=True, drop_last=True)
    dataset_val = CNV_2d5(dataset_path, scale=(
        args.crop_height, args.crop_width), mode='val')
    dataloader_val = DataLoader(dataset_val, batch_size=1, shuffle=True,
                                num_workers=args.num_workers, pin_memory=True, drop_last=True)
    dataset_test = CNV_2d5(dataset_path, scale=(
        args.crop_height, args.crop_width), mode='test')
    dataloader_test = DataLoader(dataset_test, batch_size=1, shuffle=True,
                                 num_workers=args.num_workers, pin_memory=True, drop_last=True)
    # select visible GPUs (must run before any CUDA context is created)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda
    # build the requested model
    model_all = {'unet_2.5d': UNet(in_channels=3, n_classes=args.num_classes),
                 'resunetplusplus': ResUnetPlusPlus(channel=3),
                 'cpfnet_2.5d': CPFNet(),
                 }
    model = model_all[args.net_work].cuda()
    cudnn.benchmark = True

    if args.optimizer == "SGD":
        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr,
                                    momentum=args.momentum, weight_decay=args.weight_decay)
    elif args.optimizer == "Adam":
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.999),
                                     eps=1e-08, weight_decay=args.weight_decay, amsgrad=False)
    
    criterion_aux = nn.BCELoss()  # expects probabilities in [0, 1], i.e. sigmoid outputs
    criterion_main = LS.DiceLoss()
    # criterion_0 = LS.SD_Loss()
    criterion = [criterion_aux, criterion_main]
    if mode == 'train':  # train with validation
        train(args, model, optimizer, criterion, dataloader_train, dataloader_val, writer)
    if mode == 'test':  # evaluate on the held-out test set only
        eval(args, model, dataloader_test)
    if mode == 'train_test':
        train(args, model, optimizer, criterion, dataloader_train, dataloader_val, writer)
        eval(args, model, dataloader_test)
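A minimal, hypothetical driver for this main: the argument names below are taken from what the snippet reads off args, but the parser itself and all default values are assumptions, not part of the original project.

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--data', default='./data')              # dataset root
    parser.add_argument('--dataset', default='CNV')              # dataset subfolder
    parser.add_argument('--crop_height', type=int, default=256)
    parser.add_argument('--crop_width', type=int, default=256)
    parser.add_argument('--batch_size', type=int, default=4)
    parser.add_argument('--num_workers', type=int, default=4)
    parser.add_argument('--cuda', default='0')                   # GPU id string
    parser.add_argument('--net_work', default='unet_2.5d')
    parser.add_argument('--num_classes', type=int, default=1)
    parser.add_argument('--optimizer', default='SGD')
    parser.add_argument('--lr', type=float, default=0.01)
    parser.add_argument('--momentum', type=float, default=0.9)
    parser.add_argument('--weight_decay', type=float, default=1e-4)
    # writer would normally be a TensorBoard SummaryWriter; None is a placeholder
    main(mode='train_test', args=parser.parse_args(), writer=None)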
Example 2
def test_dice(args, epoch, model, testLoader, optimizer, testF, config):
    model.eval()
    test_loss = 0
    dice = [0.0, 0.0]
    # no gradient computation
    dice_loss = dloss.DiceLoss(nclass=3)
    with torch.no_grad():
        for data, target in testLoader:
            if args.cuda:
                data, target = data.cuda(), target.type(
                    torch.LongTensor).cuda()
            # Variable is deprecated since PyTorch 0.4, and torch.no_grad()
            # already disables gradient tracking, so no wrapping is needed
            output = model(data)
            # -------------- Process multi supervision in a model -----------------------------------------
            if isinstance(output, list):
                loss1 = []
                loss2 = []
                tar_numeled = target.view(target.numel())
                tar_size = target.size()
                for i, out in enumerate(output):
                    if i < (len(output) - 1):
                        out = F.interpolate(out,
                                            size=tar_size[2:5],
                                            mode='trilinear')
                    loss1.append(
                        dice_loss(out, target).detach().cpu().numpy())

                    out = out.permute(0, 2, 3, 4, 1).contiguous()
                    out = out.view(out.numel() // 3, 3)
                    loss2.append(
                        F.cross_entropy(
                            out, tar_numeled).detach().cpu().numpy())

                test_loss += sum(loss1) + sum(loss2)

                target = target[:, 0, :, :, :]
                pred = torch.argmax(output[-1], dim=1)

                assert len(target.size()) == len(pred.size())

            else:
                output = F.softmax(output, dim=1)
                test_loss += dice_loss(output,
                                       target).detach().cpu().numpy()

                # output = output.permute(0, 2, 3, 4, 1).contiguous()
                # output = output.view(output.numel()//3, 3)
                # target = target.view(target.numel())
                target = target.view(target.size(0), target.size(2),
                                     target.size(3), target.size(4))

                # test_loss += F.cross_entropy(output,
                #                             target).detach().data.cpu().numpy()

                # get the index of the max probability
                pred = torch.argmax(output, dim=1)

            d = evaluate_dice(pred, target, cpu=True)
            dice[0] += d[0]
            dice[1] += d[1]

    # loss function already averages over batch size
    test_loss /= len(testLoader)
    dice = [c / len(testLoader) for c in dice]  # average Dice over all batches
    print(
        '\nTest set: Average loss: {:.4f}, Kidney_Dice: {:.6f}, Tumor_Dice: {:.6f}\n'
        .format(test_loss, dice[0], dice[1]))
    #
    testF.write('{},{},{},{}\n'.format(epoch, test_loss, dice[0], dice[1]))
    testF.flush()
    return (dice[0] + dice[1])
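dloss.DiceLoss and evaluate_dice are project-local code not shown here. For reference, a minimal self-contained soft Dice loss over C classes might look like the sketch below; the shape convention (probs as softmax output of shape (N, C, D, H, W), target as integer labels of shape (N, D, H, W)) is an assumption, not taken from the project.

import torch
import torch.nn.functional as F

def soft_dice_loss(probs, target, nclass=3, eps=1e-5):
    # one-hot encode the labels and move the class axis next to the batch axis
    one_hot = F.one_hot(target, nclass).permute(0, 4, 1, 2, 3).float()
    dims = (0, 2, 3, 4)  # reduce over batch and spatial axes, keep classes
    intersection = (probs * one_hot).sum(dims)
    cardinality = probs.sum(dims) + one_hot.sum(dims)
    dice_per_class = (2.0 * intersection + eps) / (cardinality + eps)
    return 1.0 - dice_per_class.mean()  # loss decreases as overlap improves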
Example 3
def train_dice(args, epoch, model, trainLoader, optimizer, trainF, config,
               scheduler):
    model.train()
    nProcessed = 0
    nTrain = len(trainLoader.dataset)
    # nIter_per_epoch = nTrain // batch_size
    dice_loss = dloss.DiceLoss(nclass=3)
    for batch_idx, (data, target) in enumerate(trainLoader):
        if args.cuda:
            data, target = data.cuda(), target.type(torch.LongTensor).cuda()
        # Variable is deprecated since PyTorch 0.4; plain tensors work directly
        optimizer.zero_grad()
        # output is already the result of softmax function
        # may add CrossEntropy here
        output = model(data)

        # tar_numeled = False
        # -------------- Process multi supervision in a model -----------------------------------------
        if isinstance(output, list):
            loss1 = []
            loss2 = []
            tar_size = target.size()
            tar_numeled = target.view(target.numel())
            for i, out in enumerate(output):
                if i < (len(output) - 1):
                    out = F.interpolate(out,
                                        size=tar_size[2:5],
                                        mode='trilinear')
                loss1.append(dice_loss(out, target))

                out = out.permute(0, 2, 3, 4, 1).contiguous()
                out = out.view(out.numel() // 3, 3)
                loss2.append(F.cross_entropy(out, tar_numeled))

            loss = sum(loss1) + sum(loss2)

            target = target[:, 0, :, :, :]
            pred = torch.argmax(output[-1], dim=1)

            assert len(target.size()) == len(pred.size())

        else:
            output = F.softmax(output, dim=1)
            loss = dice_loss(output, target)

            # output = output.permute(0, 2, 3, 4, 1).contiguous()
            # output = output.view(output.numel()//3, 3)
            # target = target.view(target.numel())
            target = target.view(target.size(0), target.size(2),
                                 target.size(3), target.size(4))
            # loss += F.cross_entropy(output, target)

            # get the index of the max log-probability
            pred = torch.argmax(output, dim=1)
        # ----------- apex mixed precision ------------------------------------------------
        # loss.backward() becomes:
        with amp.scale_loss(loss, optimizer) as scaled_loss:
            scaled_loss.backward()

        optimizer.step()
        # update learning rate
        scheduler(optimizer, i=batch_idx, epoch=epoch)
        nProcessed += len(data)
        # print(output.size(), pred.size(), target.size())
        dice = evaluate_dice(pred, target, cpu=True)

        incorrect = pred.ne(target).cpu().sum()  # voxel error count (not logged below)
        partialEpoch = round(epoch + batch_idx / len(trainLoader), 2)  # fractional epoch
        loss_data = loss.detach().cpu().numpy()
        print(
            'Train Epoch: {} [{}/{} ({:.0f}%)], Loss: {:.4f}, Kidney_Dice: {:.6f}, Tumor_Dice: {:.6f}'
            .format(partialEpoch, nProcessed, nTrain,
                    100. * batch_idx / len(trainLoader), loss_data, dice[0],
                    dice[1]))
        #
        trainF.write('{},{},{},{}\n'.format(partialEpoch, loss_data, dice[0],
                                            dice[1]))
        trainF.flush()
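The amp.scale_loss context manager comes from NVIDIA's apex library. PyTorch 1.6+ ships the same technique natively as torch.cuda.amp; below is a minimal sketch of the equivalent loop, assuming model, dice_loss, optimizer, and trainLoader as defined above.

import torch

scaler = torch.cuda.amp.GradScaler()
for data, target in trainLoader:
    data, target = data.cuda(), target.cuda()
    optimizer.zero_grad()
    with torch.cuda.amp.autocast():    # forward pass runs in mixed precision
        output = model(data)
        loss = dice_loss(output, target)
    scaler.scale(loss).backward()      # scale the loss to avoid fp16 underflow
    scaler.step(optimizer)             # gradients are unscaled before stepping
    scaler.update()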
Example 4
def main(mode='train', args=None, writer=None, k_fold=1):
    # create dataset and dataloader
    dataset_path = os.path.join(args.data, args.dataset)
    dataset_train = LinearLesion(dataset_path,
                                 scale=(args.crop_height, args.crop_width),
                                 k_fold_test=k_fold,
                                 mode='train')
    dataloader_train = DataLoader(dataset_train,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=args.num_workers,
                                  pin_memory=True,
                                  drop_last=True)

    dataset_val = LinearLesion(dataset_path,
                               scale=(args.crop_height, args.crop_width),
                               k_fold_test=k_fold,
                               mode='val')

    dataloader_val = DataLoader(
        dataset_val,
        # batch size defaults to the number of visible GPUs; adjust as needed
        batch_size=len(args.cuda.split(',')),
        shuffle=True,
        num_workers=args.num_workers,
        pin_memory=True,
        drop_last=True)

    dataset_test = LinearLesion(dataset_path,
                                scale=(args.crop_height, args.crop_width),
                                k_fold_test=k_fold,
                                mode='test')
    dataloader_test = DataLoader(
        dataset_test,
        # batch size defaults to the number of visible GPUs; adjust as needed
        batch_size=len(args.cuda.split(',')),
        shuffle=True,
        num_workers=args.num_workers,
        pin_memory=True,
        drop_last=True)

    # build model
    os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda

    # load model
    model_all = {
        'UNet': UNet(in_channels=args.input_channel,
                     n_classes=args.num_classes)
    }
    model = model_all[args.net_work]
    cudnn.benchmark = True
    # model._initialize_weights()
    if torch.cuda.is_available() and args.use_gpu:
        model = torch.nn.DataParallel(model).cuda()
    # load pretrained model if exists
    if args.pretrained_model_path and mode == 'test':
        print("=> loading pretrained model '{}'".format(
            args.pretrained_model_path))
        checkpoint = torch.load(args.pretrained_model_path)
        model.load_state_dict(checkpoint['state_dict'])
        print('Done!')

    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    # criterion_aux=nn.NLLLoss(weight=None)
    criterion_aux = nn.BCEWithLogitsLoss(weight=None)
    criterion_main = LS.DiceLoss()
    criterion = [criterion_aux, criterion_main]
    if mode == 'train':
        train(args, model, optimizer, criterion, dataloader_train,
              dataloader_val, writer, k_fold)
    if mode == 'test':
        eval(model, dataloader_test, args)
    if mode == 'train_test':
        train(args, model, optimizer, criterion, dataloader_train,
              dataloader_val, writer, k_fold)
        eval(model, dataloader_test, args)
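The k_fold parameter suggests main is meant to be invoked once per cross-validation fold. A hypothetical driver follows; the fold count of 4, the TensorBoard SummaryWriter usage, and the externally parsed args namespace are all assumptions.

from torch.utils.tensorboard import SummaryWriter

for fold in range(1, 5):  # assuming 4 folds, numbered from 1
    writer = SummaryWriter(comment='-fold{}'.format(fold))
    main(mode='train_test', args=args, writer=writer, k_fold=fold)
    writer.close()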
Example 5
def main(mode='train', args=None, writer=None, k_fold=1):
    # create dataset and dataloader
    dataset_path = os.path.join(args.data, args.dataset)
    dataset_train = CNV_AND_SRF(dataset_path,
                                scale=(args.crop_height, args.crop_width),
                                k_fold_test=k_fold,
                                mode='train')
    dataloader_train = DataLoader(dataset_train,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=args.num_workers,
                                  pin_memory=True,
                                  drop_last=True)
    dataset_val = CNV_AND_SRF(dataset_path,
                              scale=(args.crop_height, args.crop_width),
                              k_fold_test=k_fold,
                              mode='val')
    dataloader_val = DataLoader(dataset_val,
                                batch_size=1,
                                shuffle=True,
                                num_workers=args.num_workers,
                                pin_memory=True,
                                drop_last=True)
    dataset_test = CNV_AND_SRF(dataset_path,
                               scale=(args.crop_height, args.crop_width),
                               k_fold_test=k_fold,
                               mode='test')
    dataloader_test = DataLoader(dataset_test,
                                 batch_size=1,
                                 shuffle=True,
                                 num_workers=args.num_workers,
                                 pin_memory=True,
                                 drop_last=True)
    # build model
    os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda
    # load model
    model_all = {
        'UNet': UNet(in_channels=args.input_channel,
                     n_classes=args.num_classes),
        'ResUnetPlusPlus': ResUnetPlusPlus(channel=args.input_channel)
    }
    model = model_all[args.net_work].cuda()
    cudnn.benchmark = True
    # if torch.cuda.is_available() and args.use_gpu:
    #     model = torch.nn.DataParallel(model).cuda()
    if args.pretrained_model_path and mode == 'test':
        print("=> loading pretrained model '{}'".format(
            args.pretrained_model_path))
        checkpoint = torch.load(args.pretrained_model_path)
        model.load_state_dict(checkpoint['state_dict'])
        print('Done!')
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    criterion_aux = nn.BCEWithLogitsLoss(weight=None)
    criterion_main = LS.DiceLoss()
    criterion = [criterion_aux, criterion_main]
    if mode == 'train':  # cross-validation
        train(args, model, optimizer, criterion, dataloader_train,
              dataloader_val, writer, k_fold)
    if mode == 'test':  # evaluate on the held-out test set only
        eval(model, dataloader_test, args)
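The loading code above expects a checkpoint dict with a 'state_dict' key. A minimal sketch of the matching save side; the extra 'epoch' field is an assumption, not taken from the project.

import torch

def save_checkpoint(model, epoch, path):
    # the 'state_dict' key must match what the loading code reads
    torch.save({'state_dict': model.state_dict(), 'epoch': epoch}, path)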
Example 6
    train_loader = DataLoader(dataset=train_set,
                              batch_size=args.batch_size,
                              num_workers=args.n_threads,
                              shuffle=True)
    val_loader = DataLoader(dataset=val_set,
                            batch_size=1,
                            num_workers=args.n_threads,
                            shuffle=False)

    # model info
    model = UNet3D(in_channels=1,
                   filter_num_list=[16, 32, 48, 64, 96],
                   class_num=args.n_labels).to(device)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    # common.print_network(model)
    # model = nn.DataParallel(model, device_ids=[0,1])  # multi-GPU

    # criterion = loss.DiceLoss(weight=np.array([0.2, 0.3, 0.5]))
    # use a distinct name so the imported loss module is not shadowed
    criterion = loss.DiceLoss(weight=np.array([0.3, 0.7]))

    log = logger.Train_Logger(save_path, "train_log")

    best = [0, np.inf]  # epoch and metric of the best model so far
    trigger = 0  # early-stopping counter
    for epoch in range(1, args.epochs + 1):
        common.adjust_learning_rate(optimizer, epoch, args)
        train_log = train(model, train_loader, optimizer, criterion, args.n_labels)
        val_log = val(model, val_loader, criterion, args.n_labels)
        log.update(epoch, train_log, val_log)

        # Save checkpoint.
        state = {
            'net': model.state_dict(),
            'optimizer': optimizer.state_dict(),