Example #1
def test():
    model = UNet_2d(8, 1, 1).to(device)
    model.load_state_dict(torch.load(args.weight))
    model.eval()

    dataset = SpleenDatasetTest(transform=x_transform,
                                target_transform=y_transform)
    dataloader = DataLoader(dataset)  # batch_size=1: one Dice / timing entry per sample

    dice = []
    inf_time = []
    with torch.no_grad():
        for x, y_true in dataloader:
            inputs = x.float().to(device)
            labels = y_true.float().to(device)
            time1 = time.time()
            y = model(inputs)
            time2 = time.time()
            dice.append(dice_coeff(y, labels).cpu().numpy())
            inf_time.append(time2 - time1)
        print('Dice :', dice)
        print('Ave : ', np.average(dice))
        print('Var : ', np.var(dice))
        print('inference time : ', np.average(inf_time))
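All of these examples lean on a project-specific dice_coeff helper that the page does not show. As a point of reference only, a minimal sketch of a binary soft-Dice coefficient (an assumption about its behavior, not the actual helper used above) could look like:

import torch

def dice_coeff(pred, target, eps=1e-6):
    # assumed sketch: flatten per sample, then compute 2*|A∩B| / (|A| + |B|)
    pred = pred.contiguous().view(pred.size(0), -1)
    target = target.contiguous().view(target.size(0), -1)
    intersection = (pred * target).sum(dim=1)
    dice = (2.0 * intersection + eps) / (pred.sum(dim=1) + target.sum(dim=1) + eps)
    return dice.mean()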
Example #2
def valid(data, net, args, mc_samples=1):
    valid_set = DataLoader(data, batch_size=args.batch_size // 2, num_workers=multiprocessing.cpu_count(), shuffle=True)
    net.eval()

    progress_bar = tqdm(valid_set)  # tqdm iterates the loader directly; iter() is redundant

    dice_avg = list()
    entropy_avg = list()
    for img, label, label_bin, weight in progress_bar:
        # plain tensors suffice; Variable() has been a no-op since PyTorch 0.4
        label_bin = label_bin.float()

        if args.cuda:
            img, label_bin = img.cuda(), label_bin.cuda()

        if mc_samples > 1:
            # note: this re-runs the forward pass mc_samples times per batch,
            # which is expensive
            avg, _, overall_entropy, _ = net.predict(img, times=mc_samples)
            entropy_avg.append(np.mean(overall_entropy))
            output = torch.as_tensor(avg, dtype=torch.float32)
            if args.cuda:
                output = output.cuda()
        else:
            with torch.no_grad():
                output = net(img)

        dice_avg.append(torch.mean(dice_coeff(output, label_bin)).item())

    dice_avg = np.asarray(dice_avg).mean()
    # entropy is only collected when mc_samples > 1
    entropy_avg = np.asarray(entropy_avg).mean() if entropy_avg else float('nan')

    print('Validation dice avg: {}'.format(dice_avg))
    print('Validation entropy avg: {}'.format(entropy_avg))

    return dice_avg, entropy_avg
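The net.predict(img, times=mc_samples) call is project code that is not shown on this page. A common way to implement such a method is Monte Carlo dropout: keep dropout active at inference, run the forward pass several times, and derive a mean prediction plus a pixel-wise predictive entropy. A sketch under those assumptions (the function name and return layout are hypothetical):

import torch

def mc_predict(net, img, times=10):
    net.train()  # keep dropout layers active so each pass is stochastic
    with torch.no_grad():
        probs = torch.stack([torch.softmax(net(img), dim=1)
                             for _ in range(times)])  # (times, B, C, H, W)
    net.eval()
    avg = probs.mean(dim=0)
    # predictive entropy per pixel: -sum_c p_c * log(p_c)
    entropy = -(avg * torch.log(avg + 1e-12)).sum(dim=1)
    return avg, entropy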
Example #3
def evaluate(model, dataloader, num_classes, device, num_val):
    """ Evaluation without the densecrf with the dice coefficient """

    model.eval()
    total = 0

    with tqdm(total=num_val, desc='Validation round', leave=False) as t:

        for img, mask in dataloader:

            mask_type = torch.float32 if num_classes == 1 else torch.long
            img = img.to(device=device, dtype=torch.float32)
            mask = mask.to(device=device, dtype=mask_type)

            with torch.no_grad():
                pred = model(img)

            if num_classes > 1:
                # multiclass: accumulate mean cross-entropy instead of Dice
                total += F.cross_entropy(pred, mask).item()
            else:
                pred = torch.sigmoid(pred)
                pred = (pred > 0.5).float()
                total += dice_coeff(pred, mask).item()

            t.update(img.shape[0])

    return total / num_val
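A minimal call site might look like the following; UNet and get_val_loader are hypothetical stand-ins, not names from the original project:

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = UNet(n_channels=1, n_classes=1).to(device)  # hypothetical model class
val_loader = get_val_loader(batch_size=4)           # hypothetical loader factory
score = evaluate(model, val_loader, num_classes=1,
                 device=device, num_val=len(val_loader.dataset))
print('Validation Dice: {:.4f}'.format(score))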
Example #4
def train(epoch, data, net, criterion, optimizer, args):
    train_set = DataLoader(data, batch_size=args.batch_size, num_workers=multiprocessing.cpu_count(), shuffle=True)

    progress_bar = tqdm(train_set)
    moving_loss = 0

    net.train()
    for img, label, label_bin, weight in progress_bar:
        # plain tensors suffice; Variable() has been a no-op since PyTorch 0.4
        label = label.long()
        label_bin = label_bin.float()

        if args.cuda:
            img, label, label_bin, weight = img.cuda(), label.cuda(), label_bin.cuda(), weight.cuda()

        output = net(img)
        loss = criterion(output, label, weight, label_bin)
        net.zero_grad()
        loss.backward()
        optimizer.step()

        if moving_loss == 0:
            moving_loss = loss.item()
        else:
            moving_loss = moving_loss * 0.9 + loss.item() * 0.1

        dice_avg = torch.mean(dice_coeff(output, label_bin))

        progress_bar.set_description(
            'Epoch: {}; Loss: {:.5f}; Avg: {:.5f}; Dice: {:.5f}'
                .format(epoch + 1, loss.item(), moving_loss, dice_avg.item()))
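Wiring this loop up could look like the sketch below; RelayNet, CombinedLoss, train_data and valid_data are assumptions inferred from the surrounding examples (Example #8 comes from the Mehnoor/RelayNet project), not verified names:

net = RelayNet()  # assumed model class
if args.cuda:
    net = net.cuda()
# assumed loss with the signature criterion(output, label, weight, label_bin)
criterion = CombinedLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)

for epoch in range(args.epochs):
    train(epoch, train_data, net, criterion, optimizer, args)
    valid(valid_data, net, args)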
Example #5
def structure_wise_uncertainty_dice(data,
                                    net,
                                    args,
                                    mc_samples=10,
                                    n_classes=9):

    dice_avg = list()

    for _, _, _, samples, _ in iter_data_and_predict(data, net, args,
                                                     mc_samples):

        samples = torch.Tensor(samples)
        if args.cuda:
            samples = samples.cuda()

        # Dice is symmetric, so each unordered pair (i, j) is visited twice;
        # the duplicate scores do not change the average.
        for i, j in product(range(mc_samples), range(mc_samples)):
            if i == j:
                continue

            dice_score = dice_coeff(samples[i],
                                    samples[j],
                                    n_classes=n_classes)
            dice_avg.append(torch.mean(dice_score, dim=0).cpu().numpy())

    dice_avg = np.asarray(dice_avg).mean(axis=0)

    return dice_avg
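Here dice_coeff takes an n_classes argument and, judging by the dim=0 mean, returns a per-sample, per-class score. A minimal multiclass sketch under those assumptions (treating the inputs as hard class-index maps, which may differ from the project's actual helper):

import torch

def dice_coeff_multiclass(pred, target, n_classes, eps=1e-6):
    # pred, target: (B, H, W) integer class maps
    scores = []
    for c in range(n_classes):
        p = (pred == c).float().reshape(pred.size(0), -1)
        t = (target == c).float().reshape(target.size(0), -1)
        inter = (p * t).sum(dim=1)
        scores.append((2.0 * inter + eps) /
                      (p.sum(dim=1) + t.sum(dim=1) + eps))
    return torch.stack(scores, dim=1)  # (B, n_classes)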
Example #6
def train(net, epochs, batch_size, lr, mra_transforms, label_transforms):
    dir_imgs = "./data/after_slice/copy/data/"
    dir_labels = "./data/after_slice/copy/seg/"
    dir_model = "./model"

    utility.sureDir(dir_model)

    # load data
    dataset = NiiDataset(mra_dir=dir_imgs,
                         label_dir=dir_labels,
                         mra_transforms=mra_transforms,
                         label_transforms=label_transforms)

    dataloader = DataLoader(dataset,
                            batch_size=batch_size,
                            shuffle=True,
                            num_workers=4)

    # loss and optimizer
    criterion = SoftDiceLoss()
    optimizer = torch.optim.Adam(net.parameters(), lr=lr)

    # training loop
    for epoch in range(epochs):
        print('Starting epoch {}/{}.'.format(epoch + 1, epochs))
        print('-' * 10)

        net.train()
        dt_size = len(dataloader.dataset)
        epoch_loss = 0
        step = 0

        for img, label in dataloader:
            step += 1
            # the dataset transforms already converted these to tensors
            inputs = img.float().cuda()
            label = label.float().cuda().squeeze()

            # zero the parameter gradients
            optimizer.zero_grad()

            output = net(inputs)

            # foreground channel, e.g. shape (75, 64, 64)
            out = output[:, 1, :, :, :].squeeze()
            print("dice: %0.3f " % dice_coeff(out, label))

            loss = criterion(out, label)
            loss.backward()
            optimizer.step()
            epoch_loss += float(loss.item())
            print("%d/%d, train_loss: %0.3f" %
                  (step, dt_size // dataloader.batch_size, loss.item()))
        print("epoch %d loss: %0.3f" % (epoch + 1, epoch_loss / step))

        # dir_model is a directory, so save to a file inside it
        torch.save(net.state_dict(),
                   os.path.join(dir_model, 'epoch_{}.pth'.format(epoch + 1)))
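SoftDiceLoss comes from the project and is not shown here. A minimal sketch of a typical soft-Dice loss (an assumption about its behavior, not the project's exact class):

import torch.nn as nn

class SoftDiceLoss(nn.Module):
    def __init__(self, eps=1e-6):
        super().__init__()
        self.eps = eps

    def forward(self, pred, target):
        # flatten per sample and compute 1 - mean Dice, so lower loss = higher Dice
        pred = pred.contiguous().view(pred.size(0), -1)
        target = target.contiguous().view(target.size(0), -1)
        inter = (pred * target).sum(dim=1)
        dice = (2.0 * inter + self.eps) / (pred.sum(dim=1) +
                                           target.sum(dim=1) + self.eps)
        return 1.0 - dice.mean()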
Example #7
def error_map_dice(data, net, args, mc_samples=10, entropy_threshold=0.5):
    """
    Computes the dice score between prediction error map and the entropy. This is a measure on how well the entropy
    describes the actual error the network makes.
    :param data:
    :param net:
    :param args:
    :param mc_samples:
    :param entropy_threshold:
    :return:
    """

    dice_avg = list()
    for avg, _, overall_entropy, _, label in iter_data_and_predict(
            data, net, args, mc_samples):

        # binarise the entropy map at the given threshold
        overall_entropy = torch.from_numpy(
            (overall_entropy > entropy_threshold).astype(np.float32))

        # hard prediction: argmax over the class dimension (dim 1)
        indices = torch.from_numpy(np.argmax(avg, axis=1))

        if args.cuda:
            overall_entropy, indices = overall_entropy.cuda(), indices.cuda()

        # 1.0 where the prediction disagrees with the label, 0.0 elsewhere
        error_map = (label != indices).float()

        dice_avg.append(
            torch.mean(dice_coeff(overall_entropy, error_map,
                                  n_classes=1)).item())

    dice_avg = np.asarray(dice_avg).mean()

    print('dice avg: {}'.format(dice_avg))

    return dice_avg
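The overall_entropy consumed above comes from iter_data_and_predict, which this page does not show. A common definition is the pixel-wise predictive entropy of the MC-averaged softmax output; a sketch under that assumption, in NumPy to match the arrays used here:

import numpy as np

def predictive_entropy(avg, eps=1e-12):
    # avg: (B, C, H, W) mean softmax probabilities over the MC samples
    return -np.sum(avg * np.log(avg + eps), axis=1)  # (B, H, W)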
Example #8
File: train.py  Project: Mehnoor/RelayNet
def valid(data, net, args):
    valid_set = DataLoader(data,
                           batch_size=args.batch_size,
                           num_workers=0,
                           shuffle=True)
    net.eval()

    progress_bar = tqdm(valid_set)

    dice_avg = list()
    for img, label, label_bin, weight in progress_bar:
        # plain tensors suffice; Variable() has been a no-op since PyTorch 0.4
        label_bin = label_bin.float()

        if args.cuda:
            img, label_bin = img.cuda(), label_bin.cuda()

        with torch.no_grad():
            output = net(img)
        dice_avg.append(torch.mean(dice_coeff(output, label_bin)).item())

    dice_avg = np.asarray(dice_avg).mean()

    print('Validation dice avg: {}'.format(dice_avg))
Example #9
def train_and_test(model,
                   dataloaders,
                   optimizer,
                   criterion,
                   num_epochs=3,
                   show_images=False):
    since = time.time()
    best_dice = 0.0  # best mean test Dice seen so far
    # Use gpu if available
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model.to(device)

    fieldnames = [
        'epoch', 'training_loss', 'test_loss', 'training_dice_coeff',
        'test_dice_coeff'
    ]
    train_epoch_losses = []
    test_epoch_losses = []
    for epoch in range(1, num_epochs + 1):

        print(f'Epoch {epoch}/{num_epochs}')
        print('-' * 10)
        # Each epoch has a training and validation phase
        # Initialize batch summary
        batchsummary = {a: [] for a in fieldnames}  # empty lists so means are not skewed by a leading 0
        batch_train_loss = 0.0
        batch_test_loss = 0.0

        for phase in ['training', 'test']:
            if phase == 'training':
                model.train()  # Set model to training mode
            else:
                model.eval()  # Set model to evaluate mode

            # Iterate over data.
            for sample in dataloaders[phase]:

                if show_images:
                    grid_img = make_grid(sample['image'])
                    grid_img = grid_img.permute(1, 2, 0)
                    plt.imshow(grid_img)
                    plt.show()

                inputs = sample['image'].to(device)
                masks = sample['mask'].to(device)
                # zero the parameter gradients
                optimizer.zero_grad()

                # track history only in training phase
                with torch.set_grad_enabled(phase == 'training'):
                    outputs = model(inputs)

                    loss = criterion(outputs, masks)

                    y_pred = outputs.detach().cpu().numpy().ravel()
                    y_true = masks.detach().cpu().numpy().ravel()

                    batchsummary[f'{phase}_dice_coeff'].append(
                        dice_coeff(y_pred, y_true))

                    # back-propagation
                    if phase == 'training':
                        loss.backward()
                        optimizer.step()

                        # accumulate batch loss
                        batch_train_loss += loss.item() * sample['image'].size(
                            0)

                    else:
                        batch_test_loss += loss.item() * sample['image'].size(
                            0)

            # save epoch losses (normalise by the number of samples, not batches)
            if phase == 'training':
                epoch_train_loss = batch_train_loss / len(
                    dataloaders['training'].dataset)
                train_epoch_losses.append(epoch_train_loss)
                print('{} Loss: {:.4f}'.format(phase, epoch_train_loss))
            else:
                epoch_test_loss = batch_test_loss / len(
                    dataloaders['test'].dataset)
                test_epoch_losses.append(epoch_test_loss)
                print('{} Loss: {:.4f}'.format(phase, epoch_test_loss))

            batchsummary['epoch'] = epoch

        best_dice = max(best_dice, np.max(batchsummary['test_dice_coeff']))
        for field in fieldnames[3:]:
            batchsummary[field] = np.mean(batchsummary[field])
        print(
            f'\t\t\t train_dice_coeff: {batchsummary["training_dice_coeff"]}, test_dice_coeff: {batchsummary["test_dice_coeff"]}'
        )

    # summary
    print('Best dice coefficient: {:.4f}'.format(best_dice))

    return model, train_epoch_losses, test_epoch_losses
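Unlike the earlier examples, this one calls dice_coeff on raveled NumPy arrays, so the helper presumably has a NumPy code path. A minimal sketch under that assumption (no thresholding, matching the raw scores passed in above):

import numpy as np

def dice_coeff(y_pred, y_true, eps=1e-6):
    # y_pred, y_true: flat 1-D arrays of (soft) foreground scores
    intersection = np.sum(y_pred * y_true)
    return (2.0 * intersection + eps) / (np.sum(y_pred) + np.sum(y_true) + eps)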