import os

import nibabel as nib
import numpy as np
import torch
import torch.nn as nn
from sklearn.metrics import accuracy_score

# `dc` (Dice coefficient) is assumed to come from medpy; adjust if the project
# defines its own implementation.
from medpy.metric.binary import dc

# Project-local helpers assumed to be importable from the repo's utility modules:
# load_data_test, numpy_to_var, printProgressBar, my_reconstruct_volume,
# extract_patches, evaluateSegmentation

def inference(network, moda_1, moda_g, imageNames, epoch, folder_save):
    # Default MRBrainS paths (kept here for reference):
    #   root_dir = './Data/MRBrainS/DataNii/'
    #   model_dir = 'model'
    #   moda_1 = root_dir + 'Training/T1'
    #   moda_g = root_dir + 'Training/GT'
    network.eval()
    softMax = nn.Softmax(dim=1)  # softmax over the class channel
    numClasses = 4  # TODO: pass this as an argument instead of hardcoding it
    if torch.cuda.is_available():
        softMax.cuda()
        network.cuda()

    # One Dice score per subject and per foreground class (class 0 is background)
    dscAll = np.zeros((len(imageNames), numClasses - 1))
    for i_s in range(len(imageNames)):
        # Load the pre-extracted patches and the original volume shape
        patch_1, patch_g, img_shape = load_data_test(
            moda_1, moda_g, imageNames[i_s])
        patchSize = 27
        patchSize_gt = 9
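        # Each 27^3 input patch yields a 9^3 block of class scores; only
        # channel 0 of the 3-channel input is filled (single T1 modality here).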
        x = np.zeros((patch_1.shape[0], 3, patchSize, patchSize, patchSize))
        x[:, 0, :, :, :] = patch_1

        pred_numpy = np.zeros((patch_1.shape[0], numClasses, patchSize_gt,
                               patchSize_gt, patchSize_gt))
        totalOp = len(imageNames) * patch_1.shape[0]
        #pred = network(numpy_to_var(x[0,:,:,:,:]).view(1,3,patchSize,patchSize,patchSize))
        for i_p in range(patch_1.shape[0]):
            # Run one patch at a time through the network without tracking gradients
            with torch.no_grad():
                pred = network(
                    numpy_to_var(x[i_p, :, :, :, :].reshape(
                        1, 3, patchSize, patchSize, patchSize)))
                pred_y = softMax(pred)
            pred_numpy[i_p, :, :, :, :] = pred_y.cpu().numpy()

            printProgressBar(
                i_s * ((totalOp + 0.0) / len(imageNames)) + i_p + 1,
                totalOp,
                prefix="[Validation] ",
                length=15)

        # To reconstruct the predicted volume
        extraction_step_value = 9
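        # argmax over the class channel turns per-class scores into per-voxel labels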
        pred_classes = np.argmax(pred_numpy, axis=1)

        pred_classes = pred_classes.reshape(
            (len(pred_classes), patchSize_gt, patchSize_gt, patchSize_gt))

        bin_seg = my_reconstruct_volume(
            pred_classes, (img_shape[1], img_shape[2], img_shape[3]),
            patch_shape=(27, 27, 27),
            extraction_step=(extraction_step_value, extraction_step_value,
                             extraction_step_value))

        bin_seg = bin_seg[:, :, extraction_step_value:img_shape[3] -
                          extraction_step_value]
        gt = nib.load(moda_g + '/' + imageNames[i_s]).get_fdata()

        img_pred = nib.Nifti1Image(bin_seg, np.eye(4))
        img_gt = nib.Nifti1Image(gt, np.eye(4))

        img_name = imageNames[i_s].split('.nii')
        name = 'Pred_' + img_name[0] + '_Epoch_' + str(epoch) + '.nii.gz'

        namegt = 'GT_' + img_name[0] + '_Epoch_' + str(epoch) + '.nii.gz'

        os.makedirs(folder_save + 'Segmentations/', exist_ok=True)
        os.makedirs(folder_save + 'GT/', exist_ok=True)

        nib.save(img_pred, folder_save + 'Segmentations/' + name)
        nib.save(img_gt, folder_save + 'GT/' + namegt)

        dsc = evaluateSegmentation(gt, bin_seg)

        dscAll[i_s, :] = dsc

    return dscAll
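

# Example usage (illustrative sketch only; `net`, the paths, and the file list
# below are assumptions, not values taken from this repository). Note that
# folder_save must end with '/' because output paths are built by concatenation.
#
#   dsc = inference(net,
#                   './Data/MRBrainS/DataNii/Training/T1',
#                   './Data/MRBrainS/DataNii/Training/GT',
#                   ['1.nii'], epoch=0, folder_save='./Results/')
#   print('Mean DSC per foreground class:', dsc.mean(axis=0))
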
def inference(network, moda_n, moda_g, imageNames, epoch, folder_save,
              number_modalities):
    a = 64  # input patch size: a x a x a voxels
    b = 64  # prediction / GT patch size and extraction step: b x b x b voxels
    # Default MRBrainS paths (kept here for reference):
    #   root_dir = './Data/MRBrainS/DataNii/'
    #   model_dir = 'model'
    #   moda_1 = root_dir + 'Training/T1'
    #   moda_2 = root_dir + 'Training/T1_IR'
    #   moda_3 = root_dir + 'Training/T2_FLAIR'
    #   moda_g = root_dir + 'Training/GT'
    network.eval()
    softMax = nn.Sigmoid()  # single-channel sigmoid for binary segmentation
    numClasses = 1
    if torch.cuda.is_available():
        softMax.cuda()
        network.cuda()

    dscAll = []
    accall = []
    for i_s in range(len(imageNames)):
        if number_modalities == 2:
            patch_1, patch_2, patch_g, img_shape = load_data_test(
                moda_n, moda_g, imageNames[i_s], number_modalities)
        elif number_modalities == 3:
            patch_1, patch_2, patch_3, patch_g, img_shape = load_data_test(
                moda_n, moda_g, imageNames[i_s], number_modalities)

        patchSize = a
        patchSize_gt = b

        x = np.zeros((patch_1.shape[0], number_modalities, patchSize,
                      patchSize, patchSize))
        x[:, 0, :, :, :] = patch_1
        x[:, 1, :, :, :] = patch_2
        if number_modalities == 3:
            x[:, 2, :, :, :] = patch_3

        pred_numpy = np.zeros((patch_1.shape[0], patchSize_gt, patchSize_gt,
                               patchSize_gt))
        totalOp = len(imageNames) * patch_1.shape[0]
        #pred = network(numpy_to_var(x[0,:,:,:,:]).view(1,number_modalities,patchSize,patchSize,patchSize))
        for i_p in range(patch_1.shape[0]):
            # Run one patch at a time through the network without tracking gradients
            with torch.no_grad():
                pred = network(
                    numpy_to_var(x[i_p, :, :, :, :].reshape(
                        1, number_modalities, patchSize, patchSize, patchSize)))
                pred_y = softMax(
                    pred.reshape(patchSize_gt, patchSize_gt, patchSize_gt))
            pred_numpy[i_p, :, :, :] = pred_y.cpu().numpy()

            printProgressBar(
                i_s * ((totalOp + 0.0) / len(imageNames)) + i_p + 1,
                totalOp,
                prefix="[Validation] ",
                length=15)

        # To reconstruct the predicted volume
        extraction_step_value = b
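        # Threshold the sigmoid probabilities at 0.5 (np.round) to get a binary mask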
        pred_classes = np.round(pred_numpy)

        pred_classes = pred_classes.reshape(
            (patch_1.shape[0], patchSize_gt, patchSize_gt, patchSize_gt))
        #bin_seg = reconstruct_volume(pred_classes, (img_shape[1], img_shape[2], img_shape[3]))

        bin_seg = my_reconstruct_volume(
            pred_classes, (img_shape[1], img_shape[2], img_shape[3]),
            patch_shape=(a, a, a),
            extraction_step=(b, b, b))
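        # patch_shape equals extraction_step here, so patches tile the volume without overlap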

        #bin_seg = bin_seg[:,:,extraction_step_value:img_shape[3]-extraction_step_value]
        #label_selector = [slice(None)] + [slice(9, 117) for i in range(3)]
        gt = nib.load(moda_g + '/' + imageNames[i_s]).get_fdata()
        gt_patches = extract_patches(gt, (a, a, a), (b, b, b))
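        # Extract GT patches with the same geometry so DSC/accuracy are computed patchwise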
        #gt_patches = gt_patches[label_selector]
        img_pred = nib.Nifti1Image(bin_seg, np.eye(4))
        img_gt = nib.Nifti1Image(gt, np.eye(4))

        img_name = imageNames[i_s].split('.nii')
        name = 'Pred_' + img_name[0] + '_Epoch_' + str(epoch) + '.nii.gz'

        namegt = 'GT_' + img_name[0] + '_Epoch_' + str(epoch) + '.nii.gz'

        os.makedirs(folder_save + 'Segmentations/', exist_ok=True)
        os.makedirs(folder_save + 'GT/', exist_ok=True)

        nib.save(img_pred, folder_save + 'Segmentations/' + name)
        nib.save(img_gt, folder_save + 'GT/' + namegt)

        dsc = dc(gt_patches, pred_classes)
        acc = accuracy_score(gt_patches.flatten(), pred_classes.flatten())
        dscAll.append(dsc)
        accall.append(acc)
    return dscAll, accall
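

# Example usage (illustrative sketch only; the model, paths, and file list are
# assumptions). moda_n is assumed to be the list of modality directories expected
# by load_data_test, and folder_save must end with '/'.
#
#   dsc_list, acc_list = inference(net,
#                                  ['./Data/Training/T1', './Data/Training/T2_FLAIR'],
#                                  './Data/Training/GT',
#                                  ['1.nii'], epoch=0,
#                                  folder_save='./Results/',
#                                  number_modalities=2)
#   print('Mean DSC:', np.mean(dsc_list), 'Mean accuracy:', np.mean(acc_list))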