# Shared imports assumed by the examples below; `helper`, `getAllFiles`,
# `load_nifti_mat_from_file` and `create_and_save_nifti` come from the
# repository's own utility modules (sketched at the end of this listing).
import os
import pickle
import re
import time

import cv2
import numpy as np
from keras.models import load_model
from sklearn.mixture import GaussianMixture as GMM

# Example 1
def main(args):
    original_data_dir = os.path.expanduser(args.original_data_dir)
    target_dir = os.path.expanduser(args.target_dir)
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    # List filenames of data
    unfiltered_filelist = getAllFiles(original_data_dir)
    input_list = [
        item for item in unfiltered_filelist if re.search('_img', item)
    ]
    mask_list = [
        item for item in unfiltered_filelist if re.search('_mask', item)
    ]
    label_list = [
        item for item in unfiltered_filelist if re.search('_grid', item)
    ]
    input_list = sorted(input_list)
    mask_list = sorted(mask_list)
    label_list = sorted(label_list)
    print(input_list)
    print(mask_list)
    print(label_list)
    # load image, mask and label stacks as matrices
    for i, j in enumerate(input_list):
        print('Loading image...')
        img_mat = helper.load_nifti_mat_from_file(j)
        print('Loading mask...')
        mask_mat = helper.load_nifti_mat_from_file(mask_list[i])
        print('Loading label...')
        label_mat = helper.load_nifti_mat_from_file(label_list[i])
        # check the dimensions
        assert img_mat.shape == mask_mat.shape == label_mat.shape, \
            'The dimensions of image, mask and label are not the same.'

        # mask images and labels (skull stripping)
        img_mat = helper.aplly_mask(img_mat, mask_mat)
        label_mat = helper.aplly_mask(label_mat, mask_mat)
        patient_id = j.split(os.sep)[-1].split('_')[0]
        print(patient_id)
        # save to new file as masked version of original data
        helper.create_and_save_nifti(
            img_mat, os.path.join(target_dir, patient_id + '_img.nii'))
        helper.create_and_save_nifti(
            mask_mat, os.path.join(target_dir, patient_id + '_mask.nii'))
        helper.create_and_save_nifti(
            label_mat, os.path.join(target_dir, patient_id + '_label.nii'))

        print()
    print('DONE')
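
# A minimal, hypothetical CLI wrapper for Example 1; the argument names mirror
# the attributes accessed in main() and are assumptions, not the repository's
# actual entry point.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(
        description='Mask images and labels (skull stripping).')
    parser.add_argument('--original_data_dir', required=True,
                        help='directory with *_img, *_mask and *_grid NIfTI files')
    parser.add_argument('--target_dir', required=True,
                        help='directory where the masked volumes are written')
    main(parser.parse_args())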
# Example 2
def main(args):
    original_data_dir = args.original_data
    grid_filepath = args.grid_filepath
    patch_size = 32
    if not os.path.exists(grid_filepath):
        os.makedirs(grid_filepath)
    unfiltered_filelist = getAllFiles(original_data_dir)
    input_list = [
        item for item in unfiltered_filelist if re.search('_img', item)
    ]
    mask_list = [
        item for item in unfiltered_filelist if re.search('_mask', item)
    ]
    input_list = sorted(input_list)
    mask_list = sorted(mask_list)
    print(input_list)
    print(mask_list)
    # load image, mask and label stacks as matrices
    for i, j in enumerate(input_list):
        print('Loading image...')
        img_mat = load_nifti_mat_from_file(j)
        print('Loading mask...')
        mask_mat = load_nifti_mat_from_file(mask_list[i])
        # the grid is going to be saved in this matrix
        prob_mat = np.zeros(img_mat.shape, dtype=np.float32)
        x_dim, y_dim, z_dim = prob_mat.shape
        # get the x, y and z coordinates where there is brain
        x, y, z = np.where(mask_mat)
        print('x shape:', x.shape)
        print('y shape:', y.shape)
        print('z shape:', z.shape)
        # get the z slices with brain
        z_slices = np.unique(z)
        # proceed slice by slice
        for l in z_slices:
            slice_vox_inds = np.where(z == l)
            # find all x and y coordinates with brain in given slice
            x_in_slice = x[slice_vox_inds]
            y_in_slice = y[slice_vox_inds]
            # find min and max x and y coordinates
            slice_x_min = min(x_in_slice)
            slice_x_max = max(x_in_slice)
            slice_y_min = min(y_in_slice)
            slice_y_max = max(y_in_slice)
            # calculate number of patches in x and y direction in given slice
            num_of_x_patches = int(
                np.ceil((slice_x_max - slice_x_min) / patch_size))
            num_of_y_patches = int(
                np.ceil((slice_y_max - slice_y_min) / patch_size))
            for m in range(num_of_x_patches):
                for n in range(num_of_y_patches):
                    # find the starting and ending x and y coordinates of given patch
                    patch_start_x = slice_x_min + patch_size * m
                    patch_end_x = slice_x_min + patch_size * (m + 1)
                    patch_start_y = slice_y_min + patch_size * n
                    patch_end_y = slice_y_min + patch_size * (n + 1)
                    # shift the last patch back if it would leave the volume;
                    # >= keeps the border index patch_end_x in bounds below
                    if patch_end_x >= x_dim:
                        patch_end_x = slice_x_max
                        patch_start_x = slice_x_max - patch_size
                    if patch_end_y >= y_dim:
                        patch_end_y = slice_y_max
                        patch_start_y = slice_y_max - patch_size
                    prob_mat[patch_start_x:patch_end_x, patch_start_y, l] = 1
                    prob_mat[patch_start_x:patch_end_x, patch_end_y, l] = 1
                    prob_mat[patch_start_x, patch_start_y:patch_end_y, l] = 1
                    prob_mat[patch_end_x, patch_start_y:patch_end_y, l] = 1
        # SAVE AS NIFTI
        create_and_save_nifti(
            prob_mat,
            os.path.join(grid_filepath,
                         j.split(os.sep)[-1].split('_')[0] + '_grid.nii'))
    print('DONE')
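# The *_grid.nii volumes written above are the weak grid labels that the
# '_grid' filters in Examples 1 and 5 pick up.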
# Example 3
def main(args):
    train_non_label_dir = args.train_non_label_dir
    patch_size = args.patch_size
    model_filepath = args.model_filepath
    train_metadata_filepath = args.train_metadata_filepath
    grid_label_filepath = args.grid_label_filepath
    # LOADING MODEL, RESULTS AND WHOLE BRAIN MATRICES
    print(model_filepath)
    model = load_model(model_filepath)
    with open(train_metadata_filepath, 'rb') as handle:
        train_metadata = pickle.load(handle)
    print(train_metadata)
    # List filenames of data after the skull stripping process
    unfiltered_filelist = getAllFiles(train_non_label_dir)
    input_list = [
        item for item in unfiltered_filelist if re.search('_img', item)
    ]
    mask_list = [
        item for item in unfiltered_filelist if re.search('_mask', item)
    ]
    input_list = sorted(input_list)
    mask_list = sorted(mask_list)
    print(input_list)
    print(mask_list)
    # load image, mask and label stacks as matrices
    for i, j in enumerate(input_list):
        print('Loading image...')
        img_mat = load_nifti_mat_from_file(j)
        print('Loading mask...')
        mask_mat = load_nifti_mat_from_file(mask_list[i])
        # weak_annotation matrix
        prob_mat = np.zeros(img_mat.shape, dtype=np.float32)
        x_dim, y_dim, _ = prob_mat.shape
        # get the x, y and z coordinates where there is brain
        x, y, z = np.where(mask_mat)
        print('x shape:', x.shape)
        print('y shape:', y.shape)
        print('z shape:', z.shape)
        # get the z slices with brain
        z_slices = np.unique(z)
        # start cutting out and predicting the patches
        # proceed slice by slice
        for l in z_slices:
            print('Slice:', l)
            slice_vox_inds = np.where(z == l)
            # find all x and y coordinates with brain in given slice
            x_in_slice = x[slice_vox_inds]
            y_in_slice = y[slice_vox_inds]
            # find min and max x and y coordinates
            slice_x_min = min(x_in_slice)
            slice_x_max = max(x_in_slice)
            slice_y_min = min(y_in_slice)
            slice_y_max = max(y_in_slice)
            # calculate number of predicted patches in x and y direction in given slice
            num_of_x_patches = int(
                np.ceil((slice_x_max - slice_x_min) / patch_size))
            num_of_y_patches = int(
                np.ceil((slice_y_max - slice_y_min) / patch_size))
            print('num x patches', num_of_x_patches)
            print('num y patches', num_of_y_patches)
            # predict patch by patch in given slice
            for m in range(num_of_x_patches):
                for n in range(num_of_y_patches):
                    # find the starting and ending x and y coordinates of given patch
                    patch_start_x = slice_x_min + patch_size * m
                    patch_end_x = slice_x_min + patch_size * (m + 1)
                    patch_start_y = slice_y_min + patch_size * n
                    patch_end_y = slice_y_min + patch_size * (n + 1)
                    # if the dimensions of the probability matrix would be
                    # exceeded, shift back the last patch (>= keeps the border
                    # index patch_end_x in bounds below)
                    if patch_end_x >= x_dim:
                        patch_end_x = slice_x_max
                        patch_start_x = slice_x_max - patch_size
                    if patch_end_y >= y_dim:
                        patch_end_y = slice_y_max
                        patch_start_y = slice_y_max - patch_size
                    # get the patch with the found coordinates from the image matrix
                    img_patch = img_mat[patch_start_x:patch_end_x,
                                        patch_start_y:patch_end_y, l]
                    # normalize the patch with mean and standard deviation calculated over training set
                    img_patch = img_patch.astype(np.float32)
                    img_patch = img_patch[None, :, :, None]
                    img_patch -= train_metadata['mean_train']
                    img_patch /= train_metadata['std_train']
                    if model.predict(img_patch)[0, 0] >= 0.5:
                        prob_mat[patch_start_x:patch_end_x,
                                 patch_start_y:patch_end_y, l] = 2
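                    # draw the patch border (1) on top of any positive fill (2)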
                    prob_mat[patch_start_x:patch_end_x, patch_start_y, l] = 1
                    prob_mat[patch_start_x:patch_end_x, patch_end_y, l] = 1
                    prob_mat[patch_start_x, patch_start_y:patch_end_y, l] = 1
                    prob_mat[patch_end_x, patch_start_y:patch_end_y, l] = 1
        # Save weak annotation
        create_and_save_nifti(
            prob_mat,
            os.path.join(grid_label_filepath,
                         j.split(os.sep)[-1].split('_')[0] + '_pnetcls.nii'))
        print('done')
    print('DONE')
# Example 4
def main(args):
    test_set_dir = args.test_set_dir
    patch_size = args.patch_size
    model_arch = args.model_arch
    train_metadata_filepath = args.train_metadata_filepath
    model_filepath = args.model_filepath
    prediction_filepath = args.prediction_filepath
    # LOADING MODEL, RESULTS AND WHOLE BRAIN MATRICES
    print(model_filepath)
    model = load_model(model_filepath,
                       custom_objects={
                           'dice_coef_loss': dice_coef_loss,
                           'dice_coef': dice_coef
                       })
    with open(train_metadata_filepath, 'rb') as handle:
        train_metadata = pickle.load(handle)
    print(train_metadata)
    # List filenames of data after the skull stripping process
    unfiltered_filelist = getAllFiles(test_set_dir)
    input_list = [
        item for item in unfiltered_filelist if re.search('_img', item)
    ]
    mask_list = [
        item for item in unfiltered_filelist if re.search('_mask', item)
    ]
    input_list = sorted(input_list)
    mask_list = sorted(mask_list)
    print(input_list)
    print(mask_list)
    # load image, mask and label stacks as matrices
    for i, j in enumerate(input_list):
        print('Loading image...')
        img_mat = load_nifti_mat_from_file(j)
        print('Loading mask...')
        mask_mat = load_nifti_mat_from_file(mask_list[i])
        # prediction
        prob_mat = np.zeros(img_mat.shape, dtype=np.float32)
        x_dim, y_dim, _ = prob_mat.shape
        # get the x, y and z coordinates where there is brain
        x, y, z = np.where(mask_mat)
        print('x shape:', x.shape)
        print('y shape:', y.shape)
        print('z shape:', z.shape)
        # get the z slices with brain
        z_slices = np.unique(z)

        # start cutting out and predicting the patches
        starttime_total = time.time()
        # proceed slice by slice
        for l in z_slices:
            print('Slice:', l)
            starttime_slice = time.time()
            slice_vox_inds = np.where(z == l)
            # find all x and y coordinates with brain in given slice
            x_in_slice = x[slice_vox_inds]
            y_in_slice = y[slice_vox_inds]
            # find min and max x and y coordinates
            slice_x_min = min(x_in_slice)
            slice_x_max = max(x_in_slice)
            slice_y_min = min(y_in_slice)
            slice_y_max = max(y_in_slice)

            # calculate number of predicted patches in x and y direction in given slice
            num_of_x_patches = int(
                np.ceil((slice_x_max - slice_x_min) / patch_size))
            num_of_y_patches = int(
                np.ceil((slice_y_max - slice_y_min) / patch_size))
            print('num x patches', num_of_x_patches)
            print('num y patches', num_of_y_patches)

            # predict patch by patch in given slice
            for m in range(num_of_x_patches):
                for n in range(num_of_y_patches):
                    # find the starting and ending x and y coordinates of given patch
                    patch_start_x = slice_x_min + patch_size * m
                    patch_end_x = slice_x_min + patch_size * (m + 1)
                    patch_start_y = slice_y_min + patch_size * n
                    patch_end_y = slice_y_min + patch_size * (n + 1)
                    # if the dimensions of the probability matrix are exceeded shift back the last patch
                    if patch_end_x > x_dim:
                        patch_end_x = slice_x_max
                        patch_start_x = slice_x_max - patch_size
                    if patch_end_y > y_dim:
                        patch_end_y = slice_y_max
                        patch_start_y = slice_y_max - patch_size

                    # get the patch with the found coordinates from the image matrix
                    img_patch = img_mat[patch_start_x:patch_end_x,
                                        patch_start_y:patch_end_y, l]

                    # normalize the patch with mean and standard deviation calculated over training set
                    img_patch = img_patch.astype(np.float32)
                    img_patch -= train_metadata['mean_train']
                    img_patch /= train_metadata['std_train']

                    # predict the patch with the model and save to probability matrix
                    prob_mat[patch_start_x:patch_end_x,
                             patch_start_y:patch_end_y, l] = np.reshape(
                                 model.predict(np.reshape(
                                     img_patch,
                                     (1, patch_size, patch_size, 1)),
                                               batch_size=1,
                                               verbose=0),
                                 (patch_size, patch_size))

            # how long does the prediction take for one slice
            duration_slice = time.time() - starttime_slice
            print('prediction in slice took:', duration_slice // 3600,
                  'hours', (duration_slice // 60) % 60, 'minutes',
                  duration_slice % 60, 'seconds')
        # how long does the prediction take for a patient
        duration_total = time.time() - starttime_total
        print('prediction in total took:', duration_total // 3600,
              'hours', (duration_total // 60) % 60, 'minutes',
              duration_total % 60, 'seconds')
        # save file, tagging the filename with the model architecture
        patient_id = j.split(os.sep)[-1].split('_')[0]
        print(patient_id)
        if model_arch in ('wnetseg', 'pnet', 'unet'):
            create_and_save_nifti(
                prob_mat,
                os.path.join(
                    prediction_filepath,
                    'prediction_' + patient_id + '_' + model_arch + '.nii.gz'))
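
# The custom objects passed to load_model above are not defined in this
# listing; a common Keras-backend formulation of the Dice coefficient and its
# loss looks like this (an assumption, not necessarily the repository's code):
from keras import backend as K

def dice_coef(y_true, y_pred, smooth=1.0):
    # Dice similarity coefficient computed over the flattened tensors.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2.0 * intersection + smooth) / (
        K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

def dice_coef_loss(y_true, y_pred):
    # Loss decreases as the predicted and true masks overlap more.
    return 1.0 - dice_coef(y_true, y_pred)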
# Example 5
def main(args):
    patch_size = args.patch_size
    clustering = args.clustering
    patch_annotation_dir = os.path.expanduser(args.patch_annotation_dir)
    rough_mask_dir = os.path.expanduser(args.rough_mask_dir)
    if not os.path.exists(rough_mask_dir):
        os.makedirs(rough_mask_dir)
    # List filenames of data after the skull stripping process
    unfiltered_filelist = getAllFiles(patch_annotation_dir)
    input_list = [
        item for item in unfiltered_filelist if re.search('_img', item)
    ]
    mask_list = [
        item for item in unfiltered_filelist if re.search('_mask', item)
    ]
    label_list = [
        item for item in unfiltered_filelist if re.search('_grid', item)
    ]
    input_list = sorted(input_list)
    mask_list = sorted(mask_list)
    label_list = sorted(label_list)
    print(input_list)
    print(mask_list)
    print(label_list)
    # load image, mask and label stacks as matrices
    for i, j in enumerate(input_list):
        print('Loading image...')
        img_mat = load_nifti_mat_from_file(j)
        # Normalization
        mask = img_mat > 0
        img_mat = (img_mat - img_mat[mask].mean()) / img_mat[mask].std()
        print('Loading mask...')
        mask_mat = load_nifti_mat_from_file(mask_list[i])
        print('Loading weak label...')
        label_mat = load_nifti_mat_from_file(label_list[i])
        # extract square
        prob_mat = np.zeros(img_mat.shape, dtype=np.float32)
        x_dim, y_dim, _ = prob_mat.shape
        # get the x, y and z coordinates where there is brain
        x, y, z = np.where(mask_mat)
        z_slices = np.unique(z)
        cnt = 0
        for l in z_slices:
            slice_vox_inds = np.where(z == l)
            # find all x and y coordinates with brain in given slice
            x_in_slice = x[slice_vox_inds]
            y_in_slice = y[slice_vox_inds]
            # find min and max x and y coordinates
            slice_x_min = min(x_in_slice)
            slice_x_max = max(x_in_slice)
            slice_y_min = min(y_in_slice)
            slice_y_max = max(y_in_slice)
            # calculate number of patches in x and y direction in given slice
            num_of_x_patches = int(
                np.ceil((slice_x_max - slice_x_min) / patch_size))
            num_of_y_patches = int(
                np.ceil((slice_y_max - slice_y_min) / patch_size))
            for m in range(num_of_x_patches):
                for n in range(num_of_y_patches):
                    # find the starting and ending x and y coordinates of given patch
                    patch_start_x = slice_x_min + patch_size * m
                    patch_end_x = slice_x_min + patch_size * (m + 1)
                    patch_start_y = slice_y_min + patch_size * n
                    patch_end_y = slice_y_min + patch_size * (n + 1)
                    if patch_end_x > x_dim:
                        patch_end_x = slice_x_max
                        patch_start_x = slice_x_max - patch_size
                    if patch_end_y > y_dim:
                        patch_end_y = slice_y_max
                        patch_start_y = slice_y_max - patch_size
                    # get the patch with the found coordinates from the image matrix
                    img_patch = img_mat[patch_start_x:patch_end_x,
                                        patch_start_y:patch_end_y, l]
                    label_patch = label_mat[patch_start_x:patch_end_x,
                                            patch_start_y:patch_end_y, l]
                    mask_check = mask_mat[patch_start_x:patch_end_x,
                                          patch_start_y:patch_end_y, l]
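                    # a value of 2 in the weak label marks a patch the
                    # classifier flagged as containing vessel (see Example 3)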
                    if 2 in label_patch:
                        image = img_patch
                        pixels = image.astype('float32')
                        if clustering == 'kmeans':
                            vectorized = pixels.reshape((-1, 1))
                            criteria = (cv2.TERM_CRITERIA_EPS +
                                        cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
                            attempts = 10
                            if 0 in mask_check:
                                K = 4
                            else:
                                K = 2
                            _, label, center = cv2.kmeans(
                                vectorized, K, None, criteria, attempts,
                                cv2.KMEANS_PP_CENTERS)
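                            # keep only the brightest cluster (largest k-means
                            # center), assumed here to be the vessel signal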
                            res = center[label.flatten()]
                            result_image = res.reshape((image.shape))
                            result_image[result_image != np.amax(center)] = 0
                            result_image[result_image == np.amax(center)] = 1
                            # cv2.connectedComponents expects 8-bit unsigned input
                            result_image = result_image.astype('uint8')
                            num_labels, labels_im = cv2.connectedComponents(
                                result_image)
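                            # accept the patch only if it has few connected
                            # components and a small foreground area;
                            # otherwise treat it as noise and leave zeros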
                            threshold = 350
                            if (0 in mask_check):
                                threshold = 100
                            if (3 in label_patch):
                                threshold = 350
                            if (num_labels <
                                    5) and (np.count_nonzero(result_image) <
                                            threshold):
                                cnt += 1
                                prob_mat[patch_start_x:patch_end_x,
                                         patch_start_y:patch_end_y,
                                         l] = result_image
                        elif clustering == 'gmm':
                            pixels = image[image != 0]
                            vectorized = pixels.reshape((-1, 1))
                            vectorized = np.float32(vectorized)
                            if 0 in mask_check:
                                n_components = 4
                            else:
                                n_components = 2
                            if (3 in label_patch):
                                n_components = 2
                            gmm_model_tied = GMM(
                                n_components=n_components,
                                covariance_type='tied').fit(vectorized)
                            center_tied = gmm_model_tied.means_
                            label_tied = gmm_model_tied.predict(
                                vectorized).reshape(-1, 1)
                            res_tied = center_tied[label_tied.flatten()]
                            result_image_tied = res_tied
                            result_image_tied[
                                result_image_tied != np.amax(center_tied)] = 0
                            result_image_tied[result_image_tied == np.amax(
                                center_tied)] = 1
                            b = np.zeros(img_patch.shape)
                            pos = np.where(img_patch != 0)
                            b[pos[0], pos[1]] = result_image_tied.reshape(
                                len(img_patch[img_patch != 0]))
                            b = b.astype('uint8')
                            num_labels, labels_im = cv2.connectedComponents(b)
                            threshold = 350
                            if (0 in mask_check):
                                threshold = 100
                            if (3 in label_patch):
                                threshold = 350
                            if (num_labels < 5) and (np.count_nonzero(b) <
                                                     threshold):
                                cnt += 1
                                prob_mat[patch_start_x:patch_end_x,
                                         patch_start_y:patch_end_y, l] = b
        # save prob_mat
        print('number of vessel patches:', cnt)
        create_and_save_nifti(
            prob_mat,
            os.path.join(rough_mask_dir,
                         j.split(os.sep)[-1].split('_')[0] +
                         '_label_rough.nii.gz'))
    print()
    print('DONE')
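
# For reference, minimal sketches of the shared helpers used throughout,
# assuming nibabel-backed NIfTI I/O; the repository's actual implementations
# may handle affines, headers and filtering differently.
import nibabel as nib

def getAllFiles(directory):
    # Recursively collect all file paths under a directory.
    return [os.path.join(root, name)
            for root, _, files in os.walk(directory)
            for name in files]

def load_nifti_mat_from_file(filepath):
    # Load a NIfTI volume and return its voxel data as a numpy array.
    return nib.load(filepath).get_fdata()

def create_and_save_nifti(mat, filepath):
    # Wrap a matrix in a NIfTI image with an identity affine and write it.
    nib.save(nib.Nifti1Image(mat, affine=np.eye(4)), filepath)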