# Standard imports used by the functions below; the project-specific helpers
# (load_vois_h5, estimate_noise, auto_corner_crop, create_subimages,
# local_standard, print_images, load_excel, optimization_hyperopt_loo,
# load_binary, load_dataset_h5, MRELBP) are assumed to be importable from the
# surrounding package.
import os
import time

import h5py
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.io as sio
from scipy.signal import medfilt, medfilt2d
from tqdm import tqdm


def load_and_estimate(file, arguments, denoise=medfilt, data=None):
    """Loads mean+std images and estimates noise with each metric.

    Kept as a top-level function so it can be dispatched to parallel workers.
    Note: `metrics` and `kernel_size` are expected to be defined at module level.
    """
    # Pipeline for µCT data
    if data is not None:
        # Evaluate noise on data
        noises = np.zeros(len(metrics))
        for m, metric in enumerate(metrics):
            noises[m] = estimate_noise(data, metric, kernel_size=kernel_size,
                                       denoise_method=denoise)
        return noises

    # Pipeline for images

    # Get images
    path = arguments.image_path
    # Load images
    image_surf, image_deep, image_calc = load_vois_h5(path, file)

    # Auto crop (the cropped-flag is not needed here)
    if arguments.auto_crop:
        image_deep, _ = auto_corner_crop(image_deep)
        image_calc, _ = auto_corner_crop(image_calc)

    # Evaluate noise on mean+std images with each metric
    noises_surf = np.zeros(len(metrics))
    noises_deep = np.zeros(len(metrics))
    noises_calc = np.zeros(len(metrics))
    for m, metric in enumerate(metrics):
        noises_surf[m] = estimate_noise(image_surf, metric,
                                        kernel_size=kernel_size,
                                        denoise_method=denoise)
        noises_deep[m] = estimate_noise(image_deep, metric,
                                        kernel_size=kernel_size,
                                        denoise_method=denoise)
        noises_calc[m] = estimate_noise(image_calc, metric,
                                        kernel_size=kernel_size,
                                        denoise_method=denoise)
    return np.array((noises_surf, noises_deep, noises_calc))
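
# --- Usage sketch (assumptions, not part of the original pipeline: joblib is
# available, `args_demo.image_path` points at a folder of *.h5 VOI files, and
# `metrics`/`kernel_size` are defined at module level as noted above).
def _demo_parallel_noise(args_demo, n_jobs=4):
    from joblib import Parallel, delayed
    sample_files = sorted(f for f in os.listdir(args_demo.image_path)
                          if f.endswith('.h5'))
    # Returns one (3, n_metrics) array (surf/deep/calc rows) per sample
    return Parallel(n_jobs=n_jobs)(
        delayed(load_and_estimate)(f, args_demo) for f in sample_files)
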
def load_voi_save_subvolume(args, file, n_x=3, n_y=3, size_x=400, size_y=400):
    """Loads mean+std images, splits each zone into an n_x * n_y grid of
    subimages and saves every subvolume to its own HDF5 file."""
    # Load images
    image_surf, image_deep, image_calc = load_vois_h5(str(args.data_path),
                                                      file)

    ims_surf = create_subimages(image_surf,
                                n_x=n_x,
                                n_y=n_y,
                                im_size_x=size_x,
                                im_size_y=size_y)
    ims_deep = create_subimages(image_deep,
                                n_x=n_x,
                                n_y=n_y,
                                im_size_x=size_x,
                                im_size_y=size_y)
    ims_calc = create_subimages(image_calc,
                                n_x=n_x,
                                n_y=n_y,
                                im_size_x=size_x,
                                im_size_y=size_y)

    for sub in range(len(ims_surf)):
        # Write each subvolume triplet to its own file, e.g. <sample>_sub0.h5
        save_name = file[:-3] + '_sub' + str(sub) + '.h5'
        with h5py.File(str(args.save_image_path / save_name), 'w') as h5:
            h5.create_dataset('surf', data=ims_surf[sub])
            h5.create_dataset('deep', data=ims_deep[sub])
            h5.create_dataset('calc', data=ims_calc[sub])
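
# --- Usage sketch (assumptions: `args_demo.data_path` and
# `args_demo.save_image_path` are pathlib.Path objects, matching the `/` join above).
def _demo_save_subvolumes(args_demo):
    # Split every sample into a 3 x 3 grid of 400 x 400 subvolumes
    for f in sorted(os.listdir(str(args_demo.data_path))):
        if f.endswith('.h5'):
            load_voi_save_subvolume(args_demo, f)
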
def load_voi(args, file, grade, par, save_images=False, autocrop=True):
    """Loads mean+std images and performs automatic artefact crop and grayscale normalization."""
    path = args.image_path
    save = args.save_path
    # Load images
    image_surf, image_deep, image_calc = load_vois_h5(path, file)

    # Select VOI
    if grade[:4] == 'surf':
        image = image_surf[:]
    elif grade[:4] == 'deep':
        if autocrop:
            image, cropped = auto_corner_crop(image_deep)
            if cropped:
                # print_crop(image_deep, image, file[:-3] + ' deep zone')
                print(
                    'Automatically cropped sample {0}, deep zone from shape: ({1}, {2}) to: ({3}, {4})'
                    .format(file[:-3], image_deep.shape[0],
                            image_deep.shape[1], image.shape[0],
                            image.shape[1]))
        else:
            image = image_deep[:]
    elif grade[:4] == 'calc':
        if autocrop:
            image, cropped = auto_corner_crop(image_calc)
            if cropped:
                # print_crop(image_calc, image, file[:-3] + ' calcified zone')
                print(
                    'Automatically cropped sample {0}, calcified zone from shape: ({1}, {2}) to: ({3}, {4})'
                    .format(file[:-3], image_calc.shape[0],
                            image_calc.shape[1], image.shape[0],
                            image.shape[1]))
        else:
            image = image_calc[:]
    else:
        raise ValueError('Check selected zone: ' + grade)

    # Median filtering for noisy images
    if args.median_filter:
        image = medfilt2d(image, 3)
    # Normalize
    image_norm = local_standard(image, par)
    # Save image
    if save_images:
        titles_norm = ['Mean + Std', '', 'Normalized']
        print_images((image, image, image_norm),
                     subtitles=titles_norm,
                     title=file + ' Input',
                     cmap2='gray',
                     save_path=save + '/Images/Input/',
                     sample=file[:-3] + '_' + grade + '.png')
    return image_norm
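
# --- Usage sketch (hypothetical file name; `par_demo` is the normalization
# parameter set expected by local_standard).
def _demo_load_normalized_voi(args_demo, par_demo):
    return load_voi(args_demo, 'sample_01.h5', grade='deep', par=par_demo)
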
def load_voi(path, file, grade, max_roi=400):
    """Alternative loader: center-crops each VOI to at most max_roi pixels
    and returns the selected zone without normalization."""
    # Load images
    image_surf, image_deep, image_calc = load_vois_h5(path, file)

    # Center crop to max_roi. Guard against a zero-width crop: with
    # crop == 0 the slice [0:-0] would yield an empty array.
    def center_crop(image):
        crop = (np.shape(image)[0] - max_roi) // 2
        if crop > 0:
            return image[crop:-crop, crop:-crop]
        return image

    image_surf = center_crop(image_surf)
    image_deep = center_crop(image_deep)
    image_calc = center_crop(image_calc)
    # Select VOI
    if grade[:4] == 'surf':
        image = image_surf[:]
    elif grade[:4] == 'deep':
        image = image_deep[:]
    elif grade[:4] == 'calc':
        image = image_calc[:]
    else:
        raise ValueError('Check selected zone: ' + grade)
    return image
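
# --- Usage sketch for this variant (hypothetical path and file name): returns
# the raw, center-cropped calcified-zone VOI without normalization.
def _demo_load_cropped_voi():
    return load_voi('/path/to/h5_images', 'sample_01.h5', grade='calc')
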
def pipeline_hyperopt(args, files, metric, pat_groups=None):
    """Pipeline for Bayesian optimization.
    1. Loads images and ground truth.
    2. Calls the optimization function and displays result.

    Parameters
    ----------
    args : Namespace
        Namespace containing grading arguments. See grading_pipelines for detailed description.
    files : list
        List of sample datasets containing mean+std images.
    metric : function
        Loss function used for optimization.
        Defaults to sklearn.metrics.mean_squared_error.
        Custom losses, e.g. 1 - Spearman correlation, can also be used.
    pat_groups : ndarray
        Groups for leave-one-group-out split.
    """

    # Load images
    images_surf = []
    images_deep = []
    images_calc = []
    for k in range(len(files)):
        # Load images
        image_surf, image_deep, image_calc = load_vois_h5(
            args.image_path, files[k])

        # Automatic corner crop for deep and calcified zones
        image_deep, cropped_deep = auto_corner_crop(image_deep)
        if cropped_deep:
            print(
                'Automatically cropped sample {0}, deep zone to shape: ({1}, {2})'
                .format(files[k][:-3], image_deep.shape[0],
                        image_deep.shape[1]))
        image_calc, cropped_calc = auto_corner_crop(image_calc)
        if cropped_calc:
            print(
                'Automatically cropped sample {0}, calcified zone to shape: ({1}, {2})'
                .format(files[k][:-3], image_calc.shape[0],
                        image_calc.shape[1]))

        # Append to list
        images_surf.append(image_surf)
        images_deep.append(image_deep)
        images_calc.append(image_calc)

    # Load grades to array (note: `args`, not the undefined name `arguments`)
    grades, hdr_grades = load_excel(args.grade_path,
                                    titles=[args.grades_used])
    grades = grades.squeeze()
    # Sort grades based on alphabetical order
    grades = np.array([
        grade
        for _, grade in sorted(zip(hdr_grades, grades), key=lambda var: var[0])
    ])
    if args.n_subvolumes > 1:
        # Repeat each grade once per subvolume
        grades = np.array(
            [val for val in grades for _ in range(args.n_subvolumes)])

    # Select VOI
    if args.grades_used[:4] == 'surf':
        images = images_surf[:]
    elif args.grades_used[:4] == 'deep':
        images = images_deep[:]
    elif args.grades_used[:4] == 'calc':
        images = images_calc[:]
    else:
        raise ValueError('Check selected zone: ' + args.grades_used)
    # Optimize parameters
    pars, error = optimization_hyperopt_loo(np.array(images),
                                            grades,
                                            args,
                                            metric,
                                            groups=pat_groups)

    print('Results for grades: ' + args.grades_used)
    print('Parameters are:\n', pars)
    for par in pars:
        print(par)
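
# --- Usage sketch (assumption: mean squared error as the optimization loss,
# matching the documented default).
def _demo_hyperopt(args_demo, files_demo, groups_demo=None):
    from sklearn.metrics import mean_squared_error
    pipeline_hyperopt(args_demo, files_demo, metric=mean_squared_error,
                      pat_groups=groups_demo)
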
def pipeline_lbp(image_path, savepath, save, pars, data_type='dat'):
    """Calculates MRELBP features for a dataset and saves the parameters and
    features to Excel files under `save`. (`savepath` is currently unused.)"""
    # Start time
    start_time = time.time()

    # Save parameters (os.path.join keeps the output path portable)
    writer = pd.ExcelWriter(os.path.join(save, 'LBP_parameters.xlsx'))
    df1 = pd.DataFrame(pars, index=[0])
    df1.to_excel(writer)
    writer.save()

    files = os.listdir(image_path)
    files.sort()
    if data_type == 'h5':
        images = load_dataset_h5(image_path, files)

    features = None  # Reset feature array

    for k in tqdm(range(len(files)), desc='Calculating LBP features'):
        # Load file
        if data_type == 'dat':
            if k > len(files) / 2 - 1:
                break
            file = os.path.join(image_path, files[2 * k])
            try:
                mu = load_binary(file, np.float64)
            except FileNotFoundError:
                continue
            file = os.path.join(image_path, files[2 * k + 1])
            try:
                sd = load_binary(file, np.float64)
            except FileNotFoundError:
                continue
        elif data_type == 'mat':
            file = os.path.join(image_path, files[k])
            try:
                mat = sio.loadmat(file)
                mu = mat['Mz']
                sd = mat['sz']
            except NotImplementedError:
                # v7.3 .mat files are HDF5-based and need h5py instead
                mat = h5py.File(file, 'r')
                mu = mat['Mz'][()]
                sd = mat['sz'][()]

        # Combine mean and sd images
        if data_type == 'h5':
            image_surf, image_deep, image_calc = load_vois_h5(
                image_path, files[k])
            image = images[k]  # Full mean+std image loaded with the dataset
            if np.shape(image)[0] != 400:
                crop = (np.shape(image)[0] - 400) // 2
                image = image[crop:-crop, crop:-crop]
                image_surf = image_surf[crop:-crop, crop:-crop]
                image_deep = image_deep[crop:-crop, crop:-crop]
                image_calc = image_calc[crop:-crop, crop:-crop]
        else:
            image = mu + sd
        # Grayscale normalization
        image = local_standard(image, pars['ks1'], pars['sigma1'],
                               pars['ks2'], pars['sigma2'])
        # Optional visual check (blocks the loop, so keep it commented out):
        # plt.imshow(image); plt.show()
        # LBP
        hist, lbp_il, lbp_is, lbp_ir = MRELBP(image, pars['N'], pars['R'],
                                              pars['r'], pars['wc'],
                                              (pars['wl'], pars['ws']))
        if hist.shape[0] == 1:
            hist = hist.T
        # Stack feature histograms column-wise (one column per sample)
        if features is None:
            features = hist
        else:
            features = np.concatenate((features, hist), axis=1)

    # Save features
    writer = pd.ExcelWriter(os.path.join(save, 'LBP_features_python.xlsx'))
    df1 = pd.DataFrame(features)
    df1.to_excel(writer, sheet_name='LBP_features')
    writer.save()

    t = time.time() - start_time
    print('Elapsed time: {0}s'.format(t))
    return features
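
# --- Usage sketch (hypothetical paths; `pars_demo` must carry the keys used
# above: ks1, sigma1, ks2, sigma2, N, R, r, wc, wl, ws).
def _demo_lbp(pars_demo):
    return pipeline_lbp('/path/to/images', '/path/to/lbp_out',
                        '/path/to/results', pars_demo, data_type='h5')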