예제 #1
0
def adding_noise_test(img,
                      model,
                      cats,
                      noise_steps,
                      perc_noise,
                      perc_std,
                      savedir=None):
    """Repeatedly classify an image while noise is progressively added.

    Parameters
    ----------
    img : ndarray
        Image to classify; a copy is taken so the caller's array is untouched.
    model :
        Classifier handed through to ``single_prediction_with_noise``.
    cats : list
        Category labels for the prediction histogram.
    noise_steps : int
        Number of noise/classify iterations to run.
    perc_noise : float
        Noise percentage applied per step.
    perc_std : float
        Standard deviation of the applied noise percentage.
    savedir : str, optional
        If given, each step's figure is saved to ``{savedir}/img_{i}.png``.
    """

    from Analysis.plot_rabani import show_image
    from Analysis.model_stats import preds_histogram

    fig, axes = plt.subplots(1, 2)
    fig.tight_layout(pad=3)

    img = img.copy()
    for i in range(noise_steps):
        for ax in axes:
            ax.clear()

        # NOTE(review): assumes single_prediction_with_noise mutates `img`
        # in place each call — confirm, otherwise every step shows the
        # same image.
        img_classifier = single_prediction_with_noise(img, model, perc_noise,
                                                      perc_std)

        show_image(img, axis=axes[0])
        preds_histogram(img_classifier.cnn_preds, cats, axis=axes[1])

        if savedir:
            plt.savefig(f"{savedir}/img_{i}.png")
예제 #2
0
    def _plot_denoising(self, orig_image, denoised_image, savedir=None):
        """Plot the original and denoised images side by side, optionally saving.

        Parameters
        ----------
        orig_image : ndarray
            Image before denoising.
        denoised_image : ndarray
            Majority-vote denoised image.
        savedir : str, optional
            If given, the figure is saved under ``{savedir}/denoised/``.
        """
        fig, axs = plt.subplots(1, 2)
        # Title records the source file plus any recorded failure reasons
        fig.suptitle(
            f"{os.path.basename(self.filepath)} - {self.fail_reasons}",
            fontsize=5)

        show_image(orig_image, axs[0], "Original")
        # show_image(denoised_image[0, :, :, 0], axs[1], "Denoised Subsection")
        show_image(denoised_image, axs[1], "Majority Vote")

        if savedir:
            # NOTE(review): `filename` is computed but never used below —
            # the save path looks like it was meant to interpolate it;
            # confirm against the original (possibly redacted) f-string.
            filename = os.path.basename(self.filepath)[:-4]
            # if self.fail_reasons:
            fig.savefig(f"{savedir}/denoised/denoise_(unknown).png", dpi=300)
예제 #3
0
def adding_noise_euler_test(
        num_steps,
        perc_noise,
        save=True,
        savedir="/home/mltest1/tmp/pycharm_project_883/Data/Plots/euler_noise_comp"):
    """Track the normalised Euler number of a simulated Rabani image as
    speckle noise is progressively added.

    Parameters
    ----------
    num_steps : int
        Number of noise-addition steps.
    perc_noise : float
        Fraction of pixels replaced with noise at each step.
    save : bool, optional
        If True, save the figure after every step.
    savedir : str, optional
        Directory the per-step figures are written to. Defaults to the
        previously hard-coded path, so existing callers are unaffected.
    """
    from Rabani_Simulation.rabani import rabani_single
    from Models.h5_iterator import h5RabaniDataGenerator
    from Analysis.plot_rabani import show_image
    from skimage import measure
    from matplotlib.ticker import PercentFormatter

    # Generate a single Rabani simulation to degrade
    img, _ = rabani_single(kT=0.12,
                           mu=2.9,
                           MR=1,
                           C=0.3,
                           e_nl=1.5,
                           e_nn=2,
                           L=200,
                           MCS_max=5000,
                           early_stop=True)

    # Set up figure: image on the left, Euler-number trace on the right
    fig, axs = plt.subplots(1, 2)
    lims = [-0.00025, -0.001, -0.01, -0.03, -0.04]  # reference levels
    axs[1].set_xlim(0, num_steps * perc_noise)
    axs[1].set_ylim(lims[-1], 0)
    for lim in lims:
        axs[1].axhline(lim, color='k', linestyle='--')
    axs[1].set_xlabel("Percentage Speckle Noise")
    axs[1].xaxis.set_major_formatter(PercentFormatter(xmax=1))
    axs[1].set_ylabel("Euler Number")

    # Continuously calculate Euler number while adding speckle noise
    for i in range(num_steps):
        # Euler number of the non-zero phase, normalised by particle count
        region = measure.regionprops((img != 0) + 1)[0]
        euler_num = region["euler_number"] / np.sum(img == 2)
        axs[1].plot(i * perc_noise, euler_num, 'rx')

        show_image(img, axis=axs[0])

        # Degrade the image before the next measurement
        img = h5RabaniDataGenerator.speckle_noise(img,
                                                  perc_noise,
                                                  perc_std=None,
                                                  randomness="batchwise",
                                                  num_uniques=4,
                                                  scaling=False)

        if save:
            plt.savefig(f"{savedir}/{i}.png")
예제 #4
0
def test_minkowski_scale_invariance(img, stride=8, max_subimgs=20):
    """Test how the normalised Minkowski statistics vary with window size.

    For every other window size from 1 to ``len(img) - 1``, up to
    `max_subimgs` random sub-images are sampled; the mean and standard
    deviation of their normalised area (SIA), perimeter (SIP) and Euler
    number (SIE) are plotted against window size.

    Parameters
    ----------
    img : ndarray
        Square image to sub-sample.
    stride : int, optional
        Stride between sub-image windows.
    max_subimgs : int, optional
        Maximum number of random sub-images per window size.
    """
    xaxis = np.arange(1, len(img))
    num_jumps = int((len(img)) / stride)
    # One row per window size in xaxis; columns hold per-subimage samples
    SIA = np.zeros((len(img) - 1, num_jumps**2))
    SIP = np.zeros((len(img) - 1, num_jumps**2))
    SIE = np.zeros((len(img) - 1, num_jumps**2))

    # NaN-fill so skipped window sizes / unused columns drop out of nanmean
    SIA[:] = np.nan
    SIP[:] = np.nan
    SIE[:] = np.nan

    # For each window size, make sub-images
    for i in tqdm(xaxis[::2]):
        sub_imgs = ImageClassifier._wrap_image_to_tensorflow(
            img=img, network_img_size=i, stride=stride)[:, :, :, 0]

        # For each sub-image, calculate normalised stats
        rand_inds = np.random.choice(len(sub_imgs),
                                     replace=False,
                                     size=np.min((max_subimgs, len(sub_imgs))))
        rand_sub_imgs = sub_imgs[rand_inds, :, :]

        for j, sub_img in enumerate(rand_sub_imgs):
            # Row i-1 corresponds to window size i (xaxis starts at 1).
            # Indexing with i directly overran the final row for even
            # len(img) and misaligned every row against the plot x-axis.
            SIA[i - 1, j], SIP[i - 1, j], SIE[
                i - 1, j] = calculate_normalised_stats(sub_img)

    # Take means, ignoring the NaN placeholders
    SIA_mean = np.nanmean(SIA, axis=1)
    SIA_std = np.nanstd(SIA, axis=1)
    SIP_mean = np.nanmean(SIP, axis=1)
    SIP_std = np.nanstd(SIP, axis=1)
    SIE_mean = np.nanmean(SIE, axis=1)
    SIE_std = np.nanstd(SIE, axis=1)

    # Plot
    fig, axs = plt.subplots(1, 4, sharex=True, figsize=(1280 / 96, 480 / 96))

    show_image(img, axis=axs[0])
    axs[1].errorbar(xaxis, SIA_mean, SIA_std, fmt='rx')
    axs[2].errorbar(xaxis, SIP_mean, SIP_std, fmt='rx')
    axs[3].errorbar(xaxis, SIE_mean, SIE_std, fmt='rx')

    axs[1].set_ylabel("SIA")
    axs[2].set_ylabel("SIP")
    axs[3].set_ylabel("SIE")
    axs[2].set_xlabel("Window Size")

    fig.tight_layout()
예제 #5
0
def compare_classifications(df):
    """Show each file's binarised image, titled with its manual ("Steff")
    regime and CNN classification, on a square grid.

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain "File Path", "Regime" and "CNN Classification" columns.
    """
    from Filters.screening import FileFilter

    # Ceiling so every row of df gets an axis; int(sqrt(n)) alone
    # under-allocated the grid for non-square lengths (IndexError below).
    ngridpts = int(np.ceil(np.sqrt(len(df))))
    # squeeze=False guarantees an ndarray of axes even for a 1x1 grid
    fig, axs = plt.subplots(ngridpts, ngridpts, squeeze=False)
    axs = axs.reshape((-1, ))

    for i, file in enumerate(df.iterrows()):
        filterer = FileFilter()
        filterer.assess_file(file[1]["File Path"])

        show_image(
            filterer.binarized_data,
            axis=axs[i],
            title=
            f"Steff = {file[1]['Regime']} | CNN = {file[1]['CNN Classification']}"
        )
예제 #6
0
def minkowski_stability_test(filepath, window_size, save):
    """Check how stable the normalised Minkowski measures are across
    sub-windows of a single scan.

    The scan at `filepath` is binarised, tiled into `window_size` windows
    in zig-zag order, and each window's normalised Euler number and
    perimeter are plotted against window index. If `save` is truthy the
    figure is written to a fixed plots directory and closed.
    """
    from Filters.screening import FileFilter
    from skimage import measure
    from Analysis.plot_rabani import show_image

    filterer = FileFilter()
    _, _, _, _, _, data, _, _ = filterer._load_and_preprocess(
        filepath=filepath, threshold_method="multiotsu", nbins=1000)
    wrapped_arr = filterer._wrap_image_to_tensorflow(data,
                                                     window_size,
                                                     zigzag=True)

    # One panel for the full image, two for the per-window statistics
    fig, axs = plt.subplots(1, 3, figsize=(1000 / 96, 480 / 96))

    # Reference levels on the Euler-number panel
    for lim in [-0.00025, -0.001, -0.01, -0.03, -0.04]:
        axs[1].axhline(lim, color='k', linestyle='--')

    axs[2].set_ylim(0, 1)

    for ax, ylabel in zip(
            axs[1:], ("Normalised Euler Number", "Normalised Perimeter")):
        ax.set_xlabel("Subimage Number")
        ax.set_ylabel(ylabel)

    show_image(data, axis=axs[0])
    for idx, window in enumerate(wrapped_arr):
        props = measure.regionprops((window[:, :, 0] != 0) + 1)[0]
        # Normalise both measures by the number of particle pixels
        num_particles = np.sum(window == 1)
        axs[1].plot(idx, props["euler_number"] / num_particles, 'rx')
        axs[2].plot(idx, props["perimeter"] / num_particles, 'rx')

    plt.tight_layout()

    if save:
        savedir = '/'.join(filepath.split('/')[-2:])
        plt.savefig(
            f"/home/mltest1/tmp/pycharm_project_883/Data/Plots/minkowski_stability/{savedir}"
        )
        plt.close()
예제 #7
0
def everything_test(filepath, window_size, num_steps, perc_noise):
    """Plot per-subimage Minkowski statistics of a scan, and their mean/std
    as progressively more speckle noise is applied to the subimages.

    Parameters
    ----------
    filepath : str
        Scan file to load and binarise.
    window_size : int
        Side length of the sub-windows the image is tiled into.
    num_steps : int
        Number of noise-addition steps.
    perc_noise : float
        Fraction of pixels replaced with noise at each step.
    """
    from Filters.screening import FileFilter
    from skimage import measure
    from Analysis.plot_rabani import show_image
    from Models.h5_iterator import h5RabaniDataGenerator
    from matplotlib.ticker import PercentFormatter
    from tqdm import tqdm

    # Load in file, binarise it, and tile into network-sized windows
    filterer = FileFilter()
    _, _, _, _, _, data, _, _ = filterer._load_and_preprocess(
        filepath=filepath, threshold_method="multiotsu", nbins=1000)
    wrapped_arr = filterer._wrap_image_to_tensorflow(data,
                                                     window_size,
                                                     zigzag=True)
    # Separate copy so the noise loop leaves the clean windows untouched
    wrapped_arr_for_noise = wrapped_arr.copy()

    # Calculate stats as function of window num
    euler_nums = np.zeros(len(wrapped_arr))
    perimeters = np.zeros(len(wrapped_arr))
    for i, img in enumerate(tqdm(wrapped_arr)):
        # NOTE(review): regionprops index [1] selects the second labelled
        # region, but the sibling minkowski_stability_test uses [0] —
        # confirm which label is intended (and that two regions always
        # exist, otherwise this raises IndexError).
        region = measure.regionprops((img[:, :, 0] != 0) + 1)[1]
        euler_nums[i] = region["euler_number"] / np.sum(img == 1)
        perimeters[i] = region["perimeter"] / np.sum(img == 1)

    # Calculate stats as function of noise
    euler_nums_noise = np.zeros(num_steps)
    perimeters_noise = np.zeros(num_steps)
    euler_nums_noise_std = np.zeros(num_steps)
    perimeters_noise_std = np.zeros(num_steps)
    for i in tqdm(range(num_steps)):
        # Per-window stats at the current noise level
        euler_nums_noise_step = np.zeros(len(wrapped_arr))
        perimeters_noise_step = np.zeros(len(wrapped_arr))
        for j, img in enumerate(wrapped_arr_for_noise):
            region = measure.regionprops((img[:, :, 0] != 0) + 1)[1]
            euler_nums_noise_step[j] = region["euler_number"] / np.sum(
                img == 1)
            perimeters_noise_step[j] = region["perimeter"] / np.sum(img == 1)

        euler_nums_noise[i] = np.mean(euler_nums_noise_step)
        euler_nums_noise_std[i] = np.std(euler_nums_noise_step)
        perimeters_noise[i] = np.mean(perimeters_noise_step)
        perimeters_noise_std[i] = np.std(perimeters_noise_step)

        # Degrade the windows AFTER measuring, so step i's stats reflect
        # i prior applications of speckle noise
        wrapped_arr_for_noise = h5RabaniDataGenerator.speckle_noise(
            wrapped_arr_for_noise,
            perc_noise,
            perc_std=None,
            num_uniques=2,
            randomness="batchwise_flip",
            scaling=False)

    # Plot
    fig, axs = plt.subplots(1, 5, figsize=(1400 / 96, 500 / 96))

    # Plain loops instead of side-effect list comprehensions (idiom fix)
    for ax in axs[1:3]:
        ax.set_xlabel("Subimage Number")
    for ax in axs[3:]:
        ax.set_xlabel("% Speckle Noise")
        ax.xaxis.set_major_formatter(PercentFormatter(xmax=1))

    for ax in axs[1::2]:
        ax.set_ylabel("Normalised Euler Number")
    for ax in axs[2::2]:
        ax.set_ylabel("Normalised Perimeter")

    # Reference levels for the Euler-number panels
    lims = [-0.00025, -0.001, -0.01, -0.03, -0.04]
    for ax in axs[1::2]:
        for lim in lims:
            ax.axhline(lim, color='k', linestyle='--')

    show_image(data, axis=axs[0])
    axs[1].plot(euler_nums)
    axs[2].plot(perimeters)
    axs[3].errorbar(
        np.arange(num_steps) * perc_noise, euler_nums_noise,
        euler_nums_noise_std)
    axs[4].errorbar(
        np.arange(num_steps) * perc_noise, perimeters_noise,
        perimeters_noise_std)

    plt.tight_layout()
예제 #8
0
    Returns
    -------
    runs : ndarray
        (NxLxL) array of simulations
    m_all : ndarray
        1D array of length N showing the number of MC steps taken in each of the N simulations

    See Also
    --------
    Rabani_Simulation.gen_rabanis.RabaniSweeper
    """
    axis_steps = len(params)
    runs = np.zeros((axis_steps, int(params[0, 6]), int(params[0, 6])))
    m_all = np.zeros((axis_steps,))

    for i in prange(axis_steps):
        runs[i, :, :], m_all[i] = rabani_single(kT=float(params[i, 0]), mu=float(params[i, 1]),
                                                MR=int(params[i, 2]), C=float(params[i, 3]),
                                                e_nl=float(params[i, 4]), e_nn=float(params[i, 5]), L=int(params[i, 6]),
                                                MCS_max=int(params[i, 7]), early_stop=bool(params[i, 8]))

    return runs, m_all


if __name__ == '__main__':
    # Run a single simulation and display the result.
    # for MCS in np.linspace(100, 2000, 5):
    img, num_steps = rabani_single(kT=0.35,
                                   mu=3,
                                   MR=1,
                                   C=0.4,
                                   e_nl=1.5,
                                   e_nn=2,
                                   L=128,
                                   MCS_max=300,
                                   early_stop=False)
    show_image(img)
예제 #9
0
    def _plot(self,
              data=None,
              median_data=None,
              flattened_data=None,
              binarized_data=None,
              binarized_data_for_plotting=None,
              savedir=None):
        """Draw a 3x4 diagnostic grid for the current file, optionally saving it.

        Only the panels whose data is supplied are drawn; prediction panels
        are drawn from ``self.image_classifier`` when it is set.

        Parameters
        ----------
        data : ndarray, optional
            Raw scan for the 'Original Image' panel.
        median_data : ndarray, optional
            Median-aligned scan.
        flattened_data : ndarray, optional
            Planar-flattened scan.
        binarized_data : ndarray, optional
            Binarised scan; also written to disk on success when saving.
        binarized_data_for_plotting : sequence, optional
            Five items: thresholds, pixel counts, gaussian-gradient pixel
            counts, peak indices and trough indices of the threshold curve.
        savedir : str, optional
            Root directory to save into; failures go to ``{savedir}/fail``,
            successes to a subdirectory named after the CNN classification.
        """
        fig, axs = plt.subplots(3, 4)
        # Figure title records the source file and any failure reasons
        fig.suptitle(
            f"{os.path.basename(self.filepath)} - {self.fail_reasons}",
            fontsize=5)
        fig.tight_layout(pad=3)
        if data is not None:
            axs[0, 0].imshow(data, cmap='RdGy')
            axs[0, 0].set_title('Original Image')
            axs[0, 0].axis("off")
        if median_data is not None:
            axs[0, 1].imshow(median_data, cmap='RdGy')
            axs[0, 1].set_title('Median Aligned')
            axs[0, 1].axis("off")
        if flattened_data is not None:
            axs[0, 2].imshow(flattened_data,
                             extent=(0, self.image_res, 0, self.image_res),
                             cmap='RdGy')
            axs[0, 2].set_title('Planar Flattened')
            axs[0, 2].axis("off")
        if binarized_data_for_plotting is not None:
            # Unpack the thresholding diagnostics
            thres = binarized_data_for_plotting[0]
            pix = binarized_data_for_plotting[1]
            pix_gauss_grad = binarized_data_for_plotting[2]
            peaks = binarized_data_for_plotting[3]
            troughs = binarized_data_for_plotting[4]

            axs[1, 0].plot(thres, pix_gauss_grad)
            axs[1, 0].scatter(thres[peaks], pix_gauss_grad[peaks])
            axs[1, 0].scatter(thres[troughs],
                              pix_gauss_grad[troughs],
                              marker='x')
            axs[1, 0].set_xlabel('Threshold')
            axs[1, 0].set_ylabel('Pixels')
            axs[1, 0].set_title('Thresholding Levels')
        if binarized_data is not None:
            show_image(binarized_data.astype(int), axis=axs[0, 3])
            axs[0, 3].set_title('Binarized')
        if self.image_classifier is not None:
            if self.image_classifier.cnn_preds is not None:
                preds_histogram(self.image_classifier.cnn_preds,
                                self.cats,
                                axis=axs[1, 1])
                preds_pie(self.image_classifier.cnn_preds,
                          self.cats,
                          axis=axs[1, 2])
                axs[1, 1].set_title('Network Predictions')
                axs[1, 1].set_ylim(0, 15)
                axs[1, 2].set_title('Network Predictions')
            if self.image_classifier.euler_preds is not None:
                # Euler classifier may also return a "none" category
                preds_pie(self.image_classifier.euler_preds,
                          self.cats + ["none"],
                          axis=axs[1, 3])
                axs[1, 3].set_title('Euler Predictions')
            if self.image_classifier.minkowski_preds is not None:
                preds_histogram(self.image_classifier.minkowski_preds,
                                self.minkowski_cats,
                                axis=axs[2, 1])
                preds_pie(self.image_classifier.minkowski_preds,
                          self.minkowski_cats,
                          axis=axs[2, 2])
                axs[2, 1].set_title('Minkowski Predictions')
                axs[2, 2].set_title('Minkowski Predictions')

        if savedir:
            # NOTE(review): `filename` is computed but not used in the save
            # paths below — they look like they were meant to interpolate
            # it; confirm against the original (possibly redacted) f-strings.
            filename = os.path.basename(self.filepath)[:-4]
            if self.fail_reasons:
                fig.savefig(f"{savedir}/fail/stats_(unknown).png", dpi=300)
            else:
                fig.savefig(
                    f"{savedir}/{self.CNN_classification}/stats_(unknown).png",
                    dpi=300)
                plt.imsave(
                    f"{savedir}/{self.CNN_classification}/image_(unknown).png",
                    binarized_data,
                    cmap=cmap_rabani)