def set_context_frame_statistics(im, ctx):
    ctx["last_frame_stats"] = {}
    # Upper bound on acceptable noise levels; can be overridden via the context.
    excessive_noise_th = ctx["excessive_noise_th"] if "excessive_noise_th" in ctx else 0.1
    noise = estimate_sigma(im, multichannel=False, average_sigmas=True)
    ctx["last_frame_stats"]["noise"] = noise
    ctx["last_frame_stats"]["percentiles_509599"] = [round(p, 3) for p in np.percentile(im, [50, 95, 99])]
    perc = ctx["last_frame_stats"]["percentiles_509599"][-1]
    # The pseudo-entropy is a measure based on the summed 2D projection.
    analysis.log_pseudo_entropy(im, ctx)
    # Degeneracy checks
    if perc == 1.:
        ctx.log("Too much saturation, 99th percentile is 1.0 - marking frame as degenerate!", mtype="WARN")
        ctx["last_frame_stats"]["is_degenerate"] = True
    if noise > excessive_noise_th:
        ctx.log("noise level of {:.4f} is excessive - marking frame as degenerate!".format(noise), mtype="WARN")
        ctx["last_frame_stats"]["is_degenerate"] = True

    ctx.log_stats({"noise": ctx["last_frame_stats"]["noise"],
                   "degenerate": ctx.is_frame_degenerate,
                   "perc": perc,
                   "pseudo_ent": ctx["last_frame_stats"]["pseudo_ent"],
                   "2dhist99post": ctx["last_frame_stats"]["2dHisto_postnorm"][-1],
                   "2dhist95post": ctx["last_frame_stats"]["2dHisto_postnorm"][-2],
                   "2dhist99pre": ctx["last_frame_stats"]["2dHisto_prenorm"][-1],
                   "2dhist95pre": ctx["last_frame_stats"]["2dHisto_prenorm"][-2]})
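This snippet relies on an application-specific `ctx` object (dict-like, with `log`/`log_stats` methods and an `is_frame_degenerate` property) and an `analysis` module. A minimal stand-in for experimenting with the noise and percentile logic might look like the sketch below; every name here is hypothetical and not part of the original code.

import numpy as np
from skimage.restoration import estimate_sigma  # used by the function above

class FrameContext(dict):
    # Hypothetical stand-in for the ctx object used above.
    def log(self, msg, mtype="INFO"):
        print("[{}] {}".format(mtype, msg))

    def log_stats(self, stats):
        print(stats)

    @property
    def is_frame_degenerate(self):
        # Mirrors the "is_degenerate" flag set in last_frame_stats.
        return self.get("last_frame_stats", {}).get("is_degenerate", False)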
Example No. 2
def test_estimate_sigma_color():
    rstate = np.random.RandomState(1234)
    # astronaut image
    img = astro.copy()
    sigma = 0.1
    # add noise to astronaut
    img += sigma * rstate.standard_normal(img.shape)

    sigma_est = restoration.estimate_sigma(img, multichannel=True,
                                           average_sigmas=True)
    assert_almost_equal(sigma, sigma_est, decimal=2)

    sigma_list = restoration.estimate_sigma(img, multichannel=True,
                                            average_sigmas=False)
    assert_equal(len(sigma_list), img.shape[-1])
    assert_almost_equal(sigma_list[0], sigma_est, decimal=2)

    # default multichannel=False should raise a warning about last axis size
    assert_warns(UserWarning, restoration.estimate_sigma, img)
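Note that scikit-image 0.19 deprecated the `multichannel` argument in favour of `channel_axis`, so on recent versions the equivalent call is:

sigma_est = restoration.estimate_sigma(img, channel_axis=-1,
                                       average_sigmas=True)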
Example No. 3
def test_estimate_sigma_gray():
    rstate = np.random.RandomState(1234)
    # astronaut image
    img = astro_gray.copy()
    sigma = 0.1
    # add noise to astronaut
    img += sigma * rstate.standard_normal(img.shape)

    sigma_est = restoration.estimate_sigma(img, multichannel=False)
    assert_almost_equal(sigma, sigma_est, decimal=2)
Example No. 4
def test_estimate_sigma_masked_image():
    # Verify computation on an image with a large, noise-free border.
    # (zero regions will be masked out by _sigma_est_dwt to avoid returning
    #  sigma = 0)
    rstate = np.random.RandomState(1234)
    # uniform image
    img = np.zeros((128, 128))
    center_roi = (slice(32, 96), slice(32, 96))
    img[center_roi] = 0.8
    sigma = 0.1

    img[center_roi] += sigma * rstate.standard_normal(img[center_roi].shape)

    sigma_est = restoration.estimate_sigma(img, multichannel=False)
    assert_almost_equal(sigma, sigma_est, decimal=1)
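For context, `estimate_sigma` is based on Donoho and Johnstone's robust median estimator applied to the finest-scale wavelet detail coefficients, which is why constant (zero-detail) regions can be masked out as the comment above describes. A minimal sketch of the same idea with PyWavelets (using the db2 wavelet, as scikit-image does internally):

import numpy as np
import pywt

def mad_sigma(img):
    # Diagonal detail coefficients of a one-level 2D DWT.
    _, (_, _, detail) = pywt.dwt2(img, 'db2')
    detail = detail[detail != 0]  # ignore exactly-zero (flat) regions
    # sigma ~= median(|d|) / 0.6745, where 0.6745 is the 75th percentile
    # of the standard normal distribution.
    return np.median(np.abs(detail)) / 0.6745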
Example No. 5
import matplotlib.pyplot as plt

from skimage import data, img_as_float
from skimage.restoration import denoise_wavelet, estimate_sigma
from skimage.util import random_noise
from skimage.measure import compare_psnr


original = img_as_float(data.chelsea()[100:250, 50:300])

sigma = 0.12
noisy = random_noise(original, var=sigma**2)

fig, ax = plt.subplots(nrows=2, ncols=3, figsize=(8, 5),
                       sharex=True, sharey=True)

plt.gray()

# Estimate the average noise standard deviation across color channels.
sigma_est = estimate_sigma(noisy, multichannel=True, average_sigmas=True)
# Due to clipping in random_noise, the estimate will be a bit smaller than the
# specified sigma.
print("Estimated Gaussian noise standard deviation = {}".format(sigma_est))

im_bayes = denoise_wavelet(noisy, multichannel=True, convert2ycbcr=True,
                           method='BayesShrink', mode='soft')
im_visushrink = denoise_wavelet(noisy, multichannel=True, convert2ycbcr=True,
                                method='VisuShrink', mode='soft',
                                sigma=sigma_est)

# VisuShrink is designed to eliminate noise with high probability, but this
# results in a visually over-smooth appearance.  Repeat, specifying a reduction
# in the threshold by factors of 2 and 4.
im_visushrink2 = denoise_wavelet(noisy, multichannel=True, convert2ycbcr=True,
                                 method='VisuShrink', mode='soft',
                                 sigma=sigma_est / 2)
im_visushrink4 = denoise_wavelet(noisy, multichannel=True, convert2ycbcr=True,
                                 method='VisuShrink', mode='soft',
                                 sigma=sigma_est / 4)
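The gallery example this snippet is taken from goes on to compare the results by peak signal-to-noise ratio; roughly, using the `compare_psnr` imported above:

psnr_noisy = compare_psnr(original, noisy)
psnr_bayes = compare_psnr(original, im_bayes)
psnr_visushrink = compare_psnr(original, im_visushrink)
print("PSNR noisy / BayesShrink / VisuShrink: "
      "{:.2f} / {:.2f} / {:.2f}".format(psnr_noisy, psnr_bayes, psnr_visushrink))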
Example No. 6

import matplotlib.pyplot as plt
import numpy as np

from skimage import data, img_as_float
from skimage.restoration import denoise_nl_means, estimate_sigma
from skimage.measure import compare_psnr
from skimage.util import random_noise


astro = img_as_float(data.astronaut())
astro = astro[30:180, 150:300]

sigma = 0.08
noisy = random_noise(astro, var=sigma**2)

# estimate the noise standard deviation from the noisy image
sigma_est = np.mean(estimate_sigma(noisy, multichannel=True))
print("estimated noise standard deviation = {}".format(sigma_est))

patch_kw = dict(patch_size=5,      # 5x5 patches
                patch_distance=6,  # 13x13 search area
                multichannel=True)

# slow algorithm
denoise = denoise_nl_means(noisy, h=1.15 * sigma_est, fast_mode=False,
                           **patch_kw)

# slow algorithm, sigma provided
denoise2 = denoise_nl_means(noisy, h=0.8 * sigma_est, sigma=sigma_est,
                            fast_mode=False, **patch_kw)

# fast algorithm
denoise_fast = denoise_nl_means(noisy, h=0.8 * sigma_est, fast_mode=True,
                                **patch_kw)
Example No. 7

import matplotlib.pyplot as plt

from skimage import data, img_as_float
from skimage.restoration import denoise_wavelet, estimate_sigma
from skimage.util import random_noise


original = img_as_float(data.chelsea()[100:250, 50:300])

sigma = 0.12
noisy = random_noise(original, var=sigma**2)

fig, ax = plt.subplots(nrows=2,
                       ncols=3,
                       figsize=(8, 5),
                       sharex=True,
                       sharey=True)

plt.gray()

# Estimate the average noise standard deviation across color channels.
sigma_est = estimate_sigma(noisy, channel_axis=-1, average_sigmas=True)
# Due to clipping in random_noise, the estimate will be a bit smaller than the
# specified sigma.
print(f"Estimated Gaussian noise standard deviation = {sigma_est}")

im_bayes = denoise_wavelet(noisy,
                           channel_axis=-1,
                           convert2ycbcr=True,
                           method='BayesShrink',
                           mode='soft',
                           rescale_sigma=True)
im_visushrink = denoise_wavelet(noisy,
                                channel_axis=-1,
                                convert2ycbcr=True,
                                method='VisuShrink',
                                mode='soft',
                                sigma=sigma_est,
                                rescale_sigma=True)
Example No. 8

def motor_recon_met2(TE_array, path_to_data, path_to_mask, path_to_save_data,
                     TR, reg_method, reg_matrix, denoise, FA_method, FA_smooth,
                     myelin_T2, num_cores):
    # Load Data and Mask
    img = nib.load(path_to_data)
    data = img.get_fdata()  # get_data() is deprecated/removed in recent nibabel
    data = data.astype(np.float64, copy=False)

    img_mask = nib.load(path_to_mask)
    mask = img_mask.get_fdata()
    mask = mask.astype(np.int64, copy=False)

    print('--------- Data shape -----------------')
    nx, ny, nz, nt = data.shape
    print(data.shape)
    print('--------------------------------------')

    for c in range(nt):
        data[:, :, :, c] = np.squeeze(data[:, :, :, c]) * mask
    #end

    # Only for testing: selects a few slices
    #mask[:,:,40:-1] = 0
    #mask[:,:,0:35]  = 0

    nEchoes = TE_array.shape[0]
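    # Inter-echo spacing below assumes uniformly spaced echoes in TE_array.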
    tau = TE_array[1] - TE_array[0]

    fM = np.zeros((nx, ny, nz))
    fIE = fM.copy()
    fnT = fM.copy()
    fCSF = fM.copy()
    T2m = fM.copy()
    T2IE = fM.copy()
    T2nT = fM.copy()
    Ktotal = fM.copy()
    FA = fM.copy()
    FA_index = fM.copy()
    reg_param = fM.copy()
    NITERS = fM.copy()

    # ==============================================================================
    # Inital values for the dictionary

    if reg_method == 'T2SPARC':
        # Regularization matrix for the T2SPARC method of Junyu Guo et al. 2014,
        # "Multi-slice Myelin Water Imaging for Practical Clinical Applications
        # at 3.0 T": https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3568216/
        Npc = 96
    else:
        Npc = 60
    #end if
    # -----------
    T2m0 = 10.0
    T2mf = myelin_T2
    T2tf = 200.0
    T2csf = 2000.0

    T2s = np.logspace(math.log10(T2m0),
                      math.log10(T2csf),
                      num=Npc,
                      endpoint=True,
                      base=10.0)

    ind_m = T2s <= T2mf  # myelin
    ind_t = (T2s > T2mf) & (T2s <= T2tf)  # intra+extra
    ind_csf = T2s >= T2tf  # quasi free-water and csf

    # A constant T1 = 1000 ms is assumed for all compartments.
    T1s = 1000.0 * np.ones_like(T2s)

    # Create multi-dimensional dictionary with multiples flip_angles
    #N_alphas     = 91 # (steps = 1.0 degrees, from 90 to 180)
    if FA_method == 'spline':
        N_alphas = 91 * 3  # (steps = 0.333 degrees, from 90 to 180)
        #N_alphas     = 91*2 # (steps = 0.5 degrees, from 90 to 180)
        #N_alphas     = 91 # (steps = 1.0 degrees, from 90 to 180)
        alpha_values = np.linspace(90.0, 180.0, N_alphas)
        Dic_3D = create_Dic_3D(Npc, T2s, T1s, nEchoes, tau, alpha_values, TR)
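        # Dic_3D stacks one (nEchoes x Npc) kernel per candidate flip angle
        # along its third axis; it is indexed later as Dic_3D[:, :, FA_index].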
        #alpha_values_spline = np.round( np.linspace(90.0, 180.0, 8) )
        alpha_values_spline = np.linspace(90.0, 180.0, 15)
        Dic_3D_LR = create_Dic_3D(Npc, T2s, T1s, nEchoes, tau,
                                  alpha_values_spline, TR)
    #end

    if FA_method == 'brute-force':
        N_alphas = 91  # (steps = 1.0 degrees, from 90 to 180)
        alpha_values = np.linspace(90.0, 180.0, N_alphas)
        Dic_3D = create_Dic_3D(Npc, T2s, T1s, nEchoes, tau, alpha_values, TR)
    #end

    # Define regularization vectors for the L-curve method
    num_l_laplac = 50
    lambda_reg = np.zeros((num_l_laplac))
    # lambda_reg[1:] = np.logspace(math.log10(1e-8), math.log10(100.0), num=num_l_laplac-1, endpoint=True, base=10.0)
    lambda_reg[1:] = np.logspace(math.log10(1e-8),
                                 math.log10(10.0),
                                 num=num_l_laplac - 1,
                                 endpoint=True,
                                 base=10.0)
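    # lambda_reg[0] stays at zero, so the first candidate is the unregularized solution.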

    # --------------------------------------------------------------------------
    if reg_matrix == 'I':
        order = 0
        Laplac = create_Laplacian_matrix(Npc, order)
    elif reg_matrix == 'L1':
        order = 1
        Laplac = create_Laplacian_matrix(Npc, order)
    elif reg_matrix == 'L2':
        order = 2
        Laplac = create_Laplacian_matrix(Npc, order)
    elif reg_matrix == 'InvT2':
        # Regularization matrix for the method of Junyu Guo et al. 2014,
        # "Multi-slice Myelin Water Imaging for Practical Clinical Applications
        # at 3.0 T": https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3568216/
        T2s_mod = np.concatenate((np.array([T2s[0] - 1.0]),
                                  T2s[:-1]))  # shift by one: prepend T2s[0]-1, drop the last
        deltaT2 = T2s - T2s_mod
        deltaT2[0] = deltaT2[1]
        Laplac = np.diag(1. / deltaT2)
    else:
        print('Error: Wrong reg_matrix option!')
        sys.exit()
    # end if

    # create 4D images
    f_sol_4D = np.zeros((nx, ny, nz, T2s.shape[0]))
    s_sol_4D = np.zeros((nx, ny, nz, nEchoes))

    data[data < 0.0] = 0.0  # correct artifacts

    number_of_cores = multiprocessing.cpu_count()
    if num_cores == -1:
        num_cores = number_of_cores
        print('Using all CPUs: ', number_of_cores)
    else:
        print('Using ', num_cores, ' CPUs from ', number_of_cores)
    #end if

    #_______________________________________________________________________________
    #_______________________________ ESTIMATION ____________________________________
    #_______________________________________________________________________________

    if denoise == 'TV':
        print('Step #1: Denoising using Total Variation:')
        for voxelt in progressbar.progressbar(range(nt),
                                              redirect_stdout=True):
            print(voxelt + 1, ' volumes processed')
            data_vol = np.squeeze(data[:, :, :, voxelt])
            sigma_est = np.mean(estimate_sigma(data_vol, multichannel=False))
            # Alternative: weight=1.0 * sigma_est
            data[:, :, :, voxelt] = denoise_tv_chambolle(data_vol,
                                                         weight=2.0 * sigma_est,
                                                         eps=0.0002,
                                                         n_iter_max=200,
                                                         multichannel=False)
        #end for
        outImg = nib.Nifti1Image(data, img.affine)
        nib.save(outImg, path_to_save_data + 'Data_denoised.nii.gz')
    elif denoise == 'NESMA':
        data_den = np.zeros_like(data)
        path_size = [6, 6, 6]  # real-size = 2*path_size + 1
        print('Step #1: Denoising using the NESMA filter:')
        for voxelx in progressbar.progressbar(range(nx),
                                              redirect_stdout=True):
            print(voxelx + 1, ' slices processed')
            min_x = np.max([voxelx - path_size[0], 0])
            max_x = np.min([voxelx + path_size[0], nx])
            for voxely in range(ny):
                min_y = np.max([voxely - path_size[1], 0])
                max_y = np.min([voxely + path_size[1], ny])
                for voxelz in range(nz):
                    if mask[voxelx, voxely, voxelz] == 1:
                        min_z = np.max([voxelz - path_size[2], 0])
                        max_z = np.min([voxelz + path_size[2], nz])
                        # -----------------------------------------
                        signal_path = data[min_x:max_x, min_y:max_y,
                                           min_z:max_z, :]
                        dim = signal_path.shape
                        signal_path2D = signal_path.reshape(
                            (np.prod(dim[0:3]), nt))
                        signal_xyz = data[voxelx, voxely, voxelz]
                        RE = 100 * np.sum(np.abs(signal_path2D - signal_xyz),
                                          axis=1) / np.sum(signal_xyz)
                        ind_valid = RE < 2.5  # (percent %)
                        data_den[voxelx, voxely,
                                 voxelz] = np.mean(signal_path2D[ind_valid, :],
                                                   axis=0)
                    #end if
                #end vz
            #end vy
        #end vx
        data = data_den.copy()
        del data_den
    #end if

    print('Step #2: Estimation of flip angles:')
    if FA_smooth == 'yes':
        # Smoothing the data for a robust B1 map estimation
        data_smooth = np.zeros((nx, ny, nz, nt))
        sig_g = 2.0
        for c in range(nt):
            data_smooth[:, :, :, c] = filt.gaussian_filter(
                np.squeeze(data[:, :, :, c]), sig_g, 0)
        #end for
    else:
        data_smooth = data.copy()
    #end if

    mean_T2_dist = 0
    for voxelz in progressbar.progressbar(range(nz), redirect_stdout=True):
        #print('Estimation of flip angles: slice', voxelz+1)
        print(voxelz + 1, ' slices processed')
        # Parallelization by rows: this is more efficient for computing a single or a few slices
        mask_slice = mask[:, :, voxelz]
        data_slice = data_smooth[:, :, voxelz, :]
        #FA_par = Parallel(n_jobs=num_cores)(delayed(fitting_slice_FA)(mask_slice[:, voxely], data_slice[:,voxely,:], nx, Dic_3D, alpha_values) for voxely in tqdm(range(ny)))
        if FA_method == 'brute-force':
            FA_par = Parallel(n_jobs=num_cores, backend='multiprocessing')(
                delayed(fitting_slice_FA_brute_force)
                (mask_slice[:, voxely], data_slice[:, voxely, :], nx, Dic_3D,
                 alpha_values) for voxely in range(ny))
            for voxely in range(ny):
                FA[:, voxely, voxelz] = FA_par[voxely][0]
                FA_index[:, voxely, voxelz] = FA_par[voxely][1]
                Ktotal[:, voxely, voxelz] = FA_par[voxely][2]
                mean_T2_dist = mean_T2_dist + FA_par[voxely][3]
            #end for voxely
        elif FA_method == 'spline':
            FA_par = Parallel(n_jobs=num_cores, backend='multiprocessing')(
                delayed(fitting_slice_FA_spline_method)
                (Dic_3D_LR, Dic_3D, data_slice[:, voxely, :],
                 mask_slice[:, voxely], alpha_values_spline, nx, alpha_values)
                for voxely in range(ny))
            for voxely in range(ny):
                FA[:, voxely, voxelz] = FA_par[voxely][0]
                FA_index[:, voxely, voxelz] = FA_par[voxely][1]
                Ktotal[:, voxely, voxelz] = FA_par[voxely][2]
                mean_T2_dist = mean_T2_dist + FA_par[voxely][3]
            #end voxely
        #end if
    #end voxelz
    del data_smooth
    # TO DO: (1) Estimate also the standard deviation of the spectrum and plot it
    #        (2) Estimate a different mean spectrum for each tissue type (using a segmentation from a T1, or any strategy to segment the raw MET2 data)
    mean_T2_dist = mean_T2_dist / np.sum(mean_T2_dist)

    total_signal = 0
    total_Kernel = 0
    nv = 0
    for voxelx in range(nx):
        for voxely in range(ny):
            for voxelz in range(nz):
                if mask[voxelx, voxely, voxelz] == 1:
                    total_signal = total_signal + data[voxelx, voxely,
                                                       voxelz, :]
                    # np.int was removed in recent NumPy; use the builtin int
                    ind_xyz = int(FA_index[voxelx, voxely, voxelz])
                    total_Kernel = total_Kernel + Dic_3D[:, :, ind_xyz]
                    nv = nv + 1.0
            #end vz
        #end vy
    #end vx
    total_Kernel = total_Kernel / nv
    total_signal = total_signal / nv
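    # Next: non-negative least squares on the mask-averaged problem, i.e.
    # solve min ||K f - s||_2 subject to f >= 0, where K is the mean
    # dictionary kernel and s the mean signal over the mask.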

    fmean1, SSE = nnls(total_Kernel, total_signal)
    dist_T2_mean1 = fmean1 / np.sum(fmean1)

    factor = 1.01  # smaller than 1.02 due to the low level of noise
    order = 0
    Id = create_Laplacian_matrix(Npc, order)
    fmean2, reg_opt2, k_est = nnls_x2(total_Kernel, total_signal, Id, factor)
    dist_T2_mean2 = fmean2 / np.sum(fmean2)

    # Save mean_T2_dist, which is the initial value for RUMBA
    fig = plt.figure('Showing results', figsize=(8, 8))
    ax0 = fig.add_subplot(1, 1, 1)
    im0 = plt.plot(T2s,
                   mean_T2_dist,
                   color='b',
                   label='Mean T2-dist from all voxels: NNLS')
    im1 = plt.plot(T2s,
                   dist_T2_mean1,
                   color='g',
                   label='T2-dist from mean signals: NNLS')
    im2 = plt.plot(T2s,
                   dist_T2_mean2,
                   color='r',
                   label='T2-dist from mean signals: NNLS-X2-I')

    ax0.set_xscale('log')
    plt.axvline(x=40.0, color='k', linestyle='--', ymin=0)
    plt.title('Mean spectrum', fontsize=18)
    plt.xlabel('T2', fontsize=18)
    plt.ylabel('Intensity', fontsize=18)
    ax0.set_xlim(T2s[0], T2s[-1])
    ax0.set_ylim(0, np.max(mean_T2_dist) * 1.2)
    ax0.tick_params(axis='both', which='major', labelsize=16)
    ax0.tick_params(axis='both', which='minor', labelsize=14)
    ax0.set_yticks([])
    plt.legend()
    plt.savefig(path_to_save_data + 'Mean_spectrum_unitial_iter.png', dpi=600)
    plt.close('all')
    # --------------------------------------------------------------------------

    print('Step #3: Estimation of T2 spectra:')
    for voxelz in progressbar.progressbar(range(nz), redirect_stdout=True):
        print(voxelz + 1, ' slices processed')
        # Parallelization by rows: this is more efficient for computing a single or a few slices
        mask_slice = mask[:, :, voxelz]
        data_slice = data[:, :, voxelz, :]
        FA_index_slice = FA_index[:, :, voxelz]
        #T2_par = Parallel(n_jobs=num_cores, backend='multiprocessing')(delayed(fitting_slice_T2)(mask_slice[:, voxely], data_slice[:,voxely,:], FA_index_slice[:, voxely], nx, Dic_3D, lambda_reg, alpha_values, T2s.shape[0], nEchoes, num_l_laplac, N_alphas, reg_method, Laplac1, Laplac2, Is, Laplac_mod, mean_T2_dist, Laplac2_cp_var, W_inv_deltaT2) for voxely in range(ny))
        T2_par = Parallel(n_jobs=num_cores, backend='multiprocessing')(
            delayed(fitting_slice_T2)
            (mask_slice[:, voxely], data_slice[:, voxely, :],
             FA_index_slice[:, voxely], nx, Dic_3D, lambda_reg, T2s.shape[0],
             nEchoes, reg_method, Laplac, mean_T2_dist)
            for voxely in range(ny))
        for voxely in range(ny):
            f_sol_4D[:, voxely, voxelz, :] = T2_par[voxely][0]
            s_sol_4D[:, voxely, voxelz, :] = T2_par[voxely][1]
            reg_param[:, voxely, voxelz] = T2_par[voxely][2]
        #end voxely
    #end voxelz

    print('Step #4: Estimation of quantitative metrics')
    logT2 = np.log(T2s)
    for voxelx in range(nx):
        for voxely in range(ny):
            for voxelz in range(nz):
                if mask[voxelx, voxely, voxelz] > 0.0:
                    M = data[voxelx, voxely, voxelz, :]
                    x_sol = f_sol_4D[voxelx, voxely, voxelz, :]
                    vt = np.sum(x_sol) + epsilon
                    x_sol = x_sol / vt
                    # fill matrices
                    # pdb.set_trace()
                    fM[voxelx, voxely, voxelz] = np.sum(x_sol[ind_m])
                    fIE[voxelx, voxely, voxelz] = np.sum(x_sol[ind_t])
                    fCSF[voxelx, voxely, voxelz] = np.sum(x_sol[ind_csf])
                    # ------ T2m
                    # Arithmetic mean:
                    # T2m[voxelx, voxely, voxelz] = np.sum(x_sol[ind_m] * T2s[ind_m]) / (np.sum(x_sol[ind_m]) + epsilon)
                    # Geometric mean: see Bjarnason TA. Proof that gmT2 is the reciprocal of gmR2. Concepts Magn Reson 2011; 38A: 128-131.
                    T2m[voxelx, voxely, voxelz] = np.exp(
                        np.sum(x_sol[ind_m] * logT2[ind_m]) /
                        (np.sum(x_sol[ind_m]) + epsilon))
                    # ------ T2IE
                    # Arithmetic mean:
                    # T2IE[voxelx, voxely, voxelz] = np.sum(x_sol[ind_t] * T2s[ind_t]) / (np.sum(x_sol[ind_t]) + epsilon)
                    # Geometric mean: see Bjarnason TA. Proof that gmT2 is the reciprocal of gmR2. Concepts Magn Reson 2011; 38A: 128-131.
                    T2IE[voxelx, voxely, voxelz] = np.exp(
                        np.sum(x_sol[ind_t] * logT2[ind_t]) /
                        (np.sum(x_sol[ind_t]) + epsilon))
                    Ktotal[voxelx, voxely, voxelz] = vt
                # end if
            #end for z
        # end for y
    # end for x

    # -------------------------- Save all datasets -----------------------------
    outImg = nib.Nifti1Image(fM, img.affine)
    nib.save(outImg, path_to_save_data + 'MWF.nii.gz')

    outImg = nib.Nifti1Image(fIE, img.affine)
    nib.save(outImg, path_to_save_data + 'IEWF.nii.gz')

    outImg = nib.Nifti1Image(fCSF, img.affine)
    nib.save(outImg, path_to_save_data + 'FWF.nii.gz')

    outImg = nib.Nifti1Image(T2m, img.affine)
    nib.save(outImg, path_to_save_data + 'T2_M.nii.gz')

    outImg = nib.Nifti1Image(T2IE, img.affine)
    nib.save(outImg, path_to_save_data + 'T2_IE.nii.gz')

    outImg = nib.Nifti1Image(Ktotal, img.affine)
    nib.save(outImg, path_to_save_data + 'TWC.nii.gz')

    outImg = nib.Nifti1Image(FA, img.affine)
    nib.save(outImg, path_to_save_data + 'FA.nii.gz')

    outImg = nib.Nifti1Image(f_sol_4D, img.affine)
    nib.save(outImg, path_to_save_data + 'fsol_4D.nii.gz')

    outImg = nib.Nifti1Image(s_sol_4D, img.affine)
    nib.save(outImg, path_to_save_data + 'Est_Signal.nii.gz')

    outImg = nib.Nifti1Image(reg_param, img.affine)
    nib.save(outImg, path_to_save_data + 'reg_param.nii.gz')
    print('Done!')
Example No. 9
def image_noise(path=None, image=None):
    # Convert to an OpenCV image object; note that `image == None` would
    # fail on ndarrays, so identity must be tested with `is None`.
    if image is None:
        image = cv2.imread(path)
    return estimate_sigma(image, multichannel=True, average_sigmas=True)
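Since `cv2.imread` returns a BGR `uint8` array, the estimate here is in 0-255 intensity units; converting to float first puts it on the 0-1 scale used by the skimage examples above. A small sketch of that variant (`image_noise_01` is a hypothetical name):

from skimage.util import img_as_float

def image_noise_01(path=None, image=None):
    # Same as image_noise, but rescales intensities to [0, 1] first.
    if image is None:
        image = cv2.imread(path)
    return estimate_sigma(img_as_float(image), multichannel=True,
                          average_sigmas=True)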
Example No. 10
def estimate_noise_sd(self):
    # Lazily estimate and cache the image noise standard deviation.
    if self._noise_sd < 0.0:
        self._noise_sd = estimate_sigma(self._image)
    return self._noise_sd
Example No. 11
import cv2
import numpy as np
from skimage.restoration import estimate_sigma
from sklearn.cluster import KMeans

img = cv2.imread('..//images//tes.jpg', 0)  # 0 = load as grayscale

blocks = []
variances = []
imgheight, imgwidth = img.shape[0:2]  # OpenCV arrays are (rows, cols)
blockSize = 64
print(img.shape)
# Break the image into NxN blocks, N = blockSize. NumPy slicing clips at the
# array borders automatically, so edge blocks are simply smaller.
for i in range(0, imgheight, blockSize):
    for j in range(0, imgwidth, blockSize):
        a = img[i:i + blockSize, j:j + blockSize]

        sigma = estimate_sigma(a, multichannel=False, average_sigmas=True)
        blocks.append(a)
        variances.append([sigma])

print(variances)
kmeans = KMeans(n_clusters=2, random_state=0).fit(variances)
center1, center2 = kmeans.cluster_centers_
sigma = estimate_sigma(img, multichannel=False, average_sigmas=True)
print(sigma)
print(center1, center2)
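The two cluster centres separate low-variance (smooth) blocks from high-variance (noisy or textured) ones; to see how the blocks were assigned, one can read the fitted labels, e.g.:

labels = kmeans.labels_  # one label (0 or 1) per block, same order as `blocks`
noisier = int(np.argmax(kmeans.cluster_centers_))  # index of the higher-sigma cluster
print(sum(labels == noisier), 'of', len(blocks), 'blocks fall in the noisier cluster')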