Example no. 1
    def __init__(self, is_stationary, sqrt_beta, order_number, n_samples, pcn,
                 init_sample_sym):
        self.is_stationary = is_stationary
        self.sqrt_beta = sqrt_beta
        self.order_number = order_number
        self.n_samples = n_samples
        self.pcn = pcn

        zero_compl_dummy = cp.zeros(self.pcn.fourier.basis_number_2D_ravel,
                                    dtype=cp.complex64)
        ones_compl_dummy = cp.ones(self.pcn.fourier.basis_number_2D_ravel,
                                   dtype=cp.complex64)

        self.stdev = ones_compl_dummy
        self.stdev_sym = util.symmetrize(self.stdev)
        self.samples_history = np.empty(
            (self.n_samples, self.pcn.fourier.basis_number_2D_ravel),
            dtype=np.complex64)

        self.LMat = Lmatrix_2D(self.pcn.fourier, self.sqrt_beta)
        self.current_noise_sample = self.pcn.random_gen.construct_w()  # noise sample is always symmetric
        self.new_noise_sample = self.current_noise_sample.copy()

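        # Stationary layers keep the supplied initial sample; non-stationary layers
        # draw an initial sample by solving L x = w with the current precision factor L.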
        if self.is_stationary:
            self.new_sample_sym = init_sample_sym
            self.new_sample = init_sample_sym[
                self.pcn.fourier.basis_number_2D_ravel - 1:]
            self.current_sample = self.new_sample.copy()
            self.current_sample_sym = self.new_sample_sym.copy()

            self.new_sample_scaled_norm = 0
            self.new_log_L_det = 0
            # Numba needs this initialization; otherwise it will not compile.
            self.current_sample_scaled_norm = 0
            self.current_log_L_det = 0

        else:
            init_sample = init_sample_sym[
                self.pcn.fourier.basis_number_2D_ravel - 1:]
            self.LMat.construct_from(init_sample)
            self.LMat.set_current_L_to_latest()
            self.new_sample_sym = cp.linalg.solve(
                self.LMat.current_L, self.pcn.random_gen.construct_w())
            self.new_sample = self.new_sample_sym[
                self.pcn.fourier.basis_number_2D_ravel - 1:]
            self.new_sample_scaled_norm = util.norm2(
                self.LMat.current_L @ self.new_sample_sym)  # TODO: modify this
            self.new_log_L_det = self.LMat.logDet(True)  # TODO: modify this
            # Numba needs this initialization; otherwise it will not compile.
            self.current_sample = init_sample.copy()
            self.current_sample_sym = self.new_sample_sym.copy()
            self.current_sample_scaled_norm = self.new_sample_scaled_norm
            self.current_log_L_det = self.new_log_L_det

        # self.update_current_sample()
        self.i_record = 0
Example no. 2
    def preinit(self, is_stationary, sqrt_beta, order_number, n_samples, pcn,
                init_sample_sym):
        self.is_stationary = is_stationary
        self.sqrt_beta = sqrt_beta
        self.order_number = order_number
        self.n_samples = n_samples
        self.pcn = pcn

        # zero_compl_dummy = cp.zeros(self.pcn.fourier.basis_number_2D_ravel, dtype=cp.complex64)
        ones_compl_dummy = cp.ones(self.pcn.fourier.basis_number_2D_ravel, dtype=cp.complex64)

        self.stdev = ones_compl_dummy
        self.stdev_sym = util.symmetrize(self.stdev)
        self.samples_history = np.empty((self.n_samples, self.pcn.fourier.basis_number_2D_ravel), dtype=np.complex64)
        self.current_noise_sample = self.pcn.random_gen.construct_w()  # noise sample is always symmetric
        self.new_noise_sample = self.current_noise_sample.copy()
        self.i_record = 0
Example no. 3
def initialize_from_folder(target_folder, sim, sequence_no):
    # TODO: hard-coded relative path; make this configurable
    relative_path = pathlib.Path("/scratch/work/emzirm1/SimulationResult")
    init_folder = relative_path / target_folder
    file_name = 'result_{}.hdf5'.format(sequence_no)
    init_file_path = init_folder / file_name
    if not init_file_path.exists():
        initialize_using_FBP(sim)
    else:

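        # Resume the chain: set each layer's current state to the last recorded sample in the file.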
        with h5py.File(init_file_path, mode='r') as file:
            n_layers = file['n_layers'][()]
            sim.pcn.beta = file['pcn/beta'][()]
            for i in range(n_layers):
                samples_history = file['Layers {}/samples_history'.format(i)][()]
                init_Sym = util.symmetrize(cp.asarray(samples_history[-1]))
                del samples_history
                sim.Layers[i].current_sample_sym = init_Sym
                sim.Layers[i].current_sample = sim.Layers[i].current_sample_sym[
                    sim.fourier.basis_number_2D_ravel - 1:]
                sim.Layers[i].record_sample()
Example no. 4
def _process_data(samples_history, u_samples_history, n, n_ext, t_start, t_end,
                  target_image, corrupted_image, burn_percentage, isSinogram,
                  sinogram, theta, fbp, SimulationResult_dir, result_file,
                  cmap=plt.cm.seismic_r):
    burn_start_index = int(0.01*burn_percentage*u_samples_history.shape[0])
    
    #initial conditions
    samples_init = samples_history[0,:]

    #change
    u_samples_history = u_samples_history[burn_start_index:,:]
    samples_history = samples_history[burn_start_index:,:]
    N = u_samples_history.shape[0]
    
    #initial condition
    vF_init = util.symmetrize(cp.asarray(samples_init)).reshape(2*n-1,2*n-1,order=imc.ORDER)
    # vF_init = vF_init.conj()
    

    vF_mean = util.symmetrize(cp.asarray(np.mean(samples_history,axis=0)))
    vF_stdev = util.symmetrize(cp.asarray(np.std(samples_history,axis=0)))
    vF_abs_stdev = util.symmetrize(cp.asarray(np.std(np.abs(samples_history),axis=0)))
   
    
    fourier = imc.FourierAnalysis_2D(n,n_ext,t_start,t_end)
    sL2 = util.sigmasLancosTwo(int(n))
    
    # if isSinogram:
    #     vF_init = util.symmetrize_2D(fourier.rfft2(cp.asarray(fbp,dtype=cp.float32)))
    
    
#    if not isSinogram:
    vForiginal = util.symmetrize_2D(fourier.rfft2(cp.array(target_image)))
    reconstructed_image_original = fourier.irfft2(vForiginal[:,n-1:])
    reconstructed_image_init = fourier.irfft2(vF_init[:,n-1:])
    
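    # Welford running mean/variance of the reconstructed image v over all post burn-in samples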
    samples_history_cp = cp.asarray(samples_history)
    v_image_count=0
    v_image_M = cp.zeros_like(reconstructed_image_original)
    v_image_M2 = cp.zeros_like(reconstructed_image_original)
    v_image_aggregate = (v_image_count,v_image_M,v_image_M2)
    for i in range(N):
        vF = util.symmetrize(samples_history_cp[i,:]).reshape(2*n-1,2*n-1,order=imc.ORDER)
        v_temp = fourier.irfft2(vF[:,n-1:])
        v_image_aggregate = util.updateWelford(v_image_aggregate,v_temp)
        
    
    
    v_image_mean,v_image_var,v_image_s_var = util.finalizeWelford(v_image_aggregate)
    
    #TODO: This is sign of wrong processing, Remove this
    # if isSinogram:
    #     reconstructed_image_init = cp.fliplr(reconstructed_image_init)
    #     v_image_mean = cp.fliplr(v_image_mean)
    #     v_image_s_var = cp.fliplr(v_image_s_var)
    
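    # Circular mask: keep only pixels whose normalized coordinates lie inside the unit disc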
    mask = cp.zeros_like(reconstructed_image_original)
    r = (mask.shape[0]+1)//2
    for i in range(mask.shape[0]):
        for j in range(mask.shape[1]):
            x = 2*(i - r)/mask.shape[0]
            y = 2*(j - r)/mask.shape[1]
            if (x**2+y**2 < 1):
                mask[i,j]=1.
    
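    # Welford statistics for the latent field u and for the length-scale field ell = exp(u)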
    u_samples_history_cp = cp.asarray(u_samples_history)
    u_image = cp.zeros_like(v_image_mean)
    # ell_image = cp.zeros_like(v_image_mean)
    
    u_image_count=0
    u_image_M = cp.zeros_like(u_image)
    u_image_M2 = cp.zeros_like(u_image)
    u_image_aggregate = (u_image_count,u_image_M,u_image_M2)
    ell_image_count=0
    ell_image_M = cp.zeros_like(u_image)
    ell_image_M2 = cp.zeros_like(u_image)
    ell_image_aggregate = (ell_image_count,ell_image_M,ell_image_M2)
    for i in range(N):
        uF = util.symmetrize(u_samples_history_cp[i,:]).reshape(2*n-1,2*n-1,order=imc.ORDER)
        u_temp = fourier.irfft2(uF[:,n-1:])
        u_image_aggregate = util.updateWelford(u_image_aggregate,u_temp)
        ell_temp = cp.exp(u_temp)
        ell_image_aggregate = util.updateWelford(ell_image_aggregate, ell_temp)
    u_image_mean,u_image_var,u_image_s_var = util.finalizeWelford(u_image_aggregate)
    ell_image_mean,ell_image_var,ell_image_s_var = util.finalizeWelford(ell_image_aggregate)

    
    # if isSinogram:
        # u_image_mean = cp.flipud(u_image_mean) #cp.rot90(cp.fft.fftshift(u_image),1) 
        # u_image_var = cp.flipud(u_image_var) #cp.rot90(cp.fft.fftshift(u_image),1) 
        # ell_image_mean = cp.flipud(ell_image_mean)# cp.rot90(cp.fft.fftshift(ell_image),1) 
        # ell_image_var = cp.flipud(ell_image_var)# cp.rot90(cp.fft.fftshift(ell_image),1) 
        
    ri_fourier = cp.asnumpy(reconstructed_image_original)
    
    if isSinogram:
        ri_compare = fbp
    else:
        ri_compare = ri_fourier
   
    is_masked = True
    if is_masked:
        reconstructed_image_var = mask*v_image_s_var
        reconstructed_image_mean = mask*v_image_mean
        reconstructed_image_init = mask*reconstructed_image_init
        u_image_mean = mask*u_image_mean
        u_image_s_var = mask*u_image_s_var
        ell_image_mean = mask*ell_image_mean
        ell_image_s_var = mask*ell_image_s_var
    else:
        reconstructed_image_var = v_image_s_var
        reconstructed_image_mean = v_image_mean
    
    
    ri_init = cp.asnumpy(reconstructed_image_init)
    
    # ri_fourier = fourier.irfft2((sL2.astype(cp.float32)*vForiginal)[:,n-1:])
    vForiginal_n = cp.asnumpy(vForiginal)
    vF_init_n = cp.asnumpy(vF_init)
    ri_fourier_n = cp.asnumpy(ri_fourier)
    vF_mean_n = cp.asnumpy(vF_mean.reshape(2*n-1,2*n-1,order=imc.ORDER))
    vF_stdev_n = cp.asnumpy(vF_stdev.reshape(2*n-1,2*n-1,order=imc.ORDER))
    vF_abs_stdev_n = cp.asnumpy(vF_abs_stdev.reshape(2*n-1,2*n-1,order=imc.ORDER))
    ri_mean_n = cp.asnumpy(reconstructed_image_mean)
    ri_var_n = cp.asnumpy(reconstructed_image_var)
    ri_std_n = np.sqrt(ri_var_n)

#    ri_n_scalled = ri_n*cp.asnumpy(scalling_factor)
    u_mean_n = cp.asnumpy(u_image_mean)
    u_var_n = cp.asnumpy(u_image_s_var)
    ell_mean_n = cp.asnumpy(ell_image_mean)
    ell_var_n = cp.asnumpy(ell_image_s_var)
    
    
    #Plotting one by one
    #initial condition
    fig = plt.figure()
    plt.subplot(1,2,1)
    im = plt.imshow(np.absolute(vF_init_n),cmap=cmap,vmin=-1,vmax=1)
    fig.colorbar(im)
    plt.title('Fourier - absolute')
    plt.subplot(1,2,2)
    im = plt.imshow(np.angle(vF_init_n),cmap=cmap,vmin=-np.pi,vmax=np.pi)
    fig.colorbar(im)
    plt.title('Fourier - phase')
    plt.tight_layout()
    plt.savefig(str(SimulationResult_dir/'vF_init')+image_extension, bbox_inches='tight')
    plt.close()

    #vF Original 
    fig = plt.figure()
    plt.subplot(1,2,1)
    im = plt.imshow(np.absolute(vForiginal_n),cmap=cmap,vmin=-1,vmax=1)
    fig.colorbar(im)
    plt.title('Fourier - absolute')
    plt.subplot(1,2,2)
    im = plt.imshow(np.angle(vForiginal_n),cmap=cmap,vmin=-np.pi,vmax=np.pi)
    fig.colorbar(im)
    plt.title('Fourier - angle')
    plt.tight_layout()
    plt.savefig(str(SimulationResult_dir/'vForiginal')+image_extension, bbox_inches='tight')
    plt.close()

    #vF Original 
    fig = plt.figure()
    plt.subplot(1,2,1)
    im = plt.imshow(np.absolute(vF_mean_n),cmap=cmap,vmin=-1,vmax=1)
    fig.colorbar(im)
    plt.title('Fourier - absolute')
    plt.subplot(1,2,2)
    im = plt.imshow(np.angle(vF_mean_n),cmap=cmap,vmin=-np.pi,vmax=np.pi)
    fig.colorbar(im)
    plt.title('Fourier - phase')
    plt.tight_layout()
    plt.savefig(str(SimulationResult_dir/'vF_mean')+image_extension, bbox_inches='tight')
    plt.close()

    #Absolute error of vF - vForiginal
    fig = plt.figure()
    im = plt.imshow(np.abs(vF_mean_n-vForiginal_n),cmap=cmap,vmin=-1,vmax=1)
    fig.colorbar(im)
    plt.title('Fourier abs Error')
    plt.tight_layout()
    plt.savefig(str(SimulationResult_dir/'abs_err_vF_mean')+image_extension, bbox_inches='tight')
    plt.close()

    #Absolute error of vF_init - vForiginal
    fig = plt.figure()
    im = plt.imshow(np.abs(vF_init_n-vForiginal_n),cmap=cmap,vmin=-1,vmax=1)
    fig.colorbar(im)
    plt.title('Fourier abs Error')
    plt.tight_layout()
    plt.savefig(str(SimulationResult_dir/'abs_err_vF_init')+image_extension, bbox_inches='tight')
    plt.close()

    #Absolute error of vF_init - vF_mean
    fig = plt.figure()
    im = plt.imshow(np.abs(vF_init_n-vF_mean_n),cmap=cmap,vmin=-1,vmax=1)
    fig.colorbar(im)
    plt.title('Fourier abs Error')
    plt.tight_layout()
    plt.savefig(str(SimulationResult_dir/'abs_err_vF_init_vF_mean')+image_extension, bbox_inches='tight')
    plt.close()

    fig = plt.figure()
    im = plt.imshow(ri_mean_n,cmap=cmap,vmin=-1,vmax=1)
    fig.colorbar(im)
    plt.title('Reconstructed Image mean')
    plt.tight_layout()
    plt.savefig(str(SimulationResult_dir/'ri_mean_n')+image_extension, bbox_inches='tight')
    plt.close()

    fig = plt.figure()
    im = plt.imshow(ri_fourier,cmap=cmap,vmin=-1,vmax=1)
    fig.colorbar(im)
    plt.title('Reconstructed Image through Fourier')
    plt.tight_layout()
    plt.savefig(str(SimulationResult_dir/'ri_or_n')+image_extension, bbox_inches='tight')
    plt.close()

    fig = plt.figure()
    im = plt.imshow(ri_init,cmap=cmap,vmin=-1,vmax=1)
    fig.colorbar(im)
    plt.title('Reconstructed Image from initial sample')
    plt.tight_layout()
    plt.savefig(str(SimulationResult_dir/'ri_init')+image_extension, bbox_inches='tight')
    plt.close()
    
    fig = plt.figure()
    im = plt.imshow(ri_var_n,cmap=cmap)
    fig.colorbar(im)
    plt.title('Reconstructed Image variance')
    plt.tight_layout()
    plt.savefig(str(SimulationResult_dir/'ri_var_n')+image_extension, bbox_inches='tight')
    plt.close()
    
    fig = plt.figure()
    im = plt.imshow(target_image,cmap=cmap,vmin=-1,vmax=1)
    fig.colorbar(im)
    plt.title('Target Image')
    plt.tight_layout()
    plt.savefig(str(SimulationResult_dir/'target_image')+image_extension, bbox_inches='tight')
    plt.close()
    
    fig = plt.figure()
    im = plt.imshow(ri_compare,cmap=cmap,vmin=-1,vmax=1)
    if isSinogram:        
        plt.title('Filtered Back Projection --- FBP')
    else:
        plt.title('Reconstructed Image From vFOriginal')
    fig.colorbar(im)
    plt.tight_layout()
    plt.savefig(str(SimulationResult_dir/'ri_compare')+image_extension, bbox_inches='tight')
    plt.close()

    fig = plt.figure()
    im = plt.imshow((target_image-ri_mean_n),cmap=cmap,vmin=-1,vmax=1)
    fig.colorbar(im)
    plt.title('Error SPDE')
    plt.tight_layout()
    plt.savefig(str(SimulationResult_dir/'err_RI_TI')+image_extension, bbox_inches='tight')
    plt.close()
    
    fig = plt.figure()
    im = plt.imshow((target_image-ri_compare),cmap=cmap)#,vmin=-1,vmax=1)
    fig.colorbar(im)
    plt.title('Error comparison')
    plt.tight_layout()
    plt.savefig(str(SimulationResult_dir/'err_RIO_TI')+image_extension, bbox_inches='tight')
    plt.close()
    
    fig = plt.figure()
    im = plt.imshow((ri_compare-target_image),cmap=cmap,vmin=-1,vmax=1)
    fig.colorbar(im)
    plt.title('Error FBP')
    plt.tight_layout()
    plt.savefig(str(SimulationResult_dir/'err_RI_CMP')+image_extension, bbox_inches='tight')
    plt.close()
    
    fig = plt.figure()
    im = plt.imshow(u_mean_n,cmap=cmap)
    fig.colorbar(im)
    plt.title('Mean $u$')
    plt.tight_layout()
    plt.savefig(str(SimulationResult_dir/'u_mean_n')+image_extension, bbox_inches='tight')
    plt.close()
    
    fig = plt.figure()
    im = plt.imshow(u_var_n,cmap=cmap)
    plt.title('Var $u$')
    fig.colorbar(im)
    plt.tight_layout()
    plt.savefig(str(SimulationResult_dir/'u_var_n')+image_extension, bbox_inches='tight')
    plt.close()
    
    fig = plt.figure()
    im = plt.imshow(ell_mean_n,cmap=cmap)
    fig.colorbar(im)
    plt.title(r'Mean $\ell$')
    plt.tight_layout()
    plt.savefig(str(SimulationResult_dir/'ell_mean_n')+image_extension, bbox_inches='tight')
    plt.close()
    
    fig = plt.figure()
    im = plt.imshow(ell_var_n,cmap=cmap)
    fig.colorbar(im)
    plt.title(r'Var $\ell$')
    plt.tight_layout()
    plt.savefig(str(SimulationResult_dir/'ell_var_n')+image_extension, bbox_inches='tight')
    plt.close()
    
    
    fig = plt.figure()
    if isSinogram:
        im = plt.imshow(sinogram,cmap=cmap)
        plt.title('Sinogram')
    else:
        im = plt.imshow(corrupted_image,cmap=cmap)
        plt.title('corrupted_image --- CI')
    fig.colorbar(im)
    plt.tight_layout()
    plt.savefig(str(SimulationResult_dir/'measurement')+image_extension, bbox_inches='tight')
    plt.close()

    #plot several slices
    N_slices = 16
    t_index = np.arange(target_image.shape[1])
    for i in range(N_slices):
        fig = plt.figure()
        slice_index = target_image.shape[0]*i//N_slices
        plt.plot(t_index,target_image[slice_index,:],'-k',linewidth=0.5,markersize=1)
        plt.plot(t_index,ri_fourier_n[slice_index,:],'-r',linewidth=0.5,markersize=1)
        plt.plot(t_index,ri_mean_n[slice_index,:],'-b',linewidth=0.5,markersize=1)
        
        plt.fill_between(t_index,ri_mean_n[slice_index,:]-2*ri_std_n[slice_index,:],
                        ri_mean_n[slice_index,:]+2*ri_std_n[slice_index,:], 
                        color='b', alpha=0.1)
        plt.plot(t_index,ri_compare[slice_index,:],':k',linewidth=0.5,markersize=1)
        plt.savefig(str(SimulationResult_dir/'1D_Slice_{}'.format(slice_index-(target_image.shape[0]//2)))+image_extension, bbox_inches='tight')
        plt.close()

    
    f_index = np.arange(n)
    for i in range(N_slices):
        fig = plt.figure()
        slice_index = vForiginal_n.shape[0]*i//N_slices
        plt.plot(f_index,np.abs(vForiginal_n[slice_index,n-1:]),'-r',linewidth=0.5,markersize=1)
        plt.plot(f_index,np.abs(vF_init_n[slice_index,n-1:]),':k',linewidth=0.5,markersize=1)
        plt.plot(f_index,np.abs(vF_mean_n[slice_index,n-1:]),'-b',linewidth=0.5,markersize=1)
        
        plt.fill_between(f_index,np.abs(vF_mean_n[slice_index,n-1:])-2*vF_abs_stdev_n[slice_index,n-1:],
                        np.abs(vF_mean_n[slice_index,n-1:])+2*vF_abs_stdev_n[slice_index,n-1:], 
                        color='b', alpha=0.1)
        plt.savefig(str(SimulationResult_dir/'1D_F_Slice_{}'.format(slice_index-n))+image_extension, bbox_inches='tight')
        plt.close()
#    fig.colorbar(im, ax=ax[:,:], shrink=0.8)
#    fig.savefig(str(SimulationResult_dir/'Result')+image_extension, bbox_inches='tight')
#    for ax_i in ax.flatten():
#        extent = ax_i.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
#    #    print(ax_i.title.get_text())
#        fig.savefig(str(SimulationResult_dir/ax_i.title.get_text())+''+image_extension, bbox_inches=extent.expanded(1.2, 1.2))
#    
#    fig = plt.figure()
#    plt.hist(u_samples_history[:,0],bins=50,density=1)
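    # Reconstruction-quality metrics (L2 error, MSE, SNR, PSNR) for the posterior mean
    # and for the comparison image (FBP or Fourier reconstruction)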
    error = (target_image-ri_mean_n)
    error_CMP = (target_image-ri_compare)
    
    L2_error = np.linalg.norm(error)
    MSE = np.sum(error*error)/error.size
    PSNR = 10*np.log10(np.max(ri_mean_n)**2/MSE)
    SNR = np.mean(ri_mean_n)/np.sqrt(MSE*(error.size/(error.size-1)))
    
    L2_error_CMP = np.linalg.norm(error_CMP)
    MSE_CMP = np.sum(error_CMP*error_CMP)/error_CMP.size
    PSNR_CMP = 10*np.log10(np.max(ri_compare)**2/MSE_CMP)
    SNR_CMP = np.mean(ri_compare)/np.sqrt(MSE_CMP*(error_CMP.size/(error_CMP.size-1)))
    metric = {'L2_error':L2_error,
               'MSE':MSE,
               'PSNR':PSNR,
               'SNR':SNR,
                'L2_error_CMP':L2_error_CMP,
                'MSE_CMP':MSE_CMP,
                'PSNR_CMP':PSNR_CMP,
                'SNR_CMP':SNR_CMP}
    with h5py.File(result_file,mode='a') as file:
        for key,value in metric.items():
            if key in file.keys():
                del file[key]
            file.create_dataset(key, data=value)
        
    print('Shallow-SPDE : L2-error {}, MSE {}, SNR {}, PSNR {}'.format(L2_error,MSE,SNR,PSNR))
    print('FBP : L2-error {}, MSE {}, SNR {}, PSNR {}'.format(L2_error_CMP,MSE_CMP,SNR_CMP,PSNR_CMP))
Example no. 5
def post_analysis(input_dir):
    relative_path = pathlib.Path(
        "//data.triton.aalto.fi/work/emzirm1/SimulationResult/")
    SimulationResult_dir = relative_path / input_dir
    file = h5py.File(str(SimulationResult_dir / 'result.hdf5'), mode='r')

    samples_history = file['Layers 1/samples_history'][()]
    u_samples_history = file['Layers 0/samples_history'][()]
    #    meas_std = file['measurement/stdev'][()]
    burn_start_index = int(0.3 * u_samples_history.shape[0])
    u_samples_history = u_samples_history[burn_start_index:, :]
    samples_history = samples_history[burn_start_index:, :]
    N = u_samples_history.shape[0]

    mean_field = util.symmetrize(cp.asarray(np.mean(samples_history, axis=0)))
    #    u_mean_field = util.symmetrize(cp.asarray(np.mean(u_samples_history,axis=0)))
    #    stdev_field = util.symmetrize(cp.asarray(np.std(samples_history,axis=0)))
    n = file['fourier/basis_number'][()]
    n_ext = file['fourier/extended_basis_number'][()]
    t_start = file['t_start'][()]
    t_end = file['t_end'][()]
    target_image = file['measurement/target_image'][()]
    corrupted_image = file['measurement/corrupted_image'][()]

    isSinogram = 'sinogram' in file['measurement'].keys()

    if isSinogram:
        sinogram = file['measurement/sinogram'][()]
        theta = file['measurement/theta'][()]
        fbp = iradon(sinogram, theta, circle=True)

    fourier = imc.FourierAnalysis_2D(n, n_ext, t_start, t_end)
    sL2 = util.sigmasLancosTwo(int(n))

    vF = mean_field.reshape(2 * n - 1, 2 * n - 1, order=imc.ORDER).T

    #    if not isSinogram:
    vForiginal = sL2 * util.symmetrize_2D(
        fourier.fourierTransformHalf(cp.array(target_image)))

    vFn = cp.asnumpy(vF)

    reconstructed_image = fourier.inverseFourierLimited(vF[:, n - 1:])

    if isSinogram:
        reconstructed_image = cp.rot90(cp.fft.fftshift(reconstructed_image),
                                       -1)

    reconstructed_image_original = fourier.inverseFourierLimited(
        vForiginal[:, n - 1:])
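    # Scaling factor so the dynamic range of the reconstruction matches that of the
    # reconstruction from the original Fourier data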
    scalling_factor = (cp.max(reconstructed_image_original) -
                       cp.min(reconstructed_image_original)) / (
                           cp.max(reconstructed_image) -
                           cp.min(reconstructed_image))

    u_samples_history_cp = cp.asarray(u_samples_history)
    u_image = cp.zeros_like(reconstructed_image)
    for i in range(N):
        uF = util.symmetrize(u_samples_history_cp[i, :]).reshape(
            2 * n - 1, 2 * n - 1, order=imc.ORDER).T
        u_image += fourier.inverseFourierLimited(uF[:, n - 1:]) / N

    if isSinogram:
        u_image = cp.rot90(cp.fft.fftshift(u_image), -1)

    ri_n = cp.asnumpy(reconstructed_image)
    if isSinogram:
        ri_or_n = fbp
    else:
        ri_or_n = cp.asnumpy(reconstructed_image_original)

    ri_n_scalled = ri_n * cp.asnumpy(scalling_factor)
    u_n = cp.asnumpy(u_image / (np.max(ri_n) - np.min(ri_n)))
    ell_n = np.exp(u_n)
    fig, ax = plt.subplots(ncols=3, nrows=3, figsize=(15, 15))
    ax[0, 0].imshow(ri_n_scalled, cmap=plt.cm.Greys_r)
    ax[0, 0].set_title('Reconstructed Image From vF---  RI')
    ax[0, 1].imshow(target_image, cmap=plt.cm.Greys_r)
    ax[0, 1].set_title('Target Image ---  TI')
    if isSinogram:
        ax[0, 2].imshow(fbp, cmap=plt.cm.Greys_r)
        ax[0, 2].set_title('FBP --- RIO')
    else:
        ax[0, 2].imshow(ri_or_n, cmap=plt.cm.Greys_r)
        ax[0, 2].set_title('Reconstructed Image From vFOriginal --- RIO')

    ax[1, 0].imshow(np.abs(target_image - ri_n_scalled), cmap=plt.cm.Greys_r)
    ax[1, 0].set_title('Absolute error--- RI-TI')
    ax[1, 1].imshow(np.abs(ri_or_n - target_image), cmap=plt.cm.Greys_r)
    ax[1, 1].set_title('Absolute error--- RIO-TI')
    ax[1, 2].imshow(np.abs(ri_n_scalled - ri_or_n), cmap=plt.cm.Greys_r)
    ax[1, 2].set_title('Absolute error--- RI-RIO')
    ax[2, 0].imshow(u_n, cmap=plt.cm.Greys_r)
    ax[2, 0].set_title('Field u--- u')
    ax[2, 1].imshow(ell_n, cmap=plt.cm.Greys_r)
    ax[2, 1].set_title('Length Scale of v--- ell')

    if isSinogram:
        im = ax[2, 2].imshow(sinogram, cmap=plt.cm.Greys_r)
        ax[2, 2].set_title('Measurement (Sinogram) --- CI')
    else:
        im = ax[2, 2].imshow(corrupted_image, cmap=plt.cm.Greys_r)
        ax[2, 2].set_title('Measurement (corrupted_image) --- CI')

    fig.colorbar(im, ax=ax[:, :], shrink=0.8)
    fig.savefig(str(SimulationResult_dir / 'Result.pdf'), bbox_inches='tight')
    for ax_i in ax.flatten():
        extent = ax_i.get_window_extent().transformed(
            fig.dpi_scale_trans.inverted())
        #    print(ax_i.title.get_text())
        fig.savefig(str(SimulationResult_dir / ax_i.title.get_text()) + '.pdf',
                    bbox_inches=extent.expanded(1.2, 1.2))

    return file
Example no. 6
        initialize_using_FBP(sim)
    else:
        # TODO: hard-coded relative path; make this configurable

        relative_path = pathlib.Path("/scratch/work/emzirm1/SimulationResult")
        # relative_path = pathlib.Path("//data.triton.aalto.fi/work/emzirm1/SimulationResult")
        init_folder = relative_path / args.init_folder
        init_file = init_folder / 'result.hdf5'
        if not init_file.exists():
            initialize_using_FBP(sim)
        else:

            with h5py.File(init_file, mode='r') as file:
                #take the latest sample from the folder
                samples_history = file['Layers 1/samples_history'][()]
                init_Sym = util.symmetrize(cp.asarray(samples_history[-1]))
                del samples_history
                u_samples_history = file['Layers 0/samples_history'][()]
                u_init_Sym = util.symmetrize(cp.asarray(u_samples_history[-1]))
                del u_samples_history

            sim.Layers[-1].current_sample_sym = init_Sym
            sim.Layers[-1].current_sample = sim.Layers[-1].current_sample_sym[
                sim.fourier.basis_number_2D_ravel - 1:]
            sim.Layers[-1].record_sample()

            sim.Layers[0].current_sample_sym = u_init_Sym
            sim.Layers[0].current_sample = sim.Layers[0].current_sample_sym[
                sim.fourier.basis_number_2D_ravel - 1:]
            sim.Layers[0].record_sample()
    print("Used bytes so far, before even running the simulation {}".format(
Example no. 7
    lay.update_current_sample()
    pcn.Layers_sqrtBetas[i] = lay.sqrt_beta
    Layers.append(lay)
#%%
accepted_count = 0
for i in range(n_layers):
    Layers[i].i_record = 0
for i in range(n_samples):
    accepted_count += pcn.one_step_non_centered_dunlop(Layers)
    print('Completed step {0}'.format(i))
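    # Every 5 steps, adapt beta (the pCN proposal parameter) from the running acceptance rate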
    if (i + 1) % 5 == 0:
        acceptancePercentage = accepted_count / (i + 1)
        pcn.adapt_beta(acceptancePercentage)

#%%
mean_field = util.symmetrize(
    cp.asarray(np.mean(Layers[-1].samples_history[:n_samples, :], axis=0)))
u_mean_field = util.symmetrize(
    cp.asarray(np.mean(Layers[0].samples_history[:n_samples, :], axis=0)))
vF = mean_field.reshape(2 * f.basis_number - 1,
                        2 * f.basis_number - 1,
                        order=im.ORDER).T
uF = u_mean_field.reshape(2 * f.basis_number - 1,
                          2 * f.basis_number - 1,
                          order=im.ORDER).T
vForiginal = util.symmetrize_2D(
    f.fourierTransformHalf(measurement.target_image))
vFwithNoise = util.symmetrize_2D(
    f.fourierTransformHalf(measurement.corrupted_image))
vFn = cp.asnumpy(vF)
uFn = cp.asnumpy(uF)
vForiginaln = cp.asnumpy(vForiginal)
Example no. 8
    def construct_from(self, uHalf):
        uHalf2D = util.from_u_2D_ravel_to_uHalf_2D(util.symmetrize(uHalf),
                                                   self.fourier.basis_number)
        return self.construct_from_2D(uHalf2D)