def __init__(
    self,
    N_pix=1024,
    power_spectrum_beta=lambda k: 2 / (k**4 + 1),
    noise_variance=0.01,
    rho=1,
    sigma_f=1,
    minimizer=None,
    controller=None,
    beta_init=None,
    tau_f_init=None,
):
    super().__init__(N_pix=N_pix)
    self.power_spectrum_beta = power_spectrum_beta
    self.noise_variance = noise_variance
    self.rho = rho
    self.sigma_f = sigma_f
    if controller is None:
        controller = nifty5.GradientNormController(
            tol_rel_gradnorm=1e-2, iteration_limit=500)
    if minimizer is None:
        minimizer = nifty5.VL_BFGS(controller=controller)
    self.minimizer = minimizer
    self.beta_init = beta_init
    self.tau_f_init = tau_f_init
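# Construction sketch: the enclosing class is not shown in this snippet, so
# `CausalModel` below is a hypothetical stand-in, and the argument values are
# illustrative assumptions.
import nifty5

model = CausalModel(
    N_pix=512,
    power_spectrum_beta=lambda k: 2 / (k**4 + 1),
    noise_variance=0.01,
    minimizer=nifty5.VL_BFGS(
        controller=nifty5.GradientNormController(
            tol_rel_gradnorm=1e-2, iteration_limit=500)),
)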
def curvature(self):
    iteration_controller = nifty5.GradientNormController(
        iteration_limit=300, tol_abs_gradnorm=1e-3, name=None)
    return nifty5.InversionEnabler(
        BetaCurvature(
            domain=self.s_space, beta=self.beta, B=self.B, rho=self.rho),
        iteration_controller=iteration_controller)
def metric(self):
    # `space` and `rosen_hess_prod` are assumed to be defined at module
    # level; rosen_hess_prod matches the signature of
    # scipy.optimize.rosen_hess_prod (Rosenbrock Hessian-vector product).
    class RBCurv(ift.EndomorphicOperator):
        def __init__(self, loc):
            self._loc = loc.to_global_data_rw()
            self._capability = self.TIMES
            self._domain = space

        def apply(self, x, mode):
            self._check_input(x, mode)
            inp = x.to_global_data_rw()
            out = ift.Field.from_global_data(
                space, rosen_hess_prod(self._loc.copy(), inp))
            return out

    t1 = ift.GradientNormController(
        tol_abs_gradnorm=1e-5, iteration_limit=1000)
    return ift.InversionEnabler(RBCurv(self._position), t1)
def wf_test(signal, noise, signal_boost, npix=400):
    pixel_space = ift.RGSpace([npix, npix])
    fourier_space = pixel_space.get_default_codomain()
    signal_field = ift.Field.from_global_data(pixel_space, signal.astype(float))
    HT = ift.HartleyOperator(fourier_space, target=pixel_space)
    power_field = ift.power_analyze(
        HT.inverse(signal_field),
        binbounds=ift.PowerSpace.useful_binbounds(fourier_space, True))
    Sh = ift.create_power_operator(fourier_space, power_spectrum=power_field)
    R = HT
    noise_field = ift.Field.from_global_data(pixel_space, noise.astype(float))
    noise_power_field = ift.power_analyze(
        HT.inverse(noise_field),
        binbounds=ift.PowerSpace.useful_binbounds(fourier_space, True))
    N = ift.create_power_operator(HT.domain, noise_power_field)
    # Map the noise covariance to pixel space (despite the name, this is N
    # itself, not its inverse; it gets inverted below).
    N_inverse = HT @ N @ HT.inverse

    amplify = len(signal_boost)
    s_data = np.zeros((amplify, npix, npix))
    m_data = np.zeros((amplify, npix, npix))
    d_data = np.zeros((amplify, npix, npix))
    for i in np.arange(amplify):
        # Null test: the data contain noise only; the boosted signal is
        # stored separately for comparison.
        data = noise_field

        # Wiener filter the data
        j = (R.adjoint @ N_inverse.inverse)(data)
        D_inv = R.adjoint @ N_inverse.inverse @ R + Sh.inverse
        IC = ift.GradientNormController(iteration_limit=500,
                                        tol_abs_gradnorm=1e-3)
        D = ift.InversionEnabler(D_inv, IC, approximation=Sh.inverse).inverse
        m = D(j)

        s_data[i, :, :] = (signal_field * signal_boost[i]).to_global_data()
        m_data[i, :, :] = HT(m).to_global_data()
        d_data[i, :, :] = data.to_global_data()
    return (s_data, m_data, d_data)
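# Usage sketch: calling wf_test on synthetic white-noise inputs; the shapes
# and boost values are illustrative assumptions.
import numpy as np

npix = 400
mock_signal = np.random.normal(size=(npix, npix))
mock_noise = np.random.normal(scale=0.5, size=(npix, npix))
s_data, m_data, d_data = wf_test(mock_signal, mock_noise,
                                 signal_boost=[1., 2., 4.], npix=npix)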
def __init__(
    self,
    N_pix=1024,
    power_spectrum_f=lambda k: 1 / (k**4 + 1),
    power_spectrum_beta=lambda k: 1 / (k**4 + 1),
    noise_var=0.1,
    rho=1,
    minimization=None,
    minimizer=None,
):
    super().__init__(N_pix=N_pix)
    # Because of how NIFTy implements the FFT, the amplitude has to be
    # multiplied by N_pix.
    self.power_spectrum_f = lambda q: power_spectrum_f(q) * N_pix
    self.power_spectrum_beta = lambda q: power_spectrum_beta(q) * N_pix
    self.noise_var = noise_var
    self.rho = rho
    if minimizer is None:
        minimizer = nifty5.VL_BFGS(
            controller=nifty5.GradientNormController(
                tol_abs_gradnorm=1, iteration_limit=100))
    self.minimizer = minimizer
signal_response = R(correlated_field)

# Set up likelihood and load data
N = ift.ScalingOperator(0.1, data_space)
data, ground_truth = generate_mysterious_data(position_space)
data = ift.from_global_data(data_space, data)
likelihood = ift.GaussianEnergy(
    mean=data, inverse_covariance=N.inverse)(signal_response)

#### SOLVING PROBLEM ####
ic_sampling = ift.GradientNormController(iteration_limit=100)
ic_newton = ift.GradInfNormController(
    name='Newton', tol=1e-6, iteration_limit=30)
minimizer = ift.NewtonCG(ic_newton)

H = ift.StandardHamiltonian(likelihood, ic_sampling)
initial_mean = ift.MultiField.full(H.domain, 0.)
mean = initial_mean

# Number of samples used to estimate the KL
N_samples = 10

# Draw new samples to approximate the KL ten times
for i in range(10):
    # Draw new samples and minimize KL
    KL = ift.MetricGaussianKL(mean, H, N_samples)
    KL, convergence = minimizer(KL)
    mean = KL.position
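# Posterior statistics, sketched under the assumption that one wants the mean
# and pointwise variance of `correlated_field` at the final position; this
# follows the usual NIFTy pattern of averaging over fresh KL samples.
KL = ift.MetricGaussianKL(mean, H, N_samples)
sc = ift.StatCalculator()
for sample in KL.samples:
    sc.add(correlated_field(sample + KL.position))
posterior_mean, posterior_variance = sc.mean, sc.var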
data_space = GR.target

# Set the noise covariance N
noise = 5.
N = ift.ScalingOperator(noise, data_space)

# Create mock data
MOCK_SIGNAL = S.draw_sample()
MOCK_NOISE = N.draw_sample()
data = R(MOCK_SIGNAL) + MOCK_NOISE

# Build inverse propagator D and information source j
D_inv = R.adjoint @ N.inverse @ R + S.inverse
j = R.adjoint_times(N.inverse_times(data))
# Make D_inv invertible (via Conjugate Gradient)
IC = ift.GradientNormController(iteration_limit=500, tol_abs_gradnorm=1e-3)
D = ift.InversionEnabler(D_inv, IC, approximation=S.inverse).inverse

# Calculate WIENER FILTER solution
m = D(j)

# Plotting
rg = isinstance(position_space, ift.RGSpace)
plot = ift.Plot()
filename = "getting_started_1_mode_{}.png".format(mode)
if rg and len(position_space.shape) == 1:
    plot.add(
        [HT(MOCK_SIGNAL), GR.adjoint(data), HT(m)],
        label=['Mock signal', 'Data', 'Reconstruction'],
        alpha=[1, .3, 1])
    plot.add(mask_to_nan(mask, HT(m - MOCK_SIGNAL)), title='Residuals')
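# The construction above is the classical Wiener filter: with linear response
# R, noise covariance N and signal prior S, the posterior mean is
#     m = D j,    D = (R^dagger N^{-1} R + S^{-1})^{-1},    j = R^dagger N^{-1} d,
# and InversionEnabler applies D iteratively via conjugate gradient, using
# S^{-1} as a preconditioning approximation to D^{-1} = D_inv.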
accuracy = 0
sum_of_weights = 0
weighted_correct = 0

for i in range(FIRST_ID - 1, LAST_ID):
    (x, y), true_direction, weight = get_pair(
        i, BENCHMARK, subsample_size=SUBSAMPLE)
    if true_direction == 0:
        continue
    scaler = MinMaxScaler(scale)
    x, y = scaler.fit_transform(np.array((x, y)).T).T
    minimizer = nifty5.RelaxedNewton(
        controller=nifty5.GradientNormController(
            tol_rel_gradnorm=TOL_REL_GRADNORM,
            iteration_limit=ITERATION_LIMIT,
            convergence_level=5,
        ))
    bcm = bayesian_causal_model_nifty.cause_model_shallow.CausalModelShallow(
        N_bins=N_BINS,
        noise_var=NOISE_VAR,
        rho=RHO,
        power_spectrum_beta=POWER_SPECTRUM_BETA,
        power_spectrum_f=POWER_SPECTRUM_F,
        minimizer=minimizer,
    )
    bcm.set_data(x, y)
    H1 = bcm.get_evidence(direction=1, verbosity=VERBOSITY - 1)
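    # Hedged continuation sketch: the snippet ends after H1, so H2, the sign
    # convention, and the bookkeeping below are assumptions about how the
    # benchmark loop plausibly proceeds.
    H2 = bcm.get_evidence(direction=-1, verbosity=VERBOSITY - 1)
    # Smaller Hamiltonian = larger evidence for that causal direction.
    predicted_direction = 1 if H1 < H2 else -1
    sum_of_weights += weight
    if predicted_direction == true_direction:
        weighted_correct += weight

accuracy = weighted_correct / sum_of_weights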
def nifty_wf(signal, noise, y_map, npix=400, pxsize=1.5, kernel=9.68,
             n=10, smooth=False):
    cmb_mocks = noise.shape[0]
    # Conversion factor between FWHM and Gaussian sigma
    A = 2 * np.sqrt(2 * np.log(2))
    if smooth is True:
        signal_smooth = np.zeros((cmb_mocks, npix, npix))
        noise_smooth = np.zeros((cmb_mocks, npix, npix))
        for i in np.arange(cmb_mocks):
            noise_data = ndimage.gaussian_filter(
                noise[i], sigma=kernel / A / pxsize, order=0,
                mode="reflect", truncate=10)
            # signal_data = ndimage.gaussian_filter(
            #     signal[i], sigma=kernel/A/pxsize, order=0,
            #     mode="reflect", truncate=10)
            signal_data = signal[i]  # uncomment above to smooth the signal too
            noise_smooth[i, :, :] = noise_data
            signal_smooth[i, :, :] = signal_data
    else:
        noise_smooth = noise
        signal_smooth = signal

    pixel_space = ift.RGSpace([npix, npix])
    fourier_space = pixel_space.get_default_codomain()

    s_data = np.zeros((cmb_mocks, npix, npix))
    m_data = np.zeros((cmb_mocks, npix, npix))
    d_data = np.zeros((cmb_mocks, npix, npix))
    for i in np.arange(cmb_mocks):
        signal_field = ift.Field.from_global_data(
            pixel_space, signal_smooth.astype(float))  # use [i] for mock data
        HT = ift.HartleyOperator(fourier_space, target=pixel_space)
        power_field = ift.power_analyze(
            HT.inverse(signal_field),
            binbounds=ift.PowerSpace.useful_binbounds(fourier_space, True))
        Sh = ift.create_power_operator(fourier_space,
                                       power_spectrum=power_field)
        R = HT
        noise_field = ift.Field.from_global_data(
            pixel_space, noise_smooth[i].astype(float))
        noise_power_field = ift.power_analyze(
            HT.inverse(noise_field),
            binbounds=ift.PowerSpace.useful_binbounds(fourier_space, True))
        N = ift.create_power_operator(HT.domain, noise_power_field)
        # Map the noise covariance to pixel space (despite the name, this is
        # N itself, not its inverse; it gets inverted below).
        N_inverse = HT @ N @ HT.inverse

        data = signal_field + noise_field  # ---> when using mock data

        # Wiener filter the data
        j = (R.adjoint @ N_inverse.inverse)(data)
        D_inv = R.adjoint @ N_inverse.inverse @ R + Sh.inverse
        IC = ift.GradientNormController(iteration_limit=500,
                                        tol_abs_gradnorm=1e-3)
        D = ift.InversionEnabler(D_inv, IC, approximation=Sh.inverse).inverse
        m = D(j)

        # s_data[i, :, :] = signal_field.to_global_data()
        m_data[i, :, :] = HT(m).to_global_data()
        # d_data[i, :, :] = data.to_global_data()

    # Square the filtered map and also take the absolute value of the
    # filtered map (uncomment here for no cross-correlation)
    squared_m_data = np.zeros((cmb_mocks, npix, npix))
    abs_m_data = np.zeros((cmb_mocks, npix, npix))
    for i in np.arange(m_data.shape[0]):
        squared_m_data[i, :, :] = m_data[i, :, :] * m_data[i, :, :]
        abs_m_data[i, :, :] = np.abs(m_data[i, :, :])

    # Stack all filtered maps
    stack1 = np.sum(squared_m_data, axis=0) / m_data.shape[0]
    stack2 = np.sum(abs_m_data, axis=0) / m_data.shape[0]

    # change here to return the right values ----> , stack_square, stack_abs
    return (m_data, squared_m_data, abs_m_data, stack1, stack2)
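# Usage sketch: calling nifty_wf on a stack of mock noise maps; shapes and
# values are illustrative assumptions. Note that y_map is accepted but never
# used in the function body as written.
import numpy as np

npix = 400
mock_noise = np.random.normal(size=(10, npix, npix))
mock_signal = np.random.normal(size=(npix, npix))
m_data, squared_m, abs_m, stack1, stack2 = nifty_wf(
    mock_signal, mock_noise, y_map=None, npix=npix, smooth=False)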
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright(C) 2013-2019 Max-Planck-Society
#
# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik.

from unittest import SkipTest

import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_equal

import nifty5 as ift

pmp = pytest.mark.parametrize
IC = ift.GradientNormController(tol_abs_gradnorm=1e-5, iteration_limit=1000)

spaces = [ift.RGSpace([1024], distances=0.123), ift.HPSpace(32)]

minimizers = [
    'ift.VL_BFGS(IC)',
    'ift.NonlinearCG(IC, "Polak-Ribiere")',
    # 'ift.NonlinearCG(IC, "Hestenes-Stiefel")',
    'ift.NonlinearCG(IC, "Fletcher-Reeves")',
    'ift.NonlinearCG(IC, "5.49")',
    'ift.L_BFGS_B(ftol=1e-10, gtol=1e-5, maxiter=1000)',
    'ift.L_BFGS(IC)',
    'ift.NewtonCG(IC)'
]

newton_minimizers = ['ift.RelaxedNewton(IC)']
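# Sketch of how these string lists are consumed: each entry is eval'd inside
# a parametrized test so that names like IC resolve at call time. The
# quadratic test body below follows the standard NIFTy pattern but is an
# illustrative assumption, not necessarily this module's exact test.
@pmp('minimizer', minimizers)
@pmp('space', spaces)
def test_quadratic_minimization(minimizer, space):
    np.random.seed(42)
    starting_point = ift.Field.from_random('normal', domain=space)*10
    covariance_diagonal = ift.Field.from_random('uniform', domain=space) + 0.5
    covariance = ift.DiagonalOperator(covariance_diagonal)
    required_result = ift.full(space, 1.)

    try:
        minimizer = eval(minimizer)
        energy = ift.QuadraticEnergy(
            A=covariance, b=required_result, position=starting_point)
        (energy, convergence) = minimizer(energy)
    except NotImplementedError:
        raise SkipTest

    # The minimum of 0.5 x^T A x - b^T x lies at x = A^{-1} b.
    assert_allclose(
        energy.position.to_global_data(),
        1./covariance_diagonal.to_global_data(),
        rtol=1e-3, atol=1e-3)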
N = ift.ScalingOperator(0.1, data_space)
data = ift.from_global_data(data_space, sheared_image) + N.draw_sample()

# Similar to the Fourier transform, but with real numbers as output
harmonic_space = position_space.get_default_codomain()
HT = ift.HartleyOperator(harmonic_space, target=position_space)
S_h = ift.create_power_operator(harmonic_space, prior_spectrum)
# @ composes operators, like function composition
S = HT @ S_h @ HT.adjoint

D_inv = S.inverse + R.adjoint @ N.inverse @ R
j = (R.adjoint @ N.inverse)(data)
IC = ift.GradientNormController(name='CG', iteration_limit=100,
                                tol_abs_gradnorm=1e-7)
# Conjugate gradient is applied here
D = ift.InversionEnabler(D_inv.inverse, IC, approximation=S)
m = D(j)
result_image = m.val

# Plot the images
f, axarr = plt.subplots(1, 3)
axarr[0].imshow(image_original_arr)
axarr[1].imshow(data.val)
axarr[2].imshow(m.val)
plt.savefig('Original_data_mean.png', dpi=150)