Example #1
def test_power_synthesize_analyze(space1, space2):
    np.random.seed(11)

    p1 = ift.PowerSpace(space1)
    fp1 = ift.PS_field(p1, _spec1)
    p2 = ift.PowerSpace(space2)
    fp2 = ift.PS_field(p2, _spec2)
    outer = np.outer(fp1.to_global_data(), fp2.to_global_data())
    fp = ift.Field.from_global_data((p1, p2), outer)

    op1 = ift.create_power_operator((space1, space2), _spec1, 0)
    op2 = ift.create_power_operator((space1, space2), _spec2, 1)
    opfull = op2(op1)

    # Draw samples from the composed covariance and compare the mean of their
    # analyzed power to the input spectra.
    samples = 500
    sc1 = ift.StatCalculator()
    sc2 = ift.StatCalculator()
    for ii in range(samples):
        sk = opfull.draw_sample()

        sp = ift.power_analyze(sk, spaces=(0, 1), keep_phase_information=False)
        sc1.add(sp.sum(spaces=1) / fp2.sum())
        sc2.add(sp.sum(spaces=0) / fp1.sum())

    assert_allclose(sc1.mean.local_data, fp1.local_data, rtol=0.2)
    assert_allclose(sc2.mean.local_data, fp2.local_data, rtol=0.2)
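
The test relies on module-level fixtures (space1, space2, _spec1, _spec2) that are not shown in the snippet. A minimal way to run it might look like the sketch below; the concrete harmonic spaces and the power-law spectra are placeholder assumptions, and NIFTy 5 (imported as nifty5) is assumed.

import numpy as np
import nifty5 as ift
from numpy.testing import assert_allclose

# Placeholder harmonic spaces; any harmonic domains accepted by ift.PowerSpace would do.
space1 = ift.RGSpace(8, harmonic=True)
space2 = ift.RGSpace((12,), harmonic=True)

# Placeholder power spectra (simple power laws).
def _spec1(k):
    return 42/(1. + k)**2

def _spec2(k):
    return 42/(1. + k)**3

test_power_synthesize_analyze(space1, space2)
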
Example #2
def wf_test(signal, noise, signal_boost, npix=400):
    
    pixel_space = ift.RGSpace([npix, npix]) 
    fourier_space = pixel_space.get_default_codomain()

    signal_field = ift.Field.from_global_data(pixel_space, signal.astype(float))
    
    HT = ift.HartleyOperator(fourier_space, target=pixel_space) 
    power_field = ift.power_analyze(HT.inverse(signal_field), binbounds=ift.PowerSpace.useful_binbounds(fourier_space, True))

    Sh = ift.create_power_operator(fourier_space, power_spectrum=power_field) 
    R = HT
 
    noise_field = ift.Field.from_global_data(pixel_space, noise.astype(float))
    noise_power_field = ift.power_analyze(HT.inverse(noise_field), binbounds=ift.PowerSpace.useful_binbounds(fourier_space, True))

    N = ift.create_power_operator(HT.domain, noise_power_field)
    N_inverse = HT @ N @ HT.adjoint  # assumed reconstruction: noise covariance mapped to pixel space; its .inverse below supplies N^-1
    
    amplify = len(signal_boost)
    
    s_data = np.zeros((amplify, npix, npix))
    m_data = np.zeros((amplify, npix, npix))
    d_data = np.zeros((amplify, npix, npix))

    for i in np.arange(amplify):
        
        data = noise_field  # noise-only data; the boosted signal is stored below only for comparison

        # Wiener filtering the data

        j = (R.adjoint @ N_inverse.inverse)(data)
        D_inv = R.adjoint @ N_inverse.inverse @ R + Sh.inverse

        IC = ift.GradientNormController(iteration_limit=500, tol_abs_gradnorm=1e-3)
        D = ift.InversionEnabler(D_inv, IC, approximation=Sh.inverse).inverse
        m = D(j)

        s_data[i,:,:] = (signal_field * signal_boost[i]).to_global_data()
        m_data[i,:,:] = HT(m).to_global_data()
        d_data[i,:,:] = data.to_global_data()

    return (s_data, m_data, d_data)
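
A possible driver for wf_test, assuming 2-D signal and noise arrays of shape (npix, npix) and that numpy (np) and NIFTy (ift) are already imported in the module; the random inputs and boost values are purely illustrative.

import numpy as np

npix = 64
signal = np.random.standard_normal((npix, npix))
noise = np.random.standard_normal((npix, npix))

# Three boost factors -> three filtered maps in each returned array.
s_data, m_data, d_data = wf_test(signal, noise, signal_boost=[1.0, 2.0, 4.0], npix=npix)
print(s_data.shape, m_data.shape, d_data.shape)  # each is (3, npix, npix)
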
Example #3
def test_DiagonalOperator_power_analyze2(space1, space2):
    np.random.seed(11)

    fp1 = ift.PS_field(ift.PowerSpace(space1), _spec1)
    fp2 = ift.PS_field(ift.PowerSpace(space2), _spec2)

    S_1 = ift.create_power_operator((space1, space2), _spec1, 0)
    S_2 = ift.create_power_operator((space1, space2), _spec2, 1)
    S_full = S_2(S_1)

    samples = 500
    sc1 = ift.StatCalculator()
    sc2 = ift.StatCalculator()

    for ii in range(samples):
        sk = S_full.draw_sample()
        sp = ift.power_analyze(sk, spaces=(0, 1), keep_phase_information=False)
        sc1.add(sp.sum(spaces=1) / fp2.sum())
        sc2.add(sp.sum(spaces=0) / fp1.sum())

    assert_allclose(sc1.mean.local_data, fp1.local_data, rtol=0.2)
    assert_allclose(sc2.mean.local_data, fp2.local_data, rtol=0.2)
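
S_full = S_2(S_1) composes the two diagonal power operators into a single covariance on the product domain (space1, space2). A small sketch of that composition on placeholder spaces (the spaces and spectra below are assumptions, not the ones used by the test):

import nifty5 as ift

sp1 = ift.RGSpace(16, harmonic=True)
sp2 = ift.RGSpace((16, 16), harmonic=True)

S_1 = ift.create_power_operator((sp1, sp2), lambda k: 1./(1. + k)**2, 0)
S_2 = ift.create_power_operator((sp1, sp2), lambda k: 1./(1. + k)**3, 1)
S_full = S_2(S_1)

print(S_full.domain)           # DomainTuple over sp1 and sp2
sample = S_full.draw_sample()  # one realisation drawn from the composed covariance
print(sample.domain == S_full.domain)
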
Example #4
def nifty_wf(signal, noise, y_map, npix=400, pxsize=1.5, kernel=9.68, n=10, smooth=False):
    
    cmb_mocks = noise.shape[0]
    
    A = 2*np.sqrt(2*np.log(2))  # factor converting a FWHM to a Gaussian sigma
    
    if smooth:
        signal_smooth = np.zeros((cmb_mocks, npix, npix))
        noise_smooth = np.zeros((cmb_mocks, npix, npix))

        for i in np.arange(cmb_mocks):
            noise_data = ndimage.gaussian_filter(noise[i], sigma=kernel/A/pxsize, order=0, mode="reflect", truncate=10)
            # signal_data = ndimage.gaussian_filter(signal[i], sigma=kernel/A/pxsize, order=0, mode="reflect", truncate=10)
            signal_data = signal[i]  # swap in the commented line above to smooth the signal as well as the noise
            noise_smooth[i, :, :] = noise_data
            signal_smooth[i, :, :] = signal_data
    else:
        noise_smooth = noise
        signal_smooth = signal
                
    pixel_space = ift.RGSpace([npix, npix]) 
    fourier_space = pixel_space.get_default_codomain()

    s_data = np.zeros((cmb_mocks, npix, npix))
    m_data = np.zeros((cmb_mocks, npix, npix))
    d_data = np.zeros((cmb_mocks, npix, npix))


    for i in np.arange(cmb_mocks):
        
        signal_field = ift.Field.from_global_data(pixel_space, signal_smooth.astype(float))  # use signal_smooth[i] here when each mock carries its own signal map
        HT = ift.HartleyOperator(fourier_space, target=pixel_space) 
        power_field = ift.power_analyze(HT.inverse(signal_field), binbounds=ift.PowerSpace.useful_binbounds(fourier_space, True))
        Sh = ift.create_power_operator(fourier_space, power_spectrum=power_field) 
        R = HT
           
        noise_field = ift.Field.from_global_data(pixel_space, noise_smooth[i].astype(float))
        noise_power_field = ift.power_analyze(HT.inverse(noise_field), binbounds=ift.PowerSpace.useful_binbounds(fourier_space, True))

        N = ift.create_power_operator(HT.domain, noise_power_field)
        N_inverse = HT @ N @ HT.adjoint  # assumed reconstruction: noise covariance mapped to pixel space; its .inverse below supplies N^-1

        data = signal_field + noise_field # --->when using mock_data

        # Wiener filtering the data

        j = (R.adjoint @ N_inverse.inverse)(data)
        D_inv = R.adjoint @ N_inverse.inverse @ R + Sh.inverse

        IC = ift.GradientNormController(iteration_limit=500, tol_abs_gradnorm=1e-3)
        D = ift.InversionEnabler(D_inv, IC, approximation=Sh.inverse).inverse
        m = D(j)

        #s_data[i,:,:] = (signal_field).to_global_data()
        m_data[i,:,:] = HT(m).to_global_data()
        #d_data[i,:,:] = data.to_global_data()    
    
    # Square the filtered maps and take the absolute value of the filtered maps
    # (this block is used when no cross-correlation is computed)
    squared_m_data = np.zeros((cmb_mocks, npix, npix))
    abs_m_data = np.zeros((cmb_mocks, npix, npix))
    
    for i in np.arange(m_data.shape[0]):
        squared_m_data[i,:,:]  = m_data[i,:,:] * m_data[i,:,:]
        abs_m_data[i,:,:] = np.abs(m_data[i,:,:])
    
    #Stacking all filtered maps
    stack1  = np.sum(squared_m_data, axis = 0)/m_data.shape[0]
    stack2  = np.sum(abs_m_data, axis = 0)/m_data.shape[0]
       
    return (m_data, squared_m_data, abs_m_data, stack1, stack2)  # adjust the returned tuple if other products (e.g. s_data, d_data or only the stacks) are needed
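
A hypothetical driver for nifty_wf with smooth=False, assuming one 2-D signal template and a stack of noise realisations (only noise_smooth is indexed per realisation on that code path); the shapes and values are illustrative, and numpy (np), scipy.ndimage and NIFTy (ift) are assumed to be imported in the module.

import numpy as np

npix = 64
n_mocks = 2
signal = np.random.standard_normal((npix, npix))           # one signal template
noise = np.random.standard_normal((n_mocks, npix, npix))   # noise realisations
y_map = np.zeros((npix, npix))                             # not used on this code path

m_data, squared_m_data, abs_m_data, stack1, stack2 = nifty_wf(
    signal, noise, y_map, npix=npix, smooth=False)
print(m_data.shape)   # (n_mocks, npix, npix)
print(stack1.shape)   # (npix, npix)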