def grad_k(ws, fdensity, alpha, sig, psf_k):
    print('grad_k begin')
    mo = np.exp(-4.)
    ws = real_to_complex(ws)
    ws = ws.reshape((n_grid, n_grid))
    #wk = ws;
    ws = np.real(fft.ifft2(ws))

    l1 = -1 * fft.ifft2((fft.ifft2(fft.fft2(ws) * fft.fft2(psf)) - data) /
                        sig_noise**2) * psf_k
    '''    
    l1_og = fft.ifft2((Psi(ws) - data)/sig_noise**2)*psf_k;
    l1_other = fft.ifft2((fft.fft2(fft.ifft2(ws)*fft.ifft2(psf)) - data)/sig_noise**2)*psf_k;
    print('diff is:');
    print(l1 - l1_other);
    print(l1-l1_og);
    '''
    #print(l1-l1_other)
    l1 = l1.flatten()

    xsi = (1. - fdensity) * gaussian(np.log(ws), loc=np.log(
        mo), scale=sig) / ws + fdensity * (ws**alpha / w_norm)
    l2 = -1 * gaussian(np.log(ws), loc=np.log(mo), scale=sig) * (
        1. - fdensity) / ws**2 - (1. - fdensity) * np.log(ws / mo) * np.exp(
            -np.log(ws / mo)**2 / 2 / sig**2) / np.sqrt(
                2 * np.pi) / ws**2 / sig**3 + fdensity * alpha * ws**(
                    alpha - 1) / w_norm
    l2 = l2 / np.absolute(xsi)
    l2 = fft.ifft2(l2).flatten()
    l_tot = l1 + l2
    #return l1,l2;
    l_tot = complex_to_real(l_tot)
    #print('grad is');
    #print(l_tot);
    return l_tot
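# The snippets in this listing call real_to_complex and complex_to_real without defining them.
# A minimal sketch of what these helpers are assumed to do (pack a length-2N real vector into
# N complex values and back, so scipy.optimize can work on real arrays); the original helpers
# may use a different packing convention.
import numpy as np

def real_to_complex(z):
    n = len(z) // 2
    return z[:n] + 1j * z[n:]  # first half = real parts, second half = imaginary parts

def complex_to_real(z):
    return np.concatenate((np.real(z), np.imag(z)))  # inverse embedding into 2N reals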
def grad_like(self, wsp, ws, ws_k, xi):
    #print('start grad_like')
    conv = np.real(fft.ifft2(ws_k * self.psf_k))  #convolution of ws with psf
    term1 = (conv - self.data) / self.n_grid**2 / self.sig_noise**2  #term that's squared in the likelihood (with N in the denominator)
    grad = np.zeros((self.n_grid, self.n_grid), dtype='complex')
    #modulate by hand; ft1 does not depend on (i, j), so it only needs to be computed once
    ft1 = fft.fft2(1 / (1 + np.exp(-1 * wsp / xi)))
    for i in range(0, self.n_grid):
        for j in range(0, self.n_grid):
            ftp = np.roll(ft1, (i, j), axis=(0, 1))
            term2 = fft.ifft2(ftp * self.psf_k)
            grad[i, j] = np.sum(term1 * term2)
    grad_real = self.complex_to_real(np.conj(grad.flatten()))  #embed to 2R
    #print('end grad_like')
    return grad_real  #return 1d array
def grad_k(ws, fdensity, alpha, sig):
    #print('grad_k begin')
    mo = np.exp(-4.)
    ws = real_to_complex(ws)
    ws = ws.reshape((n_grid, n_grid))
    #wk = ws
    #ws = np.real(fft.ifft2(ws))

    l1 = -1 * fft.ifft2(
        (np.real(fft.ifft2(ws * psf_k)) - data) / sig_noise**2) * psf_k
    #print(l1-l1_other)
    l1 = l1.flatten()
    l_tot = l1
    #return l1,l2
    l_tot = complex_to_real(l_tot)
    return l_tot
def loss_fn(self, wsp_k, xi, f, alpha):
    wsp_k = self.real_to_complex(wsp_k)  #2*reals -> complex
    wsp_k = wsp_k.reshape((self.n_grid, self.n_grid))  #reshape to 2d
    wsp = np.real(fft.ifft2(wsp_k))
    ws = xi * np.log(np.exp(wsp / xi) + 1)  #reparametrize from m_prime back to m
    ws_k = fft.fft2(ws)
    return self.loss_like(ws_k) - self.loss_prior(ws, f, alpha)
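# loss_fn maps the unconstrained field m' back to a positive field m through a softplus,
# ws = xi*log(exp(wsp/xi) + 1). A minimal sketch of that map and its inverse; the function
# names to_m / to_m_prime are illustrative and not part of the original code.
import numpy as np

def to_m(wsp, xi):
    # softplus: strictly positive, approaches wsp for wsp >> xi
    return xi * np.log(np.exp(wsp / xi) + 1.0)

def to_m_prime(ws, xi):
    # inverse map, defined for ws > 0
    return xi * np.log(np.exp(ws / xi) - 1.0)

# round trip: to_m(to_m_prime(5.0, 0.5), 0.5) ~= 5.0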
Example #5
def loss_fn_real(self, wsp_k, f, alpha):
    wsp_k = wsp_k.reshape((self.n_grid, self.n_grid))  #reshape to 2d
    wsp = np.real(fft.ifft2(wsp_k))
    ws = wsp  #no reparametrization in this real-space version (identity map)
    ws_k = fft.fft2(ws)
    return self.loss_like(ws_k) - self.loss_prior(ws, f, alpha)
Example #6
def makeMock(grnd):
    grnd_x = np.around(grnd['xnano'] / 100).astype(int)
    grnd_y = np.around(grnd['ynano'] / 100).astype(int)
    grnd_arr = np.zeros((64, 64))
    grnd_arr[grnd_y, grnd_x] = grnd['intensity']
    grnd_arr = grnd_arr / np.max(grnd_arr)
    grnd_conv = np.real(fft.ifft2(fft.fft2(grnd_arr) * psf_k))
    return grnd_conv
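# makeMock expects a table with columns 'xnano', 'ynano' and 'intensity' (positions in
# nanometres on a 64x64 grid of 100 nm pixels) and reads psf_k from module scope. A
# hypothetical usage sketch with a delta-function PSF, so the output equals the normalised
# source grid; the numbers below are made up for illustration.
import numpy as np
import numpy.fft as fft

grnd = {
    'xnano': np.array([1200., 3150., 5000.]),
    'ynano': np.array([800., 3200., 6000.]),
    'intensity': np.array([1000., 2500., 1800.]),
}

psf = np.zeros((64, 64))
psf[0, 0] = 1.0         # delta PSF, for illustration only
psf_k = fft.fft2(psf)   # makeMock picks this up from module scope

mock = makeMock(grnd)   # 64x64 image with peak value 1.0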
def lnpost_k_og(ws, fdensity, alpha, sig):
    #converting flattened ws to matrix
    #ws = real_to_complex(ws);
    ws = ws.reshape((n_grid, n_grid))
    ws = np.real(fft.ifft2(ws))
    post = lnlike_k(ws) + lnprior_k(ws, fdensity, alpha, sig)
    #print('post is');
    #print(post);
    return post
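# lnprior_k is called above but not included in this listing. Based on the mixture prior
# that appears in grad_k and hess_k (a lognormal component with weight 1-fdensity plus a
# power law with weight fdensity, normalised over an intensity range wlim), a plausible
# sketch is below; the default mo, the wlim range and the use of scipy.stats.norm are
# assumptions, not taken from the original code.
import numpy as np
from scipy.stats import norm

def lnprior_k(ws, fdensity, alpha, sig, mo=np.exp(-4.), wlim=(1., 10.)):
    w_norm = (wlim[1]**(alpha + 1) - wlim[0]**(alpha + 1)) / (alpha + 1)
    lognormal = norm.pdf(np.log(ws), loc=np.log(mo), scale=sig) / ws
    xsi = (1. - fdensity) * lognormal + fdensity * ws**alpha / w_norm
    return np.sum(np.log(xsi))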
Example #8
def fft_convolve2d(x, y):
    """ 2D convolution, using FFT"""
    fr = fft.fft2(x)
    fr2 = fft.fft2(np.flipud(np.fliplr(y)))
    m, n = fr.shape
    cc = np.real(fft.ifft2(fr * fr2))
    cc = np.roll(cc, -int(m / 2) + 1, axis=0)
    cc = np.roll(cc, -int(n / 2) + 1, axis=1)
    return cc
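# Quick sanity check of fft_convolve2d (illustrative, not from the original code): a delta
# kernel at the array centre should act as the identity once the final roll is applied.
import numpy as np

rng = np.random.default_rng(0)
img = rng.random((8, 8))

kernel = np.zeros((8, 8))
kernel[4, 4] = 1.0             # delta at (m//2, n//2)

out = fft_convolve2d(img, kernel)
print(np.allclose(out, img))   # True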
def grad_prior(self, wsp, ws, ws_k, xi, f, alpha):
    w_norm = (self.wlim[1]**(alpha + 1) - self.wlim[0]**(alpha + 1)) / (alpha + 1)  #normalization from integrating the power law over wlim
    param_term = 1 / (1 + np.exp(-1 * wsp / xi))  #differentiation term due to the parametrization
    #grad = fft.ifft2((-1/ws - (np.log(ws)-norm_mean)/ws/norm_sig**2)*param_term)  #version with p1=0
    numerator = (1 + (np.log(ws) - self.norm_mean) / self.norm_sig**2) * self.lognorm(ws) / ws + f * alpha * ws**(alpha - 1) / w_norm
    prior = self.lognorm(ws) * (1 - f) + f * ws**alpha / w_norm  #prior without the log
    grad = fft.ifft2(param_term * numerator / prior)
    grad_real = self.complex_to_real(np.conj(grad.flatten()))  #embed to 2R
    return grad_real  #return 1d array
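# loss_prior, used by loss_fn above, is not included in this listing. A sketch consistent
# with the mixture that grad_prior differentiates (sum over pixels of the log of the same
# per-pixel prior); it assumes the same self.wlim / self.lognorm attributes and numpy
# imported as np, as elsewhere in this listing.
def loss_prior(self, ws, f, alpha):
    w_norm = (self.wlim[1]**(alpha + 1) - self.wlim[0]**(alpha + 1)) / (alpha + 1)
    prior = self.lognorm(ws) * (1. - f) + f * ws**alpha / w_norm
    return np.sum(np.log(prior))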
Example #10
    def loss_like(self, ws_k):
        #gaussian likelihood, assumes ws_k is in complex form and 2d
        conv = np.real(fft.ifft2(ws_k * self.psf_k))
        #convolution of ws with psf
        like_loss = 0.5 * np.sum(
            (conv - self.data)**
            2) / self.sig_noise**2  #gaussian likelihood loss

        return like_loss
Example #11
def lnlike_k(ws):
    ''' log likelihood 
    '''
    #ws = ws.reshape((n_grid,n_grid));
    #ws = np.absolute(fft.ifft2(ws));
    like = -0.5 * np.sum(
        (fft.ifft2(fft.fft2(ws) * fft.fft2(psf)) - data)**2 / sig_noise**2)
    like = np.real(like)
    #print('like is:');
    #print(like);
    return like
def lnpost_k_og(ws, fdensity, alpha, sig):
    #converting flattened ws to matrix
    #print(np.shape(ws))
    ws = ws.reshape((n_grid, n_grid))
    ws_real = np.real(fft.ifft2(ws))
    post = lnlike_k(ws) - lnprior_k(ws_real, fdensity, alpha, sig)
    #print('post is');
    #print(post);
    #barrier function

    return post
def lnlike_k(ws):
    ''' log likelihood w/ periodic boundary conditions (need for solving w/
    respect to fourier coefficients)
    '''
    like = -0.5 * np.sum(
        (np.real(fft.ifft2(fft.fft2(ws) * fft.fft2(psf))) - data)**2 /
        sig_noise**2)
    #like = -0.5 * np.sum((np.real(fft.fft2(fft.fft2(ws)))- data)**2/sig_noise**2);
    #like = np.real(like);
    #print('like is:');
    #print(like);
    return like
def lnpost_k_debug(ws, fdensity, alpha, sig):
    #converting flattened ws to matrix
    print(ws[:5])
    ws = real_to_complex(ws)
    ws = ws.reshape((n_grid, n_grid))
    post = lnlike_k(ws)  #+ lnprior_k(ws,fdensity,alpha,sig)

    fig, ax = plt.subplots(2, 2)
    ax[0][0].imshow(np.real(fft.ifft2(w_true_k)), vmin=0, vmax=10)
    ax[0][0].set_title('Truth')
    ax[0][1].imshow(np.real(fft.ifft2(ws)), vmin=0, vmax=10)
    ax[1][0].imshow(data, vmin=0, vmax=10)
    ax[1][0].set_title('Data')
    ax[1][1].imshow(np.real(fft.ifft2(ws * psf_k)), vmin=0, vmax=10)

    ii = 0
    while os.path.isfile('test_opt%i.png' % ii):
        ii += 1
    plt.savefig('test_opt%i.png' % ii)
    plt.close()
    return post
def optimize_m(self, wsp_k, xi, f, alpha):
    print('optimizing')
    gradfun = lambda tg: self.areal(tg, xi, f, alpha)
    res = scipy.optimize.minimize(lambda tt: self.loss_fn(tt, xi, f, alpha),
                                  wsp_k,  # theta initial
                                  jac=gradfun,
                                  method='Newton-CG')
    print('Number of Iterations m')
    print(res['nit'])
    w_final_k = self.real_to_complex(res['x'])
    w_final_k = w_final_k.reshape((self.n_grid, self.n_grid))  #reshape to 2d
    w_final = np.real(fft.ifft2(w_final_k))
    w_final = xi * np.log(np.exp(w_final / xi) + 1)  #map m' back to m via the softplus
    return res['x'], w_final
def lnlike_k(ws):
    ''' log likelihood w/ periodic boundary conditions (need for solving w/
    respect to fourier coefficients)
    '''
    #cv = signal.convolve(ws,psf);
    #diff = int((len(cv[:,0]) - n_grid)/2);
    #cv = cv[diff:n_grid+diff,diff:n_grid+diff];
    #like = -0.5 * np.sum((cv - data)**2/sig_noise**2);
    #like = -0.5 * np.sum((np.real(fft.fft2(fft.fft2(ws)))- data)**2/sig_noise**2);
    like = 0.5 * np.sum(
        (np.real(fft.ifft2(ws * psf_k)) - data)**2) / sig_noise**2
    #like = np.real(like);
    #print('like is:');
    #print(like);
    return like
Example #17
def loss_like(ws_k):
    #gaussian likelihood, assumes ws_k is in complex form and 2d
    conv = np.real(fft.ifft2(ws_k * psf_k))  #convolution of ws with psf
    like_loss = 0.5 * (conv - data)**2 / sig_noise**2  #per-pixel gaussian likelihood loss (not summed here)

    return like_loss
Example #18
def lnlike(ws): 
    ''' log likelihood 
    '''
    return -0.5 * np.sum((np.real(fft.ifft2(fft.fft2(ws)*fft.fft2(psf))) - data)**2/sig_noise**2)
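# Psi(ws) is called in the commented-out gradient check near the top of this listing and in
# the data2 comparison further down, but never defined here. From those comparisons it
# appears to be the periodic forward model, i.e. circular convolution of the flux grid with
# the listing's global psf; a minimal sketch under that assumption:
import numpy as np
import numpy.fft as fft

def Psi(ws):
    return np.real(fft.ifft2(fft.fft2(ws) * fft.fft2(psf)))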
theta_grid = np.linspace(0., 1., n_grid)  # gridding of theta (same as pixels)

#create true values - assign to grid
x_true = np.abs(np.random.rand(Ndata))  # location of sources
y_true = np.abs(np.random.rand(Ndata))

#w_true = np.abs(np.random.rand(Ndata))+1;

#true grid needs to be set up with noise
w_true_grid = np.zeros((n_grid, n_grid))
for x, y, w in zip(x_true, y_true, w_true):
    w_true_grid[np.argmin(np.abs(theta_grid - x)),
                np.argmin(np.abs(theta_grid - y))] = w

data = np.real(fft.ifft2(
    fft.fft2(w_true_grid) *
    fft.fft2(psf)))  #+ np.absolute(sig_noise* np.random.randn(n_grid,n_grid));
data3 = signal.convolve(w_true_grid, psf)
diff = int((len(data3[:, 0]) - n_grid) / 2)
data3 = data3[diff:n_grid + diff, diff:n_grid + diff]
#data = data3;
'''
fig, ax = plt.subplots(1,2)
ax[0].imshow(w_true_grid);
ax[0].set_title('True Positions')
#ax[1].imshow(data3[:-4,:-4]);
ax[1].imshow(data4);
ax[1].set_title('Observed Data')
plt.show();
'''
#create fft of psf
def lnlike_k(ws):
    ''' log likelihood w/ periodic boundary conditions (need for solving w/
    respect to fourier coefficients)
    '''
    return 0.5 * np.sum(
        (np.real(fft.ifft2(ws * psf_k)) - data)**2) / sig_noise**2
def optimize_m(t_ini, f_ini, alpha_ini, sig_curr):
    #keeping in mind that minimize requires flattened arrays
    print('Initial Likelihood')
    print(lnpost_k(t_ini, f_ini, alpha_ini, sig_curr))
    t_ini_comp = real_to_complex(t_ini)

    hfunc = Agrad.hessian(lambda tt: lnpost_k(tt, f_ini, alpha_ini, sig_curr))
    afunc = Agrad.grad(lambda tt: lnpost_k(tt, f_curr, a_curr, sig_delta))
    grad_fun = lambda tg: -1 * grad_k(tg, f_ini, alpha_ini, sig_curr)
    hess_fun = lambda th: -1 * hess_k(th, f_ini, alpha_ini, sig_curr)
    afunc_og = Agrad.holomorphic_grad(
        lambda tt: np.conj(lnpost_k_og(tt, f_curr, a_curr, sig_delta)))
    aog = lambda ts: complex_to_real(afunc_og(real_to_complex(ts)))

    #try optimization with some different algorithms
    res = scipy.optimize.minimize(
        lambda tt: lnpost_k(tt, f_ini, alpha_ini, sig_curr),
        t_ini,  # theta initial
        jac=grad_fun,
        hess=hess_fun,
        method='trust-ncg')
    res2 = scipy.optimize.minimize(
        lambda tt: lnpost_k(tt, f_ini, alpha_ini, sig_curr),
        t_ini,  # theta initial
        jac=aog,
        method='CG')
    res3 = scipy.optimize.minimize(
        lambda tt: lnpost_k(tt, f_ini, alpha_ini, sig_curr),
        t_ini,
        method='Nelder-Mead')

    #cres = real_to_complex(res['x'])
    #tt_prime = np.real(fft.ifft(cres))
    #cres2 = real_to_complex(res2.x)
    #tt_prime2 = np.real(fft.ifft(cres2))
    print('Final Log Likelihood')
    print(lnpost_k(res.x, f_ini, alpha_ini, sig_curr))
    print(lnpost_k(res2.x, f_ini, alpha_ini, sig_curr))

    w_final_k = real_to_complex(res['x']).reshape(n_grid, n_grid)
    w_final = np.real(fft.ifft2(w_final_k))
    w_final2_k = real_to_complex(res2.x).reshape(n_grid, n_grid)
    w_final2 = np.real(fft.ifft2(w_final2_k))
    w_final3_k = real_to_complex(res3.x).reshape(n_grid, n_grid)
    w_final3 = np.real(fft.ifft2(w_final3_k))

    fig, ax = plt.subplots(2, 4)
    ax[0][0].imshow(np.real(fft.ifft2(w_true_k)), vmin=0, vmax=10)
    ax[0][0].set_title('Truth')
    ax[0][1].imshow(w_final2, vmin=0, vmax=10)
    ax[0][1].set_title('Newton-CG')
    ax[0][2].imshow(w_final, vmin=0, vmax=10)
    ax[0][2].set_title('trust-ncg')
    ax[0][3].imshow(w_final3, vmin=0, vmax=10)
    ax[0][3].set_title('nelder-mead')
    ax[1][0].imshow(data, vmin=0, vmax=10)
    #ax[1][0].imshow(np.real(fft.ifft2(w_true_k*psf_k)))
    ax[1][0].set_title('Data')
    ax[1][1].imshow(np.real(fft.ifft2(w_final2_k * psf_k)), vmin=0, vmax=10)
    ax[1][1].set_title('FM Newton-CG')
    ax[1][2].imshow(np.real(fft.ifft2(w_final_k * psf_k)), vmin=0, vmax=10)
    ax[1][2].set_title('FM trust-ncg')
    ax[1][3].imshow(np.real(fft.ifft2(w_final3_k * psf_k)), vmin=0, vmax=10)
    ax[1][3].set_title('FM nelder-mead')
    plt.savefig('test_opt.png')
    return w_final
Example #22
y_true = np.abs(np.random.rand(Ndata))

#w_true = np.abs(np.random.rand(Ndata))+1;

#true grid needs to be set up with noise
w_true_grid = np.zeros((n_grid, n_grid))
for x, y, w in zip(x_true, y_true, w_true):
    w_true_grid[np.argmin(np.abs(theta_grid - x)),
                np.argmin(np.abs(theta_grid - y))] = w

data4 = convolvesame_fft(w_true_grid,
                         psf) + sig_noise  # * np.random.randn(n_grid,n_grid);
data2 = Psi(w_true_grid) + sig_noise  # * np.random.randn(n_grid,n_grid);
data3 = np.real(
    fft.ifft2(
        fft.fft2(np.pad(w_true_grid, ((5, 0), (5, 0)), 'constant')) *
        fft.fft2(np.pad(psf, ((5, 0), (5, 0)), 'constant')))
) + sig_noise  # * np.random.randn(n_grid,n_grid);
data = np.real(
    fft.ifft2(fft.fft2(w_true_grid) *
              fft.fft2(psf))) + sig_noise * np.random.randn(n_grid, n_grid)
#print(data-data2);
'''
fig, ax = plt.subplots(1,2)
ax[0].imshow(w_true_grid);
ax[0].set_title('True Positions')
#ax[1].imshow(data3[:-4,:-4]);
ax[1].imshow(data4);
ax[1].set_title('Observed Data')
plt.show();
'''
#x_true = np.abs(np.random.rand(Ndata)) # location of sources
#y_true = np.abs(np.random.rand(Ndata))
x_true = [0.5]
y_true = [0.5]
w_true = np.ones(Ndata) * 5
#w_true = np.abs(np.random.rand(Ndata))+1

#true grid needs to be set up with noise
w_true_grid = np.zeros((n_grid, n_grid))
for x, y, w in zip(x_true, y_true, w_true):
    w_true_grid[np.argmin(np.abs(theta_grid - x)),
                np.argmin(np.abs(theta_grid - y))] = w

w_true_k = fft.fft2(w_true_grid)
data = np.real(fft.ifft2(
    w_true_k *
    psf_k))  #+ np.absolute(sig_noise* np.random.randn(n_grid,n_grid))

data_p = signal.convolve(w_true_grid, psf)
diff = int((data_p.shape[0] - n_grid) / 2)
data_p = data_p[diff:n_grid + diff, diff:n_grid + diff]

fig, ax = plt.subplots(1, 3)
ax[0].imshow(w_true_grid)
ax[0].set_title('True Positions')
ax[1].imshow(data)
ax[1].set_title('data')
ax[2].imshow(data_p)
ax[2].set_title("data'")
plt.savefig('test.png')
Example #24
x, y = np.meshgrid(grid1d, grid1d)
psf = np.exp(-((y - grid1d[mid])**2 + (x - grid1d[mid])**2) / 2. / sig_psf**2)

xx = np.linspace(-0.5, 0.5, n_grid)
bump = np.exp(-0.5 * xx**2 / sig_psf**2)
#bump /= np.trapz(bump) # normalize the integral to 1
_psf = bump[:, np.newaxis] * bump[np.newaxis, :]

fig, ax = plt.subplots(1, 2)
ax[0].imshow(psf)
ax[0].set_title('psf')
ax[1].imshow(_psf)
ax[1].set_title('psf')
plt.savefig('test0.png')

data = np.real(fft.ifft2(fft.fft2(w_true_grid) * fft.fft2(_psf)))
data_p = Signal.fftconvolve(w_true_grid, _psf)  #, mode='same')
#psf_k = fft.fft2(psf);
psf_k = fft.fft2(_psf)


def lnlike_k(ws):
    ''' log likelihood w/ periodic boundary conditions (need for solving w/
    respect to fourier coefficients)
    '''
    return 0.5 * np.sum(
        (aSignal.convolve(ws, _psf) - data_p)**2) / sig_noise**2
    #return 0.5 * np.sum(((fft.ifft2(ws*psf_k))- data_p)**2)/sig_noise**2
    #return 0.5 * np.sum((np.real(fft.ifft2(ws*psf_k))- data)**2)/sig_noise**2

def hess_k(ws, fdensity, alpha, sig, psf_k):
    print('hess_k begin')
    mo = np.exp(-4.)
    ws = real_to_complex(ws)
    ws = ws.reshape((n_grid, n_grid))
    ws = np.real(fft.ifft2(ws))
    #calc l1 we only get diagonals here
    l1 = -1 * (psf_k**2 / sig_noise**2 / n_grid**2).flatten()
    #calc l2, the hessian of the prior is messy
    xsi = (1. - fdensity) * gaussian(np.log(ws), loc=np.log(
        mo), scale=sig) / ws + fdensity * (ws**alpha / w_norm)
    dxsi = -1 * gaussian(np.log(ws), loc=np.log(mo), scale=sig) * (
        1. - fdensity) / ws**2 - (1. - fdensity) * np.log(ws / mo) * np.exp(
            -np.log(ws / mo)**2 / 2 / sig**2) / np.sqrt(
                2 * np.pi) / ws**2 / sig**3 + fdensity * alpha * ws**(
                    alpha - 1) / w_norm
    dxsi_st = -1 * gaussian(np.log(ws), loc=np.log(mo), scale=sig) * (
        1. - fdensity) / ws**2 - (1. - fdensity) * np.log(ws / mo) * np.exp(
            -np.log(ws / mo)**2 / 2 / sig**2) / np.sqrt(
                2 * np.pi) / ws**2 / sig**3
    ddxsi_st = -1 * dxsi_st / ws - dxsi_st * np.log(ws / mo) / ws / sig**2 - (
        1. - fdensity) * (1 / np.sqrt(2 * np.pi) / sig) * np.exp(
            -np.log(ws / mo)**2 / 2 /
            sig**2) * (1 / sig**2 - np.log(ws / mo) / sig**2 - 1) / ws**3
    ddxsi = ddxsi_st + fdensity * alpha * (alpha - 1) * ws**(alpha -
                                                             2) / w_norm
    l2 = -1 * (dxsi / xsi)**2 + ddxsi / np.absolute(xsi)
    #this is the hessian of the prior wrt m_x, not m_k
    l2_k = fft.ifft2(l2).flatten() / n_grid**2
    #we assume that hessian of l2 is diagonal. Under assumption k = -k', then we only get the zeroth element along the diag
    #lets fill the entire matrix and see whats up;
    hess_m = np.zeros((n_grid**2, n_grid**2), dtype=complex)
    hess_l1 = np.zeros((n_grid**2, n_grid**2), dtype=complex)
    np.fill_diagonal(hess_l1, l1)
    off = []
    #print(l2_k[0]);
    for i in range(0, n_grid**2):
        for j in range(0, n_grid**2):
            hess_m[i, j] = l2_k[int(np.absolute(i - j))]
            #check the off diagonals to make sure they are small
            if i != j:
                off.append(l2_k[int(np.absolute(i - j))])
    hess_m = hess_l1 + hess_m
    '''
    print('Sigma Real is:');
    print(np.std(np.real(off)));
    print('Sigma Imag is:');
    print(np.std(np.imag(off)));
    fig, ax = plt.subplots(1,2)
    ax[0].imshow(np.real(hess_m));
    ax[0].set_title('Real Hessian')
    #ax[1].imshow(data3[:-4,:-4]);
    ax[1].imshow(np.imag(hess_m));
    ax[1].set_title('Imaginary Hessian')
    plt.show();
    '''
    l_tot = np.diagonal(hess_m)

    l_minr = min(np.real(l_tot))
    l_mini = min(np.imag(l_tot))
    #print(l_tot-l1);
    if l_minr < 0:
        l_tot = l_tot - l_minr + 0.1
    if l_mini < 0:
        l_tot = l_tot - 1j * (l_mini + 0.1)
    '''
    print('diag is:');
    print(l2_k[0]);
    print('other is:');
    print(l1);
    '''
    '''
    hess_m = np.zeros((n_grid**2,n_grid**2));
    np.fill_diagonal(hess_m,l_tot);
    return hess_m;
    '''
    #return l1,l2_k[0];
    l_tot = complex_to_real(l_tot)
    #print('hess is');
    #print(l_tot);
    return l_tot
#create coordinate grid
theta_grid = np.linspace(0., 1., n_grid)  # gridding of theta (same as pixels)

#create true values - assign to grid
x_true = np.abs(np.random.rand(Ndata))  # location of sources
y_true = np.abs(np.random.rand(Ndata))

#w_true = np.abs(np.random.rand(Ndata))+1;

#true grid needs to be set up with noise
w_true_grid = np.zeros((n_grid, n_grid))
for x, y, w in zip(x_true, y_true, w_true):
    w_true_grid[np.argmin(np.abs(theta_grid - x)),
                np.argmin(np.abs(theta_grid - y))] = w

data = np.real(fft.ifft2(fft.fft2(w_true_grid) * fft.fft2(psf))) + np.absolute(
    sig_noise * np.random.randn(n_grid, n_grid))
data3 = signal.convolve(w_true_grid, psf)
diff = int((len(data3[:, 0]) - n_grid) / 2)
data3 = data3[diff:n_grid + diff, diff:n_grid + diff]
#data = data3;
'''
fig, ax = plt.subplots(1,2)
ax[0].imshow(w_true_grid);
ax[0].set_title('True Positions')
#ax[1].imshow(data3[:-4,:-4]);
ax[1].imshow(data4);
ax[1].set_title('Observed Data')
plt.show();
'''
#create fft of psf
Example #27
def loss_fn(wsp_k, xi, f, alpha):
    wsp = np.real(fft.ifft2(wsp_k))
    ws = xi * np.log(np.exp(wsp / xi) + 1)  #reparametrize from m_prime back to m
    ws_k = fft.fft2(ws)
    return loss_like(ws_k) - loss_prior(ws, f, alpha)
theta_grid = np.linspace(0., 1., n_grid)  # gridding of theta (same as pixels)

#create true values - assign to grid
x_true = np.abs(np.random.rand(Ndata))  # location of sources
y_true = np.abs(np.random.rand(Ndata))

#w_true = np.abs(np.random.rand(Ndata))+1;

#true grid needs to be set up with noise
w_true_grid = np.zeros((n_grid, n_grid))
for x, y, w in zip(x_true, y_true, w_true):
    w_true_grid[np.argmin(np.abs(theta_grid - x)),
                np.argmin(np.abs(theta_grid - y))] = w

data = np.real(
    fft.ifft2(fft.fft2(w_true_grid) *
              fft.fft2(psf))) + sig_noise * np.random.randn(n_grid, n_grid)
'''
fig, ax = plt.subplots(1,2)
ax[0].imshow(w_true_grid);
ax[0].set_title('True Positions')
#ax[1].imshow(data3[:-4,:-4]);
ax[1].imshow(data4);
ax[1].set_title('Observed Data')
plt.show();
'''
#create fft of psf
psf_k = fft.fft2(psf)

########################################################################
#now begin the actual execution
########################################################################