def print_function(params, iter, gradient):
    """ Print the error at every iteration """
    if iter % 10 == 0:
        print(
            "Total Error:",
            np.sum(
                np.absolute(neural_net_predict(params, inputs) - targets)))


def print_function(params, iter, gradient):
    plot_inputs = np.linspace(-8, 8, num=400)
    outputs = neural_net_predict(params, np.expand_dims(plot_inputs, 1))

    # Plot data and functions.
    plt.cla()
    plt.title('Overfitting test with L2_reg = %f' % L2_reg)
    ax.set_xlabel("Possible Inputs")
    ax.set_ylabel("Neural Network Outputs")
    ax.plot(inputs, targets, 'bx', label='Train Data')
    ax.plot(testinputs, testtargets, 'bo', label='Test Data')
    ax.plot(plot_inputs, outputs)
    ax.legend()
    ax.set_ylim([-2, 3])

    # Plot the cost function for the test
    testcost = np.sum(
        (neural_net_predict(params, testinputs) - testtargets)**2)
    traincost = np.sum((neural_net_predict(params, inputs) - targets)**2)
    diff = np.absolute(testcost - traincost)
    testcostlist.append(testcost)
    traincostlist.append(traincost)
    iterlist.append(iter)
    ax2.plot(iterlist, testcostlist, 'r-', label='Test Cost')
    ax2.plot(iterlist, traincostlist, 'g-', label='Train Cost')
    ax2.set_xlabel('Number of Iterations')
    ax2.set_ylabel('Error in Estimation or Cost')
    ax2.set_xlim([0, 50])
    if iter == 0:
        ax2.legend()

    plt.draw()
    #plt.savefig(str(iter) + '.jpg')
    plt.pause(1.0 / 60.0)
def hess_lnpost(ws, fdensity, alpha, sig):
    #calculate the Hessian of the ln posterior
    print('hess')
    #print(ws);
    mo = np.exp(-4.)
    #hval = hfunc(ws);
    ws = ws.reshape((n_grid, n_grid))
    #calc l1
    lsis = np.array([
        -1 * np.sum(psi(index)**2) / sig_noise**2
        for (index, w) in np.ndenumerate(ws)
    ])
    lsis = lsis.reshape((n_grid, n_grid))
    l1 = lsis  #*np.sum((Psi(ws)-data)/2/sig_noise**2);
    xsi = (1. - fdensity) * gaussian(np.log(ws), loc=np.log(
        mo), scale=sig) / ws + fdensity * (ws**alpha / w_norm)
    dxsi = -1 * gaussian(np.log(ws), loc=np.log(mo), scale=sig) * (
        1. - fdensity) / ws**2 - (1. - fdensity) * np.log(ws / mo) * np.exp(
            -np.log(ws / mo)**2 / 2 / sig**2) / np.sqrt(
                2 * np.pi) / ws**2 / sig**3 + fdensity * alpha * ws**(
                    alpha - 1) / w_norm
    dxsi_st = -1 * gaussian(np.log(ws), loc=np.log(mo), scale=sig) * (
        1. - fdensity) / ws**2 - (1. - fdensity) * np.log(ws / mo) * np.exp(
            -np.log(ws / mo)**2 / 2 / sig**2) / np.sqrt(
                2 * np.pi) / ws**2 / sig**3
    ddxsi_st = -1 * dxsi_st / ws - dxsi_st * np.log(ws / mo) / ws / sig**2 - (
        1. - fdensity) * (1 / np.sqrt(2 * np.pi) / sig) * np.exp(
            -np.log(ws / mo)**2 / 2 /
            sig**2) * (1 / sig**2 - np.log(ws / mo) / sig**2 - 1) / ws**3
    ddxsi = ddxsi_st + fdensity * alpha * (alpha - 1) * ws**(alpha -
                                                             2) / w_norm
    l2 = -1 * (dxsi / xsi)**2 + ddxsi / np.absolute(xsi)
    l_tot = l1 + l2
    #those are the diagonal terms; now we need to build the off-diagonal terms
    hess_m = np.zeros((n_grid**2, n_grid**2))
    np.fill_diagonal(hess_m, l_tot)
    '''
    for i in range(0,n_grid**2):
        for j in range(i+1,n_grid**2):
            ind1 = (int(i/n_grid),i%n_grid);
            ind2 = (int(j/n_grid),j%n_grid);
            hess_m[i,j] = -1*np.sum(psi(ind1)*psi(ind2))/sig_noise**2;

    hess_m = symmetrize(hess_m);
    '''
    print('hess fin')
    #print(l_tot);
    #print('new it');
    #print(np.average(hval[0][:][:]-hess_m));
    return -1 * hess_m
def grad_k(ws, fdensity, alpha, sig, psf_k):
    print('grad_k begin')
    mo = np.exp(-4.)
    ws = real_to_complex(ws)
    ws = ws.reshape((n_grid, n_grid))
    #wk = ws;
    ws = np.real(fft.ifft2(ws))

    l1 = -1 * fft.ifft2((fft.ifft2(fft.fft2(ws) * fft.fft2(psf)) - data) /
                        sig_noise**2) * psf_k
    '''    
    l1_og = fft.ifft2((Psi(ws) - data)/sig_noise**2)*psf_k;
    l1_other = fft.ifft2((fft.fft2(fft.ifft2(ws)*fft.ifft2(psf)) - data)/sig_noise**2)*psf_k;
    print('diff is:');
    print(l1 - l1_other);
    print(l1-l1_og);
    '''
    #print(l1-l1_other)
    l1 = l1.flatten()

    xsi = (1. - fdensity) * gaussian(np.log(ws), loc=np.log(
        mo), scale=sig) / ws + fdensity * (ws**alpha / w_norm)
    l2 = -1 * gaussian(np.log(ws), loc=np.log(mo), scale=sig) * (
        1. - fdensity) / ws**2 - (1. - fdensity) * np.log(ws / mo) * np.exp(
            -np.log(ws / mo)**2 / 2 / sig**2) / np.sqrt(
                2 * np.pi) / ws**2 / sig**3 + fdensity * alpha * ws**(
                    alpha - 1) / w_norm
    l2 = l2 / np.absolute(xsi)
    l2 = fft.ifft2(l2).flatten()
    l_tot = l1 + l2
    #return l1,l2;
    l_tot = complex_to_real(l_tot)
    #print('grad is');
    #print(l_tot);
    return l_tot
Example #5
def delta_for_minimal_r_0(deltaStart, NAtoms, kd, g1d, gprime, gm, Deltac,
                          Omega):
    res = minimize(lambda x: np.absolute(
        r_0(x[0], NAtoms, kd, g1d, gprime, gm, Deltac, Omega)),
                   deltaStart,
                   method='Nelder-Mead')

    return res.x[0]
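
The same pattern in a minimal, self-contained sketch: Nelder-Mead applied to the absolute value of a scalar function. The toy function toy_r below is hypothetical and only stands in for r_0; nothing beyond numpy and scipy is assumed.

import numpy as np
from scipy.optimize import minimize


def toy_r(delta):
    # hypothetical stand-in for r_0: a curve with a zero near delta = 1.5
    return (delta - 1.5) * np.exp(-delta**2)


res = minimize(lambda x: np.absolute(toy_r(x[0])), [0.0], method='Nelder-Mead')
print(res.x[0])  # should converge near 1.5, the zero of toy_r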
Example #6
    def _evaluate(self, x, out, *args, **kwargs):
        f = -anp.sqrt(self.n_var)**self.n_var * anp.prod(x, axis=1)

        # Constraints
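        # |sum(x**2) - 1| <= 1e-4: the equality constraint sum(x**2) == 1, relaxed to an inequality tolerance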
        g = anp.absolute(anp.sum(x**2, axis=1) - 1) - 1e-4

        out["F"] = f
        out["G"] = g
    def cost(coef):
        X_coef = -1 * np.matmul(X_, coef)
        z = 1 / (1 + np.exp(X_coef))
        epsilon = 1e-5
        class1 = np.multiply(y_, np.log(z + epsilon))
        class2 = np.multiply(1 - y_, np.log(1 - z + epsilon))
        ans = -(1 / y_.size) * (np.sum(class1 + class2))
        if self.penalty == "l1":
            return ans + self.val * np.sum(np.absolute(coef))
        else:
            return ans + self.val * np.sum(np.square(coef))
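
The closure above depends on self, X_ and y_ from its enclosing scope. A minimal standalone sketch of the same L1/L2-regularized logistic cost on toy data (all names and values below are hypothetical):

import numpy as np


def logistic_cost(coef, X, y, val=0.1, penalty="l1"):
    # sigmoid of the linear model, matching 1 / (1 + exp(-X @ coef)) above
    z = 1.0 / (1.0 + np.exp(-np.matmul(X, coef)))
    eps = 1e-5
    ce = -np.mean(np.multiply(y, np.log(z + eps)) +
                  np.multiply(1 - y, np.log(1 - z + eps)))
    if penalty == "l1":
        return ce + val * np.sum(np.absolute(coef))
    return ce + val * np.sum(np.square(coef))


X_toy = np.array([[1.0, 0.5], [1.0, -1.2], [1.0, 2.0]])
y_toy = np.array([1.0, 0.0, 1.0])
print(logistic_cost(np.zeros(2), X_toy, y_toy))  # about 0.693 at coef = 0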
    def __init__(self, data, psf, psf_k, no_source):
        self.data = np.absolute(data)
        self.psf = psf
        self.psf_k = psf_k
        self.n_grid = len(data)
        self.f_true = no_source / self.n_grid**2
        self.wlim = (1, 2)
        #min and max signal (given by challenge, will determine ourselves in the future)
        self.sig_noise = self.getNoise()
        self.norm_mean, self.norm_sig = self.getNorm()
        self.xi = self.getXi()
        self.res = self.run()
Example #9
def find_KL(dist1, dist2):
    L = len(dist1)
    if L != len(dist2):
        raise ValueError("Distributions must be same length")
    kl = 0.0
    for i in range(L):
        #temp = dist1[i]*(np.log(dist1[i]) - np.log(dist2[i]))
        if (dist1[i] != 0 and dist2[i] != 0):
            #print kl
            kl += dist1[i]*(np.log(dist1[i]) - np.log(dist2[i]))

    #print kl
    return np.absolute(kl)
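
A quick usage sketch for the function above, with two toy discrete distributions (arbitrary values; numpy is assumed to be imported as np):

p = np.array([0.5, 0.3, 0.2])
q = np.array([0.4, 0.4, 0.2])
print(find_KL(p, q))  # |KL(p || q)| over the overlapping support, roughly 0.025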
Example #10
def cherry_warp(wts, inputs, labels, parameters):

    # hidden layer input
    biased_inputs = np.subtract(inputs, wts[0])**2

    # hidden layer activations
    hidden_activations = np.exp(
        -np.absolute(np.einsum('hif,fh -> ih', biased_inputs, wts[2])))

    # output layer activations
    output_activations = softmax(hidden_activations, wts, labels, parameters)

    return output_activations, hidden_activations
Example #11
    def _evaluate(self, x, out, *args, **kwargs):
        f = 3 * x[:, 0] + (10**-6) * x[:, 0]**3 + 2 * x[:, 1] + (
            2 * 10**(-6)) / 3 * x[:, 1]**3

        # Constraints
        g1 = x[:, 2] - x[:, 3] - 0.55
        g2 = x[:, 3] - x[:, 2] - 0.55

        g3 = anp.absolute(
            1000 * (anp.sin(-x[:, 2] - 0.25) + anp.sin(-x[:, 3] - 0.25)) +
            894.8 - x[:, 0]) - 10**(-4)
        g4 = anp.absolute(
            1000 *
            (anp.sin(x[:, 2] - 0.25) + anp.sin(x[:, 2] - x[:, 3] - 0.25)) +
            894.8 - x[:, 1]) - 10**(-4)
        g5 = anp.absolute(
            1000 *
            (anp.sin(x[:, 3] - 0.25) + anp.sin(x[:, 3] - x[:, 2] - 0.25)) +
            1294.8) - 10**(-4)

        out["F"] = f
        out["G"] = anp.column_stack([g1, g2, g3, g4, g5])
Example #12
def spectra_calculation(u):
    # Transform to Fourier space
    array_hat = np.real(np.fft.fft(u))

    # Normalizing data
    array_new = np.copy(array_hat / float(nx))
    # Energy Spectrum
    espec = 0.5 * np.absolute(array_new)**2
    # Angle Averaging
    eplot = np.zeros(nx // 2, dtype='double')
    for i in range(1, nx // 2):
        eplot[i] = 0.5 * (espec[i] + espec[nx - i])

    return eplot
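
A hedged usage sketch for the function above; it assumes nx is the module-level grid size that spectra_calculation reads, and uses a toy single-mode signal:

import numpy as np

nx = 64  # assumed module-level sample count read by spectra_calculation
x = np.linspace(0.0, 2.0 * np.pi, nx, endpoint=False)
u = np.cos(4.0 * x)  # toy field with a single mode at wavenumber 4
eplot = spectra_calculation(u)
print(eplot.argmax())  # the energy peak sits at index 4 for this signal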
def grad_lnpost(ws, fdensity, alpha, sig):
    #calculate gradient of the ln posterior
    print('grad')
    mo = np.exp(-4.)
    ws = ws.reshape((n_grid, n_grid))
    #calc l1
    bsis = (Psi(ws) - data) / sig_noise**2
    lsis = ws * 0
    for (index, w) in np.ndenumerate(ws):
        lsis[index] = np.sum(psi(index) * bsis)
    l1 = lsis  #*np.sum((Psi(ws)-data)/2/sig_noise**2);
    xsi = (1. - fdensity) * gaussian(np.log(ws), loc=np.log(
        mo), scale=sig) / ws + fdensity * (ws**alpha / w_norm)
    l2 = -1 * gaussian(np.log(ws), loc=np.log(mo), scale=sig) * (
        1. - fdensity) / ws**2 - (1. - fdensity) * np.log(ws / mo) * np.exp(
            -np.log(ws / mo)**2 / 2 / sig**2) / np.sqrt(
                2 * np.pi) / ws**2 / sig**3 + fdensity * alpha * ws**(
                    alpha - 1) / w_norm
    l2 = l2 / np.absolute(xsi)
    l_tot = l1 - l2
    return l_tot.flatten()
    def grad_lnpost(self, ws, fdensity, alpha, sig):
        #calculate gradient of the ln posterior
        print('grad')
        #normalization from integrating
        w_norm = (self.wlim[1]**(alpha + 1) -
                  self.wlim[0]**(alpha + 1)) / (alpha + 1)
        mo = np.exp(self.norm_mean)
        ws = ws.reshape((self.n_grid, self.n_grid))
        #calc l1
        bsis = (self.Psi(ws) - self.data) / self.sig_noise**2
        lsis = ws * 0
        for (index, w) in np.ndenumerate(ws):
            lsis[index] = np.sum(self.psi(index) * bsis)
        l1 = lsis  #*np.sum((Psi(ws)-data)/2/sig_noise**2);
        xsi = (1. - fdensity) * self.gaussian(np.log(ws), loc=np.log(
            mo), scale=sig) / ws + fdensity * (ws**alpha / w_norm)
        l2 = -1 * self.gaussian(
            np.log(ws), loc=np.log(mo), scale=sig) * (1. - fdensity) / ws**2 - (
                1. - fdensity) * np.log(ws / mo) * np.exp(
                    -np.log(ws / mo)**2 / 2 / sig**2) / np.sqrt(
                        2 * np.pi) / ws**2 / sig**3 + fdensity * alpha * ws**(
                            alpha - 1) / w_norm
        l2 = l2 / np.absolute(xsi)
        l_tot = l1 - l2
        return l_tot.flatten()
Example #15
    def _evaluate(self, x, out, *args, **kwargs):
        l = []
        for j in range(self.n_var):
            l.append((j + 1) * x[:, j]**2)
        sum_jx = anp.sum(anp.column_stack(l), axis=1)

        a = anp.sum(anp.cos(x)**4, axis=1)
        b = 2 * anp.prod(anp.cos(x)**2, axis=1)
        c = (anp.sqrt(sum_jx)).flatten()
        c = c + (c == 0) * 1e-20

        f = -anp.absolute((a - b) / c)

        # Constraints
        g1 = -anp.prod(x, 1) + 0.75
        g2 = anp.sum(x, axis=1) - 7.5 * self.n_var

        out["F"] = f
        out["G"] = anp.column_stack([g1, g2])
Example #16
def sgrad_lnpost(w_all, index, fdensity, alpha, sig):
    #calculate gradient of the ln posterior
    mo = np.exp(-4.)
    ws = w_all[index]
    #calc l1
    bsis = -(Psi(w_all) - data) / sig_noise**2
    lsis = np.sum(bsis * psi(index))
    l1 = lsis  #*np.sum((Psi(ws)-data)/2/sig_noise**2);
    xsi = (1. - fdensity) * gaussian(np.log(ws), loc=np.log(
        mo), scale=sig) / ws + fdensity * (ws**alpha / w_norm)
    l2 = -1 * gaussian(np.log(ws), loc=np.log(mo), scale=sig) * (
        1. - fdensity) / ws**2 - (1. - fdensity) * np.log(ws / mo) * np.exp(
            -np.log(ws / mo)**2 / 2 / sig**2) / np.sqrt(
                2 * np.pi) / ws**2 / sig**3 + fdensity * alpha * ws**(
                    alpha - 1) / w_norm
    l2 = l2 / np.absolute(xsi)
    #l2 = fdensity*alpha*ws**(alpha-1) /w_norm /(fdensity*(ws**alpha /w_norm))
    l_tot = l1 + l2
    return -1 * l_tot
Example #17
def compute_control_inputs(fc, xk, yk, thetak):
    n = xk.shape[0] - 1
    # import ipdb; ipdb.set_trace()

    dx = xk[1:] - xk[0:-1]
    dy = yk[1:] - yk[0:-1]
    dk = np.array([dx, dy, np.zeros(n)]).T
    dtheta = (thetak[1:] - thetak[0:-1]) / (1.0 / fc)

    # vel
    qk = np.array([np.cos(thetak[:-1]), np.sin(thetak[:-1]), np.zeros(n)]).T
    proj_q_d = np.sum(qk * dk, axis=1)
    sign_v = sign(proj_q_d)
    vk = np.linalg.norm(dk, axis=1) * sign_v / (1.0 / fc)

    # steering angle
    phi_k = np.zeros(vk.shape)
    mask = np.absolute(vk) > 1e-5
    phi_k[mask] = np.arctan(wheelbase * dtheta[mask] / vk[mask])
    u = np.array((vk, phi_k))
    return u
Example #18
def grad_lnpost(ws, fdensity, alpha, sig):
    #calculate gradient of the ln posterior
    mo = np.exp(-4.)
    ws = ws.reshape((n_grid, n_grid))
    #calc l1
    bsis = -(Psi(ws) - data) / sig_noise**2
    lsis = np.array(
        [np.sum(bsis * psi(index)) for (index, w) in np.ndenumerate(ws)])
    lsis = lsis.reshape((n_grid, n_grid))
    l1 = lsis  #*np.sum((Psi(ws)-data)/2/sig_noise**2);
    xsi = (1. - fdensity) * gaussian(np.log(ws), loc=np.log(
        mo), scale=sig) / ws + fdensity * (ws**alpha / w_norm)
    l2 = -1 * gaussian(np.log(ws), loc=np.log(mo), scale=sig) * (
        1. - fdensity) / ws**2 - (1. - fdensity) * np.log(ws / mo) * np.exp(
            -np.log(ws / mo)**2 / 2 / sig**2) / np.sqrt(
                2 * np.pi) / ws**2 / sig**3 + fdensity * alpha * ws**(
                    alpha - 1) / w_norm
    l2 = l2 / np.absolute(xsi)
    #l2 = fdensity*alpha*ws**(alpha-1) /w_norm /(fdensity*(ws**alpha /w_norm))
    l_tot = l1 + l2
    return -1 * l_tot.flatten()
Example #19
def eig_solver(L, mode='smallest', var_percentage=0.9):
    #L = ensure_matrix_is_numpy(L)
    Σ, eigenVectors = np.linalg.eigh(L)
    absΣ = np.absolute(Σ)

    #less0 = np.sum(Σ < 0)  # DON'T USE: it seems to get better results if we pass all the information through at each epoch
    rank_sorted = np.cumsum(absΣ) / np.sum(absΣ)
    rank = np.sum(rank_sorted < var_percentage) + 1
    #num_eig = np.min([less0, rank])
    num_eig = rank

    if mode == 'smallest':
        U = eigenVectors[:, 0:num_eig]
        U_λ = Σ[0:num_eig]
    elif mode == 'largest':
        n2 = len(Σ)
        n1 = n2 - num_eig
        U = eigenVectors[:, n1:n2]
        U_λ = Σ[n1:n2]
    else:
        raise ValueError('unrecognized mode : ' + str(mode) + ' found.')
    return [U, U_λ]
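
A hedged usage sketch for eig_solver on a small symmetric matrix (toy data; only numpy is assumed):

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((5, 5))
L_sym = A + A.T  # symmetric input, so np.linalg.eigh applies
U, U_lam = eig_solver(L_sym, mode='smallest', var_percentage=0.9)
print(U.shape, U_lam)  # the eigenpairs covering roughly 90% of the spectrum mass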
Example #20
    def _evaluate_wo_regularization(self, *params):
        return np.nansum(np.absolute(self.data - self.model(*params)))
Example #21
    def _evaluate_w_regularization(self, *params):
        return np.nansum(
            np.absolute(self.data - self.model(*params[:-1])) +
            params[-1] * self.regularization(*params[:-1]))
Example #22
#w_true = np.abs(np.random.rand(Ndata))+1;

#true grid needs to be set up with noise
w_true_grid = np.zeros((n_grid, n_grid))
for x, y, w in zip(x_true, y_true, w_true):
    w_true_grid[np.argmin(np.abs(theta_grid - x)),
                np.argmin(np.abs(theta_grid - y))] = w
data = Psi(w_true_grid) + sig_noise * np.random.randn(n_grid, n_grid)

########################################################################
#now begin the actual execution
########################################################################

#now we begin the optimization
#tt0 = np.zeros(n_grid**2) +3; #begin with high uniform M
tt0 = np.absolute(np.random.randn(n_grid**2)) + 2
#tt0[1] = 0.5
#tt0[5] = 0.2
#tt0[7] = 0.1
#begin with the simple method of just minimizing
f_curr = fdensity_true
a_curr = 2
sig_delta = 0.75
'''
#afunc = Agrad.grad(lambda tt: -1*lnpost(tt,f_curr,a_curr,sig_delta));
af_like = Agrad.grad(lambda tt: -1*lnlike(tt));
af_pri = Agrad.grad(lambda tt: -1*lnprior(tt,f_curr,a_curr,sig_delta));
aval_like = af_like(tt0);
aval_pri = af_pri(tt0);
aval = aval_like+aval_pri
tt0 = tt0.reshape((n_grid,n_grid));
'''
# plot DerL
fig, ax = plt.subplots()
ax.set(xlabel='Θ(rad)',
       ylabel="dL(Θ)/dΘ (H/rad)",
       title='Rate of Change of Mutual Inductance dL(Θ)/dΘ')
ax.grid()

ax.plot(theta, DerL(theta))
plt.show()

# plot E(f)^2
t = np.linspace(0, 5 / f, 1000)
timestep = ((5 / f) + 1) / t.size
E_f = np.fft.fft(E(t))
E_f = np.absolute(E_f)
freq = np.fft.fftfreq(t.size, d=timestep)
fig, ax = plt.subplots()
ax.set(xlabel='f(Hz)',
       ylabel='E(f)^2 ((V*s)^2)',
       title='Energy Spectral Density E(f)^2')
ax.grid()

ax.plot(freq, np.power(E_f, 2))
plt.show()

# fourier series coefficients with monte carlo integration, using 10000 samples
monte_time = np.linspace(0, 1 / f, 10000)
E_t = E(monte_time)

Example #24
def l1_norm(params):
    if isinstance(params, dict):
        return np.sum(np.absolute(flatten(params)[0]))
    return np.sum(np.absolute(flatten(params.value)[0]))
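
A hedged usage sketch for the dict branch above; it assumes the flatten used by l1_norm is autograd.misc.flatten.flatten, as is common in autograd code (the parameter values are toy numbers):

import numpy as np
from autograd.misc.flatten import flatten  # assumed source of the flatten used above

params = {'w': np.array([[1.0, -2.0], [0.5, 0.0]]), 'b': np.array([-3.0])}
print(l1_norm(params))  # 1 + 2 + 0.5 + 0 + 3 = 6.5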
Example #25
    def trial_function(self, x_in, weights):
        """Compute custom trial function."""
        return self.bc(x_in) * \
            (1.0 - erf(x_in[0] / (np.sqrt(x_in[1] * 4.0 * self.d_v) + 1.0E-9) *
                       np.absolute(self.mlp.network_output(x_in, weights))))
#create coordinate grid
theta_grid = np.linspace(0., 1., n_grid)  # gridding of theta (same as pixels)

#create true values - assign to grid
x_true = np.abs(np.random.rand(Ndata))  # location of sources
y_true = np.abs(np.random.rand(Ndata))

#w_true = np.abs(np.random.rand(Ndata))+1;

#true grid needs to be set up with noise
w_true_grid = np.zeros((n_grid, n_grid))
for x, y, w in zip(x_true, y_true, w_true):
    w_true_grid[np.argmin(np.abs(theta_grid - x)),
                np.argmin(np.abs(theta_grid - y))] = w

data = np.real(fft.ifft2(fft.fft2(w_true_grid) * fft.fft2(psf))) + np.absolute(
    sig_noise * np.random.randn(n_grid, n_grid))
data3 = signal.convolve(w_true_grid, psf)
diff = int((len(data3[:, 0]) - n_grid) / 2)
data3 = data3[diff:n_grid + diff, diff:n_grid + diff]
#data = data3;
'''
fig, ax = plt.subplots(1,2)
ax[0].imshow(w_true_grid);
ax[0].set_title('True Positions')
#ax[1].imshow(data3[:-4,:-4]);
ax[1].imshow(data4);
ax[1].set_title('Observed Data')
plt.show();
'''
#create fft of psf
psf_k = fft.fft2(psf)
Example #27
def lasso_cost(th, X, y, lmbd):
    y_pred = anp.dot(X, th[:, None])
    error_square = anp.square(y[:, None] - y_pred).sum()
    lasso_error = (lmbd) * ((anp.absolute(th)).sum())
    r = error_square + lasso_error
    return r
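
Because the cost is written with autograd.numpy (anp), its gradient comes for free from autograd. A hedged sketch on toy data (toy names, arbitrary values):

import autograd.numpy as anp
from autograd import grad

X_toy = anp.array([[1.0, 0.0], [1.0, 1.0], [1.0, 2.0]])
y_toy = anp.array([1.0, 2.0, 3.0])
th0 = anp.zeros(2)

dcost = grad(lasso_cost)  # derivative with respect to th
print(lasso_cost(th0, X_toy, y_toy, 0.1))  # 14.0: the pure squared error at th = 0
print(dcost(th0, X_toy, y_toy, 0.1))       # about [-12., -16.]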
def hess_k(ws, fdensity, alpha, sig, psf_k):
    print('hess_k begin')
    mo = np.exp(-4.)
    ws = real_to_complex(ws)
    ws = ws.reshape((n_grid, n_grid))
    ws = np.real(fft.ifft2(ws))
    #calc l1 we only get diagonals here
    l1 = -1 * (psf_k**2 / sig_noise**2 / n_grid**2).flatten()
    #calc l2, the hessian of the prior is messy
    xsi = (1. - fdensity) * gaussian(np.log(ws), loc=np.log(
        mo), scale=sig) / ws + fdensity * (ws**alpha / w_norm)
    dxsi = -1 * gaussian(np.log(ws), loc=np.log(mo), scale=sig) * (
        1. - fdensity) / ws**2 - (1. - fdensity) * np.log(ws / mo) * np.exp(
            -np.log(ws / mo)**2 / 2 / sig**2) / np.sqrt(
                2 * np.pi) / ws**2 / sig**3 + fdensity * alpha * ws**(
                    alpha - 1) / w_norm
    dxsi_st = -1 * gaussian(np.log(ws), loc=np.log(mo), scale=sig) * (
        1. - fdensity) / ws**2 - (1. - fdensity) * np.log(ws / mo) * np.exp(
            -np.log(ws / mo)**2 / 2 / sig**2) / np.sqrt(
                2 * np.pi) / ws**2 / sig**3
    ddxsi_st = -1 * dxsi_st / ws - dxsi_st * np.log(ws / mo) / ws / sig**2 - (
        1. - fdensity) * (1 / np.sqrt(2 * np.pi) / sig) * np.exp(
            -np.log(ws / mo)**2 / 2 /
            sig**2) * (1 / sig**2 - np.log(ws / mo) / sig**2 - 1) / ws**3
    ddxsi = ddxsi_st + fdensity * alpha * (alpha - 1) * ws**(alpha -
                                                             2) / w_norm
    l2 = -1 * (dxsi / xsi)**2 + ddxsi / np.absolute(xsi)
    #this is the hessian of the prior wrt m_x, not m_k
    l2_k = fft.ifft2(l2).flatten() / n_grid**2
    #we assume the Hessian of l2 is diagonal; under the assumption k = -k', we only get the zeroth element along the diagonal
    #let's fill the entire matrix and see what's up
    hess_m = np.zeros((n_grid**2, n_grid**2), dtype=complex)
    hess_l1 = np.zeros((n_grid**2, n_grid**2), dtype=complex)
    np.fill_diagonal(hess_l1, l1)
    off = []
    #print(l2_k[0]);
    for i in range(0, n_grid**2):
        for j in range(0, n_grid**2):
            hess_m[i, j] = l2_k[int(np.absolute(i - j))]
            #check the off diagonals to make sure they are small
            if i != j:
                off.append(l2_k[int(np.absolute(i - j))])
    hess_m = hess_l1 + hess_m
    '''
    print('Sigma Real is:');
    print(np.std(np.real(off)));
    print('Sigma Imag is:');
    print(np.std(np.imag(off)));
    fig, ax = plt.subplots(1,2)
    ax[0].imshow(np.real(hess_m));
    ax[0].set_title('Real Hessian')
    #ax[1].imshow(data3[:-4,:-4]);
    ax[1].imshow(np.imag(hess_m));
    ax[1].set_title('Imaginary Hessian')
    plt.show();
    '''
    l_tot = np.diagonal(hess_m)

    l_minr = min(np.real(l_tot))
    l_mini = min(np.imag(l_tot))
    #print(l_tot-l1);
    if l_minr < 0:
        l_tot = l_tot - l_minr + 0.1
    if l_mini < 0:
        l_tot = l_tot - 1j * (l_mini + 0.1)
    '''
    print('diag is:');
    print(l2_k[0]);
    print('other is:');
    print(l1);
    '''
    '''
    hess_m = np.zeros((n_grid**2,n_grid**2));
    np.fill_diagonal(hess_m,l_tot);
    return hess_m;
    '''
    #return l1,l2_k[0];
    l_tot = complex_to_real(l_tot)
    #print('hess is');
    #print(l_tot);
    return l_tot