Example #1
    def _init_params(self, X):
        init = self.init
        n_samples, n_features = X.shape
        n_components = self.n_components

        if init == 'kmeans':
            km = Kmeans(n_components)
            clusters, mean, cov = km.cluster(X)
            coef = sp.array([c.shape[0] / n_samples for c in clusters])
            comps = [multivariate_normal(mean[i], cov[i], allow_singular=True)
                     for i in range(n_components)]
        elif init == 'rand':
            coef = sp.absolute(sprand.randn(n_components))
            coef = coef / coef.sum()
            means = X[sprand.permutation(n_samples)[0: n_components]]
            clusters = [[] for i in range(n_components)]
            for x in X:
                idx = sp.argmin([spla.norm(x - mean) for mean in means])
                clusters[idx].append(x)

            comps = []
            for k in range(n_components):
                mean = means[k]
                cov = sp.cov(clusters[k], rowvar=0, ddof=0)
                comps.append(multivariate_normal(mean, cov, allow_singular=True))

        self.coef = coef
        self.comps = comps
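    # A hedged sketch of the E-step that typically consumes these parameters
    # (the method name and surrounding class are assumptions, not shown above):
    def _e_step(self, X):
        # responsibilities: resp[i, k] is proportional to coef[k] * pdf_k(x_i)
        resp = sp.array([self.coef[k] * self.comps[k].pdf(X)
                         for k in range(len(self.comps))]).T
        return resp / resp.sum(axis=1, keepdims=True)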
Example #2
def main():
    N = 10000
    D = 10

    mean, covar = moments_from_samples(random.rand(N), random.randn(D, N))

    print("residual norm^2:",
          linalg.norm(mean)**2,
          linalg.norm(covar - eye(D))**2)
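# moments_from_samples is not shown in this example; a minimal sketch of what
# it might compute, assuming the first argument holds per-sample weights and
# the second a (D, N) ndarray of samples:
def moments_from_samples(weights, samples):
    w = weights / weights.sum()              # normalized weights
    mean = samples.dot(w)                    # weighted mean, shape (D,)
    centered = samples - mean[:, None]
    covar = (centered * w).dot(centered.T)   # weighted covariance, shape (D, D)
    return mean, covar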
Example #3
def generate_lin_system(n, d, filepath=None):
    X = random.randn(n, d)
    A = 1. / (d * n) * X.T.dot(X)
    y = random.uniform(low=-1, high=1, size=d)
    b = A.dot(y)
    # -10 and 10 are an arbitrary choice here
    mask_A = random.uniform(low=-10, high=10, size=(d, d))
    mask_b = random.uniform(low=-10, high=10, size=d)
    if filepath:
        write_system(A, b, y, filepath)
    return (A, mask_A, b, mask_b, y)
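# A quick sanity check (hypothetical; assumes the same module-level random
# import that generate_lin_system itself relies on):
import numpy as np
A, mask_A, b, mask_b, y = generate_lin_system(100, 5)
assert np.allclose(A.dot(y), b)  # b was constructed as A.dot(y)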
Example #4
def generate_lin_regression(n, d, sigma):
    """
    See cgd.pdf
    """
    X = random.randn(n, d)
    for i in range(d):
        X[:, i] /= numpy.max(numpy.abs(X[:, i]))
    beta = random.uniform(low=0, high=1, size=d)
    e = numpy.array(random.normal(0, sigma, n))
    y = X.dot(beta) + e.T
    return (X, y, beta, e)
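# Hypothetical check: with small noise, ordinary least squares should roughly
# recover beta (assumes numpy as np):
import numpy as np
X, y, beta, e = generate_lin_regression(200, 5, sigma=0.01)
beta_hat, *_ = np.linalg.lstsq(X, y, rcond=None)
print(np.abs(beta_hat - beta).max())  # small when sigma is small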
Example #5
def calloptionMC(spot,strike,vol,T,Rf,M,alpha):
    '''
    call option using Monte Carlo simulation
    returns option price and lower and upper bounds
    '''
    S = spot * np.exp((Rf - 0.5 * vol**2) * T +
                      vol * math.sqrt(T) * random.randn(M))
    Carray = np.exp(-Rf * T) * np.maximum(S - strike, 0)

    c = np.average(Carray)
    c_std = np.std(Carray) / math.sqrt(M)
    bounds = c + np.array([-1,1]) * stats.norm.ppf(0.5 + alpha/2) * c_std
    return c, bounds
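# Hypothetical usage (relies on the np/math/stats/random imports used above);
# alpha is the two-sided confidence level of the interval:
price, bounds = calloptionMC(spot=100.0, strike=100.0, vol=0.2, T=1.0,
                             Rf=0.05, M=100000, alpha=0.95)
print(price, bounds)  # roughly 10.45, the Black-Scholes value for these inputs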
Example #6
    def draw(self):
        """
        If an arm has been drawn less than 2 times, select that arm

        Otherwise return:
            argmax([ ... random.normal(mean=expected_return[i], sd=std[i]) ...])

        :return: The numerical index of the selected arm
        """
        mu = array(self.expected_payouts)
        sd = array([float('inf') if n < 2 else sqrt(s / (n - 1)) for s, n in zip(self.M2, self.draws)])

        return argmax(random.randn(self.n_arms) * sd + mu)
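    # A hedged sketch of how the running statistics used by draw() might be
    # maintained (Welford's online algorithm; the method name is an assumption).
    # With it, s / (n - 1) in draw() is the usual sample variance of payouts:
    def update(self, arm, payout):
        self.draws[arm] += 1
        delta = payout - self.expected_payouts[arm]
        self.expected_payouts[arm] += delta / self.draws[arm]
        self.M2[arm] += delta * (payout - self.expected_payouts[arm])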
Example #7
File: pca.py Project: Yevgnen/prml
    def _em(self, X):
        # Constants
        n_samples, n_features = X.shape
        n_components = self.n_components
        max_iter = self.max_iter
        # tol = self.tol

        mu = X.mean(axis=0)
        X_centered = X - sp.atleast_2d(mu)

        # Initialize parameters
        latent_mean = 0
        sigma2 = 1
        weight = sprd.randn(n_features, n_components)

        # Main loop of EM algorithm
        for i in range(max_iter):
            # E step
            M = sp.dot(weight.T, weight) + sigma2 * sp.eye(n_components)
            inv_M = spla.inv(M)
            latent_mean = sp.dot(inv_M, sp.dot(weight.T, X_centered.T)).T

            # M step
            expectation_zzT = n_samples * sigma2 * inv_M + sp.dot(latent_mean.T, latent_mean)

            # Re-estimate W
            weight = sp.dot(sp.dot(X_centered.T, latent_mean), spla.inv(expectation_zzT))
            weight2 = sp.dot(weight.T, weight)

            # Re-estimate \sigma^2
            sigma2 = ((spla.norm(X_centered)**2 -
                       2 * sp.dot(latent_mean.ravel(), sp.dot(X_centered, weight).ravel()) +
                       sp.trace(sp.dot(expectation_zzT, weight2))) /
                      (n_samples * n_features))

        self.predict_mean = mu
        self.predict_cov = sp.dot(weight, weight.T) + sigma2 * sp.eye(n_features)
        self.latent_mean = latent_mean
        self.latent_cov = sigma2 * inv_M
        self.sigma2 = sigma2
        self.weight = weight
        self.inv_M = inv_M

        return self.latent_mean
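    # Context (standard probabilistic PCA, Bishop PRML §12.2): the model is
    # x ~ N(mu, W W^T + sigma^2 I), which is exactly predict_cov above, and
    # latent_mean holds E[z|x] = M^{-1} W^T (x - mu) with M = W^T W + sigma^2 I.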
Example #8
def add_awgn(x, SNRdB=100):
    '''
    add additive white Gaussian noise with a given SNR (dB)
    INPUT
    x: ndarray (TODO: check whether instances are stored as rows or columns)

    OUTPUT
    xn: copy of x corrupted by AWGN
    '''
    if SNRdB < np.inf:
        ps = linalg.norm(x, axis=-1)
        noise = random.randn(*x.shape)
        pn0 = linalg.norm(noise, axis=-1)
        pn = ps / pn0 / (10**(SNRdB / 20))
        if len(x.shape) > 1:
            pn = np.tile(pn, (x.shape[1], 1)).T
        xn = x + noise * pn
    else:
        xn = x.copy()  # return a copy, as the docstring promises
    return xn
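# Deterministic check of the scaling above (assumes the same np/linalg/random
# imports that add_awgn itself relies on): the noise is rescaled so that
# norm(x) / norm(xn - x) equals 10**(SNRdB / 20) for every instance.
x = random.randn(4, 1000)
xn = add_awgn(x, SNRdB=20)
print(20 * np.log10(linalg.norm(x, axis=-1) / linalg.norm(xn - x, axis=-1)))
# -> [20. 20. 20. 20.]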
Example #9
def Inference_2D(Data, Data_pred, confidence=0.95):
    ## Objective: To fit the hierarchical approach on bivariate functions
    ## Input:
    ## 1. Data: The dataset to be modeled
    ## 2. Data_pred: Prediction locations
    ## 3. confidence: % confidence level for the model
    ## Output:
    ## 1. results: The inference results for the prediction
    ## 2. Sp: The sparse representation at each scale
    ## 3. qoptx: Optimal penalty at each considered scale (either 1 or 2) in x direction
    ## 4. qopty: Optimal penalty at each considered scale (either 1 or 2) in y direction
    ## 5. fun_vec: The cost of fitting at each scale

    ## Initializations
    points = Data.shape[0]
    Gaussian = np.empty([points, points])
    ## Computing T
    dist = squareform(pdist(Data[:, 0:2], 'euclidean'))
    max_d = np.amax(dist)
    k1 = 2 * (max_d / 2)**2
    T = k1
    l_s = 0
    s = 0
    n = Data.shape[0]
    tol = 1.0e9
    fun_vec = []
    results = {}
    qoptx = []
    qopty = []
    Sp = {}
    while l_s <= n:
        epsilon = T / (2**s)
        Gaussian = np.exp(-((dist**2) / epsilon))

        # Finding the rank of the Gaussian Kernel
        l_s = LA.matrix_rank(Gaussian)
        k = l_s + 8

        #print('epsilon,T,s for this iteration is '+str(epsilon)+' '+str(T)+' '+str(s))

        # Calculating the W matrix
        points = Data.shape[0]
        A = random.randn(k, points)
        W = A.dot(Gaussian)
        W = np.matrix(W)

        # Applying Pivoted QR on W

        Q, R, P = linalg.qr(W, mode='full', pivoting=True)
        Perm = np.zeros([len(P), len(P)], dtype=int)
        for w in range(0, len(P)):
            Perm[P[w], w] = 1

        sparse = np.zeros([l_s, 3])
        for i in range(l_s):
            sparse[i, 0] = Data[P[i], 0]
            sparse[i, 1] = Data[P[i], 1]
            sparse[i, 2] = Data[P[i], 3]

        # Selecting Relevant columns of Kernel into B Matrix
        Mat = Gaussian.dot(Perm)
        B = Mat[:, 0:l_s]

        ## Getting the Permutation matrix for the coordinate vector
        ttx = sparse[:, 0]
        ttx_sort = np.argsort(ttx)
        Permmx = np.zeros([len(ttx_sort), len(ttx_sort)], dtype=int)
        for w in range(0, len(ttx_sort)):
            Permmx[w, ttx_sort[w]] = 1

        tty = sparse[:, 1]
        tty_sort = np.argsort(tty)
        Permmy = np.zeros([len(tty_sort), len(tty_sort)], dtype=int)
        for w in range(0, len(tty_sort)):
            Permmy[w, tty_sort[w]] = 1

        ### Computing the penalty and fitting the regularization network: try
        ### every combination of penalty orders (1 or 2) in x and y and keep
        ### the fit with the lowest cost (earlier combinations win ties).
        fun_best = np.inf
        for qx, qy in [(1, 1), (1, 2), (2, 1), (2, 2)]:
            Pex_temp = Penalty_p(qx, l_s)
            Pe_x = Permmx.T.dot(Pex_temp).dot(Permmx)
            Pey_temp = Penalty_p(qy, l_s)
            Pe_y = Permmy.T.dot(Pey_temp).dot(Permmy)
            args = (Data, B, Pe_x, Pe_y)
            bnds = [(1.0e-12, None), (1.0e-12, None)]
            par = [0.1, 0.1]
            l = minimize(Cost_func_2D, par, args, bounds=bnds, method='SLSQP')
            if l.fun < fun_best:
                fun_best = l.fun
                lam_x, lam_y = l.x[0], l.x[1]
                optq_x, optq_y = qx, qy
        fun_vec.append(fun_best)

        #8. Returning the mean Prediction
        Bpred = np.zeros([Data_pred.shape[0], l_s])
        for i in range(Data_pred.shape[0]):
            for j in range(l_s):
                temp = distance.euclidean(
                    Data_pred[i, :], np.array([sparse[j, 0], sparse[j, 1]]))
                Bpred[i, j] = np.exp(-((temp**2) / epsilon))

        Pex_temp = Penalty_p(optq_x, l_s)
        Pe_x = Permmx.T.dot(Pex_temp).dot(Permmx)
        Pey_temp = Penalty_p(optq_y, l_s)
        Pe_y = Permmy.T.dot(Pey_temp).dot(Permmy)
        inver = np.linalg.inv(B.T.dot(B) + n * lam_x * Pe_x + n * lam_y * Pe_y)
        theta = inver.dot(B.T.dot(Data[:, 3].reshape(-1, 1)))
        pred = Bpred.dot(theta)

        #9. Standard Error bounds
        nr = (Data[:, 3].reshape(-1, 1) - B.dot(theta)).reshape(-1, 1)
        term = B.dot(inver.dot(B.T))
        df_res = n - 2 * np.trace(term) + np.trace(term.dot(term.T))
        sigmasq = (nr.T.dot(nr)) / (df_res)
        sigmasq = sigmasq[0][0]
        std = np.sqrt(np.diag(sigmasq * Bpred.dot(inver).dot(Bpred.T)))
        stdev_t = sp.stats.t.ppf((1 + confidence) / 2., df_res) * std
        results[s] = [pred, stdev_t]
        qoptx.append(optq_x)
        qopty.append(optq_y)
        Sp[s] = sparse
        print(s)
        if l_s == n:
            break
        s = s + 1
    return [results, Sp, qoptx, qopty, fun_vec]
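# Penalty_p is defined elsewhere in this project; a plausible sketch, assuming
# it builds the usual order-q difference penalty matrix D_q^T D_q of size
# l_s x l_s (P-spline style):
import numpy as np

def Penalty_p(q, l_s):
    D = np.diff(np.eye(l_s), n=q, axis=0)  # order-q difference operator
    return D.T.dot(D)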
Example #10
def Inference_1D(Data_temp, Data_pred, confidence=0.95):
    ## Objective: To fit the hierarchical approach on univariate functions
    ## Input:
    ## 1. Data_temp: The dataset to be modeled
    ## 2. Data_pred: Prediction locations
    ## 3. confidence: % confidence level for the model
    ## Output:
    ## 1. results: The inference results for the prediction
    ## 2. Sparse: The sparse representation at each scale
    ## 3. qopt: Optimal penalty at each considered scale (either 1 or 2)
    ## 4. fun_vec: The cost of fitting at each scale

    ## Formatting the Data matrix
    if Data_temp.shape[1] == 3:
        Data = np.zeros([Data_temp.shape[0], 4])
        Data[:, 0] = Data_temp[:, 0]
        Data[:, 2] = Data_temp[:, 1]
        Data[:, 3] = Data_temp[:, 2]

    if Data_temp.shape[1] == 4:
        Data = Data_temp

    ## Initializations
    points = Data.shape[0]
    Gaussian = np.empty([points, points])
    ## Computing T
    dist = squareform(pdist(Data[:, 0:2], 'euclidean'))
    max_d = np.amax(dist)
    k1 = 2 * (max_d / 2)**2
    T = k1
    l_s = 0
    s = 0
    n = Data.shape[0]
    tol = 1.0e9
    fun_vec = []

    results = {}
    qopt = []
    Sp = {}
    while l_s <= n:
        epsilon = T / (2**s)
        Gaussian = np.exp(-((dist**2) / epsilon))

        # Finding the rank of the Gaussian Kernel
        l_s = LA.matrix_rank(Gaussian)
        k = l_s + 8

        # Getting Permutation ordering in terms of importance
        points = Data.shape[0]
        A = random.randn(k, points)
        W = A.dot(Gaussian)
        W = np.matrix(W)
        Q, R, P = linalg.qr(W, mode='full', pivoting=True)

        # Getting Perm: the permutation matrix
        Perm = np.zeros([len(P), len(P)], dtype=int)
        for w in range(0, len(P)):
            Perm[P[w], w] = 1

        # Getting sparse representation ordered by importance (Most important point at index 0)
        sparse = np.zeros([l_s, 3])
        for i in range(l_s):
            sparse[i, 0] = Data[P[i], 0]
            sparse[i, 1] = Data[P[i], 1]
            sparse[i, 2] = Data[P[i], 3]

        # Selecting Relevant columns of Kernel into B Matrix
        Mat = Gaussian.dot(Perm)
        B = Mat[:, 0:l_s]

        # Getting the Permutation operator for the coordinate vector which
        # changes ordering of theta from decreasing order of importance to
        # increasing sequentially (Relation <=)
        tt = sparse[:, 0]
        tt_sort = np.argsort(tt)
        Permm = np.zeros([len(tt_sort), len(tt_sort)], dtype=int)
        for w in range(0, len(tt_sort)):
            Permm[w, tt_sort[w]] = 1

        # Computing the Penalty and Fitting the Regularization network
        q1 = 1
        Pe_temp = Penalty_p(q1, l_s)
        Pe = Permm.T.dot(Pe_temp).dot(Permm)
        args = (Data, B, Pe)
        bnds = [(1.0e-12, None)]
        par = [0.01]
        l = minimize(Cost_func_1D, par, args, bounds=bnds, method='SLSQP')
        lam1 = l.x[0]
        fun1 = l.fun

        q2 = 2
        Pe_temp = Penalty_p(q2, l_s)
        Pe = Permm.T.dot(Pe_temp).dot(Permm)
        args = (Data, B, Pe)
        bnds = [(1.0e-12, None)]
        par = [0.01]
        l = minimize(Cost_func_1D, par, args, bounds=bnds, method='SLSQP')
        lam2 = l.x[0]
        fun2 = l.fun

        if fun1 <= fun2:
            fun_vec.append(fun1)
            lam = lam1
            optq = q1
        else:
            fun_vec.append(fun2)
            lam = lam2
            optq = q2

        #8. Returning the mean Prediction
        Bpred = np.zeros([Data_pred.shape[0], l_s])
        for i in range(Data_pred.shape[0]):
            for j in range(l_s):
                temp = distance.euclidean(
                    Data_pred[i, :], np.array([sparse[j, 0], sparse[j, 1]]))
                Bpred[i, j] = np.exp(-((temp**2) / epsilon))

        Pe_temp = Penalty_p(optq, l_s)
        Pe = Permm.T.dot(Pe_temp).dot(Permm)
        inver = np.linalg.inv(B.T.dot(B) + n * lam * Pe)
        theta = inver.dot(B.T.dot(Data[:, 3].reshape(-1, 1)))
        pred = Bpred.dot(theta)

        #9. Error bounds
        nr = (Data[:, 3].reshape(-1, 1) - B.dot(theta)).reshape(-1, 1)
        term = B.dot(inver.dot(B.T))
        df_res = n - 2 * np.trace(term) + np.trace(term.dot(term.T))
        sigmasq = (nr.T.dot(nr)) / (df_res)
        sigmasq = sigmasq[0][0]
        std = np.sqrt(np.diag(sigmasq * Bpred.dot(inver).dot(Bpred.T)))
        stdev_t = sp.stats.t.ppf((1 + confidence) / 2., df_res) * std
        results[s] = [pred, stdev_t]
        qopt.append(optq)
        Sp[s] = sparse
        print(s)
        if l_s == n:
            break
        s = s + 1
    return [results, Sp, qopt, fun_vec]
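# A hypothetical smoke test for Inference_1D (column layout inferred from the
# 3-column branch above: first column is the coordinate, last the response;
# prediction locations carry a zero second coordinate to match the distances):
import numpy as np
x = np.linspace(0, 1, 50)
Data_temp = np.column_stack([x, np.zeros_like(x), np.sin(2 * np.pi * x)])
Data_pred = np.column_stack([x, np.zeros_like(x)])
results, Sp, qopt, fun_vec = Inference_1D(Data_temp, Data_pred)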
Example #11
k = db['k_1d_from_2d']['center']

mask = np.isfinite(P_sim)
P_sim = P_sim[mask]
C_sim = C_sim[mask][:,mask]
k = k[mask]
n = len(k)

# Generate model for Covariance.
C_mod, f = get_C_model(C_sim, 10)

# Make a fake power spectrum.
P_fake = 1.3 * P_sim + 3.1 * np.mean(P_sim) * (k / np.mean(k))**1.4
e, v = linalg.eigh(C_sim)
noise = np.sqrt(e) * random.randn(n)
noise = np.dot(v, noise)
P_fake += noise
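# Why the three lines above produce noise with the simulation covariance:
# if C_sim = V diag(e) V^T (the eigh factorization), then v.dot(sqrt(e) * z)
# with z ~ N(0, I) has covariance V diag(e) V^T = C_sim.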

# Choose PS and covariance to process.
P = P_fake
C = C_sim
C_inv = linalg.inv(C)

# Process and fit.
fg_residuals = P - P_sim

def PL_model(pars):
    A = pars[0]
    beta = pars[1]
    return A * (k/k[0])**beta
Example #12
nx = ny = 20
nz = 6

model.generate_uniform_mesh(nx, ny, nz, xmin=0, xmax=L, ymin=0, ymax=L,
                            generate_pbcs=True)
model.set_parameters(src.physical_constants.IceParameters())
model.initialize_variables()

F = src.solvers.SteadySolver(model,config)
F.solve()

model.eps_reg = 1e-5
config['adjoint']['control_variable'] = [model.beta2]
config['adjoint']['bounds'] = [(0.0,5000.0)]
dolfin.File('results/beta2_obs.xml') << model.beta2

A = src.solvers.AdjointSolver(model,config)
u_o = model.u.vector().get_local()
v_o = model.v.vector().get_local()
U_e = 10.0
from scipy import random

for i in range(50):
    config['output_path'] = 'results/run_'+str(i)+'/'
    model.beta2.vector()[:] = 1000.
    u_error = U_e*random.randn(len(u_o))
    v_error = U_e*random.randn(len(v_o))
    model.u_o.vector().set_local(u_o+u_error)
    model.v_o.vector().set_local(v_o+v_error)
    A.solve()
Example #13
		hankel1_ratio_2d(m.astype(float64), x, br)
	return br

def hankel_ratio_scipy(m, x):
	br = x*hankel1(m-1,x)/hankel1(m,x) - m
	return br

def hankel_ratio_scipy_p(m, x):
	br = x*hankel1p(m,x)/hankel1(m,x)
	return br

t = timer.timer()

Nt = 100
mr = random.random_integers(-500,500,(100,2))
xr = random.randn(Nt)*300 + random.randn(Nt)*30j

#"Exact" defined by scipy
exact_y = [hankel_ratio_amos(mr, x) for x in xr]

t.start("asym")
y = zeros(mr.shape, complex_)
for ii in range(Nt):
	z=xr[ii]
	m=mr
	d = (e*z/2/(m+1))**2*((m-1)/(m+1))**(m-0.5)
	b1 = m*(1-2*d)
t.stop("asym")

nans_found = error = 0
t.start("scipy")
Example #14
def f(x):
    return exp(-x[0]**2 - x[1]**2) * sin(x[1]**2) + cos(x[0])


def neg_f(x):
    return -f(x)


minXY = [-1.5, -2]  # starting point
local_X_Y = minXY
local_fmin = fmin(neg_f, local_X_Y, disp=False)
minVAL = -1000  # running maximum of f found so far

for i in range(500):
    lel = random.randn(2)
    xd = fmin(neg_f, lel, disp=False)
    if minVAL < f(xd):
        minXY = xd
        minVAL = f(minXY)

delta = 4.5
x_knots = linspace(minXY[0] - delta, minXY[0] + delta, 41)
y_knots = linspace(minXY[1] - delta, minXY[1] + delta, 41)
X, Y = meshgrid(x_knots, y_knots)
Z = zeros(X.shape)
for i in range(Z.shape[0]):
    for j in range(Z.shape[1]):
        Z[i][j] = f([X[i, j], Y[i, j]])

print(50 * "#")
Example #15
mom = MomentumHybrid(model)
mom.solve(annotate=False)

u     = Function(model.Q)
v     = Function(model.Q)
assign(u, model.u.sub(0))
assign(v, model.u.sub(1))
u_o   = u.vector().array()
v_o   = v.vector().array()
n     = len(u_o)
#U_e   = model.get_norm(as_vector([model.u, model.v]), 'linf') / 500
U_e   = 0.18
print_min_max(U_e, 'U_e')

u_error = U_e * random.randn(n)
v_error = U_e * random.randn(n)
u_ob    = u_o + u_error
v_ob    = v_o + v_error

model.init_U_ob(u_ob, v_ob)

model.save_xdmf(model.u,   'U_true')
model.save_xdmf(model.U_ob, 'U_ob')
model.save_xdmf(model.beta, 'beta_true')

model.init_beta(30.0**2)
#model.init_beta_SIA()
#model.save_xdmf(model.beta, 'beta_SIA')

model.set_out_dir(out_dir + 'inversion/')
Example #16
def unique(list1):
    unique_list = []
    count_list = []
    for x in list1:
        if x not in unique_list:
            unique_list.append(x)
            count_list.append(1)
        else:
            count_list[unique_list.index(x)] += 1
    return unique_list, count_list


resultList = []

currentTime = time.perf_counter()

while time.perf_counter() - currentTime < 5:
    x0 = random.randn(2)
    x_min = fmin(f, x0)  #minimum
    resultList.append([round(x_min[0], 2), round(x_min[1], 2)])

val_list, count_list = unique(resultList)

best_probability = 0
for i in val_list:
    print("Point: {}  |  Value: {}   |  Probability: {}".format(
        i, f(i), count_list[val_list.index(i)] / sum(count_list) * 100))
    if best_probability <= count_list[val_list.index(i)] / sum(
            count_list) * 100:
        best_point = i
        best_probability = count_list[val_list.index(i)] / sum(
            count_list) * 100
Example #17
def mcmc(a, b, phi, sst_dict, n, ld_blk, blk_size, n_iter, n_burnin, thin,
         chrom, out_dir, beta_std):
    print('... MCMC ...')

    # derived stats
    beta_mrg = sp.array(sst_dict['BETA'], ndmin=2).T
    maf = sp.array(sst_dict['MAF'], ndmin=2).T
    n_pst = (n_iter - n_burnin) / thin
    p = len(sst_dict['SNP'])
    n_blk = len(ld_blk)

    # initialization
    beta = sp.zeros((p, 1))
    psi = sp.ones((p, 1))
    sigma = 1.0
    if phi is None:
        phi = 1.0
        phi_updt = True
    else:
        phi_updt = False

    beta_est = sp.zeros((p, 1))
    psi_est = sp.zeros((p, 1))
    sigma_est = 0.0
    phi_est = 0.0

    # MCMC
    for itr in range(1, n_iter + 1):
        if itr % 100 == 0:
            print('--- iter-' + str(itr) + ' ---')

        mm = 0
        quad = 0.0
        for kk in range(n_blk):
            if blk_size[kk] == 0:
                continue
            else:
                idx_blk = range(mm, mm + blk_size[kk])
                dinvt = ld_blk[kk] + sp.diag(1.0 / psi[idx_blk].T[0])
                dinvt_chol = linalg.cholesky(dinvt)
                beta_tmp = linalg.solve_triangular(
                    dinvt_chol, beta_mrg[idx_blk], trans='T') + sp.sqrt(
                        sigma / n) * random.randn(len(idx_blk), 1)
                beta[idx_blk] = linalg.solve_triangular(dinvt_chol,
                                                        beta_tmp,
                                                        trans='N')
                quad += sp.dot(sp.dot(beta[idx_blk].T, dinvt), beta[idx_blk])
                mm += blk_size[kk]

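        # Note on the block update above: with D = ld_blk[kk] + diag(1/psi)
        # and scipy's upper Cholesky factor U (U.T U = D), the two triangular
        # solves draw beta ~ N(D^{-1} beta_mrg, sigma/n * D^{-1}), the
        # conjugate posterior for the block.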
        err = max(n / 2.0 * (1.0 - 2.0 * sum(beta * beta_mrg) + quad),
                  n / 2.0 * sum(beta**2 / psi))
        sigma = 1.0 / random.gamma((n + p) / 2.0, 1.0 / err)

        delta = random.gamma(a + b, 1.0 / (psi + phi))

        for jj in range(p):
            psi[jj] = gigrnd.gigrnd(a - 0.5, 2.0 * delta[jj],
                                    n * beta[jj]**2 / sigma)
        psi[psi > 1] = 1.0

        if phi_updt:
            w = random.gamma(1.0, 1.0 / (phi + 1.0))
            phi = random.gamma(p * b + 0.5, 1.0 / (sum(delta) + w))

        # posterior
        if (itr > n_burnin) and (itr % thin == 0):
            beta_est += beta / n_pst
            psi_est += psi / n_pst
            sigma_est += sigma / n_pst
            phi_est += phi / n_pst

    # convert standardized beta to per-allele beta
    if not beta_std:
        beta_est /= sp.sqrt(2.0 * maf * (1.0 - maf))

    # write posterior effect sizes
    if phi_updt:
        eff_file = out_dir + '_pst_eff_a%d_b%.1f_phiauto_chr%d.txt' % (a, b,
                                                                       chrom)
    else:
        eff_file = out_dir + '_pst_eff_a%d_b%.1f_phi%1.0e_chr%d.txt' % (
            a, b, phi, chrom)

    with open(eff_file, 'w') as ff:
        for snp, bp, a1, a2, beta in zip(sst_dict['SNP'], sst_dict['BP'],
                                         sst_dict['A1'], sst_dict['A2'],
                                         beta_est):
            ff.write('%d\t%s\t%d\t%s\t%s\t%.6e\n' %
                     (chrom, snp, bp, a1, a2, beta))

    print('... Done ...')
Example #18
from scipy import linspace, cos, exp, random, meshgrid, zeros
from scipy.optimize import fmin
from matplotlib.pyplot import plot, show, legend, figure, cm, contour, clabel
from mpl_toolkits.mplot3d import Axes3D


def f(x):
    return exp(-x[0]**2 - x[1]**2)


def neg_f(x):
    return -f(x)


x0 = random.randn(2) * 2 + 3

x_min = fmin(neg_f, x0)

delta = 3
x_knots = linspace(x_min[0] - delta, x_min[0] + delta, 41)
y_knots = linspace(x_min[1] - delta, x_min[1] + delta, 41)
X, Y = meshgrid(x_knots, y_knots)
Z = zeros(X.shape)
for i in range(Z.shape[0]):
    for j in range(Z.shape[1]):
        Z[i][j] = f([X[i, j], Y[i, j]])

ax = Axes3D(figure(figsize=(8, 5)))
ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0.4)
ax.plot([x0[0]], [x0[1]], [f(x0)],
Example #19
mom.solve(annotate=False)

# add noise with a signal-to-noise ratio of 100 :
snr   = 100.0
u     = Function(model.Q)
v     = Function(model.Q)
assign(u, model.u.sub(0))
assign(v, model.u.sub(1))
u_o   = u.vector().array()
v_o   = v.vector().array()
n     = len(u_o)
sig   = model.get_norm(as_vector([u, v]), 'linf') / snr
print_min_max(snr, 'SNR')
print_min_max(sig, 'sigma')

u_error = sig * random.randn(n)
v_error = sig * random.randn(n)
u_ob    = u_o + u_error
v_ob    = v_o + v_error

# init the 'observed' velocity :
model.init_U_ob(u_ob, v_ob)
u_ob_ex = model.vert_extrude(model.u_ob, 'down')
v_ob_ex = model.vert_extrude(model.v_ob, 'down')
model.init_U_ob(u_ob_ex, v_ob_ex)

# init the traction to the SIA approximation :
model.init_beta_SIA()

# solving the incomplete adjoint is more efficient :
mom.linearize_viscosity()
Example #20
        'animate': False,
        'bounds': None,
        'control_variable': None,
        'regularization_type': 'Tikhonov'
    }
}

F = SteadySolver(model, config)
F.solve()

model.eps_reg = 1e-5
config['adjoint']['control_variable'] = [model.beta2]
config['adjoint']['bounds'] = [(0.0, 5000.0)]
File('results/beta2_obs.xml') << model.beta2
File('results/beta2_obs.pvd') << model.beta2

A = AdjointSolver(model, config)
u_o = model.u.vector().get_local()
v_o = model.v.vector().get_local()
U_e = 10.0

for i in range(50):
    model.beta2.vector()[:] = 1000.
    u_error = U_e * random.randn(len(u_o))
    v_error = U_e * random.randn(len(v_o))
    model.u_o.vector().set_local(u_o + u_error)
    model.v_o.vector().set_local(v_o + v_error)
    model.u_o.vector().apply('insert')
    model.v_o.vector().apply('insert')
    A.solve()
Example #21
def value_and_count(list):
    Value_list = []
    Count_list = []
    for x in list:
        if x not in Value_list:
            Value_list.append(x)
            Count_list.append(1)
        else:
            Count_list[Value_list.index(x)] += 1
    return Value_list, Count_list


Xmin_list = []

currenttime = time.perf_counter()

while time.perf_counter() - currenttime < 30:
    x_0 = random.randn(2) * 20 - 10
    x_min = fmin(neg_f, x_0)
    Xmin_list.append([round(x_min[0], 2), round(x_min[1], 2)])

Val_list, count_list = value_and_count(Xmin_list)

probab = 0
for i in Val_list:
    print("point:", i, "value:", round(f(i), 2), "probability:",
          round(count_list[Val_list.index(i)] / sum(count_list) * 100, 4))
    if probab <= count_list[Val_list.index(i)] / sum(count_list) * 100:
        best_point = i
        probab = count_list[Val_list.index(i)] / sum(count_list) * 100

print("best point:", best_point, probab)
Example #22
k = db["k_1d_from_2d"]["center"]

mask = np.isfinite(P_sim)
P_sim = P_sim[mask]
C_sim = C_sim[mask][:, mask]
k = k[mask]
n = len(k)

# Generate model for Covariance.
C_mod, f = get_C_model(C_sim, 10)

# Make a fake power spectrum.
P_fake = 1.3 * P_sim + 3.1 * np.mean(P_sim) * (k / np.mean(k)) ** 1.4
e, v = linalg.eigh(C_sim)
noise = np.sqrt(e) * random.randn(n)
noise = np.dot(v, noise)
P_fake += noise

# Choose PS and covariance to process.
P = P_fake
C = C_sim
C_inv = linalg.inv(C)

# Process and fit.
fg_residuals = P - P_sim


def PL_model(pars):
    A = pars[0]
    beta = pars[1]
    return A * (k / k[0]) ** beta
Example #23
def mcmc(a, b, phi, snp_dict, beta_mrg, frq_dict, idx_dict, n, ld_blk, blk_size, n_iter, n_burnin, thin, pop, chrom, out_dir, out_name, meta, seed):
    print('... MCMC ...')

    # seed
    if seed is not None:
        random.seed(seed)

    # derived stats
    n_pst = (n_iter-n_burnin)/thin
    n_pop = len(pop)
    p_tot = len(snp_dict['SNP'])

    p = {}
    n_blk = {}
    het = {}
    for pp in range(n_pop):
        p[pp] = len(beta_mrg[pp])
        n_blk[pp] = len(ld_blk[pp])
        het[pp] = sp.sqrt(2.0*frq_dict[pp]*(1.0-frq_dict[pp]))

    n_grp = sp.zeros((p_tot,1))
    for jj in range(p_tot):
        for pp in range(n_pop):
            if jj in idx_dict[pp]:
                n_grp[jj] += 1

    # initialization
    beta = {}
    sigma = {}
    for pp in range(n_pop):
        beta[pp] = sp.zeros((p[pp],1))
        sigma[pp] = 1.0

    psi = sp.ones((p_tot,1))

    if phi is None:
        phi = 1.0
        phi_updt = True
    else:
        phi_updt = False

    # space allocation
    beta_est = {}
    sigma_est = {}
    for pp in range(n_pop):
        beta_est[pp] = sp.zeros((p[pp],1))
        sigma_est[pp] = 0.0

    psi_est = sp.zeros((p_tot,1))
    phi_est = 0.0

    # MCMC
    for itr in range(1,n_iter+1):
        if itr % 100 == 0:
            print('--- iter-' + str(itr) + ' ---')

        for pp in range(n_pop):
            mm = 0
            quad = 0.0
            psi_pp = psi[idx_dict[pp]]
            for kk in range(n_blk[pp]):
                if blk_size[pp][kk] == 0:
                    continue
                else:
                    idx_blk = range(mm,mm+blk_size[pp][kk])
                    dinvt = ld_blk[pp][kk]+sp.diag(1.0/psi_pp[idx_blk].T[0])
                    dinvt_chol = linalg.cholesky(dinvt)
                    beta_tmp = linalg.solve_triangular(dinvt_chol, beta_mrg[pp][idx_blk], trans='T') \
                               + sp.sqrt(sigma[pp]/n[pp])*random.randn(len(idx_blk),1)
                    beta[pp][idx_blk] = linalg.solve_triangular(dinvt_chol, beta_tmp, trans='N')
                    quad += sp.dot(sp.dot(beta[pp][idx_blk].T, dinvt), beta[pp][idx_blk])
                    mm += blk_size[pp][kk]

            err = max(n[pp]/2.0*(1.0-2.0*sum(beta[pp]*beta_mrg[pp])+quad), n[pp]/2.0*sum(beta[pp]**2/psi_pp))
            sigma[pp] = 1.0/random.gamma((n[pp]+p[pp])/2.0, 1.0/err)

        delta = random.gamma(a+b, 1.0/(psi+phi))

        xx = sp.zeros((p_tot,1))
        for pp in range(n_pop):
            xx[idx_dict[pp]] += n[pp]*beta[pp]**2/sigma[pp]

        for jj in range(p_tot):
            while True:
                try:
                    psi[jj] = gigrnd.gigrnd(a-0.5*n_grp[jj], 2.0*delta[jj], xx[jj])
                except Exception:
                    # retry on occasional numerical failure in gigrnd
                    continue
                else:
                    break
        psi[psi>1] = 1.0

        if phi_updt:
            w = random.gamma(1.0, 1.0/(phi+1.0))
            phi = random.gamma(p_tot*b+0.5, 1.0/(sum(delta)+w))

        # posterior
        if (itr > n_burnin) and (itr % thin == 0):
            for pp in range(n_pop):
                beta_est[pp] = beta_est[pp] + beta[pp]/n_pst
                sigma_est[pp] = sigma_est[pp] + sigma[pp]/n_pst

            psi_est = psi_est + psi/n_pst
            phi_est = phi_est + phi/n_pst

    # convert standardized beta to per-allele beta
    for pp in range(n_pop):
        beta_est[pp] /= het[pp]

    # meta
    if meta == 'TRUE':
        vv = sp.zeros((p_tot,1))
        zz = sp.zeros((p_tot,1))
        for pp in range(n_pop):
            vv[idx_dict[pp]] += n[pp]/sigma_est[pp]/psi_est[idx_dict[pp]]*het[pp]**2
            zz[idx_dict[pp]] += n[pp]/sigma_est[pp]/psi_est[idx_dict[pp]]*het[pp]**2*beta_est[pp]
        mu = zz/vv

    # write posterior effect sizes
    for pp in range(n_pop):
        if phi_updt:
            eff_file = out_dir + '/' + '%s_%s_pst_eff_a%d_b%.1f_phiauto_chr%d.txt' % (out_name, pop[pp], a, b, chrom)
        else:
            eff_file = out_dir + '/' + '%s_%s_pst_eff_a%d_b%.1f_phi%1.0e_chr%d.txt' % (out_name, pop[pp], a, b, phi, chrom)

        snp_pp = [snp_dict['SNP'][ii] for ii in idx_dict[pp]]
        bp_pp = [snp_dict['BP'][ii] for ii in idx_dict[pp]]
        a1_pp = [snp_dict['A1'][ii] for ii in idx_dict[pp]]
        a2_pp = [snp_dict['A2'][ii] for ii in idx_dict[pp]]

        with open(eff_file, 'w') as ff:
            for snp, bp, a1, a2, beta in zip(snp_pp, bp_pp, a1_pp, a2_pp, beta_est[pp]):
                ff.write('%d\t%s\t%d\t%s\t%s\t%.6e\n' % (chrom, snp, bp, a1, a2, beta))

    if meta == 'TRUE':
        if phi_updt:
            eff_file = out_dir + '/' + '%s_META_pst_eff_a%d_b%.1f_phiauto_chr%d.txt' % (out_name, a, b, chrom)
        else:
            eff_file = out_dir + '/' + '%s_META_pst_eff_a%d_b%.1f_phi%1.0e_chr%d.txt' % (out_name, a, b, phi, chrom)

        with open(eff_file, 'w') as ff:
            for snp, bp, a1, a2, beta in zip(snp_dict['SNP'], snp_dict['BP'], snp_dict['A1'], snp_dict['A2'], mu):
                ff.write('%d\t%s\t%d\t%s\t%s\t%.6e\n' % (chrom, snp, bp, a1, a2, beta))

    # print estimated phi
    if phi_updt:
        print('... Estimated global shrinkage parameter: %1.2e ...' % phi_est )

    print('... Done ...')
Example #24
def hankel_ratio_scipy(m, x):
    br = x * hankel1(m - 1, x) / hankel1(m, x) - m
    return br


def hankel_ratio_scipy_p(m, x):
    br = x * hankel1p(m, x) / hankel1(m, x)
    return br


t = timer.timer()

Nt = 100
mr = random.random_integers(-500, 500, (100, 2))
xr = random.randn(Nt) * 300 + random.randn(Nt) * 30j

#"Exact" defined by scipy
exact_y = [hankel_ratio_amos(mr, x) for x in xr]

t.start("asym")
y = zeros(mr.shape, complex_)
for ii in range(Nt):
    z = xr[ii]
    m = mr
    d = (e * z / 2 / (m + 1))**2 * ((m - 1) / (m + 1))**(m - 0.5)
    b1 = m * (1 - 2 * d)
t.stop("asym")

nans_found = error = 0
t.start("scipy")
Example #25
def assimilate(h, H, g):
    """
  Run the full assimilation process for a cell size <h>, ice thickness <H>,
  and surface slope <g>.
  """
    out_dir = 'dump/stokes/full/h_%i/H_%i/g_%.2f/' % (h, H, g)
    n = 25
    L = n * h
    alpha = g * pi / 180

    p1 = Point(0.0, 0.0, 0.0)
    p2 = Point(L, L, 1)
    mesh = BoxMesh(p1, p2, n, n, 10)

    model = D3Model(mesh, out_dir + 'initial/', use_periodic=True)

    surface = Expression('- x[0] * tan(alpha)',
                         alpha=alpha,
                         element=model.Q.ufl_element())
    bed = Expression('- x[0] * tan(alpha) - H',
                     alpha=alpha,
                     H=H,
                     element=model.Q.ufl_element())
    beta = Expression('H - H * sin(2*pi*x[0]/L) * sin(2*pi*x[1]/L)',
                      H=H,
                      alpha=alpha,
                      L=L,
                      element=model.Q.ufl_element())

    model.calculate_boundaries()
    model.deform_mesh_to_geometry(surface, bed)

    model.init_beta(beta)
    model.init_b(model.A0(0)**(-1 / model.n(0)))
    model.init_E(1.0)

    nparams = {
        'newton_solver': {
            'linear_solver': 'mumps',
            'relative_tolerance': 1e-8,
            'relaxation_parameter': 1.0,
            'maximum_iterations': 25,
            'error_on_nonconvergence': False
        }
    }
    m_params = {'solver': nparams}

    mom = MomentumDukowiczStokes(model, m_params, isothermal=True)
    mom.solve(annotate=False)

    u, v, w = model.U3.split(True)
    u_s = model.vert_extrude(u, d='down')
    v_s = model.vert_extrude(v, d='down')
    sigma = 100.0
    U_mag = model.get_norm(as_vector([u_s, v_s]), 'linf')[1]
    n = len(U_mag)
    U_avg = sum(U_mag) / n
    U_e = U_avg / sigma
    print_min_max(U_e, 'U_error')

    u_o = u.vector().array()
    v_o = v.vector().array()
    n = len(u_o)
    u_error = U_e * random.randn(n)
    v_error = U_e * random.randn(n)
    u_ob = u_o + u_error
    v_ob = v_o + v_error

    model.assign_variable(u, u_ob)
    model.assign_variable(v, v_ob)

    print_min_max(u_error, 'u_error')
    print_min_max(v_error, 'v_error')

    model.init_U_ob(u, v)

    #model.save_pvd(model.U3,   'U_true')
    #model.save_pvd(model.U_ob, 'U_ob')
    #model.save_pvd(model.beta, 'beta_true')
    #model.set_out_dir(model.out_dir + 'xml/')

    model.save_xml(interpolate(model.u, model.Q), 'u_true')
    model.save_xml(interpolate(model.v, model.Q), 'v_true')
    model.save_xml(interpolate(model.w, model.Q), 'w_true')
    model.save_xml(model.U_ob, 'U_ob')
    model.save_xml(model.beta, 'beta_true')

    # save the true beta for MSE calculation :
    beta_true = model.beta.copy(True)

    m_params['solver']['newton_solver']['maximum_iterations'] = 3

    mom = MomentumDukowiczStokes(model, m_params, linear=True, isothermal=True)

    #model.init_beta_SIA()
    #model.save_pvd(model.beta, 'beta_SIA')

    #alphas = [1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1e0,
    #          1e1,  1e2,  1e3,  1e4,  1e5,  1e6]
    #alphas = [1e-2, 5e-2, 1e-3, 5e-3, 1e-2, 5e-2, 1e-1,
    #          5e-1, 1e0,  5e0,  1e1,  5e1,  1e2]
    alphas = [1e-2, 5e-2, 1e-1, 5e-1, 1e0, 5e0, 1e1]
    Js = []
    Rs = []
    MSEs = []
    REs = []
    nits = []

    for alpha in alphas:

        model.init_beta(30.0**2)
        mom.solve(annotate=True)

        model.set_out_dir(out_dir=out_dir + 'assimilated/alpha_%.1E/' % alpha)

        J = mom.form_obj_ftn(integral=model.dSrf,
                             kind='log_lin_hybrid',
                             g1=0.01,
                             g2=1000)
        R = mom.form_reg_ftn(model.beta,
                             integral=model.dBed,
                             kind='Tikhonov',
                             alpha=alpha)
        I = J + R

        #controls = File(model.out_dir + "control_viz/beta_control.pvd")
        #beta_viz = Function(model.Q, name="beta_control")

        def eval_cb(I, beta):
            #       commented out because the model variables are not updated by
            #       dolfin-adjoint (yet) :
            #mom.print_eval_ftns()
            #print_min_max(mom.U, 'U')
            print_min_max(I, 'I')
            print_min_max(beta, 'beta')

        def deriv_cb(I, dI, beta):
            #print_min_max(I,     'I')
            print_min_max(dI, 'dI/dbeta')
            #print_min_max(beta,  'beta')
            #beta_viz.assign(beta)
            #controls << beta_viz

        def hessian_cb(I, ddI, beta):
            print_min_max(ddI, 'd/db dI/db')

        m = FunctionControl('beta')
        F = ReducedFunctional(Functional(I),
                              m,
                              eval_cb_post=eval_cb,
                              derivative_cb_post=deriv_cb,
                              hessian_cb=hessian_cb)

        b_opt, res = minimize(F,
                              method="L-BFGS-B",
                              tol=1e-9,
                              bounds=(0, 4000),
                              options={
                                  "disp": True,
                                  "maxiter": 1000,
                                  "gtol": 1e-5,
                                  "factr": 1e7
                              })

        #problem = MinimizationProblem(F, bounds=(0, 4000))
        #parameters = {"tol"                : 1e8,
        #              "acceptable_tol"     : 1000.0,
        #              "maximum_iterations" : 1000,
        #              "linear_solver"      : "ma57"}
        #
        #solver = IPOPTSolver(problem, parameters=parameters)
        #b_opt = solver.solve()
        print_min_max(b_opt, 'b_opt')

        model.assign_variable(model.beta, b_opt)
        mom.solve(annotate=False)
        mom.calc_eval_ftns()

        beta_true_v = beta_true.vector()
        beta_opt_v = b_opt.vector()

        mse = norm(beta_opt_v - beta_true_v)**2 / len(beta_true_v)
        re = norm(beta_opt_v - beta_true_v) / norm(beta_true_v)

        print_min_max(mse, 'MSE')
        print_min_max(re, 'RE')

        Rs.append(assemble(mom.Rp))
        Js.append(assemble(mom.J))
        MSEs.append(mse)
        REs.append(re)
        nits.append(res['nit'])

        #model.save_pvd(model.beta, 'beta_opt')
        #model.save_pvd(model.U3,   'U_opt')
        #model.set_out_dir(model.out_dir + 'xml/')

        model.save_xml(model.beta, 'beta_opt')
        model.save_xml(interpolate(model.u, model.Q), 'u_opt')
        model.save_xml(interpolate(model.v, model.Q), 'v_opt')
        model.save_xml(interpolate(model.w, model.Q), 'w_opt')

        # reset entire dolfin-adjoint state :
        adj_reset()

    from numpy import savetxt, array
    import os

    if model.MPI_rank == 0:
        d = out_dir + 'plot/'
        if not os.path.exists(d):
            os.makedirs(d)
        savetxt(d + 'Rs.txt', array(Rs))
        savetxt(d + 'Js.txt', array(Js))
        savetxt(d + 'as.txt', array(alphas))
        savetxt(d + 'MSEs.txt', array(MSEs))
        savetxt(d + 'REs.txt', array(REs))
        savetxt(d + 'nits.txt', array(nits))
Example #26
model.init_mask(0.0)  # all grounded
model.init_beta(beta)
model.init_b(model.A0(0)**(-1 / model.n(0)))

mom = MomentumHybrid(model, isothermal=True)
mom.solve(annotate=True)

u, v, w = model.U3.split(True)
u_o = u.vector().array()
v_o = v.vector().array()
n = len(u_o)
#U_e   = model.get_norm(as_vector([model.u, model.v]), 'linf')[1] / 500
U_e = 0.18
print_min_max(U_e, 'U_e')

u_error = U_e * random.randn(n)
v_error = U_e * random.randn(n)
u_ob = u_o + u_error
v_ob = v_o + v_error

model.init_U_ob(u_ob, v_ob)

model.save_pvd(model.U3, 'U_true')
model.save_pvd(model.U_ob, 'U_ob')
model.save_pvd(model.beta, 'beta_true')

model.init_beta(30.0**2)
#model.init_beta_SIA()
#model.save_pvd(model.beta, 'beta_SIA')

model.set_out_dir(out_dir=out_dir + 'inverted/')
Example #27
u_i = Expression('sqrt(pow(x[0],2) + pow(x[1],2))')
v_i = Expression('exp(-pow(x[0],2)/2 - pow(x[1],2)/2)')
z_i = Expression('10 + 10 * sin(2*pi*x[0]) * sin(2*pi*x[1])')
x_i = Expression('x[0]')
y_i = Expression('x[1]')

u = interpolate(u_i, V)
v = interpolate(v_i, V)
z = interpolate(z_i, V)
x = interpolate(x_i, V)
y = interpolate(y_i, V)
w = project(u * v * z, V)

# apply some noise
w_v = w.vector().array()
w_v += 0.05 * random.randn(len(w_v))
w.vector().set_local(w_v)
w.vector().apply('insert')

bmesh = BoundaryMesh(mesh, "exterior")  # surface boundary mesh

cellmap = bmesh.entity_map(2)
pb = CellFunction("size_t", bmesh, 0)
for c in cells(bmesh):
    if Facet(mesh, cellmap[c.index()]).normal().z() < 0:
        pb[c] = 1
submesh = SubMesh(bmesh, pb, 1)  # subset of surface mesh

Vs = FunctionSpace(submesh, "CG", 1)  # submesh function space

us = Function(Vs)  # desired function