    def test_chol_sample(self):
        """
        Sample field using Cholesky factorization of the covariance and of 
        the precision. 
        """
        #
        # Initialize Gaussian Random Field
        #
        n = 201  # size
        H = 0.5  # Hurst parameter in [0.5,1]

        # Form covariance and precision matrices
        x = np.arange(1, n)
        X, Y = np.meshgrid(x, x)
        K = fbm_cov(X, Y, H)
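        # Note: fbm_cov is assumed to return the fractional Brownian motion
        # covariance, K[s, t] = 0.5*(s**(2*H) + t**(2*H) - |s - t|**(2*H)),
        # evaluated on the meshgrid above.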

        # Compute the precision matrix
        I = np.identity(n - 1)
        Q = linalg.solve(K, I)

        # Define mean
        mean = np.random.rand(n - 1, 1)

        # Define Gaussian field
        u_cov = GaussianField(n - 1, mean=mean, K=K, mode='covariance')
        u_prc = GaussianField(n - 1, mean=mean, K=Q, mode='precision')

        # Define generating white noise
        z = u_cov.iid_gauss(n_samples=10)

        # The same white noise z drives all three sampling modes
        u_chol_prec = u_prc.sample(z=z, mode='precision', decomposition='chol')
        u_chol_cov = u_cov.sample(z=z, mode='covariance', decomposition='chol')
        u_chol_can = u_prc.sample(z=z, mode='canonical', decomposition='chol')

        fig, ax = plt.subplots(1, 3, figsize=(7, 3))
        ax[0].plot(u_chol_prec, linewidth=0.5)
        ax[0].set_title('Precision')
        ax[0].axis('tight')

        ax[1].plot(u_chol_cov, linewidth=0.5)
        ax[1].set_title('Covariance')
        ax[1].axis('tight')

        ax[2].plot(u_chol_can, linewidth=0.5)
        ax[2].set_title('Canonical')
        ax[2].axis('tight')
        fig.suptitle('Samples')

        fig.tight_layout()
        fig.subplots_adjust(top=0.8)
        fig.savefig('gaussian_field_chol_samples.eps')

    def test_degenerate_sample(self):
        """
        Test support and reduced covariance 
        """
        oort = 1 / np.sqrt(2)
        V = np.array([[0.5, oort, 0, 0.5], [0.5, 0, -oort, -0.5],
                      [0.5, -oort, 0, 0.5], [0.5, 0, oort, -0.5]])

        # Eigenvalues
        d = np.array([4, 3, 2, 0], dtype=float)
        Lmd = np.diag(d)

        # Covariance matrix
        K = V.dot(Lmd.dot(V.T))

        # Zero mean Gaussian field
        u_ex = GaussianField(4, K=K, mode='covariance', support=V[:, 0:3])
        u_im = GaussianField(4, K=K, mode='covariance')
        u_im.update_support()
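        # u_ex is given its support explicitly (the eigenvectors with nonzero
        # eigenvalues), while u_im is expected to recover the same support
        # from K via update_support().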

        # Check reduced covariances
        self.assertTrue(
            np.allclose(u_ex.covariance().get_matrix(),
                        u_im.covariance().get_matrix().toarray()))

        # Check supports
        V_ex = u_ex.support()
        V_im = u_im.support()

        # Ensure they have the same sign
        for i in range(V_ex.shape[1]):
            if V_ex[0, i] < 0:
                V_ex[:, i] = -V_ex[:, i]

            if V_im[0, i] < 0:
                V_im[:, i] = -V_im[:, i]

        self.assertTrue(np.allclose(V_ex, V_im))
        u_ex.set_support(V_ex)
        u_im.set_support(V_im)

        # Compare samples
        z = u_ex.iid_gauss(n_samples=1)
        u_ex_smp = u_ex.sample(z=z, decomposition='chol')
        u_im_smp = u_im.sample(z=z, decomposition='chol')
        self.assertTrue(np.allclose(u_ex_smp, u_im_smp))
Example #3
    def test_gaussian_random_field(self):
        """
        Reproduce statistics of Gaussian random field
        """
        #
        # Define Gaussian Field with degenerate support
        #
        oort = 1 / np.sqrt(2)
        V = np.array([[0.5, oort, 0, 0.5], [0.5, 0, -oort, -0.5],
                      [0.5, -oort, 0, 0.5], [0.5, 0, oort, -0.5]])

        # Eigenvalues
        d = np.array([4, 3, 2, 1], dtype=float)
        Lmd = np.diag(d)

        # Covariance matrix
        K = V.dot(Lmd.dot(V.T))

        mu = np.array([1, 2, 3, 4])[:, None]

        # Gaussian field with mean mu and covariance K
        dim = 4
        eta = GaussianField(dim, mean=mu, K=K, mode='covariance')
        n_vars = eta.covariance().size()
        level = 1

        # Define a Tasmanian global grid using the Gauss-Hermite physicists'
        # rule (weight function exp(-x**2))
        grid = Tasmanian.makeGlobalGrid(n_vars, 4, level, "level",
                                        "gauss-hermite-odd")

        # The physicists' rule integrates against exp(-x**2), so rescale the
        # nodes by sqrt(2) to obtain standard normal quadrature points and
        # normalize the weights by sqrt(pi)**n_vars.
        z = grid.getPoints()
        y = np.sqrt(2) * z

        const_norm = np.sqrt(np.pi)**n_vars

        # Evaluate the random field at the rescaled Gauss points
        w = grid.getQuadratureWeights()
        etay = eta.sample(z=y.T)
        n = grid.getNumPoints()
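        # Approximate the mean (I) and covariance (II) of eta by quadrature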
        I = np.zeros(4)
        II = np.zeros((4, 4))
        for i in range(n):
            II += w[i] * np.outer(etay[:, i] - mu.ravel(),
                                  etay[:, i] - mu.ravel())
            I += w[i] * etay[:, i]
        I /= const_norm
        II /= const_norm

        self.assertTrue(np.allclose(II, K))
        self.assertTrue(np.allclose(I, mu.ravel()))
Example #4
def test02_variance():
    """
    Compute the variance of J(q) for different mesh refinement levels
    and compare with MC estimates. 
    """
    l_max = 8
    for i_res in np.arange(2, l_max):

        # Computational mesh
        mesh = Mesh1D(resolution=(2**i_res, ))

        # Element
        element = QuadFE(mesh.dim(), 'DQ0')
        dofhandler = DofHandler(mesh, element)
        dofhandler.distribute_dofs()

        # Linear Functional
        mesh.mark_region('integrate',
                         lambda x: x >= 0.75,
                         entity_type='cell',
                         strict_containment=False)
        phi = Basis(dofhandler)
        assembler = Assembler(Form(4, test=phi, flag='integrate'))
        assembler.assemble()
        L = assembler.get_vector()

        # Define Gaussian random field
        C = Covariance(dofhandler, name='gaussian', parameters={'l': 0.05})
        C.compute_eig_decomp()

        eta = GaussianField(dofhandler.n_dofs(), K=C)
        eta.update_support()

        n_samples = 100000
        J_paths = L.dot(eta.sample(n_samples=n_samples))
        # Monte Carlo estimate of Var(J)
        var_mc = np.var(J_paths)

        # Analytic variance: Var(L.eta) = L K L^T = (L V) diag(lmd) (L V)^T
        lmd, V = C.get_eig_decomp()
        LV = L.dot(V)
        var_an = LV.dot(np.diag(lmd).dot(LV.transpose()))

        print(var_mc, var_an)
Example #5
phiy_1 = Basis(dQ1, 'vy')

phi_2 = Basis(dQ2)
phix_2 = Basis(dQ2, 'vx')
phiy_2 = Basis(dQ2, 'vy')

#
# Define Random field
#
cov = Covariance(dQ0, name='gaussian', parameters={'l': 0.01})
cov.compute_eig_decomp()
q = GaussianField(dQ0.n_dofs(), K=cov)

# Sample Random field
n_samples = 100
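# Exponentiating the Gaussian samples yields a positive (lognormal) field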
eq = Nodal(basis=phi_0, data=np.exp(q.sample(n_samples=n_samples)))

plot.contour(eq, n_sample=25)

#
# Compute state
#

# Define weak form
state = [[
    Form(eq, test=phix_1, trial=phix_1),
    Form(eq, test=phiy_1, trial=phiy_1),
    Form(1, test=phi_1)
], [Form(1, test=phi_1, flag='dmn')]]

# Assemble system
Example #6
phi = Basis(dQ1)
phi_x = Basis(dQ1, derivative='vx')
phi_y = Basis(dQ1, derivative='vy')

#
# Diffusion Parameter
#

# Covariance Matrix
K = Covariance(dQ1, name='exponential', parameters={'sgm': 1, 'l': 0.1})
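# ('sgm' and 'l' are presumably the standard deviation and correlation length
# of the exponential kernel.)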

# Gaussian random field θ
tht = GaussianField(dQ1.n_dofs(), K=K)

# Sample from field
tht_fn = Nodal(data=tht.sample(n_samples=3), basis=phi)
plot = Plot()
plot.contour(tht_fn)

#
# Advection
#
v = [0.1, -0.1]

plot.mesh(mesh, regions=[('in', 'edge'), ('out', 'edge'), ('reg', 'cell')])

k = Kernel(tht_fn, F=lambda tht: np.exp(tht))
adv_diff = [
    Form(k, trial=phi_x, test=phi_x),
    Form(k, trial=phi_y, test=phi_y),
    Form(0, test=phi),
Example #7

#
# Regularization parameter
# 
#gamma = 0.00001
gamma = 0.1

#
# Random diffusion coefficient
# 
n_samples = 200
cov = Covariance(dh_y, name='gaussian', parameters={'l':0.1})
k = GaussianField(ny, K=cov)
k.update_support()
kfn = Nodal(dofhandler=dh_y, data=k.sample(n_samples=n_samples))

    
# =============================================================================
# Assembly
# =============================================================================
K = Kernel(kfn, F=lambda f: np.exp(f))  # diffusivity

problems = [[Form(K, test=phi_x, trial=phi_x)], 
            [Form(test=phi, trial=phi)]]

assembler = Assembler(problems, mesh)
assembler.assemble()

# Mass matrix (for control)
M = assembler.af[1]['bilinear'].get_matrix()
Example #8

# Initial control
n_Q1 = dh_Q1.n_dofs()
data = np.zeros((n_Q1, 1))
data[production_dofs, :] = 1
u = Nodal(dofhandler=dh_Q1, data=data, dim=2)

#
# Random diffusion coefficient
#
cov = Covariance(dh_Q1, name='gaussian', parameters={'l': 0.1})
n_samples = 1000
k = GaussianField(n_Q1, K=cov)
k.update_support()
kfn = Nodal(dofhandler=dh_Q1, data=k.sample(n_samples=n_samples))

# =============================================================================
# Assembly
# =============================================================================
vb.comment('assembling system')
vb.tic()
K = Kernel(kfn, F=lambda f: np.exp(f))  # diffusivity

problems = [[
    Form(K, test=phi_x, trial=phi_x),
    Form(K, test=phi_y, trial=phi_y),
    Form(0, test=phi)
], [Form(test=phi, trial=phi)]]

assembler = Assembler(problems, mesh)
Example #9
# =============================================================================
# Random field
# =============================================================================
n_samples = 10
n_train, n_test = 8, 2
i_train = np.arange(n_train)
i_test = np.arange(n_train, n_samples)

cov = Covariance(dofhandler, name='gaussian', parameters={'l': 0.1})
cov.compute_eig_decomp()
d, V = cov.get_eig_decomp()
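# Inspect the eigenvalue decay of the covariance operator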
plt.semilogy(d, '.')
plt.show()
log_q = GaussianField(n, K=cov)
log_q.update_support()
qfn = Nodal(dofhandler=dofhandler,
            data=np.exp(log_q.sample(n_samples=n_samples)))

plot = Plot()
plot.line(qfn, i_sample=np.arange(n_samples))

# =============================================================================
# Generate Snapshot Set
# =============================================================================
phi = Basis(dofhandler, 'u')
phi_x = Basis(dofhandler, 'ux')

problems = [[Form(kernel=qfn, trial=phi_x, test=phi_x),
             Form(1, test=phi)], [Form(1, test=phi, trial=phi)]]

assembler = Assembler(problems, mesh)
assembler.assemble()
Example #10
        lmd = np.arange(len(d))
        ax.semilogy(lmd, d, '.-', label='level=%d' % i)
    plt.legend()
    plt.show()

    #
    # Define random field on the fine mesh
    #
    C = Covariance(dofhandler, name='gaussian', parameters={'l': 0.05})
    C.compute_eig_decomp()

    eta = GaussianField(dofhandler.n_dofs(), K=C)
    eta.update_support()

    #eta_path = Nodal(data=eta.sample(), basis=phi)
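    # Project one fine-mesh sample onto the coarse space (P is presumably the
    # restriction onto the coarse submesh, cf. subforest_flag=0 below) and
    # draw fine-mesh paths conditioned on that coarse observation.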
    eta0 = P.dot(eta.sample())
    eg0 = eta.condition(P, eta0, n_samples=100)
    eg0_paths = Nodal(data=eg0, basis=Basis(dofhandler))
    e0_path = Nodal(data=eta0, basis=Basis(dofhandler, subforest_flag=0))
    plot = Plot(quickview=False)
    ax = plt.subplot(111)
    for i in range(30):
        ax = plot.line(eg0_paths,
                       axis=ax,
                       mesh=mesh,
                       i_sample=i,
                       plot_kwargs={
                           'color': 'k',
                           'linewidth': 0.5
                       })
    ax = plot.line(e0_path,
Example #11
u_data = np.zeros((ny, 1))
u_data[dofs_inj] = 1
u = Nodal(dofhandler=dh_y, data=u_data, dim=1)

#
# Regularization parameter
#
gamma = 0.00001
#gamma = 0.1
#
# Random diffusion coefficient
#
cov = Covariance(dh_y, name='gaussian', parameters={'l': 0.1})
k = GaussianField(ny, K=cov)
k.update_support()
kfn = Nodal(dofhandler=dh_y, data=k.sample(n_samples=1))

# =============================================================================
# Assembly
# =============================================================================
K = Kernel(kfn, F=lambda f: np.exp(f))  # diffusivity

problems = [[Form(K, test=phi_x, trial=phi_x)], [Form(test=phi, trial=phi)]]

assembler = Assembler(problems, mesh)
assembler.assemble()

# Stiffness matrix and mass matrix (the latter used for the control)
A = assembler.af[0]['bilinear'].get_matrix()
M = assembler.af[1]['bilinear'].get_matrix()
    def test_condition_pointswise(self):
        """
        Generate samples and random field  by conditioning on pointwise data
        """
        #
        # Initialize Gaussian Random Field
        #
        # Resolution
        max_res = 10
        n = 2**max_res + 1  # size

        # Hurst parameter
        H = 0.5  # Hurst parameter in [0.5,1]

        # Form covariance and precision matrices
        x = np.arange(1, n)
        X, Y = np.meshgrid(x, x)
        K = fbm_cov(X, Y, H)

        # Compute the precision matrix
        I = np.identity(n - 1)
        Q = linalg.solve(K, I)

        # Define mean
        mean = np.random.rand(n - 1, 1)

        # Define Gaussian field
        u_cov = GaussianField(n - 1, mean=mean, K=K, mode='covariance')
        u_prc = GaussianField(n - 1, mean=mean, K=Q, mode='precision')

        # Define generating white noise
        z = u_cov.iid_gauss(n_samples=10)

        u_obs = u_cov.sample(z=z)

        # Index of measured observations
        A = np.arange(0, n - 1, 2)
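        # (i.e., every other node of the field is observed)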

        # observed quantities
        e = u_obs[A, 0][:, None]
        #print('e shape', e.shape)

        # Represent the index set A as a 0-1 observation matrix AA, so that
        # AA.dot(u) = u[A]
        k = len(A)
        rows = np.arange(k)
        cols = A
        vals = np.ones(k)
        AA = sp.coo_matrix((vals, (rows, cols)), shape=(k, n - 1)).toarray()

        AKAt = AA.dot(K.dot(AA.T))
        KAt = K.dot(AA.T)

        U, S, Vt = linalg.svd(AA)
        #print(U)
        #print(S)
        #print(Vt)

        #print(AA.dot(u_obs)-e)

        k = e.shape[0]
        Ko = 0.01 * np.identity(k)
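        # Ko: covariance of the observation noise (assumed interpretation of
        # the Ko argument to condition())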

        # Debug
        K = u_cov.covariance()
        #U_spp = u_cov.support()
        #A_cmp = A.dot(U_spp)

        u_cond = u_cov.condition(A, e, Ko=Ko, n_samples=100)

    def test_condition_ptwise(self):
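        """
        Successively condition a fractional Brownian motion field on the
        sample observed at the previous, coarser resolution level, and plot
        the resulting paths.
        """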
        #
        # Initialize Gaussian Random Field
        #
        # Resolution
        l_max = 9
        n = 2**l_max + 1  # size

        # Hurst parameter
        H = 0.5  # Hurst parameter in [0.5,1]

        # Form covariance and precision matrices
        x = np.arange(1, n + 1)
        X, Y = np.meshgrid(x, x)
        K = fbm_cov(X, Y, H)

        # Compute the precision matrix
        I = np.identity(n)
        Q = linalg.solve(K, I)

        # Plot meshes
        fig, ax = plt.subplots(1, 1)
        n = 2**l_max + 1
        for l in range(l_max):
            nl = 2**l + 1
            i_spp = [i * 2**(l_max - l) for i in range(nl)]
            ax.plot(x[i_spp], l * np.ones(nl), '.')
        #ax.plot(x,'.', markersize=0.1)
        #plt.show()

        # Plot conditioned field
        fig, ax = plt.subplots(3, 3)

        # Define original field
        u = []
        n = 2**(l_max) + 1
        for l in range(l_max):
            # Indices and support of the level-l subgrid within the fine grid
            nl = 2**l + 1
            i_spp = [i * 2**(l_max - l) for i in range(nl)]
            V_spp = I[:, i_spp]
            if l == 0:
                # Coarsest level: sample the field directly
                u_fne = GaussianField(n, K=K, mode='covariance',
                                      support=V_spp)
                u_obs = u_fne.sample()
                i_obs = np.array(i_spp)
            else:
                # Condition on the sample observed at the previous level
                u_fne = GaussianField(n, K=K, mode='covariance',
                                      support=V_spp)
                u_cnd = u_fne.condition(i_obs, u_obs[i_obs], output='field')
                u_obs = u_cnd.sample()
                i_obs = np.array(i_spp)
            u.append(u_obs)

            # Plot
            for ll in range(l, l_max):
                i, j = np.unravel_index(ll, (3, 3))
                if ll == l:
                    ax[i, j].plot(x[i_spp],
                                  5 * np.exp(0.01 * u_obs[i_spp]),
                                  linewidth=0.5)
                else:
                    ax[i, j].plot(x[i_spp],
                                  5 * np.exp(0.01 * u_obs[i_spp]),
                                  'g',
                                  linewidth=0.1,
                                  alpha=0.1)
            fig.savefig('successive_conditioning.pdf')

    def test_condition_with_nullspace(self):
        """
        Test conditioning with an existing nullspace
        """

        #
        # Define Gaussian Field with degenerate support
        #
        oort = 1 / np.sqrt(2)
        V = np.array([[0.5, oort, 0, 0.5], [0.5, 0, -oort, -0.5],
                      [0.5, -oort, 0, 0.5], [0.5, 0, oort, -0.5]])

        # Eigenvalues
        d = np.array([4, 3, 2, 0], dtype=float)
        Lmd = np.diag(d)

        # Covariance matrix
        K = V.dot(Lmd.dot(V.T))

        mu = np.array([1, 2, 3, 4])[:, None]

        # Gaussian field with mean mu and degenerate support
        u_ex = GaussianField(4,
                             mean=mu,
                             K=K,
                             mode='covariance',
                             support=V[:, 0:3])

        #
        # Conditioned random field (hard constraint)
        #
        # Condition on Ax = e
        A = np.array([[1, 2, 3, 2], [2, 4, 6, 4]])
        e = np.array([[1], [5]])

        # A is rank deficient (row 2 = 2 * row 1) -> expect an error
        with self.assertRaises(np.linalg.LinAlgError):
            u_ex.condition(A, e)

        # Full rank matrix
        A = np.array([[1, 2, 3, 2], [3, 9, 8, 7]])

        # Compute conditioned field
        u_cnd = u_ex.condition(A, e, output='field')
        X_cnd = u_cnd.sample(n_samples=100)

        # Sample by Kriging
        X_kriged = u_cnd.sample(n_samples=100)

        # Check that both samples satisfy constraint
        self.assertTrue(np.allclose(A.dot(X_cnd) - e, 0))
        self.assertTrue(np.allclose(A.dot(X_kriged) - e, 0))

        # Check that the support of the conditioned field is contained in
        # that of the unconditioned one
        self.assertTrue(
            np.allclose(u_ex.project(u_cnd.support(), 'nullspace'), 0))

        plt.close('all')
        fig, ax = plt.subplots(1, 3, figsize=(7, 3))
        ax[0].plot(u_ex.sample(n_samples=100), 'k', linewidth=0.1)
        ax[0].set_title('Unconditioned Field')

        ax[1].plot(X_kriged, 'k', linewidth=0.1)
        ax[1].plot(u_cnd.mean())
        ax[1].set_title('Kriged Sample')

        ax[2].plot(X_cnd, 'k', linewidth=0.1)
        ax[2].set_title('Sample of conditioned field')

        fig.suptitle('Samples')
        fig.tight_layout()
        fig.subplots_adjust(top=0.8)
        fig.savefig('degenerate_gf_conditioned_samples.eps')