Example #1
def test02a_sensitivity_gradient():
    """
    Test whether the sensitivity and adjoint calculations give the same gradient
    """
    # Mesh
    mesh = Mesh1D(resolution=(100, ))
    mesh.mark_region('left', lambda x: np.abs(x) < 1e-10)
    mesh.mark_region('right', lambda x: np.abs(1 - x) < 1e-10)

    # Element
    Q = QuadFE(mesh.dim(), 'Q2')
    dh = DofHandler(mesh, Q)
    dh.distribute_dofs()
    n_dofs = dh.n_dofs()
    phi = Basis(dh, 'u')

    # Covariance
    cov = Covariance(dh, name='gaussian', parameters={'l': 0.05})
    cov.compute_eig_decomp()
    lmd, V = cov.get_eig_decomp()
    d = len(lmd)

    # Coarse field (single sample)
    d0 = 2
    z0 = np.random.randn(d0, 1)
    q0 = sample_q0(V, lmd, d0, z0)
    q0_fn = Nodal(data=q0, basis=phi)

    # State
    J0, u0 = sample_qoi(q0, dh, return_state=True)
    u0_fn = Nodal(data=u0, basis=phi)

    # Compute gradient using sensitivity
    dJs = np.zeros(n_dofs)
    for i in range(n_dofs):
        # Define perturbation
        dq = np.zeros(n_dofs)
        dq[i] = 1
        dq_fn = Nodal(data=dq, basis=phi)

        # Directional derivative of J in direction dq via the sensitivity equation
        dJs[i] = dJdq_sen(q0_fn, u0_fn, dq_fn)

    dJs_fn = Nodal(data=dJs, basis=phi)
    plot = Plot()
    plot.line(dJs_fn)

    # Compute gradient using adjoint method
    dJa = dJdq_adj(q0_fn, u0_fn)
    dJa_fn = Nodal(data=dJa, basis=phi)
    print(dJa)
    plot.line(dJa_fn)
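
If the adjoint and sensitivity computations are consistent, the two gradients computed above should agree entry by entry. A minimal sketch of that check, appended to the end of the test; the shape of dJa (flattening to length n_dofs, like dJs) and the tolerance are assumptions:

    # Consistency check between the two gradients (tolerance is illustrative)
    rel_err = np.linalg.norm(np.asarray(dJa).ravel() - dJs) / np.linalg.norm(dJs)
    print('relative adjoint/sensitivity mismatch:', rel_err)
    assert rel_err < 1e-8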
Example #2
def test02_variance():
    """
    Compute the variance of J(q) for different mesh refinement levels
    and compare with MC estimates. 
    """
    l_max = 8
    for i_res in np.arange(2, l_max):

        # Computational mesh
        mesh = Mesh1D(resolution=(2**i_res, ))

        # Element
        element = QuadFE(mesh.dim(), 'DQ0')
        dofhandler = DofHandler(mesh, element)
        dofhandler.distribute_dofs()

        # Linear Functional
        mesh.mark_region('integrate',
                         lambda x: x >= 0.75,
                         entity_type='cell',
                         strict_containment=False)
        phi = Basis(dofhandler)
        assembler = Assembler(Form(4, test=phi, flag='integrate'))
        assembler.assemble()
        L = assembler.get_vector()

        # Define Gaussian random field
        C = Covariance(dofhandler, name='gaussian', parameters={'l': 0.05})
        C.compute_eig_decomp()

        eta = GaussianField(dofhandler.n_dofs(), K=C)
        eta.update_support()

        n_samples = 100000
        J_paths = L.dot(eta.sample(n_samples=n_samples))
        var_mc = np.var(J_paths)
        lmd, V = C.get_eig_decomp()
        LV = L.dot(V)
        var_an = LV.dot(np.diag(lmd).dot(LV.transpose()))

        print(var_mc, var_an)
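
The analytic value var_an follows from the eigendecomposition of the covariance: assuming the field is sampled as eta = V diag(sqrt(lmd)) z with z ~ N(0, I), the linear functional J = L·eta has Var(J) = L V diag(lmd) V^T L^T = (LV) diag(lmd) (LV)^T, which is exactly the expression assembled above. A standalone NumPy sketch of the identity (all names synthetic):

import numpy as np

rng = np.random.default_rng(0)
n = 50
V, _ = np.linalg.qr(rng.standard_normal((n, n)))   # orthonormal 'eigenvectors'
lmd = np.exp(-0.5 * np.arange(n))                  # decaying 'eigenvalues'
L = rng.standard_normal(n)                         # linear functional

z = rng.standard_normal((n, 200000))
eta = V @ (np.sqrt(lmd)[:, None] * z)              # eta = V diag(sqrt(lmd)) z
var_mc = np.var(L @ eta)                           # Monte Carlo estimate
var_an = (L @ V) @ (lmd * (L @ V))                 # (LV) diag(lmd) (LV)^T
print(var_mc, var_an)                              # agree up to MC error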
Example #3
#
# Define Basis functions
#
phi_0 = Basis(dQ0)

phi_1 = Basis(dQ1)
phix_1 = Basis(dQ1, 'vx')
phiy_1 = Basis(dQ1, 'vy')

phi_2 = Basis(dQ2)
phix_2 = Basis(dQ2, 'vx')
phiy_2 = Basis(dQ2, 'vy')

#
# Define Random field
#
cov = Covariance(dQ0, name='gaussian', parameters={'l': 0.01})
cov.compute_eig_decomp()
q = GaussianField(dQ0.n_dofs(), K=cov)

# Sample Random field
n_samples = 100
eq = Nodal(basis=phi_0, data=np.exp(q.sample(n_samples)))

plot.contour(eq, n_sample=25)

#
# Compute state
#

# Define weak form
state = [[
    Form(eq, test=phix_1, trial=phix_1),
Example #4
    #
    # Define piecewise constant elements
    #
    element = QuadFE(mesh.dim(), 'DQ0')
    dofhandler = DofHandler(mesh, element)
    dofhandler.distribute_dofs()

    # Get projection matrix
    P = projection_matrix(dofhandler, None, 0)

    fig, ax = plt.subplots(1, 1)
    for i in range(l_max):
        CC = Covariance(dofhandler,
                        subforest_flag=i,
                        name='gaussian',
                        parameters={'l': 0.01})
        CC.compute_eig_decomp()
        lmd, V = CC.get_eig_decomp()
        print(lmd)
        idx = np.arange(len(lmd))
        ax.semilogy(idx, lmd, '.-', label='level=%d' % i)
    plt.legend()
    plt.show()

    #
    # Define random field on the fine mesh
    #
    C = Covariance(dofhandler, name='gaussian', parameters={'l': 0.05})
    C.compute_eig_decomp()

    eta = GaussianField(dofhandler.n_dofs(), K=C)
    eta.update_support()
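
The semilogy plot above shows how quickly the eigenvalues of the Gaussian covariance decay, which is what makes small truncation levels viable. A plain NumPy sketch of the same decay for a squared-exponential kernel on a 1D grid; the exact kernel scaling used by the Covariance class is an assumption here:

import numpy as np

l = 0.01                                        # correlation length, as in parameters={'l': 0.01}
x = np.linspace(0, 1, 256)
C = np.exp(-0.5 * (x[:, None] - x[None, :])**2 / l**2)   # assumed Gaussian kernel
eigvals = np.linalg.eigvalsh(C)[::-1]           # sorted, largest first
print(eigvals[:10] / eigvals[0])                # rapid relative decay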
Example #5
def experiment01_problem():
    """
    Illustrate the problem: plot sample paths of the input q and of the output u,
    and a histogram of the QoI.
    """

    #
    # Computational Mesh
    #
    mesh = Mesh1D(resolution=(100, ))
    mesh.mark_region('left', lambda x: np.abs(x) < 1e-10)
    mesh.mark_region('right', lambda x: np.abs(x - 1) < 1e-10)

    #
    # Element
    #
    Q1 = QuadFE(mesh.dim(), 'Q1')
    dQ1 = DofHandler(mesh, Q1)
    dQ1.distribute_dofs()

    #
    # Basis
    #
    phi = Basis(dQ1, 'v')
    phi_x = Basis(dQ1, 'vx')

    #
    # Covariance
    #
    cov = Covariance(dQ1, name='gaussian', parameters={'l': 0.05})
    cov.compute_eig_decomp()
    lmd, V = cov.get_eig_decomp()
    d = len(lmd)

    #
    # Sample and plot full dimensional parameter and solution
    #
    n_samples = 20000
    z = np.random.randn(d, n_samples)
    q = sample_q0(V, lmd, d, z)

    # Define finite element function
    qfn = Nodal(data=q, basis=phi)
    problem = [[Form(qfn, test=phi_x, trial=phi_x),
                Form(1, test=phi)],
               [Form(qfn, test=phi_x, dmu='dv', flag='right')]]

    # Define assembler
    assembler = Assembler(problem)

    # Incorporate Dirichlet conditions
    assembler.add_dirichlet('left', 0)
    assembler.add_dirichlet('right', 1)

    comment.tic('assembly')
    # Assemble system
    assembler.assemble()
    comment.toc()

    comment.tic('solver')
    ufn = Nodal(basis=phi, data=None)
    J = np.zeros(n_samples)
    for i in range(n_samples):
        # Solve system
        u = assembler.solve(i_problem=0, i_matrix=i, i_vector=0)

        # Compute quantity of interest
        J[i] = u.dot(assembler.get_vector(1, i))

        # Update sample paths
        ufn.add_samples(u)
    comment.toc()

    #
    # Plots
    #
    """
    # Formatting
    plt.rc('text', usetex=True)
    
    # Figure sizes
    fs2 = (3,2)
    fs1 = (4,3)
    
    plot = Plot(quickview=False)
    plot_kwargs = {'color':'k', 'linewidth':0.05}
    
    #
    # Plot qfn
    # 
    
    # Figure 
    fig = plt.figure(figsize=fs2)
    ax = fig.add_subplot(111)
    ax = plot.line(qfn, axis=ax, 
                   i_sample=np.arange(100), 
                   plot_kwargs=plot_kwargs)
    ax.set_xlabel(r'$x$')
    ax.set_ylabel(r'$q$')
    plt.tight_layout()
    fig.savefig('fig/ex02_gauss_qfn.eps')
    plt.close()
    
    #
    # Plot ufn
    # 
    fig = plt.figure(figsize=fs2)
    ax = fig.add_subplot(111)
    ax = plot.line(ufn, axis=ax, 
                   i_sample=np.arange(100), 
                   plot_kwargs=plot_kwargs)
    ax.set_xlabel(r'$x$')
    ax.set_ylabel(r'$u$')
    plt.tight_layout()
    fig.savefig('fig/ex02_gauss_ufn.eps')
    plt.close()
    """

    # Formatting
    plt.rc('text', usetex=True)

    # Figure sizes
    fs2 = (3, 2)
    fs1 = (4, 3)

    fig = plt.figure(figsize=fs2)
    ax = fig.add_subplot(111)
    plt.hist(J, bins=100, density=True)
    ax.set_xlabel(r'$J(u)$')
    plt.tight_layout()
    fig.savefig('fig/ex02_gauss_jhist.eps')
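
Since J is estimated from n_samples = 20000 Monte Carlo samples, the sample mean carries a standard error of sqrt(Var(J)/n). A short sketch that continues from the J array above (the 95% factor 1.96 is the usual normal approximation):

    # Monte Carlo error of the sample mean of J (illustrative addition)
    J_mean = np.mean(J)
    J_se = np.sqrt(np.var(J, ddof=1) / n_samples)
    print('E[J] ~ %.6f, std. error %.2e, 95%% CI half-width %.2e'
          % (J_mean, J_se, 1.96 * J_se))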
Example #6
def test06_linearization():
    """
    Compute samples on the fine grid via linearization and compare the
    finite-difference, adjoint, and sensitivity approximations of dJ.
    """
    plot = Plot()
    #
    # Computational mesh
    #
    mesh = Mesh1D(resolution=(100, ))
    mesh.mark_region('left', lambda x: np.abs(x) < 1e-10)
    mesh.mark_region('right', lambda x: np.abs(x - 1) < 1e-10)

    #
    # Element
    #
    Q1 = QuadFE(mesh.dim(), 'Q1')
    dQ1 = DofHandler(mesh, Q1)
    dQ1.distribute_dofs()
    nx = dQ1.n_dofs()
    x = dQ1.get_dof_vertices()

    Q3 = QuadFE(mesh.dim(), 'Q3')
    dQ3 = DofHandler(mesh, Q3)
    dQ3.distribute_dofs()

    #
    # Basis
    #
    phi = Basis(dQ1, 'u')
    phi_x = Basis(dQ1, 'ux')
    psi = Basis(dQ3, 'u')
    psi_x = Basis(dQ3, 'ux')

    #
    # Covariance
    #
    cov = Covariance(dQ1, name='gaussian', parameters={'l': 0.05})
    cov.compute_eig_decomp()
    lmd, V = cov.get_eig_decomp()
    d = len(lmd)

    # Fix coarse truncation level
    d0 = 10

    #
    # Build Sparse Grid
    #
    grid = TasmanianSG.TasmanianSparseGrid()
    dimensions = d0
    outputs = 1
    depth = 2
    type = 'level'
    rule = 'gauss-hermite'
    grid.makeGlobalGrid(dimensions, outputs, depth, type, rule)

    # Sample Points
    zzSG = grid.getPoints()
    zSG = np.sqrt(2) * zzSG  # transform to N(0,1)

    # Quadrature Weights
    wSG = grid.getQuadratureWeights()
    wSG /= np.sqrt(np.pi)**d0  # normalize weights

    # Number of grid points
    n0 = grid.getNumPoints()

    #
    # Sample low dimensional input parameter
    #
    q0 = sample_q0(V, lmd, d0, zSG.T)
    J0 = sample_qoi(q0, dQ1)

    #
    # Sample conditional expectation
    #

    # Pick a single coarse sample to check
    i0 = np.random.randint(0, high=n0)

    # Sample fine, conditional on coarse
    n_samples = 1
    z1 = np.random.randn(d - d0, n_samples)
    q = sample_q_given_q0(q0[:, i0], V, lmd, d0, z1)

    # Perturbation
    log_qref = np.log(q0[:, i0])
    dlog_q = np.log(q.ravel()) - log_qref
    dlog_qfn = Nodal(data=dlog_q, basis=phi)

    # Perturbed q
    n_eps = 12  # Number of refinements
    epsilons = [10**(-l) for l in range(n_eps)]
    log_qper = np.empty((nx, n_eps))
    for i in range(n_eps):
        log_qper[:, i] = log_qref + epsilons[i] * dlog_q
    """
    plt.plot(x, log_qref, label='ref')
    for i in range(n_eps):
        plt.plot(x, log_qper[:,i],label='%d'%(i))
    """

    assert np.allclose(log_qper[:, 0], np.log(q.ravel()))

    plt.legend()
    plt.show()

    # Define finite element function
    exp_qref = Nodal(data=q0[:, i0], basis=phi)
    exp_qper = Nodal(data=np.exp(log_qper), basis=phi)

    #
    # PDEs
    #

    # 1. State Equation
    state_eqn = [Form(exp_qref, test=phi_x, trial=phi_x), Form(1, test=phi)]
    state_dbc = {'left': 0, 'right': 1}

    # 2. Perturbed Equation
    perturbed_eqn = [
        Form(exp_qper, test=phi_x, trial=phi_x),
        Form(1, test=phi)
    ]
    perturbed_dbc = {'left': 0, 'right': 1}

    # 3. Adjoint Equation
    adjoint_eqn = [Form(exp_qref, test=psi_x, trial=psi_x), Form(0, test=psi)]
    adjoint_dbc = {'left': 0, 'right': -1}

    # Combine
    eqns = [state_eqn, perturbed_eqn, adjoint_eqn]
    bcs = [state_dbc, perturbed_dbc, adjoint_dbc]

    #
    # Assembly
    #
    assembler = Assembler(eqns, n_gauss=(6, 36))

    # Boundary conditions
    for i, bc in enumerate(bcs):
        for loc, val in bc.items():
            assembler.add_dirichlet(loc, val, i_problem=i)

    # Assemble
    assembler.assemble()

    #
    # Solve
    #

    # Solve state
    ur = assembler.solve(i_problem=0)
    u_ref = Nodal(data=ur, basis=phi)
    ux_ref = Nodal(data=ur, basis=phi_x)

    # Solve perturbed state
    u_per = Nodal(basis=phi)
    for i in range(n_eps):
        # FEM solution
        up = assembler.solve(i_problem=1, i_matrix=i)
        u_per.add_samples(up)

        plt.plot(x, up - ur)
    plt.show()
    ux_per = Nodal(data=u_per.data(), basis=phi_x)

    # Solve adjoint equation
    v = assembler.solve(i_problem=2)
    v_adj = Nodal(data=v, basis=psi)
    vx_adj = Nodal(data=v, basis=psi_x)

    #
    # Sensitivity
    #

    # Sensitivity Equation
    ker_sen = Kernel(f=[exp_qref, dlog_qfn, ux_ref],
                     F=lambda eq, dq, ux: -eq * dq * ux)
    sensitivity_eqn = [
        Form(exp_qref, test=phi_x, trial=phi_x),
        Form(ker_sen, test=phi_x)
    ]

    sensitivity_dbc = {'left': 0, 'right': 0}
    assembler = Assembler(sensitivity_eqn, n_gauss=(6, 36))
    for loc in sensitivity_dbc:
        assembler.add_dirichlet(loc, sensitivity_dbc[loc])
    assembler.assemble()
    s = assembler.solve()
    sx = Nodal(data=s, basis=phi_x)

    plt.plot(x, s)
    plt.show()

    #
    # Quantities of Interest
    #

    # Reference
    k_ref = Kernel(f=[exp_qref, ux_ref], F=lambda eq, ux: eq * ux)
    ref_qoi = [Form(k_ref, dmu='dv', flag='right')]

    # Perturbed
    k_per = Kernel(f=[exp_qper, ux_per], F=lambda eq, ux: eq * ux)
    per_qoi = [Form(k_per, dmu='dv', flag='right')]

    # Adjoint
    k_adj = Kernel(f=[exp_qref, dlog_qfn, ux_ref, vx_adj],
                   F=lambda eq, dq, ux, vx: -eq * dq * ux * vx)
    adj_qoi = [Form(k_adj)]

    # Sensitivity
    k_sens = Kernel(f=[exp_qref, dlog_qfn, ux_ref, sx],
                    F=lambda eq, dq, ux, sx: eq * dq * ux + eq * sx)
    sens_qoi = Form(k_sens, dmu='dv', flag='right')

    qois = [ref_qoi, per_qoi, adj_qoi, sens_qoi]

    # Assemble
    assembler = Assembler(qois, mesh=mesh)
    assembler.assemble()

    # Evaluate
    J_ref = assembler.get_scalar(0)
    J_per = []
    for i in range(n_eps):
        J_per.append(assembler.get_scalar(1, i))

    # Finite difference approximation
    dJ = []
    for eps, J_p in zip(epsilons, J_per):
        dJ.append((J_p - J_ref) / eps)

    # Adjoint differential
    dJ_adj = assembler.get_scalar(2)

    # Sensitivity differential
    dJ_sen = assembler.get_scalar(3)

    print(dJ_adj)
    print(dJ_sen)
    print(dJ)
    """
Example #7
def test01_finite_elements():
    """
    Test accuracy of the finite element approximation
    """
    #
    # Construct reference solution
    #
    plot = Plot(quickview=False)

    # Mesh
    mesh = Mesh1D(resolution=(2**11, ))
    mesh.mark_region('left', lambda x: np.abs(x) < 1e-10)
    mesh.mark_region('right', lambda x: np.abs(x - 1) < 1e-10)

    # Element
    Q1 = QuadFE(mesh.dim(), 'Q1')
    dQ1 = DofHandler(mesh, Q1)
    dQ1.distribute_dofs()

    # Basis
    phi = Basis(dQ1, 'v')
    phi_x = Basis(dQ1, 'vx')

    #
    # Covariance
    #
    cov = Covariance(dQ1, name='gaussian', parameters={'l': 0.05})
    cov.compute_eig_decomp()
    lmd, V = cov.get_eig_decomp()
    d = len(lmd)

    #
    # Sample and plot full dimensional parameter and solution
    #
    n_samples = 1
    z = np.random.randn(d, n_samples)
    q_ref = sample_q0(V, lmd, d, z)

    print(q_ref.shape)

    # Define finite element function
    q_ref_fn = Nodal(data=q_ref, basis=phi)
    problem = [[Form(q_ref_fn, test=phi_x, trial=phi_x),
                Form(1, test=phi)],
               [Form(q_ref_fn, test=phi_x, dmu='dv', flag='right')]]

    # Define assembler
    assembler = Assembler(problem)

    # Incorporate Dirichlet conditions
    assembler.add_dirichlet('left', 0)
    assembler.add_dirichlet('right', 1)

    # Assemble system
    assembler.assemble()

    # Solve system
    u_ref = assembler.solve()

    # Compute quantity of interest
    J_ref = u_ref.dot(assembler.get_vector(1))

    # Plot
    fig = plt.figure(figsize=(6, 4))
    ax = fig.add_subplot(111)
    u_ref_fn = Nodal(basis=phi, data=u_ref)

    ax = plot.line(u_ref_fn, axis=ax)

    n_levels = 10
    J = np.zeros(n_levels)
    for l in range(n_levels):
        comment.comment('level: %d' % (l))

        #
        # Mesh
        #
        mesh = Mesh1D(resolution=(2**l, ))
        mesh.mark_region('left', lambda x: np.abs(x) < 1e-10)
        mesh.mark_region('right', lambda x: np.abs(x - 1) < 1e-10)

        #
        # Element
        #
        Q1 = QuadFE(mesh.dim(), 'Q1')
        dQ1 = DofHandler(mesh, Q1)
        dQ1.distribute_dofs()

        #
        # Basis
        #
        phi = Basis(dQ1, 'v')
        phi_x = Basis(dQ1, 'vx')

        # Define problem
        problem = [[
            Form(q_ref_fn, test=phi_x, trial=phi_x),
            Form(1, test=phi)
        ], [Form(q_ref_fn, test=phi_x, dmu='dv', flag='right')]]

        assembler = Assembler(problem)

        # Incorporate Dirichlet conditions
        assembler.add_dirichlet('left', 0)
        assembler.add_dirichlet('right', 1)

        assembler.assemble()
        A = assembler.get_matrix()
        print('A shape', A.shape)

        u = assembler.solve()
        J[l] = u.dot(assembler.get_vector(1))

        print(u.shape)
        print(phi.n_dofs())
        ufn = Nodal(basis=phi, data=u)
        ax = plot.line(ufn, axis=ax)

    plt.show()
    #
    # Plots
    #
    # Formatting
    plt.rc('text', usetex=True)

    # Figure sizes
    fs2 = (3, 2)
    fs1 = (4, 3)

    print(J_ref)
    print(J)

    #
    # Plot truncation error for mean and variance of J
    #

    fig = plt.figure(figsize=fs2)
    ax = fig.add_subplot(111)

    err = np.array([np.abs(J[i] - J_ref) for i in range(n_levels)])
    h = np.array([2**(-l) for l in range(n_levels)])
    plt.loglog(h, err, '.-')

    ax.set_xlabel(r'$h$')
    ax.set_ylabel(r'$|J-J^h|$')
    plt.tight_layout()
    fig.savefig('fig/ex02_gauss_fem_error.eps')
Example #8
def experiment06_sensitivity_stats():
    """
    Compute sparse-grid statistics (mean and variance) of the sensitivity dJ/dq
    """
    comment = Verbose()
    comment.comment('Computing statistics for the sensitivity dJ_dq')

    #
    # Computational mesh
    #
    mesh = Mesh1D(resolution=(100, ))
    mesh.mark_region('left', lambda x: np.abs(x) < 1e-10)
    mesh.mark_region('right', lambda x: np.abs(x - 1) < 1e-10)

    #
    # Element
    #
    Q1 = QuadFE(mesh.dim(), 'Q1')
    dQ1 = DofHandler(mesh, Q1)
    dQ1.distribute_dofs()
    n_dofs = dQ1.n_dofs()
    phi = Basis(dQ1, 'u')

    #
    # Covariance
    #
    cov = Covariance(dQ1, name='gaussian', parameters={'l': 0.05})
    cov.compute_eig_decomp()
    lmd, V = cov.get_eig_decomp()
    d = len(lmd)

    # Fix coarse truncation level
    d0 = 10

    #
    # Build Sparse Grid
    #
    grid = TasmanianSG.TasmanianSparseGrid()
    dimensions = d0
    outputs = 1
    depth = 4
    type = 'level'
    rule = 'gauss-hermite'
    grid.makeGlobalGrid(dimensions, outputs, depth, type, rule)

    # Sample Points
    zzSG = grid.getPoints()
    zSG = np.sqrt(2) * zzSG  # transform to N(0,1)

    # Quadrature Weights
    wSG = grid.getQuadratureWeights()
    wSG /= np.sqrt(np.pi)**d0  # normalize weights

    # Number of grid points
    n0 = grid.getNumPoints()

    comment.comment('Element DOFs: {0}'.format(n_dofs))
    comment.comment('Sparse Grid Size: {0}'.format(n0))

    #
    # Sample low dimensional input parameter
    #
    comment.tic('Sampling reference')
    q0 = sample_q0(V, lmd, d0, zSG.T)
    J0, u0 = sample_qoi(q0, dQ1, return_state=True)
    comment.toc()

    comment.tic('Sampling gradient')
    dJdq = np.zeros((n_dofs, n0))
    for i in range(n0):
        # Sample input and state
        q = Nodal(data=q0[:, i], basis=phi)
        u = Nodal(data=u0[:, i], basis=phi)

        # Compute gradient using adjoint approach
        dJdq[:, i] = dJdq_adj(q, u)
    comment.toc()

    # Compute sparse grid mean and variance
    E_dJ = np.dot(dJdq, wSG)
    V_dJ = np.dot(dJdq**2, wSG) - E_dJ**2

    E_dJ = Nodal(data=E_dJ, basis=phi)
    V_dJ = Nodal(data=V_dJ, basis=phi)

    fig, ax = plt.subplots(nrows=1, ncols=2)

    plot = Plot(quickview=False)
    ax[0] = plot.line(E_dJ, axis=ax[0])
    ax[1] = plot.line(V_dJ, axis=ax[1])
    plt.show()
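
The sqrt(2) scaling of the nodes and the sqrt(pi)**d0 normalization of the weights convert a Gauss-Hermite rule (for the weight exp(-z^2)) into a quadrature rule for expectations under N(0, I), since E[g(Z)] = pi^(-1/2) times the integral of g(sqrt(2) z) exp(-z^2) dz in each dimension. A one-dimensional sanity check using NumPy's own Gauss-Hermite rule:

import numpy as np

z, w = np.polynomial.hermite.hermgauss(10)   # nodes/weights for weight exp(-z^2)
zN = np.sqrt(2) * z                          # transform to N(0,1), as above
wN = w / np.sqrt(np.pi)                      # normalize weights

print(np.sum(wN))                            # ~1
print(np.sum(wN * zN**2))                    # E[Z^2] = 1
print(np.sum(wN * zN**4))                    # E[Z^4] = 3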
Example #9
def experiment05_conditioning():
    """
    Obtain an estimate of J using sparse grids on the coarse scale and MC as a
    correction. 
    
    REMARK: This takes a very long time, especially since the convergence rate
    of the conditional samples is slow.
    """
    #
    # Computational mesh
    #
    mesh = Mesh1D(resolution=(100, ))
    mesh.mark_region('left', lambda x: np.abs(x) < 1e-10)
    mesh.mark_region('right', lambda x: np.abs(x - 1) < 1e-10)

    #
    # Element
    #
    Q1 = QuadFE(mesh.dim(), 'Q1')
    dQ1 = DofHandler(mesh, Q1)
    dQ1.distribute_dofs()

    #
    # Covariance
    #
    cov = Covariance(dQ1, name='gaussian', parameters={'l': 0.05})
    cov.compute_eig_decomp()
    lmd, V = cov.get_eig_decomp()
    d = len(lmd)

    # Fix coarse truncation level
    d0 = 10

    #
    # Build Sparse Grid
    #
    grid = TasmanianSG.TasmanianSparseGrid()
    dimensions = d0
    outputs = 1
    depth = 2
    type = 'level'
    rule = 'gauss-hermite'
    grid.makeGlobalGrid(dimensions, outputs, depth, type, rule)

    # Sample Points
    zzSG = grid.getPoints()
    zSG = np.sqrt(2) * zzSG  # transform to N(0,1)

    # Quadrature Weights
    wSG = grid.getQuadratureWeights()
    wSG /= np.sqrt(np.pi)**d0  # normalize weights

    # Number of grid points
    n0 = grid.getNumPoints()

    #
    # Sample low dimensional input parameter
    #
    q0 = sample_q0(V, lmd, d0, zSG.T)
    J0 = sample_qoi(q0, dQ1)

    # Compute sparse grid mean and variance
    EJ0 = np.sum(wSG * J0)
    VJ0 = np.sum(wSG * (J0**2)) - EJ0**2

    J = np.load('data/j_mc.npy')
    mean_ref = np.mean(J)
    var_ref = np.var(J)

    # Record errors
    mean_err = [np.abs(EJ0 - mean_ref)]
    var_err = [np.abs(VJ0 - var_ref)]

    for n_samples in [10, 100, 1000]:
        mean_Jg0 = 0
        var_Jg0 = 0
        for i in range(n0):
            z = np.random.randn(d - d0, n_samples)
            qg0 = sample_q_given_q0(q0[:, i], V, lmd, d0, z)
            Jg0 = sample_qoi(qg0, dQ1)

            mean_Jg0 += wSG[i] * np.mean(Jg0)

        mean_err.append(np.abs(mean_Jg0 - mean_ref))

    # Formatting
    plt.rc('text', usetex=True)

    # Figure sizes
    fs2 = (3, 2)
    fs1 = (4, 3)

    fig = plt.figure(figsize=fs2)
    ax = fig.add_subplot(111)
    ax.semilogy([0, 10, 100, 1000], mean_err, '.-')
    ax.set_xlabel(r'$n$')
    ax.set_ylabel(r'$\mathrm{Error}$')

    fig.tight_layout()
    fig.savefig('fig/ex02_gauss_hyb_mean_err.eps')
    """
    #
    # Plot conditional variances
    #
    fig = plt.figure(figsize=fs2)
    ax = fig.add_subplot(111)
    ax.hist(varJg,bins=30, density=True)
    ax.set_xlabel(r'$\sigma_{J|q_0}^2$')
    fig.tight_layout()
    fig.savefig('fig/ex02_gauss_cond_var.eps')
    """
    """     
Example #10
def experiment04_sparse_grid():
    """
    Test sparse grid
    """
    #
    # Computational mesh
    #
    mesh = Mesh1D(resolution=(100, ))
    mesh.mark_region('left', lambda x: np.abs(x) < 1e-10)
    mesh.mark_region('right', lambda x: np.abs(x - 1) < 1e-10)

    #
    # Element
    #
    Q1 = QuadFE(mesh.dim(), 'Q1')
    dQ1 = DofHandler(mesh, Q1)
    dQ1.distribute_dofs()

    #
    # Covariance
    #
    cov = Covariance(dQ1, name='gaussian', parameters={'l': 0.05})
    cov.compute_eig_decomp()
    lmd, V = cov.get_eig_decomp()

    # Truncation levels
    truncation_levels = [1, 5, 10, 20]

    # Formatting
    plt.rc('text', usetex=True)

    # Set figure and axis
    fs2 = (3, 2)
    fs1 = (4, 3)

    # For mean
    fig1 = plt.figure(figsize=fs1)
    ax1 = fig1.add_subplot(111)

    # For variance
    fig2 = plt.figure(figsize=fs1)
    ax2 = fig2.add_subplot(111)

    for d0 in truncation_levels:
        J = []
        mean = []
        var = []
        n = []
        for depth in range(5):
            #
            # Construct Sparse Grid
            #
            grid = TasmanianSG.TasmanianSparseGrid()
            dimensions = d0
            outputs = 1
            type = 'level'
            rule = 'gauss-hermite'
            grid.makeGlobalGrid(dimensions, outputs, depth, type, rule)

            # Get Sample Points
            zzSG = grid.getPoints()
            zSG = np.sqrt(2) * zzSG  # transform to N(0,1)

            wSG = grid.getQuadratureWeights()
            wSG /= np.sqrt(np.pi)**d0  # normalize weights

            n0 = grid.getNumPoints()
            n.append(n0)

            #
            # Sample input parameter
            #
            q0 = sample_q0(V, lmd, d0, zSG.T)
            J = sample_qoi(q0, dQ1)

            EJ = np.sum(wSG * J)
            VJ = np.sum(wSG * (J**2)) - EJ**2
            mean.append(EJ)
            var.append(VJ)

        J_mc = np.load('data/j_%d_mc.npy' % (d0))

        # Compute mean and variance
        mean_mc = np.mean(J_mc)
        var_mc = np.var(J_mc)

        # Plot mean error
        mean_err = [np.abs(mean[i] - mean_mc) for i in range(5)]
        ax1.loglog(n, mean_err, '.-.', label=r'$k=%d$' % (d0))
        ax1.set_xlabel(r'$n$')
        ax1.set_ylabel(r'$\mathrm{Error}$')
        ax1.legend()
        fig1.tight_layout()

        # Plot variance error
        var_err = [np.abs(var[i] - var_mc) for i in range(5)]
        ax2.loglog(n, var_err, '.-.', label=r'k=%d' % (d0))
        ax2.set_xlabel(r'$n$')
        ax2.set_ylabel(r'$\mathrm{Error}$')
        ax2.legend()
        fig2.tight_layout()

    fig1.savefig('fig/ex02_gauss_sg_mean_error.eps')
    fig2.savefig('fig/ex02_gauss_sg_var_error.eps')
Example #11
def experiment03_truncation():
    """
    Investigate the error incurred by truncating the expansion at different levels
    """
    generate = False

    mesh = Mesh1D(resolution=(100, ))
    mesh.mark_region('left', lambda x: np.abs(x) < 1e-10)
    mesh.mark_region('right', lambda x: np.abs(x - 1) < 1e-10)

    #
    # Element
    #
    Q1 = QuadFE(mesh.dim(), 'Q1')
    dQ1 = DofHandler(mesh, Q1)
    dQ1.distribute_dofs()

    #
    # Basis
    #
    phi = Basis(dQ1, 'v')
    phi_x = Basis(dQ1, 'vx')

    #
    # Covariance
    #
    cov = Covariance(dQ1, name='gaussian', parameters={'l': 0.05})
    cov.compute_eig_decomp()
    lmd, V = cov.get_eig_decomp()
    d = len(lmd)

    # Truncation levels
    truncation_levels = [1, 5, 10, 20, 50]

    n_samples = 1000000
    if generate:
        n_batches = 1000
        batch_size = n_samples // n_batches

        for d0 in truncation_levels:
            comment.tic('d = %d' % (d0))
            J = np.empty(n_samples)
            for i in range(n_batches):
                # Print progress
                #print('.',end='')

                # Sample diffusion coefficient
                z = np.random.randn(d0, batch_size)
                q = sample_q0(V, lmd, d0, z)

                # Evaluate quantity of interest
                J[(i) * batch_size:(i + 1) * batch_size] = sample_qoi(q, dQ1)

                # Save current update to file
                np.save('./data/j_%d_mc.npy' % (d0), J)
            comment.toc()

    #
    # Compute estimates and errors
    #
    n_levels = len(truncation_levels)
    mean = []
    var = []
    for d0 in truncation_levels:
        J = np.load('data/j_%d_mc.npy' % (d0))

        # Compute mean and variance
        mean.append(np.mean(J))
        var.append(np.var(J))

    # Load reference
    J = np.load('data/j_mc.npy')
    mean_ref = np.mean(J)
    var_ref = np.var(J)

    #truncation_levels.append(101)
    err_mean = [np.abs(mean[i] - mean_ref) for i in range(n_levels)]
    err_var = [np.abs(var[i] - var_ref) for i in range(n_levels)]

    #
    # Plots
    #
    # Formatting
    plt.rc('text', usetex=True)

    # Figure sizes
    fs2 = (3, 2)
    fs1 = (4, 3)

    #
    # Plot truncation error for mean and variance of J
    #

    fig = plt.figure(figsize=fs2)
    ax = fig.add_subplot(111)

    plt.semilogy(truncation_levels, err_mean, '.-', label='mean')
    plt.semilogy(truncation_levels, err_var, '.--', label='variance')
    plt.legend()

    ax.set_xlabel(r'$k$')
    ax.set_ylabel(r'$\mathrm{Error}$')
    plt.tight_layout()
    fig.savefig('fig/ex02_gauss_trunc_error.eps')

    #
    # Plot estimated mean and variance
    #

    fig = plt.figure(figsize=fs2)
    ax = fig.add_subplot(111)

    truncation_levels.append(101)
    mean.append(mean_ref)
    var.append(var_ref)
    plt.plot(truncation_levels, mean, 'k.-', label='mean')
    plt.plot(truncation_levels, var, 'k.--', label='variance')
    plt.legend()

    ax.set_xlabel(r'$k$')
    plt.tight_layout()
    fig.savefig('fig/ex02_gauss_trunc_stats.eps')
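
The truncation error studied above is controlled by how much of the field's variance the leading k eigenpairs capture; since the trace of the covariance equals the sum of its eigenvalues, the retained fraction is sum(lmd[:k]) / sum(lmd). A quick NumPy sketch for a squared-exponential kernel on the same unit interval (the library's exact kernel scaling is an assumption):

import numpy as np

l = 0.05
x = np.linspace(0, 1, 101)
C = np.exp(-0.5 * (x[:, None] - x[None, :])**2 / l**2)   # assumed Gaussian kernel
lmd = np.linalg.eigvalsh(C)[::-1]
for k in [1, 5, 10, 20, 50]:
    print('k=%2d  retained variance fraction: %.6f' % (k, lmd[:k].sum() / lmd.sum()))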
Example #12
def experiment02_reference():
    """
    Convergence rate of MC
    """
    generate = False
    #
    # Computational Mesh
    #
    mesh = Mesh1D(resolution=(100, ))
    mesh.mark_region('left', lambda x: np.abs(x) < 1e-10)
    mesh.mark_region('right', lambda x: np.abs(x - 1) < 1e-10)

    #
    # Element
    #
    Q1 = QuadFE(mesh.dim(), 'Q1')
    dQ1 = DofHandler(mesh, Q1)
    dQ1.distribute_dofs()

    #
    # Covariance
    #
    cov = Covariance(dQ1, name='gaussian', parameters={'l': 0.05})
    cov.compute_eig_decomp()
    lmd, V = cov.get_eig_decomp()
    d = len(lmd)

    #
    # Generate random sample for J
    #

    n_samples = 1000000

    if generate:
        n_batches = 1000
        batch_size = n_samples // n_batches
        J = np.empty(n_samples)
        for i in range(n_batches):

            # Sample diffusion coefficient
            z = np.random.randn(d, batch_size)
            q = sample_q0(V, lmd, d, z)

            # Evaluate quantity of interest
            J[(i) * batch_size:(i + 1) * batch_size] = sample_qoi(q, dQ1)

            # Save current update to file
            np.save('./data/j_mc.npy', J)

    #
    # Process data
    #

    # Load MC samples
    J = np.load('data/j_mc.npy')

    # Compute sample mean and variance of J
    EX = np.mean(J)
    VarX = np.var(J)

    print(EX, VarX)
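
The standard error of the Monte Carlo mean decays like sqrt(VarX / n). A short sketch, continuing from the J array loaded above, that tracks this decay as more of the stored samples are used:

    # Empirical decay of the MC standard error (illustrative addition)
    for n in [10**k for k in range(2, 7)]:
        se = np.sqrt(np.var(J[:n], ddof=1) / n)
        print('n=%7d  mean=%.6f  std. error=%.2e' % (n, np.mean(J[:n]), se))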