Example #1
def estimate_diagonal_inv2(Asolver, k, d):
    """
    An unbiased stochastic estimator for the diagonal of A^-1.
    d = [ \sum_{j=1}^k vj .* A^{-1} vj ] ./ [ \sum_{j=1}^k vj .* vj ]
    where
    - vj are i.i.d. ~ N(0, I)
    - .* and ./ represent the element-wise multiplication and division
      of vectors, respectively.
      
    REFERENCE:
    Costas Bekas, Effrosyni Kokiopoulou, and Yousef Saad,
    An estimator for the diagonal of a matrix,
    Applied Numerical Mathematics, 57 (2007), pp. 1214-1229.
    """
    x, b = Vector(), Vector()

    if hasattr(Asolver, "init_vector"):
        Asolver.init_vector(x, 1)
        Asolver.init_vector(b, 0)
    else:
        Asolver.get_operator().init_vector(x, 1)
        Asolver.get_operator().init_vector(b, 0)

    d.zero()
    for i in range(k):
        x.zero()
        parRandom.normal(1., b)
        Asolver.solve(x, b)
        x *= b
        d.axpy(1. / float(k), x)
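
A minimal dense NumPy sketch of the same estimator (the helper name and test matrix are illustrative, not part of hIPPYlib), keeping the element-wise denominator from the docstring:

import numpy as np

def estimate_diag_inv_dense(A, k, rng=np.random.default_rng(0)):
    # Stochastic estimate of diag(A^{-1}) with k Gaussian probe vectors.
    n = A.shape[0]
    num = np.zeros(n)
    den = np.zeros(n)
    for _ in range(k):
        v = rng.standard_normal(n)
        x = np.linalg.solve(A, v)  # stands in for Asolver.solve(x, b)
        num += v * x               # vj .* A^{-1} vj
        den += v * v               # vj .* vj
    return num / den

A = np.diag([1., 2., 4.]) + 0.1
print(estimate_diag_inv_dense(A, 2000))
print(np.diag(np.linalg.inv(A)))   # exact diagonal for comparison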
Example #2
    def proposal(self, current):
        # Draw a sample from the Gaussian nu (w_prior holds the intermediate prior draw)
        parRandom.normal(1., self.noise)
        w_prior = dl.Vector()
        self.nu.init_vector(w_prior, 0)
        w = dl.Vector()
        self.nu.init_vector(w, 0)
        self.nu.sample(self.noise, w_prior, w, add_mean=True)

        return w
Example #3
    def proposal(self, current):
        delta_t = self.parameters["delta_t"]
        parRandom.normal(1., self.noise)
        w = dl.Vector()
        self.model.prior.init_vector(w, 0)
        self.model.prior.sample(self.noise, w, add_mean=False)
        delta_tp2 = 2 + delta_t
        d_gam = self.pr_mean \
            + (2 - delta_t) / delta_tp2 * (current.m - self.pr_mean) \
            - (2 * delta_t) / delta_tp2 * current.Cg \
            + math.sqrt(8 * delta_t) / delta_tp2 * w
        return d_gam
Example #4
    def set_operator(self, A):
        """
        Set the operator A, such that x ~ N(0, A^-1).
        
        Note A is any object that provides the methods init_vector and mult.
        """
        self.A = A
        self.A.init_vector(self.r, 0)
        self.A.init_vector(self.p, 0)
        self.A.init_vector(self.Ap, 0)

        self.A.init_vector(self.b, 0)
        parRandom.normal(1., self.b)
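
The r, p, and Ap members suggest this sampler draws from N(0, A^-1) with a Krylov method. For intuition, here is a dense-matrix sketch of the same target distribution using a Cholesky factor instead (all names here are illustrative):

import numpy as np

def sample_inv_cov(A, rng=np.random.default_rng(0)):
    # If A = L L^T and z ~ N(0, I), then x solving L^T x = z satisfies
    # Cov(x) = L^{-T} L^{-1} = A^{-1}, i.e. x ~ N(0, A^{-1}).
    L = np.linalg.cholesky(A)
    z = rng.standard_normal(A.shape[0])
    return np.linalg.solve(L.T, z)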
Example #5
    def proposal(self, current):
        # Generate a sample from the prior
        parRandom.normal(1., self.noise)
        w = dl.Vector()
        self.model.prior.init_vector(w, 0)
        self.model.prior.sample(self.noise, w, add_mean=False)
        # do pCN linear combination with current sample
        s = self.parameters["s"]
        w *= s
        w.axpy(1., self.model.prior.mean)
        w.axpy(np.sqrt(1. - s * s), current.m - self.model.prior.mean)

        return w
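
The same pCN update in plain NumPy form (a sketch; the names are illustrative). With s in (0, 1], the proposal w = mean + s*xi + sqrt(1 - s^2)*(m - mean) leaves the prior N(mean, C) invariant, since s^2 + (1 - s^2) = 1:

import numpy as np

def pcn_proposal(m, prior_mean, prior_draw_zero_mean, s):
    # Preconditioned Crank-Nicolson combination of a fresh zero-mean
    # prior draw and the current state, both centered at the prior mean.
    return prior_mean + s * prior_draw_zero_mean \
        + np.sqrt(1. - s * s) * (m - prior_mean)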
Example #6
File: qoi.py Project: shahmoradi/hippy
def qoiVerify(qoi, x, generate_state, h=None, innerTol=1e-9, plotting=True):

    rank = dl.MPI.rank(x[STATE].mpi_comm())

    if h is None:
        h = generate_state()
        parRandom.normal(1., h)

    qoi_x = qoi.eval(x)
    grad_x = generate_state()
    qoi.setLinearizationPoint(x)
    qoi.grad(STATE, x, grad_x)
    grad_xh = grad_x.inner(h)
    Hh = generate_state()
    qoi.apply_ij(STATE, STATE, h, Hh)

    n_eps = 32
    eps = np.power(.5, np.arange(n_eps, 0, -1))
    err_grad = np.zeros(n_eps)
    err_H = np.zeros(n_eps)

    for i in range(n_eps):
        my_eps = eps[i]

        state_plus = generate_state()
        state_plus.axpy(1., x[STATE])
        state_plus.axpy(my_eps, h)

        dq = qoi.eval([state_plus, x[PARAMETER], x[ADJOINT]]) - qoi_x
        err_grad[i] = abs(dq / my_eps - grad_xh)

        grad_xplus = generate_state()
        qoi.grad(STATE, [state_plus, x[PARAMETER], x[ADJOINT]], grad_xplus)

        err = grad_xplus - grad_x
        err *= 1. / my_eps
        err -= Hh

        err_H[i] = err.norm('linf')

    if plotting and (rank == 0):
        qoiVerifyPlotErrors(eps, err_grad, err_H)

    return eps, err_grad, err_H
Example #7
def reducedQOIVerify(rQOI, m0, h=None, innerTol=1e-9, eps=None, plotting=True):
    """
    Verify the gradient and the Hessian of a parameter-to-qoi map.
    It will produce two loglog plots of the finite difference checks
    for the gradient and for the Hessian.
    It will also check for symmetry of the Hessian.
    """
    rank = dl.MPI.rank(m0.mpi_comm())
    
    if h is None:
        h = rQOI.generate_vector(PARAMETER)
        parRandom.normal(1., h)

    
    x = rQOI.generate_vector()
    
    if hasattr(rQOI.problem, "initial_guess"):
        x[STATE].axpy(1., rQOI.problem.initial_guess)
    x[PARAMETER] = m0
    rQOI.solveFwd(x[STATE], x, innerTol)
    rQOI.solveAdj(x[ADJOINT], x, innerTol)
    qoi_x = rQOI.eval(x)
    
    grad_x = rQOI.generate_vector(PARAMETER)
    rQOI.evalGradientParameter(x, grad_x)
    grad_xh = grad_x.inner( h )
    
    H = rQOI.reduced_hessian(x=x, innerTol=innerTol)
    Hh = rQOI.generate_vector(PARAMETER)
    H.mult(h, Hh)
    
    if eps is None:
        n_eps = 32
        eps = np.power(.5, np.arange(n_eps-5,-5,-1))
    else:
        n_eps = eps.shape[0]
        
    err_grad = np.zeros(n_eps)
    err_H = np.zeros(n_eps)
    qois = np.zeros(n_eps)
    
    x_plus = rQOI.generate_vector()
    x_plus[STATE].axpy(1., x[STATE])
    
    for i in range(n_eps):
        my_eps = eps[i]
        
        x_plus[PARAMETER].zero()
        x_plus[PARAMETER].axpy(1., m0)
        x_plus[PARAMETER].axpy(my_eps, h)
        rQOI.solveFwd(x_plus[STATE], x_plus, innerTol)
        rQOI.solveAdj(x_plus[ADJOINT], x_plus, innerTol)
        
        qoi_plus = rQOI.eval(x_plus)
        qois[i] = qoi_plus
        dQOI = qoi_plus - qoi_x
        err_grad[i] = abs(dQOI/my_eps - grad_xh)
        
        #Check the Hessian
        grad_xplus = rQOI.generate_vector(PARAMETER)
        rQOI.evalGradientParameter(x_plus, grad_xplus)
        
        err  = grad_xplus - grad_x
        err *= 1./my_eps
        err -= Hh
        
        err_H[i] = err.norm('linf')

        if rank == 0:
            print "{0:1.7e} {1:1.7e} {2:1.7e} {3:1.7e}".format(eps[i], qois[i], err_grad[i], err_H[i])
    
    if plotting and (rank == 0):
        reducedQOIVerifyPlotErrors(eps, err_grad, err_H)

    out = np.zeros((eps.shape[0], 4))
    out[:,0] = eps
    out[:,1] = qois
    out[:,2] = err_grad
    out[:,3] = err_H
    
    if rank == 0:
        np.savetxt('fd_check.txt', out)
      
    xx = rQOI.generate_vector(PARAMETER)
    parRandom.normal(1., xx)
    yy = rQOI.generate_vector(PARAMETER)
    parRandom.normal(1., yy)
    
    ytHx = H.inner(yy,xx)
    xtHy = H.inner(xx,yy)
    rel_symm_error = 2*abs(ytHx - xtHy)/(ytHx + xtHy)
    if rank == 0:
        print "(yy, H xx) - (xx, H yy) = ", rel_symm_error
        if(rel_symm_error > 1e-10):
            print "HESSIAN IS NOT SYMMETRIC!!"
        
    return out
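
For reference, the same finite-difference checks for a toy map with known derivatives (purely illustrative, no hIPPYlib objects). Both err_grad and err_H should decay like O(eps), which is the straight line the loglog plots look for:

import numpy as np

rng = np.random.default_rng(0)
m = rng.standard_normal(5)
h = rng.standard_normal(5)

# f(m) = 0.25*||m||^4, gradient g(m) = ||m||^2 m,
# Hessian H(m) = ||m||^2 I + 2 m m^T.
f = lambda v: 0.25 * np.dot(v, v) ** 2
g = lambda v: np.dot(v, v) * v
H = np.dot(m, m) * np.eye(5) + 2. * np.outer(m, m)

for eps in np.power(.5, np.arange(10, 0, -1)):
    err_grad = abs((f(m + eps * h) - f(m)) / eps - np.dot(g(m), h))
    err_H = np.linalg.norm((g(m + eps * h) - g(m)) / eps - H @ h, np.inf)
    print("{0:1.3e} {1:1.3e} {2:1.3e}".format(eps, err_grad, err_H))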
Example #8
def modelVerify(model,
                a0,
                innerTol,
                is_quadratic=False,
                misfit_only=False,
                verbose=True,
                eps=None):
    """
    Verify the reduced gradient and the Hessian of a model.
    It will produce two loglog plots of the finite difference checks
    for the gradient and for the Hessian.
    It will also check for symmetry of the Hessian.
    """
    if misfit_only:
        index = 2
    else:
        index = 0

    h = model.generate_vector(PARAMETER)
    parRandom.normal(1., h)

    x = model.generate_vector()
    x[PARAMETER] = a0
    model.solveFwd(x[STATE], x, innerTol)
    model.solveAdj(x[ADJOINT], x, innerTol)
    cx = model.cost(x)

    grad_x = model.generate_vector(PARAMETER)
    model.evalGradientParameter(x, grad_x, misfit_only=misfit_only)
    grad_xh = grad_x.inner(h)

    model.setPointForHessianEvaluations(x)
    H = ReducedHessian(model, innerTol, misfit_only=misfit_only)
    Hh = model.generate_vector(PARAMETER)
    H.mult(h, Hh)

    if eps is None:
        n_eps = 32
        eps = np.power(.5, np.arange(n_eps))
        eps = eps[::-1]
    else:
        n_eps = eps.shape[0]
    err_grad = np.zeros(n_eps)
    err_H = np.zeros(n_eps)

    for i in range(n_eps):
        my_eps = eps[i]

        x_plus = model.generate_vector()
        x_plus[PARAMETER].axpy(1., a0)
        x_plus[PARAMETER].axpy(my_eps, h)
        model.solveFwd(x_plus[STATE], x_plus, innerTol)
        model.solveAdj(x_plus[ADJOINT], x_plus, innerTol)

        dc = model.cost(x_plus)[index] - cx[index]
        err_grad[i] = abs(dc / my_eps - grad_xh)

        #Check the Hessian
        grad_xplus = model.generate_vector(PARAMETER)
        model.evalGradientParameter(x_plus,
                                    grad_xplus,
                                    misfit_only=misfit_only)

        err = grad_xplus - grad_x
        err *= 1. / my_eps
        err -= Hh

        err_H[i] = err.norm('linf')

    if verbose:
        modelVerifyPlotErrors(is_quadratic, eps, err_grad, err_H)

    xx = model.generate_vector(PARAMETER)
    parRandom.normal(1., xx)
    yy = model.generate_vector(PARAMETER)
    parRandom.normal(1., yy)

    ytHx = H.inner(yy, xx)
    xtHy = H.inner(xx, yy)
    if np.abs(ytHx + xtHy) > 0.:
        rel_symm_error = 2 * abs(ytHx - xtHy) / (ytHx + xtHy)
    else:
        rel_symm_error = abs(ytHx - xtHy)
    if verbose:
        print "(yy, H xx) - (xx, H yy) = ", rel_symm_error
        if rel_symm_error > 1e-10:
            print "HESSIAN IS NOT SYMMETRIC!!"

    return eps, err_grad, err_H
Example #9
    def consume_random(self):
        # Advance both random streams (parRandom and numpy) by one draw,
        # keeping the generator states in sync with kernels that use them.
        parRandom.normal(1., self.noise)
        np.random.rand()
Example #10
    def proposal(self, current):
        parRandom.normal(1., self.noise)
        self.nu.sample(self.noise, self.discard, self.w, add_mean=False)
        return current.m - current.Cg + self.w
Example #11
def expectedInformationGainLaplace(model, ns, k, save_any=10, fname="kldist"):
    rank = dl.MPI.rank(dl.mpi_comm_world())
    m_MC = model.generate_vector(PARAMETER)
    u_MC = model.generate_vector(STATE)
    noise_m = dl.Vector()
    model.prior.init_vector(noise_m, "noise")
    noise_obs = dl.Vector()
    model.misfit.B.init_vector(noise_obs, 0)

    out = np.zeros((ns, 5))
    header = 'kldist, c_misfit, c_reg, c_logdet, c_tr'

    p = 20

    Omega = MultiVector(m_MC, k + p)
    parRandom.normal(1., Omega)

    for iMC in np.arange(ns):
        if rank == 0:
            print "Sample: ", iMC
        parRandom.normal(1., noise_m)
        parRandom.normal(1., noise_obs)
        model.prior.sample(noise_m, m_MC)
        model.solveFwd(u_MC, [u_MC, m_MC, None], 1e-9)
        model.misfit.B.mult(u_MC, model.misfit.d)
        model.misfit.d.axpy(np.sqrt(model.misfit.noise_variance), noise_obs)

        a = m_MC.copy()
        parameters = ReducedSpaceNewtonCG_ParameterList()
        parameters["rel_tolerance"] = 1e-9
        parameters["abs_tolerance"] = 1e-12
        parameters["max_iter"] = 25
        parameters["inner_rel_tolerance"] = 1e-15
        parameters["globalization"] = "LS"
        parameters["GN_iter"] = 5
        if rank != 0:
            parameters["print_level"] = -1

        solver = ReducedSpaceNewtonCG(model, parameters)
        x = solver.solve([None, a, None])

        if rank == 0:
            if solver.converged:
                print("\nConverged in ", solver.it, " iterations.")
            else:
                print("\nNot Converged")

            print("Termination reason: ",
                  solver.termination_reasons[solver.reason])
            print("Final gradient norm: ", solver.final_grad_norm)
            print("Final cost: ", solver.final_cost)

        model.setPointForHessianEvaluations(x, gauss_newton_approx=False)
        Hmisfit = ReducedHessian(model,
                                 solver.parameters["inner_rel_tolerance"],
                                 misfit_only=True)
        d, U = doublePassG(Hmisfit,
                           model.prior.R,
                           model.prior.Rsolver,
                           Omega,
                           k,
                           s=1,
                           check=False)
        posterior = GaussianLRPosterior(model.prior, d, U)
        posterior.mean = x[PARAMETER]

        kl_dist, c_detlog, c_tr, cr = posterior.klDistanceFromPrior(
            sub_comp=True)
        if rank == 0:
            print "KL-Distance from prior: ", kl_dist

        cm = model.misfit.cost(x)
        out[iMC, 0] = kl_dist
        out[iMC, 1] = cm
        out[iMC, 2] = cr
        out[iMC, 3] = c_detlog
        out[iMC, 4] = c_tr

        if (rank == 0) and (iMC % save_any == save_any - 1):
            all_kl = out[0:iMC + 1, 0]
            print "I = ", np.mean(all_kl), " Var[I_MC] = ", np.var(
                all_kl, ddof=1) / float(iMC + 1)
            if fname is not None:
                np.savetxt(fname + '_tmp.txt',
                           out[0:iMC + 1, :],
                           header=header,
                           comments='% ')

    if fname is not None:
        np.savetxt(fname + '.txt', out, header=header, comments='% ')

    return np.mean(out[:, 0])
Example #12
def expectedInformationGainMC2(model, n, fname="loglikeEv"):
    rank = dl.MPI.rank(dl.mpi_comm_world())
    # STEP 1: Generate random m and evaluate/store B*u(m)
    u = model.generate_vector(STATE)
    m = model.generate_vector(PARAMETER)
    all_p2o = [dl.Vector() for i in range(n)]
    [model.misfit.B.init_vector(p2o, 0) for p2o in all_p2o]
    noise_m = dl.Vector()
    model.prior.init_vector(noise_m, "noise")

    for i in range(n):
        if rank == 0 and i % 10 == 0:
            print "Compute observation", i
        parRandom.normal(1., noise_m)
        model.prior.sample(noise_m, m)
        model.solveFwd(u, [u, m, None])
        model.misfit.B.mult(u, all_p2o[i])

    # STEP 2: Compute Evidence and likelihood
    noise_obs = dl.Vector()
    model.misfit.B.init_vector(noise_obs, 0)
    obs = dl.Vector()
    model.misfit.B.init_vector(obs, 0)
    diff = dl.Vector()
    model.misfit.B.init_vector(diff, 0)

    gamma_noise = model.misfit.noise_variance
    log_evidence = np.zeros(n)
    log_like = np.zeros(n)

    nobs = float(noise_obs.size())

    for i in range(n):
        if rank == 0 and i % 10 == 0:
            print "Evaluate evidence", i
        parRandom.normal(1., noise_obs)
        # Note: the normalizing constant 1/sqrt(det(2*pi*Gamma_noise)) cancels
        # out between log_like and log_evidence.
        log_like[i] = -0.5 * noise_obs.inner(noise_obs)  # - 0.5*np.log(2.*np.pi*gamma_noise)*nobs
        obs.zero()
        obs.axpy(1., all_p2o[i])
        obs.axpy(np.sqrt(gamma_noise), noise_obs)
        tmp = 0.0
        for j in range(n):
            diff.zero()
            diff.axpy(1., obs)
            diff.axpy(-1., all_p2o[j])
            tmp += np.exp(-0.5 * diff.inner(diff) / gamma_noise)

        log_evidence[i] = np.log(tmp) - np.log(n)  # - 0.5*np.log(2.*np.pi*gamma_noise)*nobs

    if fname is not None:
        out = np.zeros((n, 2))
        out[:, 0] = log_like
        out[:, 1] = log_evidence
        np.savetxt(fname + '.txt',
                   out,
                   header='log_like, log_ev',
                   comments='% ')

    return np.mean(log_like) - np.mean(log_evidence)
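
A scalar sanity check of the same double-loop estimator (purely illustrative). For the linear-Gaussian model d = m + eta with m ~ N(0, s_pr^2) and eta ~ N(0, s_n^2), the expected information gain is known in closed form, 0.5*log(1 + s_pr^2/s_n^2), and the estimate E[log_like] - E[log_evidence] should converge to it:

import numpy as np

rng = np.random.default_rng(0)
n, s_pr, s_n = 2000, 1.0, 0.5
m = s_pr * rng.standard_normal(n)        # prior samples
d = m + s_n * rng.standard_normal(n)     # one synthetic observation per sample
# Log-likelihood of d_i given its own m_i; the Gaussian normalizing
# constant cancels against the evidence, as in the code above.
log_like = -0.5 * ((d - m) / s_n) ** 2
# Log-evidence: average the likelihood of d_i over all prior samples m_j.
log_ev = np.array([np.log(np.mean(np.exp(-0.5 * ((di - m) / s_n) ** 2)))
                   for di in d])
print("MC2 estimate:", np.mean(log_like) - np.mean(log_ev))
print("exact EIG   :", 0.5 * np.log(1. + (s_pr / s_n) ** 2))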
Example #13
def varianceReductionMC(prior,
                        rqoi,
                        taylor_qoi,
                        nsamples,
                        filename="realizations.txt"):
    """
    This function computes Monte Carlo Estimates for forward propagation of uncertainty.
    The uncertain parameter satisfies a Gaussian distribution with known mean and covariance 
    (describes as the inverse of a differential operator).
    Convergence of the Monte Carlo estimates is accelerated using a variance reduction
    techinque based on a Taylor approximation of the parameter-to-qoi map.
    
    INPUTS:
    - prior: an object of type hIPPYlib._Prior that allows to generate samples from the prior distribution
    - rqoi: an object of type ReducedQOI that describes the parameter-to-qoi map
    - taylor_qoi: an object of type TaylorApproximationQOI that computes the first and second order Taylor
                  approximation of the qoi
    - nsamples: an integer representing the number of samples for the MC estimates
    - filename: a string containing the name of the file where the computed qoi
                and its Taylor approximations (for each realization of the parameter) are saved
                
    OUTPUTS:
    - Sample mean of the quantity of interest q, its Taylor approx q1 and q2, and corrections y1=q-q1 and y2=q-q2.
    - MSE (Mean square error) of the standard MC, and the variance reduced MC using q1 and q2.
    
    Note: The variate control MC estimator can be computed off-line by postprocessing the file containing the
          values of the computed qoi and Taylor approximations.
    
    """
    noise = dl.Vector()
    sample = dl.Vector()

    prior.init_vector(noise, "noise")
    prior.init_vector(sample, 1)

    rank = dl.MPI.rank(noise.mpi_comm())

    q_i = np.zeros(nsamples)
    q1_i = np.zeros(nsamples)
    q2_i = np.zeros(nsamples)
    y1_i = np.zeros(nsamples)
    y2_i = np.zeros(nsamples)

    Eq1_exact = taylor_qoi.expectedValue(order=1)
    Eq2_exact = taylor_qoi.expectedValue(order=2)

    if rank == 0:
        print "nsamples | E[q], E[y1] + E[q1],  E[y2] + E[q2]| Var[q] Var[y1] Var[y2]"
        fid = open(filename, "w")

    for i in range(nsamples):
        parRandom.normal(1., noise)
        prior.sample(noise, sample)
        q_i[i] = rqoi.reduced_eval(sample)
        q1_i[i] = taylor_qoi.eval(sample, order=1)
        q2_i[i] = taylor_qoi.eval(sample, order=2)
        y1_i[i] = q_i[i] - q1_i[i]
        y2_i[i] = q_i[i] - q2_i[i]

        if rank == 0:
            fid.write("{0:15e} {1:15e} {2:15e}\n".format(
                q_i[i], q1_i[i], q2_i[i]))
            fid.flush()

        if ((i + 1) % 10 == 0) or (i + 1 == nsamples):
            Eq = np.sum(q_i) / float(i + 1)
            Eq1 = np.sum(q1_i) / float(i + 1)
            Eq2 = np.sum(q2_i) / float(i + 1)
            Ey1 = np.sum(y1_i) / float(i + 1)
            Ey2 = np.sum(y2_i) / float(i + 1)

            Varq = np.sum(np.power(q_i, 2)) / float(i) - (float(i + 1) /
                                                          float(i) * Eq * Eq)
            Varq1 = np.sum(np.power(q1_i, 2)) / float(i) - (
                float(i + 1) / float(i) * Eq1 * Eq1)
            Varq2 = np.sum(np.power(q2_i, 2)) / float(i) - (
                float(i + 1) / float(i) * Eq2 * Eq2)
            Vary1 = np.sum(np.power(y1_i, 2)) / float(i) - (
                float(i + 1) / float(i) * Ey1 * Ey1)
            Vary2 = np.sum(np.power(y2_i, 2)) / float(i) - (
                float(i + 1) / float(i) * Ey2 * Ey2)

            if rank == 0:
                print "{0:3} | {1:7e} {2:7e} {3:7e} | {4:7e} {5:7e} {6:7e}".format(
                    i + 1, Eq, Ey1 + Eq1_exact, Ey2 + Eq2_exact, Varq, Vary1,
                    Vary2)

    Vq1_exact = taylor_qoi.variance(order=1)
    Vq2_exact = taylor_qoi.variance(order=2)

    if rank == 0:
        fid.close()

        print "Expected value q1: analytical: ", Eq1_exact, "estimated: ", Eq1
        print "Expected value q2: analytical: ", Eq2_exact, "estimated: ", Eq2
        print "Variance q1: analytical", Vq1_exact, "estimated: ", Varq1
        print "Variance q2: analytical", Vq2_exact, "estimated: ", Varq2

    return Eq, Ey1, Ey2, Eq1_exact, Eq2_exact, Varq / nsamples, Vary1 / nsamples, Vary2 / nsamples
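
The control-variate idea in miniature (illustrative): if q1 is a cheap surrogate of q with an exactly known mean, then E[q] = E[q - q1] + E[q1], and the correction y1 = q - q1 has a much smaller variance than q whenever the surrogate tracks the qoi:

import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal(10000)

q = np.exp(0.3 * x)        # "expensive" quantity of interest
q1 = 1. + 0.3 * x          # first-order Taylor surrogate, E[q1] = 1 exactly
y1 = q - q1                # correction term

print("standard MC     : E =", np.mean(q), " Var =", np.var(q, ddof=1))
print("variance reduced: E =", np.mean(y1) + 1., " Var =", np.var(y1, ddof=1))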
Example #14
def gaussian_engine(v):
    """
    Fill the vector v with i.i.d. standard normal random variables.
    """
    parRandom.normal(1., v)