Example No. 1
    def _select_step(self, shots, current_objective_value, gradient, iteration, objective_arguments, **kwargs):
        """Compute the Gauss-Newton update for a set of shots.

        Gives the step s as a function of the gradient vector.  Implemented as on p. 178 of Nocedal and Wright.

        Parameters
        ----------
        shots : list of pysit.Shot
            List of Shots for which to compute.
        gradient : ndarray
            Gradient vector.
        iteration : int
            Current iteration index.

        """

        m0 = self.base_model

        rhs = -1*gradient.asarray()

        def matvec(x):
            # Note: m1 is constructed but not used by this implementation.
            m1 = m0.perturbation(data=x)
            return self.objective_function.apply_hessian(shots, self.base_model, x, **objective_arguments).data

        A_shape = (len(rhs), len(rhs))

        A = LinearOperator(shape=A_shape, matvec=matvec, dtype=gradient.dtype)

        resid = []

        # d, info = cg(A, rhs, maxiter=self.krylov_maxiter, residuals=resid)
        d, info = gmres(A, rhs, maxiter=self.krylov_maxiter, residuals=resid)

        d.shape = rhs.shape

        direction = m0.perturbation(data=d)

        if info < 0:
            print("GMRES failure")
        if info == 0:
            print("GMRES converged")
        if info > 0:
            print("GMRES ran {0} iterations".format(info))

        alpha0_kwargs = {'reset' : False}
        if iteration == 0:
            alpha0_kwargs = {'reset' : True}

        alpha = self.select_alpha(shots, gradient, direction, objective_arguments,
                                  current_objective_value=current_objective_value,
                                  alpha0_kwargs=alpha0_kwargs, **kwargs)

        self._print('  alpha {0}'.format(alpha))
        self.store_history('alpha', iteration, alpha)

        step = alpha * direction

        return step
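
The pattern above, a matrix-free operator handed to a Krylov solver, fits in a few
self-contained lines. A minimal sketch, assuming pyamg's krylov.gmres with the
residuals keyword as used above; the Poisson matrix stands in for the Gauss-Newton
Hessian and the random right-hand side for -gradient:

import numpy as np
from scipy.sparse.linalg import LinearOperator
from pyamg.gallery import poisson
from pyamg.krylov import gmres

H = poisson((50, 50), format='csr')   # stand-in for the Gauss-Newton Hessian
rhs = np.random.rand(H.shape[0])      # stand-in for -gradient

def matvec(x):
    # In the example above this would be an apply_hessian call; here it is
    # a plain sparse matrix-vector product.
    return H @ x

A = LinearOperator(shape=H.shape, matvec=matvec, dtype=H.dtype)

resid = []
d, info = gmres(A, rhs, maxiter=50, residuals=resid)
print("GMRES info: {0}, final residual: {1}".format(info, resid[-1]))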
Example No. 2
def smoother(A, x, b):
    x[:] = (gmres(A,
                  b,
                  x0=x,
                  tol=tol,
                  maxiter=maxiter,
                  restrt=restrt,
                  M=M,
                  callback=callback,
                  residuals=residuals)[0]).reshape(x.shape)
Example No. 3
def smoother(A, x, b):
    x[:] = gmres(A, b, x0=x,
                 tol=tol,
                 maxiter=maxiter,
                 restrt=restrt,
                 M=M,
                 callback=callback,
                 residuals=residuals)[0].reshape(x.shape)
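
Examples No. 2 and No. 3 wrap gmres in a closure with the smoother(A, x, b)
signature that pyamg's relaxation machinery expects. A minimal sketch of how such
a closure behaves; the captured parameters (tol, maxiter, and so on) are given
illustrative values here and are not from the original snippets:

import numpy as np
from pyamg.gallery import poisson
from pyamg.krylov import gmres

A = poisson((100,), format='csr')
b = np.ones(A.shape[0])
x = np.zeros_like(b)

# Illustrative values for the variables the closures above capture.
tol, maxiter, restrt, M, callback, residuals = 1e-12, 3, None, None, None, None

def smoother(A, x, b):
    x[:] = gmres(A, b, x0=x, tol=tol, maxiter=maxiter, restrt=restrt,
                 M=M, callback=callback,
                 residuals=residuals)[0].reshape(x.shape)

smoother(A, x, b)   # x now holds the iterate after a few GMRES sweeps
print(np.linalg.norm(b - A @ x))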
Example No. 4
def relax(A, x):
    fn, kwargs = unpack_arg(prepostsmoother)
    if fn == 'gauss_seidel':
        gauss_seidel(A, x, np.zeros_like(x),
                     iterations=candidate_iters, sweep='symmetric')
    elif fn == 'gauss_seidel_nr':
        gauss_seidel_nr(A, x, np.zeros_like(x),
                        iterations=candidate_iters, sweep='symmetric')
    elif fn == 'gauss_seidel_ne':
        gauss_seidel_ne(A, x, np.zeros_like(x),
                        iterations=candidate_iters, sweep='symmetric')
    elif fn == 'jacobi':
        jacobi(A, x, np.zeros_like(x), iterations=1,
               omega=1.0 / rho_D_inv_A(A))
    elif fn == 'richardson':
        polynomial(A, x, np.zeros_like(x), iterations=1,
                   coefficients=[1.0 / approximate_spectral_radius(A)])
    elif fn == 'gmres':
        x[:] = (gmres(A, np.zeros_like(x), x0=x,
                      maxiter=candidate_iters)[0]).reshape(x.shape)
    else:
        raise TypeError('Unrecognized smoother')
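
The dispatch in Example No. 4 hinges on unpack_arg, which normalizes a smoother
specification into a name plus keyword arguments. A plausible sketch, matching
pyamg's 'name' or ('name', kwargs) convention (the exact helper lives inside
pyamg and may differ in detail):

def unpack_arg(v):
    # ('gauss_seidel', {'sweep': 'symmetric'}) -> ('gauss_seidel', {...})
    if isinstance(v, tuple):
        return v[0], v[1]
    # 'gauss_seidel' -> ('gauss_seidel', {})
    return v, {}

fn, kwargs = unpack_arg(('jacobi', {'iterations': 1}))
assert fn == 'jacobi' and kwargs == {'iterations': 1}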
Example No. 5
    Ub[ind1] = 1.-np.cos(np.pi*Y[ind1])
    Ub[ind2] = 1.-np.cos(np.pi*Y[ind2])
    rhs = - R.dot(Q.transpose().dot(Lgd).dot(Q).dot(Ub.reshape(-1,order='F')))

## Direct solve
t0 = time.time()
Lglu = sla.splu(Lg.tocsc())
usln1 = Lglu.solve(rhs)
t1 = time.time() - t0
print('SP LU: time=%f'%(t1))

## GMRES solve
res2 = []
t0 = time.time()
# usln2,info,niter = gmressolve(Lg,rhs,tol=1e-8)
usln2, info = gmres(Lg, rhs, x0=None, tol=1e-8, restrt=200, maxiter=5, residuals=res2)
t2 = time.time() - t0
print('GMRES: time=%f, iter=%d, (start, end) res=%e,%e' % (t2, len(res2), res2[0], res2[-1]))

## MG setup
# mgschls = [('Divide',4), ('Divide',2), ('Subtract',2)]
mgschls = [('Divide',4)]
mgsmthl = ['RR','SC']
# mgsmthl = ['SC']

for sched in mgschls:
    for smther in mgsmthl:
        t0 = time.time()
        semml = semml_md(p0,Ex,Ey,mxlv,initflg,X,Y,smoother=smther,schedule=sched,crs_solver='SPLU')
        tmgsetup = time.time() - t0
        print(semml)
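
The SEM operators (Lg, Q, R) and the semml_md hierarchy are specific to the
original script, but the direct-vs-GMRES comparison can be sketched on a standard
test matrix:

import time
import numpy as np
import scipy.sparse.linalg as sla
from pyamg.gallery import poisson
from pyamg.krylov import gmres

Lg = poisson((200, 200), format='csr')
rhs = np.random.rand(Lg.shape[0])

t0 = time.time()
Lglu = sla.splu(Lg.tocsc())   # sparse LU factorization
usln1 = Lglu.solve(rhs)
print('SP LU: time=%f' % (time.time() - t0))

res2 = []
t0 = time.time()
usln2, info = gmres(Lg, rhs, tol=1e-8, restrt=200, maxiter=5, residuals=res2)
print('GMRES: time=%f, iter=%d, res=%e' % (time.time() - t0, len(res2), res2[-1]))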
Example No. 6
def general_setup_stage(ml, symmetry, candidate_iters, prepostsmoother, smooth,
                        eliminate_local, coarse_solver, work):
    """
    Computes additional candidates and improvements
    following Algorithm 4 in Brezina et al.

    Parameters
    ----------
    candidate_iters
        number of test relaxation iterations
    epsilon
        minimum acceptable relaxation convergence factor

    References
    ----------
    .. [1] Brezina, Falgout, MacLachlan, Manteuffel, McCormick, and Ruge
       "Adaptive Smoothed Aggregation (alphaSA) Multigrid"
       SIAM Review Volume 47,  Issue 2  (2005)
       http://www.cs.umn.edu/~maclach/research/aSA2.pdf
    """
    def make_bridge(T):
        M, N = T.shape
        K = T.blocksize[0]
        bnnz = T.indptr[-1]
        # the K+1 represents the new dof introduced by the new candidate.  the
        # bridge 'T' ignores this new dof and just maps zeros there
        data = np.zeros((bnnz, K + 1, K), dtype=T.dtype)
        data[:, :-1, :] = T.data
        return bsr_matrix((data, T.indices, T.indptr),
                          shape=((K + 1) * int(M / K), N))

    def expand_candidates(B_old, nodesize):
        # insert a new dof that is always zero, to create NullDim+1 dofs per
        # node in B
        NullDim = B_old.shape[1]
        nnodes = int(B_old.shape[0] / nodesize)
        Bnew = np.zeros((nnodes, nodesize + 1, NullDim), dtype=B_old.dtype)
        Bnew[:, :-1, :] = B_old.reshape(nnodes, nodesize, NullDim)
        return Bnew.reshape(-1, NullDim)

    levels = ml.levels

    x = np.random.rand(levels[0].A.shape[0], 1)
    if levels[0].A.dtype.name.startswith('complex'):
        x = x + 1.0j * np.random.rand(levels[0].A.shape[0], 1)
    b = np.zeros_like(x)

    x = ml.solve(b,
                 x0=x,
                 tol=float(np.finfo(np.float64).tiny),
                 maxiter=candidate_iters)
    work[:] += ml.operator_complexity() * ml.levels[0].A.nnz * candidate_iters * 2

    T0 = levels[0].T.copy()

    # TEST FOR CONVERGENCE HERE

    for i in range(len(ml.levels) - 2):
        # alpha-SA paper does local elimination here, but after talking
        # to Marian, it's not clear that this helps things
        # fn, kwargs = unpack_arg(eliminate_local)
        # if fn == True:
        #    eliminate_local_candidates(x,levels[i].AggOp,levels[i].A,
        #    levels[i].T, **kwargs)

        # add candidate to B
        B = np.hstack((levels[i].B, x.reshape(-1, 1)))

        # construct Ptent
        T, R = fit_candidates(levels[i].AggOp, B)

        levels[i].T = T
        x = R[:, -1].reshape(-1, 1)

        # smooth P
        fn, kwargs = unpack_arg(smooth[i])
        if fn == 'jacobi':
            levels[i].P = jacobi_prolongation_smoother(levels[i].A, T,
                                                       levels[i].C, R,
                                                       **kwargs)
        elif fn == 'richardson':
            levels[i].P = richardson_prolongation_smoother(
                levels[i].A, T, **kwargs)
        elif fn == 'energy':
            levels[i].P = energy_prolongation_smoother(levels[i].A, T,
                                                       levels[i].C, R, None,
                                                       (False, {}), **kwargs)
            x = R[:, -1].reshape(-1, 1)
        elif fn is None:
            levels[i].P = T
        else:
            raise ValueError('unrecognized prolongation smoother method %s' %
                             str(fn))

        # construct R
        if symmetry == 'symmetric':  # R should reflect A's structure
            levels[i].R = levels[i].P.T.asformat(levels[i].P.format)
        elif symmetry == 'hermitian':
            levels[i].R = levels[i].P.H.asformat(levels[i].P.format)

        # construct coarse A
        levels[i + 1].A = levels[i].R * levels[i].A * levels[i].P

        # construct bridging P
        T_bridge = make_bridge(levels[i + 1].T)
        R_bridge = levels[i + 2].B

        # smooth bridging P
        fn, kwargs = unpack_arg(smooth[i + 1])
        if fn == 'jacobi':
            levels[i + 1].P = jacobi_prolongation_smoother(
                levels[i + 1].A, T_bridge, levels[i + 1].C, R_bridge, **kwargs)
        elif fn == 'richardson':
            levels[i + 1].P = richardson_prolongation_smoother(
                levels[i + 1].A, T_bridge, **kwargs)
        elif fn == 'energy':
            levels[i + 1].P = energy_prolongation_smoother(
                levels[i + 1].A, T_bridge, levels[i + 1].C, R_bridge, None,
                (False, {}), **kwargs)
        elif fn is None:
            levels[i + 1].P = T_bridge
        else:
            raise ValueError('unrecognized prolongation smoother method %s' %
                             str(fn))

        # construct the "bridging" R
        if symmetry == 'symmetric':  # R should reflect A's structure
            levels[i + 1].R = levels[i + 1].P.T.asformat(levels[i + 1].P.format)
        elif symmetry == 'hermitian':
            levels[i + 1].R = levels[i + 1].P.H.asformat(levels[i + 1].P.format)

        # run solver on candidate
        solver = multilevel_solver(levels[i + 1:], coarse_solver=coarse_solver)
        change_smoothers(solver,
                         presmoother=prepostsmoother,
                         postsmoother=prepostsmoother)
        x = solver.solve(np.zeros_like(x),
                         x0=x,
                         tol=float(np.finfo(np.float64).tiny),
                         maxiter=candidate_iters)
        work[:] += 2 * solver.operator_complexity() * \
            solver.levels[0].A.nnz * candidate_iters * 2

        # update values on next level
        levels[i + 1].B = R[:, :-1].copy()
        levels[i + 1].T = T_bridge

    # note that we only use the x from the second coarsest level
    fn, kwargs = unpack_arg(prepostsmoother)
    for lvl in reversed(levels[:-2]):
        x = lvl.P * x
        work[:] += lvl.A.nnz * candidate_iters * 2

        if fn == 'gauss_seidel':
            # only relax at nonzeros, so as not to mess up any locally dropped
            # candidates
            indices = np.ravel(x).nonzero()[0]
            gauss_seidel_indexed(lvl.A,
                                 x,
                                 np.zeros_like(x),
                                 indices,
                                 iterations=candidate_iters,
                                 sweep='symmetric')

        elif fn == 'gauss_seidel_ne':
            gauss_seidel_ne(lvl.A,
                            x,
                            np.zeros_like(x),
                            iterations=candidate_iters,
                            sweep='symmetric')

        elif fn == 'gauss_seidel_nr':
            gauss_seidel_nr(lvl.A,
                            x,
                            np.zeros_like(x),
                            iterations=candidate_iters,
                            sweep='symmetric')

        elif fn == 'jacobi':
            jacobi(lvl.A,
                   x,
                   np.zeros_like(x),
                   iterations=1,
                   omega=1.0 / rho_D_inv_A(lvl.A))

        elif fn == 'richardson':
            polynomial(lvl.A,
                       x,
                       np.zeros_like(x),
                       iterations=1,
                       coefficients=[1.0 / approximate_spectral_radius(lvl.A)])

        elif fn == 'gmres':
            x[:] = (gmres(lvl.A,
                          np.zeros_like(x),
                          x0=x,
                          maxiter=candidate_iters)[0]).reshape(x.shape)
        else:
            raise TypeError('Unrecognized smoother')

    # x will be dense again, so we have to drop locally again
    elim, elim_kwargs = unpack_arg(eliminate_local)
    if elim is True:
        x = x / norm(x, 'inf')
        eliminate_local_candidates(x, levels[0].AggOp, levels[0].A, T0,
                                   **elim_kwargs)

    return x.reshape(-1, 1)
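
A tiny numeric illustration of make_bridge (assumed lifted to module scope for
the demo): each K-by-K block of the tentative prolongator gains one extra zero
row, making room for the dof contributed by the new candidate.

import numpy as np
from scipy.sparse import bsr_matrix

T = bsr_matrix(np.kron(np.eye(2), np.ones((2, 2))), blocksize=(2, 2))
T_bridge = make_bridge(T)               # make_bridge as defined above
print(T.shape, '->', T_bridge.shape)    # (4, 4) -> (6, 4)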
Example No. 7
def general_setup_stage(ml, symmetry, candidate_iters, prepostsmoother,
                        smooth, eliminate_local, coarse_solver, work):
    """Compute additional candidates and improvements following Algorithm 4 in Brezina et al.

    Parameters
    ----------
    candidate_iters
        number of test relaxation iterations
    epsilon
        minimum acceptable relaxation convergence factor

    References
    ----------
    .. [1] Brezina, Falgout, MacLachlan, Manteuffel, McCormick, and Ruge
       "Adaptive Smoothed Aggregation (alphaSA) Multigrid"
       SIAM Review Volume 47,  Issue 2  (2005)
       http://www.cs.umn.edu/~maclach/research/aSA2.pdf

    """
    def make_bridge(T):
        M, N = T.shape
        K = T.blocksize[0]
        bnnz = T.indptr[-1]
        # the K+1 represents the new dof introduced by the new candidate.  the
        # bridge 'T' ignores this new dof and just maps zeros there
        data = np.zeros((bnnz, K+1, K), dtype=T.dtype)
        data[:, :-1, :] = T.data
        return bsr_matrix((data, T.indices, T.indptr),
                          shape=((K + 1) * int(M / K), N))

    def expand_candidates(B_old, nodesize):
        # insert a new dof that is always zero, to create NullDim+1 dofs per
        # node in B
        NullDim = B_old.shape[1]
        nnodes = int(B_old.shape[0] / nodesize)
        Bnew = np.zeros((nnodes, nodesize+1, NullDim), dtype=B_old.dtype)
        Bnew[:, :-1, :] = B_old.reshape(nnodes, nodesize, NullDim)
        return Bnew.reshape(-1, NullDim)

    levels = ml.levels

    x = np.random.rand(levels[0].A.shape[0], 1)
    if levels[0].A.dtype.name.startswith('complex'):
        x = x + 1.0j*np.random.rand(levels[0].A.shape[0], 1)
    b = np.zeros_like(x)

    x = ml.solve(b, x0=x, tol=float(np.finfo(np.float64).tiny),
                 maxiter=candidate_iters)
    work[:] += ml.operator_complexity()*ml.levels[0].A.nnz*candidate_iters*2

    T0 = levels[0].T.copy()

    # TEST FOR CONVERGENCE HERE

    for i in range(len(ml.levels) - 2):
        # alpha-SA paper does local elimination here, but after talking
        # to Marian, it's not clear that this helps things
        # fn, kwargs = unpack_arg(eliminate_local)
        # if fn == True:
        #    eliminate_local_candidates(x,levels[i].AggOp,levels[i].A,
        #    levels[i].T, **kwargs)

        # add candidate to B
        B = np.hstack((levels[i].B, x.reshape(-1, 1)))

        # construct Ptent
        T, R = fit_candidates(levels[i].AggOp, B)

        levels[i].T = T
        x = R[:, -1].reshape(-1, 1)

        # smooth P
        fn, kwargs = unpack_arg(smooth[i])
        if fn == 'jacobi':
            levels[i].P = jacobi_prolongation_smoother(levels[i].A, T,
                                                       levels[i].C, R,
                                                       **kwargs)
        elif fn == 'richardson':
            levels[i].P = richardson_prolongation_smoother(levels[i].A, T,
                                                           **kwargs)
        elif fn == 'energy':
            levels[i].P = energy_prolongation_smoother(levels[i].A, T,
                                                       levels[i].C, R, None,
                                                       (False, {}), **kwargs)
            x = R[:, -1].reshape(-1, 1)
        elif fn is None:
            levels[i].P = T
        else:
            raise ValueError('unrecognized prolongation smoother method %s' %
                             str(fn))

        # construct R
        if symmetry == 'symmetric':  # R should reflect A's structure
            levels[i].R = levels[i].P.T.asformat(levels[i].P.format)
        elif symmetry == 'hermitian':
            levels[i].R = levels[i].P.H.asformat(levels[i].P.format)

        # construct coarse A
        levels[i+1].A = levels[i].R * levels[i].A * levels[i].P

        # construct bridging P
        T_bridge = make_bridge(levels[i+1].T)
        R_bridge = levels[i+2].B

        # smooth bridging P
        fn, kwargs = unpack_arg(smooth[i+1])
        if fn == 'jacobi':
            levels[i+1].P = jacobi_prolongation_smoother(levels[i+1].A,
                                                         T_bridge,
                                                         levels[i+1].C,
                                                         R_bridge, **kwargs)
        elif fn == 'richardson':
            levels[i+1].P = richardson_prolongation_smoother(levels[i+1].A,
                                                             T_bridge,
                                                             **kwargs)
        elif fn == 'energy':
            levels[i+1].P = energy_prolongation_smoother(levels[i+1].A,
                                                         T_bridge,
                                                         levels[i+1].C,
                                                         R_bridge, None,
                                                         (False, {}), **kwargs)
        elif fn is None:
            levels[i+1].P = T_bridge
        else:
            raise ValueError('unrecognized prolongation smoother method %s' %
                             str(fn))

        # construct the "bridging" R
        if symmetry == 'symmetric':  # R should reflect A's structure
            levels[i+1].R = levels[i+1].P.T.asformat(levels[i+1].P.format)
        elif symmetry == 'hermitian':
            levels[i+1].R = levels[i+1].P.H.asformat(levels[i+1].P.format)

        # run solver on candidate
        solver = multilevel_solver(levels[i+1:], coarse_solver=coarse_solver)
        change_smoothers(solver, presmoother=prepostsmoother,
                         postsmoother=prepostsmoother)
        x = solver.solve(np.zeros_like(x), x0=x,
                         tol=float(np.finfo(np.float64).tiny),
                         maxiter=candidate_iters)
        work[:] += 2 * solver.operator_complexity() * \
            solver.levels[0].A.nnz * candidate_iters * 2

        # update values on next level
        levels[i+1].B = R[:, :-1].copy()
        levels[i+1].T = T_bridge

    # note that we only use the x from the second coarsest level
    fn, kwargs = unpack_arg(prepostsmoother)
    for lvl in reversed(levels[:-2]):
        x = lvl.P * x
        work[:] += lvl.A.nnz*candidate_iters*2

        if fn == 'gauss_seidel':
            # only relax at nonzeros, so as not to mess up any locally dropped
            # candidates
            indices = np.ravel(x).nonzero()[0]
            gauss_seidel_indexed(lvl.A, x, np.zeros_like(x), indices,
                                 iterations=candidate_iters, sweep='symmetric')

        elif fn == 'gauss_seidel_ne':
            gauss_seidel_ne(lvl.A, x, np.zeros_like(x),
                            iterations=candidate_iters, sweep='symmetric')

        elif fn == 'gauss_seidel_nr':
            gauss_seidel_nr(lvl.A, x, np.zeros_like(x),
                            iterations=candidate_iters, sweep='symmetric')

        elif fn == 'jacobi':
            jacobi(lvl.A, x, np.zeros_like(x), iterations=1,
                   omega=1.0 / rho_D_inv_A(lvl.A))

        elif fn == 'richardson':
            polynomial(lvl.A, x, np.zeros_like(x), iterations=1,
                       coefficients=[1.0/approximate_spectral_radius(lvl.A)])

        elif fn == 'gmres':
            x[:] = (gmres(lvl.A, np.zeros_like(x), x0=x,
                          maxiter=candidate_iters)[0]).reshape(x.shape)
        else:
            raise TypeError('Unrecognized smoother')

    # x will be dense again, so we have to drop locally again
    elim, elim_kwargs = unpack_arg(eliminate_local)
    if elim is True:
        x = x/norm(x, 'inf')
        eliminate_local_candidates(x, levels[0].AggOp, levels[0].A, T0,
                                   **elim_kwargs)

    return x.reshape(-1, 1)
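
general_setup_stage is an internal stage of pyamg's adaptive smoothed
aggregation; the public entry point is adaptive_sa_solver. A short usage sketch
under that assumption (keyword names follow pyamg's documented signature):

import numpy as np
from pyamg.gallery import poisson
from pyamg.aggregation import adaptive_sa_solver

A = poisson((100, 100), format='csr')

# Adaptive setup: candidates are improved by relaxation sweeps, which is
# exactly the role general_setup_stage plays internally.
ml, work = adaptive_sa_solver(A, num_candidates=2, candidate_iters=5)

b = np.random.rand(A.shape[0])
res = []
x = ml.solve(b, tol=1e-8, residuals=res)
print(ml)
print('residual:', res[-1])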
Example No. 8
    def _select_step(self, shots, current_objective_value, gradient, iteration,
                     objective_arguments, **kwargs):
        """Compute the Gauss-Newton update for a set of shots.

        Gives the step s as a function of the gradient vector.  Implemented as on p. 178 of Nocedal and Wright.

        Parameters
        ----------
        shots : list of pysit.Shot
            List of Shots for which to compute.
        gradient : ndarray
            Gradient vector.
        iteration : int
            Current iteration index.

        """

        m0 = self.base_model

        rhs = -1 * gradient.asarray()

        def matvec(x):
            # Note: m1 is constructed but not used by this implementation.
            m1 = m0.perturbation(data=x)
            return self.objective_function.apply_hessian(
                shots, self.base_model, x, **objective_arguments).data

        A_shape = (len(rhs), len(rhs))

        A = LinearOperator(shape=A_shape, matvec=matvec, dtype=gradient.dtype)

        resid = []

        # d, info = cg(A, rhs, maxiter=self.krylov_maxiter, residuals=resid)
        d, info = gmres(A, rhs, maxiter=self.krylov_maxiter, residuals=resid)

        d.shape = rhs.shape

        direction = m0.perturbation(data=d)

        if info < 0:
            print("GMRES failure")
        if info == 0:
            print("GMRES converged")
        if info > 0:
            print("GMRES ran {0} iterations".format(info))

        alpha0_kwargs = {'reset': False}
        if iteration == 0:
            alpha0_kwargs = {'reset': True}

        alpha = self.select_alpha(
            shots,
            gradient,
            direction,
            objective_arguments,
            current_objective_value=current_objective_value,
            alpha0_kwargs=alpha0_kwargs,
            **kwargs)

        self._print('  alpha {0}'.format(alpha))
        self.store_history('alpha', iteration, alpha)

        step = alpha * direction

        return step
Example No. 9
bb = np.r_[b.array(), np.zeros(len(points))]

# Preconditioner diag(ML(A), I)
ml = smoothed_aggregation_solver(A_)
PA = ml.aspreconditioner()

def Pmat_vec(x):
    yA = PA.matvec(x[:V.dim()])
    # Identity on part of x from point sources
    return np.r_[yA, x[V.dim():]]

P = LinearOperator(shape=AA.shape, matvec=Pmat_vec)
    
residuals = []
# Run the iterations
x, failed = gmres(AA, bb, M=P, 
                  tol=1e-10, maxiter=1000, residuals=residuals)
uh = Function(V)
uh.vector()[:] = x[:V.dim()]

# Solve
# xx = spsolve(AA, bb)

# Get the coefs of !multipliers
# U = xx[:V.dim()]
# uh = Function(V)
# uh.vector()[:] = U

print(len(residuals) - 1, residuals[-1])

plot(uh)
interactive()
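
Stripped of the FEniCS assembly and the point-source block, the core pattern of
this example is AMG-preconditioned GMRES. A self-contained sketch, with a Poisson
matrix standing in for the assembled system:

import numpy as np
from pyamg import smoothed_aggregation_solver
from pyamg.gallery import poisson
from pyamg.krylov import gmres

A = poisson((100, 100), format='csr')
b = np.random.rand(A.shape[0])

ml = smoothed_aggregation_solver(A)
P = ml.aspreconditioner()   # applies one AMG cycle per preconditioner call

residuals = []
x, info = gmres(A, b, M=P, tol=1e-10, maxiter=1000, residuals=residuals)
print(len(residuals) - 1, residuals[-1])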
Example No. 10
    def _select_step(self, shots, current_objective_value, gradient, iteration,
                     objective_arguments, **kwargs):
        """Compute the update for a set of shots.
        
        Gives the step s as a function of the gradient vector.  Implemented as in p178 of Nocedal and Wright.
        
        Parameters
        ----------
        shots : list of pysit.Shot
            List of Shots for which to compute.
        grad : ndarray
            Gradient vector.
        i : int
            Current time index.
        objective_arguments['frequencies'] gives a list with the frequencies
        
        nrealizations: should be passed on through kwargs 
            
        """
        if not isinstance(self.objective_function, FrequencyLeastSquares):
            raise NotImplementedError(
                "Stochastic Gauss-Newton is only implemented for a frequency "
                "least-squares objective function")

        rhs = -1 * gradient.asarray()
        stochastic_hessian = self._get_stoch_hessian(
            objective_arguments['frequencies'], shots, **kwargs)

        # stochastic_hessian = stochastic_hessian / 900.0

        m = self.solver.mesh
        dx = m.x.delta
        dz = m.z.delta

        # The stochastic Hessian needs to be multiplied by (dx*dz) to get values at
        # the same order of magnitude as the adjoint-method approximation of H.
        # This will decrease the entries in the result 'd'.  The difference is
        # probably due to the definition of the norm: volume integral vs. sum, etc.
        # Without the scaling, the entries of the resulting 'step' seem too large:
        # after 10 reductions of alpha, the norm is still decreasing each time.
        # The final result does not seem to change, since alpha adapts to the
        # different order of magnitude of 'd'.

        # stochastic_hessian = stochastic_hessian * float(dx*dz)

        resid = []
        if self.use_diag_preconditioner:
            diagonal = stochastic_hessian.diagonal()
            idiagonal = 1. / diagonal
            Pinv = dia_matrix((idiagonal, 0), shape=stochastic_hessian.shape)
            d, info = gmres(stochastic_hessian,
                            rhs,
                            maxiter=self.krylov_maxiter,
                            residuals=resid,
                            M=Pinv)
        else:
            d, info = gmres(stochastic_hessian,
                            rhs,
                            maxiter=self.krylov_maxiter,
                            residuals=resid)

        d.shape = rhs.shape
        direction = self.solver.model_parameters.perturbation()
        direction = direction.without_padding()
        direction.data = d

        # d, info = cg(A, rhs, maxiter=self.krylov_maxiter, residuals=resid)

        # direction = mo.perturbation(data=d)

        if info < 0:
            print("GMRES failure")
        if info == 0:
            print("GMRES converged")
        if info > 0:
            print("GMRES ran {0} iterations".format(info))

        # Copied from LBFGS; think about this later.
        alpha0_kwargs = {'reset': False}
        if iteration == 0:
            alpha0_kwargs = {'reset': True}

        alpha = self.select_alpha(
            shots,
            gradient,
            direction,
            objective_arguments,
            current_objective_value=current_objective_value,
            alpha0_kwargs=alpha0_kwargs,
            **kwargs)

        self._print('  alpha {0}'.format(alpha))
        self.store_history('alpha', iteration, alpha)

        step = alpha * direction

        if 'printfigs' in kwargs:
            if kwargs['printfigs']:  # if it is True
                import numpy as np
                import matplotlib.pyplot as plt
                nx = m.x.n
                nz = m.z.n

                gradientplot = np.reshape(gradient.data, (nz, nx), 'F')
                directionplot = np.reshape(direction.data, (nz, nx), 'F')
                stepplot = np.reshape(step.data, (nz, nx), 'F')

                plt.ion()
                f1 = plt.figure(1)
                plt.imshow(gradientplot, interpolation='nearest')
                plt.colorbar()
                plt.title('gradient')

                f2 = plt.figure(2)
                plt.imshow(directionplot, interpolation='nearest')
                plt.colorbar()
                plt.title('Direction after applying Hessian')

                f3 = plt.figure(3)
                plt.imshow(stepplot, interpolation='nearest')
                plt.colorbar()
                plt.title('Step after applying Hessian and doing linesearch')

                f4 = plt.figure(4)
                plt.imshow(stochastic_hessian.todense(),
                           interpolation='nearest')
                plt.colorbar()
                plt.title("incomplete stochastic hessian")

                wait = input("PRESS ENTER TO CONTINUE.")
                plt.close(f1)
                plt.close(f2)
                plt.close(f3)
                plt.close(f4)

        return step
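
The diagonal-preconditioning branch above is worth isolating: invert the diagonal
of the (approximate) Hessian and pass it to gmres via M. A compact sketch, with a
Poisson matrix standing in for the stochastic Hessian:

import numpy as np
from scipy.sparse import dia_matrix
from pyamg.gallery import poisson
from pyamg.krylov import gmres

H = poisson((50, 50), format='csr')   # stand-in for stochastic_hessian
rhs = np.random.rand(H.shape[0])

# Jacobi (diagonal) preconditioner: approximate inv(H) by 1/diag(H).
idiagonal = 1. / H.diagonal()
Pinv = dia_matrix((idiagonal, 0), shape=H.shape)

resid = []
d, info = gmres(H, rhs, maxiter=100, residuals=resid, M=Pinv)
print('GMRES info: {0}, final residual: {1}'.format(info, resid[-1]))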