Example #1
File: nufft.py Project: jyhmiinlin/cineFSE
def pipe_density(V): 
    '''
    The iterative density compensation method of J. Pipe (1999).
    The pure-Python fixed-point iteration is slow, so a damped
    least-squares solve (LSMR) is used instead, which is faster.
    '''
    
    V1=V.getH()
#     E = V.dot( V1.dot(    W   )   )
#     W = W*(E+1.0e-17)/(E*E+1.0e-17)    
    b = numpy.ones( (V.get_shape()[0] ,1) ,dtype  = numpy.complex64)  
    from scipy.sparse.linalg import lsqr, lsmr
        
#     x1 =  lsqr(V, b , iter_lim=10, calc_var = True, damp = 0.01)
    x1 =  lsmr(V, b , maxiter=12,  damp = 0.1)
    
    my_k_dens = x1[0]    # the first element is the answer
    
#     tmp_W =  lsqr(V1, my_k_dens, iter_lim=10, calc_var = True, damp = 0.01)
    tmp_W =  lsmr(V1, my_k_dens , maxiter=12,  damp = 0.1)
    
    W = numpy.reshape( tmp_W[0], (V.get_shape()[0] ,1),order='F' ) # reshape vector

#     for pppj in xrange(0,10):
# #             W[W>1.0]=1.0
# #             print(pppj)
#         E = V.dot( V1.dot(    W   )   )
#  
#         W = W*(E+1.0e-17)/(E**2+1.0e-17)
 
    return W
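For reference, the commented-out lines above sketch the original Pipe & Menon fixed-point update W_new = W * E / E^2 with E = V V^H W. A minimal standalone version of that iteration might look like this (a sketch only, assuming V exposes getH() and get_shape() as above; it is not part of the original file):

import numpy

def pipe_density_fixed_point(V, n_iter=10, eps=1.0e-17):
    # Pipe & Menon (1999) fixed-point iteration: W_new = W * E / E^2,
    # where E = V V^H W; eps guards against division by zero
    V1 = V.getH()
    W = numpy.ones((V.get_shape()[0], 1), dtype=numpy.complex64)
    for _ in range(n_iter):
        E = V.dot(V1.dot(W))
        W = W * (E + eps) / (E**2 + eps)
    return W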
Example #2
    def testInitialization(self):
        # Test that the default setting is not modified
        x_ref = lsmr(G, b)[0]
        x0 = zeros(b.shape)
        x = lsmr(G, b, x0=x0)[0]
        assert_array_almost_equal(x_ref, x)

        # Test warm-start with single iteration
        x0 = lsmr(G, b, maxiter=1)[0]
        x = lsmr(G, b, x0=x0)[0]
        assert_array_almost_equal(x_ref, x)
Example #3
 def newton_raphson(init, fun, jacob, args, TOL = 1e-10, max_iters = 25):
     """Use Newton's method to find a root of the function fun.
     
     Arguments:
     init -- A sequence of length k that represents an initial guess.
     fun -- The function whose roots we wish to find. This function takes a
            sequence of length k as input and returns a sequence of length
            k as output.
     jacob -- A function that computes the Jacobian of fun at point 
              x. Takes a sequence of length k as input and returns a k x k 
              np.array as output. 
     args -- A list of extra arguments to be passed to fun() and jacob().
     TOL -- (Optional) Iterations cease when an x is found such that
            ||fun(x)||_2 < TOL.
     max_iters -- (Optional) The number of iterations to perform before 
                  announcing nonconvergence.
     
     Output:
     A sequence of length k that is a root of fun.
     
     """
     init = np.array(init)
     x_curr = np.array(init.copy())
     curr_resid_norm = np.linalg.norm(fun(x_curr, *args))
     iter_num = 0
     while curr_resid_norm >= TOL and iter_num < max_iters:
         # print "Iteration: {0}, resid norm: {1}".format(iter_num, curr_resid_norm)
         iter_num = iter_num + 1
         J = jacob(x_curr, *args)
         try:
             delta_x = spsl.spsolve(J, -fun(x_curr, *args))
         except np.linalg.LinAlgError:
             delta_x = spsl.lsmr(J, -fun(x_curr, *args))[0]
         if np.isnan(np.sum(delta_x)):
             delta_x = spsl.lsmr(J, -fun(x_curr, *args))[0]
         x_old = x_curr.copy()
         old_resid_norm = curr_resid_norm
         x_curr = x_old + delta_x
         curr_resid_norm = np.linalg.norm(fun(x_curr, *args))
         step_size = 2
         # Bisecting line search
         while curr_resid_norm > old_resid_norm:
             step_size = step_size / 2.0
             x_curr = x_old + step_size * delta_x
             curr_resid_norm = np.linalg.norm(fun(x_curr, *args))
     if curr_resid_norm > TOL or np.isnan(np.sum(x_curr)):
         raise RuntimeError("PowerNetwork.newton_raphson: Non-convergence.")
     else: return x_curr
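A minimal usage sketch for newton_raphson with a hypothetical two-variable system (fun and jacob below are illustrative, not from the original project; spsl.spsolve accepts the dense Jacobian here, converting it with a sparse-efficiency warning):

import numpy as np

def fun(x):
    # residuals of: x0^2 + x1^2 = 4 and x0 - x1 = 0
    return np.array([x[0]**2 + x[1]**2 - 4.0, x[0] - x[1]])

def jacob(x):
    return np.array([[2.0*x[0], 2.0*x[1]], [1.0, -1.0]])

root = newton_raphson([1.0, 2.0], fun, jacob, args=[])
# root is approximately [sqrt(2), sqrt(2)]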
Example #4
    def __init__(self, Nside, cutoff):
        #Nside=128
        self.sradius = 0.4 / 180 * np.pi  ## SY 8/3/19 search radius around map pixel
        self.pixarea = hp.nside2pixarea(Nside)  ## SY 27/2/19
        self.Nside = Nside
        self.q = Quasars(Nside)
        self.d = Data(self.q, Nside)
        self.cutoff = cutoff

        self.pixid = self.q.getCover(Nside)
        self.pixtheta, self.pixphi = hp.pix2ang(Nside, self.pixid)
        self.Np = len(self.pixid)  ## number of pixels
        self.Nd = self.d.len()

        print("# of pixels in a map", self.Np)
        print("Nside", self.Nside)
        print("Pixarea", self.pixarea)
        A = self.getAMatrix()
        b = self.d.signal * np.sqrt(
            self.d.weight)  ## (we suppressed by weight)
        print("running solver")
        mp = lsmr(A, b, show=True)
        # lsmr returns (x, istop, itn, normr, normar, norma, conda, normx)
        print(mp[0])  # x, the solution
        print(mp[1])  # istop, the stopping reason  ## SY 18/2/19
        print(mp[2])  # itn, number of iterations  ## SY 18/2/19
        print(mp[3])  # normr = norm(b - Ax)  ## SY 18/2/19
        print(mp[4])  # normar = norm(A^H (b - Ax))  ## SY 18/2/19
        print(mp[5])  # norma, estimate of norm(A)  ## SY 18/2/19
        print(mp[6])  # conda, estimate of cond(A)  ## SY 18/2/19
        print(mp[7])  # normx = norm(x)  ## SY 18/2/19
        nside = np.array(Nside)  ## SY 19/2/19
        pixels = self.pixid  ## SY 19/2/19
        kappas = np.array(mp[0])  ## SY 19/2/19
        np.savez('kappa_opt_cutoff/kappa_{}_rt{}_rp{}_nside{}_cutoff{}'.format \
                 (maptype, rtmax, rpmax, nside, cutoff), nside, pixels, kappas)
Example #5
File: test_lsmr.py Project: b-t-g/Sim
 def setUp(self):
     self.n = 10
     self.A = lowerBidiagonalMatrix(20,self.n)
     self.xtrue = transpose(arange(self.n,0,-1))
     self.Afun = aslinearoperator(self.A)
     self.b = self.Afun.matvec(self.xtrue)
     self.returnValues = lsmr(self.A,self.b)
Example #6
    def trainReservoirBatch(self, inputData, outputData):

        inputN = inputData.shape[0]
        self.internalState = np.zeros(
            (inputN - self.initialTransient, self.Nx))
        internalState = np.zeros(self.Nx)

        # Compute internal states of the reservoir
        for t in range(inputN):
            term1 = np.dot(self.inputWeight, inputData[t])
            term2 = np.dot(self.reservoirWeight, internalState)
            internalState = (
                1.0 - self.leakingRate
            ) * internalState + self.leakingRate * self.activation(term1 +
                                                                   term2)
            if t >= self.initialTransient:
                self.internalState[t - self.initialTransient] = internalState
                self.latestInternalState = internalState

        # Learn the output weights
        A = np.hstack((inputData[self.initialTransient:], self.internalState))

        # Solve for x in Ax = B
        for d in range(self.outputD):
            B = outputData[self.initialTransient:, d]
            self.outputWeight[d, :] = sla.lsmr(A, B, damp=1e-8)[0]
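The damp=1e-8 argument turns each solve into a lightly regularized least-squares problem: per the scipy.sparse.linalg.lsmr documentation it minimizes ||A x - B||^2 + damp^2 * ||x||^2, i.e. a small ridge penalty on the readout weights. A quick standalone equivalence check (illustrative, not from the original class):

import numpy as np
import scipy.sparse.linalg as sla

rng = np.random.default_rng(0)
A = rng.standard_normal((50, 10))
B = rng.standard_normal(50)
d = 1e-2
w_lsmr = sla.lsmr(A, B, damp=d)[0]
w_ridge = np.linalg.solve(A.T @ A + d**2 * np.eye(10), A.T @ B)  # ridge closed form
assert np.allclose(w_lsmr, w_ridge, atol=1e-4)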
Example #7
File: cgsense.py Project: zongjg/pygrappa
def _isense2(kspace, sens, show=False):
    '''Try LSMR with square matrix.'''

    sx, sy, nc = kspace.shape[:]
    mask = np.abs(kspace[..., 0]) > 0

    def _AH(x0):
        x0 = x0[:sx*sy*nc] + 1j*x0[sx*sy*nc:]
        x0 = np.reshape(x0, (sx, sy, nc))
        res = np.sum(sens.conj()*ifft2(x0), axis=-1)
        res = np.reshape(res, (-1,))
        return np.concatenate((res.real, res.imag))

    def _A(x0):
        res = x0[:sx*sy] + 1j*x0[sx*sy:]
        res = np.reshape(res, (sx, sy))
        res = fft2(res[..., None]*sens)*mask[..., None]
        res = np.reshape(res, (-1,))
        return np.concatenate((res.real, res.imag))

    E = lambda x0: _AH(_A(x0))
    AHA = LinearOperator((sx*sy, sx*sy), matvec=E, rmatvec=E)
    b = np.reshape(kspace, (-1,))
    b = np.concatenate((b.real, b.imag))
    b = _AH(b)
    x = lsmr(AHA, b, show=show)[0]
    x = x[:sx*sy] + 1j*x[sx*sy:]

    return np.reshape(x, (sx, sy))
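Here LSMR is applied to the square, Hermitian normal-equations operator E = A^H A with right-hand side A^H b, which is why matvec and rmatvec can share the same function. A compact sketch of the same idea on a plain dense matrix (illustrative, not from pygrappa):

import numpy as np
from scipy.sparse.linalg import lsmr, LinearOperator

M = np.random.default_rng(0).standard_normal((20, 8))
b = np.ones(20)
normal_op = LinearOperator((8, 8),
                           matvec=lambda v: M.T @ (M @ v),
                           rmatvec=lambda v: M.T @ (M @ v))  # M^T M is symmetric
x = lsmr(normal_op, M.T @ b)[0]  # solves the normal equations M^T M x = M^T b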
Example #8
File: test_lsmr.py Project: yacth/scipy
 def setup_method(self):
     self.n = 10
     self.A = lowerBidiagonalMatrix(20, self.n)
     self.xtrue = transpose(arange(self.n, 0, -1))
     self.Afun = aslinearoperator(self.A)
     self.b = self.Afun.matvec(self.xtrue)
     self.returnValues = lsmr(self.A, self.b)
Example #9
 def testComplexX0(self):
     A = 4 * eye(self.n) + ones((self.n, self.n))
     xtrue = transpose(arange(self.n, 0, -1))
     b = aslinearoperator(A).matvec(xtrue)
     x0 = zeros(self.n, dtype=complex)
     x = lsmr(A, b, x0=x0)[0]
     assert_almost_equal(norm(x - xtrue), 0, decimal=5)
Example #10
def run_test(data_low, data_high, x_low, x_high):
    """Generates a test problem and solves it with SciPy's LSMR and LSQR"""
    _proc.start_processes()

    start_time = time.time()
    A, b, x_real = generate_data(data_low, data_high, x_low, x_high)
    print("generate_data(...) run time:", time.time() - start_time, "seconds")

    _proc.end_processes()

    print("Size of A =", A.shape)
    print()

    start_time = time.time()
    lsmr_result = linalg.lsmr(A, b, maxiter=1000)

    print("linalg.lsmr(A, b) run time:", time.time() - start_time, "seconds")
    print("Number of iterations =", lsmr_result[2])

    x_lsmr = numpy.array([lsmr_result[0]]).T
    print("sum(|x_real - x_lsmr|) =", numpy.sum(numpy.abs(x_real - x_lsmr)))
    print()

    start_time = time.time()
    lsqr_result = linalg.lsqr(A, b, iter_lim=1000)

    print("linalg.lsqr(A, b) run time:", time.time() - start_time, "seconds")
    print("Number of iterations =", lsqr_result[2])

    x_lsqr = numpy.array([lsqr_result[0]]).T
    print("sum(|x_real - x_lsqr|) =", numpy.sum(numpy.abs(x_real - x_lsqr)))
Example #11
File: problem.py Project: keegango/lsqpy
	def minimizeLSMR(self):
		""" Gather some statistics """
		total_vars = self.total_vars_and_nnz[0]
		num_new_constraints = self.sumsq_expr.numConstraints()
		total_constraints = (num_new_constraints + sum([eq_const.numConstraints() for eq_const in self.eq_consts]))

		""" Create constraint matrices including those implicit constraints """
		constraint_mat = self.createConstraintMat(total_constraints,self.total_vars_and_nnz)
		""" Append a -identity matrix for the dual variables """
		constraint_mat = sparse.hstack(
			[constraint_mat,-1*sparse.eye(constraint_mat.shape[0],num_new_constraints)]).tocsc()

		constraint_const = self.createConstMat(total_constraints,self.total_vars_and_nnz)

		""" Form the KKT system and solve it """
		total_constraints = constraint_const.shape[0]
		KKT_mat = mutils.cat([[mutils.gradMat(total_vars,num_new_constraints),constraint_mat.T],
                              [constraint_mat,mutils.zeros(total_constraints,total_constraints)]])
		KKT_const = sparse.vstack([mutils.zeros(total_vars+num_new_constraints,1),constraint_const])
		KKT_const = np.array(KKT_const.todense())
		KKT_const = np.squeeze(KKT_const)
		solution,exit_condition = linalg.lsmr(KKT_mat.tocsc(),KKT_const)[0:2]
		
		""" Write results back to the correct variables """
		for var in self.included_vars: var.extractValues(solution)
		self.val = solution[total_vars:total_vars+num_new_constraints].T.dot(solution[total_vars:total_vars+num_new_constraints])
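The same KKT pattern on a toy problem may make the structure clearer; a minimal sketch (mine, not from lsqpy) for minimizing ||x||^2 subject to a.x = 1:

import numpy as np
from scipy.sparse.linalg import lsmr

a = np.array([[1.0, 2.0]])
KKT = np.block([[2.0*np.eye(2), a.T],     # gradient block and constraint transpose
                [a, np.zeros((1, 1))]])   # constraint row, zero block
rhs = np.concatenate([np.zeros(2), [1.0]])
sol = lsmr(KKT, rhs)[0]
x = sol[:2]  # [0.2, 0.4], the minimum-norm point on the constraint line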
Example #12
    def iterate(self, method='lsmr', **kwargs):
        for i_meas, (xdata,ydata, gdata, adata) in enumerate(
                zip(self.xvals, self.yvals, self.gvals, self.avals)):

            # adata is either 0, 1 or 2
            amp_guess = self.PRM[adata]

            # gdata is either 0, 1 or 2
            gain_guess = self.PRM[self.Namp + gdata]

            self.Beta[i_meas] = ydata - amp_guess * gain_guess * self.exp_factor[i_meas]

            self.BIG[i_meas,adata] = gain_guess * self.exp_factor[i_meas]
            self.BIG[i_meas,self.Namp+gdata] = amp_guess* self.exp_factor[i_meas]

        # continue with Wolfram notation
        # http://mathworld.wolfram.com/NonlinearLeastSquaresFitting.html
        b = np.dot(self.BIG.T, self.Beta)
        A = np.dot(self.BIG.T, self.BIG)
        if method == 'lsmr':
            A = coo_matrix(A)  # this will eventually be built instead of self.BIG
            a = lsmr(A,b, **kwargs)[0]

        elif method == 'direct':
            a = np.dot(np.linalg.inv(A), b)

        print(self.PRM, end=' ')
        self.PRM += a  # update
        print("Residual: %.2e" % np.dot(self.Beta, self.Beta))
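The two lines building b and A above implement the Gauss-Newton normal equations (J^T J) a = J^T beta from the linked Wolfram page. A tiny standalone step with a hypothetical one-parameter model (not from the original code):

import numpy as np
from scipy.sparse.linalg import lsmr

t = np.linspace(0.0, 1.0, 5)
y = 2.0 * np.exp(t)                # data generated by a*exp(t) with a = 2
a0 = 1.5                           # current guess
beta = y - a0 * np.exp(t)          # residuals at the guess
J = np.exp(t)[:, None]             # Jacobian d(model)/da
da = lsmr(J.T @ J, J.T @ beta)[0]
a1 = a0 + da[0]                    # 2.0: one step is exact since the model is linear in a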
Example #13
 def testComplexX0(self):
     A = 4 * eye(self.n) + ones((self.n, self.n))
     xtrue = transpose(arange(self.n, 0, -1))
     b = aslinearoperator(A).matvec(xtrue)
     x0 = zeros(self.n, dtype=complex)
     x = lsmr(A, b, x0=x0)[0]
     assert norm(x - xtrue) == pytest.approx(0, abs=1e-5)
Example #14
File: cgsense.py Project: zongjg/pygrappa
def _isense(kspace, sens, show=False):
    '''Iterative SENSE using nonsquare matrix and LSMR solver.'''

    sx, sy, nc = kspace.shape[:]
    mask = np.abs(kspace[..., 0]) > 0

    # We need to implement an E and E.H operator
    def _EH(x0):
        x0 = x0[:sx*sy*nc] + 1j*x0[sx*sy*nc:]
        x0 = np.reshape(x0, (sx, sy, nc))
        res = np.sum(sens.conj()*ifft2(x0), axis=-1)
        res = np.reshape(res, (-1,))
        return np.concatenate((res.real, res.imag))

    def _E(x0):
        res = x0[:sx*sy] + 1j*x0[sx*sy:]
        res = np.reshape(res, (sx, sy))
        res = fft2(res[..., None]*sens)*mask[..., None]
        res = np.reshape(res, (-1,))
        return np.concatenate((res.real, res.imag))

    # Ax = b
    # A : (2*sx*sy*nc, 2*sx*sy)
    # x : (2*sx*sy)
    # b : (2*sx*sy*nc)
    # Twice the size since complex, do simple real/imag stacking
    A = LinearOperator((2*sx*sy*nc, 2*sx*sy), matvec=_E, rmatvec=_EH)
    b = np.reshape(kspace, (-1,))
    b = np.concatenate((b.real, b.imag))
    x = lsmr(A, b, show=show)[0]
    x = x[:sx*sy] + 1j*x[sx*sy:]

    return np.reshape(x, (sx, sy))
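The real/imaginary stacking used by _E and _EH generalizes to any complex least-squares problem; a small self-contained demonstration with a dense complex matrix (names here are illustrative, not from pygrappa):

import numpy as np
from scipy.sparse.linalg import lsmr, LinearOperator

rng = np.random.default_rng(0)
C = rng.standard_normal((6, 4)) + 1j*rng.standard_normal((6, 4))
x_true = rng.standard_normal(4) + 1j*rng.standard_normal(4)
b_c = C @ x_true

def mv(v):   # forward map on v = [Re(x); Im(x)]
    xc = v[:4] + 1j*v[4:]
    yc = C @ xc
    return np.concatenate((yc.real, yc.imag))

def rmv(v):  # adjoint map: real stacking of C^H
    yc = v[:6] + 1j*v[6:]
    xc = C.conj().T @ yc
    return np.concatenate((xc.real, xc.imag))

Aop = LinearOperator((12, 8), matvec=mv, rmatvec=rmv)
sol = lsmr(Aop, np.concatenate((b_c.real, b_c.imag)))[0]
x = sol[:4] + 1j*sol[4:]   # recovers x_true to solver tolerance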
Example #15
    def run_lsmr(self, logdamp, scale=True, **kwargs):
        # scale the log(damp) into a damping
        if logdamp is None:
            damp = 0.
        else:
            damp = 10.**logdamp
            if scale:
                damp *= self.frob  # scale the damping by frobenius
        print('[info]Running LSMR with l={}'.format(damp))

        #t1=default_timer()
        r = ssl.lsmr(self.A, self.bi, damp=damp, **kwargs)
        (x, istop, itn, normr, normar, norma, conda, normx) = r

        r1norm = normr
        r2norm = np.sqrt(normr**2 + (damp * normx)**2)
        lo = np.zeros_like(x)
        hi = np.zeros_like(x)
        r = Result('lsmr', x, istop, itn, r1norm, r2norm, norma, conda, normar,
                   normx, lo, hi, damp / self.frob)
        #t2=default_timer()
        #dt=(t2-t1)/60.    # in minutes

        self.lcurve.append(r.r1norm, r.xnorm, r.logdamp)

        return r
Example #16
def Optimization(b):
    b = b.flatten()

    x = np.zeros((pix))
    z = np.zeros((pix * 2))
    u = z

    A = LinearOperator((3 * pix, pix), matvec=MatVec, rmatvec=RMatVec)
    for i in range(iterations):
        print(i)

        # x step:
        v = np.hstack([b, rho * (z - u)])
        x = lsmr(A, v)[0]  #show=True, atol= , btol =

        # z step
        w = D(x) + u
        z = shrink(w, l / rho)

        # u step
        u = w - z

        print("Error: ", np.linalg.norm(img.flatten() - x))

    # Plot the results

    fig = plt.imshow(x.reshape(imgShape)).figure
    fig.suptitle("lambda={}, rho={}".format(l, rho))
    plt.show()

    return x.reshape(imgShape)
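The shrink helper used in the z-step is not shown in this snippet; a common soft-thresholding definition consistent with this kind of ADMM update would be (an assumption, not from the original file):

import numpy as np

def shrink(w, kappa):
    # elementwise soft-threshold: the prox operator of kappa * ||.||_1
    return np.sign(w) * np.maximum(np.abs(w) - kappa, 0.0)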
Example #17
    def infer(self, Ms, ys, scale_factors=None):
        ''' Either:
            1) Ms is a single M and ys is a single y 
               (scale_factors ignored) or
            2) Ms and ys are lists of M matrices and y vectors
               and scale_factors is a list of the same length.
        '''
        A, y = self._apply_scales(Ms, ys, scale_factors)

        if self.method == 'AS':
            assert isinstance(
                A, numpy.ndarray), "method 'AS' only works with dense matrices"
            x_est, _ = optimize.nnls(A, y)
        elif self.method == 'LB':
            if self.lasso is None:
                x_est, info = nls_lbfgs_b(A, y)
            if self.lasso:
                lasso = max(
                    0,
                    lsmr(A, y)[0].sum()) if self.lasso is True else self.lasso
                x_est = nls_slsqp(A, y, lasso)
        elif self.method == 'TRF':
            x_est = optimize.lsq_linear(A, y, bounds=(0, numpy.inf),
                                        tol=1e-3)['x']

        elif self.method == 'new':
            x_est, info = nnls(A, y, 1e-6, 1e-6)

        x_est = x_est.reshape(A.shape[1])  # reshape to match shape of x

        return x_est
Example #18
def solve(A, b, method, tol=1e-3):
    """ General sparse solver interface.

    method can be one of
    - spsolve_umfpack_mmd_ata
    - spsolve_umfpack_colamd
    - spsolve_superlu_mmd_ata
    - spsolve_superlu_colamd
    - bicg
    - bicgstab
    - cg
    - cgs
    - gmres
    - lgmres
    - minres
    - qmr
    - lsqr
    - lsmr
    """

    if method == 'spsolve_umfpack_mmd_ata':
        return spla.spsolve(A, b, use_umfpack=True, permc_spec='MMD_ATA')
    elif method == 'spsolve_umfpack_colamd':
        return spla.spsolve(A, b, use_umfpack=True, permc_spec='COLAMD')
    elif method == 'spsolve_superlu_mmd_ata':
        return spla.spsolve(A, b, use_umfpack=False, permc_spec='MMD_ATA')
    elif method == 'spsolve_superlu_colamd':
        return spla.spsolve(A, b, use_umfpack=False, permc_spec='COLAMD')
    elif method == 'bicg':
        res = spla.bicg(A, b, tol=tol)
        return res[0]
    elif method == 'bicgstab':
        res = spla.bicgstab(A, b, tol=tol)
        return res[0]
    elif method == 'cg':
        res = spla.cg(A, b, tol=tol)
        return res[0]
    elif method == 'cgs':
        res = spla.cgs(A, b, tol=tol)
        return res[0]
    elif method == 'gmres':
        res = spla.gmres(A, b, tol=tol)
        return res[0]
    elif method == 'lgmres':
        res = spla.lgmres(A, b, tol=tol)
        return res[0]
    elif method == 'minres':
        res = spla.minres(A, b, tol=tol)
        return res[0]
    elif method == 'qmr':
        res = spla.qmr(A, b, tol=tol)
        return res[0]
    elif method == 'lsqr':
        res = spla.lsqr(A, b, atol=tol, btol=tol)
        return res[0]
    elif method == 'lsmr':
        res = spla.lsmr(A, b, atol=tol, btol=tol)
        return res[0]
    else:
        raise Exception('UnknownSolverType')
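A quick usage sketch of the dispatcher above (illustrative values; for a rectangular system only 'lsqr' and 'lsmr' apply, the other methods expect a square matrix):

import numpy as np
import scipy.sparse as sparse

A = sparse.random(100, 80, density=0.05, format='csr', random_state=0)
b = np.ones(100)
x = solve(A, b, method='lsmr')  # least-squares solution via LSMR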
Example #19
def lsmr_annihilate(x: csc_matrix, y: ndarray, use_cache: bool = True, x_hash=None,
                    **lsmr_options) -> ndarray:
    r"""
    Removes projection of x on y from y

    Parameters
    ----------
    x : csc_matrix
        Sparse array of regressors
    y : ndarray
        Array with shape (nobs, nvar)
    use_cache : bool
        Flag indicating whether results should be stored in the cache,
        and retrieved if available.
    x_hash : object
        Hashable object representing the values in x
    lsmr_options: dict
        Dictionary of options to pass to scipy.sparse.linalg.lsmr

    Returns
    -------
    resids : ndarray
        Returns the residuals from regressing y on x, (nobs, nvar)

    Notes
    -----
    Residuals are estimated column-by-column as

    .. math::

        \hat{\epsilon}_{j} = y_{j} - x^\prime \hat{\beta}

    where :math:`\hat{\beta}` is computed using lsmr.
    """

    use_cache = use_cache and x_hash is not None
    regressor_hash = x_hash if x_hash is not None else ''
    default_opts = dict(atol=1e-8, btol=1e-8, show=False)
    default_opts.update(lsmr_options)
    resids = []
    for i in range(y.shape[1]):
        _y = y[:, i:i + 1]

        variable_digest = ''
        if use_cache:
            hasher = hash_func()
            hasher.update(ascontiguousarray(_y.data))
            variable_digest = hasher.hexdigest()

        if use_cache and variable_digest in _VARIABLE_CACHE[regressor_hash]:
            resid = _VARIABLE_CACHE[regressor_hash][variable_digest]
        else:
            beta = lsmr(x, _y, **default_opts)[0]
            resid = y[:, i:i + 1] - (x.dot(csc_matrix(beta[:, None]))).A
            _VARIABLE_CACHE[regressor_hash][variable_digest] = resid
        resids.append(resid)
    if resids:
        return column_stack(resids)
    else:
        return empty_like(y)
Example #20
    def calculate_first_order_correction(self, v_min, n, m, j, L0):
        if n >= m:
            return 0.0
        evecs = self.evecs
        evals = self.evals

        v_nm = (evecs[n].dag() * (self.v[j] * evecs[m]))[0][0][0]
        if abs(v_nm) <= v_min:
            return 0.0

        k = self.integer_list
        rho_0 = np.reshape(self.density_matrix, (self.dim**2, 1), order='F')

        V_nm = (evecs[n] * evecs[m].dag() * (evecs[n].dag() *
                                             (self.v[j] * evecs[m])))
        L_nm = liouvillian(V_nm)
        #b = np.dot(L_nm.full(),rho_0)
        b = (L_nm * rho_0).data
        omega_of_k = self.omega[j]
        for l in range(self.number_of_modes):
            omega_of_k += self.omega[l] * (k[l][n] - k[l][m])
        A = 1j * omega_of_k * identity(self.dim**2).data - L0.data
        #A = A.full()
        if omega_of_k == 0:
            rho_correction = la.lsmr(A, b)[0]
        else:
            rho_correction = sp(A, b)

        return nla.norm(rho_correction)
Example #21
def Solve_The_System(true_Points, L, anchors_index, anchors, delta_X, delta_Y):

    L_bar = np.zeros((L[:, 0].size, anchors[:, 0].size))

    anchors_matrix = np.zeros((anchors[:, 0].size, delta_X.size))

    for i in range(anchors_index.size):
        anchors_matrix[i, int(anchors_index[i])] = 1

    L_bar = np.concatenate([L, anchors_matrix])

    b = np.zeros((delta_X.size + anchors_index.size))
    q = np.zeros((delta_Y.size + anchors_index.size))

    for i in range(delta_X.size):

        b[i] = delta_X[i]
        q[i] = delta_Y[i]

    k = delta_X.size
    l = delta_Y.size

    for i in range(anchors[:, 0].size):
        b[k] = anchors[i, 0]
        q[l] = anchors[i, 1]
        k += 1
        l += 1

    X = np.zeros((true_Points[:, 0].size))
    Y = np.zeros((true_Points[:, 0].size))

    X = lsmr(L_bar, b)[0]
    Y = lsmr(L_bar, q)[0]

    lapl_Points = np.zeros((X.size, 3))

    lapl_Points[:, 0] = X
    lapl_Points[:, 1] = Y

    reconstr_error = np.zeros((X.size))

    for i in range(reconstr_error.size):

        reconstr_error[i] = norm(
            np.array([true_Points[i, 0] - X[i], true_Points[i, 1] - Y[i]]), 2)

    return lapl_Points, reconstr_error, L_bar
Example #22
 def test_tikhonov(self):
     for i in range(2, TESTING_ITERATIONS):
         # Generates random lambda
         LAMBDA = np.random.rand(1)
         A, y = generate_random.generate_random_ill_conditioned(i)
         self.assertEqual(
             lp.tikhonov_regularization(A, y, LAMBDA).all(),
             lsmr(A, y, LAMBDA)[0].all())
Example #23
def mainTest(withhold=0, params=None):

    #default value for params
    if params is None:
        params = {'withhold': 0,
          'load': None,
          'extractFile': None,
          'trainFile': None,
          'testFile': None,
          'writePredict': False,
          'outputFile': 'predictions.csv'
          }

    trainfile = "train.xml"
    testfile = "testcases.xml"

    # TODO put the names of the feature functions you've defined above in this list
    #ffs = [metadata_feats, unigram_feats]
    ffs = [metadata_feats, unigram_noStop]
    #ffs = [metadata_feats, bigram_feats_noStop]
    #ffs = [metadata_feats, bigram_feats_noStop, unigram_noStop]
    #totRevLen, revLens
    #ffs = [metadata_feats, unigram_noStop, revLens]

    print("extracting training/testing features...")
    time1 = time.perf_counter()
    X_train, y_train, train_ids,X_test,y_test,test_ids = test.loadData(params, withhold, ffs)
    time2 = time.perf_counter()
    print("done extracting training/testing features", time2-time1, "s")
    print()

    # TODO train here, and return regression parameters
    print("learning...")
    time1 = time.perf_counter()
    #learned_w = splinalg.lsqr(X_train,y_train)[0]
    learned_w = splinalg.lsmr(X_train,y_train,damp=5000)[0]
    time2 = time.perf_counter()
    print("done learning, ", time2-time1, "s")
    print()

    # get rid of training data and load test data
    del X_train
    del y_train
    del train_ids

    # TODO make predictions on text data and write them out
    print("making predictions...")
    preds = X_test.dot(learned_w)
    print("done making predictions")
    print()

    if withhold > 0:
        print("MAE on withheld data:", testMAE(preds, y_test))

    if params['writePredict']:
        print("writing predictions...")
        util.write_predictions(preds, test_ids, params['outputFile'])
        print("done!")
Example #24
def run(dataset,
        measurements,
        workloads,
        eps=1.0,
        delta=0.0,
        sensitivity=1.0,
        bounded=True,
        seed=None,
        query_mask=None):
    """
    Run a mechanism that measures the given measurements and runs inference.
    This is a convenience method for running end-to-end experiments.
    """
    state = np.random.RandomState(seed)
    l1 = 0
    l2 = 0
    for _, Q in measurements:
        if query_mask is not None:  # doesn't seem to actually matter
            Q = Q[
                query_mask, :]  # there's definitely a cleaner way for indexing this
            Q = Q[:, query_mask]
        l1 += np.abs(Q).sum(axis=0).max()
        try:
            l2 += Q.power(2).sum(axis=0).max()  # for sparse matrices
        except AttributeError:  # dense arrays have no .power
            l2 += np.square(Q).sum(axis=0).max()  # for dense matrices

    l1 *= sensitivity
    l2 *= sensitivity
    # print("l1 = {:.4f}, l2 = {:.4f}".format(l1, l2))
    if bounded:
        total = dataset.df.shape[0]
        l1 *= 2
        l2 *= 2

    if delta > 0:
        noise = norm(loc=0, scale=np.sqrt(l2 * 2 * np.log(2 / delta)) / eps)
    else:
        noise = laplace(loc=0, scale=l1 / eps)

    x_bar_answers = []
    local_ls = {}
    for proj, A in measurements:
        x = dataset.project(proj).datavector()
        z = noise.rvs(size=A.shape[0], random_state=state)
        a = A.dot(x)
        y = a + z
        if query_mask is not None:
            y = y[query_mask]
            A = A[query_mask, :]
            A = A[:, query_mask]
        local_ls[proj] = lsmr(A, y)[0]

    answers = []
    for proj, W in workloads:
        answers.append((local_ls[proj], proj, W))

    return answers
Example #25
	def test_tikhonov(self):
		for i in range(2,TESTING_ITERATIONS):
			# Generates random lambda
			LAMBDA = np.random.rand(1)
			A,y = generate_random.generate_random_ill_conditioned(i)
			self.assertEqual(
				lp.tikhonov_regularization(A,y,LAMBDA).all(), 
				lsmr(A,y,LAMBDA)[0].all()
				)
Example #26
    def iterate(self, **kwargs):

        BIG_row = []
        BIG_col = []
        BIG_data = []
        for i_meas, (xdata, ydata, gdata, adata) in enumerate(
                zip(self.xvals, self.yvals, self.gvals, self.avals)):

            # get the parameters from the pre-structured parameters array
            # which in this case is [AmplitudesA --- AmplitudesB --- Gains]
            ampA_guess = self.PRM[adata]
            ampB_guess = self.PRM[self.Namp + adata]
            gainA_guess = self.PRM[2 * self.Namp + gdata]
            gainB_guess = self.PRM[2 * self.Namp + self.Ngain + gdata]

            # residual between data and guess
            self.Beta[i_meas] = ydata - (
                ampA_guess * gainA_guess +
                ampB_guess * gainB_guess) * self.exp_factor[i_meas]

            # partial derivatives
            dA = gainA_guess * self.exp_factor[i_meas]
            dB = gainB_guess * self.exp_factor[i_meas]
            dGA = ampA_guess * self.exp_factor[i_meas]
            dGB = ampB_guess * self.exp_factor[i_meas]

            # store the data in coordinate format for making sparse array
            BIG_col.extend([
                adata, self.Namp + adata, 2 * self.Namp + gdata,
                2 * self.Namp + self.Ngain + gdata
            ])
            BIG_row.extend([i_meas] * 4)
            BIG_data.extend([dA, dB, dGA, dGB])

        # make the big sparse array
        BS = coo_matrix((BIG_data, (BIG_row, BIG_col)),
                        shape=(self.Nmeas, self.Nprm))
        BS = BS.tocsr()  # convert to csr for speed gains?

        # continue with Wolfram notation
        # http://mathworld.wolfram.com/NonlinearLeastSquaresFitting.html
        b = BS.T.dot(self.Beta)
        A = BS.T.dot(BS)
        a = lsmr(A, b, **kwargs)[0]  # solve

        self.niters += 1
        self.PRM += a  # update
        resid = np.dot(self.Beta, self.Beta)
        self.residuals.append(resid)
        self.BS = BS  # store for looking

        print("Iter %d ; Residual: %e, Press Ctrl-C to break" % (self.niters,
                                                                 resid))

        if self.save_iters:
            np.save("_PRM_iter%d_nlsq6" % self.niters, self.PRM)
Example #27
 def run(self):
     A=self.getAMatrix()
     b=self.d.signal*np.sqrt(self.d.weight) ## (we suppressed by weight)
     print("running solver")
     mp=lsmr(A,b,show=True)
     print(mp[0])
     hpMap = np.zeros(hp.nside2npix(self.Nside))
     hpMap[self.pixid] = mp[0]
     np.save("output/mpfast" + self.outputname, hpMap)
     self.correlate(hpMap)
Example #28
def rayleigh_iteration(F, J_F, s, x0, niter=10):
    """ Solve the equation
    Pn L_x eta = - Pn F
    J_C eta = 0
    """
    x = x0.copy()
    n, p = x.shape
    # TembT = utils.vech_embedding_tensor(p).T
    # Ttrans = utils.transpose_tensor((n, p))
    err = 100
    ii = 0
    while ii < niter and (err > 1e-3):
        FX = F(x)
        xxT = np.dot(x, x.T)
        FXTx = np.dot(FX.T, x)
        mPFX = -FX + 0.5 * (np.dot(xxT, FX) + np.dot(x, FXTx))
        JFX = J_F(x)
        rayleigh = 0.5 * (FXTx + FXTx.T)
        u, _, _ = np.linalg.svd(x)
        x_perp = u[:, p:]
        # tangent space is xA + x_perp B
        r_a, c_a = np.triu_indices(p, 1)
        r_b, c_b = np.indices((n - p, p))
        r_b = r_b.reshape(-1)
        c_b = c_b.reshape(-1)
        # dim_a = (p * (p - 1)) // 2
        # dim_b = n * (n - p)
        perp_base = np.zeros((n, p, len(r_a) + len(r_b)))
        perp_im = np.zeros_like(perp_base)
        for i in range(len(r_a)):
            perp_base[:, c_a[i], i] = x[:, r_a[i]]
            perp_base[:, r_a[i], i] = -x[:, c_a[i]]
            l1 = np.tensordot(JFX, perp_base[:, :, i])
            l1 = l1 - np.dot(perp_base[:, :, i], rayleigh)

            perp_im[:, :, i] = l1 -\
                0.5 * (np.dot(xxT, l1) + np.dot(x, np.dot(l1.T, x)))
        for i in range(len(r_b)):
            perp_base[:, c_b[i], len(r_a) + i] = x_perp[:, r_b[i]]
            l1 = np.tensordot(JFX, perp_base[:, :, len(r_a) + i])
            l1 = l1 - np.dot(perp_base[:, :, len(r_a) + i], rayleigh)

            perp_im[:, :, len(r_a) + i] = l1 -\
                0.5 * (np.dot(xxT, l1) + np.dot(x, np.dot(l1.T, x)))

        eta_ = lsmr(perp_im.reshape(n * p, -1), mPFX.reshape(-1), maxiter=300)

        if eta_[1] > 2:
            print(eta_)
        eta = np.tensordot(perp_base, eta_[0], 1)
        x = s.retraction(x, eta)
        err_ = F(x) - np.dot(x, rayleigh)
        err = np.linalg.norm(err_)
        ii += 1
    return x, F(x), rayleigh, eta
Example #29
def simulate_single_edge_flow(G, edge, weighted=False, sparse=False):
    """ Calculate the same as simulate_edge_flows but for one
    single damaged edge.
    Uses preconditioning
    """
    # conductivity weights
    wts = []
    if weighted:
        for u, v, d in G.edges_iter(data=True):
            d['cond'] = np.sqrt(d['conductivity']**4/d['weight'])
            wts.append(d['cond'])
    else:
        for u, v, d in G.edges_iter(data=True):
            d['cond'] = 1.
            wts.append(1.)
    
    wts = np.array(wts)
    wts_sqr = wts**2

    edge_ind = G.edges().index(edge)
    
    # least squares method (can be sparsified!)
    d1 = sparse_incidence_matrix(G, oriented=True, weight='cond')

    delete_row_lil(d1, 0)
    d1t = d1.transpose()

    # rhs
    y = np.zeros(d1t.shape[0])
    y[edge_ind] = wts[edge_ind]

    #ret = scipy.sparse.linalg.lsqr(d1t, y)
    
    if sparse:
        # precondition
        d = 1./np.array(np.abs(d1t).sum(axis=0))[0]
        D = scipy.sparse.diags([d], [0])
        d1t = d1t.dot(D)
        
        # solve
        ret = lsmr(d1t, y, atol=1e-8, btol=1e-8,
                show=False, maxiter=10*d1t.shape[1])
    else:
        d1t = np.array(d1t.todense())
        ret = np.linalg.lstsq(d1t, y)
        
    x = ret[0]
    DeltaF = d1t.dot(x)*wts/wts_sqr[edge_ind]    

    # correct DeltaF at the damaged edge
    DeltaF[edge_ind] = DeltaF[edge_ind] - 1
    #DeltaF[edge_ind] = 1 - DeltaF[edge_ind]
    
    return DeltaF, wts_sqr
Example #30
def solveDimensionLeastSquares(startDim, dimCount, data, indices, indptr, trainPoints, codebookSize, M):
    A = sparse.csr_matrix((data, indices, indptr), shape=(trainPoints.shape[0], M*codebookSize), copy=False)
    discrepancy = 0
    dimCount = min(dimCount, trainPoints.shape[1] - startDim)
    codebooksComponents = np.zeros((M, codebookSize, dimCount), dtype='float32')
    for dim in range(startDim, startDim+dimCount):
        b = trainPoints[:, dim].flatten()
        solution = lsmr(A, b, show=False, maxiter=250)
        codebooksComponents[:, :, dim-startDim] = np.reshape(solution[0], (M, codebookSize))
        discrepancy += solution[3] ** 2
    return (codebooksComponents, discrepancy)
Example #31
 def build_constraint(self, verts):
     verts_mean = self.verts_mean
     s, R, t = weop(verts, verts_mean, 1)
     align_verts = s * verts.dot(R) + t.T
     components = self.verts_comp
     A = np.array([c.reshape(-1) for c in components]).T
     b = align_verts.reshape(-1)
     w = lsmr(A, b)[0]
     cons_verts = A.dot(w).reshape((-1, 3))
     cons_verts = (cons_verts - t.T).dot(R.T) / s
     return cons_verts
Example #32
 def update_C(self, lsqr_lsmr: bool):
     N, D = self.X.shape
     _, M = self.B.shape
     self.one_hot = self.one_hot_encode()
     if lsqr_lsmr:
         res = self._pool.map(self.lsqr, range(D))
         return np.array(res).T
     else:
         for i in range(D):
             self.C[:, i] = lng.lsmr(self.one_hot, self.X[:, i])[0]
         return self.C
Example #33
    def bestcoef(self, **kwargs):
        """ With fixed shifts find best fit coefficients """
        M = (self.deg + 1)**2
        A = np.zeros((M, M))
        b = np.zeros(M)
        for s, d in zip(self.shiftiter, self.images):
            t = self._tmatrix(s)
            A[:,:] += t.T.dot(t)
            b[:] += t.T.dot(d.ravel())

        self._firstcoef = lsmr(A, b, **kwargs)
        return self._firstcoef[0].reshape(self.deg+1,-1)
Example #34
 def bias_correction(self):
     self.Jbias_corr = []
     active = self.active_set[-1]
     if len(active):
         Gtmp = self.Gorig[:, active]
         Gbig = utils.create_bigG(Gtmp, self.Acoef_, self.Morig)
         Z = linalg.lsmr(Gbig,
                         self.Morig.reshape(-1, order='F'),
                         atol=1e-12,
                         btol=1e-12)
         self.Jbias_corr = Z[0].reshape(
             (len(active), self.Morig.shape[1] + self.m_p - 1), order='F')
Example #35
def solve_least_squares_problem(A, b):
    x = 3 * [None]
    for i in range(3): # todo scipy does not support least squares with b.shape = (N,3), but only with (N,1) -> Here one computes the QR three times instead of one time! OPTIMIZE!!!
        b_red = np.array(b[:,i])
        print("\n\n### least squares %d out of 3...\n" % (i+1))
        ret = lin.lsmr(A, b_red, show=True)
        print("done.")
        x[i] = ret[0]

    x = np.array(x).T
    print("x: shape "+str(x.shape))
    print(x)
    return x
Example #36
    def trainReservoir(self):

        # Collect internal states
        self.internalState = self.collectInternalStates(self.inputData)

        # Learn the output weights
        A = self.internalState
        B = self.outputData[self.initialTransient:, :]

        # Solve for x in Ax = B
        for d in range(self.outputD):
            B = self.outputData[self.initialTransient:, d]
            self.outputWeight[d, :] = sla.lsmr(A, B, damp=1e-8)[0]
Example #37
    def trainReservoir(self):

        # Collect internal states
        self.internalState = self.collectInternalStates(self.inputData)

        # Learn the output weights
        A = self.internalState
        B = self.outputData[self.initialTransient:, :]

        # Solve for x in Ax = B
        for d in range(self.outputD):
            B = self.outputData[self.initialTransient:, d]
            self.outputWeight[d, :] = sla.lsmr(A, B, damp=1e-8)[0]
Example #38
	def prox_linear_equation(self, v, mu_internal , mode , max_iter):
		'''
		Instead of computing the matrix inverse, we solve the linear system here.
		The mu_internal != 0 and mu_internal == 0 cases are handled separately below.
		'''
		x = self.x
		y = self.y
		dim = self.dim
		regul_coef = self.regul_coef
		# mu_val = self.mu_val # not needed here, it should be set in the call from outside
		n = self.n

		v = 1.0 * v
		mu_internal = 1.0 * mu_internal

		if mu_internal != 0:
			A = 2. * np.dot(x.T, x)/n + (mu_internal + 2. * regul_coef) * np.identity(dim)
			u = mu_internal * v +  2. * np.reshape( np.dot(x.T,y) , (-1,1) ) /n 
		
			if mode == 'linearEq_inexact':
				w_opt = sparsela.lsmr(A, u, damp=0.0, atol=1e-06, btol=1e-06, conlim=100000000.0, maxiter=max_iter, show=False)[0]
			elif mode == 'linearEq_exact':
				w_opt = np.linalg.lstsq(A, u)[0]

		elif mu_internal == 0:
			A = 2./ n * np.dot(x.T, x) + 2 * regul_coef * np.identity(dim)
			u = v +  2./ n * np.reshape( np.dot(x.T,y) , (-1,1) )
		
			if mode == 'linearEq_inexact':
				w_opt = sparsela.lsmr(A, u, damp=0.0, atol=1e-06, btol=1e-06, conlim=100000000.0, maxiter=max_iter, show=False)[0]
			elif mode == 'linearEq_exact':
				# w_opt = np.linalg.lstsq(A, u)[0]
				w_opt = np.linalg.lstsq(A, u)[0]

		# you may want this:
		# print 'w_opt shape:'
		# print np.shape(w_opt)

		return w_opt 
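For reference, the system solved above is the first-order condition of the proximal step (my derivation, not from the source): minimizing (1/n)*||X w - y||^2 + regul_coef*||w||^2 + (mu_internal/2)*||w - v||^2 over w gives

	(2 X^T X / n + (mu_internal + 2 regul_coef) I) w = mu_internal v + (2/n) X^T y

which is exactly the A and u built in the mu_internal != 0 branch above.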
Example #39
    def XPR(self, x):
        sparseA = SparseMatrix(
            (self.numberOfValidMatches * 2, len(self.teamList)), np.int32)
        sparseB = SparseMatrix((self.numberOfValidMatches * 2, 1), np.int32)
        allData = []
        for event in tqdm(self.events):
            try:
                data = pd.read_sql_table(event,
                                         self.conn,
                                         columns=[
                                             "alliances_blue_team_keys_0",
                                             "alliances_blue_team_keys_1",
                                             "alliances_blue_team_keys_2",
                                             "alliances_red_team_keys_0",
                                             "alliances_red_team_keys_1",
                                             "alliances_red_team_keys_2",
                                             "score_breakdown_blue_" + x,
                                             "score_breakdown_red_" + x
                                         ])
                for index, row in data.iterrows():
                    u = 0
                    match_blue_teams = [
                        row["alliances_blue_team_keys_0"],
                        row["alliances_blue_team_keys_1"],
                        row["alliances_blue_team_keys_2"]
                    ]
                    match_red_teams = [
                        row["alliances_red_team_keys_0"],
                        row["alliances_red_team_keys_1"],
                        row["alliances_red_team_keys_2"]
                    ]
                    sparseB.append(2 * u, 0,
                                   int(row["score_breakdown_blue_" + x]))
                    sparseB.append(2 * u + 1, 0,
                                   int(row["score_breakdown_red_" + x]))
                    for team in match_blue_teams:
                        sparseA.append(2 * u,
                                       list(self.teamList).index(team), 1)
                    for team in match_red_teams:
                        sparseA.append(2 * u + 1,
                                       list(self.teamList).index(team), 1)
                    u += 1
            except:
                pass

        A = sparseA.tocoo().tocsr()
        B = np.ndarray.flatten(sparseB.tocoo().tocsc().toarray())

        ans = pd.Series(lsmr(A, B)[0])
        oprs = pd.concat({"Team": self.teamList, x + " OPR": ans}, axis=1)
        oprs.to_sql(x + "OPR", self.conn, if_exists="replace")
Example #40
def pipe_density(V): 
    '''
    An lsmr iterative solution. 
    '''
    
    V1=V.getH()
#     E = V.dot( V1.dot(    W   )   )
#     W = W*(E+1.0e-17)/(E*E+1.0e-17)    
    b = numpy.ones( (V.get_shape()[0] ,1) ,dtype  = numpy.complex64)  
    from scipy.sparse.linalg import lsqr, lsmr
        
#     x1 =  lsqr(V, b , iter_lim=20, calc_var = True, damp = 0.001)
    x1 =  lsmr(V, b , maxiter=100,  damp = 0.001)
    
    my_k_dens = x1[0]    # the first element is the answer
    
#     tmp_W =  lsqr(V1, my_k_dens, iter_lim=20, calc_var = True, damp = 0.001)
    tmp_W =  lsmr(V1, my_k_dens , maxiter=10,  damp = 0.001)
    
    W = numpy.reshape( tmp_W[0], (V.get_shape()[0] ,1),order='F' ) # reshape vector

 
    return W
Example #41
    def iterate(self, **kwargs):
        K = self.K
        BIG_row = []
        BIG_col = []
        BIG_data = []
        for i_meas, (yobs, adata, LA, LB, PA, PB) in enumerate(
                zip(self.yvals, self.avals, self.LAvals, self.LBvals,
                     self.PAvals, self.PBvals)):

            # get the parameters from the pre-structured parameters array
            # which in this case is [AmplitudesA --- AmplitudesB --- Gains]
            ampA_guess = self.PRM[adata]
            ampB_guess = self.PRM[self.Namp + adata]

            # residual between data and guess
            self.Beta[i_meas] = yobs - (ampA_guess * LA * PA / K +
                                        ampB_guess * LB * PB / K)

            # partial derivatives
            dA = LA * PA / K
            dB = LB * PB / K

            # store the data in coordinate format for making sparse array
            BIG_col.extend([adata, self.Namp + adata])
            BIG_row.extend([i_meas] * 2)
            BIG_data.extend([dA, dB])

        # make the big sparse array
        BS = coo_matrix((BIG_data, (BIG_row, BIG_col)),
                        shape=(self.Nmeas, self.Nprm))
        BS = BS.tocsr()  # convert to csr for speed gains?

        # continue with Wolfram notation
        # http://mathworld.wolfram.com/NonlinearLeastSquaresFitting.html
        b = BS.T.dot(self.Beta)
        A = BS.T.dot(BS)
        a = lsmr(A, b, **kwargs)[0]  # solve

        self.niters += 1
        self.PRM += a  # update
        resid = np.dot(self.Beta, self.Beta)
        self.residuals.append(resid)
        self.BS = BS  # store for looking

        print("Iter %d ; Residual: %e, Press Ctrl-C to break" % (self.niters,
                                                                 resid))

        if self.save_iters:
            np.save("_PRM_iter%d" % self.niters, self.PRM)
Example #42
def solve_least_squares_problem(A, b):
    x = 3 * [None]
    for i in range(
            3
    ):  # todo scipy does not support least squares with b.shape = (N,3), but only with (N,1) -> Here one computes the QR three times instead of one time! OPTIMIZE!!!
        b_red = np.array(b[:, i])
        print("\n\n### least squares %d out of 3...\n" % (i + 1))
        ret = lin.lsmr(A, b_red, show=True)
        print("done.")
        x[i] = ret[0]

    x = np.array(x).T
    print("x: shape " + str(x.shape))
    print(x)
    return x
Example #43
File: distortion.py Project: satarsa/pyFAI
    def uncorrect(self, image, use_cython=False):
        """
        Take an image which has been corrected and transform it into its raw form (with loss of information)

        :param image: 2D-array with the image
        :return: uncorrected 2D image

        Note: to retrieve the input mask one can do:

        >>> msk =  dis.uncorrect(numpy.ones(dis._shape_out)) <= 0
        """
        assert image.shape == self._shape_out
        if self.lut is None:
            self.calc_LUT()
        if (linalg is not None) and (use_cython is False):
            if self.method == "lut":
                csr = csr_matrix(sparse_utils.LUT_to_CSR(self.lut))
            else:
                csr = csr_matrix(self.lut)
            res = linalg.lsmr(csr, image.ravel())
            out = res[0].reshape(self.shape_in)
        else:  # This is deprecated and does not work with resize=True
            if self.method == "lut":
                if _distortion is not None:
                    out, mask = _distortion.uncorrect_LUT(image, self.shape_in, self.lut)
                else:
                    out = numpy.zeros(self.shape_in, dtype=numpy.float32)
                    lout = out.ravel()
                    lin = image.ravel()
                    tot = self.lut.coef.sum(axis=-1)
                    for idx in range(self.lut.shape[0]):
                        t = tot[idx]
                        if t <= 0:
                            continue
                        val = lin[idx] / t
                        lout[self.lut[idx].idx] += val * self.lut[idx].coef
            elif self.method == "csr":
                if _distortion is not None:
                    out, mask = _distortion.uncorrect_CSR(image, self.shape_in, self.lut)
            else:
                raise NotImplementedError()
        return out
Example #44
 def _fitWindow(self, x, y, dt):
     sel = ~y.mask.any(axis=1) & ~x.mask.any(axis=1)
     if self.damp is None:
         if not self.robust:
             f = LA.lstsq(x[sel, :], y[sel, :])
             w = f[0]
         else:
             resrlm = sm.RLM(y[sel, :], x[sel, :]).fit()
             w = resrlm.params
     else:
         # damped least squares (note: lsmr expects a 1-D right-hand side)
         f = SLA.lsmr(x[sel, :], y[sel, :], self.damp)
         w = f[0]
     pred = np.dot(x, w)
     eps = y - pred
     nu = eps[sel, :].std(axis=0)/np.sqrt(dt)
     # timescales
     l, v = LA.eig(w)
     theta = np.arctan2(np.imag(l), np.real(l))
     foscil = (np.abs(theta)/dt)/(2.*np.pi)  # in Hz
     tau = -1.*np.sign(np.real(l))/(np.log(np.abs(l))/dt)  # in seconds
     return (w, nu, v, l, foscil, tau)
Example #45
    def trainReservoir(self):

        internalState = np.zeros(self.Nx)

        #Compute internal states of the reservoir
        for t in range(self.inputN):
            term1 = np.dot(self.inputWeight,self.inputData[t])
            term2 = np.dot(self.reservoirWeight,internalState)
            internalState = (1.0-self.leakingRate)*internalState + self.leakingRate*expit(term1 + term2)
            if t >= self.initialTransient:
                self.internalState[t-self.initialTransient] = internalState

        #Learn the output weights
        A = self.internalState
        B = self.outputData[self.initialTransient:, :]

        #Solve for x in Ax = B
        for d in range(self.outputD):
            B = self.outputData[self.initialTransient:, d]
            self.outputWeight[d, :] = sla.lsmr(A, B, damp=1e-8)[0]
Example #46
def main(X_train=None, global_feat_dict=None):
    trainfile = "train.xml"
    testfile = "testcases.xml"
    outputfile = "mypredictions2.csv"  # feel free to change this or take it as an argument

    # TODO put the names of the feature functions you've defined above in this list
    ffs = [metadata_feats, unigram_feats]

    if X_train is None and global_feat_dict is None:
        # extract features
        print("extracting training features...")
        X_train,global_feat_dict,y_train,train_ids = extract_feats(ffs, trainfile)
        print("done extracting training features")
        print()

    # TODO train here, and return regression parameters
    print("learning...")
    #learned_w = splinalg.lsqr(X_train,y_train)[0]
    learned_w = splinalg.lsmr(X_train,y_train)[0]
    print("done learning")
    print()

    # get rid of training data and load test data
    del X_train
    del y_train
    del train_ids
    print("extracting test features...")
    X_test,_,y_ignore,test_ids = extract_feats(ffs, testfile, global_feat_dict=global_feat_dict)
    print("done extracting test features")
    print()

    # TODO make predictions on text data and write them out
    print("making predictions...")
    preds = X_test.dot(learned_w)
    print("done making predictions")
    print()

    print("writing predictions...")
    util.write_predictions(preds, test_ids, outputfile)
    print("done!")
Example #47
    def trainReservoirBatch(self, inputData, outputData):

        inputN = inputData.shape[0]
        self.internalState = np.zeros((inputN-self.initialTransient, self.Nx))
        internalState = np.zeros(self.Nx)

        # Compute internal states of the reservoir
        for t in range(inputN):
            term1 = np.dot(self.inputWeight, inputData[t])
            term2 = np.dot(self.reservoirWeight,internalState)
            internalState = (1.0-self.leakingRate)*internalState + self.leakingRate*self.activation(term1 + term2)
            if t >= self.initialTransient:
                self.internalState[t-self.initialTransient] = internalState
                self.latestInternalState = internalState

        # Learn the output weights
        A = np.hstack((inputData[self.initialTransient:], self.internalState))

        # Solve for x in Ax = B
        for d in range(self.outputD):
            B = outputData[self.initialTransient:, d]
            self.outputWeight[d, :] = sla.lsmr(A, B, damp=1e-8)[0]
Example #48
def lsmrtest(m, n, damp):
    """Verbose testing of lsmr"""

    A = lowerBidiagonalMatrix(m,n)
    xtrue = arange(n,0,-1, dtype=float)
    Afun = aslinearoperator(A)

    b = Afun.matvec(xtrue)

    atol = 1.0e-7
    btol = 1.0e-7
    conlim = 1.0e+10
    itnlim = 10*n
    show = 1

    x, istop, itn, normr, normar, norma, conda, normx \
      = lsmr(A, b, damp, atol, btol, conlim, itnlim, show)

    j1 = min(n,5)
    j2 = max(n-4,1)
    print(' ')
    print('First elements of x:')
    str = ['%10.4f' % (xi) for xi in x[0:j1]]
    print(''.join(str))
    print(' ')
    print('Last  elements of x:')
    str = ['%10.4f' % (xi) for xi in x[j2-1:]]
    print(''.join(str))

    r = b - Afun.matvec(x)
    r2 = sqrt(norm(r)**2 + (damp*norm(x))**2)
    print(' ')
    str = 'normr (est.)  %17.10e' % (normr)
    str2 = 'normr (true)  %17.10e' % (r2)
    print(str)
    print(str2)
    print(' ')
Example #49
def damped_lstsq(a,b,damping=1.0,plot=False):
    '''
    Gm = d
       G : discrete convolution matrix built from a
       m : signal we are trying to recover (receiver function)
       d : the convolved data (signal b)

       With damping, the solution is m = (G^T G + damping^2 I)^(-1) G^T d
    '''

    #build G
    padding = np.zeros(a.shape[0] - 1, a.dtype)
    first_col = np.r_[a, padding]
    first_row = np.r_[a[0], padding]
    G = toeplitz(first_col, first_row)

    #reshape b
    shape = G.shape
    shape = shape[0]
    len_b = len(b)
    zeros = np.zeros((shape-len_b))
    b = np.append(b,zeros)

    #solve with scipy.sparse.linalg.lsmr
    sol = lsmr(G,b,damp=damping)
    m_est = sol[0]
    rf = m_est

    if plot:
       fig,axes = plt.subplots(3,sharex=True)
       axes[0].plot(a)
       axes[1].plot(b)
       axes[2].plot(rf)
       plt.show()

    return rf
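An illustrative usage (my example, not from the source): recover a short spike sequence from its convolution with the wavelet a:

import numpy as np

a = np.array([1.0, 0.5, 0.2, 0.1, 0.05])  # assumed source wavelet
m = np.array([0.0, 1.0, 0.0, -0.5, 0.0])  # true signal to recover
b = np.convolve(a, m)                     # observed data, length 2*len(a)-1
rf = damped_lstsq(a, b, damping=1e-4)     # rf approximately equals m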
Example #50
 def testColumnB(self):
     A = eye(self.n)
     b = ones((self.n, 1))
     x = lsmr(A, b)[0]
     assert_almost_equal(norm(A.dot(x) - b.ravel()), 0)
Example #51
 def testScalarB(self):
     A = array([[1.0, 2.0]])
     b = 3.0
     x = lsmr(A, b)[0]
     assert_almost_equal(norm(A.dot(x) - b), 0)
Example #52
def dogbox(
    fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, scaling, loss_function, tr_solver, tr_options, verbose
):
    f = f0
    f_true = f.copy()
    nfev = 1

    J = J0
    njev = 1

    if loss_function is not None:
        rho = loss_function(f)
        cost = 0.5 * np.sum(rho[0])
        J, f = scale_for_robust_loss_function(J, f, rho)
    else:
        cost = 0.5 * np.dot(f, f)

    g = compute_grad(J, f)

    jac_scaling = isinstance(scaling, string_types) and scaling == "jac"
    if jac_scaling:
        scale, scale_inv = compute_jac_scaling(J)
    else:
        scale, scale_inv = scaling, 1 / scaling

    Delta = norm(x0 * scale, ord=np.inf)
    if Delta == 0:
        Delta = 1.0

    on_bound = np.zeros_like(x0, dtype=int)
    on_bound[np.equal(x0, lb)] = -1
    on_bound[np.equal(x0, ub)] = 1

    x = x0
    step = np.empty_like(x0)

    if max_nfev is None:
        max_nfev = x0.size * 100

    termination_status = None
    iteration = 0
    step_norm = None
    actual_reduction = None

    if verbose == 2:
        print_header_nonlinear()

    while True:
        active_set = on_bound * g < 0
        free_set = ~active_set

        g_free = g[free_set]
        g_full = g.copy()
        g[active_set] = 0

        g_norm = norm(g, ord=np.inf)
        if g_norm < gtol:
            termination_status = 1

        if verbose == 2:
            print_iteration_nonlinear(iteration, nfev, cost, actual_reduction, step_norm, g_norm)

        if termination_status is not None or nfev == max_nfev:
            break

        x_free = x[free_set]
        lb_free = lb[free_set]
        ub_free = ub[free_set]
        scale_inv_free = scale_inv[free_set]

        # Compute (Gauss-)Newton and build quadratic model for Cauchy step.
        if tr_solver == "exact":
            J_free = J[:, free_set]
            newton_step = lstsq(J_free, -f)[0]

            # Coefficients for the quadratic model along the anti-gradient.
            a, b = build_quadratic_1d(J_free, g_free, -g_free)
        elif tr_solver == "lsmr":
            Jop = aslinearoperator(J)

            # We compute the lsmr step in scaled variables and then
            # transform back to normal variables. If lsmr returned the exact
            # least-squares solution, this would be equivalent to doing no
            # transformation, but in practice it works better this way.

            # We pass active_set to make computations as if we selected
            # the free subset of J columns, but without actually doing any
            # slicing, which is expensive for sparse matrices and impossible
            # for LinearOperator.

            lsmr_op = lsmr_operator(Jop, scale_inv, active_set)
            newton_step = -lsmr(lsmr_op, f, **tr_options)[0][free_set]
            newton_step *= scale_inv_free

            # Components of g for active variables were zeroed, so this call
            # is correct and equivalent to using J_free and g_free.
            a, b = build_quadratic_1d(Jop, g, -g)

        actual_reduction = -1.0
        while actual_reduction <= 0 and nfev < max_nfev:
            tr_bounds = Delta * scale_inv_free

            step_free, on_bound_free, tr_hit = dogleg_step(
                x_free, newton_step, g_free, a, b, tr_bounds, lb_free, ub_free
            )

            step.fill(0.0)
            step[free_set] = step_free

            if tr_solver == "exact":
                predicted_reduction = -evaluate_quadratic(J_free, g_free, step_free)
            elif tr_solver == "lsmr":
                predicted_reduction = -evaluate_quadratic(Jop, g, step)

            x_new = x + step
            f_new = fun(x_new)
            nfev += 1

            step_h_norm = norm(step * scale, ord=np.inf)

            if not np.all(np.isfinite(f_new)):
                Delta = 0.25 * step_h_norm
                continue

            # Usual trust-region step quality estimation.
            if loss_function is not None:
                cost_new = loss_function(f_new, cost_only=True)
            else:
                cost_new = 0.5 * np.dot(f_new, f_new)
            actual_reduction = cost - cost_new

            Delta, ratio = update_tr_radius(Delta, actual_reduction, predicted_reduction, step_h_norm, tr_hit)

            step_norm = norm(step)
            termination_status = check_termination(actual_reduction, cost, step_norm, norm(x), ratio, ftol, xtol)

            if termination_status is not None:
                break

        if actual_reduction > 0:
            on_bound[free_set] = on_bound_free

            x = x_new
            # Set variables exactly at the boundary.
            mask = on_bound == -1
            x[mask] = lb[mask]
            mask = on_bound == 1
            x[mask] = ub[mask]

            f = f_new
            f_true = f.copy()

            cost = cost_new

            J = jac(x, f)
            njev += 1

            if loss_function is not None:
                rho = loss_function(f)
                J, f = scale_for_robust_loss_function(J, f, rho)

            g = compute_grad(J, f)

            if jac_scaling:
                scale, scale_inv = compute_jac_scaling(J, scale)
        else:
            step_norm = 0
            actual_reduction = 0

        iteration += 1

    if termination_status is None:
        termination_status = 0

    return OptimizeResult(
        x=x,
        cost=cost,
        fun=f_true,
        jac=J,
        grad=g_full,
        optimality=g_norm,
        active_mask=on_bound,
        nfev=nfev,
        njev=njev,
        status=termination_status,
    )
예제 #53
def lsq_linear(A, b, bounds=(-np.inf, np.inf), method='trf', tol=1e-10,
               lsq_solver=None, lsmr_tol=None, max_iter=None, verbose=0):
    r"""Solve a linear least-squares problem with bounds on the variables.

    Given an m-by-n design matrix A and a target vector b with m elements,
    `lsq_linear` solves the following optimization problem::

        minimize 0.5 * ||A x - b||**2
        subject to lb <= x <= ub

    This optimization problem is convex, hence a found minimum (if iterations
    have converged) is guaranteed to be global.

    Parameters
    ----------
    A : array_like, sparse matrix or LinearOperator, shape (m, n)
        Design matrix. Can be `scipy.sparse.linalg.LinearOperator`.
    b : array_like, shape (m,)
        Target vector.
    bounds : 2-tuple of array_like, optional
        Lower and upper bounds on independent variables. Defaults to no bounds.
        Each array must have shape (n,) or be a scalar, in the latter
        case a bound will be the same for all variables. Use ``np.inf`` with
        an appropriate sign to disable bounds on all or some variables.
    method : 'trf' or 'bvls', optional
        Method to perform minimization.

            * 'trf' : Trust Region Reflective algorithm adapted for a linear
              least-squares problem. This is an interior-point-like method
              and the required number of iterations is weakly correlated with
              the number of variables.
            * 'bvls' : Bounded-Variable Least-Squares algorithm. This is
              an active set method, which requires the number of iterations
              comparable to the number of variables. Can't be used when `A` is
              sparse or LinearOperator.

        Default is 'trf'.
    tol : float, optional
        Tolerance parameter. The algorithm terminates if a relative change
        of the cost function is less than `tol` on the last iteration.
        Additionally the first-order optimality measure is considered:

            * ``method='trf'`` terminates if the uniform norm of the gradient,
              scaled to account for the presence of the bounds, is less than
              `tol`.
            * ``method='bvls'`` terminates if Karush-Kuhn-Tucker conditions
              are satisfied within `tol` tolerance.

    lsq_solver : {None, 'exact', 'lsmr'}, optional
        Method of solving unbounded least-squares problems throughout
        iterations:

            * 'exact' : Use dense QR or SVD decomposition approach. Can't be
              used when `A` is sparse or LinearOperator.
            * 'lsmr' : Use `scipy.sparse.linalg.lsmr` iterative procedure
              which requires only matrix-vector product evaluations. Can't
              be used with ``method='bvls'``.

        If None (default) the solver is chosen based on type of `A`.
    lsmr_tol : None, float or 'auto', optional
        Tolerance parameters 'atol' and 'btol' for `scipy.sparse.linalg.lsmr`.
        If None (default), it is set to ``1e-2 * tol``. If 'auto', the
        tolerance will be adjusted based on the optimality of the current
        iterate, which can speed up the optimization process, but is not always
        reliable.
    max_iter : None or int, optional
        Maximum number of iterations before termination. If None (default), it
        is set to 100 for ``method='trf'`` or to the number of variables for
        ``method='bvls'`` (not counting iterations for 'bvls' initialization).
    verbose : {0, 1, 2}, optional
        Level of algorithm's verbosity:

            * 0 : work silently (default).
            * 1 : display a termination report.
            * 2 : display progress during iterations.

    Returns
    -------
    OptimizeResult with the following fields defined:
    x : ndarray, shape (n,)
        Solution found.
    cost : float
        Value of the cost function at the solution.
    fun : ndarray, shape (m,)
        Vector of residuals at the solution.
    optimality : float
        First-order optimality measure. The exact meaning depends on `method`,
        refer to the description of `tol` parameter.
    active_mask : ndarray of int, shape (n,)
        Each component shows whether a corresponding constraint is active
        (that is, whether a variable is at the bound):

            *  0 : a constraint is not active.
            * -1 : a lower bound is active.
            *  1 : an upper bound is active.

        Might be somewhat arbitrary for the `trf` method as it generates a
        sequence of strictly feasible iterates and active_mask is determined
        within a tolerance threshold.
    nit : int
        Number of iterations. Zero if the unconstrained solution is optimal.
    status : int
        Reason for algorithm termination:

            * -1 : the algorithm was not able to make progress on the last
              iteration.
            *  0 : the maximum number of iterations is exceeded.
            *  1 : the first-order optimality measure is less than `tol`.
            *  2 : the relative change of the cost function is less than `tol`.
            *  3 : the unconstrained solution is optimal.

    message : str
        Verbal description of the termination reason.
    success : bool
        True if one of the convergence criteria is satisfied (`status` > 0).

    See Also
    --------
    nnls : Linear least squares with non-negativity constraint.
    least_squares : Nonlinear least squares with bounds on the variables.                    

    Notes
    -----
    The algorithm first computes the unconstrained least-squares solution by
    `numpy.linalg.lstsq` or `scipy.sparse.linalg.lsmr` depending on
    `lsq_solver`. This solution is returned as optimal if it lies within the
    bounds.

    Method 'trf' runs the adaptation of the algorithm described in [STIR]_ for
    a linear least-squares problem. The iterations are essentially the same as
    in the nonlinear least-squares algorithm, but as the quadratic function
    model is always accurate, we don't need to track or modify the radius of
    a trust region. The line search (backtracking) is used as a safety net
    when a selected step does not decrease the cost function. Read more
    detailed description of the algorithm in `scipy.optimize.least_squares`.

    Method 'bvls' runs a Python implementation of the algorithm described in
    [BVLS]_. The algorithm maintains active and free sets of variables, on
    each iteration chooses a new variable to move from the active set to the
    free set and then solves the unconstrained least-squares problem on free
    variables. This algorithm is guaranteed to give an accurate solution
    eventually, but may require up to n iterations for a problem with n
    variables. Additionally, an ad-hoc initialization procedure is
    implemented, that determines which variables to set free or active
    initially. It takes some number of iterations before actual BVLS starts,
    but can significantly reduce the number of further iterations.

    References
    ----------
    .. [STIR] M. A. Branch, T. F. Coleman, and Y. Li, "A Subspace, Interior,
              and Conjugate Gradient Method for Large-Scale Bound-Constrained
              Minimization Problems," SIAM Journal on Scientific Computing,
              Vol. 21, Number 1, pp 1-23, 1999.
    .. [BVLS] P. B. Stark and R. L. Parker, "Bounded-Variable Least-Squares:
              an Algorithm and Applications", Computational Statistics, 10,
              129-141, 1995.

    Examples
    --------
    In this example a problem with a large sparse matrix and bounds on the
    variables is solved.

    >>> import numpy as np
    >>> from scipy.sparse import rand
    >>> from scipy.optimize import lsq_linear
    ...
    >>> np.random.seed(0)
    ...
    >>> m = 20000
    >>> n = 10000
    ...
    >>> A = rand(m, n, density=1e-4)
    >>> b = np.random.randn(m)
    ...
    >>> lb = np.random.randn(n)
    >>> ub = lb + 1
    ...
    >>> res = lsq_linear(A, b, bounds=(lb, ub), lsmr_tol='auto', verbose=1)
    The relative change of the cost function is less than `tol`.  # may vary
    Number of iterations 16, initial cost 1.5039e+04, final cost 1.1112e+04,
    first-order optimality 4.66e-08.
    """
    if method not in ['trf', 'bvls']:
        raise ValueError("`method` must be 'trf' or 'bvls'")

    if lsq_solver not in [None, 'exact', 'lsmr']:
        raise ValueError("`solver` must be None, 'exact' or 'lsmr'.")

    if verbose not in [0, 1, 2]:
        raise ValueError("`verbose` must be in [0, 1, 2].")

    if issparse(A):
        A = csr_matrix(A)
    elif not isinstance(A, LinearOperator):
        A = np.atleast_2d(A)

    if method == 'bvls':
        if lsq_solver == 'lsmr':
            raise ValueError("method='bvls' can't be used with "
                             "lsq_solver='lsmr'")

        if not isinstance(A, np.ndarray):
            raise ValueError("method='bvls' can't be used with `A` being "
                             "sparse or LinearOperator.")

    if lsq_solver is None:
        if isinstance(A, np.ndarray):
            lsq_solver = 'exact'
        else:
            lsq_solver = 'lsmr'
    elif lsq_solver == 'exact' and not isinstance(A, np.ndarray):
        raise ValueError("`exact` solver can't be used when `A` is "
                         "sparse or LinearOperator.")

    if len(A.shape) != 2:  # No ndim for LinearOperator.
        raise ValueError("`A` must have at most 2 dimensions.")

    if len(bounds) != 2:
        raise ValueError("`bounds` must contain 2 elements.")

    if max_iter is not None and max_iter <= 0:
        raise ValueError("`max_iter` must be None or positive integer.")

    m, n = A.shape

    b = np.atleast_1d(b)
    if b.ndim != 1:
        raise ValueError("`b` must have at most 1 dimension.")

    if b.size != m:
        raise ValueError("Inconsistent shapes between `A` and `b`.")

    lb, ub = prepare_bounds(bounds, n)

    if lb.shape != (n,) or ub.shape != (n,):
        raise ValueError("Bounds have wrong shape.")

    if np.any(lb >= ub):
        raise ValueError("Each lower bound mush be strictly less than each "
                         "upper bound.")

    if lsq_solver == 'exact':
        x_lsq = np.linalg.lstsq(A, b)[0]
    elif lsq_solver == 'lsmr':
        x_lsq = lsmr(A, b, atol=tol, btol=tol)[0]

    if in_bounds(x_lsq, lb, ub):
        r = A.dot(x_lsq) - b
        cost = 0.5 * np.dot(r, r)
        termination_status = 3
        termination_message = TERMINATION_MESSAGES[termination_status]
        g = compute_grad(A, r)
        g_norm = norm(g, ord=np.inf)

        if verbose > 0:
            print(termination_message)
            print("Final cost {0:.4e}, first-order optimality {1:.2e}"
                  .format(cost, g_norm))

        return OptimizeResult(
            x=x_lsq, fun=r, cost=cost, optimality=g_norm,
            active_mask=np.zeros(n), nit=0, status=termination_status,
            message=termination_message, success=True)

    if method == 'trf':
        res = trf_linear(A, b, x_lsq, lb, ub, tol, lsq_solver, lsmr_tol,
                         max_iter, verbose)
    elif method == 'bvls':
        res = bvls(A, b, x_lsq, lb, ub, tol, max_iter, verbose)

    res.message = TERMINATION_MESSAGES[res.status]
    res.success = res.status > 0

    if verbose > 0:
        print(res.message)
        print("Number of iterations {0}, initial cost {1:.4e}, "
              "final cost {2:.4e}, first-order optimality {3:.2e}."
              .format(res.nit, res.initial_cost, res.cost, res.optimality))

    del res.initial_cost

    return res
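The docstring shows a large sparse problem; a minimal dense usage of `lsq_linear` (the matrix, target, and bounds below are toy data) also illustrates why simply clipping the unconstrained solution to the bounds is generally suboptimal:

import numpy as np
from scipy.optimize import lsq_linear

A = np.array([[1.0, 1.0], [1.0, 2.0], [0.0, 1.0]])  # toy data
b = np.array([1.0, 0.0, -1.0])

res = lsq_linear(A, b, bounds=(0.0, np.inf), lsq_solver='exact')
print(res.x, res.cost, res.status)

# Clipping the unconstrained solution does not give the constrained optimum:
x_clip = np.clip(np.linalg.lstsq(A, b, rcond=None)[0], 0.0, np.inf)
r = A.dot(x_clip) - b
print(0.5 * r.dot(r) >= res.cost)  # True for this data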
Example #54
File: trf.py  Project: MechCoder/scipy
def trf_bounds(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev,
               scaling, loss_function, tr_solver, tr_options, verbose):
    x = x0.copy()

    f = f0
    f_true = f.copy()
    nfev = 1

    J = J0
    njev = 1
    m, n = J.shape

    if loss_function is not None:
        rho = loss_function(f)
        cost = 0.5 * np.sum(rho[0])
        J, f = scale_for_robust_loss_function(J, f, rho)
    else:
        cost = 0.5 * np.dot(f, f)

    g = compute_grad(J, f)

    jac_scaling = isinstance(scaling, string_types) and scaling == 'jac'
    if jac_scaling:
        scale, scale_inv = compute_jac_scaling(J)
    else:
        scale, scale_inv = scaling, 1 / scaling

    v, dv = CL_scaling_vector(x, g, lb, ub)
    v[dv != 0] *= scale[dv != 0]
    Delta = norm(x0 * scale / v**0.5)
    if Delta == 0:
        Delta = 1.0

    g_norm = norm(g * v, ord=np.inf)

    f_augmented = np.zeros((m + n))
    if tr_solver == 'exact':
        J_augmented = np.empty((m + n, n))
    elif tr_solver == 'lsmr':
        reg_term = 0.0
        regularize = tr_options.pop('regularize', True)

    if max_nfev is None:
        max_nfev = x0.size * 100

    alpha = 0.0  # "Levenberg-Marquardt" parameter

    termination_status = None
    iteration = 0
    step_norm = None
    actual_reduction = None

    if verbose == 2:
        print_header_nonlinear()

    while True:
        v, dv = CL_scaling_vector(x, g, lb, ub)

        g_norm = norm(g * v, ord=np.inf)
        if g_norm < gtol:
            termination_status = 1

        if verbose == 2:
            print_iteration_nonlinear(iteration, nfev, cost, actual_reduction,
                                      step_norm, g_norm)

        if termination_status is not None or nfev == max_nfev:
            break

        # Now compute variables in "hat" space. Here we also account for
        # scaling introduced by the `scaling` parameter. This part is a bit
        # tricky: you have to write down the formulas and see how the
        # trust-region problem is formulated when the two types of scaling
        # are applied. The idea is to first apply `scaling` and then apply
        # the Coleman-Li approach in the new variables.

        # v is recomputed in the variables after applying `scaling`; note that
        # components which were identically 1 are not affected.
        v[dv != 0] *= scale[dv != 0]

        # Here we apply two types of scaling.
        d = v**0.5 * scale_inv

        # C = diag(g / scale) Jv
        diag_h = g * dv * scale_inv

        # After all this is done, we continue normally.

        # "hat" gradient.
        g_h = d * g

        f_augmented[:m] = f
        if tr_solver == 'exact':
            J_augmented[:m] = J * d
            J_h = J_augmented[:m]  # Memory view.
            J_augmented[m:] = np.diag(diag_h**0.5)
            U, s, V = svd(J_augmented, full_matrices=False)
            V = V.T
            uf = U.T.dot(f_augmented)
        elif tr_solver == 'lsmr':
            J_h = right_multiplied_operator(J, d)

            if regularize:
                a, b = build_quadratic_1d(J_h, g_h, -g_h, diag=diag_h)
                to_tr = Delta / norm(g_h)
                ag_value = minimize_quadratic_1d(a, b, 0, to_tr)[1]
                reg_term = -ag_value / Delta**2

            lsmr_op = regularized_lsq_operator(J_h, (diag_h + reg_term)**0.5)
            gn_h = lsmr(lsmr_op, f_augmented, **tr_options)[0]
            S = np.vstack((g_h, gn_h)).T
            S, _ = qr(S, mode='economic')
            JS = J_h.dot(S)  # LinearOperator does dot too.
            B_S = np.dot(JS.T, JS) + np.dot(S.T * diag_h, S)
            g_S = S.T.dot(g_h)

        # theta controls how far the step backs off from the bounds.
        theta = max(0.995, 1 - g_norm)

        actual_reduction = -1
        while actual_reduction <= 0 and nfev < max_nfev:
            if tr_solver == 'exact':
                p_h, alpha, n_iter = solve_lsq_trust_region(
                    n, m, uf, s, V, Delta, initial_alpha=alpha)
            elif tr_solver == 'lsmr':
                p_S, _ = solve_trust_region_2d(B_S, g_S, Delta)
                p_h = S.dot(p_S)

            p = d * p_h  # Trust-region solution in the original space.
            step, step_h, predicted_reduction = select_step(
                x, J_h, diag_h, g_h, p, p_h, d, Delta, lb, ub, theta)

            x_new = make_strictly_feasible(x + step, lb, ub, rstep=0)
            f_new = fun(x_new)
            nfev += 1

            step_h_norm = norm(step_h)

            if not np.all(np.isfinite(f_new)):
                Delta = 0.25 * step_h_norm
                continue

            # Usual trust-region step quality estimation.
            if loss_function is not None:
                cost_new = loss_function(f_new, cost_only=True)
            else:
                cost_new = 0.5 * np.dot(f_new, f_new)
            actual_reduction = cost - cost_new
            # Correction term is specific to the algorithm,
            # vanishes in unbounded case.
            correction = 0.5 * np.dot(step_h * diag_h, step_h)

            Delta_new, ratio = update_tr_radius(
                Delta, actual_reduction - correction, predicted_reduction,
                step_h_norm, step_h_norm > 0.95 * Delta
            )
            alpha *= Delta / Delta_new
            Delta = Delta_new

            step_norm = norm(step)
            termination_status = check_termination(
                actual_reduction, cost, step_norm, norm(x), ratio, ftol, xtol)

            if termination_status is not None:
                break

        if actual_reduction > 0:
            x = x_new

            f = f_new
            f_true = f.copy()

            cost = cost_new

            J = jac(x, f)
            njev += 1

            if loss_function is not None:
                rho = loss_function(f)
                J, f = scale_for_robust_loss_function(J, f, rho)

            g = compute_grad(J, f)

            if jac_scaling:
                scale, scale_inv = compute_jac_scaling(J, scale)
        else:
            step_norm = 0
            actual_reduction = 0

        iteration += 1

    if termination_status is None:
        termination_status = 0

    active_mask = find_active_constraints(x, lb, ub, rtol=xtol)
    return OptimizeResult(
        x=x, cost=cost, fun=f_true, jac=J, grad=g, optimality=g_norm,
        active_mask=active_mask, nfev=nfev, njev=njev,
        status=termination_status)
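In the 'lsmr' branch above, the trust-region problem is solved in the two-dimensional subspace spanned by the scaled gradient and the Gauss-Newton step. A toy illustration of that reduction, with `solve_2d` as a crude stand-in for scipy's internal `solve_trust_region_2d` and all data made up:

import numpy as np
from numpy.linalg import lstsq, norm, qr

def solve_2d(B, g, Delta, K=3600):
    # Minimize 0.5*p^T B p + g^T p over ||p|| <= Delta for 2x2 B.
    p = np.linalg.solve(B, -g)
    if norm(p) <= Delta and np.all(np.linalg.eigvalsh(B) > 0):
        return p  # interior minimizer
    t = np.linspace(0, 2 * np.pi, K)  # otherwise scan the boundary circle
    P = Delta * np.vstack((np.cos(t), np.sin(t)))
    vals = 0.5 * np.sum(P * B.dot(P), axis=0) + g.dot(P)
    return P[:, np.argmin(vals)]

rng = np.random.RandomState(0)
J, f = rng.randn(8, 5), rng.randn(8)
g_h = J.T.dot(f)                     # gradient of 0.5*||J x + f||^2 at x = 0
gn_h = lstsq(J, -f, rcond=None)[0]   # Gauss-Newton step
S, _ = qr(np.vstack((g_h, gn_h)).T)  # orthonormal basis, as in the code above
JS = J.dot(S)
B_S, g_S = JS.T.dot(JS), S.T.dot(g_h)
p = S.dot(solve_2d(B_S, g_S, Delta=0.3))
print(norm(p))  # <= 0.3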
Example #55
File: trf.py  Project: MechCoder/scipy
def trf_no_bounds(fun, jac, x0, f0, J0, ftol, xtol, gtol, max_nfev,
                  scaling, loss_function, tr_solver, tr_options, verbose):
    x = x0.copy()

    f = f0
    f_true = f.copy()
    nfev = 1

    J = J0
    njev = 1
    m, n = J.shape

    if loss_function is not None:
        rho = loss_function(f)
        cost = 0.5 * np.sum(rho[0])
        J, f = scale_for_robust_loss_function(J, f, rho)
    else:
        cost = 0.5 * np.dot(f, f)

    g = compute_grad(J, f)

    jac_scaling = isinstance(scaling, string_types) and scaling == 'jac'
    if jac_scaling:
        scale, scale_inv = compute_jac_scaling(J)
    else:
        scale, scale_inv = scaling, 1 / scaling

    Delta = norm(x0 * scale)
    if Delta == 0:
        Delta = 1.0

    if tr_solver == 'lsmr':
        reg_term = 0
        damp = tr_options.pop('damp', 0.0)
        regularize = tr_options.pop('regularize', True)

    if max_nfev is None:
        max_nfev = x0.size * 100

    alpha = 0.0  # "Levenberg-Marquardt" parameter

    termination_status = None
    iteration = 0
    step_norm = None
    actual_reduction = None

    if verbose == 2:
        print_header_nonlinear()

    while True:
        g_norm = norm(g, ord=np.inf)
        if g_norm < gtol:
            termination_status = 1

        if verbose == 2:
            print_iteration_nonlinear(iteration, nfev, cost, actual_reduction,
                                      step_norm, g_norm)

        if termination_status is not None or nfev == max_nfev:
            break

        d = scale_inv
        g_h = d * g

        if tr_solver == 'exact':
            J_h = J * d
            U, s, V = svd(J_h, full_matrices=False)
            V = V.T
            uf = U.T.dot(f)
        elif tr_solver == 'lsmr':
            J_h = right_multiplied_operator(J, d)

            if regularize:
                a, b = build_quadratic_1d(J_h, g_h, -g_h)
                to_tr = Delta / norm(g_h)
                ag_value = minimize_quadratic_1d(a, b, 0, to_tr)[1]
                reg_term = -ag_value / Delta**2

            damp_full = (damp**2 + reg_term)**0.5
            gn_h = lsmr(J_h, f, damp=damp_full, **tr_options)[0]
            S = np.vstack((g_h, gn_h)).T
            S, _ = qr(S, mode='economic')
            JS = J_h.dot(S)
            B_S = np.dot(JS.T, JS)
            g_S = S.T.dot(g_h)

        actual_reduction = -1
        while actual_reduction <= 0 and nfev < max_nfev:
            if tr_solver == 'exact':
                step_h, alpha, n_iter = solve_lsq_trust_region(
                    n, m, uf, s, V, Delta, initial_alpha=alpha)
            elif tr_solver == 'lsmr':
                p_S, _ = solve_trust_region_2d(B_S, g_S, Delta)
                step_h = S.dot(p_S)

            predicted_reduction = -evaluate_quadratic(J_h, g_h, step_h)
            step = d * step_h
            x_new = x + step
            f_new = fun(x_new)
            nfev += 1

            step_h_norm = norm(step_h)

            if not np.all(np.isfinite(f_new)):
                Delta = 0.25 * step_h_norm
                continue

            # Usual trust-region step quality estimation.
            if loss_function is not None:
                cost_new = loss_function(f_new, cost_only=True)
            else:
                cost_new = 0.5 * np.dot(f_new, f_new)
            actual_reduction = cost - cost_new

            Delta_new, ratio = update_tr_radius(
                Delta, actual_reduction, predicted_reduction,
                step_h_norm, step_h_norm > 0.95 * Delta)
            alpha *= Delta / Delta_new
            Delta = Delta_new

            step_norm = norm(step)
            termination_status = check_termination(
                actual_reduction, cost, step_norm, norm(x), ratio, ftol, xtol)

            if termination_status is not None:
                break

        if actual_reduction > 0:
            x = x_new

            f = f_new
            f_true = f.copy()

            cost = cost_new

            J = jac(x, f)
            njev += 1

            if loss_function is not None:
                rho = loss_function(f)
                J, f = scale_for_robust_loss_function(J, f, rho)

            g = compute_grad(J, f)

            if jac_scaling:
                scale, scale_inv = compute_jac_scaling(J, scale)
        else:
            step_norm = 0
            actual_reduction = 0

        iteration += 1

    if termination_status is None:
        termination_status = 0

    active_mask = np.zeros_like(x)
    return OptimizeResult(
        x=x, cost=cost, fun=f_true, jac=J, grad=g, optimality=g_norm,
        active_mask=active_mask, nfev=nfev, njev=njev,
        status=termination_status)
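Both trf variants regularize the Gauss-Newton subproblem before calling lsmr. Here is a minimal sketch of the augmentation idea behind `regularized_lsq_operator` (an assumption-level reconstruction, not scipy's actual helper): stacking sqrt(diag) rows under J makes lsmr minimize ||J x + f||^2 + ||sqrt(diag) * x||^2.

import numpy as np
from scipy.sparse.linalg import LinearOperator, aslinearoperator, lsmr

def augmented_operator(J, sqrt_diag):
    # Operator for the stacked matrix [J; diag(sqrt_diag)].
    J = aslinearoperator(J)
    m, n = J.shape
    return LinearOperator(
        (m + n, n),
        matvec=lambda x: np.hstack((J.matvec(x), sqrt_diag * x)),
        rmatvec=lambda y: J.rmatvec(y[:m]) + sqrt_diag * y[m:],
    )

J = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])  # toy data
f = np.array([1.0, 1.0, 1.0])
f_augmented = np.hstack((f, np.zeros(2)))  # zeros for the regularization rows
gn_h = lsmr(augmented_operator(J, np.sqrt([0.5, 0.5])), f_augmented)[0]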
Example #56
File: numpy.py  Project: nsrishankar/pymor
def _apply_inverse(matrix, V, options=None):
    """Solve linear equation system.

    Applies the inverse of `matrix` to the row vectors in `V`.

    See :func:`dense_options` for documentation of all possible options for
    dense matrices.

    See :func:`sparse_options` for documentation of all possible options for
    sparse matrices.

    This method is called by :meth:`pymor.core.NumpyMatrixOperator.apply_inverse`
    and usually should not be used directly.

    Parameters
    ----------
    matrix
        The |NumPy| matrix to invert.
    V
        2-dimensional |NumPy array| containing as row vectors
        the right-hand sides of the linear equation systems to
        solve.
    options
        The solver options to use. (See :func:`_options`.)

    Returns
    -------
    |NumPy array| of the solution vectors.
    """

    default_options = _options(matrix)

    if options is None:
        options = default_options.values()[0]
    elif isinstance(options, str):
        if options == 'least_squares':
            for k, v in default_options.iteritems():
                if k.startswith('least_squares'):
                    options = v
                    break
            assert not isinstance(options, str)
        else:
            options = default_options[options]
    else:
        assert 'type' in options and options['type'] in default_options \
            and options.viewkeys() <= default_options[options['type']].viewkeys()
        user_options = options
        options = default_options[user_options['type']]
        options.update(user_options)

    promoted_type = np.promote_types(matrix.dtype, V.dtype)
    R = np.empty((len(V), matrix.shape[1]), dtype=promoted_type)

    if options['type'] == 'solve':
        for i, VV in enumerate(V):
            try:
                R[i] = np.linalg.solve(matrix, VV)
            except np.linalg.LinAlgError as e:
                raise InversionError('{}: {}'.format(str(type(e)), str(e)))
    elif options['type'] == 'least_squares_lstsq':
        for i, VV in enumerate(V):
            try:
                R[i], _, _, _ = np.linalg.lstsq(matrix, VV, rcond=options['rcond'])
            except np.linalg.LinAlgError as e:
                raise InversionError('{}: {}'.format(str(type(e)), str(e)))
    elif options['type'] == 'bicgstab':
        for i, VV in enumerate(V):
            R[i], info = bicgstab(matrix, VV, tol=options['tol'], maxiter=options['maxiter'])
            if info != 0:
                if info > 0:
                    raise InversionError('bicgstab failed to converge after {} iterations'.format(info))
                else:
                    raise InversionError('bicgstab failed with error code {} (illegal input or breakdown)'.
                                         format(info))
    elif options['type'] == 'bicgstab_spilu':
        ilu = spilu(matrix, drop_tol=options['spilu_drop_tol'], fill_factor=options['spilu_fill_factor'],
                    drop_rule=options['spilu_drop_rule'], permc_spec=options['spilu_permc_spec'])
        precond = LinearOperator(matrix.shape, ilu.solve)
        for i, VV in enumerate(V):
            R[i], info = bicgstab(matrix, VV, tol=options['tol'], maxiter=options['maxiter'], M=precond)
            if info != 0:
                if info > 0:
                    raise InversionError('bicgstab failed to converge after {} iterations'.format(info))
                else:
                    raise InversionError('bicgstab failed with error code {} (illegal input or breakdown)'.
                                         format(info))
    elif options['type'] == 'spsolve':
        try:
            # maybe remove unusable factorization:
            if hasattr(matrix, 'factorization'):
                fdtype = matrix.factorizationdtype
                if not np.can_cast(V.dtype, fdtype, casting='safe'):
                    del matrix.factorization

            if map(int, scipy.version.version.split('.')) >= [0, 14, 0]:
                if hasattr(matrix, 'factorization'):
                    # we may use a complex factorization of a real matrix to
                    # apply it to a real vector. In that case, we downcast
                    # the result here, removing the imaginary part,
                    # which should be zero.
                    R = matrix.factorization.solve(V.T).T.astype(promoted_type, copy=False)
                elif options['keep_factorization']:
                    # the matrix is always converted to the promoted type.
                    # if matrix.dtype == promoted_type, this is a no_op
                    matrix.factorization = splu(matrix_astype_nocopy(matrix, promoted_type), permc_spec=options['permc_spec'])
                    matrix.factorizationdtype = promoted_type
                    R = matrix.factorization.solve(V.T).T
                else:
                    # the matrix is always converted to the promoted type.
                    # if matrix.dtype == promoted_type, this is a no_op
                    R = spsolve(matrix_astype_nocopy(matrix, promoted_type), V.T, permc_spec=options['permc_spec']).T
            else:
                # see if-part for documentation
                if hasattr(matrix, 'factorization'):
                    for i, VV in enumerate(V):
                        R[i] = matrix.factorization.solve(VV).astype(promoted_type, copy=False)
                elif options['keep_factorization']:
                    matrix.factorization = splu(matrix_astype_nocopy(matrix, promoted_type), permc_spec=options['permc_spec'])
                    matrix.factorizationdtype = promoted_type
                    for i, VV in enumerate(V):
                        R[i] = matrix.factorization.solve(VV)
                elif len(V) > 1:
                    factorization = splu(matrix_astype_nocopy(matrix, promoted_type), permc_spec=options['permc_spec'])
                    for i, VV in enumerate(V):
                        R[i] = factorization.solve(VV)
                else:
                    R = spsolve(matrix_astype_nocopy(matrix, promoted_type), V.T, permc_spec=options['permc_spec']).reshape((1, -1))
        except RuntimeError as e:
            raise InversionError(e)
    elif options['type'] == 'lgmres':
        for i, VV in enumerate(V):
            R[i], info = lgmres(matrix, VV.copy(),
                                tol=options['tol'],
                                maxiter=options['maxiter'],
                                inner_m=options['inner_m'],
                                outer_k=options['outer_k'])
            if info > 0:
                raise InversionError('lgmres failed to converge after {} iterations'.format(info))
            assert info == 0
    elif options['type'] == 'least_squares_lsmr':
        for i, VV in enumerate(V):
            R[i], info, itn, _, _, _, _, _ = lsmr(matrix, VV.copy(),
                                                  damp=options['damp'],
                                                  atol=options['atol'],
                                                  btol=options['btol'],
                                                  conlim=options['conlim'],
                                                  maxiter=options['maxiter'],
                                                  show=options['show'])
            assert 0 <= info <= 7
            if info == 7:
                raise InversionError('lsmr failed to converge after {} iterations'.format(itn))
    elif options['type'] == 'least_squares_lsqr':
        for i, VV in enumerate(V):
            R[i], info, itn, _, _, _, _, _, _, _ = lsqr(matrix, VV.copy(),
                                                        damp=options['damp'],
                                                        atol=options['atol'],
                                                        btol=options['btol'],
                                                        conlim=options['conlim'],
                                                        iter_lim=options['iter_lim'],
                                                        show=options['show'])
            assert 0 <= info <= 7
            if info == 7:
                raise InversionError('lsqr failed to converge after {} iterations'.format(itn))
    elif options['type'] == 'pyamg':
        if len(V) > 0:
            V_iter = iter(enumerate(V))
            R[0], ml = pyamg.solve(matrix, next(V_iter)[1],
                                   tol=options['tol'],
                                   maxiter=options['maxiter'],
                                   return_solver=True)
            for i, VV in V_iter:
                R[i] = pyamg.solve(matrix, VV,
                                   tol=options['tol'],
                                   maxiter=options['maxiter'],
                                   existing_solver=ml)
    elif options['type'] == 'pyamg-rs':
        ml = pyamg.ruge_stuben_solver(matrix,
                                      strength=options['strength'],
                                      CF=options['CF'],
                                      presmoother=options['presmoother'],
                                      postsmoother=options['postsmoother'],
                                      max_levels=options['max_levels'],
                                      max_coarse=options['max_coarse'],
                                      coarse_solver=options['coarse_solver'])
        for i, VV in enumerate(V):
            R[i] = ml.solve(VV,
                            tol=options['tol'],
                            maxiter=options['maxiter'],
                            cycle=options['cycle'],
                            accel=options['accel'])
    elif options['type'] == 'pyamg-sa':
        ml = pyamg.smoothed_aggregation_solver(matrix,
                                               symmetry=options['symmetry'],
                                               strength=options['strength'],
                                               aggregate=options['aggregate'],
                                               smooth=options['smooth'],
                                               presmoother=options['presmoother'],
                                               postsmoother=options['postsmoother'],
                                               improve_candidates=options['improve_candidates'],
                                               max_levels=options['max_levels'],
                                               max_coarse=options['max_coarse'],
                                               diagonal_dominance=options['diagonal_dominance'])
        for i, VV in enumerate(V):
            R[i] = ml.solve(VV,
                            tol=options['tol'],
                            maxiter=options['maxiter'],
                            cycle=options['cycle'],
                            accel=options['accel'])
    elif options['type'].startswith('generic') or options['type'].startswith('least_squares_generic'):
        logger = getLogger('pymor.operators.numpy._apply_inverse')
        logger.warn('You have selected a (potentially slow) generic solver for a NumPy matrix operator!')
        from pymor.operators.numpy import NumpyMatrixOperator
        from pymor.vectorarrays.numpy import NumpyVectorArray
        return genericsolvers.apply_inverse(NumpyMatrixOperator(matrix),
                                            NumpyVectorArray(V, copy=False),
                                            options=options).data
    else:
        raise ValueError('Unknown solver type')
    return R
Example #57
File: test_lsmr.py  Project: b-t-g/Sim
    def assertCompatibleSystem(self, A, xtrue):
        Afun = aslinearoperator(A)
        b = Afun.matvec(xtrue)
        x = lsmr(A, b)[0]
        assert_almost_equal(norm(x - xtrue), 0, 6)
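The method above comes from a TestCase fixture; a self-contained equivalent with a made-up matrix:

import numpy as np
from numpy.linalg import norm
from numpy.testing import assert_almost_equal
from scipy.sparse.linalg import aslinearoperator, lsmr

A = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
xtrue = np.array([1.0, -1.0])
b = aslinearoperator(A).matvec(xtrue)  # consistent right-hand side
x = lsmr(A, b)[0]
assert_almost_equal(norm(x - xtrue), 0, 6)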
Example #58
File: scipy.py  Project: renemilk/pyMor
def apply_inverse(op, V, options=None, least_squares=False, check_finite=True,
                  default_solver='scipy_spsolve', default_least_squares_solver='scipy_least_squares_lsmr'):
    """Solve linear equation system.

    Applies the inverse of `op` to the vectors in `V` using SciPy.

    Parameters
    ----------
    op
        The linear, non-parametric |Operator| to invert.
    V
        |VectorArray| of right-hand sides for the equation system.
    options
        The |solver_options| to use (see :func:`solver_options`).
    check_finite
        Test if the solution contains only finite values.
    default_solver
        Default solver to use (scipy_spsolve, scipy_bicgstab, scipy_bicgstab_spilu,
        scipy_lgmres, scipy_least_squares_lsmr, scipy_least_squares_lsqr).
    default_least_squares_solver
        Default solver to use for least squares problems (scipy_least_squares_lsmr,
        scipy_least_squares_lsqr).

    Returns
    -------
    |VectorArray| of the solution vectors.
    """

    assert V in op.range

    if isinstance(op, NumpyMatrixOperator):
        matrix = op._matrix
    else:
        from pymor.algorithms.to_matrix import to_matrix
        matrix = to_matrix(op)

    options = _parse_options(options, solver_options(), default_solver, default_least_squares_solver, least_squares)

    V = V.data
    promoted_type = np.promote_types(matrix.dtype, V.dtype)
    R = np.empty((len(V), matrix.shape[1]), dtype=promoted_type)

    if options['type'] == 'scipy_bicgstab':
        for i, VV in enumerate(V):
            R[i], info = bicgstab(matrix, VV, tol=options['tol'], maxiter=options['maxiter'])
            if info != 0:
                if info > 0:
                    raise InversionError('bicgstab failed to converge after {} iterations'.format(info))
                else:
                    raise InversionError('bicgstab failed with error code {} (illegal input or breakdown)'.
                                         format(info))
    elif options['type'] == 'scipy_bicgstab_spilu':
        if Version(scipy.version.version) >= Version('0.19'):
            ilu = spilu(matrix, drop_tol=options['spilu_drop_tol'], fill_factor=options['spilu_fill_factor'],
                        drop_rule=options['spilu_drop_rule'], permc_spec=options['spilu_permc_spec'])
        else:
            if options['spilu_drop_rule']:
                logger = getLogger('pymor.operators.numpy._apply_inverse')
                logger.error("ignoring drop_rule in ilu factorization due to old SciPy")
            ilu = spilu(matrix, drop_tol=options['spilu_drop_tol'], fill_factor=options['spilu_fill_factor'],
                        permc_spec=options['spilu_permc_spec'])
        precond = LinearOperator(matrix.shape, ilu.solve)
        for i, VV in enumerate(V):
            R[i], info = bicgstab(matrix, VV, tol=options['tol'], maxiter=options['maxiter'], M=precond)
            if info != 0:
                if info > 0:
                    raise InversionError('bicgstab failed to converge after {} iterations'.format(info))
                else:
                    raise InversionError('bicgstab failed with error code {} (illegal input or breakdown)'.
                                         format(info))
    elif options['type'] == 'scipy_spsolve':
        try:
            # maybe remove unusable factorization:
            if hasattr(matrix, 'factorization'):
                fdtype = matrix.factorizationdtype
                if not np.can_cast(V.dtype, fdtype, casting='safe'):
                    del matrix.factorization

            if Version(scipy.version.version) >= Version('0.14'):
                if hasattr(matrix, 'factorization'):
                    # we may use a complex factorization of a real matrix to
                    # apply it to a real vector. In that case, we downcast
                    # the result here, removing the imaginary part,
                    # which should be zero.
                    R = matrix.factorization.solve(V.T).T.astype(promoted_type, copy=False)
                elif options['keep_factorization']:
                    # the matrix is always converted to the promoted type.
                    # if matrix.dtype == promoted_type, this is a no_op
                    matrix.factorization = splu(matrix_astype_nocopy(matrix.tocsc(), promoted_type),
                                                permc_spec=options['permc_spec'])
                    matrix.factorizationdtype = promoted_type
                    R = matrix.factorization.solve(V.T).T
                else:
                    # the matrix is always converted to the promoted type.
                    # if matrix.dtype == promoted_type, this is a no_op
                    R = spsolve(matrix_astype_nocopy(matrix, promoted_type), V.T, permc_spec=options['permc_spec']).T
            else:
                # see if-part for documentation
                if hasattr(matrix, 'factorization'):
                    for i, VV in enumerate(V):
                        R[i] = matrix.factorization.solve(VV).astype(promoted_type, copy=False)
                elif options['keep_factorization']:
                    matrix.factorization = splu(matrix_astype_nocopy(matrix.tocsc(), promoted_type),
                                                permc_spec=options['permc_spec'])
                    matrix.factorizationdtype = promoted_type
                    for i, VV in enumerate(V):
                        R[i] = matrix.factorization.solve(VV)
                elif len(V) > 1:
                    factorization = splu(matrix_astype_nocopy(matrix.tocsc(), promoted_type),
                                         permc_spec=options['permc_spec'])
                    for i, VV in enumerate(V):
                        R[i] = factorization.solve(VV)
                else:
                    R = spsolve(matrix_astype_nocopy(matrix, promoted_type), V.T, permc_spec=options['permc_spec']).reshape((1, -1))
        except RuntimeError as e:
            raise InversionError(e)
    elif options['type'] == 'scipy_lgmres':
        for i, VV in enumerate(V):
            R[i], info = lgmres(matrix, VV,
                                tol=options['tol'],
                                maxiter=options['maxiter'],
                                inner_m=options['inner_m'],
                                outer_k=options['outer_k'])
            if info > 0:
                raise InversionError('lgmres failed to converge after {} iterations'.format(info))
            assert info == 0
    elif options['type'] == 'scipy_least_squares_lsmr':
        from scipy.sparse.linalg import lsmr
        for i, VV in enumerate(V):
            R[i], info, itn, _, _, _, _, _ = lsmr(matrix, VV,
                                                  damp=options['damp'],
                                                  atol=options['atol'],
                                                  btol=options['btol'],
                                                  conlim=options['conlim'],
                                                  maxiter=options['maxiter'],
                                                  show=options['show'])
            assert 0 <= info <= 7
            if info == 7:
                raise InversionError('lsmr failed to converge after {} iterations'.format(itn))
    elif options['type'] == 'scipy_least_squares_lsqr':
        for i, VV in enumerate(V):
            R[i], info, itn, _, _, _, _, _, _, _ = lsqr(matrix, VV,
                                                        damp=options['damp'],
                                                        atol=options['atol'],
                                                        btol=options['btol'],
                                                        conlim=options['conlim'],
                                                        iter_lim=options['iter_lim'],
                                                        show=options['show'])
            assert 0 <= info <= 7
            if info == 7:
                raise InversionError('lsqr failed to converge after {} iterations'.format(itn))
    else:
        raise ValueError('Unknown solver type')

    if check_finite:
        if not np.isfinite(np.sum(R)):
            raise InversionError('Result contains non-finite values')

    return op.source.from_data(R)
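The wrappers above unpack scipy's full lsmr return tuple. For reference, a direct call on toy data (the matrix and tolerances below are made up):

import numpy as np
from scipy.sparse.linalg import lsmr

A = np.array([[1.0, 0.0], [0.0, 2.0], [1.0, 1.0]])
b = np.array([1.0, 2.0, 3.0])
x, istop, itn, normr, normar, norma, conda, normx = lsmr(
    A, b, damp=0.0, atol=1e-8, btol=1e-8, conlim=1e8, maxiter=50, show=False)
assert 0 <= istop <= 7  # istop == 7 means the iteration limit was reached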
Example #59
def apply_inverse(matrix, U, options=None):
    """Solve linear equation system.

    Applies the inverse of `matrix` to the row vectors in `U`.

    See :func:`sparse_options` for documentation of all possible options for
    sparse matrices.

    Parameters
    ----------
    matrix
        The |NumPy| matrix to invert.
    U
        2-dimensional |NumPy array| containing as row vectors
        the right-hand sides of the linear equation systems to
        solve.
    options
        |invert_options| to use. (See :func:`invert_options`.)

    Returns
    -------
    |NumPy array| of the solution vectors.
    """

    default_options = invert_options(matrix)

    if options is None:
        options = default_options.values()[0]
    elif isinstance(options, str):
        if options == 'least_squares':
            for k, v in default_options.iteritems():
                if k.startswith('least_squares'):
                    options = v
                    break
            assert not isinstance(options, str)
        else:
            options = default_options[options]
    else:
        assert 'type' in options and options['type'] in default_options \
            and options.viewkeys() <= default_options[options['type']].viewkeys()
        user_options = options
        options = default_options[user_options['type']]
        options.update(user_options)

    R = np.empty((len(U), matrix.shape[1]))

    if options['type'] == 'solve':
        for i, UU in enumerate(U):
            try:
                R[i] = np.linalg.solve(matrix, UU)
            except np.linalg.LinAlgError as e:
                raise InversionError('{}: {}'.format(str(type(e)), str(e)))
    elif options['type'] == 'least_squares_lstsq':
        for i, UU in enumerate(U):
            try:
                R[i], _, _, _ = np.linalg.lstsq(matrix, UU, rcond=options['rcond'])
            except np.linalg.LinAlgError as e:
                raise InversionError('{}: {}'.format(str(type(e)), str(e)))
    elif options['type'] == 'bicgstab':
        for i, UU in enumerate(U):
            R[i], info = bicgstab(matrix, UU, tol=options['tol'], maxiter=options['maxiter'])
            if info != 0:
                if info > 0:
                    raise InversionError('bicgstab failed to converge after {} iterations'.format(info))
                else:
                    raise InversionError('bicgstab failed with error code {} (illegal input or breakdown)'.
                                         format(info))
    elif options['type'] == 'bicgstab_spilu':
        ilu = spilu(matrix, drop_tol=options['spilu_drop_tol'], fill_factor=options['spilu_fill_factor'],
                    drop_rule=options['spilu_drop_rule'], permc_spec=options['spilu_permc_spec'])
        precond = LinearOperator(matrix.shape, ilu.solve)
        for i, UU in enumerate(U):
            R[i], info = bicgstab(matrix, UU, tol=options['tol'], maxiter=options['maxiter'], M=precond)
            if info != 0:
                if info > 0:
                    raise InversionError('bicgstab failed to converge after {} iterations'.format(info))
                else:
                    raise InversionError('bicgstab failed with error code {} (illegal input or breakdown)'.
                                         format(info))
    elif options['type'] == 'spsolve':
        for i, UU in enumerate(U):
            R[i] = spsolve(matrix, UU, permc_spec=options['permc_spec'])
    elif options['type'] == 'lgmres':
        for i, UU in enumerate(U):
            R[i], info = lgmres(matrix, UU.copy(),
                                tol=options['tol'],
                                maxiter=options['maxiter'],
                                inner_m=options['inner_m'],
                                outer_k=options['outer_k'])
            if info > 0:
                raise InversionError('lgmres failed to converge after {} iterations'.format(info))
            assert info == 0
    elif options['type'] == 'least_squares_lsmr':
        for i, UU in enumerate(U):
            R[i], info, itn, _, _, _, _, _ = lsmr(matrix, UU.copy(),
                                                  damp=options['damp'],
                                                  atol=options['atol'],
                                                  btol=options['btol'],
                                                  conlim=options['conlim'],
                                                  maxiter=options['maxiter'],
                                                  show=options['show'])
            assert 0 <= info <= 7
            if info == 7:
                raise InversionError('lsmr failed to converge after {} iterations'.format(itn))
    elif options['type'] == 'least_squares_lsqr':
        for i, UU in enumerate(U):
            R[i], info, itn, _, _, _, _, _, _, _ = lsqr(matrix, UU.copy(),
                                                        damp=options['damp'],
                                                        atol=options['atol'],
                                                        btol=options['btol'],
                                                        conlim=options['conlim'],
                                                        iter_lim=options['iter_lim'],
                                                        show=options['show'])
            assert 0 <= info <= 7
            if info == 7:
                raise InversionError('lsqr failed to converge after {} iterations'.format(itn))
    elif options['type'] == 'pyamg':
        if len(U) > 0:
            U_iter = iter(enumerate(U))
            R[0], ml = pyamg.solve(matrix, next(U_iter)[1],
                                   tol=options['tol'],
                                   maxiter=options['maxiter'],
                                   return_solver=True)
            for i, UU in U_iter:
                R[i] = pyamg.solve(matrix, UU,
                                   tol=options['tol'],
                                   maxiter=options['maxiter'],
                                   existing_solver=ml)
    elif options['type'] == 'pyamg-rs':
        ml = pyamg.ruge_stuben_solver(matrix,
                                      strength=options['strength'],
                                      CF=options['CF'],
                                      presmoother=options['presmoother'],
                                      postsmoother=options['postsmoother'],
                                      max_levels=options['max_levels'],
                                      max_coarse=options['max_coarse'],
                                      coarse_solver=options['coarse_solver'])
        for i, UU in enumerate(U):
            R[i] = ml.solve(UU,
                            tol=options['tol'],
                            maxiter=options['maxiter'],
                            cycle=options['cycle'],
                            accel=options['accel'])
    elif options['type'] == 'pyamg-sa':
        ml = pyamg.smoothed_aggregation_solver(matrix,
                                               symmetry=options['symmetry'],
                                               strength=options['strength'],
                                               aggregate=options['aggregate'],
                                               smooth=options['smooth'],
                                               presmoother=options['presmoother'],
                                               postsmoother=options['postsmoother'],
                                               improve_candidates=options['improve_candidates'],
                                               max_levels=options['max_levels'],
                                               max_coarse=options['max_coarse'],
                                               diagonal_dominance=options['diagonal_dominance'])
        for i, UU in enumerate(U):
            R[i] = ml.solve(UU,
                            tol=options['tol'],
                            maxiter=options['maxiter'],
                            cycle=options['cycle'],
                            accel=options['accel'])
    elif options['type'].startswith('generic') or options['type'].startswith('least_squares_generic'):
        logger = getLogger('pymor.la.numpysolvers.apply_inverse')
        logger.warn('You have selected a (potentially slow) generic solver for a NumPy matrix operator!')
        from pymor.operators.basic import NumpyMatrixOperator
        from pymor.la import NumpyVectorArray
        return genericsolvers.apply_inverse(NumpyMatrixOperator(matrix),
                                            NumpyVectorArray(U, copy=False),
                                            options=options).data
    else:
        raise ValueError('Unknown solver type')
    return R
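For comparison with the lsmr branch, lsqr returns a 10-tuple, which the code above unpacks. A direct call on toy data:

import numpy as np
from scipy.sparse.linalg import lsqr

A = np.array([[1.0, 0.0], [0.0, 2.0], [1.0, 1.0]])
b = np.array([1.0, 2.0, 3.0])
x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var = lsqr(
    A, b, damp=0.0, atol=1e-8, btol=1e-8, conlim=1e8, iter_lim=50, show=False)
assert 0 <= istop <= 7  # istop == 7 means the iteration limit was reached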