Example #1
 def _setdiag(self, values, k):
     M, N = self.shape
     if k < 0:
         if values.ndim == 0:
             # broadcast
             max_index = min(M+k, N)
             for i in xrange(max_index):
                 self[i - k, i] = values
         else:
             max_index = min(M+k, N, len(values))
             if max_index <= 0:
                 return
             for i,v in enumerate(values[:max_index]):
                 self[i - k, i] = v
     else:
         if values.ndim == 0:
             # broadcast
             max_index = min(M, N-k)
             for i in xrange(max_index):
                 self[i, i + k] = values
         else:
             max_index = min(M, N-k, len(values))
             if max_index <= 0:
                 return
             for i,v in enumerate(values[:max_index]):
                 self[i, i + k] = v
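This private helper backs the public setdiag() method of SciPy's sparse matrices. A minimal usage sketch (not part of the source above; assumes SciPy is installed):

    import numpy as np
    from scipy.sparse import lil_matrix

    A = lil_matrix((4, 4))
    A.setdiag(1.0)               # a scalar is broadcast along the main diagonal
    A.setdiag([5, 6, 7], k=1)    # a sequence fills the first superdiagonal
    print(A.toarray())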
Example #2
    def _evaluate_derivatives(self, x, der=None):
        n = self.n
        r = self.r

        if der is None:
            der = self.n
        pi = np.zeros((n, len(x)))
        w = np.zeros((n, len(x)))
        pi[0] = 1
        p = np.zeros((len(x), self.r))
        p += self.c[0,np.newaxis,:]

        for k in xrange(1,n):
            w[k-1] = x - self.xi[k-1]
            pi[k] = w[k-1]*pi[k-1]
            p += pi[k,:,np.newaxis]*self.c[k]

        cn = np.zeros((max(der,n+1), len(x), r), dtype=self.dtype)
        cn[:n+1,:,:] += self.c[:n+1,np.newaxis,:]
        cn[0] = p
        for k in xrange(1,n):
            for i in xrange(1,n-k+1):
                pi[i] = w[k+i-1]*pi[i-1]+pi[i]
                cn[k] = cn[k]+pi[i,:,np.newaxis]*cn[k+i]
            cn[k] *= factorial(k)

        cn[n,:,:] = 0
        return cn[:der]
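This is the derivative-evaluation core of scipy.interpolate.KroghInterpolator. A short sketch of the public entry point it serves (illustrative, assuming a standard SciPy install):

    import numpy as np
    from scipy.interpolate import KroghInterpolator

    P = KroghInterpolator([0, 1, 2], [0, 1, 4])  # fits y = x**2 exactly
    print(P.derivatives(1.0, der=3))             # ~[1., 2., 2.]: value, y', y''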
Example #3
    def __init__(self, xi, yi, axis=0):
        _Interpolator1DWithDerivatives.__init__(self, xi, yi, axis)

        self.xi = np.asarray(xi)
        self.yi = self._reshape_yi(yi)
        self.n, self.r = self.yi.shape

        c = np.zeros((self.n+1, self.r), dtype=self.dtype)
        c[0] = self.yi[0]
        Vk = np.zeros((self.n, self.r), dtype=self.dtype)
        for k in xrange(1,self.n):
            s = 0
            while s <= k and xi[k-s] == xi[k]:
                s += 1
            s -= 1
            Vk[0] = self.yi[k]/float(factorial(s))
            for i in xrange(k-s):
                if xi[i] == xi[k]:
                    raise ValueError("Elements if `xi` can't be equal.")
                if s == 0:
                    Vk[i+1] = (c[i]-Vk[i])/(xi[i]-xi[k])
                else:
                    Vk[i+1] = (Vk[i+1]-Vk[i])/(xi[i]-xi[k])
            c[k] = Vk[k-s]
        self.c = c
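The divided-difference table built here is what scipy.interpolate.KroghInterpolator computes at construction time; repeating a value in xi makes the matching yi entry a derivative. A minimal sketch:

    from scipy.interpolate import KroghInterpolator

    # xi repeated at 0, so the second yi entry is read as P'(0)
    P = KroghInterpolator([0, 0, 1], [0.0, 1.0, 2.0])  # P(0)=0, P'(0)=1, P(1)=2
    print(P(0.0), P(1.0))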
Example #4
 def __add__(self, other):
     # First check if argument is a scalar
     if isscalarlike(other):
         res_dtype = upcast_scalar(self.dtype, other)
         new = dok_matrix(self.shape, dtype=res_dtype)
         # Add this scalar to every element.
         M, N = self.shape
         for i in xrange(M):
             for j in xrange(N):
                 aij = self.get((i, j), 0) + other
                 if aij != 0:
                     new[i, j] = aij
         # new.dtype.char = self.dtype.char
     elif isinstance(other, dok_matrix):
         if other.shape != self.shape:
             raise ValueError("matrix dimensions are not equal")
         # We could alternatively set the dimensions to the largest of
         # the two matrices to be summed.  Would this be a good idea?
         res_dtype = upcast(self.dtype, other.dtype)
         new = dok_matrix(self.shape, dtype=res_dtype)
         new.update(self)
         for key in other.keys():
             new[key] += other[key]
     elif isspmatrix(other):
         csc = self.tocsc()
         new = csc + other
     elif isdense(other):
         new = self.todense() + other
     else:
         return NotImplemented
     return new
Example #5
 def __radd__(self, other):
     # First check if argument is a scalar
     if isscalarlike(other):
         new = dok_matrix(self.shape, dtype=self.dtype)
         # Add this scalar to every element.
         M, N = self.shape
         for i in xrange(M):
             for j in xrange(N):
                 aij = self.get((i, j), 0) + other
                 if aij != 0:
                     new[i, j] = aij
     elif isinstance(other, dok_matrix):
         if other.shape != self.shape:
             raise ValueError("matrix dimensions are not equal")
         new = dok_matrix(self.shape, dtype=self.dtype)
         new.update(self)
         for key in other:
             new[key] += other[key]
     elif isspmatrix(other):
         csc = self.tocsc()
         new = csc + other
     elif isdense(other):
         new = other + self.todense()
     else:
         return NotImplemented
     return new
Example #6
 def __add__(self, other):
     if isscalarlike(other):
         res_dtype = upcast_scalar(self.dtype, other)
         new = dok_matrix(self.shape, dtype=res_dtype)
         # Add this scalar to every element.
         M, N = self.shape
         for key in itertools.product(xrange(M), xrange(N)):
             aij = dict.get(self, (key), 0) + other
             if aij:
                 new[key] = aij
         # new.dtype.char = self.dtype.char
     elif isspmatrix_dok(other):
         if other.shape != self.shape:
             raise ValueError("Matrix dimensions are not equal.")
         # We could alternatively set the dimensions to the largest of
         # the two matrices to be summed.  Would this be a good idea?
         res_dtype = upcast(self.dtype, other.dtype)
         new = dok_matrix(self.shape, dtype=res_dtype)
         dict.update(new, self)
         with np.errstate(over='ignore'):
             dict.update(new,
                        ((k, new[k] + other[k]) for k in iterkeys(other)))
     elif isspmatrix(other):
         csc = self.tocsc()
         new = csc + other
     elif isdense(other):
         new = self.todense() + other
     else:
         return NotImplemented
     return new
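The __add__ variants above dispatch on the operand type: scalar, another dok_matrix, other sparse, or dense. A minimal sketch of the resulting behavior (assumes SciPy):

    from scipy.sparse import dok_matrix

    A = dok_matrix((2, 2))
    B = dok_matrix((2, 2))
    A[0, 0] = 1.0
    B[0, 0] = 2.0
    print((A + B)[0, 0])    # 3.0, via the dok + dok branch
    print((A + 1.0)[1, 1])  # 1.0, via the scalar branch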
Example #7
    def extend(self, xi, yi, orders=None):
        """
        Extend the PiecewisePolynomial by a list of points

        Parameters
        ----------
        xi : array_like
            A sorted list of x-coordinates.
        yi : list of lists of length N1
            ``yi[i]`` (if ``axis == 0``) is the list of derivatives known
            at ``xi[i]``.
        orders : int or list of ints, optional
            A list of polynomial orders, or a single universal order.

        """
        if self._y_axis == 0:
            # allow yi to be a ragged list
            for i in xrange(len(xi)):
                if orders is None or _isscalar(orders):
                    self.append(xi[i],yi[i],orders)
                else:
                    self.append(xi[i],yi[i],orders[i])
        else:
            preslice = (slice(None,None,None),) * self._y_axis
            for i in xrange(len(xi)):
                if orders is None or _isscalar(orders):
                    self.append(xi[i],yi[preslice + (i,)],orders)
                else:
                    self.append(xi[i],yi[preslice + (i,)],orders[i])
Example #8
 def test_cascade(self):
     for J in xrange(1, 7):
         for i in xrange(1, 5):
             lpcoef = wavelets.daub(i)
             k = len(lpcoef)
             x, phi, psi = wavelets.cascade(lpcoef, J)
             assert_(len(x) == len(phi) == len(psi))
             assert_equal(len(x), (k - 1) * 2 ** J)
Example #9
    def _get_arrayXarray(self, row, col):
        # inner indexing
        i, j = map(np.atleast_2d, np.broadcast_arrays(row, col))
        newdok = dok_matrix(i.shape, dtype=self.dtype)

        for key in itertools.product(xrange(i.shape[0]), xrange(i.shape[1])):
            v = dict.get(self, (i[key], j[key]), 0)
            if v:
                dict.__setitem__(newdok, key, v)
        return newdok
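This helper implements "inner" array-with-array indexing for dok_matrix: the row and column index arrays are broadcast against each other and read pairwise. A sketch of the public syntax (recent SciPy versions route fancy indexing through helpers like this):

    import numpy as np
    from scipy.sparse import dok_matrix

    A = dok_matrix((3, 3))
    A[0, 0], A[2, 1] = 1.0, 5.0
    rows = np.array([[0], [2]])
    cols = np.array([[0], [1]])
    print(A[rows, cols].toarray())  # [[1.], [5.]]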
Example #10
 def test_correspond_2_and_up(self):
     # Tests correspond(Z, y) on linkage and CDMs over observation sets of
     # different sizes.
     for i in xrange(2, 4):
         y = np.random.rand(i*(i-1)//2)
         Z = linkage(y)
         assert_(correspond(Z, y))
     for i in xrange(4, 15, 3):
         y = np.random.rand(i*(i-1)//2)
         Z = linkage(y)
         assert_(correspond(Z, y))
Example #11
 def test_improvement(self):
     import time
     start = time.time()
     for i in xrange(100):
         quad(self.lib.sin, 0, 100)
     fast = time.time() - start
     start = time.time()
     for i in xrange(100):
         quad(math.sin, 0, 100)
     slow = time.time() - start
     assert_(fast < 0.5*slow, (fast, slow))
Example #12
    def piecefuncgen(num):
        Mk = order // 2 - num
        if (Mk < 0):
            return 0  # final function is 0
        coeffs = [(1 - 2 * (k % 2)) * float(comb(order + 1, k, exact=1)) / fval
                  for k in xrange(Mk + 1)]
        shifts = [-bound - k for k in xrange(Mk + 1)]

        def thefunc(x):
            res = 0.0
            for k in range(Mk + 1):
                res += coeffs[k] * (x + shifts[k]) ** order
            return res
        return thefunc
Example #13
    def test_nd_simplex(self):
        # simple smoke test: triangulate a n-dimensional simplex
        for nd in xrange(2, 8):
            points = np.zeros((nd+1, nd))
            for j in xrange(nd):
                points[j,j] = 1.0
            points[-1,:] = 1.0

            tri = qhull.Delaunay(points)

            tri.vertices.sort()

            assert_equal(tri.vertices, np.arange(nd+1, dtype=int)[None,:])
            assert_equal(tri.neighbors, -1 + np.zeros((nd+1), dtype=int)[None,:])
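For reference, a minimal sketch of the public API this smoke test exercises (tri.simplices is the modern name for the tri.vertices attribute used above):

    import numpy as np
    from scipy.spatial import Delaunay

    points = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
    tri = Delaunay(points)
    print(tri.simplices)  # point indices of each triangle
    print(tri.neighbors)  # -1 marks hull-facing sides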
Example #14
def _printresmat(function, interval, resmat):
    # Print the Romberg result matrix.
    i = j = 0
    print('Romberg integration of', repr(function), end=' ')
    print('from', interval)
    print('')
    print('%6s %9s %9s' % ('Steps', 'StepSize', 'Results'))
    for i in xrange(len(resmat)):
        print('%6d %9f' % (2**i, (interval[1]-interval[0])/(2.**i)), end=' ')
        for j in xrange(i+1):
            print('%9f' % (resmat[i][j]), end=' ')
        print('')
    print('')
    print('The final result is', resmat[i][j], end=' ')
    print('after', 2**(len(resmat)-1)+1, 'function evaluations.')
Example #15
 def test_improvement(self):
     def myfunc(x):           # Euler's constant integrand
         return -exp(-x)*log(x)
     import time
     start = time.time()
     for i in xrange(20):
         quad(self.lib._multivariate_indefinite, 0, 100)
     fast = time.time() - start
     start = time.time()
     for i in xrange(20):
         quad(myfunc, 0, 100)
     slow = time.time() - start
     # 2+ times faster speeds generated by nontrivial ctypes
     # function (single variable)
     assert_(fast < 0.5*slow, (fast, slow))
Example #16
 def test_num_obs_linkage_4_and_up(self):
     # Tests num_obs_linkage(Z) on linkage on observation sets between sizes
     # 4 and 15 (step size 3).
     for i in xrange(4, 15, 3):
         y = np.random.rand(i*(i-1)//2)
         Z = linkage(y)
         assert_equal(num_obs_linkage(Z), i)
Example #17
 def setUp(self):
     self.tck = splrep([0, 1, 2, 3, 4, 5], [0, 10, -1, 3, 7, 2], s=0)
     self.test_xs = np.linspace(-1, 6, 100)
     self.spline_ys = splev(self.test_xs, self.tck)
     self.spline_yps = splev(self.test_xs, self.tck, der=1)
     self.xi = np.unique(self.tck[0])
     self.yi = [[splev(x, self.tck, der=j) for j in xrange(3)] for x in self.xi]
Example #18
 def test_exponential(self):
     degree = 5
     p = approximate_taylor_polynomial(np.exp, 0, degree, 1, 15)
     for i in xrange(degree + 1):
         assert_almost_equal(p(0), 1)
         p = p.deriv()
     assert_almost_equal(p(0), 0)
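A usage sketch of the function under test; the scale argument sets the width of the interval over which the polynomial is fitted (illustrative values):

    import numpy as np
    from scipy.interpolate import approximate_taylor_polynomial

    p = approximate_taylor_polynomial(np.exp, 0, degree=3, scale=1)
    print(p(0.1), np.exp(0.1))  # close near the expansion point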
Example #19
 def test_num_obs_linkage_multi_matrix(self):
     # Tests num_obs_linkage with observation matrices of multiple sizes.
     for n in xrange(2, 10):
         X = np.random.rand(n, 4)
         Y = pdist(X)
         Z = linkage(Y)
         assert_equal(num_obs_linkage(Z), n)
Example #20
 def test_vector(self):
     xs = [0, 1, 2]
     ys = np.array([[0, 1], [1, 0], [2, 1]])
     P = BarycentricInterpolator(xs, ys)
     Pi = [BarycentricInterpolator(xs, ys[:, i]) for i in xrange(ys.shape[1])]
     test_xs = np.linspace(-1, 3, 100)
     assert_almost_equal(P(test_xs), np.rollaxis(np.asarray([p(test_xs) for p in Pi]), -1))
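The test above checks that vector-valued interpolation matches per-component interpolation. The same idea in a minimal sketch:

    import numpy as np
    from scipy.interpolate import BarycentricInterpolator

    P = BarycentricInterpolator([0, 1, 2], np.array([[0, 1], [1, 0], [2, 1]]))
    print(P(0.5))  # one value per y-component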
Example #21
 def test_call(self):
     poly = []
     for n in xrange(5):
         poly.extend([x.strip() for x in
             ("""
             orth.jacobi(%(n)d,0.3,0.9)
             orth.sh_jacobi(%(n)d,0.3,0.9)
             orth.genlaguerre(%(n)d,0.3)
             orth.laguerre(%(n)d)
             orth.hermite(%(n)d)
             orth.hermitenorm(%(n)d)
             orth.gegenbauer(%(n)d,0.3)
             orth.chebyt(%(n)d)
             orth.chebyu(%(n)d)
             orth.chebyc(%(n)d)
             orth.chebys(%(n)d)
             orth.sh_chebyt(%(n)d)
             orth.sh_chebyu(%(n)d)
             orth.legendre(%(n)d)
             orth.sh_legendre(%(n)d)
             """ % dict(n=n)).split()
         ])
     olderr = np.seterr(all='ignore')
     try:
         for pstr in poly:
             p = eval(pstr)
             assert_almost_equal(p(0.315), np.poly1d(p)(0.315), err_msg=pstr)
     finally:
         np.seterr(**olderr)
Example #22
 def test_is_valid_linkage_4_and_up(self):
     # Tests is_valid_linkage(Z) on linkage on observation sets between
     # sizes 4 and 15 (step size 3).
     for i in xrange(4, 15, 3):
         y = np.random.rand(i*(i-1)//2)
         Z = linkage(y)
         assert_(is_valid_linkage(Z) == True)
Example #23
    def test_concurrent_ok(self):
        f = lambda t, y: 1.0

        for k in xrange(3):
            for sol in ('vode', 'zvode', 'lsoda', 'dopri5', 'dop853'):
                r = ode(f).set_integrator(sol)
                r.set_initial_value(0, 0)

                r2 = ode(f).set_integrator(sol)
                r2.set_initial_value(0, 0)

                r.integrate(r.t + 0.1)
                r2.integrate(r2.t + 0.1)
                r2.integrate(r2.t + 0.1)

                assert_allclose(r.y, 0.1)
                assert_allclose(r2.y, 0.2)

            for sol in ('dopri5', 'dop853'):
                r = ode(f).set_integrator(sol)
                r.set_initial_value(0, 0)

                r2 = ode(f).set_integrator(sol)
                r2.set_initial_value(0, 0)

                r.integrate(r.t + 0.1)
                r.integrate(r.t + 0.1)
                r2.integrate(r2.t + 0.1)
                r.integrate(r.t + 0.1)
                r2.integrate(r2.t + 0.1)

                assert_allclose(r.y, 0.3)
                assert_allclose(r2.y, 0.2)
Example #24
    def _get_row_ranges(self, rows, col_slice):
        """
        Fast path for indexing in the case where column index is slice.

        This gains performance improvement over brute force by more
        efficient skipping of zeros, by accessing the elements
        column-wise in order.

        Parameters
        ----------
        rows : sequence or xrange
            Rows indexed. If xrange, must be within valid bounds.
        col_slice : slice
            Columns indexed

        """
        j_start, j_stop, j_stride = col_slice.indices(self.shape[1])
        col_range = xrange(j_start, j_stop, j_stride)
        nj = len(col_range)
        new = lil_matrix((len(rows), nj), dtype=self.dtype)

        _csparsetools.lil_get_row_ranges(self.shape[0], self.shape[1],
                                         self.rows, self.data,
                                         new.rows, new.data,
                                         rows,
                                         j_start, j_stop, j_stride, nj)

        return new
Example #25
    def _major_slice(self, idx, copy=False):
        """Index along the major axis where idx is a slice object.
        """
        if idx == slice(None):
            return self.copy() if copy else self

        M, N = self._swap(self.shape)
        start, stop, step = idx.indices(M)
        M = len(xrange(start, stop, step))
        new_shape = self._swap((M, N))
        if M == 0:
            return self.__class__(new_shape)

        row_nnz = np.diff(self.indptr)
        idx_dtype = self.indices.dtype
        res_indptr = np.zeros(M+1, dtype=idx_dtype)
        np.cumsum(row_nnz[idx], out=res_indptr[1:])

        if step == 1:
            all_idx = slice(self.indptr[start], self.indptr[stop])
            res_indices = np.array(self.indices[all_idx], copy=copy)
            res_data = np.array(self.data[all_idx], copy=copy)
        else:
            nnz = res_indptr[-1]
            res_indices = np.empty(nnz, dtype=idx_dtype)
            res_data = np.empty(nnz, dtype=self.dtype)
            csr_row_slice(start, stop, step, self.indptr, self.indices,
                          self.data, res_indices, res_data)

        return self.__class__((res_data, res_indices, res_indptr),
                              shape=new_shape, copy=False)
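_swap lets the same code serve both CSR (major axis = rows) and CSC (major axis = columns). A sketch of the public slicing syntax that reaches this fast path in recent SciPy versions:

    import numpy as np
    from scipy.sparse import csr_matrix

    A = csr_matrix(np.arange(16).reshape(4, 4))
    B = A[::2]  # every other row, handled along the major axis
    print(B.toarray())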
Example #26
    def _check_dot(self, jac_cls, complex=False, tol=1e-6, **kw):
        np.random.seed(123)

        N = 7

        def rand(*a):
            q = np.random.rand(*a)
            if complex:
                q = q + 1j*np.random.rand(*a)
            return q

        def assert_close(a, b, msg):
            d = abs(a - b).max()
            f = tol + abs(b).max()*tol
            if d > f:
                raise AssertionError('%s: err %g' % (msg, d))

        self.A = rand(N, N)

        # initialize
        x0 = np.random.rand(N)
        jac = jac_cls(**kw)
        jac.setup(x0, self._func(x0), self._func)

        # check consistency
        for k in xrange(2*N):
            v = rand(N)

            if hasattr(jac, '__array__'):
                Jd = np.array(jac)
                if hasattr(jac, 'solve'):
                    Gv = jac.solve(v)
                    Gv2 = np.linalg.solve(Jd, v)
                    assert_close(Gv, Gv2, 'solve vs array')
                if hasattr(jac, 'rsolve'):
                    Gv = jac.rsolve(v)
                    Gv2 = np.linalg.solve(Jd.T.conj(), v)
                    assert_close(Gv, Gv2, 'rsolve vs array')
                if hasattr(jac, 'matvec'):
                    Jv = jac.matvec(v)
                    Jv2 = np.dot(Jd, v)
                    assert_close(Jv, Jv2, 'dot vs array')
                if hasattr(jac, 'rmatvec'):
                    Jv = jac.rmatvec(v)
                    Jv2 = np.dot(Jd.T.conj(), v)
                    assert_close(Jv, Jv2, 'rmatvec vs array')

            if hasattr(jac, 'matvec') and hasattr(jac, 'solve'):
                Jv = jac.matvec(v)
                Jv2 = jac.solve(jac.matvec(Jv))
                assert_close(Jv, Jv2, 'dot vs solve')

            if hasattr(jac, 'rmatvec') and hasattr(jac, 'rsolve'):
                Jv = jac.rmatvec(v)
                Jv2 = jac.rmatvec(jac.rsolve(Jv))
                assert_close(Jv, Jv2, 'rmatvec vs rsolve')

            x = rand(N)
            jac.update(x, self._func(x))
Example #27
    def test_more_barycentric_transforms(self):
        # Triangulate some "nasty" grids

        eps = np.finfo(float).eps

        npoints = {2: 70, 3: 11, 4: 5, 5: 3}

        _is_32bit_platform = np.intp(0).itemsize < 8
        for ndim in xrange(2, 6):
            # Generate a uniform grid in the n-d unit cube
            x = np.linspace(0, 1, npoints[ndim])
            grid = np.c_[list(map(np.ravel, np.broadcast_arrays(*np.ix_(*([x]*ndim)))))].T

            err_msg = "ndim=%d" % ndim

            # Check using regular grid
            tri = qhull.Delaunay(grid)
            self._check_barycentric_transforms(tri, err_msg=err_msg,
                                               unit_cube=True)

            # Check with eps-perturbations
            np.random.seed(1234)
            m = (np.random.rand(grid.shape[0]) < 0.2)
            grid[m,:] += 2*eps*(np.random.rand(*grid[m,:].shape) - 0.5)

            tri = qhull.Delaunay(grid)
            self._check_barycentric_transforms(tri, err_msg=err_msg,
                                               unit_cube=True,
                                               unit_cube_tol=2*eps)

            # Check with duplicated data
            tri = qhull.Delaunay(np.r_[grid, grid])
            self._check_barycentric_transforms(tri, err_msg=err_msg,
                                               unit_cube=True,
                                               unit_cube_tol=2*eps)

            if not _is_32bit_platform:
                # test numerically unstable, and reported to fail on 32-bit
                # installs

                # Check with larger perturbations
                np.random.seed(4321)
                m = (np.random.rand(grid.shape[0]) < 0.2)
                grid[m,:] += 1000*eps*(np.random.rand(*grid[m,:].shape) - 0.5)

                tri = qhull.Delaunay(grid)
                self._check_barycentric_transforms(tri, err_msg=err_msg,
                                                   unit_cube=True,
                                                   unit_cube_tol=1500*eps)

                # Check with yet larger perturbations
                np.random.seed(4321)
                m = (np.random.rand(grid.shape[0]) < 0.2)
                grid[m,:] += 1e6*eps*(np.random.rand(*grid[m,:].shape) - 0.5)

                tri = qhull.Delaunay(grid)
                self._check_barycentric_transforms(tri, err_msg=err_msg,
                                                   unit_cube=True,
                                                   unit_cube_tol=1e7*eps)
Example #28
    def test_incremental(self):
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=DeprecationWarning)
            P = PiecewisePolynomial([self.xi[0]], [self.yi[0]], 3)

        for i in xrange(1, len(self.xi)):
            P.append(self.xi[i], self.yi[i], 3)
        assert_almost_equal(P(self.test_xs), self.spline_ys)
Example #29
 def test_inverse(self):
     for n in xrange(1, 10):
         a = hilbert(n)
         b = invhilbert(n)
         # The Hilbert matrix is increasingly badly conditioned,
         # so take that into account in the test
         c = cond(a)
         assert_allclose(a.dot(b), eye(n), atol=1e-15*c, rtol=1e-15*c)
Example #30
 def hermite_recursion(n, nodes):
     H = np.zeros((n, nodes.size))
     H[0,:] = np.pi**(-0.25) * np.exp(-0.5*nodes**2)
     if n > 1:
         H[1,:] = sqrt(2.0) * nodes * H[0,:]
         for k in xrange(2, n):
             H[k,:] = sqrt(2.0/k) * nodes * H[k-1,:] - sqrt((k-1.0)/k) * H[k-2,:]
     return H
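The snippet assumes sqrt and xrange from its enclosing module. A self-contained Python 3 rendering of the same three-term recurrence (the function name is illustrative):

    import numpy as np

    def hermite_table(n, nodes):
        # Rows k = 0..n-1 of the Hermite-function table, via
        # H[k] = sqrt(2/k)*x*H[k-1] - sqrt((k-1)/k)*H[k-2].
        H = np.zeros((n, nodes.size))
        H[0, :] = np.pi ** (-0.25) * np.exp(-0.5 * nodes ** 2)
        if n > 1:
            H[1, :] = np.sqrt(2.0) * nodes * H[0, :]
            for k in range(2, n):
                H[k, :] = (np.sqrt(2.0 / k) * nodes * H[k - 1, :]
                           - np.sqrt((k - 1.0) / k) * H[k - 2, :])
        return H

    print(hermite_table(3, np.linspace(-1.0, 1.0, 5)).shape)  # (3, 5)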
Example #31
    def solve(self):
        """
        Runs the DifferentialEvolutionSolver.

        Returns
        -------
        res : OptimizeResult
            The optimization result represented as a ``OptimizeResult`` object.
            Important attributes are: ``x`` the solution array, ``success`` a
            Boolean flag indicating if the optimizer exited successfully and
            ``message`` which describes the cause of the termination. See
            `OptimizeResult` for a description of other attributes.  If `polish`
            was employed, and a lower minimum was obtained by the polishing,
            then OptimizeResult also contains the ``jac`` attribute.
        """
        nit, warning_flag = 0, False

        # dictionary that holds standard status messages of optimizers
        status_message = _status_message['success']

        # The population may have just been initialized (all entries are
        # np.inf). If it has you have to calculate the initial energies.
        # Although this is also done in the evolve generator it's possible
        # that someone can set maxiter=0, at which point we still want the
        # initial energies to be calculated (the following loop isn't run).

        # np.all is True only if every population energy is still np.inf
        if self.maxiter == 0:
            if np.all(np.isinf(self.population_energies)):
                if self.disp:
                    print("Calculating initial energies when maxiter = 0")
                self._calculate_population_energies()

        # do the optimisation
        for nit in xrange(1, self.maxiter + 1):
            if self.disp:
                print("iter: ", nit)
            # evolve the population by a generation
            try:
                next(self)
            except StopIteration:
                warning_flag = True
                status_message = _status_message['maxfev']
                break

            print("differential_evolution step %d: f(x)= %g" %
                  (nit, self.population_energies[0]))

            # Callback- and tolerance-based termination checks are disabled
            # in this variant; the loop runs until maxiter or StopIteration.
            # Migrate individuals between populations at each generation.
            self.migration()

            print("Starting next iter")

        else:
            status_message = _status_message['maxiter']
            warning_flag = True

        DE_result = OptimizeResult(x=self.x,
                                   fun=self.population_energies[0],
                                   nfev=self._nfev,
                                   nit=nit,
                                   message=status_message,
                                   success=(warning_flag is not True))

        print("done iters")
        if self.polish:
            print("performing final polishing")
            result = minimize(self.func,
                              np.copy(DE_result.x),
                              method='L-BFGS-B',
                              bounds=self.limits.T,
                              args=self.args)

            self._nfev += result.nfev
            DE_result.nfev = self._nfev

            if result.fun < DE_result.fun:
                DE_result.fun = result.fun
                DE_result.x = result.x
                DE_result.jac = result.jac
                # to keep internal state consistent
                self.population_energies[0] = result.fun
                self.population[0] = self._unscale_parameters(result.x)

        return DE_result
Example #32
def create_individual(data):
    return [random.randint(0, 1) for _ in xrange(len(data))]
Example #33
 def _get_sliceXslice(self, row, col):
     row = xrange(*row.indices(self.shape[0]))
     return self._get_row_ranges(row, col)
Example #34
def romb(y, dx=1.0, axis=-1, show=False):
    """
    Romberg integration using samples of a function.

    Parameters
    ----------
    y : array_like
        A vector of ``2**k + 1`` equally-spaced samples of a function.
    dx : float, optional
        The sample spacing. Default is 1.
    axis : int, optional
        The axis along which to integrate. Default is -1 (last axis).
    show : bool, optional
        When `y` is a single 1-D array, then if this argument is True
        print the table showing Richardson extrapolation from the
        samples. Default is False.

    Returns
    -------
    romb : ndarray
        The integrated result for `axis`.

    See also
    --------
    quad : adaptive quadrature using QUADPACK
    romberg : adaptive Romberg quadrature
    quadrature : adaptive Gaussian quadrature
    fixed_quad : fixed-order Gaussian quadrature
    dblquad : double integrals
    tplquad : triple integrals
    simps : integrators for sampled data
    cumtrapz : cumulative integration for sampled data
    ode : ODE integrators
    odeint : ODE integrators

    Examples
    --------
    >>> from scipy import integrate
    >>> x = np.arange(10, 14.25, 0.25)
    >>> y = np.arange(3, 12)

    >>> integrate.romb(y)
    56.0

    >>> y = np.sin(np.power(x, 2.5))
    >>> integrate.romb(y)
    -0.742561336672229

    >>> integrate.romb(y, show=True)
    Richardson Extrapolation Table for Romberg Integration
    ====================================================================
    -0.81576
    4.63862  6.45674
    -1.10581 -3.02062 -3.65245
    -2.57379 -3.06311 -3.06595 -3.05664
    -1.34093 -0.92997 -0.78776 -0.75160 -0.74256
    ====================================================================
    -0.742561336672229
    """
    y = np.asarray(y)
    nd = len(y.shape)
    Nsamps = y.shape[axis]
    Ninterv = Nsamps - 1
    n = 1
    k = 0
    while n < Ninterv:
        n <<= 1
        k += 1
    if n != Ninterv:
        raise ValueError("Number of samples must be one plus a "
                         "non-negative power of 2.")

    R = {}
    slice_all = (slice(None), ) * nd
    slice0 = tupleset(slice_all, axis, 0)
    slicem1 = tupleset(slice_all, axis, -1)
    h = Ninterv * np.asarray(dx, dtype=float)
    R[(0, 0)] = (y[slice0] + y[slicem1]) / 2.0 * h
    slice_R = slice_all
    start = stop = step = Ninterv
    for i in xrange(1, k + 1):
        start >>= 1
        slice_R = tupleset(slice_R, axis, slice(start, stop, step))
        step >>= 1
        R[(i, 0)] = 0.5 * (R[(i - 1, 0)] + h * y[slice_R].sum(axis=axis))
        for j in xrange(1, i + 1):
            prev = R[(i, j - 1)]
            R[(i, j)] = prev + (prev - R[(i - 1, j - 1)]) / ((1 <<
                                                              (2 * j)) - 1)
        h /= 2.0

    if show:
        if not np.isscalar(R[(0, 0)]):
            print("*** Printing table only supported for integrals" +
                  " of a single data set.")
        else:
            try:
                precis = show[0]
            except (TypeError, IndexError):
                precis = 5
            try:
                width = show[1]
            except (TypeError, IndexError):
                width = 8
            formstr = "%%%d.%df" % (width, precis)

            title = "Richardson Extrapolation Table for Romberg Integration"
            print("", title.center(68), "=" * 68, sep="\n", end="\n")
            for i in xrange(k + 1):
                for j in xrange(i + 1):
                    print(formstr % R[(i, j)], end=" ")
                print()
            print("=" * 68)
            print()

    return R[(k, k)]
Example #35
def quadrature(func,
               a,
               b,
               args=(),
               tol=1.49e-8,
               rtol=1.49e-8,
               maxiter=50,
               vec_func=True,
               miniter=1):
    """
    Compute a definite integral using fixed-tolerance Gaussian quadrature.

    Integrate `func` from `a` to `b` using Gaussian quadrature
    with absolute tolerance `tol`.

    Parameters
    ----------
    func : function
        A Python function or method to integrate.
    a : float
        Lower limit of integration.
    b : float
        Upper limit of integration.
    args : tuple, optional
        Extra arguments to pass to function.
    tol, rtol : float, optional
        Iteration stops when error between last two iterates is less than
        `tol` OR the relative change is less than `rtol`.
    maxiter : int, optional
        Maximum order of Gaussian quadrature.
    vec_func : bool, optional
        True or False if func handles arrays as arguments (is
        a "vector" function). Default is True.
    miniter : int, optional
        Minimum order of Gaussian quadrature.

    Returns
    -------
    val : float
        Gaussian quadrature approximation (within tolerance) to integral.
    err : float
        Difference between last two estimates of the integral.

    See also
    --------
    romberg: adaptive Romberg quadrature
    fixed_quad: fixed-order Gaussian quadrature
    quad: adaptive quadrature using QUADPACK
    dblquad: double integrals
    tplquad: triple integrals
    romb: integrator for sampled data
    simps: integrator for sampled data
    cumtrapz: cumulative integration for sampled data
    ode: ODE integrator
    odeint: ODE integrator

    Examples
    --------
    >>> from scipy import integrate
    >>> f = lambda x: x**8
    >>> integrate.quadrature(f, 0.0, 1.0)
    (0.11111111111111106, 4.163336342344337e-17)
    >>> print(1/9.0)  # analytical result
    0.1111111111111111

    >>> integrate.quadrature(np.cos, 0.0, np.pi/2)
    (0.9999999999999536, 3.9611425250996035e-11)
    >>> np.sin(np.pi/2)-np.sin(0)  # analytical result
    1.0

    """
    if not isinstance(args, tuple):
        args = (args, )
    vfunc = vectorize1(func, args, vec_func=vec_func)
    val = np.inf
    err = np.inf
    maxiter = max(miniter + 1, maxiter)
    for n in xrange(miniter, maxiter + 1):
        newval = fixed_quad(vfunc, a, b, (), n)[0]
        err = abs(newval - val)
        val = newval

        if err < tol or err < rtol * abs(val):
            break
    else:
        warnings.warn(
            "maxiter (%d) exceeded. Latest difference = %e" % (maxiter, err),
            AccuracyWarning)
    return val, err
Example #36
def scalar_search_wolfe2(phi, derphi=None, phi0=None,
                         old_phi0=None, derphi0=None,
                         c1=1e-4, c2=0.9, amax=None,
                         extra_condition=None, maxiter=10):
    """Find alpha that satisfies strong Wolfe conditions.

    alpha > 0 is assumed to be a descent direction.

    Parameters
    ----------
    phi : callable f(x)
        Objective scalar function.
    derphi : callable f'(x), optional
        Objective function derivative (can be None)
    phi0 : float, optional
        Value of phi at s=0
    old_phi0 : float, optional
        Value of phi at previous point
    derphi0 : float, optional
        Value of derphi at s=0
    c1 : float, optional
        Parameter for Armijo condition rule.
    c2 : float, optional
        Parameter for curvature condition rule.
    amax : float, optional
        Maximum step size
    extra_condition : callable, optional
        A callable of the form ``extra_condition(alpha, phi_value)``
        returning a boolean. The line search accepts the value
        of ``alpha`` only if this callable returns ``True``.
        If the callable returns ``False`` for the step length,
        the algorithm will continue with new iterates.
        The callable is only called for iterates satisfying
        the strong Wolfe conditions.
    maxiter : int, optional
        Maximum number of iterations to perform

    Returns
    -------
    alpha_star : float or None
        Best alpha, or None if the line search algorithm did not converge.
    phi_star : float
        phi at alpha_star
    phi0 : float
        phi at 0
    derphi_star : float or None
        derphi at alpha_star, or None if the line search algorithm
        did not converge.

    Notes
    -----
    Uses the line search algorithm to enforce strong Wolfe
    conditions.  See Wright and Nocedal, 'Numerical Optimization',
    1999, pg. 59-60.

    For the zoom phase it uses an algorithm by [...].

    """

    if phi0 is None:
        phi0 = phi(0.)

    if derphi0 is None and derphi is not None:
        derphi0 = derphi(0.)

    alpha0 = 0
    if old_phi0 is not None and derphi0 != 0:
        alpha1 = min(1.0, 1.01*2*(phi0 - old_phi0)/derphi0)
    else:
        alpha1 = 1.0

    if alpha1 < 0:
        alpha1 = 1.0

    phi_a1 = phi(alpha1)
    #derphi_a1 = derphi(alpha1)  evaluated below

    phi_a0 = phi0
    derphi_a0 = derphi0

    if extra_condition is None:
        extra_condition = lambda alpha, phi: True

    for i in xrange(maxiter):
        if alpha1 == 0 or (amax is not None and alpha0 == amax):
            # alpha1 == 0: This shouldn't happen. Perhaps the increment has
            # slipped below machine precision?
            alpha_star = None
            phi_star = phi0
            phi0 = old_phi0
            derphi_star = None

            if alpha1 == 0:
                msg = 'Rounding errors prevent the line search from converging'
            else:
                msg = "The line search algorithm could not find a solution " + \
                      "less than or equal to amax: %s" % amax

            warn(msg, LineSearchWarning)
            break

        if (phi_a1 > phi0 + c1 * alpha1 * derphi0) or \
           ((phi_a1 >= phi_a0) and (i > 1)):
            alpha_star, phi_star, derphi_star = \
                        _zoom(alpha0, alpha1, phi_a0,
                              phi_a1, derphi_a0, phi, derphi,
                              phi0, derphi0, c1, c2, extra_condition)
            break

        derphi_a1 = derphi(alpha1)
        if (abs(derphi_a1) <= -c2*derphi0):
            if extra_condition(alpha1, phi_a1):
                alpha_star = alpha1
                phi_star = phi_a1
                derphi_star = derphi_a1
                break

        if (derphi_a1 >= 0):
            alpha_star, phi_star, derphi_star = \
                        _zoom(alpha1, alpha0, phi_a1,
                              phi_a0, derphi_a1, phi, derphi,
                              phi0, derphi0, c1, c2, extra_condition)
            break

        alpha2 = 2 * alpha1  # increase by factor of two on each iteration
        if amax is not None:
            alpha2 = min(alpha2, amax)
        alpha0 = alpha1
        alpha1 = alpha2
        phi_a0 = phi_a1
        phi_a1 = phi(alpha1)
        derphi_a0 = derphi_a1

    else:
        # stopping test maxiter reached
        alpha_star = alpha1
        phi_star = phi_a1
        derphi_star = None
        warn('The line search algorithm did not converge', LineSearchWarning)

    return alpha_star, phi_star, phi0, derphi_star
Example #37
def nonlin_solve(F,
                 x0,
                 jacobian='krylov',
                 iter=None,
                 verbose=False,
                 maxiter=None,
                 f_tol=None,
                 f_rtol=None,
                 x_tol=None,
                 x_rtol=None,
                 tol_norm=None,
                 line_search='armijo',
                 callback=None,
                 full_output=False,
                 raise_exception=True):
    """
    Find a root of a function, in a way suitable for large-scale problems.

    Parameters
    ----------
    %(params_basic)s
    jacobian : Jacobian
        A Jacobian approximation: `Jacobian` object or something that
        `asjacobian` can transform to one. Alternatively, a string specifying
        which of the builtin Jacobian approximations to use:

            krylov, broyden1, broyden2, anderson
            diagbroyden, linearmixing, excitingmixing

    %(params_extra)s
    full_output : bool
        If true, returns a dictionary `info` containing convergence
        information.
    raise_exception : bool
        If True, a `NoConvergence` exception is raised if no solution is found.

    See Also
    --------
    asjacobian, Jacobian

    Notes
    -----
    This algorithm implements the inexact Newton method, with
    backtracking or full line searches. Several Jacobian
    approximations are available, including Krylov and Quasi-Newton
    methods.

    References
    ----------
    .. [KIM] C. T. Kelley, \"Iterative Methods for Linear and Nonlinear
       Equations\". Society for Industrial and Applied Mathematics. (1995)
       https://archive.siam.org/books/kelley/fr16/

    """
    # Can't use default parameters because it's being explicitly passed as None
    # from the calling function, so we need to set it here.
    tol_norm = maxnorm if tol_norm is None else tol_norm
    condition = TerminationCondition(f_tol=f_tol,
                                     f_rtol=f_rtol,
                                     x_tol=x_tol,
                                     x_rtol=x_rtol,
                                     iter=iter,
                                     norm=tol_norm)

    x0 = _as_inexact(x0)
    func = lambda z: _as_inexact(F(_array_like(z, x0))).flatten()
    x = x0.flatten()

    dx = np.inf
    Fx = func(x)
    Fx_norm = norm(Fx)

    jacobian = asjacobian(jacobian)
    jacobian.setup(x.copy(), Fx, func)

    if maxiter is None:
        if iter is not None:
            maxiter = iter + 1
        else:
            maxiter = 100 * (x.size + 1)

    if line_search is True:
        line_search = 'armijo'
    elif line_search is False:
        line_search = None

    if line_search not in (None, 'armijo', 'wolfe'):
        raise ValueError("Invalid line search")

    # Solver tolerance selection
    gamma = 0.9
    eta_max = 0.9999
    eta_threshold = 0.1
    eta = 1e-3

    for n in xrange(maxiter):
        status = condition.check(Fx, x, dx)
        if status:
            break

        # The tolerance, as computed for scipy.sparse.linalg.* routines
        tol = min(eta, eta * Fx_norm)
        dx = -jacobian.solve(Fx, tol=tol)

        if norm(dx) == 0:
            raise ValueError("Jacobian inversion yielded zero vector. "
                             "This indicates a bug in the Jacobian "
                             "approximation.")

        # Line search, or Newton step
        if line_search:
            s, x, Fx, Fx_norm_new = _nonlin_line_search(
                func, x, Fx, dx, line_search)
        else:
            s = 1.0
            x = x + dx
            Fx = func(x)
            Fx_norm_new = norm(Fx)

        jacobian.update(x.copy(), Fx)

        if callback:
            callback(x, Fx)

        # Adjust forcing parameters for inexact methods
        eta_A = gamma * Fx_norm_new**2 / Fx_norm**2
        if gamma * eta**2 < eta_threshold:
            eta = min(eta_max, eta_A)
        else:
            eta = min(eta_max, max(eta_A, gamma * eta**2))

        Fx_norm = Fx_norm_new

        # Print status
        if verbose:
            sys.stdout.write("%d:  |F(x)| = %g; step %g\n" %
                             (n, tol_norm(Fx), s))
            sys.stdout.flush()
    else:
        if raise_exception:
            raise NoConvergence(_array_like(x, x0))
        else:
            status = 2

    if full_output:
        info = {
            'nit': condition.iteration,
            'fun': Fx,
            'status': status,
            'success': status == 1,
            'message': {
                1: 'A solution was found at the specified '
                'tolerance.',
                2: 'The maximum number of iterations allowed '
                'has been reached.'
            }[status]
        }
        return _array_like(x, x0), info
    else:
        return _array_like(x, x0)
Example #38
    def test_more_barycentric_transforms(self):
        # Triangulate some "nasty" grids

        eps = np.finfo(float).eps

        npoints = {2: 70, 3: 11, 4: 5, 5: 3}

        _is_32bit_platform = np.intp(0).itemsize < 8
        for ndim in xrange(2, 6):
            # Generate a uniform grid in the n-d unit cube
            x = np.linspace(0, 1, npoints[ndim])
            grid = np.c_[list(
                map(np.ravel, np.broadcast_arrays(*np.ix_(*([x] * ndim)))))].T

            err_msg = "ndim=%d" % ndim

            # Check using regular grid
            tri = qhull.Delaunay(grid)
            self._check_barycentric_transforms(tri,
                                               err_msg=err_msg,
                                               unit_cube=True)

            # Check with eps-perturbations
            np.random.seed(1234)
            m = (np.random.rand(grid.shape[0]) < 0.2)
            grid[m, :] += 2 * eps * (np.random.rand(*grid[m, :].shape) - 0.5)

            tri = qhull.Delaunay(grid)
            self._check_barycentric_transforms(tri,
                                               err_msg=err_msg,
                                               unit_cube=True,
                                               unit_cube_tol=2 * eps)

            # Check with duplicated data
            tri = qhull.Delaunay(np.r_[grid, grid])
            self._check_barycentric_transforms(tri,
                                               err_msg=err_msg,
                                               unit_cube=True,
                                               unit_cube_tol=2 * eps)

            if not _is_32bit_platform:
                # test numerically unstable, and reported to fail on 32-bit
                # installs

                # Check with larger perturbations
                np.random.seed(4321)
                m = (np.random.rand(grid.shape[0]) < 0.2)
                grid[m, :] += 1000 * eps * (np.random.rand(*grid[m, :].shape) -
                                            0.5)

                tri = qhull.Delaunay(grid)
                self._check_barycentric_transforms(tri,
                                                   err_msg=err_msg,
                                                   unit_cube=True,
                                                   unit_cube_tol=1500 * eps)

                # Check with yet larger perturbations
                np.random.seed(4321)
                m = (np.random.rand(grid.shape[0]) < 0.2)
                grid[m, :] += 1e6 * eps * (np.random.rand(*grid[m, :].shape) -
                                           0.5)

                tri = qhull.Delaunay(grid)
                self._check_barycentric_transforms(tri,
                                                   err_msg=err_msg,
                                                   unit_cube=True,
                                                   unit_cube_tol=1e7 * eps)
Example #39
def random(m, n, density=0.01, format='coo', dtype=None,
           random_state=None, data_rvs=None):
    """Generate a sparse matrix of the given shape and density with randomly
    distributed values.

    Parameters
    ----------
    m, n : int
        shape of the matrix
    density : real, optional
        density of the generated matrix: density equal to one means a full
        matrix, density of 0 means a matrix with no non-zero items.
    format : str, optional
        sparse matrix format.
    dtype : dtype, optional
        type of the returned matrix values.
    random_state : {numpy.random.RandomState, int}, optional
        Random number generator or random seed. If not given, the singleton
        numpy.random will be used.  This random state will be used
        for sampling the sparsity structure, but not necessarily for sampling
        the values of the structurally nonzero entries of the matrix.
    data_rvs : callable, optional
        Samples a requested number of random values.
        This function should take a single argument specifying the length
        of the ndarray that it will return.  The structurally nonzero entries
        of the sparse random matrix will be taken from the array sampled
        by this function.  By default, uniform [0, 1) random values will be
        sampled using the same random state as is used for sampling
        the sparsity structure.

    Examples
    --------
    >>> from scipy.sparse import construct
    >>> from scipy import stats
    >>> class CustomRandomState(object):
    ...     def randint(self, k):
    ...         i = np.random.randint(k)
    ...         return i - i % 2
    >>> rs = CustomRandomState()
    >>> rvs = stats.poisson(25, loc=10).rvs
    >>> S = construct.random(3, 4, density=0.25, random_state=rs, data_rvs=rvs)
    >>> S.A
    array([[ 36.,   0.,  33.,   0.],
           [  0.,   0.,   0.,   0.],
           [  0.,   0.,  36.,   0.]])

    Notes
    -----
    Only float types are supported for now.
    """
    if density < 0 or density > 1:
        raise ValueError("density expected to be 0 <= density <= 1")
    if dtype and (dtype not in [np.float32, np.float64, np.longdouble]):
        raise NotImplementedError("type %s not supported" % dtype)

    mn = m * n

    tp = np.intc
    if mn > np.iinfo(tp).max:
        tp = np.int64

    if mn > np.iinfo(tp).max:
        msg = """\
Trying to generate a random sparse matrix such as the product of dimensions is
greater than %d - this is not supported on this machine
"""
        raise ValueError(msg % np.iinfo(tp).max)

    # Number of non zero values
    k = int(density * m * n)

    if random_state is None:
        random_state = np.random
    elif isinstance(random_state, (int, np.integer)):
        random_state = np.random.RandomState(random_state)
    if data_rvs is None:
        data_rvs = random_state.rand

    # Use the algorithm from python's random.sample for k < mn/3.
    if mn < 3*k:
        # We should use this line, but choice is only available in numpy >= 1.7
        # ind = random_state.choice(mn, size=k, replace=False)
        ind = random_state.permutation(mn)[:k]
    else:
        ind = np.empty(k, dtype=tp)
        selected = set()
        for i in xrange(k):
            j = random_state.randint(mn)
            while j in selected:
                j = random_state.randint(mn)
            selected.add(j)
            ind[i] = j

    j = np.floor(ind * 1. / m).astype(tp)
    i = (ind - j * m).astype(tp)
    vals = data_rvs(k).astype(dtype)
    return coo_matrix((vals, (i, j)), shape=(m, n)).asformat(format)
Example #40
def gcrotmk(A,
            b,
            x0=None,
            tol=1e-5,
            maxiter=1000,
            M=None,
            callback=None,
            m=20,
            k=None,
            CU=None,
            discard_C=False,
            truncate='oldest',
            atol=None):
    """
    Solve a matrix equation using flexible GCROT(m,k) algorithm.

    Parameters
    ----------
    A : {sparse matrix, dense matrix, LinearOperator}
        The real or complex N-by-N matrix of the linear system.
    b : {array, matrix}
        Right hand side of the linear system. Has shape (N,) or (N,1).
    x0  : {array, matrix}
        Starting guess for the solution.
    tol, atol : float, optional
        Tolerances for convergence, ``norm(residual) <= max(tol*norm(b), atol)``.
        The default for ``atol`` is `tol`.

        .. warning::

           The default value for `atol` will be changed in a future release.
           For future compatibility, specify `atol` explicitly.
    maxiter : int, optional
        Maximum number of iterations.  Iteration will stop after maxiter
        steps even if the specified tolerance has not been achieved.
    M : {sparse matrix, dense matrix, LinearOperator}, optional
        Preconditioner for A.  The preconditioner should approximate the
        inverse of A. gcrotmk is a 'flexible' algorithm and the preconditioner
        can vary from iteration to iteration. Effective preconditioning
        dramatically improves the rate of convergence, which implies that
        fewer iterations are needed to reach a given error tolerance.
    callback : function, optional
        User-supplied function to call after each iteration.  It is called
        as callback(xk), where xk is the current solution vector.
    m : int, optional
        Number of inner FGMRES iterations per each outer iteration.
        Default: 20
    k : int, optional
        Number of vectors to carry between inner FGMRES iterations.
        According to [2]_, good values are around m.
        Default: m
    CU : list of tuples, optional
        List of tuples ``(c, u)`` which contain the columns of the matrices
        C and U in the GCROT(m,k) algorithm. For details, see [2]_.
        The list given and vectors contained in it are modified in-place.
        If not given, start from empty matrices. The ``c`` elements in the
        tuples can be ``None``, in which case the vectors are recomputed
        via ``c = A u`` on start and orthogonalized as described in [3]_.
    discard_C : bool, optional
        Discard the C-vectors at the end. Useful if recycling Krylov subspaces
        for different linear systems.
    truncate : {'oldest', 'smallest'}, optional
        Truncation scheme to use. Drop: oldest vectors, or vectors with
        smallest singular values using the scheme discussed in [1,2].
        See [2]_ for detailed comparison.
        Default: 'oldest'

    Returns
    -------
    x : array or matrix
        The solution found.
    info : int
        Provides convergence information:

        * 0  : successful exit
        * >0 : convergence to tolerance not achieved, number of iterations

    References
    ----------
    .. [1] E. de Sturler, ''Truncation strategies for optimal Krylov subspace
           methods'', SIAM J. Numer. Anal. 36, 864 (1999).
    .. [2] J.E. Hicken and D.W. Zingg, ''A simplified and flexible variant
           of GCROT for solving nonsymmetric linear systems'',
           SIAM J. Sci. Comput. 32, 172 (2010).
    .. [3] M.L. Parks, E. de Sturler, G. Mackey, D.D. Johnson, S. Maiti,
           ''Recycling Krylov subspaces for sequences of linear systems'',
           SIAM J. Sci. Comput. 28, 1651 (2006).

    """
    A, M, x, b, postprocess = make_system(A, M, x0, b)

    if not np.isfinite(b).all():
        raise ValueError("RHS must contain only finite numbers")

    if truncate not in ('oldest', 'smallest'):
        raise ValueError("Invalid value for 'truncate': %r" % (truncate, ))

    if atol is None:
        warnings.warn(
            "scipy.sparse.linalg.gcrotmk called without specifying `atol`. "
            "The default value will change in the future. To preserve "
            "current behavior, set ``atol=tol``.",
            category=DeprecationWarning,
            stacklevel=2)
        atol = tol

    matvec = A.matvec
    psolve = M.matvec

    if CU is None:
        CU = []

    if k is None:
        k = m

    axpy, dot, scal = None, None, None

    r = b - matvec(x)

    axpy, dot, scal, nrm2 = get_blas_funcs(['axpy', 'dot', 'scal', 'nrm2'],
                                           (x, r))

    b_norm = nrm2(b)

    if discard_C:
        CU[:] = [(None, u) for c, u in CU]

    # Reorthogonalize old vectors
    if CU:
        # Sort already existing vectors to the front
        CU.sort(key=lambda cu: cu[0] is not None)

        # Fill-in missing ones
        C = np.empty((A.shape[0], len(CU)), dtype=r.dtype, order='F')
        us = []
        j = 0
        while CU:
            # More memory-efficient: throw away old vectors as we go
            c, u = CU.pop(0)
            if c is None:
                c = matvec(u)
            C[:, j] = c
            j += 1
            us.append(u)

        # Orthogonalize
        Q, R, P = qr(C, overwrite_a=True, mode='economic', pivoting=True)
        del C

        # C := Q
        cs = list(Q.T)

        # U := U P R^-1,  back-substitution
        new_us = []
        for j in xrange(len(cs)):
            u = us[P[j]]
            for i in xrange(j):
                u = axpy(us[P[i]], u, u.shape[0], -R[i, j])
            if abs(R[j, j]) < 1e-12 * abs(R[0, 0]):
                # discard rest of the vectors
                break
            u = scal(1.0 / R[j, j], u)
            new_us.append(u)

        # Form the new CU lists
        CU[:] = list(zip(cs, new_us))[::-1]

    if CU:
        axpy, dot = get_blas_funcs(['axpy', 'dot'], (r, ))

        # Solve first the projection operation with respect to the CU
        # vectors. This corresponds to modifying the initial guess to
        # be
        #
        #     x' = x + U y
        #     y = argmin_y || b - A (x + U y) ||^2
        #
        # The solution is y = C^H (b - A x)
        for c, u in CU:
            yc = dot(c, r)
            x = axpy(u, x, x.shape[0], yc)
            r = axpy(c, r, r.shape[0], -yc)

    # GCROT main iteration
    for j_outer in xrange(maxiter):
        # -- callback
        if callback is not None:
            callback(x)

        beta = nrm2(r)

        # -- check stopping condition
        beta_tol = max(atol, tol * b_norm)

        if beta <= beta_tol and (j_outer > 0 or CU):
            # recompute residual to avoid rounding error
            r = b - matvec(x)
            beta = nrm2(r)

        if beta <= beta_tol:
            j_outer = -1
            break

        ml = m + max(k - len(CU), 0)

        cs = [c for c, u in CU]

        try:
            Q, R, B, vs, zs, y, pres = _fgmres(matvec,
                                               r / beta,
                                               ml,
                                               rpsolve=psolve,
                                               atol=max(atol, tol * b_norm) / beta,
                                               cs=cs)
            y *= beta
        except LinAlgError:
            # Floating point over/underflow, non-finite result from
            # matmul etc. -- report failure.
            break

        #
        # At this point,
        #
        #     [A U, A Z] = [C, V] G;   G =  [ I  B ]
        #                                   [ 0  H ]
        #
        # where [C, V] has orthonormal columns, and r = beta v_0. Moreover,
        #
        #     || b - A (x + Z y + U q) ||_2 = || r - C B y - V H y - C q ||_2 = min!
        #
        # from which y = argmin_y || beta e_1 - H y ||_2, and q = -B y
        #

        #
        # GCROT(m,k) update
        #

        # Define new outer vectors

        # ux := (Z - U B) y
        ux = zs[0] * y[0]
        for z, yc in zip(zs[1:], y[1:]):
            ux = axpy(z, ux, ux.shape[0], yc)  # ux += z*yc
        by = B.dot(y)
        for cu, byc in zip(CU, by):
            c, u = cu
            ux = axpy(u, ux, ux.shape[0], -byc)  # ux -= u*byc

        # cx := V H y
        hy = Q.dot(R.dot(y))
        cx = vs[0] * hy[0]
        for v, hyc in zip(vs[1:], hy[1:]):
            cx = axpy(v, cx, cx.shape[0], hyc)  # cx += v*hyc

        # Normalize cx, maintaining cx = A ux
        # This new cx is orthogonal to the previous C, by construction
        try:
            alpha = 1 / nrm2(cx)
            if not np.isfinite(alpha):
                raise FloatingPointError()
        except (FloatingPointError, ZeroDivisionError):
            # Cannot update, so skip it
            continue

        cx = scal(alpha, cx)
        ux = scal(alpha, ux)

        # Update residual and solution
        gamma = dot(cx, r)
        r = axpy(cx, r, r.shape[0], -gamma)  # r -= gamma*cx
        x = axpy(ux, x, x.shape[0], gamma)  # x += gamma*ux

        # Truncate CU
        if truncate == 'oldest':
            while len(CU) >= k and CU:
                del CU[0]
        elif truncate == 'smallest':
            if len(CU) >= k and CU:
                # cf. [1,2]
                D = solve(R[:-1, :].T, B.T).T
                W, sigma, V = svd(D)

                # C := C W[:,:k-1],  U := U W[:,:k-1]
                new_CU = []
                for j, w in enumerate(W[:, :k - 1].T):
                    c, u = CU[0]
                    c = c * w[0]
                    u = u * w[0]
                    for cup, wp in zip(CU[1:], w[1:]):
                        cp, up = cup
                        c = axpy(cp, c, c.shape[0], wp)
                        u = axpy(up, u, u.shape[0], wp)

                    # Reorthogonalize at the same time; not necessary
                    # in exact arithmetic, but floating point error
                    # tends to accumulate here
                    for cp, up in new_CU:
                        alpha = dot(cp, c)
                        c = axpy(cp, c, c.shape[0], -alpha)
                        u = axpy(up, u, u.shape[0], -alpha)
                    alpha = nrm2(c)
                    c = scal(1.0 / alpha, c)
                    u = scal(1.0 / alpha, u)

                    new_CU.append((c, u))
                CU[:] = new_CU

        # Add new vector to CU
        CU.append((cx, ux))

    # Include the solution vector to the span
    CU.append((None, x.copy()))
    if discard_C:
        CU[:] = [(None, uz) for cz, uz in CU]

    return postprocess(x), j_outer + 1
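For context, a minimal usage sketch of the public entry point (assuming it is exposed as ``scipy.sparse.linalg.gcrotmk``, as in SciPy >= 1.0); the tridiagonal test system is purely illustrative:

import numpy as np
from scipy.sparse import diags
from scipy.sparse.linalg import gcrotmk

n = 100
# Illustrative nonsingular tridiagonal system
A = diags([2.0, -1.0, -1.0], [0, -1, 1], shape=(n, n), format='csr')
b = np.ones(n)
x, info = gcrotmk(A, b, tol=1e-8, atol=1e-8)
print(info, np.linalg.norm(A.dot(x) - b))  # info == 0 on success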
def invhilbert(n, exact=False):
    """
    Compute the inverse of the Hilbert matrix of order `n`.

    The entries in the inverse of a Hilbert matrix are integers.  When `n`
    is greater than 14, some entries in the inverse exceed the upper limit
    of 64 bit integers.  The `exact` argument provides two options for
    dealing with these large integers.

    Parameters
    ----------
    n : int
        The order of the Hilbert matrix.
    exact : bool, optional
        If False, the data type of the array that is returned is np.float64,
        and the array is an approximation of the inverse.
        If True, the array is the exact integer inverse array.  To represent
        the exact inverse when n > 14, the returned array is an object array
        of long integers.  For n <= 14, the exact inverse is returned as an
        array with data type np.int64.

    Returns
    -------
    invh : (n, n) ndarray
        The data type of the array is np.float64 if `exact` is False.
        If `exact` is True, the data type is either np.int64 (for n <= 14)
        or object (for n > 14).  In the latter case, the objects in the
        array will be long integers.

    See Also
    --------
    hilbert : Create a Hilbert matrix.

    Notes
    -----
    .. versionadded:: 0.10.0

    Examples
    --------
    >>> from scipy.linalg import invhilbert
    >>> invhilbert(4)
    array([[   16.,  -120.,   240.,  -140.],
           [ -120.,  1200., -2700.,  1680.],
           [  240., -2700.,  6480., -4200.],
           [ -140.,  1680., -4200.,  2800.]])
    >>> invhilbert(4, exact=True)
    array([[   16,  -120,   240,  -140],
           [ -120,  1200, -2700,  1680],
           [  240, -2700,  6480, -4200],
           [ -140,  1680, -4200,  2800]], dtype=int64)
    >>> invhilbert(16)[7,7]
    4.2475099528537506e+19
    >>> invhilbert(16, exact=True)[7,7]
    42475099528537378560L

    """
    from scipy.special import comb
    if exact:
        if n > 14:
            dtype = object
        else:
            dtype = np.int64
    else:
        dtype = np.float64
    invh = np.empty((n, n), dtype=dtype)
    for i in xrange(n):
        for j in xrange(0, i + 1):
            s = i + j
            invh[i, j] = ((-1)**s * (s + 1) * comb(n + i, n - j - 1, exact) *
                          comb(n + j, n - i - 1, exact) * comb(s, i, exact)**2)
            if i != j:
                invh[j, i] = invh[i, j]
    return invh
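As a quick sanity check (a sketch), the product of the Hilbert matrix and the computed inverse should be close to the identity for small ``n``:

import numpy as np
from scipy.linalg import hilbert, invhilbert

n = 6
# Largest deviation from the identity; grows with n because the
# Hilbert matrix is severely ill-conditioned.
err = np.abs(hilbert(n).dot(invhilbert(n)) - np.eye(n)).max()
print(err)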
Example #42
def lobpcg(A,
           X,
           B=None,
           M=None,
           Y=None,
           tol=None,
           maxiter=20,
           largest=True,
           verbosityLevel=0,
           retLambdaHistory=False,
           retResidualNormsHistory=False):
    """Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG)

    LOBPCG is a preconditioned eigensolver for large symmetric positive
    definite (SPD) generalized eigenproblems.

    Parameters
    ----------
    A : {sparse matrix, dense matrix, LinearOperator}
        The symmetric linear operator of the problem, usually a
        sparse matrix.  Often called the "stiffness matrix".
    X : array_like
        Initial approximation to the k eigenvectors. If A has
        shape (n, n), then X should have shape (n, k).
    B : {dense matrix, sparse matrix, LinearOperator}, optional
        The right hand side operator in a generalized eigenproblem.
        By default, B = Identity.  Often called the "mass matrix".
    M : {dense matrix, sparse matrix, LinearOperator}, optional
        Preconditioner to A; by default M = Identity.
        M should approximate the inverse of A.
    Y : array_like, optional
        n-by-sizeY matrix of constraints (sizeY < n).
        The iterations will be performed in the B-orthogonal complement
        of the column-space of Y. Y must be full rank.

    Returns
    -------
    w : array
        Array of k eigenvalues
    v : array
        An array of k eigenvectors.  ``v`` has the same shape as X.

    Other Parameters
    ----------------
    tol : scalar, optional
        Solver tolerance (stopping criterion).
        By default: ``tol = n * sqrt(eps)``.
    maxiter : integer, optional
        Maximum number of iterations.
        By default: ``maxiter = min(n, 20)``.
    largest : bool, optional
        When True, solve for the largest eigenvalues, otherwise the smallest.
    verbosityLevel : integer, optional
        Controls solver output.  Default: verbosityLevel = 0.
    retLambdaHistory : boolean, optional
        Whether to return eigenvalue history.
    retResidualNormsHistory : boolean, optional
        Whether to return history of residual norms.

    Examples
    --------

    Solve A x = lambda B x with constraints and preconditioning.

    >>> import numpy as np
    >>> from scipy.sparse import spdiags, issparse
    >>> from scipy.sparse.linalg import lobpcg, LinearOperator
    >>> n = 100
    >>> vals = [np.arange(n, dtype=np.float64) + 1]
    >>> A = spdiags(vals, 0, n, n)
    >>> A.toarray()
    array([[   1.,    0.,    0., ...,    0.,    0.,    0.],
           [   0.,    2.,    0., ...,    0.,    0.,    0.],
           [   0.,    0.,    3., ...,    0.,    0.,    0.],
           ...,
           [   0.,    0.,    0., ...,   98.,    0.,    0.],
           [   0.,    0.,    0., ...,    0.,   99.,    0.],
           [   0.,    0.,    0., ...,    0.,    0.,  100.]])

    Constraints.

    >>> Y = np.eye(n, 3)

    Initial guess for eigenvectors, should have linearly independent
    columns. Column dimension = number of requested eigenvalues.

    >>> X = np.random.rand(n, 3)

    Preconditioner -- inverse of A (as an abstract linear operator).

    >>> invA = spdiags([1./vals[0]], 0, n, n)
    >>> def precond( x ):
    ...     return invA  * x
    >>> M = LinearOperator(matvec=precond, shape=(n, n), dtype=float)

    Here, ``invA`` could of course have been used directly as a preconditioner.
    Let us then solve the problem:

    >>> eigs, vecs = lobpcg(A, X, Y=Y, M=M, tol=1e-4, maxiter=40, largest=False)
    >>> eigs
    array([ 4.,  5.,  6.])

    Note that the vectors passed in Y are the eigenvectors of the 3 smallest
    eigenvalues. The results returned are orthogonal to those.

    Notes
    -----
    If both retLambdaHistory and retResidualNormsHistory are True,
    the return tuple has the following format
    (lambda, V, lambda history, residual norms history).

    In the following ``n`` denotes the matrix size and ``m`` the number
    of required eigenvalues (smallest or largest).

    The LOBPCG code internally solves eigenproblems of size ``3*m`` on every
    iteration by calling the "standard" dense eigensolver, so if ``m`` is not
    small enough compared to ``n``, it does not make sense to call the LOBPCG
    code. Rather, one should use the "standard" eigensolver, e.g. a numpy or
    scipy function, in that case.
    If one calls the LOBPCG algorithm for ``5*m > n``, it will most likely
    break internally, so the code tries to call the standard function instead.

    It is not that ``n`` should be large for LOBPCG to work, but rather that
    the ratio ``n/m`` should be large. If you call the LOBPCG code with
    ``m=1`` and ``n=10``, it should work, though ``n`` is small. The method
    is intended for extremely large ``n/m``; see e.g., reference [28] in
    https://arxiv.org/abs/0705.2626

    The convergence speed depends basically on two factors:

    1.  How well the sought eigenvalues are separated from the rest
        of the eigenvalues.
        One can try to vary ``m`` to make this better.

    2.  How well conditioned the problem is. This can be changed by using proper
        preconditioning. For example, a rod vibration test problem (under tests
        directory) is ill-conditioned for large ``n``, so convergence will be
        slow, unless efficient preconditioning is used.
        For this specific problem, a good simple preconditioner function would
        be a linear solve for A, which is easy to code since A is tridiagonal.

    *Acknowledgements*

    lobpcg.py code was written by Robert Cimrman.
    Many thanks belong to Andrew Knyazev, the author of the algorithm,
    for lots of advice and support.

    References
    ----------
    .. [1] A. V. Knyazev (2001),
           Toward the Optimal Preconditioned Eigensolver: Locally Optimal
           Block Preconditioned Conjugate Gradient Method.
           SIAM Journal on Scientific Computing 23, no. 2,
           pp. 517-541. :doi:`10.1137/S1064827500366124`

    .. [2] A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov (2007),
           Block Locally Optimal Preconditioned Eigenvalue Xolvers (BLOPEX)
           in hypre and PETSc.  https://arxiv.org/abs/0705.2626

    .. [3] A. V. Knyazev's C and MATLAB implementations:
           https://bitbucket.org/joseroman/blopex

    """
    blockVectorX = X
    blockVectorY = Y
    residualTolerance = tol
    maxIterations = maxiter

    if blockVectorY is not None:
        sizeY = blockVectorY.shape[1]
    else:
        sizeY = 0

    # Block size.
    if len(blockVectorX.shape) != 2:
        raise ValueError('expected rank-2 array for argument X')

    n, sizeX = blockVectorX.shape
    if sizeX > n:
        raise ValueError('X column dimension exceeds the row dimension')

    A = _makeOperator(A, (n, n))
    B = _makeOperator(B, (n, n))
    M = _makeOperator(M, (n, n))

    if (n - sizeY) < (5 * sizeX):
        # warn('The problem size is small compared to the block size.' \
        #        ' Using dense eigensolver instead of LOBPCG.')

        if blockVectorY is not None:
            raise NotImplementedError('The dense eigensolver '
                                      'does not support constraints.')

        # Define the closed range of indices of eigenvalues to return.
        if largest:
            eigvals = (n - sizeX, n - 1)
        else:
            eigvals = (0, sizeX - 1)

        A_dense = A(np.eye(n))
        B_dense = None if B is None else B(np.eye(n))
        return eigh(A_dense, B_dense, eigvals=eigvals, check_finite=False)

    if residualTolerance is None:
        residualTolerance = np.sqrt(1e-15) * n

    maxIterations = min(n, maxIterations)

    if verbosityLevel:
        aux = "Solving "
        if B is None:
            aux += "standard"
        else:
            aux += "generalized"
        aux += " eigenvalue problem with"
        if M is None:
            aux += "out"
        aux += " preconditioning\n\n"
        aux += "matrix size %d\n" % n
        aux += "block size %d\n\n" % sizeX
        if blockVectorY is None:
            aux += "No constraints\n\n"
        else:
            if sizeY > 1:
                aux += "%d constraints\n\n" % sizeY
            else:
                aux += "%d constraint\n\n" % sizeY
        print(aux)

    ##
    # Apply constraints to X.
    if blockVectorY is not None:

        if B is not None:
            blockVectorBY = B(blockVectorY)
        else:
            blockVectorBY = blockVectorY

        # gramYBY is a dense array.
        gramYBY = np.dot(blockVectorY.T.conj(), blockVectorBY)
        try:
            # gramYBY is a Cholesky factor from now on...
            gramYBY = cho_factor(gramYBY)
        except LinAlgError:
            # cho_factor signals failure via scipy.linalg.LinAlgError
            raise ValueError('cannot handle linearly dependent constraints')

        _applyConstraints(blockVectorX, gramYBY, blockVectorBY, blockVectorY)

    ##
    # B-orthonormalize X.
    blockVectorX, blockVectorBX = _b_orthonormalize(B, blockVectorX)

    ##
    # Compute the initial Ritz vectors: solve the eigenproblem.
    blockVectorAX = A(blockVectorX)
    gramXAX = np.dot(blockVectorX.T.conj(), blockVectorAX)

    _lambda, eigBlockVector = eigh(gramXAX, check_finite=False)
    ii = np.argsort(_lambda)[:sizeX]
    if largest:
        ii = ii[::-1]
    _lambda = _lambda[ii]

    eigBlockVector = np.asarray(eigBlockVector[:, ii])
    blockVectorX = np.dot(blockVectorX, eigBlockVector)
    blockVectorAX = np.dot(blockVectorAX, eigBlockVector)
    if B is not None:
        blockVectorBX = np.dot(blockVectorBX, eigBlockVector)

    ##
    # Active index set.
    activeMask = np.ones((sizeX, ), dtype=bool)

    lambdaHistory = [_lambda]
    residualNormsHistory = []

    previousBlockSize = sizeX
    ident = np.eye(sizeX, dtype=A.dtype)
    ident0 = np.eye(sizeX, dtype=A.dtype)

    ##
    # Main iteration loop.

    blockVectorP = None  # set during iteration
    blockVectorAP = None
    blockVectorBP = None

    for iterationNumber in xrange(maxIterations):
        if verbosityLevel > 0:
            print('iteration %d' % iterationNumber)

        aux = blockVectorBX * _lambda[np.newaxis, :]
        blockVectorR = blockVectorAX - aux

        aux = np.sum(blockVectorR.conjugate() * blockVectorR, 0)
        residualNorms = np.sqrt(aux)

        residualNormsHistory.append(residualNorms)

        ii = np.where(residualNorms > residualTolerance, True, False)
        activeMask = activeMask & ii
        if verbosityLevel > 2:
            print(activeMask)

        currentBlockSize = activeMask.sum()
        if currentBlockSize != previousBlockSize:
            previousBlockSize = currentBlockSize
            ident = np.eye(currentBlockSize, dtype=A.dtype)

        if currentBlockSize == 0:
            break

        if verbosityLevel > 0:
            print('current block size:', currentBlockSize)
            print('eigenvalue:', _lambda)
            print('residual norms:', residualNorms)
        if verbosityLevel > 10:
            print(eigBlockVector)

        activeBlockVectorR = as2d(blockVectorR[:, activeMask])

        if iterationNumber > 0:
            activeBlockVectorP = as2d(blockVectorP[:, activeMask])
            activeBlockVectorAP = as2d(blockVectorAP[:, activeMask])
            activeBlockVectorBP = as2d(blockVectorBP[:, activeMask])

        if M is not None:
            # Apply preconditioner T to the active residuals.
            activeBlockVectorR = M(activeBlockVectorR)

        ##
        # Apply constraints to the preconditioned residuals.
        if blockVectorY is not None:
            _applyConstraints(activeBlockVectorR, gramYBY, blockVectorBY,
                              blockVectorY)

        ##
        # B-orthonormalize the preconditioned residuals.

        aux = _b_orthonormalize(B, activeBlockVectorR)
        activeBlockVectorR, activeBlockVectorBR = aux

        activeBlockVectorAR = A(activeBlockVectorR)

        if iterationNumber > 0:
            aux = _b_orthonormalize(B,
                                    activeBlockVectorP,
                                    activeBlockVectorBP,
                                    retInvR=True)
            activeBlockVectorP, activeBlockVectorBP, invR = aux
            activeBlockVectorAP = np.dot(activeBlockVectorAP, invR)

        ##
        # Perform the Rayleigh Ritz Procedure:
        # Compute symmetric Gram matrices:

        xaw = np.dot(blockVectorX.T.conj(), activeBlockVectorAR)
        waw = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAR)
        xbw = np.dot(blockVectorX.T.conj(), activeBlockVectorBR)

        if iterationNumber > 0:
            xap = np.dot(blockVectorX.T.conj(), activeBlockVectorAP)
            wap = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAP)
            pap = np.dot(activeBlockVectorP.T.conj(), activeBlockVectorAP)
            xbp = np.dot(blockVectorX.T.conj(), activeBlockVectorBP)
            wbp = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorBP)

            gramA = np.bmat([[np.diag(_lambda), xaw, xap],
                             [xaw.T.conj(), waw, wap],
                             [xap.T.conj(), wap.T.conj(), pap]])

            gramB = np.bmat([[ident0, xbw, xbp], [xbw.T.conj(), ident, wbp],
                             [xbp.T.conj(), wbp.T.conj(), ident]])
        else:
            gramA = np.bmat([[np.diag(_lambda), xaw], [xaw.T.conj(), waw]])
            gramB = np.bmat([[ident0, xbw], [xbw.T.conj(), ident]])

        _assert_symmetric(gramA)
        _assert_symmetric(gramB)

        if verbosityLevel > 10:
            save(gramA, 'gramA')
            save(gramB, 'gramB')

        # Solve the generalized eigenvalue problem.
        _lambda, eigBlockVector = eigh(gramA, gramB, check_finite=False)
        ii = np.argsort(_lambda)[:sizeX]
        if largest:
            ii = ii[::-1]
        if verbosityLevel > 10:
            print(ii)

        _lambda = _lambda[ii]
        eigBlockVector = eigBlockVector[:, ii]

        lambdaHistory.append(_lambda)

        if verbosityLevel > 10:
            print('lambda:', _lambda)
##         # Normalize eigenvectors!
##         aux = np.sum( eigBlockVector.conjugate() * eigBlockVector, 0 )
##         eigVecNorms = np.sqrt( aux )
##         eigBlockVector = eigBlockVector / eigVecNorms[np.newaxis,:]
#        eigBlockVector, aux = _b_orthonormalize( B, eigBlockVector )

        if verbosityLevel > 10:
            print(eigBlockVector)
            pause()

        ##
        # Compute Ritz vectors.
        if iterationNumber > 0:
            eigBlockVectorX = eigBlockVector[:sizeX]
            eigBlockVectorR = eigBlockVector[sizeX:sizeX + currentBlockSize]
            eigBlockVectorP = eigBlockVector[sizeX + currentBlockSize:]

            pp = np.dot(activeBlockVectorR, eigBlockVectorR)
            pp += np.dot(activeBlockVectorP, eigBlockVectorP)

            app = np.dot(activeBlockVectorAR, eigBlockVectorR)
            app += np.dot(activeBlockVectorAP, eigBlockVectorP)

            bpp = np.dot(activeBlockVectorBR, eigBlockVectorR)
            bpp += np.dot(activeBlockVectorBP, eigBlockVectorP)
        else:
            eigBlockVectorX = eigBlockVector[:sizeX]
            eigBlockVectorR = eigBlockVector[sizeX:]

            pp = np.dot(activeBlockVectorR, eigBlockVectorR)
            app = np.dot(activeBlockVectorAR, eigBlockVectorR)
            bpp = np.dot(activeBlockVectorBR, eigBlockVectorR)

        if verbosityLevel > 10:
            print(pp)
            print(app)
            print(bpp)
            pause()

        blockVectorX = np.dot(blockVectorX, eigBlockVectorX) + pp
        blockVectorAX = np.dot(blockVectorAX, eigBlockVectorX) + app
        blockVectorBX = np.dot(blockVectorBX, eigBlockVectorX) + bpp

        blockVectorP, blockVectorAP, blockVectorBP = pp, app, bpp

    aux = blockVectorBX * _lambda[np.newaxis, :]
    blockVectorR = blockVectorAX - aux

    aux = np.sum(blockVectorR.conjugate() * blockVectorR, 0)
    residualNorms = np.sqrt(aux)

    if verbosityLevel > 0:
        print('final eigenvalue:', _lambda)
        print('final residual norms:', residualNorms)

    if retLambdaHistory:
        if retResidualNormsHistory:
            return _lambda, blockVectorX, lambdaHistory, residualNormsHistory
        else:
            return _lambda, blockVectorX, lambdaHistory
    else:
        if retResidualNormsHistory:
            return _lambda, blockVectorX, residualNormsHistory
        else:
            return _lambda, blockVectorX
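A minimal standalone sketch of calling ``lobpcg`` for the smallest eigenvalues of a diagonal matrix with known spectrum; the matrix, block size, and iteration limit are illustrative:

import numpy as np
from scipy.sparse import spdiags
from scipy.sparse.linalg import lobpcg

n = 100
A = spdiags([np.arange(1, n + 1, dtype=float)], 0, n, n)
rng = np.random.RandomState(0)
X = rng.rand(n, 3)  # initial block with linearly independent columns
w, v = lobpcg(A, X, tol=1e-8, maxiter=200, largest=False)
print(np.sort(w))  # approximately [1., 2., 3.]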
Example #43
def scalar_search_wolfe1(phi, derphi, phi0=None, old_phi0=None, derphi0=None,
                         c1=1e-4, c2=0.9,
                         amax=50, amin=1e-8, xtol=1e-14):
    """
    Scalar line search for a step size alpha satisfying the strong Wolfe
    conditions.

    The underlying search direction is assumed to be a descent direction;
    only alpha > 0 is considered.

    Parameters
    ----------
    phi : callable phi(alpha)
        Function at point `alpha`
    derphi : callable dphi(alpha)
        Derivative `d phi(alpha)/d alpha`. Returns a scalar.

    phi0 : float, optional
        Value of `f` at 0
    old_phi0 : float, optional
        Value of `f` at the previous point
    derphi0 : float, optional
        Value `derphi` at 0
    c1, c2 : float, optional
        Wolfe parameters
    amax, amin : float, optional
        Maximum and minimum step size
    xtol : float, optional
        Relative tolerance for an acceptable step.

    Returns
    -------
    alpha : float
        Step size, or None if no suitable step was found
    phi : float
        Value of `phi` at the new point `alpha`
    phi0 : float
        Value of `phi` at `alpha=0`

    Notes
    -----
    Uses routine DCSRCH from MINPACK.

    """

    if phi0 is None:
        phi0 = phi(0.)
    if derphi0 is None:
        derphi0 = derphi(0.)

    if old_phi0 is not None and derphi0 != 0:
        alpha1 = min(1.0, 1.01*2*(phi0 - old_phi0)/derphi0)
        if alpha1 < 0:
            alpha1 = 1.0
    else:
        alpha1 = 1.0

    phi1 = phi0
    derphi1 = derphi0
    isave = np.zeros((2,), np.intc)
    dsave = np.zeros((13,), float)
    task = b'START'

    maxiter = 100
    for i in xrange(maxiter):
        stp, phi1, derphi1, task = minpack2.dcsrch(alpha1, phi1, derphi1,
                                                   c1, c2, xtol, task,
                                                   amin, amax, isave, dsave)
        if task[:2] == b'FG':
            alpha1 = stp
            phi1 = phi(stp)
            derphi1 = derphi(stp)
        else:
            break
    else:
        # maxiter reached, the line search did not converge
        stp = None

    if task[:5] == b'ERROR' or task[:4] == b'WARN':
        stp = None  # failed

    return stp, phi1, phi0
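A sketch of driving this routine directly on a one-dimensional quadratic; the ``scipy.optimize.linesearch`` import path is the historical location of this helper and is an assumption for current SciPy versions:

from scipy.optimize.linesearch import scalar_search_wolfe1

phi = lambda a: (a - 1.0) ** 2       # minimum at alpha = 1
derphi = lambda a: 2.0 * (a - 1.0)   # derphi(0) = -2 < 0, i.e. descent
alpha, phi1, phi0 = scalar_search_wolfe1(phi, derphi)
print(alpha, phi1)  # a step near 1 satisfying the Wolfe conditions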
Example #44
    def solve(self):
        """
        Runs the DifferentialEvolutionSolver.

        Returns
        -------
        res : OptimizeResult
            The optimization result represented as a ``OptimizeResult`` object.
            Important attributes are: ``x`` the solution array, ``success`` a
            Boolean flag indicating if the optimizer exited successfully and
            ``message`` which describes the cause of the termination. See
            `OptimizeResult` for a description of other attributes.  If `polish`
            was employed, and a lower minimum was obtained by the polishing,
            then OptimizeResult also contains the ``jac`` attribute.
        """
        nit, warning_flag = 0, False
        status_message = _status_message['success']

        # The population may have just been initialized (all entries are
        # np.inf). If it has, you have to calculate the initial energies.
        # Although this is also done in the evolve generator it's possible
        # that someone can set maxiter=0, at which point we still want the
        # initial energies to be calculated (the following loop isn't run).
        if np.all(np.isinf(self.population_energies)):
            self.population_energies[:] = self._calculate_population_energies(
                self.population)
            self._promote_lowest_energy()

        # do the optimisation.
        for nit in xrange(1, self.maxiter + 1):
            # evolve the population by a generation
            try:
                next(self)
            except StopIteration:
                warning_flag = True
                if self._nfev > self.maxfun:
                    status_message = _status_message['maxfev']
                elif self._nfev == self.maxfun:
                    status_message = ('Maximum number of function evaluations'
                                      ' has been reached.')
                break

            if self.disp:
                print("differential_evolution step %d: f(x)= %g"
                      % (nit,
                         self.population_energies[0]))

            # should the solver terminate?
            convergence = self.convergence

            if (self.callback and
                    self.callback(self._scale_parameters(self.population[0]),
                                  convergence=self.tol / convergence) is True):

                warning_flag = True
                status_message = ('callback function requested stop early '
                                  'by returning True')
                break

            if np.any(np.isinf(self.population_energies)):
                intol = False
            else:
                intol = (np.std(self.population_energies) <=
                         self.atol +
                         self.tol * np.abs(np.mean(self.population_energies)))
            if warning_flag or intol:
                break

        else:
            status_message = _status_message['maxiter']
            warning_flag = True

        DE_result = OptimizeResult(
            x=self.x,
            fun=self.population_energies[0],
            nfev=self._nfev,
            nit=nit,
            message=status_message,
            success=(warning_flag is not True))

        if self.polish:
            result = minimize(self.func,
                              np.copy(DE_result.x),
                              method='L-BFGS-B',
                              bounds=self.limits.T)

            self._nfev += result.nfev
            DE_result.nfev = self._nfev

            if result.fun < DE_result.fun:
                DE_result.fun = result.fun
                DE_result.x = result.x
                DE_result.jac = result.jac
                # to keep internal state consistent
                self.population_energies[0] = result.fun
                self.population[0] = self._unscale_parameters(result.x)

        return DE_result
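This solver is normally driven through the public wrapper ``scipy.optimize.differential_evolution``; a minimal sketch on the Rosenbrock function:

from scipy.optimize import differential_evolution, rosen

result = differential_evolution(rosen, bounds=[(-5, 5)] * 3, seed=1)
print(result.x, result.fun)  # near [1, 1, 1] and 0 (polished by L-BFGS-B)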
Example #45
def _identify_ridge_lines(matr, max_distances, gap_thresh):
    """
    Identify ridges in the 2-D matrix.

    Expect that the width of the wavelet feature increases with increasing row
    number.

    Parameters
    ----------
    matr : 2-D ndarray
        Matrix in which to identify ridge lines.
    max_distances : 1-D sequence
        At each row, a ridge line is only connected
        if the relative max at row[n] is within
        `max_distances`[n] from the relative max at row[n+1].
    gap_thresh : int
        If a relative maximum is not found within `max_distances`,
        there will be a gap. A ridge line is discontinued if
        there are more than `gap_thresh` points without connecting
        a new relative maximum.

    Returns
    -------
    ridge_lines : tuple
        Tuple of 2 1-D sequences. `ridge_lines`[ii][0] are the rows of the
        ii-th ridge-line, `ridge_lines`[ii][1] are the columns. Empty if none
        found.  Each ridge-line will be sorted by row (increasing), but the
        order of the ridge lines is not specified.

    References
    ----------
    Bioinformatics (2006) 22 (17): 2059-2065.
    :doi:`10.1093/bioinformatics/btl355`
    http://bioinformatics.oxfordjournals.org/content/22/17/2059.long

    Examples
    --------
    >>> import numpy as np
    >>> data = np.random.rand(5,5)
    >>> ridge_lines = _identify_ridge_lines(data, 1, 1)

    Notes
    -----
    This function is intended to be used in conjunction with `cwt`
    as part of `find_peaks_cwt`.

    """
    if len(max_distances) < matr.shape[0]:
        raise ValueError('Max_distances must have at least as many rows '
                         'as matr')

    all_max_cols = _boolrelextrema(matr, np.greater, axis=1, order=1)
    # Highest row for which there are any relative maxima
    has_relmax = np.where(all_max_cols.any(axis=1))[0]
    if len(has_relmax) == 0:
        return []
    start_row = has_relmax[-1]
    # Each ridge line is a 3-tuple:
    # rows, cols, gap number
    ridge_lines = [[[start_row],
                   [col],
                   0] for col in np.where(all_max_cols[start_row])[0]]
    final_lines = []
    rows = np.arange(start_row - 1, -1, -1)
    cols = np.arange(0, matr.shape[1])
    for row in rows:
        this_max_cols = cols[all_max_cols[row]]

        # Increment gap number of each line,
        # set it to zero later if appropriate
        for line in ridge_lines:
            line[2] += 1

        # XXX These should always be all_max_cols[row]
        # But the order might be different. Might be an efficiency gain
        # to make sure the order is the same and avoid this iteration
        prev_ridge_cols = np.array([line[1][-1] for line in ridge_lines])
        # Look through every relative maximum found at current row
        # Attempt to connect them with existing ridge lines.
        for ind, col in enumerate(this_max_cols):
            # If there is a previous ridge line within
            # the max_distance to connect to, do so.
            # Otherwise start a new one.
            line = None
            if len(prev_ridge_cols) > 0:
                diffs = np.abs(col - prev_ridge_cols)
                closest = np.argmin(diffs)
                if diffs[closest] <= max_distances[row]:
                    line = ridge_lines[closest]
            if line is not None:
                # Found a point close enough, extend current ridge line
                line[1].append(col)
                line[0].append(row)
                line[2] = 0
            else:
                new_line = [[row],
                            [col],
                            0]
                ridge_lines.append(new_line)

        # Remove the ridge lines with gap_number too high
        # XXX Modifying a list while iterating over it.
        # Should be safe, since we iterate backwards, but
        # still tacky.
        for ind in xrange(len(ridge_lines) - 1, -1, -1):
            line = ridge_lines[ind]
            if line[2] > gap_thresh:
                final_lines.append(line)
                del ridge_lines[ind]

    out_lines = []
    for line in (final_lines + ridge_lines):
        sortargs = np.array(np.argsort(line[0]))
        rows, cols = np.zeros_like(sortargs), np.zeros_like(sortargs)
        rows[sortargs] = line[0]
        cols[sortargs] = line[1]
        out_lines.append([rows, cols])

    return out_lines
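``_identify_ridge_lines`` is internal; a sketch of the public entry point ``find_peaks_cwt``, which builds the CWT matrix and then applies this ridge search:

import numpy as np
from scipy.signal import find_peaks_cwt

t = np.linspace(0, 10, 500)
x = np.sin(2 * np.pi * 0.5 * t)  # maxima every 2 time units
peaks = find_peaks_cwt(x, widths=np.arange(5, 30))
print(peaks)  # sample indices near the maxima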
Example #46
    def test_derivative(self):
        P = KroghInterpolator(self.xs, self.ys)
        m = 10
        r = P.derivatives(self.test_xs, m)
        for i in xrange(m):
            assert_almost_equal(P.derivative(self.test_xs, i), r[i])
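For context on the test above, a sketch of what ``KroghInterpolator.derivatives`` returns: all derivatives from order 0 up to the requested order, evaluated in one call.

import numpy as np
from scipy.interpolate import KroghInterpolator

xs = np.array([0.0, 1.0, 2.0])
P = KroghInterpolator(xs, xs ** 2)  # degree-2 data, reproduces x**2
print(P.derivative(0.5, 1))    # first derivative: 1.0
print(P.derivatives(0.5, 3))   # [P(0.5), P'(0.5), P''(0.5)] = [0.25, 1.0, 2.0]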
Example #47
    def __getitem__(self, index):
        """Return the element(s) index=(i, j), where j may be a slice.
        This always returns a copy for consistency, since slices into
        Python lists return copies.
        """

        # Scalar fast path first
        if isinstance(index, tuple) and len(index) == 2:
            i, j = index
            # Use isinstance checks for common index types; this is
            # ~25-50% faster than isscalarlike. Other types are
            # handled below.
            if ((isinstance(i, int) or isinstance(i, np.integer)) and
                    (isinstance(j, int) or isinstance(j, np.integer))):
                v = _csparsetools.lil_get1(self.shape[0], self.shape[1],
                                           self.rows, self.data,
                                           i, j)
                return self.dtype.type(v)

        # Utilities found in IndexMixin
        i, j = self._unpack_index(index)

        # Proper check for other scalar index types
        i_intlike = isintlike(i)
        j_intlike = isintlike(j)

        if i_intlike and j_intlike:
            v = _csparsetools.lil_get1(self.shape[0], self.shape[1],
                                       self.rows, self.data,
                                       i, j)
            return self.dtype.type(v)
        elif j_intlike or isinstance(j, slice):
            # column slicing fast path
            if j_intlike:
                j = self._check_col_bounds(j)
                j = slice(j, j+1)

            if i_intlike:
                i = self._check_row_bounds(i)
                i = xrange(i, i+1)
                i_shape = None
            elif isinstance(i, slice):
                i = xrange(*i.indices(self.shape[0]))
                i_shape = None
            else:
                i = np.atleast_1d(i)
                i_shape = i.shape

            if i_shape is None or len(i_shape) == 1:
                return self._get_row_ranges(i, j)

        i, j = self._index_to_arrays(i, j)
        if i.size == 0:
            return lil_matrix(i.shape, dtype=self.dtype)

        new = lil_matrix(i.shape, dtype=self.dtype)

        i, j = _prepare_index_for_memoryview(i, j)
        _csparsetools.lil_fancy_get(self.shape[0], self.shape[1],
                                    self.rows, self.data,
                                    new.rows, new.data,
                                    i, j)
        return new
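A sketch exercising the three indexing paths above on a small ``lil_matrix``: the scalar fast path, row slicing (which returns a copy), and the fancy-indexing fallback:

from scipy.sparse import lil_matrix

A = lil_matrix((4, 4))
A[1, 2] = 3.0
print(A[1, 2])                      # scalar fast path
print(A[1, :].toarray())            # row slice, a copy
print(A[[0, 1], [2, 2]].toarray())  # fancy indexing path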
Example #48
    def test_high_derivative(self):
        P = KroghInterpolator(self.xs, self.ys)
        for i in xrange(len(self.xs), 2 * len(self.xs)):
            assert_almost_equal(P.derivative(self.test_xs, i),
                                np.zeros(len(self.test_xs)))
Example #49
    def solve(self):
        """
        Runs the DifferentialEvolutionSolver.

        Returns
        -------
        res : OptimizeResult
            The optimization result represented as a ``OptimizeResult`` object.
            Important attributes are: ``x`` the solution array, ``success`` a
            Boolean flag indicating if the optimizer exited successfully and
            ``message`` which describes the cause of the termination. See
            `OptimizeResult` for a description of other attributes.  If `polish`
            was employed, and a lower minimum was obtained by the polishing,
            then OptimizeResult also contains the ``jac`` attribute.
        """
        nit, warning_flag = 0, False
        status_message = _status_message['success']

        # The population may have just been initialized (all entries are
        # np.inf). If it has, you have to calculate the initial energies.
        # Although this is also done in the evolve generator it's possible
        # that someone can set maxiter=0, at which point we still want the
        # initial energies to be calculated (the following loop isn't run).
        if np.all(np.isinf(self.population_energies)):
            self._calculate_population_energies()

        for nmig in xrange(1, self.number_of_migrations + 1):
            if nmig != 1:
                # Get the host node
                host = int(self.island_marker[-1])

                # Get all the neighbors list
                neighbors = self.topology.neighbors(host)

                neighbor_results = {}
                neighbor_energy_results = {}

                for each_neighbor in neighbors:
                    replacement = client.get(self.key + str(each_neighbor))
                    if replacement is None:
                        for _ in range(int(self.wait_time / self.poll_time)):
                            replacement = client.get(self.key +
                                                     str(each_neighbor))
                            if replacement is None:
                                print("POLLING!!!")
                                time.sleep(self.poll_time)
                            else:
                                break
                        if replacement is not None:
                            neighbor_results[each_neighbor] = np.array([
                                float(items)
                                for items in replacement.split(",")
                            ])
                            neighbor_energy_results[each_neighbor] = self.func(
                                neighbor_results[each_neighbor], *self.args)

                total_computed_neighbors = len(neighbor_results)
                energies = []

                for each_neighbor in neighbor_results.keys():
                    energies.append((neighbor_results[each_neighbor],
                                     neighbor_energy_results[each_neighbor]))

                for pop_index in range(1, total_computed_neighbors + 1):
                    energies.append((self.population[pop_index],
                                     self.population_energies[pop_index]))

                energies.sort(key=lambda x: x[-1])
                energies = energies[:total_computed_neighbors]

                for pop_index in range(1, total_computed_neighbors + 1):
                    self.population[pop_index] = energies[pop_index - 1][0]
                    self.population_energies[pop_index] = energies[pop_index -
                                                                   1][1]

            # do the optimisation.
            is_optimisation_complete = True
            for nit in xrange(1, self.maxiter + 1):
                # evolve the population by a generation
                try:
                    next(self)
                except StopIteration:
                    warning_flag = True
                    status_message = _status_message['maxfev']
                    #is_optimisation_complete = False
                    break

                if self.disp:
                    print("differential_evolution step %d: f(x)= %g" %
                          (nit, self.population_energies[0]))

                # should the solver terminate?
                convergence = self.convergence

                if (self.callback and self.callback(
                        self._scale_parameters(self.population[0]),
                        convergence=self.tol / convergence) is True):

                    warning_flag = True
                    status_message = ('callback function requested stop early '
                                      'by returning True')
                    is_optimisation_complete = False
                    break

                intol = (np.std(self.population_energies) <= self.atol +
                         self.tol * np.abs(np.mean(self.population_energies)))
                if intol:
                    is_optimisation_complete = False
                if warning_flag or intol:
                    break

            else:
                status_message = _status_message['maxiter']
                warning_flag = True

            client.set(self.island_marker,
                       ",".join([str(items) for items in self.x]))
            print("MARKED IN MEMCACHE")
            print(self.island_marker,
                  ",".join([str(items) for items in self.x]))
            if not is_optimisation_complete:
                #break
                print("Exited due to some break condition above!!",
                      status_message)

        DE_result = OptimizeResult(x=self.x,
                                   fun=self.population_energies[0],
                                   nfev=self._nfev,
                                   nit=nit,
                                   message=status_message,
                                   success=(warning_flag is not True))

        if self.polish:
            result = minimize(self.func,
                              np.copy(DE_result.x),
                              method='L-BFGS-B',
                              bounds=self.limits.T,
                              args=self.args)

            self._nfev += result.nfev
            DE_result.nfev = self._nfev

            if result.fun < DE_result.fun:
                DE_result.fun = result.fun
                DE_result.x = result.x
                DE_result.jac = result.jac
                # to keep internal state consistent
                self.population_energies[0] = result.fun
                self.population[0] = self._unscale_parameters(result.x)

        return DE_result
Example #50
    def test_low_derivatives(self):
        P = KroghInterpolator(self.xs, self.ys)
        D = P.derivatives(self.test_xs, len(self.xs) + 2)
        for i in xrange(D.shape[0]):
            assert_almost_equal(self.true_poly.deriv(i)(self.test_xs),
                                D[i])
Example #51
def _fgmres(matvec,
            v0,
            m,
            atol,
            lpsolve=None,
            rpsolve=None,
            cs=(),
            outer_v=(),
            prepend_outer_v=False):
    """
    FGMRES Arnoldi process, with optional projection or augmentation

    Parameters
    ----------
    matvec : callable
        Operation A*x
    v0 : ndarray
        Initial vector, normalized to nrm2(v0) == 1
    m : int
        Number of GMRES rounds
    atol : float
        Absolute tolerance for early exit
    lpsolve : callable
        Left preconditioner L
    rpsolve : callable
        Right preconditioner R
    cs : list of ndarray
        Columns of the matrix C in GCROT
    outer_v : list of (ndarray, ndarray)
        Augmentation vector pairs ``(z, w)`` in LGMRES, where ``w = A z``
        (or None, in which case it is computed)
    prepend_outer_v : bool, optional
        Whether augmentation vectors come before or after the
        Krylov iterates

    Raises
    ------
    LinAlgError
        If nans encountered

    Returns
    -------
    Q, R : ndarray
        QR decomposition of the upper Hessenberg H=QR
    B : ndarray
        Projections corresponding to matrix C
    vs : list of ndarray
        Columns of matrix V
    zs : list of ndarray
        Columns of matrix Z
    y : ndarray
        Solution to ||H y - e_1||_2 = min!
    res : float
        The final (preconditioned) residual norm

    """

    if lpsolve is None:
        lpsolve = lambda x: x
    if rpsolve is None:
        rpsolve = lambda x: x

    axpy, dot, scal, nrm2 = get_blas_funcs(['axpy', 'dot', 'scal', 'nrm2'],
                                           (v0, ))

    vs = [v0]
    zs = []
    y = None
    res = np.nan

    m = m + len(outer_v)

    # Orthogonal projection coefficients
    B = np.zeros((len(cs), m), dtype=v0.dtype)

    # H is stored in QR factorized form
    Q = np.ones((1, 1), dtype=v0.dtype)
    R = np.zeros((1, 0), dtype=v0.dtype)

    eps = np.finfo(v0.dtype).eps

    breakdown = False

    # FGMRES Arnoldi process
    for j in xrange(m):
        # L A Z = C B + V H

        if prepend_outer_v and j < len(outer_v):
            z, w = outer_v[j]
        elif prepend_outer_v and j == len(outer_v):
            z = rpsolve(v0)
            w = None
        elif not prepend_outer_v and j >= m - len(outer_v):
            z, w = outer_v[j - (m - len(outer_v))]
        else:
            z = rpsolve(vs[-1])
            w = None

        if w is None:
            w = lpsolve(matvec(z))
        else:
            # w is clobbered below
            w = w.copy()

        w_norm = nrm2(w)

        # GCROT projection: L A -> (1 - C C^H) L A
        # i.e. orthogonalize against C
        for i, c in enumerate(cs):
            alpha = dot(c, w)
            B[i, j] = alpha
            w = axpy(c, w, c.shape[0], -alpha)  # w -= alpha*c

        # Orthogonalize against V
        hcur = np.zeros(j + 2, dtype=Q.dtype)
        for i, v in enumerate(vs):
            alpha = dot(v, w)
            hcur[i] = alpha
            w = axpy(v, w, v.shape[0], -alpha)  # w -= alpha*v
        hcur[i + 1] = nrm2(w)

        with np.errstate(over='ignore', divide='ignore'):
            # Careful with denormals
            alpha = 1 / hcur[-1]

        if np.isfinite(alpha):
            w = scal(alpha, w)

        if not (hcur[-1] > eps * w_norm):
            # w essentially in the span of previous vectors,
            # or we have nans. Bail out after updating the QR
            # solution.
            breakdown = True

        vs.append(w)
        zs.append(z)

        # Arnoldi LSQ problem

        # Add new column to H=Q*R, padding other columns with zeros
        Q2 = np.zeros((j + 2, j + 2), dtype=Q.dtype, order='F')
        Q2[:j + 1, :j + 1] = Q
        Q2[j + 1, j + 1] = 1

        R2 = np.zeros((j + 2, j), dtype=R.dtype, order='F')
        R2[:j + 1, :] = R

        Q, R = qr_insert(Q2,
                         R2,
                         hcur,
                         j,
                         which='col',
                         overwrite_qru=True,
                         check_finite=False)

        # Transformed least squares problem
        # || Q R y - inner_res_0 * e_1 ||_2 = min!
        # Since R = [R'; 0], solution is y = inner_res_0 (R')^{-1} (Q^H)[:j,0]

        # Residual is immediately known
        res = abs(Q[0, -1])

        # Check for termination
        if res < atol or breakdown:
            break

    if not np.isfinite(R[j, j]):
        # nans encountered, bail out
        raise LinAlgError()

    # -- Get the LSQ problem solution

    # The problem is triangular, but the condition number may be
    # bad (or in case of breakdown the last diagonal entry may be
    # zero), so use lstsq instead of trtrs.
    y, _, _, _ = lstsq(R[:j + 1, :j + 1], Q[0, :j + 1].conj())

    B = B[:, :j + 1]

    return Q, R, B, vs, zs, y, res
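The invariant maintained by the loop above is the Arnoldi relation; a pure-NumPy sketch of the plain (unpreconditioned, unprojected) version, where ``A V_m = V_{m+1} H``:

import numpy as np

rng = np.random.RandomState(0)
A = rng.rand(8, 8)
m = 4
V = np.zeros((8, m + 1))
H = np.zeros((m + 1, m))
v0 = rng.rand(8)
V[:, 0] = v0 / np.linalg.norm(v0)
for j in range(m):
    w = A.dot(V[:, j])
    for i in range(j + 1):       # modified Gram-Schmidt, as in the code above
        H[i, j] = np.dot(V[:, i], w)
        w = w - H[i, j] * V[:, i]
    H[j + 1, j] = np.linalg.norm(w)
    V[:, j + 1] = w / H[j + 1, j]
print(np.allclose(A.dot(V[:, :m]), V.dot(H)))  # True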
Example #52
    def getrow(self, i):
        """Returns the i-th row as a (1 x n) DOK matrix."""
        new = dok_matrix((1, self.shape[1]), dtype=self.dtype)
        dict.update(new, (((0, j), self[i, j]) for j in xrange(self.shape[1])))
        return new
Example #53
    def __iter__(self):
        for r in xrange(self.shape[0]):
            yield self[r, :]
Example #54
    def getcol(self, j):
        """Returns the j-th column as a (m x 1) DOK matrix."""
        new = dok_matrix((self.shape[0], 1), dtype=self.dtype)
        dict.update(new, (((i, 0), self[i, j]) for i in xrange(self.shape[0])))
        return new
Example #55
    def test_daub(self):
        for i in xrange(1, 15):
            assert_equal(len(wavelets.daub(i)), i * 2)
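For reference, a sketch of what the test checks: ``daub(p)`` returns the ``2*p`` coefficients of the order-``p`` Daubechies low-pass (scaling) filter:

import numpy as np
from scipy.signal import wavelets

c = wavelets.daub(4)
print(len(c))          # 8 coefficients for order p = 4
print(np.round(c, 4))  # the filter taps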
Example #56
    def __getitem__(self, index):
        """If key=(i, j) is a pair of integers, return the corresponding
        element.  If either i or j is a slice or sequence, return a new sparse
        matrix with just these elements.
        """
        zero = self.dtype.type(0)
        i, j = self._unpack_index(index)

        i_intlike = isintlike(i)
        j_intlike = isintlike(j)

        if i_intlike and j_intlike:
            i = int(i)
            j = int(j)
            if i < 0:
                i += self.shape[0]
            if i < 0 or i >= self.shape[0]:
                raise IndexError('Index out of bounds.')
            if j < 0:
                j += self.shape[1]
            if j < 0 or j >= self.shape[1]:
                raise IndexError('Index out of bounds.')
            return dict.get(self, (i,j), zero)
        elif ((i_intlike or isinstance(i, slice)) and
              (j_intlike or isinstance(j, slice))):
            # Fast path for slicing very sparse matrices
            i_slice = slice(i, i+1) if i_intlike else i
            j_slice = slice(j, j+1) if j_intlike else j
            i_indices = i_slice.indices(self.shape[0])
            j_indices = j_slice.indices(self.shape[1])
            i_seq = xrange(*i_indices)
            j_seq = xrange(*j_indices)
            newshape = (len(i_seq), len(j_seq))
            newsize = _prod(newshape)

            if len(self) < 2*newsize and newsize != 0:
                # Switch to the fast path only when advantageous
                # (count the iterations in the loops, adjust for complexity)
                #
                # We also don't handle newsize == 0 here (if
                # i/j_intlike, it can mean index i or j was out of
                # bounds)
                return self._getitem_ranges(i_indices, j_indices, newshape)

        i, j = self._index_to_arrays(i, j)

        if i.size == 0:
            return dok_matrix(i.shape, dtype=self.dtype)

        min_i = i.min()
        if min_i < -self.shape[0] or i.max() >= self.shape[0]:
            raise IndexError('Index (%d) out of range -%d to %d.' %
                             (i.min(), self.shape[0], self.shape[0]-1))
        if min_i < 0:
            i = i.copy()
            i[i < 0] += self.shape[0]

        min_j = j.min()
        if min_j < -self.shape[1] or j.max() >= self.shape[1]:
            raise IndexError('Index (%d) out of range -%d to %d.' %
                             (j.min(), self.shape[1], self.shape[1]-1))
        if min_j < 0:
            j = j.copy()
            j[j < 0] += self.shape[1]

        newdok = dok_matrix(i.shape, dtype=self.dtype)

        for key in itertools.product(xrange(i.shape[0]), xrange(i.shape[1])):
            v = dict.get(self, (i[key], j[key]), zero)
            if v:
                dict.__setitem__(newdok, key, v)

        return newdok
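A sketch of the ``dok_matrix`` paths above: scalar lookup with negative-index wraparound, and the slicing fast path:

from scipy.sparse import dok_matrix

A = dok_matrix((3, 3))
A[0, 1] = 5.0
print(A[0, 1], A[-3, -2])     # both 5.0 after negative-index wraparound
print(A[0:2, 1:3].toarray())  # slicing fast path, a 2x2 block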
Example #57
def romberg(function,
            a,
            b,
            args=(),
            tol=1.48e-8,
            rtol=1.48e-8,
            show=False,
            divmax=10,
            vec_func=False):
    """
    Romberg integration of a callable function or method.

    Returns the integral of `function` (a function of one variable)
    over the interval (`a`, `b`).

    If `show` is True, the triangular array of the intermediate results
    will be printed.  If `vec_func` is True (default is False), then
    `function` is assumed to support vector arguments.

    Parameters
    ----------
    function : callable
        Function to be integrated.
    a : float
        Lower limit of integration.
    b : float
        Upper limit of integration.

    Returns
    -------
    results  : float
        Result of the integration.

    Other Parameters
    ----------------
    args : tuple, optional
        Extra arguments to pass to `function`. Each element of `args` will
        be passed as a single argument to `function`. Default is to pass no
        extra arguments.
    tol, rtol : float, optional
        The desired absolute and relative tolerances. Defaults are 1.48e-8.
    show : bool, optional
        Whether to print the results. Default is False.
    divmax : int, optional
        Maximum order of extrapolation. Default is 10.
    vec_func : bool, optional
        Whether `function` handles arrays as arguments (i.e. whether it is a
        "vector" function). Default is False.

    See Also
    --------
    fixed_quad : Fixed-order Gaussian quadrature.
    quad : Adaptive quadrature using QUADPACK.
    dblquad : Double integrals.
    tplquad : Triple integrals.
    romb : Integrators for sampled data.
    simps : Integrators for sampled data.
    cumtrapz : Cumulative integration for sampled data.
    ode : ODE integrator.
    odeint : ODE integrator.

    References
    ----------
    .. [1] 'Romberg's method' https://en.wikipedia.org/wiki/Romberg%27s_method

    Examples
    --------
    Integrate a gaussian from 0 to 1 and compare to the error function.

    >>> from scipy import integrate
    >>> from scipy.special import erf
    >>> import numpy as np
    >>> gaussian = lambda x: 1/np.sqrt(np.pi) * np.exp(-x**2)
    >>> result = integrate.romberg(gaussian, 0, 1, show=True)
    Romberg integration of <function vfunc at ...> from [0, 1]

    ::

       Steps  StepSize  Results
           1  1.000000  0.385872
           2  0.500000  0.412631  0.421551
           4  0.250000  0.419184  0.421368  0.421356
           8  0.125000  0.420810  0.421352  0.421350  0.421350
          16  0.062500  0.421215  0.421350  0.421350  0.421350  0.421350
          32  0.031250  0.421317  0.421350  0.421350  0.421350  0.421350  0.421350

    The final result is 0.421350396475 after 33 function evaluations.

    >>> print("%g %g" % (2*result, erf(1)))
    0.842701 0.842701

    """
    if np.isinf(a) or np.isinf(b):
        raise ValueError("Romberg integration only available "
                         "for finite limits.")
    vfunc = vectorize1(function, args, vec_func=vec_func)
    n = 1
    interval = [a, b]
    intrange = b - a
    ordsum = _difftrap(vfunc, interval, n)
    result = intrange * ordsum
    resmat = [[result]]
    err = np.inf
    last_row = resmat[0]
    for i in xrange(1, divmax + 1):
        n *= 2
        ordsum += _difftrap(vfunc, interval, n)
        row = [intrange * ordsum / n]
        for k in xrange(i):
            row.append(_romberg_diff(last_row[k], row[k], k + 1))
        result = row[i]
        lastresult = last_row[i - 1]
        if show:
            resmat.append(row)
        err = abs(result - lastresult)
        if err < tol or err < rtol * abs(result):
            break
        last_row = row
    else:
        warnings.warn(
            "divmax (%d) exceeded. Latest difference = %e" % (divmax, err),
            AccuracyWarning)

    if show:
        _printresmat(vfunc, interval, resmat)
    return result
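The inner loop applies Richardson extrapolation; a self-contained sketch of the same recurrence (mirroring SciPy's private ``_romberg_diff`` helper) on two trapezoid estimates of the integral of x**2 over [0, 1]:

def romberg_diff(b, c, k):
    # Eliminate the O(h**(2*k)) error term between successive rows.
    return (4.0 ** k * c - b) / (4.0 ** k - 1.0)

coarse = 0.375    # trapezoid rule with 2 panels
fine = 0.34375    # trapezoid rule with 4 panels
print(romberg_diff(coarse, fine, 1))  # 0.3333..., exact value is 1/3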
    def svd_reduce(self, max_rank, to_retain=None):
        """
        Reduce the rank of the matrix by retaining some SVD components.

        This corresponds to the "Broyden Rank Reduction Inverse"
        algorithm described in [1]_.

        Note that the SVD decomposition can be done by solving only a
        problem whose size is the effective rank of this matrix, which
        is viable even for large problems.

        Parameters
        ----------
        max_rank : int
            Maximum rank of this matrix after reduction.
        to_retain : int, optional
            Number of SVD components to retain when reduction is done
            (i.e. rank > max_rank). Default is ``max_rank - 2``.

        References
        ----------
        .. [1] B.A. van der Rotten, PhD thesis,
           "A limited memory Broyden method to solve high-dimensional
           systems of nonlinear equations". Mathematisch Instituut,
           Universiteit Leiden, The Netherlands (2003).

           https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf

        """
        if self.collapsed is not None:
            return

        p = max_rank
        if to_retain is not None:
            q = to_retain
        else:
            q = p - 2

        if self.cs:
            p = min(p, len(self.cs[0]))
        q = max(0, min(q, p - 1))

        m = len(self.cs)
        if m < p:
            # nothing to do
            return

        C = np.array(self.cs).T
        D = np.array(self.ds).T

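        # The two transforms below leave the product C.dot(D.T.conj())
        # unchanged: the economic QR of D folds R into C, and the SVD of C
        # then orders the update directions by singular value so the
        # smallest ones can be dropped.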
        D, R = qr(D, mode='economic')
        C = dot(C, R.T.conj())

        U, S, WH = svd(C, full_matrices=False, compute_uv=True)

        C = dot(C, inv(WH))
        D = dot(D, WH.T.conj())

        for k in xrange(q):
            self.cs[k] = C[:, k].copy()
            self.ds[k] = D[:, k].copy()

        del self.cs[q:]
        del self.ds[q:]
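
# Standalone sketch of the same rank-reduction idea (assumptions: plain
# NumPy/SciPy and random data, not the class above). M = C @ D.conj().T is a
# sum of rank-one updates; the QR+SVD steps keep M exact, and truncating to q
# columns then gives the best rank-q approximation of the update.
import numpy as np
from scipy.linalg import qr, svd

rng = np.random.RandomState(0)
C = rng.randn(50, 8)             # columns play the role of self.cs
D = rng.randn(50, 8)             # columns play the role of self.ds
M = C.dot(D.conj().T)

D2, R = qr(D, mode='economic')
C2 = C.dot(R.conj().T)
U, S, WH = svd(C2, full_matrices=False)
C2 = C2.dot(np.linalg.inv(WH))   # C2 becomes U * S, ordered by singular value
D2 = D2.dot(WH.conj().T)
assert np.allclose(C2.dot(D2.conj().T), M)   # product is unchanged

q = 3
M_reduced = C2[:, :q].dot(D2[:, :q].conj().T)  # rank-q reduction of M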
def binned_statistic_dd(sample, values, statistic='mean',
                        bins=10, range=None, expand_binnumbers=False):
    """
    Compute a multidimensional binned statistic for a set of data.

    This is a generalization of the histogramdd function.  A histogram divides
    the space into bins, and returns the count of the number of points in
    each bin.  This function allows the computation of the sum, mean, median,
    or other statistic of the values within each bin.

    Parameters
    ----------
    sample : array_like
        Data to histogram passed as a sequence of D arrays of length N, or
        as an (N,D) array.
    values : (N,) array_like or list of (N,) array_like
        The data on which the statistic will be computed.  This must be
        the same shape as `sample`, or a list of sequences - each with the
        same shape as `sample`.  If `values` is such a list, the statistic
        will be computed on each independently.
    statistic : string or callable, optional
        The statistic to compute (default is 'mean').
        The following statistics are available:

          * 'mean' : compute the mean of values for points within each bin.
            Empty bins will be represented by NaN.
          * 'median' : compute the median of values for points within each
            bin. Empty bins will be represented by NaN.
          * 'count' : compute the count of points within each bin.  This is
            identical to an unweighted histogram.  `values` array is not
            referenced.
          * 'sum' : compute the sum of values for points within each bin.
            This is identical to a weighted histogram.
          * 'std' : compute the standard deviation within each bin. This
            is implicitly calculated with ddof=0.
          * 'min' : compute the minimum of values for points within each bin.
            Empty bins will be represented by NaN.
          * 'max' : compute the maximum of values for points within each bin.
            Empty bins will be represented by NaN.
          * function : a user-defined function which takes a 1D array of
            values, and outputs a single numerical statistic. This function
            will be called on the values in each bin.  Empty bins will be
            represented by function([]), or NaN if this raises an error.

    bins : sequence or int, optional
        The bin specification must be in one of the following forms:

          * A sequence of arrays describing the bin edges along each dimension.
          * The number of bins for each dimension (nx, ny, ... = bins).
          * The number of bins for all dimensions (nx = ny = ... = bins).

    range : sequence, optional
        A sequence of lower and upper bin edges to be used if the edges are
        not given explicitly in `bins`. Defaults to the minimum and maximum
        values along each dimension.
    expand_binnumbers : bool, optional
        'False' (default): the returned `binnumber` is a shape (N,) array of
        linearized bin indices.
        'True': the returned `binnumber` is 'unraveled' into a shape (D,N)
        ndarray, where each row gives the bin numbers in the corresponding
        dimension.
        See the `binnumber` returned value, and the `Examples` section of
        `binned_statistic_2d`.

        .. versionadded:: 0.17.0

    Returns
    -------
    statistic : ndarray, shape(nx1, nx2, nx3,...)
        The values of the selected statistic in each bin.
    bin_edges : list of ndarrays
        A list of D arrays describing the (nxi + 1) bin edges for each
        dimension.
    binnumber : (N,) array of ints or (D,N) ndarray of ints
        This assigns to each element of `sample` an integer that represents the
        bin in which this observation falls.  The representation depends on the
        `expand_binnumbers` argument.  See `Notes` for details.


    See Also
    --------
    numpy.digitize, numpy.histogramdd, binned_statistic, binned_statistic_2d

    Notes
    -----
    Bin edges:
    All but the last (righthand-most) bin is half-open in each dimension.  In
    other words, if `bins` is ``[1, 2, 3, 4]``, then the first bin is
    ``[1, 2)`` (including 1, but excluding 2) and the second ``[2, 3)``.  The
    last bin, however, is ``[3, 4]``, which *includes* 4.

    `binnumber`:
    This returned argument assigns to each element of `sample` an integer that
    represents the bin in which it belongs.  The representation depends on the
    `expand_binnumbers` argument. If 'False' (default): The returned
    `binnumber` is a shape (N,) array of linearized indices mapping each
    element of `sample` to its corresponding bin (using row-major ordering).
    If 'True': The returned `binnumber` is a shape (D,N) ndarray where
    each row indicates bin placements for each dimension respectively.  In each
    dimension, a binnumber of `i` means the corresponding value is between
    (bin_edges[D][i-1], bin_edges[D][i]), for each dimension 'D'.

    .. versionadded:: 0.11.0

    """
    known_stats = ['mean', 'median', 'count', 'sum', 'std', 'min', 'max']
    if not callable(statistic) and statistic not in known_stats:
        raise ValueError('invalid statistic %r' % (statistic,))

    # `Ndim` is the number of dimensions (e.g. `2` for `binned_statistic_2d`)
    # `Dlen` is the length of elements along each dimension.
    # This code is based on np.histogramdd
    try:
        # `sample` is an ND-array.
        Dlen, Ndim = sample.shape
    except (AttributeError, ValueError):
        # `sample` is a sequence of 1D arrays.
        sample = np.atleast_2d(sample).T
        Dlen, Ndim = sample.shape

    # Store initial shape of `values` to preserve it in the output
    values = np.asarray(values)
    input_shape = list(values.shape)
    # Make sure that `values` is 2D to iterate over rows
    values = np.atleast_2d(values)
    Vdim, Vlen = values.shape

    # Make sure `values` match `sample`
    if statistic != 'count' and Vlen != Dlen:
        raise AttributeError('The number of `values` elements must match the '
                             'length of each `sample` dimension.')

    nbin = np.empty(Ndim, int)    # Number of bins in each dimension
    edges = Ndim * [None]         # Bin edges for each dim (will be 2D array)
    dedges = Ndim * [None]        # Spacing between edges (will be 2D array)

    try:
        M = len(bins)
        if M != Ndim:
            raise AttributeError('The dimension of bins must be equal '
                                 'to the dimension of the sample.')
    except TypeError:
        bins = Ndim * [bins]

    # Select range for each dimension
    # Used only if number of bins is given.
    if range is None:
        smin = np.atleast_1d(np.array(sample.min(axis=0), float))
        smax = np.atleast_1d(np.array(sample.max(axis=0), float))
    else:
        smin = np.zeros(Ndim)
        smax = np.zeros(Ndim)
        for i in xrange(Ndim):
            smin[i], smax[i] = range[i]

    # Make sure the bins have a finite width.
    for i in xrange(len(smin)):
        if smin[i] == smax[i]:
            smin[i] = smin[i] - .5
            smax[i] = smax[i] + .5

    # Create edge arrays
    for i in xrange(Ndim):
        if np.isscalar(bins[i]):
            nbin[i] = bins[i] + 2  # +2 for outlier bins
            edges[i] = np.linspace(smin[i], smax[i], nbin[i] - 1)
        else:
            edges[i] = np.asarray(bins[i], float)
            nbin[i] = len(edges[i]) + 1  # +1 for outlier bins
        dedges[i] = np.diff(edges[i])

    nbin = np.asarray(nbin)

    # Compute the bin number each sample falls into, in each dimension
    sampBin = [
        np.digitize(sample[:, i], edges[i])
        for i in xrange(Ndim)
    ]

    # Using `digitize`, values that fall on an edge are put in the right bin.
    # For the rightmost bin, we want values equal to the right
    # edge to be counted in the last bin, and not as an outlier.
    for i in xrange(Ndim):
        # Find the rounding precision
        decimal = int(-np.log10(dedges[i].min())) + 6
        # Find which points are on the rightmost edge.
        on_edge = np.where(np.around(sample[:, i], decimal) ==
                           np.around(edges[i][-1], decimal))[0]
        # Shift these points one bin to the left.
        sampBin[i][on_edge] -= 1

    # Compute the sample indices in the flattened statistic matrix.
    binnumbers = np.ravel_multi_index(sampBin, nbin)

    result = np.empty([Vdim, nbin.prod()], float)

    if statistic == 'mean':
        result.fill(np.nan)
        flatcount = np.bincount(binnumbers, None)
        a = flatcount.nonzero()
        for vv in xrange(Vdim):
            flatsum = np.bincount(binnumbers, values[vv])
            result[vv, a] = flatsum[a] / flatcount[a]
    elif statistic == 'std':
        result.fill(0)
        flatcount = np.bincount(binnumbers, None)
        a = flatcount.nonzero()
        for vv in xrange(Vdim):
            flatsum = np.bincount(binnumbers, values[vv])
            flatsum2 = np.bincount(binnumbers, values[vv] ** 2)
            result[vv, a] = np.sqrt(flatsum2[a] / flatcount[a] -
                                    (flatsum[a] / flatcount[a]) ** 2)
    elif statistic == 'count':
        result.fill(0)
        flatcount = np.bincount(binnumbers, None)
        a = np.arange(len(flatcount))
        result[:, a] = flatcount[np.newaxis, :]
    elif statistic == 'sum':
        result.fill(0)
        for vv in xrange(Vdim):
            flatsum = np.bincount(binnumbers, values[vv])
            a = np.arange(len(flatsum))
            result[vv, a] = flatsum
    elif statistic == 'median':
        result.fill(np.nan)
        for i in np.unique(binnumbers):
            for vv in xrange(Vdim):
                result[vv, i] = np.median(values[vv, binnumbers == i])
    elif statistic == 'min':
        result.fill(np.nan)
        for i in np.unique(binnumbers):
            for vv in xrange(Vdim):
                result[vv, i] = np.min(values[vv, binnumbers == i])
    elif statistic == 'max':
        result.fill(np.nan)
        for i in np.unique(binnumbers):
            for vv in xrange(Vdim):
                result[vv, i] = np.max(values[vv, binnumbers == i])
    elif callable(statistic):
        with np.errstate(invalid='ignore'), suppress_warnings() as sup:
            sup.filter(RuntimeWarning)
            try:
                null = statistic([])
            except Exception:
                null = np.nan
        result.fill(null)
        for i in np.unique(binnumbers):
            for vv in xrange(Vdim):
                result[vv, i] = statistic(values[vv, binnumbers == i])

    # Shape into a proper matrix
    result = result.reshape(np.append(Vdim, nbin))

    # Remove outliers (indices 0 and -1 for each bin-dimension).
    core = tuple([slice(None)] + Ndim * [slice(1, -1)])
    result = result[core]

    # Unravel binnumbers into an ndarray, each row the bins for each dimension
    if expand_binnumbers and Ndim > 1:
        binnumbers = np.asarray(np.unravel_index(binnumbers, nbin))

    if np.any(result.shape[1:] != nbin - 2):
        raise RuntimeError('Internal Shape Error')

    # Reshape to have output (`result`) match input (`values`) shape
    result = result.reshape(input_shape[:-1] + list(nbin-2))

    return BinnedStatisticddResult(result, edges, binnumbers)
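
# Usage sketch for the function above (assuming it is
# scipy.stats.binned_statistic_dd): bin 2-D points and take the mean of a
# value per bin; expand_binnumbers=True returns one bin index per dimension
# instead of a linearized index.
import numpy as np
from scipy import stats

rng = np.random.RandomState(0)
sample = rng.rand(100, 2)      # 100 points in 2 dimensions
values = rng.rand(100)         # one value per point
stat, edges, binnumber = stats.binned_statistic_dd(
    sample, values, statistic='mean', bins=[4, 4], expand_binnumbers=True)
print(stat.shape)       # (4, 4): mean of `values` in each of the 4x4 bins
print(binnumber.shape)  # (2, 100): bin index per dimension for each point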
# --- Bag-of-visual-words example. The descriptor-extraction loop that fills
# `des_list` was missing here; the reconstruction below is an assumption
# based on the rest of the snippet (OpenCV SIFT descriptors per image, with
# `image_paths` assumed to be a predefined list of image file paths). ---
import numpy as np
import cv2
from scipy.cluster.vq import kmeans, vq
from sklearn.preprocessing import StandardScaler

sift = cv2.SIFT_create()
des_list = []
for image_path in image_paths:
    img = cv2.imread(image_path)
    kpts, des = sift.detectAndCompute(img, None)
    des_list.append((image_path, des))

print("Feature Extraction Finished")

descriptors = des_list[0][1]
for image_path, descriptor in des_list[1:]:
    descriptors = np.vstack((descriptors, descriptor))

# Perform k-means clustering
k = 100
voc, variance = kmeans(descriptors, k, 1)
print("Clustering K Means Finished")

# Calculate the histogram of features
im_features = np.zeros((len(image_paths), k), "float32")
for i in xrange(len(image_paths)):
    words, distance = vq(des_list[i][1], voc)
    for w in words:
        im_features[i][w] += 1
print("Histogram Finished")

# Perform Tf-Idf vectorization
nbr_occurences = np.sum((im_features > 0) * 1, axis=0)
idf = np.array(
    np.log((1.0 * len(image_paths) + 1) / (1.0 * nbr_occurences + 1)),
    'float32')
print("Vectorization Finished")

# Scaling the words
stdSlr = StandardScaler().fit(im_features)
im_features = stdSlr.transform(im_features)