Example #1
    def __init__(self, xi, yi, axis=0):
        _Interpolator1DWithDerivatives.__init__(self, xi, yi, axis)

        self.xi = np.asarray(xi)
        self.yi = self._reshape_yi(yi)
        self.n, self.r = self.yi.shape

        c = np.zeros((self.n+1, self.r), dtype=self.dtype)
        c[0] = self.yi[0]
        Vk = np.zeros((self.n, self.r), dtype=self.dtype)
        for k in xrange(1,self.n):
            s = 0
            while s <= k and xi[k-s] == xi[k]:
                s += 1
            s -= 1
            Vk[0] = self.yi[k]/float(factorial(s))
            for i in xrange(k-s):
                if xi[i] == xi[k]:
                    raise ValueError("Elements if `xi` can't be equal.")
                if s == 0:
                    Vk[i+1] = (c[i]-Vk[i])/(xi[i]-xi[k])
                else:
                    Vk[i+1] = (Vk[i+1]-Vk[i])/(xi[i]-xi[k])
            c[k] = Vk[k-s]
        self.c = c
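The constructor above builds the divided-difference coefficients that appear to back SciPy's Krogh interpolator, with repeated nodes encoding derivative data. A minimal usage sketch of the public interface, assuming scipy.interpolate.KroghInterpolator and made-up sample values:

from scipy.interpolate import KroghInterpolator

# Repeated x-values mean the matching y-values are successive derivatives:
# here f(0) = 0, f'(0) = 1 and f(1) = 2, so the interpolant is x + x**2.
xi = [0.0, 0.0, 1.0]
yi = [0.0, 1.0, 2.0]
p = KroghInterpolator(xi, yi)
print(p(0.5))                # expected 0.75
print(p.derivative(0.0, 1))  # expected 1.0, the prescribed slope at 0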
Example #2
 def __add__(self, other):
     # First check if argument is a scalar
     if isscalarlike(other):
         new = dok_matrix(self.shape, dtype=self.dtype)
         # Add this scalar to every element.
         M, N = self.shape
         for i in xrange(M):
             for j in xrange(N):
                 aij = self.get((i, j), 0) + other
                 if aij != 0:
                     new[i, j] = aij
         # new.dtype.char = self.dtype.char
     elif isinstance(other, dok_matrix):
         if other.shape != self.shape:
             raise ValueError("matrix dimensions are not equal")
          # We could alternatively set the dimensions to the largest of
         # the two matrices to be summed.  Would this be a good idea?
         new = dok_matrix(self.shape, dtype=self.dtype)
         new.update(self)
         for key in other.keys():
             new[key] += other[key]
     elif isspmatrix(other):
         csc = self.tocsc()
         new = csc + other
     elif isdense(other):
         new = self.todense() + other
     else:
         raise TypeError("data type not understood")
     return new
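For context, a small usage sketch of the matrix-plus-matrix branch above, assuming scipy.sparse.dok_matrix; the 3x3 demo matrices are arbitrary, and note that the scalar branch as coded here may be rejected by newer SciPy releases:

from scipy.sparse import dok_matrix

A = dok_matrix((3, 3))
A[0, 1] = 2.0
A[2, 2] = 5.0

B = dok_matrix((3, 3))
B[0, 1] = -2.0
B[1, 0] = 1.0

C = A + B           # dispatches to the matrix + matrix path of __add__
print(C.toarray())  # zeros except C[1, 0] == 1 and C[2, 2] == 5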
Example #3
 def __radd__(self, other):
     # First check if argument is a scalar
     if isscalarlike(other):
         new = dok_matrix(self.shape, dtype=self.dtype)
         # Add this scalar to every element.
         M, N = self.shape
         for i in xrange(M):
             for j in xrange(N):
                 aij = self.get((i, j), 0) + other
                 if aij != 0:
                     new[i, j] = aij
     elif isinstance(other, dok_matrix):
         if other.shape != self.shape:
             raise ValueError("matrix dimensions are not equal")
         new = dok_matrix(self.shape, dtype=self.dtype)
         new.update(self)
         for key in other:
             new[key] += other[key]
     elif isspmatrix(other):
         csc = self.tocsc()
         new = csc + other
     elif isdense(other):
         new = other + self.todense()
     else:
         raise TypeError("data type not understood")
     return new
Example #4
    def _evaluate_derivatives(self, x, der=None):
        n = self.n
        r = self.r

        if der is None:
            der = self.n
        pi = np.zeros((n, len(x)))
        w = np.zeros((n, len(x)))
        pi[0] = 1
        p = np.zeros((len(x), self.r))
        p += self.c[0,np.newaxis,:]

        for k in xrange(1,n):
            w[k-1] = x - self.xi[k-1]
            pi[k] = w[k-1]*pi[k-1]
            p += pi[k,:,np.newaxis]*self.c[k]

        cn = np.zeros((max(der,n+1), len(x), r), dtype=self.dtype)
        cn[:n+1,:,:] += self.c[:n+1,np.newaxis,:]
        cn[0] = p
        for k in xrange(1,n):
            for i in xrange(1,n-k+1):
                pi[i] = w[k+i-1]*pi[i-1]+pi[i]
                cn[k] = cn[k]+pi[i,:,np.newaxis]*cn[k+i]
            cn[k] *= factorial(k)

        cn[n,:,:] = 0
        return cn[:der]
Example #5
def lagrange(x, w):
    """
    Return a Lagrange interpolating polynomial.

    Given two 1-D arrays `x` and `w`, returns the Lagrange interpolating
    polynomial through the points ``(x, w)``.

    Warning: This implementation is numerically unstable. Do not expect to
    be able to use more than about 20 points even if they are chosen optimally.

    Parameters
    ----------
    x : array_like
        `x` represents the x-coordinates of a set of datapoints.
    w : array_like
        `w` represents the y-coordinates of a set of datapoints, i.e. f(`x`).

    """
    M = len(x)
    p = poly1d(0.0)
    for j in xrange(M):
        pt = poly1d(w[j])
        for k in xrange(M):
            if k == j: continue
            fac = x[j]-x[k]
            pt *= poly1d([1.0,-x[k]])/fac
        p += pt
    return p
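A short usage sketch, assuming the scipy.interpolate.lagrange function shown above; the three sample points are illustrative:

import numpy as np
from scipy.interpolate import lagrange

x = np.array([0.0, 1.0, 2.0])
y = x ** 2                 # sample a parabola at three points
poly = lagrange(x, y)      # returns a numpy.poly1d object
print(poly(1.5))           # ~2.25: three points recover x**2 exactly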
Example #6
    def extend(self, xi, yi, orders=None):
        """
        Extend the PiecewisePolynomial by a list of points

        Parameters
        ----------
        xi : array_like of length N1
            a sorted list of x-coordinates
        yi : list of lists of length N1
            yi[i] (if axis==0) is the list of derivatives known at xi[i]
        orders : list of integers, or integer
            a list of polynomial orders, or a single universal order
        direction : {None, 1, -1}
            indicates whether the xi are increasing or decreasing
            +1 indicates increasing
            -1 indicates decreasing
            None indicates that it should be deduced from the first two xi

        """
        if self._y_axis == 0:
            # allow yi to be a ragged list
            for i in xrange(len(xi)):
                if orders is None or _isscalar(orders):
                    self.append(xi[i],yi[i],orders)
                else:
                    self.append(xi[i],yi[i],orders[i])
        else:
            preslice = (slice(None,None,None),) * self._y_axis
            for i in xrange(len(xi)):
                if orders is None or _isscalar(orders):
                    self.append(xi[i],yi[preslice + (i,)],orders)
                else:
                    self.append(xi[i],yi[preslice + (i,)],orders[i])
Example #7
 def test_cascade(self):
     for J in xrange(1, 7):
         for i in xrange(1, 5):
             lpcoef = wavelets.daub(i)
             k = len(lpcoef)
             x, phi, psi = wavelets.cascade(lpcoef, J)
             assert_(len(x) == len(phi) == len(psi))
             assert_equal(len(x), (k - 1) * 2 ** J)
Example #8
 def test_improvement(self):
     import time
     start = time.time()
     for i in xrange(100):
         quad(self.lib.sin, 0, 100)
     fast = time.time() - start
     start = time.time()
     for i in xrange(100):
         quad(math.sin, 0, 100)
     slow = time.time() - start
     assert_(fast < 0.5*slow, (fast, slow))
Example #9
    def test_nd_simplex(self):
        # simple smoke test: triangulate a n-dimensional simplex
        for nd in xrange(2, 8):
            points = np.zeros((nd+1, nd))
            for j in xrange(nd):
                points[j,j] = 1.0
            points[-1,:] = 1.0

            tri = qhull.Delaunay(points)

            tri.vertices.sort()

            assert_equal(tri.vertices, np.arange(nd+1, dtype=np.int)[None,:])
            assert_equal(tri.neighbors, -1 + np.zeros((nd+1), dtype=np.int)[None,:])
Example #10
    def __getitem__(self, index):
        """Return the element(s) index=(i, j), where j may be a slice.
        This always returns a copy for consistency, since slices into
        Python lists return copies.
        """
        i, j = self._unpack_index(index)

        if isscalarlike(i) and isscalarlike(j):
            return self._get1(int(i), int(j))

        i, j = self._index_to_arrays(i, j)
        if i.size == 0:
            return lil_matrix((0,0), dtype=self.dtype)
        return self.__class__([[self._get1(int(i[ii, jj]), int(j[ii, jj])) for jj in
                                xrange(i.shape[1])] for ii in 
                               xrange(i.shape[0])])
Example #11
    def test_concurrent_ok(self):
        f = lambda t, y: 1.0

        for k in xrange(3):
            for sol in ('vode', 'zvode', 'lsoda', 'dopri5', 'dop853'):
                r = ode(f).set_integrator(sol)
                r.set_initial_value(0, 0)

                r2 = ode(f).set_integrator(sol)
                r2.set_initial_value(0, 0)

                r.integrate(r.t + 0.1)
                r2.integrate(r2.t + 0.1)
                r2.integrate(r2.t + 0.1)

                assert_allclose(r.y, 0.1)
                assert_allclose(r2.y, 0.2)

            for sol in ('dopri5', 'dop853'):
                r = ode(f).set_integrator(sol)
                r.set_initial_value(0, 0)

                r2 = ode(f).set_integrator(sol)
                r2.set_initial_value(0, 0)

                r.integrate(r.t + 0.1)
                r.integrate(r.t + 0.1)
                r2.integrate(r2.t + 0.1)
                r.integrate(r.t + 0.1)
                r2.integrate(r2.t + 0.1)

                assert_allclose(r.y, 0.3)
                assert_allclose(r2.y, 0.2)
Example #12
    def derivatives(self, x, der):
        """
        Evaluate a derivative of the piecewise polynomial

        Parameters
        ----------
        x : scalar or array_like of length N

        der : integer
            how many derivatives (including the function value as
            0th derivative) to extract

        Returns
        -------
        y : array_like of shape der by R or der by N or der by N by R

        """
        if _isscalar(x):
            pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n - 2)
            y = self.polynomials[pos].derivatives(x, der=der)
        else:
            x = np.asarray(x)
            m = len(x)
            pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n - 2)
            if self.vector_valued:
                y = np.zeros((der, m, self.r))
            else:
                y = np.zeros((der, m))
            for i in xrange(self.n - 1):
                c = pos == i
                y[:, c] = self.polynomials[i].derivatives(x[c], der=der)
        return y
Example #13
    def __call__(self, x):
        """Evaluate the piecewise polynomial

        Parameters
        ----------
        x : scalar or array-like of length N

        Returns
        -------
        y : scalar or array-like of length R or length N or N by R
        """
        if _isscalar(x):
            pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n - 2)
            y = self.polynomials[pos](x)
        else:
            x = np.asarray(x)
            m = len(x)
            pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n - 2)
            if self.vector_valued:
                y = np.zeros((m, self.r))
            else:
                y = np.zeros(m)
            for i in xrange(self.n - 1):
                c = pos == i
                y[c] = self.polynomials[i](x[c])
        return y
Example #14
 def _setitem_setrow(self, row, data, j, xrow, xdata, xcols):
     if isinstance(j, slice):
         j = self._slicetoseq(j, self.shape[1])
     if issequence(j):
         if xcols == len(j):
             for jj, xi in zip(j, xrange(xcols)):
                 pos = bisect_left(xrow, xi)
                 if pos != len(xdata) and xrow[pos] == xi:
                     self._insertat2(row, data, jj, xdata[pos])
                 else:
                     self._insertat2(row, data, jj, 0)
         elif xcols == 1:           # OK, broadcast across row
             if len(xdata) > 0 and xrow[0] == 0:
                 val = xdata[0]
             else:
                 val = 0
             for jj in j:
                 self._insertat2(row, data, jj,val)
         else:
             raise IndexError('invalid index')
     elif np.isscalar(j):
         if not xcols == 1:
             raise ValueError('array dimensions are not compatible for copy')
         if len(xdata) > 0 and xrow[0] == 0:
             self._insertat2(row, data, j, xdata[0])
         else:
             self._insertat2(row, data, j, 0)
     else:
         raise ValueError('invalid column value: %s' % str(j))
Example #15
    def __init__(self, xi, yi=None):
        """Construct an object capable of interpolating functions sampled at xi

        The values yi need to be provided before the function is evaluated,
        but none of the preprocessing depends on them, so rapid updates
        are possible.

        Parameters
        ----------
        xi : array-like of length N
            The x coordinates of the points the polynomial should pass through
        yi : array-like N by R or None
            The y coordinates of the points the polynomial should pass through;
            if R>1 the polynomial is vector-valued. If None the y values
            will be supplied later.
        """
        self.n = len(xi)
        self.xi = np.asarray(xi)
        if yi is not None and len(yi) != len(self.xi):
            raise ValueError("yi dimensions do not match xi dimensions")
        self.set_yi(yi)
        self.wi = np.zeros(self.n)
        self.wi[0] = 1
        for j in xrange(1, self.n):
            self.wi[:j] *= self.xi[j] - self.xi[:j]
            self.wi[j] = np.multiply.reduce(self.xi[:j] - self.xi[j])
        self.wi **= -1
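The weights computed above are what make the "rapid updates" mentioned in the docstring possible: the y-values can be attached or replaced after construction. A minimal sketch, assuming scipy.interpolate.BarycentricInterpolator and arbitrary sample data:

import numpy as np
from scipy.interpolate import BarycentricInterpolator

xi = np.linspace(0.0, 1.0, 5)
p = BarycentricInterpolator(xi)  # weights depend only on xi
p.set_yi(np.sin(xi))             # y-values supplied (or swapped) later
print(p(0.3), np.sin(0.3))       # the two values should be close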
Example #16
 def _setitem_setrow(self, row, data, j, xrow, xdata, xcols):
     if isinstance(j, slice):
         j = self._slicetoseq(j, self.shape[1])
     if issequence(j):
         if xcols == len(j):
             for jj, xi in zip(j, xrange(xcols)):
                 pos = bisect_left(xrow, xi)
                 if pos != len(xdata) and xrow[pos] == xi:
                     self._insertat2(row, data, jj, xdata[pos])
                 else:
                     self._insertat2(row, data, jj, 0)
         elif xcols == 1:  # OK, broadcast across row
             if len(xdata) > 0 and xrow[0] == 0:
                 val = xdata[0]
             else:
                 val = 0
             for jj in j:
                 self._insertat2(row, data, jj, val)
         else:
             raise IndexError('invalid index')
     elif np.isscalar(j):
         if not xcols == 1:
             raise ValueError(
                 'array dimensions are not compatible for copy')
         if len(xdata) > 0 and xrow[0] == 0:
             self._insertat2(row, data, j, xdata[0])
         else:
             self._insertat2(row, data, j, 0)
     else:
         raise ValueError('invalid column value: %s' % str(j))
Example #17
    def test_concurrent_ok(self):
        f = lambda t, y: 1.0

        for k in xrange(3):
            for sol in ('vode', 'zvode', 'lsoda', 'dopri5', 'dop853'):
                r = ode(f).set_integrator(sol)
                r.set_initial_value(0, 0)

                r2 = ode(f).set_integrator(sol)
                r2.set_initial_value(0, 0)

                r.integrate(r.t + 0.1)
                r2.integrate(r2.t + 0.1)
                r2.integrate(r2.t + 0.1)

                assert_allclose(r.y, 0.1)
                assert_allclose(r2.y, 0.2)

            for sol in ('dopri5', 'dop853'):
                r = ode(f).set_integrator(sol)
                r.set_initial_value(0, 0)

                r2 = ode(f).set_integrator(sol)
                r2.set_initial_value(0, 0)

                r.integrate(r.t + 0.1)
                r.integrate(r.t + 0.1)
                r2.integrate(r2.t + 0.1)
                r.integrate(r.t + 0.1)
                r2.integrate(r2.t + 0.1)

                assert_allclose(r.y, 0.3)
                assert_allclose(r2.y, 0.2)
Example #18
    def _get_slice(self, i, start, stop, stride, shape):
        """Returns a copy of the elements
            [i, start:stop:stride] for row-oriented matrices
            [start:stop:stride, i] for column-oriented matrices
        """
        if stride != 1:
            raise ValueError("slicing with step != 1 not supported")
        if stop <= start:
            raise ValueError("slice width must be >= 1")

        # TODO make [i,:] faster
        # TODO implement [i,x:y:z]

        indices = []

        for ind in xrange(self.indptr[i], self.indptr[i + 1]):
            if self.indices[ind] >= start and self.indices[ind] < stop:
                indices.append(ind)

        index = self.indices[indices] - start
        data = self.data[indices]
        indptr = np.array([0, len(indices)])
        return self.__class__((data, index, indptr),
                              shape=shape,
                              dtype=self.dtype)
Example #19
 def test_derivatives(self):
     P = PiecewisePolynomial(self.xi, self.yi, 3)
     m = 4
     r = P.derivatives(self.test_xs, m)
     #print r.shape, r
     for i in xrange(m):
         assert_almost_equal(P.derivative(self.test_xs, i), r[i])
Example #20
 def test_exponential(self):
     degree = 5
     p = approximate_taylor_polynomial(np.exp, 0, degree, 1, 15)
     for i in xrange(degree + 1):
         assert_almost_equal(p(0), 1)
         p = p.deriv()
     assert_almost_equal(p(0), 0)
Example #21
    def piecefuncgen(num):
        Mk = order // 2 - num
        if (Mk < 0):
            return 0  # final function is 0
        coeffs = [(1 - 2 * (k % 2)) * float(comb(order + 1, k, exact=1)) / fval
                  for k in xrange(Mk + 1)]
        shifts = [-bound - k for k in xrange(Mk + 1)]
        #print "Adding piece number %d with coeffs %s and shifts %s" \
        #      % (num, str(coeffs), str(shifts))

        def thefunc(x):
            res = 0.0
            for k in range(Mk + 1):
                res += coeffs[k] * (x + shifts[k]) ** order
            return res
        return thefunc
Example #22
 def test_derivatives(self):
     P = PiecewisePolynomial(self.xi,self.yi,3)
     m = 4
     r = P.derivatives(self.test_xs,m)
     #print r.shape, r
     for i in xrange(m):
         assert_almost_equal(P.derivative(self.test_xs,i),r[i])
Example #23
 def test_call(self):
     poly = []
     for n in xrange(5):
         poly.extend([
             x.strip() for x in ("""
             orth.jacobi(%(n)d,0.3,0.9)
             orth.sh_jacobi(%(n)d,0.3,0.9)
             orth.genlaguerre(%(n)d,0.3)
             orth.laguerre(%(n)d)
             orth.hermite(%(n)d)
             orth.hermitenorm(%(n)d)
             orth.gegenbauer(%(n)d,0.3)
             orth.chebyt(%(n)d)
             orth.chebyu(%(n)d)
             orth.chebyc(%(n)d)
             orth.chebys(%(n)d)
             orth.sh_chebyt(%(n)d)
             orth.sh_chebyu(%(n)d)
             orth.legendre(%(n)d)
             orth.sh_legendre(%(n)d)
             """ % dict(n=n)).split()
         ])
     olderr = np.seterr(all='ignore')
     try:
         for pstr in poly:
             p = eval(pstr)
             assert_almost_equal(p(0.315),
                                 np.poly1d(p)(0.315),
                                 err_msg=pstr)
     finally:
         np.seterr(**olderr)
Example #24
 def test_exponential(self):
     degree = 5
     p = approximate_taylor_polynomial(np.exp, 0, degree, 1, 15)
     for i in xrange(degree+1):
         assert_almost_equal(p(0),1)
         p = p.deriv()
     assert_almost_equal(p(0),0)
Example #25
 def setUp(self):
     self.tck = splrep([0,1,2,3,4,5], [0,10,-1,3,7,2], s=0)
     self.test_xs = np.linspace(-1,6,100)
     self.spline_ys = splev(self.test_xs, self.tck)
     self.spline_yps = splev(self.test_xs, self.tck, der=1)
     self.xi = np.unique(self.tck[0])
     self.yi = [[splev(x, self.tck, der=j) for j in xrange(3)] for x in self.xi]
Example #26
 def test_call(self):
     poly = []
     for n in xrange(5):
         poly.extend([x.strip() for x in
             ("""
             orth.jacobi(%(n)d,0.3,0.9)
             orth.sh_jacobi(%(n)d,0.3,0.9)
             orth.genlaguerre(%(n)d,0.3)
             orth.laguerre(%(n)d)
             orth.hermite(%(n)d)
             orth.hermitenorm(%(n)d)
             orth.gegenbauer(%(n)d,0.3)
             orth.chebyt(%(n)d)
             orth.chebyu(%(n)d)
             orth.chebyc(%(n)d)
             orth.chebys(%(n)d)
             orth.sh_chebyt(%(n)d)
             orth.sh_chebyu(%(n)d)
             orth.legendre(%(n)d)
             orth.sh_legendre(%(n)d)
             """ % dict(n=n)).split()
         ])
     olderr = np.seterr(all='ignore')
     try:
         for pstr in poly:
             p = eval(pstr)
             assert_almost_equal(p(0.315), np.poly1d(p)(0.315), err_msg=pstr)
     finally:
         np.seterr(**olderr)
Example #27
    def _check_dot(self, jac_cls, complex=False, tol=1e-6, **kw):
        np.random.seed(123)

        N = 7

        def rand(*a):
            q = np.random.rand(*a)
            if complex:
                q = q + 1j * np.random.rand(*a)
            return q

        def assert_close(a, b, msg):
            d = abs(a - b).max()
            f = tol + abs(b).max() * tol
            if d > f:
                raise AssertionError('%s: err %g' % (msg, d))

        self.A = rand(N, N)

        # initialize
        x0 = np.random.rand(N)
        jac = jac_cls(**kw)
        jac.setup(x0, self._func(x0), self._func)

        # check consistency
        for k in xrange(2 * N):
            v = rand(N)

            if hasattr(jac, '__array__'):
                Jd = np.array(jac)
                if hasattr(jac, 'solve'):
                    Gv = jac.solve(v)
                    Gv2 = np.linalg.solve(Jd, v)
                    assert_close(Gv, Gv2, 'solve vs array')
                if hasattr(jac, 'rsolve'):
                    Gv = jac.rsolve(v)
                    Gv2 = np.linalg.solve(Jd.T.conj(), v)
                    assert_close(Gv, Gv2, 'rsolve vs array')
                if hasattr(jac, 'matvec'):
                    Jv = jac.matvec(v)
                    Jv2 = np.dot(Jd, v)
                    assert_close(Jv, Jv2, 'dot vs array')
                if hasattr(jac, 'rmatvec'):
                    Jv = jac.rmatvec(v)
                    Jv2 = np.dot(Jd.T.conj(), v)
                    assert_close(Jv, Jv2, 'rmatvec vs array')

            if hasattr(jac, 'matvec') and hasattr(jac, 'solve'):
                Jv = jac.matvec(v)
                Jv2 = jac.solve(jac.matvec(Jv))
                assert_close(Jv, Jv2, 'dot vs solve')

            if hasattr(jac, 'rmatvec') and hasattr(jac, 'rsolve'):
                Jv = jac.rmatvec(v)
                Jv2 = jac.rmatvec(jac.rsolve(Jv))
                assert_close(Jv, Jv2, 'rmatvec vs rsolve')

            x = rand(N)
            jac.update(x, self._func(x))
Example #28
    def _check_dot(self, jac_cls, complex=False, tol=1e-6, **kw):
        np.random.seed(123)

        N = 7

        def rand(*a):
            q = np.random.rand(*a)
            if complex:
                q = q + 1j*np.random.rand(*a)
            return q

        def assert_close(a, b, msg):
            d = abs(a - b).max()
            f = tol + abs(b).max()*tol
            if d > f:
                raise AssertionError('%s: err %g' % (msg, d))

        self.A = rand(N, N)

        # initialize
        x0 = np.random.rand(N)
        jac = jac_cls(**kw)
        jac.setup(x0, self._func(x0), self._func)

        # check consistency
        for k in xrange(2*N):
            v = rand(N)

            if hasattr(jac, '__array__'):
                Jd = np.array(jac)
                if hasattr(jac, 'solve'):
                    Gv = jac.solve(v)
                    Gv2 = np.linalg.solve(Jd, v)
                    assert_close(Gv, Gv2, 'solve vs array')
                if hasattr(jac, 'rsolve'):
                    Gv = jac.rsolve(v)
                    Gv2 = np.linalg.solve(Jd.T.conj(), v)
                    assert_close(Gv, Gv2, 'rsolve vs array')
                if hasattr(jac, 'matvec'):
                    Jv = jac.matvec(v)
                    Jv2 = np.dot(Jd, v)
                    assert_close(Jv, Jv2, 'dot vs array')
                if hasattr(jac, 'rmatvec'):
                    Jv = jac.rmatvec(v)
                    Jv2 = np.dot(Jd.T.conj(), v)
                    assert_close(Jv, Jv2, 'rmatvec vs array')

            if hasattr(jac, 'matvec') and hasattr(jac, 'solve'):
                Jv = jac.matvec(v)
                Jv2 = jac.solve(jac.matvec(Jv))
                assert_close(Jv, Jv2, 'dot vs solve')

            if hasattr(jac, 'rmatvec') and hasattr(jac, 'rsolve'):
                Jv = jac.rmatvec(v)
                Jv2 = jac.rmatvec(jac.rsolve(Jv))
                assert_close(Jv, Jv2, 'rmatvec vs rsolve')

            x = rand(N)
            jac.update(x, self._func(x))
Example #29
def matrixmultiply(a, b):
    if len(b.shape) == 1:
        b_is_vector = True
        b = b[:,newaxis]
    else:
        b_is_vector = False
    assert_(a.shape[1] == b.shape[0])
    c = zeros((a.shape[0], b.shape[1]), common_type(a, b))
    for i in xrange(a.shape[0]):
        for j in xrange(b.shape[1]):
            s = 0
            for k in xrange(a.shape[1]):
                s += a[i,k] * b[k, j]
            c[i,j] = s
    if b_is_vector:
        c = c.reshape((a.shape[0],))
    return c
Example #30
 def test_vector(self):
     xs = [0, 1, 2]
     ys = np.array([[0,1],[1,0],[2,1]])
     P = BarycentricInterpolator(xs,ys)
     Pi = [BarycentricInterpolator(xs,ys[:,i]) for i in xrange(ys.shape[1])]
     test_xs = np.linspace(-1,3,100)
     assert_almost_equal(P(test_xs),
             np.rollaxis(np.asarray([p(test_xs) for p in Pi]),-1))
Example #31
 def test_inverse(self):
     for n in xrange(1, 10):
         a = hilbert(n)
         b = invhilbert(n)
         # The Hilbert matrix is increasingly badly conditioned,
         # so take that into account in the test
         c = cond(a)
         assert_allclose(a.dot(b), eye(n), atol=1e-15*c, rtol=1e-15*c)
Example #32
 def setUp(self):
     self.tck = splrep([0, 1, 2, 3, 4, 5], [0, 10, -1, 3, 7, 2], s=0)
     self.test_xs = np.linspace(-1, 6, 100)
     self.spline_ys = splev(self.test_xs, self.tck)
     self.spline_yps = splev(self.test_xs, self.tck, der=1)
     self.xi = np.unique(self.tck[0])
     self.yi = [[splev(x, self.tck, der=j) for j in xrange(3)]
                for x in self.xi]
Example #33
def matrixmultiply(a, b):
    if len(b.shape) == 1:
        b_is_vector = True
        b = b[:,newaxis]
    else:
        b_is_vector = False
    assert_equal(a.shape[1], b.shape[0])
    c = zeros((a.shape[0], b.shape[1]), common_type(a, b))
    for i in xrange(a.shape[0]):
        for j in xrange(b.shape[1]):
            s = 0
            for k in xrange(a.shape[1]):
                s += a[i,k] * b[k, j]
            c[i,j] = s
    if b_is_vector:
        c = c.reshape((a.shape[0],))
    return c
Example #34
    def __getitem__(self, index):
        """Return the element(s) index=(i, j), where j may be a slice.
        This always returns a copy for consistency, since slices into
        Python lists return copies.
        """
        i, j = self._unpack_index(index)

        if isscalarlike(i) and isscalarlike(j):
            return self._get1(int(i), int(j))

        i, j = self._index_to_arrays(i, j)
        if i.size == 0:
            return lil_matrix((0, 0), dtype=self.dtype)
        return self.__class__([[
            self._get1(int(i[ii, jj]), int(j[ii, jj]))
            for jj in xrange(i.shape[1])
        ] for ii in xrange(i.shape[0])])
Example #35
 def test_inverse(self):
     for n in xrange(1, 10):
         a = hilbert(n)
         b = invhilbert(n)
         # The Hilbert matrix is increasingly badly conditioned,
         # so take that into account in the test
         c = cond(a)
         assert_allclose(a.dot(b), eye(n), atol=1e-15 * c, rtol=1e-15 * c)
Example #36
def _boolrelextrema(data, comparator,
                  axis=0, order=1, mode='clip'):
    """
    Calculate the relative extrema of `data`.

    Relative extrema are calculated by finding locations where
    ``comparator(data[n], data[n+1:n+order+1])`` is True.

    Parameters
    ----------
    data : ndarray
        Array in which to find the relative extrema.
    comparator : callable
        Function to use to compare two data points.
        Should take 2 numbers as arguments.
    axis : int, optional
        Axis over which to select from `data`.  Default is 0.
    order : int, optional
        How many points on each side to use for the comparison
        to consider ``comparator(n,n+x)`` to be True.
    mode : str, optional
        How the edges of the vector are treated.  'wrap' (wrap around) or
        'clip' (treat overflow as the same as the last (or first) element).
        Default 'clip'.  See numpy.take

    Returns
    -------
    extrema : ndarray
        Boolean array of the same shape as `data`, True at the relative
        extrema and False elsewhere.

    See also
    --------
    argrelmax, argrelmin

    Examples
    --------
    >>> testdata = np.array([1,2,3,2,1])
    >>> argrelextrema(testdata, np.greater, axis=0)
    array([False, False,  True, False, False], dtype=bool)

    """
    if((int(order) != order) or (order < 1)):
        raise ValueError('Order must be an int >= 1')

    datalen = data.shape[axis]
    locs = np.arange(0, datalen)

    results = np.ones(data.shape, dtype=bool)
    main = data.take(locs, axis=axis, mode=mode)
    for shift in xrange(1, order + 1):
        plus = data.take(locs + shift, axis=axis, mode=mode)
        minus = data.take(locs - shift, axis=axis, mode=mode)
        results &= comparator(main, plus)
        results &= comparator(main, minus)
        if(~results.any()):
            return results
    return results
Example #37
def _boolrelextrema(data, comparator, axis=0, order=1, mode='clip'):
    """
    Calculate the relative extrema of `data`.

    Relative extrema are calculated by finding locations where
    ``comparator(data[n], data[n+1:n+order+1])`` is True.

    Parameters
    ----------
    data : ndarray
        Array in which to find the relative extrema.
    comparator : callable
        Function to use to compare two data points.
        Should take 2 numbers as arguments.
    axis : int, optional
        Axis over which to select from `data`.  Default is 0.
    order : int, optional
        How many points on each side to use for the comparison
        to consider ``comparator(n,n+x)`` to be True.
    mode : str, optional
        How the edges of the vector are treated.  'wrap' (wrap around) or
        'clip' (treat overflow as the same as the last (or first) element).
        Default 'clip'.  See numpy.take

    Returns
    -------
    extrema : ndarray
        Boolean array of the same shape as `data`, True at the relative
        extrema and False elsewhere.

    See also
    --------
    argrelmax, argrelmin

    Examples
    --------
    >>> testdata = np.array([1,2,3,2,1])
    >>> argrelextrema(testdata, np.greater, axis=0)
    array([False, False,  True, False, False], dtype=bool)

    """
    if ((int(order) != order) or (order < 1)):
        raise ValueError('Order must be an int >= 1')

    datalen = data.shape[axis]
    locs = np.arange(0, datalen)

    results = np.ones(data.shape, dtype=bool)
    main = data.take(locs, axis=axis, mode=mode)
    for shift in xrange(1, order + 1):
        plus = data.take(locs + shift, axis=axis, mode=mode)
        minus = data.take(locs - shift, axis=axis, mode=mode)
        results &= comparator(main, plus)
        results &= comparator(main, minus)
        if (~results.any()):
            return results
    return results
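This helper backs the public relative-extrema functions; a brief usage sketch, assuming scipy.signal.argrelextrema and an arbitrary test signal. (Current SciPy releases return index arrays from argrelextrema rather than the boolean mask shown in the docstring example above.)

import numpy as np
from scipy.signal import argrelextrema

data = np.array([1, 2, 3, 2, 1, 0, 2, 0])
print(argrelextrema(data, np.greater))  # indices of the local maxima: (array([2, 6]),)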
Example #38
def factorial2(n, exact=False):
    """
    Double factorial.

    This is the factorial with every second value skipped, i.e.,
    ``7!! = 7 * 5 * 3 * 1``.  It can be approximated numerically as::

      n!! = special.gamma(n/2+1)*2**((n+1)/2)/sqrt(pi)  n odd
          = 2**(n/2) * (n/2)!                           n even

    Parameters
    ----------
    n : int or array_like
        Calculate ``n!!``.  Arrays are only supported with `exact` set
        to False.  If ``n < 0``, the return value is 0.
    exact : bool, optional
        The result can be approximated rapidly using the gamma-formula
        above (default).  If `exact` is set to True, calculate the
        answer exactly using integer arithmetic.

    Returns
    -------
    nff : float or int
        Double factorial of `n`, as an int or a float depending on
        `exact`.

    Examples
    --------
    >>> factorial2(7, exact=False)
    array(105.00000000000001)
    >>> factorial2(7, exact=True)
    105L

    """
    if exact:
        if n < -1:
            return 0
        if n <= 0:
            return 1
        val = 1
        for k in xrange(n, 0, -2):
            val *= k
        return val
    else:
        from scipy import special
        n = asarray(n)
        vals = zeros(n.shape, 'd')
        cond1 = (n % 2) & (n >= -1)
        cond2 = (1 - (n % 2)) & (n >= -1)
        oddn = extract(cond1, n)
        evenn = extract(cond2, n)
        nd2o = oddn / 2.0
        nd2e = evenn / 2.0
        place(vals, cond1,
              special.gamma(nd2o + 1) / sqrt(pi) * pow(2.0, nd2o + 0.5))
        place(vals, cond2, special.gamma(nd2e + 1) * pow(2.0, nd2e))
        return vals
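A quick illustrative check that the gamma-based formula in the docstring agrees with the exact product definition, assuming scipy.special.factorial2 and scipy.special.gamma:

import numpy as np
from scipy.special import factorial2, gamma

n = 7
print(factorial2(n, exact=True))                                   # 105
print(gamma(n / 2.0 + 1) * 2 ** ((n + 1) / 2.0) / np.sqrt(np.pi))  # ~105.0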
Example #39
 def fromspline(cls, xk, cvals, order, fill=0.0):
     N = len(xk)-1
     sivals = np.empty((order+1,N), dtype=float)
     for m in xrange(order,-1,-1):
         fact = spec.gamma(m+1)
         res = _fitpack._bspleval(xk[:-1], xk, cvals, order, m)
         res /= fact
         sivals[order-m,:] = res
     return cls(sivals, xk, fill=fill)
Example #40
    def piecefuncgen(num):
        Mk = order // 2 - num
        if (Mk < 0):
            return 0  # final function is 0
        coeffs = [(1 - 2 * (k % 2)) * float(comb(order + 1, k, exact=1)) / fval
                  for k in xrange(Mk + 1)]
        shifts = [-bound - k for k in xrange(Mk + 1)]

        #print "Adding piece number %d with coeffs %s and shifts %s" \
        #      % (num, str(coeffs), str(shifts))

        def thefunc(x):
            res = 0.0
            for k in range(Mk + 1):
                res += coeffs[k] * (x + shifts[k])**order
            return res

        return thefunc
Example #41
 def fromspline(cls, xk, cvals, order, fill=0.0):
     N = len(xk) - 1
     sivals = np.empty((order + 1, N), dtype=float)
     for m in xrange(order, -1, -1):
         fact = spec.gamma(m + 1)
         res = _fitpack._bspleval(xk[:-1], xk, cvals, order, m)
         res /= fact
         sivals[order - m, :] = res
     return cls(sivals, xk, fill=fill)
Example #42
        def check(name, chunksize):
            points = DATASETS[name]
            ndim = points.shape[1]

            opts = None
            nmin = ndim + 2

            if name == 'some-points':
                # since Qz is not allowed, use QJ 
                opts = 'QJ Pp'
            elif name == 'pathological-1':
                # include enough points so that we get different x-coordinates
                nmin = 12

            obj = qhull.Voronoi(points[:nmin], incremental=True,
                                 qhull_options=opts)
            for j in xrange(nmin, len(points), chunksize):
                obj.add_points(points[j:j+chunksize])

            obj2 = qhull.Voronoi(points)

            obj3 = qhull.Voronoi(points[:nmin], incremental=True,
                                 qhull_options=opts)
            obj3.add_points(points[nmin:], restart=True)

            # -- Check that the incremental mode agrees with upfront mode

            # The vertices may be in different order or duplicated in
            # the incremental map
            for objx in obj, obj3:
                vertex_map = {-1: -1}
                for i, v in enumerate(objx.vertices):
                    for j, v2 in enumerate(obj2.vertices):
                        if np.allclose(v, v2):
                            vertex_map[i] = j

                def remap(x):
                    if hasattr(x, '__len__'):
                        return tuple(set([remap(y) for y in x]))
                    return vertex_map.get(x, x)

                def simplified(x):
                    items = set(map(sorted_tuple, x))
                    if () in items:
                        items.remove(())
                    items = [x for x in items if len(x) > 1]
                    items.sort()
                    return items

                assert_equal(
                    simplified(remap(objx.regions)),
                    simplified(obj2.regions)
                    )
                assert_equal(
                    simplified(remap(objx.ridge_vertices)),
                    simplified(obj2.ridge_vertices)
                    )
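A condensed sketch of the incremental interface this test exercises, assuming scipy.spatial.Voronoi with incremental=True and random demo points:

import numpy as np
from scipy.spatial import Voronoi

rng = np.random.RandomState(0)
pts = rng.rand(10, 2)

vor = Voronoi(pts[:5], incremental=True)
vor.add_points(pts[5:])   # extend the diagram without rebuilding from scratch
vor.close()               # finalize; no further points can be added
print(len(vor.vertices))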
Example #43
def factorial2(n, exact=False):
    """
    Double factorial.

    This is the factorial with every second value skipped, i.e.,
    ``7!! = 7 * 5 * 3 * 1``.  It can be approximated numerically as::

      n!! = special.gamma(n/2+1)*2**((n+1)/2)/sqrt(pi)  n odd
          = 2**(n/2) * (n/2)!                           n even

    Parameters
    ----------
    n : int or array_like
        Calculate ``n!!``.  Arrays are only supported with `exact` set
        to False.  If ``n < 0``, the return value is 0.
    exact : bool, optional
        The result can be approximated rapidly using the gamma-formula
        above (default).  If `exact` is set to True, calculate the
        answer exactly using integer arithmetic.

    Returns
    -------
    nff : float or int
        Double factorial of `n`, as an int or a float depending on
        `exact`.

    Examples
    --------
    >>> factorial2(7, exact=False)
    array(105.00000000000001)
    >>> factorial2(7, exact=True)
    105L

    """
    if exact:
        if n < -1:
            return 0
        if n <= 0:
            return 1
        val = 1
        for k in xrange(n,0,-2):
            val *= k
        return val
    else:
        from scipy import special
        n = asarray(n)
        vals = zeros(n.shape,'d')
        cond1 = (n % 2) & (n >= -1)
        cond2 = (1-(n % 2)) & (n >= -1)
        oddn = extract(cond1,n)
        evenn = extract(cond2,n)
        nd2o = oddn / 2.0
        nd2e = evenn / 2.0
        place(vals,cond1,special.gamma(nd2o+1)/sqrt(pi)*pow(2.0,nd2o+0.5))
        place(vals,cond2,special.gamma(nd2e+1) * pow(2.0,nd2e))
        return vals
Example #44
 def test_vector(self):
     xs = [0, 1, 2]
     ys = np.array([[0, 1], [1, 0], [2, 1]])
     P = BarycentricInterpolator(xs, ys)
     Pi = [
         BarycentricInterpolator(xs, ys[:, i]) for i in xrange(ys.shape[1])
     ]
     test_xs = np.linspace(-1, 3, 100)
     assert_almost_equal(
         P(test_xs), np.rollaxis(np.asarray([p(test_xs) for p in Pi]), -1))
Example #45
    def test_more_barycentric_transforms(self):
        # Triangulate some "nasty" grids

        eps = np.finfo(float).eps

        npoints = {2: 70, 3: 11, 4: 5, 5: 3}

        for ndim in xrange(2, 6):
            # Generate an uniform grid in n-d unit cube
            x = np.linspace(0, 1, npoints[ndim])
            grid = np.c_[list(map(np.ravel, np.broadcast_arrays(*np.ix_(*([x]*ndim)))))].T

            err_msg = "ndim=%d" % ndim

            # Check using regular grid
            tri = qhull.Delaunay(grid)
            self._check_barycentric_transforms(tri, err_msg=err_msg,
                                               unit_cube=True)

            # Check with eps-perturbations
            np.random.seed(1234)
            m = (np.random.rand(grid.shape[0]) < 0.2)
            grid[m,:] += 2*eps*(np.random.rand(*grid[m,:].shape) - 0.5)

            tri = qhull.Delaunay(grid)
            self._check_barycentric_transforms(tri, err_msg=err_msg,
                                               unit_cube=True,
                                               unit_cube_tol=2*eps)

            # Check with duplicated data
            tri = qhull.Delaunay(np.r_[grid, grid])
            self._check_barycentric_transforms(tri, err_msg=err_msg,
                                               unit_cube=True,
                                               unit_cube_tol=2*eps)

            # Check with larger perturbations
            np.random.seed(4321)
            m = (np.random.rand(grid.shape[0]) < 0.2)
            grid[m,:] += 1000*eps*(np.random.rand(*grid[m,:].shape) - 0.5)

            tri = qhull.Delaunay(grid)
            self._check_barycentric_transforms(tri, err_msg=err_msg,
                                               unit_cube=True,
                                               unit_cube_tol=1500*eps)

            # Check with yet larger perturbations
            np.random.seed(4321)
            m = (np.random.rand(grid.shape[0]) < 0.2)
            grid[m,:] += 1e6*eps*(np.random.rand(*grid[m,:].shape) - 0.5)

            tri = qhull.Delaunay(grid)
            self._check_barycentric_transforms(tri, err_msg=err_msg,
                                               unit_cube=True,
                                               unit_cube_tol=1e7*eps)
Example #46
def bessel_diff_formula(v, z, n, L, phase):
    # from AMS55.
    # L(v,z) = J(v,z), Y(v,z), H1(v,z), H2(v,z), phase = -1
    # L(v,z) = I(v,z) or exp(v*pi*i)K(v,z), phase = 1
    # For K, you can pull out the exp((v-k)*pi*i) into the caller
    p = 1.0
    s = L(v-n, z)
    for i in xrange(1, n+1):
        p = phase * (p * (n-i+1)) / i   # = phase**i * choose(n, i)
        s += p*L(v-n + i*2, z)
    return s / (2.**n)
Example #47
def bessel_diff_formula(v, z, n, L, phase):
    # from AMS55.
    # L(v,z) = J(v,z), Y(v,z), H1(v,z), H2(v,z), phase = -1
    # L(v,z) = I(v,z) or exp(v*pi*i)K(v,z), phase = 1
    # For K, you can pull out the exp((v-k)*pi*i) into the caller
    p = 1.0
    s = L(v - n, z)
    for i in xrange(1, n + 1):
        p = phase * (p * (n - i + 1)) / i  # = phase**i * choose(n, i)
        s += p * L(v - n + i * 2, z)
    return s / (2.**n)
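As a sanity check, with phase = -1 and L = jv this recurrence should reproduce the n-th derivative of the Bessel function J_v. The sketch below restates the AMS55 sum directly and compares it against scipy.special.jvp; the values of v, z and n are chosen arbitrarily:

from scipy.special import comb, jv, jvp

v, z, n = 2.0, 1.5, 3
# AMS55: d^n/dz^n J_v(z) = 2**(-n) * sum_k (-1)**k * C(n, k) * J_{v-n+2k}(z)
s = sum((-1) ** k * comb(n, k) * jv(v - n + 2 * k, z) for k in range(n + 1))
print(s / 2.0 ** n)
print(jvp(v, z, n))  # SciPy's n-th derivative of J_v, for comparison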
Example #48
 def test_vector(self):
     xs = [0, 1, 2]
     ys = np.array([[0, 1], [1, 0], [2, 1]])
     P = KroghInterpolator(xs, ys)
     Pi = [KroghInterpolator(xs, ys[:, i]) for i in xrange(ys.shape[1])]
     test_xs = np.linspace(-1, 3, 100)
     assert_almost_equal(
         P(test_xs), np.rollaxis(np.asarray([p(test_xs) for p in Pi]), -1))
     assert_almost_equal(
         P.derivatives(test_xs),
         np.transpose(np.asarray([p.derivatives(test_xs) for p in Pi]),
                      (1, 2, 0)))
Example #49
def comb(N, k, exact=0):
    """
    The number of combinations of N things taken k at a time.

    This is often expressed as "N choose k".

    Parameters
    ----------
    N : int, ndarray
        Number of things.
    k : int, ndarray
        Number of elements taken.
    exact : int, optional
        If `exact` is 0, floating point precision is used; otherwise the
        exact long integer is computed.

    Returns
    -------
    val : int, ndarray
        The total number of combinations.

    Notes
    -----
    - Array arguments accepted only for exact=0 case.
    - If k > N, N < 0, or k < 0, then a 0 is returned.

    Examples
    --------
    >>> k = np.array([3, 4])
    >>> n = np.array([10, 10])
    >>> sc.comb(n, k, exact=False)
    array([ 120.,  210.])
    >>> sc.comb(10, 3, exact=True)
    120L

    """
    if exact:
        if (k > N) or (N < 0) or (k < 0):
            return 0
        val = 1
        for j in xrange(min(k, N - k)):
            val = (val * (N - j)) // (j + 1)
        return val
    else:
        from scipy import special
        k, N = asarray(k), asarray(N)
        lgam = special.gammaln
        cond = (k <= N) & (N >= 0) & (k >= 0)
        sv = special.errprint(0)
        vals = exp(lgam(N + 1) - lgam(N - k + 1) - lgam(k + 1))
        sv = special.errprint(sv)
        return where(cond, vals, 0.0)
Example #50
    def matvec(self, f):
        dx = -f/self.alpha

        n = len(self.dx)
        if n == 0:
            return dx

        df_f = np.empty(n, dtype=f.dtype)
        for k in xrange(n):
            df_f[k] = vdot(self.df[k], f)

        b = np.empty((n, n), dtype=f.dtype)
        for i in xrange(n):
            for j in xrange(n):
                b[i,j] = vdot(self.df[i], self.dx[j])
                if i == j and self.w0 != 0:
                    b[i,j] -= vdot(self.df[i], self.df[i])*self.w0**2*self.alpha
        gamma = solve(b, df_f)

        for m in xrange(n):
            dx += gamma[m]*(self.df[m] + self.dx[m]/self.alpha)
        return dx
Example #51
    def __init__(self, xi, yi=None, axis=0):
        _Interpolator1D.__init__(self, xi, yi, axis)

        self.xi = np.asarray(xi)
        self.set_yi(yi)
        self.n = len(self.xi)

        self.wi = np.zeros(self.n)
        self.wi[0] = 1
        for j in xrange(1, self.n):
            self.wi[:j] *= (self.xi[j] - self.xi[:j])
            self.wi[j] = np.multiply.reduce(self.xi[:j] - self.xi[j])
        self.wi **= -1
Example #52
    def solve(self, f, tol=0):
        dx = -self.alpha * f

        n = len(self.dx)
        if n == 0:
            return dx

        df_f = np.empty(n, dtype=f.dtype)
        for k in xrange(n):
            df_f[k] = vdot(self.df[k], f)

        try:
            gamma = solve(self.a, df_f)
        except LinAlgError:
            # singular; reset the Jacobian approximation
            del self.dx[:]
            del self.df[:]
            return dx

        for m in xrange(n):
            dx += gamma[m] * (self.dx[m] + self.alpha * self.df[m])
        return dx
Example #53
 def _evaluate(self, x):
     if _isscalar(x):
         pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n - 2)
         y = self.polynomials[pos](x)
     else:
         m = len(x)
         pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n - 2)
         y = np.zeros((m, self.r), dtype=self.dtype)
         if y.size > 0:
             for i in xrange(self.n - 1):
                 c = pos == i
                 y[c] = self.polynomials[i](x[c])
     return y
Example #54
    def extend(self, xi, yi, orders=None):
        """
        Extend the PiecewisePolynomial by a list of points

        Parameters
        ----------
        xi : array_like
            A sorted list of x-coordinates.
        yi : list of lists of length N1
            ``yi[i]`` (if ``axis == 0``) is the list of derivatives known
            at ``xi[i]``.
        orders : int or list of ints
            A list of polynomial orders, or a single universal order.
        direction : {None, 1, -1}
            Indicates whether the `xi` are increasing or decreasing.

                +1 indicates increasing

                -1 indicates decreasing

            None indicates that it should be deduced from the first two `xi`.

        """
        if self._y_axis == 0:
            # allow yi to be a ragged list
            for i in xrange(len(xi)):
                if orders is None or _isscalar(orders):
                    self.append(xi[i], yi[i], orders)
                else:
                    self.append(xi[i], yi[i], orders[i])
        else:
            preslice = (slice(None, None, None), ) * self._y_axis
            for i in xrange(len(xi)):
                if orders is None or _isscalar(orders):
                    self.append(xi[i], yi[preslice + (i, )], orders)
                else:
                    self.append(xi[i], yi[preslice + (i, )], orders[i])
Example #55
 def vfunc(x):
     if isscalar(x):
         return func(x, *args)
     x = asarray(x)
     # call with first point to get output type
     y0 = func(x[0], *args)
     n = len(x)
     if hasattr(y0, 'dtype'):
         output = empty((n,), dtype=y0.dtype)
     else:
         output = empty((n,), dtype=type(y0))
     output[0] = y0
     for i in xrange(1, n):
         output[i] = func(x[i], *args)
     return output
Example #56
    def _update(self, x, f, dx, df, dx_norm, df_norm):
        if self.M == 0:
            return

        self.dx.append(dx)
        self.df.append(df)

        while len(self.dx) > self.M:
            self.dx.pop(0)
            self.df.pop(0)

        n = len(self.dx)
        a = np.zeros((n, n), dtype=f.dtype)

        for i in xrange(n):
            for j in xrange(i, n):
                if i == j:
                    wd = self.w0**2
                else:
                    wd = 0
                a[i, j] = (1 + wd) * vdot(self.df[i], self.df[j])

        a += np.triu(a, 1).T.conj()
        self.a = a
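The bookkeeping above appears to come from SciPy's Anderson-mixing machinery; a minimal sketch of the corresponding public interface, assuming scipy.optimize.anderson and a made-up residual function:

import numpy as np
from scipy.optimize import anderson

def residual(x):
    return np.cos(x) - x   # toy residual; root near x = 0.739

print(anderson(residual, np.zeros(1), f_tol=1e-10))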
Example #57
 def test_vector(self):
     xs = [0, 1, 2]
     ys = [[[0, 1]], [[1, 0], [-1, -1]], [[2, 1]]]
     P = PiecewisePolynomial(xs, ys)
     Pi = [
         PiecewisePolynomial(xs, [[yd[i] for yd in y] for y in ys])
         for i in xrange(len(ys[0][0]))
     ]
     test_xs = np.linspace(-1, 3, 100)
     assert_almost_equal(
         P(test_xs), np.rollaxis(np.asarray([p(test_xs) for p in Pi]), -1))
     assert_almost_equal(
         P.derivative(test_xs, 1),
         np.transpose(np.asarray([p.derivative(test_xs, 1) for p in Pi]),
                      (1, 0)))