Example #1
 def __radd__(self, other):
     # First check if argument is a scalar
     if isscalarlike(other):
         new = dok_matrix(self.shape, dtype=self.dtype)
         # Add this scalar to every element.
         M, N = self.shape
         for i in xrange(M):
             for j in xrange(N):
                 aij = self.get((i, j), 0) + other
                 if aij != 0:
                     new[i, j] = aij
     elif isinstance(other, dok_matrix):
         if other.shape != self.shape:
             raise ValueError("matrix dimensions are not equal")
         new = dok_matrix(self.shape, dtype=self.dtype)
         new.update(self)
         for key in other:
             new[key] += other[key]
     elif isspmatrix(other):
         csc = self.tocsc()
         new = csc + other
     elif isdense(other):
         new = other + self.todense()
     else:
         return NotImplemented
     return new
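
A minimal usage sketch (not part of the listed snippet): adding two scipy.sparse.dok_matrix objects of the same shape, which exercises the sparse addition path shown above.

from scipy.sparse import dok_matrix

A = dok_matrix((2, 2), dtype=float)
B = dok_matrix((2, 2), dtype=float)
A[0, 0] = 1.0
B[0, 0] = 2.0
B[1, 1] = 3.0

C = A + B                 # sparse + sparse addition
print(C.toarray())        # [[3. 0.]
                          #  [0. 3.]]
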
Example #2
 def _setdiag(self, values, k):
     M, N = self.shape
     if k < 0:
         if values.ndim == 0:
             # broadcast
             max_index = min(M + k, N)
             for i in xrange(max_index):
                 self[i - k, i] = values
         else:
             max_index = min(M + k, N, len(values))
             if max_index <= 0:
                 return
             for i, v in enumerate(values[:max_index]):
                 self[i - k, i] = v
     else:
         if values.ndim == 0:
             # broadcast
             max_index = min(M, N - k)
             for i in xrange(max_index):
                 self[i, i + k] = values
         else:
             max_index = min(M, N - k, len(values))
             if max_index <= 0:
                 return
             for i, v in enumerate(values[:max_index]):
                 self[i, i + k] = v
Example #3
def lagrange(x, w):
    """
    Return a Lagrange interpolating polynomial.

    Given two 1-D arrays `x` and `w`, returns the Lagrange interpolating
    polynomial through the points ``(x, w)``.

    Warning: This implementation is numerically unstable. Do not expect to
    be able to use more than about 20 points even if they are chosen optimally.

    Parameters
    ----------
    x : array_like
        `x` represents the x-coordinates of a set of datapoints.
    w : array_like
        `w` represents the y-coordinates of a set of datapoints, i.e. f(`x`).

    Returns
    -------
    lagrange : numpy.poly1d instance
        The Lagrange interpolating polynomial.

    """
    M = len(x)
    p = poly1d(0.0)
    for j in xrange(M):
        pt = poly1d(w[j])
        for k in xrange(M):
            if k == j:
                continue
            fac = x[j] - x[k]
            pt *= poly1d([1.0, -x[k]]) / fac
        p += pt
    return p
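
A short usage sketch of scipy.interpolate.lagrange: three samples of y = x**2 reproduce the quadratic exactly (up to floating-point error).

import numpy as np
from scipy.interpolate import lagrange

x = np.array([0.0, 1.0, 2.0])
w = x ** 2                     # y-values: 0, 1, 4
p = lagrange(x, w)             # numpy.poly1d instance
print(p(3.0))                  # ~9.0, because the interpolant is x**2
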
Example #4
    def _evaluate_derivatives(self, x, der=None):
        n = self.n
        r = self.r

        if der is None:
            der = self.n
        pi = np.zeros((n, len(x)))
        w = np.zeros((n, len(x)))
        pi[0] = 1
        p = np.zeros((len(x), self.r))
        p += self.c[0,np.newaxis,:]

        for k in xrange(1,n):
            w[k-1] = x - self.xi[k-1]
            pi[k] = w[k-1]*pi[k-1]
            p += pi[k,:,np.newaxis]*self.c[k]

        cn = np.zeros((max(der,n+1), len(x), r), dtype=self.dtype)
        cn[:n+1,:,:] += self.c[:n+1,np.newaxis,:]
        cn[0] = p
        for k in xrange(1,n):
            for i in xrange(1,n-k+1):
                pi[i] = w[k+i-1]*pi[i-1]+pi[i]
                cn[k] = cn[k]+pi[i,:,np.newaxis]*cn[k+i]
            cn[k] *= factorial(k)

        cn[n,:,:] = 0
        return cn[:der]
Example #5
 def _setdiag(self, values, k):
     M, N = self.shape
     if k < 0:
         if values.ndim == 0:
             # broadcast
             max_index = min(M+k, N)
             for i in xrange(max_index):
                 self[i - k, i] = values
         else:
             max_index = min(M+k, N, len(values))
             if max_index <= 0:
                 return
             for i,v in enumerate(values[:max_index]):
                 self[i - k, i] = v
     else:
         if values.ndim == 0:
             # broadcast
             max_index = min(M, N-k)
             for i in xrange(max_index):
                 self[i, i + k] = values
         else:
             max_index = min(M, N-k, len(values))
             if max_index <= 0:
                 return
             for i,v in enumerate(values[:max_index]):
                 self[i, i + k] = v
Example #6
    def __init__(self, xi, yi, axis=0):
        _Interpolator1DWithDerivatives.__init__(self, xi, yi, axis)

        self.xi = np.asarray(xi)
        self.yi = self._reshape_yi(yi)
        self.n, self.r = self.yi.shape

        c = np.zeros((self.n + 1, self.r), dtype=self.dtype)
        c[0] = self.yi[0]
        Vk = np.zeros((self.n, self.r), dtype=self.dtype)
        for k in xrange(1, self.n):
            s = 0
            while s <= k and xi[k - s] == xi[k]:
                s += 1
            s -= 1
            Vk[0] = self.yi[k] / float(factorial(s))
            for i in xrange(k - s):
                if xi[i] == xi[k]:
                    raise ValueError("Elements if `xi` can't be equal.")
                if s == 0:
                    Vk[i + 1] = (c[i] - Vk[i]) / (xi[i] - xi[k])
                else:
                    Vk[i + 1] = (Vk[i + 1] - Vk[i]) / (xi[i] - xi[k])
            c[k] = Vk[k - s]
        self.c = c
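
This constructor appears to come from scipy.interpolate.KroghInterpolator; a minimal sketch under that assumption, using three ordinary (non-repeated) nodes.

from scipy.interpolate import KroghInterpolator

xi = [0.0, 1.0, 2.0]
yi = [1.0, 3.0, 7.0]           # samples of y = x**2 + x + 1
k = KroghInterpolator(xi, yi)
print(k(1.5))                  # ~4.75
print(k.derivative(1.5, 1))    # ~4.0, since y' = 2*x + 1
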
Example #7
 def __radd__(self, other):
     # First check if argument is a scalar
     if isscalarlike(other):
         new = dok_matrix(self.shape, dtype=self.dtype)
         # Add this scalar to every element.
         M, N = self.shape
         for i in xrange(M):
             for j in xrange(N):
                 aij = self.get((i, j), 0) + other
                 if aij != 0:
                     new[i, j] = aij
     elif isinstance(other, dok_matrix):
         if other.shape != self.shape:
             raise ValueError("matrix dimensions are not equal")
         new = dok_matrix(self.shape, dtype=self.dtype)
         new.update(self)
         for key in other:
             new[key] += other[key]
     elif isspmatrix(other):
         csc = self.tocsc()
         new = csc + other
     elif isdense(other):
         new = other + self.todense()
     else:
         return NotImplemented
     return new
Example #8
    def _evaluate_derivatives(self, x, der=None):
        n = self.n
        r = self.r

        if der is None:
            der = self.n
        pi = np.zeros((n, len(x)))
        w = np.zeros((n, len(x)))
        pi[0] = 1
        p = np.zeros((len(x), self.r))
        p += self.c[0, np.newaxis, :]

        for k in xrange(1, n):
            w[k - 1] = x - self.xi[k - 1]
            pi[k] = w[k - 1] * pi[k - 1]
            p += pi[k, :, np.newaxis] * self.c[k]

        cn = np.zeros((max(der, n + 1), len(x), r), dtype=self.dtype)
        cn[:n + 1, :, :] += self.c[:n + 1, np.newaxis, :]
        cn[0] = p
        for k in xrange(1, n):
            for i in xrange(1, n - k + 1):
                pi[i] = w[k + i - 1] * pi[i - 1] + pi[i]
                cn[k] = cn[k] + pi[i, :, np.newaxis] * cn[k + i]
            cn[k] *= factorial(k)

        cn[n, :, :] = 0
        return cn[:der]
Example #9
 def __add__(self, other):
     # First check if argument is a scalar
     if isscalarlike(other):
         res_dtype = upcast_scalar(self.dtype, other)
         new = dok_matrix(self.shape, dtype=res_dtype)
         # Add this scalar to every element.
         M, N = self.shape
         for i in xrange(M):
             for j in xrange(N):
                 aij = self.get((i, j), 0) + other
                 if aij != 0:
                     new[i, j] = aij
         # new.dtype.char = self.dtype.char
     elif isinstance(other, dok_matrix):
         if other.shape != self.shape:
             raise ValueError("matrix dimensions are not equal")
         # We could alternatively set the dimensions to the largest of
         # the two matrices to be summed.  Would this be a good idea?
         res_dtype = upcast(self.dtype, other.dtype)
         new = dok_matrix(self.shape, dtype=res_dtype)
         new.update(self)
         for key in other.keys():
             new[key] += other[key]
     elif isspmatrix(other):
         csc = self.tocsc()
         new = csc + other
     elif isdense(other):
         new = self.todense() + other
     else:
         return NotImplemented
     return new
Example #10
 def __add__(self, other):
     # First check if argument is a scalar
     if isscalarlike(other):
         res_dtype = upcast_scalar(self.dtype, other)
         new = dok_matrix(self.shape, dtype=res_dtype)
         # Add this scalar to every element.
         M, N = self.shape
         for i in xrange(M):
             for j in xrange(N):
                 aij = self.get((i, j), 0) + other
                 if aij != 0:
                     new[i, j] = aij
         # new.dtype.char = self.dtype.char
     elif isinstance(other, dok_matrix):
         if other.shape != self.shape:
             raise ValueError("matrix dimensions are not equal")
         # We could alternatively set the dimensions to the largest of
         # the two matrices to be summed.  Would this be a good idea?
         res_dtype = upcast(self.dtype, other.dtype)
         new = dok_matrix(self.shape, dtype=res_dtype)
         new.update(self)
         for key in other.keys():
             new[key] += other[key]
     elif isspmatrix(other):
         csc = self.tocsc()
         new = csc + other
     elif isdense(other):
         new = self.todense() + other
     else:
         return NotImplemented
     return new
Example #11
    def __init__(self, xi, yi, axis=0):
        _Interpolator1DWithDerivatives.__init__(self, xi, yi, axis)

        self.xi = np.asarray(xi)
        self.yi = self._reshape_yi(yi)
        self.n, self.r = self.yi.shape

        c = np.zeros((self.n+1, self.r), dtype=self.dtype)
        c[0] = self.yi[0]
        Vk = np.zeros((self.n, self.r), dtype=self.dtype)
        for k in xrange(1,self.n):
            s = 0
            while s <= k and xi[k-s] == xi[k]:
                s += 1
            s -= 1
            Vk[0] = self.yi[k]/float(factorial(s))
            for i in xrange(k-s):
                if xi[i] == xi[k]:
                    raise ValueError("Elements if `xi` can't be equal.")
                if s == 0:
                    Vk[i+1] = (c[i]-Vk[i])/(xi[i]-xi[k])
                else:
                    Vk[i+1] = (Vk[i+1]-Vk[i])/(xi[i]-xi[k])
            c[k] = Vk[k-s]
        self.c = c
Example #12
def lagrange(x, w):
    """
    Return a Lagrange interpolating polynomial.

    Given two 1-D arrays `x` and `w`, returns the Lagrange interpolating
    polynomial through the points ``(x, w)``.

    Warning: This implementation is numerically unstable. Do not expect to
    be able to use more than about 20 points even if they are chosen optimally.

    Parameters
    ----------
    x : array_like
        `x` represents the x-coordinates of a set of datapoints.
    w : array_like
        `w` represents the y-coordinates of a set of datapoints, i.e. f(`x`).

    Returns
    -------
    lagrange : numpy.poly1d instance
        The Lagrange interpolating polynomial.

    """
    M = len(x)
    p = poly1d(0.0)
    for j in xrange(M):
        pt = poly1d(w[j])
        for k in xrange(M):
            if k == j:
                continue
            fac = x[j]-x[k]
            pt *= poly1d([1.0, -x[k]])/fac
        p += pt
    return p
Example #13
 def test_cascade(self):
     for J in xrange(1, 7):
         for i in xrange(1, 5):
             lpcoef = wavelets.daub(i)
             k = len(lpcoef)
             x, phi, psi = wavelets.cascade(lpcoef, J)
             assert_(len(x) == len(phi) == len(psi))
             assert_equal(len(x), (k - 1) * 2 ** J)
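
A hedged sketch of the calls being tested, assuming an older SciPy release where scipy.signal.daub and scipy.signal.cascade are still available (both have since been deprecated and removed).

from scipy import signal

lpcoef = signal.daub(2)                    # Daubechies-2 low-pass filter, 4 taps
x, phi, psi = signal.cascade(lpcoef, 5)    # J = 5 refinement levels
print(len(x), (len(lpcoef) - 1) * 2 ** 5)  # both 96, matching the assertion above
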
Example #14
 def test_cascade(self):
     for J in xrange(1, 7):
         for i in xrange(1, 5):
             lpcoef = wavelets.daub(i)
             k = len(lpcoef)
             x, phi, psi = wavelets.cascade(lpcoef, J)
             assert_(len(x) == len(phi) == len(psi))
             assert_equal(len(x), (k - 1) * 2 ** J)
Example #15
 def test_improvement(self):
     import time
     start = time.time()
     for i in xrange(100):
         quad(self.lib.sin, 0, 100)
     fast = time.time() - start
     start = time.time()
     for i in xrange(100):
         quad(math.sin, 0, 100)
     slow = time.time() - start
     assert_(fast < 0.5 * slow, (fast, slow))
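
The plain-Python path being timed above boils down to a scipy.integrate.quad call; a minimal sketch:

import math
from scipy.integrate import quad

result, abserr = quad(math.sin, 0, math.pi)
print(result)   # ~2.0, the exact integral of sin over [0, pi]
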
Example #16
 def test_correspond_2_and_up(self):
     # Tests correspond(Z, y) on linkage and CDMs over observation sets of
     # different sizes.
     for i in xrange(2, 4):
         y = np.random.rand(i * (i - 1) // 2)
         Z = linkage(y)
         self.assertTrue(correspond(Z, y))
     for i in xrange(4, 15, 3):
         y = np.random.rand(i * (i - 1) // 2)
         Z = linkage(y)
         self.assertTrue(correspond(Z, y))
Example #17
 def test_improvement(self):
     import time
     start = time.time()
     for i in xrange(100):
         quad(self.lib.sin, 0, 100)
     fast = time.time() - start
     start = time.time()
     for i in xrange(100):
         quad(math.sin, 0, 100)
     slow = time.time() - start
     assert_(fast < 0.5*slow, (fast, slow))
Example #18
 def test_correspond_2_and_up(self):
     # Tests correspond(Z, y) on linkage and CDMs over observation sets of
     # different sizes.
     for i in xrange(2, 4):
         y = np.random.rand(i*(i-1)//2)
         Z = linkage(y)
         self.assertTrue(correspond(Z, y))
     for i in xrange(4, 15, 3):
         y = np.random.rand(i*(i-1)//2)
         Z = linkage(y)
         self.assertTrue(correspond(Z, y))
Example #19
    def piecefuncgen(num):
        Mk = order // 2 - num
        if (Mk < 0):
            return 0  # final function is 0
        coeffs = [(1 - 2 * (k % 2)) * float(comb(order + 1, k, exact=1)) / fval
                  for k in xrange(Mk + 1)]
        shifts = [-bound - k for k in xrange(Mk + 1)]

        def thefunc(x):
            res = 0.0
            for k in range(Mk + 1):
                res += coeffs[k] * (x + shifts[k]) ** order
            return res
        return thefunc
Example #20
    def test_nd_simplex(self):
        # simple smoke test: triangulate a n-dimensional simplex
        for nd in xrange(2, 8):
            points = np.zeros((nd+1, nd))
            for j in xrange(nd):
                points[j,j] = 1.0
            points[-1,:] = 1.0

            tri = qhull.Delaunay(points)

            tri.vertices.sort()

            assert_equal(tri.vertices, np.arange(nd+1, dtype=np.int)[None,:])
            assert_equal(tri.neighbors, -1 + np.zeros((nd+1), dtype=np.int)[None,:])
Example #21
    def test_nd_simplex(self):
        # simple smoke test: triangulate a n-dimensional simplex
        for nd in xrange(2, 8):
            points = np.zeros((nd+1, nd))
            for j in xrange(nd):
                points[j,j] = 1.0
            points[-1,:] = 1.0

            tri = qhull.Delaunay(points)

            tri.vertices.sort()

            assert_equal(tri.vertices, np.arange(nd+1, dtype=np.int)[None,:])
            assert_equal(tri.neighbors, -1 + np.zeros((nd+1), dtype=np.int)[None,:])
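
A small sketch of scipy.spatial.Delaunay outside the test harness; note that tri.vertices used above is an older alias for what newer SciPy exposes as tri.simplices.

import numpy as np
from scipy.spatial import Delaunay

points = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
tri = Delaunay(points)
print(tri.simplices)     # two triangles covering the unit square
print(tri.neighbors)     # -1 marks facets on the convex hull boundary
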
Example #22
 def test_improvement(self):
     def myfunc(x):           # Euler's constant integrand
         return -exp(-x)*log(x)
     import time
     start = time.time()
     for i in xrange(20):
         quad(self.lib._multivariate_indefinite, 0, 100)
     fast = time.time() - start
     start = time.time()
     for i in xrange(20):
         quad(myfunc, 0, 100)
     slow = time.time() - start
     # 2+ times faster speeds generated by nontrivial ctypes
     # function (single variable)
     assert_(fast < 0.5*slow, (fast, slow))
Example #23
 def test_num_obs_linkage_multi_matrix(self):
     # Tests num_obs_linkage with observation matrices of multiple sizes.
     for n in xrange(2, 10):
         X = np.random.rand(n, 4)
         Y = pdist(X)
         Z = linkage(Y)
         self.assertTrue(num_obs_linkage(Z) == n)
Example #24
 def test_call(self):
     poly = []
     for n in xrange(5):
         poly.extend([
             x.strip() for x in ("""
             orth.jacobi(%(n)d,0.3,0.9)
             orth.sh_jacobi(%(n)d,0.3,0.9)
             orth.genlaguerre(%(n)d,0.3)
             orth.laguerre(%(n)d)
             orth.hermite(%(n)d)
             orth.hermitenorm(%(n)d)
             orth.gegenbauer(%(n)d,0.3)
             orth.chebyt(%(n)d)
             orth.chebyu(%(n)d)
             orth.chebyc(%(n)d)
             orth.chebys(%(n)d)
             orth.sh_chebyt(%(n)d)
             orth.sh_chebyu(%(n)d)
             orth.legendre(%(n)d)
             orth.sh_legendre(%(n)d)
             """ % dict(n=n)).split()
         ])
     olderr = np.seterr(all='ignore')
     try:
         for pstr in poly:
             p = eval(pstr)
             assert_almost_equal(p(0.315),
                                 np.poly1d(p)(0.315),
                                 err_msg=pstr)
     finally:
         np.seterr(**olderr)
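
One of the constructors exercised above, spelled out directly: scipy.special.legendre(3) builds the Legendre polynomial P3(x) = (5x**3 - 3x)/2, and evaluating it agrees with the plain numpy.poly1d conversion used in the test.

import numpy as np
from scipy.special import legendre

P3 = legendre(3)             # poly1d-like orthogonal polynomial object
print(P3(0.5))               # -0.4375
print(np.poly1d(P3)(0.5))    # same value via an explicit poly1d
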
Example #25
 def test_num_obs_linkage_4_and_up(self):
     # Tests num_obs_linkage(Z) on linkage on observation sets between sizes
     # 4 and 15 (step size 3).
     for i in xrange(4, 15, 3):
         y = np.random.rand(i*(i-1)//2)
         Z = linkage(y)
         self.assertTrue(num_obs_linkage(Z) == i)
Example #26
 def setUp(self):
     self.tck = splrep([0,1,2,3,4,5], [0,10,-1,3,7,2], s=0)
     self.test_xs = np.linspace(-1,6,100)
     self.spline_ys = splev(self.test_xs, self.tck)
     self.spline_yps = splev(self.test_xs, self.tck, der=1)
     self.xi = np.unique(self.tck[0])
     self.yi = [[splev(x, self.tck, der=j) for j in xrange(3)] for x in self.xi]
Example #27
 def test_is_valid_linkage_4_and_up(self):
     # Tests is_valid_linkage(Z) on linkage on observation sets between
     # sizes 4 and 15 (step size 3).
     for i in xrange(4, 15, 3):
         y = np.random.rand(i*(i-1)//2)
         Z = linkage(y)
         assert_(is_valid_linkage(Z) == True)
Example #28
 def test_derivatives(self):
     P = PiecewisePolynomial(self.xi,self.yi,3)
     m = 4
     r = P.derivatives(self.test_xs,m)
     #print r.shape, r
     for i in xrange(m):
         assert_almost_equal(P.derivative(self.test_xs,i),r[i])
Example #29
    def piecefuncgen(num):
        Mk = order // 2 - num
        if (Mk < 0):
            return 0  # final function is 0
        coeffs = [(1 - 2 * (k % 2)) * float(comb(order + 1, k, exact=1)) / fval
                  for k in xrange(Mk + 1)]
        shifts = [-bound - k for k in xrange(Mk + 1)]
        #print "Adding piece number %d with coeffs %s and shifts %s" \
        #      % (num, str(coeffs), str(shifts))

        def thefunc(x):
            res = 0.0
            for k in range(Mk + 1):
                res += coeffs[k] * (x + shifts[k]) ** order
            return res
        return thefunc
Example #30
 def test_num_obs_linkage_multi_matrix(self):
     # Tests num_obs_linkage with observation matrices of multiple sizes.
     for n in xrange(2, 10):
         X = np.random.rand(n, 4)
         Y = pdist(X)
         Z = linkage(Y)
         self.assertTrue(num_obs_linkage(Z) == n)
Example #31
    def test_concurrent_ok(self):
        f = lambda t, y: 1.0

        for k in xrange(3):
            for sol in ('vode', 'zvode', 'lsoda', 'dopri5', 'dop853'):
                r = ode(f).set_integrator(sol)
                r.set_initial_value(0, 0)

                r2 = ode(f).set_integrator(sol)
                r2.set_initial_value(0, 0)

                r.integrate(r.t + 0.1)
                r2.integrate(r2.t + 0.1)
                r2.integrate(r2.t + 0.1)

                assert_allclose(r.y, 0.1)
                assert_allclose(r2.y, 0.2)

            for sol in ('dopri5', 'dop853'):
                r = ode(f).set_integrator(sol)
                r.set_initial_value(0, 0)

                r2 = ode(f).set_integrator(sol)
                r2.set_initial_value(0, 0)

                r.integrate(r.t + 0.1)
                r.integrate(r.t + 0.1)
                r2.integrate(r2.t + 0.1)
                r.integrate(r.t + 0.1)
                r2.integrate(r2.t + 0.1)

                assert_allclose(r.y, 0.3)
                assert_allclose(r2.y, 0.2)
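
A minimal sketch of the scipy.integrate.ode API used in the test: dy/dt = 1 integrated from y(0) = 0 to t = 1 with the 'dopri5' integrator.

from scipy.integrate import ode

r = ode(lambda t, y: 1.0).set_integrator('dopri5')
r.set_initial_value(0.0, 0.0)
r.integrate(1.0)
print(r.successful(), r.y)   # True, ~[1.0]
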
Example #32
 def test_num_obs_linkage_4_and_up(self):
     # Tests num_obs_linkage(Z) on linkage on observation sets between sizes
     # 4 and 15 (step size 3).
     for i in xrange(4, 15, 3):
         y = np.random.rand(i * (i - 1) // 2)
         Z = linkage(y)
         self.assertTrue(num_obs_linkage(Z) == i)
Example #33
 def test_is_valid_linkage_4_and_up(self):
     # Tests is_valid_linkage(Z) on linkage on observation sets between
     # sizes 4 and 15 (step size 3).
     for i in xrange(4, 15, 3):
         y = np.random.rand(i * (i - 1) // 2)
         Z = linkage(y)
         assert_(is_valid_linkage(Z) == True)
Example #34
 def test_exponential(self):
     degree = 5
     p = approximate_taylor_polynomial(np.exp, 0, degree, 1, 15)
     for i in xrange(degree + 1):
         assert_almost_equal(p(0), 1)
         p = p.deriv()
     assert_almost_equal(p(0), 0)
Example #35
    def _get_slice(self, i, start, stop, stride, shape):
        """Returns a copy of the elements
            [i, start:stop:stride] for row-oriented matrices
            [start:stop:stride, i] for column-oriented matrices
        """
        if stride != 1:
            raise ValueError("slicing with step != 1 not supported")
        if stop <= start:
            raise ValueError("slice width must be >= 1")

        # TODO make [i,:] faster
        # TODO implement [i,x:y:z]

        indices = []

        for ind in xrange(self.indptr[i], self.indptr[i + 1]):
            if self.indices[ind] >= start and self.indices[ind] < stop:
                indices.append(ind)

        index = self.indices[indices] - start
        data = self.data[indices]
        indptr = np.array([0, len(indices)])
        return self.__class__((data, index, indptr),
                              shape=shape,
                              dtype=self.dtype)
Example #36
    def piecefuncgen(num):
        Mk = order // 2 - num
        if (Mk < 0):
            return 0  # final function is 0
        coeffs = [(1 - 2 * (k % 2)) * float(comb(order + 1, k, exact=1)) / fval
                  for k in xrange(Mk + 1)]
        shifts = [-bound - k for k in xrange(Mk + 1)]
        #print "Adding piece number %d with coeffs %s and shifts %s" \
        #      % (num, str(coeffs), str(shifts))

        def thefunc(x):
            res = 0.0
            for k in range(Mk + 1):
                res += coeffs[k] * (x + shifts[k]) ** order
            return res
        return thefunc
Example #37
 def test_exponential(self):
     degree = 5
     p = approximate_taylor_polynomial(np.exp, 0, degree, 1, 15)
     for i in xrange(degree+1):
         assert_almost_equal(p(0),1)
         p = p.deriv()
     assert_almost_equal(p(0),0)
Example #38
 def setUp(self):
     self.tck = splrep([0,1,2,3,4,5], [0,10,-1,3,7,2], s=0)
     self.test_xs = np.linspace(-1,6,100)
     self.spline_ys = splev(self.test_xs, self.tck)
     self.spline_yps = splev(self.test_xs, self.tck, der=1)
     self.xi = np.unique(self.tck[0])
     self.yi = [[splev(x, self.tck, der=j) for j in xrange(3)] for x in self.xi]
Example #39
    def test_improvement(self):
        def myfunc(x):  # Euler's constant integrand
            return -exp(-x) * log(x)

        import time
        start = time.time()
        for i in xrange(20):
            quad(self.lib._multivariate_indefinite, 0, 100)
        fast = time.time() - start
        start = time.time()
        for i in xrange(20):
            quad(myfunc, 0, 100)
        slow = time.time() - start
        # 2+ times faster speeds generated by nontrivial ctypes
        # function (single variable)
        assert_(fast < 0.5 * slow, (fast, slow))
Example #40
 def test_call(self):
     poly = []
     for n in xrange(5):
         poly.extend([x.strip() for x in
             ("""
             orth.jacobi(%(n)d,0.3,0.9)
             orth.sh_jacobi(%(n)d,0.3,0.9)
             orth.genlaguerre(%(n)d,0.3)
             orth.laguerre(%(n)d)
             orth.hermite(%(n)d)
             orth.hermitenorm(%(n)d)
             orth.gegenbauer(%(n)d,0.3)
             orth.chebyt(%(n)d)
             orth.chebyu(%(n)d)
             orth.chebyc(%(n)d)
             orth.chebys(%(n)d)
             orth.sh_chebyt(%(n)d)
             orth.sh_chebyu(%(n)d)
             orth.legendre(%(n)d)
             orth.sh_legendre(%(n)d)
             """ % dict(n=n)).split()
         ])
     olderr = np.seterr(all='ignore')
     try:
         for pstr in poly:
             p = eval(pstr)
             assert_almost_equal(p(0.315), np.poly1d(p)(0.315), err_msg=pstr)
     finally:
         np.seterr(**olderr)
Example #41
 def test_derivatives(self):
     P = PiecewisePolynomial(self.xi,self.yi,3)
     m = 4
     r = P.derivatives(self.test_xs,m)
     #print r.shape, r
     for i in xrange(m):
         assert_almost_equal(P.derivative(self.test_xs,i),r[i])
Example #42
    def test_concurrent_ok(self):
        f = lambda t, y: 1.0

        for k in xrange(3):
            for sol in ('vode', 'zvode', 'lsoda', 'dopri5', 'dop853'):
                r = ode(f).set_integrator(sol)
                r.set_initial_value(0, 0)

                r2 = ode(f).set_integrator(sol)
                r2.set_initial_value(0, 0)

                r.integrate(r.t + 0.1)
                r2.integrate(r2.t + 0.1)
                r2.integrate(r2.t + 0.1)

                assert_allclose(r.y, 0.1)
                assert_allclose(r2.y, 0.2)

            for sol in ('dopri5', 'dop853'):
                r = ode(f).set_integrator(sol)
                r.set_initial_value(0, 0)

                r2 = ode(f).set_integrator(sol)
                r2.set_initial_value(0, 0)

                r.integrate(r.t + 0.1)
                r.integrate(r.t + 0.1)
                r2.integrate(r2.t + 0.1)
                r.integrate(r.t + 0.1)
                r2.integrate(r2.t + 0.1)

                assert_allclose(r.y, 0.3)
                assert_allclose(r2.y, 0.2)
Example #43
File: base.py Project: AI-Org/scipy
    def setdiag(self, values, k=0):
        """
        Set diagonal or off-diagonal elements of the array.

        Parameters
        ----------
        values : array_like
            New values of the diagonal elements.

            Values may have any length.  If the diagonal is longer than values,
            then the remaining diagonal entries will not be set.  If values is
            longer than the diagonal, then the remaining values are ignored.

            If a scalar value is given, all of the diagonal is set to it.

        k : int, optional
            Which off-diagonal to set, corresponding to elements a[i,i+k].
            Default: 0 (the main diagonal).

        """
        M, N = self.shape
        if (k > 0 and k >= N) or (k < 0 and -k >= M):
            raise ValueError("k exceeds matrix dimensions")
        if k < 0:
            if np.asarray(values).ndim == 0:
                # broadcast
                max_index = min(M+k, N)
                for i in xrange(max_index):
                    self[i - k, i] = values
            else:
                max_index = min(M+k, N, len(values))
                if max_index <= 0:
                    return
                for i,v in enumerate(values[:max_index]):
                    self[i - k, i] = v
        else:
            if np.asarray(values).ndim == 0:
                # broadcast
                max_index = min(M, N-k)
                for i in xrange(max_index):
                    self[i, i + k] = values
            else:
                max_index = min(M, N-k, len(values))
                if max_index <= 0:
                    return
                for i,v in enumerate(values[:max_index]):
                    self[i, i + k] = v
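
A usage sketch of the public setdiag method; lil_matrix is chosen here because it supports cheap item assignment, but any sparse format implementing setdiag behaves the same way.

from scipy.sparse import lil_matrix

A = lil_matrix((3, 3))
A.setdiag([1, 2, 3])      # main diagonal from an array_like
A.setdiag(9, k=1)         # broadcast a scalar along the first superdiagonal
print(A.toarray())
# [[1. 9. 0.]
#  [0. 2. 9.]
#  [0. 0. 3.]]
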
Example #44
File: base.py Project: yotam/scipy
    def setdiag(self, values, k=0):
        """
        Set diagonal or off-diagonal elements of the array.

        Parameters
        ----------
        values : array_like
            New values of the diagonal elements.

            Values may have any length.  If the diagonal is longer than values,
            then the remaining diagonal entries will not be set.  If values is
            longer than the diagonal, then the remaining values are ignored.

            If a scalar value is given, all of the diagonal is set to it.

        k : int, optional
            Which off-diagonal to set, corresponding to elements a[i,i+k].
            Default: 0 (the main diagonal).

        """
        M, N = self.shape
        if (k > 0 and k >= N) or (k < 0 and -k >= M):
            raise ValueError("k exceeds matrix dimensions")
        if k < 0:
            if np.asarray(values).ndim == 0:
                # broadcast
                max_index = min(M + k, N)
                for i in xrange(max_index):
                    self[i - k, i] = values
            else:
                max_index = min(M + k, N, len(values))
                if max_index <= 0:
                    return
                for i, v in enumerate(values[:max_index]):
                    self[i - k, i] = v
        else:
            if np.asarray(values).ndim == 0:
                # broadcast
                max_index = min(M, N - k)
                for i in xrange(max_index):
                    self[i, i + k] = values
            else:
                max_index = min(M, N - k, len(values))
                if max_index <= 0:
                    return
                for i, v in enumerate(values[:max_index]):
                    self[i, i + k] = v
Example #45
def comb(N,k,exact=False,repetition=False):
    """
    The number of combinations of N things taken k at a time.

    This is often expressed as "N choose k".

    Parameters
    ----------
    N : int, ndarray
        Number of things.
    k : int, ndarray
        Number of elements taken.
    exact : bool, optional
        If `exact` is False, then floating point precision is used, otherwise
        exact long integer is computed.
    repetition : bool, optional
        If `repetition` is True, then the number of combinations with
        repetition is computed.

    Returns
    -------
    val : int, ndarray
        The total number of combinations.

    Notes
    -----
    - Array arguments accepted only for exact=False case.
    - If k > N, N < 0, or k < 0, then a 0 is returned.

    Examples
    --------
    >>> k = np.array([3, 4])
    >>> n = np.array([10, 10])
    >>> sc.comb(n, k, exact=False)
    array([ 120.,  210.])
    >>> sc.comb(10, 3, exact=True)
    120L
    >>> sc.comb(10, 3, exact=True, repetition=True)
    220L

    """
    if repetition:
        return comb(N + k - 1, k, exact)
    if exact:
        if (k > N) or (N < 0) or (k < 0):
            return 0
        val = 1
        for j in xrange(min(k, N-k)):
            val = (val*(N-j))//(j+1)
        return val
    else:
        k,N = asarray(k), asarray(N)
        cond = (k <= N) & (N >= 0) & (k >= 0)
        vals = binom(N, k)
        if isinstance(vals, np.ndarray):
            vals[~cond] = 0
        elif not cond:
            vals = np.float64(0)
        return vals
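
The docstring examples restated with an explicit import. In current SciPy the same function is available from scipy.special; the `sc` alias in the docstring above is left as in the original snippet.

import numpy as np
from scipy.special import comb

print(comb(np.array([10, 10]), np.array([3, 4])))   # [120. 210.]
print(comb(10, 3, exact=True))                      # 120
print(comb(10, 3, exact=True, repetition=True))     # 220
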
Example #46
    def _check_dot(self, jac_cls, complex=False, tol=1e-6, **kw):
        np.random.seed(123)

        N = 7

        def rand(*a):
            q = np.random.rand(*a)
            if complex:
                q = q + 1j * np.random.rand(*a)
            return q

        def assert_close(a, b, msg):
            d = abs(a - b).max()
            f = tol + abs(b).max() * tol
            if d > f:
                raise AssertionError('%s: err %g' % (msg, d))

        self.A = rand(N, N)

        # initialize
        x0 = np.random.rand(N)
        jac = jac_cls(**kw)
        jac.setup(x0, self._func(x0), self._func)

        # check consistency
        for k in xrange(2 * N):
            v = rand(N)

            if hasattr(jac, '__array__'):
                Jd = np.array(jac)
                if hasattr(jac, 'solve'):
                    Gv = jac.solve(v)
                    Gv2 = np.linalg.solve(Jd, v)
                    assert_close(Gv, Gv2, 'solve vs array')
                if hasattr(jac, 'rsolve'):
                    Gv = jac.rsolve(v)
                    Gv2 = np.linalg.solve(Jd.T.conj(), v)
                    assert_close(Gv, Gv2, 'rsolve vs array')
                if hasattr(jac, 'matvec'):
                    Jv = jac.matvec(v)
                    Jv2 = np.dot(Jd, v)
                    assert_close(Jv, Jv2, 'dot vs array')
                if hasattr(jac, 'rmatvec'):
                    Jv = jac.rmatvec(v)
                    Jv2 = np.dot(Jd.T.conj(), v)
                    assert_close(Jv, Jv2, 'rmatvec vs array')

            if hasattr(jac, 'matvec') and hasattr(jac, 'solve'):
                Jv = jac.matvec(v)
                Jv2 = jac.solve(jac.matvec(Jv))
                assert_close(Jv, Jv2, 'dot vs solve')

            if hasattr(jac, 'rmatvec') and hasattr(jac, 'rsolve'):
                Jv = jac.rmatvec(v)
                Jv2 = jac.rmatvec(jac.rsolve(Jv))
                assert_close(Jv, Jv2, 'rmatvec vs rsolve')

            x = rand(N)
            jac.update(x, self._func(x))
Example #47
    def _check_dot(self, jac_cls, complex=False, tol=1e-6, **kw):
        np.random.seed(123)

        N = 7

        def rand(*a):
            q = np.random.rand(*a)
            if complex:
                q = q + 1j*np.random.rand(*a)
            return q

        def assert_close(a, b, msg):
            d = abs(a - b).max()
            f = tol + abs(b).max()*tol
            if d > f:
                raise AssertionError('%s: err %g' % (msg, d))

        self.A = rand(N, N)

        # initialize
        x0 = np.random.rand(N)
        jac = jac_cls(**kw)
        jac.setup(x0, self._func(x0), self._func)

        # check consistency
        for k in xrange(2*N):
            v = rand(N)

            if hasattr(jac, '__array__'):
                Jd = np.array(jac)
                if hasattr(jac, 'solve'):
                    Gv = jac.solve(v)
                    Gv2 = np.linalg.solve(Jd, v)
                    assert_close(Gv, Gv2, 'solve vs array')
                if hasattr(jac, 'rsolve'):
                    Gv = jac.rsolve(v)
                    Gv2 = np.linalg.solve(Jd.T.conj(), v)
                    assert_close(Gv, Gv2, 'rsolve vs array')
                if hasattr(jac, 'matvec'):
                    Jv = jac.matvec(v)
                    Jv2 = np.dot(Jd, v)
                    assert_close(Jv, Jv2, 'dot vs array')
                if hasattr(jac, 'rmatvec'):
                    Jv = jac.rmatvec(v)
                    Jv2 = np.dot(Jd.T.conj(), v)
                    assert_close(Jv, Jv2, 'rmatvec vs array')

            if hasattr(jac, 'matvec') and hasattr(jac, 'solve'):
                Jv = jac.matvec(v)
                Jv2 = jac.solve(jac.matvec(Jv))
                assert_close(Jv, Jv2, 'dot vs solve')

            if hasattr(jac, 'rmatvec') and hasattr(jac, 'rsolve'):
                Jv = jac.rmatvec(v)
                Jv2 = jac.rmatvec(jac.rsolve(Jv))
                assert_close(Jv, Jv2, 'rmatvec vs rsolve')

            x = rand(N)
            jac.update(x, self._func(x))
Example #48
def matrixmultiply(a, b):
    if len(b.shape) == 1:
        b_is_vector = True
        b = b[:,newaxis]
    else:
        b_is_vector = False
    assert_equal(a.shape[1], b.shape[0])
    c = zeros((a.shape[0], b.shape[1]), common_type(a, b))
    for i in xrange(a.shape[0]):
        for j in xrange(b.shape[1]):
            s = 0
            for k in xrange(a.shape[1]):
                s += a[i,k] * b[k, j]
            c[i,j] = s
    if b_is_vector:
        c = c.reshape((a.shape[0],))
    return c
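
A quick hedged check, assuming it is pasted into the same module as the helper above (which relies on star-imported NumPy names): the pure-Python triple loop should agree with numpy.dot.

import numpy as np
from numpy import newaxis, zeros, common_type   # names the helper expects
from numpy.testing import assert_equal          # used inside the helper

a = np.array([[1.0, 2.0], [3.0, 4.0]])
b = np.array([5.0, 6.0])
print(matrixmultiply(a, b))   # [17. 39.]
print(np.dot(a, b))           # the reference result, identical
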
Example #49
 def test_vector(self):
     xs = [0, 1, 2]
     ys = np.array([[0,1],[1,0],[2,1]])
     P = BarycentricInterpolator(xs,ys)
     Pi = [BarycentricInterpolator(xs,ys[:,i]) for i in xrange(ys.shape[1])]
     test_xs = np.linspace(-1,3,100)
     assert_almost_equal(P(test_xs),
             np.rollaxis(np.asarray([p(test_xs) for p in Pi]),-1))
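
A hedged sketch of scipy.interpolate.BarycentricInterpolator with vector-valued data, mirroring the test: each column of ys is interpolated independently.

import numpy as np
from scipy.interpolate import BarycentricInterpolator

xs = [0, 1, 2]
ys = np.array([[0, 1], [1, 0], [2, 1]])
P = BarycentricInterpolator(xs, ys)
print(P(0.5))    # ~[0.5, 0.25]: column 0 is y = x, column 1 is y = (x - 1)**2
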
Example #50
    def test_incremental(self):
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', category=DeprecationWarning)
            P = PiecewisePolynomial([self.xi[0]], [self.yi[0]], 3)

        for i in xrange(1, len(self.xi)):
            P.append(self.xi[i], self.yi[i], 3)
        assert_almost_equal(P(self.test_xs), self.spline_ys)
Example #51
 def test_inverse(self):
     for n in xrange(1, 10):
         a = hilbert(n)
         b = invhilbert(n)
         # The Hilbert matrix is increasingly badly conditioned,
         # so take that into account in the test
         c = cond(a)
         assert_allclose(a.dot(b), eye(n), atol=1e-15*c, rtol=1e-15*c)
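
A minimal sketch of the scipy.linalg helpers used above, for a small n where the conditioning is not yet an issue.

import numpy as np
from scipy.linalg import hilbert, invhilbert

a = hilbert(4)
b = invhilbert(4)
print(np.allclose(a.dot(b), np.eye(4)))   # True
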
Example #52
 def test_is_valid_im_4_and_up(self):
     # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
     # (step size 3).
     for i in xrange(4, 15, 3):
         y = np.random.rand(i * (i - 1) // 2)
         Z = linkage(y)
         R = inconsistent(Z)
         assert_(is_valid_im(R) == True)
Example #53
def matrixmultiply(a, b):
    if len(b.shape) == 1:
        b_is_vector = True
        b = b[:, newaxis]
    else:
        b_is_vector = False
    assert_equal(a.shape[1], b.shape[0])
    c = zeros((a.shape[0], b.shape[1]), common_type(a, b))
    for i in xrange(a.shape[0]):
        for j in xrange(b.shape[1]):
            s = 0
            for k in xrange(a.shape[1]):
                s += a[i, k] * b[k, j]
            c[i, j] = s
    if b_is_vector:
        c = c.reshape((a.shape[0], ))
    return c
Example #54
 def test_vector(self):
     xs = [0, 1, 2]
     ys = np.array([[0,1],[1,0],[2,1]])
     P = BarycentricInterpolator(xs,ys)
     Pi = [BarycentricInterpolator(xs,ys[:,i]) for i in xrange(ys.shape[1])]
     test_xs = np.linspace(-1,3,100)
     assert_almost_equal(P(test_xs),
             np.rollaxis(np.asarray([p(test_xs) for p in Pi]),-1))
Example #55
 def test_inverse(self):
     for n in xrange(1, 10):
         a = hilbert(n)
         b = invhilbert(n)
         # The Hilbert matrix is increasingly badly conditioned,
         # so take that into account in the test
         c = cond(a)
         assert_allclose(a.dot(b), eye(n), atol=1e-15 * c, rtol=1e-15 * c)
Example #56
 def test_is_valid_im_4_and_up(self):
     # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
     # (step size 3).
     for i in xrange(4, 15, 3):
         y = np.random.rand(i*(i-1)//2)
         Z = linkage(y)
         R = inconsistent(Z)
         assert_(is_valid_im(R) == True)
Example #57
    def test_incremental(self):
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', category=DeprecationWarning)
            P = PiecewisePolynomial([self.xi[0]], [self.yi[0]], 3)

        for i in xrange(1,len(self.xi)):
            P.append(self.xi[i],self.yi[i],3)
        assert_almost_equal(P(self.test_xs),self.spline_ys)
Example #58
def _boolrelextrema(data, comparator,
                  axis=0, order=1, mode='clip'):
    """
    Calculate the relative extrema of `data`.

    Relative extrema are calculated by finding locations where
    ``comparator(data[n], data[n+1:n+order+1])`` is True.

    Parameters
    ----------
    data : ndarray
        Array in which to find the relative extrema.
    comparator : callable
        Function to use to compare two data points.
        Should take 2 numbers as arguments.
    axis : int, optional
        Axis over which to select from `data`.  Default is 0.
    order : int, optional
        How many points on each side to use for the comparison
        to consider ``comparator(n,n+x)`` to be True.
    mode : str, optional
        How the edges of the vector are treated.  'wrap' (wrap around) or
        'clip' (treat overflow as the same as the last (or first) element).
        Default 'clip'.  See numpy.take

    Returns
    -------
    extrema : ndarray
        Boolean array of the same shape as `data` that is True at an extrema,
        False otherwise.

    See also
    --------
    argrelmax, argrelmin

    Examples
    --------
    >>> testdata = np.array([1,2,3,2,1])
    >>> _boolrelextrema(testdata, np.greater, axis=0)
    array([False, False,  True, False, False], dtype=bool)

    """
    if((int(order) != order) or (order < 1)):
        raise ValueError('Order must be an int >= 1')

    datalen = data.shape[axis]
    locs = np.arange(0, datalen)

    results = np.ones(data.shape, dtype=bool)
    main = data.take(locs, axis=axis, mode=mode)
    for shift in xrange(1, order + 1):
        plus = data.take(locs + shift, axis=axis, mode=mode)
        minus = data.take(locs - shift, axis=axis, mode=mode)
        results &= comparator(main, plus)
        results &= comparator(main, minus)
        if(~results.any()):
            return results
    return results
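
The private helper above backs the public scipy.signal.argrel* functions; a small sketch using the public API on the docstring's test data.

import numpy as np
from scipy.signal import argrelextrema

testdata = np.array([1, 2, 3, 2, 1])
print(argrelextrema(testdata, np.greater))   # (array([2]),)
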
Example #59
def _boolrelextrema(data, comparator,
                  axis=0, order=1, mode='clip'):
    """
    Calculate the relative extrema of `data`.

    Relative extrema are calculated by finding locations where
    ``comparator(data[n], data[n+1:n+order+1])`` is True.

    Parameters
    ----------
    data : ndarray
        Array in which to find the relative extrema.
    comparator : callable
        Function to use to compare two data points.
        Should take 2 numbers as arguments.
    axis : int, optional
        Axis over which to select from `data`.  Default is 0.
    order : int, optional
        How many points on each side to use for the comparison
        to consider ``comparator(n,n+x)`` to be True.
    mode : str, optional
        How the edges of the vector are treated.  'wrap' (wrap around) or
        'clip' (treat overflow as the same as the last (or first) element).
        Default 'clip'.  See numpy.take

    Returns
    -------
    extrema : ndarray
        Boolean array of the same shape as `data` that is True at an extrema,
        False otherwise.

    See also
    --------
    argrelmax, argrelmin

    Examples
    --------
    >>> testdata = np.array([1,2,3,2,1])
    >>> _boolrelextrema(testdata, np.greater, axis=0)
    array([False, False,  True, False, False], dtype=bool)

    """
    if((int(order) != order) or (order < 1)):
        raise ValueError('Order must be an int >= 1')

    datalen = data.shape[axis]
    locs = np.arange(0, datalen)

    results = np.ones(data.shape, dtype=bool)
    main = data.take(locs, axis=axis, mode=mode)
    for shift in xrange(1, order + 1):
        plus = data.take(locs + shift, axis=axis, mode=mode)
        minus = data.take(locs - shift, axis=axis, mode=mode)
        results &= comparator(main, plus)
        results &= comparator(main, minus)
        if(~results.any()):
            return results
    return results