Example #1
def svd(a, full_matrices=True, compute_uv=True, hermitian=False):
    import numpy as _nx
    a, wrap = _makearray(a)

    if hermitian:
        if compute_uv:
            s, u = eigh(a)
            sgn = sign(s)
            s = abs(s)
            sidx = argsort(s)[..., ::-1]
            sgn = _nx.take_along_axis(sgn, sidx, axis=-1)
            s = _nx.take_along_axis(s, sidx, axis=-1)
            u = _nx.take_along_axis(u, sidx[..., None, :], axis=-1)
            vt = transpose(u * sgn[..., None, :]).conjugate()
            return wrap(u), s, wrap(vt)
        else:
            s = eigvalsh(a)
            s = s[..., ::-1]
            s = abs(s)
            return sort(s)[..., ::-1]

    _assert_stacked_2d(a)
    t, result_t = _commonType(a)

    extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)

    m, n = a.shape[-2:]
    if compute_uv:
        if full_matrices:
            if m < n:
                gufunc = _umath_linalg.svd_m_f
            else:
                gufunc = _umath_linalg.svd_n_f
        else:
            if m < n:
                gufunc = _umath_linalg.svd_m_s
            else:
                gufunc = _umath_linalg.svd_n_s

        signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
        u, s, vh = gufunc(a, signature=signature, extobj=extobj)
        u = u.astype(result_t, copy=False)
        s = s.astype(_realType(result_t), copy=False)
        vh = vh.astype(result_t, copy=False)
        return wrap(u), s, wrap(vh)
    else:
        if m < n:
            gufunc = _umath_linalg.svd_m
        else:
            gufunc = _umath_linalg.svd_n

        signature = 'D->d' if isComplexType(t) else 'd->d'
        s = gufunc(a, signature=signature, extobj=extobj)
        s = s.astype(_realType(result_t), copy=False)
        return s
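The listing above is the library-side implementation; the short sketch below is not part of the numpy source, just assumed usage of the public np.linalg.svd API, checking the reconstruction identity a ≈ u @ diag(s) @ vh and the compute_uv=False path.

import numpy as np

rng = np.random.default_rng(0)
a = rng.standard_normal((4, 3))

# Reduced SVD: u is (4, 3), s is (3,), vh is (3, 3).
u, s, vh = np.linalg.svd(a, full_matrices=False)
print(np.allclose(a, u @ np.diag(s) @ vh))                  # True: a is reconstructed

# Singular values only, matching the compute_uv=False branch above.
print(np.allclose(s, np.linalg.svd(a, compute_uv=False)))   # True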
Example #2
	def analyse():
		pts = mesh.points		
		for p, neigh in it:
			# decide a local coordinate system
			u,v,w = dirbase(normals[p])
			# get the neighbors' contributions
			b = np.empty(len(neigh))
			a = np.empty((len(neigh), 14))
			for i,n in enumerate(neigh):
				e = pts[n] - pts[p]
				b[i] = dot(e,w)
				eu = dot(e,u)
				ev = dot(e,v)
				# these are the monomials combined to build a polynomial approximating the surface up to 4th-order derivatives
				a[i] = (	eu**2, ev**2, eu*ev,
							eu, ev,
							eu**3, eu**2*ev, eu*ev**2, ev**3,
							eu**4, eu**3*ev, eu**2*ev**2, eu*ev**3, ev**4,
							)

			# least squares resolution; the complexity is roughly the same as inverting a mat3
			(au, av, auv, *_), residuals, *_ = np.linalg.lstsq(a, b)
			# diagonalize the curve tensor to get the principal curvatures
			diag, transfer = np.linalg.eigh(mat2(2*au, auv, 
													auv, 2*av))
			yield 1/np.max(np.abs(diag)), p
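The loop above fits the listed monomials to the neighbors' offsets and reads the principal curvatures from the eigenvalues of the 2x2 Hessian of the quadratic part. Below is a standalone sketch of that idea on synthetic data with plain numpy; the sample points and curvature values are made up for illustration, and this is not madcad code.

import numpy as np

def principal_curvatures(eu, ev, w):
    # Design matrix of the quadratic monomials used above: u**2, v**2, u*v.
    a = np.column_stack([eu**2, ev**2, eu*ev])
    (au, av, auv), *_ = np.linalg.lstsq(a, w, rcond=None)
    # Hessian of the fitted quadric; its eigenvalues are the principal curvatures.
    hessian = np.array([[2*au, auv],
                        [auv, 2*av]])
    return np.linalg.eigvalsh(hessian)

rng = np.random.default_rng(1)
eu, ev = rng.uniform(-0.1, 0.1, (2, 50))
w = 0.5*(3.0*eu**2 + 1.0*ev**2)         # a surface with curvatures 3 and 1 at the origin
print(principal_curvatures(eu, ev, w))  # approximately [1., 3.]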
Example #3
def polyadd(a1, a2):
    """
    Find the sum of two polynomials.

    Returns the polynomial resulting from the sum of two input polynomials.
    Each input must be either a poly1d object or a 1D sequence of polynomial
    coefficients, from highest to lowest degree.

    Parameters
    ----------
    a1, a2 : array_like or poly1d object
        Input polynomials.

    Returns
    -------
    out : ndarray or poly1d object
        The sum of the inputs. If either input is a poly1d object, then the
        output is also a poly1d object. Otherwise, it is a 1D array of
        polynomial coefficients from highest to lowest degree.

    See Also
    --------
    poly1d : A one-dimensional polynomial class.
    poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval

    Examples
    --------
    >>> np.polyadd([1, 2], [9, 5, 4])
    array([9, 6, 6])

    Using poly1d objects:

    >>> p1 = np.poly1d([1, 2])
    >>> p2 = np.poly1d([9, 5, 4])
    >>> print(p1)
    1 x + 2
    >>> print(p2)
       2
    9 x + 5 x + 4
    >>> print(np.polyadd(p1, p2))
       2
    9 x + 6 x + 6

    """
    truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
    a1 = atleast_1d(a1)
    a2 = atleast_1d(a2)
    diff = len(a2) - len(a1)
    if diff == 0:
        val = a1 + a2
    elif diff > 0:
        zr = NX.zeros(diff, a1.dtype)
        val = NX.concatenate((zr, a1)) + a2
    else:
        zr = NX.zeros(abs(diff), a2.dtype)
        val = a1 + NX.concatenate((zr, a2))
    if truepoly:
        val = poly1d(val)
    return val
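The `diff` branches above simply left-pad the shorter coefficient array with zeros so that equal powers line up (coefficients are stored highest degree first). A quick sketch of that alignment, assuming the usual public numpy API:

import numpy as np

a1 = np.array([1, 2])          # 1*x + 2
a2 = np.array([9, 5, 4])       # 9*x**2 + 5*x + 4

# Left-pad the shorter array with zeros, then add elementwise.
padded = np.concatenate([np.zeros(len(a2) - len(a1), a1.dtype), a1])
print(padded + a2)             # [9 6 6]
print(np.polyadd(a1, a2))      # [9 6 6], same result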
Example #4
def polyadd(a1, a2):
    """
    Find the sum of two polynomials.

    Returns the polynomial resulting from the sum of two input polynomials.
    Each input must be either a poly1d object or a 1D sequence of polynomial
    coefficients, from highest to lowest degree.

    Parameters
    ----------
    a1, a2 : array_like or poly1d object
        Input polynomials.

    Returns
    -------
    out : ndarray or poly1d object
        The sum of the inputs. If either input is a poly1d object, then the
        output is also a poly1d object. Otherwise, it is a 1D array of
        polynomial coefficients from highest to lowest degree.

    See Also
    --------
    poly1d : A one-dimensional polynomial class.
    poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval

    Examples
    --------
    >>> np.polyadd([1, 2], [9, 5, 4])
    array([9, 6, 6])

    Using poly1d objects:

    >>> p1 = np.poly1d([1, 2])
    >>> p2 = np.poly1d([9, 5, 4])
    >>> print(p1)
    1 x + 2
    >>> print(p2)
       2
    9 x + 5 x + 4
    >>> print(np.polyadd(p1, p2))
       2
    9 x + 6 x + 6

    """
    truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
    a1 = atleast_1d(a1)
    a2 = atleast_1d(a2)
    diff = len(a2) - len(a1)
    if diff == 0:
        val = a1 + a2
    elif diff > 0:
        zr = NX.zeros(diff, a1.dtype)
        val = NX.concatenate((zr, a1)) + a2
    else:
        zr = NX.zeros(abs(diff), a2.dtype)
        val = a1 + NX.concatenate((zr, a2))
    if truepoly:
        val = poly1d(val)
    return val
Example #5
def mesh_curvature_radius(mesh, conn=None, normals=None, propagate=2) -> '(distance, point)':
	''' find the minimum curvature radius of a mesh.
	
		Parameters:
		
			mesh:			the surface/line to search
			conn:			a point-to-point connectivity (computed if not provided)
			normals:		the vertex normals (computed if not provided)
			propagate(int):	the maximum propagation rank for points to pick for the regression
	
		Returns:	`(distance: float, point: int)` where the kind of primitive referenced varies according to the input mesh dimension
	'''
		
	curvatures = mesh_curvatures(mesh, conn, normals, propagate)
	
	place = min(range(len(mesh.points)),
				key=lambda p: 1/np.max(np.abs(curvatures[p][0])), 
				default=None)
	return 1/np.max(np.abs(curvatures[place][0])), place
Example #6
def polysub(a1, a2):
    """
    Difference (subtraction) of two polynomials.

    .. note::
       This forms part of the old polynomial API. Since version 1.4, the
       new polynomial API defined in `numpy.polynomial` is preferred.
       A summary of the differences can be found in the
       :doc:`transition guide </reference/routines.polynomials>`.

    Given two polynomials `a1` and `a2`, returns ``a1 - a2``.
    `a1` and `a2` can be either array_like sequences of the polynomials'
    coefficients (including coefficients equal to zero), or `poly1d` objects.

    Parameters
    ----------
    a1, a2 : array_like or poly1d
        Minuend and subtrahend polynomials, respectively.

    Returns
    -------
    out : ndarray or poly1d
        Array or `poly1d` object of the difference polynomial's coefficients.

    See Also
    --------
    polyval, polydiv, polymul, polyadd

    Examples
    --------
    .. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)

    >>> np.polysub([2, 10, -2], [3, 10, -4])
    array([-1,  0,  2])

    """
    truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
    a1 = atleast_1d(a1)
    a2 = atleast_1d(a2)
    diff = len(a2) - len(a1)
    if diff == 0:
        val = a1 - a2
    elif diff > 0:
        zr = NX.zeros(diff, a1.dtype)
        val = NX.concatenate((zr, a1)) - a2
    else:
        zr = NX.zeros(abs(diff), a2.dtype)
        val = a1 - NX.concatenate((zr, a2))
    if truepoly:
        val = poly1d(val)
    return val
Example #7
    def __str__(self):
        thestr = "0"
        var = self.variable

        # Remove leading zeros
        coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
        N = len(coeffs) - 1

        for k in range(len(coeffs)):
            coefstr = '%.4g' % abs(coeffs[k])
            if coefstr[-4:] == '0000':
                coefstr = coefstr[:-5]
            power = (N - k)
            if power == 0:
                if coefstr != '0':
                    newstr = '%s' % (coefstr, )
                else:
                    if k == 0:
                        newstr = '0'
                    else:
                        newstr = ''
            elif power == 1:
                if coefstr == '0':
                    newstr = ''
                elif coefstr == 'b':
                    newstr = var
                else:
                    newstr = '%s %s' % (coefstr, var)
            else:
                if coefstr == '0':
                    newstr = ''
                elif coefstr == 'b':
                    newstr = '%s**%d' % (
                        var,
                        power,
                    )
                else:
                    newstr = '%s %s**%d' % (coefstr, var, power)

            if k > 0:
                if newstr != '':
                    if coeffs[k] < 0:
                        thestr = "%s - %s" % (thestr, newstr)
                    else:
                        thestr = "%s + %s" % (thestr, newstr)
            elif (k == 0) and (newstr != '') and (coeffs[k] < 0):
                thestr = "-%s" % (newstr, )
            else:
                thestr = newstr
        return _raise_power(thestr)
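The leading-zero removal above relies on `logical_or.accumulate`: the boolean mask becomes True at the first nonzero coefficient and stays True afterwards, so indexing with it drops only the leading zeros. A small sketch of that trick:

import numpy as np

coeffs = np.array([0., 0., 3., 0., 5.])
mask = np.logical_or.accumulate(coeffs != 0)
print(mask)          # [False False  True  True  True]
print(coeffs[mask])  # [3. 0. 5.]  -> leading zeros removed, inner zeros kept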
Example #8
    def __str__(self):
        thestr = "0"
        var = self.variable

        # Remove leading zeros
        coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
        N = len(coeffs)-1

        for k in range(len(coeffs)):
            coefstr = '%.4g' % abs(coeffs[k])
            if coefstr[-4:] == '0000':
                coefstr = coefstr[:-5]
            power = (N-k)
            if power == 0:
                if coefstr != '0':
                    newstr = '%s' % (coefstr,)
                else:
                    if k == 0:
                        newstr = '0'
                    else:
                        newstr = ''
            elif power == 1:
                if coefstr == '0':
                    newstr = ''
                elif coefstr == 'b':
                    newstr = var
                else:
                    newstr = '%s %s' % (coefstr, var)
            else:
                if coefstr == '0':
                    newstr = ''
                elif coefstr == 'b':
                    newstr = '%s**%d' % (var, power,)
                else:
                    newstr = '%s %s**%d' % (coefstr, var, power)

            if k > 0:
                if newstr != '':
                    if coeffs[k] < 0:
                        thestr = "%s - %s" % (thestr, newstr)
                    else:
                        thestr = "%s + %s" % (thestr, newstr)
            elif (k == 0) and (newstr != '') and (coeffs[k] < 0):
                thestr = "-%s" % (newstr,)
            else:
                thestr = newstr
        return _raise_power(thestr)
Example #9
def polysub(a1, a2):
    """
    Returns difference from subtraction of two polynomials input as sequences.

    Returns difference of polynomials; `a1` - `a2`.  Input polynomials are
    represented as an array_like sequence of terms or a poly1d object.

    Parameters
    ----------
    a1 : {array_like, poly1d}
        Minuend polynomial as sequence of terms.
    a2 : {array_like, poly1d}
        Subtrahend polynomial as sequence of terms.

    Returns
    -------
    out : {ndarray, poly1d}
        Array representing the polynomial terms.

    See Also
    --------
    polyval, polydiv, polymul, polyadd

    Examples
    --------
    .. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)

    >>> np.polysub([2, 10, -2], [3, 10, -4])
    array([-1,  0,  2])

    """
    truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
    a1 = atleast_1d(a1)
    a2 = atleast_1d(a2)
    diff = len(a2) - len(a1)
    if diff == 0:
        val = a1 - a2
    elif diff > 0:
        zr = NX.zeros(diff, a1.dtype)
        val = NX.concatenate((zr, a1)) - a2
    else:
        zr = NX.zeros(abs(diff), a2.dtype)
        val = a1 - NX.concatenate((zr, a2))
    if truepoly:
        val = poly1d(val)
    return val
Example #10
    def __str__(self):
        N = self.order
        thestr = "0"
        var = self.variable
        for k in range(len(self.coeffs)):
            coefstr = '%.4g' % abs(self.coeffs[k])
            if coefstr[-4:] == '0000':
                coefstr = coefstr[:-5]
            power = (N - k)
            if power == 0:
                if coefstr != '0':
                    newstr = '%s' % (coefstr, )
                else:
                    if k == 0:
                        newstr = '0'
                    else:
                        newstr = ''
            elif power == 1:
                if coefstr == '0':
                    newstr = ''
                elif coefstr == 'b':
                    newstr = var
                else:
                    newstr = '%s %s' % (coefstr, var)
            else:
                if coefstr == '0':
                    newstr = ''
                elif coefstr == 'b':
                    newstr = '%s**%d' % (
                        var,
                        power,
                    )
                else:
                    newstr = '%s %s**%d' % (coefstr, var, power)

            if k > 0:
                if newstr != '':
                    if self.coeffs[k] < 0:
                        thestr = "%s - %s" % (thestr, newstr)
                    else:
                        thestr = "%s + %s" % (thestr, newstr)
            elif (k == 0) and (newstr != '') and (self.coeffs[k] < 0):
                thestr = "-%s" % (newstr, )
            else:
                thestr = newstr
        return _raise_power(thestr)
Example #11
def polysub(a1, a2):
    """
    Returns difference from subtraction of two polynomials input as sequences.

    Returns difference of polynomials; `a1` - `a2`.  Input polynomials are
    represented as an array_like sequence of terms or a poly1d object.

    Parameters
    ----------
    a1 : {array_like, poly1d}
        Minuend polynomial as sequence of terms.
    a2 : {array_like, poly1d}
        Subtrahend polynomial as sequence of terms.

    Returns
    -------
    out : {ndarray, poly1d}
        Array representing the polynomial terms.

    See Also
    --------
    polyval, polydiv, polymul, polyadd

    Examples
    --------
    .. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)

    >>> np.polysub([2, 10, -2], [3, 10, -4])
    array([-1,  0,  2])

    """
    truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
    a1 = atleast_1d(a1)
    a2 = atleast_1d(a2)
    diff = len(a2) - len(a1)
    if diff == 0:
        val = a1 - a2
    elif diff > 0:
        zr = NX.zeros(diff, a1.dtype)
        val = NX.concatenate((zr, a1)) - a2
    else:
        zr = NX.zeros(abs(diff), a2.dtype)
        val = a1 - NX.concatenate((zr, a2))
    if truepoly:
        val = poly1d(val)
    return val
Example #12
def polysub(a1, a2):
    """
    Difference (subtraction) of two polynomials.

    Given two polynomials `a1` and `a2`, returns ``a1 - a2``.
    `a1` and `a2` can be either array_like sequences of the polynomials'
    coefficients (including coefficients equal to zero), or `poly1d` objects.

    Parameters
    ----------
    a1, a2 : array_like or poly1d
        Minuend and subtrahend polynomials, respectively.

    Returns
    -------
    out : ndarray or poly1d
        Array or `poly1d` object of the difference polynomial's coefficients.

    See Also
    --------
    polyval, polydiv, polymul, polyadd

    Examples
    --------
    .. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)

    >>> np.polysub([2, 10, -2], [3, 10, -4])
    array([-1,  0,  2])

    """
    truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
    a1 = atleast_1d(a1)
    a2 = atleast_1d(a2)
    diff = len(a2) - len(a1)
    if diff == 0:
        val = a1 - a2
    elif diff > 0:
        zr = NX.zeros(diff, a1.dtype)
        val = NX.concatenate((zr, a1)) - a2
    else:
        zr = NX.zeros(abs(diff), a2.dtype)
        val = a1 - NX.concatenate((zr, a2))
    if truepoly:
        val = poly1d(val)
    return val
Example #13
def polysub(a1, a2):
    """
    Difference (subtraction) of two polynomials.

    Given two polynomials `a1` and `a2`, returns ``a1 - a2``.
    `a1` and `a2` can be either array_like sequences of the polynomials'
    coefficients (including coefficients equal to zero), or `poly1d` objects.

    Parameters
    ----------
    a1, a2 : array_like or poly1d
        Minuend and subtrahend polynomials, respectively.

    Returns
    -------
    out : ndarray or poly1d
        Array or `poly1d` object of the difference polynomial's coefficients.

    See Also
    --------
    polyval, polydiv, polymul, polyadd

    Examples
    --------
    .. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)

    >>> np.polysub([2, 10, -2], [3, 10, -4])
    array([-1,  0,  2])

    """
    truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
    a1 = atleast_1d(a1)
    a2 = atleast_1d(a2)
    diff = len(a2) - len(a1)
    if diff == 0:
        val = a1 - a2
    elif diff > 0:
        zr = NX.zeros(diff, a1.dtype)
        val = NX.concatenate((zr, a1)) - a2
    else:
        zr = NX.zeros(abs(diff), a2.dtype)
        val = a1 - NX.concatenate((zr, a2))
    if truepoly:
        val = poly1d(val)
    return val
Example #14
def polysub(a1, a2):
    """Subtracts two polynomials represented as sequences
    """
    truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
    a1 = atleast_1d(a1)
    a2 = atleast_1d(a2)
    diff = len(a2) - len(a1)
    if diff == 0:
        val = a1 - a2
    elif diff > 0:
        zr = NX.zeros(diff, a1.dtype)
        val = NX.concatenate((zr, a1)) - a2
    else:
        zr = NX.zeros(abs(diff), a2.dtype)
        val = a1 - NX.concatenate((zr, a2))
    if truepoly:
        val = poly1d(val)
    return val
Example #15
    def __str__(self):
        N = self.order
        thestr = "0"
        var = self.variable
        for k in range(len(self.coeffs)):
            coefstr = '%.4g' % abs(self.coeffs[k])
            if coefstr[-4:] == '0000':
                coefstr = coefstr[:-5]
            power = (N-k)
            if power == 0:
                if coefstr != '0':
                    newstr = '%s' % (coefstr,)
                else:
                    if k == 0:
                        newstr = '0'
                    else:
                        newstr = ''
            elif power == 1:
                if coefstr == '0':
                    newstr = ''
                elif coefstr == 'b':
                    newstr = var
                else:
                    newstr = '%s %s' % (coefstr, var)
            else:
                if coefstr == '0':
                    newstr = ''
                elif coefstr == 'b':
                    newstr = '%s**%d' % (var, power,)
                else:
                    newstr = '%s %s**%d' % (coefstr, var, power)

            if k > 0:
                if newstr != '':
                    if self.coeffs[k] < 0:
                        thestr = "%s - %s" % (thestr, newstr)
                    else:
                        thestr = "%s + %s" % (thestr, newstr)
            elif (k == 0) and (newstr != '') and (self.coeffs[k] < 0):
                thestr = "-%s" % (newstr,)
            else:
                thestr = newstr
        return _raise_power(thestr)
Example #16
def polyadd(a1, a2):
    """
    Returns sum of two polynomials.

    Returns sum of polynomials; `a1` + `a2`.  Input polynomials are
    represented as an array_like sequence of terms or a poly1d object.

    Parameters
    ----------
    a1 : {array_like, poly1d}
        Polynomial as sequence of terms.
    a2 : {array_like, poly1d}
        Polynomial as sequence of terms.

    Returns
    -------
    out : {ndarray, poly1d}
        Array representing the polynomial terms.

    See Also
    --------
    polyval, polydiv, polymul, polyadd

    """
    truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
    a1 = atleast_1d(a1)
    a2 = atleast_1d(a2)
    diff = len(a2) - len(a1)
    if diff == 0:
        val = a1 + a2
    elif diff > 0:
        zr = NX.zeros(diff, a1.dtype)
        val = NX.concatenate((zr, a1)) + a2
    else:
        zr = NX.zeros(abs(diff), a2.dtype)
        val = a1 + NX.concatenate((zr, a2))
    if truepoly:
        val = poly1d(val)
    return val
Example #17
def polyadd(a1, a2):
    """
    Returns sum of two polynomials.

    Returns sum of polynomials; `a1` + `a2`.  Input polynomials are
    represented as an array_like sequence of terms or a poly1d object.

    Parameters
    ----------
    a1 : {array_like, poly1d}
        Polynomial as sequence of terms.
    a2 : {array_like, poly1d}
        Polynomial as sequence of terms.

    Returns
    -------
    out : {ndarray, poly1d}
        Array representing the polynomial terms.

    See Also
    --------
    polyval, polydiv, polymul, polyadd

    """
    truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
    a1 = atleast_1d(a1)
    a2 = atleast_1d(a2)
    diff = len(a2) - len(a1)
    if diff == 0:
        val = a1 + a2
    elif diff > 0:
        zr = NX.zeros(diff, a1.dtype)
        val = NX.concatenate((zr, a1)) + a2
    else:
        zr = NX.zeros(abs(diff), a2.dtype)
        val = a1 + NX.concatenate((zr, a2))
    if truepoly:
        val = poly1d(val)
    return val
Example #18
def polyfit(x, y, deg, rcond=None, full=False):
    """
    Least squares polynomial fit.

    Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
    to points `(x, y)`. Returns a vector of coefficients `p` that minimises
    the squared error.

    Parameters
    ----------
    x : array_like, shape (M,)
        x-coordinates of the M sample points ``(x[i], y[i])``.
    y : array_like, shape (M,) or (M, K)
        y-coordinates of the sample points. Several data sets of sample
        points sharing the same x-coordinates can be fitted at once by
        passing in a 2D-array that contains one dataset per column.
    deg : int
        Degree of the fitting polynomial
    rcond : float, optional
        Relative condition number of the fit. Singular values smaller than this
        relative to the largest singular value will be ignored. The default
        value is len(x)*eps, where eps is the relative precision of the float
        type, about 2e-16 in most cases.
    full : bool, optional
        Switch determining nature of return value. When it is
        False (the default) just the coefficients are returned, when True
        diagnostic information from the singular value decomposition is also
        returned.

    Returns
    -------
    p : ndarray, shape (M,) or (M, K)
        Polynomial coefficients, highest power first.
        If `y` was 2-D, the coefficients for `k`-th data set are in ``p[:,k]``.

    residuals, rank, singular_values, rcond : present only if `full` = True
        Residuals of the least-squares fit, the effective rank of the scaled
        Vandermonde coefficient matrix, its singular values, and the specified
        value of `rcond`. For more details, see `linalg.lstsq`.

    Warns
    -----
    RankWarning
        The rank of the coefficient matrix in the least-squares fit is
        deficient. The warning is only raised if `full` = False.

        The warnings can be turned off by

        >>> import warnings
        >>> warnings.simplefilter('ignore', np.RankWarning)

    See Also
    --------
    polyval : Computes polynomial values.
    linalg.lstsq : Computes a least-squares fit.
    scipy.interpolate.UnivariateSpline : Computes spline fits.

    Notes
    -----
    The solution minimizes the squared error

    .. math ::
        E = \\sum_{j=0}^k |p(x_j) - y_j|^2

    in the equations::

        x[0]**n * p[n] + ... + x[0] * p[1] + p[0] = y[0]
        x[1]**n * p[n] + ... + x[1] * p[1] + p[0] = y[1]
        ...
        x[k]**n * p[n] + ... + x[k] * p[1] + p[0] = y[k]

    The coefficient matrix of the coefficients `p` is a Vandermonde matrix.

    `polyfit` issues a `RankWarning` when the least-squares fit is badly
    conditioned. This implies that the best fit is not well-defined due
    to numerical error. The results may be improved by lowering the polynomial
    degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter
    can also be set to a value smaller than its default, but the resulting
    fit may be spurious: including contributions from the small singular
    values can add numerical noise to the result.

    Note that fitting polynomial coefficients is inherently badly conditioned
    when the degree of the polynomial is large or the interval of sample points
    is badly centered. The quality of the fit should always be checked in these
    cases. When polynomial fits are not satisfactory, splines may be a good
    alternative.

    References
    ----------
    .. [1] Wikipedia, "Curve fitting",
           http://en.wikipedia.org/wiki/Curve_fitting
    .. [2] Wikipedia, "Polynomial interpolation",
           http://en.wikipedia.org/wiki/Polynomial_interpolation

    Examples
    --------
    >>> x = np.array([0.0, 1.0, 2.0, 3.0,  4.0,  5.0])
    >>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
    >>> z = np.polyfit(x, y, 3)
    >>> z
    array([ 0.08703704, -0.81349206,  1.69312169, -0.03968254])

    It is convenient to use `poly1d` objects for dealing with polynomials:

    >>> p = np.poly1d(z)
    >>> p(0.5)
    0.6143849206349179
    >>> p(3.5)
    -0.34732142857143039
    >>> p(10)
    22.579365079365115

    High-order polynomials may oscillate wildly:

    >>> p30 = np.poly1d(np.polyfit(x, y, 30))
    /... RankWarning: Polyfit may be poorly conditioned...
    >>> p30(4)
    -0.80000000000000204
    >>> p30(5)
    -0.99999999999999445
    >>> p30(4.5)
    -0.10547061179440398

    Illustration:

    >>> import matplotlib.pyplot as plt
    >>> xp = np.linspace(-2, 6, 100)
    >>> plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
    [<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
    >>> plt.ylim(-2,2)
    (-2, 2)
    >>> plt.show()

    """
    order = int(deg) + 1
    x = NX.asarray(x) + 0.0
    y = NX.asarray(y) + 0.0

    # check arguments.
    if deg < 0:
        raise ValueError("expected deg >= 0")
    if x.ndim != 1:
        raise TypeError("expected 1D vector for x")
    if x.size == 0:
        raise TypeError("expected non-empty vector for x")
    if y.ndim < 1 or y.ndim > 2:
        raise TypeError("expected 1D or 2D array for y")
    if x.shape[0] != y.shape[0]:
        raise TypeError("expected x and y to have same length")

    # set rcond
    if rcond is None:
        rcond = len(x)*finfo(x.dtype).eps

    # scale x to improve condition number
    scale = abs(x).max()
    if scale != 0:
        x /= scale

    # solve least squares equation for powers of x
    v = vander(x, order)
    c, resids, rank, s = lstsq(v, y, rcond)

    # warn on rank reduction, which indicates an ill conditioned matrix
    if rank != order and not full:
        msg = "Polyfit may be poorly conditioned"
        warnings.warn(msg, RankWarning)

    # scale returned coefficients
    if scale != 0:
        if c.ndim == 1:
            c /= vander([scale], order)[0]
        else:
            c /= vander([scale], order).T

    if full:
        return c, resids, rank, s, rcond
    else:
        return c
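The scaling step above divides `x` by its largest magnitude to improve the conditioning of the Vandermonde matrix, then undoes the scaling by dividing the coefficients by `vander([scale], order)[0]`, which is `[scale**deg, ..., scale, 1]`. A small sketch of that identity, assuming the public numpy API:

import numpy as np

x = np.linspace(1.0, 5.0, 20)
y = 2.0*x**2 - 3.0*x + 1.0
order = 3                                   # deg + 1

scale = np.abs(x).max()
# Fit on the scaled abscissa, then rescale the coefficients back.
c_scaled, *_ = np.linalg.lstsq(np.vander(x/scale, order), y, rcond=None)
c = c_scaled / np.vander([scale], order)[0]

print(np.round(c, 6))                       # [ 2. -3.  1.]
print(np.round(np.polyfit(x, y, 2), 6))     # same coefficients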
Example #19
def norm(x, ord=None, axis=None):
    """
    Norm of a sparse matrix

    This function is able to return one of seven different matrix norms,
    depending on the value of the ``ord`` parameter.

    Parameters
    ----------
    x : a sparse matrix
        Input sparse matrix.
    ord : {non-zero int, inf, -inf, 'fro'}, optional
        Order of the norm (see table under ``Notes``). inf means numpy's
        `inf` object.
    axis : {int, 2-tuple of ints, None}, optional
        If `axis` is an integer, it specifies the axis of `x` along which to
        compute the vector norms.  If `axis` is a 2-tuple, it specifies the
        axes that hold 2-D matrices, and the matrix norms of these matrices
        are computed.  If `axis` is None then either a vector norm (when `x`
        is 1-D) or a matrix norm (when `x` is 2-D) is returned.

    Returns
    -------
    n : float or ndarray

    Notes
    -----
    Some norm orders are not implemented because the associated helpers, such
    as `_multi_svd_norm`, are not yet available for sparse matrices.

    This docstring is adapted from that of numpy.linalg.norm:
    https://github.com/numpy/numpy/blob/master/numpy/linalg/linalg.py

    The following norms can be calculated:

    =====  ============================  
    ord    norm for sparse matrices             
    =====  ============================  
    None   Frobenius norm                
    'fro'  Frobenius norm                
    inf    max(sum(abs(x), axis=1))      
    -inf   min(sum(abs(x), axis=1))      
    0      abs(x).sum(axis=axis)                           
    1      max(sum(abs(x), axis=0))      
    -1     min(sum(abs(x), axis=0))      
    2      Not implemented  
    -2     Not implemented      
    other  Not implemented                               
    =====  ============================  

    The Frobenius norm is given by [1]_:

        :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`

    References
    ----------
    .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
        Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15

    Examples
    --------
    >>> from scipy.sparse import *
    >>> import numpy as np
    >>> from scipy.sparse.linalg import norm
    >>> a = np.arange(9) - 4
    >>> a
    array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
    >>> b = a.reshape((3, 3))
    >>> b
    array([[-4, -3, -2],
           [-1, 0, 1],
           [ 2, 3, 4]])

    >>> b = csr_matrix(b)
    >>> norm(b)
    7.745966692414834
    >>> norm(b, 'fro')
    7.745966692414834
    >>> norm(b, np.inf)
    9
    >>> norm(b, -np.inf)
    2
    >>> norm(b, 1)
    7
    >>> norm(b, -1)
    6

    """
    if not issparse(x):
        raise TypeError("input is not sparse. use numpy.linalg.norm")

    # Check the default case first and handle it immediately.
    if axis is None and ord in (None, 'fro', 'f'):
        return _sparse_frobenius_norm(x)

    # Some norms require functions that are not implemented for all types.
    x = x.tocsr()

    if axis is None:
        axis = (0, 1)
    elif not isinstance(axis, tuple):
        msg = "'axis' must be None, an integer or a tuple of integers"
        try:
            int_axis = int(axis)
        except TypeError:
            raise TypeError(msg)
        if axis != int_axis:
            raise TypeError(msg)
        axis = (int_axis, )

    nd = 2
    if len(axis) == 2:
        row_axis, col_axis = axis
        if not (-nd <= row_axis < nd and -nd <= col_axis < nd):
            raise ValueError('Invalid axis %r for an array with shape %r' %
                             (axis, x.shape))
        if row_axis % nd == col_axis % nd:
            raise ValueError('Duplicate axes given.')
        if ord == 2:
            raise NotImplementedError
            #return _multi_svd_norm(x, row_axis, col_axis, amax)
        elif ord == -2:
            raise NotImplementedError
            #return _multi_svd_norm(x, row_axis, col_axis, amin)
        elif ord == 1:
            return abs(x).sum(axis=row_axis).max(axis=col_axis)[0, 0]
        elif ord == Inf:
            return abs(x).sum(axis=col_axis).max(axis=row_axis)[0, 0]
        elif ord == -1:
            return abs(x).sum(axis=row_axis).min(axis=col_axis)[0, 0]
        elif ord == -Inf:
            return abs(x).sum(axis=col_axis).min(axis=row_axis)[0, 0]
        elif ord in (None, 'f', 'fro'):
            # The axis order does not matter for this norm.
            return _sparse_frobenius_norm(x)
        else:
            raise ValueError("Invalid norm order for matrices.")
    elif len(axis) == 1:
        a, = axis
        if not (-nd <= a < nd):
            raise ValueError('Invalid axis %r for an array with shape %r' %
                             (axis, x.shape))
        if ord == Inf:
            M = abs(x).max(axis=a)
        elif ord == -Inf:
            M = abs(x).min(axis=a)
        elif ord == 0:
            # Zero norm
            M = (x != 0).sum(axis=a)
        elif ord == 1:
            # special case for speedup
            M = abs(x).sum(axis=a)
        elif ord in (2, None):
            M = sqrt(abs(x).power(2).sum(axis=a))
        else:
            try:
                ord + 1
            except TypeError:
                raise ValueError('Invalid norm order for vectors.')
            M = np.power(abs(x).power(ord).sum(axis=a), 1 / ord)
        return M.A.ravel()
    else:
        raise ValueError("Improper number of dimensions to norm.")
Example #20
def _sparse_frobenius_norm(x):
    if np.issubdtype(x.dtype, np.complexfloating):
        sqnorm = abs(x).power(2).sum()
    else:
        sqnorm = x.power(2).sum()
    return sqrt(sqnorm)
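A quick check of what the helper computes: the square root of the sum of |a_ij|^2, matching both `norm(..., 'fro')` for the sparse matrix and the dense Frobenius norm. This is assumed usage, not scipy source:

import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.linalg import norm

dense = np.array([[1.0, -2.0], [0.0, 3.0 + 4.0j]])
sparse = csr_matrix(dense)

print(norm(sparse, 'fro'))                      # sparse Frobenius norm
print(np.sqrt((np.abs(dense)**2).sum()))        # same value from the definition
print(np.linalg.norm(dense, 'fro'))             # same value from dense numpy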
Example #21
def polyfit(x, y, deg, rcond=None, full=False):
    """
    Least squares polynomial fit.

    Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
    to points `(x, y)`. Returns a vector of coefficients `p` that minimises
    the squared error.

    Parameters
    ----------
    x : array_like, shape (M,)
        x-coordinates of the M sample points ``(x[i], y[i])``.
    y : array_like, shape (M,) or (M, K)
        y-coordinates of the sample points. Several data sets of sample
        points sharing the same x-coordinates can be fitted at once by
        passing in a 2D-array that contains one dataset per column.
    deg : int
        Degree of the fitting polynomial
    rcond : float, optional
        Relative condition number of the fit. Singular values smaller than this
        relative to the largest singular value will be ignored. The default
        value is len(x)*eps, where eps is the relative precision of the float
        type, about 2e-16 in most cases.
    full : bool, optional
        Switch determining nature of return value. When it is
        False (the default) just the coefficients are returned, when True
        diagnostic information from the singular value decomposition is also
        returned.

    Returns
    -------
    p : ndarray, shape (M,) or (M, K)
        Polynomial coefficients, highest power first.
        If `y` was 2-D, the coefficients for `k`-th data set are in ``p[:,k]``.

    residuals, rank, singular_values, rcond : present only if `full` = True
        Residuals of the least-squares fit, the effective rank of the scaled
        Vandermonde coefficient matrix, its singular values, and the specified
        value of `rcond`. For more details, see `linalg.lstsq`.

    Warns
    -----
    RankWarning
        The rank of the coefficient matrix in the least-squares fit is
        deficient. The warning is only raised if `full` = False.

        The warnings can be turned off by

        >>> import warnings
        >>> warnings.simplefilter('ignore', np.RankWarning)

    See Also
    --------
    polyval : Computes polynomial values.
    linalg.lstsq : Computes a least-squares fit.
    scipy.interpolate.UnivariateSpline : Computes spline fits.

    Notes
    -----
    The solution minimizes the squared error

    .. math ::
        E = \\sum_{j=0}^k |p(x_j) - y_j|^2

    in the equations::

        x[0]**n * p[n] + ... + x[0] * p[1] + p[0] = y[0]
        x[1]**n * p[n] + ... + x[1] * p[1] + p[0] = y[1]
        ...
        x[k]**n * p[n] + ... + x[k] * p[1] + p[0] = y[k]

    The coefficient matrix of the coefficients `p` is a Vandermonde matrix.

    `polyfit` issues a `RankWarning` when the least-squares fit is badly
    conditioned. This implies that the best fit is not well-defined due
    to numerical error. The results may be improved by lowering the polynomial
    degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter
    can also be set to a value smaller than its default, but the resulting
    fit may be spurious: including contributions from the small singular
    values can add numerical noise to the result.

    Note that fitting polynomial coefficients is inherently badly conditioned
    when the degree of the polynomial is large or the interval of sample points
    is badly centered. The quality of the fit should always be checked in these
    cases. When polynomial fits are not satisfactory, splines may be a good
    alternative.

    References
    ----------
    .. [1] Wikipedia, "Curve fitting",
           http://en.wikipedia.org/wiki/Curve_fitting
    .. [2] Wikipedia, "Polynomial interpolation",
           http://en.wikipedia.org/wiki/Polynomial_interpolation

    Examples
    --------
    >>> x = np.array([0.0, 1.0, 2.0, 3.0,  4.0,  5.0])
    >>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
    >>> z = np.polyfit(x, y, 3)
    >>> z
    array([ 0.08703704, -0.81349206,  1.69312169, -0.03968254])

    It is convenient to use `poly1d` objects for dealing with polynomials:

    >>> p = np.poly1d(z)
    >>> p(0.5)
    0.6143849206349179
    >>> p(3.5)
    -0.34732142857143039
    >>> p(10)
    22.579365079365115

    High-order polynomials may oscillate wildly:

    >>> p30 = np.poly1d(np.polyfit(x, y, 30))
    /... RankWarning: Polyfit may be poorly conditioned...
    >>> p30(4)
    -0.80000000000000204
    >>> p30(5)
    -0.99999999999999445
    >>> p30(4.5)
    -0.10547061179440398

    Illustration:

    >>> import matplotlib.pyplot as plt
    >>> xp = np.linspace(-2, 6, 100)
    >>> plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
    [<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
    >>> plt.ylim(-2,2)
    (-2, 2)
    >>> plt.show()

    """
    order = int(deg) + 1
    x = NX.asarray(x) + 0.0
    y = NX.asarray(y) + 0.0

    # check arguments.
    if deg < 0:
        raise ValueError("expected deg >= 0")
    if x.ndim != 1:
        raise TypeError("expected 1D vector for x")
    if x.size == 0:
        raise TypeError("expected non-empty vector for x")
    if y.ndim < 1 or y.ndim > 2:
        raise TypeError("expected 1D or 2D array for y")
    if x.shape[0] != y.shape[0]:
        raise TypeError("expected x and y to have same length")

    # set rcond
    if rcond is None:
        rcond = len(x) * finfo(x.dtype).eps

    # scale x to improve condition number
    scale = abs(x).max()
    if scale != 0:
        x /= scale

    # solve least squares equation for powers of x
    v = vander(x, order)
    c, resids, rank, s = lstsq(v, y, rcond)

    # warn on rank reduction, which indicates an ill conditioned matrix
    if rank != order and not full:
        msg = "Polyfit may be poorly conditioned"
        warnings.warn(msg, RankWarning)

    # scale returned coefficients
    if scale != 0:
        if c.ndim == 1:
            c /= vander([scale], order)[0]
        else:
            c /= vander([scale], order).T

    if full:
        return c, resids, rank, s, rcond
    else:
        return c
Example #22
def svd(a, full_matrices=True, compute_uv=True, hermitian=False):
    """
    full_matrix = True # If True,
                    # (m, m) X (min(m, n),) X (n, n)
                    # Else:
                    # (m, min(m, n)) X (min(m, n),) X (min(m, n), n)
    compute_uv = True
    hermitian = False  # a가 에르미트 행렬인지 아닌지
                    # A = A^\star
                    # 실수 대칭 행렬의 일반화
                    # 복소수 정사각 행렬
    """
    a, wrap = _makearray(a)

    if hermitian:
        if compute_uv:
            s, u = eigh(a)
            s = s[..., ::-1]
            u = u[..., ::-1]
            # singular values are unsigned, move the sign into v
            vt = transpose(u * sign(s)[..., None, :]).conjugate()
            s = abs(s)
            return wrap(u), s, wrap(vt)
        else:
            s = LA.eigvalsh(a)
            s = s[..., ::-1]
            s = abs(s)
            return s

    _assertRankAtLeast2(a)
    t, result_t = _commonType(a)

    extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)

    m, n = a.shape[-2:]

    if compute_uv:
        if full_matrices:
            # FUNC_ARRAY_NAME(svd_A)
            if m < n:
                gufunc = _umath_linalg.svd_m_f
            else:
                gufunc = _umath_linalg.svd_n_f
        else:
            # FUNC_ARRAY_NAME(svd_S)
            if m < n:
                gufunc = _umath_linalg.svd_m_s
            else:
                gufunc = _umath_linalg.svd_n_s
        
        signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
        u, s, vh = gufunc(a, signature=signature, extobj=extobj)
        u = u.astype(result_t, copy=False)
        s = s.astype(_realType(result_t), copy=False)
        vh = vh.astype(result_t, copy=False)
        return wrap(u), s, wrap(vh)
    else:
        # FUNC_ARRAY_NAME(svd_N)
        if m < n:
            gufunc = _umath_linalg.svd_m
        else:
            gufunc = _umath_linalg.svd_n
        
        signature = 'D->d' if isComplexType(t) else 'd->d'
        s = gufunc(a, signature=signature, extobj=extobj)
        s = s.astype(_realType(result_t), copy=False)
        return s
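What the `hermitian=True` shortcut exploits, here and in Example #1, is that a Hermitian matrix's singular values are the absolute values of its eigenvalues, so `eigh` can stand in for a full SVD. A sketch, assuming numpy >= 1.17 where `np.linalg.svd` accepts `hermitian`:

import numpy as np

rng = np.random.default_rng(2)
m = rng.standard_normal((4, 4))
a = (m + m.T) / 2                            # real symmetric => Hermitian

s_svd = np.linalg.svd(a, compute_uv=False)
s_eig = np.sort(np.abs(np.linalg.eigvalsh(a)))[::-1]
print(np.allclose(s_svd, s_eig))             # True: |eigenvalues| == singular values

u, s, vh = np.linalg.svd(a, hermitian=True)
print(np.allclose(a, u @ np.diag(s) @ vh))   # True: the shortcut still reconstructs a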
Example #23
def norm(x, ord=None, axis=None):
    """
    Norm of a sparse matrix

    This function is able to return one of seven different matrix norms,
    depending on the value of the ``ord`` parameter.

    Parameters
    ----------
    x : a sparse matrix
        Input sparse matrix.
    ord : {non-zero int, inf, -inf, 'fro'}, optional
        Order of the norm (see table under ``Notes``). inf means numpy's
        `inf` object.
    axis : {int, 2-tuple of ints, None}, optional
        If `axis` is an integer, it specifies the axis of `x` along which to
        compute the vector norms.  If `axis` is a 2-tuple, it specifies the
        axes that hold 2-D matrices, and the matrix norms of these matrices
        are computed.  If `axis` is None then either a vector norm (when `x`
        is 1-D) or a matrix norm (when `x` is 2-D) is returned.

    Returns
    -------
    n : float or ndarray

    Notes
    -----
    Some norm orders are not implemented because the associated helpers, such
    as `_multi_svd_norm`, are not yet available for sparse matrices.

    This docstring is adapted from that of numpy.linalg.norm:
    https://github.com/numpy/numpy/blob/master/numpy/linalg/linalg.py

    The following norms can be calculated:

    =====  ============================  
    ord    norm for sparse matrices             
    =====  ============================  
    None   Frobenius norm                
    'fro'  Frobenius norm                
    inf    max(sum(abs(x), axis=1))      
    -inf   min(sum(abs(x), axis=1))      
    0      abs(x).sum(axis=axis)                           
    1      max(sum(abs(x), axis=0))      
    -1     min(sum(abs(x), axis=0))      
    2      Not implemented  
    -2     Not implemented      
    other  Not implemented                               
    =====  ============================  

    The Frobenius norm is given by [1]_:

        :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`

    References
    ----------
    .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
        Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15

    Examples
    --------
    >>> from scipy.sparse import *
    >>> import numpy as np
    >>> from scipy.sparse.linalg import norm
    >>> a = np.arange(9) - 4
    >>> a
    array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
    >>> b = a.reshape((3, 3))
    >>> b
    array([[-4, -3, -2],
           [-1, 0, 1],
           [ 2, 3, 4]])

    >>> b = csr_matrix(b)
    >>> norm(b)
    7.745966692414834
    >>> norm(b, 'fro')
    7.745966692414834
    >>> norm(b, np.inf)
    9
    >>> norm(b, -np.inf)
    2
    >>> norm(b, 1)
    7
    >>> norm(b, -1)
    6

    """
    if not issparse(x):
        raise TypeError("input is not sparse. use numpy.linalg.norm")

    # Check the default case first and handle it immediately.
    if axis is None and ord in (None, "fro", "f"):
        return _sparse_frobenius_norm(x)

    # Some norms require functions that are not implemented for all types.
    x = x.tocsr()

    if axis is None:
        axis = (0, 1)
    elif not isinstance(axis, tuple):
        msg = "'axis' must be None, an integer or a tuple of integers"
        try:
            int_axis = int(axis)
        except TypeError:
            raise TypeError(msg)
        if axis != int_axis:
            raise TypeError(msg)
        axis = (int_axis,)

    nd = 2
    if len(axis) == 2:
        row_axis, col_axis = axis
        if not (-nd <= row_axis < nd and -nd <= col_axis < nd):
            raise ValueError("Invalid axis %r for an array with shape %r" % (axis, x.shape))
        if row_axis % nd == col_axis % nd:
            raise ValueError("Duplicate axes given.")
        if ord == 2:
            raise NotImplementedError
            # return _multi_svd_norm(x, row_axis, col_axis, amax)
        elif ord == -2:
            raise NotImplementedError
            # return _multi_svd_norm(x, row_axis, col_axis, amin)
        elif ord == 1:
            return abs(x).sum(axis=row_axis).max(axis=col_axis)[0, 0]
        elif ord == Inf:
            return abs(x).sum(axis=col_axis).max(axis=row_axis)[0, 0]
        elif ord == -1:
            return abs(x).sum(axis=row_axis).min(axis=col_axis)[0, 0]
        elif ord == -Inf:
            return abs(x).sum(axis=col_axis).min(axis=row_axis)[0, 0]
        elif ord in (None, "f", "fro"):
            # The axis order does not matter for this norm.
            return _sparse_frobenius_norm(x)
        else:
            raise ValueError("Invalid norm order for matrices.")
    elif len(axis) == 1:
        a, = axis
        if not (-nd <= a < nd):
            raise ValueError("Invalid axis %r for an array with shape %r" % (axis, x.shape))
        if ord == Inf:
            M = abs(x).max(axis=a)
        elif ord == -Inf:
            M = abs(x).min(axis=a)
        elif ord == 0:
            # Zero norm
            M = (x != 0).sum(axis=a)
        elif ord == 1:
            # special case for speedup
            M = abs(x).sum(axis=a)
        elif ord in (2, None):
            M = sqrt(abs(x).power(2).sum(axis=a))
        else:
            try:
                ord + 1
            except TypeError:
                raise ValueError("Invalid norm order for vectors.")
            M = np.power(abs(x).power(ord).sum(axis=a), 1 / ord)
        return M.A.ravel()
    else:
        raise ValueError("Improper number of dimensions to norm.")
Example #24
def norm(x, ord=None, axis=None):
    """
    Norm of a sparse matrix

    This function is able to return one of seven different matrix norms,
    depending on the value of the ``ord`` parameter.
    
    Parameters
    ----------
    x : a sparse matrix
        Input sparse matrix. If `axis` is None, `x` must be a 1-D or 2-D sparse matrix.
    ord : {non-zero int, inf, -inf, 'fro'}, optional
        Order of the norm (see table under ``Notes``). inf means numpy's
        `inf` object.
    axis : {int, None}, optional
        If `axis` is an integer, it specifies the axis of `x` along which to
        compute the vector norms. 
    
    Returns
    -------
    n : float or matrix
    
    Notes
    -----
    Some norm orders are not implemented because the associated helpers, such
    as `_multi_svd_norm`, are not yet available for sparse matrices.

    This docstring is adapted from that of numpy.linalg.norm:
    https://github.com/numpy/numpy/blob/master/numpy/linalg/linalg.py
    
    The following norms can be calculated:
    
    =====  ============================  
    ord    norm for sparse matrices             
    =====  ============================  
    None   Frobenius norm                
    'fro'  Frobenius norm                
    inf    max(sum(abs(x), axis=1))      
    -inf   min(sum(abs(x), axis=1))      
    0      abs(x).sum(axis=axis)                           
    1      max(sum(abs(x), axis=0))      
    -1     min(sum(abs(x), axis=0))      
    2      Not implemented  
    -2     Not implemented      
    other  Not implemented                               
    =====  ============================  
    
    The Frobenius norm is given by [1]_:
    
        :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
    
    References
    ----------
    .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
        Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
    
    Examples
    --------
    >>> from scipy.sparse import *
    >>> import numpy as np
    >>> from scipy.sparse.linalg import norm
    >>> a = np.arange(9) - 4
    >>> a
    array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
    >>> b = a.reshape((3, 3))
    >>> b
    array([[-4, -3, -2],
           [-1, 0, 1],
           [ 2, 3, 4]])
           
    >>> b = csr_matrix(b)
    >>> norm(b)
    7.745966692414834
    >>> norm(b, 'fro')
    7.745966692414834
    >>> norm(b, np.inf)
    9
    >>> norm(b, -np.inf)
    2
    >>> norm(b, 1)
    7
    >>> norm(b, -1)
    6
    
    Using the `axis` argument to compute vector norms:
    
    >>> c = np.array([[ 1, 2, 3],
    ...               [-1, 1, 4]])
    >>> c = csr_matrix(c)
    >>> norm(c, axis=0)
    matrix([[ 1.41421356,  2.23606798,  5.        ]])
    >>> norm(c, axis=1)
    matrix([[ 3.74165739,  4.24264069]])
    >>> norm(c, ord=1, axis=1)
    matrix([[6],
            [6]])

"""
    if not issparse(x):
        raise TypeError("input is not sparse. use numpy.linalg.norm")

    # Check the default case first and handle it immediately.
    if ord in [None, 'fro', 'f'] and axis is None:
        if isComplexType(x.dtype.type):
            # sum of |a_ij|^2; dot() would compute a matrix product here, not this sum
            sqnorm = abs(x).power(2).sum()
        else:
            sqnorm = x.power(2).sum()
        return sqrt(sqnorm)

    # Normalize the `axis` argument to a tuple.
    nd = x.ndim
    if axis is None:
        axis = tuple(range(nd))
    
    if np.isscalar(axis):
        if ord == Inf:
            return max(abs(x).sum(axis=axis))
        elif ord == -Inf:
            return min(abs(x).sum(axis=axis))
        elif ord == 0:
            # Zero norm
            return (x != 0).sum(axis=axis)
        elif ord == 1:
            # special case for speedup
            return abs(x).sum(axis=axis)
        elif ord == -1:
            return min(abs(x).sum(axis=axis))             
        elif ord is None:            
            return sqrt(x.power(2).sum(axis=axis))        
        else:
            raise NotImplementedError
    elif len(axis) == 2:
        row_axis, col_axis = axis
        if not (-nd <= row_axis < nd and -nd <= col_axis < nd):
            raise ValueError('Invalid axis %r for an array with shape %r' %
                             (axis, x.shape))
        if row_axis % nd == col_axis % nd:
            raise ValueError('Duplicate axes given.')
        if ord == 2:
            raise NotImplementedError
            #return _multi_svd_norm(x, row_axis, col_axis, amax)
        elif ord == -2:
            raise NotImplementedError
            #return _multi_svd_norm(x, row_axis, col_axis, amin)
        elif ord == 1:
            return abs(x).sum(axis=row_axis).max(axis=col_axis)[0,0]
        elif ord == Inf:
            return abs(x).sum(axis=col_axis).max(axis=row_axis)[0,0]
        elif ord == -1:
            return abs(x).sum(axis=row_axis).min(axis=col_axis)[0,0]
        elif ord == -Inf:
            return abs(x).sum(axis=col_axis).min(axis=row_axis)[0,0]
        elif ord in [None, 'fro', 'f']:
            return sqrt(x.power(2).sum(axis=axis))
        else:
            raise ValueError("Invalid norm order for matrices.")
    else:
        raise ValueError("Improper number of dimensions to norm.")
Example #25
def polyfit(x, y, deg, rcond=None, full=False):
    """Least squares polynomial fit.

    Do a best fit polynomial of degree 'deg' of 'x' to 'y'.  Return value is a
    vector of polynomial coefficients [pk ... p1 p0].  E.g., for deg=2

        p2*x0^2 +  p1*x0 + p0 = y0
        p2*x1^2 +  p1*x1 + p0 = y1
        p2*x2^2 +  p1*x2 + p0 = y2
        .....
        p2*xk^2 +  p1*xk + p0 = yk

    Parameters
    ----------
    x : array_like
        1D vector of sample points.
    y : array_like
        1D vector or 2D array of values to fit. The values should run down the
        columns in the 2D case.
    deg : integer
        Degree of the fitting polynomial.
    rcond : {None, float}, optional
        Relative condition number of the fit. Singular values smaller than this
        relative to the largest singular value will be ignored. The default value
        is len(x)*eps, where eps is the relative precision of the float type,
        about 2e-16 in most cases.
    full : {False, boolean}, optional
        Switch determining nature of return value. When it is False just the
        coefficients are returned, when True diagnostic information from the
        singular value decomposition is also returned.

    Returns
    -------
    coefficients, [residuals, rank, singular_values, rcond] : variable
        When full=False, only the coefficients are returned, running down the
        appropriate column when y is a 2D array. When full=True, the residuals
        of the fit, the effective rank of the scaled Vandermonde matrix in light
        of the rcond value, its singular values, and the specified value of
        rcond are also returned.

    Warns
    -----
    RankWarning : if rank is reduced and not full output
        The warnings can be turned off by:
        >>> import numpy as np
        >>> import warnings
        >>> warnings.simplefilter('ignore',np.RankWarning)


    See Also
    --------
    polyval : computes polynomial values.

    Notes
    -----
    If X is the Vandermonde matrix computed from x (see
    http://mathworld.wolfram.com/VandermondeMatrix.html), then the
    polynomial least squares solution is given by the 'p' in

        X*p = y

    where X is a matrix of shape (len(x), deg + 1), p is a vector of
    dimensions (deg + 1, 1), and y is a vector of dimensions (len(x), 1).

    This equation can be solved as

        p = (XT*X)^-1 * XT * y

    where XT is the transpose of X and -1 denotes the inverse. However, this
    method is susceptible to rounding errors and generally the singular value
    decomposition of the matrix X is preferred and that is what is done here.
    The singular value method takes a parameter, 'rcond', which sets a limit on
    the relative size of the smallest singular value to be used in solving the
    equation. This may result in lowering the rank of the Vandermonde matrix, in
    which case a RankWarning is issued. If polyfit issues a RankWarning, try a
    fit of lower degree or replace x by x - x.mean(), both of which will
    generally improve the condition number. The routine already normalizes the
    vector x by its maximum absolute value to help in this regard. The rcond
    parameter can be set to a value smaller than its default, but the resulting
    fit may be spurious. The current default value of rcond is len(x)*eps, where
    eps is the relative precision of the floating type being used, generally
    around 1e-7 and 2e-16 for IEEE single and double precision respectively.
    This value of rcond is fairly conservative but works pretty well when x -
    x.mean() is used in place of x.


    DISCLAIMER: Power series fits are full of pitfalls for the unwary once the
    degree of the fit becomes large or the interval of sample points is badly
    centered. The problem is that the powers x**n are generally a poor basis for
    the polynomial functions on the sample interval, resulting in a Vandermonde
    matrix that is ill conditioned and coefficients sensitive to rounding errors.
    The computation of the polynomial values will also be sensitive to rounding
    errors.
    Consequently, the quality of the polynomial fit should be checked against
    the data whenever the condition number is large.  The quality of polynomial
    fits *can not* be taken for granted. If all you want to do is draw a smooth
    curve through the y values and polyfit is not doing the job, try centering
    the sample range or look into scipy.interpolate, which includes some nice
    spline fitting functions that may be of use.

    For more info, see
    http://mathworld.wolfram.com/LeastSquaresFittingPolynomial.html,
    but note that the notation for the k's and n's in the superscripts and
    subscripts on that page differs from the one used here.  The linear
    algebra is correct, however.

    """
    order = int(deg) + 1
    x = NX.asarray(x) + 0.0
    y = NX.asarray(y) + 0.0

    # check arguments.
    if deg < 0:
        raise ValueError("expected deg >= 0")
    if x.ndim != 1:
        raise TypeError("expected 1D vector for x")
    if x.size == 0:
        raise TypeError("expected non-empty vector for x")
    if y.ndim < 1 or y.ndim > 2:
        raise TypeError("expected 1D or 2D array for y")
    if x.shape[0] != y.shape[0]:
        raise TypeError("expected x and y to have same length")

    # set rcond
    if rcond is None:
        xtype = x.dtype
        if xtype == NX.single or xtype == NX.csingle:
            rcond = len(x)*_single_eps
        else:
            rcond = len(x)*_double_eps

    # scale x to improve condition number
    scale = abs(x).max()
    if scale != 0:
        x /= scale

    # solve least squares equation for powers of x
    v = vander(x, order)
    c, resids, rank, s = _lstsq(v, y, rcond)

    # warn on rank reduction, which indicates an ill conditioned matrix
    if rank != order and not full:
        msg = "Polyfit may be poorly conditioned"
        warnings.warn(msg, RankWarning)

    # scale returned coefficients
    if scale != 0:
        if c.ndim == 1:
            c /= vander([scale], order)[0]
        else:
            c /= vander([scale], order).T

    if full:
        return c, resids, rank, s, rcond
    else:
        return c
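
As a small, hedged illustration of the method described in the Notes above (standard NumPy only; the sample data and variable names are made up for illustration), the sketch below fits a quadratic both through the normal equations and through an SVD-based least squares solve on the Vandermonde matrix, which is the better-conditioned route the routine itself takes:

import numpy as np

x = np.linspace(-1.0, 1.0, 50)
y = 3.0*x**2 - 2.0*x + 1.0 + 0.01*np.random.randn(50)
order = 3                                   # deg + 1 for a quadratic fit

# Vandermonde matrix with columns [x**2, x, 1]
X = np.vander(x, order)

# Normal equations p = (XT*X)^-1 * XT * y -- simple but numerically fragile
p_normal = np.linalg.solve(X.T @ X, X.T @ y)

# SVD-based least squares, the approach polyfit relies on internally
p_svd, resids, rank, sv = np.linalg.lstsq(X, y, rcond=None)

# Both return coefficients from highest to lowest degree; they agree here,
# but the SVD route degrades far more gracefully when X is ill conditioned.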
Ejemplo n.º 26
0
def polyfit(x, y, deg, rcond=None, full=False):
    """Least squares polynomial fit.

    Required arguments

        x -- vector of sample points
        y -- vector or 2D array of values to fit
        deg -- degree of the fitting polynomial

    Keyword arguments

        rcond -- relative condition number of the fit (default len(x)*eps)
        full -- return full diagnostic output (default False)

    Returns

        full == False -- coefficients
        full == True -- coefficients, residuals, rank, singular values, rcond.

    Warns

        RankWarning -- if rank is reduced and not full output

    Do a best fit polynomial of degree 'deg' of 'x' to 'y'.  Return value is a
    vector of polynomial coefficients [pk ... p1 p0].  E.g., for deg = 2

      p2*x0^2 +  p1*x0 + p0 = y0
      p2*x1^2 +  p1*x1 + p0 = y1
      p2*x2^2 +  p1*x2 + p0 = y2
      .....
      p2*xk^2 +  p1*xk + p0 = yk


    Method: if X is the Vandermonde matrix computed from x (see
    http://mathworld.wolfram.com/VandermondeMatrix.html), then the
    polynomial least squares solution is given by the 'p' in

      X*p = y

    where X is a len(x) x (N+1) matrix (N being the degree of the fit),
    p is a vector of length N+1, and y is a vector of length len(x).

    This equation can be solved as

      p = (XT*X)^-1 * XT * y

    where XT is the transpose of X and -1 denotes the inverse. However, this
    method is susceptible to rounding errors and generally the singular value
    decomposition is preferred and that is the method used here. The singular
    value method takes a parameter, 'rcond', which sets a limit on the
    relative size of the smallest singular value to be used in solving the
    equation. This may result in lowering the rank of the Vandermonde matrix,
    in which case a RankWarning is issued. If polyfit issues a RankWarning, try
    a fit of lower degree or replace x by x - x.mean(), both of which will
    generally improve the condition number. The routine already normalizes the
    vector x by its maximum absolute value to help in this regard. The rcond
    parameter may also be set to a value smaller than its default, but this may
    result in bad fits. The current default value of rcond is len(x)*eps, where
    eps is the relative precision of the floating type being used, generally
    around 1e-7 and 2e-16 for IEEE single and double precision respectively.
    This value of rcond is fairly conservative but works pretty well when x -
    x.mean() is used in place of x.

    The warnings can be turned off by:

    >>> import numpy
    >>> import warnings
    >>> warnings.simplefilter('ignore',numpy.RankWarning)

    DISCLAIMER: Power series fits are full of pitfalls for the unwary once the
    degree of the fit becomes large or the interval of sample points is badly
    centered. The basic problem is that the powers x**n are generally a poor
    basis for the functions on the sample interval with the result that the
    Vandermonde matrix is ill conditioned and computation of the polynomial
    values is sensitive to coefficient error. The quality of the resulting fit
    should be checked against the data whenever the condition number is large,
    as the quality of polynomial fits *can not* be taken for granted. If all
    you want to do is draw a smooth curve through the y values and polyfit is
    not doing the job, try centering the sample range or look into
    scipy.interpolate, which includes some nice spline fitting functions that
    may be of use.

    For more info, see
    http://mathworld.wolfram.com/LeastSquaresFittingPolynomial.html,
    but note that the notation for the k's and n's in the superscripts and
    subscripts on that page differs from the one used here.  The linear
    algebra is correct, however.

    See also polyval

    """
    order = int(deg) + 1
    x = NX.asarray(x) + 0.0
    y = NX.asarray(y) + 0.0

    # check arguments.
    if deg < 0:
        raise ValueError("expected deg >= 0")
    if x.ndim != 1 or x.size == 0:
        raise TypeError("expected non-empty vector for x")
    if y.ndim < 1 or y.ndim > 2:
        raise TypeError("expected 1D or 2D array for y")
    if x.shape[0] != y.shape[0]:
        raise TypeError("expected x and y to have same length")

    # set rcond
    if rcond is None:
        xtype = x.dtype
        if xtype == NX.single or xtype == NX.csingle:
            rcond = len(x)*_single_eps
        else:
            rcond = len(x)*_double_eps

    # scale x to improve condition number
    scale = abs(x).max()
    if scale != 0:
        x /= scale

    # solve least squares equation for powers of x
    v = vander(x, order)
    c, resids, rank, s = _lstsq(v, y, rcond)

    # warn on rank reduction, which indicates an ill conditioned matrix
    if rank != order and not full:
        msg = "Polyfit may be poorly conditioned"
        warnings.warn(msg, RankWarning)

    # scale returned coefficients
    if scale != 0:
        c /= vander([scale], order)[0]

    if full:
        return c, resids, rank, s, rcond
    else:
        return c
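
The docstring above recommends replacing x by x - x.mean() when a RankWarning appears. A minimal sketch of why that helps (illustrative sample points only, standard NumPy assumed) compares the condition number of the scaled Vandermonde matrix before and after centering:

import numpy as np

x = np.linspace(1000.0, 1010.0, 20)         # badly centered sample interval
deg = 4

cond_raw = np.linalg.cond(np.vander(x / abs(x).max(), deg + 1))

xc = x - x.mean()
cond_centered = np.linalg.cond(np.vander(xc / abs(xc).max(), deg + 1))

# cond_centered is typically many orders of magnitude smaller than cond_raw,
# so the least squares coefficients are far less sensitive to rounding errors.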
Ejemplo n.º 27
0
def norm(x, ord=None):
    """
    Norm of a sparse matrix

    This function is able to return one of seven different matrix norms,
    depending on the value of the ``ord`` parameter.

    Parameters
    ----------
    x : a sparse matrix
        Input sparse matrix.
    ord : {non-zero int, inf, -inf, 'fro'}, optional
        Order of the norm (see table under ``Notes``). inf means numpy's
        `inf` object.

    Returns
    -------
    n : float or matrix

    Notes
    -----
    Some of the ord values are not implemented because some associated
    functions, like _multi_svd_norm, are not yet available for sparse matrices.

    This docstring is adapted from numpy.linalg.norm:
    https://github.com/numpy/numpy/blob/master/numpy/linalg/linalg.py

    The following norms can be calculated:

    =====  ============================  
    ord    norm for sparse matrices             
    =====  ============================  
    None   Frobenius norm                
    'fro'  Frobenius norm                
    inf    max(sum(abs(x), axis=1))      
    -inf   min(sum(abs(x), axis=1))      
    0      abs(x).sum(axis=axis)                           
    1      max(sum(abs(x), axis=0))      
    -1     min(sum(abs(x), axis=0))      
    2      Not implemented  
    -2     Not implemented      
    other  Not implemented                               
    =====  ============================  

    The Frobenius norm is given by [1]_:

        :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`

    References
    ----------
    .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
        Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15

    Examples
    --------
    >>> from scipy.sparse import *
    >>> import numpy as np
    >>> from scipy.sparse.linalg import norm
    >>> a = np.arange(9) - 4
    >>> a
    array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
    >>> b = a.reshape((3, 3))
    >>> b
    array([[-4, -3, -2],
           [-1, 0, 1],
           [ 2, 3, 4]])

    >>> b = csr_matrix(b)
    >>> norm(b)
    7.745966692414834
    >>> norm(b, 'fro')
    7.745966692414834
    >>> norm(b, np.inf)
    9
    >>> norm(b, -np.inf)
    2
    >>> norm(b, 1)
    7
    >>> norm(b, -1)
    6

    """
    if not issparse(x):
        raise TypeError("input is not sparse. use numpy.linalg.norm")

    # Check the default case first and handle it immediately.
    if ord in (None, 'fro', 'f'):
        if np.issubdtype(x.dtype, np.complexfloating):
            sqnorm = abs(x).power(2).sum()
        else:
            sqnorm = x.power(2).sum()
        return sqrt(sqnorm)

    nd = x.ndim
    axis = tuple(range(nd))

    if len(axis) == 2:
        row_axis, col_axis = axis
        if not (-nd <= row_axis < nd and -nd <= col_axis < nd):
            raise ValueError('Invalid axis %r for an array with shape %r' %
                             (axis, x.shape))
        if row_axis % nd == col_axis % nd:
            raise ValueError('Duplicate axes given.')
        if ord == 2:
            raise NotImplementedError
            #return _multi_svd_norm(x, row_axis, col_axis, amax)
        elif ord == -2:
            raise NotImplementedError
            #return _multi_svd_norm(x, row_axis, col_axis, amin)
        elif ord == 1:
            return abs(x).sum(axis=row_axis).max(axis=col_axis)[0,0]
        elif ord == Inf:
            return abs(x).sum(axis=col_axis).max(axis=row_axis)[0,0]
        elif ord == -1:
            return abs(x).sum(axis=row_axis).min(axis=col_axis)[0,0]
        elif ord == -Inf:
            return abs(x).sum(axis=col_axis).min(axis=row_axis)[0,0]
        else:
            raise ValueError("Invalid norm order for matrices.")
    else:
        raise ValueError("Improper number of dimensions to norm.")
Ejemplo n.º 28
0
def _sparse_frobenius_norm(x):
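    # Frobenius norm: the square root of the sum of squared magnitudes of the
    # stored entries; abs() is only needed first for complex-valued matrices.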
    if np.issubdtype(x.dtype, np.complexfloating):
        sqnorm = abs(x).power(2).sum()
    else:
        sqnorm = x.power(2).sum()
    return sqrt(sqnorm)
Ejemplo n.º 29
0
def norm(x, ord=None):
    """
    Norm of a sparse matrix

    This function is able to return one of seven different matrix norms,
    depending on the value of the ``ord`` parameter.

    Parameters
    ----------
    x : a sparse matrix
        Input sparse matrix.
    ord : {non-zero int, inf, -inf, 'fro'}, optional
        Order of the norm (see table under ``Notes``). inf means numpy's
        `inf` object.

    Returns
    -------
    n : float or matrix

    Notes
    -----
    Some of the ord values are not implemented because some associated
    functions, like _multi_svd_norm, are not yet available for sparse matrices.

    This docstring is adapted from numpy.linalg.norm:
    https://github.com/numpy/numpy/blob/master/numpy/linalg/linalg.py

    The following norms can be calculated:

    =====  ============================  
    ord    norm for sparse matrices             
    =====  ============================  
    None   Frobenius norm                
    'fro'  Frobenius norm                
    inf    max(sum(abs(x), axis=1))      
    -inf   min(sum(abs(x), axis=1))      
    0      abs(x).sum(axis=axis)                           
    1      max(sum(abs(x), axis=0))      
    -1     min(sum(abs(x), axis=0))      
    2      Not implemented  
    -2     Not implemented      
    other  Not implemented                               
    =====  ============================  

    The Frobenius norm is given by [1]_:

        :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`

    References
    ----------
    .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
        Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15

    Examples
    --------
    >>> from scipy.sparse import *
    >>> import numpy as np
    >>> from scipy.sparse.linalg import norm
    >>> a = np.arange(9) - 4
    >>> a
    array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
    >>> b = a.reshape((3, 3))
    >>> b
    array([[-4, -3, -2],
           [-1, 0, 1],
           [ 2, 3, 4]])

    >>> b = csr_matrix(b)
    >>> norm(b)
    7.745966692414834
    >>> norm(b, 'fro')
    7.745966692414834
    >>> norm(b, np.inf)
    9
    >>> norm(b, -np.inf)
    2
    >>> norm(b, 1)
    7
    >>> norm(b, -1)
    6

    """
    if not issparse(x):
        raise TypeError("input is not sparse. use numpy.linalg.norm")

    # Check the default case first and handle it immediately.
    if ord in (None, 'fro', 'f'):
        if np.issubdtype(x.dtype, np.complexfloating):
            sqnorm = abs(x).power(2).sum()
        else:
            sqnorm = x.power(2).sum()
        return sqrt(sqnorm)

    nd = x.ndim
    axis = tuple(range(nd))

    if len(axis) == 2:
        row_axis, col_axis = axis
        if not (-nd <= row_axis < nd and -nd <= col_axis < nd):
            raise ValueError('Invalid axis %r for an array with shape %r' %
                             (axis, x.shape))
        if row_axis % nd == col_axis % nd:
            raise ValueError('Duplicate axes given.')
        if ord == 2:
            raise NotImplementedError
            #return _multi_svd_norm(x, row_axis, col_axis, amax)
        elif ord == -2:
            raise NotImplementedError
            #return _multi_svd_norm(x, row_axis, col_axis, amin)
        elif ord == 1:
            return abs(x).sum(axis=row_axis).max(axis=col_axis)[0, 0]
        elif ord == Inf:
            return abs(x).sum(axis=col_axis).max(axis=row_axis)[0, 0]
        elif ord == -1:
            return abs(x).sum(axis=row_axis).min(axis=col_axis)[0, 0]
        elif ord == -Inf:
            return abs(x).sum(axis=col_axis).min(axis=row_axis)[0, 0]
        else:
            raise ValueError("Invalid norm order for matrices.")
    else:
        raise ValueError("Improper number of dimensions to norm.")
Ejemplo n.º 30
0
def polyadd(a1, a2):
    """
    Find the sum of two polynomials.

    .. note::
       This forms part of the old polynomial API. Since version 1.4, the
       new polynomial API defined in `numpy.polynomial` is preferred.
       A summary of the differences can be found in the
       :doc:`transition guide </reference/routines.polynomials>`.

    Returns the polynomial resulting from the sum of two input polynomials.
    Each input must be either a poly1d object or a 1D sequence of polynomial
    coefficients, from highest to lowest degree.

    Parameters
    ----------
    a1, a2 : array_like or poly1d object
        Input polynomials.

    Returns
    -------
    out : ndarray or poly1d object
        The sum of the inputs. If either input is a poly1d object, then the
        output is also a poly1d object. Otherwise, it is a 1D array of
        polynomial coefficients from highest to lowest degree.

    See Also
    --------
    poly1d : A one-dimensional polynomial class.
    poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval

    Examples
    --------
    >>> np.polyadd([1, 2], [9, 5, 4])
    array([9, 6, 6])

    Using poly1d objects:

    >>> p1 = np.poly1d([1, 2])
    >>> p2 = np.poly1d([9, 5, 4])
    >>> print(p1)
    1 x + 2
    >>> print(p2)
       2
    9 x + 5 x + 4
    >>> print(np.polyadd(p1, p2))
       2
    9 x + 6 x + 6

    """
    truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
    a1 = atleast_1d(a1)
    a2 = atleast_1d(a2)
    diff = len(a2) - len(a1)
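    # diff > 0 means a2 has more coefficients than a1; the shorter input is
    # left-padded with zeros, since coefficients run from highest to lowest degree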
    if diff == 0:
        val = a1 + a2
    elif diff > 0:
        zr = NX.zeros(diff, a1.dtype)
        val = NX.concatenate((zr, a1)) + a2
    else:
        zr = NX.zeros(abs(diff), a2.dtype)
        val = a1 + NX.concatenate((zr, a2))
    if truepoly:
        val = poly1d(val)
    return val
Ejemplo n.º 31
0
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
           'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
           'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
           'LinAlgError', 'multi_dot']

import functools
import operator
import warnings

from numpy.core import (
    array, asarray, zeros, empty, empty_like, intc, single, double,
    csingle, cdouble, inexact, complexfloating, newaxis, all, Inf, dot,
    add, multiply, sqrt, fastCopyAndTranspose, sum, isfinite,
    finfo, errstate, geterrobj, moveaxis, amin, amax, product, abs,
    atleast_2d, intp, asanyarray, object_, matmul,
    swapaxes, divide, count_nonzero, isnan, sign
)
from numpy.core.multiarray import normalize_axis_index
from numpy.core.overrides import set_module
from numpy.core import overrides
from numpy.lib.twodim_base import triu, eye
from numpy.linalg import lapack_lite, _umath_linalg

array_function_dispatch = functools.partial(
    overrides.array_function_dispatch, module='numpy.linalg')

_N = b'N'
_V = b'V'
_A = b'A'
_S = b'S'
_L = b'L'

fortran_int = intc


@set_module('numpy.linalg')
class LinAlgError(Exception):
    pass


def _determine_error_states():
    errobj = geterrobj()
    bufsize = errobj[0]
    with errstate(invalid='call', over='ignore',
                  divide='ignore', under='ignore'):
        invalid_call_errmask = geterrobj()[1]
    return [bufsize, invalid_call_errmask, None]

# Determine the default error handling state once, at import time.
_linalg_error_extobj = _determine_error_states()
del _determine_error_states


def _raise_linalgerror_singular(err, flag):
    raise LinAlgError("Singular matrix")

def _raise_linalgerror_nonposdef(err, flag):
    raise LinAlgError("Matrix is not positive definite")

def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
    raise LinAlgError("Eigenvalues did not converge")

def _raise_linalgerror_svd_nonconvergence(err, flag):
    raise LinAlgError("SVD did not converge")

def _raise_linalgerror_lstsq(err, flag):
    raise LinAlgError("SVD did not converge in Linear Least Squares")


def get_linalg_error_extobj(callback):
    extobj = list(_linalg_error_extobj)
    extobj[2] = callback
    return extobj


def _makearray(a):
    new = asarray(a)
    wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
    return new, wrap


def isComplexType(t):
    return issubclass(t, complexfloating)

# ... (this example continued with the rest of numpy/linalg/linalg.py in the
# same vein: the dtype helpers _realType, _complexType and _commonType, the
# _assert_* shape checks, and the public functions solve, tensorsolve,
# tensorinv, inv, matrix_power, cholesky, qr, eigvals, eigvalsh, eig, eigh,
# svd, cond, matrix_rank, pinv, slogdet, det, lstsq, norm and multi_dot.)
Ejemplo n.º 32
0
def norm(x, ord=None, axis=None, keepdims=False):
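    """Vector or matrix norm of x, selected by `ord` and reduced over `axis`
    (None, a single int, or a pair of ints); mirrors numpy.linalg.norm."""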

    x = asarray(x)

    if not issubclass(x.dtype.type, (inexact, object_)):
        x = x.astype(float)

    if axis is None:
        ndim = x.ndim
        if ((ord is None) or (ord in ('f', 'fro') and ndim == 2)
                or (ord == 2 and ndim == 1)):

            x = x.ravel(order='K')
            if isComplexType(x.dtype.type):
                sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)
            else:
                sqnorm = dot(x, x)
            ret = sqrt(sqnorm)
            if keepdims:
                ret = ret.reshape(ndim * [1])
            return ret

    nd = x.ndim
    if axis is None:
        axis = tuple(range(nd))
    elif not isinstance(axis, tuple):
        try:
            axis = int(axis)
        except Exception as e:
            raise TypeError(
                "'axis' must be None, an integer or a tuple of integers"
            ) from e
        axis = (axis, )

    if len(axis) == 1:
        if ord == Inf:
            return abs(x).max(axis=axis, keepdims=keepdims)
        elif ord == -Inf:
            return abs(x).min(axis=axis, keepdims=keepdims)
        elif ord == 0:
            return (x != 0).astype(x.real.dtype).sum(axis=axis,
                                                     keepdims=keepdims)
        elif ord == 1:
            return add.reduce(abs(x), axis=axis, keepdims=keepdims)
        elif ord is None or ord == 2:
            s = (x.conj() * x).real
            return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))
        elif isinstance(ord, str):
            raise ValueError(
                f"Invalid norm order '{ord}' for vectors")
        else:
            absx = abs(x)
            absx **= ord
            ret = add.reduce(absx, axis=axis, keepdims=keepdims)
            ret **= (1 / ord)
            return ret
    elif len(axis) == 2:
        row_axis, col_axis = axis
        row_axis = normalize_axis_index(row_axis, nd)
        col_axis = normalize_axis_index(col_axis, nd)
        if row_axis == col_axis:
            raise ValueError('Duplicate axes given.')
        if ord == 2:
            ret = _multi_svd_norm(x, row_axis, col_axis, amax)
        elif ord == -2:
            ret = _multi_svd_norm(x, row_axis, col_axis, amin)
        elif ord == 1:
            if col_axis > row_axis:
                col_axis -= 1
            ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
        elif ord == Inf:
            if row_axis > col_axis:
                row_axis -= 1
            ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
        elif ord == -1:
            if col_axis > row_axis:
                col_axis -= 1
            ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
        elif ord == -Inf:
            if row_axis > col_axis:
                row_axis -= 1
            ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
        elif ord in [None, 'fro', 'f']:
            ret = sqrt(add.reduce((x.conj() * x).real, axis=axis))
        elif ord == 'nuc':
            ret = _multi_svd_norm(x, row_axis, col_axis, sum)
        else:
            raise ValueError("Orden normativo inválido para las matrices.")
        if keepdims:
            ret_shape = list(x.shape)
            ret_shape[axis[0]] = 1
            ret_shape[axis[1]] = 1
            ret = ret.reshape(ret_shape)
        return ret
    else:
        raise ValueError("Número inadecuado de dimensiones a la norma.")
Ejemplo n.º 33
0
def polyfit(x, y, deg, rcond=None, full=False):
    """Least squares polynomial fit.

    Required arguments

        x -- vector of sample points
        y -- vector or 2D array of values to fit
        deg -- degree of the fitting polynomial

    Keyword arguments

        rcond -- relative condition number of the fit (default len(x)*eps)
        full -- return full diagnostic output (default False)

    Returns

        full == False -- coefficients
        full == True -- coefficients, residuals, rank, singular values, rcond.

    Warns

        RankWarning -- if rank is reduced and not full output

    Do a best fit polynomial of degree 'deg' of 'x' to 'y'.  Return value is a
    vector of polynomial coefficients [pk ... p1 p0].  E.g., for deg = 2

      p2*x0^2 +  p1*x0 + p0 = y0
      p2*x1^2 +  p1*x1 + p0 = y1
      p2*x2^2 +  p1*x2 + p0 = y2
      .....
      p2*xk^2 +  p1*xk + p0 = yk


    Method: if X is the Vandermonde matrix computed from x (see
    http://mathworld.wolfram.com/VandermondeMatrix.html), then the
    polynomial least squares solution is given by the 'p' in

      X*p = y

    where X is a len(x) x (N+1) matrix (N being the degree of the fit),
    p is a vector of length N+1, and y is a vector of length len(x).

    This equation can be solved as

      p = (XT*X)^-1 * XT * y

    where XT is the transpose of X and -1 denotes the inverse. However, this
    method is susceptible to rounding errors and generally the singular value
    decomposition is preferred and that is the method used here. The singular
    value method takes a parameter, 'rcond', which sets a limit on the
    relative size of the smallest singular value to be used in solving the
    equation. This may result in lowering the rank of the Vandermonde matrix,
    in which case a RankWarning is issued. If polyfit issues a RankWarning, try
    a fit of lower degree or replace x by x - x.mean(), both of which will
    generally improve the condition number. The routine already normalizes the
    vector x by its maximum absolute value to help in this regard. The rcond
    parameter may also be set to a value smaller than its default, but this may
    result in bad fits. The current default value of rcond is len(x)*eps, where
    eps is the relative precision of the floating type being used, generally
    around 1e-7 and 2e-16 for IEEE single and double precision respectively.
    This value of rcond is fairly conservative but works pretty well when x -
    x.mean() is used in place of x.

    The warnings can be turned off by:

    >>> import numpy
    >>> import warnings
    >>> warnings.simplefilter('ignore',numpy.RankWarning)

    DISCLAIMER: Power series fits are full of pitfalls for the unwary once the
    degree of the fit becomes large or the interval of sample points is badly
    centered. The basic problem is that the powers x**n are generally a poor
    basis for the functions on the sample interval with the result that the
    Vandermonde matrix is ill conditioned and computation of the polynomial
    values is sensitive to coefficient error. The quality of the resulting fit
    should be checked against the data whenever the condition number is large,
    as the quality of polynomial fits *can not* be taken for granted. If all
    you want to do is draw a smooth curve through the y values and polyfit is
    not doing the job, try centering the sample range or look into
    scipy.interpolate, which includes some nice spline fitting functions that
    may be of use.

    For more info, see
    http://mathworld.wolfram.com/LeastSquaresFittingPolynomial.html,
    but note that the notation for the k's and n's in the superscripts and
    subscripts on that page differs from the one used here.  The linear
    algebra is correct, however.

    See also polyval

    """
    order = int(deg) + 1
    x = NX.asarray(x) + 0.0
    y = NX.asarray(y) + 0.0

    # check arguments.
    if deg < 0:
        raise ValueError("expected deg >= 0")
    if x.ndim != 1 or x.size == 0:
        raise TypeError("expected non-empty vector for x")
    if y.ndim < 1 or y.ndim > 2:
        raise TypeError("expected 1D or 2D array for y")
    if x.shape[0] != y.shape[0]:
        raise TypeError("expected x and y to have same length")

    # set rcond
    if rcond is None:
        xtype = x.dtype
        if xtype == NX.single or xtype == NX.csingle:
            rcond = len(x)*_single_eps
        else:
            rcond = len(x)*_double_eps

    # scale x to improve condition number
    scale = abs(x).max()
    if scale != 0:
        x /= scale

    # solve least squares equation for powers of x
    v = vander(x, order)
    c, resids, rank, s = _lstsq(v, y, rcond)

    # warn on rank reduction, which indicates an ill conditioned matrix
    if rank != order and not full:
        msg = "Polyfit may be poorly conditioned"
        warnings.warn(msg, RankWarning)

    # scale returned coefficients
    if scale != 0:
        c /= vander([scale], order)[0]

    if full:
        return c, resids, rank, s, rcond
    else:
        return c