def polyfit(x, y, deg, rcond=None, full=False): """%s Notes ----- Any masked values in x is propagated in y, and vice-versa. """ order = int(deg) + 1 x = asarray(x) mx = getmask(x) y = asarray(y) if y.ndim == 1: m = mask_or(mx, getmask(y)) elif y.ndim == 2: y = mask_rows(y) my = getmask(y) if my is not nomask: m = mask_or(mx, my[:,0]) else: m = mx else: raise TypeError,"Expected a 1D or 2D array for y!" if m is not nomask: x[m] = y[m] = masked # Set rcond if rcond is None : if x.dtype in (np.single, np.csingle): rcond = len(x)*_single_eps else : rcond = len(x)*_double_eps # Scale x to improve condition number scale = abs(x).max() if scale != 0 : x = x / scale # solve least squares equation for powers of x v = vander(x, order) c, resids, rank, s = _lstsq(v, y.filled(0), rcond) # warn on rank reduction, which indicates an ill conditioned matrix if rank != order and not full: warnings.warn("Polyfit may be poorly conditioned", np.RankWarning) # scale returned coefficients if scale != 0 : if c.ndim == 1 : c /= np.vander([scale], order)[0] else : c /= np.vander([scale], order).T if full : return c, resids, rank, s, rcond else : return c
def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='',
                         fill_value=True):
    """Asserts that a comparison relation between two masked arrays is
    satisfied elementwise."""
    # Fill the data first
    # xf = filled(x)
    # yf = filled(y)
    # Allocate a common mask and refill
    m = mask_or(getmask(x), getmask(y))
    x = masked_array(x, copy=False, mask=m, keep_mask=False, subok=False)
    y = masked_array(y, copy=False, mask=m, keep_mask=False, subok=False)
    if ((x is masked) and not (y is masked)) or \
            ((y is masked) and not (x is masked)):
        msg = build_err_msg([x, y], err_msg=err_msg, verbose=verbose,
                            header=header, names=('x', 'y'))
        raise ValueError(msg)
    # OK, now run the basic tests on the filled versions
    return utils.assert_array_compare(comparison,
                                      x.filled(fill_value),
                                      y.filled(fill_value),
                                      err_msg=err_msg,
                                      verbose=verbose, header=header)
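# For context, an elementwise-equality assertion could be layered on top of
# this helper roughly as follows. This is a hedged sketch: the use of
# operator.__eq__ as the comparison and the header text are assumptions, not
# something fixed by the code above.
import operator

def assert_array_equal(x, y, err_msg='', verbose=True):
    """Checks the elementwise equality of two masked arrays."""
    assert_array_compare(operator.__eq__, x, y, err_msg=err_msg,
                         verbose=verbose, header='Arrays are not equal')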
def assert_array_compare(comparison, x, y, err_msg='', header='', fill_value=True):
    """Asserts that a comparison relation between two masked arrays is
    satisfied elementwise."""
    xf = filled(x)
    yf = filled(y)
    m = mask_or(getmask(x), getmask(y))
    x = masked_array(xf, copy=False, subok=False, mask=m).filled(fill_value)
    y = masked_array(yf, copy=False, subok=False, mask=m).filled(fill_value)
    if ((x is masked) and not (y is masked)) or \
            ((y is masked) and not (x is masked)):
        msg = build_err_msg([x, y], err_msg, header=header, names=('x', 'y'))
        raise ValueError(msg)
    if (x.dtype.char != "O") and (x.dtype.char != "S"):
        x = x.astype(float_)
        if isinstance(x, N.ndarray) and x.size > 1:
            x[N.isnan(x)] = 0
        elif N.isnan(x):
            x = 0
    if (y.dtype.char != "O") and (y.dtype.char != "S"):
        y = y.astype(float_)
        if isinstance(y, N.ndarray) and y.size > 1:
            y[N.isnan(y)] = 0
        elif N.isnan(y):
            y = 0
    try:
        cond = (x.shape == () or y.shape == ()) or x.shape == y.shape
        if not cond:
            msg = build_err_msg([x, y],
                                err_msg + '\n(shapes %s, %s mismatch)' % (x.shape,
                                                                          y.shape),
                                header=header, names=('x', 'y'))
            assert cond, msg
        val = comparison(x, y)
        if m is not nomask and fill_value:
            val = masked_array(val, mask=m, copy=False)
        if isinstance(val, bool):
            cond = val
            reduced = [0]
        else:
            reduced = val.ravel()
            cond = reduced.all()
            reduced = reduced.tolist()
        if not cond:
            match = 100 - 100.0 * reduced.count(1) / len(reduced)
            msg = build_err_msg([x, y],
                                err_msg + '\n(mismatch %s%%)' % (match,),
                                header=header, names=('x', 'y'))
            assert cond, msg
    except ValueError:
        msg = build_err_msg([x, y], err_msg, header=header, names=('x', 'y'))
        raise ValueError(msg)
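# A hedged sketch of the NaN handling above: NaNs are zeroed in both operands
# before the comparison runs, so matching NaNs do not trip the assertion.
# This assumes the helper above and its N/masked_array imports are in scope.
x = masked_array([1.0, N.nan, 3.0])
y = masked_array([1.0, N.nan, 3.0])
assert_array_compare(N.equal, x, y, header='Arrays are not equal')   # passes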
def almost(a, b, decimal=6, fill_value=True):
    """Returns True if a and b are equal up to decimal places.

    If fill_value is True, masked values are considered equal. Otherwise,
    masked values are considered unequal.

    """
    m = mask_or(getmask(a), getmask(b))
    d1 = filled(a)
    d2 = filled(b)
    if d1.dtype.char == "O" or d2.dtype.char == "O":
        return np.equal(d1, d2).ravel()
    x = filled(masked_array(d1, copy=False, mask=m), fill_value).astype(float_)
    y = filled(masked_array(d2, copy=False, mask=m), 1).astype(float_)
    d = np.around(np.abs(x - y), decimal) <= 10.0 ** (-decimal)
    return d.ravel()
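# A small illustration of how fill_value affects almost (made-up values;
# assumes the masked-array helpers above are in scope):
import numpy.ma as ma

a = ma.array([1.0, 2.0, 3.000001])
b = ma.array([1.0, 2.5, 3.000002], mask=[0, 1, 0])

# With the default fill_value=True, the masked pair (2.0 vs 2.5) is filled
# identically on both sides and therefore compares equal, so only the
# unmasked entries decide the result.
print(almost(a, b, decimal=5))   # -> [ True  True  True]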
def almost(a, b, decimal=6, fill_value=True):
    """Returns True if a and b are equal up to decimal places.

    If fill_value is True, masked values are considered equal. Otherwise,
    masked values are considered unequal.

    """
    m = mask_or(getmask(a), getmask(b))
    d1 = filled(a)
    d2 = filled(b)
    if d1.dtype.char == "O" or d2.dtype.char == "O":
        return N.equal(d1, d2).ravel()
    x = filled(masked_array(d1, copy=False, mask=m), fill_value).astype(float_)
    y = filled(masked_array(d2, copy=False, mask=m), 1).astype(float_)
    d = N.around(N.abs(x - y), decimal) <= 10.0 ** (-decimal)
    return d.ravel()
def approx(a, b, fill_value=1, rtol=1.e-5, atol=1.e-8):
    """Returns true if all components of a and b are equal, subject to the
    given tolerances.

    If fill_value is 1, masked values are considered equal. If fill_value is 0,
    masked values are considered unequal. The relative error rtol should be
    positive and << 1.0. The absolute error atol comes into play for those
    elements of b that are very small or zero; it says how small a must be
    also.

    """
    m = mask_or(getmask(a), getmask(b))
    d1 = filled(a)
    d2 = filled(b)
    if d1.dtype.char == "O" or d2.dtype.char == "O":
        return N.equal(d1, d2).ravel()
    x = filled(masked_array(d1, copy=False, mask=m), fill_value).astype(float_)
    y = filled(masked_array(d2, copy=False, mask=m), 1).astype(float_)
    d = N.less_equal(umath.absolute(x - y), atol + rtol * umath.absolute(y))
    return d.ravel()
def assert_array_compare(comparison, x, y, err_msg="", verbose=True, header="", fill_value=True): """Asserts that a comparison relation between two masked arrays is satisfied elementwise.""" # Fill the data first # xf = filled(x) # yf = filled(y) # Allocate a common mask and refill m = mask_or(getmask(x), getmask(y)) x = masked_array(x, copy=False, mask=m, keep_mask=False, subok=False) y = masked_array(y, copy=False, mask=m, keep_mask=False, subok=False) if ((x is masked) and not (y is masked)) or ((y is masked) and not (x is masked)): msg = build_err_msg([x, y], err_msg=err_msg, verbose=verbose, header=header, names=("x", "y")) raise ValueError(msg) # OK, now run the basic tests on filled versions return utils.assert_array_compare( comparison, x.filled(fill_value), y.filled(fill_value), err_msg=err_msg, verbose=verbose, header=header )
def approx(a, b, fill_value=True, rtol=1.e-5, atol=1.e-8):
    """Returns true if all components of a and b are equal, subject to the
    given tolerances.

    If fill_value is True, masked values are considered equal. Otherwise,
    masked values are considered unequal. The relative error rtol should be
    positive and << 1.0. The absolute error atol comes into play for those
    elements of b that are very small or zero; it says how small a must be
    also.

    """
    m = mask_or(getmask(a), getmask(b))
    d1 = filled(a)
    d2 = filled(b)
    if d1.dtype.char == "O" or d2.dtype.char == "O":
        return np.equal(d1, d2).ravel()
    x = filled(masked_array(d1, copy=False, mask=m), fill_value).astype(float_)
    y = filled(masked_array(d2, copy=False, mask=m), 1).astype(float_)
    d = np.less_equal(umath.absolute(x - y), atol + rtol * umath.absolute(y))
    return d.ravel()
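# A brief illustration of the tolerance test |a - b| <= atol + rtol*|b|
# used above (numbers chosen for the example):
import numpy.ma as ma

a = ma.array([1.0, 1.00001, 0.0])
b = ma.array([1.0, 1.0,     1e-9])

# index 1: |a-b| ~ 1e-5 <= 1e-8 + 1e-5*1.0   -> True (within the relative tolerance)
# index 2: |a-b| = 1e-9 <= 1e-8 + 1e-5*1e-9  -> True (within the absolute tolerance)
print(approx(a, b))   # -> [ True  True  True]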
def polyfit(x, y, deg, rcond=None, full=False): """ Least squares polynomial fit. Do a best fit polynomial of degree 'deg' of 'x' to 'y'. Return value is a vector of polynomial coefficients [pk ... p1 p0]. Eg, for ``deg = 2``:: p2*x0^2 + p1*x0 + p0 = y1 p2*x1^2 + p1*x1 + p0 = y1 p2*x2^2 + p1*x2 + p0 = y2 ..... p2*xk^2 + p1*xk + p0 = yk Parameters ---------- x : array_like 1D vector of sample points. y : array_like 1D vector or 2D array of values to fit. The values should run down the columns in the 2D case. deg : integer Degree of the fitting polynomial rcond: {None, float}, optional Relative condition number of the fit. Singular values smaller than this relative to the largest singular value will be ignored. The defaul value is len(x)*eps, where eps is the relative precision of the float type, about 2e-16 in most cases. full : {False, boolean}, optional Switch determining nature of return value. When it is False just the coefficients are returned, when True diagnostic information from the singular value decomposition is also returned. Returns ------- coefficients, [residuals, rank, singular_values, rcond] : variable When full=False, only the coefficients are returned, running down the appropriate colume when y is a 2D array. When full=True, the rank of the scaled Vandermonde matrix, its effective rank in light of the rcond value, its singular values, and the specified value of rcond are also returned. Warns ----- RankWarning : if rank is reduced and not full output The warnings can be turned off by: >>> import warnings >>> warnings.simplefilter('ignore',np.RankWarning) See Also -------- polyval : computes polynomial values. Notes ----- If X is a the Vandermonde Matrix computed from x (see http://mathworld.wolfram.com/VandermondeMatrix.html), then the polynomial least squares solution is given by the 'p' in X*p = y where X.shape is a matrix of dimensions (len(x), deg + 1), p is a vector of dimensions (deg + 1, 1), and y is a vector of dimensions (len(x), 1). This equation can be solved as p = (XT*X)^-1 * XT * y where XT is the transpose of X and -1 denotes the inverse. However, this method is susceptible to rounding errors and generally the singular value decomposition of the matrix X is preferred and that is what is done here. The singular value method takes a paramenter, 'rcond', which sets a limit on the relative size of the smallest singular value to be used in solving the equation. This may result in lowering the rank of the Vandermonde matrix, in which case a RankWarning is issued. If polyfit issues a RankWarning, try a fit of lower degree or replace x by x - x.mean(), both of which will generally improve the condition number. The routine already normalizes the vector x by its maximum absolute value to help in this regard. The rcond parameter can be set to a value smaller than its default, but the resulting fit may be spurious. The current default value of rcond is len(x)*eps, where eps is the relative precision of the floating type being used, generally around 1e-7 and 2e-16 for IEEE single and double precision respectively. This value of rcond is fairly conservative but works pretty well when x - x.mean() is used in place of x. DISCLAIMER: Power series fits are full of pitfalls for the unwary once the degree of the fit becomes large or the interval of sample points is badly centered. 
The problem is that the powers x**n are generally a poor basis for the polynomial functions on the sample interval, resulting in a Vandermonde matrix is ill conditioned and coefficients sensitive to rounding erros. The computation of the polynomial values will also sensitive to rounding errors. Consequently, the quality of the polynomial fit should be checked against the data whenever the condition number is large. The quality of polynomial fits *can not* be taken for granted. If all you want to do is draw a smooth curve through the y values and polyfit is not doing the job, try centering the sample range or look into scipy.interpolate, which includes some nice spline fitting functions that may be of use. For more info, see http://mathworld.wolfram.com/LeastSquaresFittingPolynomial.html, but note that the k's and n's in the superscripts and subscripts on that page. The linear algebra is correct, however. Notes ----- Any masked values in x is propagated in y, and vice-versa. """ order = int(deg) + 1 x = asarray(x) mx = getmask(x) y = asarray(y) if y.ndim == 1: m = mask_or(mx, getmask(y)) elif y.ndim == 2: y = mask_rows(y) my = getmask(y) if my is not nomask: m = mask_or(mx, my[:, 0]) else: m = mx else: raise TypeError, "Expected a 1D or 2D array for y!" if m is not nomask: x[m] = y[m] = masked # Set rcond if rcond is None: rcond = len(x) * np.finfo(x.dtype).eps # Scale x to improve condition number scale = abs(x).max() if scale != 0: x = x / scale # solve least squares equation for powers of x v = vander(x, order) c, resids, rank, s = _lstsq(v, y.filled(0), rcond) # warn on rank reduction, which indicates an ill conditioned matrix if rank != order and not full: warnings.warn("Polyfit may be poorly conditioned", np.RankWarning) # scale returned coefficients if scale != 0: if c.ndim == 1: c /= np.vander([scale], order)[0] else: c /= np.vander([scale], order).T if full: return c, resids, rank, s, rcond else: return c
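# A hedged illustration of the centering advice in the docstring above (the
# sample data is made up): shifting x by its mean before fitting dramatically
# lowers the condition number of the Vandermonde matrix, so the returned
# coefficients are less sensitive to rounding.
import numpy as np
import numpy.ma as ma

x = np.linspace(1000.0, 1010.0, 50)
y = 0.5 * (x - 1005.0) ** 2 + 3.0

def cond(v):
    # Ratio of largest to smallest singular value of the design matrix.
    s = np.linalg.svd(v, compute_uv=False)
    return s[0] / s[-1]

print(cond(np.vander(x, 3)))             # huge: badly centered abscissae
print(cond(np.vander(x - x.mean(), 3)))  # modest after centering

c_raw = ma.polyfit(x, y, 2)                  # fit on the raw abscissae
c_centered = ma.polyfit(x - x.mean(), y, 2)  # fit on the centered abscissae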