def polyfit(x, y, deg, rcond=None, full=False):
    """%s

    Notes
    -----
    Any masked values in x are propagated to y, and vice-versa.

    """
    order = int(deg) + 1
    x = asarray(x)
    mx = getmask(x)
    y = asarray(y)
    if y.ndim == 1:
        m = mask_or(mx, getmask(y))
    elif y.ndim == 2:
        y = mask_rows(y)
        my = getmask(y)
        if my is not nomask:
            m = mask_or(mx, my[:, 0])
        else:
            m = mx
    else:
        raise TypeError, "Expected a 1D or 2D array for y!"
    if m is not nomask:
        x[m] = y[m] = masked
    # Set rcond
    if rcond is None:
        if x.dtype in (np.single, np.csingle):
            rcond = len(x) * _single_eps
        else:
            rcond = len(x) * _double_eps
    # Scale x to improve condition number
    scale = abs(x).max()
    if scale != 0:
        x = x / scale
    # solve least squares equation for powers of x
    v = vander(x, order)
    c, resids, rank, s = _lstsq(v, y.filled(0), rcond)
    # warn on rank reduction, which indicates an ill conditioned matrix
    if rank != order and not full:
        warnings.warn("Polyfit may be poorly conditioned", np.RankWarning)
    # scale returned coefficients
    if scale != 0:
        if c.ndim == 1:
            c /= np.vander([scale], order)[0]
        else:
            c /= np.vander([scale], order).T
    if full:
        return c, resids, rank, s, rcond
    else:
        return c
def notmasked_edges(a, axis=None):
    """
    Find the indices of the first and last unmasked values along the given
    axis in a masked array.

    If all values are masked, return None.  Otherwise, return a list of two
    tuples, corresponding to the indices of the first and last unmasked
    values respectively.

    Parameters
    ----------
    axis : int, optional
        Axis along which to perform the operation.
        If None, applies to a flattened version of the array.

    """
    a = asarray(a)
    if axis is None or a.ndim == 1:
        return flatnotmasked_edges(a)
    m = getmaskarray(a)
    idx = array(np.indices(a.shape), mask=np.asarray([m] * a.ndim))
    return [tuple([idx[i].min(axis).compressed() for i in range(a.ndim)]),
            tuple([idx[i].max(axis).compressed() for i in range(a.ndim)]), ]
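# Illustrative usage sketch, not part of the original module: a minimal example
# assuming the function above is exported as numpy.ma.notmasked_edges, as in
# released numpy.
import numpy as np
import numpy.ma as ma

_demo = ma.array(np.arange(6).reshape(2, 3),
                 mask=[[True, False, False], [False, False, True]])
_flat_edges = ma.notmasked_edges(_demo)          # flat indices of first/last unmasked value
_col_edges = ma.notmasked_edges(_demo, axis=0)   # per-column edges as tuples of index arrays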
def __getitem__(self, key):
    if isinstance(key, str):
        raise MAError, "Unavailable for masked array."
    if type(key) is not tuple:
        key = (key,)
    objs = []
    scalars = []
    final_dtypedescr = None
    for k in range(len(key)):
        scalar = False
        if type(key[k]) is slice:
            step = key[k].step
            start = key[k].start
            stop = key[k].stop
            if start is None:
                start = 0
            if step is None:
                step = 1
            if type(step) is type(1j):
                size = int(abs(step))
                newobj = np.linspace(start, stop, num=size)
            else:
                newobj = np.arange(start, stop, step)
        elif type(key[k]) is str:
            if key[k] in 'rc':
                self.matrix = True
                self.col = (key[k] == 'c')
                continue
            try:
                self.axis = int(key[k])
                continue
            except (ValueError, TypeError):
                raise ValueError, "Unknown special directive"
        elif type(key[k]) in np.ScalarType:
            newobj = asarray([key[k]])
            scalars.append(k)
            scalar = True
        else:
            newobj = key[k]
        objs.append(newobj)
        if isinstance(newobj, ndarray) and not scalar:
            if final_dtypedescr is None:
                final_dtypedescr = newobj.dtype
            elif newobj.dtype > final_dtypedescr:
                final_dtypedescr = newobj.dtype
    if final_dtypedescr is not None:
        for k in scalars:
            objs[k] = objs[k].astype(final_dtypedescr)
    res = concatenate(tuple(objs), axis=self.axis)
    return self._retval(res)
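# Illustrative usage sketch, not part of the original module: a minimal example
# assuming this __getitem__ backs the ma.mr_ concatenation helper, as in
# released numpy.
import numpy.ma as ma

_a = ma.masked_values([1, 2, 3], 2)        # mask the value 2
_joined = ma.mr_[_a, 0, 0, [7, 8, 9]]      # build a 1-D masked array by concatenation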
def setdiff1d(ar1, ar2):
    """
    Set difference of 1D arrays with unique elements.

    See Also
    --------
    numpy.setdiff1d : equivalent function for ndarrays

    """
    aux = setmember1d(ar1, ar2)
    if aux.size == 0:
        return aux
    else:
        return ma.asarray(ar1)[aux == 0]
def setdiff1d(ar1, ar2, assume_unique=False):
    """
    Set difference of 1D arrays with unique elements.

    See Also
    --------
    numpy.setdiff1d : equivalent function for ndarrays

    """
    if not assume_unique:
        ar1 = unique(ar1)
        ar2 = unique(ar2)
    aux = in1d(ar1, ar2, assume_unique=True)
    if aux.size == 0:
        return aux
    else:
        return ma.asarray(ar1)[aux == 0]
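# Illustrative usage sketch, not part of the original module: a minimal example
# assuming the function above is exported as numpy.ma.setdiff1d, as in
# released numpy.
import numpy.ma as ma

_x = ma.array([1, 2, 3, 4], mask=[0, 1, 0, 0])
_only_in_x = ma.setdiff1d(_x, [3, 4, 5])   # elements of _x not present in the second array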
def compress_rowcols(x, axis=None):
    """
    Suppress the rows and/or columns of a 2D array that contain
    masked values.

    The suppression behavior is selected with the `axis` parameter.

      - If axis is None, rows and columns are suppressed.
      - If axis is 0, only rows are suppressed.
      - If axis is 1 or -1, only columns are suppressed.

    Parameters
    ----------
    axis : int, optional
        Axis along which to perform the operation.
        If None, applies to a flattened version of the array.

    Returns
    -------
    compressed_array : an ndarray.

    """
    x = asarray(x)
    if x.ndim != 2:
        raise NotImplementedError, "compress_rowcols works for 2D arrays only."
    m = getmask(x)
    # Nothing is masked: return x
    if m is nomask or not m.any():
        return x._data
    # All is masked: return empty
    if m.all():
        return nxarray([])
    # Builds a list of rows/columns indices
    (idxr, idxc) = (range(len(x)), range(x.shape[1]))
    masked = m.nonzero()
    if not axis:
        for i in np.unique(masked[0]):
            idxr.remove(i)
    if axis in [None, 1, -1]:
        for j in np.unique(masked[1]):
            idxc.remove(j)
    return x._data[idxr][:, idxc]
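# Illustrative usage sketch, not part of the original module: a minimal example
# assuming the function above is exported as numpy.ma.compress_rowcols, as in
# released numpy.
import numpy as np
import numpy.ma as ma

_m = ma.masked_equal(np.arange(9).reshape(3, 3), 4)   # mask the centre element
_no_row_col = ma.compress_rowcols(_m)          # drop row 1 and column 1
_rows_only = ma.compress_rowcols(_m, axis=0)   # drop only the masked row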
def notmasked_contiguous(a, axis=None):
    """
    Find contiguous unmasked data in a masked array along the given axis.

    Parameters
    ----------
    axis : int, optional
        Axis along which to perform the operation.
        If None, applies to a flattened version of the array.

    Returns
    -------
    A sorted sequence of slices (start index, end index).

    Notes
    -----
    Only accepts 2D arrays at most.

    """
    a = asarray(a)
    nd = a.ndim
    if nd > 2:
        raise NotImplementedError, "Currently limited to at most 2D array."
    if axis is None or nd == 1:
        return flatnotmasked_contiguous(a)
    #
    result = []
    #
    other = (axis + 1) % 2
    idx = [0, 0]
    idx[axis] = slice(None, None)
    #
    for i in range(a.shape[other]):
        idx[other] = i
        result.append(flatnotmasked_contiguous(a[idx]))
    return result
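# Illustrative usage sketch, not part of the original module: a minimal example
# assuming the function above is exported as numpy.ma.notmasked_contiguous,
# as in released numpy.
import numpy as np
import numpy.ma as ma

_v = ma.array(np.arange(10), mask=[0, 0, 1, 1, 0, 0, 0, 1, 0, 0])
_runs = ma.notmasked_contiguous(_v)   # slices covering each unmasked run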
def ediff1d(array, to_end=None, to_begin=None):
    """Return the differences between consecutive elements of an array,
    possibly with prefixed and/or appended values.

    Parameters
    ----------
    array : {array}
        Input array, will be flattened before the difference is taken.
    to_end : {number}, optional
        If provided, this number will be tacked onto the end of the returned
        differences.
    to_begin : {number}, optional
        If provided, this number will be tacked onto the beginning of the
        returned differences.

    Returns
    -------
    ed : {array}
        The differences.  Loosely, this will be (ary[1:] - ary[:-1]).

    """
    a = masked_array(array, copy=True)
    if a.ndim > 1:
        a = a.reshape((a.size,))
    (d, m, n) = (a._data, a._mask, a.size - 1)
    dd = d[1:] - d[:-1]
    if m is nomask:
        dm = nomask
    else:
        dm = m[1:] - m[:-1]
    #
    if to_end is not None:
        to_end = asarray(to_end)
        nend = to_end.size
        if to_begin is not None:
            to_begin = asarray(to_begin)
            nbegin = to_begin.size
            r_data = np.empty((n + nend + nbegin,), dtype=a.dtype)
            r_mask = np.zeros((n + nend + nbegin,), dtype=bool)
            r_data[:nbegin] = to_begin._data
            r_mask[:nbegin] = to_begin._mask
            r_data[nbegin:-nend] = dd
            r_mask[nbegin:-nend] = dm
        else:
            r_data = np.empty((n + nend,), dtype=a.dtype)
            r_mask = np.zeros((n + nend,), dtype=bool)
            r_data[:-nend] = dd
            r_mask[:-nend] = dm
        r_data[-nend:] = to_end._data
        r_mask[-nend:] = to_end._mask
    #
    elif to_begin is not None:
        to_begin = asarray(to_begin)
        nbegin = to_begin.size
        r_data = np.empty((n + nbegin,), dtype=a.dtype)
        r_mask = np.zeros((n + nbegin,), dtype=bool)
        r_data[:nbegin] = to_begin._data
        r_mask[:nbegin] = to_begin._mask
        r_data[nbegin:] = dd
        r_mask[nbegin:] = dm
    #
    else:
        r_data = dd
        r_mask = dm
    return masked_array(r_data, mask=r_mask)
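# Illustrative usage sketch, not part of the original module: a minimal example
# assuming the function above is exported as numpy.ma.ediff1d, as in
# released numpy.
import numpy.ma as ma

_s = ma.array([1, 2, 4, 7], mask=[0, 0, 1, 0])
_diffs = ma.ediff1d(_s)                           # consecutive differences, masks propagated
_padded = ma.ediff1d(_s, to_begin=0, to_end=99)   # prepend/append extra values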
def average(a, axis=None, weights=None, returned=False):
    """
    Average the array over the given axis.

    Parameters
    ----------
    axis : {None, int}, optional
        Axis along which to perform the operation.
        If None, applies to a flattened version of the array.
    weights : {None, sequence}, optional
        Sequence of weights.
        The weights must have the shape of a, or be 1D with length
        the size of a along the given axis.
        If no weights are given, weights are assumed to be 1.
    returned : {False, True}, optional
        Flag indicating whether a tuple (result, sum of weights/counts)
        should be returned as output (True), or just the result (False).

    """
    a = asarray(a)
    mask = a.mask
    ash = a.shape
    if ash == ():
        ash = (1,)
    if axis is None:
        if mask is nomask:
            if weights is None:
                n = a.sum(axis=None)
                d = float(a.size)
            else:
                w = filled(weights, 0.0).ravel()
                n = umath.add.reduce(a._data.ravel() * w)
                d = umath.add.reduce(w)
                del w
        else:
            if weights is None:
                n = a.filled(0).sum(axis=None)
                d = umath.add.reduce((-mask).ravel().astype(int))
            else:
                w = array(filled(weights, 0.0), float, mask=mask).ravel()
                n = add.reduce(a.ravel() * w)
                d = add.reduce(w)
                del w
    else:
        if mask is nomask:
            if weights is None:
                d = ash[axis] * 1.0
                n = add.reduce(a._data, axis, dtype=float)
            else:
                w = filled(weights, 0.0)
                wsh = w.shape
                if wsh == ():
                    wsh = (1,)
                if wsh == ash:
                    w = np.array(w, float, copy=0)
                    n = add.reduce(a * w, axis)
                    d = add.reduce(w, axis)
                    del w
                elif wsh == (ash[axis],):
                    ni = ash[axis]
                    r = [None] * len(ash)
                    r[axis] = slice(None, None, 1)
                    w = eval("w[" + repr(tuple(r)) + "] * ones(ash, float)")
                    n = add.reduce(a * w, axis, dtype=float)
                    d = add.reduce(w, axis, dtype=float)
                    del w, r
                else:
                    raise ValueError, "average: weights wrong shape."
        else:
            if weights is None:
                n = add.reduce(a, axis, dtype=float)
                d = umath.add.reduce((-mask), axis=axis, dtype=float)
            else:
                w = filled(weights, 0.0)
                wsh = w.shape
                if wsh == ():
                    wsh = (1,)
                if wsh == ash:
                    w = array(w, dtype=float, mask=mask, copy=0)
                    n = add.reduce(a * w, axis, dtype=float)
                    d = add.reduce(w, axis, dtype=float)
                elif wsh == (ash[axis],):
                    ni = ash[axis]
                    r = [None] * len(ash)
                    r[axis] = slice(None, None, 1)
                    w = eval("w[" + repr(tuple(r)) +
                             "] * masked_array(ones(ash, float), mask)")
                    n = add.reduce(a * w, axis, dtype=float)
                    d = add.reduce(w, axis, dtype=float)
                else:
                    raise ValueError, "average: weights wrong shape."
                del w
    if n is masked or d is masked:
        return masked
    result = n / d
    del n
    if isinstance(result, MaskedArray):
        if ((axis is None) or (axis == 0 and a.ndim == 1)) and \
           (result.mask is nomask):
            result = result._data
        if returned:
            if not isinstance(d, MaskedArray):
                d = masked_array(d)
            if isinstance(d, ndarray) and (not d.shape == result.shape):
                d = ones(result.shape, dtype=float) * d
    if returned:
        return result, d
    else:
        return result
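# Illustrative usage sketch, not part of the original module: a minimal example
# assuming the function above is exported as numpy.ma.average, as in
# released numpy.
import numpy.ma as ma

_data = ma.array([1.0, 2.0, 3.0, 4.0], mask=[0, 0, 0, 1])
_mean = ma.average(_data)                               # masked entry is ignored -> 2.0
_wmean, _wsum = ma.average(_data, weights=[1, 1, 2, 2], returned=True)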
def apply_along_axis(func1d, axis, arr, *args, **kwargs):
    """
    (This docstring should be overwritten)
    """
    arr = array(arr, copy=False, subok=True)
    nd = arr.ndim
    if axis < 0:
        axis += nd
    if axis >= nd:
        raise ValueError("axis must be less than arr.ndim; axis=%d, rank=%d."
                         % (axis, nd))
    ind = [0] * (nd - 1)
    i = np.zeros(nd, "O")
    indlist = range(nd)
    indlist.remove(axis)
    i[axis] = slice(None, None)
    outshape = np.asarray(arr.shape).take(indlist)
    i.put(indlist, ind)
    j = i.copy()
    res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
    # if res is a number, then we have a smaller output array
    asscalar = np.isscalar(res)
    if not asscalar:
        try:
            len(res)
        except TypeError:
            asscalar = True
    # Note: we shouldn't set the dtype of the output from the first result...
    # ...so we force the type to object, and build a list of dtypes
    # ...we'll just take the largest, to avoid some downcasting
    dtypes = []
    if asscalar:
        dtypes.append(np.asarray(res).dtype)
        outarr = zeros(outshape, object)
        outarr[tuple(ind)] = res
        Ntot = np.product(outshape)
        k = 1
        while k < Ntot:
            # increment the index
            ind[-1] += 1
            n = -1
            while (ind[n] >= outshape[n]) and (n > (1 - nd)):
                ind[n - 1] += 1
                ind[n] = 0
                n -= 1
            i.put(indlist, ind)
            res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
            outarr[tuple(ind)] = res
            dtypes.append(asarray(res).dtype)
            k += 1
    else:
        res = array(res, copy=False, subok=True)
        j = i.copy()
        j[axis] = [slice(None, None)] * res.ndim
        j.put(indlist, ind)
        Ntot = np.product(outshape)
        holdshape = outshape
        outshape = list(arr.shape)
        outshape[axis] = res.shape
        dtypes.append(asarray(res).dtype)
        outshape = flatten_inplace(outshape)
        outarr = zeros(outshape, object)
        outarr[tuple(flatten_inplace(j.tolist()))] = res
        k = 1
        while k < Ntot:
            # increment the index
            ind[-1] += 1
            n = -1
            while (ind[n] >= holdshape[n]) and (n > (1 - nd)):
                ind[n - 1] += 1
                ind[n] = 0
                n -= 1
            i.put(indlist, ind)
            j.put(indlist, ind)
            res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
            outarr[tuple(flatten_inplace(j.tolist()))] = res
            dtypes.append(asarray(res).dtype)
            k += 1
    max_dtypes = np.dtype(np.asarray(dtypes).max())
    if not hasattr(arr, "_mask"):
        result = np.asarray(outarr, dtype=max_dtypes)
    else:
        result = asarray(outarr, dtype=max_dtypes)
        result.fill_value = ma.default_fill_value(result)
    return result
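# Illustrative usage sketch, not part of the original module: a minimal example
# assuming the function above is exported as numpy.ma.apply_along_axis, as in
# released numpy.
import numpy as np
import numpy.ma as ma

_arr = ma.masked_greater(np.arange(12).reshape(3, 4), 9)
_col_sums = ma.apply_along_axis(ma.sum, 0, _arr)   # apply a 1-D reducer down each column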
def mask_rowcols(a, axis=None):
    """
    Mask rows and/or columns of a 2D array that contain masked values.

    Mask whole rows and/or columns of a 2D array that contain
    masked values.  The masking behavior is selected using the
    `axis` parameter.

      - If `axis` is None, rows *and* columns are masked.
      - If `axis` is 0, only rows are masked.
      - If `axis` is 1 or -1, only columns are masked.

    Parameters
    ----------
    a : array_like, MaskedArray
        The array to mask.  If not a MaskedArray instance (or if no array
        elements are masked), the result is a MaskedArray with `mask` set
        to `nomask` (False).  Must be a 2D array.
    axis : int, optional
        Axis along which to perform the operation.
        If None, applies to a flattened version of the array.

    Returns
    -------
    a : MaskedArray
        A modified version of the input array, masked depending on the value
        of the `axis` parameter.

    Raises
    ------
    NotImplementedError
        If input array `a` is not 2D.

    See Also
    --------
    mask_rows : Mask rows of a 2D array that contain masked values.
    mask_cols : Mask cols of a 2D array that contain masked values.
    masked_where : Mask where a condition is met.

    Notes
    -----
    The input array's mask is modified by this function.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = np.zeros((3, 3), dtype=np.int)
    >>> a[1, 1] = 1
    >>> a
    array([[0, 0, 0],
           [0, 1, 0],
           [0, 0, 0]])
    >>> a = ma.masked_equal(a, 1)
    >>> a
    masked_array(data =
     [[0 0 0]
     [0 -- 0]
     [0 0 0]],
          mask =
     [[False False False]
     [False  True False]
     [False False False]],
          fill_value=999999)
    >>> ma.mask_rowcols(a)
    masked_array(data =
     [[0 -- 0]
     [-- -- --]
     [0 -- 0]],
          mask =
     [[False  True False]
     [ True  True  True]
     [False  True False]],
          fill_value=999999)

    """
    a = asarray(a)
    if a.ndim != 2:
        raise NotImplementedError, "mask_rowcols works for 2D arrays only."
    m = getmask(a)
    # Nothing is masked: return a
    if m is nomask or not m.any():
        return a
    maskedval = m.nonzero()
    a._mask = a._mask.copy()
    if not axis:
        a[np.unique(maskedval[0])] = masked
    if axis in [None, 1, -1]:
        a[:, np.unique(maskedval[1])] = masked
    return a
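# Illustrative usage sketch of the axis-restricted modes, not part of the
# original module: a minimal example assuming the function above is exported
# as numpy.ma.mask_rowcols, as in released numpy.
import numpy as np
import numpy.ma as ma

_g = np.zeros((3, 3), dtype=int)
_g[1, 1] = 1
_g = ma.masked_equal(_g, 1)
_rows_masked = ma.mask_rowcols(_g.copy(), axis=0)   # mask only row 1
_cols_masked = ma.mask_rowcols(_g.copy(), axis=1)   # mask only column 1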
def polyfit(x, y, deg, rcond=None, full=False):
    """
    Least squares polynomial fit.

    Do a best fit polynomial of degree 'deg' of 'x' to 'y'.  Return value
    is a vector of polynomial coefficients [pk ... p1 p0].  E.g., for
    ``deg = 2``::

        p2*x0^2 + p1*x0 + p0 = y0
        p2*x1^2 + p1*x1 + p0 = y1
        p2*x2^2 + p1*x2 + p0 = y2
        .....
        p2*xk^2 + p1*xk + p0 = yk

    Parameters
    ----------
    x : array_like
        1D vector of sample points.
    y : array_like
        1D vector or 2D array of values to fit.  The values should run down
        the columns in the 2D case.
    deg : integer
        Degree of the fitting polynomial.
    rcond : {None, float}, optional
        Relative condition number of the fit.  Singular values smaller than
        this relative to the largest singular value will be ignored.  The
        default value is len(x)*eps, where eps is the relative precision of
        the float type, about 2e-16 in most cases.
    full : {False, boolean}, optional
        Switch determining nature of return value.  When it is False just
        the coefficients are returned; when True diagnostic information from
        the singular value decomposition is also returned.

    Returns
    -------
    coefficients, [residuals, rank, singular_values, rcond] : variable
        When full=False, only the coefficients are returned, running down
        the appropriate column when y is a 2D array.  When full=True, the
        rank of the scaled Vandermonde matrix, its effective rank in light
        of the rcond value, its singular values, and the specified value of
        rcond are also returned.

    Warns
    -----
    RankWarning : if rank is reduced and not full output
        The warnings can be turned off by:

        >>> import warnings
        >>> warnings.simplefilter('ignore', np.RankWarning)

    See Also
    --------
    polyval : computes polynomial values.

    Notes
    -----
    If X is the Vandermonde matrix computed from x (see
    http://mathworld.wolfram.com/VandermondeMatrix.html), then the
    polynomial least squares solution is given by the 'p' in

        X*p = y

    where X is a matrix of dimensions (len(x), deg + 1), p is a vector of
    dimensions (deg + 1, 1), and y is a vector of dimensions (len(x), 1).

    This equation can be solved as

        p = (XT*X)^-1 * XT * y

    where XT is the transpose of X and -1 denotes the inverse.  However,
    this method is susceptible to rounding errors and generally the singular
    value decomposition of the matrix X is preferred and that is what is
    done here.  The singular value method takes a parameter, 'rcond', which
    sets a limit on the relative size of the smallest singular value to be
    used in solving the equation.  This may result in lowering the rank of
    the Vandermonde matrix, in which case a RankWarning is issued.  If
    polyfit issues a RankWarning, try a fit of lower degree or replace x by
    x - x.mean(), both of which will generally improve the condition number.
    The routine already normalizes the vector x by its maximum absolute
    value to help in this regard.  The rcond parameter can be set to a value
    smaller than its default, but the resulting fit may be spurious.  The
    current default value of rcond is len(x)*eps, where eps is the relative
    precision of the floating type being used, generally around 1e-7 and
    2e-16 for IEEE single and double precision respectively.  This value of
    rcond is fairly conservative but works pretty well when x - x.mean() is
    used in place of x.

    DISCLAIMER: Power series fits are full of pitfalls for the unwary once
    the degree of the fit becomes large or the interval of sample points is
    badly centered.  The problem is that the powers x**n are generally a
    poor basis for the polynomial functions on the sample interval,
    resulting in a Vandermonde matrix that is ill conditioned and
    coefficients sensitive to rounding errors.  The computation of the
    polynomial values will also be sensitive to rounding errors.
    Consequently, the quality of the polynomial fit should be checked
    against the data whenever the condition number is large.  The quality of
    polynomial fits *cannot* be taken for granted.  If all you want to do is
    draw a smooth curve through the y values and polyfit is not doing the
    job, try centering the sample range or look into scipy.interpolate,
    which includes some nice spline fitting functions that may be of use.

    For more info, see
    http://mathworld.wolfram.com/LeastSquaresFittingPolynomial.html, but
    note that the k's and n's in the superscripts and subscripts on that
    page are used differently from the notation here.  The linear algebra is
    correct, however.

    Any masked values in x are propagated to y, and vice-versa.

    """
    order = int(deg) + 1
    x = asarray(x)
    mx = getmask(x)
    y = asarray(y)
    if y.ndim == 1:
        m = mask_or(mx, getmask(y))
    elif y.ndim == 2:
        y = mask_rows(y)
        my = getmask(y)
        if my is not nomask:
            m = mask_or(mx, my[:, 0])
        else:
            m = mx
    else:
        raise TypeError, "Expected a 1D or 2D array for y!"
    if m is not nomask:
        x[m] = y[m] = masked
    # Set rcond
    if rcond is None:
        rcond = len(x) * np.finfo(x.dtype).eps
    # Scale x to improve condition number
    scale = abs(x).max()
    if scale != 0:
        x = x / scale
    # solve least squares equation for powers of x
    v = vander(x, order)
    c, resids, rank, s = _lstsq(v, y.filled(0), rcond)
    # warn on rank reduction, which indicates an ill conditioned matrix
    if rank != order and not full:
        warnings.warn("Polyfit may be poorly conditioned", np.RankWarning)
    # scale returned coefficients
    if scale != 0:
        if c.ndim == 1:
            c /= np.vander([scale], order)[0]
        else:
            c /= np.vander([scale], order).T
    if full:
        return c, resids, rank, s, rcond
    else:
        return c
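# Illustrative usage sketch, not part of the original module: a minimal example
# assuming the function above is exported as numpy.ma.polyfit, as in
# released numpy.
import numpy as np
import numpy.ma as ma

_xs = ma.array(np.linspace(0.0, 1.0, 6), mask=[0, 0, 1, 0, 0, 0])
_ys = 2.0 * _xs + 1.0                      # masked x values propagate into y
_coeffs = ma.polyfit(_xs, _ys, 1)          # roughly [2.0, 1.0]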
def apply_along_axis(func1d, axis, arr, *args, **kwargs):
    """Execute func1d(arr[i], *args) where func1d takes 1-D arrays and arr
    is an N-d array.  i varies so as to apply the function along the given
    axis for each 1-d subarray in arr.
    """
    arr = core.array(arr, copy=False, subok=True)
    nd = arr.ndim
    if axis < 0:
        axis += nd
    if (axis >= nd):
        raise ValueError("axis must be less than arr.ndim; axis=%d, rank=%d."
                         % (axis, nd))
    ind = [0] * (nd - 1)
    i = np.zeros(nd, 'O')
    indlist = range(nd)
    indlist.remove(axis)
    i[axis] = slice(None, None)
    outshape = np.asarray(arr.shape).take(indlist)
    i.put(indlist, ind)
    j = i.copy()
    res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
    # if res is a number, then we have a smaller output array
    asscalar = np.isscalar(res)
    if not asscalar:
        try:
            len(res)
        except TypeError:
            asscalar = True
    # Note: we shouldn't set the dtype of the output from the first result...
    # ...so we force the type to object, and build a list of dtypes
    # ...we'll just take the largest, to avoid some downcasting
    dtypes = []
    if asscalar:
        dtypes.append(np.asarray(res).dtype)
        outarr = zeros(outshape, object)
        outarr[tuple(ind)] = res
        Ntot = np.product(outshape)
        k = 1
        while k < Ntot:
            # increment the index
            ind[-1] += 1
            n = -1
            while (ind[n] >= outshape[n]) and (n > (1 - nd)):
                ind[n - 1] += 1
                ind[n] = 0
                n -= 1
            i.put(indlist, ind)
            res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
            outarr[tuple(ind)] = res
            dtypes.append(asarray(res).dtype)
            k += 1
    else:
        res = core.array(res, copy=False, subok=True)
        j = i.copy()
        j[axis] = ([slice(None, None)] * res.ndim)
        j.put(indlist, ind)
        Ntot = np.product(outshape)
        holdshape = outshape
        outshape = list(arr.shape)
        outshape[axis] = res.shape
        dtypes.append(asarray(res).dtype)
        outshape = flatten_inplace(outshape)
        outarr = zeros(outshape, object)
        outarr[tuple(flatten_inplace(j.tolist()))] = res
        k = 1
        while k < Ntot:
            # increment the index
            ind[-1] += 1
            n = -1
            while (ind[n] >= holdshape[n]) and (n > (1 - nd)):
                ind[n - 1] += 1
                ind[n] = 0
                n -= 1
            i.put(indlist, ind)
            j.put(indlist, ind)
            res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
            outarr[tuple(flatten_inplace(j.tolist()))] = res
            dtypes.append(asarray(res).dtype)
            k += 1
    max_dtypes = np.dtype(np.asarray(dtypes).max())
    if not hasattr(arr, '_mask'):
        result = np.asarray(outarr, dtype=max_dtypes)
    else:
        result = core.asarray(outarr, dtype=max_dtypes)
        result.fill_value = core.default_fill_value(result)
    return result