def setmember1d(ar1, ar2):
    """
    Return a boolean array set True where first element is in second array.

    See Also
    --------
    numpy.setmember1d : equivalent function for ndarrays.

    """
    ar1 = ma.asanyarray(ar1)
    ar2 = ma.asanyarray(ar2)
    ar = ma.concatenate((ar1, ar2))
    b1 = ma.zeros(ar1.shape, dtype=np.int8)
    b2 = ma.ones(ar2.shape, dtype=np.int8)
    tt = ma.concatenate((b1, b2))

    # We need this to be a stable sort, so always use 'mergesort' here. The
    # values from the first array should always come before the values from
    # the second array.
    perm = ar.argsort(kind='mergesort')
    aux = ar[perm]
    aux2 = tt[perm]
    # flag = ediff1d(aux, 1) == 0
    flag = ma.concatenate((aux[1:] == aux[:-1], [False]))
    ii = ma.where(flag * aux2)[0]
    aux = perm[ii + 1]
    perm[ii + 1] = perm[ii]
    perm[ii] = aux
    #
    indx = perm.argsort(kind='mergesort')[:len(ar1)]
    #
    return flag[indx]
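# Illustrative sketch (not part of the original module): the same
# concatenate-and-stable-mergesort membership test written with plain
# ndarrays, assuming both inputs contain unique values. The helper name
# `_member_sketch` is hypothetical.
import numpy as np

def _member_sketch(a, b):
    ar = np.concatenate((a, b))
    # A stable sort keeps a's entry ahead of an equal entry from b, so
    # "equal to the next sorted element" means "present in b".
    order = ar.argsort(kind='mergesort')
    sar = ar[order]
    flag = np.concatenate((sar[1:] == sar[:-1], [False]))
    # Map the flags back to the original positions of a's elements.
    return flag[order.argsort(kind='mergesort')[:len(a)]]

# _member_sketch(np.array([1, 2, 5]), np.array([2, 5, 7])) -> [False  True  True]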
def in1d(ar1, ar2, assume_unique=False):
    """
    Test whether each element of an array is also present in a second array.

    See Also
    --------
    numpy.in1d : equivalent function for ndarrays

    Notes
    -----
    .. versionadded:: 1.4.0

    """
    if not assume_unique:
        ar1, rev_idx = unique(ar1, return_inverse=True)
        ar2 = unique(ar2)

    ar = ma.concatenate((ar1, ar2))
    # We need this to be a stable sort, so always use 'mergesort'
    # here. The values from the first array should always come before
    # the values from the second array.
    order = ar.argsort(kind='mergesort')
    sar = ar[order]
    equal_adj = (sar[1:] == sar[:-1])
    flag = ma.concatenate((equal_adj, [False]))
    indx = order.argsort(kind='mergesort')[:len(ar1)]

    if assume_unique:
        return flag[indx]
    else:
        return flag[indx][rev_idx]
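# Illustrative sketch (assumption: plain numpy stands in for the masked
# version above): how the `return_inverse` indices computed when
# assume_unique is False map the answer for the deduplicated array back
# onto the original, possibly repeated, entries.
import numpy as np

vals = np.array([3, 1, 3, 7])
uniq, rev_idx = np.unique(vals, return_inverse=True)   # uniq = [1, 3, 7]
member_of_uniq = np.in1d(uniq, [3, 7])                 # [False  True  True]
print(member_of_uniq[rev_idx])                         # [ True False  True  True]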
def setmember1d(ar1, ar2):
    """ This function is deprecated. Use ma.in1d() instead."""
    ar1 = ma.asanyarray(ar1)
    ar2 = ma.asanyarray(ar2)
    ar = ma.concatenate((ar1, ar2))
    b1 = ma.zeros(ar1.shape, dtype=np.int8)
    b2 = ma.ones(ar2.shape, dtype=np.int8)
    tt = ma.concatenate((b1, b2))

    # We need this to be a stable sort, so always use 'mergesort' here. The
    # values from the first array should always come before the values from
    # the second array.
    perm = ar.argsort(kind='mergesort')
    aux = ar[perm]
    aux2 = tt[perm]
    # flag = ediff1d(aux, 1) == 0
    flag = ma.concatenate((aux[1:] == aux[:-1], [False]))
    ii = ma.where(flag * aux2)[0]
    aux = perm[ii + 1]
    perm[ii + 1] = perm[ii]
    perm[ii] = aux
    #
    indx = perm.argsort(kind='mergesort')[:len(ar1)]
    #
    return flag[indx]
def intersect1d(ar1, ar2, assume_unique=False):
    """
    Returns the unique elements common to both arrays.

    Masked values are considered equal to one another.
    The output is always a masked array.

    See Also
    --------
    numpy.intersect1d : equivalent function for ndarrays.

    Examples
    --------
    >>> x = array([1, 3, 3, 3], mask=[0, 0, 0, 1])
    >>> y = array([3, 1, 1, 1], mask=[0, 0, 0, 1])
    >>> intersect1d(x, y)
    masked_array(data = [1 3 --],
                 mask = [False False  True],
           fill_value = 999999)

    """
    if assume_unique:
        aux = ma.concatenate((ar1, ar2))
    else:
        # Might be faster than unique1d(intersect1d(ar1, ar2))?
        aux = ma.concatenate((unique(ar1), unique(ar2)))
    aux.sort()
    return aux[aux[1:] == aux[:-1]]
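# Illustrative sketch (plain numpy, hypothetical data): after concatenating
# the unique elements of both inputs and sorting, a value common to both
# arrays appears exactly twice, so it equals its immediate successor and the
# comparison below picks it out. The sketch indexes aux[:-1] so it runs on
# current NumPy, where a boolean mask must match the length of the array it
# indexes.
import numpy as np

aux = np.sort(np.concatenate((np.unique([1, 3, 4]), np.unique([3, 4, 9]))))
print(aux)                            # [1 3 3 4 4 9]
print(aux[:-1][aux[1:] == aux[:-1]])  # [3 4]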
def setxor1d(ar1, ar2):
    """
    Set exclusive-or of 1D arrays with unique elements.

    See Also
    --------
    numpy.setxor1d : equivalent function for ndarrays

    """
    aux = ma.concatenate((ar1, ar2))
    if aux.size == 0:
        return aux
    aux.sort()
    auxf = aux.filled()
    # flag = ediff1d(aux, to_end=1, to_begin=1) == 0
    flag = ma.concatenate(([True], (auxf[1:] != auxf[:-1]), [True]))
    # flag2 = ediff1d(flag) == 0
    flag2 = (flag[1:] == flag[:-1])
    return aux[flag2]
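# Illustrative sketch (plain numpy, assuming both inputs already hold unique
# values): `flag` marks positions where the sorted value changes, with
# sentinels at both ends. An element belongs to the symmetric difference
# exactly when the change-flags on both of its sides agree, i.e. it has no
# equal neighbour.
import numpy as np

aux = np.sort(np.concatenate(([1, 2, 5], [2, 5, 7])))    # [1 2 2 5 5 7]
flag = np.concatenate(([True], aux[1:] != aux[:-1], [True]))
print(aux[flag[1:] == flag[:-1]])                        # [1 7]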
def union1d(ar1, ar2):
    """
    Union of two arrays.

    See Also
    --------
    numpy.union1d : equivalent function for ndarrays.

    """
    return unique(ma.concatenate((ar1, ar2)))
def union1d(ar1, ar2):
    """
    Union of 1D arrays with unique elements.

    See Also
    --------
    numpy.union1d : equivalent function for ndarrays.

    """
    return unique1d(ma.concatenate((ar1, ar2)))
def concatenate_ds(datasets, axis=0):
    """ Concatenate datasets along an existing dimension.

    Parameters:
    -----------
    datasets: sequence of datasets
    axis: axis along which to concatenate

    Returns:
    --------
    joint Dataset along axis

    NOTE: raises an error if any variable does not contain the required dimension.

    See Also:
    ---------
    stack_ds

    Examples:
    ---------
    >>> a = da.zeros(axes=[list('abc')], dims=('x0',))  # 1-D DimArray
    >>> b = da.zeros(axes=[list('abc'), [1,2]], dims=('x0','x1'))  # 2-D DimArray
    >>> ds = Dataset({'a':a,'b':b})  # dataset of 2 variables from an experiment
    >>> a2 = da.ones(axes=[list('def')], dims=('x0',))
    >>> b2 = da.ones(axes=[list('def'), [1,2]], dims=('x0','x1'))  # 2-D DimArray
    >>> ds2 = Dataset({'a':a2,'b':b2})  # dataset of 2 variables from a second experiment
    >>> concatenate_ds([ds, ds2])
    Dataset of 2 variables
    dimensions: 'x0', 'x1'
    0 / x0 (6): a to f
    1 / x1 (2): 1 to 2
    a: ('x0',)
    b: ('x0', 'x1')
    """
    # find the list of variables common to all datasets
    variables = None
    for ds in datasets:
        # check that all datasets contain the same variables
        if variables is None:
            variables = ds.keys()
        else:
            assert sorted(ds.keys()) == sorted(variables), "variables differ across datasets"

    # Compute the concatenated dataset
    dataset = Dataset()
    for v in variables:
        arrays = [ds[v] for ds in datasets]
        array = concatenate(arrays, axis=axis)
        dataset[v] = array

    return dataset
def __getitem__(self, key):
    if isinstance(key, str):
        raise MAError("Unavailable for masked array.")
    if type(key) is not tuple:
        key = (key,)
    objs = []
    scalars = []
    final_dtypedescr = None
    for k in range(len(key)):
        scalar = False
        if type(key[k]) is slice:
            step = key[k].step
            start = key[k].start
            stop = key[k].stop
            if start is None:
                start = 0
            if step is None:
                step = 1
            if type(step) is type(1j):
                # An imaginary step means "number of points", as in np.r_.
                size = int(abs(step))
                newobj = np.linspace(start, stop, num=size)
            else:
                newobj = np.arange(start, stop, step)
        elif type(key[k]) is str:
            if key[k] in 'rc':
                self.matrix = True
                self.col = (key[k] == 'c')
                continue
            try:
                self.axis = int(key[k])
                continue
            except (ValueError, TypeError):
                raise ValueError("Unknown special directive")
        elif type(key[k]) in np.ScalarType:
            newobj = asarray([key[k]])
            scalars.append(k)
            scalar = True
        else:
            newobj = key[k]
        objs.append(newobj)
        if isinstance(newobj, ndarray) and not scalar:
            if final_dtypedescr is None:
                final_dtypedescr = newobj.dtype
            elif newobj.dtype > final_dtypedescr:
                final_dtypedescr = newobj.dtype
    if final_dtypedescr is not None:
        for k in scalars:
            objs[k] = objs[k].astype(final_dtypedescr)
    res = concatenate(tuple(objs), axis=self.axis)
    return self._retval(res)
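# Hedged usage note: this __getitem__ appears to back an r_-style
# concatenator for masked arrays (numpy.ma exposes one as `mr_`), so slices,
# scalars and arrays placed inside the brackets are all converted to arrays
# and concatenated along the current axis. A minimal sketch:
import numpy as np
import numpy.ma as ma

x = ma.array([1, 2, 3], mask=[0, 0, 1])
print(ma.mr_[x, 0, 0, 4])   # masked result: [1 2 -- 0 0 4]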
def _covhelper(x, y=None, rowvar=True, allow_masked=True):
    """
    Private function for the computation of covariance and correlation
    coefficients.

    """
    x = ma.array(x, ndmin=2, copy=True, dtype=float)
    xmask = ma.getmaskarray(x)
    # Quick exit if we can't process masked data
    if not allow_masked and xmask.any():
        raise ValueError("Cannot process masked data...")
    #
    if x.shape[0] == 1:
        rowvar = True
    # Make sure that rowvar is either 0 or 1
    rowvar = int(bool(rowvar))
    axis = 1 - rowvar
    if rowvar:
        tup = (slice(None), None)
    else:
        tup = (None, slice(None))
    #
    if y is None:
        xnotmask = np.logical_not(xmask).astype(int)
    else:
        y = array(y, copy=False, ndmin=2, dtype=float)
        ymask = ma.getmaskarray(y)
        if not allow_masked and ymask.any():
            raise ValueError("Cannot process masked data...")
        if xmask.any() or ymask.any():
            if y.shape == x.shape:
                # Define some common mask
                common_mask = np.logical_or(xmask, ymask)
                if common_mask is not nomask:
                    x.unshare_mask()
                    y.unshare_mask()
                    xmask = x._mask = y._mask = ymask = common_mask
        x = ma.concatenate((x, y), axis)
        xnotmask = np.logical_not(np.concatenate((xmask, ymask), axis)).astype(int)
    x -= x.mean(axis=rowvar)[tup]
    return (x, xnotmask, rowvar)
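# Hedged sketch of how a caller (e.g. a masked cov()) can combine the pieces
# returned above: the demeaned data provide the cross-products, while the dot
# product of the not-masked indicators counts, for every pair of variables,
# how many observations were valid in both. Written from memory of
# numpy.ma.cov and simplified to the rowvar case; it assumes the module-level
# `np`/`ma` imports and the `_covhelper` defined above, so treat it as an
# illustration rather than the exact source.
def _cov_sketch(x, ddof=1):
    x, xnotmask, rowvar = _covhelper(x, rowvar=True, allow_masked=True)
    fact = np.dot(xnotmask, xnotmask.T) * 1.0 - ddof   # pairwise valid counts, less ddof
    return (ma.dot(x, x.T, strict=False) / fact).squeeze()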
def intersect1d(ar1, ar2):
    """
    Returns the repeated or unique elements belonging to the two arrays.

    Masked values are assumed equal to one another.
    The output is always a masked array.

    See Also
    --------
    numpy.intersect1d : equivalent function for ndarrays.

    Examples
    --------
    >>> x = array([1, 3, 3, 3], mask=[0, 0, 0, 1])
    >>> y = array([3, 1, 1, 1], mask=[0, 0, 0, 1])
    >>> intersect1d(x, y)
    masked_array(data = [1 1 3 3 --],
                 mask = [False False False False  True],
           fill_value = 999999)

    """
    aux = ma.concatenate((ar1, ar2))
    aux.sort()
    return aux[aux[1:] == aux[:-1]]
def intersect1d_nu(ar1, ar2):
    """ This function is deprecated. Use ma.intersect1d() instead."""
    # Might be faster than unique1d(intersect1d(ar1, ar2))?
    aux = ma.concatenate((unique1d(ar1), unique1d(ar2)))
    aux.sort()
    return aux[aux[1:] == aux[:-1]]