def view(A, offset=0):
    """
    Get a view on the diagonal elements of a 2D array.

    This is actually a view (!) on the diagonal of the array, so you can
    in-place adjust the view.

    :param :class:`ndarray` A: 2 dimensional numpy array
    :param int offset: view offset to give back (negative entries allowed)
    :rtype: :class:`ndarray` view of diag(A)

    >>> import numpy as np
    >>> X = np.arange(9).reshape(3,3)
    >>> view(X)
    array([0, 4, 8])
    >>> d = view(X)
    >>> d += 2
    >>> view(X)
    array([ 2,  6, 10])
    >>> view(X, offset=-1)
    array([3, 7])
    >>> subtract(X, 3, offset=-1)
    array([[ 2,  1,  2],
           [ 0,  6,  5],
           [ 6,  4, 10]])
    """
    from numpy.lib.stride_tricks import as_strided
    assert A.ndim == 2, "only implemented for 2 dimensions"
    assert A.shape[0] == A.shape[1], "attempting to get the view of non-square matrix?!"
    if offset > 0:
        return as_strided(A[0, offset:], shape=(A.shape[0] - offset,),
                          strides=((A.shape[0] + 1) * A.itemsize,))
    elif offset < 0:
        return as_strided(A[-offset:, 0], shape=(A.shape[0] + offset,),
                          strides=((A.shape[0] + 1) * A.itemsize,))
    else:
        return as_strided(A, shape=(A.shape[0],),
                          strides=((A.shape[0] + 1) * A.itemsize,))
def test_subclasses():
    # test that subclass is preserved only if subok=True
    a = VerySimpleSubClass([1, 2, 3, 4])
    assert_(type(a) is VerySimpleSubClass)
    a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,))
    assert_(type(a_view) is np.ndarray)
    a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,), subok=True)
    assert_(type(a_view) is VerySimpleSubClass)
    # test that if a subclass has __array_finalize__, it is used
    a = SimpleSubClass([1, 2, 3, 4])
    a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,), subok=True)
    assert_(type(a_view) is SimpleSubClass)
    assert_(a_view.info == 'simple finalized')

    # similar tests for broadcast_arrays
    b = np.arange(len(a)).reshape(-1, 1)
    a_view, b_view = broadcast_arrays(a, b)
    assert_(type(a_view) is np.ndarray)
    assert_(type(b_view) is np.ndarray)
    assert_(a_view.shape == b_view.shape)
    a_view, b_view = broadcast_arrays(a, b, subok=True)
    assert_(type(a_view) is SimpleSubClass)
    assert_(a_view.info == 'simple finalized')
    assert_(type(b_view) is np.ndarray)
    assert_(a_view.shape == b_view.shape)

    # and for broadcast_to
    shape = (2, 4)
    a_view = broadcast_to(a, shape)
    assert_(type(a_view) is np.ndarray)
    assert_(a_view.shape == shape)
    a_view = broadcast_to(a, shape, subok=True)
    assert_(type(a_view) is SimpleSubClass)
    assert_(a_view.info == 'simple finalized')
    assert_(a_view.shape == shape)
def meshgrid_as_strided(x, y, mask=None):
    """numpy.meshgrid without copying data (using as_strided)"""
    if mask is None:
        return (as_strided(x, strides=(0, x.strides[0]), shape=(y.size, x.size)),
                as_strided(y, strides=(y.strides[0], 0), shape=(y.size, x.size)))
    else:
        return (np.ma.array(as_strided(x, strides=(0, x.strides[0]),
                                       shape=(y.size, x.size)), mask=mask),
                np.ma.array(as_strided(y, strides=(y.strides[0], 0),
                                       shape=(y.size, x.size)), mask=mask))
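
# Illustrative usage sketch (added; not from the original source). Assuming
# numpy is imported as np and meshgrid_as_strided above is in scope, the
# strided views match numpy.meshgrid without copying; `_demo_meshgrid` is a
# hypothetical helper name.
def _demo_meshgrid():
    import numpy as np
    x = np.arange(4)
    y = np.arange(3)
    xx, yy = meshgrid_as_strided(x, y)
    gx, gy = np.meshgrid(x, y)
    assert np.array_equal(xx, gx) and np.array_equal(yy, gy)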
def broadcasted_shape(shp1, shp2):
    # determine the shape of arrays of shp1 and shp2 broadcast against one
    # another
    x = np.array([1])
    # trick to define an array with a certain shape that doesn't allocate all
    # the memory
    a = as_strided(x, shape=shp1, strides=[0] * len(shp1))
    b = as_strided(x, shape=shp2, strides=[0] * len(shp2))
    return np.broadcast(a, b).shape
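
# Quick sanity check for broadcasted_shape above (illustrative sketch; the
# `_demo_broadcasted_shape` name is ours): the zero-stride trick means no
# (2, 3, 5)-sized buffer is ever allocated.
def _demo_broadcasted_shape():
    assert broadcasted_shape((3, 1), (1, 5)) == (3, 5)
    assert broadcasted_shape((2, 3, 1), (3, 5)) == (2, 3, 5)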
def disparity_ssd(L, R, window_size=21):
    """Compute disparity map D(y, x) such that: L(y, x) = R(y, x + D(y, x))

    Params:
    L: Grayscale left image, in range [0.0, 1.0]
    R: Grayscale right image, same size as L

    Returns: Disparity map, same size as L, R
    """
    D = np.zeros(L.shape, dtype=np.float64)
    # half-window border added on each side (integer division)
    offset = window_size // 2
    L = cv2.copyMakeBorder(L, offset, offset, offset, offset,
                           cv2.BORDER_CONSTANT, value=0)
    R = cv2.copyMakeBorder(R, offset, offset, offset, offset,
                           cv2.BORDER_CONSTANT, value=0)
    shape = L.shape
    height = shape[0]
    width = shape[1]
    r_shape = (R.shape[0] - (window_size - 1), R.shape[1] - (window_size - 1),
               window_size, window_size)
    r_strides = (R.shape[1] * R.itemsize, R.itemsize,
                 R.itemsize * R.shape[1], R.itemsize)
    r_strips = as_strided(R, r_shape, r_strides)
    for y in range(offset, height - offset):
        r_strip = r_strips[y - offset]
        for x in range(offset, width - offset):
            l_patch = get_patch(L, offset, offset, offset, offset, y, x)
            copy_patch = np.copy(l_patch)
            l_strip = as_strided(copy_patch, r_strip.shape,
                                 (0, copy_patch.itemsize * window_size,
                                  copy_patch.itemsize))
            ssd = ((l_strip - r_strip) ** 2).sum((1, 2))
            x_prime = np.argmin(ssd)
            D[y - offset][x - offset] = x_prime - x
    # print(D.max())
    return D

# def test_disparity_ssd2(l_image, r_image, problem, window_size=21):
#     L = cv2.imread(os.path.join('input', l_image), 0) * (1 / 255.0)  # grayscale, scale to [0.0, 1.0]
#     R = cv2.imread(os.path.join('input', r_image), 0) * (1 / 255.0)
#
#     # Compute disparity (using method disparity_ssd defined in disparity_ssd.py)
#     start = time.time()
#     D = disparity_ssd(L, R, window_size)  # TODO: implement disparity_ssd()
#     print("first: " + str(time.time() - start))
#     start = time.time()
#     D2 = disparity_ssd_2(L, R, window_size)
#     print("second: " + str(time.time() - start))
#     # print(D == D2)
#     cv2.imwrite(os.path.join("output", "ps3-" + problem + ".png"),
#                 np.clip(D2, 0, 255).astype(np.uint8))
def demosaic(self):
    if self._demo is None:
        # XXX Again, should take into account camera's vflip and hflip here
        # Construct representation of the bayer pattern
        bayer = np.zeros(self.array.shape, dtype=np.uint8)
        bayer[1::2, 0::2, 0] = 1  # Red
        bayer[0::2, 0::2, 1] = 1  # Green
        bayer[1::2, 1::2, 1] = 1  # Green
        bayer[0::2, 1::2, 2] = 1  # Blue
        # Allocate output array with same shape as data and set up some
        # constants to represent the weighted average window
        window = (3, 3)
        borders = (window[0] - 1, window[1] - 1)
        border = (borders[0] // 2, borders[1] // 2)
        # Pad out the data and the bayer pattern (np.pad is faster but
        # unavailable on the version of numpy shipped with Raspbian at the
        # time of writing)
        rgb = np.zeros((
            self.array.shape[0] + borders[0],
            self.array.shape[1] + borders[1],
            self.array.shape[2]), dtype=self.array.dtype)
        rgb[
            border[0]:rgb.shape[0] - border[0],
            border[1]:rgb.shape[1] - border[1],
            :] = self.array
        bayer_pad = np.zeros((
            self.array.shape[0] + borders[0],
            self.array.shape[1] + borders[1],
            self.array.shape[2]), dtype=bayer.dtype)
        bayer_pad[
            border[0]:bayer_pad.shape[0] - border[0],
            border[1]:bayer_pad.shape[1] - border[1],
            :] = bayer
        bayer = bayer_pad
        # For each plane in the RGB data, construct a view over the plane
        # of 3x3 matrices. Then do the same for the bayer array and use
        # Einstein summation to get the weighted average
        self._demo = np.empty(self.array.shape, dtype=self.array.dtype)
        for plane in range(3):
            p = rgb[..., plane]
            b = bayer[..., plane]
            pview = as_strided(p, shape=(
                p.shape[0] - borders[0],
                p.shape[1] - borders[1]) + window, strides=p.strides * 2)
            bview = as_strided(b, shape=(
                b.shape[0] - borders[0],
                b.shape[1] - borders[1]) + window, strides=b.strides * 2)
            psum = np.einsum('ijkl->ij', pview)
            bsum = np.einsum('ijkl->ij', bview)
            self._demo[..., plane] = psum // bsum
    return self._demo
def test_internal_overlap_manual():
    # Stride tricks can construct arrays with internal overlap

    # We don't care about memory bounds, the array is not
    # read/write accessed
    x = np.arange(1).astype(np.int8)

    # Check low-dimensional special cases
    check_internal_overlap(x, False)  # 1-dim
    check_internal_overlap(x.reshape([]), False)  # 0-dim

    a = as_strided(x, strides=(3, 4), shape=(4, 4))
    check_internal_overlap(a, False)

    a = as_strided(x, strides=(3, 4), shape=(5, 4))
    check_internal_overlap(a, True)

    a = as_strided(x, strides=(0,), shape=(0,))
    check_internal_overlap(a, False)

    a = as_strided(x, strides=(0,), shape=(1,))
    check_internal_overlap(a, False)

    a = as_strided(x, strides=(0,), shape=(2,))
    check_internal_overlap(a, True)

    a = as_strided(x, strides=(0, -9993), shape=(87, 22))
    check_internal_overlap(a, True)

    a = as_strided(x, strides=(0, -9993), shape=(1, 22))
    check_internal_overlap(a, False)

    a = as_strided(x, strides=(0, -9993), shape=(0, 22))
    check_internal_overlap(a, False)
def test_as_strided():
    a = np.array([None])
    a_view = as_strided(a)
    expected = np.array([None])
    assert_array_equal(a_view, expected)

    a = np.array([1, 2, 3, 4])
    a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,))
    expected = np.array([1, 3])
    assert_array_equal(a_view, expected)

    a = np.array([1, 2, 3, 4])
    a_view = as_strided(a, shape=(3, 4), strides=(0, 1 * a.itemsize))
    expected = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])
    assert_array_equal(a_view, expected)
def moving_avg(a, halfwindow, mask=None):
    """
    Performs a fast n-point moving average of (the last dimension of)
    array *a*, by using stride tricks to roll a window on *a*.

    Note that *halfwindow* gives the nb of points on each side, so that
    n = 2*halfwindow + 1.

    If *mask* is provided, values of *a* where mask = False are skipped.

    Returns an array of same size as *a* (which means that near the edges,
    the averaging window is actually < *npt*).
    """
    # padding array with zeros on the left and on the right:
    # e.g., if halfwindow = 2:
    # a_padded    = [0 0 a0 a1 ... aN 0 0]
    # mask_padded = [F F ?  ?  ...  ? F F]
    if mask is None:
        mask = np.ones_like(a, dtype='bool')
    zeros = np.zeros(a.shape[:-1] + (halfwindow,))
    falses = zeros.astype('bool')

    a_padded = np.concatenate((zeros, np.where(mask, a, 0), zeros), axis=-1)
    mask_padded = np.concatenate((falses, mask, falses), axis=-1)

    # rolling window on padded array using stride trick
    #
    # E.g., if halfwindow=2:
    # rolling_a[:, 0] = [0  0  a0 a1 ... aN]
    # rolling_a[:, 1] = [0  a0 a1 a2 ... aN 0]
    # ...
    # rolling_a[:, 4] = [a2 a3 ... aN 0  0]
    npt = 2 * halfwindow + 1  # total size of the averaging window
    rolling_a = as_strided(a_padded,
                           shape=a.shape + (npt,),
                           strides=a_padded.strides + (a.strides[-1],))
    rolling_mask = as_strided(mask_padded,
                              shape=mask.shape + (npt,),
                              strides=mask_padded.strides + (mask.strides[-1],))

    # moving average
    n = rolling_mask.sum(axis=-1)
    return np.where(n > 0, rolling_a.sum(axis=-1).astype('float') / n, np.nan)
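
# Illustrative check for moving_avg above (assumes numpy as np; the demo
# name is hypothetical): near the edges the effective window shrinks.
def _demo_moving_avg():
    import numpy as np
    a = np.arange(5, dtype=float)            # [0, 1, 2, 3, 4]
    out = moving_avg(a, halfwindow=1)
    assert np.isclose(out[0], (0 + 1) / 2)   # only 2 valid points at the edge
    assert np.isclose(out[2], (1 + 2 + 3) / 3)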
def sl_window(arr: np.ndarray, window: int, stride: int, axis=0, sl_axis=0):
    """
    Generates staggered windows of an array.

    Given an array a of dimension N, stride size `stride`, and window size
    `window`, returns an array of dimension N + 1 of `window`-sized windows,
    each offset by `stride` from the previous. The 'sliding' happens along
    `axis` and the windows lie along `sl_axis` of the output array.

    Args:
        arr: array over which to generate windows
        window: window size
        stride: stride size
        axis: axis of `arr` along which to slide the window
        sl_axis: axis of output array along which windows will lie

    Returns:
        out: array of windows; shape nwindows on zeroth axis, w on axis
            corresponding to 'axis' argument, other dimensions unchanged
    """
    num_windows = 1 + (arr.shape[axis] - window) // stride
    win_stride = stride * arr.strides[axis]
    new_shape = arr.shape[:axis] + (window,) + arr.shape[axis + 1:]
    new_shape = new_shape[:sl_axis] + (num_windows,) + new_shape[sl_axis:]
    new_strides = arr.strides[:sl_axis] + (win_stride,) + arr.strides[sl_axis:]
    return as_strided(arr, new_shape, new_strides)
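
# Usage sketch for sl_window above (illustrative; assumes numpy as np):
# three windows of length 4, each shifted by 2 samples.
def _demo_sl_window():
    import numpy as np
    a = np.arange(8)
    w = sl_window(a, window=4, stride=2)
    assert w.shape == (3, 4)
    assert np.array_equal(w[1], np.array([2, 3, 4, 5]))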
def split_into_subimgs(img, labels, sub_img_shape, debug, step=1):
    shape = (int(np.floor((img.shape[HEIGHT] - sub_img_shape[HEIGHT]) / step)),
             int(np.floor((img.shape[WIDTH] - sub_img_shape[WIDTH]) / step)),
             SUB_IMG_LAYERS, SUB_IMG_HEIGHT, SUB_IMG_WIDTH)
    # shape = (lbl_array.shape[0], SUB_IMG_LAYERS, SUB_IMG_HEIGHT, SUB_IMG_WIDTH)
    result_array = as_strided(img, shape=shape,
                              strides=(
                                  img.strides[1] * step +
                                  (img.shape[WIDTH] - sub_img_shape[WIDTH]) % step * img.strides[2],
                                  img.strides[2] * step,
                                  img.strides[0],
                                  img.strides[1],
                                  img.strides[2]))
    lbl_array = np.zeros(shape=(result_array.shape[0], result_array.shape[1]))
    index = 0
    coords = dict()
    for i in range(lbl_array.shape[0]):
        for ii in range(lbl_array.shape[1]):
            # Rectangle = namedtuple('Rectangle', ['xmin', 'ymin', 'xmax', 'ymax'])
            window = nn.Rectangle(ii * step, i * step,
                                  ii * step + sub_img_shape[HEIGHT],
                                  i * step + sub_img_shape[WIDTH])
            cover = np.array([compute_covering(window=window,
                                               label=nn.Rectangle(lbl[0], lbl[1], lbl[2], lbl[3]))
                              for lbl in labels])
            is_cover = int(np.any(cover > COVER_PERCENT))
            lbl_array[i, ii] = is_cover
            coords[index] = window
            index += 1
    return result_array, lbl_array, coords
def test_internal_overlap_fuzz():
    # Fuzz check; the brute-force check is fairly slow
    x = np.arange(1).astype(np.int8)

    overlap = 0
    no_overlap = 0
    min_count = 100

    rng = np.random.RandomState(1234)

    while min(overlap, no_overlap) < min_count:
        ndim = rng.randint(1, 4, dtype=np.intp)

        strides = tuple(rng.randint(-1000, 1000, dtype=np.intp)
                        for j in range(ndim))
        shape = tuple(rng.randint(1, 30, dtype=np.intp)
                      for j in range(ndim))

        a = as_strided(x, strides=strides, shape=shape)
        result = check_internal_overlap(a)

        if result:
            overlap += 1
        else:
            no_overlap += 1
def kron_id_view(vec, id_length, axis=-1):
    shape = (vec.shape[:axis] +
             (vec.shape[axis] - id_length + 1, id_length) +
             vec.shape[axis % vec.ndim + 1:])
    strides = vec.strides[:axis] + (vec.strides[axis],) + vec.strides[axis:]
    return as_strided(vec, shape=shape, strides=strides)
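
# Illustrative sketch for kron_id_view above (our reading of the code; the
# demo name is hypothetical): for a 1-D vector it yields the usual sliding
# windows of length id_length.
def _demo_kron_id_view():
    import numpy as np
    v = np.arange(5)
    w = kron_id_view(v, id_length=3)
    assert w.shape == (3, 3)
    assert np.array_equal(w[1], np.array([1, 2, 3]))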
def __init__(self, *shape):
    if len(shape) == 1 and isinstance(shape[0], tuple):
        shape = shape[0]
    x = as_strided(_nx.zeros(1), shape=shape, strides=_nx.zeros_like(shape))
    self._it = _nx.nditer(x, flags=['multi_index', 'zerosize_ok'], order='C')
def __init__(self, which_set, context_len, data_mode, shuffle=True):
    self.__dict__.update(locals())
    del self.self

    # Load data into self._data (defined in PennTreebank)
    self._load_data(which_set, context_len, data_mode)

    self._data = as_strided(self._raw_data,
                            shape=(len(self._raw_data) - context_len,
                                   context_len + 1),
                            strides=(self._raw_data.itemsize,
                                     self._raw_data.itemsize))

    super(PennTreebankNGrams, self).__init__(
        X=self._data[:, :-1],
        y=self._data[:, -1:],
        X_labels=self._max_labels, y_labels=self._max_labels
    )

    if shuffle:
        warnings.warn("Note that the PennTreebank samples are only "
                      "shuffled when the iterator method is used to "
                      "retrieve them.")
        self._iter_subset_class = resolve_iterator_class(
            'shuffled_sequential'
        )
def semicast(*arrays):
    """
    Broadcast compatible ndarray shape prefixes.
    """
    # establish the final prefix shape
    pre_ndim = max(len(a.shape[:i]) for (a, i) in arrays)
    pre_padding = [(1,) * (pre_ndim - len(a.shape[:i])) for (a, i) in arrays]
    pre_shape = tuple(map(max, *(p + a.shape[:i]
                                 for ((a, i), p) in zip(arrays, pre_padding))))

    # broadcast the arrays
    from numpy.lib.stride_tricks import as_strided

    casts = []

    for ((a, i), p) in zip(arrays, pre_padding):
        if i is None:
            i = len(a.shape)

        for (c, d) in zip(pre_shape[len(p):], a.shape[:i]):
            if c != d and d != 1:
                raise ValueError("array shapes incompatible for semicast")

        strides = (0,) * len(p) + tuple(0 if d == 1 else s
                                        for (d, s) in zip(a.shape, a.strides))
        casts += [as_strided(a, pre_shape + a.shape[i:], strides)]

    # repair dtypes (broken by as_strided)
    for ((a, _), cast) in zip(arrays, casts):
        cast.dtype = a.dtype

    # done
    return (pre_shape, casts)
def __init__(self, label_image=None, connectivity=1, data=None, **attr):

    super(RAG, self).__init__(data, **attr)
    if self.number_of_nodes() == 0:
        self.max_id = 0
    else:
        self.max_id = max(self.nodes_iter())

    if label_image is not None:
        fp = ndi.generate_binary_structure(label_image.ndim, connectivity)
        # In the next ``ndi.generic_filter`` function, the kwarg
        # ``output`` is used to provide a strided array with a single
        # 64-bit floating point number, to which the function repeatedly
        # writes. This is done because even if we don't care about the
        # output, without this, a float array of the same shape as the
        # input image will be created and that could be expensive in
        # memory consumption.
        ndi.generic_filter(
            label_image,
            function=_add_edge_filter,
            footprint=fp,
            mode='nearest',
            output=as_strided(np.empty((1,), dtype=np.float64),
                              shape=label_image.shape,
                              strides=((0,) * label_image.ndim)),
            extra_arguments=(self,))
def energy(audioData, windowSize=256):
    """
    Compute the energy of the given audio data, using the given windowSize

    Example:
    >>> from test import chirp
    >>> s = chirp()
    >>> e = energy(s)
    >>> e
    array([ 0.26917694,  0.26901879,  0.26918094, ...,  0.18757919,
            0.18656895,  0.18561012])
    """
    N = len(audioData)
    window = numpy.hamming(windowSize)
    window.shape = (windowSize, 1)
    n = N - windowSize  # number of windowed samples.

    # Create a view of audioData whose shape is (n, windowSize). Use
    # stride_tricks such that each stride jumps only one item.
    p = numpy.power(audioData, 2)
    s = stride_tricks.as_strided(p, shape=(n, windowSize),
                                 strides=(audioData.itemsize, audioData.itemsize))
    e = numpy.dot(s, window) / windowSize
    e.shape = (e.shape[0],)
    return e
def stride_help_array(self, data):
    """ Method to stride through the data matrix, extracting the outer array
    with nr of elements as Column length. """

    # Extract shapes from data.
    NE, NS, NM, NO, ND, Col = data.shape

    # Calculate how many small matrices.
    Nr_mat = NE * NS * NM * NO * ND

    # Define the shape for the stride view.
    shape = (Nr_mat, Col)

    # Get itemsize, the length of one array element in bytes. Depends on
    # dtype: float64=8, complex128=16.
    itz = data.itemsize

    # Bytes between elements.
    bbe = 1 * itz

    # Bytes between rows. The distance in bytes to the next row is the number
    # of Column elements multiplied by the itemsize.
    bbr = Col * itz

    # Make a tuple of the strides.
    strides = (bbr, bbe)

    # Make the stride view.
    data_view = as_strided(data, shape=shape, strides=strides)

    return data_view
def cross_correlation(x, y, maxlag):
    """
    Cross correlation with a maximum number of lags.

    `x` and `y` must be one-dimensional numpy arrays with the same length.

    This computes the same result as
        numpy.correlate(x, y, mode='full')[len(x)-maxlag-1:len(x)+maxlag]

    The return value has length 2*maxlag + 1.

    Author: http://stackoverflow.com/questions/30677241 Warren Weckesser
    """
    from numpy.lib.stride_tricks import as_strided

    def _check_arg(x, xname):
        x = np.asarray(x)
        if x.ndim != 1:
            raise ValueError('%s must be one-dimensional.' % xname)
        return x

    x = _check_arg(x, 'x')
    y = _check_arg(y, 'y')
    py = np.pad(y.conj(), 2 * maxlag, mode='constant')
    T = as_strided(py[2 * maxlag:],
                   shape=(2 * maxlag + 1, len(y) + 2 * maxlag),
                   strides=(-py.strides[0], py.strides[0]))
    px = np.pad(x, maxlag, mode='constant')
    return T.dot(px)
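
# Illustrative check for cross_correlation above (assumes numpy as np): it
# should match the numpy.correlate slice quoted in the docstring.
def _demo_cross_correlation():
    import numpy as np
    x = np.array([1.0, 2.0, 3.0, 4.0])
    y = np.array([0.0, 1.0, 0.5, 0.2])
    maxlag = 2
    full = np.correlate(x, y, mode='full')
    expected = full[len(x) - maxlag - 1:len(x) + maxlag]
    assert np.allclose(cross_correlation(x, y, maxlag), expected)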
def test_writeable():
    # broadcast_to should return a readonly array
    original = np.array([1, 2, 3])
    result = broadcast_to(original, (2, 3))
    assert_equal(result.flags.writeable, False)
    assert_raises(ValueError, result.__setitem__, slice(None), 0)

    # but the result of broadcast_arrays needs to be writeable (for now), to
    # preserve backwards compatibility
    for results in [broadcast_arrays(original),
                    broadcast_arrays(0, original)]:
        for result in results:
            assert_equal(result.flags.writeable, True)

    # keep readonly input readonly
    original.flags.writeable = False
    _, result = broadcast_arrays(0, original)
    assert_equal(result.flags.writeable, False)

    # regression test for GH6491
    shape = (2,)
    strides = [0]
    tricky_array = as_strided(np.array(0), shape, strides)
    other = np.zeros((1,))
    first, second = broadcast_arrays(tricky_array, other)
    assert_(first.shape == second.shape)
def _fast_synthesize(frequency):
    """A faster way to synthesize a signal.

    Generate one cycle, and simulate arbitrary repetitions using array
    indexing tricks.
    """
    # hack so that we can ensure an integer number of periods and samples
    # rounds frequency to 1st decimal, s.t. 10 * frequency will be an int
    frequency = np.round(frequency, n_dec)

    # Generate 10*frequency periods at this frequency
    # Equivalent to n_samples = int(n_periods * fs / frequency)
    # n_periods = 10*frequency is the smallest integer that guarantees
    # that n_samples will be an integer, since assuming 10*frequency
    # is an integer
    n_samples = int(10.0**n_dec * fs)

    short_signal = function(2.0 * np.pi * np.arange(n_samples) *
                            frequency / fs)

    # Calculate the number of loops we need to fill the duration
    n_repeats = int(np.ceil(length / float(short_signal.shape[0])))

    # Simulate tiling the short buffer by using stride tricks
    long_signal = as_strided(short_signal,
                             shape=(n_repeats, len(short_signal)),
                             strides=(0, short_signal.itemsize))

    # Use a flatiter to simulate a long 1D buffer
    return long_signal.flat
def stft(samples, frame_size, overlap=0.4, window=np.hanning):
    # First calculate our Hann (or other) weights
    win_weights = window(frame_size)

    # Calculate hop size
    hop = int(np.ceil((1 - overlap) * frame_size))

    # Add zeros at the end to make sure we read the entire file
    samples = np.append(samples, np.zeros(frame_size))

    # Now we reshape our data using strides. If we had no overlap between our
    # windows we could simply make sure the length of our data is a multiple
    # of frame_size and then reshape as a (len(data)/frame_size, frame_size)
    # matrix. But with overlap it is a bit more complicated. Strides give us
    # the number of bytes we have to step to go to the next item in an array.
    # We can play with these numbers to get what we want. As an example,
    # imagine an array of 1-byte ints, a frame size of 10 and an overlap of
    # 0.5. We want to reshape our array so that it takes 1 byte to move to
    # the next item in a row, but only 5 bytes to move to the next row.

    # First let's figure out the dimensions of our new data. The number of
    # columns is simply the frame size and the number of rows is
    # (cast to int, since as_strided requires an integral shape):
    rows = int(np.ceil((len(samples) - frame_size) / float(hop))) + 1

    # Now let's reshape the data
    frames = as_strided(samples,
                        shape=(rows, frame_size),
                        strides=(samples.strides[0] * hop, samples.strides[0]))

    # Finally let's scale each row by the window weights (vectorized;
    # equivalent to multiplying every frame elementwise by the weights)
    windowed_frames = frames * win_weights

    # And take the Fourier Transform
    return np.fft.rfft(windowed_frames), hop
def as_strided_writeable():
    arr = np.ones(10)
    view = as_strided(arr, writeable=False)
    assert_(not view.flags.writeable)

    # Check that writeable also is fine:
    view = as_strided(arr, writeable=True)
    assert_(view.flags.writeable)
    view[...] = 3
    assert_array_equal(arr, np.full_like(arr, 3))

    # Test that things do not break down for readonly:
    arr.flags.writeable = False
    view = as_strided(arr, writeable=False)
    view = as_strided(arr, writeable=True)
    assert_(not view.flags.writeable)
def apply_kaiserbessel_window(X, alpha=6.5):
    """
    Apply a Kaiser-Bessel window to X.

    Parameters
    ----------
    X : ndarray, shape=(n_samples, n_features)
        Input array of samples

    alpha : float, optional (default=6.5)
        Tuning parameter for Kaiser-Bessel function. alpha=6.5 should make
        perfect reconstruction possible for MDCT.

    Returns
    -------
    X_windowed : ndarray, shape=(n_samples, n_features)
        Windowed version of X.
    """
    beta = np.pi * alpha
    win = sg.kaiser(X.shape[1], beta)
    row_stride = 0
    col_stride = win.itemsize
    strided_win = as_strided(win, shape=X.shape,
                             strides=(row_stride, col_stride))
    return X * strided_win
def convolve(A, B, axes=None, dot_axes=[(), ()], mode='full'):
    assert mode in ['valid', 'full'], "Mode {0} not yet implemented".format(mode)
    if axes is None:
        axes = [list(range(A.ndim)), list(range(A.ndim))]
    wrong_order = any([B.shape[ax_B] < A.shape[ax_A]
                       for ax_A, ax_B in zip(*axes)])
    if wrong_order:
        if mode == 'valid' and not all([B.shape[ax_B] <= A.shape[ax_A]
                                        for ax_A, ax_B in zip(*axes)]):
            raise Exception("One array must be larger than the other along "
                            "all convolved dimensions")
        elif mode != 'full' or B.size <= A.size:  # Tie breaker
            i1 = B.ndim - len(dot_axes[1]) - len(axes[1])  # B ignore
            i2 = i1 + A.ndim - len(dot_axes[0]) - len(axes[0])  # A ignore
            i3 = i2 + len(axes[0])
            ignore_B = list(range(i1))
            ignore_A = list(range(i1, i2))
            conv = list(range(i2, i3))
            return convolve(B, A, axes=axes[::-1], dot_axes=dot_axes[::-1],
                            mode=mode).transpose(ignore_A + ignore_B + conv)

    if mode == 'full':
        B = pad_to_full(B, A, axes[::-1])
    B_view_shape = list(B.shape)
    B_view_strides = list(B.strides)
    flipped_idxs = [slice(None)] * A.ndim
    for ax_A, ax_B in zip(*axes):
        B_view_shape.append(abs(B.shape[ax_B] - A.shape[ax_A]) + 1)
        B_view_strides.append(B.strides[ax_B])
        B_view_shape[ax_B] = A.shape[ax_A]
        flipped_idxs[ax_A] = slice(None, None, -1)
    B_view = as_strided(B, B_view_shape, B_view_strides)
    # index with a tuple (indexing with a list of slices is an error in
    # recent numpy versions)
    A_view = A[tuple(flipped_idxs)]
    all_axes = [list(axes[i]) + list(dot_axes[i]) for i in [0, 1]]
    return einsum_tensordot(A_view, B_view, all_axes)
def stride_help_element(self, data):
    """ Method to stride through the data matrix, extracting the outer
    element. """

    # Extract shapes from data.
    NE, NS, NM, NO, Col = data.shape

    # Calculate how many small matrices.
    Nr_mat = NE * NS * NM * NO * Col

    # Define the shape for the stride view.
    shape = (Nr_mat, 1)

    # Get itemsize, the length of one array element in bytes. Depends on
    # dtype: float64=8, complex128=16.
    itz = data.itemsize

    # Bytes between elements along the second view axis: a full row of Col
    # elements (effectively unused here, since the view has a single column).
    bbe = Col * itz

    # Bytes between rows of the view: step one element at a time.
    bbr = 1 * itz

    # Make a tuple of the strides.
    strides = (bbr, bbe)

    # Make the stride view.
    data_view = as_strided(data, shape=shape, strides=strides)

    return data_view
def get_test_array(shape, dtype, strides=None, no_zeros=False, high=None):
    shape = wrap_in_tuple(shape)
    dtype = dtypes.normalize_type(dtype)

    if dtype.names is not None:
        result = numpy.empty(shape, dtype)
        for name in dtype.names:
            result[name] = get_test_array(shape, dtype[name],
                                          no_zeros=no_zeros, high=high)
    else:
        if dtypes.is_integer(dtype):
            low = 1 if no_zeros else 0
            if high is None:
                high = 100  # will work even with signed chars
            get_arr = lambda: numpy.random.randint(low, high, shape).astype(dtype)
        else:
            low = 0.01 if no_zeros else 0
            if high is None:
                high = 1.0
            get_arr = lambda: numpy.random.uniform(low, high, shape).astype(dtype)

        if dtypes.is_complex(dtype):
            result = get_arr() + 1j * get_arr()
        else:
            result = get_arr()

    if strides is not None:
        result = as_strided(result, result.shape, strides)

    return result
def diag_windows(x, n):
    from numpy.lib.stride_tricks import as_strided
    if x.ndim != 2 or x.shape[0] != x.shape[1] or x.shape[0] < n:
        raise ValueError("Invalid input")
    w = as_strided(x, shape=(x.shape[0] - n + 1, n, n),
                   strides=(x.strides[0] + x.strides[1],
                            x.strides[0], x.strides[1]))
    return w
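
# Usage sketch for diag_windows above (illustrative): n-by-n blocks sliding
# down the main diagonal.
def _demo_diag_windows():
    import numpy as np
    x = np.arange(16).reshape(4, 4)
    w = diag_windows(x, 2)
    assert w.shape == (3, 2, 2)
    assert np.array_equal(w[1], x[1:3, 1:3])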
def halfoverlap(X, window_size):
    """
    Create an overlapped version of X using 50% of window_size as overlap.

    Parameters
    ----------
    X : ndarray, shape=(n_samples,)
        Input signal to window and overlap

    window_size : int
        Size of windows to take

    Returns
    -------
    X_strided : shape=(n_windows, window_size)
        2D array of overlapped X
    """
    if window_size % 2 != 0:
        raise ValueError("Window size must be even!")
    window_step = window_size // 2
    # Make sure there are an even number of windows before stridetricks
    append = np.zeros((window_size - len(X) % window_size))
    X = np.hstack((X, append))
    num_frames = len(X) // window_step - 1
    row_stride = X.itemsize * window_step
    col_stride = X.itemsize
    X_strided = as_strided(X, shape=(num_frames, window_size),
                           strides=(row_stride, col_stride))
    return X_strided
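
# Illustrative check for halfoverlap above (assumes numpy as np): with a
# window of 4, consecutive frames share two samples.
def _demo_halfoverlap():
    import numpy as np
    X = np.arange(8, dtype=float)
    Xs = halfoverlap(X, window_size=4)
    assert np.array_equal(Xs[0], np.array([0., 1., 2., 3.]))
    assert np.array_equal(Xs[1], np.array([2., 3., 4., 5.]))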
def view_as_blocks(arr_in, block_shape):
    """Block view of the input n-dimensional array (using re-striding).

    Blocks are non-overlapping views of the input array.

    Parameters
    ----------
    arr_in: ndarray
        The n-dimensional input array.
    block_shape: tuple
        The shape of the block. Each dimension must divide evenly into the
        corresponding dimensions of `arr_in`.

    Returns
    -------
    arr_out: ndarray
        Block view of the input array.

    Examples
    --------
    >>> import numpy as np
    >>> from skimage.util.shape import view_as_blocks
    >>> A = np.arange(4*4).reshape(4,4)
    >>> A
    array([[ 0,  1,  2,  3],
           [ 4,  5,  6,  7],
           [ 8,  9, 10, 11],
           [12, 13, 14, 15]])
    >>> B = view_as_blocks(A, block_shape=(2, 2))
    >>> B[0, 0]
    array([[0, 1],
           [4, 5]])
    >>> B[0, 1]
    array([[2, 3],
           [6, 7]])
    >>> B[1, 0, 1, 1]
    13

    >>> A = np.arange(4*4*6).reshape(4,4,6)
    >>> A  # doctest: +NORMALIZE_WHITESPACE
    array([[[ 0,  1,  2,  3,  4,  5],
            [ 6,  7,  8,  9, 10, 11],
            [12, 13, 14, 15, 16, 17],
            [18, 19, 20, 21, 22, 23]],
           [[24, 25, 26, 27, 28, 29],
            [30, 31, 32, 33, 34, 35],
            [36, 37, 38, 39, 40, 41],
            [42, 43, 44, 45, 46, 47]],
           [[48, 49, 50, 51, 52, 53],
            [54, 55, 56, 57, 58, 59],
            [60, 61, 62, 63, 64, 65],
            [66, 67, 68, 69, 70, 71]],
           [[72, 73, 74, 75, 76, 77],
            [78, 79, 80, 81, 82, 83],
            [84, 85, 86, 87, 88, 89],
            [90, 91, 92, 93, 94, 95]]])
    >>> B = view_as_blocks(A, block_shape=(1, 2, 2))
    >>> B.shape
    (4, 2, 3, 1, 2, 2)
    >>> B[2:, 0, 2]  # doctest: +NORMALIZE_WHITESPACE
    array([[[[52, 53],
             [58, 59]]],
           [[[76, 77],
             [82, 83]]]])
    """

    # -- basic checks on arguments
    if not isinstance(block_shape, tuple):
        raise TypeError('block needs to be a tuple')

    block_shape = np.array(block_shape)
    if (block_shape <= 0).any():
        raise ValueError("'block_shape' elements must be strictly positive")

    if block_shape.size != arr_in.ndim:
        raise ValueError("'block_shape' must have the same length "
                         "as 'arr_in.shape'")

    arr_shape = np.array(arr_in.shape)
    if (arr_shape % block_shape).sum() != 0:
        raise ValueError("'block_shape' is not compatible with 'arr_in'")

    # -- restride the array to build the block view
    arr_in = np.ascontiguousarray(arr_in)

    # integer division so the resulting shape stays integral
    new_shape = tuple(arr_shape // block_shape) + tuple(block_shape)
    new_strides = tuple(arr_in.strides * block_shape) + arr_in.strides

    arr_out = as_strided(arr_in, shape=new_shape, strides=new_strides)

    return arr_out
def repeat_dim(X, dim, n):
    sh = list(X.shape)
    sh.insert(dim, n)
    st = list(X.strides)
    st.insert(dim, 0)
    return as_strided(X, sh, st)
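
# Usage sketch for repeat_dim above (illustrative): a zero-stride axis
# repeats the data n times without copying it.
def _demo_repeat_dim():
    import numpy as np
    X = np.arange(6).reshape(2, 3)
    Y = repeat_dim(X, dim=1, n=4)
    assert Y.shape == (2, 4, 3)
    assert np.array_equal(Y[:, 0], Y[:, 3])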
def rolling(a, window):
    shape = (a.size - window + 1, window)
    strides = (a.itemsize, a.itemsize)
    return stride_tricks.as_strided(a, shape=shape, strides=strides)
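
# Illustrative check for rolling above: the classic length-`window` sliding
# view over a 1-D array (assumes numpy as np and the same stride_tricks
# import as the function).
def _demo_rolling():
    import numpy as np
    a = np.arange(6)
    r = rolling(a, 3)
    assert r.shape == (4, 3)
    assert np.array_equal(r[2], np.array([2, 3, 4]))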
def __call__(self, observer, targets, times=None, time_range=None,
             time_grid_resolution=0.5 * u.hour, grid_times_targets=False):
    """
    Compute the constraint for this class

    Parameters
    ----------
    observer : `~astroplan.Observer`
        the observation location from which to apply the constraints
    targets : sequence of `~astroplan.Target`
        The targets on which to apply the constraints.
    times : `~astropy.time.Time`
        The times to compute the constraint.
        WHAT HAPPENS WHEN BOTH TIMES AND TIME_RANGE ARE SET?
    time_range : `~astropy.time.Time` (length = 2)
        Lower and upper bounds on time sequence.
    time_grid_resolution : `~astropy.units.quantity`
        Time-grid spacing
    grid_times_targets : bool
        if True, grids the constraint result with targets along the first
        index and times along the second. Otherwise, we rely on broadcasting
        the shapes together using standard numpy rules.

    Returns
    -------
    constraint_result : 1D or 2D array of float or bool
        The constraints. If 2D with targets along the first index and times
        along the second.
    """
    if times is None and time_range is not None:
        times = time_grid_from_range(time_range,
                                     time_resolution=time_grid_resolution)

    if grid_times_targets:
        targets = get_skycoord(targets)
        # TODO: these broadcasting operations are relatively slow
        # but there is potential for huge speedup if the end user
        # disables gridding and re-shapes the coords themselves
        # prior to evaluating multiple constraints.
        if targets.isscalar:
            # ensure we have a (1, 1) shape coord
            targets = SkyCoord(np.tile(targets, 1))[:, np.newaxis]
        else:
            targets = targets[..., np.newaxis]
    times, targets = observer._preprocess_inputs(times, targets,
                                                 grid_times_targets=False)

    result = self.compute_constraint(times, observer, targets)

    # make sure the output has the same shape as would result from
    # broadcasting times and targets against each other
    if targets is not None:
        # broadcasting times v targets is slow due to the complex nature of
        # these objects. We make two simple numpy arrays of the same shapes
        # and broadcast these to find the correct output shape
        shp1, shp2 = times.shape, targets.shape
        x = np.array([1])
        a = as_strided(x, shape=shp1, strides=[0] * len(shp1))
        b = as_strided(x, shape=shp2, strides=[0] * len(shp2))
        output_shape = np.broadcast(a, b).shape
        if output_shape != np.array(result).shape:
            result = np.broadcast_to(result, output_shape)

    return result
def tile_array(a, b0, b1):
    r, c = a.shape      # number of rows/columns
    rs, cs = a.strides  # row/column strides
    x = as_strided(a, (r, b0, c, b1), (rs, 0, cs, 0))  # view a as larger 4D array
    return x.reshape(r * b0, c * b1)
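
# Usage sketch for tile_array above (illustrative): every element of `a` is
# expanded into a b0 x b1 block.
def _demo_tile_array():
    import numpy as np
    a = np.array([[1, 2], [3, 4]])
    t = tile_array(a, 2, 3)
    assert t.shape == (4, 6)
    assert np.array_equal(t[0, :3], np.array([1, 1, 1]))
    assert np.array_equal(t[2, 3:], np.array([4, 4, 4]))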
def Iota(dim: Dim, offset=0, floattype=dace.float64):
    arr = numpy.arange(offset, offset + dim.total_size).astype(floattype.type)
    byte_strides = [s * floattype.bytes for s in dim.strides]
    return as_strided(arr, shape=dim.shape, strides=byte_strides)
def Zeros(dim: Dim, floattype=dace.float64):
    arr = numpy.zeros(dim.total_size, dtype=floattype.type)
    byte_strides = [s * floattype.bytes for s in dim.strides]
    return as_strided(arr, shape=dim.shape, strides=byte_strides)
def _rolling_block(A, block=(3, 3)):
    """Applies sliding window to given matrix."""
    shape = (A.shape[0] - block[0] + 1, A.shape[1] - block[1] + 1) + block
    strides = (A.strides[0], A.strides[1]) + A.strides
    return as_strided(A, shape=shape, strides=strides)
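
# Illustrative check for _rolling_block above (assumes numpy as np): each
# output cell holds the block-shaped neighbourhood starting at that position.
def _demo_rolling_block():
    import numpy as np
    A = np.arange(16).reshape(4, 4)
    B = _rolling_block(A)
    assert B.shape == (2, 2, 3, 3)
    assert np.array_equal(B[1, 1], A[1:4, 1:4])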
def prepare_overlap_sequences(ms, vs, bk, l_size, o_lap, bsize):
    """
    Method to prepare overlapping sequences of the given magnitude spectra.

    Args:
        ms : (2D Array) Mixture magnitude spectra (Time frames times
                        Frequency sub-bands).
        vs : (2D Array) Singing voice magnitude spectra (Time frames times
                        Frequency sub-bands).
        bk : (2D Array) Background magnitude spectra (Time frames times
                        Frequency sub-bands).
        l_size : (int)  Length of the time-sequence.
        o_lap : (int)   Overlap between spectrogram time-sequences (to recover
                        the missing information from the context information).
        bsize : (int)   Batch size.

    Returns:
        ms : (3D Array) Mixture magnitude spectra training data reshaped into
                        overlapping sequences.
        vs : (3D Array) Singing voice magnitude spectra training data reshaped
                        into overlapping sequences.
        bk : (3D Array) Background magnitude spectra training data reshaped
                        into overlapping sequences.
    """
    trim_frame = ms.shape[0] % (l_size - o_lap)
    trim_frame -= (l_size - o_lap)
    trim_frame = np.abs(trim_frame)

    # Zero-padding
    if trim_frame != 0:
        ms = np.pad(ms, ((0, trim_frame), (0, 0)), 'constant', constant_values=(0, 0))
        vs = np.pad(vs, ((0, trim_frame), (0, 0)), 'constant', constant_values=(0, 0))
        bk = np.pad(bk, ((0, trim_frame), (0, 0)), 'constant', constant_values=(0, 0))

    # Reshaping with overlap (integer division keeps the shape integral)
    ms = stride_tricks.as_strided(ms,
                                  shape=(ms.shape[0] // (l_size - o_lap), l_size, ms.shape[1]),
                                  strides=(ms.strides[0] * (l_size - o_lap), ms.strides[0], ms.strides[1]))
    ms = ms[:-1, :, :]

    vs = stride_tricks.as_strided(vs,
                                  shape=(vs.shape[0] // (l_size - o_lap), l_size, vs.shape[1]),
                                  strides=(vs.strides[0] * (l_size - o_lap), vs.strides[0], vs.strides[1]))
    vs = vs[:-1, :, :]

    bk = stride_tricks.as_strided(bk,
                                  shape=(bk.shape[0] // (l_size - o_lap), l_size, bk.shape[1]),
                                  strides=(bk.strides[0] * (l_size - o_lap), bk.strides[0], bk.strides[1]))
    bk = bk[:-1, :, :]

    b_trim_frame = (ms.shape[0] % bsize)
    if b_trim_frame != 0:
        ms = ms[:-b_trim_frame, :, :]
        vs = vs[:-b_trim_frame, :, :]
        bk = bk[:-b_trim_frame, :, :]

    return ms, vs, bk
def binarize(input_files, filenames_no_extensions, ngram=None):
    """
    By "binarize" we mean mapping from strings to indices

    :param input_files:
    :param filenames_no_extensions:
    :param ngram:
    :return:
    """
    if ngram is not None:
        assert numpy.iinfo(numpy.uint16).max > len(vocab)
        ngrams = numpy.empty(
            (sum(combined_counter.values()) + sum(sentence_counts), ngram),
            dtype='uint16')
    binarized_corpora = []
    total_ngram_count = 0
    for input_file, base_filename, sentence_count in \
            zip(input_files, filenames_no_extensions, sentence_counts):
        input_filename = os.path.basename(input_file.name)
        logger.info("Binarizing %s." % (input_filename))
        binarized_corpus = []
        ngram_count = 0
        for sentence_count, sentence in enumerate(input_file):
            if args.lowercase:
                sentence = sentence.lower()
            if args.char:
                words = list(sentence.strip().decode('utf-8'))
            else:
                words = sentence.strip().split(' ')
            binarized_sentence = [vocab.get(word, 1) for word in words]
            binarized_corpus.append(binarized_sentence)
            if args.ngram:
                padded_sentence = numpy.asarray(
                    [0] * (args.ngram - 1) + binarized_sentence + [0])
                ngrams[total_ngram_count + ngram_count:
                       total_ngram_count + ngram_count + len(words) + 1] = \
                    as_strided(
                        padded_sentence,
                        shape=(len(words) + 1, args.ngram),
                        strides=(padded_sentence.itemsize,
                                 padded_sentence.itemsize)
                    )
                ngram_count += len(words) + 1
        # end for sentence in input_file
        # Output
        if args.each:
            if args.pickle:
                safe_pickle(binarized_corpus, base_filename + '.pkl')
            if args.ngram and args.split:
                if args.split >= 1:
                    rows = int(args.split)
                else:
                    rows = int(ngram_count * args.split)
                logger.info("Saving training set (%d samples) and validation "
                            "set (%d samples)." % (ngram_count - rows, rows))
                rows = numpy.random.choice(ngram_count, rows, replace=False)
                safe_hdf(ngrams[total_ngram_count + rows],
                         base_filename + '_valid')
                safe_hdf(
                    ngrams[total_ngram_count + numpy.setdiff1d(
                        numpy.arange(ngram_count), rows, True)],
                    base_filename + '_train')
            elif args.ngram:
                logger.info("Saving n-grams to %s." % (base_filename + '.hdf'))
                safe_hdf(ngrams, base_filename)
        binarized_corpora += binarized_corpus
        total_ngram_count += ngram_count
        input_file.seek(0)
    # end for input_file in args.input
    if args.pickle:
        safe_pickle(binarized_corpora, args.binarized_text)
    if args.ngram and args.split:
        if args.split >= 1:
            rows = int(args.split)
        else:
            rows = int(total_ngram_count * args.split)
        logger.info("Saving training set (%d samples) and validation set "
                    "(%d samples)." % (total_ngram_count - rows, rows))
        rows = numpy.random.choice(total_ngram_count, rows, replace=False)
        safe_hdf(ngrams[rows], 'combined_valid')
        safe_hdf(
            ngrams[numpy.setdiff1d(numpy.arange(total_ngram_count),
                                   rows, True)],
            'combined_train')
    elif args.ngram:
        safe_hdf(ngrams, 'combined')
def generate_training_data_lstm(dataset, train_cv_test_split=train_cv_test_split, cleanse=False):
    """
    From Pandas DataFrame (timestep, watthour) generate all valid training
    examples and split respectively; X and y are scaled to model scale.
    """
    # load raw data, df for dataframe
    ds = dataset.copy(deep=True)
    ds.watthour = np.nan_to_num(ds.watthour.values)  # 5-min load (288 loads per day)
    scaling_factor = np.max(np.array(ds.watthour))

    nb_forecast_steps = int(
        dt.timedelta(minutes=forecast_horizon_mins).total_seconds() / granularity_s)
    nb_examples = len(np.array(ds.watthour)) - nb_forecast_steps - sliding_window_width
    # history_offset = sliding_window_width + nb_forecast_steps - 1
    # lagged_vals = np.array(list_5mins_load[-history_offset:])
    s = np.array(ds.watthour).itemsize
    lagged_vals = as_strided(np.array(ds.watthour),
                             shape=(nb_examples, sliding_window_width),
                             strides=(s, s))

    if sliding_window_width != 0:
        t0 = ds.index[sliding_window_width - 1:-nb_forecast_steps - 1]
    else:
        t0 = ds.index[:-nb_forecast_steps]

    if forecast_type == 'watthours':
        s = np.array(ds.watthour).itemsize
        watthour_intervals = as_strided(np.array(ds.watthour)[sliding_window_width:],
                                        shape=(nb_examples, nb_forecast_steps),
                                        strides=(s, s))
        # print("fc_hor", watthour_intervals[2])
        # print("last_elem_of_sum", watthour_intervals[:, -1])
        # print("P_t1", ds.loc[t0+pd.Timedelta(minutes=self.forecast_horizon_mins)].watthour.values)
        # np.where returns indices for nonzero values as [xi][yi]; take only
        # the unique row indices, i.e. the rows of watthour_intervals that
        # contain a nonzero value
        mask = np.unique(np.where(watthour_intervals != 0.0)[0])
        # integrate watthour over the forecast horizon to get the total
        # energy in Wh
        ground_truth = np.sum(watthour_intervals, axis=-1)
        # print(y.shape)
    elif forecast_type == 'watts':
        ground_truth = ds.watthour.values[sliding_window_width + nb_forecast_steps - 1:-1]
        # mask = np.array(np.where(y != 0.0)).reshape((-1))
        # print('gt', ground_truth)
        # print("P_t1", ds.loc[t0+pd.Timedelta(minutes=self.forecast_horizon_mins)].watthour.values)
    else:
        print('Unsupported forecast type. Please define forecast type as '
              'either \'watts\' or \'watthours\'.')

    # print(">>ground truth", ground_truth)  # ground truth real values
    ground_truth = ground_truth.reshape(-1, 1)
    # why are there no ground_truth values > 1 here? Because of the earlier
    # *granularity_s/3600 scaling; note this also differs from the source code
    ground_truth = (ground_truth / scaling_factor)

    # generate_input_data
    X = generate_input_data(lagged_vals, t0, scaling_factor)
    # y is a vector with the normalized energy consumption within the time interval
    y = generate_output_data(ground_truth)
    # print(">>ground truth", ground_truth)
    # print(">>output", y)

    if cleanse:
        ground_truth = ground_truth[mask]
        y = y[mask]  # cleansing the data leads to an extreme performance drop
        X = X[mask, :]
        t0 = t0[mask]

    val_idx = int(len(y) * train_cv_test_split[0])
    test_idx = int(len(y) * (train_cv_test_split[0] + train_cv_test_split[1]))

    y_train = y[0:val_idx]
    X_train = X[0:val_idx, :]
    ground_truth_train = ground_truth[0:val_idx]
    t0_train = t0[0:val_idx]

    y_val = y[val_idx:test_idx]
    t0_val = t0[val_idx:test_idx]
    ground_truth_val = ground_truth[val_idx:test_idx]
    X_val = X[val_idx:test_idx, :]

    y_test = y[test_idx:]
    t0_test = t0[test_idx:]
    ground_truth_test = ground_truth[test_idx:]
    X_test = X[test_idx:, :]

    return (X_train, y_train, ground_truth_train, t0_train), \
           (X_val, y_val, ground_truth_val, t0_val), \
           (X_test, y_test, ground_truth_test, t0_test), \
           scaling_factor
def create_f0_analysis(
        frames,
        samplerate,
        window_size=512,
        overlapFac=0.5,
        threshold=0.0,
        m0=None,
        M=None,
):
    """
    Generate F0 contour analysis.

    Calculate the frequency and harmonic ratio values of windowed segments
    of the audio file and save to disk.
    """
    if hasattr(frames, '__call__'):
        frames = frames()
    if not M:
        M = int(round(0.016 * samplerate))
    hopSize = int(window_size - np.floor(overlapFac * window_size))

    # zeros at beginning (thus center of 1st window should be for sample nr. 0)
    samples = frames
    # samples = np.concatenate((np.zeros(np.floor(window_size/2.0)), frames))

    # cols for windowing (cast to int, since as_strided needs an integral shape)
    cols = int(np.ceil((len(samples) - window_size) / float(hopSize)) + 1)
    # zeros at end (thus samples can be fully covered by frames)
    samples = np.concatenate((samples, np.zeros(window_size)))

    frames = stride_tricks.as_strided(
        samples,
        shape=(cols, window_size),
        strides=(samples.strides[0] * hopSize, samples.strides[0])).copy()

    # TODO: Replace this with zero crossing object.
    def feature_zcr(window):
        window2 = np.zeros(window.size)
        window2[1:-1] = window[0:-2]
        Z = (1 / (2 * window.size)) * np.sum(
            np.abs(np.sign(window) - np.sign(window2)))
        return Z

    def parabolic(f, x):
        """
        Quadratic interpolation for estimating the true position of an
        inter-sample maximum when nearby samples are known.

        f is a vector and x is an index for that vector.

        Returns (vx, vy), the coordinates of the vertex of a parabola that
        goes through point x and its two neighbors.

        Example:
        Defining a vector f with a local maximum at index 3 (= 6), find local
        maximum if points 2, 3, and 4 actually defined a parabola.

        In [3]: f = [2, 3, 1, 6, 4, 2, 3, 1]

        In [4]: parabolic(f, argmax(f))
        Out[4]: (3.2142857142857144, 6.1607142857142856)

        Ref: https://gist.github.com/endolith/255291
        """
        if x >= f.size - 1 or x <= 2:
            return x, f[x]
        xv = 1 / 2. * (f[x - 1] - f[x + 1]) / (f[x - 1] - 2 * f[x] + f[x + 1]) + x
        yv = f[x] - 1 / 4. * (f[x - 1] - f[x + 1]) * (xv - x)
        return (xv, yv)

    def per_frame_f0(frames, m0, M):
        if not frames.any():
            HR = np.nan
            f0 = np.nan
            return f0, HR
        # R = autocorr([frames])[0]
        R = np.correlate(frames, frames, mode='full')
        g = R[frames.size]
        R = R[frames.size - 1:]
        if not m0:
            # estimate m0 (as the first zero crossing of R)
            m0 = np.argmin(np.diff(np.sign(R[1:]))) + 1
        if m0 == 1:
            m0 = R.size
        if M > R.size:
            M = R.size
        Gamma = np.zeros(M)
        CSum = np.cumsum(frames * frames)
        with warnings.catch_warnings():
            warnings.filterwarnings('error')
            try:
                Gamma[m0:M] = R[m0:M] / (np.sqrt([g * CSum[-m0:-M:-1]]) +
                                         np.finfo(float).eps)
            except Warning:
                pass
        # compute T0 and harmonic ratio:
        if np.isnan(Gamma).any():
            HR = np.nan
            f0 = np.nan
        else:
            blag = np.argmax(Gamma)
            HR = Gamma[blag]
            interp, HR = parabolic(Gamma, blag)
            if not interp:
                f0 = np.nan
                HR = np.nan
            else:
                # get fundamental frequency:
                f0 = samplerate / interp
                if f0 > samplerate / 2:
                    raise ValueError("F0 value ({0}) is above the nyquist rate "
                                     "({1}). This shouldn't happen...".format(
                                         f0, samplerate / 2))
        if HR >= 1:
            HR = 1
        return (f0, HR)

    output = np.apply_along_axis(per_frame_f0, 1, frames, m0, M)
    # output = np.empty((frames.shape[0], 2))
    # for ind, i in enumerate(frames):
    #     output[ind] = per_frame_f0(i, m0, M)
    return output
def get_activation(audio, sr, model_capacity='full', center=True, step_size=10,
                   verbose=1):
    """
    Parameters
    ----------
    audio : np.ndarray [shape=(N,) or (N, C)]
        The audio samples. Multichannel audio will be downmixed.
    sr : int
        Sample rate of the audio samples. The audio will be resampled if
        the sample rate is not 16 kHz, which is expected by the model.
    model_capacity : 'tiny', 'small', 'medium', 'large', or 'full'
        String specifying the model capacity; see the docstring of
        :func:`~crepe.core.build_and_load_model`
    center : boolean
        - If `True` (default), the signal `audio` is padded so that frame
          `D[:, t]` is centered at `audio[t * hop_length]`.
        - If `False`, then `D[:, t]` begins at `audio[t * hop_length]`
    step_size : int
        The step size in milliseconds for running pitch estimation.
    verbose : int
        Set the keras verbosity mode: 1 (default) will print out a progress
        bar during prediction, 0 will suppress all non-error printouts.

    Returns
    -------
    activation : np.ndarray [shape=(T, 360)]
        The raw activation matrix
    """
    model = build_and_load_model(model_capacity)

    if len(audio.shape) == 2:
        audio = audio.mean(1)  # make mono
    audio = audio.astype(np.float32)
    if sr != model_srate:
        # resample audio if necessary
        from resampy import resample
        audio = resample(audio, sr, model_srate)

    # pad so that frames are centered around their timestamps (i.e. first
    # frame is zero centered).
    if center:
        audio = np.pad(audio, 512, mode='constant', constant_values=0)

    # make 1024-sample frames of the audio with hop length of 10 milliseconds
    hop_length = int(model_srate * step_size / 1000)
    n_frames = 1 + int((len(audio) - 1024) / hop_length)
    frames = as_strided(audio, shape=(1024, n_frames),
                        strides=(audio.itemsize, hop_length * audio.itemsize))
    frames = frames.transpose()

    # normalize each frame -- this is expected by the model
    frames -= np.mean(frames, axis=1)[:, np.newaxis]
    frames /= np.std(frames, axis=1)[:, np.newaxis]

    # run prediction and convert the frequency bin weights to Hz
    return model.predict(frames, verbose=verbose)
def demosaic(self):
    """
    Perform a rudimentary `de-mosaic`_ of ``self.array``, returning the
    result as a new array. The result of the demosaic is *always* three
    dimensional, with the last dimension being the color planes (see
    *output_dims* parameter on the constructor).

    .. _de-mosaic: https://en.wikipedia.org/wiki/Demosaicing
    """
    if self._demo is None:
        # Construct 3D representation of Bayer data (if necessary)
        if self.output_dims == 2:
            array_3d = self._to_3d(self.array)
        else:
            array_3d = self.array
        # Construct representation of the bayer pattern
        bayer = np.zeros(array_3d.shape, dtype=np.uint8)
        ((ry, rx), (gy, gx), (Gy, Gx), (by, bx)) = PiBayerArray.BAYER_OFFSETS[
            self._header.bayer_order]
        bayer[ry::2, rx::2, 0] = 1  # Red
        bayer[gy::2, gx::2, 1] = 1  # Green
        bayer[Gy::2, Gx::2, 1] = 1  # Green
        bayer[by::2, bx::2, 2] = 1  # Blue
        # Allocate output array with same shape as data and set up some
        # constants to represent the weighted average window
        window = (3, 3)
        borders = (window[0] - 1, window[1] - 1)
        border = (borders[0] // 2, borders[1] // 2)
        # Pad out the data and the bayer pattern (np.pad is faster but
        # unavailable on the version of numpy shipped with Raspbian at the
        # time of writing)
        rgb = np.zeros((array_3d.shape[0] + borders[0],
                        array_3d.shape[1] + borders[1],
                        array_3d.shape[2]), dtype=array_3d.dtype)
        rgb[border[0]:rgb.shape[0] - border[0],
            border[1]:rgb.shape[1] - border[1], :] = array_3d
        bayer_pad = np.zeros((array_3d.shape[0] + borders[0],
                              array_3d.shape[1] + borders[1],
                              array_3d.shape[2]), dtype=bayer.dtype)
        bayer_pad[border[0]:bayer_pad.shape[0] - border[0],
                  border[1]:bayer_pad.shape[1] - border[1], :] = bayer
        bayer = bayer_pad
        # For each plane in the RGB data, construct a view over the plane
        # of 3x3 matrices. Then do the same for the bayer array and use
        # Einstein summation to get the weighted average
        self._demo = np.empty(array_3d.shape, dtype=array_3d.dtype)
        for plane in range(3):
            p = rgb[..., plane]
            b = bayer[..., plane]
            pview = as_strided(
                p, shape=(p.shape[0] - borders[0],
                          p.shape[1] - borders[1]) + window,
                strides=p.strides * 2)
            bview = as_strided(
                b, shape=(b.shape[0] - borders[0],
                          b.shape[1] - borders[1]) + window,
                strides=b.strides * 2)
            psum = np.einsum('ijkl->ij', pview)
            bsum = np.einsum('ijkl->ij', bview)
            self._demo[..., plane] = psum // bsum
    return self._demo
def strided_app(a, L, S):
    nrows = ((len(a) - L) // S) + 1
    n = a.strides[0]
    return as_strided(a, shape=(nrows, L), strides=(S * n, n))
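
# Usage sketch for strided_app above (illustrative; assumes numpy as np):
# windows of length L taken every S samples.
def _demo_strided_app():
    import numpy as np
    a = np.arange(10)
    w = strided_app(a, L=4, S=3)
    assert w.shape == (3, 4)
    assert np.array_equal(w[1], np.array([3, 4, 5, 6]))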
def grid_patches(img, patch_size=None, step_size=None, n_patches=None,
                 return_loc=False, scale=False):
    """ extract a grid of (overlapping) patches from an image as a 2D matrix
    of shape (n_rows*n_cols, n_patches) """
    from numpy.lib.stride_tricks import as_strided

    if not isinstance(img, (np.ndarray, np.core.memmap)):
        img = np.array(img)

    n_req_patches = n_patches

    # Check and get image dimensions
    if img.ndim == 3:
        (Ih, Iw, Ic) = img.shape
        patch_shape = (patch_size, patch_size, Ic)
    elif img.ndim == 2:
        (Ih, Iw) = img.shape
        img = img.reshape((Ih, Iw, -1))
        Ic = 1
        patch_shape = (patch_size, patch_size, Ic)
    else:
        raise ValueError('image must be a 2D or 3D np.array')

    if n_req_patches is not None:
        step_size = 1

    # index with a tuple of slices (a plain list of slices is an error in
    # recent numpy versions)
    slices = (slice(None, None, step_size),
              slice(None, None, step_size),
              slice(None, None, step_size))

    n_patches_h, n_patches_w = compute_n_patches(Ih, Iw, patch_size,
                                                 step_size, padding=False)
    n_patches = n_patches_h * n_patches_w

    patch_strides = img.strides
    indexing_strides = img[slices].strides
    # patch_indices_shape = np.array([n_patches_h, n_patches_w])
    patch_indices_shape = ((np.array(img.shape) - np.array(patch_shape)) //
                           np.array(step_size)) + 1

    shape = tuple(list(patch_indices_shape) + list(patch_shape))
    strides = tuple(list(indexing_strides) + list(patch_strides))

    patches = as_strided(img, shape=shape, strides=strides)

    if Ic == 1:
        patches = patches.reshape(
            (n_patches_h, n_patches_w, patch_size * patch_size))
        patches = patches.reshape((n_patches_h * n_patches_w, -1)).T
    else:
        patches = patches.reshape(
            (n_patches_h, n_patches_w, patch_size * patch_size * Ic))
        patches = patches.reshape((n_patches_h * n_patches_w, -1)).T

    if n_req_patches is not None:
        if n_req_patches < n_patches:
            if Ic <= 3:
                mean_intensity = np.mean(patches, axis=0)
                good_patches = np.arange(n_patches)
                patch_idxs = np.random.choice(good_patches, n_req_patches,
                                              replace=False)
            else:
                patch_idxs = np.random.choice(np.arange(n_patches),
                                              n_req_patches, replace=False)
            patches = patches[:, patch_idxs]

    return patches
def _update_dict(dictionary, Y, code, verbose=False, return_r2=False,
                 random_state=None):
    """Update the dense dictionary factor in place.

    Parameters
    ----------
    dictionary: array of shape (n_features, n_components)
        Value of the dictionary at the previous iteration.

    Y: array of shape (n_features, n_samples)
        Data matrix.

    code: array of shape (n_components, n_samples)
        Sparse coding of the data against which to optimize the dictionary.

    verbose:
        Degree of output the procedure will print.

    return_r2: bool
        Whether to compute and return the residual sum of squares
        corresponding to the computed solution.

    random_state: int or RandomState
        Pseudo number generator state used for random sampling.

    Returns
    -------
    dictionary: array of shape (n_features, n_components)
        Updated dictionary.
    """
    n_components = len(code)
    n_samples = Y.shape[0]
    random_state = check_random_state(random_state)
    # Residuals, computed 'in-place' for efficiency
    R = -np.dot(dictionary, code)
    R += Y
    R = np.asfortranarray(R)
    ger, = linalg.get_blas_funcs(('ger',), (dictionary, code))
    for k in range(n_components):
        # R <- 1.0 * U_k * V_k^T + R
        R = ger(1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
        dictionary[:, k] = np.dot(R, code[k, :].T)
        # Scale k'th atom
        atom_norm_square = np.dot(dictionary[:, k], dictionary[:, k])
        if atom_norm_square < 1e-20:
            if verbose == 1:
                sys.stdout.write("+")
                sys.stdout.flush()
            elif verbose:
                print("Adding new random atom")
            dictionary[:, k] = random_state.randn(n_samples)
            # Setting corresponding coefs to 0
            code[k, :] = 0.0
            dictionary[:, k] /= sqrt(np.dot(dictionary[:, k],
                                            dictionary[:, k]))
        else:
            dictionary[:, k] /= sqrt(atom_norm_square)
            # R <- -1.0 * U_k * V_k^T + R
            R = ger(-1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
    if return_r2:
        R **= 2
        # R is fortran-ordered. For numpy version < 1.6, sum does not
        # follow the quick striding first, and is thus inefficient on
        # fortran ordered data. We take a flat view of the data with no
        # striding
        R = as_strided(R, shape=(R.size,), strides=(R.dtype.itemsize,))
        R = np.sum(R)
        return dictionary, R
    return dictionary
def toeplitz(c, r=None):
    """
    Construct a Toeplitz matrix.

    The Toeplitz matrix has constant diagonals, with c as its first column
    and r as its first row. If r is not given, ``r == conjugate(c)`` is
    assumed.

    Parameters
    ----------
    c : array_like
        First column of the matrix. Whatever the actual shape of `c`, it
        will be converted to a 1-D array.
    r : array_like, optional
        First row of the matrix. If None, ``r = conjugate(c)`` is assumed;
        in this case, if c[0] is real, the result is a Hermitian matrix.
        r[0] is ignored; the first row of the returned matrix is
        ``[c[0], r[1:]]``. Whatever the actual shape of `r`, it will be
        converted to a 1-D array.

    Returns
    -------
    A : (len(c), len(r)) ndarray
        The Toeplitz matrix. Dtype is the same as ``(c[0] + r[0]).dtype``.

    See Also
    --------
    circulant : circulant matrix
    hankel : Hankel matrix
    solve_toeplitz : Solve a Toeplitz system.

    Notes
    -----
    The behavior when `c` or `r` is a scalar, or when `c` is complex and
    `r` is None, was changed in version 0.8.0. The behavior in previous
    versions was undocumented and is no longer supported.

    Examples
    --------
    >>> from scipy.linalg import toeplitz
    >>> toeplitz([1,2,3], [1,4,5,6])
    array([[1, 4, 5, 6],
           [2, 1, 4, 5],
           [3, 2, 1, 4]])
    >>> toeplitz([1.0, 2+3j, 4-1j])
    array([[ 1.+0.j,  2.-3.j,  4.+1.j],
           [ 2.+3.j,  1.+0.j,  2.-3.j],
           [ 4.-1.j,  2.+3.j,  1.+0.j]])

    """
    c = np.asarray(c).ravel()
    if r is None:
        r = c.conjugate()
    else:
        r = np.asarray(r).ravel()
    # Form a 1-D array containing a reversed c followed by r[1:] that could be
    # strided to give us toeplitz matrix.
    vals = np.concatenate((c[::-1], r[1:]))
    out_shp = len(c), len(r)
    n = vals.strides[0]
    return as_strided(vals[len(c) - 1:], shape=out_shp, strides=(-n, n)).copy()
def view_as_windows(arr_in, window_shape):
    """Rolling window view of the input n-dimensional array.

    Windows are overlapping views of the input array, with adjacent windows
    shifted by a single row or column (or an index of a higher dimension).

    Parameters
    ----------
    arr_in: ndarray
        The n-dimensional input array.
    window_shape: tuple
        Defines the shape of the elementary n-dimensional orthotope (better
        know as hyperrectangle [1]_) of the rolling window view.

    Returns
    -------
    arr_out: ndarray
        (rolling) window view of the input array.

    Notes
    -----
    One should be very careful with rolling views when it comes to memory
    usage. Indeed, although a 'view' has the same memory footprint as its
    base array, the actual array that emerges when this 'view' is used in a
    computation is generally a (much) larger array than the original,
    especially for 2-dimensional arrays and above.

    For example, let us consider a 3 dimensional array of size (100, 100,
    100) of ``float64``. This array takes about 8*100**3 Bytes for storage
    which is just 8 MB. If one decides to build a rolling view on this array
    with a window of (3, 3, 3) the hypothetical size of the rolling view (if
    one was to reshape the view for example) would be 8*(100-3+1)**3*3**3
    which is about 203 MB! The scaling becomes even worse as the dimension
    of the input array becomes larger.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Hyperrectangle

    Examples
    --------
    >>> import numpy as np
    >>> from skimage.util.shape import view_as_windows
    >>> A = np.arange(4*4).reshape(4,4)
    >>> A
    array([[ 0,  1,  2,  3],
           [ 4,  5,  6,  7],
           [ 8,  9, 10, 11],
           [12, 13, 14, 15]])
    >>> window_shape = (2, 2)
    >>> B = view_as_windows(A, window_shape)
    >>> B[0, 0]
    array([[0, 1],
           [4, 5]])
    >>> B[0, 1]
    array([[1, 2],
           [5, 6]])

    >>> A = np.arange(10)
    >>> A
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    >>> window_shape = (3,)
    >>> B = view_as_windows(A, window_shape)
    >>> B.shape
    (8, 3)
    >>> B
    array([[0, 1, 2],
           [1, 2, 3],
           [2, 3, 4],
           [3, 4, 5],
           [4, 5, 6],
           [5, 6, 7],
           [6, 7, 8],
           [7, 8, 9]])

    >>> A = np.arange(5*4).reshape(5, 4)
    >>> A
    array([[ 0,  1,  2,  3],
           [ 4,  5,  6,  7],
           [ 8,  9, 10, 11],
           [12, 13, 14, 15],
           [16, 17, 18, 19]])
    >>> window_shape = (4, 3)
    >>> B = view_as_windows(A, window_shape)
    >>> B.shape
    (2, 2, 4, 3)
    >>> B  # doctest: +NORMALIZE_WHITESPACE
    array([[[[ 0,  1,  2],
             [ 4,  5,  6],
             [ 8,  9, 10],
             [12, 13, 14]],
            [[ 1,  2,  3],
             [ 5,  6,  7],
             [ 9, 10, 11],
             [13, 14, 15]]],
           [[[ 4,  5,  6],
             [ 8,  9, 10],
             [12, 13, 14],
             [16, 17, 18]],
            [[ 5,  6,  7],
             [ 9, 10, 11],
             [13, 14, 15],
             [17, 18, 19]]]])
    """

    # -- basic checks on arguments
    if not isinstance(arr_in, np.ndarray):
        raise TypeError("'arr_in' must be a numpy ndarray")

    if not isinstance(window_shape, tuple):
        raise TypeError("'window_shape' must be a tuple")

    if not (len(window_shape) == arr_in.ndim):
        raise ValueError("'window_shape' is incompatible with 'arr_in.shape'")

    arr_shape = np.array(arr_in.shape)
    window_shape = np.array(window_shape, dtype=arr_shape.dtype)

    if ((arr_shape - window_shape) < 0).any():
        raise ValueError("'window_shape' is too large")

    if ((window_shape - 1) < 0).any():
        raise ValueError("'window_shape' is too small")

    # -- build rolling window view
    arr_in = np.ascontiguousarray(arr_in)

    new_shape = tuple(arr_shape - window_shape + 1) + tuple(window_shape)
    new_strides = arr_in.strides + arr_in.strides

    arr_out = as_strided(arr_in, shape=new_shape, strides=new_strides)

    return arr_out
# Z_stop = (P+Rs//2)+Rs%2
# R_start = (R_start - np.minimum(Z_start,0)).tolist()
# Z_start = (np.maximum(Z_start,0)).tolist()
# R_stop = np.maximum(R_start, (R_stop - np.maximum(Z_stop-Zs,0))).tolist()
# Z_stop = (np.minimum(Z_stop,Zs)).tolist()

# r = [slice(start,stop) for start,stop in zip(R_start,R_stop)]
# z = [slice(start,stop) for start,stop in zip(Z_start,Z_stop)]
# R[r] = Z[z]
# print (Z)
# print (R)

# 81. Given the array Z = [1,2,3,...,14], how do you build the array
# R = [[1,2,3,4], [2,3,4,5], [3,4,5,6], ..., [11,12,13,14]]?
x81 = np.arange(1, 15, dtype=np.uint32)
# The hard-coded 4-byte strides match the uint32 itemsize.
x81_R = stride_tricks.as_strided(x81, (11, 4), (4, 4))
print(x81_R)

# 82. Compute the rank of a matrix
x82_Z = np.arange(16).reshape((4, 4))
x82_Z -= 1
print(x82_Z)
U, S, V = np.linalg.svd(x82_Z)
rank = np.sum(S > 1e-10)
print(rank)

# 83. How do you find the most frequent value in an array?
x83_Z = np.random.randint(0, 10, 50)
print(np.bincount(x83_Z).argmax())

# 84. Extract all contiguous 3x3 blocks from a 10x10 matrix
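# -- Exercise 84 ends without code above; a hedged sketch of the standard
# solution (it mirrors the Russian variant later in this file), with names
# following the x8N_ convention of the surrounding snippets: the first two
# strides step the block origin, the last two reuse the original strides.
x84_Z = np.random.randint(0, 5, (10, 10))
n = 3
i = 1 + (x84_Z.shape[0] - n)
j = 1 + (x84_Z.shape[1] - n)
x84_C = stride_tricks.as_strided(x84_Z, shape=(i, j, n, n),
                                 strides=x84_Z.strides + x84_Z.strides)
print(x84_C.shape)  # (8, 8, 3, 3)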
def sliding_window(data, sf, window, step=None, axis=-1):
    """Calculate a sliding window of a 1D or 2D EEG signal.

    .. versionadded:: 0.1.7

    Parameters
    ----------
    data : numpy array
        The 1D or 2D EEG data.
    sf : float
        The sampling frequency of ``data``.
    window : int
        The sliding window length, in seconds.
    step : int
        The sliding window step length, in seconds.
        If None (default), ``step`` is set to ``window``, which results in
        no overlap between the sliding windows.
    axis : int
        The axis to slide over. Defaults to the last axis.

    Returns
    -------
    times : numpy array
        Time vector, in seconds, corresponding to the START of each sliding
        epoch in ``strided``.
    strided : numpy array
        A matrix where each row along the last dimension holds one instance
        of the sliding window, shape (n_epochs, ..., n_samples).

    Notes
    -----
    This is a wrapper around the
    :py:func:`numpy.lib.stride_tricks.as_strided` function.

    Examples
    --------
    With a 1-D array

    >>> import numpy as np
    >>> from yasa import sliding_window
    >>> data = np.arange(20)
    >>> times, epochs = sliding_window(data, sf=1, window=5)
    >>> times
    array([ 0.,  5., 10., 15.])
    >>> epochs
    array([[ 0,  1,  2,  3,  4],
           [ 5,  6,  7,  8,  9],
           [10, 11, 12, 13, 14],
           [15, 16, 17, 18, 19]])

    >>> sliding_window(data, sf=1, window=5, step=2)[1]
    array([[ 0,  1,  2,  3,  4],
           [ 2,  3,  4,  5,  6],
           [ 4,  5,  6,  7,  8],
           [ 6,  7,  8,  9, 10],
           [ 8,  9, 10, 11, 12],
           [10, 11, 12, 13, 14],
           [12, 13, 14, 15, 16],
           [14, 15, 16, 17, 18]])

    >>> sliding_window(data, sf=1, window=11)[1]
    array([[ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10]])

    With a N-D array

    >>> np.random.seed(42)
    >>> # 4 channels x 20 samples
    >>> data = np.random.randint(-100, 100, size=(4, 20))
    >>> epochs = sliding_window(data, sf=1, window=10)[1]
    >>> epochs.shape  # shape (n_epochs, n_channels, n_samples)
    (2, 4, 10)
    >>> epochs
    array([[[  2,  79,  -8, -86,   6, -29,  88, -80,   2,  21],
            [-13,  57, -63,  29,  91,  87, -80,  60, -43, -79],
            [-50,   7, -46, -37,  30, -50,  34, -80, -28,  66],
            [ -9,  10,  87,  98,  71, -93,  74, -66, -20,  63]],
           [[-26, -13,  16,  -1,   3,  51,  30,  49, -48, -99],
            [-12, -52, -42,  69,  87, -86,  89,  89,  74,  89],
            [-83,  31, -12, -41, -87, -92, -11, -48,  29, -17],
            [-51,   3,  31, -99,  33, -47,   5, -97, -47,  90]]])
    """
    from numpy.lib.stride_tricks import as_strided

    assert axis <= data.ndim, "Axis value out of range."
    assert isinstance(sf, (int, float)), 'sf must be int or float'
    assert isinstance(window, (int, float)), 'window must be int or float'
    assert isinstance(step, (int, float, type(None))), ('step must be int, '
                                                        'float or None.')
    if isinstance(sf, float):
        assert sf.is_integer(), 'sf must be a whole number.'
        sf = int(sf)
    assert isinstance(axis, int), 'axis must be int.'

    # window and step in samples instead of points
    window *= sf
    step = window if step is None else step * sf

    if isinstance(window, float):
        assert window.is_integer(), 'window * sf must be a whole number.'
        window = int(window)

    if isinstance(step, float):
        assert step.is_integer(), 'step * sf must be a whole number.'
        step = int(step)

    assert step >= 1, "Stepsize may not be zero or negative."
    # A window exactly as long as the axis yields a single epoch.
    assert window <= data.shape[axis], ("Sliding window size may not exceed "
                                        "size of selected axis")

    # Define output shape
    shape = list(data.shape)
    shape[axis] = np.floor(data.shape[axis] / step - window / step + 1).astype(int)
    shape.append(window)

    # Calculate strides and time vector
    strides = list(data.strides)
    strides[axis] *= step
    strides.append(data.strides[axis])
    strided = as_strided(data, shape=shape, strides=strides)
    t = np.arange(strided.shape[-2]) * (step / sf)

    # Swap axis: n_epochs, ..., n_samples
    if strided.ndim > 2:
        strided = np.rollaxis(strided, -2, 0)
    return t, strided
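# -- Hedged usage check for sliding_window above: overlapping epochs are
# views into the same buffer, so the striding itself duplicates no samples.
import numpy as np

data = np.arange(20)
times, epochs = sliding_window(data, sf=1, window=5, step=2)
print(np.shares_memory(data, epochs))  # True: epochs alias data
print(times)                           # [ 0.  2.  4.  6.  8. 10. 12. 14.]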
def rag_boundary(labels, edge_map, connectivity=2):
    """ Compute RAG based on region boundaries

    Given an image's initial segmentation and its edge map this method
    constructs the corresponding Region Adjacency Graph (RAG). Each node in
    the RAG represents a set of pixels within the image with the same label
    in `labels`. The weight between two adjacent regions is the average
    value in `edge_map` along their boundary.

    Parameters
    ----------
    labels : ndarray
        The labelled image.
    edge_map : ndarray
        This should have the same shape as that of `labels`. For all pixels
        along the boundary between 2 adjacent regions, the average value of
        the corresponding pixels in `edge_map` is the edge weight between
        them.
    connectivity : int, optional
        Pixels with a squared distance less than `connectivity` from each
        other are considered adjacent. It can range from 1 to `labels.ndim`.
        Its behavior is the same as the `connectivity` parameter in
        `scipy.ndimage.generate_binary_structure`.

    Examples
    --------
    >>> from skimage import data, segmentation, filters, color
    >>> from skimage.future import graph
    >>> img = data.chelsea()
    >>> labels = segmentation.slic(img)
    >>> edge_map = filters.sobel(color.rgb2gray(img))
    >>> rag = graph.rag_boundary(labels, edge_map)

    """
    conn = ndi.generate_binary_structure(labels.ndim, connectivity)
    eroded = ndi.grey_erosion(labels, footprint=conn)
    dilated = ndi.grey_dilation(labels, footprint=conn)
    boundaries0 = (eroded != labels)
    boundaries1 = (dilated != labels)
    labels_small = np.concatenate((eroded[boundaries0], labels[boundaries1]))
    labels_large = np.concatenate((labels[boundaries0], dilated[boundaries1]))
    n = np.max(labels_large) + 1

    # use a dummy broadcast array as data for RAG
    ones = as_strided(np.ones((1,), dtype=float), shape=labels_small.shape,
                      strides=(0,))
    count_matrix = sparse.coo_matrix((ones, (labels_small, labels_large)),
                                     dtype=np.int_, shape=(n, n)).tocsr()
    data = np.concatenate((edge_map[boundaries0], edge_map[boundaries1]))

    data_coo = sparse.coo_matrix((data, (labels_small, labels_large)))
    graph_matrix = data_coo.tocsr()
    graph_matrix.data /= count_matrix.data

    rag = RAG()
    rag.add_weighted_edges_from(_edge_generator_from_csr(graph_matrix),
                                weight='weight')
    rag.add_weighted_edges_from(_edge_generator_from_csr(count_matrix),
                                weight='count')

    for n in rag.nodes():
        rag.node[n].update({'labels': [n]})

    return rag
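# -- The zero-stride trick used for `ones` above, shown in isolation (a
# hedged sketch, not skimage API): one stored float64 masquerades as an
# arbitrarily long constant array, so counting boundary pixels allocates
# almost nothing.
import numpy as np
from numpy.lib.stride_tricks import as_strided

x = np.ones((1,))                          # 8 bytes of real storage
ones = as_strided(x, shape=(10**6,), strides=(0,))
print(np.shares_memory(x, ones))           # True: every element aliases x[0]
print(ones.sum())                          # 1000000.0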
def stride_matrix(vector, n_lin, n_col, hop):
    # Each of the n_lin rows is a length-n_col window into `vector`,
    # advanced by `hop` samples per row; no data is copied.
    data_matrix = stride_tricks.as_strided(
        vector, shape=(n_lin, n_col),
        strides=(vector.strides[0] * hop, vector.strides[0]))
    return data_matrix
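# -- Hedged usage sketch for stride_matrix above (assumes numpy and
# numpy.lib.stride_tricks are imported in this module; the caller must keep
# the last window in bounds: (n_lin - 1) * hop + n_col <= len(vector)).
import numpy as np

v = np.arange(10)
print(stride_matrix(v, n_lin=4, n_col=4, hop=2))
# [[0 1 2 3]
#  [2 3 4 5]
#  [4 5 6 7]
#  [6 7 8 9]]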
# Negate a float array in place
np.negative(Z, out=Z)

# Compute the rank of a matrix
Z = np.random.uniform(0, 1, (10, 10))
rank = np.linalg.matrix_rank(Z)

# Find the most frequent value in an array
Z = np.random.randint(0, 10, 50)
print(np.bincount(Z).argmax(), '\n')

# Extract all contiguous 3x3 blocks from a 10x10 matrix
Z = np.random.randint(0, 5, (10, 10))
n = 3
i = 1 + (Z.shape[0] - n)
j = 1 + (Z.shape[1] - n)
C = stride_tricks.as_strided(Z, shape=(i, j, n, n), strides=Z.strides + Z.strides)
print(C, '\n')

# Consider a set of p matrices of shape (n,n) and a set of p vectors of
# shape (n,1). Compute the sum of the p matrix products at once (the
# result has shape (n,1))
p, n = 10, 20
M = np.ones((p, n, n))
V = np.ones((p, n, 1))
S = np.tensordot(M, V, axes=[[0, 2], [0, 1]])
print(S, '\n')

# Given a 16x16 array, compute the block sum over 4x4 blocks
Z = np.ones((16, 16))
k = 4
S = np.add.reduceat(np.add.reduceat(Z, np.arange(0, Z.shape[0], k), axis=0),
                    np.arange(0, Z.shape[1], k), axis=1)
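# -- A hedged stride-based alternative to the reduceat block sum above:
# since 16 is a multiple of k=4, the non-overlapping 4x4 blocks can be
# exposed as a (4, 4, 4, 4) view and summed over the two window axes.
blocks = stride_tricks.as_strided(
    Z, shape=(Z.shape[0] // k, Z.shape[1] // k, k, k),
    strides=(Z.strides[0] * k, Z.strides[1] * k, Z.strides[0], Z.strides[1]))
S2 = blocks.sum(axis=(2, 3))
print(np.allclose(S, S2))  # True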
def frame(y, frame_length=2048, hop_length=512):
    '''Slice a time series into overlapping frames.

    This implementation uses low-level stride manipulation to avoid
    redundant copies of the time series data.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)]
        Time series to frame. Must be one-dimensional and contiguous
        in memory.

    frame_length : int > 0 [scalar]
        Length of the frame in samples

    hop_length : int > 0 [scalar]
        Number of samples to hop between frames

    Returns
    -------
    y_frames : np.ndarray [shape=(frame_length, N_FRAMES)]
        An array of frames sampled from `y`:
        `y_frames[i, j] == y[j * hop_length + i]`

    Raises
    ------
    ParameterError
        If `y` is not contiguous in memory, framing is invalid.
        See `np.ascontiguousarray()` for details.

        If `hop_length < 1`, frames cannot advance.

    Examples
    --------
    Extract 2048-sample frames from `y` with a hop of 64 samples per frame

    >>> y, sr = librosa.load(librosa.util.example_audio_file())
    >>> librosa.util.frame(y, frame_length=2048, hop_length=64)
    array([[ -9.216e-06,   7.710e-06, ...,  -2.117e-06,  -4.362e-07],
           [  2.518e-06,  -6.294e-06, ...,  -1.775e-05,  -6.365e-06],
           ...,
           [ -7.429e-04,   5.173e-03, ...,   1.105e-05,  -5.074e-06],
           [  2.169e-03,   4.867e-03, ...,   3.666e-06,  -5.571e-06]],
          dtype=float32)
    '''
    if hop_length < 1:
        raise ParameterError('Invalid hop_length: {:d}'.format(hop_length))

    if not y.flags['C_CONTIGUOUS']:
        raise ParameterError('Input buffer must be contiguous.')

    valid_audio(y)

    # Compute the number of frames that will fit. The end may get truncated.
    n_frames = 1 + int((len(y) - frame_length) / hop_length)

    if n_frames < 1:
        raise ParameterError('Buffer is too short (n={:d})'
                             ' for frame_length={:d}'.format(len(y),
                                                             frame_length))
    # Vertical stride is one sample
    # Horizontal stride is `hop_length` samples
    y_frames = as_strided(y, shape=(frame_length, n_frames),
                          strides=(y.itemsize, hop_length * y.itemsize))

    return y_frames
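# -- Hedged layout check for frame above (calling as_strided directly to
# sidestep librosa's validation helpers): frames are columns, i.e.
# y_frames[i, j] == y[j * hop_length + i].
import numpy as np
from numpy.lib.stride_tricks import as_strided

y = np.arange(8, dtype=np.float32)
f = as_strided(y, shape=(4, 3), strides=(y.itemsize, 2 * y.itemsize))
print(f)
# [[0. 2. 4.]
#  [1. 3. 5.]
#  [2. 4. 6.]
#  [3. 5. 7.]]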
def _create_grid(nrow, ncol):
    """ Create bounds for vtk rendering

    Parameters
    ----------
    nrow : int or array-like
        Number of rows. If array-like, must be an array with values in
        ascending order between 0 and 1.
    ncol : int or array-like
        Number of columns. If array-like, must be an array with values in
        ascending order between 0 and 1.

    Returns
    -------
    grid : ndarray, shape = (nrow, ncol, 4)
        Grid for vtk rendering.

    Examples
    --------
    >>> _create_grid(1, 2)
    array([[[0. , 0. , 0.5, 1. ],
            [0.5, 0. , 1. , 1. ]]])

    >>> _create_grid(1, [0, .5, 1])
    array([[[0. , 0. , 0.5, 1. ],
            [0.5, 0. , 1. , 1. ]]])

    >>> _create_grid(1, [0, .5, .9])
    array([[[0. , 0. , 0.5, 1. ],
            [0.5, 0. , 0.9, 1. ]]])

    >>> _create_grid(1, [0, .5, .9, 1])
    array([[[0. , 0. , 0.5, 1. ],
            [0.5, 0. , 0.9, 1. ],
            [0.9, 0. , 1. , 1. ]]])

    >>> _create_grid(2, [.5, .6, .7])
    array([[[0.5, 0.5, 0.6, 1. ],
            [0.6, 0.5, 0.7, 1. ]],
           [[0.5, 0. , 0.6, 0.5],
            [0.6, 0. , 0.7, 0.5]]])
    """
    if not isinstance(nrow, int):
        nrow = np.atleast_1d(nrow)
        if nrow.size < 2 or np.any(np.sort(nrow) != nrow) or \
                nrow[0] < 0 or nrow[-1] > 1:
            raise ValueError('Incorrect row values.')

    if not isinstance(ncol, int):
        ncol = np.atleast_1d(ncol)
        if ncol.size < 2 or np.any(np.sort(ncol) != ncol) or \
                ncol[0] < 0 or ncol[-1] > 1:
            raise ValueError('Incorrect column values.')

    if isinstance(ncol, np.ndarray):
        x_min, x_max = ncol[:-1], ncol[1:]
        ncol = x_min.size
    else:
        dx = 1 / ncol
        x_min = np.arange(0, 1, dx)
        x_max = x_min + dx

    if isinstance(nrow, np.ndarray):
        y_min, y_max = nrow[:-1], nrow[1:]
        nrow = y_min.size
    else:
        dy = 1 / nrow
        y_min = np.arange(0, 1, dy)
        y_max = y_min + dy

    y_min = np.repeat(y_min, ncol)[::-1]
    y_max = np.repeat(y_max, ncol)[::-1]
    x_min = np.tile(x_min, nrow)
    x_max = np.tile(x_max, nrow)

    g = np.column_stack([x_min, y_min, x_max, y_max])
    strides = (4 * g.itemsize * ncol, 4 * g.itemsize, g.itemsize)
    return as_strided(g, shape=(nrow, ncol, 4), strides=strides)
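# -- Hedged note on the stride trick closing _create_grid: `g` is a
# C-contiguous (nrow * ncol, 4) array, so the hand-computed strides are
# exactly those of a plain reshape to (nrow, ncol, 4).
import numpy as np
from numpy.lib.stride_tricks import as_strided

g = np.arange(2 * 3 * 4, dtype=float).reshape(2 * 3, 4)  # stand-in for the column_stack result
strides = (4 * g.itemsize * 3, 4 * g.itemsize, g.itemsize)
v = as_strided(g, shape=(2, 3, 4), strides=strides)
print(np.array_equal(v, g.reshape(2, 3, 4)))  # True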
def firstpos(x, N=2):
    # View of x with the last N axes dropped, indexing them at 0;
    # for N=2 this is equivalent to x[..., 0, 0].
    return as_strided(x, x.shape[:-N], x.strides[:-N])
def firstelement(x, N=2):
    # View keeping only the last N axes, indexing the leading ones at 0;
    # equivalent to x[(0,) * (x.ndim - N)], e.g. x[0, 0] for a 4-D x.
    return as_strided(x, x.shape[-N:], x.strides[-N:])
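# -- Hedged check that the two helpers above are plain index views:
import numpy as np

x = np.arange(2 * 3 * 4 * 5).reshape(2, 3, 4, 5)
print(np.array_equal(firstpos(x), x[..., 0, 0]))   # True
print(np.array_equal(firstelement(x), x[0, 0]))    # True
print(np.shares_memory(firstpos(x), x))            # True: a view, not a copy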
def toGrayVirtual(g):
    # Present a 2-D grayscale image as a 3-channel image without copying:
    # the zero channel stride makes all three channels alias the same data.
    yres, xres = g.shape
    s0, s1 = g.strides
    G = as_strided(g, shape=(yres, xres, 3), strides=(s0, s1, 0))
    return G
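# -- Hedged usage sketch for toGrayVirtual above: every channel is the same
# underlying pixel, so treat the view as read-only; NumPy does not know the
# channels alias, and in-place writes through the view are unsafe.
import numpy as np

img = np.arange(6, dtype=np.uint8).reshape(2, 3)
rgb = toGrayVirtual(img)
print(rgb.shape)                                 # (2, 3, 3)
print(np.array_equal(rgb[..., 0], rgb[..., 2]))  # True: channels alias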
def enframe(y, frame_length=2048, hop_length=512):
    # Frame-major variant of `frame` above: rows are frames, so
    # y_frames[j, i] == y[j * hop_length + i].
    n_frames = 1 + int((len(y) - frame_length) / hop_length)
    y_frames = as_strided(y, shape=(n_frames, frame_length),
                          strides=(y.itemsize * hop_length, y.itemsize))
    return y_frames
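# -- Hedged equivalence check: enframe produces the transpose of the
# column-major layout used by `frame` above.
import numpy as np
from numpy.lib.stride_tricks import as_strided

y = np.arange(8, dtype=np.float32)
print(np.array_equal(enframe(y, frame_length=4, hop_length=2),
                     as_strided(y, shape=(4, 3),
                                strides=(y.itemsize, 2 * y.itemsize)).T))
# True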