def mix2d(a):
    """
    Calculate a DST-DCT-hybrid transform
    (DST in first direction, DCT in second one),
    jury-rigged from padded rFFT
    (anti-symmetrically in first direction, symmetrically in second direction).
    """
    # NOTE: LCODE 3D uses x as the first direction, thus the confusion below.
    M, N = a.shape
    #                                      /(0  1  2  0)-2 -1 \     +---->  x
    #    / 1  2 \                         | (0  3  4  0)-4 -3  |    |      (M)
    #    | 3  4 |    mixed-symmetrically  | (0  5  6  0)-6 -5  |    |
    #    | 5  6 |         padded to       | (0  7  8  0)-8 -7  |    v
    #    \ 7  8 /                         |  0 +5 +6  0 -6 -5  |
    #                                      \ 0 +3 +4  0 -4 -3 /    y (N)
    p = cp.zeros((2 * M + 2, 2 * N - 2))  # wider than before
    p[1:M + 1, :N] = a
    p[M + 2:2 * M + 2, :N] = -cp.flipud(a)  # flip to right on drawing above
    p[1:M + 1, N - 1:2 * N - 2] = cp.fliplr(a)[:, :-1]  # flip down on drawing above
    p[M + 2:2 * M + 2, N - 1:2 * N - 2] = -cp.flipud(cp.fliplr(a))[:, :-1]
    # Note: the returned array is wider than the input array, it is padded
    # with zeroes (depicted above as a square region marked with round braces).
    return -cp.fft.rfft2(p)[:M + 2, :N].imag  # FFT, cut a corner with 0s, -imag
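
# Sanity-check sketch for mix2d (my addition, not part of the original code):
# assuming SciPy's unnormalized type-1 conventions, the interior rows of the
# result should equal a DST-I along axis 0 combined with a DCT-I along axis 1,
# while the first and last rows of the padded output stay (numerically) zero.
import cupy as cp
import numpy as np
from scipy.fft import dct, dst

a = cp.random.rand(6, 5)
ref = dst(dct(cp.asnumpy(a), type=1, axis=1), type=1, axis=0)
out = cp.asnumpy(mix2d(a))                    # shape (M + 2, N)
np.testing.assert_allclose(out[1:-1, :], ref, atol=1e-9)
print(abs(out[0]).max(), abs(out[-1]).max())  # both ~0 (the zero-padding rows)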
def test_orientation():
    orient = regionprops(SAMPLE)[0].orientation
    # determined with MATLAB
    assert_almost_equal(orient, -1.4663278802756865)
    # test diagonal regions
    diag = cp.eye(10, dtype=int)
    orient_diag = regionprops(diag)[0].orientation
    assert_almost_equal(orient_diag, -math.pi / 4)
    orient_diag = regionprops(cp.flipud(diag))[0].orientation
    assert_almost_equal(orient_diag, math.pi / 4)
    orient_diag = regionprops(cp.fliplr(diag))[0].orientation
    assert_almost_equal(orient_diag, math.pi / 4)
    orient_diag = regionprops(cp.fliplr(cp.flipud(diag)))[0].orientation
    assert_almost_equal(orient_diag, -math.pi / 4)
def dst2d(a):
    """
    Calculate DST-Type1-2D, jury-rigged from anti-symmetrically-padded rFFT.
    """
    assert a.shape[0] == a.shape[1]
    N = a.shape[0]
    #                                      / 0  0  0  0  0  0 \
    #    0  0  0  0                       | 0 /1  2\ 0 -2 -1  |
    #    0 /1  2\ 0   anti-symmetrically  | 0 \3  4/ 0 -4 -3  |
    #    0 \3  4/ 0        padded to      | 0  0  0  0  0  0  |
    #    0  0  0  0                       | 0 -3 -4  0 +4 +3  |
    #                                      \ 0 -1 -2  0 +2 +1 /
    p = cp.zeros((2 * N + 2, 2 * N + 2))
    p[1:N + 1, 1:N + 1], p[1:N + 1, N + 2:] = a, -cp.fliplr(a)
    p[N + 2:, 1:N + 1], p[N + 2:, N + 2:] = -cp.flipud(a), +cp.fliplr(cp.flipud(a))
    # after padding: rFFT-2D, cut out the top-left segment, take -real part
    return -cp.fft.rfft2(p)[1:N + 1, 1:N + 1].real
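
# Sanity-check sketch for dst2d (my addition, assuming SciPy's unnormalized
# type-1 convention): the jury-rigged transform should match scipy.fft.dstn.
import cupy as cp
import numpy as np
from scipy.fft import dstn

a = cp.random.rand(7, 7)
np.testing.assert_allclose(cp.asnumpy(dst2d(a)),
                           dstn(cp.asnumpy(a), type=1), atol=1e-9)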
def dct2d(a):
    """
    Calculate DCT-Type1-2D, jury-rigged from symmetrically-padded rFFT.
    """
    assert a.shape[0] == a.shape[1]
    N = a.shape[0]
    #                                   //1 2 3 4\ 3 2 \
    #    /1 2 3 4\                     | |5 6 7 8| 7 6  |
    #    |5 6 7 8|     symmetrically   | |9 A B C| B A  |
    #    |9 A B C|       padded to     | \D E F G/ F E  |
    #    \D E F G/                     |  9 A B C  B A  |
    #                                   \ 5 6 7 8  7 6 /
    p = cp.zeros((2 * N - 2, 2 * N - 2))
    p[:N, :N] = a
    p[N:, :N] = cp.flipud(a)[1:-1, :]  # flip to right on drawing above
    p[:N, N:] = cp.fliplr(a)[:, 1:-1]  # flip down on drawing above
    p[N:, N:] = cp.flipud(cp.fliplr(a))[1:-1, 1:-1]  # bottom-right corner
    # after padding: rFFT-2D, cut out the top-left segment, take -real part
    return -cp.fft.rfft2(p)[:N, :N].real
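
# Sketch of a convention-independent check for dct2d (my addition): DCT-Type1
# is an involution up to a scale factor of 2 * (N - 1) per axis, so applying
# dct2d twice should reproduce the input times (2 * (N - 1))**2 regardless of
# the overall sign convention.
import cupy as cp

a = cp.random.rand(7, 7)
N = a.shape[0]
assert cp.allclose(dct2d(dct2d(a)), (2 * (N - 1)) ** 2 * a)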
def filter_traces(s, low_cutoff, high_cutoff, order=3, cmr=False,
                  sample_chunk_size=65536, n_samples=-1):
    if n_samples == -1:
        n_samples = s.shape[1]
    # Cutoffs are divided by the Nyquist frequency (0.5 * fs = 10 kHz)
    sos = butter(order, [low_cutoff / 10000, high_cutoff / 10000],
                 'bandpass', output='sos')
    n_sample_chunks = n_samples / sample_chunk_size
    chunks = np.hstack(
        (np.arange(n_sample_chunks, dtype=int) * sample_chunk_size, n_samples))
    output = np.empty((s.shape[0], n_samples))
    overlap = sample_chunk_size
    chunk = np.zeros((s.shape[0], sample_chunk_size + overlap))
    # Seed the overlap with the first sample to suppress the filter's startup transient
    chunk[:, :overlap] = np.array([s[:, 0], ] * overlap).transpose()
    for i in trange(len(chunks) - 1, ncols=100, position=0, leave=True):
        idx_from = chunks[i]
        idx_to = chunks[i + 1]
        chunk = chunk[:, :(idx_to - idx_from + overlap)]
        chunk[:, overlap:] = s[:, idx_from:idx_to]
        cusig = cupy.asarray(chunk, dtype=cupy.float32)
        cusig = cusig - cupy.mean(cusig)
        if cmr:
            # Common median referencing across channels
            cusig = cusig - cuda_median(cusig, 0)
        # Filter forward, then filter the time-reversed signal and flip back:
        # a forward-backward pass with (approximately) zero phase
        cusig = cusignal.sosfilt(sos, cusig)
        cusig = cupy.fliplr(cusig)
        cusig = cusignal.sosfilt(sos, cusig)
        cusig = cupy.fliplr(cusig)
        output[:, idx_from:idx_to] = cupy.asnumpy(cusig[:, overlap:])
        # Carry this chunk's tail over as the next chunk's overlap
        chunk[:, :overlap] = chunk[:, -overlap:]
    return output
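
# CPU-only sketch (my addition) of the flip-filter-flip pattern used above: it
# is a forward-backward pass, i.e. (approximately) zero-phase filtering in the
# spirit of sosfiltfilt. The sampling rate fs = 20000 is an assumption,
# consistent with the cutoff / 10000 = cutoff / (0.5 * fs) normalisation above.
import numpy as np
from scipy.signal import butter, sosfilt

fs = 20000
sos = butter(3, [300 / (0.5 * fs), 3000 / (0.5 * fs)], 'bandpass', output='sos')
t = np.arange(0, 1.0, 1 / fs)
x = np.sin(2 * np.pi * 500 * t)                       # 500 Hz tone in the passband

one_pass = sosfilt(sos, x)                            # phase-shifted
two_pass = sosfilt(sos, sosfilt(sos, x)[::-1])[::-1]  # forward-backward: ~zero phase

# Away from the edges the two-pass output tracks the input closely,
# while the single-pass output is visibly delayed.
mid = slice(len(x) // 4, 3 * len(x) // 4)
print(abs(two_pass[mid] - x[mid]).max(), abs(one_pass[mid] - x[mid]).max())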
def convolve2dcp(image, kernel):
    # This function takes an image and a 3x3 kernel
    # and returns their convolution.
    # Args:
    #   image: a cupy array of size [image_height, image_width].
    #   kernel: a cupy array of size [kernel_height, kernel_width] (3x3 assumed).
    # Returns:
    #   a cupy array of size [image_height, image_width] (convolution output).
    kernel = cp.flipud(cp.fliplr(kernel))  # Flip the kernel
    output = cp.zeros_like(image)  # convolution output
    # Add zero padding to the input image
    image_padded = cp.zeros((image.shape[0] + 2, image.shape[1] + 2))
    image_padded[1:-1, 1:-1] = image
    for x in range(image.shape[1]):  # Loop over every pixel of the image
        for y in range(image.shape[0]):
            # element-wise multiplication of the kernel and the image
            output[y, x] = (kernel * image_padded[y:y + 3, x:x + 3]).sum()
    return output
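
# Usage sketch (my addition): for a 3x3 kernel, convolve2dcp should match
# SciPy's zero-padded "same" convolution computed on the CPU.
import cupy as cp
import numpy as np
from scipy.signal import convolve2d

img = cp.random.rand(16, 16)
ker = cp.random.rand(3, 3)
gpu = cp.asnumpy(convolve2dcp(img, ker))
cpu = convolve2d(cp.asnumpy(img), cp.asnumpy(ker),
                 mode='same', boundary='fill', fillvalue=0)
np.testing.assert_allclose(gpu, cpu, rtol=1e-9, atol=1e-12)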
def correct(img, shift=True):
    """Correct the orientation of the obtained image."""
    if shift:
        return cp.fliplr(cp.flipud(cp.fft.fftshift(img)))
    else:
        return cp.fliplr(cp.flipud(img))
def filter_experiment_local(in_recording, stim_recording, low_cutoff, high_cutoff,
                            order=3, cmr=False, sample_chunk_size=65536,
                            n_samples=-1, ram_copy=False, whiten=False):
    channels = stim_recording.channels
    amps = stim_recording.amps
    scales = 1000 / amps
    n_channels = stim_recording.channels.shape[0]
    # Optionally save file into a tmpfs partition for processing
    if ram_copy:
        in_ramfile = RamFile(in_recording.filepath, 'r')
        in_filepath = in_ramfile.ram_filepath
        out_ramfile = RamFile(in_recording.filtered_filepath, 'w')
        out_filepath = out_ramfile.ram_filepath
    else:
        in_filepath = in_recording.filepath
        out_filepath = in_recording.filtered_filepath
    in_fid = h5py.File(in_filepath, 'r')
    # Create output file
    out_fid = h5py.File(out_filepath, 'w')
    if n_samples == -1:
        n_samples = in_fid['sig'].shape[1]
    out_mapping = in_fid['mapping'][stim_recording.connected_in_mapping]
    for i, m in enumerate(out_mapping):
        m[0] = i
    out_fid['mapping'] = out_mapping
    in_fid.copy('/message_0', out_fid)
    in_fid.copy('/proc0', out_fid)
    in_fid.copy('/settings', out_fid)
    in_fid.copy('/time', out_fid)
    in_fid.copy('/version', out_fid)
    if 'bits' in in_fid.keys():
        in_fid.copy('/bits', out_fid)
    out_fid.create_dataset("sig", (n_channels, n_samples), dtype='float32')
    # Create filter: cutoffs are divided by the Nyquist frequency (0.5 * fs = 10 kHz)
    sos = butter(order, [low_cutoff / 10000, high_cutoff / 10000],
                 'bandpass', output='sos')
    # Create chunks
    n_sample_chunks = n_samples / sample_chunk_size
    sample_chunks = np.hstack(
        (np.arange(n_sample_chunks, dtype=int) * sample_chunk_size, n_samples))
    out_fid.create_dataset('saturations', (n_channels, len(sample_chunks) - 1),
                           dtype='int32')
    out_fid.create_dataset('first_frame', shape=(1, ),
                           data=in_fid["sig"][1027, 0] << 16 | in_fid["sig"][1026, 0])
    overlap = sample_chunk_size
    chunk = np.zeros((n_channels, sample_chunk_size + overlap))
    chunk[:, :overlap] = np.array([
        in_fid['sig'][channels, 0],
    ] * overlap).transpose()
    for i in trange(len(sample_chunks) - 1, ncols=100, position=0, leave=True):
        idx_from = sample_chunks[i]
        idx_to = sample_chunks[i + 1]
        chunk = chunk[:, :(idx_to - idx_from + overlap)]
        chunk[:, overlap:] = in_fid['sig'][channels, idx_from:idx_to]
        # Count samples stuck at the ADC rails (0 or 4095) as saturations
        out_fid['saturations'][:, i] = np.count_nonzero(
            ((0 == chunk[:, overlap:]) | (chunk[:, overlap:] == 4095)), axis=1)
        cusig = cupy.asarray(chunk, dtype=cupy.float32)
        cusig = cusig - cupy.mean(cusig)
        # Forward-backward pass for (approximately) zero-phase filtering
        cusig = cusignal.sosfilt(sos, cusig)
        cusig = cupy.fliplr(cusig)
        cusig = cusignal.sosfilt(sos, cusig)
        cusig = cupy.fliplr(cusig)
        cusig = cusig * cupy.asarray(scales, dtype=cupy.float32)[:, None]
        if cmr:
            # Common median referencing across channels
            cusig = cusig - cupy.median(cusig, axis=0)
        out_fid["sig"][:, idx_from:idx_to] = cupy.asnumpy(cusig[:, overlap:])
        chunk[:, :overlap] = chunk[:, -overlap:]
    # Writing filtered traces to disk...
    in_fid.close()
    out_fid.close()
    if ram_copy:
        in_ramfile.save()
        out_ramfile.save()
        del in_ramfile, out_ramfile
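
# Worked example (my addition) of the 'first_frame' reconstruction above: the
# frame counter is stored as two 16-bit halves, with the high word in row 1027
# and the low word in row 1026 of the raw 'sig' dataset.
high, low = 3, 5           # hypothetical raw 16-bit values
print((high << 16) | low)  # 196613 == 3 * 65536 + 5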
def dask_filter_chunk(in_rec_filepath, channels, idx_from, idx_to, scales,
                      low_cutoff, high_cutoff, order=3, cmr=True, whiten=True,
                      h5write=None):
    sos = butter(order, [low_cutoff / 10000, high_cutoff / 10000],
                 'bandpass', output='sos')
    file = h5py.File(in_rec_filepath, 'r')
    sig = file['sig']
    chunk_size = idx_to - idx_from
    if idx_to > sig.shape[1]:
        # Clamp the last chunk to the end of the recording
        idx_to = sig.shape[1]
        idx_from = idx_to - chunk_size
    if idx_from == 0:
        # First chunk: prepend a constant pre-chunk (the first sample) as overlap
        chunk = np.ones(
            (len(channels), chunk_size * 2)) * sig[channels, 0][:, np.newaxis]
        chunk[:, chunk_size:] = sig[channels, :idx_to]
    else:
        # Interior chunk: use the preceding chunk as overlap
        chunk = sig[channels, idx_from - chunk_size:idx_to]
    # Count samples stuck at the ADC rails (0 or 4095) as saturations
    saturations = np.count_nonzero(
        ((0 == chunk[:, chunk_size:]) | (chunk[:, chunk_size:] == 4095)), axis=1)
    file.close()
    cusig = cupy.asarray(chunk, dtype=cupy.float32)
    cusig = cusig - cupy.mean(cusig)
    # Forward-backward pass for (approximately) zero-phase filtering
    cusig = cusignal.sosfilt(sos, cusig)
    cusig = cupy.fliplr(cusig)
    cusig = cusignal.sosfilt(sos, cusig)
    cusig = cupy.fliplr(cusig)
    cusig = cusig * cupy.asarray(scales, dtype=cupy.float32)[:, None]
    if cmr:
        cusig = cusig - cupy.median(cusig, axis=0)
    cusig = cusig.get()
    if whiten:
        # Whiten across channels by setting all singular values to one
        U, S, Vt = np.linalg.svd(cusig, full_matrices=False)
        w_chunk = np.dot(U, Vt)
        if h5write is not None:
            written = False
            while not written:
                # The output file may be held by another worker; retry until the write succeeds
                try:
                    out_fid = h5py.File(h5write, 'r+')
                    out_fid['sig'][:, idx_from:idx_to] = cusig[:, chunk_size:]
                    out_fid['white_sig'][:, idx_from:idx_to] = w_chunk[:, chunk_size:]
                    out_fid.close()
                    written = True
                except:
                    time.sleep(0.05)
            return saturations
        else:
            return cusig, w_chunk, saturations
    else:
        if h5write is not None:
            written = False
            while not written:
                try:
                    out_fid = h5py.File(h5write, 'r+')
                    if idx_from == 0:
                        out_fid['sig'][:, :idx_to] = cusig[:, :idx_to]
                    out_fid['sig'][:, int(idx_from + chunk_size / 2):idx_to] = \
                        cusig[:, int(chunk_size / 2):]
                    out_fid.close()
                    written = True
                except:
                    time.sleep(0.05)
            return saturations
        else:
            return cusig, saturations
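
# Sketch (my addition) of what the SVD whitening step above does: replacing the
# singular values with ones (w = U @ Vt) yields a chunk whose channel covariance
# is the identity, i.e. the channels become decorrelated with unit norm.
import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal((8, 1000))      # 8 channels, 1000 samples
U, S, Vt = np.linalg.svd(x, full_matrices=False)
w = U @ Vt
print(np.allclose(w @ w.T, np.eye(8)))  # True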