def test_ismember2d(self):
    b = np.reshape([0, 0, 0, 1, 1, 1], [3, 2])
    locb = np.array([0, 1, 0, 2, 2, 1])
    lia = np.array([True, True, True, True, True, True, False, False])
    a = np.r_[b[locb, :], np.array([[2, 1], [1, 2]])]
    lia_, locb_ = bnum.ismember2d(a, b)
    assert np.all(lia == lia_) & np.all(locb == locb_)
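
# For reference, a minimal sketch of the row-wise membership contract the test
# above exercises. This is a standalone illustration, not part of the suite;
# the helper name `_demo_ismember2d` is made up, and `bnum` is assumed to be
# the same module the test imports. `lia` holds one boolean per row of `a`;
# `locb` gives, for the matched rows only, the index of the matching row in `b`.
def _demo_ismember2d():
    import numpy as np
    a = np.array([[0, 1], [2, 3]])
    b = np.array([[2, 3], [0, 0]])
    lia, locb = bnum.ismember2d(a, b)
    # lia -> [False, True]: only the row [2, 3] occurs in b
    # locb -> [0]: that row matches row 0 of b
    return lia, locb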
def test_uuids_intersections(self):
    ntotal = 500
    nsub = 17
    nadd = 3

    eids = uuid2np([uuid.uuid4() for _ in range(ntotal)])

    np.random.seed(42)
    isel = np.floor(np.argsort(np.random.random(nsub)) / nsub * ntotal).astype(np.int16)
    sids = np.r_[eids[isel, :], uuid2np([uuid.uuid4() for _ in range(nadd)])]
    np.random.shuffle(sids)

    # check the intersection
    v, i0, i1 = intersect2d(eids, sids)
    assert np.all(eids[i0, :] == sids[i1, :])
    assert np.all(np.sort(isel) == np.sort(i0))

    v_, i0_, i1_ = np.intersect1d(eids[:, 0], sids[:, 0], return_indices=True)
    assert np.setxor1d(v_, v[:, 0]).size == 0
    assert np.setxor1d(i0, i0_).size == 0
    assert np.setxor1d(i1, i1_).size == 0

    for a, b in zip(ismember2d(sids, eids), ismember(sids[:, 0], eids[:, 0])):
        assert np.all(a == b)

    # check conversion to numpy back and forth
    uuids = [uuid.uuid4() for _ in np.arange(4)]
    np_uuids = uuid2np(uuids)
    assert np2uuid(np_uuids) == uuids
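
# A small round-trip illustration (hypothetical helper name, not part of the
# suite): uuid2np packs each 128-bit UUID into two int64 columns, which is why
# the row-wise intersect2d / ismember2d results are compared against their 1d
# numpy counterparts on single columns above.
def _demo_uuid_roundtrip():
    import uuid
    u = [uuid.uuid4() for _ in range(2)]
    arr = uuid2np(u)
    assert arr.shape == (2, 2)  # two int64 halves per uuid
    assert np2uuid(arr) == u    # the conversion is lossless
    return arr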
def test_ismember2d_uuids(self):
    nb = 20
    na = 500
    np.random.seed(42)
    a = np.random.randint(0, nb + 3, na)
    b = np.arange(nb)
    lia, locb = bnum.ismember(a, b)
    bb = np.random.randint(low=np.iinfo(np.int64).min, high=np.iinfo(np.int64).max,
                           size=(nb, 2), dtype=np.int64)
    aa = np.zeros((na, 2), dtype=np.int64)
    aa[lia, :] = bb[locb, :]
    lia_, locb_ = bnum.ismember2d(aa, bb)
    assert np.all(lia == lia_) & np.all(locb == locb_)
    # if the first column is equal, the distinction is to be made on the second
    bb[:, 0] = 0
    aa[:, 0] = 0
    assert np.unique(bb[:, 1]).size == nb
    lia_, locb_ = bnum.ismember2d(aa, bb)
    assert np.all(lia == lia_) & np.all(locb == locb_)
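
# Sketch of the failure mode the second half of the test guards against
# (standalone, hypothetical helper name): when the first int64 halves collide,
# a 1d comparison on column 0 alone is ambiguous, so ismember2d has to resolve
# the match on the second column.
def _demo_first_column_collision():
    import numpy as np
    b = np.array([[0, 10], [0, 20]])  # first column is identical
    a = np.array([[0, 20]])
    lia, locb = bnum.ismember2d(a, b)
    # lia -> [True]; locb -> [1]: the match is decided by column 1
    return lia, locb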
def _update_cache(self, ses, dataset_types):
    """
    :param ses: session details dictionary as per Alyx response
    :param dataset_types: list of dataset types used to filter the session datasets
    :return: is_updated (bool): whether the cache was updated
    """
    save = False
    pqt_dsets = _ses2pandas(ses, dtypes=dataset_types)
    # if the dataframe is empty, return
    if pqt_dsets.size == 0:
        return False
    # if the cache is empty, create the cache variable
    elif self._cache.size == 0:
        self._cache = pqt_dsets
        save = True
    # the cache is not empty and there are datasets in the query
    else:
        isin, icache = ismember2d(pqt_dsets[['id_0', 'id_1']].to_numpy(),
                                  self._cache[['id_0', 'id_1']].to_numpy())
        # check if the hash / filesize fields have changed on patching
        heq = (self._cache['hash'].iloc[icache].to_numpy() ==
               pqt_dsets['hash'].iloc[isin].to_numpy())
        feq = np.isclose(self._cache['file_size'].iloc[icache].to_numpy(),
                         pqt_dsets['file_size'].iloc[isin].to_numpy(),
                         rtol=0, atol=0, equal_nan=True)
        eq = np.logical_and(heq, feq)
        # update the new hashes / filesizes
        if not np.all(eq):
            self._cache.iloc[icache, 4:6] = pqt_dsets.iloc[np.where(isin)[0], 4:6].to_numpy()
            save = True
        # append datasets that haven't been found
        if not np.all(isin):
            self._cache = self._cache.append(pqt_dsets.iloc[np.where(~isin)[0]])
            self._cache = self._cache.reindex()
            save = True
    if save:
        # before saving, make sure pandas did not cast the uuids to float
        typs = [t for t, k in zip(self._cache.dtypes, self._cache.keys()) if 'id_' in k]
        assert all(map(lambda t: t == np.int64, typs))
        # if this gets too big, look into saving only when destroying the ONE object
        parquet.save(self._cache_file, self._cache)
    return save
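
# The reconciliation step above, shown in isolation: a sketch under assumptions
# (plain DataFrames keyed on the id_0/id_1 int64 pair with hash / file_size
# payload columns and a unique index; the helper name is made up and this is
# not the real cache schema or API).
def _demo_cache_reconciliation(cache, incoming):
    isin, icache = ismember2d(incoming[['id_0', 'id_1']].to_numpy(),
                              cache[['id_0', 'id_1']].to_numpy())
    # rows already cached: refresh their hash / file_size columns in place
    cache.loc[cache.index[icache], ['hash', 'file_size']] = \
        incoming.loc[isin, ['hash', 'file_size']].to_numpy()
    # rows never seen before: append them to the cache
    return cache.append(incoming.loc[~isin])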
def test_rf_map(self):
    """Test rf mapping: on/off stimulus detection, depth-averaged maps and SVD maps."""
    # Simulate fake rfmap data
    test_frames = np.full((60, 15, 15), 128, dtype='uint8')
    # Test on and off individually
    test_frames[10:20, 8, 8] = 0
    test_frames[25:35, 10, 13] = 255
    # Test that interleaved stimuli are detected correctly
    test_frames[40:50, 4, 9] = 0
    test_frames[42:52, 6, 10] = 255
    test_frames[42:55, 11, 4] = 0
    test_frames[50:60, 8, 8] = 0
    test_times = np.arange(60)
    rf_map = {'times': test_times, 'frames': test_frames}

    rf_map_times, rf_map_pos, rf_stim_frames = passive.get_on_off_times_and_positions(rf_map)

    assert all(rf_map_times == test_times)
    assert rf_map_pos.shape == (15 * 15, 2)
    assert len(rf_stim_frames['on']) == 15 * 15
    assert len(rf_stim_frames['off']) == 15 * 15

    # 'off' is for the 0 stimuli
    assert all(rf_stim_frames['off'][ismember2d(rf_map_pos, np.array([[8, 8]]))[0]][0][0] == [10, 50])
    assert rf_stim_frames['off'][ismember2d(rf_map_pos, np.array([[4, 9]]))[0]][0][0] == 40
    assert rf_stim_frames['off'][ismember2d(rf_map_pos, np.array([[11, 4]]))[0]][0][0] == 42
    # 'on' is for the 255 stimuli
    assert rf_stim_frames['on'][ismember2d(rf_map_pos, np.array([[10, 13]]))[0]][0][0] == 25
    assert rf_stim_frames['on'][ismember2d(rf_map_pos, np.array([[6, 10]]))[0]][0][0] == 42

    # Next, test that the firing rate function works:
    # make just one square responsive
    spike_times = np.arange(25, 35, 0.01)
    spike_depths = 500 * np.ones_like(spike_times)

    rf_map_avg, depths = passive.get_rf_map_over_depth(rf_map_times, rf_map_pos,
                                                       rf_stim_frames, spike_times,
                                                       spike_depths, x_lim=[0, 60])
    non_zero = np.where(rf_map_avg['on'] != 0)
    assert np.argmin(np.abs(depths - 500)) == non_zero[0][0]
    assert all(non_zero[1] == 10)
    assert all(non_zero[2] == 13)
    assert np.all(rf_map_avg['off'] == 0)

    rf_svd = passive.get_svd_map(rf_map_avg)
    # Make sure that the one responsive element is non-zero
    assert rf_svd['on'][non_zero[0][0]][non_zero[1][0], non_zero[2][0]] != 0
    # But that all the rest are zero
    rf_svd['on'][non_zero[0][0]][non_zero[1][0], non_zero[2][0]] = 0
    assert np.all(np.isclose(np.vstack(rf_svd['on']), 0))
    assert np.all(np.vstack(rf_svd['off']) == 0)
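
# For orientation, the per-pixel event extraction that
# get_on_off_times_and_positions performs on the simulated frames can be
# sketched as below (a simplified stand-in under assumptions, not the ibllib
# implementation; the helper name is made up): a pixel idles at the grey 128
# background, a step to 0 opens an 'off' stimulus and a step to 255 an 'on' one.
def _demo_onset_frames(frames, x, y):
    import numpy as np
    trace = frames[:, x, y].astype(int)
    # onset = first frame where the pixel enters the stimulus state
    # (an event already active at frame 0 is ignored for brevity)
    on = np.where(np.diff((trace == 255).astype(int)) == 1)[0] + 1
    off = np.where(np.diff((trace == 0).astype(int)) == 1)[0] + 1
    # e.g. on the simulated data above, pixel (8, 8) yields off onsets [10, 50]
    return on, off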