# assumed imports for these tests; module paths may differ across ibllib versions
import unittest

import numpy as np

from ibllib.atlas import BrainRegions


class TestBrainRegions(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        cls.brs = BrainRegions()

    def test_init(self):
        pass

    def test_get(self):
        ctx = self.brs.get(688)
        self.assertTrue(len(ctx.acronym) == 1 and ctx.acronym == 'CTX')

    def test_ancestors_descendants(self):
        # here we use the same brain region as in the alyx test
        self.assertTrue(self.brs.descendants(ids=688).id.size == 567)
        self.assertTrue(self.brs.ancestors(ids=688).id.size == 4)

    def test_mappings(self):
        # the mapping assigns all non-found regions to root (1: 997), except for the void (0: 0)
        # here we're looking at the retina (1327: 304325711)
        inds = self.brs._mapping_from_regions_list(np.array([304325711]))
        inds_ = np.zeros_like(self.brs.id) + 1
        inds_[-1] = 1327
        inds_[0] = 0
        assert np.all(inds == inds_)
def compute_similarity_matrix(self):
    """
    Computes the similarity matrix between each alignment stored in the ephys aligned
    trajectory. The similarity is based on the number of clusters that share the same brain
    region and parent brain region.
    """
    r = BrainRegions()

    clusters = dict()
    for iK, key in enumerate(self.align_keys_sorted):
        # Location of reference lines used for alignment
        feature = np.array(self.alignments[key][0])
        track = np.array(self.alignments[key][1])

        # Instantiate EphysAlignment object
        ephysalign = EphysAlignment(self.xyz_picks, self.depths, track_prev=track,
                                    feature_prev=feature, brain_atlas=self.brain_atlas)

        # Find xyz location of all channels
        xyz_channels = ephysalign.get_channel_locations(feature, track)
        brain_regions = ephysalign.get_brain_locations(xyz_channels)

        # Find the location of clusters along the alignment
        cluster_info = dict()
        cluster_info['brain_id'] = brain_regions['id'][self.cluster_chns]
        cluster_info['parent_id'] = r.get(ids=cluster_info['brain_id']).parent.astype(int)
        clusters.update({key: cluster_info})

    sim_matrix = np.zeros((len(self.align_keys_sorted), len(self.align_keys_sorted)))

    for ik, key in enumerate(self.align_keys_sorted):
        for ikk, key2 in enumerate(self.align_keys_sorted):
            same_id = np.where(clusters[key]['brain_id'] == clusters[key2]['brain_id'])[0]
            not_same_id = np.where(clusters[key]['brain_id'] != clusters[key2]['brain_id'])[0]
            same_parent = np.where(clusters[key]['parent_id'][not_same_id] ==
                                   clusters[key2]['parent_id'][not_same_id])[0]
            sim_matrix[ik, ikk] = len(same_id) + (len(same_parent) * 0.5)

    # Normalise
    sim_matrix_norm = sim_matrix / np.max(sim_matrix)

    return sim_matrix_norm
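# A minimal, self-contained sketch (not library code) of the scoring rule used by
# compute_similarity_matrix above, with made-up region ids: a cluster scores 1 when two
# alignments assign it the same region, and 0.5 when only the parent region matches.
import numpy as np

brain_a = np.array([10, 10, 20, 30])   # region id per cluster, alignment A (hypothetical)
brain_b = np.array([10, 11, 20, 31])   # region id per cluster, alignment B (hypothetical)
parent_a = np.array([1, 1, 2, 3])      # parent region ids, alignment A (hypothetical)
parent_b = np.array([1, 1, 2, 3])      # parent region ids, alignment B (hypothetical)

same_id = np.where(brain_a == brain_b)[0]
not_same_id = np.where(brain_a != brain_b)[0]
same_parent = np.where(parent_a[not_same_id] == parent_b[not_same_id])[0]
score = len(same_id) + len(same_parent) * 0.5
print(score)  # 2 exact matches + 2 parent-only matches * 0.5 -> 3.0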
class TestBrainRegions(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        cls.brs = BrainRegions()

    def test_get(self):
        ctx = self.brs.get(688)
        self.assertTrue(len(ctx.acronym) == 1 and ctx.acronym == 'CTX')

    def test_ancestors_descendants(self):
        # here we use the same brain region as in the alyx test
        self.assertTrue(self.brs.descendants(ids=688).id.size == 567)
        self.assertTrue(self.brs.ancestors(ids=688).id.size == 4)
        # the leaves have no descendants but themselves
        leaves = self.brs.leaves()
        d = self.brs.descendants(ids=leaves['id'])
        self.assertTrue(np.all(np.sort(leaves['id']) == np.sort(d['id'])))

    def test_mappings_lateralized(self):
        # the mapping assigns all non-found regions to root (1: 997), except for the void (0: 0)
        # here we're looking at the retina (1327: 304325711), so we expect 1327 at index 1327
        inds = self.brs._mapping_from_regions_list(np.array([304325711]), lateralize=True)
        inds_ = np.zeros_like(self.brs.id) + 1
        inds_[int((inds.size - 1) / 2)] = 1327
        inds_[-1] = 1327 * 2
        inds_[0] = 0
        assert np.all(inds == inds_)

    def test_mappings_not_lateralized(self):
        # if the mapping is not lateralized, the retina of both hemispheres maps to the same index
        inds = self.brs._mapping_from_regions_list(np.array([304325711]), lateralize=False)
        inds_ = np.zeros_like(self.brs.id) + 1
        inds_[int((inds.size - 1) / 2)] = 1327
        inds_[-1] = 1327
        inds_[0] = 0
        assert np.all(inds == inds_)
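# A toy illustration (hypothetical ids, not the real Allen table) of the mapping convention
# the tests above assert: a mapping is an array of indices into the region table, where every
# region not in the requested list falls back to root (index 1) and void (index 0) maps to
# itself.
import numpy as np

region_ids = np.array([0, 997, 8, 304325711])  # void, root, 'grey', retina (toy subset)
requested = np.array([304325711])
inds = np.ones(region_ids.size, dtype=int)     # default: everything -> root (index 1)
inds[0] = 0                                    # void stays void
inds[np.isin(region_ids, requested)] = np.where(np.isin(region_ids, requested))[0]
print(inds)  # [0 1 1 3]: only the retina keeps its own index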
# assumed imports for this function; module paths may differ across ibllib / ONE versions
import logging

import numpy as np

from one.api import ONE
from iblutil.util import Bunch
from ibllib.atlas import BrainRegions

logger = logging.getLogger('ibllib')


def load_channel_locations(eid, one=None, probe=None, aligned=False):
    """
    From an eid, get brain locations from the Alyx database.

    :param eid: session eid or dictionary returned by one.alyx.rest('sessions', 'read', id=eid)
    :param one: ONE instance; a new instance is created if not provided
    :param probe: name of a single probe to load; if None, all available probes are loaded
    :param aligned: whether to accept the latest user alignment when channels are not resolved
    :return: channels
    """
    if isinstance(eid, dict):
        ses = eid
        eid = ses['url'][-36:]

    one = one or ONE()

    # When a specific probe has been requested
    if isinstance(probe, str):
        insertions = one.alyx.rest('insertions', 'list', session=eid, name=probe)[0]
        labels = [probe]
        if not insertions['json']:
            tracing = [False]
            resolved = [False]
            counts = [0]
        else:
            tracing = [insertions.get('json', {}).get('extended_qc', {})
                       .get('tracing_exists', False)]
            resolved = [insertions.get('json', {}).get('extended_qc', {})
                        .get('alignment_resolved', False)]
            counts = [insertions.get('json', {}).get('extended_qc', {})
                      .get('alignment_count', 0)]
        probe_id = [insertions['id']]
    # No specific probe specified, load any that is available.
    # Need to catch the case where we have two insertions of the same probe.
    else:
        insertions = one.alyx.rest('insertions', 'list', session=eid)
        labels = [ins['name'] for ins in insertions]
        try:
            tracing = [ins.get('json', {}).get('extended_qc', {}).get('tracing_exists', False)
                       for ins in insertions]
            resolved = [ins.get('json', {}).get('extended_qc', {}).get('alignment_resolved', False)
                        for ins in insertions]
            counts = [ins.get('json', {}).get('extended_qc', {}).get('alignment_count', 0)
                      for ins in insertions]
        except Exception:
            tracing = [False] * len(insertions)
            resolved = [False] * len(insertions)
            counts = [0] * len(insertions)
        probe_id = [ins['id'] for ins in insertions]

    channels = Bunch({})
    r = BrainRegions()
    for label, trace, resol, count, pid in zip(labels, tracing, resolved, counts, probe_id):
        if trace:
            if resol:
                logger.info(f'Channel locations for {label} have been resolved. '
                            f'Channel and cluster locations obtained from ephys aligned '
                            f'histology track.')
                # download the data
                chans = one.load_object(eid, 'channels', collection=f'alf/{label}')

                # If we have successfully downloaded the data
                if 'brainLocationIds_ccf_2017' in chans.keys():
                    channels[label] = Bunch({
                        'atlas_id': chans['brainLocationIds_ccf_2017'],
                        'acronym': r.get(chans['brainLocationIds_ccf_2017'])['acronym'],
                        'x': chans['mlapdv'][:, 0] / 1e6,
                        'y': chans['mlapdv'][:, 1] / 1e6,
                        'z': chans['mlapdv'][:, 2] / 1e6,
                        'axial_um': chans['localCoordinates'][:, 1],
                        'lateral_um': chans['localCoordinates'][:, 0]
                    })
                # Otherwise we just get the channels from alyx. Shouldn't happen often, only if
                # data is still in between FTP and FlatIron after being resolved
                else:
                    traj_id = one.alyx.rest(
                        'trajectories', 'list', session=eid, probe=label,
                        provenance='Ephys aligned histology track')[0]['id']
                    chans = one.alyx.rest('channels', 'list', trajectory_estimate=traj_id)
                    channels[label] = Bunch({
                        'atlas_id': np.array([ch['brain_region'] for ch in chans]),
                        'x': np.array([ch['x'] for ch in chans]) / 1e6,
                        'y': np.array([ch['y'] for ch in chans]) / 1e6,
                        'z': np.array([ch['z'] for ch in chans]) / 1e6,
                        'axial_um': np.array([ch['axial'] for ch in chans]),
                        'lateral_um': np.array([ch['lateral'] for ch in chans])
                    })
                    channels[label]['acronym'] = r.get(channels[label]['atlas_id'])['acronym']
            elif count > 0 and aligned:
                logger.info(f'Channel locations for {label} have not been resolved. '
                            f'However, alignment flag set to True so channel and cluster '
                            f'locations will be obtained from latest available ephys aligned '
                            f'histology track.')
                # get the latest user aligned channels
                traj_id = one.alyx.rest(
                    'trajectories', 'list', session=eid, probe=label,
                    provenance='Ephys aligned histology track')[0]['id']
                chans = one.alyx.rest('channels', 'list', trajectory_estimate=traj_id)
                channels[label] = Bunch({
                    'atlas_id': np.array([ch['brain_region'] for ch in chans]),
                    'x': np.array([ch['x'] for ch in chans]) / 1e6,
                    'y': np.array([ch['y'] for ch in chans]) / 1e6,
                    'z': np.array([ch['z'] for ch in chans]) / 1e6,
                    'axial_um': np.array([ch['axial'] for ch in chans]),
                    'lateral_um': np.array([ch['lateral'] for ch in chans])
                })
                channels[label]['acronym'] = r.get(channels[label]['atlas_id'])['acronym']
            else:
                logger.info(f'Channel locations for {label} have not been resolved. '
                            f'Channel and cluster locations obtained from histology track.')
                # get the channels from histology tracing
                traj_id = one.alyx.rest('trajectories', 'list', session=eid, probe=label,
                                        provenance='Histology track')[0]['id']
                chans = one.alyx.rest('channels', 'list', trajectory_estimate=traj_id)
                channels[label] = Bunch({
                    'atlas_id': np.array([ch['brain_region'] for ch in chans]),
                    'x': np.array([ch['x'] for ch in chans]) / 1e6,
                    'y': np.array([ch['y'] for ch in chans]) / 1e6,
                    'z': np.array([ch['z'] for ch in chans]) / 1e6,
                    'axial_um': np.array([ch['axial'] for ch in chans]),
                    'lateral_um': np.array([ch['lateral'] for ch in chans])
                })
                channels[label]['acronym'] = r.get(channels[label]['atlas_id'])['acronym']
        else:
            logger.warning(f'Histology tracing for {label} does not exist. '
                           f'No channels for {label}')

    return channels
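# A hypothetical usage sketch for load_channel_locations; the eid below is a placeholder,
# not a real session, and ONE credentials are assumed to be configured.
# one = ONE()
# eid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'  # placeholder session id
# channels = load_channel_locations(eid, one=one, probe='probe00', aligned=True)
# channels['probe00']['acronym']  # one brain-region acronym per channel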
def upload_channels(self, alignment_key, upload_alyx, upload_flatiron):
    """
    Upload channels to alyx and flatiron based on the alignment specified by the alignment key
    """
    feature = np.array(self.alignments[alignment_key][0])
    track = np.array(self.alignments[alignment_key][1])
    ephysalign = EphysAlignment(self.xyz_picks, self.depths, track_prev=track,
                                feature_prev=feature, brain_atlas=self.brain_atlas)

    # Find the channels
    channels_mlapdv = np.int32(ephysalign.get_channel_locations(feature, track) * 1e6)
    channels_brainID = ephysalign.get_brain_locations(channels_mlapdv / 1e6)['id']

    # Find the clusters
    r = BrainRegions()
    clusters_mlapdv = channels_mlapdv[self.cluster_chns]
    clusters_brainID = channels_brainID[self.cluster_chns]
    clusters_brainAcro = r.get(ids=clusters_brainID).acronym

    # upload datasets to flatiron
    files_to_register = []
    if upload_flatiron:
        ftp_patcher = FTPPatcher(one=self.one)
        insertion = self.one.alyx.rest('insertions', 'read', id=self.eid)
        alf_path = self.one.path_from_eid(insertion['session']).joinpath('alf',
                                                                         insertion['name'])
        alf_path.mkdir(exist_ok=True, parents=True)

        # Make the channels.mlapdv dataset
        f_name = alf_path.joinpath('channels.mlapdv.npy')
        np.save(f_name, channels_mlapdv)
        files_to_register.append(f_name)

        # Make the channels.brainLocationIds dataset
        f_name = alf_path.joinpath('channels.brainLocationIds_ccf_2017.npy')
        np.save(f_name, channels_brainID)
        files_to_register.append(f_name)

        # Make the clusters.mlapdv dataset
        f_name = alf_path.joinpath('clusters.mlapdv.npy')
        np.save(f_name, clusters_mlapdv)
        files_to_register.append(f_name)

        # Make the clusters.brainLocationIds dataset
        f_name = alf_path.joinpath('clusters.brainLocationIds_ccf_2017.npy')
        np.save(f_name, clusters_brainID)
        files_to_register.append(f_name)

        # Make the clusters.brainLocationAcronyms dataset
        f_name = alf_path.joinpath('clusters.brainLocationAcronyms_ccf_2017.npy')
        np.save(f_name, clusters_brainAcro)
        files_to_register.append(f_name)

        self.log.info("Writing datasets to FlatIron")
        ftp_patcher.create_dataset(path=files_to_register,
                                   created_by=self.one._par.ALYX_LOGIN)

    # Need to change channels stored on alyx as well as the stored key is not the same as the
    # latest key
    if upload_alyx:
        if alignment_key != self.align_keys_sorted[0]:
            histology.register_aligned_track(self.eid, channels_mlapdv / 1e6,
                                             chn_coords=SITES_COORDINATES, one=self.one,
                                             overwrite=True, channels=self.channels)

            ephys_traj = self.one.alyx.rest('trajectories', 'list', probe_insertion=self.eid,
                                            provenance='Ephys aligned histology track')
            patch_dict = {'json': self.alignments}
            self.one.alyx.rest('trajectories', 'partial_update', id=ephys_traj[0]['id'],
                               data=patch_dict)

    return files_to_register
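# Minimal sketch of the channel -> cluster fancy indexing used above, with toy data: each
# cluster inherits the location of its peak channel.
import numpy as np

channels_mlapdv = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])  # one (ml, ap, dv) row per channel
cluster_chns = np.array([2, 0, 2])  # peak channel of each cluster (hypothetical)
clusters_mlapdv = channels_mlapdv[cluster_chns]  # rows 2, 0 and 2 of the channel table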
# assumed imports in addition to those above; module paths may differ across ibllib versions
from iblutil.numerical import ismember
from ibllib.atlas.plots import prepare_lr_data, reorder_data


class TestBrainRegions(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        cls.brs = BrainRegions()

    def test_rgba(self):
        assert self.brs.rgba.shape == (self.brs.rgb.shape[0], 4)

    def test_get(self):
        ctx = self.brs.get(688)
        self.assertTrue(len(ctx.acronym) == 1 and ctx.acronym == 'CTX')

    def test_ancestors_descendants(self):
        # here we use the same brain region as in the alyx test
        self.assertTrue(self.brs.descendants(ids=688).id.size == 567)
        self.assertTrue(self.brs.ancestors(ids=688).id.size == 4)
        # the leaves have no descendants but themselves
        leaves = self.brs.leaves()
        d = self.brs.descendants(ids=leaves['id'])
        self.assertTrue(np.all(np.sort(leaves['id']) == np.sort(d['id'])))

    def test_ancestors_descendants_indices(self):
        br = self.brs
        tpath = np.array([997, 8, 567, 688, 695, 315, 453, 12993])
        # /997/8/567/688/695/315/453/12993/
        # check ancestors
        ancs = br.ancestors(12993)
        assert np.all(ancs.id == tpath)
        # check ancestors with indices
        ancs, inds = br.ancestors(12993, return_indices=True)
        assert np.all(ancs.id == tpath)
        # check descendants with indices
        desdc, inds = br.descendants(12993, return_indices=True)
        assert (inds == np.where(br.id == 12993))
        # check full subtree
        chemin = br.subtree(453)
        assert np.all(np.sort(chemin.id) ==
                      np.unique(np.r_[br.descendants(453).id, br.ancestors(453).id]))

    def test_mappings_lateralized(self):
        # the mapping assigns all non-found regions to root (1: 997), except for the void (0: 0)
        # here we're looking at the retina (1327: 304325711), so we expect 1327 at index 1327
        inds = self.brs._mapping_from_regions_list(np.array([304325711]), lateralize=True)
        inds_ = np.zeros_like(self.brs.id) + 1
        inds_[int((inds.size - 1) / 2)] = 1327
        inds_[-1] = 1327 * 2
        inds_[0] = 0
        assert np.all(inds == inds_)

    def test_mappings_not_lateralized(self):
        # if the mapping is not lateralized, the retina of both hemispheres maps to the same index
        inds = self.brs._mapping_from_regions_list(np.array([304325711]), lateralize=False)
        inds_ = np.zeros_like(self.brs.id) + 1
        inds_[int((inds.size - 1) / 2)] = 1327
        inds_[-1] = 1327
        inds_[0] = 0
        assert np.all(inds == inds_)

    def test_remap(self):
        # Test mapping atlas ids from one map to another
        atlas_id = np.array([463, 685])  # CA3 and PO
        cosmos_id = self.brs.remap(atlas_id, source_map='Allen', target_map='Cosmos')
        expected_cosmos_id = [1089, 549]  # HPF and TH
        assert np.all(cosmos_id == expected_cosmos_id)

    def test_id2id(self):
        # Test remapping of atlas id to atlas id
        atlas_id = np.array([463, 685])
        # Allen mapping, positive ids -> positive ids
        allen_id = self.brs.id2id(atlas_id, mapping='Allen')
        assert np.all(allen_id == atlas_id)
        # Allen mapping, negative ids -> positive ids
        allen_id = self.brs.id2id(-1 * atlas_id, mapping='Allen')
        assert np.all(allen_id == atlas_id)
        # Allen-lr mapping, positive ids -> positive ids
        allen_id = self.brs.id2id(atlas_id, mapping='Allen-lr')
        assert np.all(allen_id == atlas_id)
        # Allen-lr mapping, negative ids -> negative ids
        allen_id = self.brs.id2id(-1 * atlas_id, mapping='Allen-lr')
        assert np.all(allen_id == -1 * atlas_id)

        expected_cosmos_id = np.array([1089, 549])  # HPF and TH
        # Cosmos mapping, negative ids -> positive ids
        cosmos_id = self.brs.id2id(-1 * atlas_id, mapping='Cosmos')
        assert np.all(cosmos_id == expected_cosmos_id)
        # Cosmos-lr mapping, negative ids -> negative ids
        cosmos_id = self.brs.id2id(-1 * atlas_id, mapping='Cosmos-lr')
        assert np.all(cosmos_id == -1 * expected_cosmos_id)

    def test_id2acro(self):
        atlas_id = np.array([463, 685])  # CA3 and VM
        expected_allen_acronym = np.array(['CA3', 'VM'])
        # Allen mapping, positive ids
        allen_acronym = self.brs.id2acronym(atlas_id, mapping='Allen')
        assert np.all(allen_acronym == expected_allen_acronym)
        # Allen-lr mapping, negative ids
        allen_acronym = self.brs.id2acronym(-1 * atlas_id, mapping='Allen-lr')
        assert np.all(allen_acronym == expected_allen_acronym)

        expected_cosmos_acronym = np.array(['HPF', 'TH'])
        cosmos_acronym = self.brs.id2acronym(atlas_id, mapping='Cosmos')
        assert np.all(cosmos_acronym == expected_cosmos_acronym)

    def test_id2index(self):
        atlas_id = np.array([463, 685])

        # Allen mapping, positive ids -> returns indices on both sides
        allen_id, index_both = self.brs.id2index(atlas_id, mapping='Allen')
        assert np.all(allen_id == atlas_id)
        for exp, ind in zip(atlas_id, index_both):
            assert np.all(ind == np.where(self.brs.id[self.brs.mappings['Allen']] == exp)[0])

        # Allen mapping, negative ids -> returns indices on both sides
        allen_id, index_both = self.brs.id2index(-1 * atlas_id, mapping='Allen')
        assert np.all(allen_id == atlas_id)
        for exp, ind in zip(atlas_id, index_both):
            assert np.all(ind == np.where(self.brs.id[self.brs.mappings['Allen']] == exp)[0])

        # Allen-lr mapping, positive ids -> returns indices on right side
        allen_id, index = self.brs.id2index(atlas_id, mapping='Allen-lr')
        assert np.all(allen_id == atlas_id)
        for i, (exp, ind) in enumerate(zip(atlas_id, index)):
            assert np.all(ind == index_both[i][index_both[i] <= self.brs.n_lr])

        # Allen-lr mapping, negative ids -> returns indices on left side
        allen_id, index = self.brs.id2index(-1 * atlas_id, mapping='Allen-lr')
        assert np.all(allen_id == -1 * atlas_id)
        for i, (exp, ind) in enumerate(zip(atlas_id, index)):
            assert np.all(ind == index_both[i][index_both[i] > self.brs.n_lr])

        # Cosmos mapping, positive ids -> returns indices on both sides
        expected_cosmos_id = [1089, 549]  # HPF and TH
        cosmos_id, index_both = self.brs.id2index(atlas_id, mapping='Cosmos')
        assert np.all(cosmos_id == expected_cosmos_id)
        for exp, ind in zip(expected_cosmos_id, index_both):
            assert np.all(ind == np.where(self.brs.id[self.brs.mappings['Cosmos']] == exp)[0])

    def test_acro2acro(self):
        acronym = np.array(['CA3', 'VM'])
        # Allen mapping
        allen_acronym = self.brs.acronym2acronym(acronym, mapping='Allen')
        assert np.all(acronym == allen_acronym)

        expected_cosmos_acronym = np.array(['HPF', 'TH'])
        # Cosmos mapping
        cosmos_acronym = self.brs.acronym2acronym(acronym, mapping='Cosmos-lr')
        assert np.all(cosmos_acronym == expected_cosmos_acronym)

    def test_acro2id(self):
        acronym = np.array(['CA3', 'VM'])
        expected_allen_id = np.array([463, 685])
        # Allen mapping, both hemispheres -> positive ids
        allen_id = self.brs.acronym2id(acronym, mapping='Allen', hemisphere=None)
        assert np.all(allen_id == expected_allen_id)
        # Allen mapping, left hemisphere -> positive ids
        allen_id = self.brs.acronym2id(acronym, mapping='Allen', hemisphere='left')
        assert np.all(allen_id == expected_allen_id)
        # Allen mapping, right hemisphere -> positive ids
        allen_id = self.brs.acronym2id(acronym, mapping='Allen', hemisphere='right')
        assert np.all(allen_id == expected_allen_id)

        # Allen-lr mapping, both hemispheres -> negative and positive ids
        allen_id = self.brs.acronym2id(acronym, mapping='Allen-lr', hemisphere=None)
        assert np.all(allen_id.ravel() ==
                      np.c_[-1 * expected_allen_id, expected_allen_id].ravel())
        # Allen-lr mapping, left hemisphere -> negative ids
        allen_id = self.brs.acronym2id(acronym, mapping='Allen-lr', hemisphere='left')
        assert np.all(allen_id == -1 * expected_allen_id)
        # Allen-lr mapping, right hemisphere -> positive ids
        allen_id = self.brs.acronym2id(acronym, mapping='Allen-lr', hemisphere='right')
        assert np.all(allen_id == expected_allen_id)

        expected_cosmos_id = np.array([1089, 549])
        # Cosmos-lr mapping, left hemisphere -> negative ids
        cosmos_id = self.brs.acronym2id(acronym, mapping='Cosmos-lr', hemisphere='left')
        assert np.all(cosmos_id == -1 * expected_cosmos_id)
        # Cosmos mapping, left hemisphere -> positive ids
        cosmos_id = self.brs.acronym2id(acronym, mapping='Cosmos', hemisphere='left')
        assert np.all(cosmos_id == expected_cosmos_id)

    def test_acro2index(self):
        acronym = np.array(['CA3', 'VM'])
        # Expect the acronyms to be the same regardless of lateralized or non-lateralized mapping
        for map, expected_acronym in zip(['Allen', 'Allen-lr', 'Cosmos', 'Cosmos-lr'],
                                         [np.array(['CA3', 'VM']), np.array(['CA3', 'VM']),
                                          np.array(['HPF', 'TH']), np.array(['HPF', 'TH'])]):
            # Mapping, both hemispheres, returns indices on both sides
            map_acronym, index_both = self.brs.acronym2index(acronym, mapping=map,
                                                             hemisphere=None)
            assert np.all(map_acronym == expected_acronym)
            for exp, ind in zip(expected_acronym, index_both):
                assert np.all(ind == np.where(
                    self.brs.acronym[self.brs.mappings[map]] == exp)[0])

            # Mapping, left hemisphere, returns indices that are > n_lr (1327)
            map_acronym, index = self.brs.acronym2index(acronym, mapping=map, hemisphere='left')
            assert np.all(map_acronym == expected_acronym)
            for i, (exp, ind) in enumerate(zip(expected_acronym, index)):
                assert np.all(ind == index_both[i][index_both[i] > self.brs.n_lr])

            # Mapping, right hemisphere, returns indices that are <= n_lr (1327)
            map_acronym, index = self.brs.acronym2index(acronym, mapping=map, hemisphere='right')
            assert np.all(map_acronym == expected_acronym)
            for i, (exp, ind) in enumerate(zip(expected_acronym, index)):
                assert np.all(ind == index_both[i][index_both[i] <= self.brs.n_lr])

    def test_index2id(self):
        index = np.array([468, 646, 1973])
        # Allen mapping
        allen_id = np.array([463, 685, 685])
        assert np.all(self.brs.index2id(index, mapping='Allen') == allen_id)
        # Allen-lr mapping
        allen_id = np.array([463, 685, -685])
        assert np.all(self.brs.index2id(index, mapping='Allen-lr') == allen_id)
        # Cosmos-lr mapping
        cosmos_id = np.array([1089, 549, -549])
        assert np.all(self.brs.index2id(index, mapping='Cosmos-lr') == cosmos_id)

    def test_index2acronym(self):
        index = np.array([468, 646, 1973])
        # Allen mapping
        allen_acronym = np.array(['CA3', 'VM', 'VM'])
        assert np.all(self.brs.index2acronym(index, mapping='Allen') == allen_acronym)
        # Allen-lr mapping
        allen_acronym = np.array(['CA3', 'VM', 'VM'])
        assert np.all(self.brs.index2acronym(index, mapping='Allen-lr') == allen_acronym)
        # Cosmos-lr mapping
        cosmos_acronym = np.array(['HPF', 'TH', 'TH'])
        assert np.all(self.brs.index2acronym(index, mapping='Cosmos-lr') == cosmos_acronym)

    def test_prepare_lr_data(self):
        acronyms_lh = np.array(['VPM', 'VPL', 'PO'])
        values_lh = np.array([0, 1, 2])
        acronyms_rh = np.array(['VPL', 'PO', 'CA1'])
        values_rh = np.array([3, 4, 5])
        acronyms, values = prepare_lr_data(acronyms_lh, values_lh, acronyms_rh, values_rh)
        assert np.array_equal(np.unique(np.r_[acronyms_lh, acronyms_rh]), acronyms)
        assert np.array_equal(values[acronyms == 'VPL'][0], np.array([1, 3]))
        np.testing.assert_equal(values[acronyms == 'VPM'][0], np.array([0, np.nan]))
        np.testing.assert_equal(values[acronyms == 'CA1'][0], np.array([np.nan, 5]))

    def test_reorder_data(self):
        acronyms = np.array(['AUDp1', 'AUDpo1', 'AUDv1', 'SSp-m1', 'SSp-n1'])
        values = np.array([0, 1, 2, 3, 4])
        _, idx = ismember(acronyms, self.brs.acronym)
        expected_acronyms = acronyms[np.argsort(self.brs.order[idx])]
        expected_values = values[np.argsort(self.brs.order[idx])]
        acronyms_ordered, values_ordered = reorder_data(acronyms, values)
        assert np.array_equal(acronyms_ordered, expected_acronyms)
        assert np.array_equal(values_ordered, expected_values)

    def test_argument_parser(self):
        acronyms = ['AUDp1', 'AUDpo1', 'AUDv1', 'SSp-m1', 'SSp-n1']
        ids = self.brs.acronym2id(acronyms)
        assert np.all(self.brs.parse_acronyms_argument(acronyms) == ids)
        assert np.all(self.brs.parse_acronyms_argument(np.array(acronyms)) == ids)
        assert np.all(self.brs.parse_acronyms_argument(ids) == ids)
        assert np.all(self.brs.parse_acronyms_argument(list(ids)) == ids)
        # make sure it handles exceptions
        with self.assertRaises(AssertionError):
            self.brs.parse_acronyms_argument(acronyms + ['toto'])
        assert np.all(self.brs.parse_acronyms_argument(acronyms + ['toto'], mode='clip') == ids)
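# Sketch of the left/right merge that test_prepare_lr_data asserts, written in plain numpy
# (prepare_lr_data itself is the library function under test): acronyms are deduplicated and
# each row of `values` holds [left, right], with NaN where a region exists in only one
# hemisphere.
import numpy as np

acronyms_lh = np.array(['VPM', 'VPL', 'PO'])
values_lh = np.array([0., 1., 2.])
acronyms_rh = np.array(['VPL', 'PO', 'CA1'])
values_rh = np.array([3., 4., 5.])

acronyms = np.unique(np.r_[acronyms_lh, acronyms_rh])
values = np.full((acronyms.size, 2), np.nan)
for i, a in enumerate(acronyms):
    if a in acronyms_lh:
        values[i, 0] = values_lh[acronyms_lh == a][0]
    if a in acronyms_rh:
        values[i, 1] = values_rh[acronyms_rh == a][0]
print(values[acronyms == 'VPL'][0])  # [1. 3.]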