Exemplo n.º 1
0
class TestBrainRegions(unittest.TestCase):
    """Exercise BrainRegions: record lookup, hierarchy traversal and region mappings."""

    @classmethod
    def setUpClass(cls):
        # Build the structure tree once for the whole class: construction is expensive.
        # (Idiom fix: the first argument of a classmethod is 'cls', not 'self'.)
        cls.brs = BrainRegions()

    def test_init(self):
        # Construction is exercised in setUpClass; nothing further to verify here.
        pass

    def test_get(self):
        # region id 688 resolves to exactly one record, with acronym 'CTX'
        ctx = self.brs.get(688)
        self.assertTrue(len(ctx.acronym) == 1 and ctx.acronym == 'CTX')

    def test_ancestors_descendants(self):
        # here we use the same brain region as in the alyx test
        self.assertTrue(self.brs.descendants(ids=688).id.size == 567)
        self.assertTrue(self.brs.ancestors(ids=688).id.size == 4)

    def test_mappings(self):
        # the mapping assigns all non found regions to root (1:997), except for the void (0:0)
        # here we're looking at the retina (1327:304325711)
        inds = self.brs._mapping_from_regions_list(np.array([304325711]))
        inds_ = np.zeros_like(self.brs.id) + 1
        inds_[-1] = 1327
        inds_[0] = 0
        # use unittest's assertion rather than a bare assert (stripped under -O)
        self.assertTrue(np.all(inds == inds_))
Exemplo n.º 2
0
    def compute_similarity_matrix(self):
        """
        Computes the similarity matrix between each alignment stored in the ephys aligned
        trajectory. Similarity matrix based on number of clusters that share brain region and
        parent brain region

        :return: square matrix of shape (n_alignments, n_alignments), normalised by its
                 maximum entry so values lie in [0, 1]
        """

        r = BrainRegions()

        # per-alignment-key dict of {'brain_id': ..., 'parent_id': ...} for each cluster
        clusters = dict()
        for iK, key in enumerate(self.align_keys_sorted):
            # Location of reference lines used for alignment
            feature = np.array(self.alignments[key][0])
            track = np.array(self.alignments[key][1])

            # Instantiate EphysAlignment object
            ephysalign = EphysAlignment(self.xyz_picks, self.depths, track_prev=track,
                                        feature_prev=feature,
                                        brain_atlas=self.brain_atlas)

            # Find xyz location of all channels
            xyz_channels = ephysalign.get_channel_locations(feature, track)
            brain_regions = ephysalign.get_brain_locations(xyz_channels)

            # Find the location of clusters along the alignment
            cluster_info = dict()
            cluster_info['brain_id'] = brain_regions['id'][self.cluster_chns]
            cluster_info['parent_id'] = r.get(ids=cluster_info['brain_id']).parent.astype(int)
            clusters.update({key: cluster_info})

        sim_matrix = np.zeros((len(self.align_keys_sorted), len(self.align_keys_sorted)))

        # pairwise score: a cluster with the same region counts 1; a cluster whose region
        # differs but whose parent region matches counts 0.5
        for ik, key in enumerate(self.align_keys_sorted):
            for ikk, key2 in enumerate(self.align_keys_sorted):
                same_id = np.where(clusters[key]['brain_id'] == clusters[key2]['brain_id'])[0]
                not_same_id = \
                    np.where(clusters[key]['brain_id'] != clusters[key2]['brain_id'])[0]
                same_parent = np.where(clusters[key]['parent_id'][not_same_id] ==
                                       clusters[key2]['parent_id'][not_same_id])[0]
                sim_matrix[ik, ikk] = len(same_id) + (len(same_parent) * 0.5)
        # Normalise by the global maximum (the diagonal self-similarity entries)
        sim_matrix_norm = sim_matrix / np.max(sim_matrix)

        return sim_matrix_norm
Exemplo n.º 3
0
def get_full_region_name(acronyms):
    """
    Translate brain-region acronyms into their full Allen names.

    Acronyms that cannot be found in the atlas are passed through unchanged.

    :param acronyms: iterable of region acronyms
    :return: a single name when exactly one acronym was given, otherwise a list of names
    """
    br = BrainRegions()
    names = []
    for acronym in acronyms:
        # indices of atlas entries matching this acronym (may be empty)
        hits = np.flatnonzero(br.acronym == acronym)
        if hits.size:
            names.append(br.name[hits][0])
        else:
            # unknown acronym: keep it as-is
            names.append(acronym)
    return names[0] if len(names) == 1 else names
Exemplo n.º 4
0
def plot_scalar_on_barplot(acronyms,
                           values,
                           errors=None,
                           order=True,
                           ylim=None,
                           ax=None,
                           brain_regions=None):
    """
    Plot one bar per brain region, coloured with the Allen colour of each acronym.

    :param acronyms: array of region acronyms
    :param values: array of values, one per acronym
    :param errors: currently unused (kept for interface compatibility)
    :param order: if True, reorder acronyms/values to match the Allen ordering
    :param ylim: currently unused (kept for interface compatibility)
    :param ax: matplotlib axis to plot on; a new figure is created when None
    :param brain_regions: BrainRegions object; instantiated when None
    :return: (fig, ax)
    """
    br = brain_regions or BrainRegions()

    if order:
        # Bug fix: forward the resolved 'br' rather than the possibly-None
        # 'brain_regions' argument, so reorder_data does not build a second
        # BrainRegions instance and both steps use the same region table.
        acronyms, values = reorder_data(acronyms, values, br)

    # look up each acronym's row in the atlas to fetch its RGB colour
    _, idx = ismember(acronyms, br.acronym)
    colours = br.rgb[idx]

    if ax:
        fig = ax.get_figure()
    else:
        fig, ax = plt.subplots()

    ax.bar(np.arange(acronyms.size), values, color=colours)

    return fig, ax
Exemplo n.º 5
0
def reorder_data(acronyms, values, brain_regions=None):
    """
    Reorder list of acronyms and values to match the Allen ordering
    :param acronyms: array of acronyms
    :param values: array of values
    :param brain_regions: BrainRegions object
    :return: ordered array of acronyms and values
    """
    br = brain_regions or BrainRegions()
    atlas_id = br.acronym2id(acronyms, hemisphere='right')
    # right-hemisphere atlas ids, in Allen order
    all_ids = br.id[br.order][:br.n_lr + 1]
    # NaN-filled templates; slots never written stay NaN and are dropped below
    ordered_ids = np.full(all_ids.shape, np.nan)
    ordered_values = np.full(all_ids.shape, np.nan)
    _, idx = ismember(atlas_id, all_ids)
    ordered_ids[idx] = atlas_id
    ordered_values[idx] = values

    # drop the unfilled slots (each array filtered by its own NaN mask)
    ordered_ids = ordered_ids[~np.isnan(ordered_ids)]
    ordered_values = ordered_values[~np.isnan(ordered_values)]

    return br.id2acronym(ordered_ids), ordered_values
Exemplo n.º 6
0
class TestBrainRegions(unittest.TestCase):
    """Tests for BrainRegions lookups, hierarchy traversal and lateralized mappings."""

    @classmethod
    def setUpClass(cls):
        # Build the structure tree once for the whole class: construction is expensive.
        # (Idiom fix: the first argument of a classmethod is 'cls', not 'self'.)
        cls.brs = BrainRegions()

    def test_get(self):
        # region id 688 resolves to exactly one record, with acronym 'CTX'
        ctx = self.brs.get(688)
        self.assertTrue(len(ctx.acronym) == 1 and ctx.acronym == 'CTX')

    def test_ancestors_descendants(self):
        # here we use the same brain region as in the alyx test
        self.assertTrue(self.brs.descendants(ids=688).id.size == 567)
        self.assertTrue(self.brs.ancestors(ids=688).id.size == 4)
        # the leaves have no descendants but themselves
        leaves = self.brs.leaves()
        d = self.brs.descendants(ids=leaves['id'])
        self.assertTrue(np.all(np.sort(leaves['id']) == np.sort(d['id'])))

    def test_mappings_lateralized(self):
        # the mapping assigns all non found regions to root (1:997), except for the void (0:0)
        # here we're looking at the retina (1327:304325711), so we expect 1327 at index 1327
        inds = self.brs._mapping_from_regions_list(np.array([304325711]),
                                                   lateralize=True)
        inds_ = np.zeros_like(self.brs.id) + 1
        # right-hemisphere retina maps to 1327, left-hemisphere (last entry) to 1327 * 2
        inds_[int((inds.size - 1) / 2)] = 1327
        inds_[-1] = 1327 * 2
        inds_[0] = 0
        # use unittest's assertion rather than a bare assert (stripped under -O)
        self.assertTrue(np.all(inds == inds_))

    def test_mappings_not_lateralized(self):
        # if it's not lateralized, the retina of both hemispheres should map to the same index
        inds = self.brs._mapping_from_regions_list(np.array([304325711]),
                                                   lateralize=False)
        inds_ = np.zeros_like(self.brs.id) + 1
        inds_[int((inds.size - 1) / 2)] = 1327
        inds_[-1] = 1327
        inds_[0] = 0
        self.assertTrue(np.all(inds == inds_))
Exemplo n.º 7
0
def _insertion_qc_field(ins, key, default):
    # Dig a field out of an insertion's json->extended_qc blob; the {'temp': 0}
    # placeholders only apply when the key is missing entirely, so a present-but-None
    # 'json' value still raises (callers guard / catch accordingly).
    return ins.get('json', {'temp': 0}).get('extended_qc', {'temp': 0}).get(key, default)


def _channels_bunch_from_rest(chans, brain_regions):
    # Convert alyx rest 'channels' records into a Bunch of numpy arrays,
    # converting coordinates from um to meters and resolving region acronyms.
    channels = Bunch({
        'atlas_id': np.array([ch['brain_region'] for ch in chans]),
        'x': np.array([ch['x'] for ch in chans]) / 1e6,
        'y': np.array([ch['y'] for ch in chans]) / 1e6,
        'z': np.array([ch['z'] for ch in chans]) / 1e6,
        'axial_um': np.array([ch['axial'] for ch in chans]),
        'lateral_um': np.array([ch['lateral'] for ch in chans])
    })
    channels['acronym'] = brain_regions.get(channels['atlas_id'])['acronym']
    return channels


def _channels_from_trajectory(one, eid, label, provenance, brain_regions):
    # Fetch the channels attached to the trajectory of the given provenance.
    traj_id = one.alyx.rest('trajectories',
                            'list',
                            session=eid,
                            probe=label,
                            provenance=provenance)[0]['id']
    chans = one.alyx.rest('channels', 'list', trajectory_estimate=traj_id)
    return _channels_bunch_from_rest(chans, brain_regions)


def load_channel_locations(eid, one=None, probe=None, aligned=False):
    """
    From an eid, get brain locations from Alyx database
    analysis.
    :param eid: session eid or dictionary returned by one.alyx.rest('sessions', 'read', id=eid)
    :param one: ONE instance; instantiated when None
    :param probe: optional probe label; when None all insertions of the session are loaded
    :param aligned: if True, accept user-aligned (but unresolved) alignments as well
    :return: Bunch of per-probe channel Bunches (atlas_id, acronym, x, y, z, axial_um,
             lateral_um); probes without histology tracing are skipped with a warning
    """
    if isinstance(eid, dict):
        ses = eid
        eid = ses['url'][-36:]

    one = one or ONE()

    # When a specific probe has been requested
    if isinstance(probe, str):
        insertion = one.alyx.rest('insertions',
                                  'list',
                                  session=eid,
                                  name=probe)[0]
        labels = [probe]
        if not insertion['json']:
            # no QC blob at all: assume nothing is traced/resolved
            tracing = [False]
            resolved = [False]
            counts = [0]
        else:
            tracing = [_insertion_qc_field(insertion, 'tracing_exists', False)]
            resolved = [_insertion_qc_field(insertion, 'alignment_resolved', False)]
            counts = [_insertion_qc_field(insertion, 'alignment_count', 0)]
        probe_id = [insertion['id']]
    # No specific probe specified, load any that is available
    # Need to catch for the case where we have two of the same probe insertions
    else:
        insertions = one.alyx.rest('insertions', 'list', session=eid)
        labels = [ins['name'] for ins in insertions]
        try:
            tracing = [_insertion_qc_field(ins, 'tracing_exists', False)
                       for ins in insertions]
            resolved = [_insertion_qc_field(ins, 'alignment_resolved', False)
                        for ins in insertions]
            counts = [_insertion_qc_field(ins, 'alignment_count', 0)
                      for ins in insertions]
        except Exception:
            # malformed json on any insertion: fall back to "nothing available"
            tracing = [False for _ in insertions]
            resolved = [False for _ in insertions]
            counts = [0 for _ in insertions]

        probe_id = [ins['id'] for ins in insertions]

    channels = Bunch({})
    r = BrainRegions()
    for label, trace, resol, count, pid in zip(labels, tracing, resolved,
                                               counts, probe_id):
        if not trace:
            logger.warning(f'Histology tracing for {label} does not exist. '
                           f'No channels for {label}')
            continue
        if resol:
            logger.info(
                f'Channel locations for {label} have been resolved. '
                f'Channel and cluster locations obtained from ephys aligned histology '
                f'track.')
            # download the data
            chans = one.load_object(eid,
                                    'channels',
                                    collection=f'alf/{label}')

            # If we have successfully downloaded the data
            if 'brainLocationIds_ccf_2017' in chans.keys():
                channels[label] = Bunch({
                    'atlas_id': chans['brainLocationIds_ccf_2017'],
                    'acronym': r.get(chans['brainLocationIds_ccf_2017'])['acronym'],
                    'x': chans['mlapdv'][:, 0] / 1e6,
                    'y': chans['mlapdv'][:, 1] / 1e6,
                    'z': chans['mlapdv'][:, 2] / 1e6,
                    'axial_um': chans['localCoordinates'][:, 1],
                    'lateral_um': chans['localCoordinates'][:, 0]
                })
            # Otherwise we just get the channels from alyx. Shouldn't happen often, only if
            # data is still inbetween ftp and flatiron after being resolved
            else:
                channels[label] = _channels_from_trajectory(
                    one, eid, label, 'Ephys aligned histology track', r)
        elif count > 0 and aligned:
            logger.info(
                f'Channel locations for {label} have not been '
                f'resolved. However, alignment flag set to True so channel and cluster'
                f' locations will be obtained from latest available ephys aligned '
                f'histology track.')
            # get the latest user aligned channels
            channels[label] = _channels_from_trajectory(
                one, eid, label, 'Ephys aligned histology track', r)
        else:
            logger.info(
                f'Channel locations for {label} have not been resolved. '
                f'Channel and cluster locations obtained from histology track.'
            )
            # get the channels from histology tracing
            channels[label] = _channels_from_trajectory(
                one, eid, label, 'Histology track', r)

    return channels
Exemplo n.º 8
0
 def __init__(self,
              res_um=25,
              brainmap='Allen',
              scaling=np.array([1, 1, 1]),
              mock=False,
              hist_path=None):
     """
     Load (or build and cache) the Allen atlas volumes and initialise the base atlas.

     :param res_um: 10, 25 or 50 um
     :param brainmap: defaults to 'Allen', see ibllib.atlas.BrainRegion for re-mappings
     :param scaling: scale factor along ml, ap, dv for squeeze and stretch ([1, 1, 1])
     :param mock: for testing purpose
     :param hist_path: optional path to an image volume used instead of the cached template
     :return: atlas.BrainAtlas
     """
     # NOTE(review): 'brainmap' is not referenced anywhere in this body — confirm
     # whether the re-mapping is applied elsewhere or the parameter is vestigial.
     par = params.read('one_params')
     FLAT_IRON_ATLAS_REL_PATH = Path('histology', 'ATLAS', 'Needles',
                                     'Allen')
     LUT_VERSION = "v01"  # version 01 is the lateralized version
     regions = BrainRegions()
     xyz2dims = np.array([1, 0, 2])  # this is the c-contiguous ordering
     dims2xyz = np.array([1, 0, 2])
     # we use Bregma as the origin
     self.res_um = res_um
     # bregma voxel coordinates at this resolution
     ibregma = (ALLEN_CCF_LANDMARKS_MLAPDV_UM['bregma'] / self.res_um)
     # voxel size in meters, signed per axis, with the user-provided stretch applied
     dxyz = self.res_um * 1e-6 * np.array([1, -1, -1]) * scaling
     if mock:
         # synthetic volumes for tests: zeros everywhere except a retina slab
         image, label = [
             np.zeros((528, 456, 320), dtype=np.int16) for _ in range(2)
         ]
         label[:, :, 100:
               105] = 1327  # lookup index for retina, id 304325711 (no id 1327)
     else:
         path_atlas = Path(par.CACHE_DIR).joinpath(FLAT_IRON_ATLAS_REL_PATH)
         file_image = hist_path or path_atlas.joinpath(
             f'average_template_{res_um}.nrrd')
         # get the image volume
         if not file_image.exists():
             _download_atlas_flatiron(file_image, FLAT_IRON_ATLAS_REL_PATH,
                                      par)
         # get the remapped label volume
         file_label = path_atlas.joinpath(f'annotation_{res_um}.nrrd')
         if not file_label.exists():
             _download_atlas_flatiron(file_label, FLAT_IRON_ATLAS_REL_PATH,
                                      par)
         file_label_remap = path_atlas.joinpath(
             f'annotation_{res_um}_lut_{LUT_VERSION}.npz')
         # the lookup-table remapped labels are computed once and cached as npz
         if not file_label_remap.exists():
             label = self._read_volume(file_label)
             _logger.info("computing brain atlas annotations lookup table")
             # lateralize atlas: for this the regions of the left hemisphere have primary
             # keys opposite to to the normal ones
             # sign flips from -1 to +1 at the bregma ml position, negating one hemisphere
             lateral = np.zeros(label.shape[xyz2dims[0]])
             lateral[int(np.floor(ibregma[0]))] = 1
             lateral = np.sign(
                 np.cumsum(lateral)[np.newaxis, :, np.newaxis] - 0.5)
             label = label * lateral
             # replace region ids by their row index in the regions table
             _, im = ismember(label, regions.id)
             label = np.reshape(im.astype(np.uint16), label.shape)
             _logger.info(f"saving {file_label_remap} ...")
             np.savez_compressed(file_label_remap, label)
         # loads the files
         label = self._read_volume(file_label_remap)
         image = self._read_volume(file_image)
     super().__init__(image,
                      label,
                      dxyz,
                      regions,
                      ibregma,
                      dims2xyz=dims2xyz,
                      xyz2dims=xyz2dims)
Exemplo n.º 9
0
def plot_brain_regions(channel_ids,
                       channel_depths=None,
                       brain_regions=None,
                       display=True,
                       ax=None,
                       title=None):
    """
    Plot brain regions along probe, if channel depths is provided will plot along depth otherwise along channel idx
    :param channel_ids: atlas ids for each channel
    :param channel_depths: depth along probe for each channel
    :param brain_regions: BrainRegions object
    :param display: whether to output plot
    :param ax: axis to plot on
    :param title: title for plot
    :return: (fig, ax) when display is True, otherwise (regions, region_labels, region_colours)
    """

    if channel_depths is not None:
        assert channel_ids.shape[0] == channel_depths.shape[0]
    else:
        # no depths supplied: fall back to the channel index as the depth axis
        channel_depths = np.arange(channel_ids.shape[0])

    br = brain_regions or BrainRegions()

    region_info = br.get(channel_ids)
    # channel indices where the brain region changes between consecutive channels
    boundaries = np.where(np.diff(region_info.id) != 0)[0]
    boundaries = np.r_[0, boundaries, region_info.id.shape[0] - 1]

    # consecutive boundary pairs give the (start, end) extent of each region
    regions = np.c_[boundaries[0:-1], boundaries[1:]]
    # channel_depths is always set at this point (defaulted above when None), so the
    # previous 'if channel_depths is not None' guard here was dead code — removed.
    regions = channel_depths[regions]
    # label each region at its mid-depth with its acronym
    region_labels = np.c_[np.mean(regions, axis=1),
                          region_info.acronym[boundaries[1:]]]
    region_colours = region_info.rgb[boundaries[1:]]

    if not display:
        return regions, region_labels, region_colours

    if ax is None:
        fig, ax = plt.subplots()
    else:
        fig = ax.get_figure()

    # one bar per region, stacked along the depth axis, coloured by Allen colour
    for reg, col in zip(regions, region_colours):
        height = np.abs(reg[1] - reg[0])
        color = col / 255
        ax.bar(x=0.5,
               height=height,
               width=1,
               color=color,
               bottom=reg[0],
               edgecolor='w')
    ax.set_yticks(region_labels[:, 0].astype(int))
    ax.yaxis.set_tick_params(labelsize=8)
    ax.set_ylim(np.nanmin(channel_depths), np.nanmax(channel_depths))
    ax.get_xaxis().set_visible(False)
    ax.set_yticklabels(region_labels[:, 1])
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.spines['bottom'].set_visible(False)
    if title:
        ax.set_title(title)

    return fig, ax
Exemplo n.º 10
0
    def __init__(self,
                 res_um=25,
                 scaling=np.array([1, 1, 1]),
                 mock=False,
                 hist_path=None):
        """
        Load (or build and cache) the Allen atlas volumes and initialise the base atlas.

        :param res_um: 10, 25 or 50 um
        :param scaling: scale factor along ml, ap, dv for squeeze and stretch ([1, 1, 1])
        :param mock: for testing purpose
        :param hist_path: optional path to an image volume used instead of the cached template
        :return: atlas.BrainAtlas
        """

        par = one.params.get(silent=True)
        FLAT_IRON_ATLAS_REL_PATH = PurePosixPath('histology', 'ATLAS',
                                                 'Needles', 'Allen')
        LUT_VERSION = "v01"  # version 01 is the lateralized version
        regions = BrainRegions()
        xyz2dims = np.array([1, 0, 2])  # this is the c-contiguous ordering
        dims2xyz = np.array([1, 0, 2])
        # we use Bregma as the origin
        self.res_um = res_um
        # bregma voxel coordinates at this resolution
        ibregma = (ALLEN_CCF_LANDMARKS_MLAPDV_UM['bregma'] / self.res_um)
        # voxel size in meters, signed per axis, with the user-provided stretch applied
        dxyz = self.res_um * 1e-6 * np.array([1, -1, -1]) * scaling
        if mock:
            # synthetic volumes for tests: zeros everywhere except a retina slab
            image, label = [
                np.zeros((528, 456, 320), dtype=np.int16) for _ in range(2)
            ]
            label[:, :, 100:
                  105] = 1327  # lookup index for retina, id 304325711 (no id 1327)
        else:
            path_atlas = Path(par.CACHE_DIR).joinpath(FLAT_IRON_ATLAS_REL_PATH)
            file_image = hist_path or path_atlas.joinpath(
                f'average_template_{res_um}.nrrd')
            # get the image volume
            if not file_image.exists():
                _download_atlas_allen(file_image, FLAT_IRON_ATLAS_REL_PATH,
                                      par)
            # get the remapped label volume
            file_label = path_atlas.joinpath(f'annotation_{res_um}.nrrd')
            if not file_label.exists():
                _download_atlas_allen(file_label, FLAT_IRON_ATLAS_REL_PATH,
                                      par)
            file_label_remap = path_atlas.joinpath(
                f'annotation_{res_um}_lut_{LUT_VERSION}.npz')
            # the lookup-table remapped labels are computed once and cached as npz
            if not file_label_remap.exists():
                label = self._read_volume(file_label).astype(dtype=np.int32)
                _logger.info("computing brain atlas annotations lookup table")
                # lateralize atlas: for this the regions of the left hemisphere have primary
                # keys opposite to to the normal ones
                # sign flips from -1 to +1 at the bregma ml position, negating one hemisphere
                lateral = np.zeros(label.shape[xyz2dims[0]])
                lateral[int(np.floor(ibregma[0]))] = 1
                lateral = np.sign(
                    np.cumsum(lateral)[np.newaxis, :, np.newaxis] - 0.5)
                label = label * lateral.astype(np.int32)
                # the 10 um atlas is too big to fit in memory so work by chunks instead
                if res_um == 10:
                    # remap 'ncols' planes of the last axis at a time
                    first, ncols = (0, 10)
                    while True:
                        last = np.minimum(first + ncols, label.shape[-1])
                        _logger.info(
                            f"Computing... {last} on {label.shape[-1]}")
                        # replace region ids by their row index in the regions table
                        _, im = ismember(label[:, :, first:last], regions.id)
                        label[:, :, first:last] = np.reshape(
                            im, label[:, :, first:last].shape)
                        if last == label.shape[-1]:
                            break
                        first += ncols
                    label = label.astype(dtype=np.uint16)
                    _logger.info("Saving npz, this can take a long time")
                else:
                    # small enough to remap in a single pass
                    _, im = ismember(label, regions.id)
                    label = np.reshape(im.astype(np.uint16), label.shape)
                np.savez_compressed(file_label_remap, label)
                _logger.info(f"Cached remapping file {file_label_remap} ...")
            # loads the files
            label = self._read_volume(file_label_remap)
            image = self._read_volume(file_image)

        super().__init__(image,
                         label,
                         dxyz,
                         regions,
                         ibregma,
                         dims2xyz=dims2xyz,
                         xyz2dims=xyz2dims)
Exemplo n.º 11
0
 def setUpClass(self):
     # Build the BrainRegions structure tree once for the test class.
     # NOTE(review): presumably decorated with @classmethod upstream (the decorator
     # is not visible here), making 'self' actually the class object — confirm.
     self.brs = BrainRegions()
Exemplo n.º 12
0
    def upload_channels(self, alignment_key, upload_alyx, upload_flatiron):
        """
        Upload channels to alyx and flatiron based on the alignment specified by the alignment key

        :param alignment_key: key into self.alignments selecting the (feature, track) pair
        :param upload_alyx: if True, patch the aligned trajectory records on alyx
        :param upload_flatiron: if True, write npy datasets locally and register them to FlatIron
        :return: list of dataset file paths queued for registration (empty when
                 upload_flatiron is False)
        """

        feature = np.array(self.alignments[alignment_key][0])
        track = np.array(self.alignments[alignment_key][1])
        ephysalign = EphysAlignment(self.xyz_picks, self.depths,
                                    track_prev=track,
                                    feature_prev=feature,
                                    brain_atlas=self.brain_atlas)

        # Find the channels
        # channel coordinates in um (int32), brain ids looked up from the meter-scaled copy
        channels_mlapdv = np.int32(ephysalign.get_channel_locations(feature, track) * 1e6)
        channels_brainID = ephysalign.get_brain_locations(channels_mlapdv / 1e6)['id']

        # Find the clusters
        r = BrainRegions()
        clusters_mlapdv = channels_mlapdv[self.cluster_chns]
        clusters_brainID = channels_brainID[self.cluster_chns]
        clusters_brainAcro = r.get(ids=clusters_brainID).acronym

        # upload datasets to flatiron
        files_to_register = []
        if upload_flatiron:
            ftp_patcher = FTPPatcher(one=self.one)
            insertion = self.one.alyx.rest('insertions', 'read', id=self.eid)
            alf_path = self.one.path_from_eid(insertion['session']).joinpath('alf',
                                                                             insertion['name'])
            alf_path.mkdir(exist_ok=True, parents=True)

            # Make the channels.mlapdv dataset
            f_name = alf_path.joinpath('channels.mlapdv.npy')
            np.save(f_name, channels_mlapdv)
            files_to_register.append(f_name)

            # Make the channels.brainLocationIds dataset
            f_name = alf_path.joinpath('channels.brainLocationIds_ccf_2017.npy')
            np.save(f_name, channels_brainID)
            files_to_register.append(f_name)

            # Make the clusters.mlapdv dataset
            f_name = alf_path.joinpath('clusters.mlapdv.npy')
            np.save(f_name, clusters_mlapdv)
            files_to_register.append(f_name)

            # Make the clusters.brainLocationIds dataset
            f_name = alf_path.joinpath('clusters.brainLocationIds_ccf_2017.npy')
            np.save(f_name, clusters_brainID)
            files_to_register.append(f_name)

            # Make the clusters.brainLocationAcronym dataset
            f_name = alf_path.joinpath('clusters.brainLocationAcronyms_ccf_2017.npy')
            np.save(f_name, clusters_brainAcro)
            files_to_register.append(f_name)

            self.log.info("Writing datasets to FlatIron")
            ftp_patcher.create_dataset(path=files_to_register, created_by=self.one._par.ALYX_LOGIN)

        # Need to change channels stored on alyx as well as the stored key is not the same as the
        # latest key
        if upload_alyx:
            # only re-register when the chosen alignment is not already the latest one
            if alignment_key != self.align_keys_sorted[0]:
                histology.register_aligned_track(self.eid, channels_mlapdv / 1e6,
                                                 chn_coords=SITES_COORDINATES, one=self.one,
                                                 overwrite=True, channels=self.channels)

                ephys_traj = self.one.alyx.rest('trajectories', 'list', probe_insertion=self.eid,
                                                provenance='Ephys aligned histology track')
                # store the full set of alignments in the trajectory's json field
                patch_dict = {'json': self.alignments}
                self.one.alyx.rest('trajectories', 'partial_update', id=ephys_traj[0]['id'],
                                   data=patch_dict)

        return files_to_register
Exemplo n.º 13
0
class TestBrainRegions(unittest.TestCase):
    """Tests for the BrainRegions ontology object.

    Covers tree navigation (ancestors / descendants / leaves / subtree),
    atlas mappings (Allen, Cosmos and their lateralized '-lr' variants) and
    the conversions between atlas ids, acronyms and region-table indices.
    """

    @classmethod
    def setUpClass(cls):
        # the ontology is read-only, so a single instance is shared by all tests
        cls.brs = BrainRegions()

    def test_rgba(self):
        # rgba is the rgb colour table with an extra alpha column appended
        assert self.brs.rgba.shape == (self.brs.rgb.shape[0], 4)

    def test_get(self):
        # fetching a single id returns a one-element region record (688: CTX)
        ctx = self.brs.get(688)
        self.assertTrue(len(ctx.acronym) == 1 and ctx.acronym == 'CTX')

    def test_ancestors_descendants(self):
        # here we use the same brain region as in the alyx test
        self.assertTrue(self.brs.descendants(ids=688).id.size == 567)
        self.assertTrue(self.brs.ancestors(ids=688).id.size == 4)
        # the leaves have no descendants but themselves
        leaves = self.brs.leaves()
        d = self.brs.descendants(ids=leaves['id'])
        self.assertTrue(np.all(np.sort(leaves['id']) == np.sort(d['id'])))

    def test_ancestors_descendants_indices(self):
        br = self.brs
        # full path from root down to region 12993:
        # /997/8/567/688/695/315/453/12993/
        tpath = np.array([997, 8, 567, 688, 695, 315, 453, 12993])
        # check ancestors
        ancs = br.ancestors(12993)
        assert np.all(ancs.id == tpath)
        # check ancestors with indices
        ancs, inds = br.ancestors(12993, return_indices=True)
        assert np.all(ancs.id == tpath)
        # check descendants with indices: the returned indices should point
        # back to the row(s) of 12993 itself. Compare against the array inside
        # the np.where tuple and reduce with np.all so the assert is on a
        # scalar boolean, not on an array/tuple comparison.
        desdc, inds = br.descendants(12993, return_indices=True)
        assert np.all(inds == np.where(br.id == 12993)[0])
        # check full subtree: the union of ancestors and descendants
        chemin = br.subtree(453)
        assert np.all(
            np.sort(chemin.id) == np.unique(np.r_[br.descendants(453).id,
                                                  br.ancestors(453).id]))

    def test_mappings_lateralized(self):
        # the mapping assigns all non found regions to root (1:997), except for the void (0:0)
        # here we're looking at the retina (1327:304325711), so we expect 1327 at index 1327
        inds = self.brs._mapping_from_regions_list(np.array([304325711]),
                                                   lateralize=True)
        inds_ = np.zeros_like(self.brs.id) + 1
        # lateralized: the right-hemisphere retina keeps index 1327, the
        # left-hemisphere copy (last row) gets 1327 * 2
        inds_[int((inds.size - 1) / 2)] = 1327
        inds_[-1] = 1327 * 2
        inds_[0] = 0
        assert np.all(inds == inds_)

    def test_mappings_not_lateralized(self):
        # if it's not lateralized, the retina of both hemispheres should map
        # to the same index (1327)
        inds = self.brs._mapping_from_regions_list(np.array([304325711]),
                                                   lateralize=False)
        inds_ = np.zeros_like(self.brs.id) + 1
        inds_[int((inds.size - 1) / 2)] = 1327
        inds_[-1] = 1327
        inds_[0] = 0
        assert np.all(inds == inds_)

    def test_remap(self):
        # Test mapping atlas ids from one map to another
        atlas_id = np.array([463, 685])  # CA3 and PO
        cosmos_id = self.brs.remap(atlas_id,
                                   source_map='Allen',
                                   target_map='Cosmos')
        expected_cosmos_id = [1089, 549]  # HPF and TH
        assert np.all(cosmos_id == expected_cosmos_id)

    def test_id2id(self):
        # Test remapping of atlas id to atlas id
        atlas_id = np.array([463, 685])
        # Allen mapping, positive ids -> positive ids
        allen_id = self.brs.id2id(atlas_id, mapping='Allen')
        assert np.all(allen_id == atlas_id)
        # Allen mapping, negative ids -> positive ids
        allen_id = self.brs.id2id(-1 * atlas_id, mapping='Allen')
        assert np.all(allen_id == atlas_id)
        # Allen-lr mapping, positive ids -> positive ids
        allen_id = self.brs.id2id(atlas_id, mapping='Allen-lr')
        assert np.all(allen_id == atlas_id)
        # Allen-lr mapping, negative ids -> negative ids (hemisphere preserved)
        allen_id = self.brs.id2id(-1 * atlas_id, mapping='Allen-lr')
        assert np.all(allen_id == -1 * atlas_id)

        expected_cosmos_id = np.array([1089, 549])  # HPF and TH
        # Cosmos mapping, negative ids -> positive ids
        cosmos_id = self.brs.id2id(-1 * atlas_id, mapping='Cosmos')
        assert np.all(cosmos_id == expected_cosmos_id)
        # Cosmos-lr mapping, negative ids -> negative ids
        cosmos_id = self.brs.id2id(-1 * atlas_id, mapping='Cosmos-lr')
        assert np.all(cosmos_id == -1 * expected_cosmos_id)

    def test_id2acro(self):
        atlas_id = np.array([463, 685])  # CA3 and VM
        expected_allen_acronym = np.array(['CA3', 'VM'])
        # Allen mapping, positive ids
        allen_acronym = self.brs.id2acronym(atlas_id, mapping='Allen')
        assert np.all(allen_acronym == expected_allen_acronym)
        # Allen-lr mapping, negative ids: acronyms are hemisphere-agnostic
        allen_acronym = self.brs.id2acronym(-1 * atlas_id, mapping='Allen-lr')
        assert np.all(allen_acronym == expected_allen_acronym)

        expected_cosmos_acronym = np.array(['HPF', 'TH'])
        cosmos_acronym = self.brs.id2acronym(atlas_id, mapping='Cosmos')
        assert np.all(cosmos_acronym == expected_cosmos_acronym)

    def test_id2index(self):
        atlas_id = np.array([463, 685])

        # Allen mapping, positive ids -> returns index on both sides
        allen_id, index_both = self.brs.id2index(atlas_id, mapping='Allen')
        assert np.all(allen_id == atlas_id)
        for exp, ind in zip(atlas_id, index_both):
            assert np.all(ind == np.where(
                self.brs.id[self.brs.mappings['Allen']] == exp)[0])

        # Allen mapping, negative ids -> returns index on both sides
        allen_id, index_both = self.brs.id2index(-1 * atlas_id,
                                                 mapping='Allen')
        assert np.all(allen_id == atlas_id)
        for exp, ind in zip(atlas_id, index_both):
            assert np.all(ind == np.where(
                self.brs.id[self.brs.mappings['Allen']] == exp)[0])

        # Allen-lr mapping, positive ids -> returns index on right side only
        allen_id, index = self.brs.id2index(atlas_id, mapping='Allen-lr')
        assert np.all(allen_id == atlas_id)
        for i, (exp, ind) in enumerate(zip(atlas_id, index)):
            assert np.all(ind == index_both[i][index_both[i] <= self.brs.n_lr])

        # Allen-lr mapping, negative ids -> returns index on left side only
        allen_id, index = self.brs.id2index(-1 * atlas_id, mapping='Allen-lr')
        assert np.all(allen_id == -1 * atlas_id)
        for i, (exp, ind) in enumerate(zip(atlas_id, index)):
            assert np.all(ind == index_both[i][index_both[i] > self.brs.n_lr])

        # Cosmos mapping, positive ids -> returns index on both sides
        expected_cosmos_id = [1089, 549]  # HPF and TH
        cosmos_id, index_both = self.brs.id2index(atlas_id, mapping='Cosmos')
        assert np.all(cosmos_id == expected_cosmos_id)
        for exp, ind in zip(expected_cosmos_id, index_both):
            assert np.all(ind == np.where(
                self.brs.id[self.brs.mappings['Cosmos']] == exp)[0])

    def test_acro2acro(self):
        acronym = np.array(['CA3', 'VM'])
        # Allen mapping: identity on Allen acronyms
        allen_acronym = self.brs.acronym2acronym(acronym, mapping='Allen')
        assert np.all(acronym == allen_acronym)

        expected_cosmos_acronym = np.array(['HPF', 'TH'])
        # Cosmos mapping: coarser parent regions
        cosmos_acronym = self.brs.acronym2acronym(acronym, mapping='Cosmos-lr')
        assert np.all(cosmos_acronym == expected_cosmos_acronym)

    def test_acro2id(self):
        acronym = np.array(['CA3', 'VM'])
        expected_allen_id = np.array([463, 685])
        # Allen mapping, both hemispheres -> positive ids
        allen_id = self.brs.acronym2id(acronym,
                                       mapping='Allen',
                                       hemisphere=None)
        assert np.all(allen_id == expected_allen_id)
        # Allen mapping, left hemisphere -> positive ids
        allen_id = self.brs.acronym2id(acronym,
                                       mapping='Allen',
                                       hemisphere='left')
        assert np.all(allen_id == expected_allen_id)
        # Allen mapping, right hemisphere -> positive ids
        allen_id = self.brs.acronym2id(acronym,
                                       mapping='Allen',
                                       hemisphere='right')
        assert np.all(allen_id == expected_allen_id)

        # Allen-lr mapping, both hemispheres -> negative and positive ids
        allen_id = self.brs.acronym2id(acronym,
                                       mapping='Allen-lr',
                                       hemisphere=None)
        assert np.all(allen_id.ravel() == np.c_[-1 * expected_allen_id,
                                                expected_allen_id].ravel())
        # Allen-lr mapping, left hemisphere -> negative ids
        allen_id = self.brs.acronym2id(acronym,
                                       mapping='Allen-lr',
                                       hemisphere='left')
        assert np.all(allen_id == -1 * expected_allen_id)
        # Allen-lr mapping, right hemisphere -> positive ids
        allen_id = self.brs.acronym2id(acronym,
                                       mapping='Allen-lr',
                                       hemisphere='right')
        assert np.all(allen_id == expected_allen_id)

        expected_cosmos_id = np.array([1089, 549])
        # Cosmos-lr mapping, left hemisphere -> negative ids
        cosmos_id = self.brs.acronym2id(acronym,
                                        mapping='Cosmos-lr',
                                        hemisphere='left')
        assert np.all(cosmos_id == -1 * expected_cosmos_id)
        # Cosmos mapping, left hemisphere -> positive ids (no lateralization)
        cosmos_id = self.brs.acronym2id(acronym,
                                        mapping='Cosmos',
                                        hemisphere='left')
        assert np.all(cosmos_id == expected_cosmos_id)

    def test_acro2index(self):
        acronym = np.array(['CA3', 'VM'])
        # Expect it to be same regardless of lateralised or non lateralised mapping
        for mapping, expected_acronym in zip(
            ['Allen', 'Allen-lr', 'Cosmos', 'Cosmos-lr'], [
                np.array(['CA3', 'VM']),
                np.array(['CA3', 'VM']),
                np.array(['HPF', 'TH']),
                np.array(['HPF', 'TH'])
            ]):
            # Mapping, both hemispheres, returns index on both sides
            map_acronym, index_both = self.brs.acronym2index(acronym,
                                                             mapping=mapping,
                                                             hemisphere=None)
            assert np.all(map_acronym == expected_acronym)
            for exp, ind in zip(expected_acronym, index_both):
                assert np.all(ind == np.where(
                    self.brs.acronym[self.brs.mappings[mapping]] == exp)[0])

            # Mapping, left hemisphere, returns indices that are > n_lr (1327)
            map_acronym, index = self.brs.acronym2index(acronym,
                                                        mapping=mapping,
                                                        hemisphere='left')
            assert np.all(map_acronym == expected_acronym)
            for i, (exp, ind) in enumerate(zip(expected_acronym, index)):
                assert np.all(
                    ind == index_both[i][index_both[i] > self.brs.n_lr])

            # Mapping, right hemisphere, returns indices that are <= n_lr (1327)
            map_acronym, index = self.brs.acronym2index(acronym,
                                                        mapping=mapping,
                                                        hemisphere='right')
            assert np.all(map_acronym == expected_acronym)
            for i, (exp, ind) in enumerate(zip(expected_acronym, index)):
                assert np.all(
                    ind == index_both[i][index_both[i] <= self.brs.n_lr])

    def test_index2id(self):
        index = np.array([468, 646, 1973])
        # Allen mapping: both hemispheres map to positive ids
        allen_id = np.array([463, 685, 685])
        assert np.all(self.brs.index2id(index, mapping='Allen') == allen_id)
        # Allen-lr mapping: left-hemisphere rows map to negative ids
        allen_id = np.array([463, 685, -685])
        assert np.all(self.brs.index2id(index, mapping='Allen-lr') == allen_id)
        # Cosmos-lr mapping
        cosmos_id = np.array([1089, 549, -549])
        assert np.all(
            self.brs.index2id(index, mapping='Cosmos-lr') == cosmos_id)

    def test_index2acronym(self):
        index = np.array([468, 646, 1973])
        # Allen mapping
        allen_acronym = np.array(['CA3', 'VM', 'VM'])
        assert np.all(
            self.brs.index2acronym(index, mapping='Allen') == allen_acronym)
        # Allen-lr mapping: acronyms are the same for both hemispheres
        allen_acronym = np.array(['CA3', 'VM', 'VM'])
        assert np.all(
            self.brs.index2acronym(index, mapping='Allen-lr') == allen_acronym)
        # Cosmos-lr mapping
        cosmos_acronym = np.array(['HPF', 'TH', 'TH'])
        assert np.all(
            self.brs.index2acronym(index, mapping='Cosmos-lr') ==
            cosmos_acronym)

    def test_prepare_lr_data(self):
        # merge left/right hemisphere values onto the union of acronyms,
        # filling missing entries with nan
        acronyms_lh = np.array(['VPM', 'VPL', 'PO'])
        values_lh = np.array([0, 1, 2])
        acronyms_rh = np.array(['VPL', 'PO', 'CA1'])
        values_rh = np.array([3, 4, 5])
        acronyms, values = prepare_lr_data(acronyms_lh, values_lh, acronyms_rh,
                                           values_rh)

        assert np.array_equal(np.unique(np.r_[acronyms_lh, acronyms_rh]),
                              acronyms)
        assert np.array_equal(values[acronyms == 'VPL'][0], np.array([1, 3]))
        np.testing.assert_equal(values[acronyms == 'VPM'][0],
                                np.array([0, np.nan]))
        np.testing.assert_equal(values[acronyms == 'CA1'][0],
                                np.array([np.nan, 5]))

    def test_reorder_data(self):
        # regions and their values should come back sorted by ontology order
        acronyms = np.array(['AUDp1', 'AUDpo1', 'AUDv1', 'SSp-m1', 'SSp-n1'])
        values = np.array([0, 1, 2, 3, 4])
        _, idx = ismember(acronyms, self.brs.acronym)
        expected_acronyms = acronyms[np.argsort(self.brs.order[idx])]
        expected_values = values[np.argsort(self.brs.order[idx])]
        values = np.array([0, 1, 2, 3, 4])
        acronyms_ordered, values_ordered = reorder_data(acronyms, values)
        assert np.array_equal(acronyms_ordered, expected_acronyms)
        assert np.array_equal(values_ordered, expected_values)

    def test_argument_parser(self):
        # parse_acronyms_argument accepts acronyms or ids, as list or array
        acronyms = ['AUDp1', 'AUDpo1', 'AUDv1', 'SSp-m1', 'SSp-n1']
        ids = self.brs.acronym2id(acronyms)
        assert np.all(self.brs.parse_acronyms_argument(acronyms) == ids)
        assert np.all(
            self.brs.parse_acronyms_argument(np.array(acronyms)) == ids)
        assert np.all(self.brs.parse_acronyms_argument(ids) == ids)
        assert np.all(self.brs.parse_acronyms_argument(list(ids)) == ids)
        # an unknown acronym raises in default mode...
        with self.assertRaises(AssertionError):
            self.brs.parse_acronyms_argument(acronyms + ['toto'])
        # ...but is silently dropped in 'clip' mode
        assert np.all(
            self.brs.parse_acronyms_argument(acronyms +
                                             ['toto'], mode='clip') == ids)