def get_morphology_features(self, dataframe=False, file_name=None):
        """
        Download morphology features for all cells with reconstructions in the database.

        Parameters
        ----------

        file_name: string
            File name to save/read the ephys features metadata as CSV.
            If file_name is None, the file_name will be pulled out of the
            manifest.  If caching is disabled, no file will be saved.
            Default is None.

        dataframe: boolean
            Return the output as a Pandas DataFrame.  If False, return
            a list of dictionaries.
        """

        path = self.get_cache_path(
            file_name, self.MORPHOLOGY_FEATURES_KEY)

        # Choose the cache keyword arguments.  The deprecated dataframe
        # flag is only consulted when caching is enabled.
        if not self.cache:
            query_args = Cache.nocache_json()
        elif dataframe:
            warnings.warn("dataframe argument is deprecated.")
            query_args = Cache.cache_csv_dataframe()
        else:
            query_args = Cache.cache_csv_json()

        query_args['strategy'] = 'lazy'
        query_args['path'] = path

        return self.api.get_morphology_features(**query_args)
def test_file_download_cached_file(nrrd_read, safe_mkdir, mca, cache, file_exists):
    """With strategy='file' the decorated downloader reads straight through
    nrrd_read and never contacts the server or creates directories,
    regardless of what the mocked os.path.exists reports."""
    with patch.object(mca, "retrieve_file_over_http") as mock_retrieve:
        # pathfinder: positional arg 3 is the save path, arg 1 the file name.
        @cacheable(reader=nrrd_read,
                pathfinder=Cache.pathfinder(file_name_position=3,
                                            secondary_file_name_position=1))
        def download_volumetric_data(data_path,
                                    file_name,
                                    voxel_resolution=None,
                                    save_file_path=None,
                                    release=None,
                                    coordinate_framework=None):
            url = mca.build_volumetric_data_download_url(data_path,
                                                        file_name,
                                                        voxel_resolution,
                                                        release,
                                                        coordinate_framework)

            mca.retrieve_file_over_http(url, save_file_path)

        # file_exists parametrizes whether the cached file appears present.
        with patch('os.path.exists',
                Mock(name="os.path.exists",
                        return_value=file_exists)) as mkdir:
            nrrd_read.reset_mock()

            download_volumetric_data(MCA.AVERAGE_TEMPLATE,
                                    'annotation_10.nrrd',
                                    MCA.VOXEL_RESOLUTION_10_MICRONS,
                                    'volumetric.nrrd',
                                    MCA.CCF_2016,
                                    strategy='file')

        assert not mock_retrieve.called, 'server should not have been called'
        assert not safe_mkdir.called, 'safe_mkdir should not have been called.'
        nrrd_read.assert_called_once_with('volumetric.nrrd')
def test_cacheable_csv_json(mkdir, dictwriter, ju_read_url_get, ju_read,
                            ju_write, read_csv, mock_read_json):
    @cacheable()
    def get_hemispheres():
        return RmaApi().model_query(model='Hemisphere')

    with patch(builtins.__name__ + '.open',
               mock_open(),
               create=True) as open_mock:
        open_mock.return_value.write = MagicMock()
        df = get_hemispheres(path='/xyz/example.csv',
                             strategy='create',
                             **Cache.cache_csv_json())

    assert 'whatever' in df[0]

    ju_read_url_get.assert_called_once_with(
        'http://api.brain-map.org/api/v2/data/query.json?q=model::Hemisphere')
    read_csv.assert_called_once_with('/xyz/example.csv', parse_dates=True)
    dictwriter.return_value.writerow.assert_called()
    assert not mock_read_json.called, 'pj.read_json should not have been called'
    assert not ju_write.called, 'ju.write should not have been called'
    assert not ju_read.called, 'json read should not have been called'
    mkdir.assert_called_once_with('/xyz')
    open_mock.assert_called_once_with('/xyz/example.csv', 'w')
    def get_structure_tree(self, file_name=None, structure_graph_id=1):
        """
        Read the list of adult mouse structures and return a StructureTree
        instance.

        Parameters
        ----------

        file_name: string
            File name to save/read the structures table.  If file_name is None,
            the file_name will be pulled out of the manifest.  If caching
            is disabled, no file will be saved. Default is None.
        structure_graph_id: int
            Build a tree using structures only from the identified structure
            graph.
        """

        file_name = self.get_cache_path(file_name, self.STRUCTURE_TREE_KEY)

        # pre cleans the records before they are cached; post re-cleans on
        # every read and wraps the result in a StructureTree.
        return OntologiesApi(self.api.api_url).get_structures_with_sets(
            strategy='lazy',
            path=file_name,
            pre=StructureTree.clean_structures,
            post=lambda x: StructureTree(StructureTree.clean_structures(x)), 
            structure_graph_ids=structure_graph_id,
            **Cache.cache_json())
    def _get_stimulus_mappings(self, file_name=None):
        """ Returns a mapping of which metrics are related to which stimuli. Internal use only. """

        path = self.get_cache_path(file_name, self.STIMULUS_MAPPINGS_KEY)

        # Lazy strategy: only hit the API when the cached file is missing.
        return self.api.get_stimulus_mappings(strategy='lazy',
                                              path=path,
                                              **Cache.cache_json())
    def get_cells(self, file_name=None,
                  require_morphology=False,
                  require_reconstruction=False,
                  reporter_status=None,
                  species=None,
                  simple=True):
        """
        Download metadata for all cells in the database and optionally return a
        subset filtered by whether or not they have a morphology or reconstruction.

        Parameters
        ----------

        file_name: string
            File name to save/read the cell metadata as JSON.  If file_name is None,
            the file_name will be pulled out of the manifest.  If caching
            is disabled, no file will be saved. Default is None.

        require_morphology: boolean
            Filter out cells that have no morphological images.

        require_reconstruction: boolean
            Filter out cells that have no morphological reconstructions.

        reporter_status: list
            Filter for cells that have one or more cell reporter statuses.

        species: list
            Filter for cells that belong to one or more species.  If None, return all.
            Must be one of [ CellTypesApi.MOUSE, CellTypesApi.HUMAN ].
        """

        path = self.get_cache_path(file_name, self.CELLS_KEY)

        all_cells = self.api.list_cells_api(strategy='lazy',
                                            path=path,
                                            **Cache.cache_json())

        # A bare string is treated as a one-element status list.
        if isinstance(reporter_status, string_types):
            reporter_status = [reporter_status]

        # Apply the requested filters on the way out.
        return self.api.filter_cells_api(all_cells,
                                         require_morphology,
                                         require_reconstruction,
                                         reporter_status,
                                         species,
                                         simple)
Beispiel #7
0
def test_cacheable_no_cache_csv(mock_json_utilities, mock_dataframe):
    """With strategy='file' and csv cache kwargs, the result is read from
    the csv on disk and the server/json helpers are never touched."""
    @cacheable()
    def query_hemispheres():
        return RmaApi().model_query(model='Hemisphere')

    result = query_hemispheres(path='/xyz/abc/example.csv',
                               strategy='file',
                               **Cache.cache_csv())

    assert result.loc[:, 'whatever'][0]

    mock_dataframe.from_csv.assert_called_once_with('/xyz/abc/example.csv')
    assert not mock_json_utilities.read_url_get.called, \
        'server should not have been called'
    assert not mock_json_utilities.write.called, 'json write should not have been called'
    assert not mock_json_utilities.read.called, 'json read should not have been called'
Beispiel #8
0
def test_cacheable_no_cache_csv(read_csv, ju_read_url_get, ju_read, ju_write):
    """strategy='file' with csv cache kwargs: the result comes from read_csv
    on the given path; no server query and no json I/O happens."""
    @cacheable()
    def get_hemispheres():
        return RmaApi().model_query(model='Hemisphere')

    df = get_hemispheres(path='/xyz/abc/example.csv',
                         strategy='file',
                         **Cache.cache_csv())

    assert df.loc[:, 'whatever'][0]

    assert not ju_read_url_get.called
    read_csv.assert_called_once_with('/xyz/abc/example.csv', parse_dates=True)
    assert not ju_write.called, 'json write should not have been called'
    assert not ju_read.called, 'json read should not have been called'
Beispiel #9
0
    def get_cells(self,
                  file_name=None,
                  require_morphology=False,
                  require_reconstruction=False,
                  reporter_status=None,
                  species=None,
                  simple=True):
        """
        Download metadata for all cells in the database and optionally return a
        subset filtered by whether or not they have a morphology or reconstruction.

        Parameters
        ----------

        file_name: string
            File name to save/read the cell metadata as JSON.  If file_name is None,
            the file_name will be pulled out of the manifest.  If caching
            is disabled, no file will be saved. Default is None.

        require_morphology: boolean
            Filter out cells that have no morphological images.

        require_reconstruction: boolean
            Filter out cells that have no morphological reconstructions.

        reporter_status: list
            Filter for cells that have one or more cell reporter statuses.

        species: list
            Filter for cells that belong to one or more species.  If None, return all.
            Must be one of [ CellTypesApi.MOUSE, CellTypesApi.HUMAN ].

        simple: boolean
            Passed through to filter_cells_api; presumably returns a
            simplified record set when True -- confirm against the API.
        """

        file_name = self.get_cache_path(file_name, self.CELLS_KEY)

        cells = self.api.list_cells_api(path=file_name,
                                        strategy='lazy',
                                        **Cache.cache_json())

        # Allow a single status string in place of a list.
        if isinstance(reporter_status, string_types):
            reporter_status = [reporter_status]

        # filter the cells on the way out
        cells = self.api.filter_cells_api(cells, require_morphology,
                                          require_reconstruction,
                                          reporter_status, species, simple)

        return cells
Beispiel #10
0
def test_cacheable_no_save_dataframe(to_csv, from_csv, ju_read_url_get,
                                     ju_read, ju_write):
    """nocache_dataframe(): query the server and return a DataFrame without
    touching any csv or json readers/writers."""
    @cacheable()
    def query_hemispheres():
        return RmaApi().model_query(model='Hemisphere')

    result = query_hemispheres(**Cache.nocache_dataframe())

    assert result.loc[:, 'whatever'][0]

    ju_read_url_get.assert_called_once_with(
        'http://api.brain-map.org/api/v2/data/query.json?q=model::Hemisphere')
    assert not from_csv.called, 'from_csv should not have been called'
    assert not to_csv.called, 'to_csv should not have been called'
    assert not ju_read.called, 'json read should not have been called'
    assert not ju_write.called, 'json write should not have been called'
Beispiel #11
0
def test_cacheable_lazy_csv_file_exists(mock_json_utilities, mock_dataframe):
    """lazy strategy with an existing csv: read the cached file and never
    query the server or the json helpers."""
    @cacheable()
    def query_hemispheres():
        return RmaApi().model_query(model='Hemisphere')

    # Pretend the cached csv is already on disk.
    with patch('os.path.exists', MagicMock(return_value=True)):
        result = query_hemispheres(path='/xyz/abc/example.csv',
                                   strategy='lazy',
                                   **Cache.cache_csv())

    assert result.loc[:, 'whatever'][0]

    mock_dataframe.from_csv.assert_called_once_with('/xyz/abc/example.csv')
    assert not mock_json_utilities.read_url_get.called
    assert not mock_json_utilities.write.called, 'json write should not have been called'
    assert not mock_json_utilities.read.called, 'json read should not have been called'
def test_cacheable_no_save_dataframe(to_csv, read_csv, ju_read_url_get,
                                     ju_read, ju_write):
    """Cache.nocache_dataframe(): query the server and return a DataFrame
    with no csv or json file traffic at all."""
    @cacheable()
    def get_hemispheres():
        return RmaApi().model_query(model='Hemisphere')

    df = get_hemispheres(**Cache.nocache_dataframe())

    assert df.loc[:, 'whatever'][0]

    ju_read_url_get.assert_called_once_with(
        'http://api.brain-map.org/api/v2/data/query.json?q=model::Hemisphere')
    assert not to_csv.called, 'to_csv should not have been called'
    assert not read_csv.called, 'read_csv should not have been called'
    assert not ju_write.called, 'json write should not have been called'
    assert not ju_read.called, 'json read should not have been called'
    def get_affine_parameters(self, section_data_set_id, direction='trv', file_name=None):
        ''' Extract the parameters of the 3D affine transformation mapping this 
        section data set's image-space stack to CCF-space (or vice-versa).

        Parameters
        ----------
        section_data_set_id : int
            download the parameters for this data set.
        direction : str, optional
            Valid options are:
                trv : "transform from reference to volume". Maps CCF points to 
                    image space points. If you are resampling data into CCF, 
                    this is the direction you want.
                tvr : "transform from volume to reference". Maps image space 
                    points to CCF points.
        file_name : str
            If provided, store the downloaded file here.

        Returns
        -------
        alignment : numpy.ndarray
            4 X 3 matrix. In order to transform a point [X_1, X_2, X_3] run 
            np.dot([X_1, X_2, X_3, 1], alignment). To build a SimpleITK 
            affine transform run:
                transform = sitk.AffineTransform(3)
                transform.SetParameters(alignment.flatten())

        Raises
        ------
        ArgumentError
            If direction is not 'trv' or 'tvr'.
        '''

        if direction not in ('trv', 'tvr'):
            # NOTE(review): ArgumentError is not a builtin -- confirm it is
            # imported/defined in this module, otherwise this line raises
            # NameError instead.
            raise ArgumentError('invalid direction: {}. direction must be one of tvr, trv'.format(direction))

        file_name = self.get_cache_path(file_name, self.ALIGNMENT3D_KEY)

        raw_alignment = self.api.download_alignment3d(
            strategy='lazy',
            path=file_name,
            section_data_set_id=section_data_set_id,
            **Cache.cache_json())

        # Response keys look like '<direction>_<index>'; the numeric suffix is
        # the flat index into the 4 x 3 alignment matrix.  Raw string avoids
        # the invalid '\d' escape warning.
        alignment_re = re.compile(r'{}_(?P<index>\d+)'.format(direction))
        alignment = np.zeros((4, 3), dtype=float)

        for entry, value in raw_alignment.items():
            match = alignment_re.match(entry)
            if match is not None:
                alignment.flat[int(match.group('index'))] = value

        return alignment
def test_excpt(mkdir, ju_read_url_get, ju_read, ju_write):
    """An excpt clause must be rendered into the RMA url as an
    options[except$eq...] filter, with the result cached as json."""
    @cacheable()
    def query_hemispheres_excpt():
        return RmaApi().model_query(model='Hemisphere',
                                    excpt=['symbol'])

    data = query_hemispheres_excpt(path='/xyz/abc/example.json',
                                   strategy='create',
                                   **Cache.cache_json())

    assert 'whatever' in data[0]

    mkdir.assert_called_once_with('/xyz/abc')
    ju_read_url_get.assert_called_once_with(
        'http://api.brain-map.org/api/v2/data/query.json?q=model::Hemisphere,rma::options%5Bexcept$eqsymbol%5D')
    ju_write.assert_called_once_with('/xyz/abc/example.json', _msg)
    ju_read.assert_called_once_with('/xyz/abc/example.json')
def test_cacheable_lazy_csv_file_exists(read_csv, ju_read_url_get, ju_read,
                                        ju_write):
    """lazy strategy with an existing csv file: read from disk via read_csv,
    skipping the server and the json helpers."""
    @cacheable()
    def get_hemispheres():
        return RmaApi().model_query(model='Hemisphere')

    # Pretend the cached csv already exists.
    with patch('os.path.exists', MagicMock(return_value=True)) as ope:
        df = get_hemispheres(path='/xyz/abc/example.csv',
                             strategy='lazy',
                             **Cache.cache_csv())

    assert df.loc[:, 'whatever'][0]

    assert not ju_read_url_get.called
    read_csv.assert_called_once_with('/xyz/abc/example.csv', parse_dates=True)
    assert not ju_write.called, 'json write should not have been called'
    assert not ju_read.called, 'json read should not have been called'
Beispiel #16
0
def test_excpt(mkdir, ju_read_url_get, ju_read, ju_write):
    """model_query's excpt argument should appear in the RMA url as an
    options[except$eq...] clause; the result is cached to json (_msg is
    presumably the expected payload defined at module level)."""
    @cacheable()
    def get_hemispheres_excpt():
        return RmaApi().model_query(model='Hemisphere',
                                    excpt=['symbol'])

    df = get_hemispheres_excpt(path='/xyz/abc/example.json',
                               strategy='create',
                               **Cache.cache_json())

    assert 'whatever' in df[0]

    ju_read_url_get.assert_called_once_with(
        'http://api.brain-map.org/api/v2/data/query.json?q=model::Hemisphere,rma::options%5Bexcept$eqsymbol%5D')
    ju_write.assert_called_once_with('/xyz/abc/example.json', _msg)
    ju_read.assert_called_once_with('/xyz/abc/example.json')
    mkdir.assert_called_once_with('/xyz/abc')
Beispiel #17
0
def test_cacheable_json(from_csv, mkdir, ju_read_url_get, ju_read, ju_write):
    """create strategy + json cache kwargs: query the server, write the
    json cache file, read it back; csv machinery stays untouched."""
    @cacheable()
    def query_hemispheres():
        return RmaApi().model_query(model='Hemisphere')

    data = query_hemispheres(path='/xyz/abc/example.json',
                             strategy='create',
                             **Cache.cache_json())

    assert 'whatever' in data[0]

    ju_read_url_get.assert_called_once_with(
        'http://api.brain-map.org/api/v2/data/query.json?q=model::Hemisphere')
    ju_write.assert_called_once_with('/xyz/abc/example.json', _msg)
    ju_read.assert_called_once_with('/xyz/abc/example.json')
    assert not from_csv.called, 'from_csv should not have been called'
def test_cacheable_json(read_csv, mkdir, ju_read_url_get, ju_read, ju_write):
    """create strategy with Cache.cache_json(): fetch from the server, write
    the json cache file, then read it back; read_csv is never used."""
    @cacheable()
    def get_hemispheres():
        return RmaApi().model_query(model='Hemisphere')

    df = get_hemispheres(path='/xyz/abc/example.json',
                         strategy='create',
                         **Cache.cache_json())

    assert 'whatever' in df[0]

    ju_read_url_get.assert_called_once_with(
        'http://api.brain-map.org/api/v2/data/query.json?q=model::Hemisphere')
    assert not read_csv.called, 'read_csv should not have been called'
    ju_write.assert_called_once_with('/xyz/abc/example.json',
                                                      _msg)
    ju_read.assert_called_once_with('/xyz/abc/example.json')
    def get_ephys_sweeps(self, specimen_id, file_name=None):
        """
        Download sweep metadata for a single cell specimen.

        Parameters
        ----------

        specimen_id: int
             ID of a cell.

        file_name: string
            Optional path for the cached sweep metadata; resolved through
            the manifest when None.
        """

        path = self.get_cache_path(
            file_name, self.EPHYS_SWEEPS_KEY, specimen_id)

        # Lazy strategy: only query the API when the cache file is missing.
        return self.api.get_ephys_sweeps(specimen_id,
                                         path=path,
                                         strategy='lazy',
                                         **Cache.cache_json())
Beispiel #20
0
    def get_ephys_sweeps(self, specimen_id, file_name=None):
        """
        Download sweep metadata for a single cell specimen.

        Parameters
        ----------

        specimen_id: int
             ID of a cell.

        file_name: string
            Optional path for the cached sweep metadata; resolved through
            the manifest when None.
        """

        file_name = self.get_cache_path(file_name, self.EPHYS_SWEEPS_KEY,
                                        specimen_id)

        # lazy: only hit the API when the cached file is missing.
        sweeps = self.api.get_ephys_sweeps(specimen_id,
                                           strategy='lazy',
                                           path=file_name,
                                           **Cache.cache_json())

        return sweeps
def test_cacheable_json_dataframe(mkdir, ju_read_url_get, ju_read, ju_write,
                                  read_csv, mock_read_json):
    """cache_json_dataframe(): json is written by ju.write and read back with
    read_json(orient='records'); csv and plain json reads are unused."""
    @cacheable()
    def query_hemispheres():
        return RmaApi().model_query(model='Hemisphere')

    result = query_hemispheres(path='/xyz/abc/example.json',
                               strategy='create',
                               **Cache.cache_json_dataframe())

    assert result.loc[:, 'whatever'][0]

    mkdir.assert_called_once_with('/xyz/abc')
    ju_read_url_get.assert_called_once_with(
        'http://api.brain-map.org/api/v2/data/query.json?q=model::Hemisphere')
    ju_write.assert_called_once_with('/xyz/abc/example.json', _msg)
    mock_read_json.assert_called_once_with('/xyz/abc/example.json',
                                           orient='records')
    assert not read_csv.called, 'read_csv should not have been called'
    assert not ju_read.called, 'json read should not have been called'
Beispiel #22
0
def test_cacheable_json_dataframe(mkdir, ju_read_url_get, ju_read, ju_write,
                                  from_csv, mock_read_json):
    """cache_json_dataframe(): write json via ju.write, read it back with
    read_json(orient='records'); from_csv and ju.read stay unused."""
    @cacheable()
    def get_hemispheres():
        return RmaApi().model_query(model='Hemisphere')

    df = get_hemispheres(path='/xyz/abc/example.json',
                         strategy='create',
                         **Cache.cache_json_dataframe())

    assert df.loc[:, 'whatever'][0]

    ju_read_url_get.assert_called_once_with(
        'http://api.brain-map.org/api/v2/data/query.json?q=model::Hemisphere')
    assert not from_csv.called, 'from_csv should not have been called'
    mock_read_json.assert_called_once_with('/xyz/abc/example.json',
                                      orient='records')
    ju_write.assert_called_once_with('/xyz/abc/example.json', _msg)
    assert not ju_read.called, 'json read should not have been called'
    mkdir.assert_called_once_with('/xyz/abc')
def test_file_download_lazy(nrrd_read, safe_mkdir, mca, cache, file_exists):
    """lazy strategy: the server is hit only when the target file is absent;
    either way the data is read back through nrrd_read."""
    with patch.object(mca, "retrieve_file_over_http") as mock_retrieve:
        # pathfinder: positional arg 3 is the save path, arg 1 the file name.
        @cacheable(strategy='lazy',
                reader=nrrd_read,
                pathfinder=Cache.pathfinder(file_name_position=3,
                                            secondary_file_name_position=1))
        def download_volumetric_data(data_path,
                                    file_name,
                                    voxel_resolution=None,
                                    save_file_path=None,
                                    release=None,
                                    coordinate_framework=None):
            url = mca.build_volumetric_data_download_url(data_path,
                                                        file_name,
                                                        voxel_resolution,
                                                        release,
                                                        coordinate_framework)

            mca.retrieve_file_over_http(url, save_file_path)

        # file_exists parametrizes whether the cached nrrd appears on disk.
        with patch('os.path.exists',
                Mock(name="os.path.exists",
                        return_value=file_exists)) as mkdir:
            nrrd_read.reset_mock()
            download_volumetric_data(MCA.AVERAGE_TEMPLATE,
                                    'annotation_10.nrrd',
                                    MCA.VOXEL_RESOLUTION_10_MICRONS,
                                    'volumetric.nrrd',
                                    MCA.CCF_2016,
                                    strategy='lazy')

        if file_exists:
            assert not mock_retrieve.called, 'server call not needed when file exists'
        else:
            mock_retrieve.assert_called_once_with(
                'http://download.alleninstitute.org/informatics-archive/annotation/ccf_2016/mouse_ccf/average_template/annotation_10.nrrd',
                'volumetric.nrrd')
        assert not safe_mkdir.called, 'safe_mkdir should not have been called.'
        nrrd_read.assert_called_once_with('volumetric.nrrd')
Beispiel #24
0
def test_cacheable_lazy_csv_no_file(mkdir, dictwriter, mock_json_utilities,
                                    mock_dataframe):
    """lazy strategy with no csv on disk: query the server, write the csv
    via the patched DictWriter, then read it back."""
    @cacheable()
    def get_hemispheres():
        return RmaApi().model_query(model='Hemisphere')

    # No cached file exists, so the lazy strategy must fetch and create it.
    with patch('os.path.exists', MagicMock(return_value=False)) as ope:
        with patch(builtins.__name__ + '.open', mock_open(),
                   create=True) as open_mock:
            open_mock.return_value.write = MagicMock()
            df = get_hemispheres(path='/xyz/abc/example.csv',
                                 strategy='lazy',
                                 **Cache.cache_csv())

    assert df.loc[:, 'whatever'][0]

    mock_json_utilities.read_url_get.assert_called_once_with(
        'http://api.brain-map.org/api/v2/data/query.json?q=model::Hemisphere')
    open_mock.assert_called_once_with('/xyz/abc/example.csv', 'w')
    dictwriter.return_value.writerow.assert_called()
    mock_dataframe.from_csv.assert_called_once_with('/xyz/abc/example.csv')
    assert not mock_json_utilities.write.called, 'json write should not have been called'
    assert not mock_json_utilities.read.called, 'json read should not have been called'
Beispiel #25
0
def test_cacheable_csv_dataframe(from_csv, dictwriter, mock_json_utilities,
                                 mock_dataframe):
    """create strategy + cache_csv_dataframe(): fetch from the server, write
    the csv, read the DataFrame back; json helpers unused."""
    @cacheable()
    def get_hemispheres():
        return RmaApi().model_query(model='Hemisphere')

    # Patch directory creation and open() so no filesystem work happens.
    with patch('allensdk.config.manifest.Manifest.safe_mkdir') as mkdir:
        with patch(builtins.__name__ + '.open', mock_open(),
                   create=True) as open_mock:
            open_mock.return_value.write = MagicMock()
            df = get_hemispheres(path='/xyz/abc/example.txt',
                                 strategy='create',
                                 **Cache.cache_csv_dataframe())

    assert df.loc[:, 'whatever'][0]

    mock_json_utilities.read_url_get.assert_called_once_with(
        'http://api.brain-map.org/api/v2/data/query.json?q=model::Hemisphere')
    mock_dataframe.from_csv.assert_called_once_with('/xyz/abc/example.txt')
    assert not mock_json_utilities.write.called, 'write should not have been called'
    assert not mock_json_utilities.read.called, 'read should not have been called'
    mkdir.assert_called_once_with('/xyz/abc')
    open_mock.assert_called_once_with('/xyz/abc/example.txt', 'w')
def test_cacheable_lazy_csv_no_file(mkdir, dictwriter, ju_read_url_get,
                                    ju_read, ju_write, read_csv):
    """lazy strategy with no csv present: query the server, write the csv
    via the patched DictWriter, then read it back with read_csv."""
    @cacheable()
    def get_hemispheres():
        return RmaApi().model_query(model='Hemisphere')

    # No cached file exists, so the lazy strategy must fetch and create it.
    with patch('os.path.exists', MagicMock(return_value=False)) as ope:
        with patch(builtins.__name__ + '.open',
                   mock_open(),
                   create=True) as open_mock:
            open_mock.return_value.write = MagicMock()
            df = get_hemispheres(path='/xyz/abc/example.csv',
                                 strategy='lazy',
                                 **Cache.cache_csv())

    assert df.loc[:, 'whatever'][0]

    ju_read_url_get.assert_called_once_with(
        'http://api.brain-map.org/api/v2/data/query.json?q=model::Hemisphere')
    open_mock.assert_called_once_with('/xyz/abc/example.csv', 'w')
    dictwriter.return_value.writerow.assert_called()
    read_csv.assert_called_once_with('/xyz/abc/example.csv', parse_dates=True)
    assert not ju_write.called, 'json write should not have been called'
    assert not ju_read.called, 'json read should not have been called'
Beispiel #27
0
def test_cacheable_csv_json(mkdir, dictwriter, mock_json_utilities,
                            mock_dataframe, mock_read_json):
    """cache_csv_json(): save the query as csv, reload it, and return
    json-style records; json read/write helpers are bypassed."""
    @cacheable()
    def get_hemispheres():
        return RmaApi().model_query(model='Hemisphere')

    # Patch module-level open() so the csv write is captured, not persisted.
    with patch(builtins.__name__ + '.open', mock_open(),
               create=True) as open_mock:
        open_mock.return_value.write = MagicMock()
        df = get_hemispheres(path='/xyz/example.csv',
                             strategy='create',
                             **Cache.cache_csv_json())

    assert 'whatever' in df[0]

    mock_json_utilities.read_url_get.assert_called_once_with(
        'http://api.brain-map.org/api/v2/data/query.json?q=model::Hemisphere')
    mock_dataframe.from_csv.assert_called_once_with('/xyz/example.csv')
    dictwriter.return_value.writerow.assert_called()
    assert not mock_read_json.called, 'pj.read_json should not have been called'
    assert not mock_json_utilities.write.called, 'ju.write should not have been called'
    assert not mock_json_utilities.read.called, 'json read should not have been called'
    mkdir.assert_called_once_with('/xyz')
    open_mock.assert_called_once_with('/xyz/example.csv', 'w')
def test_cacheable_csv_dataframe(read_csv, dictwriter, ju_read_url_get,
                                 ju_read, ju_write):
    """create strategy + cache_csv_dataframe(): fetch from the server, write
    the csv, read the DataFrame back via read_csv; json I/O unused."""
    @cacheable()
    def get_hemispheres():
        return RmaApi().model_query(model='Hemisphere')

    # Patch mkdir and open() so nothing touches the real filesystem.
    with patch('allensdk.config.manifest.Manifest.safe_mkdir') as mkdir:
        with patch(builtins.__name__ + '.open',
                   mock_open(),
                   create=True) as open_mock:
            open_mock.return_value.write = MagicMock()
            df = get_hemispheres(path='/xyz/abc/example.txt',
                                 strategy='create',
                                 **Cache.cache_csv_dataframe())

    assert df.loc[:, 'whatever'][0]

    ju_read_url_get.assert_called_once_with(
        'http://api.brain-map.org/api/v2/data/query.json?q=model::Hemisphere')
    read_csv.assert_called_once_with('/xyz/abc/example.txt', parse_dates=True)
    assert not ju_write.called, 'write should not have been called'
    assert not ju_read.called, 'read should not have been called'
    mkdir.assert_called_once_with('/xyz/abc')
    open_mock.assert_called_once_with('/xyz/abc/example.txt', 'w')
def test_file_kwarg(nrrd_read, safe_mkdir, mca, cache, file_exists):
    """When pathfinder declares path_keyword='save_file_path', an explicit
    save_file_path kwarg overrides the positional path: the read targets
    'file.nrrd', not 'volumetric.nrrd'."""
    with patch.object(mca, "retrieve_file_over_http") as mock_retrieve:
        @cacheable(reader=nrrd_read,
                pathfinder=Cache.pathfinder(file_name_position=3,
                                            secondary_file_name_position=1,
                                            path_keyword='save_file_path'))
        def download_volumetric_data(data_path,
                                    file_name,
                                    voxel_resolution=None,
                                    save_file_path=None,
                                    release=None,
                                    coordinate_framework=None):
            url = mca.build_volumetric_data_download_url(data_path,
                                                        file_name,
                                                        voxel_resolution,
                                                        release,
                                                        coordinate_framework)

            mca.retrieve_file_over_http(url, save_file_path)

        # file_exists parametrizes what the mocked os.path.exists reports.
        with patch('os.path.exists',
                Mock(name="os.path.exists",
                        return_value=file_exists)) as mkdir:
            nrrd_read.reset_mock()

            download_volumetric_data(MCA.AVERAGE_TEMPLATE,
                                    'annotation_10.nrrd',
                                    MCA.VOXEL_RESOLUTION_10_MICRONS,
                                    'volumetric.nrrd',
                                    MCA.CCF_2016,
                                    strategy='file',
                                    save_file_path='file.nrrd' )

        assert not mock_retrieve.called, 'server should not have been called'
        assert not safe_mkdir.called, 'safe_mkdir should not have been called.'
        nrrd_read.assert_called_once_with('file.nrrd')
class ReferenceSpaceApi(RmaApi):
    """Client for downloading 3-D reference-space data (annotation volumes,
    registration templates, structure masks and meshes) from the Allen
    Institute informatics archive."""

    # Top-level archive directories for the volumetric data sets.
    AVERAGE_TEMPLATE = 'average_template'
    ARA_NISSL = 'ara_nissl'
    # Annotation volume releases, selected by project/year.
    MOUSE_2011 = 'annotation/mouse_2011'
    DEVMOUSE_2012 = 'annotation/devmouse_2012'
    CCF_2015 = 'annotation/ccf_2015'
    CCF_2016 = 'annotation/ccf_2016'
    CCF_2017 = 'annotation/ccf_2017'
    CCF_VERSION_DEFAULT = CCF_2017

    # Supported isotropic voxel sizes, in microns.
    VOXEL_RESOLUTION_10_MICRONS = 10
    VOXEL_RESOLUTION_25_MICRONS = 25
    VOXEL_RESOLUTION_50_MICRONS = 50
    VOXEL_RESOLUTION_100_MICRONS = 100

    def __init__(self, base_uri=None):
        """Initialize the client; ``base_uri`` is passed through to RmaApi."""
        super(ReferenceSpaceApi, self).__init__(base_uri=base_uri)

    @cacheable(strategy='create',
               reader=nrrd.read,
               pathfinder=Cache.pathfinder(file_name_position=3,
                                           path_keyword='file_name'))
    def download_annotation_volume(self, ccf_version, resolution, file_name):
        '''Download the annotation volume at a particular resolution.

        Parameters
        ----------
        ccf_version: string
            Reference space version to download.  When None, the class
            default ("annotation/ccf_2017") is used.
        resolution: int
            Desired resolution to download in microns.
            Must be 10, 25, 50, or 100.
        file_name: string
            Where to save the annotation volume.

        Note: the parameters must be used as positional parameters, not keywords
        '''
        effective_version = ccf_version
        if effective_version is None:
            effective_version = ReferenceSpaceApi.CCF_VERSION_DEFAULT

        # Server-side file names encode the resolution.
        remote_name = 'annotation_%d.nrrd' % resolution
        self.download_volumetric_data(effective_version,
                                      remote_name,
                                      save_file_path=file_name)

    @cacheable(strategy='create',
               reader=sitk_utilities.read_ndarray_with_sitk,
               pathfinder=Cache.pathfinder(file_name_position=3,
                                           path_keyword='file_name'))
    def download_mouse_atlas_volume(self, age, volume_type, file_name):
        '''Download a reference volume (annotation, grid annotation, atlas
        volume) from the mouse brain atlas project.

        Parameters
        ----------
        age : str
            Specify a mouse age for which to download the reference volume
        volume_type : str
            Specify the type of volume to download
        file_name : str
            Specify the path to the downloaded volume
        '''
        # Remote archives are named "<age>_<volume_type>.zip".
        archive_name = '{}_{}.zip'.format(age, volume_type)
        url = '/'.join([self.informatics_archive_endpoint,
                        'current-release',
                        'mouse_annotation',
                        archive_name])
        self.retrieve_file_over_http(url, file_name, zipped=True)

    @cacheable(strategy='create',
               reader=nrrd.read,
               pathfinder=Cache.pathfinder(file_name_position=2,
                                           path_keyword='file_name'))
    def download_template_volume(self, resolution, file_name):
        '''Download the registration template volume at a particular
        resolution.

        Parameters
        ----------
        resolution: int
            Desired resolution to download in microns.
            Must be 10, 25, 50, or 100.
        file_name: string
            Where to save the registration template volume.
        '''
        remote_name = 'average_template_%d.nrrd' % resolution
        self.download_volumetric_data(ReferenceSpaceApi.AVERAGE_TEMPLATE,
                                      remote_name,
                                      save_file_path=file_name)

    @cacheable(strategy='create',
               reader=nrrd.read,
               pathfinder=Cache.pathfinder(file_name_position=4,
                                           path_keyword='file_name'))
    def download_structure_mask(self, structure_id, ccf_version, resolution,
                                file_name):
        '''Download an indicator mask for a specific structure.

        Parameters
        ----------
        structure_id : int
            Unique identifier for the annotated structure
        ccf_version : string
            Which reference space version to download. Defaults to "annotation/ccf_2017"
        resolution : int
            Desired resolution to download in microns.  Must be 10, 25, 50, or 100.
        file_name : string
             Where to save the downloaded mask.

        '''

        if ccf_version is None:
            ccf_version = ReferenceSpaceApi.CCF_VERSION_DEFAULT

        structure_mask_dir = 'structure_masks_{0}'.format(resolution)
        data_path = '{0}/{1}/{2}'.format(ccf_version, 'structure_masks',
                                         structure_mask_dir)
        remote_file_name = 'structure_{0}.nrrd'.format(structure_id)

        try:
            self.download_volumetric_data(data_path,
                                          remote_file_name,
                                          save_file_path=file_name)
        except Exception:
            # BUG FIX: the log message contained an unfilled {0} placeholder
            # (cf. download_structure_mesh, which does call .format());
            # interpolate the structure id so the error log is actionable.
            self._file_download_log.error(
                '''We weren't able to download a structure mask for structure {0}. 
                                             You can instead build the mask locally using 
                                             ReferenceSpace.many_structure_masks'''
                .format(structure_id))
            raise

    @cacheable(strategy='create',
               reader=read_obj,
               pathfinder=Cache.pathfinder(file_name_position=3,
                                           path_keyword='file_name'))
    def download_structure_mesh(self, structure_id, ccf_version, file_name):
        '''Download a Wavefront obj file containing a triangulated 3d mesh
        built from an annotated structure.

        Parameters
        ----------
        structure_id : int
            Unique identifier for the annotated structure
        ccf_version : string
            Which reference space version to download. Defaults to "annotation/ccf_2017"
        file_name : string
             Where to save the downloaded mask.

        '''
        if ccf_version is None:
            ccf_version = ReferenceSpaceApi.CCF_VERSION_DEFAULT

        remote_file_name = '{0}.obj'.format(structure_id)
        data_path = '{0}/{1}'.format(ccf_version, 'structure_meshes')

        try:
            self.download_volumetric_data(data_path,
                                          remote_file_name,
                                          save_file_path=file_name)
        except Exception:
            # Log which structure failed, then let the caller see the error.
            self._file_download_log.error(
                'unable to download a structure mesh for structure {0}.'.
                format(structure_id))
            raise

    def build_volumetric_data_download_url(self,
                                           data_path,
                                           file_name,
                                           voxel_resolution=None,
                                           release=None,
                                           coordinate_framework=None):
        '''Construct url to download 3D reference model in NRRD format.

        Parameters
        ----------
        data_path : string
            'average_template', 'ara_nissl', 'annotation/ccf_{year}', 
            'annotation/mouse_2011', or 'annotation/devmouse_2012'
        file_name : string
            server-side file name, e.g. 'annotation_10.nrrd'
        voxel_resolution : int
            10, 25, 50 or 100.  Defaults to 10.  Note: not used in the
            url itself; callers encode the resolution in ``file_name``.
        release : string
            archive release directory.  Defaults to 'current-release'.
        coordinate_framework : string
            'mouse_ccf' (default) or 'mouse_annotation'

        Returns
        -------
        string
            The fully-assembled download url.

        Notes
        -----
        See: `3-D Reference Models <http://help.brain-map.org/display/mouseconnectivity/API#API-3DReferenceModels>`_
        for additional documentation.
        '''

        # Fill in server-side defaults for any unspecified components.
        if voxel_resolution is None:
            voxel_resolution = ReferenceSpaceApi.VOXEL_RESOLUTION_10_MICRONS

        if release is None:
            release = 'current-release'

        if coordinate_framework is None:
            coordinate_framework = 'mouse_ccf'

        url = ''.join([
            self.informatics_archive_endpoint,
            '/%s/%s/' % (release, coordinate_framework), data_path, '/',
            file_name
        ])

        return url

    def download_volumetric_data(self,
                                 data_path,
                                 file_name,
                                 voxel_resolution=None,
                                 save_file_path=None,
                                 release=None,
                                 coordinate_framework=None):
        '''Download 3D reference model in NRRD format.

        Parameters
        ----------
        data_path : string
            'average_template', 'ara_nissl', 'annotation/ccf_{year}', 
            'annotation/mouse_2011', or 'annotation/devmouse_2012'
        file_name : string
            server-side file name. 'annotation_10.nrrd' for example.
        voxel_resolution : int
            10, 25, 50 or 100
        save_file_path : string
            local destination; defaults to the server-side file name.
        coordinate_framework : string
            'mouse_ccf' (default) or 'mouse_annotation'

        Notes
        -----
        See: `3-D Reference Models <http://help.brain-map.org/display/mouseconnectivity/API#API-3DReferenceModels>`_
        for additional documentation.
        '''
        url = self.build_volumetric_data_download_url(data_path, file_name,
                                                      voxel_resolution,
                                                      release,
                                                      coordinate_framework)

        # Destination fallback chain: explicit path -> server-side name ->
        # a generic default (the last only applies when file_name is None).
        if save_file_path is None:
            save_file_path = (file_name if file_name is not None
                              else 'volumetric_data.nrrd')

        self.retrieve_file_over_http(url, save_file_path)
    def get_cell_specimens(self,
                           file_name=None,
                           ids=None,
                           experiment_container_ids=None,
                           include_failed=False,
                           simple=True,
                           filters=None):
        """ Return cell specimens that have certain properties.

        Parameters
        ----------
        file_name: string
            File name to save/read the cell specimens.  If None, the name
            is pulled out of the manifest.  If caching is disabled, no
            file will be saved. Default is None.

        ids: list
            List of cell specimen ids.

        experiment_container_ids: list
            List of experiment container ids.

        include_failed: bool
            Whether to include cells from failed experiment containers

        simple: boolean
            Whether to strip the (large) thumbnail properties from each
            returned record, leaving a more concise subset.

        filters: list of dicts
            List of filter dictionaries.  The Allen Brain Observatory web
            site can generate filters in this format; see
            BrainObservatoryApi.dataframe_query for the syntax.

        Returns
        -------
        list of dictionaries
        """
        file_name = self.get_cache_path(file_name, self.CELL_SPECIMENS_KEY)

        specimens = self.api.get_cell_metrics(
            path=file_name,
            strategy='lazy',
            pre=lambda rows: [row for row in rows],
            **Cache.cache_json())

        specimens = self.api.filter_cell_specimens(
            specimens,
            ids=ids,
            experiment_container_ids=experiment_container_ids,
            include_failed=include_failed,
            filters=filters)

        if simple:
            # Drop the thumbnail columns from every record.
            mappings = self._get_stimulus_mappings()
            thumbnail_keys = [m['item'] for m in mappings
                              if m['item_type'] == 'T' and m['level'] == 'R']
            for specimen in specimens:
                for key in thumbnail_keys:
                    del specimen[key]

        return specimens
    def get_ophys_experiments(self,
                              file_name=None,
                              ids=None,
                              experiment_container_ids=None,
                              targeted_structures=None,
                              imaging_depths=None,
                              cre_lines=None,
                              reporter_lines=None,
                              transgenic_lines=None,
                              stimuli=None,
                              session_types=None,
                              cell_specimen_ids=None,
                              include_failed=False,
                              require_eye_tracking=False,
                              simple=True):
        """ Get a list of ophys experiments matching certain criteria.

        Parameters
        ----------
        file_name: string
            File name to save/read the ophys experiments.  If None, the
            name is pulled out of the manifest.  If caching is disabled,
            no file will be saved. Default is None.

        ids: list
            List of ophys experiment ids.

        experiment_container_ids: list
            List of experiment container ids.

        targeted_structures: list
            List of structure acronyms.  Must be in the list returned by
            BrainObservatoryCache.get_all_targeted_structures().

        imaging_depths: list
            List of imaging depths.  Must be in the list returned by
            BrainObservatoryCache.get_all_imaging_depths().

        cre_lines: list
            List of cre lines.  Must be in the list returned by
            BrainObservatoryCache.get_all_cre_lines().

        reporter_lines: list
            List of reporter lines.  Must be in the list returned by
            BrainObservatoryCache.get_all_reporter_lines().

        transgenic_lines: list
            List of transgenic lines. Must be in the list returned by
            BrainObservatoryCache.get_all_cre_lines() or
            BrainObservatoryCache.get_all_reporter_lines().

        stimuli: list
            List of stimulus names.  Must be in the list returned by
            BrainObservatoryCache.get_all_stimuli().

        session_types: list
            List of stimulus session type names.  Must be in the list
            returned by BrainObservatoryCache.get_all_session_types().

        cell_specimen_ids: list
            Only include experiments that contain cells with these ids.

        include_failed: boolean
            Whether to include experiments from failed containers.

        require_eye_tracking: boolean
            If True, only return experiments that have eye tracking
            results. Default: False.

        simple: boolean
            Whether to simplify the dictionary properties to a more
            concise subset.

        Returns
        -------
        list of dictionaries
        """
        # Guard against the common mistake of passing a bare string where
        # a list of strings is expected.
        for value, label in ((targeted_structures, "targeted_structures"),
                             (cre_lines, "cre_lines"),
                             (reporter_lines, "reporter_lines"),
                             (transgenic_lines, "transgenic_lines"),
                             (stimuli, "stimuli"),
                             (session_types, "session_types")):
            _assert_not_string(value, label)

        file_name = self.get_cache_path(file_name, self.EXPERIMENTS_KEY)

        experiments = self.api.get_ophys_experiments(path=file_name,
                                                     strategy='lazy',
                                                     **Cache.cache_json())

        if cell_specimen_ids is not None:
            matching_cells = self.get_cell_specimens(ids=cell_specimen_ids)
            container_ids = set(cell['experiment_container_id']
                                for cell in matching_cells)
            # NOTE(review): when experiment_container_ids is supplied, the
            # cell containers are SUBTRACTED from it (not intersected);
            # preserved as-is from the original implementation.
            if experiment_container_ids is not None:
                experiment_container_ids = list(
                    set(experiment_container_ids) - container_ids)
            else:
                experiment_container_ids = list(container_ids)

        return self.api.filter_ophys_experiments(
            experiments,
            ids=ids,
            experiment_container_ids=experiment_container_ids,
            targeted_structures=targeted_structures,
            imaging_depths=imaging_depths,
            cre_lines=cre_lines,
            reporter_lines=reporter_lines,
            transgenic_lines=transgenic_lines,
            stimuli=stimuli,
            session_types=session_types,
            include_failed=include_failed,
            require_eye_tracking=require_eye_tracking,
            simple=simple)
# Write a subset of the structure table to CSV, then read it back.
summary_structures[['id',
                    'parent_structure_id',
                    'acronym']].to_csv('summary_structures.csv',
                                        index_label='structure_id')
reread = pd.read_csv('summary_structures.csv')

#===============================================================================
# example 10
#===============================================================================

# NOTE(review): itertuples() yields (index, name, parent_structure_id) for a
# two-column selection, so `id` is bound to the row index, not the 'id'
# column (and shadows the builtin) — confirm this is intended.
for id, name, parent_structure_id in summary_structures[['name',
                                                            'parent_structure_id']].itertuples():
    print("%d %s %d" % (id, name, parent_structure_id))

#===============================================================================
# example 11
#===============================================================================

from allensdk.api.cache import Cache

# Cache.wrap runs the wrapped query and writes the result to `path` when
# caching is enabled.
cache_writer = Cache()
do_cache=True
structures_from_api = \
    cache_writer.wrap(rma.model_query,
                        path='summary.csv',
                        cache=do_cache,
                        model='Structure',
                        criteria='[graph_id$eq1]',
                        num_rows='all')
# Duplicate (reformatted) copy of examples 10/11 above: dump a structure
# subset to CSV and read it back.
summary_structures[['id', 'parent_structure_id',
                    'acronym']].to_csv('summary_structures.csv',
                                       index_label='structure_id')
reread = pd.read_csv('summary_structures.csv')

#===============================================================================
# example 10
#===============================================================================

# NOTE(review): as above, `id` receives the row index from itertuples(),
# not the 'id' column — confirm intended.
for id, name, parent_structure_id in summary_structures[[
        'name', 'parent_structure_id'
]].itertuples():
    print("%d %s %d" % (id, name, parent_structure_id))

#===============================================================================
# example 11
#===============================================================================

from allensdk.api.cache import Cache

# Run the RMA query and cache the result to 'summary.csv'.
cache_writer = Cache()
do_cache = True
structures_from_api = \
    cache_writer.wrap(rma.model_query,
                        path='summary.csv',
                        cache=do_cache,
                        model='Structure',
                        criteria='[graph_id$eq1]',
                        num_rows='all')
# Beispiel #35
# 0
def cache():
    # Test helper returning a fresh, default-configured Cache.  Presumably
    # a pytest fixture whose @pytest.fixture decorator was lost in
    # extraction — verify against the original test module.
    return Cache()
    def get_ophys_experiments(self,
                              file_name=None,
                              ids=None,
                              experiment_container_ids=None,
                              targeted_structures=None,
                              imaging_depths=None,
                              cre_lines=None,
                              reporter_lines=None,
                              transgenic_lines=None,
                              stimuli=None,
                              session_types=None,
                              cell_specimen_ids=None,
                              include_failed=False,
                              require_eye_tracking=False,
                              simple=True):
        """ Get a list of ophys experiments matching certain criteria.

        Parameters
        ----------
        file_name: string
            File name to save/read the ophys experiments.  If file_name is None,
            the file_name will be pulled out of the manifest.  If caching
            is disabled, no file will be saved. Default is None.

        ids: list
            List of ophys experiment ids.

        experiment_container_ids: list
            List of experiment container ids.

        targeted_structures: list
            List of structure acronyms.  Must be in the list returned by
            BrainObservatoryCache.get_all_targeted_structures().

        imaging_depths: list
            List of imaging depths.  Must be in the list returned by
            BrainObservatoryCache.get_all_imaging_depths().

        cre_lines: list
            List of cre lines.  Must be in the list returned by
            BrainObservatoryCache.get_all_cre_lines().

        reporter_lines: list
            List of reporter lines.  Must be in the list returned by
            BrainObservatoryCache.get_all_reporter_lines().

        transgenic_lines: list
            List of transgenic lines. Must be in the list returned by
            BrainObservatoryCache.get_all_cre_lines() or
            BrainObservatoryCache.get_all_reporter_lines().

        stimuli: list
            List of stimulus names.  Must be in the list returned by
            BrainObservatoryCache.get_all_stimuli().

        session_types: list
            List of stimulus session type names.  Must be in the list returned by
            BrainObservatoryCache.get_all_session_types().

        cell_specimen_ids: list
            Only include experiments that contain cells with these ids.

        include_failed: boolean
            Whether or not to include experiments from failed experiment containers.

        simple: boolean
            Whether or not to simplify the dictionary properties returned by this method
            to a more concise subset.

        require_eye_tracking: boolean
            If True, only return experiments that have eye tracking results. Default: False.

        Returns
        -------
        list of dictionaries
        """
        # Reject bare strings where lists are expected.
        _assert_not_string(targeted_structures, "targeted_structures")
        _assert_not_string(cre_lines, "cre_lines")
        _assert_not_string(reporter_lines, "reporter_lines")
        _assert_not_string(transgenic_lines, "transgenic_lines")
        _assert_not_string(stimuli, "stimuli")
        _assert_not_string(session_types, "session_types")

        file_name = self.get_cache_path(file_name, self.EXPERIMENTS_KEY)

        exps = self.api.get_ophys_experiments(path=file_name,
                                              strategy='lazy',
                                              **Cache.cache_json())

        # NOTE: Ugly hack to update the 'fail_eye_tracking' field
        # which is using True/False values for the previous eye mapping
        # implementation. This will also need to be fixed in warehouse.
        # ----- Start of ugly hack -----
        response = self.api.template_query('brain_observatory_queries',
                                           'all_eye_mapping_files')

        # Sessions that have at least one eye-mapping file attached.
        session_ids_with_eye_tracking: set = {
            entry['attachable_id']
            for entry in response if entry['attachable_type'] == "OphysSession"
        }

        # NOTE(review): ophys_experiment_session_id_map is not defined in
        # this method or visible scope — presumably a module-level mapping
        # of experiment id -> session id; confirm it exists at runtime
        # (a missing name would raise NameError, not the KeyError handled
        # below).
        for indx, exp in enumerate(exps):
            try:
                ophys_session_id = ophys_experiment_session_id_map[exp['id']]
                if ophys_session_id in session_ids_with_eye_tracking:
                    exps[indx]['fail_eye_tracking'] = False
                else:
                    exps[indx]['fail_eye_tracking'] = True
            except KeyError:
                # Unknown experiment id: conservatively mark as failed.
                exps[indx]['fail_eye_tracking'] = True
        # ----- End of ugly hack -----

        if cell_specimen_ids is not None:
            cells = self.get_cell_specimens(ids=cell_specimen_ids)
            cell_container_ids = set(
                [cell['experiment_container_id'] for cell in cells])
            # NOTE(review): subtracts, rather than intersects, the cell
            # containers from the supplied ids — confirm intended.
            if experiment_container_ids is not None:
                experiment_container_ids = list(
                    set(experiment_container_ids) - cell_container_ids)
            else:
                experiment_container_ids = list(cell_container_ids)

        exps = self.api.filter_ophys_experiments(
            exps,
            ids=ids,
            experiment_container_ids=experiment_container_ids,
            targeted_structures=targeted_structures,
            imaging_depths=imaging_depths,
            cre_lines=cre_lines,
            reporter_lines=reporter_lines,
            transgenic_lines=transgenic_lines,
            stimuli=stimuli,
            session_types=session_types,
            include_failed=include_failed,
            require_eye_tracking=require_eye_tracking,
            simple=simple)

        return exps
# Beispiel #37
# 0
class CellTypesApi(RmaApi):
    """HTTP client for querying the Allen Cell Types Database."""

    # Well-known file types attached to cell records.
    NWB_FILE_TYPE = 'NWBDownload'
    SWC_FILE_TYPE = '3DNeuronReconstruction'
    MARKER_FILE_TYPE = '3DNeuronMarker'

    # Species filter values accepted by list_cells/filter_cells.
    MOUSE = 'Mus musculus'
    # BUG FIX: this string was garbled to 'H**o Sapiens' (scrape-censored);
    # restore the correct species name.
    HUMAN = 'Homo Sapiens'

    def __init__(self, base_uri=None):
        """Initialize the client; ``base_uri`` is passed through to RmaApi."""
        super(CellTypesApi, self).__init__(base_uri)

    @cacheable()
    def list_cells_api(self,
                       id=None,
                       require_morphology=False,
                       require_reconstruction=False,
                       reporter_status=None,
                       species=None):
        """Fetch cell records from the ApiCellTypesSpecimenDetail model.

        Only ``id`` narrows the query; the remaining parameters are unused
        here and kept for interface compatibility.
        """
        criteria = "[specimen__id$eq%d]" % id if id else None

        return self.model_query('ApiCellTypesSpecimenDetail',
                                criteria=criteria,
                                num_rows='all')

    @deprecated("please use list_cells_api instead")
    def list_cells(self, 
                   id=None, 
                   require_morphology=False, 
                   require_reconstruction=False, 
                   reporter_status=None, 
                   species=None):
        """
        Query the API for a list of all cells in the Cell Types Database.

        Parameters
        ----------
        id: int
            ID of a cell.  If not provided returns all matching cells.  

        require_morphology: boolean
            Only return cells that have morphology images.

        require_reconstruction: boolean
            Only return cells that have morphological reconstructions.

        reporter_status: list
            Return cells that have a particular cell reporter status.

        species: list
            Filter for cells that belong to one or more species.  If None, return all.
            Must be one of [ CellTypesApi.MOUSE, CellTypesApi.HUMAN ].

        Returns
        -------
        list
            Meta data for all cells.
        """

        # Either a single-cell lookup or the full passing-QC specimen set.
        if id:
            criteria = "[id$eq'%d']" % id
        else:
            criteria = "[is_cell_specimen$eq'true'],products[name$in'Mouse Cell Types','Human Cell Types'],ephys_result[failed$eqfalse]"
        
        include = ('structure,cortex_layer,donor(transgenic_lines,organism,conditions),specimen_tags,cell_soma_locations,' +
                   'ephys_features,data_sets,neuron_reconstructions,cell_reporter')

        cells = self.model_query(
            'Specimen', criteria=criteria, include=include, num_rows='all')

        # Flatten nested API structures into top-level keys on each record.
        for cell in cells:
            # specimen tags
            # NOTE(review): assumes tag names contain exactly one ' - '
            # separator; a second occurrence would raise ValueError.
            for tag in cell['specimen_tags']:
                tag_name, tag_value = tag['name'].split(' - ')
                tag_name = tag_name.replace(' ', '_')
                cell[tag_name] = tag_value

            # morphology and reconstuction
            cell['has_reconstruction'] = len(
                cell['neuron_reconstructions']) > 0
            cell['has_morphology'] = len(cell['data_sets']) > 0

            # transgenic line (last 'driver' line wins if several exist)
            cell['transgenic_line'] = None
            for tl in cell['donor']['transgenic_lines']:
                if tl['transgenic_line_type_name'] == 'driver':
                    cell['transgenic_line'] = tl['name']

            # cell reporter status
            cell['reporter_status'] = cell.get('cell_reporter', {}).get('name', None)

            # species
            cell['species'] = cell.get('donor',{}).get('organism',{}).get('name', None)

            # conditions (whitelist)
            condition_types = [ 'disease categories' ]
            condition_keys = dict(zip(condition_types, 
                                      [ ct.replace(' ', '_') for ct in condition_types ]))
            for ct, ck in condition_keys.items():
                cell[ck] = []

            conditions = cell.get('donor',{}).get('conditions', [])
            for condition in conditions:
                c_type, c_val = condition['name'].split(' - ')
                if c_type in condition_keys:
                    cell[condition_keys[c_type]].append(c_val)

        result = self.filter_cells(cells, require_morphology, require_reconstruction, reporter_status, species)

        return result

    def get_cell(self, id):
        '''Query the API for a single cell in the Cell Types Database.

        Returns
        -------
        dict or None
            Metadata for the requested cell, or None when no cell matches.
        '''
        matches = self.list_cells_api(id=id)
        return matches[0] if matches else None

    @cacheable()
    def get_ephys_sweeps(self, specimen_id):
        """Query the API for the sweeps recorded from one cell.

        Parameters
        ----------
        specimen_id: int
            Specimen ID of a cell.

        Returns
        -------
        list
            Sweep dictionaries for the cell, ordered by sweep number.
        """
        sweeps = self.model_query(
            'EphysSweep',
            criteria="[specimen_id$eq%d]" % specimen_id,
            num_rows='all')
        return sorted(sweeps, key=lambda sweep: sweep['sweep_number'])


    @deprecated("please use filter_cells_api")
    def filter_cells(self, cells, require_morphology, require_reconstruction, reporter_status, species):
        """Filter cell specimen records (as produced by ``list_cells``).

        Parameters
        ----------
        cells: list
            List of cell metadata dictionaries to be filtered

        require_morphology: boolean
            Keep only cells that have morphological images.

        require_reconstruction: boolean
            Keep only cells that have morphological reconstructions.

        reporter_status: list
            Keep only cells with one of these cell reporter statuses.

        species: list
            Keep only cells belonging to one of these species (case
            insensitive); None keeps all.  Use CellTypesApi.MOUSE /
            CellTypesApi.HUMAN.
        """
        if require_morphology:
            cells = [record for record in cells if record['has_morphology']]

        if require_reconstruction:
            cells = [record for record in cells
                     if record['has_reconstruction']]

        if reporter_status:
            cells = [record for record in cells
                     if record['reporter_status'] in reporter_status]

        if species:
            wanted = {name.lower() for name in species}
            cells = [record for record in cells
                     if record['donor']['organism']['name'].lower() in wanted]

        return cells

    def filter_cells_api(self, cells,
                         require_morphology=False,
                         require_reconstruction=False,
                         reporter_status=None,
                         species=None,
                         simple=True):
        """Filter raw API cell records; optionally reduce each record to a
        concise summary via ``simplify_cells_api``."""
        # Either morphology flag implies a reconstruction must exist.
        if require_morphology or require_reconstruction:
            cells = [record for record in cells
                     if record.get('nr__reconstruction_type') is not None]

        if reporter_status:
            cells = [record for record in cells
                     if record.get('cell_reporter_status') in reporter_status]

        if species:
            wanted = [name.lower() for name in species]
            cells = [record for record in cells
                     if record.get('donor__species', "").lower() in wanted]

        return self.simplify_cells_api(cells) if simple else cells

    def simplify_cells_api(self, cells):
        """Flatten verbose API cell records into concise dictionaries."""
        simplified = []
        for cell in cells:
            record = {
                'reporter_status': cell['cell_reporter_status'],
                'cell_soma_location': [cell['csl__x'],
                                       cell['csl__y'],
                                       cell['csl__z']],
                'species': cell['donor__species'],
                'id': cell['specimen__id'],
                'name': cell['specimen__name'],
                'structure_layer_name': cell['structure__layer'],
                'structure_area_id': cell['structure_parent__id'],
                'structure_area_abbrev': cell['structure_parent__acronym'],
                'transgenic_line': cell['line_name'],
                'dendrite_type': cell['tag__dendrite_type'],
                'apical': cell['tag__apical'],
                'reconstruction_type': cell['nr__reconstruction_type'],
                'disease_state': cell['donor__disease_state'],
            }
            simplified.append(record)
        return simplified

    @cacheable()
    def get_ephys_features(self):
        """
        Query the API for the full table of EphysFeatures for all cells,
        restricted to specimens whose ephys results did not fail.
        """
        passing_specimens = 'specimen(ephys_result[failed$eqfalse])'
        return self.model_query('EphysFeature',
                                criteria=passing_specimens,
                                num_rows='all')

    @cacheable()
    def get_morphology_features(self):
        """
        Query the API for the full table of morphology features for all
        cells whose ephys results did not fail.

        Notes
        -----
        The tags column is excluded by default because it isn't useful.
        """
        passing_specimens = "specimen(ephys_result[failed$eqfalse])"
        return self.model_query('NeuronReconstruction',
                                criteria=passing_specimens,
                                excpt='tags',
                                num_rows='all')

    @cacheable(strategy='create',
               pathfinder=Cache.pathfinder(file_name_position=2,
                                           path_keyword='file_name'))
    def save_ephys_data(self, specimen_id, file_name):
        """
        Save the electrophysology recordings for a cell as an NWB file.

        Parameters
        ----------
        specimen_id: int
            ID of the specimen, from the Specimens database model in the Allen Institute API.

        file_name: str
            Path to save the NWB file.

        Raises
        ------
        LookupError
            If the specimen has no downloadable ephys data.
        """
        criteria = '[id$eq%d],ephys_result(well_known_files(well_known_file_type[name$eq%s]))' % (
            specimen_id, self.NWB_FILE_TYPE)
        includes = 'ephys_result(well_known_files(well_known_file_type))'

        results = self.model_query('Specimen',
                                   criteria=criteria,
                                   include=includes,
                                   num_rows='all')

        try:
            file_url = results[0]['ephys_result'][
                'well_known_files'][0]['download_link']
        except (IndexError, KeyError, TypeError) as e:
            # Only missing-record/missing-key failures mean "no data";
            # chain the original exception instead of discarding it so
            # unexpected payload shapes remain diagnosable.
            raise LookupError(
                "Specimen %d has no ephys data" % specimen_id) from e

        self.retrieve_file_over_http(self.api_url + file_url, file_name)

    def save_reconstruction(self, specimen_id, file_name):
        """
        Save the morphological reconstruction of a cell as an SWC file.

        Parameters
        ----------
        specimen_id: int
            ID of the specimen, from the Specimens database model in the Allen Institute API.

        file_name: str
            Path to save the SWC file.

        Raises
        ------
        LookupError
            If the specimen has no reconstruction to download.
        """

        Manifest.safe_make_parent_dirs(file_name)

        criteria = '[id$eq%d],neuron_reconstructions(well_known_files)' % specimen_id
        includes = 'neuron_reconstructions(well_known_files(well_known_file_type[name$eq\'%s\']))' % self.SWC_FILE_TYPE

        results = self.model_query('Specimen',
                                   criteria=criteria,
                                   include=includes,
                                   num_rows='all')

        try:
            file_url = results[0]['neuron_reconstructions'][
                0]['well_known_files'][0]['download_link']
        except (IndexError, KeyError, TypeError) as e:
            # A bare except here would also swallow KeyboardInterrupt and
            # SystemExit; catch only the lookup failures that mean the
            # reconstruction is absent, and chain the cause.
            raise LookupError(
                "Specimen %d has no reconstruction" % specimen_id) from e

        self.retrieve_file_over_http(self.api_url + file_url, file_name)

    def save_reconstruction_markers(self, specimen_id, file_name):
        """
        Save the marker file for the morphological reconstruction of a cell.  These are
        comma-delimited files indicating points of interest in a reconstruction (truncation
        points, early tracing termination, etc).

        Parameters
        ----------
        specimen_id: int
            ID of the specimen, from the Specimens database model in the Allen Institute API.

        file_name: str
            Path to save the marker file.

        Raises
        ------
        LookupError
            If the specimen has no marker file to download.
        """

        Manifest.safe_make_parent_dirs(file_name)

        criteria = '[id$eq%d],neuron_reconstructions(well_known_files)' % specimen_id
        includes = 'neuron_reconstructions(well_known_files(well_known_file_type[name$eq\'%s\']))' % self.MARKER_FILE_TYPE

        results = self.model_query('Specimen',
                                   criteria=criteria,
                                   include=includes,
                                   num_rows='all')

        try:
            file_url = results[0]['neuron_reconstructions'][
                0]['well_known_files'][0]['download_link']
        except (IndexError, KeyError, TypeError) as e:
            # Narrowed from a bare except: only missing-record/missing-key
            # failures mean the marker file is absent; chain the cause.
            raise LookupError(
                "Specimen %d has no marker file" % specimen_id) from e

        self.retrieve_file_over_http(self.api_url + file_url, file_name)
    def get_experiment_containers(self, file_name=None,
                                  ids=None,
                                  targeted_structures=None,
                                  imaging_depths=None,
                                  cre_lines=None,
                                  reporter_lines=None,
                                  transgenic_lines=None,
                                  include_failed=False,
                                  simple=True):
        """ Fetch experiment containers and filter them by the given criteria.

        Parameters
        ----------
        file_name: string
            File name to save/read the experiment containers.  If None, the
            path is pulled out of the manifest.  If caching is disabled, no
            file is saved. Default is None.

        ids: list
            Experiment container ids to keep.

        targeted_structures: list
            Structure acronyms; see
            BrainObservatoryCache.get_all_targeted_structures().

        imaging_depths: list
            Imaging depths; see
            BrainObservatoryCache.get_all_imaging_depths().

        cre_lines: list
            Cre lines; see BrainObservatoryCache.get_all_cre_lines().

        reporter_lines: list
            Reporter lines; see
            BrainObservatoryCache.get_all_reporter_lines().

        transgenic_lines: list
            Transgenic lines; see
            BrainObservatoryCache.get_all_cre_lines() or
            BrainObservatoryCache.get_all_reporter_lines().

        include_failed: boolean
            Whether or not to include failed experiment containers.

        simple: boolean
            Whether to reduce each returned dictionary to a concise
            subset of its properties.

        Returns
        -------
        list of dictionaries
        """
        # These filters must be lists; a bare string would silently match
        # per-character, so reject it early.
        for value, label in ((targeted_structures, "targeted_structures"),
                             (cre_lines, "cre_lines"),
                             (reporter_lines, "reporter_lines"),
                             (transgenic_lines, "transgenic_lines")):
            _assert_not_string(value, label)

        path = self.get_cache_path(file_name, self.EXPERIMENT_CONTAINERS_KEY)

        containers = self.api.get_experiment_containers(
            path=path, strategy='lazy', **Cache.cache_json())

        return self.api.filter_experiment_containers(
            containers,
            ids=ids,
            targeted_structures=targeted_structures,
            imaging_depths=imaging_depths,
            cre_lines=cre_lines,
            reporter_lines=reporter_lines,
            transgenic_lines=transgenic_lines,
            include_failed=include_failed,
            simple=simple)
    def get_ophys_experiments(self, file_name=None,
                              ids=None,
                              experiment_container_ids=None,
                              targeted_structures=None,
                              imaging_depths=None,
                              cre_lines=None,
                              reporter_lines=None,
                              transgenic_lines=None,
                              stimuli=None,
                              session_types=None,
                              cell_specimen_ids=None,
                              include_failed=False,
                              require_eye_tracking=False,
                              simple=True):
        """ Get a list of ophys experiments matching certain criteria.

        Parameters
        ----------
        file_name: string
            File name to save/read the ophys experiments.  If file_name is None,
            the file_name will be pulled out of the manifest.  If caching
            is disabled, no file will be saved. Default is None.

        ids: list
            List of ophys experiment ids.

        experiment_container_ids: list
            List of experiment container ids.

        targeted_structures: list
            List of structure acronyms.  Must be in the list returned by
            BrainObservatoryCache.get_all_targeted_structures().

        imaging_depths: list
            List of imaging depths.  Must be in the list returned by
            BrainObservatoryCache.get_all_imaging_depths().

        cre_lines: list
            List of cre lines.  Must be in the list returned by
            BrainObservatoryCache.get_all_cre_lines().

        reporter_lines: list
            List of reporter lines.  Must be in the list returned by
            BrainObservatoryCache.get_all_reporter_lines().

        transgenic_lines: list
            List of transgenic lines. Must be in the list returned by
            BrainObservatoryCache.get_all_cre_lines() or
            BrainObservatoryCache.get_all_reporter_lines().

        stimuli: list
            List of stimulus names.  Must be in the list returned by
            BrainObservatoryCache.get_all_stimuli().

        session_types: list
            List of stimulus session type names.  Must be in the list returned by
            BrainObservatoryCache.get_all_session_types().

        cell_specimen_ids: list
            Only include experiments that contain cells with these ids.

        include_failed: boolean
            Whether or not to include experiments from failed experiment containers.

        simple: boolean
            Whether or not to simplify the dictionary properties returned by this method
            to a more concise subset.

        require_eye_tracking: boolean
            If True, only return experiments that have eye tracking results. Default: False.

        Returns
        -------
        list of dictionaries
        """
        _assert_not_string(targeted_structures, "targeted_structures")
        _assert_not_string(cre_lines, "cre_lines")
        _assert_not_string(reporter_lines, "reporter_lines")
        _assert_not_string(transgenic_lines, "transgenic_lines")
        _assert_not_string(stimuli, "stimuli")
        _assert_not_string(session_types, "session_types")

        file_name = self.get_cache_path(file_name, self.EXPERIMENTS_KEY)

        exps = self.api.get_ophys_experiments(path=file_name,
                                              strategy='lazy',
                                              **Cache.cache_json())

        if cell_specimen_ids is not None:
            cells = self.get_cell_specimens(ids=cell_specimen_ids)
            cell_container_ids = set(cell['experiment_container_id']
                                     for cell in cells)
            if experiment_container_ids is not None:
                # BUGFIX: keep the containers that were BOTH requested AND
                # hold the requested cells.  The previous set difference
                # ('-') returned exactly the containers that do NOT contain
                # the cells, inverting the filter.
                experiment_container_ids = list(
                    set(experiment_container_ids) & cell_container_ids)
            else:
                experiment_container_ids = list(cell_container_ids)

        exps = self.api.filter_ophys_experiments(exps,
                                                 ids=ids,
                                                 experiment_container_ids=experiment_container_ids,
                                                 targeted_structures=targeted_structures,
                                                 imaging_depths=imaging_depths,
                                                 cre_lines=cre_lines,
                                                 reporter_lines=reporter_lines,
                                                 transgenic_lines=transgenic_lines,
                                                 stimuli=stimuli,
                                                 session_types=session_types,
                                                 include_failed=include_failed,
                                                 require_eye_tracking=require_eye_tracking,
                                                 simple=simple)

        return exps
Beispiel #40
0
 def setUp(self):
     """Build a fresh Cache and RmaApi before each test."""
     self.cache = Cache()
     self.api = RmaApi()
    def get_experiment_containers(self,
                                  file_name=None,
                                  ids=None,
                                  targeted_structures=None,
                                  imaging_depths=None,
                                  cre_lines=None,
                                  reporter_lines=None,
                                  transgenic_lines=None,
                                  include_failed=False,
                                  simple=True):
        """ Return experiment containers filtered by the given criteria.

        Parameters
        ----------
        file_name: string
            File name to save/read the experiment containers.  If None,
            the path is taken from the manifest.  If caching is disabled,
            nothing is saved. Default is None.

        ids: list
            Experiment container ids to keep.

        targeted_structures: list
            Structure acronyms; see
            BrainObservatoryCache.get_all_targeted_structures().

        imaging_depths: list
            Imaging depths; see
            BrainObservatoryCache.get_all_imaging_depths().

        cre_lines: list
            Cre lines; see BrainObservatoryCache.get_all_cre_lines().

        reporter_lines: list
            Reporter lines; see
            BrainObservatoryCache.get_all_reporter_lines().

        transgenic_lines: list
            Transgenic lines; see
            BrainObservatoryCache.get_all_cre_lines() or
            BrainObservatoryCache.get_all_reporter_lines().

        include_failed: boolean
            Whether or not to include failed experiment containers.

        simple: boolean
            Whether to reduce each dictionary to a concise subset of its
            properties.

        Returns
        -------
        list of dictionaries
        """
        # Reject bare strings, which would otherwise match per-character.
        _assert_not_string(targeted_structures, "targeted_structures")
        _assert_not_string(cre_lines, "cre_lines")
        _assert_not_string(reporter_lines, "reporter_lines")
        _assert_not_string(transgenic_lines, "transgenic_lines")

        path = self.get_cache_path(file_name,
                                   self.EXPERIMENT_CONTAINERS_KEY)

        raw_containers = self.api.get_experiment_containers(
            path=path,
            strategy='lazy',
            **Cache.cache_json())

        filtered = self.api.filter_experiment_containers(
            raw_containers,
            ids=ids,
            targeted_structures=targeted_structures,
            imaging_depths=imaging_depths,
            cre_lines=cre_lines,
            reporter_lines=reporter_lines,
            transgenic_lines=transgenic_lines,
            include_failed=include_failed,
            simple=simple)

        return filtered
Beispiel #42
0
class CacheTests(unittest.TestCase):
    """Unit tests for Cache.wrap, with the HTTP and file I/O layers mocked."""

    def __init__(self, *args, **kwargs):
        super(CacheTests, self).__init__(*args, **kwargs)


    def setUp(self):
        # Fresh Cache and RMA client for every test.
        self.cache = Cache()
        self.api = RmaApi()


    def tearDown(self):
        # Drop references so each test starts from setUp's clean state.
        self.cache = None
        self.api = None


    def test_wrap_json(self):
        # NOTE(review): despite the name, this path reads the cached file
        # back through ju.read (mocked to return a DataFrame) and asserts
        # on DataFrame access -- confirm the test name matches intent.
        msg = [ { 'whatever': True } ]
        
        ju.read_url_get = \
            MagicMock(name='read_url_get',
                      return_value={ 'msg': msg })
        ju.write = \
            MagicMock(name='write')
#        pj.read_json = \
#            MagicMock(name='read_json',
#                      return_value=msg)
        ju.read = \
            MagicMock(name='read',
                      return_value=pd.DataFrame(msg))
               
        df = self.cache.wrap(self.api.model_query,
                             'example.txt',
                             cache=True,
                             model='Hemisphere')
        
        self.assertTrue(df.loc[:,'whatever'][0])
        
        # wrap should hit the API once, write the payload to the cache
        # file, then read the cached file back.
        ju.read_url_get.assert_called_once_with('http://api.brain-map.org/api/v2/data/query.json?q=model::Hemisphere')
        ju.write.assert_called_once_with('example.txt', msg)
        ju.read.assert_called_once_with('example.txt')
#        pj.read_json.assert_called_once_with('example.txt')
    
    
    def test_wrap_dataframe(self):
        # NOTE(review): return_dataframe=True is passed but the result is
        # asserted like a list of dicts; pj.read_json is mocked to return
        # msg unchanged, so this passes either way -- confirm intent.
        msg = [ { 'whatever': True } ]
        
        ju.read_url_get = \
            MagicMock(name='read_url_get',
                      return_value={ 'msg': msg })
        ju.write = \
            MagicMock(name='write')
        pj.read_json = \
            MagicMock(name='read_json',
                      return_value=msg)
        
        json_data = self.cache.wrap(self.api.model_query,
                                    'example.txt',
                                    cache=True,
                                    return_dataframe=True,
                                    model='Hemisphere')
        
        self.assertTrue(json_data[0]['whatever'])
        
        # One API hit, one cache write, one records-oriented read-back.
        ju.read_url_get.assert_called_once_with('http://api.brain-map.org/api/v2/data/query.json?q=model::Hemisphere')
        ju.write.assert_called_once_with('example.txt', msg)
        pj.read_json.assert_called_once_with('example.txt', orient='records')
    def get_cell_specimens(self,
                           file_name=None,
                           ids=None,
                           experiment_container_ids=None,
                           include_failed=False,
                           simple=True,
                           filters=None):
        """ Return cell specimens that have certain properties.

        Parameters
        ----------
        file_name: string
            File name to save/read the cell specimens.  If None, the path
            is pulled out of the manifest.  If caching is disabled, no file
            is saved. Default is None.

        ids: list
            Cell specimen ids to keep.

        experiment_container_ids: list
            Experiment container ids to keep.

        include_failed: bool
            Whether to include cells from failed experiment containers.

        simple: boolean
            If True, drop the thumbnail image properties from each record.

        filters: list of dicts
            Filter dictionaries in the format generated by the Allen Brain
            Observatory web site (http://observatory.brain-map.org/visualcoding:
            perform a cell search, apply filters, then follow the
            "view these cells in the AllenSDK" link for a code sample).
            See BrainObservatoryApi.dataframe_query for the filter syntax.

        Returns
        -------
        list of dictionaries
        """
        path = self.get_cache_path(file_name, self.CELL_SPECIMENS_KEY)

        specimens = self.api.get_cell_metrics(
            path=path,
            strategy='lazy',
            pre=lambda x: [y for y in x],
            **Cache.cache_json())

        specimens = self.api.filter_cell_specimens(
            specimens,
            ids=ids,
            experiment_container_ids=experiment_container_ids,
            include_failed=include_failed,
            filters=filters)

        if simple:
            # Strip the response-level ('R') thumbnail ('T') columns; they
            # are only useful to the web application.
            mappings = self._get_stimulus_mappings()
            thumbnail_keys = [m['item'] for m in mappings
                              if m['item_type'] == 'T' and m['level'] == 'R']
            for specimen in specimens:
                for key in thumbnail_keys:
                    del specimen[key]

        return specimens
Beispiel #44
0
class VoxelModelApi(MouseConnectivityApi):
    '''HTTP Client extending MouseConnectivityApi to download model data.
    '''
    # Root of the published voxel-model download directory.
    HTTP_MODEL_DIRECTORY = "http://download.alleninstitute.org/publications/"\
            "A_high_resolution_data-driven_model_of_the_mouse_connectome/"

    # Remote file names served under HTTP_MODEL_DIRECTORY.
    NODES_FILE = "nodes.csv.gz"
    WEIGHTS_FILE = "weights.csv.gz"
    SOURCE_MASK_FILE = "source_mask_params.json"
    TARGET_MASK_FILE = "target_mask_params.json"

    CONNECTION_DENSITY_FILE = 'connection_density.csv.gz'
    CONNECTION_STRENGTH_FILE = 'connection_strength.csv.gz'
    NORMALIZED_CONNECTION_DENSITY_FILE = 'normalized_connection_density.csv.gz'
    NORMALIZED_CONNECTION_STRENGTH_FILE = 'normalized_connection_strength.csv.gz'

    def download_model_files(self, file_name, save_file_path=None):
        """Download one voxel model file over HTTP.

        Parameters
        ----------
        file_name : string
            Name of the remote file to download (one of the *_FILE
            constants on this class).
        save_file_path : string, optional
            Local path to save the downloaded file as.
        """
        url = self.HTTP_MODEL_DIRECTORY + file_name
        self.retrieve_file_over_http(url, save_file_path)

    @cacheable(strategy='create',
               pathfinder=Cache.pathfinder(file_name_position=1, path_keyword='path'))
    def download_nodes(self, file_name):
        """Download the nodes table (NODES_FILE) to file_name."""
        self.download_model_files(self.NODES_FILE, file_name)

    @cacheable(strategy='create',
               pathfinder=Cache.pathfinder(file_name_position=1, path_keyword='path'))
    def download_weights(self, file_name):
        """Download the weights table (WEIGHTS_FILE) to file_name."""
        self.download_model_files(self.WEIGHTS_FILE, file_name)

    @cacheable(strategy='create',
               pathfinder=Cache.pathfinder(file_name_position=1, path_keyword='path'))
    def download_source_mask_params(self, file_name):
        """Download the source mask parameters (SOURCE_MASK_FILE) to file_name."""
        self.download_model_files(self.SOURCE_MASK_FILE, file_name)

    @cacheable(strategy='create',
               pathfinder=Cache.pathfinder(file_name_position=1, path_keyword='path'))
    def download_target_mask_params(self, file_name):
        """Download the target mask parameters (TARGET_MASK_FILE) to file_name."""
        self.download_model_files(self.TARGET_MASK_FILE, file_name)

    @cacheable(strategy='create',
               pathfinder=Cache.pathfinder(file_name_position=1, path_keyword='path'))
    def download_connection_density(self, file_name):
        """Download the connection density table to file_name."""
        self.download_model_files(self.CONNECTION_DENSITY_FILE, file_name)

    @cacheable(strategy='create',
               pathfinder=Cache.pathfinder(file_name_position=1, path_keyword='path'))
    def download_connection_strength(self, file_name):
        """Download the connection strength table to file_name."""
        self.download_model_files(self.CONNECTION_STRENGTH_FILE, file_name)

    @cacheable(strategy='create',
               pathfinder=Cache.pathfinder(file_name_position=1, path_keyword='path'))
    def download_normalized_connection_density(self, file_name):
        """Download the normalized connection density table to file_name."""
        self.download_model_files(self.NORMALIZED_CONNECTION_DENSITY_FILE, file_name)

    @cacheable(strategy='create',
               pathfinder=Cache.pathfinder(file_name_position=1, path_keyword='path'))
    def download_normalized_connection_strength(self, file_name):
        """Download the normalized connection strength table to file_name."""
        self.download_model_files(self.NORMALIZED_CONNECTION_STRENGTH_FILE, file_name)
Beispiel #45
0
class MouseAtlasApi(ReferenceSpaceApi, GridDataApi):
    ''' Downloads Mouse Brain Atlas grid data, reference volumes, and metadata.
    '''

    MOUSE_ATLAS_PRODUCTS = (1, )
    DEVMOUSE_ATLAS_PRODUCTS = (3, )
    MOUSE_ORGANISM = (2, )
    HUMAN_ORGANISM = (1, )

    @cacheable()
    @pageable(num_rows=2000, total_rows='all')
    def get_section_data_sets(self, gene_ids=None, product_ids=None, **kwargs):
        ''' Download a list of section data sets (experiments) from the Mouse
        Brain Atlas project.

        Parameters
        ----------
        gene_ids : list of int, optional
            Keep only experiments characterizing the expression of these
            genes. Default is all.
        product_ids : list of int, optional
            Keep only experiments from these products. Default is the Mouse
            Brain Atlas.

        Returns
        -------
        list of dict :
            Section data set records, each with one or more nested gene
            records.

        '''

        if product_ids is None:
            product_ids = list(self.MOUSE_ATLAS_PRODUCTS)

        clauses = ['products[id$in{}]'.format(','.join(map(str, product_ids)))]
        if gene_ids is not None:
            clauses.append(
                'genes[id$in{}]'.format(','.join(map(str, gene_ids))))

        return self.model_query(model='SectionDataSet',
                                criteria=','.join(clauses),
                                include='genes',
                                **kwargs)

    @cacheable()
    @pageable(num_rows=2000, total_rows='all')
    def get_genes(self, organism_ids=None, chromosome_ids=None, **kwargs):
        ''' Download a list of genes.

        Parameters
        ----------
        organism_ids : list of int, optional
            Keep only genes appearing in these organisms. Defaults to
            mouse (2).
        chromosome_ids : list of int, optional
            Keep only genes on these chromosomes. Defaults to all.

        Returns
        -------
        list of dict:
            Gene records, each with a nested chromosome record (also a dict).

        '''

        if organism_ids is None:
            organism_ids = list(self.MOUSE_ORGANISM)

        clauses = ['[organism_id$in{}]'.format(
            ','.join(map(str, organism_ids)))]
        if chromosome_ids is not None:
            clauses.append('[chromosome_id$in{}]'.format(
                ','.join(map(str, chromosome_ids))))

        return self.model_query(model='Gene',
                                criteria=','.join(clauses),
                                include='chromosome',
                                **kwargs)

    @cacheable(strategy='create',
               reader=sitk_utilities.read_ndarray_with_sitk,
               pathfinder=Cache.pathfinder(file_name_position=1,
                                           path_keyword='path'))
    def download_expression_density(self, path, experiment_id):
        """Download the expression density grid volume for one experiment."""
        self.download_gene_expression_grid_data(
            experiment_id, GridDataApi.DENSITY, path)

    @cacheable(strategy='create',
               reader=sitk_utilities.read_ndarray_with_sitk,
               pathfinder=Cache.pathfinder(file_name_position=1,
                                           path_keyword='path'))
    def download_expression_energy(self, path, experiment_id):
        """Download the expression energy grid volume for one experiment."""
        self.download_gene_expression_grid_data(
            experiment_id, GridDataApi.ENERGY, path)

    @cacheable(strategy='create',
               reader=sitk_utilities.read_ndarray_with_sitk,
               pathfinder=Cache.pathfinder(file_name_position=1,
                                           path_keyword='path'))
    def download_expression_intensity(self, path, experiment_id):
        """Download the expression intensity grid volume for one experiment."""
        self.download_gene_expression_grid_data(
            experiment_id, GridDataApi.INTENSITY, path)
    def get_experiments(self, dataframe=False, file_name=None, cre=None, injection_structure_ids=None):
        """
        Read a list of experiments that match certain criteria.  If caching is
        enabled, the whole (unfiltered) list of experiments is saved to a file.

        Parameters
        ----------

        dataframe: boolean
            Return the experiments as a Pandas DataFrame indexed by id.
            If False, return a list of dictionaries.  Default False.

        file_name: string
            File name to save/read the structures table.  If None, the path
            is pulled out of the manifest.  If caching is disabled, no file
            is saved. Default is None.

        cre: boolean or list
            True: only cre-positive experiments; False: only cre-negative;
            None: all experiments; list: experiments whose cre line name is
            in the list. Default None.

        injection_structure_ids: list
            Only return experiments injected in these structures.
            If None, return all experiments.  Default None.

        """

        path = self.get_cache_path(file_name, self.EXPERIMENTS_KEY)

        experiments = self.api.get_experiments_api(path=path,
                                                   strategy='lazy',
                                                   **Cache.cache_json())

        for record in experiments:
            # expose the data set id under the plain 'id' key
            record['id'] = record.pop('data_set_id')

            # flatten the transgenic line record to its name
            line = record.get('transgenic_line', None)
            if line:
                record['transgenic_line'] = line['name']

            # '/'-delimited id string -> list of ints; first id is primary
            structure_ids = [int(token) for token
                             in record['injection_structures'].split('/')]
            record['injection_structures'] = structure_ids
            record['primary_injection_structure'] = structure_ids[0]

            # internal server path, not useful to callers
            record.pop('storage_directory')

        # filter the read/downloaded list of experiments
        experiments = self.filter_experiments(experiments, cre,
                                              injection_structure_ids)

        if dataframe:
            experiments = pd.DataFrame(experiments)
            experiments.set_index(['id'], inplace=True, drop=False)

        return experiments
Beispiel #47
0
    def get_experiments(self,
                        dataframe=False,
                        file_name=None,
                        cre=None,
                        injection_structure_ids=None):
        """
        Read a list of experiments that match certain criteria.  If caching is
        enabled, the whole (unfiltered) list of experiments is saved to a file.

        Parameters
        ----------

        dataframe: boolean
            Return the experiments as a Pandas DataFrame indexed by id.
            If False, return a list of dictionaries.  Default False.

        file_name: string
            File name to save/read the structures table.  If None, the path
            is pulled out of the manifest.  If caching is disabled, no file
            is saved. Default is None.

        cre: boolean or list
            True: only cre-positive experiments; False: only cre-negative;
            None: all experiments; list: experiments whose cre line name is
            in the list. Default None.

        injection_structure_ids: list
            Only return experiments injected in these structures.
            If None, return all experiments.  Default None.

        """

        def _normalize(exp):
            # rename the data set id to plain 'id'
            exp['id'] = exp['data_set_id']
            del exp['data_set_id']

            # flatten the transgenic line record to its name
            tl = exp.get('transgenic_line', None)
            if tl:
                exp['transgenic_line'] = tl['name']

            # parse the '/'-delimited structure id string
            structures = [int(token) for token
                          in exp['injection_structures'].split('/')]
            exp['injection_structures'] = structures
            exp['primary_injection_structure'] = structures[0]

            # drop the internal storage path
            del exp['storage_directory']

        path = self.get_cache_path(file_name, self.EXPERIMENTS_KEY)

        experiments = self.api.get_experiments_api(path=path,
                                                   strategy='lazy',
                                                   **Cache.cache_json())

        for exp in experiments:
            _normalize(exp)

        # filter the read/downloaded list of experiments
        experiments = self.filter_experiments(experiments, cre,
                                              injection_structure_ids)

        if dataframe:
            experiments = pd.DataFrame(experiments)
            experiments.set_index(['id'], inplace=True, drop=False)

        return experiments
class MouseConnectivityApiPrerelease(MouseConnectivityApi):
    '''Client for retrieving prereleased mouse connectivity data from lims.

    Parameters
    ----------
    storage_directories_file_name : string
        File name to save/read the storage_directories dict. Passed to
        the GridDataApiPrerelease constructor.
    cache_storage_directories : boolean, optional
        If True (default), cache the storage_directories dict on disk.
        Passed to the GridDataApiPrerelease constructor.
    base_uri : string, optional
        Does not affect pulling from lims.
    '''
    def __init__(self,
                 storage_directories_file_name,
                 cache_storage_directories=True,
                 base_uri=None):
        super(MouseConnectivityApiPrerelease, self).__init__(base_uri=base_uri)
        self.grid_data_api = GridDataApiPrerelease.from_file_name(
            storage_directories_file_name, cache=cache_storage_directories)

    @cacheable()
    def get_experiments(self):
        '''Query lims and return experiment dicts for every row whose grid
        data storage directory is known to the grid data api.

        Returns
        -------
        list of dict
            One dict per prereleased experiment (built by _experiment_dict).
        '''
        query_result = lu.query(_EXPERIMENT_QUERY)

        # lims query rows use byte-string keys; keep only experiments whose
        # grid data location is known, otherwise downloads would fail later.
        experiments = []
        for row in query_result:
            if str(row[b'id']) in self.grid_data_api.storage_directories:
                experiments.append(_experiment_dict(row))

        return experiments

    # NOTE(review): intentionally not cached (formerly a commented-out
    # @cacheable()); unionizes are not available for prerelease data.
    def get_structure_unionizes(self):
        raise NotImplementedError()

    def _download_volume(self, path, experiment_id, file_prefix, resolution):
        '''Shared implementation for the download_* methods: build the
        "<prefix>_<resolution>.nrrd" file name and fetch it via grid data api.
        '''
        file_name = "%s_%s.nrrd" % (file_prefix, resolution)
        self.grid_data_api.download_projection_grid_data(
            path, experiment_id, file_name)

    @cacheable(strategy='create',
               pathfinder=Cache.pathfinder(file_name_position=1,
                                           path_keyword='path'))
    def download_injection_density(self, path, experiment_id, resolution):
        '''Download the injection density volume for an experiment to path.'''
        self._download_volume(path, experiment_id,
                              GridDataApi.INJECTION_DENSITY, resolution)

    @cacheable(strategy='create',
               pathfinder=Cache.pathfinder(file_name_position=1,
                                           path_keyword='path'))
    def download_projection_density(self, path, experiment_id, resolution):
        '''Download the projection density volume for an experiment to path.'''
        self._download_volume(path, experiment_id,
                              GridDataApi.PROJECTION_DENSITY, resolution)

    @cacheable(strategy='create',
               pathfinder=Cache.pathfinder(file_name_position=1,
                                           path_keyword='path'))
    def download_injection_fraction(self, path, experiment_id, resolution):
        '''Download the injection fraction volume for an experiment to path.'''
        self._download_volume(path, experiment_id,
                              GridDataApi.INJECTION_FRACTION, resolution)

    @cacheable(strategy='create',
               pathfinder=Cache.pathfinder(file_name_position=1,
                                           path_keyword='path'))
    def download_data_mask(self, path, experiment_id, resolution):
        '''Download the data mask volume for an experiment to path.'''
        self._download_volume(path, experiment_id,
                              GridDataApi.DATA_MASK, resolution)