Example #1
def get_injection_fraction(self, experiment_id, file_name=None):
        """ 
        Read an injection fraction volume for a single experiment. Download it 
        first if it doesn't exist.  Injection fraction is the proportion of
        pixels in the injection site in a grid voxel in [0,1].
        
        Parameters
        ----------

        experiment_id: int
            ID of the experiment to download/read.  This corresponds to
            section_data_set_id in the API.

        file_name: string
            File name to store the injection fraction volume.  If it already
            exists, it will be read from this file.  If file_name is None, the
            file_name will be pulled out of the manifest.  Default is None.

        """

        file_name = self.get_cache_path(file_name, self.INJECTION_FRACTION_KEY,
                                        experiment_id, self.resolution)

        if file_name is None:
            raise Exception("No file name to save volume.")

        if not os.path.exists(file_name):
            Manifest.safe_mkdir(os.path.dirname(file_name))

            self.api.download_injection_fraction(file_name, experiment_id,
                                                 self.resolution)

        return nrrd.read(file_name)
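
# Usage sketch (not from the original source): get_injection_fraction is a
# MouseConnectivityCache method in the AllenSDK; the experiment id below is a
# placeholder.
from allensdk.core.mouse_connectivity_cache import MouseConnectivityCache

mcc = MouseConnectivityCache(resolution=25)
inj_frac, header = mcc.get_injection_fraction(12345)  # placeholder experiment id
print(inj_frac.shape, inj_frac.max())  # voxel values lie in [0, 1]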
Example #2
    def save_ephys_data(self, specimen_id, file_name):
        """
        Save the electrophysiology recordings for a cell as an NWB file.

        Parameters
        ----------
        specimen_id: int
            ID of the specimen, from the Specimens database model in the Allen Institute API.

        file_name: str
            Path to save the NWB file.
        """

        dirname = os.path.dirname(file_name)
        Manifest.safe_mkdir(dirname)

        criteria = '[id$eq%d],ephys_result(well_known_files(well_known_file_type[name$eq%s]))' % (
            specimen_id, self.NWB_FILE_TYPE)
        includes = 'ephys_result(well_known_files(well_known_file_type))'

        results = self.model_query('Specimen',
                                   criteria=criteria,
                                   include=includes,
                                   num_rows='all')

        try:
            file_url = results[0]['ephys_result'][
                'well_known_files'][0]['download_link']
        except Exception as _:
            raise Exception("Specimen %d has no ephys data" % specimen_id)

        self.retrieve_file_over_http(self.api_url + file_url, file_name)
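
# Usage sketch (not from the original source): save_ephys_data lives on
# CellTypesApi in the AllenSDK; the specimen id is a placeholder.
from allensdk.api.queries.cell_types_api import CellTypesApi

cta = CellTypesApi()
cta.save_ephys_data(12345, 'specimen_12345.nwb')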
Example #3
    def load_manifest(self, file_name, version=None):
        '''Read a keyed collection of path specifications.

        Parameters
        ----------
        file_name : string
            path to the manifest file

        Returns
        -------
        Manifest
        '''
        if file_name is not None:
            if not os.path.exists(file_name):

                # make the directory if it doesn't exist already
                dirname = os.path.dirname(file_name)
                if dirname:
                    Manifest.safe_mkdir(dirname)

                self.build_manifest(file_name)

            try:
                self.manifest = Manifest(
                    ju.read(file_name)['manifest'], 
                    os.path.dirname(file_name), 
                    version=version)
            except ManifestVersionError as e:
                raise ManifestVersionError(("Your manifest file (%s) is out of date" +
                                            " (version '%s' vs '%s').  Please remove this file" +
                                            " and it will be regenerated for you the next"
                                            " time you instantiate this class.") % (file_name, e.found_version, e.version),
                                           e.version, e.found_version)
        else:
            self.manifest = None
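
# Sketch of how a caller might recover from the ManifestVersionError raised
# above ('cache', the path, and the version string are hypothetical):
try:
    cache.load_manifest('manifest.json', version='1.0')
except ManifestVersionError:
    os.remove('manifest.json')  # stale manifest; it will be regenerated
    cache.load_manifest('manifest.json', version='1.0')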
Example #4
    def get_experiment_structure_unionizes(self,
                                           experiment_id,
                                           file_name=None,
                                           is_injection=None,
                                           structure_ids=None,
                                           hemisphere_ids=None):
        """
        Retrieve the structure unionize data for a specific experiment.  Filter by 
        structure, injection status, and hemisphere.

        Parameters
        ----------
        
        experiment_id: int
            ID of the experiment of interest.  Corresponds to section_data_set_id in the API.

        file_name: string
            File name to save/read the experiments list.  If file_name is None, 
            the file_name will be pulled out of the manifest.  If caching
            is disabled, no file will be saved. Default is None.            

        is_injection: boolean
            If True, only return unionize records that disregard non-injection pixels.
            If False, only return unionize records that disregard injection pixels.
            If None, return all records.  Default None.

        structure_ids: list
            Only return unionize records that are inside a specific set of structures.
            If None, return all records. Default None.

        hemisphere_ids: list
            Only return unionize records that disregard pixels outside of a
            hemisphere or set of hemispheres.  Left = 1, Right = 2, Both = 3.
            If None, include all records [1, 2, 3].  Default None.
            
        """

        file_name = self.get_cache_path(file_name,
                                        self.STRUCTURE_UNIONIZES_KEY,
                                        experiment_id)

        if os.path.exists(file_name):
            unionizes = pd.read_csv(file_name, index_col=0)
        else:
            unionizes = self.api.get_structure_unionizes([experiment_id])
            unionizes = pd.DataFrame(unionizes)

            # rename section_data_set_id column to experiment_id
            unionizes.columns = [
                'experiment_id' if c == 'section_data_set_id' else c
                for c in unionizes.columns
            ]

            if self.cache:
                Manifest.safe_mkdir(os.path.dirname(file_name))

                unionizes.to_csv(file_name)

        return self.filter_structure_unionizes(unionizes, is_injection,
                                               structure_ids, hemisphere_ids)
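
# Usage sketch (experiment id is a placeholder; mcc is a MouseConnectivityCache
# instance as in the Example #1 sketch):
unionizes = mcc.get_experiment_structure_unionizes(12345,
                                                   is_injection=False,
                                                   hemisphere_ids=[3])  # 3 = both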
Example #5
def display_features(qc_fig_dir, data_set, feature_data):
    """

    Parameters
    ----------
    qc_fig_dir: str
        directory name for storing html pages
    data_set: NWB data set
    feature_data: dict
        cell and sweep features

    Returns
    -------

    """
    if os.path.exists(qc_fig_dir):
        logging.warning("Removing existing qc figures directory: %s",
                        qc_fig_dir)
        shutil.rmtree(qc_fig_dir)

    image_dir = os.path.join(qc_fig_dir, "img")
    Manifest.safe_mkdir(qc_fig_dir)
    Manifest.safe_mkdir(image_dir)

    logging.info("Saving figures")
    make_sweep_page(data_set, qc_fig_dir)
    make_cell_page(data_set, feature_data, qc_fig_dir)
Example #6
    def get_structures(self, file_name=None):
        """ 
        Read the list of adult mouse structures and return a Pandas DataFrame.

        Parameters
        ----------

        file_name: string
            File name to save/read the structures table.  If file_name is None, 
            the file_name will be pulled out of the manifest.  If caching
            is disabled, no file will be saved. Default is None.
        """

        file_name = self.get_cache_path(file_name, self.STRUCTURES_KEY)

        if os.path.exists(file_name):
            structures = pd.read_csv(file_name, index_col=0)
        else:
            structures = OntologiesApi().get_structures(1)
            structures = pd.DataFrame(structures)

            if self.cache:
                Manifest.safe_mkdir(os.path.dirname(file_name))

                structures.to_csv(file_name)

        structures.set_index(['id'], inplace=True, drop=False)
        return structures
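
# Usage sketch (mcc as in the Example #1 sketch): the returned DataFrame is
# indexed by structure id.
structures = mcc.get_structures()
print(structures.loc[315, 'name'])  # 315 is the adult mouse isocortex id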
Example #7
def save_grand_averages(grand_up, grand_down, t, storage_directory):
    """Save capacitance check grand averages to local storage

    Need to save to separate files so that they can be loaded by NEURON fitting scripts

    Parameters
    ----------
    grand_up, grand_down : array-like
        Series of voltages responses to positive (`grand_up`) and negative (`grand_down`) current pulses
    t : array-like
        Time values for `grand_up` and `grand_down`
    storage_directory : str
        Path to storage directory for files

    Returns
    -------
    upfile, downfile : str
        Paths to the saved files
    """
    Manifest.safe_mkdir(storage_directory)
    upfile = os.path.join(storage_directory, "upbase.dat")
    downfile = os.path.join(storage_directory, "downbase.dat")
    with open(upfile, 'w') as f:
        np.savetxt(f, np.column_stack((t, grand_up)))
    with open(downfile, 'w') as f:
        np.savetxt(f, np.column_stack((t, grand_down)))

    return upfile, downfile
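
# The saved files are plain two-column text (time, value), so they can be read
# back with numpy as well as by the NEURON-side scripts; a sketch:
t_up, v_up = np.loadtxt(upfile, unpack=True)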
Example #8
    def load_manifest(self, file_name):
        '''Read a keyed collection of path specifications.
        
        Parameters
        ----------
        file_name : string
            path to the manifest file
        
        Returns
        -------
        Manifest
        '''
        if file_name is not None:
            if not os.path.exists(file_name):

                # make the directory if it doesn't exist already
                dirname = os.path.dirname(file_name)
                if dirname:
                    Manifest.safe_mkdir(dirname)

                self.build_manifest(file_name)

            self.manifest = Manifest(
                ju.read(file_name)['manifest'], os.path.dirname(file_name))
        else:
            self.manifest = None
Example #9
    def get_annotation_volume(self, file_name=None):
        """ 
        Read the annotation volume.  Download it first if it doesn't exist.

        Parameters
        ----------

        file_name: string
            File name to store the annotation volume.  If it already exists, 
            it will be read from this file.  If file_name is None, the 
            file_name will be pulled out of the manifest.  Default is None.

        """

        file_name = self.get_cache_path(file_name, self.ANNOTATION_KEY,
                                        self.resolution)

        if file_name is None:
            raise Exception(
                "No save file name provided for annotation volume.")

        if os.path.exists(file_name):
            annotation, info = nrrd.read(file_name)
        else:
            Manifest.safe_mkdir(os.path.dirname(file_name))

            annotation, info = self.api.download_annotation_volume(
                self.resolution, file_name)

        return annotation, info
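
# Usage sketch (mcc as in the Example #1 sketch): the annotation is a 3D array
# of structure ids plus nrrd header metadata.
annotation, info = mcc.get_annotation_volume()
print(annotation.shape, annotation.dtype)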
Example #11
    def load_manifest(self, file_name):
        '''Read a keyed collection of path specifications.

        Parameters
        ----------
        file_name : string
            path to the manifest file

        Returns
        -------
        Manifest
        '''
        if file_name is not None:
            if not os.path.exists(file_name):

                # make the directory if it doesn't exist already
                dirname = os.path.dirname(file_name)
                if dirname:
                    Manifest.safe_mkdir(dirname)

                self.build_manifest(file_name)

            self.manifest = Manifest(
                ju.read(file_name)['manifest'], os.path.dirname(file_name))
        else:
            self.manifest = None
Example #12
def plot_negative_baselines(raw_traces, demix_traces, mask_array, roi_ids_mask, plot_dir, ext='png'):
    N, T = raw_traces.shape
    _, x, y = mask_array.shape

    logging.debug("finding negative baselines")
    neg_inds = find_negative_baselines(demix_traces)[0]
    
    overlap_inds = set()
    logging.debug("detected negative baselines: %s", str(neg_inds))
    for roi_ind in neg_inds:
        Manifest.safe_mkdir(plot_dir)

        save_file = os.path.join(plot_dir, str(roi_ids_mask[roi_ind]) + '_negative.' + ext)
        plot_traces(raw_traces[roi_ind], demix_traces[roi_ind], roi_ids_mask[roi_ind], roi_ind, save_file)

        ''' plot overlapping masks '''
        save_file = os.path.join(plot_dir, str(roi_ids_mask[roi_ind]) + '_negative_masks.' + ext)
        roi_overlap_inds = plot_overlap_masks_lengthOne(roi_ind, mask_array, save_file)

        overlap_inds.update(roi_overlap_inds)

    zero_inds = find_zero_baselines(demix_traces)[0]
    logging.debug("detected zero baselines: %s", str(zero_inds))
    overlap_inds.update(zero_inds)

    return list(overlap_inds)
Example #13
    def get_data_mask(self, experiment_id, file_name=None):
        """ 
        Read a data mask volume for a single experiment. Download it 
        first if it doesn't exist.  Data mask is a binary mask of
        voxels that have valid data.  Only use valid data in analysis!
        
        Parameters
        ----------

        experiment_id: int
            ID of the experiment to download/read.  This corresponds to
            section_data_set_id in the API.

        file_name: string
            File name to store the data mask volume.  If it already exists,
            it will be read from this file.  If file_name is None, the
            file_name will be pulled out of the manifest.  Default is None.

        """

        file_name = self.get_cache_path(file_name, self.DATA_MASK_KEY,
                                        experiment_id, self.resolution)

        if file_name is None:
            raise Exception("No file name to save volume.")

        if not os.path.exists(file_name):
            Manifest.safe_mkdir(os.path.dirname(file_name))

            self.api.download_data_mask(file_name, experiment_id,
                                        self.resolution)

        return nrrd.read(file_name)
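
# Usage sketch (mcc as in the Example #1 sketch; ids and variables are
# placeholders): keep only voxels with valid data before any analysis.
import numpy as np

data_mask, _ = mcc.get_data_mask(12345)
inj_frac[data_mask == 0] = np.nan  # ignore voxels without valid data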
Example #14
    def load_manifest(self, file_name, version=None):
        '''Read a keyed collection of path specifications.

        Parameters
        ----------
        file_name : string
            path to the manifest file

        Returns
        -------
        Manifest
        '''
        if file_name is not None:
            if not os.path.exists(file_name):

                # make the directory if it doesn't exist already
                dirname = os.path.dirname(file_name)
                if dirname:
                    Manifest.safe_mkdir(dirname)

                self.build_manifest(file_name)

            try:
                self.manifest = Manifest(
                    ju.read(file_name)['manifest'],
                    os.path.dirname(file_name),
                    version=version)
            except ManifestVersionError as e:
                if e.outdated is True:
                    intro = "is out of date"
                elif e.outdated is False:
                    intro = "was made with a newer version of the AllenSDK"
                elif e.outdated is None:
                    intro = "version did not match the expected version"

                ref_url = "https://github.com/alleninstitute/allensdk/wiki"
                raise ManifestVersionError(("Your manifest file (%s) %s" +
                                            " (its version is '%s', but" +
                                            " version '%s' is expected). " +
                                            " Please remove this file" +
                                            " and it will be regenerated for" +
                                            " you the next time you" +
                                            " instantiate this class." +
                                            " WARNING: There may be new data" +
                                            " files available that replace" +
                                            " the ones you already have" +
                                            " downloaded. Read the notes" +
                                            " for this release for more" +
                                            " details on what has changed" +
                                            " (%s).") %
                                           (file_name, intro,
                                            e.found_version, e.version,
                                            ref_url),
                                           e.version, e.found_version)

            self.manifest_path = file_name

        else:
            self.manifest = None
Example #15
def save_grand_averages(grand_up, grand_down, t, storage_directory):
    """Save to local storage to be loaded by NEURON fitting scripts"""
    Manifest.safe_mkdir(storage_directory)
    upfile = os.path.join(storage_directory, "upbase.dat")
    downfile = os.path.join(storage_directory, "downbase.dat")
    with open(upfile, 'w') as f:
        np.savetxt(f, np.column_stack((t, grand_up)))
    with open(downfile, 'w') as f:
        np.savetxt(f, np.column_stack((t, grand_down)))

    return upfile, downfile
Example #16
def save_qc_figures(qc_fig_dir, nwb_file, output_data, plot_cell_figures):
    if os.path.exists(qc_fig_dir):
        logging.warning("removing existing qc figures directory: %s",
                        qc_fig_dir)
        shutil.rmtree(qc_fig_dir)

    Manifest.safe_mkdir(qc_fig_dir)

    logging.debug("saving qc plot figures")
    plot_qc_figures.make_sweep_page(nwb_file, output_data, qc_fig_dir)
    plot_qc_figures.make_cell_page(nwb_file, output_data, qc_fig_dir,
                                   plot_cell_figures)
Example #17
def build_plots(prefix, aspect, configs, output_dir, axes=None, transparent=False):
    Manifest.safe_mkdir(output_dir)

    for config in configs:
        h = config['height_px']
        w = int(h * aspect)
        
        file_name = os.path.join(output_dir, config["pattern"] % prefix)

        logging.debug("file: %s", file_name)
        with oplots.figure_in_px(w, h, file_name, transparent=transparent) as fig:
            matplotlib.rcParams.update({'font.size': config['font_size']})
            yield file_name
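
# build_plots is a generator: the caller draws into the figure opened by
# oplots.figure_in_px between iterations, and the file is saved when the
# context manager exits.  A sketch using the PLOT_CONFIGS seen elsewhere in
# this module:
import matplotlib.pyplot as plt

for file_name in build_plots('expt1', 1.5, PLOT_CONFIGS, 'thumbnails'):
    plt.plot(range(10))  # placeholder drawing into the open figure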
Example #18
    def load_manifest(self, file_name, version=None):
        '''Read a keyed collection of path specifications.

        Parameters
        ----------
        file_name : string
            path to the manifest file

        Returns
        -------
        Manifest
        '''
        if file_name is not None:
            if not os.path.exists(file_name):

                # make the directory if it doesn't exist already
                dirname = os.path.dirname(file_name)
                if dirname:
                    Manifest.safe_mkdir(dirname)

                self.build_manifest(file_name)

            try:
                self.manifest = Manifest(
                    ju.read(file_name)['manifest'],
                    os.path.dirname(file_name),
                    version=version)
            except ManifestVersionError as e:
                if e.outdated is True:
                    intro = "is out of date"
                elif e.outdated is False:
                    intro = "was made with a newer version of the AllenSDK"
                elif e.outdated is None:
                    intro = "version did not match the expected version"

                raise ManifestVersionError(("Your manifest file (%s) %s" +
                                            " (its version is '%s', but version '%s' is expected).  Please remove this file" +
                                            " and it will be regenerated for you the next" +
                                            " time you instantiate this class." +
                                            " WARNING: There may be new data files available that replace the ones you already have downloaded." +
                                            " Read the notes for this release for more details on what has changed" +
                                            " (https://github.com/alleninstitute/allensdk/wiki).") % 
                                           (file_name, intro, e.found_version, e.version),
                                           e.version, e.found_version)

            self.manifest_path = file_name

        else:
            self.manifest = None
Example #19
    def load_manifest(self, file_name, version=None):
        '''Read a keyed collection of path specifications.

        Parameters
        ----------
        file_name : string
            path to the manifest file

        Returns
        -------
        Manifest
        '''
        if file_name is not None:
            if not os.path.exists(file_name):

                # make the directory if it doesn't exist already
                dirname = os.path.dirname(file_name)
                if dirname:
                    Manifest.safe_mkdir(dirname)

                self.build_manifest(file_name)

            try:
                self.manifest = Manifest(ju.read(file_name)['manifest'],
                                         os.path.dirname(file_name),
                                         version=version)
            except ManifestVersionError as e:
                if e.outdated is True:
                    intro = "is out of date"
                elif e.outdated is False:
                    intro = "was made with a newer version of the AllenSDK"
                elif e.outdated is None:
                    intro = "version did not match the expected version"

                raise ManifestVersionError((
                    "Your manifest file (%s) %s" +
                    " (its version is '%s', but version '%s' is expected).  Please remove this file"
                    + " and it will be regenerated for you the next"
                    " time you instantiate this class.") %
                                           (file_name, intro, e.found_version,
                                            e.version), e.version,
                                           e.found_version)

            self.manifest_path = file_name

        else:
            self.manifest = None
Example #20
    def save_ophys_experiment_data(self, ophys_experiment_id, file_name):
        dirname = os.path.dirname(file_name)
        Manifest.safe_mkdir(dirname)


        data = self.template_query('brain_observatory_queries',
                                   'ophys_experiment_data',
                                   ophys_experiment_id=ophys_experiment_id)
        
        try:
            file_url = data[0]['download_link']
        except Exception as _:
            raise Exception("ophys experiment %d has no data file" % ophys_experiment_id)

        self._log.warning("Downloading ophys_experiment %d NWB. This can take some time." % ophys_experiment_id)

        self.retrieve_file_over_http(self.api_url + file_url, file_name)
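
# Usage sketch (not from the original source): this method belongs to
# BrainObservatoryApi in the AllenSDK; the experiment id is a placeholder.
from allensdk.api.queries.brain_observatory_api import BrainObservatoryApi

boa = BrainObservatoryApi()
boa.save_ophys_experiment_data(12345, 'ophys_12345.nwb')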
Example #22
    def get_structure_mask(self,
                           structure_id,
                           file_name=None,
                           annotation_file_name=None):
        """
        Read a 3D numpy array shaped like the annotation volume that has non-zero values where 
        voxels belong to a particular structure.  This will take care of identifying substructures.

        Parameters
        ----------
        
        structure_id: int
            ID of a structure.  

        file_name: string
            File name to store the structure mask.  If it already exists, 
            it will be read from this file.  If file_name is None, the 
            file_name will be pulled out of the manifest.  Default is None.
        
        annotation_file_name: string
            File name to store the annotation volume.  If it already exists,
            it will be read from this file.  If annotation_file_name is None,
            the file_name will be pulled out of the manifest.  Default is None.
        """

        file_name = self.get_cache_path(file_name, self.STRUCTURE_MASK_KEY,
                                        structure_id)

        if os.path.exists(file_name):
            return nrrd.read(file_name)
        else:
            ont = self.get_ontology()
            structure_ids = ont.get_descendant_ids([structure_id])
            annotation, _ = self.get_annotation_volume(annotation_file_name)
            mask = self.make_structure_mask(structure_ids, annotation)

            if self.cache:
                Manifest.safe_mkdir(os.path.dirname(file_name))
                nrrd.write(file_name, mask)

            return mask, None
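
# Usage sketch (mcc as in the Example #1 sketch):
mask, _ = mcc.get_structure_mask(315)  # 315 = isocortex; substructures included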
Example #23
    def cache_data(self,
                   neuronal_model_id,
                   working_directory=None):
        '''Take an experiment id, query the API via RMA to get well-known files,
        download the files, and store them in the working directory.

        Parameters
        ----------
        neuronal_model_id : int or string representation
            found in the neuronal_model table in the api
        working_directory : string
            Absolute path name where the downloaded well-known files will be stored.
        '''
        if working_directory is None:
            working_directory = self.default_working_directory

        well_known_file_id_dict = self.get_well_known_file_ids(
            neuronal_model_id)

        if not well_known_file_id_dict or \
           (not any(list(well_known_file_id_dict.values()))):
            raise Exception("No data found for neuronal model id %d" %
                            (neuronal_model_id))

        Manifest.safe_mkdir(working_directory)

        work_dir = os.path.join(working_directory, 'work')
        Manifest.safe_mkdir(work_dir)

        modfile_dir = os.path.join(working_directory, 'modfiles')
        Manifest.safe_mkdir(modfile_dir)

        for key, id_dict in well_known_file_id_dict.items():
            if (not self.cache_stimulus) and (key == 'stimulus'):
                continue

            for well_known_id, filename in id_dict.items():
                well_known_file_url = self.construct_well_known_file_download_url(
                    well_known_id)
                cached_file_path = os.path.join(working_directory, filename)
                self.retrieve_file_over_http(
                    well_known_file_url, cached_file_path)

        fit_path = list(self.ids['fit'].values())[0]
        stimulus_filename = list(self.ids['stimulus'].values())[0]
        swc_morphology_path = list(self.ids['morphology'].values())[0]
        marker_path = \
            list(self.ids['marker'].values())[0] if 'marker' in self.ids else ''
        sweeps = sorted(self.sweeps)

        self.create_manifest(fit_path,
                             self.model_type,
                             stimulus_filename,
                             swc_morphology_path,
                             marker_path,
                             sweeps)

        manifest_path = os.path.join(working_directory, 'manifest.json')
        with open(manifest_path, 'w') as f:
            json.dump(self.manifest, f, indent=2)
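
# Usage sketch (not from the original source): cache_data is defined on
# BiophysicalApi in the AllenSDK; the neuronal model id is a placeholder.
from allensdk.api.queries.biophysical_api import BiophysicalApi

bp = BiophysicalApi()
bp.cache_stimulus = False  # skip the large stimulus NWB file
bp.cache_data(12345, working_directory='neuronal_model')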
Example #24
def build_experiment_thumbnails(nwb_file, analysis_file, output_directory,
                                types=None, threads=4):
    if types is None:
        types = PLOT_TYPES

    logging.info("nwb file: %s", nwb_file)
    logging.info("analysis file: %s", analysis_file)
    logging.info("output directory: %s", output_directory)
    logging.info("types: %s", str(types))
    Manifest.safe_mkdir(output_directory)

    if len(types) == 1:
        build_type(nwb_file, analysis_file, PLOT_CONFIGS, output_directory, types[0])
    elif threads == 1:
        for type_name in types:
            build_type(nwb_file, analysis_file, PLOT_CONFIGS, output_directory, type_name)
    else:
        p = multiprocessing.Pool(threads)

        func = functools.partial(build_type, nwb_file, analysis_file, PLOT_CONFIGS, output_directory)
        results = p.map(func, types)
        p.close()
        p.join()
Example #25
def parse_input(data):
    nwb_file = data.get("nwb_file", None)

    if nwb_file is None:
        raise IOError("input JSON missing required field 'nwb_file'")
    if not os.path.exists(nwb_file):
        raise IOError("nwb file does not exists: %s" % nwb_file)

    analysis_file = data.get("analysis_file", None)

    if analysis_file is None:
        raise IOError("input JSON missing required field 'analysis_file'")
    if not os.path.exists(analysis_file):
        raise IOError("analysis file does not exists: %s" % analysis_file)


    output_directory = data.get("output_directory", None)

    if output_directory is None:
        raise IOError("input JSON missing required field 'output_directory'")

    Manifest.safe_mkdir(output_directory)
    
    return nwb_file, analysis_file, output_directory
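
# Sketch of the input JSON that parse_input expects (paths are placeholders):
data = {
    "nwb_file": "expt.nwb",
    "analysis_file": "expt_analysis.h5",
    "output_directory": "thumbnails",
}
nwb_file, analysis_file, output_directory = parse_input(data)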
Example #26
    def cache_data(self,
                   neuronal_model_id,
                   working_directory=None):
        '''Take an experiment id, query the API via RMA to get well-known files,
        download the files, and store them in the working directory.
        
        Parameters
        ----------
        neuronal_model_id : int or string representation
            found in the neuronal_model table in the api
        working_directory : string
            Absolute path name where the downloaded well-known files will be stored.
        '''
        if working_directory is None:
            working_directory = self.default_working_directory

        well_known_file_id_dict = self.get_well_known_file_ids(neuronal_model_id)
        
        if not well_known_file_id_dict or \
           (not any(well_known_file_id_dict.values())):
            raise Exception("No data found for neuronal model id %d" % (neuronal_model_id))
            
        Manifest.safe_mkdir(working_directory)
        
        work_dir = os.path.join(working_directory, 'work')        
        Manifest.safe_mkdir(work_dir)
        
        modfile_dir = os.path.join(working_directory, 'modfiles')       
        Manifest.safe_mkdir(modfile_dir)
                
        for key, id_dict in well_known_file_id_dict.items():
            if (not self.cache_stimulus) and (key == 'stimulus'):
                continue
            
            for well_known_id, filename in id_dict.items():
                well_known_file_url = self.construct_well_known_file_download_url(well_known_id)
                cached_file_path = os.path.join(working_directory, filename)
                self.retrieve_file_over_http(well_known_file_url, cached_file_path)
        
        fit_path = list(self.ids['fit'].values())[0]
        stimulus_filename = list(self.ids['stimulus'].values())[0]
        swc_morphology_path = list(self.ids['morphology'].values())[0]
        marker_path = list(self.ids['marker'].values())[0] if 'marker' in self.ids else ''
        sweeps = sorted(self.sweeps)
        
        self.create_manifest(fit_path,
                             self.model_type,
                             stimulus_filename,
                             swc_morphology_path,
                             marker_path,
                             sweeps)
        
        manifest_path = os.path.join(working_directory, 'manifest.json')
        with open(manifest_path, 'w') as f:
            json.dump(self.manifest, f, indent=2)
Example #27
def main():
    mod = PipelineModule()
    mod.parser.add_argument("--exclude-labels",
                            nargs="*",
                            default=EXCLUDE_LABELS)

    data = mod.input_data()
    logging.debug("reading input")

    traces, masks, valid, trace_ids, movie_h5, output_h5 = parse_input(
        data, mod.args.exclude_labels)

    logging.debug("excluded masks: %s",
                  str(list(zip(np.where(~valid)[0], trace_ids[~valid]))))
    output_dir = os.path.dirname(output_h5)
    plot_dir = os.path.join(output_dir, "demix_plots")
    if os.path.exists(plot_dir):
        shutil.rmtree(plot_dir)
    Manifest.safe_mkdir(plot_dir)

    logging.debug("reading movie")
    with h5py.File(movie_h5, 'r') as f:
        movie = f['data'][()]

    # only demix non-union, non-duplicate ROIs
    valid_idxs = np.where(valid)
    demix_traces = traces[valid_idxs]
    demix_masks = masks[valid_idxs]

    logging.debug("demixing")
    demixed_traces, drop_frames = demixer.demix_time_dep_masks(
        demix_traces, movie, demix_masks)

    nt_inds = demixer.plot_negative_transients(demix_traces, demixed_traces,
                                               valid[valid_idxs], demix_masks,
                                               trace_ids[valid_idxs], plot_dir)

    logging.debug("rois with negative transients: %s",
                  str(trace_ids[valid_idxs][nt_inds]))

    nb_inds = demixer.plot_negative_baselines(demix_traces, demixed_traces,
                                              demix_masks,
                                              trace_ids[valid_idxs], plot_dir)

    # negative baseline rois (and those that overlap with them) become nans
    logging.debug("rois with negative baselines (or overlap with them): %s",
                  str(trace_ids[valid_idxs][nb_inds]))
    demixed_traces[nb_inds, :] = np.nan

    logging.info("Saving output")
    out_traces = np.zeros(traces.shape, dtype=demix_traces.dtype)
    out_traces[:] = np.nan
    out_traces[valid_idxs] = demixed_traces

    with h5py.File(output_h5, 'w') as f:
        f.create_dataset("data", data=out_traces, compression="gzip")
        roi_names = np.array([str(rn) for rn in trace_ids]).astype(np.bytes_)
        f.create_dataset("roi_names", data=roi_names)

    mod.write_output_data(
        dict(negative_transient_roi_ids=trace_ids[valid_idxs][nt_inds],
             negative_baseline_roi_ids=trace_ids[valid_idxs][nb_inds]))
Example #28
def run_module(module,
               input_data,
               storage_directory,
               optional_args=None,
               python=SHARED_PYTHON,
               sdk_path=SHARED_SDK,
               local=False,
               pbs=None):

    PBS_TEMPLATE = """
    export PYTHONPATH=%(sdk_path)s:$PYTHONPATH
    PYTHON=%(python)s
    SCRIPT="%(module)s"
    $PYTHON $SCRIPT %(optional_args)s %(input_json)s %(output_json)s 
    """

    if optional_args is None:
        optional_args = []

    input_json = os.path.join(storage_directory, "input.json")
    output_json = os.path.join(storage_directory, "output.json")
    pbs_file = os.path.join(storage_directory, "run.pbs")

    Manifest.safe_mkdir(storage_directory)

    pbs_headers = [('-j oe'),
                   ('-o %s' % os.path.join(storage_directory, "run.log"))]
    pbs = pbs if pbs is not None else {}

    queue = pbs.get('queue', 'braintv')
    pbs_headers.append('-q %s' % queue)

    walltime = pbs.get('walltime', '3:00:00')
    pbs_headers.append('-l walltime=%s' % walltime)

    vmem = pbs.get('vmem', 16)
    pbs_headers.append('-l vmem=%dgb' % vmem)

    if 'job_name' in pbs:
        pbs_headers.append('-N %s' % pbs['job_name'])

    if 'ncpus' in pbs:
        pbs_headers.append('-l ncpus=%d' % pbs['ncpus'])

    pbs_headers = ['#PBS %s' % s for s in pbs_headers]

    with open(pbs_file, "w") as f:
        f.write(
            '\n'.join(pbs_headers) + PBS_TEMPLATE % {
                "python": python,
                "sdk_path": sdk_path,
                "module": module,
                "input_json": input_json,
                "output_json": output_json,
                "optional_args": " ".join(optional_args)
            })

    ju.write(input_json, input_data)

    if local:
        subprocess.call(['sh', pbs_file])
    else:
        subprocess.call(['qsub', pbs_file])
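
# Usage sketch (paths and PBS settings are placeholders): queue a module under
# PBS with custom resources, or run it locally for debugging.
run_module('/shared/sdk/analysis_module.py',
           {"param": 1},
           '/shared/jobs/job1',
           pbs={'walltime': '8:00:00', 'job_name': 'demix', 'ncpus': 4},
           local=True)  # local=True executes via sh instead of qsub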
Example #29
def test_notebook(fn_temp_dir):

    # coding: utf-8

    # # Reference Space
    #
    # This notebook contains example code demonstrating the use of the StructureTree and ReferenceSpace classes. These classes provide methods for interacting with the 3d spaces to which Allen Institute data and atlases are registered.
    #
    # Unlike the AllenSDK cache classes, StructureTree and ReferenceSpace operate entirely in memory. We recommend using json files to store text and nrrd files to store volumetric images.
    #
    # The MouseConnectivityCache class has methods for downloading, storing, and constructing StructureTrees and ReferenceSpaces. Please see [here](https://alleninstitute.github.io/AllenSDK/_static/examples/nb/mouse_connectivity.html) for examples.

    # ## Constructing a StructureTree
    #
    # A StructureTree object is a wrapper around a structure graph - a list of dictionaries documenting brain structures and their containment relationships. To build a structure tree, you will first need to obtain a structure graph.
    #
    # For a list of atlases and corresponding structure graph ids, see [here](http://help.brain-map.org/display/api/Atlas+Drawings+and+Ontologies).

    # In[1]:

    from allensdk.api.queries.ontologies_api import OntologiesApi
    from allensdk.core.structure_tree import StructureTree

    oapi = OntologiesApi()
    structure_graph = oapi.get_structures_with_sets(
        [1])  # 1 is the id of the adult mouse structure graph

    # This removes some unused fields returned by the query
    structure_graph = StructureTree.clean_structures(structure_graph)

    tree = StructureTree(structure_graph)

    # In[2]:

    # now let's take a look at a structure
    tree.get_structures_by_name(['Dorsal auditory area'])

    # The fields are:
    #     * acronym: a shortened name for the structure
    #     * rgb_triplet: each structure is assigned a consistent color for visualizations
    #     * graph_id: the structure graph to which this structure belongs
    #     * graph_order: each structure is assigned a consistent position in the flattened graph
    #     * id: a unique integer identifier
    #     * name: the full name of the structure
    #     * structure_id_path: traces a path from the root node of the tree to this structure
    #     * structure_set_ids: the structure belongs to these predefined groups

    # ## Using a StructureTree

    # In[3]:

    # get a structure's parent
    tree.parent([1011])

    # In[4]:

    # get a dictionary mapping structure ids to names

    name_map = tree.get_name_map()
    name_map[247]

    # In[5]:

    # ask whether one structure is contained within another

    strida = 385
    stridb = 247

    is_desc = '' if tree.structure_descends_from(strida, stridb) else ' not'

    print('{0} is{1} in {2}'.format(name_map[strida], is_desc,
                                    name_map[stridb]))

    # In[6]:

    # build a custom map that looks up acronyms by ids
    # the syntax here is just a pair of node-wise functions.
    # The first one returns keys while the second one returns values

    acronym_map = tree.value_map(lambda x: x['id'], lambda y: y['acronym'])
    print(acronym_map[385])

    # ## Downloading an annotation volume
    #
    # This code snippet will download and store a nrrd file containing the Allen Common Coordinate Framework annotation. We have requested an annotation with 25-micron isometric spacing. The orientation of this space is:
    #     * Anterior -> Posterior
    #     * Superior -> Inferior
    #     * Left -> Right
    # This is the no-frills way to download an annotation volume. See the <a href='_static/examples/nb/mouse_connectivity.html#Manipulating-Grid-Data'>mouse connectivity</a> examples if you want to properly cache the downloaded data.

    # In[7]:

    import os
    import nrrd
    from allensdk.api.queries.mouse_connectivity_api import MouseConnectivityApi
    from allensdk.config.manifest import Manifest

    # the annotation download writes a file, so we will need somewhere to put it
    annotation_dir = 'annotation'
    Manifest.safe_mkdir(annotation_dir)

    annotation_path = os.path.join(annotation_dir, 'annotation.nrrd')

    mcapi = MouseConnectivityApi()
    mcapi.download_annotation_volume('annotation/ccf_2016', 25,
                                     annotation_path)

    annotation, meta = nrrd.read(annotation_path)

    # ## Constructing a ReferenceSpace

    # In[8]:

    from allensdk.core.reference_space import ReferenceSpace

    # build a reference space from a StructureTree and annotation volume, the third argument is
    # the resolution of the space in microns
    rsp = ReferenceSpace(tree, annotation, [25, 25, 25])

    # ## Using a ReferenceSpace

    # #### making structure masks
    #
    # The simplest use of a Reference space is to build binary indicator masks for structures or groups of structures.

    # In[9]:

    # A complete mask for one structure
    whole_cortex_mask = rsp.make_structure_mask([315])

    # view in coronal section

    # What if you want a mask for a whole collection of ontologically disparate structures? Just pass more structure ids to make_structure_masks:

    # In[10]:

    # This gets all of the structures targeted by the Allen Brain Observatory project
    brain_observatory_structures = rsp.structure_tree.get_structures_by_set_id(
        [514166994])
    brain_observatory_ids = [st['id'] for st in brain_observatory_structures]

    brain_observatory_mask = rsp.make_structure_mask(brain_observatory_ids)

    # view in horizontal section

    # You can also make and store a number of structure_masks at once:

    # In[11]:

    import functools

    # Define a wrapper function that will control the mask generation.
    # This one checks for a nrrd file in the specified base directory
    # and builds/writes the mask only if one does not exist
    mask_writer = functools.partial(ReferenceSpace.check_and_write,
                                    annotation_dir)

    # many_structure_masks is a generator - nothing has actually been run yet
    mask_generator = rsp.many_structure_masks([385, 1097], mask_writer)

    # consume the resulting iterator to make and write the masks
    for structure_id in mask_generator:
        print('made mask for structure {0}.'.format(structure_id))

    os.listdir(annotation_dir)

    # #### Removing unassigned structures

    # A structure graph may contain structures that are not used in a particular reference space. Having these around can complicate use of the reference space, so we generally want to remove them.
    #
    # We'll try this using "Somatosensory areas, layer 6a" as a test case. In the 2016 ccf space, this structure is unused in favor of finer distinctions (e.g. "Primary somatosensory area, barrel field, layer 6a").

    # In[12]:

    # Double-check the voxel counts
    no_voxel_id = rsp.structure_tree.get_structures_by_name(
        ['Somatosensory areas, layer 6a'])[0]['id']
    print('voxel count for structure {0}: {1}'.format(
        no_voxel_id, rsp.total_voxel_map[no_voxel_id]))

    # remove unassigned structures from the ReferenceSpace's StructureTree
    rsp.remove_unassigned()

    # check the structure tree
    no_voxel_id in rsp.structure_tree.node_ids()

    # #### View a slice from the annotation

    # In[13]:

    import numpy as np
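
    # The original notebook presumably displayed a slice here; a minimal sketch
    # (the matplotlib call is an assumption, not part of the original test):
    import matplotlib.pyplot as plt

    plt.imshow(rsp.annotation[rsp.annotation.shape[0] // 2, :, :] > 0,
               interpolation='none', cmap='gray')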

    # #### Downsample the space
    #
    # If you want an annotation at a resolution we don't provide, you can make one with the downsample method.

    # In[14]:

    import warnings

    target_resolution = [75, 75, 75]

    # in some versions of scipy, scipy.ndimage.zoom raises a helpful but distracting
    # warning about the method used to truncate integers.
    warnings.simplefilter('ignore')

    sf_rsp = rsp.downsample(target_resolution)

    # re-enable warnings
    warnings.simplefilter('default')

    print(rsp.annotation.shape)
    print(sf_rsp.annotation.shape)
Example #30
    def get_experiments(self,
                        dataframe=False,
                        file_name=None,
                        cre=None,
                        injection_structure_ids=None):
        """
        Read a list of experiments that match certain criteria.  If caching is enabled,
        this will save the whole (unfiltered) list of experiments to a file.

        Parameters
        ----------
        
        dataframe: boolean
            Return the list of experiments as a Pandas DataFrame.  If False,
            return a list of dictionaries.  Default False. 

        file_name: string
            File name to save/read the structures table.  If file_name is None, 
            the file_name will be pulled out of the manifest.  If caching
            is disabled, no file will be saved. Default is None.

        cre: boolean or list
            If True, return only cre-positive experiments.  If False, return only
            cre-negative experiments.  If None, return all experiments.  If a list,
            return all experiments with cre line names in the supplied list.
            Default None.

        injection_structure_ids: list
            Only return experiments that were injected in the structures provided here.
            If None, return all experiments.  Default None.

        """

        file_name = self.get_cache_path(file_name, self.EXPERIMENTS_KEY)

        if os.path.exists(file_name):
            experiments = json_utilities.read(file_name)
        else:
            experiments = self.api.experiment_source_search(
                injection_structures='root')

            # removing these elements because they are specific to a particular resolution
            for e in experiments:
                del e['num-voxels']
                del e['injection-volume']
                del e['sum']
                del e['name']

            if self.cache:
                Manifest.safe_mkdir(os.path.dirname(file_name))

                json_utilities.write(file_name, experiments)

        # filter the read/downloaded list of experiments
        experiments = self.filter_experiments(experiments, cre,
                                              injection_structure_ids)

        if dataframe:
            experiments = pd.DataFrame(experiments)
            experiments.set_index(['id'], inplace=True, drop=False)

        return experiments
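
# Usage sketch (mcc as in the Example #1 sketch): wild-type experiments
# injected into isocortex (structure id 315), returned as a DataFrame.
expts = mcc.get_experiments(dataframe=True, cre=False,
                            injection_structure_ids=[315])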
Example #31
"""
University Hospital Cologne

More information can be found here:
http://alleninstitute.github.io/AllenSDK/_modules/allensdk/api/queries/reference_space_api.html
"""

import os
import nrrd  # pip install pynrrd, if pynrrd is not already installed
import nibabel as nib  # pip install nibabel, if nibabel is not already installed
import numpy as np
from allensdk.api.queries.reference_space_api import ReferenceSpaceApi
from allensdk.config.manifest import Manifest

# the annotation download writes a file, so we will need somewhere to put it
annotation_dir = 'annotation'
Manifest.safe_mkdir(annotation_dir)

annotation_path = os.path.join(annotation_dir, 'annotation.nrrd')

# this is a string which contains the name of the latest ccf version
annotation_version = ReferenceSpaceApi.CCF_VERSION_DEFAULT

# download annotations
mcapi = ReferenceSpaceApi()
mcapi.download_annotation_volume(annotation_version, 50, annotation_path)

# read nrrd data and header
data, header = nrrd.read(annotation_path)
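
# The nibabel/numpy imports above suggest a NIfTI export step; a minimal sketch
# (the 50-micron voxel affine is an assumption, not from the original script):
affine = np.diag([0.05, 0.05, 0.05, 1.0])  # 50 um voxels expressed in mm
img = nib.Nifti1Image(data.astype(np.uint32), affine)
nib.save(img, os.path.join(annotation_dir, 'annotation.nii.gz'))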
Example #32
def safe_mkdir_root_dir():
    directory = os.path.abspath(os.sep)
    Manifest.safe_mkdir(directory)  # should not error
Example #33
maxVoxels = 0
# (0: no max)
#-------------------------------------------------------------------------------

#-------------------------------------------------------------------------------
oapi = OntologiesApi()
structure_graph = oapi.get_structures_with_sets([adultMouseStructureGraphID])
# Removes some unused fields returned by the query:
structure_graph = StructureTree.clean_structures(structure_graph)
tree = StructureTree(structure_graph)

# Example:
# tree.get_structures_by_name(['Dorsal auditory area'])
# The annotation download writes a file, so we will need somewhere to put it
annotation_dir = os.path.dirname(structIDSource)
Manifest.safe_mkdir(annotation_dir)
annotation_path = os.path.join(annotation_dir, 'annotation.nrrd')

#-------------------------------------------------------------------------------
# Use the connectivity API:
mcapi = MouseConnectivityApi()
# The name of the latest ccf version (a string):
annotation_version = mcapi.CCF_VERSION_DEFAULT
if not os.path.exists(annotation_path):
    mcapi.download_annotation_volume(annotation_version, resolution,
                                     annotation_path)
annotation, meta = nrrd.read(annotation_path)

# Build a reference space from a StructureTree and annotation volume, the third argument is
# the resolution of the space in microns
rsp = ReferenceSpace(tree, annotation, [resolution, resolution, resolution])
Example #35
# the annotation download writes a file, so we will need somewhere to put it

# TODO: make these paths system-independent and include the FLIRTed and SyNned Allen-to-AMBMC registrations

# Define
data_path = os.path.join('Data', 'Mouse', 'Processed')
mouse_path_list = glob.glob(os.path.join(data_path, '*'))
reference_path = os.path.join('Data', 'Mouse', 'Reference')
# average_template_50_to_AMBMC_flirted.nii.gz
reference_template_path = os.path.join(reference_path, 'average_template_50_reoriented.nii.gz')
reference_annotation_path = os.path.join(reference_path, 'annotation_50_reoriented.nii.gz')

allen_dir = '/home/enzo/Desktop/allen'
allen_fsl_dir = '/usr/local/fsl/data/standard/allen_new'
Manifest.safe_mkdir(allen_dir)

# this is a string which contains the name of the latest ccf version
allen_version = ReferenceSpaceApi.CCF_VERSION_DEFAULT
allen_resolution = 50 # Set resolution in micrometers



# Download data
mcc = MouseConnectivityCache(resolution=allen_resolution)
annot, annot_header = mcc.get_annotation_volume()
template, template_header = mcc.get_template_volume()

# Define paths
allen_annotation_path = os.path.join(allen_dir, 'annotation_'+str(allen_resolution)+'.nii.gz')
allen_annotation_remapped_path = os.path.join(allen_dir, 'annotation_'+str(allen_resolution)+'_remapped.nii.gz')
Example #36
def prepare_for_passive_fit(sweeps, bridge_avg, is_spiny, data_set,
                            storage_directory):
    """Collect information for passive fit variations on capacitance-check sweeps

    Parameters
    ----------
    sweeps : list
        list of sweep numbers of capacitance-check sweeps
    bridge_avg: float
        average of bridge-balance value during the sweeps
    is_spiny: bool
        True if neuron has dendritic spines
    data_set: NwbDataSet
        container of sweep data
    storage_directory: str
        path to storage directory

    Returns
    -------
    paths: dict
        key-value set of relevant file paths
    passive_info: dict
        information about fitting for NEURON
    """
    if len(sweeps) == 0:
        logging.info("No cap check trace found")
        return {}, {"should_run": False}

    grand_up, grand_down, t = cap_check_grand_averages(sweeps, data_set)

    # Save to local storage to be loaded by NEURON fitting scripts
    Manifest.safe_mkdir(storage_directory)
    upfile = os.path.join(storage_directory, "upbase.dat")
    downfile = os.path.join(storage_directory, "downbase.dat")
    with open(upfile, 'w') as f:
        np.savetxt(f, np.column_stack((t, grand_up)))
    with open(downfile, 'w') as f:
        np.savetxt(f, np.column_stack((t, grand_down)))

    # Determine for how long the upward and downward responses are consistent
    grand_diff = (grand_up + grand_down) / grand_up
    avg_grand_diff = pd.Series(grand_diff, index=t).rolling(100).mean()
    threshold = 0.2
    start_index = np.flatnonzero(t >= 4.0)[0]
    escape_indexes = np.flatnonzero(
        np.abs(avg_grand_diff.values[start_index:]) > threshold) + start_index
    if len(escape_indexes) < 1:
        escape_index = len(t) - 1
    else:
        escape_index = escape_indexes[0]
    escape_t = t[escape_index]

    paths = {
        "up": upfile,
        "down": downfile,
    }

    passive_info = {
        "should_run": True,
        "bridge": bridge_avg,
        "limit": escape_t,
        "electrode_cap": 1.0,
        "is_spiny": is_spiny,
    }

    return paths, passive_info