Ejemplo n.º 1
0
    def __init__(self, nwb_path, save_path):
        """Open the experiment NWB file and prepare analysis output paths.

        Parameters
        ----------
        nwb_path: string
            Path to the experiment NWB file to analyze.
        save_path: string
            Path to the HDF5 file where analysis output will be written.
        """
        self.nwb = BrainObservatoryNwbDataSet(nwb_path)
        self.save_path = save_path
        self.save_dir = os.path.dirname(save_path)

        # one metrics container per session type (A, B, C)
        self.metrics_a, self.metrics_b, self.metrics_c = {}, {}, {}

        # cache the experiment metadata up front
        self.metadata = self.nwb.get_metadata()
Ejemplo n.º 2
0
    def __init__(self, nwb_path, save_path):
        """Open the experiment NWB file and prepare analysis output paths.

        Parameters
        ----------
        nwb_path: string
            Path to the experiment NWB file to analyze.
        save_path: string
            Path to the HDF5 file where analysis output will be written.
        """
        self.nwb = BrainObservatoryNwbDataSet(nwb_path)
        self.save_path = save_path
        self.save_dir = os.path.dirname(save_path)

        # per-session metric containers, each split into a per-cell table
        # and a per-experiment table
        self.metrics_a = {'cell': {}, 'experiment': {}}
        self.metrics_b = {'cell': {}, 'experiment': {}}
        self.metrics_c = {'cell': {}, 'experiment': {}}

        self.metadata = self.nwb.get_metadata()
Ejemplo n.º 3
0
    def save_session_c2(self, lsn4, lsn8, nm1, nm2, peak):
        """Persist the session C2 analysis results to self.save_path.

        Parameters
        ----------
        lsn4: LocallySparseNoise instance
            Built with stimulus = stimulus_info.LOCALLY_SPARSE_NOISE_4DEG.

        lsn8: LocallySparseNoise instance
            Built with stimulus = stimulus_info.LOCALLY_SPARSE_NOISE_8DEG.

        nm1: NaturalMovie instance
            Built with movie_name = stimulus_info.NATURAL_MOVIE_ONE.

        nm2: NaturalMovie instance
            Built with movie_name = stimulus_info.NATURAL_MOVIE_TWO.

        peak: pd.DataFrame
            The combined peak response property table created in self.session_c2().
        """

        nwb = BrainObservatoryNwbDataSet(self.save_path)

        dataframes = [
            ('stim_table_lsn4', lsn4.stim_table),
            ('stim_table_lsn8', lsn8.stim_table),
            ('sweep_response_nm1', nm1.sweep_response),
            ('peak', peak),
            ('sweep_response_nm2', nm2.sweep_response),
            ('sweep_response_lsn4', lsn4.sweep_response),
            ('sweep_response_lsn8', lsn8.sweep_response),
            ('mean_sweep_response_lsn4', lsn4.mean_sweep_response),
            ('mean_sweep_response_lsn8', lsn8.mean_sweep_response),
        ]
        nwb.save_analysis_dataframes(*dataframes)

        # combine 4deg and 8deg mean responses into a single array
        merge_mean_response = LocallySparseNoise.merge_mean_response(
            lsn4.mean_response, lsn8.mean_response)

        arrays = [
            ('mean_response_lsn4', lsn4.mean_response),
            ('mean_response_lsn8', lsn8.mean_response),
            ('receptive_field_lsn4', lsn4.receptive_field),
            ('receptive_field_lsn8', lsn8.receptive_field),
            ('merge_mean_response', merge_mean_response),
            ('binned_dx_sp', nm1.binned_dx_sp),
            ('binned_dx_vis', nm1.binned_dx_vis),
            ('binned_cells_sp', nm1.binned_cells_sp),
            ('binned_cells_vis', nm1.binned_cells_vis),
        ]
        nwb.save_analysis_arrays(*arrays)

        # receptive-field analysis is stored separately for each resolution
        for lsn, stimulus in ((lsn4, stimulus_info.LOCALLY_SPARSE_NOISE_4DEG),
                              (lsn8, stimulus_info.LOCALLY_SPARSE_NOISE_8DEG)):
            LocallySparseNoise.save_cell_index_receptive_field_analysis(
                lsn.cell_index_receptive_field_analysis_data, nwb, stimulus)
def test_locally_sparse_noise(lsn, nwb_c, analysis_c_new):
    """Check that the locally-sparse-noise analysis reloaded from the new
    analysis file matches the in-memory analysis object.

    Parameters
    ----------
    lsn: LocallySparseNoise
        Reference analysis object (pytest fixture).
    nwb_c: presumably a path to a session C/C2 NWB file (pytest fixture).
    analysis_c_new: presumably a path to the new analysis file (pytest fixture).
    """
    ds = BODS(nwb_c)
    session_type = ds.get_metadata()['session_type']
    logging.debug(session_type)

    # pick the stimulus matching the session type
    if session_type == si.THREE_SESSION_C:
        stimulus = si.LOCALLY_SPARSE_NOISE
    elif session_type == si.THREE_SESSION_C2:
        stimulus = si.LOCALLY_SPARSE_NOISE_4DEG
    else:
        # previously fell through with lsn_new unbound, raising an
        # unhelpful NameError; fail with an explicit message instead
        raise ValueError("unexpected session type: %s" % session_type)

    lsn_new = LocallySparseNoise.from_analysis_file(ds, analysis_c_new, stimulus)

    #assert np.allclose(lsn.sweep_response, lsn_new.sweep_response)
    assert np.allclose(lsn.mean_sweep_response, lsn_new.mean_sweep_response, equal_nan=True)
Ejemplo n.º 5
0
def test_locally_sparse_noise(lsn, nwb_c, analysis_c_new):
    """Check that the locally-sparse-noise analysis reloaded from the new
    analysis file matches the in-memory analysis object.

    Parameters
    ----------
    lsn: LocallySparseNoise
        Reference analysis object (pytest fixture).
    nwb_c: presumably a path to a session C/C2 NWB file (pytest fixture).
    analysis_c_new: presumably a path to the new analysis file (pytest fixture).
    """
    ds = BODS(nwb_c)
    session_type = ds.get_metadata()['session_type']
    logging.debug(session_type)

    # pick the stimulus matching the session type
    if session_type == si.THREE_SESSION_C:
        stimulus = si.LOCALLY_SPARSE_NOISE
    elif session_type == si.THREE_SESSION_C2:
        stimulus = si.LOCALLY_SPARSE_NOISE_4DEG
    else:
        # previously fell through with lsn_new unbound, raising an
        # unhelpful NameError; fail with an explicit message instead
        raise ValueError("unexpected session type: %s" % session_type)

    lsn_new = LocallySparseNoise.from_analysis_file(ds, analysis_c_new, stimulus)

    #assert np.allclose(lsn.sweep_response, lsn_new.sweep_response)
    assert np.allclose(lsn.mean_sweep_response, lsn_new.mean_sweep_response, equal_nan=True)
Ejemplo n.º 6
0
def main():
    """Command-line entry point: read fluorescence traces from an NWB or raw
    HDF5 file, compute dF/F, and write the result to the output HDF5 file.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("input_h5")
    parser.add_argument("output_h5")
    parser.add_argument("--plot_dir")
    parser.add_argument("--log_level", default=logging.INFO)

    args = parser.parse_args()

    logging.getLogger().setLevel(args.log_level)

    # read traces, either from an NWB file or from the "data" dataset of a
    # plain HDF5 file
    if args.input_h5.endswith("nwb"):
        timestamps, traces = BrainObservatoryNwbDataSet(
            args.input_h5).get_corrected_fluorescence_traces()
    else:
        # context manager guarantees the handle is closed even on error;
        # dataset[()] replaces the deprecated .value (removed in h5py 3)
        with h5py.File(args.input_h5, "r") as input_h5:
            traces = input_h5["data"][()]

    dff = calculate_dff(traces, save_plot_dir=args.plot_dir)

    # write to "data"
    with h5py.File(args.output_h5, "w") as output_h5:
        output_h5["data"] = dff
    def __call__(self):
        """Lazily build and memoize the analysis object for this session."""
        if self.obj is not None:
            return self.obj

        exp = self.experiment_for_session(self.session)
        data_set = BrainObservatoryNwbDataSet(exp['nwb_file'])
        self.obj = self.klass.from_analysis_file(
            data_set, exp['analysis_file'], *self.args)
        return self.obj
    def get_ophys_experiment_data(self, ophys_experiment_id, file_name=None):
        """Download the NWB file for an ophys experiment (unless it is
        already cached) and return a data accessor for it.

        Parameters
        ----------
        ophys_experiment_id: integer
            id of the ophys_experiment to retrieve.

        file_name: string
            File name to save/read the data set. If None, the file name is
            pulled out of the manifest. If caching is disabled, no file will
            be saved. Default is None.

        Returns
        -------
        BrainObservatoryNwbDataSet
        """
        file_name = self.get_cache_path(
            file_name, self.EXPERIMENT_DATA_KEY, ophys_experiment_id)

        # fetch only when the file is not already present on disk
        if not os.path.exists(file_name):
            self.api.save_ophys_experiment_data(ophys_experiment_id, file_name)

        return BrainObservatoryNwbDataSet(file_name)
Ejemplo n.º 9
0
 def save_session_c(self, lsn, nm1, nm2, peak):
     """Persist the session C analysis results to self.save_path."""
     nwb = BrainObservatoryNwbDataSet(self.save_path)

     dataframes = [
         ('stim_table_lsn', lsn.stim_table),
         ('sweep_response_nm1', nm1.sweep_response),
         ('peak', peak),
         ('sweep_response_nm2', nm2.sweep_response),
         ('sweep_response_lsn', lsn.sweep_response),
         ('mean_sweep_response_lsn', lsn.mean_sweep_response),
     ]
     nwb.save_analysis_dataframes(*dataframes)

     arrays = [
         ('receptive_field_lsn', lsn.receptive_field),
         ('celltraces_dff', nm1.dfftraces),
         ('binned_dx_sp', nm1.binned_dx_sp),
         ('binned_dx_vis', nm1.binned_dx_vis),
         ('binned_cells_sp', nm1.binned_cells_sp),
         ('binned_cells_vis', nm1.binned_cells_vis),
     ]
     nwb.save_analysis_arrays(*arrays)
Ejemplo n.º 10
0
 def save_session_a(self, dg, nm1, nm3, peak):
     """Persist the session A analysis results to self.save_path."""
     nwb = BrainObservatoryNwbDataSet(self.save_path)

     dataframes = [
         ('stim_table_dg', dg.stim_table),
         ('sweep_response_dg', dg.sweep_response),
         ('mean_sweep_response_dg', dg.mean_sweep_response),
         ('peak', peak),
         ('sweep_response_nm1', nm1.sweep_response),
         ('stim_table_nm1', nm1.stim_table),
         ('sweep_response_nm3', nm3.sweep_response),
     ]
     nwb.save_analysis_dataframes(*dataframes)

     arrays = [
         ('celltraces_dff', nm1.dfftraces),
         ('response_dg', dg.response),
         ('binned_cells_sp', nm1.binned_cells_sp),
         ('binned_cells_vis', nm1.binned_cells_vis),
         ('binned_dx_sp', nm1.binned_dx_sp),
         ('binned_dx_vis', nm1.binned_dx_vis),
     ]
     nwb.save_analysis_arrays(*arrays)
Ejemplo n.º 11
0
    def __init__(self, nwb_path, save_path):
        """Open the experiment NWB file and prepare analysis output paths.

        Parameters
        ----------
        nwb_path: string
            Path to the experiment NWB file to analyze.
        save_path: string
            Path to the HDF5 file where analysis output will be written.
        """
        self.nwb = BrainObservatoryNwbDataSet(nwb_path)
        self.save_path = save_path
        self.save_dir = os.path.dirname(save_path)

        # one metrics container per session type (A, B, C)
        self.metrics_a, self.metrics_b, self.metrics_c = {}, {}, {}

        # cache the experiment metadata up front
        self.metadata = self.nwb.get_metadata()
Ejemplo n.º 12
0
    def save_session_c2(self, lsn4, lsn8, nm1, nm2, peak):
        """Persist the session C2 analysis results to self.save_path.

        Parameters
        ----------
        lsn4: LocallySparseNoise instance
            Built with stimulus = stimulus_info.LOCALLY_SPARSE_NOISE_4DEG.

        lsn8: LocallySparseNoise instance
            Built with stimulus = stimulus_info.LOCALLY_SPARSE_NOISE_8DEG.

        nm1: NaturalMovie instance
            Built with movie_name = stimulus_info.NATURAL_MOVIE_ONE.

        nm2: NaturalMovie instance
            Built with movie_name = stimulus_info.NATURAL_MOVIE_TWO.

        peak: pd.DataFrame
            The combined peak response property table created in self.session_c2().
        """

        nwb = BrainObservatoryNwbDataSet(self.save_path)

        dataframes = [
            ('stim_table_lsn4', lsn4.stim_table),
            ('stim_table_lsn8', lsn8.stim_table),
            ('sweep_response_nm1', nm1.sweep_response),
            ('peak', peak),
            ('sweep_response_nm2', nm2.sweep_response),
            ('sweep_response_lsn4', lsn4.sweep_response),
            ('sweep_response_lsn8', lsn8.sweep_response),
            ('mean_sweep_response_lsn4', lsn4.mean_sweep_response),
            ('mean_sweep_response_lsn8', lsn8.mean_sweep_response),
        ]
        nwb.save_analysis_dataframes(*dataframes)

        # combine 4deg and 8deg mean responses into a single array
        merge_mean_response = LocallySparseNoise.merge_mean_response(
            lsn4.mean_response, lsn8.mean_response)

        arrays = [
            ('mean_response_lsn4', lsn4.mean_response),
            ('mean_response_lsn8', lsn8.mean_response),
            ('receptive_field_lsn4', lsn4.receptive_field),
            ('receptive_field_lsn8', lsn8.receptive_field),
            ('merge_mean_response', merge_mean_response),
            ('binned_dx_sp', nm1.binned_dx_sp),
            ('binned_dx_vis', nm1.binned_dx_vis),
            ('binned_cells_sp', nm1.binned_cells_sp),
            ('binned_cells_vis', nm1.binned_cells_vis),
        ]
        nwb.save_analysis_arrays(*arrays)

        # receptive-field analysis is stored separately for each resolution
        for lsn, stimulus in ((lsn4, stimulus_info.LOCALLY_SPARSE_NOISE_4DEG),
                              (lsn8, stimulus_info.LOCALLY_SPARSE_NOISE_8DEG)):
            LocallySparseNoise.save_cell_index_receptive_field_analysis(
                lsn.cell_index_receptive_field_analysis_data, nwb, stimulus)
def build_type(nwb_file, data_file, configs, output_dir, type_name):
    """Build the plot/thumbnail outputs for a single stimulus type.

    Parameters
    ----------
    nwb_file: path to the experiment NWB file.
    data_file: path to the analysis data file.
    configs: plot configuration passed through to the build_* helpers.
    output_dir: directory where outputs are written.
    type_name: short stimulus-type code ("dg", "sg", "nm1", ..., "eye").
        NOTE(review): an unrecognized type_name is silently ignored --
        confirm this is intentional.

    Raises
    ------
    Exception
        Any error other than MissingStimulusException is logged and
        re-raised; a missing stimulus only produces a warning.
    """
    data_set = BrainObservatoryNwbDataSet(nwb_file)
    try:
        if type_name == "dg":
            dga = DriftingGratings.from_analysis_file(data_set, data_file)
            build_drifting_gratings(dga, configs, output_dir)
        elif type_name == "sg":
            sga = StaticGratings.from_analysis_file(data_set, data_file)
            build_static_gratings(sga, configs, output_dir)
        elif type_name == "nm1":
            nma = NaturalMovie.from_analysis_file(data_set, data_file, si.NATURAL_MOVIE_ONE)
            build_natural_movie(nma, configs, output_dir, si.NATURAL_MOVIE_ONE)
        elif type_name == "nm2":
            nma = NaturalMovie.from_analysis_file(data_set, data_file, si.NATURAL_MOVIE_TWO)
            build_natural_movie(nma, configs, output_dir, si.NATURAL_MOVIE_TWO)
        elif type_name == "nm3":
            nma = NaturalMovie.from_analysis_file(data_set, data_file, si.NATURAL_MOVIE_THREE)
            build_natural_movie(nma, configs, output_dir, si.NATURAL_MOVIE_THREE)
        elif type_name == "ns":
            nsa = NaturalScenes.from_analysis_file(data_set, data_file)
            build_natural_scenes(nsa, configs, output_dir)
        elif type_name == "sp":
            # speed tuning reuses the natural movie one analysis
            nma = NaturalMovie.from_analysis_file(data_set, data_file, si.NATURAL_MOVIE_ONE)
            build_speed_tuning(nma, configs, output_dir)
        elif type_name == "lsn_on":
            lsna = lsna_check_hvas(data_set, data_file)
            build_locally_sparse_noise(lsna, configs, output_dir, True)
        elif type_name == "lsn_off":
            lsna = lsna_check_hvas(data_set, data_file)
            build_locally_sparse_noise(lsna, configs, output_dir, False)
        elif type_name == "rf":
            lsna = lsna_check_hvas(data_set, data_file)
            build_receptive_field(lsna, configs, output_dir)
        elif type_name == "corr":
            build_correlation_plots(data_set, data_file, configs, output_dir)
        elif type_name == "eye":
            build_eye_tracking_plots(data_set, configs, output_dir)

    except MissingStimulusException:
        # a session legitimately may not contain every stimulus type
        logging.warning("could not load stimulus (%s)", type_name)
    except Exception:
        traceback.print_exc()
        logging.critical("error running stimulus (%s)", type_name)
        # bare raise re-raises the active exception with its original traceback
        raise
Ejemplo n.º 14
0
    def save_session_c(self, lsn, nm1, nm2, peak):
        """Persist the session C analysis results to self.save_path."""
        nwb = BrainObservatoryNwbDataSet(self.save_path)

        dataframes = [
            ('stim_table_lsn', lsn.stim_table),
            ('sweep_response_nm1', nm1.sweep_response),
            ('peak', peak),
            ('sweep_response_nm2', nm2.sweep_response),
            ('sweep_response_lsn', lsn.sweep_response),
            ('mean_sweep_response_lsn', lsn.mean_sweep_response),
        ]
        nwb.save_analysis_dataframes(*dataframes)

        arrays = [
            ('receptive_field_lsn', lsn.receptive_field),
            ('celltraces_dff', nm1.dfftraces),
            ('binned_dx_sp', nm1.binned_dx_sp),
            ('binned_dx_vis', nm1.binned_dx_vis),
            ('binned_cells_sp', nm1.binned_cells_sp),
            ('binned_cells_vis', nm1.binned_cells_vis),
        ]
        nwb.save_analysis_arrays(*arrays)
Ejemplo n.º 15
0
    def save_session_a(self, dg, nm1, nm3, peak):
        """Persist the session A analysis results to self.save_path."""
        nwb = BrainObservatoryNwbDataSet(self.save_path)

        dataframes = [
            ('stim_table_dg', dg.stim_table),
            ('sweep_response_dg', dg.sweep_response),
            ('mean_sweep_response_dg', dg.mean_sweep_response),
            ('peak', peak),
            ('sweep_response_nm1', nm1.sweep_response),
            ('stim_table_nm1', nm1.stim_table),
            ('sweep_response_nm3', nm3.sweep_response),
        ]
        nwb.save_analysis_dataframes(*dataframes)

        arrays = [
            ('celltraces_dff', nm1.dfftraces),
            ('response_dg', dg.response),
            ('binned_cells_sp', nm1.binned_cells_sp),
            ('binned_cells_vis', nm1.binned_cells_vis),
            ('binned_dx_sp', nm1.binned_dx_sp),
            ('binned_dx_vis', nm1.binned_dx_vis),
        ]
        nwb.save_analysis_arrays(*arrays)
Ejemplo n.º 16
0
    def save_session_b(self, sg, nm1, ns, peak):
        """Persist the session B analysis results to self.save_path.

        Parameters
        ----------
        sg: StaticGratings instance

        nm1: NaturalMovie instance
            Built with movie_name = stimulus_info.NATURAL_MOVIE_ONE.

        ns: NaturalScenes instance

        peak: pd.DataFrame
            The combined peak response property table created in self.session_b().
        """

        nwb = BrainObservatoryNwbDataSet(self.save_path)

        dataframes = [
            ('stim_table_sg', sg.stim_table),
            ('sweep_response_sg', sg.sweep_response),
            ('mean_sweep_response_sg', sg.mean_sweep_response),
            ('sweep_response_nm1', nm1.sweep_response),
            ('stim_table_nm1', nm1.stim_table),
            ('sweep_response_ns', ns.sweep_response),
            ('stim_table_ns', ns.stim_table),
            ('mean_sweep_response_ns', ns.mean_sweep_response),
            ('peak', peak),
        ]
        nwb.save_analysis_dataframes(*dataframes)

        arrays = [
            ('response_sg', sg.response),
            ('response_ns', ns.response),
            ('binned_cells_sp', nm1.binned_cells_sp),
            ('binned_cells_vis', nm1.binned_cells_vis),
            ('binned_dx_sp', nm1.binned_dx_sp),
            ('binned_dx_vis', nm1.binned_dx_vis),
            ('noise_corr_sg', sg.noise_correlation),
            ('signal_corr_sg', sg.signal_correlation),
            ('rep_similarity_sg', sg.representational_similarity),
            ('noise_corr_ns', ns.noise_correlation),
            ('signal_corr_ns', ns.signal_correlation),
            ('rep_similarity_ns', ns.representational_similarity),
        ]
        nwb.save_analysis_arrays(*arrays)
Ejemplo n.º 17
0
class SessionAnalysis(object):
    """ 
    Run all of the stimulus-specific analyses associated with a single experiment session. 

    Parameters
    ----------
    nwb_path: string, path to NWB file

    save_path: string, path to HDF5 file to store outputs.  Recommended NOT to modify the NWB file.
    """

    _log = logging.getLogger('allensdk.brain_observatory.session_analysis')

    def __init__(self, nwb_path, save_path):
        """Open the experiment NWB file and prepare analysis output paths.

        Parameters
        ----------
        nwb_path: string
            Path to the experiment NWB file to analyze.
        save_path: string
            Path to the HDF5 file where analysis output will be written.
        """
        self.nwb = BrainObservatoryNwbDataSet(nwb_path)
        self.save_path = save_path
        self.save_dir = os.path.dirname(save_path)

        # per-session metric containers, each split into a per-cell table
        # and a per-experiment table
        self.metrics_a = {'cell': {}, 'experiment': {}}
        self.metrics_b = {'cell': {}, 'experiment': {}}
        self.metrics_c = {'cell': {}, 'experiment': {}}

        self.metadata = self.nwb.get_metadata()

    def append_metadata(self, df):
        """Copy every NWB metadata field into *df* as a constant column."""

        for key in self.metadata:
            df[key] = self.metadata[key]

    def save_session_a(self, dg, nm1, nm3, peak):
        """Persist the session A analysis results to self.save_path.

        Parameters
        ----------
        dg: DriftingGratings instance

        nm1: NaturalMovie instance
            Built with movie_name = stimulus_info.NATURAL_MOVIE_ONE.

        nm3: NaturalMovie instance
            Built with movie_name = stimulus_info.NATURAL_MOVIE_THREE.

        peak: pd.DataFrame
            The combined peak response property table created in self.session_a().
        """

        nwb = BrainObservatoryNwbDataSet(self.save_path)

        dataframes = [
            ('stim_table_dg', dg.stim_table),
            ('sweep_response_dg', dg.sweep_response),
            ('mean_sweep_response_dg', dg.mean_sweep_response),
            ('peak', peak),
            ('sweep_response_nm1', nm1.sweep_response),
            ('stim_table_nm1', nm1.stim_table),
            ('sweep_response_nm3', nm3.sweep_response),
        ]
        nwb.save_analysis_dataframes(*dataframes)

        arrays = [
            ('response_dg', dg.response),
            ('binned_cells_sp', nm1.binned_cells_sp),
            ('binned_cells_vis', nm1.binned_cells_vis),
            ('binned_dx_sp', nm1.binned_dx_sp),
            ('binned_dx_vis', nm1.binned_dx_vis),
            ('noise_corr_dg', dg.noise_correlation),
            ('signal_corr_dg', dg.signal_correlation),
            ('rep_similarity_dg', dg.representational_similarity),
        ]
        nwb.save_analysis_arrays(*arrays)


    def save_session_b(self, sg, nm1, ns, peak):
        """Persist the session B analysis results to self.save_path.

        Parameters
        ----------
        sg: StaticGratings instance

        nm1: NaturalMovie instance
            Built with movie_name = stimulus_info.NATURAL_MOVIE_ONE.

        ns: NaturalScenes instance

        peak: pd.DataFrame
            The combined peak response property table created in self.session_b().
        """

        nwb = BrainObservatoryNwbDataSet(self.save_path)

        dataframes = [
            ('stim_table_sg', sg.stim_table),
            ('sweep_response_sg', sg.sweep_response),
            ('mean_sweep_response_sg', sg.mean_sweep_response),
            ('sweep_response_nm1', nm1.sweep_response),
            ('stim_table_nm1', nm1.stim_table),
            ('sweep_response_ns', ns.sweep_response),
            ('stim_table_ns', ns.stim_table),
            ('mean_sweep_response_ns', ns.mean_sweep_response),
            ('peak', peak),
        ]
        nwb.save_analysis_dataframes(*dataframes)

        arrays = [
            ('response_sg', sg.response),
            ('response_ns', ns.response),
            ('binned_cells_sp', nm1.binned_cells_sp),
            ('binned_cells_vis', nm1.binned_cells_vis),
            ('binned_dx_sp', nm1.binned_dx_sp),
            ('binned_dx_vis', nm1.binned_dx_vis),
            ('noise_corr_sg', sg.noise_correlation),
            ('signal_corr_sg', sg.signal_correlation),
            ('rep_similarity_sg', sg.representational_similarity),
            ('noise_corr_ns', ns.noise_correlation),
            ('signal_corr_ns', ns.signal_correlation),
            ('rep_similarity_ns', ns.representational_similarity),
        ]
        nwb.save_analysis_arrays(*arrays)

    def save_session_c(self, lsn, nm1, nm2, peak):
        """Persist the session C analysis results to self.save_path.

        Parameters
        ----------
        lsn: LocallySparseNoise instance

        nm1: NaturalMovie instance
            Built with movie_name = stimulus_info.NATURAL_MOVIE_ONE.

        nm2: NaturalMovie instance
            Built with movie_name = stimulus_info.NATURAL_MOVIE_TWO.

        peak: pd.DataFrame
            The combined peak response property table created in self.session_c().
        """

        nwb = BrainObservatoryNwbDataSet(self.save_path)

        dataframes = [
            ('stim_table_lsn', lsn.stim_table),
            ('sweep_response_nm1', nm1.sweep_response),
            ('peak', peak),
            ('sweep_response_nm2', nm2.sweep_response),
            ('sweep_response_lsn', lsn.sweep_response),
            ('mean_sweep_response_lsn', lsn.mean_sweep_response),
        ]
        nwb.save_analysis_dataframes(*dataframes)

        arrays = [
            ('receptive_field_lsn', lsn.receptive_field),
            ('mean_response_lsn', lsn.mean_response),
            ('binned_dx_sp', nm1.binned_dx_sp),
            ('binned_dx_vis', nm1.binned_dx_vis),
            ('binned_cells_sp', nm1.binned_cells_sp),
            ('binned_cells_vis', nm1.binned_cells_vis),
        ]
        nwb.save_analysis_arrays(*arrays)

        # receptive-field analysis is stored under its own stimulus key
        LocallySparseNoise.save_cell_index_receptive_field_analysis(
            lsn.cell_index_receptive_field_analysis_data, nwb,
            stimulus_info.LOCALLY_SPARSE_NOISE)

    def save_session_c2(self, lsn4, lsn8, nm1, nm2, peak):
        """Persist the session C2 analysis results to self.save_path.

        Parameters
        ----------
        lsn4: LocallySparseNoise instance
            Built with stimulus = stimulus_info.LOCALLY_SPARSE_NOISE_4DEG.

        lsn8: LocallySparseNoise instance
            Built with stimulus = stimulus_info.LOCALLY_SPARSE_NOISE_8DEG.

        nm1: NaturalMovie instance
            Built with movie_name = stimulus_info.NATURAL_MOVIE_ONE.

        nm2: NaturalMovie instance
            Built with movie_name = stimulus_info.NATURAL_MOVIE_TWO.

        peak: pd.DataFrame
            The combined peak response property table created in self.session_c2().
        """

        nwb = BrainObservatoryNwbDataSet(self.save_path)

        dataframes = [
            ('stim_table_lsn4', lsn4.stim_table),
            ('stim_table_lsn8', lsn8.stim_table),
            ('sweep_response_nm1', nm1.sweep_response),
            ('peak', peak),
            ('sweep_response_nm2', nm2.sweep_response),
            ('sweep_response_lsn4', lsn4.sweep_response),
            ('sweep_response_lsn8', lsn8.sweep_response),
            ('mean_sweep_response_lsn4', lsn4.mean_sweep_response),
            ('mean_sweep_response_lsn8', lsn8.mean_sweep_response),
        ]
        nwb.save_analysis_dataframes(*dataframes)

        # combine 4deg and 8deg mean responses into a single array
        merge_mean_response = LocallySparseNoise.merge_mean_response(
            lsn4.mean_response, lsn8.mean_response)

        arrays = [
            ('mean_response_lsn4', lsn4.mean_response),
            ('mean_response_lsn8', lsn8.mean_response),
            ('receptive_field_lsn4', lsn4.receptive_field),
            ('receptive_field_lsn8', lsn8.receptive_field),
            ('merge_mean_response', merge_mean_response),
            ('binned_dx_sp', nm1.binned_dx_sp),
            ('binned_dx_vis', nm1.binned_dx_vis),
            ('binned_cells_sp', nm1.binned_cells_sp),
            ('binned_cells_vis', nm1.binned_cells_vis),
        ]
        nwb.save_analysis_arrays(*arrays)

        # receptive-field analysis is stored separately for each resolution
        for lsn, stimulus in ((lsn4, stimulus_info.LOCALLY_SPARSE_NOISE_4DEG),
                              (lsn8, stimulus_info.LOCALLY_SPARSE_NOISE_8DEG)):
            LocallySparseNoise.save_cell_index_receptive_field_analysis(
                lsn.cell_index_receptive_field_analysis_data, nwb, stimulus)

    def append_metrics_drifting_grating(self, metrics, dg):
        """ Extract metrics from the DriftingGratings peak response table into a dictionary. """

        metrics["osi_dg"] = dg.peak["osi_dg"]
        metrics["dsi_dg"] = dg.peak["dsi_dg"]
        metrics["pref_dir_dg"] = [dg.orivals[i]
                                  for i in dg.peak["ori_dg"].values]
        metrics["pref_tf_dg"] = [dg.tfvals[i] for i in dg.peak["tf_dg"].values]
        metrics["p_dg"] = dg.peak["ptest_dg"]
        metrics["g_osi_dg"] = dg.peak["cv_os_dg"]
        metrics["g_dsi_dg"] = dg.peak["cv_ds_dg"]
        metrics["reliability_dg"] = dg.peak["reliability_dg"]
        metrics["tfdi_dg"] = dg.peak["tf_index_dg"]
        metrics["run_mod_dg"] = dg.peak["run_modulation_dg"]
        metrics["p_run_mod_dg"] = dg.peak["p_run_dg"]
        metrics["peak_dff_dg"] = dg.peak["peak_dff_dg"]

    def append_metrics_static_grating(self, metrics, sg):
        """ Extract metrics from the StaticGratings peak response table into a dictionary. """

        metrics["osi_sg"] = sg.peak["osi_sg"]
        metrics["pref_ori_sg"] = [sg.orivals[i]
                                  for i in sg.peak["ori_sg"].values]
        metrics["pref_sf_sg"] = [sg.sfvals[i] for i in sg.peak["sf_sg"].values]
        metrics["pref_phase_sg"] = [sg.phasevals[i]
                                    for i in sg.peak["phase_sg"].values]
        metrics["p_sg"] = sg.peak["ptest_sg"]
        metrics["time_to_peak_sg"] = sg.peak["time_to_peak_sg"]
        metrics["run_mod_sg"] = sg.peak["run_modulation_sg"]
        metrics["p_run_mod_sg"] = sg.peak["p_run_sg"]
        metrics["g_osi_sg"] = sg.peak["cv_os_sg"]
        metrics["sfdi_sg"] = sg.peak["sf_index_sg"]
        metrics["peak_dff_sg"] = sg.peak["peak_dff_sg"]
        metrics["reliability_sg"] = sg.peak["reliability_sg"]

    def append_metrics_natural_scene(self, metrics, ns):
        """ Extract metrics from the NaturalScenes peak response table into a dictionary. """

        metrics["pref_image_ns"] = ns.peak["scene_ns"]
        metrics["p_ns"] = ns.peak["ptest_ns"]
        metrics["time_to_peak_ns"] = ns.peak["time_to_peak_ns"]
        metrics["image_sel_ns"] = ns.peak["image_selectivity_ns"]
        metrics["reliability_ns"] = ns.peak["reliability_ns"]
        metrics["run_mod_ns"] = ns.peak["run_modulation_ns"]
        metrics["p_run_mod_ns"] = ns.peak["p_run_ns"]
        metrics["peak_dff_ns"] = ns.peak["peak_dff_ns"]

    def append_metrics_locally_sparse_noise(self, metrics, lsn):
        """ Extract metrics from the LocallySparseNoise peak response table into a dictionary. """

        metrics['rf_chi2_lsn'] = lsn.peak['rf_chi2_lsn']
        metrics['rf_area_on_lsn'] = lsn.peak['rf_area_on_lsn']
        metrics['rf_center_on_x_lsn'] = lsn.peak['rf_center_on_x_lsn']
        metrics['rf_center_on_y_lsn'] = lsn.peak['rf_center_on_y_lsn']
        metrics['rf_area_off_lsn'] = lsn.peak['rf_area_off_lsn']
        metrics['rf_center_off_x_lsn'] = lsn.peak['rf_center_off_x_lsn']
        metrics['rf_center_off_y_lsn'] = lsn.peak['rf_center_off_y_lsn']
        metrics['rf_distance_lsn'] = lsn.peak['rf_distance_lsn']
        metrics['rf_overlap_index_lsn'] = lsn.peak['rf_overlap_index_lsn']

    def append_metrics_natural_movie_one(self, metrics, nma):
        """ Extract metrics from the NaturalMovie(stimulus_info.NATURAL_MOVIE_ONE) peak response table into a dictionary. """
        metrics['reliability_nm1'] = nma.peak['response_reliability_nm1']

    def append_metrics_natural_movie_two(self, metrics, nma):
        """ Extract metrics from the NaturalMovie(stimulus_info.NATURAL_MOVIE_TWO) peak response table into a dictionary. """
        metrics['reliability_nm2'] = nma.peak['response_reliability_nm2']

    def append_metrics_natural_movie_three(self, metrics, nma):
        """ Extract metrics from the NaturalMovie(stimulus_info.NATURAL_MOVIE_THREE) peak response table into a dictionary. """
        metrics['reliability_nm3'] = nma.peak['response_reliability_nm3']

    def append_experiment_metrics(self, metrics):
        """ Extract stimulus-agnostic metrics from an experiment into a dictionary """
        dxcm, dxtime = self.nwb.get_running_speed()
        metrics['mean_running_speed'] = np.nanmean(dxcm)

    def verify_roi_lists_equal(self, roi1, roi2):
        """ TODO: replace this with simpler numpy comparisons """

        if len(roi1) != len(roi2):
            raise BrainObservatoryAnalysisException(
                "Error -- ROI lists are of different length")

        for i in range(len(roi1)):
            if roi1[i] != roi2[i]:
                raise BrainObservatoryAnalysisException(
                    "Error -- ROI lists have different entries")

    def session_a(self, plot_flag=False, save_flag=True):
        """ Run stimulus-specific analysis for natural movie one, natural movie three, and drifting gratings.
        The input NWB must be for a stimulus_info.THREE_SESSION_A experiment.

        Parameters
        ----------
        plot_flag: bool
            Whether to generate brain_observatory_plotting work plots after running analysis.

        save_flag: bool
            Whether to save the output of analysis to self.save_path upon completion.
        """

        # build the per-stimulus analysis objects from the session NWB file
        nm1 = NaturalMovie(self.nwb, 'natural_movie_one')
        nm3 = NaturalMovie(self.nwb, 'natural_movie_three')
        dg = DriftingGratings(self.nwb)

        # keep only the correlation/similarity results; the remaining outputs
        # of these calls are discarded
        dg.noise_correlation, _, _, _ = dg.get_noise_correlation()
        dg.signal_correlation, _ = dg.get_signal_correlation()
        dg.representational_similarity, _ = dg.get_representational_similarity()

        SessionAnalysis._log.info("Session A analyzed")
        # combine the per-stimulus peak tables into one table
        peak = multi_dataframe_merge(
            [nm1.peak_run, dg.peak, nm1.peak, nm3.peak])

        # accumulate per-cell and per-experiment metrics for session A
        self.append_metrics_drifting_grating(self.metrics_a['cell'], dg)
        self.append_metrics_natural_movie_one(self.metrics_a['cell'], nm1)
        self.append_metrics_natural_movie_three(self.metrics_a['cell'], nm3)
        self.append_experiment_metrics(self.metrics_a['experiment'])
        self.metrics_a['cell']['roi_id'] = dg.roi_id

        # attach experiment metadata columns to the combined peak table
        self.append_metadata(peak)

        if save_flag:
            self.save_session_a(dg, nm1, nm3, peak)

        if plot_flag:
            cp._plot_3sa(dg, nm1, nm3, self.save_dir)
            cp.plot_drifting_grating_traces(dg, self.save_dir)

    def session_b(self, plot_flag=False, save_flag=True):
        """ Run stimulus-specific analysis for natural scenes, static gratings, and natural movie one.
        The input NWB must be for a stimulus_info.THREE_SESSION_B experiment.

        Parameters
        ----------
        plot_flag: bool
            Whether to generate brain_observatory_plotting work plots after running analysis.

        save_flag: bool
            Whether to save the output of analysis to self.save_path upon completion.
        """

        ns = NaturalScenes(self.nwb)
        sg = StaticGratings(self.nwb)
        nm1 = NaturalMovie(self.nwb, 'natural_movie_one')
        SessionAnalysis._log.info("Session B analyzed")
        peak = multi_dataframe_merge(
            [nm1.peak_run, sg.peak, ns.peak, nm1.peak])
        self.append_metadata(peak)

        # self.metrics_b is expected to hold 'cell' and 'experiment' sub-dicts
        self.append_metrics_static_grating(self.metrics_b['cell'], sg)
        self.append_metrics_natural_scene(self.metrics_b['cell'], ns)
        self.append_metrics_natural_movie_one(self.metrics_b['cell'], nm1)
        self.append_experiment_metrics(self.metrics_b['experiment'])
        # static gratings and natural scenes must agree on ROI ordering
        self.verify_roi_lists_equal(sg.roi_id, ns.roi_id)
        self.metrics_b['cell']['roi_id'] = sg.roi_id

        # attach correlation/similarity structure to the analysis objects
        # (NOTE: computed after the metrics here, unlike session_a's ordering)
        sg.noise_correlation, _, _, _ = sg.get_noise_correlation()
        sg.signal_correlation, _ = sg.get_signal_correlation()
        sg.representational_similarity, _ = sg.get_representational_similarity()

        ns.noise_correlation, _ = ns.get_noise_correlation()
        ns.signal_correlation, _ = ns.get_signal_correlation()
        ns.representational_similarity, _ = ns.get_representational_similarity()

        if save_flag:
            self.save_session_b(sg, nm1, ns, peak)

        if plot_flag:
            cp._plot_3sb(sg, nm1, ns, self.save_dir)
            cp.plot_ns_traces(ns, self.save_dir)
            cp.plot_sg_traces(sg, self.save_dir)

    def session_c(self, plot_flag=False, save_flag=True):
        """ Run stimulus-specific analysis for natural movie one, natural movie two, and locally sparse noise.
        The input NWB must be for a stimulus_info.THREE_SESSION_C experiment.

        Parameters
        ----------
        plot_flag: bool
            Whether to generate brain_observatory_plotting work plots after running analysis.

        save_flag: bool
            Whether to save the output of analysis to self.save_path upon completion.
        """

        lsn = LocallySparseNoise(self.nwb, stimulus_info.LOCALLY_SPARSE_NOISE)
        nm2 = NaturalMovie(self.nwb, 'natural_movie_two')
        nm1 = NaturalMovie(self.nwb, 'natural_movie_one')
        SessionAnalysis._log.info("Session C analyzed")
        peak = multi_dataframe_merge([nm1.peak_run, nm1.peak, nm2.peak, lsn.peak])
        self.append_metadata(peak)

        # self.metrics_c is expected to hold 'cell' and 'experiment' sub-dicts
        self.append_metrics_locally_sparse_noise(self.metrics_c['cell'], lsn)
        self.append_metrics_natural_movie_one(self.metrics_c['cell'], nm1)
        self.append_metrics_natural_movie_two(self.metrics_c['cell'], nm2)
        self.append_experiment_metrics(self.metrics_c['experiment'])
        # ROI ids taken from nm1; presumably all stimuli in this session share
        # the same cell set (session_b checks this explicitly) -- verify
        self.metrics_c['cell']['roi_id'] = nm1.roi_id

        if save_flag:
            self.save_session_c(lsn, nm1, nm2, peak)

        if plot_flag:
            cp._plot_3sc(lsn, nm1, nm2, self.save_dir)
            cp.plot_lsn_traces(lsn, self.save_dir)

    def session_c2(self, plot_flag=False, save_flag=True):
        """ Run stimulus-specific analysis for locally sparse noise (4 deg.), locally sparse noise (8 deg.),
        natural movie one, and natural movie two. The input NWB must be for a stimulus_info.THREE_SESSION_C2 experiment.

        Parameters
        ----------
        plot_flag: bool
            Whether to generate brain_observatory_plotting work plots after running analysis.

        save_flag: bool
            Whether to save the output of analysis to self.save_path upon completion.
        """

        lsn4 = LocallySparseNoise(self.nwb, stimulus_info.LOCALLY_SPARSE_NOISE_4DEG)
        lsn8 = LocallySparseNoise(self.nwb, stimulus_info.LOCALLY_SPARSE_NOISE_8DEG)

        nm2 = NaturalMovie(self.nwb, 'natural_movie_two')
        nm1 = NaturalMovie(self.nwb, 'natural_movie_one')
        SessionAnalysis._log.info("Session C2 analyzed")

        # VISp experiments take peak metrics from the 4 deg. stimulus;
        # all other targeted structures use the 8 deg. stimulus
        if self.nwb.get_metadata()['targeted_structure'] == 'VISp':
            lsn_peak = lsn4
        else:
            lsn_peak = lsn8

        peak = multi_dataframe_merge([nm1.peak_run, nm1.peak, nm2.peak, lsn_peak.peak])
        self.append_metadata(peak)

        # self.metrics_c is expected to hold 'cell' and 'experiment' sub-dicts
        self.append_metrics_locally_sparse_noise(self.metrics_c['cell'], lsn_peak)
        self.append_metrics_natural_movie_one(self.metrics_c['cell'], nm1)
        self.append_metrics_natural_movie_two(self.metrics_c['cell'], nm2)
        self.append_experiment_metrics(self.metrics_c['experiment'])
        self.metrics_c['cell']['roi_id'] = nm1.roi_id

        if save_flag:
            self.save_session_c2(lsn4, lsn8, nm1, nm2, peak)

        if plot_flag:
            cp._plot_3sc(lsn4, nm1, nm2, self.save_dir, '_4deg')
            cp._plot_3sc(lsn8, nm1, nm2, self.save_dir, '_8deg')
            cp.plot_lsn_traces(lsn4, self.save_dir, '_4deg')
            # BUG FIX: the '_8deg' traces were previously plotted from lsn4;
            # the 8 deg. analysis object is lsn8
            cp.plot_lsn_traces(lsn8, self.save_dir, '_8deg')
def data_set(request):
    """ Pytest-style fixture: build a BrainObservatoryNwbDataSet from the
    parametrized NWB path, verifying the file exists first. """
    nwb_path = request.param
    assert os.path.exists(nwb_path)
    return BrainObservatoryNwbDataSet(nwb_path)
Ejemplo n.º 19
0
class SessionAnalysis(object):
    """ Run stimulus-specific analyses for a Brain Observatory session NWB
    file and save the results to a second NWB file.

    Parameters
    ----------
    nwb_path: string
        Path to the input NWB file.

    save_path: string
        Path of the NWB file that analysis output is written to.
    """

    _log = logging.getLogger('allensdk.brain_observatory.session_analysis')

    def __init__(self, nwb_path, save_path):
        self.nwb = BrainObservatoryNwbDataSet(nwb_path)
        self.save_path = save_path
        self.save_dir = os.path.dirname(save_path)

        # per-session metric accumulators, filled by the session_* methods
        self.metrics_a = {}
        self.metrics_b = {}
        self.metrics_c = {}

        self.metadata = self.nwb.get_metadata()

    def append_metadata(self, df):
        """ Copy each session metadata key/value onto df as a column. """
        # BUG FIX: dict.iteritems() is Python-2-only; items() behaves
        # identically here and also works on Python 3
        for k, v in self.metadata.items():
            df[k] = v

    def save_session_a(self, dg, nm1, nm3, peak):
        """ Write session A analysis output (drifting gratings, natural
        movies one/three, merged peak table) to self.save_path. """
        nwb = BrainObservatoryNwbDataSet(self.save_path)
        nwb.save_analysis_dataframes(
            ('stim_table_dg', dg.stim_table),
            ('sweep_response_dg', dg.sweep_response),
            ('mean_sweep_response_dg', dg.mean_sweep_response), ('peak', peak),
            ('sweep_response_nm1', nm1.sweep_response),
            ('stim_table_nm1', nm1.stim_table),
            ('sweep_response_nm3', nm3.sweep_response))

        nwb.save_analysis_arrays(('celltraces_dff', nm1.dfftraces),
                                 ('response_dg', dg.response),
                                 ('binned_cells_sp', nm1.binned_cells_sp),
                                 ('binned_cells_vis', nm1.binned_cells_vis),
                                 ('binned_dx_sp', nm1.binned_dx_sp),
                                 ('binned_dx_vis', nm1.binned_dx_vis))

    def save_session_b(self, sg, nm1, ns, peak):
        """ Write session B analysis output (static gratings, natural scenes,
        natural movie one, merged peak table) to self.save_path. """
        nwb = BrainObservatoryNwbDataSet(self.save_path)
        nwb.save_analysis_dataframes(
            ('stim_table_sg', sg.stim_table),
            ('sweep_response_sg', sg.sweep_response),
            ('mean_sweep_response_sg', sg.mean_sweep_response),
            ('sweep_response_nm1', nm1.sweep_response),
            ('stim_table_nm1', nm1.stim_table),
            ('sweep_response_ns', ns.sweep_response),
            ('stim_table_ns', ns.stim_table),
            ('mean_sweep_response_ns', ns.mean_sweep_response), ('peak', peak))

        nwb.save_analysis_arrays(('celltraces_dff', nm1.dfftraces),
                                 ('response_sg', sg.response),
                                 ('response_ns', ns.response),
                                 ('binned_cells_sp', nm1.binned_cells_sp),
                                 ('binned_cells_vis', nm1.binned_cells_vis),
                                 ('binned_dx_sp', nm1.binned_dx_sp),
                                 ('binned_dx_vis', nm1.binned_dx_vis))

    def save_session_c(self, lsn, nm1, nm2, peak):
        """ Write session C analysis output (locally sparse noise, natural
        movies one/two, merged peak table) to self.save_path. """
        nwb = BrainObservatoryNwbDataSet(self.save_path)
        nwb.save_analysis_dataframes(
            ('stim_table_lsn', lsn.stim_table),
            ('sweep_response_nm1', nm1.sweep_response), ('peak', peak),
            ('sweep_response_nm2', nm2.sweep_response),
            ('sweep_response_lsn', lsn.sweep_response),
            ('mean_sweep_response_lsn', lsn.mean_sweep_response))

        nwb.save_analysis_arrays(('receptive_field_lsn', lsn.receptive_field),
                                 ('celltraces_dff', nm1.dfftraces),
                                 ('binned_dx_sp', nm1.binned_dx_sp),
                                 ('binned_dx_vis', nm1.binned_dx_vis),
                                 ('binned_cells_sp', nm1.binned_cells_sp),
                                 ('binned_cells_vis', nm1.binned_cells_vis))

    def append_metrics_drifting_grating(self, metrics, dg):
        """ Copy drifting-grating peak metrics into the metrics dict,
        mapping orientation/temporal-frequency indices to actual values. """
        metrics["osi_dg"] = dg.peak["osi_dg"]
        metrics["dsi_dg"] = dg.peak["dsi_dg"]
        metrics["pref_dir_dg"] = [
            dg.orivals[i] for i in dg.peak["ori_dg"].values
        ]
        metrics["pref_tf_dg"] = [dg.tfvals[i] for i in dg.peak["tf_dg"].values]
        metrics["p_dg"] = dg.peak["ptest_dg"]

    def append_metrics_static_grating(self, metrics, sg):
        """ Copy static-grating peak metrics into the metrics dict, mapping
        orientation/spatial-frequency/phase indices to actual values. """
        metrics["osi_sg"] = sg.peak["osi_sg"]
        metrics["pref_ori_sg"] = [
            sg.orivals[i] for i in sg.peak["ori_sg"].values
        ]
        metrics["pref_sf_sg"] = [sg.sfvals[i] for i in sg.peak["sf_sg"].values]
        metrics["pref_phase_sg"] = [
            sg.phasevals[i] for i in sg.peak["phase_sg"].values
        ]
        metrics["p_sg"] = sg.peak["ptest_sg"]
        metrics["time_to_peak_sg"] = sg.peak["time_to_peak_sg"]

    def append_metrics_natural_scene(self, metrics, ns):
        """ Copy natural-scene peak metrics into the metrics dict. """
        metrics["pref_image_ns"] = ns.peak["scene_ns"]
        metrics["p_ns"] = ns.peak["ptest_ns"]
        metrics["time_to_peak_ns"] = ns.peak["time_to_peak_ns"]

    def verify_roi_lists_equal(self, roi1, roi2):
        """ Raise BrainObservatoryAnalysisException if the two ROI id lists
        differ in length or in any entry. """
        if len(roi1) != len(roi2):
            raise BrainObservatoryAnalysisException(
                "Error -- ROI lists are of different length")
        for i in range(len(roi1)):
            if roi1[i] != roi2[i]:
                raise BrainObservatoryAnalysisException(
                    "Error -- ROI lists have different entries")

    def session_a(self, plot_flag=False, save_flag=True):
        """ Analyze natural movies one/three and drifting gratings
        (three_session_A), optionally plotting and saving results. """
        nm1 = NaturalMovie(self.nwb, 'natural_movie_one', speed_tuning=True)
        nm3 = NaturalMovie(self.nwb, 'natural_movie_three')
        dg = DriftingGratings(self.nwb)

        SessionAnalysis._log.info("Session A analyzed")
        peak = multi_dataframe_merge(
            [nm1.peak_run, dg.peak, nm1.peak, nm3.peak])

        self.append_metrics_drifting_grating(self.metrics_a, dg)
        self.metrics_a["roi_id"] = dg.roi_id

        self.append_metadata(peak)

        if save_flag:
            self.save_session_a(dg, nm1, nm3, peak)

        if plot_flag:
            cp._plot_3sa(dg, nm1, nm3, self.save_dir)
            cp.plot_drifting_grating_traces(dg, self.save_dir)

    def session_b(self, plot_flag=False, save_flag=True):
        """ Analyze natural scenes, static gratings, and natural movie one
        (three_session_B), optionally plotting and saving results. """
        ns = NaturalScenes(self.nwb)
        sg = StaticGratings(self.nwb)
        nm1 = NaturalMovie(self.nwb, 'natural_movie_one', speed_tuning=True)
        SessionAnalysis._log.info("Session B analyzed")
        peak = multi_dataframe_merge(
            [nm1.peak_run, sg.peak, ns.peak, nm1.peak])
        self.append_metadata(peak)

        self.append_metrics_static_grating(self.metrics_b, sg)
        self.append_metrics_natural_scene(self.metrics_b, ns)
        # static gratings and natural scenes must agree on ROI ordering
        self.verify_roi_lists_equal(sg.roi_id, ns.roi_id)
        self.metrics_b["roi_id"] = sg.roi_id

        if save_flag:
            self.save_session_b(sg, nm1, ns, peak)

        if plot_flag:
            cp._plot_3sb(sg, nm1, ns, self.save_dir)
            cp.plot_ns_traces(ns, self.save_dir)
            cp.plot_sg_traces(sg, self.save_dir)

    def session_c(self, plot_flag=False, save_flag=True):
        """ Analyze locally sparse noise and natural movies one/two
        (three_session_C), optionally plotting and saving results. """
        lsn = LocallySparseNoise(self.nwb)
        nm2 = NaturalMovie(self.nwb, 'natural_movie_two')
        nm1 = NaturalMovie(self.nwb, 'natural_movie_one', speed_tuning=True)
        SessionAnalysis._log.info("Session C analyzed")
        peak = multi_dataframe_merge([nm1.peak_run, nm1.peak, nm2.peak])
        self.append_metadata(peak)

        self.metrics_c["roi_id"] = nm1.roi_id

        if save_flag:
            self.save_session_c(lsn, nm1, nm2, peak)

        if plot_flag:
            cp._plot_3sc(lsn, nm1, nm2, self.save_dir)
            cp.plot_lsn_traces(lsn, self.save_dir)
Ejemplo n.º 20
0
class SessionAnalysis(object):
    """ Drive per-session Brain Observatory analyses and persist their
    output into a separate NWB file.

    Parameters
    ----------
    nwb_path: string
        Input NWB file to analyze.

    save_path: string
        NWB file that receives the analysis dataframes/arrays.
    """

    _log = logging.getLogger('allensdk.brain_observatory.session_analysis')

    def __init__(self, nwb_path, save_path):
        self.nwb = BrainObservatoryNwbDataSet(nwb_path)
        self.save_path = save_path
        self.save_dir = os.path.dirname(save_path)

        # metric accumulators for the three session types
        self.metrics_a = {}
        self.metrics_b = {}
        self.metrics_c = {}

        self.metadata = self.nwb.get_metadata()

    def append_metadata(self, df):
        """ Add every metadata entry to df as a column. """
        # BUG FIX: replaced Python-2-only dict.iteritems() with items(),
        # which is equivalent here and Python-3 compatible
        for k, v in self.metadata.items():
            df[k] = v

    def save_session_a(self, dg, nm1, nm3, peak):
        """ Persist session A results (drifting gratings + movies 1/3). """
        nwb = BrainObservatoryNwbDataSet(self.save_path)
        nwb.save_analysis_dataframes(
            ('stim_table_dg', dg.stim_table),
            ('sweep_response_dg', dg.sweep_response),
            ('mean_sweep_response_dg', dg.mean_sweep_response),
            ('peak', peak),
            ('sweep_response_nm1', nm1.sweep_response),
            ('stim_table_nm1', nm1.stim_table),
            ('sweep_response_nm3', nm3.sweep_response))

        nwb.save_analysis_arrays(
            ('celltraces_dff', nm1.dfftraces),
            ('response_dg', dg.response),
            ('binned_cells_sp', nm1.binned_cells_sp),
            ('binned_cells_vis', nm1.binned_cells_vis),
            ('binned_dx_sp', nm1.binned_dx_sp),
            ('binned_dx_vis', nm1.binned_dx_vis))

    def save_session_b(self, sg, nm1, ns, peak):
        """ Persist session B results (static gratings, scenes, movie 1). """
        nwb = BrainObservatoryNwbDataSet(self.save_path)
        nwb.save_analysis_dataframes(
            ('stim_table_sg', sg.stim_table),
            ('sweep_response_sg', sg.sweep_response),
            ('mean_sweep_response_sg', sg.mean_sweep_response),
            ('sweep_response_nm1', nm1.sweep_response),
            ('stim_table_nm1', nm1.stim_table),
            ('sweep_response_ns', ns.sweep_response),
            ('stim_table_ns', ns.stim_table),
            ('mean_sweep_response_ns', ns.mean_sweep_response),
            ('peak', peak))

        nwb.save_analysis_arrays(
            ('celltraces_dff', nm1.dfftraces),
            ('response_sg', sg.response),
            ('response_ns', ns.response),
            ('binned_cells_sp', nm1.binned_cells_sp),
            ('binned_cells_vis', nm1.binned_cells_vis),
            ('binned_dx_sp', nm1.binned_dx_sp),
            ('binned_dx_vis', nm1.binned_dx_vis))

    def save_session_c(self, lsn, nm1, nm2, peak):
        """ Persist session C results (sparse noise + movies 1/2). """
        nwb = BrainObservatoryNwbDataSet(self.save_path)
        nwb.save_analysis_dataframes(
            ('stim_table_lsn', lsn.stim_table),
            ('sweep_response_nm1', nm1.sweep_response),
            ('peak', peak),
            ('sweep_response_nm2', nm2.sweep_response),
            ('sweep_response_lsn', lsn.sweep_response),
            ('mean_sweep_response_lsn', lsn.mean_sweep_response))

        nwb.save_analysis_arrays(
            ('receptive_field_lsn', lsn.receptive_field),
            ('celltraces_dff', nm1.dfftraces),
            ('binned_dx_sp', nm1.binned_dx_sp),
            ('binned_dx_vis', nm1.binned_dx_vis),
            ('binned_cells_sp', nm1.binned_cells_sp),
            ('binned_cells_vis', nm1.binned_cells_vis))

    def append_metrics_drifting_grating(self, metrics, dg):
        """ Record drifting-grating metrics, resolving index columns to
        orientation/temporal-frequency values. """
        metrics["osi_dg"] = dg.peak["osi_dg"]
        metrics["dsi_dg"] = dg.peak["dsi_dg"]
        metrics["pref_dir_dg"] = [ dg.orivals[i] for i in dg.peak["ori_dg"].values ]
        metrics["pref_tf_dg"] = [ dg.tfvals[i] for i in dg.peak["tf_dg"].values ]
        metrics["p_dg"] = dg.peak["ptest_dg"]

    def append_metrics_static_grating(self, metrics, sg):
        """ Record static-grating metrics, resolving index columns to
        orientation/spatial-frequency/phase values. """
        metrics["osi_sg"] = sg.peak["osi_sg"]
        metrics["pref_ori_sg"] = [ sg.orivals[i] for i in sg.peak["ori_sg"].values ]
        metrics["pref_sf_sg"] = [ sg.sfvals[i] for i in sg.peak["sf_sg"].values ]
        metrics["pref_phase_sg"] = [ sg.phasevals[i] for i in sg.peak["phase_sg"].values ]
        metrics["p_sg"] = sg.peak["ptest_sg"]
        metrics["time_to_peak_sg"] = sg.peak["time_to_peak_sg"]

    def append_metrics_natural_scene(self, metrics, ns):
        """ Record natural-scene peak metrics. """
        metrics["pref_image_ns"] = ns.peak["scene_ns"]
        metrics["p_ns"] = ns.peak["ptest_ns"]
        metrics["time_to_peak_ns"] = ns.peak["time_to_peak_ns"]

    def verify_roi_lists_equal(self, roi1, roi2):
        """ Raise if the two ROI id lists are not identical. """
        if len(roi1) != len(roi2):
            raise BrainObservatoryAnalysisException("Error -- ROI lists are of different length")
        for i in range(len(roi1)):
            if roi1[i] != roi2[i]:
                raise BrainObservatoryAnalysisException("Error -- ROI lists have different entries")

    def session_a(self, plot_flag=False, save_flag=True):
        """ Session A: drifting gratings plus natural movies one and three. """
        nm1 = NaturalMovie(self.nwb, 'natural_movie_one', speed_tuning=True)
        nm3 = NaturalMovie(self.nwb, 'natural_movie_three')
        dg = DriftingGratings(self.nwb)

        SessionAnalysis._log.info("Session A analyzed")
        peak = multi_dataframe_merge([nm1.peak_run, dg.peak, nm1.peak, nm3.peak])

        self.append_metrics_drifting_grating(self.metrics_a, dg)
        self.metrics_a["roi_id"] = dg.roi_id

        self.append_metadata(peak)

        if save_flag:
            self.save_session_a(dg, nm1, nm3, peak)

        if plot_flag:
            cp._plot_3sa(dg, nm1, nm3, self.save_dir)
            cp.plot_drifting_grating_traces(dg, self.save_dir)

    def session_b(self, plot_flag=False, save_flag=True):
        """ Session B: static gratings, natural scenes, natural movie one. """
        ns = NaturalScenes(self.nwb)
        sg = StaticGratings(self.nwb)
        nm1 = NaturalMovie(self.nwb, 'natural_movie_one', speed_tuning=True)
        SessionAnalysis._log.info("Session B analyzed")
        peak = multi_dataframe_merge([nm1.peak_run, sg.peak, ns.peak, nm1.peak])
        self.append_metadata(peak)

        self.append_metrics_static_grating(self.metrics_b, sg)
        self.append_metrics_natural_scene(self.metrics_b, ns)
        # both stimuli must share the same ROI ordering
        self.verify_roi_lists_equal(sg.roi_id, ns.roi_id)
        self.metrics_b["roi_id"] = sg.roi_id

        if save_flag:
            self.save_session_b(sg, nm1, ns, peak)

        if plot_flag:
            cp._plot_3sb(sg, nm1, ns, self.save_dir)
            cp.plot_ns_traces(ns, self.save_dir)
            cp.plot_sg_traces(sg, self.save_dir)

    def session_c(self, plot_flag=False, save_flag=True):
        """ Session C: locally sparse noise plus natural movies one and two. """
        lsn = LocallySparseNoise(self.nwb)
        nm2 = NaturalMovie(self.nwb, 'natural_movie_two')
        nm1 = NaturalMovie(self.nwb, 'natural_movie_one', speed_tuning=True)
        SessionAnalysis._log.info("Session C analyzed")
        peak = multi_dataframe_merge([nm1.peak_run, nm1.peak, nm2.peak])
        self.append_metadata(peak)

        self.metrics_c["roi_id"] = nm1.roi_id

        if save_flag:
            self.save_session_c(lsn, nm1, nm2, peak)

        if plot_flag:
            cp._plot_3sc(lsn, nm1, nm2, self.save_dir)
            cp.plot_lsn_traces(lsn, self.save_dir)
Ejemplo n.º 21
0
def data_set(request):
    """ Construct a BrainObservatoryNwbDataSet from the parametrized path. """
    return BrainObservatoryNwbDataSet(request.param)
Ejemplo n.º 22
0
def run(ids, ann_data):
    """ Plot fidget-triggered average fluorescence for annotated experiments.

    For each LIMS id (only '509292861' is processed -- hard-coded below),
    locate the experiment's NWB/H5/PKL files, align annotated behavior frames
    with physiology frames, collect dF/F traces around each detected fidget
    bout, and plot per-cell averages.

    Parameters
    ----------
    ids: iterable of str
        LIMS experiment ids; consumed in step with ann_data via `count`.

    ann_data: sequence
        Per-experiment annotated behavior arrays.

    NOTE(review): relies on module-level helpers imported elsewhere in this
    file (connect, ld, gw, sm, Imputer, isnan, pandas, np, h5py, plt); the
    non-raw Windows path literals below make this Python-2-only as written
    -- confirm before porting to Python 3.
    """

    # boc = BrainObservatoryCache(manifest_file='boc/manifest.json')

    # Get data container information
    count = 0

    # LIMS database connection parameters
    db_params = {
        u'dbname': u'lims2',
        u'user': u'limsreader',
        u'host': u'limsdb2',
        u'password': u'limsro',
        u'port': 5432
    }

    QUERY = " ".join((
        "SELECT sp.name, ec.id, ec.workflow_state, eso.stimulus_name, eso.id, eso.workflow_state",
        "FROM experiment_sessions eso",
        "LEFT JOIN experiment_containers ec ON ec.id = eso.experiment_container_id",
        "JOIN specimens sp ON sp.id=eso.specimen_id", "WHERE eso.id='{}';"))

    def get_db_cursor(dbname, user, host, password, port):
        # open a DB connection (presumably psycopg2's connect) and return its cursor
        con = connect(dbname=dbname,
                      user=user,
                      host=host,
                      password=password,
                      port=port)
        return con.cursor()

    cur = get_db_cursor(**db_params)

    # find container status
    def find_status(lims_id):
        query = QUERY.format(lims_id)
        cur.execute(query)
        return cur.fetchall()

    # types of possible stimuli
    stimuli = [
        'spontaneous_stimulus', 'drifting_gratings_stimulus',
        'natural_movie_one_stimulus', 'natural_movie_two_stimulus',
        'natural_movie_three_stimulus', 'static_gratings_stimulus',
        'locally_sparse_noise_stimulus'
    ]

    A_count = 0
    B_count = 0
    C_count = 0

    # read in global report for analysis and read excel ids
    excel_data = pandas.read_excel(
        'C:\Users\mahdir\Documents\Allen Projects\Behavior Annotation\global_report.xlsx',
        sheetname='all')
    excel_ids = excel_data['lims ID']
    mid_list = excel_data['lims_database_specimen_id']

    # unique = np.unique(rig_list)
    # # unique_operator = np.unique(operator_list).tolist()
    # session_results = [[] for _ in range(17)]

    anxiety = []
    session = []

    # NOTE(review): 'id' shadows the builtin; hard-coded to one experiment
    for id in ids:
        if id.strip() == '509292861':
            id = id.strip()
            # qc_status = np.array(find_status(id.strip()))
            # status = qc_status[(0)][2]
            # check if failed
            # if 'failed' not in status:
            #     print (' experiment ' + str(id) + ' did not pass QC')
            #

            # get new video directory
            new_dir = '\\' + str(ld(id).get_video_directory())[0:-1]

            nwb_path = None
            h5_file = None
            pkl_file = None
            video_file = None
            # get experiment's associated files
            for file in os.listdir(new_dir):
                if file.endswith(".nwb"):
                    # make sure file is in there!
                    nwb_path = os.path.join(new_dir, file)
                    # input file path, r is for read only
            if bool(nwb_path) == False:
                print('NWB file not found')
                continue

            for file in os.listdir(new_dir):
                # looks for the h5 file and makes the directory to it
                if file.endswith("sync.h5"):
                    h5_file = os.path.join(new_dir, file)
            if bool(h5_file) == False:
                print('H5 file not found')
                continue

            for file in os.listdir(new_dir):
                # looks for the pkl file and makes the directory to it
                if file.endswith("stim.pkl"):
                    pkl_file = os.path.join(new_dir, file)
            if bool(pkl_file) == False:
                print('PKL file not found')
                continue

            # get wheel data and first non Nan value
            grab_wheel = gw(h5_file)
            frames = grab_wheel.return_frames()
            wheel = grab_wheel.getRunningData(pkl_file, frames)
            first_non_nan = next(x for x in wheel if not isnan(x))
            first_index = np.where(wheel == first_non_nan)[0]
            imp = Imputer(missing_values='NaN', strategy='mean')
            # normalize wheel data according to wheel scaler
            wheel = imp.fit_transform(wheel)
            # k: index of the first valid wheel sample; annotations start here
            k = first_index[0]

            # get behavior and neural activity timing, as well as annotated data and dff traces
            neuron = BrainObservatoryNwbDataSet(nwb_path)
            n_data = neuron.get_dff_traces()

            beh_time = sm(new_dir).get_frame_times_behavior()
            neuro_time = sm(new_dir).get_frame_times_physio()
            data = ann_data[count]

            # get visual cortex movie
            path = '\\\\allen\\programs\\braintv\\production\\neuralcoding\\prod6\\specimen_501800347\\ophys_experiment_509292861\\processed\\concat_31Hz_0.h5'
            F = h5py.File(path)

            # determine how many unique fidget examples there are
            # (a 1->0 or 2->0 label transition marks a bout -- presumably;
            # verify against the annotation encoding)
            flu_count = 0

            limit = len(data) - 200
            if limit > len(beh_time):
                limit = len(beh_time)
            if limit > len(neuro_time):
                limit = len(neuro_time)

            for f in range(len(data) - 200):
                if (data[f] == 1
                        and data[f + 1] == 0) or (data[f] == 2
                                                  and data[f + 1] == 0):
                    flu_count += 1

            # initialize data arrays
            fluorescent_traces = [[[] for _ in range(len(n_data[1]))]
                                  for _ in range(flu_count)]
            fluorescent_traces_cell = [[[] for _ in range(flu_count)]
                                       for _ in range(len(n_data[1]))]
            video_traces = [[[] for _ in range(flu_count)] for _ in range(300)]

            print(flu_count)
            flu_count = 0

            indices = []

            print('calculating average movie and neural activity')
            # for each frame, check whether its fidget or not
            for f in range(len(data) - 200):
                if (data[f] == 1
                        and data[f + 1] == 0) or (data[f] == 2
                                                  and data[f + 1] == 0):
                    # get behavior time (must be offset by the index of first wheel value since annotated data starts then)
                    b_time = beh_time[f + k]
                    # use to get associated video time in seconds
                    t = beh_time[f]
                    # get mapped fluorescence index
                    idx = (np.abs(neuro_time - b_time)).argmin()
                    # store each cell's fluorescent trace 100 frames back and 200 forward from start of fidget
                    for p in range(len(n_data[1])):
                        # keeps track per fidget example
                        fluorescent_traces[flu_count][p] = n_data[1][p][idx -
                                                                        100:
                                                                        (idx +
                                                                         200)]
                        # keeps track per cell
                        fluorescent_traces_cell[p][flu_count] = n_data[1][p][
                            idx - 100:(idx + 200)]
                    # save video trace of neural activity
                    # for h in range(300):
                    #
                    #     video_traces[h][flu_count] = np.array(F['data'][ int(idx-100+h),:,:])

                    # data_pointer.release()
                    # cv2.destroyAllWindows()
                    flu_count += 1

                if f % 1000 == 0:
                    print(f)

            average_video = [[] for _ in range(300)]
            firing1 = []
            firing2 = []
            indices = []

            # for each fidget example, get examples where there is a significant difference between during and after fidget
            # response, average those videos together and write them out (then use makeavi programto make video)
            # for i in range (flu_count):
            #     temp = np.array(fluorescent_traces[i])
            #     array1 = np.mean(temp[:, 100:150])
            #     array2 = np.mean(temp[:, 150:200])
            #
            #     # if array2 > 0.08 and array1 < 0.08:
            #     if array2-array1 > 0.06:
            #         indices.append(i)
            #         firing1.append(array1)
            #         firing2.append(array2)
            # temp =[]
            # for p in range(300):
            #     temp= [video_traces[p][i] for i in indices]
            #     average_video[p] = np.mean(temp, axis=0)
            #     # print(np.min(video_traces[p][0]), np.max(video_traces[p][0]))
            #     toimage(average_video[p], cmin=0, cmax=400).save(
            #         'C:\Users\mahdir\Documents\Allen Projects\Behavior Annotation\\neural_images_select2\\' + 'image_' + str(
            #             p) + '.png')

            # Use code to plot average fluorescence per fidget example

            # # plt.plot(firing, color = 'b')
            # plt.title("Average Fluroscence per Fidget Example During and After Fidget")
            # plt.plot(firing1, 'bo')
            # plt.plot(firing2, 'ro')
            # plt.xlabel('Fidget Example')
            # plt.ylabel('Average Fluorescence')
            # plt.show()
            #
            # workbook = xlsxwriter.Workbook(
            #     'C:\Users\mahdir\Documents\Allen Projects\Behavior Annotation\\average_fluoresence_select2.xlsx')
            # worksheet = workbook.add_worksheet()
            #
            # worksheet.write_row(0, 0, firing1)
            # worksheet.write_row(1, 0, firing2)
            #
            # workbook.close()

            # use code to plot average cell response

            fluorescence_average = [[] for _ in range(len(n_data[1]))]

            plt.title('Fluorescent Traces During Fidget for id ' + str(id))

            # average each cell's traces across fidget bouts, baseline-subtract
            # the first sample, and show the result as an image
            for p in range(len(n_data[1])):
                fluorescence_average[p] = np.mean(
                    fluorescent_traces_cell[p][:], axis=0)
                fluorescence_average[
                    p] = fluorescence_average[p] - fluorescence_average[p][0]
                if np.max(fluorescence_average[p]) > 20:
                    print(p)
                # plt.plot(fluorescence_average[p])

            # plt.show()

            plt.imshow(fluorescence_average[:])
            plt.clim(-0.01, 0.01)
            plt.show()
        count += 1
Ejemplo n.º 23
0
def data_set():
    """ Load the hard-coded test NWB file as a BrainObservatoryNwbDataSet. """
    nwb_file = ('/projects/neuralcoding/vol1/prod6/specimen_497258322'
                '/ophys_experiment_506954308/506954308.nwb')
    return BrainObservatoryNwbDataSet(nwb_file)