def __call__(self):
        if self.obj is None:
            exp = self.experiment_for_session(self.session)
            data_set = BrainObservatoryNwbDataSet(exp['nwb_file'])
            self.obj = self.klass.from_analysis_file(data_set, exp['analysis_file'], *self.args)

        return self.obj
Example #2
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("input_h5")
    parser.add_argument("output_h5")
    parser.add_argument("--plot_dir")
    parser.add_argument("--log_level", default=logging.INFO)

    args = parser.parse_args()

    logging.getLogger().setLevel(args.log_level)

    # read from "data"
    if args.input_h5.endswith("nwb"):
        timestamps, traces = BrainObservatoryNwbDataSet(
            args.input_h5).get_corrected_fluorescence_traces()
    else:
        input_h5 = h5py.File(args.input_h5, "r")
        traces = input_h5["data"][()]
        input_h5.close()

    dff = calculate_dff(traces, save_plot_dir=args.plot_dir)

    # write to "data"
    output_h5 = h5py.File(args.output_h5, "w")
    output_h5["data"] = dff
    output_h5.close()
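The same dF/F computation can be driven directly from Python; a minimal sketch, assuming the AllenSDK import paths below (the file names are hypothetical):

import h5py

from allensdk.brain_observatory.dff import calculate_dff
from allensdk.core.brain_observatory_nwb_data_set import BrainObservatoryNwbDataSet

# read corrected fluorescence traces from a hypothetical NWB file
timestamps, traces = BrainObservatoryNwbDataSet("506954308.nwb").get_corrected_fluorescence_traces()
dff = calculate_dff(traces, save_plot_dir=None)

# write the result to "data", mirroring the script above
with h5py.File("506954308_dff.h5", "w") as output_h5:
    output_h5["data"] = dff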
Example #3
    def get_ophys_experiment_data(self, ophys_experiment_id, file_name=None):
        """ Download the NWB file for an ophys_experiment (if it hasn't already been
        downloaded) and return a data accessor object.

        Parameters
        ----------
        ophys_experiment_id: integer
            id of the ophys_experiment to retrieve

        file_name: string
            File name to save/read the data set.  If file_name is None,
            the file_name will be pulled out of the manifest.  If caching
            is disabled, no file will be saved. Default is None.

        Returns
        -------
        BrainObservatoryNwbDataSet
        """
        file_name = self.get_cache_path(file_name, self.EXPERIMENT_DATA_KEY,
                                        ophys_experiment_id)

        if not os.path.exists(file_name):
            self.api.save_ophys_experiment_data(ophys_experiment_id, file_name)

        return BrainObservatoryNwbDataSet(file_name)
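A minimal usage sketch for this accessor, assuming a BrainObservatoryCache built from a manifest file (the manifest path is illustrative; the experiment id appears elsewhere in this listing):

from allensdk.core.brain_observatory_cache import BrainObservatoryCache

boc = BrainObservatoryCache(manifest_file='boc/manifest.json')

# downloads the NWB file on first use (or reads it from the cache), then wraps it
data_set = boc.get_ophys_experiment_data(506954308)

print(data_set.get_metadata())
timestamps, dff = data_set.get_dff_traces()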
Example #4
    def save_session_c2(self, lsn4, lsn8, nm1, nm2, peak):
        """ Save the output of session C2 analysis to self.save_path. 

        Parameters
        ----------
        lsn4: LocallySparseNoise instance
            This LocallySparseNoise instance should have been created with 
            self.stimulus = stimulus_info.LOCALLY_SPARSE_NOISE_4DEG.

        lsn8: LocallySparseNoise instance
            This LocallySparseNoise instance should have been created with 
            self.stimulus = stimulus_info.LOCALLY_SPARSE_NOISE_8DEG.

        nm1: NaturalMovie instance
            This NaturalMovie instance should have been created with
            movie_name = stimulus_info.NATURAL_MOVIE_ONE

        nm2: NaturalMovie instance
            This NaturalMovie instance should have been created with
            movie_name = stimulus_info.NATURAL_MOVIE_TWO

        peak: pd.DataFrame
            The combined peak response property table created in self.session_c2().
        """

        nwb = BrainObservatoryNwbDataSet(self.save_path)

        nwb.save_analysis_dataframes(
            ('stim_table_lsn4', lsn4.stim_table),
            ('stim_table_lsn8', lsn8.stim_table),
            ('sweep_response_nm1', nm1.sweep_response), ('peak', peak),
            ('sweep_response_nm2', nm2.sweep_response),
            ('sweep_response_lsn4', lsn4.sweep_response),
            ('sweep_response_lsn8', lsn8.sweep_response),
            ('mean_sweep_response_lsn4', lsn4.mean_sweep_response),
            ('mean_sweep_response_lsn8', lsn8.mean_sweep_response))

        merge_mean_response = LocallySparseNoise.merge_mean_response(
            lsn4.mean_response, lsn8.mean_response)

        nwb.save_analysis_arrays(
            ('mean_response_lsn4', lsn4.mean_response),
            ('mean_response_lsn8', lsn8.mean_response),
            ('receptive_field_lsn4', lsn4.receptive_field),
            ('receptive_field_lsn8', lsn8.receptive_field),
            ('merge_mean_response', merge_mean_response),
            ('binned_dx_sp', nm1.binned_dx_sp),
            ('binned_dx_vis', nm1.binned_dx_vis),
            ('binned_cells_sp', nm1.binned_cells_sp),
            ('binned_cells_vis', nm1.binned_cells_vis))

        LocallySparseNoise.save_cell_index_receptive_field_analysis(
            lsn4.cell_index_receptive_field_analysis_data, nwb,
            stimulus_info.LOCALLY_SPARSE_NOISE_4DEG)
        LocallySparseNoise.save_cell_index_receptive_field_analysis(
            lsn8.cell_index_receptive_field_analysis_data, nwb,
            stimulus_info.LOCALLY_SPARSE_NOISE_8DEG)
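A hedged sketch of how the inputs described in this docstring might be constructed before calling the method; the constructor signatures, the session_analysis object, and the peak table are assumptions, not taken from this listing:

from allensdk.brain_observatory import stimulus_info
from allensdk.brain_observatory.locally_sparse_noise import LocallySparseNoise
from allensdk.brain_observatory.natural_movie import NaturalMovie

# data_set is the BrainObservatoryNwbDataSet for the session C2 experiment
lsn4 = LocallySparseNoise(data_set, stimulus=stimulus_info.LOCALLY_SPARSE_NOISE_4DEG)  # assumed signature
lsn8 = LocallySparseNoise(data_set, stimulus=stimulus_info.LOCALLY_SPARSE_NOISE_8DEG)  # assumed signature
nm1 = NaturalMovie(data_set, movie_name=stimulus_info.NATURAL_MOVIE_ONE)  # assumed signature
nm2 = NaturalMovie(data_set, movie_name=stimulus_info.NATURAL_MOVIE_TWO)  # assumed signature

# peak is the combined peak-response table produced by the session_c2() step (not shown here)
session_analysis.save_session_c2(lsn4, lsn8, nm1, nm2, peak)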
Example #5
    def __init__(self, nwb_path, save_path):
        self.nwb = BrainObservatoryNwbDataSet(nwb_path)
        self.save_path = save_path
        self.save_dir = os.path.dirname(save_path)

        self.metrics_a = dict(cell={}, experiment={})
        self.metrics_b = dict(cell={}, experiment={})
        self.metrics_c = dict(cell={}, experiment={})

        self.metadata = self.nwb.get_metadata()
Example #6
    def __init__(self, nwb_path, save_path):
        self.nwb = BrainObservatoryNwbDataSet(nwb_path)
        self.save_path = save_path
        self.save_dir = os.path.dirname(save_path)

        self.metrics_a = {}
        self.metrics_b = {}
        self.metrics_c = {}

        self.metadata = self.nwb.get_metadata()
Example #7
    def save_session_c(self, lsn, nm1, nm2, peak):
        nwb = BrainObservatoryNwbDataSet(self.save_path)
        nwb.save_analysis_dataframes(
            ('stim_table_lsn', lsn.stim_table),
            ('sweep_response_nm1', nm1.sweep_response), ('peak', peak),
            ('sweep_response_nm2', nm2.sweep_response),
            ('sweep_response_lsn', lsn.sweep_response),
            ('mean_sweep_response_lsn', lsn.mean_sweep_response))

        nwb.save_analysis_arrays(('receptive_field_lsn', lsn.receptive_field),
                                 ('celltraces_dff', nm1.dfftraces),
                                 ('binned_dx_sp', nm1.binned_dx_sp),
                                 ('binned_dx_vis', nm1.binned_dx_vis),
                                 ('binned_cells_sp', nm1.binned_cells_sp),
                                 ('binned_cells_vis', nm1.binned_cells_vis))
Example #8
    def save_session_a(self, dg, nm1, nm3, peak):
        nwb = BrainObservatoryNwbDataSet(self.save_path)
        nwb.save_analysis_dataframes(
            ('stim_table_dg', dg.stim_table),
            ('sweep_response_dg', dg.sweep_response),
            ('mean_sweep_response_dg', dg.mean_sweep_response), ('peak', peak),
            ('sweep_response_nm1', nm1.sweep_response),
            ('stim_table_nm1', nm1.stim_table),
            ('sweep_response_nm3', nm3.sweep_response))

        nwb.save_analysis_arrays(('celltraces_dff', nm1.dfftraces),
                                 ('response_dg', dg.response),
                                 ('binned_cells_sp', nm1.binned_cells_sp),
                                 ('binned_cells_vis', nm1.binned_cells_vis),
                                 ('binned_dx_sp', nm1.binned_dx_sp),
                                 ('binned_dx_vis', nm1.binned_dx_vis))
Example #9
    def save_session_b(self, sg, nm1, ns, peak):
        """ Save the output of session B analysis to self.save_path.  

        Parameters
        ----------
        sg: StaticGratings instance

        nm1: NaturalMovie instance
            This NaturalMovie instance should have been created with
            movie_name = stimulus_info.NATURAL_MOVIE_ONE

        ns: NaturalScenes instance

        peak: pd.DataFrame
            The combined peak response property table created in self.session_b().
        """

        nwb = BrainObservatoryNwbDataSet(self.save_path)

        nwb.save_analysis_dataframes(
            ('stim_table_sg', sg.stim_table),
            ('sweep_response_sg', sg.sweep_response),
            ('mean_sweep_response_sg', sg.mean_sweep_response),
            ('sweep_response_nm1', nm1.sweep_response),
            ('stim_table_nm1', nm1.stim_table),
            ('sweep_response_ns', ns.sweep_response),
            ('stim_table_ns', ns.stim_table),
            ('mean_sweep_response_ns', ns.mean_sweep_response),
            ('peak', peak))

        nwb.save_analysis_arrays(
            ('response_sg', sg.response),
            ('response_ns', ns.response),
            ('binned_cells_sp', nm1.binned_cells_sp),
            ('binned_cells_vis', nm1.binned_cells_vis),
            ('binned_dx_sp', nm1.binned_dx_sp),
            ('binned_dx_vis', nm1.binned_dx_vis),
            ('noise_corr_sg', sg.noise_correlation),
            ('signal_corr_sg', sg.signal_correlation),
            ('rep_similarity_sg', sg.representational_similarity),
            ('noise_corr_ns', ns.noise_correlation),
            ('signal_corr_ns', ns.signal_correlation),
            ('rep_similarity_ns', ns.representational_similarity)
            )
Example #10
def build_type(nwb_file, data_file, configs, output_dir, type_name):
    data_set = BrainObservatoryNwbDataSet(nwb_file)
    try:
        if type_name == "dg":
            dga = DriftingGratings.from_analysis_file(data_set, data_file)
            build_drifting_gratings(dga, configs, output_dir)
        elif type_name == "sg":
            sga = StaticGratings.from_analysis_file(data_set, data_file)
            build_static_gratings(sga, configs, output_dir)
        elif type_name == "nm1":
            nma = NaturalMovie.from_analysis_file(data_set, data_file, si.NATURAL_MOVIE_ONE)
            build_natural_movie(nma, configs, output_dir, si.NATURAL_MOVIE_ONE)
        elif type_name == "nm2":
            nma = NaturalMovie.from_analysis_file(data_set, data_file, si.NATURAL_MOVIE_TWO)
            build_natural_movie(nma, configs, output_dir, si.NATURAL_MOVIE_TWO)
        elif type_name == "nm3":
            nma = NaturalMovie.from_analysis_file(data_set, data_file, si.NATURAL_MOVIE_THREE)
            build_natural_movie(nma, configs, output_dir, si.NATURAL_MOVIE_THREE)
        elif type_name == "ns":
            nsa = NaturalScenes.from_analysis_file(data_set, data_file)
            build_natural_scenes(nsa, configs, output_dir)
        elif type_name == "sp":
            nma = NaturalMovie.from_analysis_file(data_set, data_file, si.NATURAL_MOVIE_ONE)
            build_speed_tuning(nma, configs, output_dir)
        elif type_name == "lsn_on":
            lsna = lsna_check_hvas(data_set, data_file)
            build_locally_sparse_noise(lsna, configs, output_dir, True)
        elif type_name == "lsn_off":
            lsna = lsna_check_hvas(data_set, data_file)
            build_locally_sparse_noise(lsna, configs, output_dir, False)
        elif type_name == "rf":
            lsna = lsna_check_hvas(data_set, data_file)
            build_receptive_field(lsna, configs, output_dir)
        elif type_name == "corr":
            build_correlation_plots(data_set, data_file, configs, output_dir)
        elif type_name == "eye":
            build_eye_tracking_plots(data_set, configs, output_dir)

    except MissingStimulusException as e:
        logging.warning("could not load stimulus (%s)", type_name)
    except Exception as e:
        traceback.print_exc()
        logging.critical("error running stimulus (%s)", type_name)
        raise e
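A brief usage sketch for the dispatcher above; the file paths, output directory, and configs mapping are hypothetical (the real configuration structure is not shown in this listing):

configs = {}  # plot configuration; structure not shown here

# render drifting-gratings figures for one experiment
build_type("506954308.nwb", "506954308_analysis.h5", configs, "./plots", "dg")

# "corr" and "eye" operate on the data set itself rather than a single stimulus analysis
build_type("506954308.nwb", "506954308_analysis.h5", configs, "./plots", "corr")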
Example #11
def data_set():
    nwb_file = '/projects/neuralcoding/vol1/prod6/specimen_497258322/ophys_experiment_506954308/506954308.nwb'
    data_set = BrainObservatoryNwbDataSet(nwb_file)
    return data_set
Example #12
def run(ids, ann_data):

    # boc = BrainObservatoryCache(manifest_file='boc/manifest.json')

    # Get data container information
    count = 0

    db_params = {
        u'dbname': u'lims2',
        u'user': u'limsreader',
        u'host': u'limsdb2',
        u'password': u'limsro',
        u'port': 5432
    }

    QUERY = " ".join((
        "SELECT sp.name, ec.id, ec.workflow_state, eso.stimulus_name, eso.id, eso.workflow_state",
        "FROM experiment_sessions eso",
        "LEFT JOIN experiment_containers ec ON ec.id = eso.experiment_container_id",
        "JOIN specimens sp ON sp.id=eso.specimen_id", "WHERE eso.id='{}';"))

    def get_db_cursor(dbname, user, host, password, port):
        con = connect(dbname=dbname,
                      user=user,
                      host=host,
                      password=password,
                      port=port)
        return con.cursor()

    cur = get_db_cursor(**db_params)

    # find container status
    def find_status(lims_id):
        query = QUERY.format(lims_id)
        cur.execute(query)
        return cur.fetchall()

    # types of possible stimuli
    stimuli = [
        'spontaneous_stimulus', 'drifting_gratings_stimulus',
        'natural_movie_one_stimulus', 'natural_movie_two_stimulus',
        'natural_movie_three_stimulus', 'static_gratings_stimulus',
        'locally_sparse_noise_stimulus'
    ]

    A_count = 0
    B_count = 0
    C_count = 0

    # read in global report for analysis and read excel ids
    excel_data = pandas.read_excel(
        r'C:\Users\mahdir\Documents\Allen Projects\Behavior Annotation\global_report.xlsx',
        sheet_name='all')
    excel_ids = excel_data['lims ID']
    mid_list = excel_data['lims_database_specimen_id']

    # unique = np.unique(rig_list)
    # # unique_operator = np.unique(operator_list).tolist()
    # session_results = [[] for _ in range(17)]

    anxiety = []
    session = []

    for id in ids:
        if id.strip() == '509292861':
            id = id.strip()
            # qc_status = np.array(find_status(id.strip()))
            # status = qc_status[(0)][2]
            # check if failed
            # if 'failed' not in status:
            #     print (' experiment ' + str(id) + ' did not pass QC')
            #

            # get new video directory
            new_dir = '\\' + str(ld(id).get_video_directory())[0:-1]

            nwb_path = None
            h5_file = None
            pkl_file = None
            video_file = None
            # get experiment's associated files
            for file in os.listdir(new_dir):
                if file.endswith(".nwb"):
                    # make sure file is in there!
                    nwb_path = os.path.join(new_dir, file)
                    # input file path, r is for read only
            if nwb_path is None:
                print('NWB file not found')
                continue

            for file in os.listdir(new_dir):
                # looks for the h5 file and makes the directory to it
                if file.endswith("sync.h5"):
                    h5_file = os.path.join(new_dir, file)
            if h5_file is None:
                print('H5 file not found')
                continue

            for file in os.listdir(new_dir):
                # looks for the pkl file and makes the directory to it
                if file.endswith("stim.pkl"):
                    pkl_file = os.path.join(new_dir, file)
            if pkl_file is None:
                print('PKL file not found')
                continue

            # get wheel data and first non Nan value
            grab_wheel = gw(h5_file)
            frames = grab_wheel.return_frames()
            wheel = grab_wheel.getRunningData(pkl_file, frames)
            first_non_nan = next(x for x in wheel if not isnan(x))
            first_index = np.where(wheel == first_non_nan)[0]
            imp = Imputer(missing_values='NaN', strategy='mean')
            # normalize wheel data according to wheel scaler
            wheel = imp.fit_transform(wheel)
            k = first_index[0]

            # get behavior and neural activity timing, as well as annotated data and dff traces
            neuron = BrainObservatoryNwbDataSet(nwb_path)
            n_data = neuron.get_dff_traces()

            beh_time = sm(new_dir).get_frame_times_behavior()
            neuro_time = sm(new_dir).get_frame_times_physio()
            data = ann_data[count]

            # get visual cortex movie
            path = '\\\\allen\\programs\\braintv\\production\\neuralcoding\\prod6\\specimen_501800347\\ophys_experiment_509292861\\processed\\concat_31Hz_0.h5'
            F = h5py.File(path, "r")

            # determine how many unique fidget examples there are
            flu_count = 0

            limit = len(data) - 200
            if limit > len(beh_time):
                limit = len(beh_time)
            if limit > len(neuro_time):
                limit = len(neuro_time)

            for f in range(len(data) - 200):
                if data[f] in (1, 2) and data[f + 1] == 0:
                    flu_count += 1

            # initialize data arrays
            fluorescent_traces = [[[] for _ in range(len(n_data[1]))]
                                  for _ in range(flu_count)]
            fluorescent_traces_cell = [[[] for _ in range(flu_count)]
                                       for _ in range(len(n_data[1]))]
            video_traces = [[[] for _ in range(flu_count)] for _ in range(300)]

            print(flu_count)
            flu_count = 0

            indices = []

            print('calculating average movie and neural activity')
            # for each frame, check whether its fidget or not
            for f in range(len(data) - 200):
                if data[f] in (1, 2) and data[f + 1] == 0:
                    # get behavior time (must be offset by the index of first wheel value since annotated data starts then)
                    b_time = beh_time[f + k]
                    # use to get associated video time in seconds
                    t = beh_time[f]
                    # get mapped fluorescence index
                    idx = (np.abs(neuro_time - b_time)).argmin()
                    # store each cell's fluorescent trace 100 frames back and 200 forward from start of fidget
                    for p in range(len(n_data[1])):
                        # keeps track per fidget example
                        fluorescent_traces[flu_count][p] = n_data[1][p][idx - 100:idx + 200]
                        # keeps track per cell
                        fluorescent_traces_cell[p][flu_count] = n_data[1][p][idx - 100:idx + 200]
                    # save video trace of neural activity
                    # for h in range(300):
                    #
                    #     video_traces[h][flu_count] = np.array(F['data'][ int(idx-100+h),:,:])

                    # data_pointer.release()
                    # cv2.destroyAllWindows()
                    flu_count += 1

                if f % 1000 == 0:
                    print(f)

            average_video = [[] for _ in range(300)]
            firing1 = []
            firing2 = []
            indices = []

            # for each fidget example, get examples where there is a significant difference between during and after fidget
            # response, average those videos together and write them out (then use makeavi programto make video)
            # for i in range (flu_count):
            #     temp = np.array(fluorescent_traces[i])
            #     array1 = np.mean(temp[:, 100:150])
            #     array2 = np.mean(temp[:, 150:200])
            #
            #     # if array2 > 0.08 and array1 < 0.08:
            #     if array2-array1 > 0.06:
            #         indices.append(i)
            #         firing1.append(array1)
            #         firing2.append(array2)
            # temp =[]
            # for p in range(300):
            #     temp= [video_traces[p][i] for i in indices]
            #     average_video[p] = np.mean(temp, axis=0)
            #     # print(np.min(video_traces[p][0]), np.max(video_traces[p][0]))
            #     toimage(average_video[p], cmin=0, cmax=400).save(
            #         'C:\Users\mahdir\Documents\Allen Projects\Behavior Annotation\\neural_images_select2\\' + 'image_' + str(
            #             p) + '.png')

            # Use code to plot average fluorescence per fidget example

            # # plt.plot(firing, color = 'b')
            # plt.title("Average Fluroscence per Fidget Example During and After Fidget")
            # plt.plot(firing1, 'bo')
            # plt.plot(firing2, 'ro')
            # plt.xlabel('Fidget Example')
            # plt.ylabel('Average Fluorescence')
            # plt.show()
            #
            # workbook = xlsxwriter.Workbook(
            #     'C:\Users\mahdir\Documents\Allen Projects\Behavior Annotation\\average_fluoresence_select2.xlsx')
            # worksheet = workbook.add_worksheet()
            #
            # worksheet.write_row(0, 0, firing1)
            # worksheet.write_row(1, 0, firing2)
            #
            # workbook.close()

            # use code to plot average cell response

            fluorescence_average = [[] for _ in range(len(n_data[1]))]

            plt.title('Fluorescent Traces During Fidget for id ' + str(id))

            for p in range(len(n_data[1])):
                fluorescence_average[p] = np.mean(fluorescent_traces_cell[p][:], axis=0)
                fluorescence_average[p] = fluorescence_average[p] - fluorescence_average[p][0]
                if np.max(fluorescence_average[p]) > 20:
                    print(p)
                # plt.plot(fluorescence_average[p])

            # plt.show()

            plt.imshow(fluorescence_average[:])
            plt.clim(-0.01, 0.01)
            plt.show()
        count += 1
Example #13
def data_set(request):
    data_set = BrainObservatoryNwbDataSet(request.param)

    return data_set
Example #14
def data_set(request):
    assert os.path.exists(request.param)
    data_set = BrainObservatoryNwbDataSet(request.param)

    return data_set
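These last two functions read like pytest fixture bodies with their decorators stripped by the listing; a hedged sketch of how such a parametrized fixture is typically declared and consumed (the decorator, path, and test are assumptions):

import os

import pytest

from allensdk.core.brain_observatory_nwb_data_set import BrainObservatoryNwbDataSet


@pytest.fixture(params=["/path/to/506954308.nwb"])  # hypothetical NWB path
def data_set(request):
    assert os.path.exists(request.param)
    return BrainObservatoryNwbDataSet(request.param)


def test_dff_traces_are_aligned(data_set):
    # get_dff_traces returns (timestamps, traces) with one row of traces per cell
    timestamps, traces = data_set.get_dff_traces()
    assert traces.shape[1] == timestamps.shape[0]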