Example #1
0
    def get_stimulus_presentations(self):
        """Build the stimulus presentations table.

        Loads the behavior stimulus pickle, builds the raw presentation
        table (one row per stimulus presentation), and left-merges in
        per-image metadata keyed on ``start_time``.

        :returns: pd.DataFrame --
            Table whose rows are stimulus presentations and whose
            columns are presentation characteristics, sorted by name.
        :raises ValueError: if the metadata merge changes the row count.
        """
        stimulus_timestamps = self.get_stimulus_timestamps()
        behavior_stimulus_file = self.get_behavior_stimulus_file()
        data = pd.read_pickle(behavior_stimulus_file)
        stimulus_presentations_df_pre = get_stimulus_presentations(
            data, stimulus_timestamps)
        stimulus_metadata_df = get_stimulus_metadata(data)
        idx_name = stimulus_presentations_df_pre.index.name
        stimulus_index_df = stimulus_presentations_df_pre.reset_index().merge(
            stimulus_metadata_df.reset_index(),
            on=['image_name']).set_index(idx_name)
        stimulus_index_df.sort_index(inplace=True)
        stimulus_index_df = stimulus_index_df[[
            'image_set', 'image_index', 'start_time'
        ]].rename(columns={'start_time': 'timestamps'})
        stimulus_index_df.set_index('timestamps', inplace=True, drop=True)
        stimulus_presentations_df = stimulus_presentations_df_pre.merge(
            stimulus_index_df,
            left_on='start_time',
            right_index=True,
            how='left')
        # A left merge must not add or drop rows; raise explicitly
        # (rather than `assert`, which is stripped under `python -O`).
        if len(stimulus_presentations_df_pre) != len(
                stimulus_presentations_df):
            raise ValueError(
                "Length of `stimulus_presentations_df` should not change"
                f" after merge; was {len(stimulus_presentations_df_pre)},"
                f" now {len(stimulus_presentations_df)}.")

        return stimulus_presentations_df[sorted(
            stimulus_presentations_df.columns)]
def test_get_stimulus_presentations(behavior_stimuli_time_fixture,
                                    behavior_stimuli_data_fixture, expected):
    """Presentation table built from fixtures matches `expected` exactly."""
    presentations_df = get_stimulus_presentations(
        behavior_stimuli_data_fixture, behavior_stimuli_time_fixture)

    expected_df = pd.DataFrame.from_dict(expected)

    # assert_frame_equal reports *which* cells differ on failure,
    # unlike a bare `.equals()` check that only yields True/False.
    pd.testing.assert_frame_equal(presentations_df, expected_df)
def generate_behavior_stim_table(pkl_data,
                                 sync_dataset,
                                 frame_offset=0,
                                 block_offset=0):
    """Build a stimulus-presentation table for a behavior session epoch.

    Parameters
    ----------
    pkl_data : dict
        Deserialized behavior stimulus pickle; the image-set path and
        inter-frame intervals are read from ``items['behavior']``.
    sync_dataset
        Sync data passed to ``get_vsyncs`` to obtain frame timestamps.
    frame_offset : int, optional
        Index of this epoch's first frame within the full vsync array.
    block_offset : int, optional
        Value written to the ``stimulus_block`` column.

    Returns
    -------
    pd.DataFrame
        One row per stimulus presentation, with change/repeat
        bookkeeping columns added.
    """
    p = pkl_data
    # Image-set name: basename of the image-set path, extension stripped.
    image_set = p['items']['behavior']['params']['stimulus']['params'][
        'image_set']
    image_set = image_set.split('/')[-1].split('.')[0]
    # N inter-frame intervals -> N + 1 frames.
    num_frames = p['items']['behavior']['intervalsms'].size + 1

    frame_timestamps = get_vsyncs(sync_dataset)
    epoch_timestamps = frame_timestamps[frame_offset:frame_offset + num_frames]

    stim_table = get_stimulus_presentations(p, epoch_timestamps)
    stim_table['stimulus_block'] = block_offset
    stim_table['stimulus_name'] = image_set
    stim_table = stim_table.rename(columns={
        'frame': 'start_frame',
        'start_time': 'Start',
        'stop_time': 'End'
    })

    # Add columns for change and flashes since change.
    # NOTE(review): `index - 1` below is used positionally, so this
    # assumes the table has a 0..N-1 RangeIndex -- confirm against
    # get_stimulus_presentations.
    change = np.zeros(len(stim_table))
    repeat_number = np.zeros(len(stim_table))
    # Seed with the stimulus *set* name (set above), which never matches
    # an image name, so the first non-omitted flash is initially flagged
    # as a change and un-flagged after the loop.
    current_image = stim_table.iloc[0]['stimulus_name']
    for index, row in stim_table.iterrows():
        if (row['image_name'] == 'omitted') or (row['omitted']):
            # Omitted flashes inherit the previous repeat count.
            repeat_number[index] = repeat_number[index - 1]
        else:
            if row['image_name'] != current_image:
                change[index] = 1
                repeat_number[index] = 0
                current_image = row['image_name']
            else:
                repeat_number[index] = repeat_number[index - 1] + 1

    # Don't call the first change a change (it is not necessarily row 0,
    # since the first flash can be omitted). Guarded so an epoch with no
    # changes at all no longer raises IndexError.
    change_indices = np.flatnonzero(change)
    if change_indices.size:
        change[change_indices[0]] = 0
    stim_table['change'] = change.astype(int)

    stim_table['flashes_since_change'] = repeat_number.astype(int)
    stim_table['active'] = True

    # Fill in 'end_frame' and 'End' for omitted stimuli using the median
    # presentation duration (in frames) of the real stimuli.
    median_stim_frame_duration = np.nanmedian(stim_table['end_frame'] -
                                              stim_table['start_frame'])
    stim_table.loc[stim_table['omitted'], 'end_frame'] = stim_table[
        stim_table['omitted']]['start_frame'] + median_stim_frame_duration
    stim_table.loc[stim_table['omitted'], 'End'] = epoch_timestamps[stim_table[
        stim_table['omitted']]['end_frame'].astype(int)]
    stim_table['common_name'] = 'behavior'

    return stim_table
Example #4
0
    def get_stimulus_presentations(self) -> pd.DataFrame:
        """Get stimulus presentation data.

        NOTE: Uses timestamps that do not account for monitor delay.

        :returns: pd.DataFrame --
            Table whose rows are stimulus presentations
            (i.e. a given image, for a given duration, typically 250 ms)
            and whose columns are presentation characteristics.
        :raises ValueError: if all `image_name` and `orientation` values
            are null, or if the metadata merge changes the row count.
        """
        stimulus_timestamps = self.get_stimulus_timestamps()
        data = self._behavior_stimulus_file()
        raw_stim_pres_df = get_stimulus_presentations(data,
                                                      stimulus_timestamps)

        # Fill in nulls for image_name
        # This makes two assumptions:
        #   1. Nulls in `image_name` should be "gratings_<orientation>"
        #   2. Gratings are only present (or need to be fixed) when all
        #      values for `image_name` are null.
        if pd.isnull(raw_stim_pres_df["image_name"]).all():
            # `not` instead of bitwise `~`: the `.all()` result is a
            # scalar boolean, not an array.
            if not pd.isnull(raw_stim_pres_df["orientation"]).all():
                raw_stim_pres_df["image_name"] = (
                    raw_stim_pres_df["orientation"].apply(
                        lambda x: f"gratings_{x}"))
            else:
                # Typo fix: message previously read "orentation".
                raise ValueError("All values for 'orientation' and "
                                 "'image_name' are null.")

        stimulus_metadata_df = get_stimulus_metadata(data)

        # Merge per-image metadata, then re-key by start_time so it can
        # be joined back onto the raw presentation table.
        idx_name = raw_stim_pres_df.index.name
        stimulus_index_df = (raw_stim_pres_df.reset_index().merge(
            stimulus_metadata_df.reset_index(),
            on=["image_name"]).set_index(idx_name))
        stimulus_index_df = (stimulus_index_df[[
            "image_set", "image_index", "start_time", "phase",
            "spatial_frequency"
        ]].rename(columns={
            "start_time": "timestamps"
        }).sort_index().set_index("timestamps", drop=True))
        stim_pres_df = raw_stim_pres_df.merge(stimulus_index_df,
                                              left_on="start_time",
                                              right_index=True,
                                              how="left")
        if len(raw_stim_pres_df) != len(stim_pres_df):
            raise ValueError("Length of `stim_pres_df` should not change after"
                             f" merge; was {len(raw_stim_pres_df)}, now "
                             f" {len(stim_pres_df)}.")

        stim_pres_df['is_change'] = is_change_event(
            stimulus_presentations=stim_pres_df)

        # Sort columns then drop columns which contain only all NaN values
        return stim_pres_df[sorted(stim_pres_df)].dropna(axis=1, how='all')
Example #5
0
    def from_stimulus_file(
            cls,
            stimulus_file: StimulusFile,
            stimulus_timestamps: StimulusTimestamps,
            limit_to_images: Optional[List] = None) -> "Presentations":
        """Get stimulus presentation data.

        :param stimulus_file
        :param limit_to_images
            Only return images given by these image names
        :param stimulus_timestamps

        :returns: pd.DataFrame --
            Table whose rows are stimulus presentations
            (i.e. a given image, for a given duration, typically 250 ms)
            and whose columns are presentation characteristics.
        :raises ValueError: if all `image_name` and `orientation` values
            are null, or if the metadata merge changes the row count.
        """
        stimulus_timestamps = stimulus_timestamps.value
        data = stimulus_file.data
        raw_stim_pres_df = get_stimulus_presentations(data,
                                                      stimulus_timestamps)

        # Fill in nulls for image_name
        # This makes two assumptions:
        #   1. Nulls in `image_name` should be "gratings_<orientation>"
        #   2. Gratings are only present (or need to be fixed) when all
        #      values for `image_name` are null.
        if pd.isnull(raw_stim_pres_df["image_name"]).all():
            # `not` instead of bitwise `~`: the `.all()` result is a
            # scalar boolean, not an array.
            if not pd.isnull(raw_stim_pres_df["orientation"]).all():
                raw_stim_pres_df["image_name"] = (
                    raw_stim_pres_df["orientation"].apply(
                        lambda x: f"gratings_{x}"))
            else:
                # Typo fix: message previously read "orentation".
                raise ValueError("All values for 'orientation' and "
                                 "'image_name' are null.")

        stimulus_metadata_df = get_stimulus_metadata(data)

        # Merge per-image metadata, then re-key by start_time so it can
        # be joined back onto the raw presentation table.
        idx_name = raw_stim_pres_df.index.name
        stimulus_index_df = (raw_stim_pres_df.reset_index().merge(
            stimulus_metadata_df.reset_index(),
            on=["image_name"]).set_index(idx_name))
        stimulus_index_df = (stimulus_index_df[[
            "image_set", "image_index", "start_time", "phase",
            "spatial_frequency"
        ]].rename(columns={
            "start_time": "timestamps"
        }).sort_index().set_index("timestamps", drop=True))
        stim_pres_df = raw_stim_pres_df.merge(stimulus_index_df,
                                              left_on="start_time",
                                              right_index=True,
                                              how="left")
        if len(raw_stim_pres_df) != len(stim_pres_df):
            raise ValueError("Length of `stim_pres_df` should not change after"
                             f" merge; was {len(raw_stim_pres_df)}, now "
                             f" {len(stim_pres_df)}.")

        stim_pres_df['is_change'] = is_change_event(
            stimulus_presentations=stim_pres_df)

        # Sort columns then drop columns which contain only all NaN values
        stim_pres_df = \
            stim_pres_df[sorted(stim_pres_df)].dropna(axis=1, how='all')
        if limit_to_images is not None:
            stim_pres_df = \
                stim_pres_df[stim_pres_df['image_name'].isin(limit_to_images)]
            # NOTE(review): pd.Int64Index is deprecated/removed in modern
            # pandas; kept for compatibility with the pandas version this
            # file targets -- migrate to pd.Index(..., dtype='int64').
            stim_pres_df.index = pd.Int64Index(range(stim_pres_df.shape[0]),
                                               name=stim_pres_df.index.name)
        stim_pres_df = cls._postprocess(presentations=stim_pres_df)
        return Presentations(presentations=stim_pres_df)