Example #1
def get_reward_rate(self):
    # Per trial, find the first lick that lands after the response window
    # opens; trials with no qualifying lick get an infinite latency.
    response_latency_list = []
    for _, t in self.trials.iterrows():
        valid_response_licks = [
            x for x in t.lick_times
            if x - t.change_time >
               self.task_parameters['response_window_sec'][0]]
        response_latency = (float('inf') if len(valid_response_licks) == 0
                            else valid_response_licks[0] - t.change_time)
        response_latency_list.append(response_latency)
    reward_rate = calculate_reward_rate(
        response_latency=response_latency_list,
        starttime=self.trials.start_time.values)
    # Report trials with no measurable rate as NaN rather than inf.
    reward_rate[np.isinf(reward_rate)] = float('nan')
    return reward_rate
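
calculate_reward_rate itself is not shown on this page. As rough orientation, here is a minimal sketch of the kind of computation its parameters (window, trial_window, initial_trials, visible in Example #3 below) suggest: rewards per minute over a sliding window of neighboring trials. This is an illustration under those assumptions, not the library's implementation.

import numpy as np

def rolling_reward_rate(response_latency, starttime,
                        window=0.75, trial_window=25, initial_trials=10):
    # Illustrative stand-in for calculate_reward_rate: for each trial past
    # the initial_trials warm-up, count responses faster than `window`
    # seconds among the +/- trial_window neighboring trials and divide by
    # the elapsed time in minutes.
    latency = np.asarray(response_latency, dtype=float)
    start = np.asarray(starttime, dtype=float)
    rate = np.zeros(len(start))
    for i in range(initial_trials, len(start)):
        lo = max(0, i - trial_window)
        hi = min(len(start), i + trial_window)
        hits = np.count_nonzero(latency[lo:hi] < window)
        minutes = (start[hi - 1] - start[lo]) / 60.0
        # Zero elapsed time yields inf, which the caller above maps to NaN.
        rate[i] = hits / minutes if minutes > 0 else np.inf
    return rate
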
Example #2
def test_calculate_reward_rate(kwargs, expected):
    assert np.allclose(
        trials_processing.calculate_reward_rate(**kwargs),
        expected,
    ), "calculated reward rate should match expected reward rate :("
Example #3
    def get_trials(self, filter_aborted_trials=True):
        trials = super(ExtendedNwbApi, self).get_trials()
        stimulus_presentations = super(ExtendedNwbApi,
                                       self).get_stimulus_presentations()

        # Note: everything between dashed lines is a patch to deal with timing issues in
        # the AllenSDK
        # This should be removed in the future after issues #876 and #802 are fixed.
        # --------------------------------------------------------------------------------

        # gets start_time of next stimulus after timestamp in stimulus_presentations
        def get_next_flash(timestamp):
            query = stimulus_presentations.query('start_time >= @timestamp')
            if len(query) > 0:
                return query.iloc[0]['start_time']
            else:
                return None

        trials['change_time'] = trials['change_time'].map(
            lambda x: get_next_flash(x))

        # This method can lead to a NaN change time for any trials at the end of the session.
        # However, aborted trials at the end of the session also don't have change times.
        # The safest approach seems to be dropping any trials that aren't covered by
        # the stimulus_presentations table.
        # Using start time in case last stim is omitted
        last_stimulus_presentation = stimulus_presentations.iloc[-1][
            'start_time']
        trials = trials[np.logical_not(
            trials['stop_time'] > last_stimulus_presentation)]

        # recalculates response latency based on corrected change time and first lick time
        def recalculate_response_latency(row):
            if len(row['lick_times']) > 0 and not pd.isnull(
                    row['change_time']):
                return row['lick_times'][0] - row['change_time']
            else:
                return np.nan

        trials['response_latency'] = trials.apply(recalculate_response_latency,
                                                  axis=1)
        # -------------------------------------------------------------------------------

        # asserts that every change time exists in the stimulus_presentations table
        for change_time in trials[
                trials['change_time'].notna()]['change_time']:
            assert change_time in stimulus_presentations['start_time'].values

        # Return only non-aborted trials from this API by default
        if filter_aborted_trials:
            trials = trials.query('not aborted')

        # Reorder / drop some columns to make more sense to students
        trials = trials[[
            'initial_image_name', 'change_image_name', 'change_time',
            'lick_times', 'response_latency', 'reward_time', 'go', 'catch',
            'hit', 'miss', 'false_alarm', 'correct_reject', 'aborted',
            'auto_rewarded', 'reward_volume', 'start_time', 'stop_time',
            'trial_length'
        ]]

        # Calculate reward rate per trial
        trials['reward_rate'] = calculate_reward_rate(
            response_latency=trials.response_latency,
            starttime=trials.start_time,
            window=.75,
            trial_window=25,
            initial_trials=10)

        # response_binary is simply whether or not the subject responded,
        # i.e. True for a hit or a false alarm.
        hit = trials['hit'].values
        fa = trials['false_alarm'].values
        trials['response_binary'] = np.logical_or(hit, fa)

        return trials
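
A sketch of how this extended API might be consumed. The constructor arguments for ExtendedNwbApi are assumptions, not taken from the source.

# Hypothetical usage; how ExtendedNwbApi is constructed depends on the
# surrounding project, so the path argument below is an assumption.
api = ExtendedNwbApi('behavior_session.nwb')
trials = api.get_trials()                            # non-aborted trials only
all_trials = api.get_trials(filter_aborted_trials=False)

# The returned table carries the recalculated latencies plus the derived
# reward_rate and response_binary columns:
print(trials[['start_time', 'response_latency',
              'reward_rate', 'response_binary']].head())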