Example #1
    def form_string_all_trials_perf(self, translated_trial_matrix):
        """Form a string with side perf and anova for all trials"""
        side2perf_all = count_hits_by_type_from_trials_info(
            translated_trial_matrix, split_key='rewside')

        string_perf_by_side = self.form_string_perf_by_side(side2perf_all)

        if (len(translated_trial_matrix) > self.cached_anova_len2 or
                self.cached_anova_text2 == ''):
            numericated_trial_matrix = TrialMatrix.numericate_trial_matrix(
                translated_trial_matrix)
            anova_stats = TrialMatrix.run_anova(numericated_trial_matrix)
            self.cached_anova_text2 = anova_stats
            self.cached_anova_len2 = len(translated_trial_matrix)
        else:
            anova_stats = self.cached_anova_text2

        return 'All: ' + string_perf_by_side + '. Biases: ' + anova_stats
Example #2
 def form_string_all_trials_perf(self, translated_trial_matrix):
     """Form a string with side perf and anova for all trials"""
     side2perf_all = count_hits_by_type_from_trials_info(
         translated_trial_matrix, 
         split_key='rewside')     
     
     string_perf_by_side = self.form_string_perf_by_side(side2perf_all)
     
     if len(translated_trial_matrix) > self.cached_anova_len2 or self.cached_anova_text2 == '':
         numericated_trial_matrix = TrialMatrix.numericate_trial_matrix(
             translated_trial_matrix)
         anova_stats = TrialMatrix.run_anova(numericated_trial_matrix)
         self.cached_anova_text2 = anova_stats
         self.cached_anova_len2 = len(translated_trial_matrix)
     else:
         anova_stats = self.cached_anova_text2
     
     return 'All: ' + string_perf_by_side + '. Biases: ' + anova_stats
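
Both versions of form_string_all_trials_perf above rely on the same length-keyed caching idiom: the ANOVA text is recomputed only when the trial matrix has grown since the last call, or when nothing has been cached yet. The sketch below isolates that idiom; CachedSummary and expensive_summary are hypothetical names, not part of the project.

class CachedSummary(object):
    """Recompute an expensive summary only when the trial matrix has grown."""
    def __init__(self):
        self.cached_text = ''
        self.cached_len = 0

    def summarize(self, trial_matrix, expensive_summary):
        # Recompute when new trials have arrived or nothing is cached yet
        if len(trial_matrix) > self.cached_len or self.cached_text == '':
            self.cached_text = expensive_summary(trial_matrix)
            self.cached_len = len(trial_matrix)
        return self.cached_text


# Usage: the second call returns the cached string without recomputing
cache = CachedSummary()
cache.summarize(list(range(100)), lambda tm: 'summary of %d trials' % len(tm))
print(cache.summarize(list(range(100)), lambda tm: 'summary of %d trials' % len(tm)))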
Example #3
 def form_string_recent_trials_perf(self, translated_trial_matrix):
     """Form a string with side perf and anova for recent trials
     
     cached in cached_anova_text3 and cached_anova_len3
     """
     side2perf = count_hits_by_type_from_trials_info(
         translated_trial_matrix.iloc[-60:], split_key='rewside')     
     
     string_perf_by_side = self.form_string_perf_by_side(side2perf)
     
     if len(translated_trial_matrix) > self.cached_anova_len3 or self.cached_anova_text3 == '':
         numericated_trial_matrix = TrialMatrix.numericate_trial_matrix(
             translated_trial_matrix.iloc[-60:])
         anova_stats = TrialMatrix.run_anova(numericated_trial_matrix)
         self.cached_anova_text3 = anova_stats
         self.cached_anova_len3 = len(translated_trial_matrix)
     else:
         anova_stats = self.cached_anova_text3
     
     return 'Recent: ' + string_perf_by_side + '. Biases: ' + anova_stats
Example #4
    def form_string_recent_trials_perf(self, translated_trial_matrix):
        """Form a string with side perf and anova for recent trials
        
        cached in cached_anova_text3 and cached_anova_len3
        """
        side2perf = count_hits_by_type_from_trials_info(
            translated_trial_matrix.iloc[-60:], split_key='rewside')

        string_perf_by_side = self.form_string_perf_by_side(side2perf)

        if (len(translated_trial_matrix) > self.cached_anova_len3 or
                self.cached_anova_text3 == ''):
            numericated_trial_matrix = TrialMatrix.numericate_trial_matrix(
                translated_trial_matrix.iloc[-60:])
            anova_stats = TrialMatrix.run_anova(numericated_trial_matrix)
            self.cached_anova_text3 = anova_stats
            self.cached_anova_len3 = len(translated_trial_matrix)
        else:
            anova_stats = self.cached_anova_text3

        return 'Recent: ' + string_perf_by_side + '. Biases: ' + anova_stats
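
form_string_recent_trials_perf differs from the all-trials version only in slicing the last 60 rows with .iloc[-60:] before counting hits and running the ANOVA, while the cache is still keyed on the length of the full matrix. Note that .iloc[-60:] simply returns every row when fewer than 60 exist. A toy illustration of that slice, assuming an ordinary pandas DataFrame with hypothetical columns:

import pandas as pd

# Hypothetical two-column trial matrix; only the slicing behavior matters here
trial_matrix = pd.DataFrame({'rewside': ['left', 'right'] * 5,
                             'outcome': ['hit', 'error'] * 5})

recent = trial_matrix.iloc[-60:]   # last 60 rows, or all rows if fewer exist
print(len(recent))                 # prints 10; .iloc never raises on a short frame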
Example #5
    def form_string_unforced_trials_perf(self, translated_trial_matrix):
        """Exactly the same as form_string_all_trials_perf, except that:
        
        We drop all trials where bad is True.
        We use cached_anova_len1 and cached_anova_text1 instead of 2.
        """
        side2perf = count_hits_by_type_from_trials_info(
            translated_trial_matrix[~translated_trial_matrix.bad], 
            split_key='rewside')

        string_perf_by_side = self.form_string_perf_by_side(side2perf)
        
        if len(translated_trial_matrix) > self.cached_anova_len1 or self.cached_anova_text1 == '':
            numericated_trial_matrix = TrialMatrix.numericate_trial_matrix(
                translated_trial_matrix[~translated_trial_matrix.bad])
            anova_stats = TrialMatrix.run_anova(numericated_trial_matrix)
            self.cached_anova_text1 = anova_stats
            self.cached_anova_len1 = len(translated_trial_matrix)
        else:
            anova_stats = self.cached_anova_text1
        
        return 'UF: ' + string_perf_by_side + '. Biases: ' + anova_stats        
Example #6
    def form_string_unforced_trials_perf(self, translated_trial_matrix):
        """Exactly the same as form_string_all_trials_perf, except that:
        
        We drop all trials where bad is True.
        We use cached_anova_len1 and cached_anova_text1 instead of 2.
        """
        side2perf = count_hits_by_type_from_trials_info(
            translated_trial_matrix[~translated_trial_matrix.bad],
            split_key='rewside')

        string_perf_by_side = self.form_string_perf_by_side(side2perf)

        if (len(translated_trial_matrix) > self.cached_anova_len1 or
                self.cached_anova_text1 == ''):
            numericated_trial_matrix = TrialMatrix.numericate_trial_matrix(
                translated_trial_matrix[~translated_trial_matrix.bad])
            anova_stats = TrialMatrix.run_anova(numericated_trial_matrix)
            self.cached_anova_text1 = anova_stats
            self.cached_anova_len1 = len(translated_trial_matrix)
        else:
            anova_stats = self.cached_anova_text1

        return 'UF: ' + string_perf_by_side + '. Biases: ' + anova_stats
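
The unforced variant drops forced trials with a boolean mask before counting: translated_trial_matrix[~translated_trial_matrix.bad] keeps only rows whose bad flag is False. A minimal illustration, assuming bad is a boolean column on a toy DataFrame:

import pandas as pd

# Hypothetical trial matrix with a boolean 'bad' column marking forced trials
trial_matrix = pd.DataFrame({
    'rewside': ['left', 'right', 'left', 'right'],
    'bad':     [False,  True,    False,  False],
})

unforced = trial_matrix[~trial_matrix.bad]   # ~ inverts the mask row-wise
print(len(unforced))                          # prints 3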
Example #7
    def choose_scheduler_main_body(self, translated_trial_matrix):
        # Main body of session
        this_trial = len(translated_trial_matrix)

        # Do nothing if we've changed recently
        if this_trial < self.last_changed_trial + self.n_trials_sticky:
            return

        # Check whether we've had at least 10 random in the last 50
        recents = translated_trial_matrix['isrnd'].values[
            -self.n_trials_recent_win:]
        recent_randoms = recents.sum()
        if len(recents) == self.n_trials_recent_win and \
            recent_randoms < self.n_trials_recent_random_thresh:
            # Set to occasional random
            self.current_sub_scheduler = self.sub_schedulers['RandomStim']
            self.last_changed_trial = this_trial
            self.params['status'] = 'randchk' + str(this_trial)
            return

        # Run the anova on all trials (used for checking for stay bias)
        numericated_trial_matrix = TrialMatrix.numericate_trial_matrix(
            translated_trial_matrix)
        #~ recent_ntm = numericated_trial_matrix.iloc[
        #~ -self.n_trials_recent_for_side_bias:]
        aov_res = TrialMatrix._run_anova(numericated_trial_matrix)
        if aov_res is None:
            self.current_sub_scheduler = self.sub_schedulers['RandomStim']
            self.last_changed_trial = this_trial
            self.params['status'] = 'an_none' + str(this_trial)
            return

        # Also calculate the side bias in all recent trials
        recent_ttm = translated_trial_matrix.iloc[
            -self.n_trials_recent_for_side_bias:]

        # Take the largest significant bias.
        # Actually, it is better to take the diff of perf between sides when
        # choosing the forced side, though a difference within the
        # unexplainable variance shouldn't be interpreted.
        side2perf_all = TrialMatrix.count_hits_by_type(recent_ttm,
                                                       split_key='rewside')
        if 'left' in side2perf_all and 'right' in side2perf_all:
            lperf = side2perf_all['left'][0] / float(side2perf_all['left'][1])
            rperf = side2perf_all['right'][0] / float(
                side2perf_all['right'][1])
            sideperf_diff = rperf - lperf
        else:
            sideperf_diff = 0

        # Decide whether stay, side, or neither bias is critical
        if (aov_res['pvals']['p_prevchoice'] < 0.05 and
                aov_res['fit']['fit_prevchoice'] > 0):
            # Stay bias
            self.last_changed_trial = this_trial
            self.params['status'] = 'antistay' + str(this_trial)
            self.current_sub_scheduler = self.sub_schedulers[
                'ForcedAlternation']
        elif np.abs(sideperf_diff) > .25:
            # Side bias
            self.last_changed_trial = this_trial
            self.params['status'] = 'antiside' + str(this_trial)
            self.current_sub_scheduler = self.sub_schedulers['ForcedSide']

            if sideperf_diff > 0:
                self.current_sub_scheduler.params['side'] = 'left'
            else:
                self.current_sub_scheduler.params['side'] = 'right'
        else:
            # No bias
            self.last_changed_trial = this_trial
            self.params['status'] = 'good' + str(this_trial)
            self.current_sub_scheduler = self.sub_schedulers['RandomStim']
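
The side-bias measure used above is the difference in hit rate between the two rewarded sides over recent trials; from the indexing, side2perf_all evidently maps each side to a (n_hits, n_trials) pair. A standalone sketch of that calculation under that assumption:

def side_perf_diff(side2perf):
    """Right-minus-left hit rate, or 0 if either side is missing."""
    if 'left' in side2perf and 'right' in side2perf:
        lperf = side2perf['left'][0] / float(side2perf['left'][1])
        rperf = side2perf['right'][0] / float(side2perf['right'][1])
        return rperf - lperf
    return 0


# A diff of -0.5 means the animal performs much better on the left side
print(side_perf_diff({'left': (8, 10), 'right': (3, 10)}))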
Example #8
 def choose_scheduler_main_body(self, translated_trial_matrix):
     # Main body of session
     this_trial = len(translated_trial_matrix)
     
     # Do nothing if we've changed recently
     if this_trial < self.last_changed_trial + self.n_trials_sticky:
         return
     
     # Check whether we've had at least 10 random in the last 50
     recents = translated_trial_matrix['isrnd'].values[
         -self.n_trials_recent_win:]
     recent_randoms = recents.sum()
     if len(recents) == self.n_trials_recent_win and \
         recent_randoms < self.n_trials_recent_random_thresh:
         # Set to occasional random
         self.current_sub_scheduler = self.sub_schedulers['RandomStim']
         self.last_changed_trial = this_trial
         self.params['status'] = 'randchk' + str(this_trial)       
         return
     
     # Run the anova on all trials (used for checking for stay bias)
     numericated_trial_matrix = TrialMatrix.numericate_trial_matrix(
         translated_trial_matrix)
     #~ recent_ntm = numericated_trial_matrix.iloc[
         #~ -self.n_trials_recent_for_side_bias:]
     aov_res = TrialMatrix._run_anova(numericated_trial_matrix)        
     if aov_res is None:
         self.current_sub_scheduler = self.sub_schedulers['RandomStim']
         self.last_changed_trial = this_trial
         self.params['status'] = 'an_none' + str(this_trial)
         return
     
     # Also calculate the side bias in all recent trials
     recent_ttm = translated_trial_matrix.iloc[
         -self.n_trials_recent_for_side_bias:]
     
     # Take the largest significant bias.
     # Actually, it is better to take the diff of perf between sides when
     # choosing the forced side, though a difference within the
     # unexplainable variance shouldn't be interpreted.
     side2perf_all = TrialMatrix.count_hits_by_type(
         recent_ttm, split_key='rewside')     
     if 'left' in side2perf_all and 'right' in side2perf_all:
         lperf = side2perf_all['left'][0] / float(side2perf_all['left'][1])
         rperf = side2perf_all['right'][0] / float(side2perf_all['right'][1])
         sideperf_diff = rperf - lperf
     else:
         sideperf_diff = 0
     
     # Decide whether stay, side, or neither bias is critical
     if aov_res['pvals']['p_prevchoice'] < 0.05 and aov_res['fit']['fit_prevchoice'] > 0:
         # Stay bias
         self.last_changed_trial = this_trial
         self.params['status'] = 'antistay' + str(this_trial)
         self.current_sub_scheduler = self.sub_schedulers['ForcedAlternation']
     elif np.abs(sideperf_diff) > .25:
         # Side bias
         self.last_changed_trial = this_trial
         self.params['status'] = 'antiside' + str(this_trial)
         self.current_sub_scheduler = self.sub_schedulers['ForcedSide']
         
         if sideperf_diff > 0:
             self.current_sub_scheduler.params['side'] = 'left'
         else:
             self.current_sub_scheduler.params['side'] = 'right'
     else:
         # No bias
         self.last_changed_trial = this_trial
         self.params['status'] = 'good' + str(this_trial)
         self.current_sub_scheduler = self.sub_schedulers['RandomStim']
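
Putting the thresholds together: a stay bias is flagged when the previous-choice term of the ANOVA is significant (p < 0.05) with a positive fit; otherwise a side bias is flagged when the absolute hit-rate gap exceeds 0.25, with the forced side set to the poorer-performing one (a positive diff means right outperforms left, so the left side is forced); otherwise random stimuli resume. A condensed sketch of just that branch logic, with hypothetical function and argument names:

import numpy as np

def choose_bias_correction(p_prevchoice, fit_prevchoice, sideperf_diff):
    """Return the sub-scheduler (and side, if any) the rule above would pick."""
    if p_prevchoice < 0.05 and fit_prevchoice > 0:
        return 'ForcedAlternation', None            # significant stay bias
    elif np.abs(sideperf_diff) > .25:
        # Force the poorer side: a positive diff means right outperforms left
        return 'ForcedSide', ('left' if sideperf_diff > 0 else 'right')
    else:
        return 'RandomStim', None                   # no critical bias


print(choose_bias_correction(0.01, 0.3, 0.0))       # ('ForcedAlternation', None)
print(choose_bias_correction(0.50, 0.0, 0.4))       # ('ForcedSide', 'left')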