Example #1
    def form_string_all_trials_perf(self, translated_trial_matrix):
        """Form a string with side perf and anova for all trials"""
        side2perf_all = count_hits_by_type_from_trials_info(
            translated_trial_matrix, split_key='rewside')

        string_perf_by_side = self.form_string_perf_by_side(side2perf_all)

        if (len(translated_trial_matrix) > self.cached_anova_len2
                or self.cached_anova_text2 == ''):
            numericated_trial_matrix = TrialMatrix.numericate_trial_matrix(
                translated_trial_matrix)
            anova_stats = TrialMatrix.run_anova(numericated_trial_matrix)
            self.cached_anova_text2 = anova_stats
            self.cached_anova_len2 = len(translated_trial_matrix)
        else:
            anova_stats = self.cached_anova_text2

        return 'All: ' + string_perf_by_side + '. Biases: ' + anova_stats
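
The conditional above is a simple length-keyed cache: the ANOVA text is recomputed only when the trial matrix has grown since the last call (or when nothing has been cached yet). A minimal, self-contained sketch of that pattern, with a hypothetical compute callable standing in for TrialMatrix.run_anova:

    class AnovaCache(object):
        """Length-keyed cache: recompute only when the data has grown."""
        def __init__(self):
            self.cached_len = 0
            self.cached_text = ''

        def get(self, trial_matrix, compute):
            # Recompute when new trials arrived, or nothing is cached yet
            if len(trial_matrix) > self.cached_len or self.cached_text == '':
                self.cached_text = compute(trial_matrix)
                self.cached_len = len(trial_matrix)
            return self.cached_text

The same logic recurs in the recent-trials and unforced variants below (cached_anova_len3 and cached_anova_len1), so a helper like this would collapse the three inlined copies into one.
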
Example #2
    def form_string_recent_trials_perf(self, translated_trial_matrix):
        """Form a string with side perf and anova for recent trials
        
        cached in cached_anova_text3 and cached_anova_len3
        """
        side2perf = count_hits_by_type_from_trials_info(
            translated_trial_matrix.iloc[-60:], split_key='rewside')

        string_perf_by_side = self.form_string_perf_by_side(side2perf)

        if (len(translated_trial_matrix) > self.cached_anova_len3
                or self.cached_anova_text3 == ''):
            numericated_trial_matrix = TrialMatrix.numericate_trial_matrix(
                translated_trial_matrix.iloc[-60:])
            anova_stats = TrialMatrix.run_anova(numericated_trial_matrix)
            self.cached_anova_text3 = anova_stats
            self.cached_anova_len3 = len(translated_trial_matrix)
        else:
            anova_stats = self.cached_anova_text3

        return 'Recent: ' + string_perf_by_side + '. Biases: ' + anova_stats
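
Two details of this recent-trials variant are easy to miss: .iloc[-60:] passes shorter frames through whole, and the cache is keyed on the length of the full matrix, so the text refreshes on every new trial even though only the last 60 trials feed the ANOVA. A quick pandas check of the slicing behavior (the frame here is made up):

    import pandas as pd

    tm = pd.DataFrame({'rewside': ['left', 'right'] * 40})  # 80 rows
    assert len(tm.iloc[-60:]) == 60             # only the most recent 60 rows
    assert len(tm.iloc[:10].iloc[-60:]) == 10   # short frames pass through whole
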
Example #3
    def form_string_unforced_trials_perf(self, translated_trial_matrix):
        """Exactly the same as form_string_all_trials_perf, except that:
        
        We drop all trials where bad is True.
        We use cached_anova_len1 and cached_anova_text1 instead of 2.
        """
        side2perf = count_hits_by_type_from_trials_info(
            translated_trial_matrix[~translated_trial_matrix.bad],
            split_key='rewside')

        string_perf_by_side = self.form_string_perf_by_side(side2perf)

        if (len(translated_trial_matrix) > self.cached_anova_len1
                or self.cached_anova_text1 == ''):
            numericated_trial_matrix = TrialMatrix.numericate_trial_matrix(
                translated_trial_matrix[~translated_trial_matrix.bad])
            anova_stats = TrialMatrix.run_anova(numericated_trial_matrix)
            self.cached_anova_text1 = anova_stats
            self.cached_anova_len1 = len(translated_trial_matrix)
        else:
            anova_stats = self.cached_anova_text1

        return 'UF: ' + string_perf_by_side + '. Biases: ' + anova_stats
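
The unforced variant filters with a boolean mask: ~translated_trial_matrix.bad keeps only the rows where bad is False. A minimal illustration with made-up data:

    import pandas as pd

    tm = pd.DataFrame({'rewside': ['left', 'right', 'left'],
                       'bad': [False, True, False]})
    unforced = tm[~tm.bad]  # drops the forced (bad) trial
    assert list(unforced.index) == [0, 2]
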
Example #4
    def update(self, filename):
        """Read info from filename and update the plot"""
        ## Load data and make trials_info
        # Check log
        lines = TrialSpeak.read_lines_from_file(filename)
        splines = TrialSpeak.split_by_trial(lines)

        # Really we should wait until we hear something from the Arduino.
        # For now, simply wait until at least one line has been received.
        if len(splines) == 0 or len(splines[0]) == 0:
            return

        # Construct trial_matrix. I believe this will always have at least
        # one line in it now, even if it's composed entirely of Nones.
        trials_info = TrialMatrix.make_trials_info_from_splines(splines)

        ## Translate condensed trialspeak into full data
        # Put this part into TrialSpeak.py
        translated_trial_matrix = TrialSpeak.translate_trial_matrix(
            trials_info)

        # return if nothing to do
        if len(translated_trial_matrix) < 1:
            return

        # define the "bad" trials
        # these are marked differently and discounted from certain ANOVAs
        # maybe include user delivery trials too?
        if 'isrnd' in translated_trial_matrix:
            translated_trial_matrix['bad'] = ~translated_trial_matrix['isrnd']
        else:
            translated_trial_matrix['bad'] = False

        ## Define trial types, the ordering on the plot
        # Make any updates to trial type parameters (child-class-dependent)
        self.update_trial_type_parameters(lines)

        # Add type information to trials_info and generate type names
        translated_trial_matrix = self.assign_trial_type_to_trials_info(
            translated_trial_matrix)
        trial_type_names = self.get_list_of_trial_type_names()

        ## Count performance by type
        # Hits by type
        typ2perf = count_hits_by_type_from_trials_info(
            translated_trial_matrix[~translated_trial_matrix.bad])
        typ2perf_all = count_hits_by_type_from_trials_info(
            translated_trial_matrix)

        # Combined
        total_nhit, total_ntot = calculate_nhit_ntot(
            translated_trial_matrix[~translated_trial_matrix.bad])

        # Turn the typ2perf into ticklabels
        ytick_labels = typ2perf2ytick_labels(trial_type_names, typ2perf,
                                             typ2perf_all)

        ## title string
        # number of rewards
        title_string = self.form_string_rewards(splines,
                                                translated_trial_matrix)

        # This depends on rewside existing, which is only true for 2AC
        if 'rewside' in translated_trial_matrix.columns:
            title_string += '\n' + self.form_string_all_trials_perf(
                translated_trial_matrix)
            title_string += '\n' + self.form_string_recent_trials_perf(
                translated_trial_matrix)
            title_string += '\n' + self.form_string_unforced_trials_perf(
                translated_trial_matrix)

        ## PLOTTING
        # plot each outcome
        for outcome in ['hit', 'error', 'spoil', 'curr']:
            # Get rows corresponding to this outcome
            msk = translated_trial_matrix['outcome'] == outcome

            # Get the line corresponding to this outcome and set the xdata
            # to the appropriate trial numbers and the ydata to the trial types
            line = self.graphics_handles['label2lines'][outcome]
            line.set_xdata(np.where(msk)[0])
            line.set_ydata(translated_trial_matrix['trial_type'][msk].values)

        # plot vert bars where bad trials occurred
        msk = translated_trial_matrix['bad']
        line = self.graphics_handles['label2lines']['bad']
        line.set_xdata(np.where(msk)[0])
        line.set_ydata(translated_trial_matrix['trial_type'][msk])

        ## PLOTTING axis labels and title
        ax = self.graphics_handles['ax']
        f = self.graphics_handles['f']

        # Use the ytick_labels calculated above
        ax.set_yticks(range(len(trial_type_names)))
        ax.set_yticklabels(ytick_labels, size='small')

        # The ylimits go BACKWARDS so that trial types are from top to bottom
        ymax = np.max(ax.get_yticks())
        ymin = np.min(ax.get_yticks())
        ax.set_ylim((ymax + .5, ymin - .5))

        # The xlimits are a sliding window of size trial_plot_window_size
        ax.set_xlim(
            (len(translated_trial_matrix) - self.trial_plot_window_size,
             len(translated_trial_matrix)))

        # The title string was assembled above; show it as the figure suptitle
        #~ ax.set_title(title_string, size='small')
        self.graphics_handles['suptitle'].set_text(title_string)

        ## plot division between L and R
        line = self.graphics_handles['label2lines']['divis']
        line.set_xdata(ax.get_xlim())
        line.set_ydata([np.mean(ax.get_yticks())] * 2)

        ## PLOTTING finalize
        plt.show()
        plt.draw()
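
update() assumes the figure was built once beforehand, with one Line2D per outcome stored in graphics_handles['label2lines']; each call then pushes new data into those existing artists instead of re-plotting from scratch. A minimal sketch of that setup (the outcome names match the loop above; the marker styles are made up):

    import numpy as np
    import matplotlib.pyplot as plt

    f, ax = plt.subplots()
    label2lines = {}
    for outcome, style in [('hit', 'go'), ('error', 'rx'),
                           ('spoil', 'ks'), ('curr', 'b^')]:
        # plot() returns a list of Line2D objects; keep the single artist
        label2lines[outcome], = ax.plot([], [], style)

    # Later, on each update: refresh the artists in place and redraw
    label2lines['hit'].set_xdata(np.array([0, 2, 5]))
    label2lines['hit'].set_ydata(np.array([1, 0, 1]))
    plt.draw()
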