Example #1
    def form_string_all_trials_perf(self, translated_trial_matrix):
        """Form a string with side perf and anova for all trials"""
        side2perf_all = count_hits_by_type_from_trials_info(
            translated_trial_matrix, split_key='rewside')

        string_perf_by_side = self.form_string_perf_by_side(side2perf_all)

        if (len(translated_trial_matrix) > self.cached_anova_len2
                or self.cached_anova_text2 == ''):
            numericated_trial_matrix = TrialMatrix.numericate_trial_matrix(
                translated_trial_matrix)
            anova_stats = TrialMatrix.run_anova(numericated_trial_matrix)
            self.cached_anova_text2 = anova_stats
            self.cached_anova_len2 = len(translated_trial_matrix)
        else:
            anova_stats = self.cached_anova_text2

        return 'All: ' + string_perf_by_side + '. Biases: ' + anova_stats
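
This method and the recent/unforced variants below share one caching idiom: the ANOVA text is recomputed only when the trial matrix has grown (or nothing is cached yet); otherwise the cached string is reused. A standalone sketch of the same idiom, with hypothetical names (StatsCache, compute_stats) that are not part of the project:

    class StatsCache(object):
        """Length-keyed cache: recompute only when more rows have arrived."""
        def __init__(self):
            self.cached_len = 0
            self.cached_text = ''

        def get(self, data, compute_stats):
            # Stale if the data grew, or if nothing has been computed yet
            if len(data) > self.cached_len or self.cached_text == '':
                self.cached_text = compute_stats(data)
                self.cached_len = len(data)
            return self.cached_text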
Example #2
    def form_string_all_trials_perf(self, translated_trial_matrix):
        """Form a string with side perf and anova for all trials"""
        side2perf_all = count_hits_by_type_from_trials_info(
            translated_trial_matrix, split_key='rewside')

        string_perf_by_side = self.form_string_perf_by_side(side2perf_all)

        if (len(translated_trial_matrix) > self.cached_anova_len2
                or self.cached_anova_text2 == ''):
            numericated_trial_matrix = TrialMatrix.numericate_trial_matrix(
                translated_trial_matrix)
            anova_stats = TrialMatrix.run_anova(numericated_trial_matrix)
            self.cached_anova_text2 = anova_stats
            self.cached_anova_len2 = len(translated_trial_matrix)
        else:
            anova_stats = self.cached_anova_text2

        return 'All: ' + string_perf_by_side + '. Biases: ' + anova_stats
Example #3
    def form_string_recent_trials_perf(self, translated_trial_matrix):
        """Form a string with side perf and anova for recent trials

        cached in cached_anova_text3 and cached_anova_len3
        """
        side2perf = count_hits_by_type_from_trials_info(
            translated_trial_matrix.iloc[-60:], split_key='rewside')

        string_perf_by_side = self.form_string_perf_by_side(side2perf)

        if (len(translated_trial_matrix) > self.cached_anova_len3
                or self.cached_anova_text3 == ''):
            numericated_trial_matrix = TrialMatrix.numericate_trial_matrix(
                translated_trial_matrix.iloc[-60:])
            anova_stats = TrialMatrix.run_anova(numericated_trial_matrix)
            self.cached_anova_text3 = anova_stats
            self.cached_anova_len3 = len(translated_trial_matrix)
        else:
            anova_stats = self.cached_anova_text3

        return 'Recent: ' + string_perf_by_side + '. Biases: ' + anova_stats
Example #4
    def form_string_recent_trials_perf(self, translated_trial_matrix):
        """Form a string with side perf and anova for recent trials
        
        cached in cached_anova_text3 and cached_anova_len3
        """
        side2perf = count_hits_by_type_from_trials_info(
            translated_trial_matrix.iloc[-60:], split_key='rewside')

        string_perf_by_side = self.form_string_perf_by_side(side2perf)

        if (len(translated_trial_matrix) > self.cached_anova_len3
                or self.cached_anova_text3 == ''):
            numericated_trial_matrix = TrialMatrix.numericate_trial_matrix(
                translated_trial_matrix.iloc[-60:])
            anova_stats = TrialMatrix.run_anova(numericated_trial_matrix)
            self.cached_anova_text3 = anova_stats
            self.cached_anova_len3 = len(translated_trial_matrix)
        else:
            anova_stats = self.cached_anova_text3

        return 'Recent: ' + string_perf_by_side + '. Biases: ' + anova_stats
Example #5
    def form_string_unforced_trials_perf(self, translated_trial_matrix):
        """Exactly the same as form_string_all_trials_perf, except that:
        
        We drop all trials where bad is True.
        We use cached_anova_len1 and cached_anova_text1 instead of 2.
        """
        side2perf = count_hits_by_type_from_trials_info(
            translated_trial_matrix[~translated_trial_matrix.bad], 
            split_key='rewside')

        string_perf_by_side = self.form_string_perf_by_side(side2perf)
        
        if (len(translated_trial_matrix) > self.cached_anova_len1
                or self.cached_anova_text1 == ''):
            numericated_trial_matrix = TrialMatrix.numericate_trial_matrix(
                translated_trial_matrix[~translated_trial_matrix.bad])
            anova_stats = TrialMatrix.run_anova(numericated_trial_matrix)
            self.cached_anova_text1 = anova_stats
            self.cached_anova_len1 = len(translated_trial_matrix)
        else:
            anova_stats = self.cached_anova_text1
        
        return 'UF: ' + string_perf_by_side + '. Biases: ' + anova_stats
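
The unforced variant differs only in filtering with a boolean mask over the bad column before computing performance. For reference, the same pandas idiom on a toy frame (column values invented):

    import pandas as pd

    df = pd.DataFrame({'rewside': ['left', 'right', 'left'],
                       'bad': [False, True, False]})
    unforced = df[~df.bad]  # drops the row where bad is True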
Example #6
    def form_string_unforced_trials_perf(self, translated_trial_matrix):
        """Exactly the same as form_string_all_trials_perf, except that:
        
        We drop all trials where bad is True.
        We use cached_anova_len1 and cached_anova_text1 instead of 2.
        """
        side2perf = count_hits_by_type_from_trials_info(
            translated_trial_matrix[~translated_trial_matrix.bad],
            split_key='rewside')

        string_perf_by_side = self.form_string_perf_by_side(side2perf)

        if (len(translated_trial_matrix) > self.cached_anova_len1
                or self.cached_anova_text1 == ''):
            numericated_trial_matrix = TrialMatrix.numericate_trial_matrix(
                translated_trial_matrix[~translated_trial_matrix.bad])
            anova_stats = TrialMatrix.run_anova(numericated_trial_matrix)
            self.cached_anova_text1 = anova_stats
            self.cached_anova_len1 = len(translated_trial_matrix)
        else:
            anova_stats = self.cached_anova_text1

        return 'UF: ' + string_perf_by_side + '. Biases: ' + anova_stats
Example #7
    res['STPPOS'] = np.random.randint(1, 10)
    res['SRVPOS'] = np.random.randint(1, 10)
    res['ITI'] = np.random.randint(10000)
    
    return res

## Main loop
last_released_trial = 0
try:
    while True:
        # Update chatter
        chatter.update(echo_to_stdout=True)
        
        # Check log
        splines = TrialSpeak.load_splines_from_file(logfilename)
        trial_matrix = TrialMatrix.make_trials_info_from_splines(splines)
        
        # Switch on which trial, and whether it's been released and/or completed
        if trial_matrix is None: # or if splines is empty?
            # It's the first trial
            # Send each initial param
            for param_name, param_val in initial_params.items():
                chatter.write_to_device(
                    TrialSpeak.command_set_parameter(
                        param_name, param_val))            
            
            # Release
            chatter.write_to_device(TrialSpeak.command_release_trial())
        elif ('response' not in trial_matrix or
                trial_matrix['response'].isnull().iloc[-1]):
            # Trial has not completed, keep waiting
            continue
Example #8
    def choose_scheduler_main_body(self, translated_trial_matrix):
        # Main body of session
        this_trial = len(translated_trial_matrix)

        # Do nothing if we've changed recently
        if this_trial < self.last_changed_trial + self.n_trials_sticky:
            return

        # Check whether we've had at least 10 random in the last 50
        recents = translated_trial_matrix['isrnd'].values[
            -self.n_trials_recent_win:]
        recent_randoms = recents.sum()
        if (len(recents) == self.n_trials_recent_win and
                recent_randoms < self.n_trials_recent_random_thresh):
            # Set to occasional random
            self.current_sub_scheduler = self.sub_schedulers['RandomStim']
            self.last_changed_trial = this_trial
            self.params['status'] = 'randchk' + str(this_trial)
            return

        # Run the anova on all trials (used for checking for stay bias)
        numericated_trial_matrix = TrialMatrix.numericate_trial_matrix(
            translated_trial_matrix)
        #~ recent_ntm = numericated_trial_matrix.iloc[
        #~ -self.n_trials_recent_for_side_bias:]
        aov_res = TrialMatrix._run_anova(numericated_trial_matrix)
        if aov_res is None:
            self.current_sub_scheduler = self.sub_schedulers['RandomStim']
            self.last_changed_trial = this_trial
            self.params['status'] = 'an_none' + str(this_trial)
            return

        # Also calculate the side bias in all recent trials
        recent_ttm = translated_trial_matrix.iloc[
            -self.n_trials_recent_for_side_bias:]

        # Take the largest significant bias
        # Actually, better to take the diff of perf between sides for forced
        # side. Although this is a bigger issue than unexplainable variance
        # shouldn't be interpreted.
        side2perf_all = TrialMatrix.count_hits_by_type(recent_ttm,
                                                       split_key='rewside')
        if 'left' in side2perf_all and 'right' in side2perf_all:
            lperf = side2perf_all['left'][0] / float(side2perf_all['left'][1])
            rperf = side2perf_all['right'][0] / float(
                side2perf_all['right'][1])
            sideperf_diff = rperf - lperf
        else:
            sideperf_diff = 0

        # Decide whether stay, side, or neither bias is critical
        if (aov_res['pvals']['p_prevchoice'] < 0.05 and
                aov_res['fit']['fit_prevchoice'] > 0):
            # Stay bias
            self.last_changed_trial = this_trial
            self.params['status'] = 'antistay' + str(this_trial)
            self.current_sub_scheduler = self.sub_schedulers[
                'ForcedAlternation']
        elif np.abs(sideperf_diff) > .25:
            # Side bias
            self.last_changed_trial = this_trial
            self.params['status'] = 'antiside' + str(this_trial)
            self.current_sub_scheduler = self.sub_schedulers['ForcedSide']

            if sideperf_diff > 0:
                self.current_sub_scheduler.params['side'] = 'left'
            else:
                self.current_sub_scheduler.params['side'] = 'right'
        else:
            # No bias
            self.last_changed_trial = this_trial
            self.params['status'] = 'good' + str(this_trial)
            self.current_sub_scheduler = self.sub_schedulers['RandomStim']
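
Note that choose_scheduler_main_body relies on just two entries of the ANOVA result: the p-value and the sign of the fitted previous-choice term. A minimal mock of that shape (values invented; only the keys come from the code above):

    # Hypothetical aov_res: a positive fit_prevchoice with p < 0.05
    # is what the scheduler treats as a stay bias.
    aov_res = {
        'pvals': {'p_prevchoice': 0.01},
        'fit': {'fit_prevchoice': 0.4},
    }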
Example #9
    def update(self, filename):
        """Read info from filename and update the plot"""
        ## Load data and make trials_info
        # Check log
        lines = TrialSpeak.read_lines_from_file(filename)
        splines = TrialSpeak.split_by_trial(lines)

        # Really we should wait until we hear something from the arduino
        # Simply wait till at least one line has been received
        if len(splines) == 0 or len(splines[0]) == 0:
            return

        # Construct trial_matrix. I believe this will always have at least
        # one line in it now, even if it's composed entirely of Nones.
        trials_info = TrialMatrix.make_trials_info_from_splines(splines)

        ## Translate condensed trialspeak into full data
        # Put this part into TrialSpeak.py
        translated_trial_matrix = TrialSpeak.translate_trial_matrix(
            trials_info)

        # return if nothing to do
        if len(translated_trial_matrix) < 1:
            return

        # define the "bad" trials
        # these are marked differently and discounted from certain ANOVAs
        # maybe include user delivery trials too?
        if 'isrnd' in translated_trial_matrix:
            translated_trial_matrix['bad'] = ~translated_trial_matrix['isrnd']
        else:
            translated_trial_matrix['bad'] = False

        ## Define trial types, the ordering on the plot
        # Make any updates to trial type parameters (child-class-dependent)
        self.update_trial_type_parameters(lines)

        # Add type information to trials_info and generate type names
        translated_trial_matrix = self.assign_trial_type_to_trials_info(
            translated_trial_matrix)
        trial_type_names = self.get_list_of_trial_type_names()

        ## Count performance by type
        # Hits by type
        typ2perf = count_hits_by_type_from_trials_info(
            translated_trial_matrix[~translated_trial_matrix.bad])
        typ2perf_all = count_hits_by_type_from_trials_info(
            translated_trial_matrix)

        # Combined
        total_nhit, total_ntot = calculate_nhit_ntot(
            translated_trial_matrix[~translated_trial_matrix.bad])

        # Turn the typ2perf into ticklabels
        ytick_labels = typ2perf2ytick_labels(trial_type_names, typ2perf,
                                             typ2perf_all)

        ## title string
        # number of rewards
        title_string = self.form_string_rewards(splines,
                                                translated_trial_matrix)

        # This depends on rewside existing, which is only true for 2AC
        if 'rewside' in translated_trial_matrix.columns:
            title_string += '\n' + self.form_string_all_trials_perf(
                translated_trial_matrix)
            title_string += '\n' + self.form_string_recent_trials_perf(
                translated_trial_matrix)
            title_string += '\n' + self.form_string_unforced_trials_perf(
                translated_trial_matrix)

        ## PLOTTING
        # plot each outcome
        for outcome in ['hit', 'error', 'spoil', 'curr']:
            # Get rows corresponding to this outcome
            msk = translated_trial_matrix['outcome'] == outcome

            # Get the line corresponding to this outcome and set the xdata
            # to the appropriate trial numbers and the ydata to the trial types
            line = self.graphics_handles['label2lines'][outcome]
            line.set_xdata(np.where(msk)[0])
            line.set_ydata(translated_trial_matrix['trial_type'][msk].values)

        # plot vert bars where bad trials occurred
        msk = translated_trial_matrix['bad']
        line = self.graphics_handles['label2lines']['bad']
        line.set_xdata(np.where(msk)[0])
        line.set_ydata(translated_trial_matrix['trial_type'][msk])

        ## PLOTTING axis labels and title
        ax = self.graphics_handles['ax']
        f = self.graphics_handles['f']

        # Use the ytick_labels calculated above
        ax.set_yticks(range(len(trial_type_names)))
        ax.set_yticklabels(ytick_labels, size='small')

        # The ylimits go BACKWARDS so that trial types are from top to bottom
        ymax = np.max(ax.get_yticks())
        ymin = np.min(ax.get_yticks())
        ax.set_ylim((ymax + .5, ymin - .5))

        # The xlimits are a sliding window of size TRIAL_PLOT_WINDOW_SIZE
        ax.set_xlim(
            (len(translated_trial_matrix) - self.trial_plot_window_size,
             len(translated_trial_matrix)))

        # title set above
        #~ ax.set_title(title_string, size='small')
        self.graphics_handles['suptitle'].set_text(title_string)

        ## plot division between L and R
        line = self.graphics_handles['label2lines']['divis']
        line.set_xdata(ax.get_xlim())
        line.set_ydata([np.mean(ax.get_yticks())] * 2)

        ## PLOTTING finalize
        plt.show()
        plt.draw()
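
update() assumes a graphics_handles dict prepared elsewhere in the class. A minimal layout consistent with the calls above (the markers and setup code are assumptions, not the original implementation):

    import matplotlib.pyplot as plt

    f, ax = plt.subplots()
    graphics_handles = {
        'f': f,
        'ax': ax,
        'suptitle': f.suptitle(''),
        # One Line2D per label used in update(); styles are placeholders
        'label2lines': dict(
            [(label, ax.plot([], [], style)[0]) for label, style in
             [('hit', 'go'), ('error', 'ro'), ('spoil', 'ko'),
              ('curr', 'bs'), ('bad', 'k|'), ('divis', 'k-')]]),
    }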
Example #10
    def choose_scheduler_main_body(self, translated_trial_matrix):
        # Main body of session
        this_trial = len(translated_trial_matrix)

        # Do nothing if we've changed recently
        if this_trial < self.last_changed_trial + self.n_trials_sticky:
            return

        # Check whether we've had at least 10 random in the last 50
        recents = translated_trial_matrix['isrnd'].values[
            -self.n_trials_recent_win:]
        recent_randoms = recents.sum()
        if (len(recents) == self.n_trials_recent_win and
                recent_randoms < self.n_trials_recent_random_thresh):
            # Set to occasional random
            self.current_sub_scheduler = self.sub_schedulers['RandomStim']
            self.last_changed_trial = this_trial
            self.params['status'] = 'randchk' + str(this_trial)
            return

        # Run the anova on all trials (used for checking for stay bias)
        numericated_trial_matrix = TrialMatrix.numericate_trial_matrix(
            translated_trial_matrix)
        #~ recent_ntm = numericated_trial_matrix.iloc[
        #~     -self.n_trials_recent_for_side_bias:]
        aov_res = TrialMatrix._run_anova(numericated_trial_matrix)
        if aov_res is None:
            self.current_sub_scheduler = self.sub_schedulers['RandomStim']
            self.last_changed_trial = this_trial
            self.params['status'] = 'an_none' + str(this_trial)
            return

        # Also calculate the side bias in all recent trials
        recent_ttm = translated_trial_matrix.iloc[
            -self.n_trials_recent_for_side_bias:]

        # Take the largest significant bias
        # Actually, better to take the diff of perf between sides for forced
        # side. Although this is a bigger issue than unexplainable variance
        # shouldn't be interpreted.
        side2perf_all = TrialMatrix.count_hits_by_type(
            recent_ttm, split_key='rewside')
        if 'left' in side2perf_all and 'right' in side2perf_all:
            lperf = side2perf_all['left'][0] / float(side2perf_all['left'][1])
            rperf = side2perf_all['right'][0] / float(
                side2perf_all['right'][1])
            sideperf_diff = rperf - lperf
        else:
            sideperf_diff = 0

        # Decide whether stay, side, or neither bias is critical
        if (aov_res['pvals']['p_prevchoice'] < 0.05 and
                aov_res['fit']['fit_prevchoice'] > 0):
            # Stay bias
            self.last_changed_trial = this_trial
            self.params['status'] = 'antistay' + str(this_trial)
            self.current_sub_scheduler = self.sub_schedulers[
                'ForcedAlternation']
        elif np.abs(sideperf_diff) > .25:
            # Side bias
            self.last_changed_trial = this_trial
            self.params['status'] = 'antiside' + str(this_trial)
            self.current_sub_scheduler = self.sub_schedulers['ForcedSide']

            if sideperf_diff > 0:
                self.current_sub_scheduler.params['side'] = 'left'
            else:
                self.current_sub_scheduler.params['side'] = 'right'
        else:
            # No bias
            self.last_changed_trial = this_trial
            self.params['status'] = 'good' + str(this_trial)
            self.current_sub_scheduler = self.sub_schedulers['RandomStim']
Example #11
    res['STPPOS'] = np.random.randint(1, 10)
    res['SRVPOS'] = np.random.randint(1, 10)
    res['ITI'] = np.random.randint(10000)
    
    return res

## Main loop
last_released_trial = 0
try:
    while True:
        # Update chatter
        chatter.update(echo_to_stdout=True)
        
        # Check log
        splines = TrialSpeak.load_splines_from_file(logfilename)
        trial_matrix = TrialMatrix.make_trials_info_from_splines(splines)
        
        # Switch on which trial, and whether it's been released and/or completed
        if trial_matrix is None: # or if splines is empty?
            # It's the first trial
            # Send each initial param
            for param_name, param_val in list(initial_params.items()):
                chatter.write_to_device(
                    TrialSpeak.command_set_parameter(
                        param_name, param_val))            
            
            # Release
            chatter.write_to_device(TrialSpeak.command_release_trial())
        elif ('response' not in trial_matrix or
                trial_matrix['response'].isnull().iloc[-1]):
            # Trial has not completed, keep waiting
            continue
Example #12
    def update(self, filename):
        """Read info from filename and update the plot"""
        ## Load data and make trials_info
        # Check log
        lines = TrialSpeak.read_lines_from_file(filename)
        splines = TrialSpeak.split_by_trial(lines)        
        
        # Really we should wait until we hear something from the arduino
        # Simply wait till at least one line has been received
        if len(splines) == 0 or len(splines[0]) == 0:
            return

        # Construct trial_matrix. I believe this will always have at least
        # one line in it now, even if it's composed entirely of Nones.
        trials_info = TrialMatrix.make_trials_info_from_splines(splines)

        ## Translate condensed trialspeak into full data
        # Put this part into TrialSpeak.py
        translated_trial_matrix = TrialSpeak.translate_trial_matrix(trials_info)
        
        # return if nothing to do
        if len(translated_trial_matrix) < 1:
            return
        
        # define the "bad" trials
        # these are marked differently and discounted from certain ANOVAs
        # maybe include user delivery trials too?
        if 'isrnd' in translated_trial_matrix:
            translated_trial_matrix['bad'] = ~translated_trial_matrix['isrnd']
        else:
            translated_trial_matrix['bad'] = False

        ## Define trial types, the ordering on the plot
        # Make any updates to trial type parameters (child-class-dependent)
        self.update_trial_type_parameters(lines)
        
        # Add type information to trials_info and generate type names
        translated_trial_matrix = self.assign_trial_type_to_trials_info(
            translated_trial_matrix)
        trial_type_names = self.get_list_of_trial_type_names()

        ## Count performance by type
        # Hits by type
        typ2perf = count_hits_by_type_from_trials_info(
            translated_trial_matrix[~translated_trial_matrix.bad])
        typ2perf_all = count_hits_by_type_from_trials_info(
            translated_trial_matrix)
        
        # Combined
        total_nhit, total_ntot = calculate_nhit_ntot(
            translated_trial_matrix[~translated_trial_matrix.bad])

        # Turn the typ2perf into ticklabels
        ytick_labels = typ2perf2ytick_labels(trial_type_names, 
            typ2perf, typ2perf_all)

        ## title string
        # number of rewards
        title_string = self.form_string_rewards(splines, 
            translated_trial_matrix)
        
        # This depends on rewside existing, which is only true for 2AC
        if 'rewside' in translated_trial_matrix.columns:
            title_string += '\n' + self.form_string_all_trials_perf(
                translated_trial_matrix)
            title_string += '\n' + self.form_string_recent_trials_perf(
                translated_trial_matrix)
            title_string += '\n' + self.form_string_unforced_trials_perf(
                translated_trial_matrix)

        ## PLOTTING
        # plot each outcome
        for outcome in ['hit', 'error', 'spoil', 'curr']:
            # Get rows corresponding to this outcome
            msk = translated_trial_matrix['outcome'] == outcome

            # Get the line corresponding to this outcome and set the xdata
            # to the appropriate trial numbers and the ydata to the trial types
            line = self.graphics_handles['label2lines'][outcome]
            line.set_xdata(np.where(msk)[0])
            line.set_ydata(translated_trial_matrix['trial_type'][msk].values)

        # plot vert bars where bad trials occurred
        msk = translated_trial_matrix['bad']
        line = self.graphics_handles['label2lines']['bad']
        line.set_xdata(np.where(msk)[0])
        line.set_ydata(translated_trial_matrix['trial_type'][msk])

        ## PLOTTING axis labels and title
        ax = self.graphics_handles['ax']
        f = self.graphics_handles['f']
        
        # Use the ytick_labels calculated above
        ax.set_yticks(range(len(trial_type_names)))
        ax.set_yticklabels(ytick_labels, size='small')
        
        # The ylimits go BACKWARDS so that trial types are from top to bottom
        ymax = np.max(ax.get_yticks())
        ymin = np.min(ax.get_yticks())
        ax.set_ylim((ymax + .5, ymin - .5))
        
        # The xlimits are a sliding window of size TRIAL_PLOT_WINDOW_SIZE
        ax.set_xlim((
            len(translated_trial_matrix) - self.trial_plot_window_size, 
            len(translated_trial_matrix)))    
        
        # title set above
        #~ ax.set_title(title_string, size='small')
        self.graphics_handles['suptitle'].set_text(title_string)
        
        ## plot division between L and R
        line = self.graphics_handles['label2lines']['divis']
        line.set_xdata(ax.get_xlim())
        line.set_ydata([np.mean(ax.get_yticks())] * 2)
        
        ## PLOTTING finalize
        plt.show()
        plt.draw()