Пример #1
0
def plot_avg_week_for_stations(start_time_matrix,
                               station_idx,
                               station_info,
                               station_ids=None,
                               plot_title="weekly behavior",
                               file_name="temp.pdf",
                               y_axis="Number of Trips",
                               normalize=False,
                               round=False):
    """Plot the average weekly trip pattern for stations and save it to a file.

    If ``start_time_matrix`` spans more than one week (more columns than
    ``utils.INTERVAL_WEEKLY``), it is first aggregated into one averaged week.
    When ``station_ids`` is given, each listed station is drawn as a labelled
    solid line; otherwise every row is drawn as a faint black line.  The
    resulting figure is written to ``file_name`` via ``savefig``.
    """
    print("Plotting normalized average weeks for stations")
    # Collapse multi-week data down to a single averaged week if needed.
    if (start_time_matrix.shape[1] > utils.INTERVAL_WEEKLY):
        weekly = utils.get_station_agg_trips_over_week(start_time_matrix,
                                                       np.mean)
    else:
        weekly = start_time_matrix
    if normalize:
        weekly = utils.normalize(weekly)
    if round:
        # NOTE: `round` is the parameter here, not the builtin.
        weekly = utils.round(weekly, 4)

    # plt.title(plot_title)
    plt.ylabel(y_axis)

    # One x position per half-hour slot across a full week (48 * 7 slots).
    times = [utils.time_at_idx(slot) for slot in range(0, 48 * 7)]
    if station_ids is None:
        # No explicit selection: draw every station as a faint background line.
        for row in range(weekly.shape[0]):
            plt.plot(times, weekly[row], linestyle="solid", alpha=0.03,
                     color="k")
            plt.ylim(-50, 40)
    else:
        for sid in station_ids:
            plt.plot(times,
                     weekly[station_idx[sid], :],
                     linestyle="solid",
                     alpha=1,
                     label=station_info[sid]['stationName'])

    # Ticks every 6 hours; label weekday names at midnight and the hour at
    # noon, leaving the rest blank.
    tick_times = [t for t in times if t.minute == 0 and t.hour in [0, 6, 12, 18]]
    tick_labels = []
    for t in tick_times:
        if t.hour == 0:
            tick_labels.append(t.strftime("%a"))
        elif t.hour in [12]:
            tick_labels.append(t.hour)
        else:
            tick_labels.append("")
    plt.xticks(tick_times, tick_labels, rotation=70)
    if station_ids is not None:
        plt.legend(loc="upper right")
    savefig(file_name)
Пример #2
0
 def translate_x_coord(self, x):
     """Translate a pixel x coordinate into a date from the data series.

     Out-of-range x values are clamped to the drawable area, so a date is
     always returned (the last date when x maps past the end of the data);
     this method never actually returns None.
     """
     # The x pixel value does not start from 0: the plot area begins after
     # a horizontal border, so shift x into plot-area coordinates first.
     xborder = self.get_horizontal_border()
     x -= xborder
     xscale = self.get_xscale()
     # NOTE(review): x has already had xborder subtracted, yet it is clamped
     # against xborder (not 0) and width + xborder (not width). Possibly
     # intentional, but looks like a double-counting of the border — confirm.
     if x < xborder:
         x = xborder
     if x > self.get_width() + xborder:
         x = self.get_width() + xborder
     # Convert pixels to a bar index (bars are xscale pixels wide).
     bar = float(x) / float(xscale)
     date = self.dsget('date')
     bar = utils.round(bar)
     # Offset by the first visible bar; clamp to the last available date.
     if self.get_start_bar() + bar >= len(date):
         return date[-1]
     else:
         return date[self.get_start_bar() + bar]
Пример #3
0
 def forward(ctx, input, rounding='deterministic'):
     """Round *input* using the requested rounding mode.

     Thin wrapper that delegates to ``utils.round``; ``ctx`` is accepted to
     satisfy the autograd-style forward signature but no state is saved.
     """
     rounded = utils.round(input, rounding)
     return rounded
Пример #4
0
    def update(self, i_trial):
        # Standard values

        # Stores which lateral port the animal poked into (if any)
        self.ChoiceLeft[i_trial] = None
        # Stores whether the animal poked into the correct port (if any)
        self.ChoiceCorrect[i_trial] = None
        # Signals whether confidence was used in this trial. Set to false if
        # lateral ports choice timed-out (i.e, MissedChoice(i) is true), it
        # also should be set to false (but not due to a bug) if the animal
        # poked the a lateral port but didn't complete the feedback period
        # (even with using grace).
        self.Feedback[i_trial] = True
        # How long the animal spent waiting for the reward (whether in correct
        # or in incorrect ports)
        self.FeedbackTime[i_trial] = None
        # Signals whether the animal broke fixation during stimulus delay state
        self.FixBroke[i_trial] = False
        # Signals whether the animal broke fixation during sampling but before
        # min-sampling ends
        self.EarlyWithdrawal[i_trial] = False
        # Signals whether the animal correctly finished min-sampling but failed
        # to poke any of the lateral ports within ChoiceDeadLine period
        self.MissedChoice[i_trial] = False
        # How long the animal remained fixated in center poke
        self.FixDur[i_trial] = None
        # How long between sample end and making a choice (timeout-choice
        # trials are excluded)
        self.MT[i_trial] = None
        # How long the animal sampled. If RewardAfterMinSampling is enabled and
        # animal completed min sampling, then it's equal to MinSample time,
        # otherwise it's how long the animal remained fixated in center-port
        # until it either poked-out or the max allowed sampling time was
        # reached.
        self.ST[i_trial] = None
        # Signals whether a reward was given to the animal (it also includes
        # if the animal poked into the correct reward port but poked out
        # afterwards and didn't receive a reward, due to 'RewardGrace' being
        # counted as reward).
        self.Rewarded[i_trial] = False
        # Signals whether a center-port reward was given after min-sampling
        # ends.
        self.RewardAfterMinSampling[i_trial] = False
        # Tracks the amount of water the animal received up tp this point
        # TODO: Check if RewardReceivedTotal is needed and calculate it using
        # CalcRewObtained() function.
        # We will updated later
        self.RewardReceivedTotal[i_trial + 1] = 0

        self.TrialNumber[i_trial] = i_trial

        self.Timer.customInitialize[i_trial] = time.time()

        # Checking states and rewriting standard

        # Extract the states that were used in the last trial
        statesVisitedThisTrialNames = self.RawData.StatesVisitedNames(i_trial)
        statesVisitedThisTrialTimes = self.RawData.StatesVisitedTimes(i_trial)
        if str(MatrixState.WaitForStimulus) in statesVisitedThisTrialNames:
            lastWaitForStimulusStateTimes = statesVisitedThisTrialTimes[str(
                MatrixState.WaitForStimulus)][-1]
            lastTriggerWaitForStimulusStateTimes = statesVisitedThisTrialTimes[
                str(MatrixState.TriggerWaitForStimulus)][-1]
            self.FixDur[i_trial] = lastWaitForStimulusStateTimes[1] - \
                lastWaitForStimulusStateTimes[0] + \
                lastTriggerWaitForStimulusStateTimes[1] - \
                lastTriggerWaitForStimulusStateTimes[0]
        if str(MatrixState.stimulus_delivery) in statesVisitedThisTrialNames:
            stimulus_deliveryStateTimes = statesVisitedThisTrialTimes[str(
                MatrixState.stimulus_delivery)]
            if self.task_parameters.RewardAfterMinSampling:
                self.ST[i_trial] = diff(stimulus_deliveryStateTimes)
            else:
                # 'CenterPortRewardDelivery' state would exist even if no
                # 'RewardAfterMinSampling' is active, in such case it means
                # that min sampling is done and we are in the optional
                # sampling stage.
                if str(MatrixState.CenterPortRewardDelivery) in \
                        statesVisitedThisTrialNames and \
                        self.task_parameters.StimulusTime > \
                        self.task_parameters.MinSample:
                    CenterPortRewardDeliveryStateTimes = \
                        statesVisitedThisTrialTimes[
                            str(MatrixState.CenterPortRewardDelivery)]
                    self.ST[i_trial] = [
                        CenterPortRewardDeliveryStateTimes[0][1] -
                        stimulus_deliveryStateTimes[0][0]
                    ]
                else:
                    # This covers early_withdrawal
                    self.ST[i_trial] = diff(stimulus_deliveryStateTimes)

        if str(MatrixState.WaitForChoice) in statesVisitedThisTrialNames and \
            str(MatrixState.timeOut_missed_choice) not in \
                statesVisitedThisTrialNames:
            WaitForChoiceStateTimes = statesVisitedThisTrialTimes[str(
                MatrixState.WaitForChoice)]
            WaitForChoiceStateStartTimes = [
                start_time for start_time, end_time in WaitForChoiceStateTimes
            ]
            # We might have more than multiple WaitForChoice if
            # HabituateIgnoreIncorrect is enabeld
            self.MT[-1] = diff(WaitForChoiceStateStartTimes[:2])

        # Extract trial outcome. Check first if it's a wrong choice or a
        # HabituateIgnoreIncorrect but first choice was wrong choice
        if str(MatrixState.WaitForPunishStart) in \
            statesVisitedThisTrialNames or \
           str(MatrixState.RegisterWrongWaitCorrect) in \
                statesVisitedThisTrialNames:
            self.ChoiceCorrect[i_trial] = False
            # Correct choice = left
            if self.LeftRewarded[i_trial]:
                self.ChoiceLeft[i_trial] = False  # Left not chosen
            else:
                self.ChoiceLeft[i_trial] = True
            # Feedback waiting time
            if str(MatrixState.WaitForPunish) in statesVisitedThisTrialNames:
                WaitForPunishStateTimes = statesVisitedThisTrialTimes[str(
                    MatrixState.WaitForPunish)]
                WaitForPunishStartStateTimes = statesVisitedThisTrialTimes[str(
                    MatrixState.WaitForPunishStart)]
                self.FeedbackTime[i_trial] = WaitForPunishStateTimes[-1][
                    1] - WaitForPunishStartStateTimes[0][0]
            else:  # It was a  RegisterWrongWaitCorrect state
                self.FeedbackTime[i_trial] = None
        # CorrectChoice
        elif str(MatrixState.WaitForRewardStart) in \
                statesVisitedThisTrialNames:
            self.ChoiceCorrect[i_trial] = True
            if self.CatchTrial[i_trial]:
                catch_stim_idx = GetCatchStimIdx(self.StimulusOmega[i_trial])
                # Lookup the stimulus probability and increase by its
                # 1/frequency.
                stim_val = self.StimulusOmega[i_trial] * 100
                if stim_val < 50:
                    stim_val = 100 - stim_val
                stim_prob = self.task_parameters.OmegaTable.columns.OmegaProb[
                    self.task_parameters.OmegaTable.columns.Omega.index(
                        stim_val)]
                sum_all_prob = sum(
                    self.task_parameters.OmegaTable.columns.OmegaProb)
                stim_prob = (1 + sum_all_prob - stim_prob) / sum_all_prob
                self.CatchCount[catch_stim_idx] += stim_prob
                self.LastSuccessCatchTial = i_trial
            # Feedback waiting time
            if str(MatrixState.WaitForReward) in statesVisitedThisTrialNames:
                WaitForRewardStateTimes = statesVisitedThisTrialTimes[str(
                    MatrixState.WaitForReward)]
                WaitForRewardStartStateTimes = statesVisitedThisTrialTimes[str(
                    MatrixState.WaitForRewardStart)]
                self.FeedbackTime[i_trial] = WaitForRewardStateTimes[-1][
                    1] - WaitForRewardStartStateTimes[0][0]
                # Correct choice = left
                if self.LeftRewarded[i_trial]:
                    self.ChoiceLeft[i_trial] = True  # Left chosen
                else:
                    self.ChoiceLeft[i_trial] = False
            else:
                warning("'WaitForReward' state should always appear"
                        " if 'WaitForRewardStart' was initiated")
        elif str(MatrixState.broke_fixation) in statesVisitedThisTrialNames:
            self.FixBroke[i_trial] = True
        elif str(MatrixState.early_withdrawal) in statesVisitedThisTrialNames:
            self.EarlyWithdrawal[i_trial] = True
        elif str(MatrixState.timeOut_missed_choice) in \
                statesVisitedThisTrialNames:
            self.Feedback[i_trial] = False
            self.MissedChoice[i_trial] = True
        if str(MatrixState.timeOut_SkippedFeedback) in \
                statesVisitedThisTrialNames:
            self.Feedback[i_trial] = False
        if str(MatrixState.Reward) in statesVisitedThisTrialNames:
            self.Rewarded[i_trial] = True
            self.RewardReceivedTotal[i_trial] += \
                self.task_parameters.RewardAmount
        if str(MatrixState.CenterPortRewardDelivery) in \
                statesVisitedThisTrialNames and \
           self.task_parameters.RewardAfterMinSampling:
            self.RewardAfterMinSampling[i_trial] = True
            self.RewardReceivedTotal[i_trial] += \
                self.task_parameters.CenterPortRewAmount
        if str(MatrixState.WaitCenterPortOut) in statesVisitedThisTrialNames:
            WaitCenterPortOutStateTimes = statesVisitedThisTrialTimes[str(
                MatrixState.WaitCenterPortOut)]
            self.ReactionTime[i_trial] = diff(WaitCenterPortOutStateTimes)
        else:
            # Assign with -1 so we can differentiate it from None trials
            # where the state potentially existed but we didn't calculate it
            self.ReactionTime[i_trial] = -1
        # State-independent fields
        self.StimDelay[i_trial] = self.task_parameters.StimDelay
        self.FeedbackDelay[i_trial] = self.task_parameters.FeedbackDelay
        self.MinSample[i_trial] = self.task_parameters.MinSample
        self.RewardMagnitude[i_trial + 1] = [
            self.task_parameters.RewardAmount,
            self.task_parameters.RewardAmount
        ]
        self.CenterPortRewAmount[i_trial +
                                 1] = self.task_parameters.CenterPortRewAmount
        self.PreStimCntrReward[
            i_trial + 1] = self.task_parameters.PreStimuDelayCntrReward
        self.Timer.customExtractData[i_trial] = time.time()

        # IF we are running grating experiments,
        # add the grating orientation that was used
        if self.task_parameters.ExperimentType == \
                ExperimentType.GratingOrientation:
            self.GratingOrientation[
                i_trial] = self.drawParams.gratingOrientation

        # Updating Delays
        # stimulus delay
        if self.task_parameters.StimDelayAutoincrement:
            if self.FixBroke[i_trial]:
                self.task_parameters.StimDelay = max(
                    self.task_parameters.StimDelayMin,
                    min(
                        self.task_parameters.StimDelayMax,
                        self.StimDelay[i_trial] -
                        self.task_parameters.StimDelayDecr))
            else:
                self.task_parameters.StimDelay = min(
                    self.task_parameters.StimDelayMax,
                    max(
                        self.task_parameters.StimDelayMin,
                        self.StimDelay[i_trial] +
                        self.task_parameters.StimDelayIncr))
        else:
            if not self.FixBroke[i_trial]:
                self.task_parameters.StimDelay = random_unif(
                    self.task_parameters.StimDelayMin,
                    self.task_parameters.StimDelayMax)
            else:
                self.task_parameters.StimDelay = self.StimDelay[i_trial]
        self.Timer.customStimDelay[i_trial] = time.time()

        # min sampling time
        if i_trial > self.task_parameters.StartEasyTrials:
            if self.task_parameters.MinSampleType == MinSampleType.FixMin:
                self.task_parameters.MinSample = \
                    self.task_parameters.MinSampleMin
            elif self.task_parameters.MinSampleType == \
                    MinSampleType.AutoIncr:
                # Check if animal completed pre-stimulus delay successfully
                if not self.FixBroke[i_trial]:
                    if self.Rewarded[i_trial]:
                        min_sample_incremented = self.MinSample[
                            i_trial] + self.task_parameters.MinSampleIncr
                        self.task_parameters.MinSample = min(
                            self.task_parameters.MinSampleMax,
                            max(self.task_parameters.MinSampleMin,
                                min_sample_incremented))
                    elif self.EarlyWithdrawal[i_trial]:
                        min_sample_decremented = self.MinSample[
                            i_trial] - self.task_parameters.MinSampleDecr
                        self.task_parameters.MinSample = max(
                            self.task_parameters.MinSampleMin,
                            min(self.task_parameters.MinSampleMax,
                                min_sample_decremented))
                else:
                    # Read new updated GUI values
                    self.task_parameters.MinSample = max(
                        self.task_parameters.MinSampleMin,
                        min(self.task_parameters.MinSampleMax,
                            self.MinSample[i_trial]))
            elif self.task_parameters.MinSampleType == \
                    MinSampleType.RandBetMinMax_DefIsMax:
                use_rand = rand(1, 1) < self.task_parameters.MinSampleRandProb
                if not use_rand:
                    self.task_parameters.MinSample = \
                        self.task_parameters.MinSampleMax
                else:
                    min_sample_difference = \
                        self.task_parameters.MinSampleMax - \
                        self.task_parameters.MinSampleMin
                    self.task_parameters.MinSample = \
                        min_sample_difference * \
                        rand(1, 1) + self.task_parameters.MinSampleMin
            elif MinSampleType.RandNumIntervalsMinMax_DefIsMax:
                use_rand = rand(1, 1) < self.task_parameters.MinSampleRandProb
                if not use_rand:
                    self.task_parameters.MinSample = \
                        self.task_parameters.MinSampleMax
                else:
                    self.task_parameters.MinSampleNumInterval = round(
                        self.task_parameters.MinSampleNumInterval)
                    if self.task_parameters.MinSampleNumInterval == 0 or \
                       self.task_parameters.MinSampleNumInterval == 1:
                        self.task_parameters.MinSample = \
                            self.task_parameters.MinSampleMin
                    else:
                        min_sample_difference = \
                            self.task_parameters.MinSampleMax - \
                            self.task_parameters.MinSampleMin
                        step = min_sample_difference / (
                            self.task_parameters.MinSampleNumInterval - 1)
                        intervals = list(
                            range(self.task_parameters.MinSampleMin,
                                  self.task_parameters.MinSampleMax + 1, step))
                        intervals_idx = randi(
                            1, self.task_parameters.MinSampleNumInterval)
                        print("Intervals:")  # disp("Intervals:");
                        print(intervals)  # disp(intervals)
                        self.task_parameters.MinSample = intervals[
                            intervals_idx]
            else:
                error('Unexpected MinSampleType value')
        self.Timer.customMinSampling[i_trial] = time.time()

        # feedback delay
        if self.task_parameters.FeedbackDelaySelection == \
                FeedbackDelaySelection.none:
            self.task_parameters.FeedbackDelay = 0
        elif self.task_parameters.FeedbackDelaySelection == \
                FeedbackDelaySelection.AutoIncr:
            # if no feedback was not completed then use the last value unless
            # then decrement the feedback.
            # Do we consider the case where 'broke_fixation' or
            # 'early_withdrawal' terminated early the trial?
            if not self.Feedback[i_trial]:
                feedback_delay_decremented = self.FeedbackDelay[
                    i_trial] - self.task_parameters.FeedbackDelayDecr
                self.task_parameters.FeedbackDelay = max(
                    self.task_parameters.FeedbackDelayMin,
                    min(self.task_parameters.FeedbackDelayMax,
                        feedback_delay_decremented))
            else:
                # Increase the feedback if the feedback was successfully
                # completed in the last trial, or use the the GUI value that
                # the user updated if needed.
                # Do we also here consider the case where 'broke_fixation' or
                # 'early_withdrawal' terminated early the trial?
                feedback_delay_incremented = self.FeedbackDelay[
                    i_trial] + self.task_parameters.FeedbackDelayIncr
                self.task_parameters.FeedbackDelay = min(
                    self.task_parameters.FeedbackDelayMax,
                    max(self.task_parameters.FeedbackDelayMin,
                        feedback_delay_incremented))
        elif FeedbackDelaySelection.TruncExp:
            self.task_parameters.FeedbackDelay = TruncatedExponential(
                self.task_parameters.FeedbackDelayMin,
                self.task_parameters.FeedbackDelayMax,
                self.task_parameters.FeedbackDelayTau)
        elif FeedbackDelaySelection.Fix:
            #     ATTEMPT TO GRAY OUT FIELDS
            if self.task_parametersMeta.FeedbackDelay.Style != 'edit':
                self.task_parametersMeta.FeedbackDelay.Style = 'edit'
            self.task_parameters.FeedbackDelay = \
                self.task_parameters.FeedbackDelayMax
        else:
            error('Unexpected FeedbackDelaySelection value')
        self.Timer.customFeedbackDelay[i_trial] = time.time()

        # Drawing future trials

        # Calculate bias
        # Consider bias only on the last 8 trials/
        # indicesRwdLi = find(self.Rewarded,8,'last');
        # if length(indicesRwdLi) ~= 0
        #   indicesRwd = indicesRwdLi(1);
        # else
        #   indicesRwd = 1;
        # end
        LAST_TRIALS = 20
        indicesRwd = iff(i_trial > LAST_TRIALS, i_trial - LAST_TRIALS, 1)
        # ndxRewd = self.Rewarded(indicesRwd:i_trial);
        choice_correct_slice = self.ChoiceCorrect[indicesRwd:i_trial + 1]
        choice_left_slice = self.ChoiceLeft[indicesRwd:i_trial + 1]
        left_rewarded_slice = self.LeftRewarded[indicesRwd:i_trial + 1]
        ndxLeftRewd = [
            choice_c and choice_l for choice_c, choice_l in zip(
                choice_correct_slice, choice_left_slice)
        ]
        ndxLeftRewDone = [
            l_rewarded
            and choice_l is not None for l_rewarded, choice_l in zip(
                left_rewarded_slice, choice_left_slice)
        ]
        ndxRightRewd = [
            choice_c and not choice_l for choice_c, choice_l in zip(
                choice_correct_slice, choice_left_slice)
        ]
        ndxRightRewDone = [
            not l_rewarded
            and choice_l is not None for l_rewarded, choice_l in zip(
                left_rewarded_slice, choice_left_slice)
        ]
        if not any(ndxLeftRewDone):
            # Since we don't have trials on this side, then measure by how good
            # the animals was performing on the other side. If it did bad on
            # the side then then consider this side performance to be good so
            # it'd still get more trials on the other side.
            PerfL = 1 - (sum(ndxRightRewd) / (LAST_TRIALS * 2))
        else:
            PerfL = sum(ndxLeftRewd) / sum(ndxLeftRewDone)
        if not any(ndxRightRewDone):
            PerfR = 1 - (sum(ndxLeftRewd) / (LAST_TRIALS * 2))
        else:
            PerfR = sum(ndxRightRewd) / sum(ndxRightRewDone)
        self.task_parameters.CalcLeftBias = (PerfL - PerfR) / 2 + 0.5

        choiceMadeTrials = [
            choice_c is not None for choice_c in self.ChoiceCorrect
        ]
        rewardedTrialsCount = sum([r is True for r in self.Rewarded])
        lengthChoiceMadeTrials = len(choiceMadeTrials)
        if lengthChoiceMadeTrials >= 1:
            performance = rewardedTrialsCount / lengthChoiceMadeTrials
            self.task_parameters.Performance = [
                f'{performance * 100:.2f}', '#/',
                str(lengthChoiceMadeTrials), 'T'
            ]
            performance = rewardedTrialsCount / (i_trial + 1)
            self.task_parameters.AllPerformance = [
                f'{performance * 100:.2f}', '#/',
                str(i_trial + 1), 'T'
            ]
            NUM_LAST_TRIALS = 20
            if i_trial > NUM_LAST_TRIALS:
                if lengthChoiceMadeTrials > NUM_LAST_TRIALS:
                    rewardedTrials_ = choiceMadeTrials[
                        lengthChoiceMadeTrials - NUM_LAST_TRIALS +
                        1:lengthChoiceMadeTrials + 1]
                    performance = sum(rewardedTrials_) / NUM_LAST_TRIALS
                    self.task_parameters.Performance = [
                        self.task_parameters.Performance, ' - ',
                        f'{performance * 100:.2f}', '#/',
                        str(NUM_LAST_TRIALS), 'T'
                    ]
                rewardedTrialsCount = sum(
                    self.Rewarded[i_trial - NUM_LAST_TRIALS + 1:i_trial + 1])
                performance = rewardedTrialsCount / NUM_LAST_TRIALS
                self.task_parameters.AllPerformance = [
                    self.task_parameters.AllPerformance, ' - ',
                    f'{performance * 100:.2f}', '#/',
                    str(NUM_LAST_TRIALS), 'T'
                ]
        self.Timer.customCalcBias[i_trial] = time.time()

        # Create future trials
        # Check if its time to generate more future trials
        if i_trial > len(self.DV) - Const.PRE_GENERATE_TRIAL_CHECK:
            # Do bias correction only if we have enough trials
            # sum(ndxRewd) > Const.BIAS_CORRECT_MIN_RWD_TRIALS
            if self.task_parameters.CorrectBias and i_trial > 7:
                LeftBias = self.task_parameters.CalcLeftBias
                # if LeftBias < 0.2 || LeftBias > 0.8 # Bias is too much,
                # swing it all the way to the other side
                # LeftBias = round(LeftBias);
                # else
                if 0.45 <= LeftBias and LeftBias <= 0.55:
                    LeftBias = 0.5
                if LeftBias is None:
                    print(f'Left bias is None.')
                    LeftBias = 0.5
            else:
                LeftBias = self.task_parameters.LeftBias
            self.Timer.customAdjustBias[i_trial] = time.time()

            # Adjustment of P(Omega) to make sure that sum(P(Omega))=1
            if self.task_parameters.StimulusSelectionCriteria != \
                    StimulusSelectionCriteria.BetaDistribution:
                omega_prob_sum = sum(
                    self.task_parameters.OmegaTable.columns.OmegaProb)
                # Avoid having no probability and avoid dividing by zero
                if omega_prob_sum == 0:
                    self.task_parameters.OmegaTable.columns.OmegaProb = [1] * \
                        len(self.task_parameters.OmegaTable.columns.OmegaProb)
                self.task_parameters.OmegaTable.columns.OmegaProb = [
                    omega_prob / omega_prob_sum for omega_prob in
                    self.task_parameters.OmegaTable.columns.OmegaProb
                ]
            self.Timer.customCalcOmega[i_trial] = time.time()

            # make future trials
            lastidx = len(self.DV) - 1
            # Generate guaranteed equal possibility of >0.5 and <0.5
            IsLeftRewarded = [0] * round(
                Const.PRE_GENERATE_TRIAL_COUNT * LeftBias) + [1] * round(
                    Const.PRE_GENERATE_TRIAL_COUNT * (1 - LeftBias))
            # Shuffle array and convert it
            random.Shuffle(IsLeftRewarded)
            IsLeftRewarded = [
                l_rewarded > LeftBias for l_rewarded in IsLeftRewarded
            ]
            self.Timer.customPrepNewTrials[i_trial] = time.time()
            for a in range(Const.PRE_GENERATE_TRIAL_COUNT):
                # If it's a fifty-fifty trial, then place stimulus in the
                # middle 50Fifty trials
                if rand(1, 1) < self.task_parameters.Percent50Fifty and \
                    (lastidx + a) > \
                        self.task_parameters.StartEasyTrials:
                    self.StimulusOmega[lastidx + a] = 0.5
                else:
                    if self.task_parameters.StimulusSelectionCriteria == \
                            StimulusSelectionCriteria.BetaDistribution:
                        # Divide beta by 4 if we are in an easy trial
                        beta_div_condition = (lastidx + a) <= \
                            self.task_parameters.StartEasyTrials
                        BetaDiv = iff(beta_div_condition, 4, 1)
                        betarnd_param = \
                            self.task_parameters.BetaDistAlphaNBeta / \
                            BetaDiv
                        Intensity = betarnd(betarnd_param, betarnd_param)
                        # prevent extreme values
                        Intensity = iff(Intensity < 0.1, 0.1, Intensity)
                        # prevent extreme values
                        Intensity = iff(Intensity > 0.9, 0.9, Intensity)
                    elif self.task_parameters.\
                        StimulusSelectionCriteria == \
                            StimulusSelectionCriteria.DiscretePairs:
                        if (lastidx + a) <= \
                                self.task_parameters.StartEasyTrials:
                            index = next(prob[0] for prob in enumerate(
                                self.task_parameters.OmegaTable.columns.
                                OmegaProb) if prob[1] > 0)
                            Intensity = \
                                self.task_parameters.OmegaTable.Omega[
                                    index] / 100
                        else:
                            # Choose a value randomly given the each value
                            # probability
                            Intensity = randsample(
                                self.task_parameters.OmegaTable.columns.Omega,
                                weights=self.task_parameters.OmegaTable.
                                columns.OmegaProb)[0] / 100
                    else:
                        error('Unexpected StimulusSelectionCriteria')
                    # In case of beta distribution, our distribution is
                    # symmetric, so prob < 0.5 is == prob > 0.5, so we can
                    # just pick the value that corrects the bias
                    if (IsLeftRewarded[a] and Intensity < 0.5) or \
                       (not IsLeftRewarded[a] and Intensity >= 0.5):
                        Intensity = -Intensity + 1
                    self.StimulusOmega[lastidx + a] = Intensity

                if self.task_parameters.ExperimentType == \
                        ExperimentType.Auditory:
                    DV = CalcAudClickTrain(lastidx + a)
                elif self.task_parameters.ExperimentType == \
                        ExperimentType.LightIntensity:
                    DV = CalcLightIntensity(lastidx + a, self)
                elif self.task_parameters.ExperimentType == \
                        ExperimentType.GratingOrientation:
                    DV = CalcGratingOrientation(lastidx + a)
                elif self.task_parameters.ExperimentType == \
                        ExperimentType.RandomDots:
                    DV = CalcDotsCoherence(lastidx + a)
                else:
                    error('Unexpected ExperimentType')
                if DV > 0:
                    self.LeftRewarded[lastidx + a] = True
                elif DV < 0:
                    self.LeftRewarded[lastidx + a] = False
                else:
                    # It's equal distribution
                    self.LeftRewarded[lastidx + a] = rand() < 0.5
                # cross-modality difficulty for plotting
                #  0 <= (left - right) / (left + right) <= 1
                self.DV[lastidx + a] = DV
            self.Timer.customGenNewTrials[i_trial] = time.time()
        else:
            self.Timer.customAdjustBias[i_trial] = 0
            self.Timer.customCalcOmega[i_trial] = 0
            self.Timer.customPrepNewTrials[i_trial] = 0
            self.Timer.customGenNewTrials[i_trial] = 0

        # Update RDK GUI
        self.task_parameters.OmegaTable.columns.RDK = [
            (value - 50) * 2
            for value in self.task_parameters.OmegaTable.columns.Omega
        ]
        # Set current stimulus for next trial
        DV = self.DV[i_trial + 1]
        if self.task_parameters.ExperimentType == \
                ExperimentType.RandomDots:
            self.task_parameters.CurrentStim = \
                f"{abs(DV / 0.01)}{iff(DV < 0, '# R cohr.', '# L cohr.')}"
        else:
            # Set between -100 to +100
            StimIntensity = f'{iff(DV > 0, (DV + 1) / 0.02, (DV - 1) / -0.02)}'
            self.task_parameters.CurrentStim = \
                f"{StimIntensity}{iff(DV < 0, '# R', '# L')}"

        self.Timer.customFinalizeUpdate[i_trial] = time.time()

        # determine if optogentics trial
        OptoEnabled = rand(1, 1) < self.task_parameters.OptoProb
        if i_trial < self.task_parameters.StartEasyTrials:
            OptoEnabled = False
        self.OptoEnabled[i_trial + 1] = OptoEnabled
        self.task_parameters.IsOptoTrial = iff(OptoEnabled, 'true', 'false')

        # determine if catch trial
        if i_trial < self.task_parameters.StartEasyTrials or \
                self.task_parameters.PercentCatch == 0:
            self.CatchTrial[i_trial + 1] = False
        else:
            every_n_trials = round(1 / self.task_parameters.PercentCatch)
            limit = round(every_n_trials * 0.2)
            lower_limit = every_n_trials - limit
            upper_limit = every_n_trials + limit
            if not self.Rewarded[i_trial] or i_trial + 1 < \
                    self.LastSuccessCatchTial + lower_limit:
                self.CatchTrial[i_trial + 1] = False
            elif i_trial + 1 < self.LastSuccessCatchTial + upper_limit:
                # TODO: If OmegaProb changed since last time, then redo it
                non_zero_prob = [
                    self.task_parameters.OmegaTable.Omega[i] / 100
                    for i, prob in enumerate(
                        self.task_parameters.OmegaTable.columns.OmegaProb)
                    if prob > 0
                ]
                complement_non_zero_prob = [1 - prob for prob in non_zero_prob]
                inverse_non_zero_prob = non_zero_prob[::-1]
                active_stim_idxs = GetCatchStimIdx(complement_non_zero_prob +
                                                   inverse_non_zero_prob)
                cur_stim_idx = GetCatchStimIdx(self.StimulusOmega[i_trial + 1])
                min_catch_counts = min(self.CatchCount[i]
                                       for i in active_stim_idxs)
                min_catch_idxs = list(
                    set(active_stim_idxs).intersection({
                        i
                        for i, cc in enumerate(self.CatchCount)
                        if floor(cc) == min_catch_counts
                    }))
                self.CatchTrial[i_trial + 1] = cur_stim_idx in min_catch_idxs
            else:
                self.CatchTrial[i_trial + 1] = True
        # Create as char vector rather than string so that
        # GUI sync doesn't complain
        self.task_parameters.IsCatch = iff(self.CatchTrial[i_trial + 1],
                                           'true', 'false')
        # Determine if Forced LED trial:
        if self.task_parameters.PortLEDtoCueReward:
            self.ForcedLEDTrial[i_trial + 1] = rand(1, 1) < \
                self.task_parameters.PercentForcedLEDTrial
        else:
            self.ForcedLEDTrial[i_trial + 1] = False
        self.Timer.customCatchNForceLed[i_trial] = time.time()
Example #5
0
# Default task-parameter values for the GUI / protocol configuration.
# NOTE(review): units for the TimeOut* values are presumably seconds — confirm
# against the state-machine code that consumes them.
TableNote = 'Edit Stim % to update RDK'
TimeOutBrokeFixation = 0
TimeOutEarlyWithdrawal = 0
TimeOutIncorrectChoice = 0
TimeOutMissedChoice = 0
TimeOutSkippedFeedback = 0
# Vevaiometric (confidence-report) plot settings.
VevaiometricMinWT = 0.5  # minimum waiting time included in the plot — presumably seconds; verify
VevaiometricNBin = 8  # number of bins on the plot's x-axis
VevaiometricShowPoints = 1  # truthy flag: overlay individual data points
VevaiometricYLim = 20  # y-axis limit for the plot
# Screen angle assigned to each lateral port (values from the
# VisualStimAngle enum; the names encode degrees).
VisualStimAnglePortLeft = VisualStimAngle.Degrees270
VisualStimAnglePortRight = VisualStimAngle.Degrees90
Wire1VideoTrigger = False  # when True, Wire output is pulsed to trigger video recording
# Random-dot kinematogram (RDK) / grating stimulus geometry.
# NOTE(review): units for the Aperture*/DotSize* values look like visual
# degrees given the *Degs* names below — confirm with the renderer.
ApertureSizeHeight = 36
ApertureSizeWidth = 36
CenterX = 0  # stimulus center offset, x
CenterY = 0  # stimulus center offset, y
# Area of the circular aperture, treating ApertureSizeWidth as its diameter.
CircleArea = math.pi * ((ApertureSizeWidth / 2)**2)
CyclesPerSecondDrift = 5
DotLifetimeSecs = 1
DotSizeInDegs = 2
DotSpeedDegsPerSec = 25
DrawRatio = 0.2
GaborSizeFactor = 1.2
GaussianFilterRatio = 0.1
# NOTE(review): uses a hard-coded 0.05 density here although DrawRatio is
# 0.2 — confirm whether nDots should be round(CircleArea * DrawRatio).
nDots = round(CircleArea * 0.05)
NumCycles = 20
Phase = 0  # Phase of the grating wave, in the range 0 to 360
ScreenDistCm = 30  # viewing distance from subject to screen, cm
ScreenWidthCm = 20  # physical screen width, cm
Example #6
0
    def __init__(self, bpod, task_parameters, data, i_trial):
        super().__init__(bpod)
        # Define ports
        lmr_air_ports = task_parameters.Ports_LMRAir
        LeftPort = floor(mod(lmr_air_ports / 1000, 10))
        CenterPort = floor(mod(lmr_air_ports / 100, 10))
        RightPort = floor(mod(lmr_air_ports / 10, 10))
        AirSolenoid = mod(task_parameters.Ports_LMRAir, 10)
        LeftPortOut = port_str(LeftPort, out=True)
        CenterPortOut = port_str(CenterPort, out=True)
        RightPortOut = port_str(RightPort, out=True)
        LeftPortIn = port_str(LeftPort)
        CenterPortIn = port_str(CenterPort)
        RightPortIn = port_str(RightPort)

        # Duration of the TTL signal to denote start and end of trial for 2P
        WireTTLDuration = DEFAULT_WIRE_TTL_DURATION

        # PWM = (255 * (100-Attenuation))/100
        LeftPWM = round((100 - task_parameters.LeftPokeAttenPrcnt) * 2.55)
        CenterPWM = round((100 - task_parameters.CenterPokeAttenPrcnt) * 2.55)
        RightPWM = round((100 - task_parameters.RightPokeAttenPrcnt) * 2.55)

        LEDErrorRate = DEFAULT_LED_ERROR_RATE

        IsLeftRewarded = data.Custom.LeftRewarded[i_trial]

        if task_parameters.ExperimentType == ExperimentType.Auditory:
            # In MATLAB: 'BNCState' instead of 'BNC1'
            DeliverStimulus = [('BNC1', 1)]
            ContDeliverStimulus = []
            StopStimulus = iff(task_parameters.StimAfterPokeOut, [],
                               [('BNC1', 0)])
            ChoiceStopStimulus = iff(task_parameters.StimAfterPokeOut,
                                     [('BNC1', 0)], [])
            EWDStopStimulus = [('BNC1', 0)]
        elif task_parameters.ExperimentType == \
                ExperimentType.LightIntensity:
            # Divide Intensity by 100 to get fraction value
            LeftPWMStim = round(data.Custom.LightIntensityLeft[i_trial] *
                                LeftPWM / 100)
            RightPWMStim = round(data.Custom.LightIntensityRight[i_trial] *
                                 RightPWM / 100)
            DeliverStimulus = [(pwm_str(LeftPort), LeftPWMStim),
                               (pwm_str(RightPort), RightPWMStim)]
            ContDeliverStimulus = DeliverStimulus
            StopStimulus = iff(task_parameters.StimAfterPokeOut,
                               DeliverStimulus, [])
            ChoiceStopStimulus = []
            EWDStopStimulus = []
        elif task_parameters.ExperimentType == \
                ExperimentType.GratingOrientation:
            rightPortAngle = VisualStimAngle.get_degrees(
                task_parameters.VisualStimAnglePortRight.value)
            leftPortAngle = VisualStimAngle.get_degrees(
                task_parameters.VisualStimAnglePortLeft.value)
            # Calculate the distance between right and left port angle to
            # determine whether we should use the circle arc between the two
            # values in the clock-wise or counter-clock-wise direction to
            # calculate the different difficulties.
            ccw = iff(
                mod(rightPortAngle - leftPortAngle, 360) < mod(
                    leftPortAngle - rightPortAngle, 360), True, False)
            if ccw:
                finalDV = data.Custom.DV[i_trial]
                if rightPortAngle < leftPortAngle:
                    rightPortAngle += 360
                angleDiff = rightPortAngle - leftPortAngle
                minAngle = leftPortAngle
            else:
                finalDV = -data.Custom.DV[i_trial]
                if leftPortAngle < rightPortAngle:
                    leftPortAngle += 360
                angleDiff = leftPortAngle - rightPortAngle
                minAngle = rightPortAngle
            # orientation = ((DVMax - DV)*(DVMAX-DVMin)*(
            #   MaxAngle - MinANgle)) + MinAngle
            gratingOrientation = ((1 - finalDV) * angleDiff / 2) + minAngle
            gratingOrientation = mod(gratingOrientation, 360)
            data.Custom.drawParams.stimType = DrawStimType.StaticGratings
            data.Custom.drawParams.gratingOrientation = gratingOrientation
            data.Custom.drawParams.numCycles = task_parameters.NumCycles
            data.Custom.drawParams.cyclesPerSecondDrift = \
                task_parameters.CyclesPerSecondDrift
            data.Custom.drawParams.phase = task_parameters.Phase
            data.Custom.drawParams.gaborSizeFactor = \
                task_parameters.GaborSizeFactor
            data.Custom.drawParams.gaussianFilterRatio = \
                task_parameters.GaussianFilterRatio
            # Start from the 5th byte
            # serializeAndWrite(data.dotsMapped_file, 5,
            #                   data.Custom.drawParams)
            # data.dotsMapped_file.data(1: 4) = typecast(uint32(1), 'uint8');

            DeliverStimulus = [('SoftCode', 5)]
            ContDeliverStimulus = []
            StopStimulus = iff(task_parameters.StimAfterPokeOut, [],
                               [('SoftCode', 6)])
            ChoiceStopStimulus = iff(task_parameters.StimAfterPokeOut,
                                     [('SoftCode', 6)], [])
            EWDStopStimulus = [('SoftCode', 6)]
        elif task_parameters.ExperimentType == ExperimentType.RandomDots:
            # Setup the parameters
            # Use 20% of the screen size. Assume apertureSize is the diameter
            task_parameters.circleArea = math.pi * \
                ((task_parameters.ApertureSizeWidth / 2) ** 2)
            task_parameters.nDots = round(task_parameters.CircleArea *
                                          task_parameters.DrawRatio)

            data.Custom.drawParams.stimType = DrawStimType.RDK
            data.Custom.drawParams.centerX = task_parameters.CenterX
            data.Custom.drawParams.centerY = task_parameters.CenterY
            data.Custom.drawParams.apertureSizeWidth = \
                task_parameters.ApertureSizeWidth
            data.Custom.drawParams.apertureSizeHeight = \
                task_parameters.ApertureSizeHeight
            data.Custom.drawParams.drawRatio = task_parameters.DrawRatio
            data.Custom.drawParams.mainDirection = floor(
                VisualStimAngle.get_degrees(
                    iff(IsLeftRewarded,
                        task_parameters.VisualStimAnglePortLeft.value,
                        task_parameters.VisualStimAnglePortRight.value)))
            data.Custom.drawParams.dotSpeed = \
                task_parameters.DotSpeedDegsPerSec
            data.Custom.drawParams.dotLifetimeSecs = \
                task_parameters.DotLifetimeSecs
            data.Custom.drawParams.coherence = data.Custom.DotsCoherence[
                i_trial]
            data.Custom.drawParams.screenWidthCm = \
                task_parameters.ScreenWidthCm
            data.Custom.drawParams.screenDistCm = \
                task_parameters.ScreenDistCm
            data.Custom.drawParams.dotSizeInDegs = \
                task_parameters.DotSizeInDegs

            # Start from the 5th byte
            # serializeAndWrite(data.dotsMapped_file, 5,
            #                   data.Custom.drawParams)
            # data.dotsMapped_file.data(1: 4) = \
            #   typecast(uint32(1), 'uint8');

            DeliverStimulus = [('SoftCode', 5)]
            ContDeliverStimulus = []
            StopStimulus = iff(task_parameters.StimAfterPokeOut, [],
                               [('SoftCode', 6)])
            ChoiceStopStimulus = iff(task_parameters.StimAfterPokeOut,
                                     [('SoftCode', 6)], [])
            EWDStopStimulus = [('SoftCode', 6)]
        else:
            error('Unexpected ExperimentType')

        # Valve opening is a bitmap. Open each valve separately by raising 2 to
        # the power of port number - 1
        # LeftValve = 2 ** (LeftPort - 1)
        # CenterValve = 2 ** (CenterPort - 1)
        # RightValve = 2 ** (RightPort - 1)
        # AirSolenoidOn = 2 ** (AirSolenoid - 1)
        LeftValve = LeftPort
        CenterValve = CenterPort
        RightValve = RightPort
        AirSolenoidOn = AirSolenoid

        LeftValveTime = GetValveTimes(data.Custom.RewardMagnitude[i_trial][0],
                                      LeftPort)
        CenterValveTime = GetValveTimes(
            data.Custom.CenterPortRewAmount[i_trial], CenterPort)
        RightValveTime = GetValveTimes(data.Custom.RewardMagnitude[i_trial][1],
                                       RightPort)

        RewardedPort = iff(IsLeftRewarded, LeftPort, RightPort)
        RewardedPortPWM = iff(IsLeftRewarded, LeftPWM, RightPWM)
        IncorrectConsequence = iff(
            not task_parameters.HabituateIgnoreIncorrect,
            str(MatrixState.WaitForPunishStart),
            str(MatrixState.RegisterWrongWaitCorrect))
        LeftActionState = iff(IsLeftRewarded,
                              str(MatrixState.WaitForRewardStart),
                              IncorrectConsequence)
        RightActionState = iff(IsLeftRewarded, IncorrectConsequence,
                               str(MatrixState.WaitForRewardStart))
        RewardIn = iff(IsLeftRewarded, LeftPortIn, RightPortIn)
        RewardOut = iff(IsLeftRewarded, LeftPortOut, RightPortOut)
        PunishIn = iff(IsLeftRewarded, RightPortIn, LeftPortIn)
        PunishOut = iff(IsLeftRewarded, RightPortOut, LeftPortOut)
        ValveTime = iff(IsLeftRewarded, LeftValveTime, RightValveTime)
        ValveCode = iff(IsLeftRewarded, LeftValve, RightValve)

        ValveOrWireSolenoid = 'Valve'
        if task_parameters.CutAirStimDelay and \
                task_parameters.CutAirSampling:
            AirFlowStimDelayOff = [(ValveOrWireSolenoid, AirSolenoidOn)]
            # AirFlowStimDelayOn = []
            AirFlowSamplingOff = [(ValveOrWireSolenoid, AirSolenoidOn)]
            # Must set it on again
            AirFlowSamplingOn = []
        elif task_parameters.CutAirStimDelay:
            AirFlowStimDelayOff = [(ValveOrWireSolenoid, AirSolenoidOn)]
            # AirFlowStimDelayOn = [(ValveOrWireSolenoid, AirSolenoidOff)]
            AirFlowSamplingOff = []
            AirFlowSamplingOn = []
        elif task_parameters.CutAirSampling:
            AirFlowStimDelayOff = []
            # AirFlowStimDelayOn = []
            AirFlowSamplingOff = [(ValveOrWireSolenoid, AirSolenoidOn)]
            AirFlowSamplingOn = []
        else:
            AirFlowStimDelayOff = []
            # AirFlowStimDelayOn = []
            AirFlowSamplingOff = []
            AirFlowSamplingOn = []

        if task_parameters.CutAirReward:
            AirFlowRewardOff = [('Valve', AirSolenoidOn)]
        else:
            AirFlowRewardOff = []
        AirFlowRewardOn = []

        # Check if to play beep at end of minimum sampling
        MinSampleBeep = iff(task_parameters.BeepAfterMinSampling,
                            [('SoftCode', 12)], [])
        MinSampleBeepDuration = iff(task_parameters.BeepAfterMinSampling, 0.01,
                                    0)
        # GUI option RewardAfterMinSampling
        # If center - reward is enabled, then a reward is given once MinSample
        # is over and no further sampling is given.
        RewardCenterPort = iff(task_parameters.RewardAfterMinSampling,
                               [('Valve', CenterValve)] + StopStimulus,
                               ContDeliverStimulus)
        Timer_CPRD = iff(
            task_parameters.RewardAfterMinSampling, CenterValveTime,
            task_parameters.StimulusTime - task_parameters.MinSample)

        # White Noise played as Error Feedback
        ErrorFeedback = iff(task_parameters.PlayNoiseforError,
                            [('SoftCode', 11)], [])

        # CatchTrial
        FeedbackDelayCorrect = iff(data.Custom.CatchTrial[i_trial],
                                   Const.FEEDBACK_CATCH_CORRECT_SEC,
                                   task_parameters.FeedbackDelay)

        # GUI option CatchError
        FeedbackDelayError = iff(task_parameters.CatchError,
                                 Const.FEEDBACK_CATCH_INCORRECT_SEC,
                                 task_parameters.FeedbackDelay)
        SkippedFeedbackSignal = iff(task_parameters.CatchError, [],
                                    ErrorFeedback)

        # Incorrect Choice signal
        if task_parameters.IncorrectChoiceSignalType == \
                IncorrectChoiceSignalType.NoisePulsePal:
            PunishmentDuration = 0.01
            IncorrectChoice_Signal = [('SoftCode', 11)]
        elif task_parameters.IncorrectChoiceSignalType == \
                IncorrectChoiceSignalType.BeepOnWire_1:
            PunishmentDuration = 0.25
            IncorrectChoice_Signal = [('Wire1', 1)]
        elif task_parameters.IncorrectChoiceSignalType == \
                IncorrectChoiceSignalType.PortLED:
            PunishmentDuration = 0.1
            IncorrectChoice_Signal = [(pwm_str(LeftPort), LeftPWM),
                                      (pwm_str(CenterPort), CenterPWM),
                                      (pwm_str(RightPort), RightPWM)]
        elif task_parameters.IncorrectChoiceSignalType == \
                IncorrectChoiceSignalType.none:
            PunishmentDuration = 0.01
            IncorrectChoice_Signal = []
        else:
            error('Unexpected IncorrectChoiceSignalType value')

        # ITI signal
        if task_parameters.ITISignalType == ITISignalType.Beep:
            ITI_Signal_Duration = 0.01
            ITI_Signal = [('SoftCode', 12)]
        elif task_parameters.ITISignalType == ITISignalType.PortLED:
            ITI_Signal_Duration = 0.1
            ITI_Signal = [(pwm_str(LeftPort), LeftPWM),
                          (pwm_str(CenterPort), CenterPWM),
                          (pwm_str(RightPort), RightPWM)]
        elif task_parameters.ITISignalType == ITISignalType.none:
            ITI_Signal_Duration = 0.01
            ITI_Signal = []
        else:
            error('Unexpected ITISignalType value')

        # Wire1 settings
        Wire1OutError = iff(task_parameters.Wire1VideoTrigger, [('Wire2', 2)],
                            [])
        Wire1OutCorrectCondition = task_parameters.Wire1VideoTrigger and \
            data.Custom.CatchTrial[i_trial]
        Wire1OutCorrect = iff(Wire1OutCorrectCondition, [('Wire2', 2)], [])

        # LED on the side lateral port to cue the rewarded side at the
        # beginning of the training. On auditory discrimination task, both
        # lateral ports are illuminated after end of stimulus delivery.
        if data.Custom.ForcedLEDTrial[i_trial]:
            ExtendedStimulus = [(pwm_str(RewardedPort), RewardedPortPWM)]
        elif task_parameters.ExperimentType == ExperimentType.Auditory:
            ExtendedStimulus = [(pwm_str(LeftPort), LeftPWM),
                                (pwm_str(RightPort), RightPWM)]
        else:
            ExtendedStimulus = []

        # Softcode handler for i_trial == 1 in HomeCage
        # to close training chamber door
        CloseChamber = iff(i_trial == 1 and data.Custom.IsHomeCage,
                           [('SoftCode', 30)], [])

        PCTimeout = task_parameters.PCTimeout
        # Build state matrix
        self.set_global_timer(1, FeedbackDelayCorrect)
        self.set_global_timer(2, FeedbackDelayError)
        self.set_global_timer(
            3,
            iff(task_parameters.TimeOutEarlyWithdrawal,
                task_parameters.TimeOutEarlyWithdrawal, 0.01))
        self.set_global_timer(4, task_parameters.ChoiceDeadLine)
        self.add_state(state_name=str(MatrixState.ITI_Signal),
                       state_timer=ITI_Signal_Duration,
                       state_change_conditions={
                           Bpod.Events.Tup: str(MatrixState.WaitForCenterPoke)
                       },
                       output_actions=ITI_Signal)
        self.add_state(state_name=str(MatrixState.WaitForCenterPoke),
                       state_timer=0,
                       state_change_conditions={
                           CenterPortIn: str(MatrixState.PreStimReward)
                       },
                       output_actions=[(pwm_str(CenterPort), CenterPWM)])
        PreStimRewardStateTimer = iff(
            task_parameters.PreStimuDelayCntrReward,
            GetValveTimes(task_parameters.PreStimuDelayCntrReward, CenterPort),
            0.01)
        self.add_state(state_name=str(MatrixState.PreStimReward),
                       state_timer=PreStimRewardStateTimer,
                       state_change_conditions={
                           Bpod.Events.Tup:
                           str(MatrixState.TriggerWaitForStimulus)
                       },
                       output_actions=iff(
                           task_parameters.PreStimuDelayCntrReward,
                           [('Valve', CenterValve)], []))
        # The next method is useful to close the 2 - photon shutter. It is
        # enabled by setting Optogenetics StartState to this state and end
        # state to ITI.
        self.add_state(state_name=str(MatrixState.TriggerWaitForStimulus),
                       state_timer=WireTTLDuration,
                       state_change_conditions={
                           CenterPortOut: str(MatrixState.StimDelayGrace),
                           Bpod.Events.Tup: str(MatrixState.WaitForStimulus)
                       },
                       output_actions=(CloseChamber + AirFlowStimDelayOff))
        self.add_state(state_name=str(MatrixState.WaitForStimulus),
                       state_timer=max(
                           0, task_parameters.StimDelay - WireTTLDuration),
                       state_change_conditions={
                           CenterPortOut: str(MatrixState.StimDelayGrace),
                           Bpod.Events.Tup: str(MatrixState.stimulus_delivery)
                       },
                       output_actions=AirFlowStimDelayOff)
        self.add_state(state_name=str(MatrixState.StimDelayGrace),
                       state_timer=task_parameters.StimDelayGrace,
                       state_change_conditions={
                           Bpod.Events.Tup: str(MatrixState.broke_fixation),
                           CenterPortIn:
                           str(MatrixState.TriggerWaitForStimulus)
                       },
                       output_actions=AirFlowStimDelayOff)
        self.add_state(
            state_name=str(MatrixState.broke_fixation),
            state_timer=iff(not PCTimeout,
                            task_parameters.TimeOutBrokeFixation, 0.01),
            state_change_conditions={Bpod.Events.Tup: str(MatrixState.ITI)},
            output_actions=ErrorFeedback)
        self.add_state(state_name=str(MatrixState.stimulus_delivery),
                       state_timer=task_parameters.MinSample,
                       state_change_conditions={
                           CenterPortOut: str(MatrixState.early_withdrawal),
                           Bpod.Events.Tup: str(MatrixState.BeepMinSampling)
                       },
                       output_actions=(DeliverStimulus + AirFlowSamplingOff))
        self.add_state(state_name=str(MatrixState.early_withdrawal),
                       state_timer=0,
                       state_change_conditions={
                           Bpod.Events.Tup:
                           str(MatrixState.timeOut_EarlyWithdrawal)
                       },
                       output_actions=(EWDStopStimulus + AirFlowSamplingOn +
                                       [('GlobalTimerTrig', EncTrig(3))]))
        self.add_state(state_name=str(MatrixState.BeepMinSampling),
                       state_timer=MinSampleBeepDuration,
                       state_change_conditions={
                           CenterPortOut:
                           str(MatrixState.TriggerWaitChoiceTimer),
                           Bpod.Events.Tup:
                           str(MatrixState.CenterPortRewardDelivery)
                       },
                       output_actions=(ContDeliverStimulus + MinSampleBeep))
        self.add_state(state_name=str(MatrixState.CenterPortRewardDelivery),
                       state_timer=Timer_CPRD,
                       state_change_conditions={
                           CenterPortOut:
                           str(MatrixState.TriggerWaitChoiceTimer),
                           Bpod.Events.Tup: str(MatrixState.WaitCenterPortOut)
                       },
                       output_actions=RewardCenterPort)
        # TODO: Stop stimulus is fired twice in case of center reward and then
        # wait for choice. Fix it such that it'll be always fired once.
        self.add_state(state_name=str(MatrixState.TriggerWaitChoiceTimer),
                       state_timer=0,
                       state_change_conditions={
                           Bpod.Events.Tup: str(MatrixState.WaitForChoice)
                       },
                       output_actions=(StopStimulus + ExtendedStimulus +
                                       [('GlobalTimerTrig', EncTrig(4))]))
        self.add_state(state_name=str(MatrixState.WaitCenterPortOut),
                       state_timer=0,
                       state_change_conditions={
                           CenterPortOut:
                           str(MatrixState.WaitForChoice),
                           LeftPortIn:
                           LeftActionState,
                           RightPortIn:
                           RightActionState,
                           'GlobalTimer4_End':
                           str(MatrixState.timeOut_missed_choice)
                       },
                       output_actions=(StopStimulus + ExtendedStimulus +
                                       [('GlobalTimerTrig', EncTrig(4))]))
        self.add_state(state_name=str(MatrixState.WaitForChoice),
                       state_timer=0,
                       state_change_conditions={
                           LeftPortIn:
                           LeftActionState,
                           RightPortIn:
                           RightActionState,
                           'GlobalTimer4_End':
                           str(MatrixState.timeOut_missed_choice)
                       },
                       output_actions=(StopStimulus + ExtendedStimulus))
        self.add_state(state_name=str(MatrixState.WaitForRewardStart),
                       state_timer=0,
                       state_change_conditions={
                           Bpod.Events.Tup: str(MatrixState.WaitForReward)
                       },
                       output_actions=(Wire1OutCorrect + ChoiceStopStimulus +
                                       [('GlobalTimerTrig', EncTrig(1))]))
        self.add_state(state_name=str(MatrixState.WaitForReward),
                       state_timer=FeedbackDelayCorrect,
                       state_change_conditions={
                           Bpod.Events.Tup: str(MatrixState.Reward),
                           'GlobalTimer1_End': str(MatrixState.Reward),
                           RewardOut: str(MatrixState.RewardGrace)
                       },
                       output_actions=AirFlowRewardOff)
        self.add_state(state_name=str(MatrixState.RewardGrace),
                       state_timer=task_parameters.FeedbackDelayGrace,
                       state_change_conditions={
                           RewardIn:
                           str(MatrixState.WaitForReward),
                           Bpod.Events.Tup:
                           str(MatrixState.timeOut_SkippedFeedback),
                           'GlobalTimer1_End':
                           str(MatrixState.timeOut_SkippedFeedback),
                           CenterPortIn:
                           str(MatrixState.timeOut_SkippedFeedback),
                           PunishIn:
                           str(MatrixState.timeOut_SkippedFeedback)
                       },
                       output_actions=AirFlowRewardOn)
        self.add_state(state_name=str(MatrixState.Reward),
                       state_timer=ValveTime,
                       state_change_conditions={
                           Bpod.Events.Tup: str(MatrixState.WaitRewardOut)
                       },
                       output_actions=[('Valve', ValveCode)])
        self.add_state(state_name=str(MatrixState.WaitRewardOut),
                       state_timer=1,
                       state_change_conditions={
                           Bpod.Events.Tup: str(MatrixState.ITI),
                           RewardOut: str(MatrixState.ITI)
                       },
                       output_actions=[])
        self.add_state(state_name=str(MatrixState.RegisterWrongWaitCorrect),
                       state_timer=0,
                       state_change_conditions={
                           Bpod.Events.Tup: str(MatrixState.WaitForChoice)
                       },
                       output_actions=[])
        self.add_state(state_name=str(MatrixState.WaitForPunishStart),
                       state_timer=0,
                       state_change_conditions={
                           Bpod.Events.Tup: str(MatrixState.WaitForPunish)
                       },
                       output_actions=(Wire1OutError + ChoiceStopStimulus +
                                       [('GlobalTimerTrig', EncTrig(2))]))
        # --- Punishment / feedback-delay states -------------------------------
        # Hold for the error feedback delay; a sustained poke (until Tup or
        # GlobalTimer2 elapses) leads to Punishment, while withdrawing
        # (PunishOut) enters the grace state below.
        self.add_state(state_name=str(MatrixState.WaitForPunish),
                       state_timer=FeedbackDelayError,
                       state_change_conditions={
                           Bpod.Events.Tup: str(MatrixState.Punishment),
                           'GlobalTimer2_End': str(MatrixState.Punishment),
                           PunishOut: str(MatrixState.PunishGrace)
                       },
                       output_actions=AirFlowRewardOff)
        # Grace window after withdrawing from the punish port: re-entering the
        # port (PunishIn) resumes WaitForPunish; timing out, GlobalTimer2
        # elapsing, or poking the center/reward ports instead counts as
        # skipped feedback.
        self.add_state(state_name=str(MatrixState.PunishGrace),
                       state_timer=task_parameters.FeedbackDelayGrace,
                       state_change_conditions={
                           PunishIn:
                           str(MatrixState.WaitForPunish),
                           Bpod.Events.Tup:
                           str(MatrixState.timeOut_SkippedFeedback),
                           'GlobalTimer2_End':
                           str(MatrixState.timeOut_SkippedFeedback),
                           CenterPortIn:
                           str(MatrixState.timeOut_SkippedFeedback),
                           RewardIn:
                           str(MatrixState.timeOut_SkippedFeedback)
                       },
                       output_actions=[])
        # Deliver the punishment signal (and re-enable air flow) for
        # PunishmentDuration, then move to the incorrect-choice timeout.
        self.add_state(state_name=str(MatrixState.Punishment),
                       state_timer=PunishmentDuration,
                       state_change_conditions={
                           Bpod.Events.Tup:
                           str(MatrixState.timeOut_IncorrectChoice)
                       },
                       output_actions=(IncorrectChoice_Signal +
                                       AirFlowRewardOn))
        # --- Early-withdrawal timeout: LED flashing ---------------------------
        # These two states ping-pong at LEDErrorRate (flash off / flash on)
        # until GlobalTimer3 elapses, which ends the timeout and enters ITI.
        self.add_state(state_name=str(MatrixState.timeOut_EarlyWithdrawal),
                       state_timer=LEDErrorRate,
                       state_change_conditions={
                           'GlobalTimer3_End':
                           str(MatrixState.ITI),
                           Bpod.Events.Tup:
                           str(MatrixState.timeOut_EarlyWithdrawalFlashOn)
                       },
                       output_actions=ErrorFeedback)
        # "Flash on" half of the cycle: same transitions, but also drives the
        # left/right port LEDs at their configured PWM levels.
        self.add_state(
            state_name=str(MatrixState.timeOut_EarlyWithdrawalFlashOn),
            state_timer=LEDErrorRate,
            state_change_conditions={
                'GlobalTimer3_End': str(MatrixState.ITI),
                Bpod.Events.Tup: str(MatrixState.timeOut_EarlyWithdrawal)
            },
            output_actions=(ErrorFeedback + [(pwm_str(LeftPort), LeftPWM),
                                             (pwm_str(RightPort), RightPWM)]))
        # --- Timeout states ----------------------------------------------------
        # When PCTimeout is set, the hardware timer collapses to 0.01 s —
        # presumably the timeout is then enforced on the PC side instead
        # (TODO confirm against the PCTimeout handling elsewhere).
        self.add_state(
            state_name=str(MatrixState.timeOut_IncorrectChoice),
            state_timer=iff(not PCTimeout,
                            task_parameters.TimeOutIncorrectChoice, 0.01),
            state_change_conditions={Bpod.Events.Tup: str(MatrixState.ITI)},
            output_actions=[])
        self.add_state(
            state_name=str(MatrixState.timeOut_SkippedFeedback),
            state_timer=(iff(not PCTimeout,
                             task_parameters.TimeOutSkippedFeedback, 0.01)),
            state_change_conditions={Bpod.Events.Tup: str(MatrixState.ITI)},
            # TODO: See how to get around this if PCTimeout
            output_actions=SkippedFeedbackSignal)
        # Missed choice: play error feedback and stop the stimulus.
        self.add_state(
            state_name=str(MatrixState.timeOut_missed_choice),
            state_timer=iff(not PCTimeout, task_parameters.TimeOutMissedChoice,
                            0.01),
            state_change_conditions={Bpod.Events.Tup: str(MatrixState.ITI)},
            output_actions=(ErrorFeedback + ChoiceStopStimulus))
        # --- Inter-trial interval ---------------------------------------------
        # Brief ITI pulse (WireTTLDuration) with air flow restored, then the
        # extended ITI which runs for the configured ITI time and exits the
        # state matrix.
        self.add_state(state_name=str(MatrixState.ITI),
                       state_timer=WireTTLDuration,
                       state_change_conditions={
                           Bpod.Events.Tup: str(MatrixState.ext_ITI)
                       },
                       output_actions=AirFlowRewardOn)
        self.add_state(state_name=str(MatrixState.ext_ITI),
                       state_timer=iff(not PCTimeout, task_parameters.ITI,
                                       0.01),
                       state_change_conditions={Bpod.Events.Tup: 'exit'},
                       output_actions=AirFlowRewardOn)

        # If Optogenetics/2-Photon is enabled for a particular state, then we
        # modify that given state such that it would send a signal to the
        # Arduino with the required offset delay to trigger the optogenetics
        # box.
        # Note: To precisely track your optogenetics signal, split the Arduino
        # output to the optogenetics box and feed it as an input to a Bpod
        # input TTL, e.g. Wire1. This way, the optogenetics signal gets
        # written as part of your data file. Don't forget to activate that
        # input in the Bpod main config.

        if data.Custom.OptoEnabled[i_trial]:
            # Convert seconds to millis as we will send ints to Arduino.
            # The uint32 value is reinterpreted as 4 raw bytes (uint8 view)
            # for transmission over the serial link.
            OptoDelay = np.array([task_parameters.OptoStartDelay * 1000],
                                 dtype=np.uint32)
            OptoDelay = OptoDelay.view(np.uint8)
            OptoTime = np.array([task_parameters.OptoMaxTime * 1000],
                                dtype=np.uint32)
            OptoTime = OptoTime.view(np.uint8)
            # Write to the opto serial port on real hardware, or in the
            # emulator only if it exposes an OptoSerial port.
            # NOTE(review): the buffers are uint8 views but are written with
            # format 'int8' — confirm fwrite treats these identically.
            if not EMULATOR_MODE or hasattr(PluginSerialPorts, 'OptoSerial'):
                fwrite(PluginSerialPorts.OptoSerial, OptoDelay, 'int8')
                fwrite(PluginSerialPorts.OptoSerial, OptoTime, 'int8')
            # Wire3 carries the opto start trigger, Wire4 the stop trigger.
            OptoStartEventIdx = \
                self.hardware.channels.output_channel_names.index('Wire3')
            OptoStopEventIdx = \
                self.hardware.channels.output_channel_names.index('Wire4')
            # Raise the corresponding wire output whenever the state machine
            # enters the configured start/end states; ext_ITI always stops the
            # opto signal so it cannot outlive the trial.
            tuples = [(str(task_parameters.OptoStartState1),
                       OptoStartEventIdx),
                      (str(task_parameters.OptoEndState1), OptoStopEventIdx),
                      (str(task_parameters.OptoEndState2), OptoStopEventIdx),
                      (str(MatrixState.ext_ITI), OptoStopEventIdx)]
            for state_name, event_idx in tuples:
                # NOTE(review): .index() raises ValueError if a configured
                # state name is not part of this trial's matrix — confirm the
                # task parameters are validated upstream.
                TrgtStateNum = self.state_names.index(state_name)
                self.output_matrix[TrgtStateNum][event_idx] = 1