Example #1
 def chao_da_celula(self, x, y):
     """ Dadas coordenadas do Turtle (x,y), retorna as coordenadas do início de uma célula.
         Por exemplo, na celula da origem com tamanho 20, a coordenada Turtle (10,10)
         representa o meio da célula. A chamada de função 'chao_da_celula(10, 10)' retorna
         as coordenadas de início dessa célula (0,0
     """
     chao_x = int(floor(x, self._tam_celula))
     chao_y = int(floor(y, self._tam_celula))
     return chao_x, chao_y
Example #2
def chao_da_celula(x, y):
    """ Dadas coordenadas do Turtle (x,y), retorna as coordenadas do início de uma célula.
        Por exemplo, na celula da origem com tamanho 20, a coordenada Turtle (10,10)
        representa o meio da célula. A chamada de função 'chao_da_celula(10, 10)' retorna
        as coordenadas de início dessa célula (0,0
        Dica: para entender, veja o exemplo da função: 'uso_do_floor()''
    """
    chao_x = int(floor(x, tam_celula))
    chao_y = int(floor(y, tam_celula))
    return chao_x, chao_y
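
The two snippets above (like examples #9 and #10 further down) call floor with two arguments, so they rely on a grid-snapping helper rather than math.floor. A minimal sketch of such a helper, assuming the second argument is the cell size:

def floor(value, size):
    # Snap value down to the nearest multiple of size, e.g.
    # floor(10, 20) == 0 and floor(-10, 20) == -20.
    return value // size * size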
Example #3
from math import floor

def mosaic(img):
    w, h = img.size
    n = 10  # 10 px per block
    for i in range(w):
        for j in range(h):
            position = (i, j)
            x = floor(i / n) * n
            y = floor(j / n) * n
            temp = (x, y)
            colors = img.getpixel(temp)
            # log('colors', colors)
            img.putpixel(position, colors)
    img.save('mosaic.jpg')
    img.show()
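
A possible way to drive this function, assuming Pillow is installed and 'input.jpg' is a stand-in filename:

from PIL import Image

img = Image.open('input.jpg').convert('RGB')  # hypothetical input file
mosaic(img)  # writes mosaic.jpg and opens a preview window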
Example #4
def to_jd2(year, month, day):
	'''Gregorian to Julian Day Count for years between 1801-2099'''
	# http://quasar.as.utexas.edu/BillInfo/JulianDatesG.html

	legal_date(year, month, day)

	if month <= 2:
		year = year - 1
		month = month + 12

	a = floor(year / 100)
	b = floor(a / 4)
	c = 2 - a + b
	e = floor(365.25 * (year + 4716))
	f = floor(30.6001 * (month + 1))
	return c + day + e + f - 1524.5
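
A quick sanity check of the arithmetic above, assuming floor is math.floor and legal_date accepts the date: for 2000-01-01 the month rolls back to year = 1999, month = 13, giving a = 19, b = 4, c = -13, e = 2452653, f = 428.

>>> to_jd2(2000, 1, 1)
2451544.5

That matches the Julian Day of 2000-01-01 00:00 UT.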
Example #5
def to_jd(year, month, day):
	legal_date(year, month, day)

	if month <= 2:
		leap_adj = 0
	elif isleap(year):
		leap_adj = -1
	else:
		leap_adj = -2

	return (
		EPOCH - 1 + (YEAR_DAYS * (year - 1)) +
		floor((year - 1) / LEAP_CYCLE_YEARS) +
		(-floor((year - 1) / LEAP_SUPPRESSION_YEARS)) +
		floor((year - 1) / INTERCALATION_CYCLE_YEARS) +
		floor((((367 * month) - 362) / 12) + leap_adj + day)
	)
Example #6
def from_jd(jd):
	'''Return Gregorian date in a (Y, M, D) tuple'''
	wjd = floor(jd - 0.5) + 0.5
	depoch = wjd - EPOCH

	quadricent = floor(depoch / INTERCALATION_CYCLE_DAYS)
	dqc = depoch % INTERCALATION_CYCLE_DAYS

	cent = floor(dqc / LEAP_SUPPRESSION_DAYS)
	dcent = dqc % LEAP_SUPPRESSION_DAYS

	quad = floor(dcent / LEAP_CYCLE_DAYS)
	dquad = dcent % LEAP_CYCLE_DAYS

	yindex = floor(dquad / YEAR_DAYS)
	year = (
		quadricent * INTERCALATION_CYCLE_YEARS +
		cent * LEAP_SUPPRESSION_YEARS +
		quad * LEAP_CYCLE_YEARS + yindex
	)

	if not (cent == 4 or yindex == 4):
		year += 1

	yearday = wjd - to_jd(year, 1, 1)

	leap = isleap(year)

	if yearday < 58 + leap:
		leap_adj = 0
	elif leap:
		leap_adj = 1
	else:
		leap_adj = 2

	month = floor((((yearday + leap_adj) * 12) + 373) / 367)
	day = int(wjd - to_jd(year, month, 1)) + 1

	return (year, month, day)
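
Both to_jd and from_jd lean on module-level constants that are not shown here. Plausible definitions, assuming the standard Gregorian cycle arithmetic (4 years = 1461 days, 100 years = 36524 days, 400 years = 146097 days) and the proleptic Gregorian epoch:

EPOCH = 1721425.5                  # Julian Day of Gregorian 0001-01-01
YEAR_DAYS = 365
LEAP_CYCLE_YEARS = 4
LEAP_CYCLE_DAYS = 1461             # 4 * 365 + 1
LEAP_SUPPRESSION_YEARS = 100
LEAP_SUPPRESSION_DAYS = 36524      # 25 * 1461 - 1
INTERCALATION_CYCLE_YEARS = 400
INTERCALATION_CYCLE_DAYS = 146097  # 4 * 36524 + 1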
Example #7
    def render(self):
        if (utils.sum_points_inside_flat_poly(*self.parent.canvas) <= 4):
            return
        color_profile = random.choice(self.colors)

        min_x = utils.floor(min([p.x for p in self.parent.canvas]))
        max_x = utils.ceil(max([p.x for p in self.parent.canvas]))
        min_z = utils.floor(min([p.z for p in self.parent.canvas]))
        max_z = utils.ceil(max([p.z for p in self.parent.canvas]))
        min_y = utils.floor(min([p.y for p in self.parent.canvas]))

        # Cut the canvas into quarters and fill one quarter with colors.
        # Then, copy that quarter into the other three quarters.
        width = utils.floor(((max_x - min_x + 1) + 1) / 2)
        depth = utils.floor(((max_z - min_z + 1) + 1) / 2)

        points = [[-1 for j in xrange(depth)] for i in xrange(width)]
        points_left = []
        for i in xrange(width):
            for j in xrange(depth):
                points_left.append((i, j))
        bounds = utils.Box(Vec(0, 0, 0), width, 1, depth)
        p = Vec(0, 0, 0)
        color_num = 0
        prev_dir = random.randint(0, 3)
        next_dir = random.randint(0, 3)
        while len(points_left) > 0:
            # pick random starting point and walk around the matrix
            point_index = random.randint(0, len(points_left) - 1)
            p = Vec(points_left[point_index][0], 0,
                    points_left[point_index][1])

            while (bounds.containsPoint(p) and points[p.x][p.z] == -1
                   and len(points_left) > 0):
                points[p.x][p.z] = color_num
                points_left.remove((p.x, p.z))

                # pick random direction to walk, try to keep walking same
                # direction
                if random.randint(0, self._walk_weight) != 0:
                    next_dir = prev_dir
                else:
                    while next_dir == prev_dir:
                        next_dir = random.randint(0, 3)
                if next_dir == 0:  # right
                    p += Vec(1, 0, 0)
                elif next_dir == 1:  # down
                    p += Vec(0, 0, 1)
                elif next_dir == 2:  # left
                    p += Vec(-1, 0, 0)
                else:  # up
                    p += Vec(0, 0, -1)
                prev_dir = next_dir
            color_num = (color_num + 1) % len(color_profile)

        for j in xrange(max_z - min_z + 1):
            for i in xrange(max_x - min_x + 1):
                p = self.parent.loc + Vec(min_x + i, min_y, min_z + j)
                self.parent.parent.setblock(p, self.mat)
                if i < width:
                    i_adj = i
                else:
                    i_adj = 2 * width - 1 - i
                if j < depth:
                    j_adj = j
                else:
                    j_adj = 2 * depth - 1 - j
                self.parent.parent.blocks[p].data = \
                    color_profile[points[i_adj][j_adj]]

        if not self.ruin:
            return
        # this chunk of code is copied from CheckerRug's render() method
        pn = perlin.SimplexNoise(256)
        c = self.parent.canvasCenter()
        y = self.parent.canvasHeight()
        r = random.randint(1, 1000)
        maxd = max(1, self.parent.canvasWidth(), self.parent.canvasLength())
        for x in utils.iterate_points_inside_flat_poly(*self.parent.canvas):
            p = x + self.parent.loc
            d = ((Vec2f(x.x, x.z) - c).mag()) / maxd
            n = (pn.noise3((p.x + r) / 4.0, y / 4.0, p.z / 4.0) + 1.0) / 2.0
            if (n < d):
                self.parent.parent.setblock(p, materials._floor)
                self.parent.parent.blocks[p].data = 0
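
The quarter-mirroring in the loop above hinges on reflecting an index past the midpoint: i_adj = i for the first half and 2 * width - 1 - i for the second. A tiny standalone illustration of that index math, with a made-up quarter of width 3:

width = 3
quarter = ['red', 'lime', 'blue']
full_row = [quarter[i if i < width else 2 * width - 1 - i]
            for i in range(2 * width)]
print(full_row)  # ['red', 'lime', 'blue', 'blue', 'lime', 'red']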
Example #8
    def update(self, i_trial):
        # Standard values

        # Stores which lateral port the animal poked into (if any)
        self.ChoiceLeft[i_trial] = None
        # Stores whether the animal poked into the correct port (if any)
        self.ChoiceCorrect[i_trial] = None
        # Signals whether confidence was used in this trial. Set to false if
        # the lateral-port choice timed out (i.e., MissedChoice(i) is true).
        # It should also be set to false (but currently is not, due to a bug)
        # if the animal poked a lateral port but didn't complete the feedback
        # period (even using grace).
        self.Feedback[i_trial] = True
        # How long the animal spent waiting for the reward (whether in correct
        # or in incorrect ports)
        self.FeedbackTime[i_trial] = None
        # Signals whether the animal broke fixation during stimulus delay state
        self.FixBroke[i_trial] = False
        # Signals whether the animal broke fixation during sampling but before
        # min-sampling ends
        self.EarlyWithdrawal[i_trial] = False
        # Signals whether the animal correctly finished min-sampling but failed
        # to poke any of the lateral ports within ChoiceDeadLine period
        self.MissedChoice[i_trial] = False
        # How long the animal remained fixated in center poke
        self.FixDur[i_trial] = None
        # How long between sample end and making a choice (timeout-choice
        # trials are excluded)
        self.MT[i_trial] = None
        # How long the animal sampled. If RewardAfterMinSampling is enabled and
        # animal completed min sampling, then it's equal to MinSample time,
        # otherwise it's how long the animal remained fixated in center-port
        # until it either poked-out or the max allowed sampling time was
        # reached.
        self.ST[i_trial] = None
        # Signals whether a reward was given to the animal (it also includes
        # if the animal poked into the correct reward port but poked out
        # afterwards and didn't receive a reward, due to 'RewardGrace' being
        # counted as reward).
        self.Rewarded[i_trial] = False
        # Signals whether a center-port reward was given after min-sampling
        # ends.
        self.RewardAfterMinSampling[i_trial] = False
        # Tracks the amount of water the animal received up to this point.
        # TODO: Check if RewardReceivedTotal is needed and calculate it using
        # the CalcRewObtained() function.
        # It will be updated later.
        self.RewardReceivedTotal[i_trial + 1] = 0

        self.TrialNumber[i_trial] = i_trial

        self.Timer.customInitialize[i_trial] = time.time()

        # Checking states and rewriting standard

        # Extract the states that were used in the last trial
        statesVisitedThisTrialNames = self.RawData.StatesVisitedNames(i_trial)
        statesVisitedThisTrialTimes = self.RawData.StatesVisitedTimes(i_trial)
        if str(MatrixState.WaitForStimulus) in statesVisitedThisTrialNames:
            lastWaitForStimulusStateTimes = statesVisitedThisTrialTimes[str(
                MatrixState.WaitForStimulus)][-1]
            lastTriggerWaitForStimulusStateTimes = statesVisitedThisTrialTimes[
                str(MatrixState.TriggerWaitForStimulus)][-1]
            self.FixDur[i_trial] = lastWaitForStimulusStateTimes[1] - \
                lastWaitForStimulusStateTimes[0] + \
                lastTriggerWaitForStimulusStateTimes[1] - \
                lastTriggerWaitForStimulusStateTimes[0]
        if str(MatrixState.stimulus_delivery) in statesVisitedThisTrialNames:
            stimulus_deliveryStateTimes = statesVisitedThisTrialTimes[str(
                MatrixState.stimulus_delivery)]
            if self.task_parameters.RewardAfterMinSampling:
                self.ST[i_trial] = diff(stimulus_deliveryStateTimes)
            else:
                # The 'CenterPortRewardDelivery' state would exist even if
                # 'RewardAfterMinSampling' is not active; in that case it
                # means that min sampling is done and we are in the optional
                # sampling stage.
                if str(MatrixState.CenterPortRewardDelivery) in \
                        statesVisitedThisTrialNames and \
                        self.task_parameters.StimulusTime > \
                        self.task_parameters.MinSample:
                    CenterPortRewardDeliveryStateTimes = \
                        statesVisitedThisTrialTimes[
                            str(MatrixState.CenterPortRewardDelivery)]
                    self.ST[i_trial] = [
                        CenterPortRewardDeliveryStateTimes[0][1] -
                        stimulus_deliveryStateTimes[0][0]
                    ]
                else:
                    # This covers early_withdrawal
                    self.ST[i_trial] = diff(stimulus_deliveryStateTimes)

        if str(MatrixState.WaitForChoice) in statesVisitedThisTrialNames and \
            str(MatrixState.timeOut_missed_choice) not in \
                statesVisitedThisTrialNames:
            WaitForChoiceStateTimes = statesVisitedThisTrialTimes[str(
                MatrixState.WaitForChoice)]
            WaitForChoiceStateStartTimes = [
                start_time for start_time, end_time in WaitForChoiceStateTimes
            ]
            # We might have more than one WaitForChoice if
            # HabituateIgnoreIncorrect is enabled
            self.MT[i_trial] = diff(WaitForChoiceStateStartTimes[:2])

        # Extract the trial outcome. First check whether it was a wrong
        # choice, or a HabituateIgnoreIncorrect trial whose first choice was
        # wrong
        if str(MatrixState.WaitForPunishStart) in \
            statesVisitedThisTrialNames or \
           str(MatrixState.RegisterWrongWaitCorrect) in \
                statesVisitedThisTrialNames:
            self.ChoiceCorrect[i_trial] = False
            # Correct choice = left
            if self.LeftRewarded[i_trial]:
                self.ChoiceLeft[i_trial] = False  # Left not chosen
            else:
                self.ChoiceLeft[i_trial] = True
            # Feedback waiting time
            if str(MatrixState.WaitForPunish) in statesVisitedThisTrialNames:
                WaitForPunishStateTimes = statesVisitedThisTrialTimes[str(
                    MatrixState.WaitForPunish)]
                WaitForPunishStartStateTimes = statesVisitedThisTrialTimes[str(
                    MatrixState.WaitForPunishStart)]
                self.FeedbackTime[i_trial] = WaitForPunishStateTimes[-1][
                    1] - WaitForPunishStartStateTimes[0][0]
            else:  # It was a RegisterWrongWaitCorrect state
                self.FeedbackTime[i_trial] = None
        # CorrectChoice
        elif str(MatrixState.WaitForRewardStart) in \
                statesVisitedThisTrialNames:
            self.ChoiceCorrect[i_trial] = True
            if self.CatchTrial[i_trial]:
                catch_stim_idx = GetCatchStimIdx(self.StimulusOmega[i_trial])
                # Lookup the stimulus probability and increase by its
                # 1/frequency.
                stim_val = self.StimulusOmega[i_trial] * 100
                if stim_val < 50:
                    stim_val = 100 - stim_val
                stim_prob = self.task_parameters.OmegaTable.columns.OmegaProb[
                    self.task_parameters.OmegaTable.columns.Omega.index(
                        stim_val)]
                sum_all_prob = sum(
                    self.task_parameters.OmegaTable.columns.OmegaProb)
                stim_prob = (1 + sum_all_prob - stim_prob) / sum_all_prob
                self.CatchCount[catch_stim_idx] += stim_prob
                self.LastSuccessCatchTial = i_trial
            # Feedback waiting time
            if str(MatrixState.WaitForReward) in statesVisitedThisTrialNames:
                WaitForRewardStateTimes = statesVisitedThisTrialTimes[str(
                    MatrixState.WaitForReward)]
                WaitForRewardStartStateTimes = statesVisitedThisTrialTimes[str(
                    MatrixState.WaitForRewardStart)]
                self.FeedbackTime[i_trial] = WaitForRewardStateTimes[-1][
                    1] - WaitForRewardStartStateTimes[0][0]
                # Correct choice = left
                if self.LeftRewarded[i_trial]:
                    self.ChoiceLeft[i_trial] = True  # Left chosen
                else:
                    self.ChoiceLeft[i_trial] = False
            else:
                warning("'WaitForReward' state should always appear"
                        " if 'WaitForRewardStart' was initiated")
        elif str(MatrixState.broke_fixation) in statesVisitedThisTrialNames:
            self.FixBroke[i_trial] = True
        elif str(MatrixState.early_withdrawal) in statesVisitedThisTrialNames:
            self.EarlyWithdrawal[i_trial] = True
        elif str(MatrixState.timeOut_missed_choice) in \
                statesVisitedThisTrialNames:
            self.Feedback[i_trial] = False
            self.MissedChoice[i_trial] = True
        if str(MatrixState.timeOut_SkippedFeedback) in \
                statesVisitedThisTrialNames:
            self.Feedback[i_trial] = False
        if str(MatrixState.Reward) in statesVisitedThisTrialNames:
            self.Rewarded[i_trial] = True
            self.RewardReceivedTotal[i_trial] += \
                self.task_parameters.RewardAmount
        if str(MatrixState.CenterPortRewardDelivery) in \
                statesVisitedThisTrialNames and \
           self.task_parameters.RewardAfterMinSampling:
            self.RewardAfterMinSampling[i_trial] = True
            self.RewardReceivedTotal[i_trial] += \
                self.task_parameters.CenterPortRewAmount
        if str(MatrixState.WaitCenterPortOut) in statesVisitedThisTrialNames:
            WaitCenterPortOutStateTimes = statesVisitedThisTrialTimes[str(
                MatrixState.WaitCenterPortOut)]
            self.ReactionTime[i_trial] = diff(WaitCenterPortOutStateTimes)
        else:
            # Assign with -1 so we can differentiate it from None trials
            # where the state potentially existed but we didn't calculate it
            self.ReactionTime[i_trial] = -1
        # State-independent fields
        self.StimDelay[i_trial] = self.task_parameters.StimDelay
        self.FeedbackDelay[i_trial] = self.task_parameters.FeedbackDelay
        self.MinSample[i_trial] = self.task_parameters.MinSample
        self.RewardMagnitude[i_trial + 1] = [
            self.task_parameters.RewardAmount,
            self.task_parameters.RewardAmount
        ]
        self.CenterPortRewAmount[i_trial +
                                 1] = self.task_parameters.CenterPortRewAmount
        self.PreStimCntrReward[
            i_trial + 1] = self.task_parameters.PreStimuDelayCntrReward
        self.Timer.customExtractData[i_trial] = time.time()

        # If we are running grating experiments,
        # add the grating orientation that was used
        if self.task_parameters.ExperimentType == \
                ExperimentType.GratingOrientation:
            self.GratingOrientation[
                i_trial] = self.drawParams.gratingOrientation

        # Updating Delays
        # stimulus delay
        if self.task_parameters.StimDelayAutoincrement:
            if self.FixBroke[i_trial]:
                self.task_parameters.StimDelay = max(
                    self.task_parameters.StimDelayMin,
                    min(
                        self.task_parameters.StimDelayMax,
                        self.StimDelay[i_trial] -
                        self.task_parameters.StimDelayDecr))
            else:
                self.task_parameters.StimDelay = min(
                    self.task_parameters.StimDelayMax,
                    max(
                        self.task_parameters.StimDelayMin,
                        self.StimDelay[i_trial] +
                        self.task_parameters.StimDelayIncr))
        else:
            if not self.FixBroke[i_trial]:
                self.task_parameters.StimDelay = random_unif(
                    self.task_parameters.StimDelayMin,
                    self.task_parameters.StimDelayMax)
            else:
                self.task_parameters.StimDelay = self.StimDelay[i_trial]
        self.Timer.customStimDelay[i_trial] = time.time()

        # min sampling time
        if i_trial > self.task_parameters.StartEasyTrials:
            if self.task_parameters.MinSampleType == MinSampleType.FixMin:
                self.task_parameters.MinSample = \
                    self.task_parameters.MinSampleMin
            elif self.task_parameters.MinSampleType == \
                    MinSampleType.AutoIncr:
                # Check if animal completed pre-stimulus delay successfully
                if not self.FixBroke[i_trial]:
                    if self.Rewarded[i_trial]:
                        min_sample_incremented = self.MinSample[
                            i_trial] + self.task_parameters.MinSampleIncr
                        self.task_parameters.MinSample = min(
                            self.task_parameters.MinSampleMax,
                            max(self.task_parameters.MinSampleMin,
                                min_sample_incremented))
                    elif self.EarlyWithdrawal[i_trial]:
                        min_sample_decremented = self.MinSample[
                            i_trial] - self.task_parameters.MinSampleDecr
                        self.task_parameters.MinSample = max(
                            self.task_parameters.MinSampleMin,
                            min(self.task_parameters.MinSampleMax,
                                min_sample_decremented))
                else:
                    # Read new updated GUI values
                    self.task_parameters.MinSample = max(
                        self.task_parameters.MinSampleMin,
                        min(self.task_parameters.MinSampleMax,
                            self.MinSample[i_trial]))
            elif self.task_parameters.MinSampleType == \
                    MinSampleType.RandBetMinMax_DefIsMax:
                use_rand = rand(1, 1) < self.task_parameters.MinSampleRandProb
                if not use_rand:
                    self.task_parameters.MinSample = \
                        self.task_parameters.MinSampleMax
                else:
                    min_sample_difference = \
                        self.task_parameters.MinSampleMax - \
                        self.task_parameters.MinSampleMin
                    self.task_parameters.MinSample = \
                        min_sample_difference * \
                        rand(1, 1) + self.task_parameters.MinSampleMin
            elif self.task_parameters.MinSampleType == \
                    MinSampleType.RandNumIntervalsMinMax_DefIsMax:
                use_rand = rand(1, 1) < self.task_parameters.MinSampleRandProb
                if not use_rand:
                    self.task_parameters.MinSample = \
                        self.task_parameters.MinSampleMax
                else:
                    self.task_parameters.MinSampleNumInterval = round(
                        self.task_parameters.MinSampleNumInterval)
                    if self.task_parameters.MinSampleNumInterval == 0 or \
                       self.task_parameters.MinSampleNumInterval == 1:
                        self.task_parameters.MinSample = \
                            self.task_parameters.MinSampleMin
                    else:
                        min_sample_difference = \
                            self.task_parameters.MinSampleMax - \
                            self.task_parameters.MinSampleMin
                        step = min_sample_difference / (
                            self.task_parameters.MinSampleNumInterval - 1)
                        intervals = list(
                            range(self.task_parameters.MinSampleMin,
                                  self.task_parameters.MinSampleMax + 1, step))
                        intervals_idx = randi(
                            1, self.task_parameters.MinSampleNumInterval)
                        print("Intervals:")  # disp("Intervals:");
                        print(intervals)  # disp(intervals)
                        self.task_parameters.MinSample = intervals[
                            intervals_idx]
            else:
                error('Unexpected MinSampleType value')
        self.Timer.customMinSampling[i_trial] = time.time()

        # feedback delay
        if self.task_parameters.FeedbackDelaySelection == \
                FeedbackDelaySelection.none:
            self.task_parameters.FeedbackDelay = 0
        elif self.task_parameters.FeedbackDelaySelection == \
                FeedbackDelaySelection.AutoIncr:
            # If feedback was not completed in the last trial, decrement the
            # feedback delay.
            # Should we consider the case where 'broke_fixation' or
            # 'early_withdrawal' terminated the trial early?
            if not self.Feedback[i_trial]:
                feedback_delay_decremented = self.FeedbackDelay[
                    i_trial] - self.task_parameters.FeedbackDelayDecr
                self.task_parameters.FeedbackDelay = max(
                    self.task_parameters.FeedbackDelayMin,
                    min(self.task_parameters.FeedbackDelayMax,
                        feedback_delay_decremented))
            else:
                # Increase the feedback delay if feedback was successfully
                # completed in the last trial, or use the GUI value that the
                # user updated if needed.
                # Should we also consider here the case where
                # 'broke_fixation' or 'early_withdrawal' terminated the trial
                # early?
                feedback_delay_incremented = self.FeedbackDelay[
                    i_trial] + self.task_parameters.FeedbackDelayIncr
                self.task_parameters.FeedbackDelay = min(
                    self.task_parameters.FeedbackDelayMax,
                    max(self.task_parameters.FeedbackDelayMin,
                        feedback_delay_incremented))
        elif self.task_parameters.FeedbackDelaySelection == \
                FeedbackDelaySelection.TruncExp:
            self.task_parameters.FeedbackDelay = TruncatedExponential(
                self.task_parameters.FeedbackDelayMin,
                self.task_parameters.FeedbackDelayMax,
                self.task_parameters.FeedbackDelayTau)
        elif self.task_parameters.FeedbackDelaySelection == \
                FeedbackDelaySelection.Fix:
            #     ATTEMPT TO GRAY OUT FIELDS
            if self.task_parametersMeta.FeedbackDelay.Style != 'edit':
                self.task_parametersMeta.FeedbackDelay.Style = 'edit'
            self.task_parameters.FeedbackDelay = \
                self.task_parameters.FeedbackDelayMax
        else:
            error('Unexpected FeedbackDelaySelection value')
        self.Timer.customFeedbackDelay[i_trial] = time.time()

        # Drawing future trials

        # Calculate bias
        # Consider bias only over the last LAST_TRIALS trials.
        # indicesRwdLi = find(self.Rewarded,8,'last');
        # if length(indicesRwdLi) ~= 0
        #   indicesRwd = indicesRwdLi(1);
        # else
        #   indicesRwd = 1;
        # end
        LAST_TRIALS = 20
        indicesRwd = iff(i_trial > LAST_TRIALS, i_trial - LAST_TRIALS, 1)
        # ndxRewd = self.Rewarded(indicesRwd:i_trial);
        choice_correct_slice = self.ChoiceCorrect[indicesRwd:i_trial + 1]
        choice_left_slice = self.ChoiceLeft[indicesRwd:i_trial + 1]
        left_rewarded_slice = self.LeftRewarded[indicesRwd:i_trial + 1]
        ndxLeftRewd = [
            choice_c and choice_l for choice_c, choice_l in zip(
                choice_correct_slice, choice_left_slice)
        ]
        ndxLeftRewDone = [
            l_rewarded
            and choice_l is not None for l_rewarded, choice_l in zip(
                left_rewarded_slice, choice_left_slice)
        ]
        ndxRightRewd = [
            choice_c and not choice_l for choice_c, choice_l in zip(
                choice_correct_slice, choice_left_slice)
        ]
        ndxRightRewDone = [
            not l_rewarded
            and choice_l is not None for l_rewarded, choice_l in zip(
                left_rewarded_slice, choice_left_slice)
        ]
        if not any(ndxLeftRewDone):
            # Since we don't have trials on this side, measure by how well
            # the animal was performing on the other side. If it did badly on
            # that side, then consider this side's performance to be good, so
            # it would still get more trials on the other side.
            PerfL = 1 - (sum(ndxRightRewd) / (LAST_TRIALS * 2))
        else:
            PerfL = sum(ndxLeftRewd) / sum(ndxLeftRewDone)
        if not any(ndxRightRewDone):
            PerfR = 1 - (sum(ndxLeftRewd) / (LAST_TRIALS * 2))
        else:
            PerfR = sum(ndxRightRewd) / sum(ndxRightRewDone)
        self.task_parameters.CalcLeftBias = (PerfL - PerfR) / 2 + 0.5
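        # Note: PerfL and PerfR are each in [0, 1], so (PerfL - PerfR) / 2
        # + 0.5 maps their difference into [0, 1], with 0.5 meaning no bias.
        # Values above 0.5 (better performance on the left) make the
        # bias-correction code further down generate fewer left-rewarded
        # future trials, and vice versa.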

        choiceMadeTrials = [
            choice_c is not None for choice_c in self.ChoiceCorrect
        ]
        rewardedTrialsCount = sum([r is True for r in self.Rewarded])
        lengthChoiceMadeTrials = len(choiceMadeTrials)
        if lengthChoiceMadeTrials >= 1:
            performance = rewardedTrialsCount / lengthChoiceMadeTrials
            self.task_parameters.Performance = [
                f'{performance * 100:.2f}', '#/',
                str(lengthChoiceMadeTrials), 'T'
            ]
            performance = rewardedTrialsCount / (i_trial + 1)
            self.task_parameters.AllPerformance = [
                f'{performance * 100:.2f}', '#/',
                str(i_trial + 1), 'T'
            ]
            NUM_LAST_TRIALS = 20
            if i_trial > NUM_LAST_TRIALS:
                if lengthChoiceMadeTrials > NUM_LAST_TRIALS:
                    rewardedTrials_ = choiceMadeTrials[
                        lengthChoiceMadeTrials - NUM_LAST_TRIALS +
                        1:lengthChoiceMadeTrials + 1]
                    performance = sum(rewardedTrials_) / NUM_LAST_TRIALS
                    self.task_parameters.Performance = [
                        self.task_parameters.Performance, ' - ',
                        f'{performance * 100:.2f}', '#/',
                        str(NUM_LAST_TRIALS), 'T'
                    ]
                rewardedTrialsCount = sum(
                    self.Rewarded[i_trial - NUM_LAST_TRIALS + 1:i_trial + 1])
                performance = rewardedTrialsCount / NUM_LAST_TRIALS
                self.task_parameters.AllPerformance = [
                    self.task_parameters.AllPerformance, ' - ',
                    f'{performance * 100:.2f}', '#/',
                    str(NUM_LAST_TRIALS), 'T'
                ]
        self.Timer.customCalcBias[i_trial] = time.time()

        # Create future trials
        # Check if it's time to generate more future trials
        if i_trial > len(self.DV) - Const.PRE_GENERATE_TRIAL_CHECK:
            # Do bias correction only if we have enough trials
            # sum(ndxRewd) > Const.BIAS_CORRECT_MIN_RWD_TRIALS
            if self.task_parameters.CorrectBias and i_trial > 7:
                LeftBias = self.task_parameters.CalcLeftBias
                # if LeftBias < 0.2 || LeftBias > 0.8 # Bias is too much,
                # swing it all the way to the other side
                # LeftBias = round(LeftBias);
                # else
                if 0.45 <= LeftBias and LeftBias <= 0.55:
                    LeftBias = 0.5
                if LeftBias is None:
                    print('Left bias is None.')
                    LeftBias = 0.5
            else:
                LeftBias = self.task_parameters.LeftBias
            self.Timer.customAdjustBias[i_trial] = time.time()

            # Adjustment of P(Omega) to make sure that sum(P(Omega))=1
            if self.task_parameters.StimulusSelectionCriteria != \
                    StimulusSelectionCriteria.BetaDistribution:
                omega_prob_sum = sum(
                    self.task_parameters.OmegaTable.columns.OmegaProb)
                # Avoid having no probability and avoid dividing by zero
                if omega_prob_sum == 0:
                    self.task_parameters.OmegaTable.columns.OmegaProb = [1] * \
                        len(self.task_parameters.OmegaTable.columns.OmegaProb)
                self.task_parameters.OmegaTable.columns.OmegaProb = [
                    omega_prob / omega_prob_sum for omega_prob in
                    self.task_parameters.OmegaTable.columns.OmegaProb
                ]
            self.Timer.customCalcOmega[i_trial] = time.time()

            # make future trials
            lastidx = len(self.DV) - 1
            # Generate guaranteed equal possibility of >0.5 and <0.5
            IsLeftRewarded = [0] * round(
                Const.PRE_GENERATE_TRIAL_COUNT * LeftBias) + [1] * round(
                    Const.PRE_GENERATE_TRIAL_COUNT * (1 - LeftBias))
            # Shuffle array and convert it
            random.shuffle(IsLeftRewarded)
            IsLeftRewarded = [
                l_rewarded > LeftBias for l_rewarded in IsLeftRewarded
            ]
            self.Timer.customPrepNewTrials[i_trial] = time.time()
            for a in range(Const.PRE_GENERATE_TRIAL_COUNT):
                # If it's a fifty-fifty trial, then place the stimulus in the
                # middle (50Fifty trials)
                if rand(1, 1) < self.task_parameters.Percent50Fifty and \
                    (lastidx + a) > \
                        self.task_parameters.StartEasyTrials:
                    self.StimulusOmega[lastidx + a] = 0.5
                else:
                    if self.task_parameters.StimulusSelectionCriteria == \
                            StimulusSelectionCriteria.BetaDistribution:
                        # Divide beta by 4 if we are in an easy trial
                        beta_div_condition = (lastidx + a) <= \
                            self.task_parameters.StartEasyTrials
                        BetaDiv = iff(beta_div_condition, 4, 1)
                        betarnd_param = \
                            self.task_parameters.BetaDistAlphaNBeta / \
                            BetaDiv
                        Intensity = betarnd(betarnd_param, betarnd_param)
                        # prevent extreme values
                        Intensity = iff(Intensity < 0.1, 0.1, Intensity)
                        # prevent extreme values
                        Intensity = iff(Intensity > 0.9, 0.9, Intensity)
                    elif self.task_parameters.\
                        StimulusSelectionCriteria == \
                            StimulusSelectionCriteria.DiscretePairs:
                        if (lastidx + a) <= \
                                self.task_parameters.StartEasyTrials:
                            index = next(prob[0] for prob in enumerate(
                                self.task_parameters.OmegaTable.columns.
                                OmegaProb) if prob[1] > 0)
                            Intensity = \
                                self.task_parameters.OmegaTable.Omega[
                                    index] / 100
                        else:
                            # Choose a value randomly given each value's
                            # probability
                            Intensity = randsample(
                                self.task_parameters.OmegaTable.columns.Omega,
                                weights=self.task_parameters.OmegaTable.
                                columns.OmegaProb)[0] / 100
                    else:
                        error('Unexpected StimulusSelectionCriteria')
                    # In case of beta distribution, our distribution is
                    # symmetric, so prob < 0.5 is == prob > 0.5, so we can
                    # just pick the value that corrects the bias
                    if (IsLeftRewarded[a] and Intensity < 0.5) or \
                       (not IsLeftRewarded[a] and Intensity >= 0.5):
                        Intensity = -Intensity + 1
                    self.StimulusOmega[lastidx + a] = Intensity

                if self.task_parameters.ExperimentType == \
                        ExperimentType.Auditory:
                    DV = CalcAudClickTrain(lastidx + a)
                elif self.task_parameters.ExperimentType == \
                        ExperimentType.LightIntensity:
                    DV = CalcLightIntensity(lastidx + a, self)
                elif self.task_parameters.ExperimentType == \
                        ExperimentType.GratingOrientation:
                    DV = CalcGratingOrientation(lastidx + a)
                elif self.task_parameters.ExperimentType == \
                        ExperimentType.RandomDots:
                    DV = CalcDotsCoherence(lastidx + a)
                else:
                    error('Unexpected ExperimentType')
                if DV > 0:
                    self.LeftRewarded[lastidx + a] = True
                elif DV < 0:
                    self.LeftRewarded[lastidx + a] = False
                else:
                    # It's equal distribution
                    self.LeftRewarded[lastidx + a] = rand() < 0.5
                # cross-modality difficulty for plotting
                # -1 <= (left - right) / (left + right) <= 1
                self.DV[lastidx + a] = DV
            self.Timer.customGenNewTrials[i_trial] = time.time()
        else:
            self.Timer.customAdjustBias[i_trial] = 0
            self.Timer.customCalcOmega[i_trial] = 0
            self.Timer.customPrepNewTrials[i_trial] = 0
            self.Timer.customGenNewTrials[i_trial] = 0

        # Update RDK GUI
        self.task_parameters.OmegaTable.columns.RDK = [
            (value - 50) * 2
            for value in self.task_parameters.OmegaTable.columns.Omega
        ]
        # Set current stimulus for next trial
        DV = self.DV[i_trial + 1]
        if self.task_parameters.ExperimentType == \
                ExperimentType.RandomDots:
            self.task_parameters.CurrentStim = \
                f"{abs(DV / 0.01)}{iff(DV < 0, '# R cohr.', '# L cohr.')}"
        else:
            # Set between -100 to +100
            StimIntensity = f'{iff(DV > 0, (DV + 1) / 0.02, (DV - 1) / -0.02)}'
            self.task_parameters.CurrentStim = \
                f"{StimIntensity}{iff(DV < 0, '# R', '# L')}"

        self.Timer.customFinalizeUpdate[i_trial] = time.time()

        # determine if optogenetics trial
        OptoEnabled = rand(1, 1) < self.task_parameters.OptoProb
        if i_trial < self.task_parameters.StartEasyTrials:
            OptoEnabled = False
        self.OptoEnabled[i_trial + 1] = OptoEnabled
        self.task_parameters.IsOptoTrial = iff(OptoEnabled, 'true', 'false')

        # determine if catch trial
        if i_trial < self.task_parameters.StartEasyTrials or \
                self.task_parameters.PercentCatch == 0:
            self.CatchTrial[i_trial + 1] = False
        else:
            every_n_trials = round(1 / self.task_parameters.PercentCatch)
            limit = round(every_n_trials * 0.2)
            lower_limit = every_n_trials - limit
            upper_limit = every_n_trials + limit
            if not self.Rewarded[i_trial] or i_trial + 1 < \
                    self.LastSuccessCatchTial + lower_limit:
                self.CatchTrial[i_trial + 1] = False
            elif i_trial + 1 < self.LastSuccessCatchTial + upper_limit:
                # TODO: If OmegaProb changed since last time, then redo it
                non_zero_prob = [
                    self.task_parameters.OmegaTable.Omega[i] / 100
                    for i, prob in enumerate(
                        self.task_parameters.OmegaTable.columns.OmegaProb)
                    if prob > 0
                ]
                complement_non_zero_prob = [1 - prob for prob in non_zero_prob]
                inverse_non_zero_prob = non_zero_prob[::-1]
                active_stim_idxs = GetCatchStimIdx(complement_non_zero_prob +
                                                   inverse_non_zero_prob)
                cur_stim_idx = GetCatchStimIdx(self.StimulusOmega[i_trial + 1])
                min_catch_counts = min(self.CatchCount[i]
                                       for i in active_stim_idxs)
                min_catch_idxs = list(
                    set(active_stim_idxs).intersection({
                        i
                        for i, cc in enumerate(self.CatchCount)
                        if floor(cc) == min_catch_counts
                    }))
                self.CatchTrial[i_trial + 1] = cur_stim_idx in min_catch_idxs
            else:
                self.CatchTrial[i_trial + 1] = True
        # Create as char vector rather than string so that
        # GUI sync doesn't complain
        self.task_parameters.IsCatch = iff(self.CatchTrial[i_trial + 1],
                                           'true', 'false')
        # Determine if Forced LED trial:
        if self.task_parameters.PortLEDtoCueReward:
            self.ForcedLEDTrial[i_trial + 1] = rand(1, 1) < \
                self.task_parameters.PercentForcedLEDTrial
        else:
            self.ForcedLEDTrial[i_trial + 1] = False
        self.Timer.customCatchNForceLed[i_trial] = time.time()
Example #9
def offset(point):
    "Return offset of point in tiles."
    x = (floor(point.x, 20) + 200) / 20
    y = (180 - floor(point.y, 20)) / 20
    index = int(x + y * 20)
    return index
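
A short sketch of what offset computes, assuming the same two-argument grid floor as in examples #1 and #2, 20 px tiles, and both coordinates ranging over [-200, 200) (a 20 x 20 grid with row 0 at the top):

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

def floor(value, size):  # assumed grid-snapping helper
    return value // size * size

print(offset(Point(-200, 199)))  # 0   (top-left tile)
print(offset(Point(199, -200)))  # 399 (bottom-right tile)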
Example #10
def uso_do_floor():
    """ Função de exemplo do uso do floor (chao_da_celula) """
    for i in range(-200, 200):
        print("{}\t{}".format(i, int(floor(i, 20))))
Example #11
File: floors.py  Project: orphu/mcdungeon
    def render(self):
        if (utils.sum_points_inside_flat_poly(*self.parent.canvas) <= 4):
            return
        color_profile = random.choice(self.colors)

        min_x = utils.floor(min([p.x for p in self.parent.canvas]))
        max_x = utils.ceil(max([p.x for p in self.parent.canvas]))
        min_z = utils.floor(min([p.z for p in self.parent.canvas]))
        max_z = utils.ceil(max([p.z for p in self.parent.canvas]))
        min_y = utils.floor(min([p.y for p in self.parent.canvas]))

        # Cut the canvas into quarters and fill one quarter with colors.
        # Then, copy that quarter into the other three quarters.
        width = utils.floor(((max_x - min_x + 1) + 1) / 2)
        depth = utils.floor(((max_z - min_z + 1) + 1) / 2)

        points = [[-1 for j in xrange(depth)] for i in xrange(width)]
        points_left = []
        for i in xrange(width):
            for j in xrange(depth):
                points_left.append((i, j))
        bounds = utils.Box(Vec(0, 0, 0), width, 1, depth)
        p = Vec(0, 0, 0)
        color_num = 0
        prev_dir = random.randint(0, 3)
        next_dir = random.randint(0, 3)
        while len(points_left) > 0:
            # pick random starting point and walk around the matrix
            point_index = random.randint(0, len(points_left) - 1)
            p = Vec(points_left[point_index][0],
                    0,
                    points_left[point_index][1])

            while (bounds.containsPoint(p) and
                   points[p.x][p.z] == -1 and
                   len(points_left) > 0):
                points[p.x][p.z] = color_num
                points_left.remove((p.x, p.z))

                # pick random direction to walk, try to keep walking same
                # direction
                if random.randint(0, self._walk_weight) != 0:
                    next_dir = prev_dir
                else:
                    while next_dir == prev_dir:
                        next_dir = random.randint(0, 3)
                if next_dir == 0:  # right
                    p += Vec(1, 0, 0)
                elif next_dir == 1:  # down
                    p += Vec(0, 0, 1)
                elif next_dir == 2:  # left
                    p += Vec(-1, 0, 0)
                else:  # up
                    p += Vec(0, 0, -1)
                prev_dir = next_dir
            color_num = (color_num + 1) % len(color_profile)

        for j in xrange(max_z - min_z + 1):
            for i in xrange(max_x - min_x + 1):
                p = self.parent.loc + Vec(min_x + i, min_y, min_z + j)
                self.parent.parent.setblock(p, self.mat)
                if i < width:
                    i_adj = i
                else:
                    i_adj = 2 * width - 1 - i
                if j < depth:
                    j_adj = j
                else:
                    j_adj = 2 * depth - 1 - j
                self.parent.parent.blocks[p].data = \
                    color_profile[points[i_adj][j_adj]]
        # Ruined
        if (self.ruin):
            self.ruinrender()
Example #12
def analyzeData(tradingHistory, dailyLogs):
    '''
    Analyzes the trading history and daily logs of the passed account. 
    Outputs results as a dictionary, in the following format:
        {
            "General Stats":
            {
                "Start Date":str, "End Date":str, "Days Run":int, "Starting Assets":float, "Ending Assets":float, 
                Yearly Growth Rate":float, "Average Trades Per Day":float, "Average Trade %Profit":float, "Average Hold Length":float
            },
            "Stats vs Time":dataFrame,
            "Trade Stats":dataFrame
        }

    -tradingHistory is the dataframe containing the trading history of an account
    -dailyLogs is a dataframe containing the logs of an account
    '''
    #Get overall statistics
    startDate = dailyLogs.at[0, "Date"]
    endDate = dailyLogs.at[len(dailyLogs)-1, "Date"]
    daysRun = utils.getDayDifference(startDate, endDate)
    
    startingAssets = dailyLogs.at[0, "TotalAssets"]
    endingAssets = dailyLogs.at[len(dailyLogs)-1, "TotalAssets"]

    estimatedYearlyGrowth = utils.estimateYearlyGrowth(startingAssets, endingAssets, daysRun)  #solved for r in compound interest formula, assuming compounding monthly
    
    averageTradesPerDay = len(dailyLogs)/daysRun

    #Get statistics over time
    columns = ["Date", "TotalAssets", "Buys", "Sells", "AssetsInvested", "EstYearlyGrowthRate(Past30Days)"]
    statsOverTime = pd.DataFrame(columns=columns)
    statsOverTime["Date"] = pd.date_range(start=startDate, end=endDate)

    ########## Daily Log Analysis ##########
    #Iterate over daily logs to populate statistics
    logIndex = 0
    for rowIndex in range(0, daysRun+1, 1):
        currentStatDate = str(statsOverTime.at[rowIndex, "Date"])[:10]

        totalBuys = 0
        totalSells = 0

        while (True):
            row = dailyLogs.loc[logIndex]
            
            if (str(row["Date"]) == currentStatDate):  #if still on the same date
                if (row["Action"] == "Buy"):
                    totalBuys += 1
                elif (row["Action"] == "Buy"):
                    totalSells += 1
                elif (logIndex>0):  
                    if ((row["Action"] == "CHECKPOINT") and (str(dailyLogs.loc[logIndex-1]["Date"]) != str(row["Date"]))):  #No trades were conducted on that day, but we still need to update our assets
                        assets = float(row["TotalAssets"])
                        statsOverTime.at[rowIndex, "TotalAssets"] = assets
                        statsOverTime.at[rowIndex, "Buys"] = 0
                        statsOverTime.at[rowIndex, "Sells"] = 0
                        statsOverTime.at[rowIndex, "AssetsInvested"] = float(row["StockAssets"])/ assets
                        statsOverTime.at[rowIndex, "EstYearlyGrowthRate(Past30Days)"] = statsOverTime.at[rowIndex-1, "EstYearlyGrowthRate(Past30Days)"]
                        pastRowIndex = utils.floor(rowIndex-30, floor=0)
                        estRate = utils.estimateYearlyGrowth(statsOverTime.at[pastRowIndex, "TotalAssets"], assets, rowIndex-pastRowIndex)
                        statsOverTime.at[rowIndex, "EstYearlyGrowthRate(Past30Days)"] = estRate
                        logIndex += 1
                        break
                
                logIndex += 1  #go to next log entry

                if (logIndex == len(dailyLogs)):
                    #We're at the end of the log. Fill in final row of stats
                    row = dailyLogs.loc[logIndex-1]  #We hit a transition b/w consecutive dates. Go back one row in logs

                    assets = float(row["TotalAssets"])  #assets at the end of day
                    statsOverTime.at[rowIndex, "TotalAssets"] = assets
                    
                    #Estimate yearly growth rate based on past 30 days
                    if (rowIndex == 0):
                        statsOverTime.at[rowIndex, "EstYearlyGrowthRate(Past30Days)"] = 1
                    else:
                        pastRowIndex = utils.floor(rowIndex-30, floor=0)
                        estRate = utils.estimateYearlyGrowth(statsOverTime.at[pastRowIndex, "TotalAssets"], assets, rowIndex-pastRowIndex)
                        statsOverTime.at[rowIndex, "EstYearlyGrowthRate(Past30Days)"] = estRate

                    #Total Buys and Sells
                    statsOverTime.at[rowIndex, "Buys"] = totalBuys
                    statsOverTime.at[rowIndex, "Sells"] = totalSells

                    #Percent of assets invested
                    statsOverTime.at[rowIndex, "AssetsInvested"] = float(row["StockAssets"]) / assets

                    #Exit loop
                    break

            else:
                if (rowIndex+1<daysRun+1):
                    nextStatRowDate = str(statsOverTime.at[rowIndex+1, "Date"])[:10]
                else:
                    break

                if (utils.compareDates(nextStatRowDate, row["Date"]) == -1) and (currentStatDate != str(dailyLogs.loc[logIndex-1]["Date"])):
                    #Gap in the daily logs: use previous date to fill
                    statsOverTime.at[rowIndex, "TotalAssets"] = statsOverTime.at[rowIndex-1, "TotalAssets"]
                    statsOverTime.at[rowIndex, "Buys"] = 0
                    statsOverTime.at[rowIndex, "Sells"] = 0
                    statsOverTime.at[rowIndex, "AssetsInvested"] = statsOverTime.at[rowIndex-1, "AssetsInvested"]
                    statsOverTime.at[rowIndex, "EstYearlyGrowthRate(Past30Days)"] = statsOverTime.at[rowIndex-1, "EstYearlyGrowthRate(Past30Days)"]

                    #Move on to next day in stats
                    break

                else:
                    row = dailyLogs.loc[logIndex-1]  #We hit a transition b/w consecutive dates. Go back one row in logs

                    assets = float(row["TotalAssets"])  #assets at the end of day
                    statsOverTime.at[rowIndex, "TotalAssets"] = assets
                    
                    #Estimate yearly growth rate based on past 30 days
                    if (rowIndex == 0):
                        statsOverTime.at[rowIndex, "EstYearlyGrowthRate(Past30Days)"] = 1
                    else:
                        pastRowIndex = utils.floor(rowIndex-30, floor=0)
                        estRate = utils.estimateYearlyGrowth(statsOverTime.at[pastRowIndex, "TotalAssets"], assets, rowIndex-pastRowIndex)
                        if (rowIndex < 20):
                            statsOverTime.at[rowIndex, "EstYearlyGrowthRate(Past30Days)"] = 1
                        else:
                            statsOverTime.at[rowIndex, "EstYearlyGrowthRate(Past30Days)"] = estRate

                    #Total Buys and Sells
                    statsOverTime.at[rowIndex, "Buys"] = totalBuys
                    statsOverTime.at[rowIndex, "Sells"] = totalSells

                    #Percent of assets invested
                    statsOverTime.at[rowIndex, "AssetsInvested"] = float(row["StockAssets"]) / assets

                    #Move on to next day in stats
                    break

    statsOverTime["Date"] = pd.to_datetime(statsOverTime["Date"], format='%Y-%m-%d')
    statsOverTime.set_index("Date", inplace=True)


    ########## Trading History Analysis ##########
    tradeColumns = ["Ticker", "Date Bought", "Buy Price", "Date Sold", "Sell Price", "Quantity", "Buy In Amount", "Commission", "Trade Profit", "Percent Profit", "Hold Length"]
    tradeStats = pd.DataFrame(columns=tradeColumns)

    tradeStats[["Ticker", "Date Bought", "Buy Price", "Date Sold", "Sell Price", "Quantity", "Commission", "Trade Profit"]] = tradingHistory[["Ticker", "Date Bought", "Buy Price", "Date Sold", "Sell Price", "Quantity", "Commission", "Trade Profit"]]
    tradeStats["Percent Profit"] = ((tradingHistory["Sell Price"] - tradingHistory["Buy Price"]) / (tradingHistory["Buy Price"]))*100  #NOTE Excludes commission
    tradeStats["Hold Length"] = tradingHistory.apply(lambda row: utils.getDayDifference(row["Date Bought"], row["Date Sold"]), axis=1)  #https://engineering.upside.com/a-beginners-guide-to-optimizing-pandas-code-for-speed-c09ef2c6a4d6
    tradeStats["Buy In Amount"] = tradingHistory["Buy Price"] * tradingHistory["Quantity"]

    #More general stats
    averagePercentProfit = tradeStats["Percent Profit"].mean()
    averageHoldLength = tradeStats["Hold Length"].mean()
    
    #Gather return values
    generalResults = {}
    generalResults["Start Date"] = startDate
    generalResults["End Date"] = endDate
    generalResults["Days Run"] = daysRun
    generalResults["Starting Assets"] = startingAssets
    generalResults["Ending Assets"] = endingAssets
    generalResults["Yearly Growth Rate"] =  estimatedYearlyGrowth
    generalResults["Average Trades Per Day"] = averageTradesPerDay
    generalResults["Average Trade %Profit"] = averagePercentProfit
    generalResults["Average Hold Length"] =  averageHoldLength

    returnDict = {}
    returnDict["General Stats"] = generalResults
    returnDict["Stats vs Time"] = statsOverTime
    returnDict["Trade Stats"] = tradeStats

    return returnDict
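
Unlike the grid floor in examples #1 and #2, the utils.floor(rowIndex - 30, floor=0) calls above read as a lower-bound clamp. A minimal sketch of such a helper, assuming that reading is correct:

def floor(value, floor=0):
    # Clamp value so it never drops below the given floor, e.g.
    # floor(-12, floor=0) == 0 and floor(7, floor=0) == 7.
    return max(value, floor)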
Example #13
    def __init__(self, bpod, task_parameters, data, i_trial):
        super().__init__(bpod)
        # Define ports
        lmr_air_ports = task_parameters.Ports_LMRAir
        LeftPort = floor(mod(lmr_air_ports / 1000, 10))
        CenterPort = floor(mod(lmr_air_ports / 100, 10))
        RightPort = floor(mod(lmr_air_ports / 10, 10))
        AirSolenoid = mod(task_parameters.Ports_LMRAir, 10)
        LeftPortOut = port_str(LeftPort, out=True)
        CenterPortOut = port_str(CenterPort, out=True)
        RightPortOut = port_str(RightPort, out=True)
        LeftPortIn = port_str(LeftPort)
        CenterPortIn = port_str(CenterPort)
        RightPortIn = port_str(RightPort)

        # Duration of the TTL signal to denote start and end of trial for 2P
        WireTTLDuration = DEFAULT_WIRE_TTL_DURATION

        # PWM = (255 * (100-Attenuation))/100
        LeftPWM = round((100 - task_parameters.LeftPokeAttenPrcnt) * 2.55)
        CenterPWM = round((100 - task_parameters.CenterPokeAttenPrcnt) * 2.55)
        RightPWM = round((100 - task_parameters.RightPokeAttenPrcnt) * 2.55)
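        # e.g. 0% attenuation gives full brightness (PWM = 255); 100% gives 0.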

        LEDErrorRate = DEFAULT_LED_ERROR_RATE

        IsLeftRewarded = data.Custom.LeftRewarded[i_trial]

        if task_parameters.ExperimentType == ExperimentType.Auditory:
            # In MATLAB: 'BNCState' instead of 'BNC1'
            DeliverStimulus = [('BNC1', 1)]
            ContDeliverStimulus = []
            StopStimulus = iff(task_parameters.StimAfterPokeOut, [],
                               [('BNC1', 0)])
            ChoiceStopStimulus = iff(task_parameters.StimAfterPokeOut,
                                     [('BNC1', 0)], [])
            EWDStopStimulus = [('BNC1', 0)]
        elif task_parameters.ExperimentType == \
                ExperimentType.LightIntensity:
            # Divide Intensity by 100 to get fraction value
            LeftPWMStim = round(data.Custom.LightIntensityLeft[i_trial] *
                                LeftPWM / 100)
            RightPWMStim = round(data.Custom.LightIntensityRight[i_trial] *
                                 RightPWM / 100)
            DeliverStimulus = [(pwm_str(LeftPort), LeftPWMStim),
                               (pwm_str(RightPort), RightPWMStim)]
            ContDeliverStimulus = DeliverStimulus
            StopStimulus = iff(task_parameters.StimAfterPokeOut,
                               DeliverStimulus, [])
            ChoiceStopStimulus = []
            EWDStopStimulus = []
        elif task_parameters.ExperimentType == \
                ExperimentType.GratingOrientation:
            rightPortAngle = VisualStimAngle.get_degrees(
                task_parameters.VisualStimAnglePortRight.value)
            leftPortAngle = VisualStimAngle.get_degrees(
                task_parameters.VisualStimAnglePortLeft.value)
            # Calculate the distance between the right and left port angles
            # to determine whether we should use the circle arc between the
            # two values in the clockwise or counter-clockwise direction to
            # compute the different difficulties.
            ccw = iff(
                mod(rightPortAngle - leftPortAngle, 360) < mod(
                    leftPortAngle - rightPortAngle, 360), True, False)
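            # e.g. (hypothetical angles) leftPortAngle = 30 and
            # rightPortAngle = 330: mod(300, 360) = 300 is not less than
            # mod(-300, 360) = 60, so ccw is False and the shorter 60-degree
            # arc through 0 is used.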
            if ccw:
                finalDV = data.Custom.DV[i_trial]
                if rightPortAngle < leftPortAngle:
                    rightPortAngle += 360
                angleDiff = rightPortAngle - leftPortAngle
                minAngle = leftPortAngle
            else:
                finalDV = -data.Custom.DV[i_trial]
                if leftPortAngle < rightPortAngle:
                    leftPortAngle += 360
                angleDiff = leftPortAngle - rightPortAngle
                minAngle = rightPortAngle
            # orientation = ((DVMax - DV) / (DVMax - DVMin)) *
            #   (MaxAngle - MinAngle) + MinAngle
            gratingOrientation = ((1 - finalDV) * angleDiff / 2) + minAngle
            gratingOrientation = mod(gratingOrientation, 360)
            data.Custom.drawParams.stimType = DrawStimType.StaticGratings
            data.Custom.drawParams.gratingOrientation = gratingOrientation
            data.Custom.drawParams.numCycles = task_parameters.NumCycles
            data.Custom.drawParams.cyclesPerSecondDrift = \
                task_parameters.CyclesPerSecondDrift
            data.Custom.drawParams.phase = task_parameters.Phase
            data.Custom.drawParams.gaborSizeFactor = \
                task_parameters.GaborSizeFactor
            data.Custom.drawParams.gaussianFilterRatio = \
                task_parameters.GaussianFilterRatio
            # Start from the 5th byte
            # serializeAndWrite(data.dotsMapped_file, 5,
            #                   data.Custom.drawParams)
            # data.dotsMapped_file.data(1: 4) = typecast(uint32(1), 'uint8');

            DeliverStimulus = [('SoftCode', 5)]
            ContDeliverStimulus = []
            StopStimulus = iff(task_parameters.StimAfterPokeOut, [],
                               [('SoftCode', 6)])
            ChoiceStopStimulus = iff(task_parameters.StimAfterPokeOut,
                                     [('SoftCode', 6)], [])
            EWDStopStimulus = [('SoftCode', 6)]
        elif task_parameters.ExperimentType == ExperimentType.RandomDots:
            # Setup the parameters
            # Use 20% of the screen size. Assume apertureSize is the diameter
            task_parameters.circleArea = math.pi * \
                ((task_parameters.ApertureSizeWidth / 2) ** 2)
            task_parameters.nDots = round(task_parameters.circleArea *
                                          task_parameters.DrawRatio)

            data.Custom.drawParams.stimType = DrawStimType.RDK
            data.Custom.drawParams.centerX = task_parameters.CenterX
            data.Custom.drawParams.centerY = task_parameters.CenterY
            data.Custom.drawParams.apertureSizeWidth = \
                task_parameters.ApertureSizeWidth
            data.Custom.drawParams.apertureSizeHeight = \
                task_parameters.ApertureSizeHeight
            data.Custom.drawParams.drawRatio = task_parameters.DrawRatio
            data.Custom.drawParams.mainDirection = floor(
                VisualStimAngle.get_degrees(
                    iff(IsLeftRewarded,
                        task_parameters.VisualStimAnglePortLeft.value,
                        task_parameters.VisualStimAnglePortRight.value)))
            data.Custom.drawParams.dotSpeed = \
                task_parameters.DotSpeedDegsPerSec
            data.Custom.drawParams.dotLifetimeSecs = \
                task_parameters.DotLifetimeSecs
            data.Custom.drawParams.coherence = data.Custom.DotsCoherence[
                i_trial]
            data.Custom.drawParams.screenWidthCm = \
                task_parameters.ScreenWidthCm
            data.Custom.drawParams.screenDistCm = \
                task_parameters.ScreenDistCm
            data.Custom.drawParams.dotSizeInDegs = \
                task_parameters.DotSizeInDegs

            # Start from the 5th byte
            # serializeAndWrite(data.dotsMapped_file, 5,
            #                   data.Custom.drawParams)
            # data.dotsMapped_file.data(1: 4) = \
            #   typecast(uint32(1), 'uint8');

            DeliverStimulus = [('SoftCode', 5)]
            ContDeliverStimulus = []
            StopStimulus = iff(task_parameters.StimAfterPokeOut, [],
                               [('SoftCode', 6)])
            ChoiceStopStimulus = iff(task_parameters.StimAfterPokeOut,
                                     [('SoftCode', 6)], [])
            EWDStopStimulus = [('SoftCode', 6)]
        else:
            error('Unexpected ExperimentType')

        # Valve opening is a bitmap. Open each valve separately by raising 2 to
        # the power of port number - 1
        # LeftValve = 2 ** (LeftPort - 1)
        # CenterValve = 2 ** (CenterPort - 1)
        # RightValve = 2 ** (RightPort - 1)
        # AirSolenoidOn = 2 ** (AirSolenoid - 1)
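        # e.g. port 3 would map to bit 2: 2 ** (3 - 1) = 4. Here the raw port
        # number is used instead, presumably matching what this Bpod build
        # expects.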
        LeftValve = LeftPort
        CenterValve = CenterPort
        RightValve = RightPort
        AirSolenoidOn = AirSolenoid

        LeftValveTime = GetValveTimes(data.Custom.RewardMagnitude[i_trial][0],
                                      LeftPort)
        CenterValveTime = GetValveTimes(
            data.Custom.CenterPortRewAmount[i_trial], CenterPort)
        RightValveTime = GetValveTimes(data.Custom.RewardMagnitude[i_trial][1],
                                       RightPort)

        RewardedPort = iff(IsLeftRewarded, LeftPort, RightPort)
        RewardedPortPWM = iff(IsLeftRewarded, LeftPWM, RightPWM)
        IncorrectConsequence = iff(
            not task_parameters.HabituateIgnoreIncorrect,
            str(MatrixState.WaitForPunishStart),
            str(MatrixState.RegisterWrongWaitCorrect))
        LeftActionState = iff(IsLeftRewarded,
                              str(MatrixState.WaitForRewardStart),
                              IncorrectConsequence)
        RightActionState = iff(IsLeftRewarded, IncorrectConsequence,
                               str(MatrixState.WaitForRewardStart))
        RewardIn = iff(IsLeftRewarded, LeftPortIn, RightPortIn)
        RewardOut = iff(IsLeftRewarded, LeftPortOut, RightPortOut)
        PunishIn = iff(IsLeftRewarded, RightPortIn, LeftPortIn)
        PunishOut = iff(IsLeftRewarded, RightPortOut, LeftPortOut)
        ValveTime = iff(IsLeftRewarded, LeftValveTime, RightValveTime)
        ValveCode = iff(IsLeftRewarded, LeftValve, RightValve)

        ValveOrWireSolenoid = 'Valve'
        if task_parameters.CutAirStimDelay and \
                task_parameters.CutAirSampling:
            AirFlowStimDelayOff = [(ValveOrWireSolenoid, AirSolenoidOn)]
            # AirFlowStimDelayOn = []
            AirFlowSamplingOff = [(ValveOrWireSolenoid, AirSolenoidOn)]
            # Must set it on again
            AirFlowSamplingOn = []
        elif task_parameters.CutAirStimDelay:
            AirFlowStimDelayOff = [(ValveOrWireSolenoid, AirSolenoidOn)]
            # AirFlowStimDelayOn = [(ValveOrWireSolenoid, AirSolenoidOff)]
            AirFlowSamplingOff = []
            AirFlowSamplingOn = []
        elif task_parameters.CutAirSampling:
            AirFlowStimDelayOff = []
            # AirFlowStimDelayOn = []
            AirFlowSamplingOff = [(ValveOrWireSolenoid, AirSolenoidOn)]
            AirFlowSamplingOn = []
        else:
            AirFlowStimDelayOff = []
            # AirFlowStimDelayOn = []
            AirFlowSamplingOff = []
            AirFlowSamplingOn = []

        if task_parameters.CutAirReward:
            AirFlowRewardOff = [('Valve', AirSolenoidOn)]
        else:
            AirFlowRewardOff = []
        AirFlowRewardOn = []

        # Check whether to play a beep at the end of minimum sampling
        MinSampleBeep = iff(task_parameters.BeepAfterMinSampling,
                            [('SoftCode', 12)], [])
        MinSampleBeepDuration = iff(task_parameters.BeepAfterMinSampling, 0.01,
                                    0)
        # GUI option RewardAfterMinSampling
        # If center reward is enabled, then a reward is given once MinSample
        # is over and no further stimulus is delivered.
        RewardCenterPort = iff(task_parameters.RewardAfterMinSampling,
                               [('Valve', CenterValve)] + StopStimulus,
                               ContDeliverStimulus)
        Timer_CPRD = iff(
            task_parameters.RewardAfterMinSampling, CenterValveTime,
            task_parameters.StimulusTime - task_parameters.MinSample)
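        # Timer_CPRD: with center reward enabled, the state lasts only as
        # long as the valve stays open; otherwise it covers the stimulus time
        # that remains after MinSample.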

        # White Noise played as Error Feedback
        ErrorFeedback = iff(task_parameters.PlayNoiseforError,
                            [('SoftCode', 11)], [])

        # CatchTrial
        FeedbackDelayCorrect = iff(data.Custom.CatchTrial[i_trial],
                                   Const.FEEDBACK_CATCH_CORRECT_SEC,
                                   task_parameters.FeedbackDelay)

        # GUI option CatchError
        FeedbackDelayError = iff(task_parameters.CatchError,
                                 Const.FEEDBACK_CATCH_INCORRECT_SEC,
                                 task_parameters.FeedbackDelay)
        SkippedFeedbackSignal = iff(task_parameters.CatchError, [],
                                    ErrorFeedback)

        # Incorrect Choice signal
        if task_parameters.IncorrectChoiceSignalType == \
                IncorrectChoiceSignalType.NoisePulsePal:
            PunishmentDuration = 0.01
            IncorrectChoice_Signal = [('SoftCode', 11)]
        elif task_parameters.IncorrectChoiceSignalType == \
                IncorrectChoiceSignalType.BeepOnWire_1:
            PunishmentDuration = 0.25
            IncorrectChoice_Signal = [('Wire1', 1)]
        elif task_parameters.IncorrectChoiceSignalType == \
                IncorrectChoiceSignalType.PortLED:
            PunishmentDuration = 0.1
            IncorrectChoice_Signal = [(pwm_str(LeftPort), LeftPWM),
                                      (pwm_str(CenterPort), CenterPWM),
                                      (pwm_str(RightPort), RightPWM)]
        elif task_parameters.IncorrectChoiceSignalType == \
                IncorrectChoiceSignalType.none:
            PunishmentDuration = 0.01
            IncorrectChoice_Signal = []
        else:
            error('Unexpected IncorrectChoiceSignalType value')

        # ITI signal
        if task_parameters.ITISignalType == ITISignalType.Beep:
            ITI_Signal_Duration = 0.01
            ITI_Signal = [('SoftCode', 12)]
        elif task_parameters.ITISignalType == ITISignalType.PortLED:
            ITI_Signal_Duration = 0.1
            ITI_Signal = [(pwm_str(LeftPort), LeftPWM),
                          (pwm_str(CenterPort), CenterPWM),
                          (pwm_str(RightPort), RightPWM)]
        elif task_parameters.ITISignalType == ITISignalType.none:
            ITI_Signal_Duration = 0.01
            ITI_Signal = []
        else:
            error('Unexpected ITISignalType value')

        # Wire1 settings
        Wire1OutError = iff(task_parameters.Wire1VideoTrigger, [('Wire2', 2)],
                            [])
        Wire1OutCorrectCondition = task_parameters.Wire1VideoTrigger and \
            data.Custom.CatchTrial[i_trial]
        Wire1OutCorrect = iff(Wire1OutCorrectCondition, [('Wire2', 2)], [])

        # LED on the lateral port to cue the rewarded side at the beginning
        # of training. On the auditory discrimination task, both lateral
        # ports are illuminated after the end of stimulus delivery.
        if data.Custom.ForcedLEDTrial[i_trial]:
            ExtendedStimulus = [(pwm_str(RewardedPort), RewardedPortPWM)]
        elif task_parameters.ExperimentType == ExperimentType.Auditory:
            ExtendedStimulus = [(pwm_str(LeftPort), LeftPWM),
                                (pwm_str(RightPort), RightPWM)]
        else:
            ExtendedStimulus = []

        # Softcode handler for i_trial == 1 in HomeCage
        # to close training chamber door
        CloseChamber = iff(i_trial == 1 and data.Custom.IsHomeCage,
                           [('SoftCode', 30)], [])

        PCTimeout = task_parameters.PCTimeout
        # Build state matrix
        self.set_global_timer(1, FeedbackDelayCorrect)
        self.set_global_timer(2, FeedbackDelayError)
        self.set_global_timer(
            3,
            iff(task_parameters.TimeOutEarlyWithdrawal,
                task_parameters.TimeOutEarlyWithdrawal, 0.01))
        self.set_global_timer(4, task_parameters.ChoiceDeadLine)
        self.add_state(state_name=str(MatrixState.ITI_Signal),
                       state_timer=ITI_Signal_Duration,
                       state_change_conditions={
                           Bpod.Events.Tup: str(MatrixState.WaitForCenterPoke)
                       },
                       output_actions=ITI_Signal)
        self.add_state(state_name=str(MatrixState.WaitForCenterPoke),
                       state_timer=0,
                       state_change_conditions={
                           CenterPortIn: str(MatrixState.PreStimReward)
                       },
                       output_actions=[(pwm_str(CenterPort), CenterPWM)])
        PreStimRewardStateTimer = iff(
            task_parameters.PreStimuDelayCntrReward,
            GetValveTimes(task_parameters.PreStimuDelayCntrReward, CenterPort),
            0.01)
        self.add_state(state_name=str(MatrixState.PreStimReward),
                       state_timer=PreStimRewardStateTimer,
                       state_change_conditions={
                           Bpod.Events.Tup:
                           str(MatrixState.TriggerWaitForStimulus)
                       },
                       output_actions=iff(
                           task_parameters.PreStimuDelayCntrReward,
                           [('Valve', CenterValve)], []))
        # The next state is useful for closing the 2-photon shutter. It is
        # enabled by setting the Optogenetics StartState to this state and
        # the EndState to ITI.
        self.add_state(state_name=str(MatrixState.TriggerWaitForStimulus),
                       state_timer=WireTTLDuration,
                       state_change_conditions={
                           CenterPortOut: str(MatrixState.StimDelayGrace),
                           Bpod.Events.Tup: str(MatrixState.WaitForStimulus)
                       },
                       output_actions=(CloseChamber + AirFlowStimDelayOff))
        self.add_state(state_name=str(MatrixState.WaitForStimulus),
                       state_timer=max(
                           0, task_parameters.StimDelay - WireTTLDuration),
                       state_change_conditions={
                           CenterPortOut: str(MatrixState.StimDelayGrace),
                           Bpod.Events.Tup: str(MatrixState.stimulus_delivery)
                       },
                       output_actions=AirFlowStimDelayOff)
        self.add_state(state_name=str(MatrixState.StimDelayGrace),
                       state_timer=task_parameters.StimDelayGrace,
                       state_change_conditions={
                           Bpod.Events.Tup: str(MatrixState.broke_fixation),
                           CenterPortIn:
                           str(MatrixState.TriggerWaitForStimulus)
                       },
                       output_actions=AirFlowStimDelayOff)
        self.add_state(
            state_name=str(MatrixState.broke_fixation),
            state_timer=iff(not PCTimeout,
                            task_parameters.TimeOutBrokeFixation, 0.01),
            state_change_conditions={Bpod.Events.Tup: str(MatrixState.ITI)},
            output_actions=ErrorFeedback)
        self.add_state(state_name=str(MatrixState.stimulus_delivery),
                       state_timer=task_parameters.MinSample,
                       state_change_conditions={
                           CenterPortOut: str(MatrixState.early_withdrawal),
                           Bpod.Events.Tup: str(MatrixState.BeepMinSampling)
                       },
                       output_actions=(DeliverStimulus + AirFlowSamplingOff))
        self.add_state(state_name=str(MatrixState.early_withdrawal),
                       state_timer=0,
                       state_change_conditions={
                           Bpod.Events.Tup:
                           str(MatrixState.timeOut_EarlyWithdrawal)
                       },
                       output_actions=(EWDStopStimulus + AirFlowSamplingOn +
                                       [('GlobalTimerTrig', EncTrig(3))]))
        self.add_state(state_name=str(MatrixState.BeepMinSampling),
                       state_timer=MinSampleBeepDuration,
                       state_change_conditions={
                           CenterPortOut:
                           str(MatrixState.TriggerWaitChoiceTimer),
                           Bpod.Events.Tup:
                           str(MatrixState.CenterPortRewardDelivery)
                       },
                       output_actions=(ContDeliverStimulus + MinSampleBeep))
        self.add_state(state_name=str(MatrixState.CenterPortRewardDelivery),
                       state_timer=Timer_CPRD,
                       state_change_conditions={
                           CenterPortOut:
                           str(MatrixState.TriggerWaitChoiceTimer),
                           Bpod.Events.Tup: str(MatrixState.WaitCenterPortOut)
                       },
                       output_actions=RewardCenterPort)
        # TODO: Stop stimulus is fired twice in case of center reward and then
        # wait for choice. Fix it so that it is always fired only once.
        self.add_state(state_name=str(MatrixState.TriggerWaitChoiceTimer),
                       state_timer=0,
                       state_change_conditions={
                           Bpod.Events.Tup: str(MatrixState.WaitForChoice)
                       },
                       output_actions=(StopStimulus + ExtendedStimulus +
                                       [('GlobalTimerTrig', EncTrig(4))]))
        self.add_state(state_name=str(MatrixState.WaitCenterPortOut),
                       state_timer=0,
                       state_change_conditions={
                           CenterPortOut:
                           str(MatrixState.WaitForChoice),
                           LeftPortIn:
                           LeftActionState,
                           RightPortIn:
                           RightActionState,
                           'GlobalTimer4_End':
                           str(MatrixState.timeOut_missed_choice)
                       },
                       output_actions=(StopStimulus + ExtendedStimulus +
                                       [('GlobalTimerTrig', EncTrig(4))]))
        self.add_state(state_name=str(MatrixState.WaitForChoice),
                       state_timer=0,
                       state_change_conditions={
                           LeftPortIn:
                           LeftActionState,
                           RightPortIn:
                           RightActionState,
                           'GlobalTimer4_End':
                           str(MatrixState.timeOut_missed_choice)
                       },
                       output_actions=(StopStimulus + ExtendedStimulus))
        self.add_state(state_name=str(MatrixState.WaitForRewardStart),
                       state_timer=0,
                       state_change_conditions={
                           Bpod.Events.Tup: str(MatrixState.WaitForReward)
                       },
                       output_actions=(Wire1OutCorrect + ChoiceStopStimulus +
                                       [('GlobalTimerTrig', EncTrig(1))]))
        self.add_state(state_name=str(MatrixState.WaitForReward),
                       state_timer=FeedbackDelayCorrect,
                       state_change_conditions={
                           Bpod.Events.Tup: str(MatrixState.Reward),
                           'GlobalTimer1_End': str(MatrixState.Reward),
                           RewardOut: str(MatrixState.RewardGrace)
                       },
                       output_actions=AirFlowRewardOff)
        self.add_state(state_name=str(MatrixState.RewardGrace),
                       state_timer=task_parameters.FeedbackDelayGrace,
                       state_change_conditions={
                           RewardIn:
                           str(MatrixState.WaitForReward),
                           Bpod.Events.Tup:
                           str(MatrixState.timeOut_SkippedFeedback),
                           'GlobalTimer1_End':
                           str(MatrixState.timeOut_SkippedFeedback),
                           CenterPortIn:
                           str(MatrixState.timeOut_SkippedFeedback),
                           PunishIn:
                           str(MatrixState.timeOut_SkippedFeedback)
                       },
                       output_actions=AirFlowRewardOn)
        self.add_state(state_name=str(MatrixState.Reward),
                       state_timer=ValveTime,
                       state_change_conditions={
                           Bpod.Events.Tup: str(MatrixState.WaitRewardOut)
                       },
                       output_actions=[('Valve', ValveCode)])
        self.add_state(state_name=str(MatrixState.WaitRewardOut),
                       state_timer=1,
                       state_change_conditions={
                           Bpod.Events.Tup: str(MatrixState.ITI),
                           RewardOut: str(MatrixState.ITI)
                       },
                       output_actions=[])
        self.add_state(state_name=str(MatrixState.RegisterWrongWaitCorrect),
                       state_timer=0,
                       state_change_conditions={
                           Bpod.Events.Tup: str(MatrixState.WaitForChoice)
                       },
                       output_actions=[])
        self.add_state(state_name=str(MatrixState.WaitForPunishStart),
                       state_timer=0,
                       state_change_conditions={
                           Bpod.Events.Tup: str(MatrixState.WaitForPunish)
                       },
                       output_actions=(Wire1OutError + ChoiceStopStimulus +
                                       [('GlobalTimerTrig', EncTrig(2))]))
        self.add_state(state_name=str(MatrixState.WaitForPunish),
                       state_timer=FeedbackDelayError,
                       state_change_conditions={
                           Bpod.Events.Tup: str(MatrixState.Punishment),
                           'GlobalTimer2_End': str(MatrixState.Punishment),
                           PunishOut: str(MatrixState.PunishGrace)
                       },
                       output_actions=AirFlowRewardOff)
        self.add_state(state_name=str(MatrixState.PunishGrace),
                       state_timer=task_parameters.FeedbackDelayGrace,
                       state_change_conditions={
                           PunishIn:
                           str(MatrixState.WaitForPunish),
                           Bpod.Events.Tup:
                           str(MatrixState.timeOut_SkippedFeedback),
                           'GlobalTimer2_End':
                           str(MatrixState.timeOut_SkippedFeedback),
                           CenterPortIn:
                           str(MatrixState.timeOut_SkippedFeedback),
                           RewardIn:
                           str(MatrixState.timeOut_SkippedFeedback)
                       },
                       output_actions=[])
        self.add_state(state_name=str(MatrixState.Punishment),
                       state_timer=PunishmentDuration,
                       state_change_conditions={
                           Bpod.Events.Tup:
                           str(MatrixState.timeOut_IncorrectChoice)
                       },
                       output_actions=(IncorrectChoice_Signal +
                                       AirFlowRewardOn))
        self.add_state(state_name=str(MatrixState.timeOut_EarlyWithdrawal),
                       state_timer=LEDErrorRate,
                       state_change_conditions={
                           'GlobalTimer3_End':
                           str(MatrixState.ITI),
                           Bpod.Events.Tup:
                           str(MatrixState.timeOut_EarlyWithdrawalFlashOn)
                       },
                       output_actions=ErrorFeedback)
        self.add_state(
            state_name=str(MatrixState.timeOut_EarlyWithdrawalFlashOn),
            state_timer=LEDErrorRate,
            state_change_conditions={
                'GlobalTimer3_End': str(MatrixState.ITI),
                Bpod.Events.Tup: str(MatrixState.timeOut_EarlyWithdrawal)
            },
            output_actions=(ErrorFeedback + [(pwm_str(LeftPort), LeftPWM),
                                             (pwm_str(RightPort), RightPWM)]))
        self.add_state(
            state_name=str(MatrixState.timeOut_IncorrectChoice),
            state_timer=iff(not PCTimeout,
                            task_parameters.TimeOutIncorrectChoice, 0.01),
            state_change_conditions={Bpod.Events.Tup: str(MatrixState.ITI)},
            output_actions=[])
        self.add_state(
            state_name=str(MatrixState.timeOut_SkippedFeedback),
            state_timer=(iff(not PCTimeout,
                             task_parameters.TimeOutSkippedFeedback, 0.01)),
            state_change_conditions={Bpod.Events.Tup: str(MatrixState.ITI)},
            # TODO: See how to get around this if PCTimeout
            output_actions=SkippedFeedbackSignal)
        self.add_state(
            state_name=str(MatrixState.timeOut_missed_choice),
            state_timer=iff(not PCTimeout, task_parameters.TimeOutMissedChoice,
                            0.01),
            state_change_conditions={Bpod.Events.Tup: str(MatrixState.ITI)},
            output_actions=(ErrorFeedback + ChoiceStopStimulus))
        self.add_state(state_name=str(MatrixState.ITI),
                       state_timer=WireTTLDuration,
                       state_change_conditions={
                           Bpod.Events.Tup: str(MatrixState.ext_ITI)
                       },
                       output_actions=AirFlowRewardOn)
        self.add_state(state_name=str(MatrixState.ext_ITI),
                       state_timer=iff(not PCTimeout, task_parameters.ITI,
                                       0.01),
                       state_change_conditions={Bpod.Events.Tup: 'exit'},
                       output_actions=AirFlowRewardOn)

        # If Optogenetics/2-Photon is enabled for a particular state, then we
        # modify that given state such that it would send a signal to Arduino
        # with the required offset delay to trigger the optogenetics box.
        # Note: To precisely track your optogenetics signal, split the
        # Arduino output to the optogenetics box and feed it as an input to a
        # Bpod input TTL, e.g. Wire1. This way, the optogenetics signal gets
        # written as part of your data file. Don't forget to activate that
        # input in the Bpod main config.

        if data.Custom.OptoEnabled[i_trial]:
            # Convert seconds to millis as we will send ints to Arduino
            OptoDelay = np.array([task_parameters.OptoStartDelay * 1000],
                                 dtype=np.uint32)
            OptoDelay = OptoDelay.view(np.uint8)
            OptoTime = np.array([task_parameters.OptoMaxTime * 1000],
                                dtype=np.uint32)
            OptoTime = OptoTime.view(np.uint8)
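            # e.g. (hypothetical) OptoStartDelay = 1.5 s becomes 1500 ms,
            # sent as the 4 bytes of a uint32 in the host CPU's byte order
            # (little-endian on typical x86/ARM hosts).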
            if not EMULATOR_MODE or hasattr(PluginSerialPorts, 'OptoSerial'):
                fwrite(PluginSerialPorts.OptoSerial, OptoDelay, 'int8')
                fwrite(PluginSerialPorts.OptoSerial, OptoTime, 'int8')
            OptoStartEventIdx = \
                self.hardware.channels.output_channel_names.index('Wire3')
            OptoStopEventIdx = \
                self.hardware.channels.output_channel_names.index('Wire4')
            tuples = [(str(task_parameters.OptoStartState1),
                       OptoStartEventIdx),
                      (str(task_parameters.OptoEndState1), OptoStopEventIdx),
                      (str(task_parameters.OptoEndState2), OptoStopEventIdx),
                      (str(MatrixState.ext_ITI), OptoStopEventIdx)]
            for state_name, event_idx in tuples:
                TrgtStateNum = self.state_names.index(state_name)
                self.output_matrix[TrgtStateNum][event_idx] = 1
Example #15
0
    def render(self):
        if (utils.sum_points_inside_flat_poly(*self.parent.canvas) <= 4):
            return
        color_profile = random.choice(self.colors)

        min_x = utils.floor(min([p.x for p in self.parent.canvas]))
        max_x = utils.ceil(max([p.x for p in self.parent.canvas]))
        min_z = utils.floor(min([p.z for p in self.parent.canvas]))
        max_z = utils.ceil(max([p.z for p in self.parent.canvas]))
        min_y = utils.floor(min([p.y for p in self.parent.canvas]))

        # Cut the canvas into quarters and fill one quarter with colors.
        # Then, copy that quarter into the other three quarters.
        width = utils.floor(((max_x - min_x + 1) + 1) / 2)
        depth = utils.floor(((max_z - min_z + 1) + 1) / 2)
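        # Note: width/depth are the ceiling of half the span, so an odd-sized
        # canvas still mirrors cleanly around its center row/column.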

        points = [[-1 for j in xrange(depth)] for i in xrange(width)]
        points_left = []
        for i in xrange(width):
            for j in xrange(depth):
                points_left.append((i, j))
        bounds = utils.Box(Vec(0, 0, 0), width, 1, depth)
        p = Vec(0, 0, 0)
        color_num = 0
        prev_dir = random.randint(0, 3)
        next_dir = random.randint(0, 3)
        while len(points_left) > 0:
            # pick random starting point and walk around the matrix
            point_index = random.randint(0, len(points_left) - 1)
            p = Vec(points_left[point_index][0], 0,
                    points_left[point_index][1])

            while (bounds.containsPoint(p) and points[p.x][p.z] == -1
                   and len(points_left) > 0):
                points[p.x][p.z] = color_num
                points_left.remove((p.x, p.z))

                # pick random direction to walk, try to keep walking same
                # direction
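                # (a _walk_weight of N keeps the previous direction with
                # probability N / (N + 1))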
                if random.randint(0, self._walk_weight) != 0:
                    next_dir = prev_dir
                else:
                    while next_dir == prev_dir:
                        next_dir = random.randint(0, 3)
                if next_dir == 0:  # right
                    p += Vec(1, 0, 0)
                elif next_dir == 1:  # down
                    p += Vec(0, 0, 1)
                elif next_dir == 2:  # left
                    p += Vec(-1, 0, 0)
                else:  # up
                    p += Vec(0, 0, -1)
                prev_dir = next_dir
            color_num = (color_num + 1) % len(color_profile)

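        # Stamp the floor material and mirror the colored quarter into the
        # other three quarters: an index past the halfway point reflects
        # back, e.g. with width = 5, i = 7 maps to i_adj = 2 * 5 - 1 - 7 = 2.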
        for j in xrange(max_z - min_z + 1):
            for i in xrange(max_x - min_x + 1):
                p = self.parent.loc + Vec(min_x + i, min_y, min_z + j)
                self.parent.parent.setblock(p, self.mat)
                if i < width:
                    i_adj = i
                else:
                    i_adj = 2 * width - 1 - i
                if j < depth:
                    j_adj = j
                else:
                    j_adj = 2 * depth - 1 - j
                self.parent.parent.blocks[p].data = \
                    color_profile[points[i_adj][j_adj]]
        # Ruined
        if (self.ruin):
            self.ruinrender()
Example #16
0
    def get_inspect_text(self):
        message = self.get_tag(True) + ": "
        message += "You are a level " + str(self.level) + " " + self.race + "."
        message += "\nYou have " + str(self.gold) + " gold."
        message += "\n"

        str_adjectives = [
            "You are emaciated and thin. ", "", "You are remarkably strong. ",
            "You are capable of great feats of strength. ",
            "Your strength is legendary. "
        ]

        dex_adjectives = [
            "You are clumsy and uncoordinated. ", "",
            "You are graceful and nimble. ", "You are exceptionally agile. ",
            "You are lightning quick. "
        ]

        int_adjectives = [
            "You are dim-witted. ", "", "You are sharp and aware. ",
            "You are remarkably smart. ",
            "You are purveyor of lost knowledge. ",
            "Your knowledge of the arcane is known throughout the land. "
        ]

        cha_adjectives = [
            "You are frequently at a loss for words. ", "",
            "You are keen and likable. ",
            "You are fabulously witty and charming. ",
            "You have legions of admirers. "
        ]

        str_index = int(utils.clamp(utils.floor(self.strength / 2), 0, 4))
        dex_index = int(utils.clamp(utils.floor(self.dexterity / 2), 0, 4))
        int_index = int(utils.clamp(utils.floor(self.intellect / 2), 0, 4))
        cha_index = int(utils.clamp(utils.floor(self.charisma / 2), 0, 4))
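        # e.g. strength 7 gives floor(7 / 2) = 3, clamped to 3, selecting
        # "You are capable of great feats of strength."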

        message += str_adjectives[str_index]
        message += dex_adjectives[dex_index]
        message += int_adjectives[int_index]
        message += cha_adjectives[cha_index]

        # Affinities

        message += "\n"

        if self.strength_affinity >= 3:
            if self.dexterity_affinity >= 3:
                if self.alignment == 'chaotic':
                    message += "You are outgoing. "  # Str-Dex-Chaotic
                if self.alignment == 'lawful':
                    message += "You are enthusiastic. "  # Str-Dex-Lawful
            if self.intellect_affinity >= 3:
                if self.alignment == 'chaotic':
                    message += "You are determined. "  # Str-Int-Chaotic
                if self.alignment == 'lawful':
                    message += "You are disciplined. "  # Str-Int-Lawful
            if self.charisma_affinity >= 3:
                if self.alignment == 'chaotic':
                    message += "You are energetic. "  # Str-Cha-Chaotic
                if self.alignment == 'lawful':
                    message += "You think before you speak. "  # Str-Cha-Lawful
        elif self.dexterity_affinity >= 3:
            if self.intellect_affinity >= 3:
                if self.alignment == 'chaotic':
                    message += "You are wily. "  # Dex-Int-Chaotic
                if self.alignment == 'lawful':
                    message += "You are a problem-solver. "  # Dex-Int-Lawful
            if self.charisma_affinity >= 3:
                if self.alignment == 'chaotic':
                    message += "You are a show-off. "  # Dex-Cha-Chaotic
                if self.alignment == 'lawful':
                    message += "You are confident. "  # Dex-Cha-Lawful
        elif self.intellect_affinity >= 3:
            if self.charisma_affinity >= 3:
                if self.alignment == 'chaotic':
                    message += "You like puns. "  # Int-Cha-Chaotic
                if self.alignment == 'lawful':
                    message += "You appreciate a good joke. "  # Int-Cha-Lawful

        message += "You were born under the sign of " + self.horoscope + ". "

        if self.intellect - self.charisma >= 3:
            message += "(You're pretty sure that doesn't mean anything.) "
        elif self.intellect - self.charisma <= -3:
            message += "(You're pretty sure that's your most important trait.) "

        message += "\n"
        # items

        if self.get_slot_filled('armor'):
            name = self.get_item_name('armor')
            message += "You are wearing " + name + ". "

        if self.get_slot_filled('weapon'):
            name = self.get_item_name('weapon')
            message += "You wield a " + name + ". "

        if self.get_slot_filled('trinket'):
            name = self.get_item_name('trinket')
            message += "You carry a " + name + ". "

        if self.get_slot_filled('pet'):
            name = self.get_item_name('pet')
            given_name = self.get_item_instance('pet').given_name
            message += "You own a pet " + name + ", named *" + given_name + "*!"

        return message