def charmsNecklace(n, max):
    # Recursively build a necklace of n charms spread evenly around a circle.
    if n == 0:
        return Group()
    else:
        necklace = charmsNecklace(n - 1, max)
        # Pick one of three charm shapes at random.
        randomValue = rand(0, 3)
        piece = star5 if randomValue < 1 else (
            moon if randomValue < 2 else teapot)
        # Random spin about Z, push out to radius 200, then rotate this charm
        # into its slot around the Y axis.
        locatePiece = zRot(rand(0, 360)) * translate(200, 0, 0) * yRot(
            360 * (n / max))
        necklace.add(piece, plasterMat(rand(0, 360)), locatePiece)
        return necklace
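
The charm placement above is simple angle arithmetic: charm n of max is rotated 360 * n / max degrees about the Y axis after being pushed out to radius 200, so the charms end up evenly spread around the necklace. A minimal standalone sketch of just that spacing (no 3D library needed):

# Sketch only: print the yaw angle assigned to each of 8 charms.
total = 8
for n in range(1, total + 1):
    print(n, 360.0 * n / total)  # 45.0, 90.0, ..., 360.0 degrees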
Example #2
 def _set_custom_data(self):
     for a in range(Const.NUM_EASY_TRIALS):
         gui_ssc = self._task_parameters.StimulusSelectionCriteria
         if gui_ssc == StimulusSelectionCriteria.BetaDistribution:
             # This random value is between 0 and 1; the beta distribution
             # parameters make it very likely to be very close to zero or
             # very close to 1.
             beta_dist_param = self._task_parameters.BetaDistAlphaNBeta / 4
             intensity = betarnd(beta_dist_param, beta_dist_param)
         elif gui_ssc == StimulusSelectionCriteria.DiscretePairs:
             omega_prob = self._task_parameters.OmegaTable.columns.OmegaProb
             index = next(
                 omega_prob.index(prob) for prob in omega_prob if prob > 0)
             intensity = self._task_parameters.OmegaTable.columns.Omega[
                 index] / 100
         else:
             error('Unexpected StimulusSelectionCriteria')
         # Randomly choose right or left
         is_left_rewarded = bool(rand(1, 1) >= 0.5)
         # In the case of the beta distribution, the distribution is
         # symmetric, so prob < 0.5 is as likely as prob > 0.5, and we can
         # just pick the value that corrects the bias.
         if not is_left_rewarded and intensity >= 0.5:
             intensity = 1 - intensity
         self._data.Custom.StimulusOmega[a] = intensity
         task_experiment_type = self._task_parameters.ExperimentType
         if task_experiment_type == ExperimentType.Auditory:
             dv = CalcAudClickTrain(self._data, a)
         elif task_experiment_type == ExperimentType.LightIntensity:
             dv = CalcLightIntensity(self._data, a)
         elif task_experiment_type == ExperimentType.GratingOrientation:
             dv = CalcGratingOrientation(self._data, a)
         elif task_experiment_type == ExperimentType.RandomDots:
             dv = CalcDotsCoherence(self._data, a)
         else:
             error('Unexpected ExperimentType')
         if dv > 0:
             self._data.Custom.LeftRewarded[a] = True
         elif dv < 0:
             self._data.Custom.LeftRewarded[a] = False
         else:
             self._data.Custom.LeftRewarded[a] = bool(
                 rand() < 0.5)  # It's equal distribution
         # cross-modality difficulty for plotting
         self._data.Custom.DV[a] = dv
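
The "easy trial" logic above leans on a property of the beta distribution: with small, equal alpha and beta parameters, samples pile up near 0 and 1. A minimal sketch, assuming betarnd above wraps numpy.random.beta:

# Sketch only: most draws land very close to 0.0 or 1.0.
import numpy as np

samples = np.random.beta(0.3, 0.3, size=10)
print(np.round(samples, 2))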
Example #3
def orth_matrix(n=10):
    Y = utils.rand(n, 1)
    X = utils.zeros(n, n)
    if n > 2:
        for j in xrange(n - 1):
            x = utils.rand(n, 1)
            while abs(abs(utils.corr(x, Y)) - j / (n - 1.0)) > 0.005:
                x = utils.rand(n, 1)
            if utils.corr(x, Y) < 0:
                x *= -1
            X[:, j] = x.ravel()

    X[:, n - 1] = Y.ravel()

    return X, Y
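
orth_matrix builds each column by rejection sampling until its correlation with Y is within 0.005 of the target j / (n - 1); the last column is Y itself. A quick check of that property, assuming utils.rand, utils.zeros and utils.corr wrap the usual NumPy primitives:

# Sketch only: column j should correlate with Y at roughly j / (n - 1).
import numpy as np

X, Y = orth_matrix(n=10)
for j in range(X.shape[1]):
    r = np.corrcoef(X[:, j], Y.ravel())[0, 1]
    print(j, round(r, 3), round(j / (X.shape[1] - 1.0), 3))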
Example #5
def randomCandyMat():
    mat = Material()
    colour = hsv2rgb((rand(0, 360), 1, 1))  # random hue, full saturation/value
    mat.color(colour).transparency(0.3).refraction(1.6)
    return mat
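
randomCandyMat picks a random hue at full saturation and value. The same colour choice with the standard library, assuming the hsv2rgb above takes its hue in degrees:

# Sketch only: colorsys expects all HSV components in [0, 1].
import colorsys
import random

hue_degrees = random.uniform(0, 360)
r, g, b = colorsys.hsv_to_rgb(hue_degrees / 360.0, 1.0, 1.0)
print(round(r, 2), round(g, 2), round(b, 2))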
Example #6
def pop(n):
    if n == 1:
        return popdrop
    else:
        bud = pop(n - 1)
        gamma = 39
        gammaPrime = 40
        plant = Group()
        plant.add(popdrop, yRot(rand(0, 360)), randomCandyMat())
        plant.add(
            bud, randomCandyMat(),
            yRot(rand(0, 360)) * scale(golden) * zRot(-gamma) *
            translate(0, 61.8, 0))
        plant.add(
            bud, randomCandyMat(),
            yRot(rand(0, 360)) * scale(golden) * zRot(gammaPrime) *
            translate(0, 39, 0))
        return plant
Example #7
 def index_post(self, o_link):
     s_link = utils.rand()
     record = models.Url(org_link=o_link, short_link=s_link)
     try:
         session.add(record)
         session.commit()
     except Exception:
         s_link = utils.get_short_link_by_org_link(session, o_link)
     return dict(short_link=utils.get_short_url(s_link))
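
Here utils.rand() acts as a short-link generator rather than a number generator. A hedged sketch of what such a helper might look like (the alphabet and length are guesses, not the project's actual values):

# Sketch only: random alphanumeric slug for the shortened URL.
import random
import string

def rand(length=6):
    return "".join(random.choices(string.ascii_letters + string.digits, k=length))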
Example #8
File: user.py Project: imcj/ioa
    async def register(self, user):
        salt = utils.rand()

        user['password'] = utils.hash(user['password'], salt)
        user['salt'] = salt

        result = await self.connection.execute(UserTable.insert().values(**user))
        user['id'] = result.lastrowid

        return user
Example #9
def orth_matrix(n=10):
    Y = utils.rand(n, 1)
    X = utils.zeros(n, n)
    if n > 2:
        for j in xrange(n - 1):
            x = utils.rand(n, 1)
            while abs(abs(utils.corr(x, Y)) - j / (n - 1.0)) > 0.005:
                x = utils.rand(n, 1)
            if utils.corr(x, Y) < 0:
                x *= -1
            X[:, j] = x.ravel()

    X[:, n - 1] = Y.ravel()

    return X, Y


#def check_ortho(M, err_msg):
#    K = np.dot(M.T, M)
#    assert_array_almost_equal(K, np.diag(np.diag(K)), err_msg=err_msg)
Example #10
def accept():
    data_request = request.form['org_link']
    rand_link = utils.rand()
    record = Url(org_link=data_request, short_link=rand_link)
    try:
        session.add(record)
        session.commit()
    except Exception as e:
        raise e
    url = url_for('home', _external=True)
    final_url = url + rand_link
    return render_template('output.html', url=final_url)
Example #11
import random

import data
from utils import rand

CITY = lambda: random.choice([
    " ".join([
        rand(data.CITY_PREFIX), "".join(
            [rand(data.FIRST_NAMES),
             rand(data.CITY_SUFFIX)])
    ]),
    " ".join([rand(data.CITY_PREFIX),
              rand(data.FIRST_NAMES)]),
    "".join([rand(data.FIRST_NAMES),
             rand(data.CITY_SUFFIX)]),
    "".join([rand(data.LAST_NAMES),
             rand(data.CITY_SUFFIX)]),
])

STREET_NAME = lambda: random.choice([
    " ".join([rand(data.LAST_NAMES),
              rand(data.STREET_SUFFIX)]), " ".join(
                  [rand(data.FIRST_NAMES),
                   rand(data.STREET_SUFFIX)])
])

COMPANY_NAME = lambda: random.choice([
    "".join([rand(data.COMPANY_NAME_PREFIX),
             rand(data.COMPANY_NAME_SUFFIX)]), "".join([
                 rand(data.COMPANY_NAME_PREFIX).capitalize(),
                 rand(data.COMPANY_NAME_SUFFIX)
             ])
])
Example #12
def get_random_data(annotation_line,
                    input_shape,
                    imageDir,
                    random=True,
                    max_boxes=20,
                    jitter=.3,
                    hue=.1,
                    sat=1.5,
                    val=1.5,
                    proc_img=True):
    """random pre processing for real-time data augmentation"""
    line = annotation_line.split(':')
    imageName = line[0]
    boxes = line[1].replace('[', '')
    boxes = boxes.replace(']\n', '')
    boxes = boxes.split('],')

    imagePath = os.path.join(imageDir, imageName)
    image = Image.open(imagePath)
    iw, ih = image.size
    h, w = input_shape
    box = np.array([np.array(list(map(int, box.split(',')))) for box in boxes])
    # fix to absolute coordinates
    box[..., 2:4] = box[..., 0:2] + box[..., 2:4]
    if not random:
        # resize image
        scale = min(w / iw, h / ih)
        nw = int(iw * scale)
        nh = int(ih * scale)
        dx = (w - nw) // 2
        dy = (h - nh) // 2
        image_data = 0
        if proc_img:
            image = image.resize((nw, nh), Image.BICUBIC)
            new_image = Image.new('RGB', (w, h), (128, 128, 128))
            new_image.paste(image, (dx, dy))
            image_data = np.array(new_image) / 255.

        # correct boxes
        box[..., 4] -= 1  # shift the class indices to start from 0
        box_data = np.zeros((max_boxes, 5))
        if len(box) > 0:
            np.random.shuffle(box)
            if len(box) > max_boxes:
                box = box[:max_boxes]
            box[:, [0, 2]] = box[:, [0, 2]] * scale + dx
            box[:, [1, 3]] = box[:, [1, 3]] * scale + dy
            box_data[:len(box)] = box

        return image_data, box_data

    # resize image
    new_ar = w / h * rand(1 - jitter, 1 + jitter) / rand(
        1 - jitter, 1 + jitter)
    scale = rand(.25, 2)
    if new_ar < 1:
        nh = int(scale * h)
        nw = int(nh * new_ar)
    else:
        nw = int(scale * w)
        nh = int(nw / new_ar)
    image = image.resize((nw, nh), Image.BICUBIC)

    # place image
    dx = int(rand(0, w - nw))
    dy = int(rand(0, h - nh))
    new_image = Image.new('RGB', (w, h), (128, 128, 128))
    new_image.paste(image, (dx, dy))
    image = new_image

    # flip image or not
    flip = rand() < .5
    if flip:
        image = image.transpose(Image.FLIP_LEFT_RIGHT)

    # distort image
    hue = rand(-hue, hue)
    sat = rand(1, sat) if rand() < .5 else 1 / rand(1, sat)
    val = rand(1, val) if rand() < .5 else 1 / rand(1, val)
    x = rgb_to_hsv(np.array(image) / 255.)
    x[..., 0] += hue
    x[..., 0][x[..., 0] > 1] -= 1
    x[..., 0][x[..., 0] < 0] += 1
    x[..., 1] *= sat
    x[..., 2] *= val
    x[x > 1] = 1
    x[x < 0] = 0
    image_data = hsv_to_rgb(x)  # numpy array, 0 to 1

    # correct boxes
    box[..., 4] -= 1  # shift the class indices to start from 0
    box_data = np.zeros((max_boxes, 5))
    if len(box) > 0:
        np.random.shuffle(box)
        box[:, [0, 2]] = box[:, [0, 2]] * nw / iw + dx
        box[:, [1, 3]] = box[:, [1, 3]] * nh / ih + dy
        if flip:
            box[:, [0, 2]] = w - box[:, [2, 0]]
        box[:, 0:2][box[:, 0:2] < 0] = 0
        box[:, 2][box[:, 2] > w] = w
        box[:, 3][box[:, 3] > h] = h
        box_w = box[:, 2] - box[:, 0]
        box_h = box[:, 3] - box[:, 1]
        box = box[np.logical_and(box_w > 1, box_h > 1)]  # discard invalid box
        if len(box) > max_boxes:
            box = box[:max_boxes]
        box_data[:len(box)] = box

    return image_data, box_data
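
Unlike most of the other examples, the rand used throughout get_random_data takes a range and returns a float. In the keras-yolo3 lineage this helper is usually a thin wrapper over NumPy; a minimal sketch under that assumption:

# Sketch only: uniform float in [a, b), the behaviour get_random_data relies on.
import numpy as np

def rand(a=0.0, b=1.0):
    return np.random.rand() * (b - a) + a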
Example #13
def main():
    # Image
    aspect_ratio = 16.0 / 9.0
    image_width = 256
    image_height = image_width/aspect_ratio
    background = Vector3(0, 0, 0)
    samples_per_pixel = 20
    max_depth = 1

    # World
    world = HittableList()

    # material_ground = Lambertian(Vector3(0.8, 0.8, 0.0))
    # material_center = Lambertian(Vector3(0.7, 0.3, 0.3))
    # material_left = Metal(Vector3(0.8, 0.8, 0.8))
    material_emissive = DiffuseLight(Vector3(1,1,1), Vector3(1,1,1))
    # material_right = Metal(Vector3(0.8, 0.6, 0.2))

    # world.append(Sphere(Vector3(0, 0, -1), 0.5, material_center))
    # world.append(Sphere(Vector3(1, 0.5, -0.5), 1, material_emissive))
    # world.append(Sphere(Vector3(-1, 0, -1), 0.5, material_right))
    # world.append(Sphere(Vector3(0, -100.5, -1), 100, material_ground))
    sphere_count = 35
    sphere_dist = 100
    for i in range(0, sphere_count):
        world.append(Sphere(Vector3(utils.rand_range(-50, 50), utils.rand_range(-40, 40),
                                    sphere_dist),
                            1,
                            material_emissive))

    # Camera
    look_from = Vector3(0, 0, 0)
    look_at = Vector3(0, 0, 1)
    v_up = Vector3(0, 1, 0)
    dist_to_focus = (look_from - look_at).length()
    f_stop = 8
    aperture = 1/f_stop
    cam = Camera(look_from, look_at, v_up, 30.0, aspect_ratio, aperture, dist_to_focus)

    # Render
    render_data = list()
    print("Commencing Rendering.")
    start_time = datetime.now()
    for j in reversed(range(0, int(image_height))):
        print("Scanlines remaining: %s" % j)
        for i in range(0, image_width):
            pixel_colour = Vector3(0, 0, 0)
            for s in range(0, samples_per_pixel):
                u = (i + utils.rand()) / (image_width-1)
                v = (j + utils.rand()) / (image_height-1)
                r = cam.get_ray(u, v, s)
                pixel_colour += ray_colour(r, background, world, max_depth)
            render_data.append(colour.write_colour(pixel_colour, samples_per_pixel))
    print("\nDone.\nTime Spent: %s" % (datetime.now() - start_time))

    file = image.write_image(
        width=image_width,
        height=image_height,
        data=render_data
    )
    return file
Example #14
 def random(cls):
     return Vector3(utils.rand(), utils.rand(), utils.rand())
Example #15
import random

import data
from utils import rand


CITY = lambda: random.choice([
    " ".join([rand(data.CITY_PREFIX), "".join([rand(data.FIRST_NAMES), rand(data.CITY_SUFFIX)])]),
    " ".join([rand(data.CITY_PREFIX), rand(data.FIRST_NAMES)]),
    "".join([rand(data.FIRST_NAMES), rand(data.CITY_SUFFIX)]),
    "".join([rand(data.LAST_NAMES), rand(data.CITY_SUFFIX)]),
])

STREET_NAME = lambda: random.choice([
    " ".join([rand(data.LAST_NAMES), rand(data.STREET_SUFFIX)]),
    " ".join([rand(data.FIRST_NAMES), rand(data.STREET_SUFFIX)])
])

COMPANY_NAME = lambda: random.choice([
    "".join([rand(data.COMPANY_NAME_PREFIX), rand(data.COMPANY_NAME_SUFFIX)]),
    "".join([rand(data.COMPANY_NAME_PREFIX).capitalize(), rand(data.COMPANY_NAME_SUFFIX)]),
    "".join([rand(data.COMPANY_NAME_PREFIX).capitalize(), rand(data.COMPANY_NAME_SUFFIX).capitalize()]),
    "%s %s" % ("".join([rand(data.COMPANY_NAME_PREFIX).capitalize(), rand(data.COMPANY_NAME_SUFFIX)]),
               rand(data.COMPANY_NAME_EXTRA).capitalize())
])

GENDER = lambda: random.choice(data.GENDER)

CURRENCY = lambda: random.choice(data.CURRENCY)
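
In these faker-style modules, rand is applied to lists of name parts, so it behaves like a random picker rather than a number generator. A minimal sketch of the assumed helper:

# Sketch only: pick one random element from a sequence of candidates.
import random

def rand(candidates):
    return random.choice(candidates)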
"""
Sinh ngẫu nhiên một ma trận 100*100 giá trị các phần tử nằm trong khoảng (0, 100) rồi tìm định thức, ma trận chuyển vị, trị riêng và vector riêng của ma trận. Lưu kết quả vào một file output.txt
"""

import os
import numpy as np
import numpy.linalg as LA
import utils


if __name__ == '__main__':
    np.random.seed(0)

    # Generate random matrix
    arr = utils.rand(0, 100, size=(5, 5))
    matrix = np.matrix(arr)

    det = LA.det(matrix)
    transpose_matrix = np.transpose(matrix)

    eigen_values, eigen_vectors = LA.eig(matrix)

    # =========================

    print("Matrix : ", matrix)
    print("Det : {:.4f}".format(det))
    print("Transpose matrix : ", transpose_matrix)

    for i in range(eigen_values.shape[0]):
        eig_value = eigen_values[i]
        eig_vector = eigen_vectors[:,i]
Example #17
    def update(self, i_trial):
        # Standard values

        # Stores which lateral port the animal poked into (if any)
        self.ChoiceLeft[i_trial] = None
        # Stores whether the animal poked into the correct port (if any)
        self.ChoiceCorrect[i_trial] = None
        # Signals whether confidence was used in this trial. Set to false if
        # the lateral-ports choice timed out (i.e., MissedChoice(i) is true).
        # It should also be set to false (but not due to a bug) if the animal
        # poked a lateral port but didn't complete the feedback period (even
        # with the grace period).
        self.Feedback[i_trial] = True
        # How long the animal spent waiting for the reward (whether in correct
        # or in incorrect ports)
        self.FeedbackTime[i_trial] = None
        # Signals whether the animal broke fixation during stimulus delay state
        self.FixBroke[i_trial] = False
        # Signals whether the animal broke fixation during sampling but before
        # min-sampling ends
        self.EarlyWithdrawal[i_trial] = False
        # Signals whether the animal correctly finished min-sampling but failed
        # to poke any of the lateral ports within ChoiceDeadLine period
        self.MissedChoice[i_trial] = False
        # How long the animal remained fixated in center poke
        self.FixDur[i_trial] = None
        # How long between sample end and making a choice (timeout-choice
        # trials are excluded)
        self.MT[i_trial] = None
        # How long the animal sampled. If RewardAfterMinSampling is enabled and
        # animal completed min sampling, then it's equal to MinSample time,
        # otherwise it's how long the animal remained fixated in center-port
        # until it either poked-out or the max allowed sampling time was
        # reached.
        self.ST[i_trial] = None
        # Signals whether a reward was given to the animal (it also includes
        # if the animal poked into the correct reward port but poked out
        # afterwards and didn't receive a reward, due to 'RewardGrace' being
        # counted as reward).
        self.Rewarded[i_trial] = False
        # Signals whether a center-port reward was given after min-sampling
        # ends.
        self.RewardAfterMinSampling[i_trial] = False
        # Tracks the amount of water the animal received up to this point.
        # TODO: Check if RewardReceivedTotal is needed and calculate it using
        # the CalcRewObtained() function.
        # It will be updated later.
        self.RewardReceivedTotal[i_trial + 1] = 0

        self.TrialNumber[i_trial] = i_trial

        self.Timer.customInitialize[i_trial] = time.time()

        # Checking states and rewriting standard

        # Extract the states that were used in the last trial
        statesVisitedThisTrialNames = self.RawData.StatesVisitedNames(i_trial)
        statesVisitedThisTrialTimes = self.RawData.StatesVisitedTimes(i_trial)
        if str(MatrixState.WaitForStimulus) in statesVisitedThisTrialNames:
            lastWaitForStimulusStateTimes = statesVisitedThisTrialTimes[str(
                MatrixState.WaitForStimulus)][-1]
            lastTriggerWaitForStimulusStateTimes = statesVisitedThisTrialTimes[
                str(MatrixState.TriggerWaitForStimulus)][-1]
            self.FixDur[i_trial] = lastWaitForStimulusStateTimes[1] - \
                lastWaitForStimulusStateTimes[0] + \
                lastTriggerWaitForStimulusStateTimes[1] - \
                lastTriggerWaitForStimulusStateTimes[0]
        if str(MatrixState.stimulus_delivery) in statesVisitedThisTrialNames:
            stimulus_deliveryStateTimes = statesVisitedThisTrialTimes[str(
                MatrixState.stimulus_delivery)]
            if self.task_parameters.RewardAfterMinSampling:
                self.ST[i_trial] = diff(stimulus_deliveryStateTimes)
            else:
                # 'CenterPortRewardDelivery' state would exist even if no
                # 'RewardAfterMinSampling' is active, in such case it means
                # that min sampling is done and we are in the optional
                # sampling stage.
                if str(MatrixState.CenterPortRewardDelivery) in \
                        statesVisitedThisTrialNames and \
                        self.task_parameters.StimulusTime > \
                        self.task_parameters.MinSample:
                    CenterPortRewardDeliveryStateTimes = \
                        statesVisitedThisTrialTimes[
                            str(MatrixState.CenterPortRewardDelivery)]
                    self.ST[i_trial] = [
                        CenterPortRewardDeliveryStateTimes[0][1] -
                        stimulus_deliveryStateTimes[0][0]
                    ]
                else:
                    # This covers early_withdrawal
                    self.ST[i_trial] = diff(stimulus_deliveryStateTimes)

        if str(MatrixState.WaitForChoice) in statesVisitedThisTrialNames and \
            str(MatrixState.timeOut_missed_choice) not in \
                statesVisitedThisTrialNames:
            WaitForChoiceStateTimes = statesVisitedThisTrialTimes[str(
                MatrixState.WaitForChoice)]
            WaitForChoiceStateStartTimes = [
                start_time for start_time, end_time in WaitForChoiceStateTimes
            ]
            # We might have multiple WaitForChoice states if
            # HabituateIgnoreIncorrect is enabled
            self.MT[i_trial] = diff(WaitForChoiceStateStartTimes[:2])

        # Extract trial outcome. Check first if it's a wrong choice or a
        # HabituateIgnoreIncorrect but first choice was wrong choice
        if str(MatrixState.WaitForPunishStart) in \
            statesVisitedThisTrialNames or \
           str(MatrixState.RegisterWrongWaitCorrect) in \
                statesVisitedThisTrialNames:
            self.ChoiceCorrect[i_trial] = False
            # Correct choice = left
            if self.LeftRewarded[i_trial]:
                self.ChoiceLeft[i_trial] = False  # Left not chosen
            else:
                self.ChoiceLeft[i_trial] = True
            # Feedback waiting time
            if str(MatrixState.WaitForPunish) in statesVisitedThisTrialNames:
                WaitForPunishStateTimes = statesVisitedThisTrialTimes[str(
                    MatrixState.WaitForPunish)]
                WaitForPunishStartStateTimes = statesVisitedThisTrialTimes[str(
                    MatrixState.WaitForPunishStart)]
                self.FeedbackTime[i_trial] = WaitForPunishStateTimes[-1][
                    1] - WaitForPunishStartStateTimes[0][0]
            else:  # It was a RegisterWrongWaitCorrect state
                self.FeedbackTime[i_trial] = None
        # CorrectChoice
        elif str(MatrixState.WaitForRewardStart) in \
                statesVisitedThisTrialNames:
            self.ChoiceCorrect[i_trial] = True
            if self.CatchTrial[i_trial]:
                catch_stim_idx = GetCatchStimIdx(self.StimulusOmega[i_trial])
                # Lookup the stimulus probability and increase by its
                # 1/frequency.
                stim_val = self.StimulusOmega[i_trial] * 100
                if stim_val < 50:
                    stim_val = 100 - stim_val
                stim_prob = self.task_parameters.OmegaTable.columns.OmegaProb[
                    self.task_parameters.OmegaTable.columns.Omega.index(
                        stim_val)]
                sum_all_prob = sum(
                    self.task_parameters.OmegaTable.columns.OmegaProb)
                stim_prob = (1 + sum_all_prob - stim_prob) / sum_all_prob
                self.CatchCount[catch_stim_idx] += stim_prob
                self.LastSuccessCatchTial = i_trial
            # Feedback waiting time
            if str(MatrixState.WaitForReward) in statesVisitedThisTrialNames:
                WaitForRewardStateTimes = statesVisitedThisTrialTimes[str(
                    MatrixState.WaitForReward)]
                WaitForRewardStartStateTimes = statesVisitedThisTrialTimes[str(
                    MatrixState.WaitForRewardStart)]
                self.FeedbackTime[i_trial] = WaitForRewardStateTimes[-1][
                    1] - WaitForRewardStartStateTimes[0][0]
                # Correct choice = left
                if self.LeftRewarded[i_trial]:
                    self.ChoiceLeft[i_trial] = True  # Left chosen
                else:
                    self.ChoiceLeft[i_trial] = False
            else:
                warning("'WaitForReward' state should always appear"
                        " if 'WaitForRewardStart' was initiated")
        elif str(MatrixState.broke_fixation) in statesVisitedThisTrialNames:
            self.FixBroke[i_trial] = True
        elif str(MatrixState.early_withdrawal) in statesVisitedThisTrialNames:
            self.EarlyWithdrawal[i_trial] = True
        elif str(MatrixState.timeOut_missed_choice) in \
                statesVisitedThisTrialNames:
            self.Feedback[i_trial] = False
            self.MissedChoice[i_trial] = True
        if str(MatrixState.timeOut_SkippedFeedback) in \
                statesVisitedThisTrialNames:
            self.Feedback[i_trial] = False
        if str(MatrixState.Reward) in statesVisitedThisTrialNames:
            self.Rewarded[i_trial] = True
            self.RewardReceivedTotal[i_trial] += \
                self.task_parameters.RewardAmount
        if str(MatrixState.CenterPortRewardDelivery) in \
                statesVisitedThisTrialNames and \
           self.task_parameters.RewardAfterMinSampling:
            self.RewardAfterMinSampling[i_trial] = True
            self.RewardReceivedTotal[i_trial] += \
                self.task_parameters.CenterPortRewAmount
        if str(MatrixState.WaitCenterPortOut) in statesVisitedThisTrialNames:
            WaitCenterPortOutStateTimes = statesVisitedThisTrialTimes[str(
                MatrixState.WaitCenterPortOut)]
            self.ReactionTime[i_trial] = diff(WaitCenterPortOutStateTimes)
        else:
            # Assign with -1 so we can differentiate it from None trials
            # where the state potentially existed but we didn't calculate it
            self.ReactionTime[i_trial] = -1
        # State-independent fields
        self.StimDelay[i_trial] = self.task_parameters.StimDelay
        self.FeedbackDelay[i_trial] = self.task_parameters.FeedbackDelay
        self.MinSample[i_trial] = self.task_parameters.MinSample
        self.RewardMagnitude[i_trial + 1] = [
            self.task_parameters.RewardAmount,
            self.task_parameters.RewardAmount
        ]
        self.CenterPortRewAmount[i_trial +
                                 1] = self.task_parameters.CenterPortRewAmount
        self.PreStimCntrReward[
            i_trial + 1] = self.task_parameters.PreStimuDelayCntrReward
        self.Timer.customExtractData[i_trial] = time.time()

        # If we are running grating experiments,
        # add the grating orientation that was used
        if self.task_parameters.ExperimentType == \
                ExperimentType.GratingOrientation:
            self.GratingOrientation[
                i_trial] = self.drawParams.gratingOrientation

        # Updating Delays
        # stimulus delay
        if self.task_parameters.StimDelayAutoincrement:
            if self.FixBroke[i_trial]:
                self.task_parameters.StimDelay = max(
                    self.task_parameters.StimDelayMin,
                    min(
                        self.task_parameters.StimDelayMax,
                        self.StimDelay[i_trial] -
                        self.task_parameters.StimDelayDecr))
            else:
                self.task_parameters.StimDelay = min(
                    self.task_parameters.StimDelayMax,
                    max(
                        self.task_parameters.StimDelayMin,
                        self.StimDelay[i_trial] +
                        self.task_parameters.StimDelayIncr))
        else:
            if not self.FixBroke[i_trial]:
                self.task_parameters.StimDelay = random_unif(
                    self.task_parameters.StimDelayMin,
                    self.task_parameters.StimDelayMax)
            else:
                self.task_parameters.StimDelay = self.StimDelay[i_trial]
        self.Timer.customStimDelay[i_trial] = time.time()

        # min sampling time
        if i_trial > self.task_parameters.StartEasyTrials:
            if self.task_parameters.MinSampleType == MinSampleType.FixMin:
                self.task_parameters.MinSample = \
                    self.task_parameters.MinSampleMin
            elif self.task_parameters.MinSampleType == \
                    MinSampleType.AutoIncr:
                # Check if animal completed pre-stimulus delay successfully
                if not self.FixBroke[i_trial]:
                    if self.Rewarded[i_trial]:
                        min_sample_incremented = self.MinSample[
                            i_trial] + self.task_parameters.MinSampleIncr
                        self.task_parameters.MinSample = min(
                            self.task_parameters.MinSampleMax,
                            max(self.task_parameters.MinSampleMin,
                                min_sample_incremented))
                    elif self.EarlyWithdrawal[i_trial]:
                        min_sample_decremented = self.MinSample[
                            i_trial] - self.task_parameters.MinSampleDecr
                        self.task_parameters.MinSample = max(
                            self.task_parameters.MinSampleMin,
                            min(self.task_parameters.MinSampleMax,
                                min_sample_decremented))
                else:
                    # Read new updated GUI values
                    self.task_parameters.MinSample = max(
                        self.task_parameters.MinSampleMin,
                        min(self.task_parameters.MinSampleMax,
                            self.MinSample[i_trial]))
            elif self.task_parameters.MinSampleType == \
                    MinSampleType.RandBetMinMax_DefIsMax:
                use_rand = rand(1, 1) < self.task_parameters.MinSampleRandProb
                if not use_rand:
                    self.task_parameters.MinSample = \
                        self.task_parameters.MinSampleMax
                else:
                    min_sample_difference = \
                        self.task_parameters.MinSampleMax - \
                        self.task_parameters.MinSampleMin
                    self.task_parameters.MinSample = \
                        min_sample_difference * \
                        rand(1, 1) + self.task_parameters.MinSampleMin
            elif self.task_parameters.MinSampleType == \
                    MinSampleType.RandNumIntervalsMinMax_DefIsMax:
                use_rand = rand(1, 1) < self.task_parameters.MinSampleRandProb
                if not use_rand:
                    self.task_parameters.MinSample = \
                        self.task_parameters.MinSampleMax
                else:
                    self.task_parameters.MinSampleNumInterval = round(
                        self.task_parameters.MinSampleNumInterval)
                    if self.task_parameters.MinSampleNumInterval == 0 or \
                       self.task_parameters.MinSampleNumInterval == 1:
                        self.task_parameters.MinSample = \
                            self.task_parameters.MinSampleMin
                    else:
                        min_sample_difference = \
                            self.task_parameters.MinSampleMax - \
                            self.task_parameters.MinSampleMin
                        step = min_sample_difference / (
                            self.task_parameters.MinSampleNumInterval - 1)
                        intervals = list(
                            range(self.task_parameters.MinSampleMin,
                                  self.task_parameters.MinSampleMax + 1, step))
                        intervals_idx = randi(
                            1, self.task_parameters.MinSampleNumInterval)
                        print("Intervals:")  # disp("Intervals:");
                        print(intervals)  # disp(intervals)
                        self.task_parameters.MinSample = intervals[
                            intervals_idx]
            else:
                error('Unexpected MinSampleType value')
        self.Timer.customMinSampling[i_trial] = time.time()

        # feedback delay
        if self.task_parameters.FeedbackDelaySelection == \
                FeedbackDelaySelection.none:
            self.task_parameters.FeedbackDelay = 0
        elif self.task_parameters.FeedbackDelaySelection == \
                FeedbackDelaySelection.AutoIncr:
            # If the feedback period was not completed, decrement the feedback
            # delay from its last value.
            # Do we consider the case where 'broke_fixation' or
            # 'early_withdrawal' terminated the trial early?
            if not self.Feedback[i_trial]:
                feedback_delay_decremented = self.FeedbackDelay[
                    i_trial] - self.task_parameters.FeedbackDelayDecr
                self.task_parameters.FeedbackDelay = max(
                    self.task_parameters.FeedbackDelayMin,
                    min(self.task_parameters.FeedbackDelayMax,
                        feedback_delay_decremented))
            else:
                # Increase the feedback delay if the feedback was successfully
                # completed in the last trial, or use the GUI value that the
                # user updated if needed.
                # Should we also consider here the case where 'broke_fixation'
                # or 'early_withdrawal' terminated the trial early?
                feedback_delay_incremented = self.FeedbackDelay[
                    i_trial] + self.task_parameters.FeedbackDelayIncr
                self.task_parameters.FeedbackDelay = min(
                    self.task_parameters.FeedbackDelayMax,
                    max(self.task_parameters.FeedbackDelayMin,
                        feedback_delay_incremented))
        elif self.task_parameters.FeedbackDelaySelection == \
                FeedbackDelaySelection.TruncExp:
            self.task_parameters.FeedbackDelay = TruncatedExponential(
                self.task_parameters.FeedbackDelayMin,
                self.task_parameters.FeedbackDelayMax,
                self.task_parameters.FeedbackDelayTau)
        elif self.task_parameters.FeedbackDelaySelection == \
                FeedbackDelaySelection.Fix:
            #     ATTEMPT TO GRAY OUT FIELDS
            if self.task_parametersMeta.FeedbackDelay.Style != 'edit':
                self.task_parametersMeta.FeedbackDelay.Style = 'edit'
            self.task_parameters.FeedbackDelay = \
                self.task_parameters.FeedbackDelayMax
        else:
            error('Unexpected FeedbackDelaySelection value')
        self.Timer.customFeedbackDelay[i_trial] = time.time()

        # Drawing future trials

        # Calculate bias
        # Consider bias only on the last 8 trials.
        # indicesRwdLi = find(self.Rewarded,8,'last');
        # if length(indicesRwdLi) ~= 0
        #   indicesRwd = indicesRwdLi(1);
        # else
        #   indicesRwd = 1;
        # end
        LAST_TRIALS = 20
        indicesRwd = iff(i_trial > LAST_TRIALS, i_trial - LAST_TRIALS, 1)
        # ndxRewd = self.Rewarded(indicesRwd:i_trial);
        choice_correct_slice = self.ChoiceCorrect[indicesRwd:i_trial + 1]
        choice_left_slice = self.ChoiceLeft[indicesRwd:i_trial + 1]
        left_rewarded_slice = self.LeftRewarded[indicesRwd:i_trial + 1]
        ndxLeftRewd = [
            choice_c and choice_l for choice_c, choice_l in zip(
                choice_correct_slice, choice_left_slice)
        ]
        ndxLeftRewDone = [
            l_rewarded
            and choice_l is not None for l_rewarded, choice_l in zip(
                left_rewarded_slice, choice_left_slice)
        ]
        ndxRightRewd = [
            choice_c and not choice_l for choice_c, choice_l in zip(
                choice_correct_slice, choice_left_slice)
        ]
        ndxRightRewDone = [
            not l_rewarded
            and choice_l is not None for l_rewarded, choice_l in zip(
                left_rewarded_slice, choice_left_slice)
        ]
        if not any(ndxLeftRewDone):
            # Since we don't have trials on this side, measure by how well the
            # animal was performing on the other side. If it did badly on that
            # side, then consider this side's performance to be good so it
            # would still get more trials on the other side.
            PerfL = 1 - (sum(ndxRightRewd) / (LAST_TRIALS * 2))
        else:
            PerfL = sum(ndxLeftRewd) / sum(ndxLeftRewDone)
        if not any(ndxRightRewDone):
            PerfR = 1 - (sum(ndxLeftRewd) / (LAST_TRIALS * 2))
        else:
            PerfR = sum(ndxRightRewd) / sum(ndxRightRewDone)
        self.task_parameters.CalcLeftBias = (PerfL - PerfR) / 2 + 0.5

        choiceMadeTrials = [
            choice_c is not None for choice_c in self.ChoiceCorrect
        ]
        rewardedTrialsCount = sum([r is True for r in self.Rewarded])
        lengthChoiceMadeTrials = len(choiceMadeTrials)
        if lengthChoiceMadeTrials >= 1:
            performance = rewardedTrialsCount / lengthChoiceMadeTrials
            self.task_parameters.Performance = [
                f'{performance * 100:.2f}', '#/',
                str(lengthChoiceMadeTrials), 'T'
            ]
            performance = rewardedTrialsCount / (i_trial + 1)
            self.task_parameters.AllPerformance = [
                f'{performance * 100:.2f}', '#/',
                str(i_trial + 1), 'T'
            ]
            NUM_LAST_TRIALS = 20
            if i_trial > NUM_LAST_TRIALS:
                if lengthChoiceMadeTrials > NUM_LAST_TRIALS:
                    rewardedTrials_ = choiceMadeTrials[
                        lengthChoiceMadeTrials - NUM_LAST_TRIALS +
                        1:lengthChoiceMadeTrials + 1]
                    performance = sum(rewardedTrials_) / NUM_LAST_TRIALS
                    self.task_parameters.Performance = [
                        self.task_parameters.Performance, ' - ',
                        f'{performance * 100:.2f}', '#/',
                        str(NUM_LAST_TRIALS), 'T'
                    ]
                rewardedTrialsCount = sum(
                    self.Rewarded[i_trial - NUM_LAST_TRIALS + 1:i_trial + 1])
                performance = rewardedTrialsCount / NUM_LAST_TRIALS
                self.task_parameters.AllPerformance = [
                    self.task_parameters.AllPerformance, ' - ',
                    f'{performance * 100:.2f}', '#/',
                    str(NUM_LAST_TRIALS), 'T'
                ]
        self.Timer.customCalcBias[i_trial] = time.time()

        # Create future trials
        # Check if it's time to generate more future trials
        if i_trial > len(self.DV) - Const.PRE_GENERATE_TRIAL_CHECK:
            # Do bias correction only if we have enough trials
            # sum(ndxRewd) > Const.BIAS_CORRECT_MIN_RWD_TRIALS
            if self.task_parameters.CorrectBias and i_trial > 7:
                LeftBias = self.task_parameters.CalcLeftBias
                # if LeftBias < 0.2 || LeftBias > 0.8 # Bias is too much,
                # swing it all the way to the other side
                # LeftBias = round(LeftBias);
                # else
                if 0.45 <= LeftBias and LeftBias <= 0.55:
                    LeftBias = 0.5
                if LeftBias is None:
                    print(f'Left bias is None.')
                    LeftBias = 0.5
            else:
                LeftBias = self.task_parameters.LeftBias
            self.Timer.customAdjustBias[i_trial] = time.time()

            # Adjustment of P(Omega) to make sure that sum(P(Omega))=1
            if self.task_parameters.StimulusSelectionCriteria != \
                    StimulusSelectionCriteria.BetaDistribution:
                omega_prob_sum = sum(
                    self.task_parameters.OmegaTable.columns.OmegaProb)
                # Avoid having no probability and avoid dividing by zero
                if omega_prob_sum == 0:
                    self.task_parameters.OmegaTable.columns.OmegaProb = [1] * \
                        len(self.task_parameters.OmegaTable.columns.OmegaProb)
                self.task_parameters.OmegaTable.columns.OmegaProb = [
                    omega_prob / omega_prob_sum for omega_prob in
                    self.task_parameters.OmegaTable.columns.OmegaProb
                ]
            self.Timer.customCalcOmega[i_trial] = time.time()

            # make future trials
            lastidx = len(self.DV) - 1
            # Generate guaranteed equal possibility of >0.5 and <0.5
            IsLeftRewarded = [0] * round(
                Const.PRE_GENERATE_TRIAL_COUNT * LeftBias) + [1] * round(
                    Const.PRE_GENERATE_TRIAL_COUNT * (1 - LeftBias))
            # Shuffle array and convert it
            random.shuffle(IsLeftRewarded)
            IsLeftRewarded = [
                l_rewarded > LeftBias for l_rewarded in IsLeftRewarded
            ]
            self.Timer.customPrepNewTrials[i_trial] = time.time()
            for a in range(Const.PRE_GENERATE_TRIAL_COUNT):
                # If it's a fifty-fifty trial, then place the stimulus in the
                # middle (a 50/50 trial)
                if rand(1, 1) < self.task_parameters.Percent50Fifty and \
                    (lastidx + a) > \
                        self.task_parameters.StartEasyTrials:
                    self.StimulusOmega[lastidx + a] = 0.5
                else:
                    if self.task_parameters.StimulusSelectionCriteria == \
                            StimulusSelectionCriteria.BetaDistribution:
                        # Divide beta by 4 if we are in an easy trial
                        beta_div_condition = (lastidx + a) <= \
                            self.task_parameters.StartEasyTrials
                        BetaDiv = iff(beta_div_condition, 4, 1)
                        betarnd_param = \
                            self.task_parameters.BetaDistAlphaNBeta / \
                            BetaDiv
                        Intensity = betarnd(betarnd_param, betarnd_param)
                        # prevent extreme values
                        Intensity = iff(Intensity < 0.1, 0.1, Intensity)
                        # prevent extreme values
                        Intensity = iff(Intensity > 0.9, 0.9, Intensity)
                    elif self.task_parameters.\
                        StimulusSelectionCriteria == \
                            StimulusSelectionCriteria.DiscretePairs:
                        if (lastidx + a) <= \
                                self.task_parameters.StartEasyTrials:
                            index = next(prob[0] for prob in enumerate(
                                self.task_parameters.OmegaTable.columns.
                                OmegaProb) if prob[1] > 0)
                            Intensity = \
                                self.task_parameters.OmegaTable.Omega[
                                    index] / 100
                        else:
                            # Choose a value randomly given each value's
                            # probability
                            Intensity = randsample(
                                self.task_parameters.OmegaTable.columns.Omega,
                                weights=self.task_parameters.OmegaTable.
                                columns.OmegaProb)[0] / 100
                    else:
                        error('Unexpected StimulusSelectionCriteria')
                    # In case of beta distribution, our distribution is
                    # symmetric, so prob < 0.5 is == prob > 0.5, so we can
                    # just pick the value that corrects the bias
                    if (IsLeftRewarded[a] and Intensity < 0.5) or \
                       (not IsLeftRewarded[a] and Intensity >= 0.5):
                        Intensity = -Intensity + 1
                    self.StimulusOmega[lastidx + a] = Intensity

                if self.task_parameters.ExperimentType == \
                        ExperimentType.Auditory:
                    DV = CalcAudClickTrain(lastidx + a)
                elif self.task_parameters.ExperimentType == \
                        ExperimentType.LightIntensity:
                    DV = CalcLightIntensity(lastidx + a, self)
                elif self.task_parameters.ExperimentType == \
                        ExperimentType.GratingOrientation:
                    DV = CalcGratingOrientation(lastidx + a)
                elif self.task_parameters.ExperimentType == \
                        ExperimentType.RandomDots:
                    DV = CalcDotsCoherence(lastidx + a)
                else:
                    error('Unexpected ExperimentType')
                if DV > 0:
                    self.LeftRewarded[lastidx + a] = True
                elif DV < 0:
                    self.LeftRewarded[lastidx + a] = False
                else:
                    # It's equal distribution
                    self.LeftRewarded[lastidx + a] = rand() < 0.5
                # cross-modality difficulty for plotting
                #  0 <= (left - right) / (left + right) <= 1
                self.DV[lastidx + a] = DV
            self.Timer.customGenNewTrials[i_trial] = time.time()
        else:
            self.Timer.customAdjustBias[i_trial] = 0
            self.Timer.customCalcOmega[i_trial] = 0
            self.Timer.customPrepNewTrials[i_trial] = 0
            self.Timer.customGenNewTrials[i_trial] = 0

        # Update RDK GUI
        self.task_parameters.OmegaTable.columns.RDK = [
            (value - 50) * 2
            for value in self.task_parameters.OmegaTable.columns.Omega
        ]
        # Set current stimulus for next trial
        DV = self.DV[i_trial + 1]
        if self.task_parameters.ExperimentType == \
                ExperimentType.RandomDots:
            self.task_parameters.CurrentStim = \
                f"{abs(DV / 0.01)}{iff(DV < 0, '# R cohr.', '# L cohr.')}"
        else:
            # Set between -100 to +100
            StimIntensity = f'{iff(DV > 0, (DV + 1) / 0.02, (DV - 1) / -0.02)}'
            self.task_parameters.CurrentStim = \
                f"{StimIntensity}{iff(DV < 0, '# R', '# L')}"

        self.Timer.customFinalizeUpdate[i_trial] = time.time()

        # determine if optogenetics trial
        OptoEnabled = rand(1, 1) < self.task_parameters.OptoProb
        if i_trial < self.task_parameters.StartEasyTrials:
            OptoEnabled = False
        self.OptoEnabled[i_trial + 1] = OptoEnabled
        self.task_parameters.IsOptoTrial = iff(OptoEnabled, 'true', 'false')

        # determine if catch trial
        if i_trial < self.task_parameters.StartEasyTrials or \
                self.task_parameters.PercentCatch == 0:
            self.CatchTrial[i_trial + 1] = False
        else:
            every_n_trials = round(1 / self.task_parameters.PercentCatch)
            limit = round(every_n_trials * 0.2)
            lower_limit = every_n_trials - limit
            upper_limit = every_n_trials + limit
            if not self.Rewarded[i_trial] or i_trial + 1 < \
                    self.LastSuccessCatchTial + lower_limit:
                self.CatchTrial[i_trial + 1] = False
            elif i_trial + 1 < self.LastSuccessCatchTial + upper_limit:
                # TODO: If OmegaProb changed since last time, then redo it
                non_zero_prob = [
                    self.task_parameters.OmegaTable.Omega[i] / 100
                    for i, prob in enumerate(
                        self.task_parameters.OmegaTable.columns.OmegaProb)
                    if prob > 0
                ]
                complement_non_zero_prob = [1 - prob for prob in non_zero_prob]
                inverse_non_zero_prob = non_zero_prob[::-1]
                active_stim_idxs = GetCatchStimIdx(complement_non_zero_prob +
                                                   inverse_non_zero_prob)
                cur_stim_idx = GetCatchStimIdx(self.StimulusOmega[i_trial + 1])
                min_catch_counts = min(self.CatchCount[i]
                                       for i in active_stim_idxs)
                min_catch_idxs = list(
                    set(active_stim_idxs).intersection({
                        i
                        for i, cc in enumerate(self.CatchCount)
                        if floor(cc) == min_catch_counts
                    }))
                self.CatchTrial[i_trial + 1] = cur_stim_idx in min_catch_idxs
            else:
                self.CatchTrial[i_trial + 1] = True
        # Create as char vector rather than string so that
        # GUI sync doesn't complain
        self.task_parameters.IsCatch = iff(self.CatchTrial[i_trial + 1],
                                           'true', 'false')
        # Determine if Forced LED trial:
        if self.task_parameters.PortLEDtoCueReward:
            self.ForcedLEDTrial[i_trial + 1] = rand(1, 1) < \
                self.task_parameters.PercentForcedLEDTrial
        else:
            self.ForcedLEDTrial[i_trial + 1] = False
        self.Timer.customCatchNForceLed[i_trial] = time.time()
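
The CalcLeftBias update above maps the left/right performance difference onto [0, 1]: equal performance gives 0.5, and better left performance pushes the value above 0.5. A standalone sketch of just that arithmetic:

# Sketch only: bias is 0.5 when both sides perform equally well.
def calc_left_bias(perf_l, perf_r):
    return (perf_l - perf_r) / 2.0 + 0.5

print(calc_left_bias(0.8, 0.8))  # 0.5
print(calc_left_bias(1.0, 0.0))  # 1.0
print(calc_left_bias(0.0, 1.0))  # 0.0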
Example #18
def get_stars_coroutines(canvas):
    """return list of blinking coroutines"""
    return [
        blink(canvas, row, column, random.choice(STARS), rand(0, 1))
        for row, column in get_random_coordinates_list(canvas)
    ]
def _get_random_speed():
    """return random speed for obstacle"""
    return rand(MIN_OBSTACLES_SPEED, MAX_OBSTACLES_SPEED)