Example #1
    def detect(self, img, min_size = 20, text_threshold = 0.7, low_text = 0.4,\
               link_threshold = 0.4,canvas_size = 2560, mag_ratio = 1.,\
               slope_ths = 0.1, ycenter_ths = 0.5, height_ths = 0.5,\
               width_ths = 0.5, add_margin = 0.1, reformat=True):

        if reformat:
            img, img_cv_grey = reformat_input(img)

        text_box = get_textbox(self.detector, img, canvas_size, mag_ratio,\
                               text_threshold, link_threshold, low_text,\
                               False, self.device)
        horizontal_list, free_list = group_text_box(text_box, slope_ths,\
                                                    ycenter_ths, height_ths,\
                                                    width_ths, add_margin)

        if min_size:
            horizontal_list = [
                i for i in horizontal_list
                if max(i[1] - i[0], i[3] - i[2]) > min_size
            ]
            free_list = [
                i for i in free_list
                if max(diff([c[0] for c in i]), diff([c[1]
                                                      for c in i])) > min_size
            ]

        return horizontal_list, free_list
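In this example, diff is applied to the x- or y-coordinates of a free-form text box and the result is compared against min_size, so it is presumably a spread helper. A minimal sketch under that assumption (the project's own helper may differ):

def diff(input_list):
    # Hypothetical helper: spread (max - min) of a list of coordinates.
    return max(input_list) - min(input_list)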
Example #2
File: SA.py Project: vsriv9394/pyFluid1DUM
    def evalResidual(self, y, nu, states, beta_inv, res):

        u     = states[0::2]
        nu_SA = states[1::2]

        uy     = diff(y, u)
        nu_SAy = diff(y, nu_SA)

        uyy     = diff2(y, u)
        nu_SAyy = diff2(y, nu_SA)

        nu_t  = self.getEddyViscosity(nu_SA, nu, beta_inv)
        nu_ty = diff(y, nu_t)

        cw1   = self.cb1 / self.kappa**2 + (1.0+self.cb2)/self.sigma 

        S     = np.abs(uy)
        S[1:] = S[1:] + nu_SA[1:]/(self.kappa**2 * y[1:]**2) * (1.0 - beta_inv[1:]*nu_SA[1:]/(nu + beta_inv[1:]*nu_t[1:]))

        nu_SA_res =   self.cb1 * S * nu_SA \
                    + self.getDestruction(S, y, nu_SA, beta_inv, cw1) \
                    + (1.0/self.sigma) * ((nu+nu_SA) * nu_SAyy + (self.cb2 + 1.0) * nu_SAy**2)

        gridrat = (y[-1] - y[-2])/(y[-1] - y[-3])
        nu_SA_res[0]   = -nu_SA[0]
        nu_SA_res[-1]  = (nu_SA[-1]*(1-gridrat*gridrat) -\
                          nu_SA[-2] +\
                          nu_SA[-3]*gridrat*gridrat)/(gridrat*(y[-3] - y[-2]))

        res[1::2] = nu_SA_res

        tau12y = nu_t*uyy + nu_ty*uy

        return tau12y, nu_t
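Here diff(y, u) and diff2(y, u) act as first and second derivatives of a field on the wall-normal grid y. A hedged sketch under that assumption, using numpy.gradient to handle a possibly non-uniform grid:

import numpy as np

def diff(y, f):
    # Assumed behaviour: first derivative df/dy on the grid y.
    return np.gradient(f, y)

def diff2(y, f):
    # Assumed behaviour: second derivative d2f/dy2 on the grid y.
    return np.gradient(np.gradient(f, y), y)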
Example #3
    def get_voxel_bounds(self, image_data, num_voxels=4000):

        camera_positions = np.vstack(
            [c.translation() for c in image_data.frames])

        xlim = [camera_positions[:, 0].min(), camera_positions[:, 0].max()]
        ylim = [camera_positions[:, 1].min(), camera_positions[:, 1].max()]
        zlim = [camera_positions[:, 2].min(), camera_positions[:, 2].max()]

        camera_range = .9 * np.sqrt(diff(xlim)**2 + diff(ylim)**2)
        for f in image_data.frames:
            viewpoint = f.translation(
            ) - camera_range * f.get_camera_direction()
            zlim[0] = min(zlim[0], viewpoint[2])
            zlim[1] = max(zlim[1], viewpoint[2])

        ####################### Debugging
        shift_x = (diff(xlim) / 4) * np.array([1, -1])

        #shift_y = (diff(ylim) / 4) * np.array([1, -1])
        shift_y = (diff(ylim) / 4) * np.array([1, -1]) + [-.05, -0.05]

        #print xlim, diff(xlim), shift_x, ylim, diff(ylim), shift_y
        #######################

        xlim = xlim + shift_x
        ylim = ylim + shift_y

        # frame_idx = 0
        # voxels, voxel_size = self.form_initial_voxels(xlim, ylim, zlim, 4000)
        # voxels = self.carve(voxels, image_data.frames[frame_idx])

        # if voxels.shape[0] > 1:
        # 	xlimp = [voxels[:,0].min(), voxels[:,0].max()]
        # 	ylimp = [voxels[:,1].min(), voxels[:,1].max()]
        # 	zlimp = [voxels[:,2].min(), voxels[:,2].max()]

        # 	xlimp = xlimp + voxel_size * 1 * np.array([-1, 1])
        # 	ylimp = ylimp + voxel_size * 1 * np.array([-1, 1])
        # 	zlimp = zlimp + voxel_size * 1 * np.array([-1, 1])

        # 	xlim = [np.max([xlim[0], xlimp[0]]), np.min([xlim[1], xlimp[1]])]
        # 	ylim = [np.max([ylim[0], ylimp[0]]), np.min([ylim[1], ylimp[1]])]
        # 	zlim = [np.max([zlim[0], zlimp[0]]), np.min([zlim[1], zlimp[1]])]

        xlim = [-0.5, .2]
        ylim = [-.01, .8]
        zlim = [-.05, 1]

        return xlim, ylim, zlim
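In this and the other voxel-carving examples below, diff is called on two-element [min, max] limit pairs and the result is used directly in arithmetic, so it behaves like numpy.diff on a length-2 sequence. A minimal sketch under that assumption:

import numpy as np

def diff(lim):
    # Assumed behaviour: successive differences; for a [min, max] pair this
    # is a one-element array holding the extent max - min.
    return np.diff(lim)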
Example #4
	def updateLabMembers(self, labID, newMembers):
		
		#print "Content-type:text/html"		# TEMPORARY, REMOVE AFTER DEBUGGING TO HAVE SCRIPT REDIRECT PROPERLY!!!!!!
		#print					# DITTO
		
		db = self.db
		cursor = self.cursor
		
		uHandler = UserHandler(db, cursor)
			
		# Find out which members in old members list are not in new members list and delete them	
		oldMembers = self.findMembers(labID)
				
		# fetch the IDs of members in oldMembers (a list of User objects)
		oldMemIDs = []
		
		for m in oldMembers:
			oldMemIDs.append(m.getUserID())
				
		# Cast each element in newMembers to INT
		newMemIDs = []
		
		for n in newMembers:
			newMemIDs.append(int(n))
		
		memDel = utils.diff(oldMemIDs, newMemIDs)
		
		for memID in memDel:
			#self.deleteMember(labID, memID)
			uHandler.deleteUser(memID)
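utils.diff here selects the member IDs present in the old list but absent from the new one, i.e. an order-preserving list difference. A hedged sketch of such a helper (the real utils module may implement it differently):

def diff(first, second):
    # Hypothetical helper: items of `first` that do not appear in `second`.
    second = set(second)
    return [item for item in first if item not in second]

# e.g. diff([11, 12, 13], [12, 14]) -> [11, 13]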
Example #5
    def updateLabMembers(self, labID, newMembers):

        #print "Content-type:text/html"		# TEMPORARY, REMOVE AFTER DEBUGGING TO HAVE SCRIPT REDIRECT PROPERLY!!!!!!
        #print					# DITTO

        db = self.db
        cursor = self.cursor

        uHandler = UserHandler(db, cursor)

        # Find out which members in old members list are not in new members list and delete them
        oldMembers = self.findMembers(labID)

        # fetch the IDs of members in oldMembers (a list of User objects)
        oldMemIDs = []

        for m in oldMembers:
            oldMemIDs.append(m.getUserID())

        # Cast each element in newMembers to INT
        newMemIDs = []

        for n in newMembers:
            newMemIDs.append(int(n))

        memDel = utils.diff(oldMemIDs, newMemIDs)

        for memID in memDel:
            #self.deleteMember(labID, memID)
            uHandler.deleteUser(memID)
Example #6
 def compare(self, templater, tmpfolder, profile, src, dst):
     '''Compare the temporarily generated dotfile with the local one'''
     self.comparing = True
     retval = False, ''
     drysaved = self.dry
     self.dry = False
     diffsaved = self.diff
     self.diff = False
     src = os.path.expanduser(src)
     dst = os.path.expanduser(dst)
     if not os.path.exists(dst):
         retval = False, '\"%s\" does not exist on local\n' % (dst)
     else:
         ret, tmpdst = self._install_to_temp(templater,
                                             profile,
                                             src, dst,
                                             tmpfolder)
         if ret:
             diff = utils.diff(tmpdst, dst, log=False, raw=False)
             if diff == '':
                 retval = True, ''
             else:
                 retval = False, diff
     self.dry = drysaved
     self.diff = diffsaved
     self.comparing = False
     return retval
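utils.diff(tmpdst, dst, log=False, raw=False) is expected to return an empty string when the two files are identical and a textual diff otherwise. A minimal difflib-based sketch with the same contract (the actual dotdrop helper may shell out to the diff command instead):

import difflib

def diff(left, right, log=False, raw=False):
    # Hypothetical stand-in: unified diff of two text files,
    # returning an empty string when their contents match.
    with open(left) as f_left, open(right) as f_right:
        left_lines = f_left.readlines()
        right_lines = f_right.readlines()
    return ''.join(difflib.unified_diff(left_lines, right_lines,
                                        fromfile=left, tofile=right))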
Example #7
 def compare(self, templater, tmpfolder, profile, src, dst):
     '''Compare the temporarily generated dotfile with the local one'''
     retval = False
     drysaved = self.dry
     self.dry = False
     diffsaved = self.diff
     self.diff = False
     src = os.path.expanduser(src)
     dst = os.path.expanduser(dst)
     if not os.path.exists(dst):
         self.log.warn('\"%s\" does not exist on local' % (dst))
     else:
         ret, tmpdst = self._install_to_temp(templater,
                                             profile,
                                             src, dst,
                                             tmpfolder)
         if ret:
             diff = utils.diff(tmpdst, dst, log=False, raw=False)
             if diff == '':
                 self.log.raw('same file')
                 retval = True
             else:
                 self.log.emph(diff)
     self.dry = drysaved
     self.diff = diffsaved
     return retval
Example #8
 def gradX1(self, X1, X2=None):
     # Compute the derivative of the kernel w.r.t. the first argument;
     # it should return an m * n * d tensor
     X1, X2 = rescale(np.exp(self.log_b_), X1, X2)
     D = diff(X1, X2) # m * n * d array
     K = np.exp(self.log_b_ * 2 - 0.5 * np.sum(np.square(D), axis=-1)) # sum along the last axis, which is d
     G = -D * K[:, :, None] / self.log_c_ # G(m, n, d) corresponds to the derivative of K(m, n) w.r.t. X1(m, d)
     return G
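For this kernel gradient, D = diff(X1, X2) must be an m * n * d tensor of pairwise differences X1[i] - X2[j]. A broadcasting sketch under that assumption:

import numpy as np

def diff(X1, X2=None):
    # Assumed behaviour: pairwise differences with shape (m, n, d),
    # where D[i, j] = X1[i] - X2[j].
    if X2 is None:
        X2 = X1
    return X1[:, None, :] - X2[None, :, :]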
Example #9
def solve1(voltage):
    """Multiply the count of one-jolt differences by the count of
    three-jolt differences in <voltage>."""

    diffs = diff(sorted(voltage + [0] + [max(voltage) + 3]))

    ones = len(list(filter(lambda a: a == 1, diffs)))
    threes = len(list(filter(lambda a: a == 3, diffs)))

    return ones * threes
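solve1 relies on diff returning the successive differences of the sorted joltage list (the behaviour of numpy.diff). A small sketch plus a worked call under that assumption:

def diff(values):
    # Assumed behaviour: successive differences, like numpy.diff.
    return [b - a for a, b in zip(values, values[1:])]

# e.g. solve1([1, 4, 5, 7]): the padded, sorted list is [0, 1, 4, 5, 7, 10],
# the differences are [1, 3, 1, 2, 3], giving 2 ones * 2 threes = 4.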
Example #10
    def form_initial_voxels(self, xlim, ylim, zlim, num_voxels):

        voxels = None
        voxel_size = None

        pattern_volume = diff(xlim) * diff(ylim) * diff(zlim)
        voxel_volume = pattern_volume / num_voxels
        voxel_size = abs(voxel_volume)**(1. / 3)

        xboxes = int(math.ceil(diff(xlim) / voxel_size))
        yboxes = int(math.ceil(diff(ylim) / voxel_size))
        zboxes = int(math.ceil(diff(zlim) / voxel_size))

        initx = xlim[0] + voxel_size / 2
        inity = ylim[0] + voxel_size / 2
        initz = zlim[0] + voxel_size / 2

        z = np.tile(
            np.arange(zboxes) * voxel_size + np.ones(zboxes) * initz,
            xboxes * yboxes)
        y = np.tile(
            np.repeat(np.arange(yboxes), zboxes) * voxel_size + inity, xboxes)
        x = np.repeat(np.arange(xboxes), yboxes * zboxes) * voxel_size + initx

        voxels = np.vstack((x, y, z)).T

        return voxels, voxel_size
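As a quick sanity check of the sizing arithmetic above, assume unit limits on all three axes and the default 4000 voxels:

# Worked example with illustrative values (unit cube, 4000 voxels):
pattern_volume = 1.0 * 1.0 * 1.0
voxel_volume = pattern_volume / 4000
voxel_size = abs(voxel_volume) ** (1. / 3)   # ~0.063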
Example #11
def patch_citizen(import_id, citizen_id):
    citizen_patch = request.get_json()['data']
    citizen = select_citizen(import_id, citizen_id)
    citizen_copy = citizen.copy()

    # update citizen
    for key in citizen.keys():
        if citizen_patch[key]:
            citizen[key] = citizen_patch[key]

    update_citizen(import_id, citizen)

    if citizen_patch['relatives']:
        past_relatives_ids = diff(citizen_copy['relatives'],
                                  citizen_patch['relatives'])
        future_relatives_ids = diff(citizen_patch['relatives'],
                                    citizen_copy['relatives'])
        # update past relatives
        for relative_id in past_relatives_ids:
            relative = select_citizen(import_id, relative_id)
            relative["relatives"].remove(citizen_id)
            update_citizen(import_id, relative)

        # update future relatives
        for relative_id in future_relatives_ids:
            relative = select_citizen(import_id, relative_id)
            relative["relatives"].append(citizen_id)
            update_citizen(import_id, relative)

        for relative_id in past_relatives_ids:
            delete_relation(import_id, relative_id, citizen_id)
            delete_relation(import_id, citizen_id, relative_id)

        for relative_id in future_relatives_ids:
            insert_relation(import_id, relative_id, citizen_id)
            insert_relation(import_id, citizen_id, relative_id)

    res = {'data': citizen}

    return jsonify(res), 200
Example #12
File: mbManager.py Project: mohyt/vba
    def handle_config(self):
        try:
            config = self.config.get()
        except:
            return   
        if (config.has_key('HeartBeatTime')):
            self.hb_interval = config['HeartBeatTime']
            self.timer = self.hb_interval
        config_data = config.get('Data')
        if (config_data == None or len(config_data) == 0):
            Log.warning('VBucket map missing in config')
            return False

        Log.debug('New config from VBS: %s', str(config_data))
        server_list = []
        for row in config_data:
            server_list.append(row['Destination'])
        
        self.hb_interval = config["HeartBeatTime"]

        servers_added = utils.diff(server_list, self.monitoring_host_list)
        servers_removed = utils.diff(self.monitoring_host_list, server_list)
        
        if len(servers_removed) > 0:
            Log.info("Will stop monitoring %s" %(servers_removed))
            for mb in servers_removed:
                if mb == MembaseManager.LOCAL:
                    continue
                self.stop_monitoring(mb)
                # remove from down list
                if mb in self.down_list:
                    self.down_list.remove(mb)

        if len(servers_added) > 0:
            Log.info("Start monitoring %s" %(servers_added))
            for mb in servers_added:
                if mb != "":
                    self.start_monitoring(mb)

        self.monitoring_host_list = server_list
Example #13
	def get_voxel_bounds(self, image_data, num_voxels = 4000):

		camera_positions = np.vstack([c.translation() for c in image_data.frames])

		xlim = [camera_positions[:,0].min(), camera_positions[:,0].max()]
		ylim = [camera_positions[:,1].min(), camera_positions[:,1].max()]
		zlim = [camera_positions[:,2].min(), camera_positions[:,2].max()]

		camera_range = .9 * np.sqrt(diff( xlim )**2 + diff( ylim )**2)
		for f in image_data.frames:
		    viewpoint = f.translation() - camera_range * f.get_camera_direction()
		    zlim[0] = min( zlim[0], viewpoint[2] )
		    zlim[1] = max( zlim[1], viewpoint[2] )


		shift_x = (diff(xlim) / 4) * np.array([1, -1])
		shift_y = (diff(ylim) / 4) * np.array([1, -1])
		
		xlim += shift_x
		# ylim += shift_y 

		frame_idx = 0
		voxels, voxel_size = self.form_initial_voxels(xlim, ylim, zlim, 4000)
		voxels = self.carve(voxels, image_data.frames[frame_idx])

		if voxels.shape[0] > 1:
			xlimp = [voxels[:,0].min(), voxels[:,0].max()]
			ylimp = [voxels[:,1].min(), voxels[:,1].max()]
			zlimp = [voxels[:,2].min(), voxels[:,2].max()]

			xlimp = xlimp + voxel_size * 1 * np.array([-1, 1])
			ylimp = ylimp + voxel_size * 1 * np.array([-1, 1])
			zlimp = zlimp + voxel_size * 1 * np.array([-1, 1])

			xlim = [np.max([xlim[0], xlimp[0]]), np.min([xlim[1], xlimp[1]])]
			ylim = [np.max([ylim[0], ylimp[0]]), np.min([ylim[1], ylimp[1]])]
			zlim = [np.max([zlim[0], zlimp[0]]), np.min([zlim[1], zlimp[1]])]

		return xlim, ylim, zlim
Example #14
def best_split(df, target_name):
    splits = dict({})
    y = df[target_name].values

    for col_name in diff(df.columns, [target_name]):
        vals = pd.unique(df[col_name])
        m = 0
        val_max = 0

        for val in vals:
            accuracy, precision, recall, f1_score = metrics(
                y, (df[col_name] >= val).values)
            if m < f1_score:
                m = f1_score
                val_max = val

        splits[col_name] = val_max

    return splits
Example #15
    def iterate(self, obstructs, c=None):
        r_new = self.r.copy()
        u_new = self.u.copy()
        if self.v_0 is not None:
            r_new = self.r + self.v_0 * self.u * self.dt
        if self.D is not None:
            r_new = utils.diff(r_new, self.D, self.dt)
        if self.p_0 is not None:
            u_new = self.tumble(u_new, c)
        if self.D_rot_0 is not None:
            u_new = utils.rot_diff(u_new, self.D_rot_0, self.dt)
        self.fold(r_new)

        # # randomise u if collided
        colls = self.collisions(r_new, u_new)

        self.colls = colls

        u_new[colls] = utils.sphere_pick(self.dim, colls.sum())
        # print(sum(colls))
        # D_rot_coll = 1.0
        # u_new[colls] = utils.rot_diff(u_new[colls], D_rot_coll, self.dt)

        self.displace(r_new, u_new, obstructs)
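In this particle model, utils.diff(r_new, D, dt) applies translational diffusion to the positions. A heavily hedged sketch of that step, assuming simple Brownian motion with diffusion constant D:

import numpy as np

def diff(r, D, dt):
    # Assumed behaviour: Brownian translational diffusion step, adding
    # Gaussian noise with variance 2 * D * dt per component.
    return r + np.sqrt(2.0 * D * dt) * np.random.standard_normal(r.shape)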
Example #16
 def retick(self):
     utils.debug('dts:', utils.diff(self._dts))
     self._lastDts = self._dts
     self._dts = [0]
Example #17
 def retick(self):
     utils.debug('dts:', utils.diff(self._dts))
     self._lastDts = self._dts
     self._dts = [0]
Example #18
File: server.py Project: riteme/toys
log.info('Image size: %sx%s' % (WIDTH, HEIGHT))

if __name__ == "__main__":
    from itertools import islice
    p = Pool(processes=len(users) / 3)
    BLOCK_SIZE = 16
    try:
        while True:
            fetched = utils.fetch()
            if not fetched:
                log.fatal('Failed to fetch canvas.')
                exit(-1)
            D = utils.slice(utils.fetch(), WIDTH, HEIGHT)
            log.info('Fetched canvas.')
            cnt = 0
            for idx in p.imap_unordered(utils.draw, utils.diff(D, T,
                                                               provider)):
                taken[idx] = False
                cnt += 1
            # for idx in p.imap_unordered(utils.draw, islice(utils.diff(D, T, provider), 0, BLOCK_SIZE)):
            #     taken[idx] = False
            log.info('Rendered %s pixel(s).' % cnt)
            if cnt:
                time.sleep(0.1)
            else:
                time.sleep(60)
    except:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        log.error('[%s] %s' % (exc_type.__name__, exc_value))
        log.error('Python traceback:\n' +
                  ''.join(traceback.format_tb(exc_traceback))[:-1])
        log.fatal('PAINT SERVICE EXITED.')
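Here utils.diff(D, T, provider) feeds the drawing pool, so it presumably yields one work item per pixel where the fetched canvas D differs from the target image T. A heavily hedged generator sketch; the provider argument and the work-item format are assumptions:

def diff(canvas, target, provider):
    # Hypothetical sketch: yield a work item for every pixel whose current
    # colour differs from the wanted colour.
    for y, (canvas_row, target_row) in enumerate(zip(canvas, target)):
        for x, (current, wanted) in enumerate(zip(canvas_row, target_row)):
            if current != wanted:
                yield (x, y, wanted, provider)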
Example #19
def dropsim(n, v, l, R, D, Dr, R_d, dim, t_max, dt, out, every):
    r_d = np.array(dim * [0.0])

    r = np.random.uniform(-R_d, R_d, size=[n, dim])
    u = utils.sphere_pick(dim, n)

    for i in range(n):
        # print(i)
        while True:
            r[i] = np.random.uniform(-R_d, R_d, dim)
            u[i] = utils.sphere_pick(dim)
            if obstructed(r[i], u[i], l, R, R_d, r_d):
                continue
            if i > 0 and np.any(collisions(r[:i + 1], u[:i + 1], l, R, R_d)):
                continue
            break

    if out is not None:
        np.savez(os.path.join(out, 'static'), l=l, R=R, R_d=R_d)

    t_scat = np.ones([n]) * np.inf
    r_scat = r.copy()
    t_relax = 1.8

    i = 0
    t = 0
    while t < t_max:
        # print(t)
        r_new = r.copy()
        u_new = u.copy()

        r_new = r + v * u * dt
        r_new = utils.diff(r_new, D, dt)
        u_new = utils.rot_diff(u_new, Dr, dt)

        seps = geom.cap_insphere_sep(r_new - u_new * l / 2.0, r_new + u_new * l / 2.0, R, r_d, R_d)
        over_mag = utils.vector_mag(seps) + R - R_d

        c = over_mag > 0.0

        # Translation
        u_seps = utils.vector_unit_nonull(seps[c])
        r_new[c] -= offset * u_seps * over_mag[c][:, np.newaxis]

        # Alignment
        u_dot_u_seps = np.sum(u_new[c] * u_seps, axis=-1)
        u_new[c] = utils.vector_unit_nonull(u_new[c] - u_seps * u_dot_u_seps[:, np.newaxis])

        reverts = np.zeros([n], dtype=bool)
        while True:
            c = collisions(r_new, u_new, l, R, R_d)
            if not np.any(c):
                break
            reverts += c
            r_new[c], u_new[c] = r[c], u[c]

        # u_new[reverts] = utils.sphere_pick(dim, reverts.sum())

        # Collisional rotational diffusion constant, in radians^2/s
        Dr_c = 20.0
        u_new[reverts] = utils.rot_diff(u_new[reverts], Dr_c, dt)

        while True:
            c = collisions(r_new, u_new, l, R, R_d)
            if not np.any(c):
                break
            r_new[c], u_new[c] = r[c], u[c]

        r, u = r_new.copy(), u_new.copy()

        i += 1
        t += dt

        if args.out is not None and not i % every:
            out_fname = '%010f' % t
            np.savez(os.path.join(out, 'dyn', out_fname), r=r, u=u)
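This simulation also reuses utils.rot_diff with a large collisional constant Dr_c. A crude first-order sketch of such a rotational-diffusion step for small dt, assuming unit orientation vectors (the project's implementation may rotate the vectors properly instead):

import numpy as np

def rot_diff(u, Dr, dt):
    # Rough approximation: perturb each unit vector with Gaussian noise of
    # variance 2 * Dr * dt per component, then renormalise.
    eta = np.sqrt(2.0 * Dr * dt) * np.random.standard_normal(u.shape)
    u_new = u + eta
    return u_new / np.linalg.norm(u_new, axis=-1, keepdims=True)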
Example #20
 def add_to_recommend_list(rs_list, products, pos, no):
     products = utils.diff(products, rs_list)
     n = len(products)
     for i in range(min(n, min(no, len(rs_list) - pos))):
         rs_list[pos + i] = products[i]
     return rs_list
Example #21
    def update(self, i_trial):
        # Standard values

        # Stores which lateral port the animal poked into (if any)
        self.ChoiceLeft[i_trial] = None
        # Stores whether the animal poked into the correct port (if any)
        self.ChoiceCorrect[i_trial] = None
        # Signals whether confidence was used in this trial. Set to false if
        # the lateral-port choice timed out (i.e., MissedChoice(i) is true).
        # It should also be set to false (but not due to a bug) if the animal
        # poked a lateral port but didn't complete the feedback period
        # (even when using the grace period).
        self.Feedback[i_trial] = True
        # How long the animal spent waiting for the reward (whether in correct
        # or in incorrect ports)
        self.FeedbackTime[i_trial] = None
        # Signals whether the animal broke fixation during stimulus delay state
        self.FixBroke[i_trial] = False
        # Signals whether the animal broke fixation during sampling but before
        # min-sampling ends
        self.EarlyWithdrawal[i_trial] = False
        # Signals whether the animal correctly finished min-sampling but failed
        # to poke any of the lateral ports within ChoiceDeadLine period
        self.MissedChoice[i_trial] = False
        # How long the animal remained fixated in center poke
        self.FixDur[i_trial] = None
        # How long between sample end and making a choice (timeout-choice
        # trials are excluded)
        self.MT[i_trial] = None
        # How long the animal sampled. If RewardAfterMinSampling is enabled and
        # animal completed min sampling, then it's equal to MinSample time,
        # otherwise it's how long the animal remained fixated in center-port
        # until it either poked-out or the max allowed sampling time was
        # reached.
        self.ST[i_trial] = None
        # Signals whether a reward was given to the animal (it also includes
        # if the animal poked into the correct reward port but poked out
        # afterwards and didn't receive a reward, due to 'RewardGrace' being
        # counted as reward).
        self.Rewarded[i_trial] = False
        # Signals whether a center-port reward was given after min-sampling
        # ends.
        self.RewardAfterMinSampling[i_trial] = False
        # Tracks the amount of water the animal received up to this point
        # TODO: Check if RewardReceivedTotal is needed and calculate it using
        # CalcRewObtained() function.
        # It will be updated later
        self.RewardReceivedTotal[i_trial + 1] = 0

        self.TrialNumber[i_trial] = i_trial

        self.Timer.customInitialize[i_trial] = time.time()

        # Checking states and rewriting standard

        # Extract the states that were used in the last trial
        statesVisitedThisTrialNames = self.RawData.StatesVisitedNames(i_trial)
        statesVisitedThisTrialTimes = self.RawData.StatesVisitedTimes(i_trial)
        if str(MatrixState.WaitForStimulus) in statesVisitedThisTrialNames:
            lastWaitForStimulusStateTimes = statesVisitedThisTrialTimes[str(
                MatrixState.WaitForStimulus)][-1]
            lastTriggerWaitForStimulusStateTimes = statesVisitedThisTrialTimes[
                str(MatrixState.TriggerWaitForStimulus)][-1]
            self.FixDur[i_trial] = lastWaitForStimulusStateTimes[1] - \
                lastWaitForStimulusStateTimes[0] + \
                lastTriggerWaitForStimulusStateTimes[1] - \
                lastTriggerWaitForStimulusStateTimes[0]
        if str(MatrixState.stimulus_delivery) in statesVisitedThisTrialNames:
            stimulus_deliveryStateTimes = statesVisitedThisTrialTimes[str(
                MatrixState.stimulus_delivery)]
            if self.task_parameters.RewardAfterMinSampling:
                self.ST[i_trial] = diff(stimulus_deliveryStateTimes)
            else:
                # 'CenterPortRewardDelivery' state would exist even if no
                # 'RewardAfterMinSampling' is active, in such case it means
                # that min sampling is done and we are in the optional
                # sampling stage.
                if str(MatrixState.CenterPortRewardDelivery) in \
                        statesVisitedThisTrialNames and \
                        self.task_parameters.StimulusTime > \
                        self.task_parameters.MinSample:
                    CenterPortRewardDeliveryStateTimes = \
                        statesVisitedThisTrialTimes[
                            str(MatrixState.CenterPortRewardDelivery)]
                    self.ST[i_trial] = [
                        CenterPortRewardDeliveryStateTimes[0][1] -
                        stimulus_deliveryStateTimes[0][0]
                    ]
                else:
                    # This covers early_withdrawal
                    self.ST[i_trial] = diff(stimulus_deliveryStateTimes)

        if str(MatrixState.WaitForChoice) in statesVisitedThisTrialNames and \
            str(MatrixState.timeOut_missed_choice) not in \
                statesVisitedThisTrialNames:
            WaitForChoiceStateTimes = statesVisitedThisTrialTimes[str(
                MatrixState.WaitForChoice)]
            WaitForChoiceStateStartTimes = [
                start_time for start_time, end_time in WaitForChoiceStateTimes
            ]
            # We might have multiple WaitForChoice states if
            # HabituateIgnoreIncorrect is enabled
            self.MT[-1] = diff(WaitForChoiceStateStartTimes[:2])

        # Extract trial outcome. Check first if it's a wrong choice or a
        # HabituateIgnoreIncorrect but first choice was wrong choice
        if str(MatrixState.WaitForPunishStart) in \
            statesVisitedThisTrialNames or \
           str(MatrixState.RegisterWrongWaitCorrect) in \
                statesVisitedThisTrialNames:
            self.ChoiceCorrect[i_trial] = False
            # Correct choice = left
            if self.LeftRewarded[i_trial]:
                self.ChoiceLeft[i_trial] = False  # Left not chosen
            else:
                self.ChoiceLeft[i_trial] = True
            # Feedback waiting time
            if str(MatrixState.WaitForPunish) in statesVisitedThisTrialNames:
                WaitForPunishStateTimes = statesVisitedThisTrialTimes[str(
                    MatrixState.WaitForPunish)]
                WaitForPunishStartStateTimes = statesVisitedThisTrialTimes[str(
                    MatrixState.WaitForPunishStart)]
                self.FeedbackTime[i_trial] = WaitForPunishStateTimes[-1][
                    1] - WaitForPunishStartStateTimes[0][0]
            else:  # It was a  RegisterWrongWaitCorrect state
                self.FeedbackTime[i_trial] = None
        # CorrectChoice
        elif str(MatrixState.WaitForRewardStart) in \
                statesVisitedThisTrialNames:
            self.ChoiceCorrect[i_trial] = True
            if self.CatchTrial[i_trial]:
                catch_stim_idx = GetCatchStimIdx(self.StimulusOmega[i_trial])
                # Lookup the stimulus probability and increase by its
                # 1/frequency.
                stim_val = self.StimulusOmega[i_trial] * 100
                if stim_val < 50:
                    stim_val = 100 - stim_val
                stim_prob = self.task_parameters.OmegaTable.columns.OmegaProb[
                    self.task_parameters.OmegaTable.columns.Omega.index(
                        stim_val)]
                sum_all_prob = sum(
                    self.task_parameters.OmegaTable.columns.OmegaProb)
                stim_prob = (1 + sum_all_prob - stim_prob) / sum_all_prob
                self.CatchCount[catch_stim_idx] += stim_prob
                self.LastSuccessCatchTial = i_trial
            # Feedback waiting time
            if str(MatrixState.WaitForReward) in statesVisitedThisTrialNames:
                WaitForRewardStateTimes = statesVisitedThisTrialTimes[str(
                    MatrixState.WaitForReward)]
                WaitForRewardStartStateTimes = statesVisitedThisTrialTimes[str(
                    MatrixState.WaitForRewardStart)]
                self.FeedbackTime[i_trial] = WaitForRewardStateTimes[-1][
                    1] - WaitForRewardStartStateTimes[0][0]
                # Correct choice = left
                if self.LeftRewarded[i_trial]:
                    self.ChoiceLeft[i_trial] = True  # Left chosen
                else:
                    self.ChoiceLeft[i_trial] = False
            else:
                warning("'WaitForReward' state should always appear"
                        " if 'WaitForRewardStart' was initiated")
        elif str(MatrixState.broke_fixation) in statesVisitedThisTrialNames:
            self.FixBroke[i_trial] = True
        elif str(MatrixState.early_withdrawal) in statesVisitedThisTrialNames:
            self.EarlyWithdrawal[i_trial] = True
        elif str(MatrixState.timeOut_missed_choice) in \
                statesVisitedThisTrialNames:
            self.Feedback[i_trial] = False
            self.MissedChoice[i_trial] = True
        if str(MatrixState.timeOut_SkippedFeedback) in \
                statesVisitedThisTrialNames:
            self.Feedback[i_trial] = False
        if str(MatrixState.Reward) in statesVisitedThisTrialNames:
            self.Rewarded[i_trial] = True
            self.RewardReceivedTotal[i_trial] += \
                self.task_parameters.RewardAmount
        if str(MatrixState.CenterPortRewardDelivery) in \
                statesVisitedThisTrialNames and \
           self.task_parameters.RewardAfterMinSampling:
            self.RewardAfterMinSampling[i_trial] = True
            self.RewardReceivedTotal[i_trial] += \
                self.task_parameters.CenterPortRewAmount
        if str(MatrixState.WaitCenterPortOut) in statesVisitedThisTrialNames:
            WaitCenterPortOutStateTimes = statesVisitedThisTrialTimes[str(
                MatrixState.WaitCenterPortOut)]
            self.ReactionTime[i_trial] = diff(WaitCenterPortOutStateTimes)
        else:
            # Assign with -1 so we can differentiate it from None trials
            # where the state potentially existed but we didn't calculate it
            self.ReactionTime[i_trial] = -1
        # State-independent fields
        self.StimDelay[i_trial] = self.task_parameters.StimDelay
        self.FeedbackDelay[i_trial] = self.task_parameters.FeedbackDelay
        self.MinSample[i_trial] = self.task_parameters.MinSample
        self.RewardMagnitude[i_trial + 1] = [
            self.task_parameters.RewardAmount,
            self.task_parameters.RewardAmount
        ]
        self.CenterPortRewAmount[i_trial +
                                 1] = self.task_parameters.CenterPortRewAmount
        self.PreStimCntrReward[
            i_trial + 1] = self.task_parameters.PreStimuDelayCntrReward
        self.Timer.customExtractData[i_trial] = time.time()

        # IF we are running grating experiments,
        # add the grating orientation that was used
        if self.task_parameters.ExperimentType == \
                ExperimentType.GratingOrientation:
            self.GratingOrientation[
                i_trial] = self.drawParams.gratingOrientation

        # Updating Delays
        # stimulus delay
        if self.task_parameters.StimDelayAutoincrement:
            if self.FixBroke[i_trial]:
                self.task_parameters.StimDelay = max(
                    self.task_parameters.StimDelayMin,
                    min(
                        self.task_parameters.StimDelayMax,
                        self.StimDelay[i_trial] -
                        self.task_parameters.StimDelayDecr))
            else:
                self.task_parameters.StimDelay = min(
                    self.task_parameters.StimDelayMax,
                    max(
                        self.task_parameters.StimDelayMin,
                        self.StimDelay[i_trial] +
                        self.task_parameters.StimDelayIncr))
        else:
            if not self.FixBroke[i_trial]:
                self.task_parameters.StimDelay = random_unif(
                    self.task_parameters.StimDelayMin,
                    self.task_parameters.StimDelayMax)
            else:
                self.task_parameters.StimDelay = self.StimDelay[i_trial]
        self.Timer.customStimDelay[i_trial] = time.time()

        # min sampling time
        if i_trial > self.task_parameters.StartEasyTrials:
            if self.task_parameters.MinSampleType == MinSampleType.FixMin:
                self.task_parameters.MinSample = \
                    self.task_parameters.MinSampleMin
            elif self.task_parameters.MinSampleType == \
                    MinSampleType.AutoIncr:
                # Check if animal completed pre-stimulus delay successfully
                if not self.FixBroke[i_trial]:
                    if self.Rewarded[i_trial]:
                        min_sample_incremented = self.MinSample[
                            i_trial] + self.task_parameters.MinSampleIncr
                        self.task_parameters.MinSample = min(
                            self.task_parameters.MinSampleMax,
                            max(self.task_parameters.MinSampleMin,
                                min_sample_incremented))
                    elif self.EarlyWithdrawal[i_trial]:
                        min_sample_decremented = self.MinSample[
                            i_trial] - self.task_parameters.MinSampleDecr
                        self.task_parameters.MinSample = max(
                            self.task_parameters.MinSampleMin,
                            min(self.task_parameters.MinSampleMax,
                                min_sample_decremented))
                else:
                    # Read new updated GUI values
                    self.task_parameters.MinSample = max(
                        self.task_parameters.MinSampleMin,
                        min(self.task_parameters.MinSampleMax,
                            self.MinSample[i_trial]))
            elif self.task_parameters.MinSampleType == \
                    MinSampleType.RandBetMinMax_DefIsMax:
                use_rand = rand(1, 1) < self.task_parameters.MinSampleRandProb
                if not use_rand:
                    self.task_parameters.MinSample = \
                        self.task_parameters.MinSampleMax
                else:
                    min_sample_difference = \
                        self.task_parameters.MinSampleMax - \
                        self.task_parameters.MinSampleMin
                    self.task_parameters.MinSample = \
                        min_sample_difference * \
                        rand(1, 1) + self.task_parameters.MinSampleMin
            elif MinSampleType.RandNumIntervalsMinMax_DefIsMax:
                use_rand = rand(1, 1) < self.task_parameters.MinSampleRandProb
                if not use_rand:
                    self.task_parameters.MinSample = \
                        self.task_parameters.MinSampleMax
                else:
                    self.task_parameters.MinSampleNumInterval = round(
                        self.task_parameters.MinSampleNumInterval)
                    if self.task_parameters.MinSampleNumInterval == 0 or \
                       self.task_parameters.MinSampleNumInterval == 1:
                        self.task_parameters.MinSample = \
                            self.task_parameters.MinSampleMin
                    else:
                        min_sample_difference = \
                            self.task_parameters.MinSampleMax - \
                            self.task_parameters.MinSampleMin
                        step = min_sample_difference / (
                            self.task_parameters.MinSampleNumInterval - 1)
                        intervals = list(
                            range(self.task_parameters.MinSampleMin,
                                  self.task_parameters.MinSampleMax + 1, step))
                        intervals_idx = randi(
                            1, self.task_parameters.MinSampleNumInterval)
                        print("Intervals:")  # disp("Intervals:");
                        print(intervals)  # disp(intervals)
                        self.task_parameters.MinSample = intervals[
                            intervals_idx]
            else:
                error('Unexpected MinSampleType value')
        self.Timer.customMinSampling[i_trial] = time.time()

        # feedback delay
        if self.task_parameters.FeedbackDelaySelection == \
                FeedbackDelaySelection.none:
            self.task_parameters.FeedbackDelay = 0
        elif self.task_parameters.FeedbackDelaySelection == \
                FeedbackDelaySelection.AutoIncr:
            # If the feedback period was not completed, decrement the
            # feedback delay.
            # Do we consider the case where 'broke_fixation' or
            # 'early_withdrawal' terminated early the trial?
            if not self.Feedback[i_trial]:
                feedback_delay_decremented = self.FeedbackDelay[
                    i_trial] - self.task_parameters.FeedbackDelayDecr
                self.task_parameters.FeedbackDelay = max(
                    self.task_parameters.FeedbackDelayMin,
                    min(self.task_parameters.FeedbackDelayMax,
                        feedback_delay_decremented))
            else:
                # Increase the feedback if the feedback was successfully
                # completed in the last trial, or use the GUI value that
                # the user updated if needed.
                # Do we also here consider the case where 'broke_fixation' or
                # 'early_withdrawal' terminated early the trial?
                feedback_delay_incremented = self.FeedbackDelay[
                    i_trial] + self.task_parameters.FeedbackDelayIncr
                self.task_parameters.FeedbackDelay = min(
                    self.task_parameters.FeedbackDelayMax,
                    max(self.task_parameters.FeedbackDelayMin,
                        feedback_delay_incremented))
        elif FeedbackDelaySelection.TruncExp:
            self.task_parameters.FeedbackDelay = TruncatedExponential(
                self.task_parameters.FeedbackDelayMin,
                self.task_parameters.FeedbackDelayMax,
                self.task_parameters.FeedbackDelayTau)
        elif FeedbackDelaySelection.Fix:
            #     ATTEMPT TO GRAY OUT FIELDS
            if self.task_parametersMeta.FeedbackDelay.Style != 'edit':
                self.task_parametersMeta.FeedbackDelay.Style = 'edit'
            self.task_parameters.FeedbackDelay = \
                self.task_parameters.FeedbackDelayMax
        else:
            error('Unexpected FeedbackDelaySelection value')
        self.Timer.customFeedbackDelay[i_trial] = time.time()

        # Drawing future trials

        # Calculate bias
        # Consider bias only on the last 8 trials/
        # indicesRwdLi = find(self.Rewarded,8,'last');
        # if length(indicesRwdLi) ~= 0
        #   indicesRwd = indicesRwdLi(1);
        # else
        #   indicesRwd = 1;
        # end
        LAST_TRIALS = 20
        indicesRwd = iff(i_trial > LAST_TRIALS, i_trial - LAST_TRIALS, 1)
        # ndxRewd = self.Rewarded(indicesRwd:i_trial);
        choice_correct_slice = self.ChoiceCorrect[indicesRwd:i_trial + 1]
        choice_left_slice = self.ChoiceLeft[indicesRwd:i_trial + 1]
        left_rewarded_slice = self.LeftRewarded[indicesRwd:i_trial + 1]
        ndxLeftRewd = [
            choice_c and choice_l for choice_c, choice_l in zip(
                choice_correct_slice, choice_left_slice)
        ]
        ndxLeftRewDone = [
            l_rewarded
            and choice_l is not None for l_rewarded, choice_l in zip(
                left_rewarded_slice, choice_left_slice)
        ]
        ndxRightRewd = [
            choice_c and not choice_l for choice_c, choice_l in zip(
                choice_correct_slice, choice_left_slice)
        ]
        ndxRightRewDone = [
            not l_rewarded
            and choice_l is not None for l_rewarded, choice_l in zip(
                left_rewarded_slice, choice_left_slice)
        ]
        if not any(ndxLeftRewDone):
            # Since we don't have trials on this side, measure how well
            # the animal was performing on the other side. If it did badly
            # there, then consider this side's performance to be good so
            # it'd still get more trials on the other side.
            PerfL = 1 - (sum(ndxRightRewd) / (LAST_TRIALS * 2))
        else:
            PerfL = sum(ndxLeftRewd) / sum(ndxLeftRewDone)
        if not any(ndxRightRewDone):
            PerfR = 1 - (sum(ndxLeftRewd) / (LAST_TRIALS * 2))
        else:
            PerfR = sum(ndxRightRewd) / sum(ndxRightRewDone)
        self.task_parameters.CalcLeftBias = (PerfL - PerfR) / 2 + 0.5

        choiceMadeTrials = [
            choice_c is not None for choice_c in self.ChoiceCorrect
        ]
        rewardedTrialsCount = sum([r is True for r in self.Rewarded])
        lengthChoiceMadeTrials = len(choiceMadeTrials)
        if lengthChoiceMadeTrials >= 1:
            performance = rewardedTrialsCount / lengthChoiceMadeTrials
            self.task_parameters.Performance = [
                f'{performance * 100:.2f}', '#/',
                str(lengthChoiceMadeTrials), 'T'
            ]
            performance = rewardedTrialsCount / (i_trial + 1)
            self.task_parameters.AllPerformance = [
                f'{performance * 100:.2f}', '#/',
                str(i_trial + 1), 'T'
            ]
            NUM_LAST_TRIALS = 20
            if i_trial > NUM_LAST_TRIALS:
                if lengthChoiceMadeTrials > NUM_LAST_TRIALS:
                    rewardedTrials_ = choiceMadeTrials[
                        lengthChoiceMadeTrials - NUM_LAST_TRIALS +
                        1:lengthChoiceMadeTrials + 1]
                    performance = sum(rewardedTrials_) / NUM_LAST_TRIALS
                    self.task_parameters.Performance = [
                        self.task_parameters.Performance, ' - ',
                        f'{performance * 100:.2f}', '#/',
                        str(NUM_LAST_TRIALS), 'T'
                    ]
                rewardedTrialsCount = sum(
                    self.Rewarded[i_trial - NUM_LAST_TRIALS + 1:i_trial + 1])
                performance = rewardedTrialsCount / NUM_LAST_TRIALS
                self.task_parameters.AllPerformance = [
                    self.task_parameters.AllPerformance, ' - ',
                    f'{performance * 100:.2f}', '#/',
                    str(NUM_LAST_TRIALS), 'T'
                ]
        self.Timer.customCalcBias[i_trial] = time.time()

        # Create future trials
        # Check if its time to generate more future trials
        if i_trial > len(self.DV) - Const.PRE_GENERATE_TRIAL_CHECK:
            # Do bias correction only if we have enough trials
            # sum(ndxRewd) > Const.BIAS_CORRECT_MIN_RWD_TRIALS
            if self.task_parameters.CorrectBias and i_trial > 7:
                LeftBias = self.task_parameters.CalcLeftBias
                # if LeftBias < 0.2 || LeftBias > 0.8 # Bias is too much,
                # swing it all the way to the other side
                # LeftBias = round(LeftBias);
                # else
                if 0.45 <= LeftBias and LeftBias <= 0.55:
                    LeftBias = 0.5
                if LeftBias is None:
                    print(f'Left bias is None.')
                    LeftBias = 0.5
            else:
                LeftBias = self.task_parameters.LeftBias
            self.Timer.customAdjustBias[i_trial] = time.time()

            # Adjustment of P(Omega) to make sure that sum(P(Omega))=1
            if self.task_parameters.StimulusSelectionCriteria != \
                    StimulusSelectionCriteria.BetaDistribution:
                omega_prob_sum = sum(
                    self.task_parameters.OmegaTable.columns.OmegaProb)
                # Avoid having no probability and avoid dividing by zero
                if omega_prob_sum == 0:
                    self.task_parameters.OmegaTable.columns.OmegaProb = [1] * \
                        len(self.task_parameters.OmegaTable.columns.OmegaProb)
                self.task_parameters.OmegaTable.columns.OmegaProb = [
                    omega_prob / omega_prob_sum for omega_prob in
                    self.task_parameters.OmegaTable.columns.OmegaProb
                ]
            self.Timer.customCalcOmega[i_trial] = time.time()

            # make future trials
            lastidx = len(self.DV) - 1
            # Generate guaranteed equal possibility of >0.5 and <0.5
            IsLeftRewarded = [0] * round(
                Const.PRE_GENERATE_TRIAL_COUNT * LeftBias) + [1] * round(
                    Const.PRE_GENERATE_TRIAL_COUNT * (1 - LeftBias))
            # Shuffle array and convert it
            random.shuffle(IsLeftRewarded)
            IsLeftRewarded = [
                l_rewarded > LeftBias for l_rewarded in IsLeftRewarded
            ]
            self.Timer.customPrepNewTrials[i_trial] = time.time()
            for a in range(Const.PRE_GENERATE_TRIAL_COUNT):
                # If it's a fifty-fifty trial, then place the stimulus in the
                # middle (a 50/50 trial)
                if rand(1, 1) < self.task_parameters.Percent50Fifty and \
                    (lastidx + a) > \
                        self.task_parameters.StartEasyTrials:
                    self.StimulusOmega[lastidx + a] = 0.5
                else:
                    if self.task_parameters.StimulusSelectionCriteria == \
                            StimulusSelectionCriteria.BetaDistribution:
                        # Divide beta by 4 if we are in an easy trial
                        beta_div_condition = (lastidx + a) <= \
                            self.task_parameters.StartEasyTrials
                        BetaDiv = iff(beta_div_condition, 4, 1)
                        betarnd_param = \
                            self.task_parameters.BetaDistAlphaNBeta / \
                            BetaDiv
                        Intensity = betarnd(betarnd_param, betarnd_param)
                        # prevent extreme values
                        Intensity = iff(Intensity < 0.1, 0.1, Intensity)
                        # prevent extreme values
                        Intensity = iff(Intensity > 0.9, 0.9, Intensity)
                    elif self.task_parameters.\
                        StimulusSelectionCriteria == \
                            StimulusSelectionCriteria.DiscretePairs:
                        if (lastidx + a) <= \
                                self.task_parameters.StartEasyTrials:
                            index = next(prob[0] for prob in enumerate(
                                self.task_parameters.OmegaTable.columns.
                                OmegaProb) if prob[1] > 0)
                            Intensity = \
                                self.task_parameters.OmegaTable.Omega[
                                    index] / 100
                        else:
                            # Choose a value randomly given each value's
                            # probability
                            Intensity = randsample(
                                self.task_parameters.OmegaTable.columns.Omega,
                                weights=self.task_parameters.OmegaTable.
                                columns.OmegaProb)[0] / 100
                    else:
                        error('Unexpected StimulusSelectionCriteria')
                    # In case of beta distribution, our distribution is
                    # symmetric, so prob < 0.5 is == prob > 0.5, so we can
                    # just pick the value that corrects the bias
                    if (IsLeftRewarded[a] and Intensity < 0.5) or \
                       (not IsLeftRewarded[a] and Intensity >= 0.5):
                        Intensity = -Intensity + 1
                    self.StimulusOmega[lastidx + a] = Intensity

                if self.task_parameters.ExperimentType == \
                        ExperimentType.Auditory:
                    DV = CalcAudClickTrain(lastidx + a)
                elif self.task_parameters.ExperimentType == \
                        ExperimentType.LightIntensity:
                    DV = CalcLightIntensity(lastidx + a, self)
                elif self.task_parameters.ExperimentType == \
                        ExperimentType.GratingOrientation:
                    DV = CalcGratingOrientation(lastidx + a)
                elif self.task_parameters.ExperimentType == \
                        ExperimentType.RandomDots:
                    DV = CalcDotsCoherence(lastidx + a)
                else:
                    error('Unexpected ExperimentType')
                if DV > 0:
                    self.LeftRewarded[lastidx + a] = True
                elif DV < 0:
                    self.LeftRewarded[lastidx + a] = False
                else:
                    # It's equal distribution
                    self.LeftRewarded[lastidx + a] = rand() < 0.5
                # cross-modality difficulty for plotting
                #  0 <= (left - right) / (left + right) <= 1
                self.DV[lastidx + a] = DV
            self.Timer.customGenNewTrials[i_trial] = time.time()
        else:
            self.Timer.customAdjustBias[i_trial] = 0
            self.Timer.customCalcOmega[i_trial] = 0
            self.Timer.customPrepNewTrials[i_trial] = 0
            self.Timer.customGenNewTrials[i_trial] = 0

        # Update RDK GUI
        self.task_parameters.OmegaTable.columns.RDK = [
            (value - 50) * 2
            for value in self.task_parameters.OmegaTable.columns.Omega
        ]
        # Set current stimulus for next trial
        DV = self.DV[i_trial + 1]
        if self.task_parameters.ExperimentType == \
                ExperimentType.RandomDots:
            self.task_parameters.CurrentStim = \
                f"{abs(DV / 0.01)}{iff(DV < 0, '# R cohr.', '# L cohr.')}"
        else:
            # Set between -100 to +100
            StimIntensity = f'{iff(DV > 0, (DV + 1) / 0.02, (DV - 1) / -0.02)}'
            self.task_parameters.CurrentStim = \
                f"{StimIntensity}{iff(DV < 0, '# R', '# L')}"

        self.Timer.customFinalizeUpdate[i_trial] = time.time()

        # determine if optogenetics trial
        OptoEnabled = rand(1, 1) < self.task_parameters.OptoProb
        if i_trial < self.task_parameters.StartEasyTrials:
            OptoEnabled = False
        self.OptoEnabled[i_trial + 1] = OptoEnabled
        self.task_parameters.IsOptoTrial = iff(OptoEnabled, 'true', 'false')

        # determine if catch trial
        if i_trial < self.task_parameters.StartEasyTrials or \
                self.task_parameters.PercentCatch == 0:
            self.CatchTrial[i_trial + 1] = False
        else:
            every_n_trials = round(1 / self.task_parameters.PercentCatch)
            limit = round(every_n_trials * 0.2)
            lower_limit = every_n_trials - limit
            upper_limit = every_n_trials + limit
            if not self.Rewarded[i_trial] or i_trial + 1 < \
                    self.LastSuccessCatchTial + lower_limit:
                self.CatchTrial[i_trial + 1] = False
            elif i_trial + 1 < self.LastSuccessCatchTial + upper_limit:
                # TODO: If OmegaProb changed since last time, then redo it
                non_zero_prob = [
                    self.task_parameters.OmegaTable.Omega[i] / 100
                    for i, prob in enumerate(
                        self.task_parameters.OmegaTable.columns.OmegaProb)
                    if prob > 0
                ]
                complement_non_zero_prob = [1 - prob for prob in non_zero_prob]
                inverse_non_zero_prob = non_zero_prob[::-1]
                active_stim_idxs = GetCatchStimIdx(complement_non_zero_prob +
                                                   inverse_non_zero_prob)
                cur_stim_idx = GetCatchStimIdx(self.StimulusOmega[i_trial + 1])
                min_catch_counts = min(self.CatchCount[i]
                                       for i in active_stim_idxs)
                min_catch_idxs = list(
                    set(active_stim_idxs).intersection({
                        i
                        for i, cc in enumerate(self.CatchCount)
                        if floor(cc) == min_catch_counts
                    }))
                self.CatchTrial[i_trial + 1] = cur_stim_idx in min_catch_idxs
            else:
                self.CatchTrial[i_trial + 1] = True
        # Create as char vector rather than string so that
        # GUI sync doesn't complain
        self.task_parameters.IsCatch = iff(self.CatchTrial[i_trial + 1],
                                           'true', 'false')
        # Determine if Forced LED trial:
        if self.task_parameters.PortLEDtoCueReward:
            self.ForcedLEDTrial[i_trial + 1] = rand(1, 1) < \
                self.task_parameters.PercentForcedLEDTrial
        else:
            self.ForcedLEDTrial[i_trial + 1] = False
        self.Timer.customCatchNForceLed[i_trial] = time.time()
Example #22
File: HKA.py Project: setupred/iit-asgn
pos,neg=[],[]
D = list(train)

for i in xrange(len(pr)):
   if(pr[i] == (-1,-1)): neg.append(D[i])
   elif(pr[i] == (1,1)): pos.append(D[i])
print len(pos),len(neg),len(train_l)

p = [1 for i in xrange(len(pos))]
n = [0 for i in xrange(len(neg))]
y =  p+n
X = pos+neg
print "hello"
Learn(pos+neg,map(lambda x:2*(x-0.5),p+n),10)
clf = svm.LinearSVC()
clf.fit(X,y)
print y
#print (map(lambda x:clf.predict(x).tolist()[0],clf.support_vectors_))
#print len(clf.support_vectors_),len(y)
p = clf.predict(X).tolist()
#ts = ts.tolist()[0]
ts=list(y)

print p.count(1),p.count(0),len(p),len(ts)

z = zip(ts,p)
print p
print z.count((1,1)),z.count((1,-1)),z.count((0,1)),z.count((0,-1))
correct =  len(ts) - utils.diff(ts,p)
print "SVM",correct*1.0/len(ts)
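Since correct = len(ts) - utils.diff(ts, p), diff is used here as a count of positions where the true and predicted label sequences disagree. A hedged sketch of that behaviour:

def diff(expected, predicted):
    # Assumed behaviour: number of positions where the two label
    # sequences disagree (a Hamming-style distance).
    return sum(1 for a, b in zip(expected, predicted) if a != b)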
Example #23
    def __init__(self, parent=None):
        super(ADSRDemo4C, self).__init__(parent)

        self.conf = config.get('adsrdemo.py')

        self.vlc = []

        self.plotw = PlotWindow(self)
        self.plotw.add_curve('Chan 1')
        self.plotw.add_curve('Chan 2')
        self.plotw.add_curve('Chan 3')
        self.plotw.add_curve('Chan 4')

        self.plotw.add_curve('ADSR1')
        self.plotw.add_curve('ADSR2')
        self.plotw.add_curve('ADSR3')
        self.plotw.add_curve('ADSR4')

        self.dx = diff()

        self.adsr = [adsr_vlc() for x in range(4)]

        # adsr attribute : [name,                     min, max,  k,       kdisp,   default]
        self.params = {  'attackl':['Duracion Ataque', 0,   200,  1./10,   1./100,  150 ],
                         'sustl':['Duracion Sustain',  0,   900,  1,       1./10,   100 ],
                         'rell':['Duracion Release',   0,   300,  1,       1./10,   15  ],
                         'alfa_att':['alfa Ataque',    0,   1000, 1./1000, 1./1000, 300 ],
                         'alfa_sus':['alfa Sustain',   0,   1000, 1./1000, 1./1000, 850 ],
                         'alfa_rel':['alfa Release',   0,   1000, 1./1000, 1./1000, 850 ],
                         'umbral':['umbral deteccion', 0,   400,  1,       1,       100 ]
                       }


        self.main_frame = QWidget()
        lay = QGridLayout()
        vbox = QVBoxLayout()
        lay.addLayout(vbox, 0,0)
        lay.setColumnStretch(1,1)
        lay.addWidget(self.plotw,0,1)
        self.main_frame.setLayout(lay)
        self.setCentralWidget(self.main_frame)

        hb = QHBoxLayout()
        vbox.addLayout(hb)
        b = QPushButton('Guardar')
        b.clicked.connect(self.guardar)
        hb.addWidget(b)
        b = QPushButton('Cargar')
        b.clicked.connect(self.cargar)
        hb.addWidget(b)

        b = QPushButton('Conectar con vlc')
        b.clicked.connect(self.vlc_connect)
        vbox.addWidget(b)

        self.sliders = []
        for attr,params  in self.params.iteritems():
            (nom, xmin, xmax, k, kdisp, default) = params
            lbl = QLabel(nom)
            sld = QSlider(Qt.Horizontal)
            sld.setRange(xmin, xmax)
            sld.valueChanged[int].connect(self.set_param)
            sld.params = (nom, lbl, attr, k, kdisp)
            sld.setValue(default) 
            vbox.addWidget(lbl)
            vbox.addWidget(sld)
            self.sliders.append(sld)

        b = QPushButton('Resetear ADSR')
        b.clicked.connect(self.reset_adsr)
        vbox.addWidget(b)

        vbox.addStretch(1)

        self.setWindowTitle('Prueba ADSR')
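In this GUI, self.dx = diff() instantiates diff as an object, so it is presumably a small stateful differentiator applied to incoming samples. A hypothetical sketch of such a class (name and interface are assumptions):

class diff(object):
    # Hypothetical stateful differentiator: returns the change between the
    # current sample and the previous one.
    def __init__(self):
        self.prev = None

    def __call__(self, x):
        d = 0 if self.prev is None else x - self.prev
        self.prev = x
        return d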