def findByScan():
    """Search for the ball by scanning the head, striding while it is far off.

    In head-only mode the legs are never driven (the robot used to stop
    dead when it lost the ball without this early-out).
    """
    if gHeadOnly:
        hTrack.scan()
        return
    if Global.weightedVisBallDist > 150:
        # Last-seen ball is far away: keep walking forward for a while,
        # scanning a narrow pan range ahead instead of the full sweep.
        hTrack.scan(lowCrane=0, highCrane=0, minPan=-60, maxPan=60, lookDown=False)
        forward = Action.MAX_FORWARD
    else:
        # Ball was close by: full scan while standing (marching in place).
        hTrack.scan()
        forward = 0
    Action.walk(forward, 0, 0, minorWalkType=Action.SkeFastForwardMWT)
def doGetToTargetPoint(targetX, targetY):
    """Walk to (targetX, targetY) while facing the ball; stop when close enough.

    Uses a two-radius hysteresis on gTargetPointReqAccuracy so the robot does
    not oscillate between "at the point" and "not at the point".
    """
    global gTargetPointReqAccuracy
    selfX, selfY = Global.selfLoc.getX(), Global.selfLoc.getY()
    ballX, ballY, ballH = Global.ballX, Global.ballY, Global.ballH
    # Absolute heading (0..360 deg) from the robot to the ball.
    h = hMath.normalizeAngle_0_360(hMath.RAD2DEG(math.atan2(\
        ballY - selfY, ballX - selfX)))
    #angle = hMath.absAngleBetweenTwoPointsFromPivotPoint(ballX, ballY, \
    #                                                     targetX, targetY, \
    #                                                     selfX, selfY)
    distSquared = hMath.getDistSquaredBetween(targetX,targetY,selfX,selfY)
##    if dist > 100 and angle < 80:
##        hTrack.saGoToTarget(targetX,targetY)
##    else:
##        hTrack.saGoToTargetFacingHeading(targetX,targetY,h)
    hTrack.saGoToTargetFacingHeading(targetX,targetY,h)
    # Hysteresis for whether or not you are at the defender point: once inside
    # the small circle, only leaving the large circle counts as "away" again.
    if distSquared <= hMath.SQUARE(gTargetPointReqAccuracy):
        gTargetPointReqAccuracy = TARGET_PT_ACC_LARGE_CIRCLE
        # At the point and roughly facing the ball: stand still.
        if abs(ballH) < 15:
            Action.stopLegs()
    else:
        gTargetPointReqAccuracy = TARGET_PT_ACC_SMALL_CIRCLE
    # NOTE(review): assumed to run regardless of the hysteresis branch —
    # block whenever we are near the target point. TODO confirm placement.
    if abs(targetX - selfX) < 30 and abs(targetY - selfY) < 100:
        checkThenBlock()
def shouldITurnTowardTheFieldFromTargetGoal():
    """Turn away from the target goal when we are stuck facing into it.

    Returns True once the get-out-of-goal routine decides we are clear
    (timeout, own goal visible, obstacle very close, or a beacon seen);
    False while still turning or when no routine is needed.
    """
    global getOutOfGoalCounter, getOutOfGoalDirection
    targetGoal = Global.vTGoal
    turnCCW = -1
    # If we can see our target goal real big or we've got our face in either
    # goal, or we're already locked into a get-out-of-goal routine.
    if ((targetGoal.getConfidence() > 0 and (targetGoal.getWidth() > \
            Constant.WIDTH_MOVE_THRESHOLD or (targetGoal.getHeight() > \
            Constant.HEIGHT_MOVE_THRESHOLD and targetGoal.getWidth() > \
            Constant.IMAGE_WIDTH / 2))) or getOutOfGoalCounter > 0):
        # Pick a turn direction once and stick with it for the whole routine.
        if getOutOfGoalDirection == None:
            if targetGoal.getHeading() >= 0:
                turnCCW = -Action.MAX_TURN_NORMAL / 2
            else:
                turnCCW = Action.MAX_TURN_NORMAL / 2
            getOutOfGoalDirection = turnCCW
        else:
            turnCCW = getOutOfGoalDirection
        # Spin on the spot with the head level, looking for landmarks.
        Action.walk(0, 0, turnCCW)
        Action.setHeadParams(0, 40, 0, Action.HTAbs_h)
        getOutOfGoalCounter = getOutOfGoalCounter + 1
        MAX_INFRA_RED = 90.0
        irFar = VisionLink.getAnySensor(Constant.ssINFRARED_FAR)
        # Exit conditions: turned long enough, own goal in view, something
        # very close in front (IR), or a localisation beacon seen.
        if getOutOfGoalCounter > 60 or Global.vOGoal.getConfidence() > 0 or \
                (irFar / 10000.0) >= MAX_INFRA_RED or Global.seenBeacon:
            getOutOfGoalCounter = 0
            return True
    else:
        # Not in the routine: forget any previously chosen direction.
        getOutOfGoalDirection = None
    return False
def Destroyed(self):
    """Wind down the burn animation: stop emitting, then destroy it after 1s."""
    if self.burnanim:
        self.burnanim.DetachRelative()
        seq = Action.Sequence(self.burnanim)
        self.burnanim.SphericalParticleEmitter.EmitRate = 0
        Action.Delay(seq, 1)
        # Bug fix: the original passed `lambda: self.burnanim.Destroy`, which
        # evaluates the bound method without calling it, so the animation
        # object was never actually destroyed. Pass the method itself so the
        # queued Call invokes it.
        Action.Call(seq, self.burnanim.Destroy)
def OnCollision(self, CollisionEvent):
    """Fire the trigger when a trigger-capable object touches this button.

    If TriggerDuration is set, the button behaves as a timed switch: it
    re-arms after 0.5s and un-triggers after TriggerDuration seconds.
    """
    if self.triggerable:
        # Only objects that can trigger, and are currently active, count.
        if CollisionEvent.OtherObject.CanTrigger and CollisionEvent.OtherObject.CanTrigger.Active:
            self.triggerable = False
            self.Trigger()
            self.Owner.SoundEmitter.PlayCue("ButtonCue")
            if self.TriggerDuration:
                self.Owner.SoundEmitter.PlayCue("TickCue")
                seq = Action.Sequence(self.Owner.Actions)

                def Untrigger():
                    # Runs after TriggerDuration: release and play exit cue.
                    self.Trigger(False)
                    self.Owner.SoundEmitter.Stop()
                    self.Owner.SoundEmitter.PlayCue("WallEnterCue")

                def TurnTriggerable():
                    # Re-arm the button shortly after it fires.
                    self.triggerable = True

                Action.Delay(seq, .5)
                Action.Call(seq, TurnTriggerable)
                Action.Delay(seq, self.TriggerDuration)
                Action.Call(seq, Untrigger)
def frameReset():
    """Per-frame reset: clear the frame-local state of every helper module."""
    VisionLink.startProfile("hFrameResetR")
    # Each module keeps per-frame caches; reset them in the same order as before.
    for module in (Action, Global, sGrab, hStuck):
        module.frameReset()
    VisionLink.stopProfile("hFrameResetR")
def select_dialog_action(self, s):
    """Choose the next dialog act from the belief state *s*.

    Confirms the action first, then requests any missing parameters, and
    falls back to re-asking the goal ("repeat_goal") when beliefs are weak.
    Returns (dialog_act_name, act_arguments, hypothesised Action).
    """
    # confirm action, then patient, then recipient, then extraneous arguments
    a_name, a_max = arg_max(s.user_action_belief)
    # We are "unsure" unless the top action belief has full probability mass.
    unsure = True if a_max < 1 else False
    missing_params = []
    if a_name is not None:
        a_preds = []
        for i in range(0, s.action_num_expected_params[a_name]):
            p_name, p_max = arg_max(s.user_action_parameters_belief[i])
            if p_name is not None:
                if p_max < 1:
                    unsure = True
            else:
                missing_params.append(i)
            # NOTE(review): p_name may be None here, giving the Action a
            # placeholder parameter — assumed intentional; TODO confirm.
            a_preds.append(p_name)
        a = Action.Action(a_name, a_preds)
        if unsure:
            # Avoid confirming the exact same hypothesis twice in a row.
            if (s.previous_action is not None
                    and s.previous_action[0] == "confirm_action"
                    and s.previous_action[1][0] == a):
                return "repeat_goal", [], Action.Action()
            else:
                return "confirm_action", [a], a
        elif len(missing_params) > 0:
            return "request_missing_param", [missing_params[0]], a
        else:
            return "repeat_goal", [], Action.Action()
    else:
        return "repeat_goal", [], Action.Action()
def efficientScan(reset=True, frameList=120, speed=50 * 80 / 90, leftLimit=80, rightLimit=-80, offsetFrame=15): global scanListPosition, lastFrame #if current frame not after the last frame in some offset(1 or 2), reset. #if it set to reset, then reset #if it not reset then not reset except the scanListPosition is empty if reset or (Global.frame - lastFrame > offsetFrame) or scanListPosition == []: #print "reseted, reset", reset, " Global.frame - lastFrame:",Global.frame - lastFrame, " scanListPosition:", scanListPosition == [] createScanningPosition(Constant.dCLOCKWISE, frameList, speed, leftLimit, rightLimit) #print scanListPosition if scanListPosition == []: print "scan list empty" return panx, tilty, cranez = scanListPosition[0] del scanListPosition[0] Action.setHeadParams(panx, tilty, cranez, Action.HTAbs_h) print Global.frame, ":set:", panx, ":", tilty, ":", cranez print Global.frame, ":", Global.pan, ":", Global.tilt, ":", Global.crane lastFrame = Global.frame
def scanLikeGermanTeam(lowCrane=0, highCrane=0, tilty=0, speed=10, minPan=-90, maxPan=90):
    """Head scan in the style of the German team: horizontal sweep first,
    then (after a short wait) a vertical sweep, then restart the pan.

    Uses gScanWaitCounter to insert a small pause between the two phases.
    """
    global panDirection
    global gScanWaitCounter
    if not scanLikeGermanTeamHorizontal(highCrane, tilty, speed, minPan, maxPan):
        if gScanWaitCounter < 3:
            # Horizontal sweep just finished: wait a few frames.
            gScanWaitCounter += 1
        elif not scanLikeGermanTeamVertical():
            # Vertical sweep finished too: restart panning from the middle,
            # nudging pan off exact zero so the sweep direction is defined.
            gScanWaitCounter = 0
            panx = Global.desiredPan
            gScanWaitCounter = 0
            if panx == 0\
            and panDirection == Constant.dCLOCKWISE:
                panx = -1
                panDirection = Constant.dCLOCKWISE
                gScanWaitCounter = 0
            elif panx == 0\
            and panDirection == Constant.dANTICLOCKWISE:
                panx = 1
                panDirection = Constant.dANTICLOCKWISE
                gScanWaitCounter = 0
            Action.setHeadParams(panx, tilty, lowCrane, Action.HTAbs_h)
    else:
        # Still mid horizontal sweep: no waiting needed.
        gScanWaitCounter = 0
def trackVisualBallByProj(adjX=0, adjY=0, adjZ=0, isTurning=False, adjPos=1.0): ballPos = VisionLink.getProjectedBall() # If projection returns infinity, then use gps. if abs(ballPos[0]) >= Constant.LARGE_VAL\ or abs(ballPos[1]) >= Constant.LARGE_VAL: # We should divide the velocity adjustment by half. trackVisualBallByTrig(adjX, adjY, adjZ, adjPos=adjPos) return ballPosX = ballPos[0] * adjPos ballPosY = ballPos[1] * adjPos #print " track by proj ", # Note that xyz in getPointProjection is different from xyz to headMotion if isTurning and abs(Global.ballH) > 80: d = hMath.getDistanceBetween(0, 0, ballPosX, ballPosY) x = d * math.cos(hMath.DEG2RAD(10)) y = d * math.sin(hMath.DEG2RAD(10)) if Global.ballH < 0: x = -x Action.setHeadParams(x + adjX, adjY, y + adjZ, Action.HTAbs_xyz) else: Action.setHeadParams(ballPosX + adjX, adjY, ballPosY + adjZ, Action.HTAbs_xyz)
def SearchByHeadMoving(direction=RIGHT):
    """Sweep the head left/right looking for the ball, bouncing at the limits.

    The pan continues from the previous head command when one exists; the
    head is lowered (tilt/crane -30) once the sweep reverses from the
    requested starting direction.
    """
    #print "searching for the ball by moving the head.."
    global panDir, lastAction
    # Starting a fresh head search: adopt the requested direction.
    if lastAction != MOVE_HEAD:
        panDir = direction
        lastAction = MOVE_HEAD
    pan = tilt = crane = 0
    # Continue from the last commanded pan if the previous command was
    # an absolute head angle.
    if Action.lastValues[Action.HeadType] == Action.HTAbs_h:
        pan = Action.lastValues[Action.Panx]
    # Clamp to the mechanical pan limits and bounce the sweep direction.
    if pan >= PAN_LIMIT:
        pan = PAN_LIMIT
        panDir = RIGHT
    elif pan < -PAN_LIMIT:
        pan = -PAN_LIMIT
        panDir = LEFT
    if panDir == LEFT:
        pan += HEAD_SCAN_SPEED
    else:
        pan -= HEAD_SCAN_SPEED
    # On the return sweep, look down as well (ball may be close).
    if panDir != direction:
        tilt = crane = -30
    Action.setHeadParams(pan, tilt, crane, Action.HTAbs_h)
def E1_ActivateLogo(self):
    """Intro state E1: once the camera fade completes, fire the thunder
    effect and schedule the transition to state E2."""
    if not self.Camera.CameraFunction.FadeDone:
        return
    self.CurrentFunction = None
    self.Camera.ThunderGenerator.ActivateThunder()
    # Queue GoToE2 after the configured delay.
    seq = Action.Sequence(self.Owner.Actions)
    Action.Delay(seq, self.E1ToE2Delay)
    Action.Call(seq, self.GoToE2)
def supportBall():
    """Move to the striker support position, facing the ball.

    Uses a two-radius hysteresis on gTargetPointReqAccuracy to avoid
    oscillating around the target point. Returns (targetX, targetY).
    """
    global gTargetPointReqAccuracy
    targetX, targetY, rangeX = hWhere.getStrikerPos()
    targetX, targetY = getAdjustedTarget(targetX, targetY, rangeX)
    selfX, selfY = Global.selfLoc.getPos()
    selfH = Global.selfLoc.getHeading()
    ballH = Global.ballH
    # Absolute field heading toward the ball.
    h = hMath.normalizeAngle_0_360(selfH + ballH)
    distSquared = hMath.getDistSquaredBetween(targetX, targetY, selfX, selfY)
    # From outside a metre walk fast, else turn to face ball
    #if dist > 100:
    #    hTrack.saGoToTarget(targetX, targetY)
    #else:
    #    hTrack.saGoToTargetFacingHeading(targetX, targetY, h)
    hTrack.saGoToTargetFacingHeading(targetX, targetY, h)
    # Hysteresis for whether or not you are at the striker point.
    if distSquared <= hMath.SQUARE(gTargetPointReqAccuracy):
        gTargetPointReqAccuracy = TARGET_PT_ACC_LARGE_CIRCLE
        # At the point and facing the ball closely enough: stand still.
        if abs(ballH) < 5:
            Action.stopLegs()
    else:
        gTargetPointReqAccuracy = TARGET_PT_ACC_SMALL_CIRCLE
    return targetX, targetY
def findByForce():
    """Run the currently forced ball-search behaviour until its counter expires.

    The force type selects spinning, following GPS then walking forward, or
    returning to the last visual sighting. Marks the ball as not lost while
    forcing.
    """
    global gForceType
    global gForceFired
    global gForceCounter
    global gForceSpinDir
    # If we need to force anything, then force it.
    gForceFired = True
    if gForceCounter > 0:
        if gForceType == FORCE_SPIN:
            findBySpin(gForceSpinDir)
        elif gForceType == FORCE_FOLLOW:
            if gForceCounter < 20:
                # Late phase: head for the GPS ball estimate.
                findByGps()
            else:
                # Early phase: scan while striding forward.
                hTrack.scan()
                if not gHeadOnly:
                    Action.walk(Action.MAX_FORWARD,0,0)
        elif gForceType == FORCE_LAST_VISUAL:
            hTrack.scan()
            if not gHeadOnly:
                walkToBall(Global.fstVisBallDist, Global.fstVisBallHead, getBehind = gGetBehind)
        # While forcing, the ball is considered "not lost".
        Global.lostBall = 0
        gForceCounter -= 1
def Test(self):
    """Randomise the trigger radius, then schedule the next randomisation.

    Re-queues itself after a random delay so the radius keeps jittering.
    """
    # New maximum radius = base range plus random jitter.
    newMax = self.MaxRange + random.uniform(0, self.MaxVariation)
    self.Max = newMax
    self.Owner.SphereCollider.Radius = newMax
    # Call Test() again at a random time within the configured window.
    nextDelay = random.uniform(self.MaxTimeMinVar, self.MaxTimeMaxVar)
    seq = Action.Sequence(self.Owner.Actions)
    Action.Delay(seq, nextDelay)
    Action.Call(seq, self.Test)
def Initialize(self, initializer):
    """Wire up collision handling and schedule this object's timed death."""
    Zero.Connect(self.Owner, Events.CollisionStarted, self.OnCollision)
    #Zero.Connect(self.Owner, "WeatherDestory", self.OnWeatherDestory)
    # Kill the object automatically after MaxLife seconds.
    deathSeq = Action.Sequence(self.Owner.Actions)
    Action.Delay(deathSeq, self.MaxLife)
    Action.Call(deathSeq, self.Death)
def FreezeThem(self, poss):
    """Freeze every grid cell in *poss*, then flood outwards to neighbours.

    Each call freezes the given cells (and optionally their water-ring
    decorations), computes the not-yet-frozen neighbour set, and re-queues
    itself 0.1s later with that set — producing a spreading freeze wave.
    """
    def FreezeHelper(target):
        # Freeze a single object once, spawning a short-lived ice particle.
        if not target.FreezeAnimReceiver.IsActivated():
            target.FreezeAnimReceiver.Freeze()
            ice = self.Space.CreateAtPosition(
                "IceParticle", target.Transform.WorldTranslation)
            ice.TimedDeath.Active = True
            ice.SphericalParticleEmitter.EmitRate = 3
            ice.SphericalParticleEmitter.EmitCount = 1
            ice.SphericalParticleEmitter.ResetCount()

    for pair in poss:
        target = self.RecArray[pair[1]][pair[0]]
        FreezeHelper(target)
    if self.CreateWaterRing:
        # Also freeze the ring decorations belonging to these cells.
        for pair in poss:
            if pair in self.cache_ring:
                FreezeHelper(self.cache_ring[pair])
            if pair in self.cache_ring2:
                FreezeHelper(self.cache_ring2[pair])
                # NOTE(review): offset correction applied only to ring2 —
                # presumably compensating its spawn offset; TODO confirm.
                self.cache_ring2[
                    pair].Transform.Translation -= self.curious_offset
    # Neighbours of all just-frozen cells, minus anything already frozen.
    nextset = set(sum([self.GetNeighbor(pair) for pair in poss], tuple()))
    nextset -= self.frozen_set
    self.frozen_set.update(nextset)
    if nextset:
        # Continue the wave on the next ring of cells after a short delay.
        seq = Action.Sequence(self.Owner.Actions)
        Action.Delay(seq, 0.1)
        Action.Call(seq, self.FreezeThem, (nextset, ))
def TeleportThis(self, target):
    """Either load the next level (with a fade) or teleport *target* in-place.

    When NextLevel is a real level, fades the camera, caches HUD skills and
    loads it after 1.5s; otherwise, if a Teleport destination exists, moves
    the target there keeping its original height (z).
    """
    if self.NextLevel.Name != "DefaultLevel":
        # Fade to white, then load the configured next level.
        self.Space.FindObjectByName("Camera").CameraFunction.SetCameraFade(
            Vec4(1, 1, 1, 0), Vec4(1, 1, 1, 1), .03, 0)
        sequence = Action.Sequence(self.Owner.Actions)
        ls = self.Space.FindObjectByName("LevelSettings").LevelStart
        if ls:
            # Preserve the player's skills across the level change.
            hm = ls.HUDManager
            hm.CacheSkills()
        Action.Delay(sequence, 1.5)
        Action.Call(sequence, lambda: self.Space.LoadLevel(self.NextLevel))
    elif self.Teleport:
        camera = self.Space.FindObjectByName("Camera")
        dest = self.Teleport.Transform.Translation
        ct = camera.Transform.Translation
        pt = target.Transform.Translation
        #camera.CameraFunction.SetCameraFade(Vec4(0,0,0,1),Vec4(0,0,0,0),.03,0)
        #camera.Transform.Translation = VectorMath.Vec3(dest.x, dest.y,ct.z)
        # Move only in the xy plane; keep the target's current height.
        target.Transform.Translation = VectorMath.Vec3(
            dest.x, dest.y, pt.z)
def ShowOnce(self, select):
    """Show the selected target briefly, hiding it again two seconds later."""
    self.ShowTarget(select)
    # Restart the hide timer: cancel any pending hide, queue a fresh one.
    self.seq.Cancel()
    hideSeq = Action.Sequence(self.Owner.Actions)
    Action.Delay(hideSeq, 2)
    Action.Call(hideSeq, lambda: self.ShowTarget(None))
    self.seq = hideSeq
def performWithoutGrab(dkd):
    """Execute a head kick without grabbing first.

    Triggers the kick when the chosen head side reports it is lined up,
    plays the kick for 15 frames, otherwise keeps positioning (get behind
    the ball or find it). Returns a Constant.STATE_* value.
    """
    global gKickCounter
    global gIsKickTriggering
    # Latch the trigger as soon as the selected head side is lined up.
    if (gUseHeadLeft and isHeadLeftOk())\
            or (not gUseHeadLeft and isHeadRightOk()):
        gIsKickTriggering = True
    if gIsKickTriggering:
        if gKickCounter < 15:
            if gUseHeadLeft:
                Action.kick(Action.HeadLeftWT)
            else:
                Action.kick(Action.HeadRightWT)
        else:
            # Kick finished.
            resetPerform()
            return Constant.STATE_SUCCESS
        gKickCounter += 1
        return Constant.STATE_EXECUTING
    # Not triggering yet: keep positioning behind the ball / finding it.
    if gTargetAngle == None:
        setGetBehind(dkd)
    if gIsGetBehindNeeded:
        sGetBehindBall.perform(gTargetAngle)
    else:
        sFindBall.perform()
    gIsKickTriggering = False
    gKickCounter = 0
    return Constant.STATE_EXECUTING
def Ending(self):
    """Grow the warp particle effect; once large enough, snap the player and
    camera to the destination and begin the fade-in phase.

    With ExtendedDelay set, holds the fade for 2 seconds before revealing.
    """
    self.Owner.SphericalParticleEmitter.Size *= 1.05
    self.Owner.SphericalParticleEmitter.EmitRate = 35
    if self.Owner.SphericalParticleEmitter.Size > 25:
        # Teleport player and camera to the destination in xy, keeping
        # each object's own height.
        d = self.DestinationObject.Transform.WorldTranslation
        z = self.Player.Transform.WorldTranslation.z
        self.Player.Transform.WorldTranslation = Vec3(d.x, d.y, z)
        z = self.camera.Transform.WorldTranslation.z
        self.camera.Transform.WorldTranslation = Vec3(d.x, d.y, z)

        def showit():
            # Deferred reveal: fade back in, unfreeze the player, start Fading.
            self.camera.CameraFunction.SetCameraFade(self.ShadingColor,self.ShadingColor*Vec4(1,1,1,0),0.005,0)
            self.Player.RigidBody.Kinematic = False
            self.CurrentUpdater = self.Fading

        if self.ExtendedDelay:
            # Hold the fade fully opaque for 2s, then reveal.
            self.camera.CameraFunction.SetCameraFade(self.ShadingColor, self.ShadingColor,2,0)
            self.Owner.DestroyInterface.Destroy()
            seq = Action.Sequence(self.Player.Actions)
            Action.Delay(seq, 2)
            Action.Call(seq, showit)
        else:
            # Reveal immediately with a slower fade.
            self.camera.CameraFunction.SetCameraFade(self.ShadingColor, self.ShadingColor*Vec4(1,1,1,0),0.0025,0)
            self.CurrentUpdater = self.Fading
            self.Player.RigidBody.Kinematic = False
            self.Owner.DestroyInterface.Destroy()
def executePolicy(self, observation):
    """Greedily follow the learned policy from *observation*.

    Runs up to self.numSteps greedy actions or until a terminal state,
    accumulating reward into self.totalReward and lazily extending the
    value table with unseen states.
    """
    # Start the counter
    count = 0
    # Copy the initial observation
    self.workingObservation = self.copyObservation(observation)
    if self.verbose:
        print("START")
    # While a terminal state has not been hit and the counter hasn't expired, take the best action for the current state
    while not self.workingObservation.isTerminal and count < self.numSteps:
        newAction = Action()
        # Get the best action for this state
        newAction.actionValue = self.greedy(self.workingObservation)
        if self.verbose == True:
            print self.gridEnvironment.actionToString(
                newAction.actionValue)
        # execute the step and get a new observation and reward
        currentObs, reward = self.gridEnvironment.env_step(newAction)
        # update the value table: unseen states get a zeroed action-value row
        if self.calculateFlatState(
                currentObs.worldState) not in self.v_table.keys():
            self.v_table[self.calculateFlatState(
                currentObs.worldState)] = self.numActions * [0.0]
        self.totalReward = self.totalReward + reward.rewardValue
        self.workingObservation = copy.deepcopy(currentObs)
        # increment counter
        count = count + 1
    if self.verbose:
        print("END")
def reverse(aa, turndir, taOff, verticalDistToBall):
    """Back away from the ball while holding the approach angle *aa*.

    Reverses faster when the ball is close in front, and sidesteps in the
    direction given by *turndir* once there is some clearance.
    """
    # Turn correction toward the desired approach angle, clipped to 10 deg.
    adjustTurn = hMath.CLIP(
        hMath.normalizeAngle_180(Global.selfLoc.getHeading() - aa), 10)
    adjustTurn *= -1
##~    correctionLen = max (0,taOff-2*Constant.BallDiameter)
    if (verticalDistToBall > -Constant.BallDiameter):
        # Ball essentially under the chin: straight reverse, no sidestep.
        Action.walk(-6, 0, adjustTurn)
##~        print "uppest"
##~    elif (verticalDistToBall > (Constant.BallDiameter-taOff) ):
    elif (verticalDistToBall > -taOff / 2.0):
##~        correctAmp = correctionLen-abs(Constant.BallDiameter+verticalDistToBall)
##~        reverseSpeed = -6.0 * correctAmp / correctionLen
        # Mid range: reverse while sidestepping in the turn direction.
        if (turndir == Constant.dANTICLOCKWISE):
            Action.walk(-3, -4, adjustTurn)
        else:
            Action.walk(-3, 4, adjustTurn)
##~        print "middle"
    else:
        # Far enough away: mostly sidestep with a gentle reverse.
        if (turndir == Constant.dANTICLOCKWISE):
            Action.walk(-2, -4, adjustTurn)
        else:
            Action.walk(-2, 4, adjustTurn)
def ExplodeState(self, UpdateEvent):
    """Shrink the explosion particles each tick; finish once nearly gone."""
    emitter = self.Owner.SphericalParticleEmitter
    emitter.Size -= UpdateEvent.Dt * 2.5
    if emitter.Size <= 0.1:
        # Stop per-frame updates and queue the end state half a second later.
        self.CurrentUpdate = 0
        seq = Action.Sequence(self.Owner.Actions)
        Action.Delay(seq, 0.5)
        Action.Call(seq, self.EndState)
def findBySpin(turnDir=None):
    """Spin on the spot to find the ball, panning the head into the turn.

    turnDir forces the spin direction; otherwise the module preference
    gIsClockwise decides. Spins slower under the lighting challenge.
    """
    pan = Global.desiredPan
    turnRate = 60 if Global.lightingChallenge else Action.MAX_TURN
    # Decide the spin direction: an explicit turnDir always wins.
    if turnDir is not None:
        spinAntiClockwise = (turnDir == Constant.dANTICLOCKWISE)
    else:
        spinAntiClockwise = not gIsClockwise
    if spinAntiClockwise:
        turnCCW = turnRate
        pan += 10
    else:
        turnCCW = -turnRate
        pan -= 10
    # Keep the pan within mechanical limits, head slightly down.
    pan = hMath.CLIP(pan, 90)
    Action.setHeadParams(pan, -10, -8, Action.HTAbs_h)
    Action.walk(0, 0, turnCCW, minorWalkType=Action.SkeFastForwardMWT)
def E2_CheckAnimatorEnd(self):
    """Intro state E2: when the logo animation ends, play the raven cue and
    schedule the transition to state E3."""
    if self.Logo.Animator.Active:
        return
    self.CurrentFunction = None
    self.Space.SoundSpace.PlayCue("RavenCue")
    # Queue GoToE3 after the logo has been displayed for the configured time.
    seq = Action.Sequence(self.Owner.Actions)
    Action.Delay(seq, self.LogoDisplayDelay)
    Action.Call(seq, self.GoToE3)
def supportBall():
    """Move to the supporter position, facing the ball; block shots when set.

    Same hysteresis scheme as the striker variant, but additionally arms
    the block behaviour when standing at the point facing up-field with the
    own goal out of sight. Returns (targetX, targetY).
    """
    global gTargetPointReqAccuracy
    targetX, targetY, rangeX = hWhere.getSupporterPos(False)
    targetX, targetY = getAdjustedTarget(targetX, targetY, rangeX)
    selfX, selfY = Global.selfLoc.getPos()
    selfH = Global.selfLoc.getHeading()
    ballH = Global.ballH
    # Absolute field heading toward the ball.
    h = hMath.normalizeAngle_0_360(selfH + ballH)
    distSquared = hMath.getDistSquaredBetween(targetX, targetY, selfX, selfY)
    # From outside a metre walk fast, else turn to face ball
    #if dist > 100:
    #    hTrack.saGoToTarget(targetX, targetY)
    #else:
    #    hTrack.saGoToTargetFacingHeading(targetX, targetY, h)
    hTrack.saGoToTargetFacingHeading(targetX, targetY, h)
    # Hysteresis for whether or not you are at the supporter point.
    if distSquared <= hMath.SQUARE(gTargetPointReqAccuracy):
        gTargetPointReqAccuracy = TARGET_PT_ACC_LARGE_CIRCLE
        if abs(ballH) < 5:
            Action.stopLegs()
        if 0 <= Global.selfLoc.getHeading(
        ) <= 180 and not Global.vOGoal.isVisible():
            # only block if my heading is right and I cannot see my own goal
            sBlock.checkThenBlock(minBallSpeed=2, bothSides=True)
    else:
        gTargetPointReqAccuracy = TARGET_PT_ACC_SMALL_CIRCLE
    return targetX, targetY
def doTurnKick():
    """Drive one frame of the turn-kick: wait for the right walk step, then
    apply the turning kick parameters and lift the head near the end.

    Relies on module globals for the kick phase (gStartStep, gTurnDir,
    gTurnAmount, counters); called once per frame until the kick completes.
    """
    global gIsInStartPosition
    global gIsHeadLifting
    global gHeadLiftCounter
    global gKickCounter
    currentStep = Global.pWalkInfo.getCurrentStep()
    # Check if I am ready to execute the kick
    if not gIsInStartPosition:
        if gStartStep - gErrorMargin <= currentStep <= gStartStep + (
                gErrorMargin + 2):
            #print "StartStep : ", gStartStep, " currentStep : ", currentStep
            gIsInStartPosition = True
        else:
            # Not at the start step yet: keep walking forward into position.
            setTurnKickParams(forward=Action.MAX_FORWARD, left=-gTurnDir)
            return
    setTurnKickParams(left=-gTurnDir, turnccw=gTurnAmount * gTurnDir)
    gKickCounter += 1
    # Do head related business here
    if gKickCounter >= gKickDuration - gHeadLiftTime\
            and gStep - gErrorMargin <= currentStep <= gStep + (gErrorMargin+2):
        #print "Step : ", gStep, " currentStep : ", currentStep
        gIsHeadLifting = True
    if gIsHeadLifting:
        Action.setHeadParams(0, 0, -10, Action.HTAbs_h)
        gHeadLiftCounter += 1
    # NOTE(review): assumed to run every frame, not only while head-lifting;
    # TODO confirm against the original indentation.
    Action.closeMouth()
    if gHeadLiftCounter < gApplyForwardUntilThisFromEnd:
        Action.finalValues[Action.Forward] = gForwardVector
def Respawn(self, target):
    """Cancel pending actions and schedule *target*'s respawn after the
    configured delay."""
    self.Owner.Actions.Clear()
    # Fix: the original created the sequence twice (the first was discarded)
    # and passed self.Owner rather than self.Owner.Actions — every sibling
    # sequence in this codebase is built from the owner's action queue.
    seq = Action.Sequence(self.Owner.Actions)
    Action.Delay(seq, self.RespawnDelay)
    Action.Call(seq, self.PerformRespawn, (target, ))
def perform(leftAngle,rightAngle,heading):
    """Walk toward *heading* while stealthily skirting an obstacle whose free
    edges are at leftAngle/rightAngle.

    Picks the side suggested by the previous choice (gLastStealth biases the
    comparison) and clips the resulting turn. Lights face LEDs to show the
    chosen side.
    """
    global gLastStealth
    forward = Action.MAX_FORWARD_NORMAL
    left = 0
    turn = heading
    # Midpoint of the blocked sector.
    badHeading = (leftAngle + rightAngle) / 2.0
    trueHeading = hMath.normalizeAngle_180(Global.selfLoc.getHeading() + heading)
    if heading < badHeading + 10 * gLastStealth:
        # NOTE(review): the `if True or ...` heading guards below are
        # deliberately disabled — the commented-out condition is kept for
        # reference only.
        if True or not (trueHeading < -90 or trueHeading > 120):
            # Pass on the right of the obstacle.
            stealthTurn = rightAngle - 45
            if stealthTurn < turn:
                turn = stealthTurn
                gLastStealth = 1
                Indicator.showFacePattern([3,3,0,0,0])
    else:
        if True or not (trueHeading > -90 and trueHeading < 60):
            # Pass on the left of the obstacle.
            stealthTurn = leftAngle + 45;
            if stealthTurn > turn:
                turn = stealthTurn
                gLastStealth = -1
                Indicator.showFacePattern([0,0,0,3,3])
    turnccw = hMath.CLIP(turn / 2.0, Action.MAX_TURN_NORMAL)
    Action.walk(forward,left,turnccw)
def executePolicy(self, observation):
    """Greedily follow the learned policy from *observation*.

    Like the sibling executePolicy, but also tracks the maximum observed
    reward in self.maxObservedReward.
    """
    # Start the counter
    count = 0
    # Copy the initial observation
    self.workingObservation = self.copyObservation(observation)
    if self.verbose:
        print("START")
    # While a terminal state has not been hit and the counter hasn't expired, take the best action for the current state
    while not self.workingObservation.isTerminal and count < self.numSteps:
        newAction = Action()
        # Get the best action for this state
        newAction.actionValue = self.greedy(self.workingObservation)
        if self.verbose == True:
            print self.gridEnvironment.actionToString(newAction.actionValue)
        # execute the step and get a new observation and reward
        currentObs, reward = self.gridEnvironment.env_step(newAction)
        # keep track of max observed reward
        if reward.rewardValue > self.maxObservedReward:
            self.maxObservedReward = reward.rewardValue
        # update the value table: unseen states get a zeroed row
        if self.calculateFlatState(currentObs.worldState) not in self.v_table.keys():
            self.v_table[self.calculateFlatState(currentObs.worldState)] = self.numActions*[0.0]
        self.totalReward = self.totalReward + reward.rewardValue
        self.workingObservation = copy.deepcopy(currentObs)
        # increment counter
        count = count + 1
    if self.verbose:
        print("END")
def run(hostname, port):
    """Connect to (hostname, port) and register this thread with the server.

    Best-effort: any connection or send failure is printed and otherwise
    swallowed, as in the original.
    """
    try:
        with socket.create_connection((hostname, port)) as sock:
            Action.sendObject(sock, Action.RegisterThread())
    except Exception as e:
        # Deliberate swallow (registration is optional); removed the unused
        # `s = None` local and the redundant `pass` after print.
        print(e)
def __init__(self,ui,name,icon1,icon2,pos,board):
    """Set up a human-controlled player.

    Choice of move is manual; the pick animation helper depends on which
    side ("player1" vs the other) this player is on.
    """
    self.ui = ui
    self.name = name
    self.icon1 = icon1
    self.icon2 = icon2
    self.pos = pos
    self.board = board
    # Manual input decides this player's moves.
    self.chooseMethod = Action.manual()
    self.pickMethod = Action.playerOnePick(self.ui,self.icon2) if self.name == "player1" else Action.playerTwoPick(self.ui,self.icon2)
def isIdle(self, pic):
    """Return True if *pic* stays on screen across two 3-second checks.

    Double-checks after a delay to avoid flagging a transient frame as
    idleness.
    """
    log("checking for idleness with %s" % pic)
    sleep(3)
    if Action.doesExist(pic):
        log("%s might be idle..." % self.name)
        sleep(3)
        # Still there after the second wait: definitely idle.
        if Action.doesExist(pic):
            log("%s is idle" % self.name, "warn")
            return True
    return False
def run():
    """Hypervisor main loop: (re)start all sequences, turn each active one,
    deactivate idle/unresponsive ones and periodically try to revive them.

    NOTE(review): crit_errors is checked but never incremented in this
    visible code — the shutdown branch appears unreachable as shown;
    TODO confirm against the full file.
    """
    log("Hypervisor script has been started")
    crit_errors = 0
    Sequences = getAllSequences()
    while True:
        log("outter loop started")
        if crit_errors > 4:
            log("Total number of critical errors has been exceeded, system SHUTTING DOWN", "warn")
            break
        # Kill any leftover browser windows before (re)starting.
        log("stopping all currently running windows")
        system("pkill -9 chrome")
        Action.wait()
        # Inital startup of all Sequences
        startSequences(Sequences)
        log("inner loop started")
        # status < 0 means "active"; >= 0 counts deactivations/failures.
        while len(filter(lambda x: x.status < 0, Sequences)) > 0:
            for Sequence in filter(lambda x: x.status < 0, Sequences):
                log("turning %s" % Sequence.name)
                turnedPic = Sequence.turn()
                if turnedPic is not False:
                    log("turned pic is %s" % turnedPic)
                    if Sequence.isIdle(turnedPic):
                        # Screen did not change: treat as stuck and retire it.
                        log("%s is idle, deactivating..." % Sequence.name, "warn")
                        Sequence.closeAll()
                        Sequence.status += 1
                    else:
                        log("%s turned.\n" % Sequence.name)
                        Sequence.no_clicks = 0
                elif Sequence.no_clicks > 5:
                    log("%s exceeds maximum number of missed clicks, deactivating..." % Sequence.name, "warn")
                    Sequence.closeAll()
                    Sequence.status += 1
                else:
                    log("no buttons detected for %s" % Sequence.name)
                    Sequence.no_clicks += 1
            log("all Sequences have been turned.")
            # Try to revive recently deactivated sequences (up to 8 attempts).
            for Sequence in filter(lambda x: 0 <= x.status < 8, Sequences):
                log("attempting to reactivate %s" % Sequence.name)
                if Sequence.start():
                    log("%s successfully started" % Sequence.name)
                    Sequence.status = -1
                else:
                    log("failed to start %s" % Sequence.name)
                    Sequence.status += 1
def main():
    """Parse command-line options and run the circuit simulation."""
    # Use Python's option parser module to make life easier.
    parser = OptionParser("usage: %prog [options] -f circuitfile",
                          version="%prog 1.0")
    parser.add_option("-f", "--file", dest="filename",
                      help="input circuit file", metavar="FILE")
    parser.add_option("-p", "--param", action="append", type="string", dest="param")
    parser.add_option("-q", "--quiet", action="store_false", dest="verbose",
                      default=True, help="don't print status messages to stdout")
    (options, args) = parser.parse_args()
    # With the execution parameters determined, start the run via Action.
    act = Action()
    act.simulate(options.filename)
def __init__(self,ui,name,icon1,icon2,pos,board,level):
    """Set up an AI-controlled player at the given difficulty *level*.

    Precomputes this side's board indices (plates 0-5 / 7-12 and bowls
    6 / 13 depending on which side the player is).
    """
    self.ui = ui
    self.name = name
    self.icon1 = icon1
    self.icon2 = icon2
    self.pos = pos
    self.board = board
    self.level = level
    # NOTE(review): chooseMethod is only assigned for level 1 in this
    # visible code — other levels presumably get theirs elsewhere (or in a
    # part of the original not shown); TODO confirm.
    if level == 1:
        self.chooseMethod = Action.random()
    self.pickMethod = Action.playerOnePick(self.ui,self.icon2) if self.name == "player1" else Action.playerTwoPick(self.ui,self.icon2)
    self.own_plate_index = 0 if self.name == "player1" else 7
    self.own_bowl_index = 6 if self.name == "player1" else 13
    self.oppo_plate_index = 7 if self.name == "player1" else 0
    self.oppo_bowl_index = 13 if self.name == "player1" else 6
def qLearn(self, observation):
    """Run one Q-learning episode starting from *observation*.

    Follows an epsilon-greedy policy for up to self.numSteps steps or until
    a terminal state, applying the standard Q-learning update
    Q(s,a) += alpha * (r + gamma * max_a' Q(s',a') - Q(s,a)).
    """
    # copy the initial observation
    self.workingObservation = self.copyObservation(observation)
    # start the counter
    count = 0
    lastAction = -1
    # while terminal state not reached and counter hasn't expired, use epsilon-greedy search
    while not self.workingObservation.isTerminal and count < self.numSteps:
        # Take the epsilon-greedy action
        newAction = Action()
        newAction.actionValue = self.egreedy(self.workingObservation)
        lastAction = newAction.actionValue
        # Get the new state and reward from the environment
        currentObs, reward = self.gridEnvironment.env_step(newAction)
        rewardValue = reward.rewardValue
        # update maxObserved Reward
        if rewardValue > self.maxObservedReward:
            self.maxObservedReward = rewardValue
        # update the value table: unseen states get a zeroed action-value row
        if self.calculateFlatState(currentObs.worldState) not in self.v_table.keys():
            self.v_table[self.calculateFlatState(currentObs.worldState)] = self.numActions*[0.0]
        lastFlatState = self.calculateFlatState(self.workingObservation.worldState)
        newFlatState = self.calculateFlatState(currentObs.worldState)
        if not currentObs.isTerminal:
            # Standard TD update with bootstrapped next-state value.
            Q_sa=self.v_table[lastFlatState][newAction.actionValue]
            Q_sprime_aprime=self.v_table[newFlatState][self.returnMaxIndex(currentObs)]
            new_Q_sa=Q_sa + self.stepsize * (rewardValue + self.gamma * Q_sprime_aprime - Q_sa)
            self.v_table[lastFlatState][lastAction]=new_Q_sa
        else:
            # Terminal state: no bootstrap term.
            Q_sa=self.v_table[lastFlatState][lastAction]
            new_Q_sa=Q_sa + self.stepsize * (rewardValue - Q_sa)
            self.v_table[lastFlatState][lastAction] = new_Q_sa
        # increment counter
        count = count + 1
        self.workingObservation = self.copyObservation(currentObs)
    # Done learning, reset environment
    self.gridEnvironment.env_reset()
def init(self, **kw):
    """Configure this builder from keyword arguments.

    Rejects mixing 'action' and 'generator'. For an 'action' string,
    rewrites SCons-style variables ($SOURCES/$TARGETS/...) into the local
    template syntax, registers the action under an MD5-derived key, and
    stores that key on the builder.
    """
    if kw.has_key('generator') and kw.has_key('action'):
        raise sconpat_error, 'do not mix action and generator in a builder'
    if kw.has_key('action'):
        # Replace the plural forms first so '$TARGET'/'$SOURCE' do not
        # partially match '$TARGETS'/'$SOURCES'.
        a = kw['action'].replace('$SOURCES', '${SRC}')
        a = a.replace('$TARGETS', '${TGT}')
        a = a.replace('$TARGET', '${TGT[0].abspath(env)}')
        a = a.replace('$SOURCE', '${SRC[0].abspath(env)}')
        # Key the registered action by the digest of its rewritten text.
        m = md5()
        m.update(a)
        key = m.hexdigest()
        Action.simple_action(key, a, kw.get('color', 'GREEN'))
        self.action=key
def startSequences(Sequences):
    """Try to start every sequence, retrying each up to 4 times.

    A successful start sets status to -1 (active); each failure closes the
    sequence's windows and increments status, giving up after 4 attempts.
    """
    log("Activating Sequences")
    for Sequence in Sequences:
        # status in [0, 4) means "not yet running, attempts remaining".
        while 0 <= Sequence.status < 4:
            log("starting %s" % Sequence.name)
            if Sequence.start():
                Sequence.status = -1
                log("windows for %s ready\n" % Sequence.name)
            else:
                log("failed to start %s" % Sequence.name)
                Sequence.closeAll()
                Sequence.status += 1
                Action.wait()
        if Sequence.status > 0:
            log("ending attempts to start %s" % Sequence.name, "warn")
    log("all Sequences have been started")
def qLearn(self, observation):
    """Run one Q-learning episode starting from *observation*.

    Variant that delegates the table update to self.updateVtable and
    accumulates the episode reward into self.totalReward.
    """
    # copy the initial observation
    self.workingObservation = self.copyObservation(observation)
    # start the counter
    count = 0
    lastAction = -1
    # reset total reward
    self.totalReward = 0.0
    # while terminal state not reached and counter hasn't expired, use epsilon-greedy search
    while not self.workingObservation.isTerminal and count < self.numSteps:
        # Make sure table is populated correctly
        self.initializeVtableStateEntry(self.workingObservation.worldState)
        # Take the epsilon-greedy action
        newAction = Action()
        newAction.actionValue = self.egreedy(self.workingObservation)
        lastAction = newAction.actionValue
        # Get the new state and reward from the environment
        currentObs, reward = self.gridEnvironment.env_step(newAction)
        rewardValue = reward.rewardValue
        # Make sure table is populated correctly
        self.initializeVtableStateEntry(currentObs.worldState)
        # update the value table
        lastFlatState = self.calculateFlatState(self.workingObservation.worldState)
        newFlatState = self.calculateFlatState(currentObs.worldState)
        #self.updateVtable(newFlatState, lastFlatState, newAction.actionValue, lastAction, rewardValue, currentObs.isTerminal, currentObs.availableActions)
        self.updateVtable(newFlatState, lastFlatState, newAction.actionValue, rewardValue, currentObs.isTerminal, currentObs.availableActions)
        # increment counter
        count = count + 1
        self.workingObservation = self.copyObservation(currentObs)
        # increment total reward
        self.totalReward = self.totalReward + reward.rewardValue
    # Done learning, reset environment
    self.gridEnvironment.env_reset()
def executePolicy(self, observation):
    """Greedily follow the learned policy and return the action history.

    Returns a list of (actionValue, worldState) pairs, one per step taken,
    accumulating reward into self.totalReward along the way.
    """
    # History stores up list of actions executed
    history = []
    # Start the counter
    count = 0
    # reset total reward
    self.totalReward = 0.0
    # Copy the initial observation
    self.workingObservation = self.copyObservation(observation)
    # Make sure the value table has the starting observation
    self.initializeVtableStateEntry(self.workingObservation.worldState)
    if self.isVerbose():
        print("START")
    # While a terminal state has not been hit and the counter hasn't expired, take the best action for the current state
    while not self.workingObservation.isTerminal and count < self.numSteps:
        newAction = Action()
        # Get the best action for this state
        newAction.actionValue = self.greedy(self.workingObservation)
        history.append((newAction.actionValue, self.workingObservation.worldState))
        if self.isVerbose():
            print "state:", self.workingObservation.worldState
            print "bot action:", self.gridEnvironment.actionToString(newAction.actionValue)
        # execute the step and get a new observation and reward
        currentObs, reward = self.gridEnvironment.env_step(newAction)
        if self.isVerbose():
            print "reward:", reward.rewardValue
        self.totalReward = self.totalReward + reward.rewardValue
        self.workingObservation = copy.deepcopy(currentObs)
        # increment counter
        count = count + 1
    if self.isVerbose():
        print("END")
    return history
def start(self, description="", cmds="",action=None,actionRecover=None,actionArgs={},category="unknown",name="unknown",\
          errorMessage="", resolutionMessage="", loglevel=1,die=True,stdOutput=True,errorOutput=True,retry=1,serviceObj=None):
    '''
    Build and run an Action, skipping it if an action with the same name
    has already completed; record it as done on success.

    @param id is unique id which allows finding back of action
    @param description: Action description (what are we doing)
    @param errorMessage: message to give when error
    @param resolutionMessage: Action resolution message (how to resolve the action when error)
    @param loglevel: Message level
    @param action: python function to execute
    @param actionRecover: python function to execute when error
    @param actionArgs is dict with arguments
    @param cmds is list of commands to execute on os
    @param state : INIT,RUNNING,OK,ERROR

    NOTE(review): actionArgs={} is a mutable default argument shared across
    calls — safe only if never mutated; TODO confirm.
    '''
    action=Action(description, cmds,action,actionRecover,actionArgs,category,name,errorMessage, resolutionMessage, loglevel,die,stdOutput,errorOutput,retry,serviceObj=serviceObj)
    # Skip actions that are already recorded as done under the same name.
    md=self.getActionNamesDone(action)
    if action.name in md:
        print "* %-20s: %-40s %-40s ALREADY DONE"%(action.category,action.name,action.description)
        return
    action.execute()
    # Only successful actions are recorded as done.
    if action.state=="OK":
        self.setActionNamesDone(action)
def getDecision(self, current_frame):
    """Take a random action and append the current frame to the dataset.

    Frames are scaled to [0, 1] before storage; the backing dataset is
    doubled in size whenever it fills up.
    """
    # Periodic progress marker.
    if self.game.world_counter % 100 == 0:
        print "*" * 40, self.game.world_counter, "%" * 40
    curr_action = Action.getRandomAction()
    # Actually perform the action in the game
    self.performAction(curr_action)
    data = current_frame.toCNNInput()
    #data = data.reshape((84,84))
    # Normalise pixel values to [0, 1] (255. keeps this float division).
    data *= 1/255.
    self.dataset[self.dataset_index] = data
    self.dataset_index += 1
    # Grow the dataset when full.
    if self.dataset_index == len(self.dataset):
        print("RESIZING TO ", len(self.dataset)*2)
        self.dataset.resize((len(self.dataset)*2, WINDOW_SIZE, WINDOW_SIZE))
def closeAll(self):
    """Close every window belonging to this sequence and reset its state."""
    log("restarting %s" % self.name)
    # Close the three windows in the original order, pausing one second
    # between consecutive closes (no pause after the last).
    for index, window in enumerate((self.startWindow, self.activeWindow, self.adWindow)):
        if index:
            sleep(1)
        Action.close(window)
    log("all %s windows closed" % self.name)
    # Reset all counters and Sequence variables
    self.no_clicks = 0
    self.startWindow = None
    self.activeWindow = None
    self.adWindow = None
def warmUp():
    """Warm up the browser: open a throwaway page, wait, then kill Chrome."""
    warm_up_url = "www.yahoo.com"
    Action.openUrl(warm_up_url)
    # Give the page (and the browser profile) time to settle.
    sleep(20)
    system("pkill -9 chrome")
def getDecision(self, frame):
    """Pick a random action and execute it; *frame* is ignored."""
    self.performAction(Action.getRandomAction())
def handle_read(self):
    """Read one object off this connection and print it; drop empty reads."""
    received = Action.recevieObject(self)
    if received:
        print(received)
# optional flavor text ## 'glitch_text' : '', 'fail_text' : '', 'success_text' : '', 'great_success_text' : '', } # Do not edit below this line # Set the threshold for the number of successes (set with first argument) if len(argv) > 1: action_info['threshold'] = str(argv[1]) else: action_info['threshold'] = 0 return action_info if __name__ == '__main__': # action_info = main() try: Action.runner(main()) except: print('ERROR - Action.runner() failed')
        # --- tail of a method whose def lies above this chunk ---
        return True

    def execute(self):
        """Run every stored case through execute_one, printing banners around each."""
        for case in self.cases:
            # Python 2 print statement (mixed with print() calls below).
            print "=============Execution================"
            result = self.execute_one(case)
            print("================%s=================" % str(result))

    def dumpcase(self):
        """Print each case with its index and let the graph render its path."""
        i = 0
        for case in self.cases:
            # Trailing comma: Python 2 "print without a newline".
            print("Case " + str(i) + ": "),
            self.g.outputpath(case)
            i += 1

    def savesvg(self):
        # Delegate SVG output to the underlying graph object.
        self.g.savesvg()

if __name__ == '__main__':
    # Demo: a two-rule FSM over states "1"/"2".
    fsm = FSM()
    action1 = Action()
    # "cond and a or b" is the pre-2.5 conditional idiom:
    # state "1" -> "1", anything else -> "2".
    action1.transfer = lambda x: (x == "1" and "1") or "2"
    action1.name = "rule 1"
    action2 = Action()
    # state "2" -> "1", anything else -> "2".
    action2.transfer = lambda x: (x == "2" and "1") or "2"
    action2.name = "rule 2"
    fsm.actionset.append(action1)
    fsm.actionset.append(action2)
    fsm.startstates = ["1"]
    fsm.generateEdge()
    #for edge in fsm.edgeset:
    #    print(str(edge))
    # --- tail of a transfer_* function whose def lies above this chunk ---
    status['app'] = 'on'
    return status

def transfer_appon(status):
    """Turn the app on, unless the app is absent ('na') or the screen is blacked out."""
    if status['app'] != 'na' and status['black'] != 'on':
        status['app'] = 'on'
    # NOTE: mutates and returns the same status dict.
    return status

def transfer_appoff(status):
    """Turn the app off, unless the app is absent ('na')."""
    if status['app'] != 'na':
        status['app'] = 'off'
    return status

if __name__ == '__main__':
    # Wire up the FSM with named transfer rules (setup continues past this chunk).
    fsm = FSM()
    action1 = Action()
    action1.transfer = transfer_blackon
    action1.name = 'black_on'
    action2 = Action()
    action2.transfer = transfer_blackoff
    action2.name = 'black_off'
    action3 = Action()
    action3.transfer = transfer_appin
    action3.name = 'app_in'
    action4 = Action()
    action4.transfer = transfer_appon
    action4.name = 'app_on'
actions = [1, 3, 3, 0, 1] # The last state workingObservation = gridAgent.copyObservation(gridAgent.initialObs) # Make sure there is an entry for the last state in the v table gridAgent.initializeVtableStateEntry(workingObservation.worldState) # Report the initial v table print "Initial V Table:" print gridAgent.v_table print "---" # Execute the sequence of actions for a in actions: # Make a new action newAction = Action() newAction.actionValue = a # Execute the action currentObs, reward = gridEnvironment.env_step(newAction) # Make sure there is an entry in the v table for the new state gridAgent.initializeVtableStateEntry(currentObs.worldState) # Put things in the right form lastFlatState = gridAgent.calculateFlatState(workingObservation.worldState) newFlatState = gridAgent.calculateFlatState(currentObs.worldState) # Update the v table gridAgent.updateVtable(newFlatState, lastFlatState, newAction.actionValue, reward.rewardValue, currentObs.isTerminal, currentObs.availableActions) # Report print "v table after:" print " old state:", workingObservation.worldState print " action:", newAction.actionValue print " new state:", currentObs.worldState
        # --- fragment: the opening 'if' of this key-dispatch chain lies above ---
        # Map keyboard/number input to a move code (indentation reconstructed).
        elif x[0] == '0' or x[0] == 'w':
            #up
            move = 0
        elif x[0] == '1' or x[0] == 's':
            #down
            move = 1
        elif x[0] == '2' or x[0] == 'a':
            #left
            move = 2
        elif x[0] == '3' or x[0] == 'd':
            #right
            move = 3
        elif x[0] == '4' or x[0] == 'q':
            #smash
            move = 4

        # Wrap the chosen move in an Action, step the environment, tally reward.
        act = Action()
        act.actionValue = move
        newobs, reward = gridEnvironment.env_step(act)
        print "reward received:", reward.rewardValue
        totalr = totalr + reward.rewardValue

    elif play == 2:
        # play as the enemy
        print "PLAY!"
        gridAgent.agent_reset()
        gridAgent.verbose = 0
        gridEnvironment.enemyMode = 4  # don't change this
        gridEnvironment.verbose = 0
        obs = gridAgent.copyObservation(gridAgent.initialObs)
import Action

if __name__ == '__main__':
    # PYGAME INIT
    pygame.init()
    # NOTE(review): pygame.display.set_mode takes (width, height); passing
    # (windowHeight, windowLength) is only correct if the constants are equal
    # or deliberately swapped — confirm.
    Game.screen = pygame.display.set_mode((Const.windowHeight, Const.windowLength))
    Game.screen.fill(Colour.darkGrey)
    pygame.display.set_caption('SupNetwork')
    clock = pygame.time.Clock()

    # DISPLAY
    Init.array()
    Display.all_cell()

    # GAME LOOP
    while True:
        for event in pygame.event.get():
            # NOTE(review): tick(30) inside the event for-loop throttles per
            # event rather than per frame; it usually belongs directly in the
            # while body — confirm intent before moving it.
            clock.tick(30)
            if event.type == MOUSEBUTTONUP:
                x, y = event.pos
                # Forward only clicks that land inside the board area.
                if Const.margin < x < Const.margin+Const.cellSize*Const.gameSize \
                and Const.margin < y < Const.margin+Const.cellSize*Const.gameSize:
                    Action.click(x, y)
            if event.type == QUIT:
                pygame.quit()
                sys.exit()