def agent_end(self, reward):
    modelReward = reward
    if reward == -10.0:  # the environment signals death with -10
        reward = DeathPenalty
        modelReward = InPitPenalty
    rewardFea = getRewardFeature(self.lastState, self.lastLastAction)
    print "before pre reward: ", self.rewardAgent.getQ(rewardFea, self.lastAction)
    self.agent.end(reward)
    self.rewardAgent.end(round(modelReward, 0))
    print "pre reward: ", self.rewardAgent.getQ(rewardFea, self.lastAction)
    print "end: ", reward, " step: ", self.stepNum, " dist:", self.lastState.mario.x
    self.totalStep = self.totalStep + self.stepNum
    if reward < 10:  # only the reward on the finish line counts
        reward = 0
    self.distList[len(self.distList) - 1] = (self.totalStep, self.lastState.mario.x,
                                             self.episodeNum, reward)
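# The distList records maintained by agent_end/agent_step hold one tuple per
# episode: (cumulative steps, final x position, episode number, terminal
# reward). A minimal sketch of dumping them for learning-curve analysis;
# the helper name and file name are assumptions, not part of the original code.
def dumpDistList(distList, fileName="learning_curve.txt"):
    out = open(fileName, "w")
    for totalStep, dist, episodeNum, reward in distList:
        out.write("%d\t%f\t%d\t%f\n" % (totalStep, dist, episodeNum, reward))
    out.close()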
def ExpandPath(path, state, dynaLearner, rewardLearner):
    for actionId in path:
        nextState = SimState()
        if state.path != []:
            prevAction = state.path[len(state.path) - 1]
        else:
            prevAction = -1  # TODO: make it NoTask
        for i, world in enumerate(state.worldList):
            fea = getTestFeature(world, actionId)
            rewardFea = getRewardFeature(world, prevAction)  # WARNING!! Don't change the order
            reward = rewardLearner.getQ(rewardFea, actionId)
            fea.pop(0)
            distList = dynaLearner[actionId].getClass(fea, orange.GetProbabilities)
            # TODO: add randomness here
            ax, ay, dx, dy = [GetSample(dist) for dist in distList]
            m = world.mario
            sx = ax + m.sx
            sy = ay + m.sy
            newMario = copy.deepcopy(m)
            newMario.x = m.x + m.sx + dx
            newMario.y = m.y + m.sy + dy
            newMario.sx = sx
            newMario.sy = sy
            newWorld = copy.copy(world)  # with static assumption, everything other than mario stays the same
            newWorld.mario = newMario
            dir, isJump, isSpeed = getActionType(actionId)
            # Jump does not increase y-speed, do not need to search anymore
            if (not (newWorld.mario.sy >= 0 and isJump)) and not (
                    sx == 0.0 and sy == 0.0 and dx == 0.0 and dy == 0.0):
                nextState.worldList.append(newWorld)
                nextState.rewardList.append(state.rewardList[i] + reward)
        nextState.path = state.path + [actionId]
        state = nextState
    return state
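# ExpandPath is the model-based rollout driven by self.planning, which is
# referenced below but not defined in this section. What follows is a minimal
# sketch of what such a planning method could look like: score every
# fixed-depth action sequence with the learned dynamics and reward models and
# return the first action of the best-scoring sequence. PlanDepth, the
# SimState initialization, and the tie-breaking are assumptions.
import itertools

PlanDepth = 3  # assumed search depth

def planning(self, state, possibleAction):
    best, bestAction = None, possibleAction[0]
    for path in itertools.product(possibleAction, repeat=PlanDepth):
        simState = SimState()
        simState.worldList = [state]   # start the rollout from the current world
        simState.rewardList = [0.0]
        simState.path = []
        simState = ExpandPath(list(path), simState,
                              self.DynamicLearner, self.rewardAgent)
        if simState.rewardList:        # the whole branch may have been pruned
            score = max(simState.rewardList)
            if best is None or score > best:
                best, bestAction = score, path[0]
    return bestAction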
def agent_start(self, obs):
    state = WorldState(obs)
    self.lastState = state
    fea = getSarsaFeature(state, NoTask)
    if self.isModelReady():
        self.agent.epsilon = self.HORDQ_episilon
        possibleAction = self.agent.getPossibleAction(fea)
        action = self.planning(state, possibleAction)
        action = self.agent.start(fea, action)
    else:
        if self.AgentType() == AgentType.SarsaAgent:
            self.agent.epsilon = self.HORDQ_episilon
        else:
            self.agent.epsilon = 0.05  # encourage exploration
        action = self.agent.start(fea, NoTask)
    rewardFea = getRewardFeature(state, NoTask)
    self.rewardAgent.start(rewardFea, action)
    self.stepNum = 0
    self.lastAction = action
    self.distList.append(())  # put a dummy one
    self.episodeNum = self.episodeNum + 1
    return makeAction(action)
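# makeAction and getActionType are defined elsewhere in the module. In the
# RL-Glue Mario benchmark an action is a triple (direction, jump, speed); the
# sketch below shows one plausible flat encoding of actionId into that triple.
# The exact bit layout and the Action constructor arguments are assumptions,
# not the project's actual definitions.
from rlglue.types import Action

def getActionType(actionId):
    # Assumed encoding: 12 primitive actions = 3 directions x 2 jump x 2 speed.
    direction = actionId % 3 - 1   # -1 = left, 0 = stay, 1 = right
    isJump = (actionId // 3) % 2   # 1 = jump button held
    isSpeed = (actionId // 6) % 2  # 1 = speed/fire button held
    return direction, isJump, isSpeed

def makeAction(actionId):
    # Pack the triple into an RL-Glue Action (intArray of length 3).
    action = Action(3, 0)          # 3 ints, 0 doubles
    action.intArray = list(getActionType(actionId))
    return action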
def agent_step(self, reward, obs):
    state = WorldState(obs)
    fea = getSarsaFeature(state, self.lastAction)
    lastMario = self.lastState.mario
    mario = state.mario

    # internal reward system: reward forward progress
    dx = mario.x - lastMario.x
    reward = reward + dx

    modelReward = 0
    if isMarioInPit(state):
        print "in pit !!!!!!!"
        # no pit penalty for HORDQ; the penalty only feeds the reward model
        modelReward = InPitPenalty

    if not self.isModelReady():
        action = self.agent.step(reward, fea, NoTask)
    else:
        # epsilon-greedy policy
        if random.random() < self.epsilon:
            # select randomly
            action = self.actionList[int(random.random() * len(self.actionList))]
            print "random!!"
        else:
            possibleAction = self.agent.getPossibleAction(fea)
            action = self.planning(state, possibleAction)
            print "planning", action
        self.agent.pseudoReward = 10000
        action = self.agent.step(reward, fea, action)
        self.agent.pseudoReward = self.initPseudoReward

    print "step loc:", self.stepNum, " ", mario.x, " ", mario.y, " ", mario.sx, " ", mario.sy

    # dynamics-model targets: acceleration and displacement prediction errors
    lastActionId = self.lastAction
    deltaX = mario.x - (lastMario.x + lastMario.sx)
    deltaY = mario.y - (lastMario.y + lastMario.sy)
    aX = mario.sx - lastMario.sx
    aY = mario.sy - lastMario.sy
    classVar = [round(aX, Precision), round(aY, Precision),
                round(deltaX, Precision), round(deltaY, Precision)]
    modelFea = getModelFeature(self.lastState, classVar)
    if self.isModelReady():
        # keep only the examples the current model mispredicts
        predictModelClass = self.DynamicLearner[lastActionId].getClass(modelFea)
        predictModelClass = [round(v, 1) for v in predictModelClass]
        roundClassVar = [round(v, 1) for v in classVar]
        print "feature: ", lastActionId, " ", modelFea
        print "predict: ", predictModelClass
        if not roundClassVar == predictModelClass:
            self.feaList[lastActionId].append(modelFea)
        else:
            print "pass model-------------"
    else:
        if not self.AgentType() == AgentType.SarsaAgent:
            self.feaList[lastActionId].append(modelFea)

    rewardFea = getRewardFeature(state, self.lastAction)
    print "before pre reward: ", self.rewardAgent.getQ(rewardFea, action)
    self.rewardAgent.step(rewardFea, modelReward, action)
    print "pre reward: ", self.rewardAgent.getQ(rewardFea, action)
    print "reward: ", modelReward

    self.lastState = state
    self.lastLastAction = self.lastAction
    self.lastAction = action
    self.stepNum = self.stepNum + 1
    self.distList[len(self.distList) - 1] = (self.totalStep + self.stepNum,
                                             self.lastState.mario.x, self.episodeNum, 0)
    return makeAction(action)
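# isModelReady gates both planning and example collection above, but is not
# defined in this section. A minimal sketch of one way it could be defined,
# assuming the model counts as ready once every action's dynamics learner has
# collected enough training examples; the threshold value is an assumption.
MinExamplesPerAction = 100  # assumed threshold; tune per domain

def isModelReady(self):
    return all(len(examples) >= MinExamplesPerAction
               for examples in self.feaList)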