def __init__(self, index, offense):
    """Initialize the agent in either an offensive or defensive role.

    Args:
        index: this agent's index in the game.
        offense: if True, configure as an attacker (ignore defensive
            features); otherwise configure as a defender (ignore offensive
            features) and register this index in TrialAgent.defenseIndex.

    self.myNotFeatures lists the feature names this role should NOT use.
    """
    DefensiveReflexAgent.__init__(self, index)
    if offense:
        self.myNotFeatures = [
            'eatingEnemy', 'distanceToClosestEnemyAsGhostSquared',
            'attackingEnemyAsGhost', 'numberOfYourFoodsRemaining',
            'homeTerritory', 'distanceToClosestEnemyAsGhost', 'isPacman']
    else:
        # Defenders announce themselves so teammates can coordinate.
        TrialAgent.defenseIndex.append(index)
        # BUG FIX: the original was missing the comma after 'gettingEaten',
        # so implicit string concatenation fused it with the next name into
        # one bogus feature string and neither feature was excluded.
        self.myNotFeatures = [
            'distancetoClosestEnemyFoodSquared', 'gettingEaten',
            'distanceToClosestEnemyAsPacmanSquared', 'attackingEnemyAsPacman',
            'numberOfEnemyFoodsRemaining', 'distancetoClosestEnemyFood',
            'enemyTerritory', 'distanceToClosestEnemyAsPacman',
            'enemyGhostClose', 'distanceToFriends']
def chooseAction(self, gameState):
    """Pick an action: update enemy-position inference, show it, then defer
    to the defensive reflex policy.

    NOTE(review): the red team currently just plays uniformly at random —
    the early return below looks like a temporary testing shortcut
    ("we're blue for now"); the red/blue branch after it is kept for when
    that shortcut is removed.
    """
    # Temporary: red side acts randomly for now.
    if self.red:
        return choice(gameState.getLegalActions(self.index))
    if self.red:
        enemyIndices = gameState.getBlueTeamIndices()
    else:
        enemyIndices = gameState.getRedTeamIndices()
    # Lazily set up the inference distributions on the first call.
    if self.inferenceModule.distributions is None:
        self.inferenceModule.initializeDistributions(gameState, enemyIndices)
    # Fold in the latest noisy observation, then advance the time step,
    # for each tracked enemy.
    for agentIndex in enemyIndices:
        self.inferenceModule.observe(gameState, self.index, agentIndex)
        self.inferenceModule.elapseTime(gameState, agentIndex)
    # Build the display list: one slot per agent, None for agents we are
    # not tracking (dict.get returns None for missing keys).
    distrToDisplay = [self.inferenceModule.distributions.get(i)
                      for i in range(gameState.getNumAgents())]
    self.displayDistributionsOverPositions(distrToDisplay)
    return DefensiveReflexAgent.chooseAction(self, gameState)
def registerInitialState(self, gameState):
    """Per-agent and one-time team-wide setup at the start of a game.

    Records baseline food counts for this agent, and — only for the first
    teammate to register (guarded by TrialAgent.firstTurn) — initializes
    the class-level shared state: distancers, legal-position caches, team
    index bookkeeping, and per-ally goal slots.
    """
    DefensiveReflexAgent.registerInitialState(self, gameState)
    # Baseline food counts (ours and theirs) at game start.
    self.startingFood = len(self.getFoodYouAreDefending(gameState).asList())
    self.theirStartingFood = len(self.getFood(gameState).asList())
    if TrialAgent.firstTurn:
        # Shared maze distancer, precomputed once for the whole team.
        TrialAgent.distancer = distanceCalculator.Distancer(gameState.data.layout)
        TrialAgent.manhattanDistancer = distanceCalculator.Distancer(gameState.data.layout)
        # TrialAgent.manhattanDistancer.useManhattanDistances()
        TrialAgent.distancer.getMazeDistances()
        # All non-wall positions, plus a cache of legal successor positions
        # for each of them.
        TrialAgent.legalPositions = gameState.getWalls().asList(False)
        for pos in TrialAgent.legalPositions:
            TrialAgent.legalNextPositions[pos] = self.getLegalNextPositions(gameState, pos)
        TrialAgent.enemyIndices = self.getOpponents(gameState)
        TrialAgent.allyIndices = self.getTeam(gameState)
        self.initializeUniformly(gameState)
        # Mark shared setup done so later teammates skip this branch.
        TrialAgent.firstTurn = False
        TrialAgent.lastAgent = TrialAgent.allyIndices[len(TrialAgent.allyIndices) - 1]
        TrialAgent.numAllies = len(TrialAgent.allyIndices)
        # (-1, -1) is the sentinel for "no goal assigned yet".
        for ally in TrialAgent.allyIndices:
            TrialAgent.currentGoal[ally] = (-1, -1)
        # Reset the defender roster; defensive agents re-append themselves
        # in their constructors.
        TrialAgent.defenseIndex = list()
def __init__(self, index):
    """Construct the defensive Q-learning agent.

    Weights start as None and are expected to be populated later by the
    learning machinery.
    """
    DefensiveReflexAgent.__init__(self, index)
    self.weights = None
    self.agentName = 'defensiveQLearningAgent'
def getBestAction(self, state):
    """Delegate action selection to the defensive reflex policy."""
    chosen = DefensiveReflexAgent.chooseAction(self, state)
    return chosen