Example 1
    def registerInitialState(self, gameState):
        CaptureAgent.registerInitialState(self, gameState)

        self.teamIndices = CaptureAgent.getTeam(
            self,
            gameState)  # Indices of every allied agent
        self.opponentIndices = CaptureAgent.getOpponents(
            self,
            gameState)  # Indices of every opponent agent

        # Record which slot holds this agent's information: who it is going
        # after (if it is on defense) and where it currently is.
        if self.index == self.teamIndices[0]:
            self.teamOrder = 0
        elif self.index == self.teamIndices[1]:
            self.teamOrder = 1

        position = gameState.getAgentPosition(
            self.index
        )  # Work out where the boundary between the two sides is
        gameMap = gameState.getWalls()
        boundary = gameMap.width // 2  # integer division so the edges stay grid indices
        if position[0] < boundary:
            self.leftEdge = 0  # The farthest left the Agent can go and still be on its home side
            self.rightEdge = boundary  # The leftmost column that lies on the opponent side
            self.boundary = boundary - 1  # The home-side column closest to the opponent side
        else:
            self.leftEdge = boundary  # Same variables as above, mirrored for the right side
            self.rightEdge = gameMap.width
            self.boundary = boundary
Example 2
 def registerInitialState(self, gameState):
     CaptureAgent.registerInitialState(self, gameState)
     self.myAgents = CaptureAgent.getTeam(self, gameState)
     self.opponentsAgents = CaptureAgent.getOpponents(self, gameState)
     self.myFoods = CaptureAgent.getFood(self, gameState).asList()
     self.opponentsFoods = CaptureAgent.getFoodYouAreDefending(
         self, gameState).asList()
Example 3
    def registerInitialState(self, gameState):
        """
    This method handles the initial setup of the
    agent to populate useful fields (such as what team
    we're on).

    A distanceCalculator instance caches the maze distances
    between each pair of positions, so your agents can use:
    self.distancer.getDistance(p1, p2)

    IMPORTANT: This method may run for at most 15 seconds.
    """
        '''
    Make sure you do not delete the following line. If you would like to
    use Manhattan distances instead of maze distances in order to save
    on initialization time, please take a look at
    CaptureAgent.registerInitialState in captureAgents.py.
    '''
        # Run the base-class setup first so that self.red and self.distancer
        # are initialized before the helper getters below are used.
        CaptureAgent.registerInitialState(self, gameState)

        self.start = gameState.getAgentPosition(self.index)
        self.team = CaptureAgent.getTeam(self, gameState)
        self.food = CaptureAgent.getFood(self, gameState)
        self.foodDefending = CaptureAgent.getFoodYouAreDefending(
            self, gameState)
        self.opponents = CaptureAgent.getOpponents(self, gameState)
        print("Start: ", self.start, "Team: ", self.team, "Food: ", self.food,
              "Food Defending:", self.foodDefending, "Opponent: ",
              self.opponents)
Example 4
 def getFeatures(self, gameState, action):
     food = self.getFood(gameState)  # food to attack for either team color (getBlueFood() only worked for the red team)
     wall = gameState.getWalls()
     ghosts = []  # positions of currently visible opponent ghosts
     opponentAgents = CaptureAgent.getOpponents(self, gameState)
     if opponentAgents:
         for opponent in opponentAgents:
             opponentPosition = gameState.getAgentPosition(opponent)
             opponentIsPacman = gameState.getAgentState(opponent).isPacman
             if opponentPosition and not opponentIsPacman:
                 ghosts.append(opponentPosition)
     counter = util.Counter()
     successor = self.getSuccessor(gameState, action)
     counter['successorScore'] = self.getScore(successor)
     counter["bias"] = 1.0
     x, y = gameState.getAgentPosition(self.index)
     dx, dy = Actions.directionToVector(action)
     nextX, nextY = int(x + dx), int(y + dy)
     counter["ghost1Away"] = sum(
         (nextX, nextY) in Actions.getLegalNeighbors(g, wall)
         for g in ghosts)
     if not counter["ghost1Away"] and food[nextX][nextY]:
         counter["eatFood"] = 1.0
     dist = self.closestFood((nextX, nextY), food, wall)
     if dist is not None:
         # Scale by the board area so the feature stays well below one
         counter["closestPellet"] = float(dist) / (wall.width * wall.height)
     counter.divideAll(10.0)
     return counter
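
Both this example and the next one call a self.closestFood helper that is not included in the snippets. Below is a minimal standalone sketch of what such a breadth-first helper could look like, assuming food and walls behave like 2-D boolean grids indexed as grid[x][y]; the closest_food name and the toy grids are illustrative, not taken from the examples.

from collections import deque

def closest_food(pos, food, walls):
    """Breadth-first search from pos; returns the maze distance to the
    nearest cell where food[x][y] is True, or None if none is reachable."""
    width, height = len(walls), len(walls[0])
    fringe = deque([(pos[0], pos[1], 0)])
    expanded = set()
    while fringe:
        x, y, dist = fringe.popleft()
        if (x, y) in expanded:
            continue
        expanded.add((x, y))
        if food[x][y]:
            return dist
        # Expand to the four in-bounds, non-wall neighbours.
        for nx, ny in ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)):
            if 0 <= nx < width and 0 <= ny < height and not walls[nx][ny]:
                fringe.append((nx, ny, dist + 1))
    return None

# Tiny usage check on a 4x3 toy grid.
walls = [[False] * 3 for _ in range(4)]
food = [[False] * 3 for _ in range(4)]
food[3][2] = True
print(closest_food((0, 0), food, walls))  # -> 5
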
Example 5
    def getFeatures(self, gameState, action):
        # Extract the grid of food and wall locations
        if self.red:
            food = gameState.getBlueFood()
        else:
            food = gameState.getRedFood()

        walls = gameState.getWalls()
        ghosts = []
        opAgents = CaptureAgent.getOpponents(self, gameState)
        # Get ghost locations and states if observable
        if opAgents:
            for opponent in opAgents:
                opPos = gameState.getAgentPosition(opponent)
                opIsPacman = gameState.getAgentState(opponent).isPacman
                if opPos and not opIsPacman:
                    ghosts.append(opPos)

        # Initialize features
        features = util.Counter()
        successor = self.getSuccessor(gameState, action)

        # Successor Score
        features['successorScore'] = self.getScore(successor)

        # Bias
        features["bias"] = 1.0

        # compute the location of pacman after he takes the action
        x, y = gameState.getAgentPosition(self.index)
        dx, dy = Actions.directionToVector(action)
        next_x, next_y = int(x + dx), int(y + dy)

        # Number of Ghosts 1-step away
        features["#-of-ghosts-1-step-away"] = sum(
            (next_x, next_y) in Actions.getLegalNeighbors(g, walls)
            for g in ghosts)
        # if there is no danger of ghosts then add the food feature
        if not features["#-of-ghosts-1-step-away"] and food[next_x][next_y]:
            features["eats-food"] = 1.0

        # Number of Ghosts scared
        # features['#-of-scared-ghosts'] = sum(gameState.getAgentState(opponent).scaredTimer != 0 for opponent in opAgents)

        # Closest food
        dist = self.closestFood((next_x, next_y), food, walls)
        if dist is not None:
            # make the distance a number less than one otherwise the update
            # will diverge wildly
            features["closest-food"] = float(dist) / (walls.width *
                                                      walls.height)

        # Normalize and return
        features.divideAll(10.0)
        return features
    def getFeatures(self, gameState, action):
        food = self.getFood(gameState)
        walls = gameState.getWalls()
        ghosts = []
        opponents = CaptureAgent.getOpponents(self, gameState)
        # Gets visible ghost locations and states
        if opponents:
            for opponent in opponents:
                opp_pos = gameState.getAgentPosition(opponent)
                op_is_pacman = gameState.getAgentState(opponent).isPacman
                if opp_pos and not op_is_pacman:
                    ghosts.append(opp_pos)

        # Initializes features
        features = util.Counter()
        successor = self.getSuccessor(gameState, action)

        features['successorScore'] = self.getScore(successor)

        features["bias"] = 1.0

        # Computes the location of pacman after it takes the action
        x, y = gameState.getAgentPosition(self.index)
        dx, dy = Actions.directionToVector(action)
        next_x, next_y = int(x + dx), int(y + dy)

        # Number of ghosts 1-step away
        features["#-of-ghosts-1-step-away"] = sum(
            (next_x, next_y) in Actions.getLegalNeighbors(ghost, walls)
            for ghost in ghosts)

        myPos = gameState.getAgentState(self.index).getPosition()

        # Check for ghosts & add the food feature
        if not features["#-of-ghosts-1-step-away"] and food[next_x][next_y]:
            features["eats-food"] = 1.0

        # Finds closest food
        dist = self.closestFood((next_x, next_y), food, walls)
        if dist is not None:
            features["closest-food"] = float(dist) / (walls.width *
                                                      walls.height)

        if gameState.getAgentState(self.index).numCarrying > 5:
            print("Enough food! Going home...")
            distHome = self.getMazeDistance(myPos, self.start)
            features["eats-food"] = 0.0
            features["go-home"] = distHome

        # Normalizes all the features and returns
        features.divideAll(10.0)
        print(features)
        return features
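
The feature extractors above return util.Counter objects that are normally paired with a weight vector: the estimated value of an action is the dot product of its features and the weights (compare the weight dictionaries in Example 13 further down). A minimal sketch of that evaluation step with plain dicts; the evaluate name and the numbers are illustrative only.

def evaluate(features, weights):
    """Linear evaluation: dot product of a feature dict and a weight dict."""
    return sum(value * weights.get(name, 0.0) for name, value in features.items())

# Illustrative numbers only.
features = {"bias": 1.0, "closest-food": 0.02, "#-of-ghosts-1-step-away": 0}
weights = {"bias": 2.6, "closest-food": 2.35, "#-of-ghosts-1-step-away": -10.0}
print(evaluate(features, weights))  # -> roughly 2.647
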
Example 7
	def registerInitialState(self, gameState):
		"""
		This method handles the initial setup of the
		agent to populate useful fields (such as what team
		we're on).
		"""

		CaptureAgent.registerInitialState(self, gameState)
		self.myAgents = CaptureAgent.getTeam(self, gameState)
		self.opAgents = CaptureAgent.getOpponents(self, gameState)
		self.myFoods = CaptureAgent.getFood(self, gameState).asList()
		self.opFoods = CaptureAgent.getFoodYouAreDefending(self, gameState).asList()
    def aStarSearch(self, gameState, start, searchPos, foodOrGhost):
        import util
        # Initialization
        opponentList = CaptureAgent.getOpponents(self, gameState)
        getWalls = self.getDeepWalls()  # deep copy of the walls grid so it can be edited

        if foodOrGhost == 1:  # 1 = food/capsule target, otherwise a ghost target
            # When heading for food, treat visible ghosts as walls so the path avoids them
            for i in opponentList:
                checkNone = gameState.getAgentPosition(i)
                if checkNone is not None:
                    x, y = checkNone
                    getWalls[x][y] = True

        adjacentNodes = util.PriorityQueue()
        visitedNodes = []  #List holding nodes already visited
        #Start by pushing the start node into the priority queue
        adjacentNodes.push((start, [start], 0),
                           (0 + self.getMazeDistance(start, searchPos)))
        #Pop the initial point from the priority queue
        (state, goalDirection, cost) = adjacentNodes.pop()

        #adding the point to the visited list
        visitedNodes.append(
            (state, cost + self.getMazeDistance(start, searchPos)))

        while state != searchPos:  # while we have not reached the goal point
            neighbours = Actions.getLegalNeighbors(
                state, getWalls)  # get the point's successors
            for node in neighbours:
                visitedExist = False
                totalCost = cost + 1
                for (currentState, costToCurrentState) in visitedNodes:
                    # Check the closed list to find if there are any nodes at the same level with cost less than the total cost
                    if (node == currentState) and (costToCurrentState <=
                                                   totalCost):
                        visitedExist = True
                        break

                if not visitedExist:
                    # push the point with priority num of its total cost
                    adjacentNodes.push(
                        (node, goalDirection + [node], cost + 1),
                        (cost + 1 + self.getMazeDistance(node, searchPos)))
                    visitedNodes.append(
                        (node, cost + 1))  # add this point to visited list

            if (adjacentNodes.isEmpty()):
                return 0
            (state, goalDirection, cost) = adjacentNodes.pop()
        return len(goalDirection)  # path length in nodes (start included)
    def getFeatures(self, gameState, action):
        features = util.Counter()
        successor = self.getSuccessor(gameState, action)
        opponentList = CaptureAgent.getOpponents(
            self, gameState)  # get opponents indices
        foodList = self.getFood(successor).asList()

        if (gameState.getAgentState(self.index).numCarrying <
                4):  # carrying fewer than 4 pellets: keep eating food
            features['successorScore'] = -len(foodList)
        else:  # carrying 4 or more: head back home to bank the score
            myPos = successor.getAgentState(self.index).getPosition()
            defendFood = self.getFoodYouAreDefending(gameState).asList()
            features['successorScore'] = -self.getMazeDistance(myPos, self.start)

        #calculating distance from the visible opponents
        gDist = 0
        minGhost = 9999
        for i in opponentList:
            checkNone = gameState.getAgentPosition(i)
            if checkNone is not None:
                myPos = successor.getAgentState(self.index).getPosition()
                gDist = self.aStarSearch(gameState, myPos, checkNone, 0)
                if gDist < 3:
                    if gDist < minGhost:
                        minGhost = gDist

        if (minGhost == 9999): minGhost = 0

        features['distanceToGhost'] = minGhost

        # get the capsule a-star distance
        capsule = self.getCapsules(gameState)
        if len(capsule) != 0:
            myPos = successor.getAgentState(self.index).getPosition()
            features['distanceToCapsule'] = min([
                self.aStarSearch(gameState, myPos, caps, 1) for caps in capsule
            ])

        else:
            features['distanceToCapsule'] = 0

        # get food distance
        myPos = successor.getAgentState(self.index).getPosition()
        minDistance = min(
            [self.aStarSearch(gameState, myPos, food, 1) for food in foodList])
        features['distanceToFood'] = minDistance

        return features
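
The aStarSearch method above orders its priority queue by the cost so far plus the maze-distance heuristic, i.e. the usual A* priority f = g + h. The same idea in a compact, self-contained form on a toy 4-connected grid, using heapq and a Manhattan-distance heuristic; all names, grids, and values below are illustrative and independent of the contest framework.

import heapq

def astar_path_length(start, goal, walls):
    """A* on a 4-connected grid; returns the number of steps from start
    to goal, or None if the goal is unreachable. walls[x][y] is True for walls."""
    width, height = len(walls), len(walls[0])
    heuristic = lambda p: abs(p[0] - goal[0]) + abs(p[1] - goal[1])  # Manhattan distance
    frontier = [(heuristic(start), 0, start)]  # entries are (f = g + h, g, position)
    best_g = {start: 0}
    while frontier:
        f, g, pos = heapq.heappop(frontier)
        if pos == goal:
            return g
        for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            nxt = (pos[0] + dx, pos[1] + dy)
            if 0 <= nxt[0] < width and 0 <= nxt[1] < height and not walls[nxt[0]][nxt[1]]:
                if g + 1 < best_g.get(nxt, float("inf")):
                    best_g[nxt] = g + 1
                    heapq.heappush(frontier, (g + 1 + heuristic(nxt), g + 1, nxt))
    return None

walls = [[False] * 3 for _ in range(3)]
walls[1][0] = walls[1][1] = True  # a short wall forces a detour
print(astar_path_length((0, 0), (2, 0), walls))  # -> 6
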
Example 10
    def registerInitialState(self, gameState):
        CaptureAgent.registerInitialState(self, gameState)

        self.teamIndices = CaptureAgent.getTeam(self, gameState)
        self.opponentIndices = CaptureAgent.getOpponents(self, gameState)
        self.enemyPos = [None, None]

        position = gameState.getAgentPosition(
            self.index
        )  # Work out where the boundary between the two sides is
        gameMap = gameState.getWalls()
        boundary = gameMap.width // 2  # integer division so the edges stay grid indices
        if position[0] < boundary:
            self.leftEdge = 0
            self.rightEdge = boundary
        else:
            self.leftEdge = boundary
            self.rightEdge = gameMap.width
Example 11
    def evalute_state_one_agent_defensive(self, agent_position, agent_index,
                                          agent_is_on_enemy_team,
                                          team_mate_position):
        if self.check_if_valid_coordinates(self.root_agent_object.middle):
            distance_to_middle_factor = CaptureAgent.getMazeDistance(
                self.root_agent_object, agent_position,
                self.root_agent_object.middle)
        else:
            distance_to_middle_factor = 0
        if self.check_if_valid_coordinates(self.root_agent_object.upperHalf):
            distance_to_upper_half = CaptureAgent.getMazeDistance(
                self.root_agent_object, agent_position,
                self.root_agent_object.upperHalf)
        else:
            distance_to_upper_half = 0
        if self.check_if_valid_coordinates(self.root_agent_object.lowerHalf):
            distance_to_lower_half = CaptureAgent.getMazeDistance(
                self.root_agent_object, agent_position,
                self.root_agent_object.lowerHalf)
        else:
            distance_to_lower_half = 0
        enemies = [
            self.node_state.getAgentState(i) for i in
            CaptureAgent.getOpponents(self.root_agent_object, self.node_state)
        ]
        invaders = [
            a for a in enemies if a.isPacman and a.getPosition() is not None
        ]
        number_of_invaders = len(invaders)
        invader_distance = 0
        if len(invaders) > 0:
            dists = [
                CaptureAgent.getMazeDistance(self.root_agent_object,
                                             agent_position, a.getPosition())
                for a in invaders
            ]
            invader_distance = min(dists)

        distance_from_each_other = CaptureAgent.getMazeDistance(
            self.root_agent_object, agent_position, team_mate_position)

        state_value = number_of_invaders * (-9999) + invader_distance * (-1000) + distance_to_middle_factor * (-200) + \
                      distance_to_lower_half*(-50) + distance_to_upper_half*(-50) + distance_from_each_other * (200)
        return state_value
    def aStarSearch(self, gameState, start, searchPos, foodOrGhost):
        import util
        opponentList = CaptureAgent.getOpponents(self, gameState)
        getWalls = self.getDeepWalls()  # deep copy of the walls grid so it can be edited

        if foodOrGhost == 1:  # 1 = food/capsule target, otherwise a ghost target
            # When heading for food, treat visible ghosts as walls so the path avoids them
            for i in opponentList:
                checkNone = gameState.getAgentPosition(i)
                if checkNone is not None:
                    x, y = checkNone
                    getWalls[x][y] = True
        adjacentNodes = util.PriorityQueue()
        visitedNodes = []
        adjacentNodes.push((start, [start], 0),
                           (0 + self.getMazeDistance(start, searchPos)))
        #Pop the initial point from the priority queue
        (state, goalDirection, cost) = adjacentNodes.pop()
        #adding the point to the visited list
        visitedNodes.append(
            (state, cost + self.getMazeDistance(start, searchPos)))
        while state != searchPos:  # while we have not reached the goal point
            neighbours = Actions.getLegalNeighbors(
                state, getWalls)  # get the point's successors (use the edited walls copy, not self.walls)
            for node in neighbours:
                visitedExist = False
                totalCost = cost + 1
                for (currentState, costToCurrentState) in visitedNodes:
                    if (node == currentState) and (costToCurrentState <=
                                                   totalCost):
                        visitedExist = True
                        break
                if not visitedExist:
                    adjacentNodes.push(
                        (node, goalDirection + [node], cost + 1),
                        (cost + 1 + self.getMazeDistance(node, searchPos)))
                    visitedNodes.append((node, cost + 1))
            if adjacentNodes.isEmpty():
                return 0  # goal unreachable (e.g. sealed off by the ghost "walls")
            (state, goalDirection, cost) = adjacentNodes.pop()
        return len(goalDirection)  # path length in nodes (start included)
Example 13
	def registerInitialState(self, gameState):
		CaptureAgent.registerInitialState(self, gameState)
		self.init = gameState.getAgentState(self.index).getPosition()
		self.epsilon = 0.0  # exploration probability
		self.alpha = 0.1  # learning rate
		self.discountRate = 0.8  # discount factor
		self.dangeFood = []
		self.mode = 1
		self.weights1 = {'closest-food': 2.35219445732408, 'bias': 2.579502234147277, 'closest-capsule': 2.473714473123398}
		self.weights2 = {'closest-food': 1.02910769618005556, 'bias': -6.112936837778204, 'closest-ghosts': -10.11587156566253, 'closest-capsule': 1.257363246901937, 'num-of-walls': -10.4903928122119086, 'time-of-scared-ghosts': 1.6265815562445105, 'reverse': -2.732599631268455}
		self.weights3 = {'bias': -0.1619191782335229, 'closest-ghosts': -18.645334316865307, 'num-of-walls': -10.45335435502801, 'distance-back-to-home': 2.0996715469522775, 'time-of-scared-ghosts': 0.7966612961334337, 'reverse': -2.732599631268455,'closest-capsule': 4.523232232232323}										
		self.weights4 = {'bias': 6.802602309336149, 'distance-back-to-home': 12.7541385540534}
		self.finish = False
		self.myAgents = CaptureAgent.getTeam(self, gameState)
		self.opAgents = CaptureAgent.getOpponents(self, gameState)
		self.myFoods = CaptureAgent.getFood(self, gameState).asList()
		self.opFoods = CaptureAgent.getFoodYouAreDefending(self, gameState).asList()
		self.lostFoods = []
		self.gamescore = 0

		self.midWidth = gameState.data.layout.width / 2
		self.height = gameState.data.layout.height
		self.width = gameState.data.layout.width
		self.midHeight = gameState.data.layout.height / 2
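
The epsilon, alpha (learning rate), discountRate, and hand-tuned weight dictionaries stored here are the ingredients of approximate Q-learning, but the weight-update step itself does not appear in these snippets. A generic sketch of how such feature weights are commonly updated; the function name and numbers are assumptions for illustration, not code from the agent.

def update_weights(weights, features, reward, q_current, q_next_max,
                   alpha=0.1, discount=0.8):
    """One approximate Q-learning update:
    w_i <- w_i + alpha * (reward + discount * max_a' Q(s', a') - Q(s, a)) * f_i(s, a)."""
    correction = reward + discount * q_next_max - q_current
    for name, value in features.items():
        weights[name] = weights.get(name, 0.0) + alpha * correction * value
    return weights

# Illustrative numbers only.
w = {"bias": 1.0, "closest-food": 0.5}
f = {"bias": 1.0, "closest-food": 0.2}
print(update_weights(w, f, reward=1.0, q_current=0.9, q_next_max=1.5))
# -> approximately {'bias': 1.13, 'closest-food': 0.526}
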
Example 14
	def getThirdFeatures(self, gameState, action):
		nextState = self.getSuccessor(gameState, action)		
		self.ghosts = []
		opAgents = CaptureAgent.getOpponents(self,nextState)
		# Get ghost locations and states if observable
		if opAgents:
			for opponent in opAgents:
				opPos = nextState.getAgentPosition(opponent)
				opIsPacman = nextState.getAgentState(opponent).isPacman
				if opPos and not opIsPacman: 
					currentPos = gameState.getAgentPosition(self.index)
					nopPos = gameState.getAgentPosition(opponent)
					dis = abs(currentPos[0]-nopPos[0])+abs(currentPos[1]-nopPos[1])
					if dis<= 5:
						ghost=[]
						ghost.append(opPos)
						ghost.append(opponent)
						self.ghosts.append(ghost)
					
		# Initialize features
		features = util.Counter()
		reverse = self.reverseDirection(action)
		if reverse == gameState.getAgentState(self.index).configuration.direction:
			features["reverse"]=1
			# print("reverse")
		else:
			features["reverse"]=0
			# print("notreverse")
		# features['num-of-carry']  = nextState.getAgentState(self.index).numCarrying
		if self.red:
			self.middle = int((gameState.data.layout.width - 2)/2 )
		else:
			self.middle = int((gameState.data.layout.width - 2)/2 + 1)
		self.boundary = []
		for i in range(1, gameState.data.layout.height - 1):
			if not gameState.hasWall(self.middle, i):
				self.boundary.append((self.middle, i))
		if gameState.getAgentState(self.index):
			
			distance_to_boundary = 10000
			for each in self.boundary:
				distance = self.getMazeDistance(nextState.getAgentPosition(self.index),each)
				if distance<distance_to_boundary:
					distance_to_boundary = distance
			if distance_to_boundary ==0:
				features['distance-back-to-home']=2
			else :
				features['distance-back-to-home'] = 1.0/distance_to_boundary
		else : 
			features['distance-back-to-home'] = 0

		# Bias
		features["bias"] = 1.0
		legalActions = nextState.getLegalActions(self.index)
		legalActions.remove(Directions.STOP)
		features['num-of-walls'] = max(self.getDeathRoad(gameState,action),self.getDeathRound(gameState,action))
		# compute the location of pacman after he takes the action
		if(self.getDeathRoad(gameState,action)>=8):
			food = self.getDeathFood(gameState,action)
			for each in food:
				if each[1]>5:
					self.dangeFood.append(each[0])
		

		if(len(self.getCapsules(nextState))>1):
			if len(self.getCapsules(gameState))==len(self.getCapsules(nextState))+1:
				features['closest-capsule']=2.0
			else:
				minCap = 10000
				for each in self.getCapsules(nextState):
					if minCap > self.getMazeDistance(nextState.getAgentPosition(self.index),each):
						minCap = self.getMazeDistance(nextState.getAgentPosition(self.index),each)
				features['closest-capsule'] = 1.0/minCap
		elif(len(self.getCapsules(nextState))==1): 
			if len(self.getCapsules(gameState))==2:
				features['closest-capsule']=2.0
			else:
				features['closest-capsule'] = 1.0/self.getMazeDistance(nextState.getAgentPosition(self.index),self.getCapsules(gameState)[0])
		else: 
			if(len(self.getCapsules(gameState))>0):
				features['closest-capsule']=2.0
			else :
				features['closest-capsule']=0.0

		if len(self.ghosts)==0:
			features['time-of-scared-ghosts']=0
			features['closest-ghosts']=0
			self.scareTimer = 0
		else:
			features['time-of-scared-ghosts'] = min(nextState.getAgentState(opponent).scaredTimer for opponent in opAgents)

			if(min(nextState.getAgentState(opponent).scaredTimer for opponent in opAgents)>10):
				self.scareTimer = 1
				features['time-of-scared-ghosts']=1

			else:
				features['time-of-scared-ghosts']=0

				self.scareTimer = 0
			if len(self.ghosts)==1:
				ghostIndex = self.ghosts[0][1]
				pos = gameState.getAgentState(ghostIndex).getPosition()
				nextpos = nextState.getAgentState(ghostIndex).getPosition()
				posDis = self.getMazeDistance(gameState.getAgentState(self.index).getPosition(),pos)
				nextDis = self.getMazeDistance(nextState.getAgentState(self.index).getPosition(),nextpos)
				if abs(posDis-nextDis)>5:
					# print("!!!!!!!!!!!!!!!!!!!!!!!!!!")
					features['closest-ghosts'] = 2 
					# print("222222222222222222222")
				else :
					# print("333333333333333333")
					
					features['closest-ghosts'] = 1.0/self.getMazeDistance(nextState.getAgentPosition(self.index),self.ghosts[0][0])
			else:
				ghostIndex1 = self.ghosts[0][1]
				pos1 = gameState.getAgentState(ghostIndex1).getPosition()
				nextpos1 = nextState.getAgentState(ghostIndex1).getPosition()
				ghostIndex2 = self.ghosts[1][1]
				pos2 = gameState.getAgentState(ghostIndex2).getPosition()
				nextpos2 = nextState.getAgentState(ghostIndex2).getPosition()
				posDis1 = self.getMazeDistance(gameState.getAgentState(self.index).getPosition(),pos1)
				nextDis1 = self.getMazeDistance(nextState.getAgentState(self.index).getPosition(),nextpos1)
				posDis2 = self.getMazeDistance(gameState.getAgentState(self.index).getPosition(),pos2)
				nextDis2 = self.getMazeDistance(nextState.getAgentState(self.index).getPosition(),nextpos2)
				if abs(posDis1-nextDis1)>5 or abs(posDis2-nextDis2)>5:
					# print("!!!!!!!!!!!!!!!!!!!!!!!!!!")
					features['closest-ghosts'] = 2 
					# print("222222222222222222222")
				else :
					# print("333333333333333333")	
					if posDis1==posDis2:
						features['closest-ghosts'] = 1.0/(nextDis1+nextDis2)*2
					else:
						features['closest-ghosts'] = 1.0/min(nextDis1,nextDis2)
		
		features.divideAll(100)
		return features
Example 15
	def getSecondFeatures(self, gameState, action):
		nextState = self.getSuccessor(gameState, action)
		
		food = self.getFood(gameState)
		nextfood = self.getFood(nextState)
		walls = gameState.getWalls()
		
		self.ghosts = []
		opAgents = CaptureAgent.getOpponents(self,nextState)
		# Get ghost locations and states if observable
		if opAgents:
			for opponent in opAgents:
				opPos = nextState.getAgentPosition(opponent)
				opIsPacman = nextState.getAgentState(opponent).isPacman
				if opPos and not opIsPacman: 
					currentPos = gameState.getAgentPosition(self.index)
					nopPos = gameState.getAgentPosition(opponent)
					dis = abs(currentPos[0]-nopPos[0])+abs(currentPos[1]-nopPos[1])
					if dis<= 5:
						ghost=[]
						ghost.append(opPos)
						ghost.append(opponent)
						self.ghosts.append(ghost)
		
		# Initialize features
		features = util.Counter()
		reverse = self.reverseDirection(action)
		if reverse == gameState.getAgentState(self.index).configuration.direction:
			features["reverse"]=1
			# print("reverse")
		else:
			features["reverse"]=0
			# print("notreverse")
		features["bias"] = 1.0
		legalActions = nextState.getLegalActions(self.index)
		legalActions.remove(Directions.STOP)
		features['num-of-walls'] = max(self.getDeathRoad(gameState,action),self.getDeathRound(gameState,action))
		if(self.getDeathRoad(gameState,action)>=8):
			deathfood = self.getDeathFood(gameState,action)
			for each in deathfood:
				if each[1]>5:
					self.dangeFood.append(each[0])
		# compute the location of pacman after he takes the action
		
		
		if len(self.ghosts)==0:
			# print("0000000000000000000000000")
			features['time-of-scared-ghosts']=0
			features['closest-ghosts']=0
			self.scareTimer = 0
		else:
			features['time-of-scared-ghosts'] = min(nextState.getAgentState(opponent).scaredTimer for opponent in opAgents)

			if(min(nextState.getAgentState(opponent).scaredTimer for opponent in opAgents)>10):
				self.scareTimer = 1
				features['time-of-scared-ghosts']=1

			else:
				features['time-of-scared-ghosts']=0

				self.scareTimer = 0
			# print("next",nextState.getAgentState(self.ghosts[0][1]).getPosition()[0],"   inital",nextState.getInitialAgentPosition(self.ghosts[0][1])[0])	
		
			if len(self.ghosts)==1:
				ghostIndex = self.ghosts[0][1]
				pos = gameState.getAgentState(ghostIndex).getPosition()
				nextpos = nextState.getAgentState(ghostIndex).getPosition()
				posDis = self.getMazeDistance(gameState.getAgentState(self.index).getPosition(),pos)
				nextDis = self.getMazeDistance(nextState.getAgentState(self.index).getPosition(),nextpos)
				if abs(posDis-nextDis)>5:
					# print("!!!!!!!!!!!!!!!!!!!!!!!!!!")
					features['closest-ghosts'] = 2 
					# print("222222222222222222222")
				else :
					# print("333333333333333333")
					
					features['closest-ghosts'] = 1.0/nextDis
			else:
				ghostIndex1 = self.ghosts[0][1]
				pos1 = gameState.getAgentState(ghostIndex1).getPosition()
				nextpos1 = nextState.getAgentState(ghostIndex1).getPosition()
				ghostIndex2 = self.ghosts[1][1]
				pos2 = gameState.getAgentState(ghostIndex2).getPosition()
				nextpos2 = nextState.getAgentState(ghostIndex2).getPosition()
				posDis1 = self.getMazeDistance(gameState.getAgentState(self.index).getPosition(),pos1)
				nextDis1 = self.getMazeDistance(nextState.getAgentState(self.index).getPosition(),nextpos1)
				posDis2 = self.getMazeDistance(gameState.getAgentState(self.index).getPosition(),pos2)
				nextDis2 = self.getMazeDistance(nextState.getAgentState(self.index).getPosition(),nextpos2)
				if abs(posDis1-nextDis1)>5 or abs(posDis2-nextDis2)>5:
					# print("!!!!!!!!!!!!!!!!!!!!!!!!!!")
					features['closest-ghosts'] = 2 
					# print("222222222222222222222")
				else :
					# print("333333333333333333")
					if posDis1==posDis2:
						features['closest-ghosts'] = 1.0/(nextDis1+nextDis2)*2
					else:
						features['closest-ghosts'] = 1.0/min(nextDis1,nextDis2)

		if(len(self.getCapsules(nextState))>1):
			if len(self.getCapsules(gameState))==len(self.getCapsules(nextState))+1:
				features['closest-capsule']=2.0
			else:
				minCap = 10000
				for each in self.getCapsules(nextState):
					if minCap > self.getMazeDistance(nextState.getAgentPosition(self.index),each):
						minCap = self.getMazeDistance(nextState.getAgentPosition(self.index),each)
				features['closest-capsule'] = 1.0/minCap
		elif(len(self.getCapsules(nextState))==1): 
			if len(self.getCapsules(gameState))==2:
				features['closest-capsule']=2.0
			else:
				features['closest-capsule'] = 1.0/self.getMazeDistance(nextState.getAgentPosition(self.index),self.getCapsules(gameState)[0])
		else: 
			if(len(self.getCapsules(gameState))>0):
				features['closest-capsule']=2.0
			else :
				features['closest-capsule']=0.0
		# Closest food
		if self.index ==self.getTeam(gameState)[0]:
			dist = self.halfUpperClosestFood(nextState.getAgentPosition(self.index), nextfood, walls,nextState)
			if len(food.asList()) == len(nextfood.asList() ) and not dist ==0:
				# make the distance a number less than one otherwise the update
				# will diverge wildly
				features["closest-food"] = 1.0/float(dist)
				# print("The distance is",dist)
			else : 
				features["closest-food"] = 2
		else:
			dist = self.halfLowerClosestFood(nextState.getAgentPosition(self.index), nextfood, walls,nextState)
			if len(food.asList()) == len(nextfood.asList() ) and not dist ==0:
				# make the distance a number less than one otherwise the update
				# will diverge wildly
				features["closest-food"] = 1.0/float(dist)
				# print("The distance is",dist)
			else : 
				features["closest-food"] = 2
		
		
		features.divideAll(100)
		return features
Example 16
	def chooseOfAction(self, gameState):
		# Pick Action
		ghost=[]
		ghostIndex = 0
		opAgents = CaptureAgent.getOpponents(self,gameState)
		currentPos = gameState.getAgentPosition(self.index)
		# Get ghost locations and states if observable
		if opAgents:
			for opponent in opAgents:
				opPos = gameState.getAgentPosition(opponent)
				opIsPacman = gameState.getAgentState(opponent).isPacman
				
				if opPos and not opIsPacman: 
					dis = abs(currentPos[0]-opPos[0])+abs(currentPos[1]-opPos[1])
					if dis<=6:
						ghost.append(opPos)
						ghostIndex = opponent
		if   len(self.getFood(gameState).asList())>2:
			
			if len(ghost) ==0 :
				if gameState.getAgentState(self.index).numCarrying>1 and  gameState.data.timeleft<200:
					self.weights =self.weights4
					print("444444444444444444444")
				else:
					self.weights = self.weights1
					print("111111111111111111111")
			else:

				if min([self.getMazeDistance(gameState.getAgentPosition(self.index),a) for a in ghost])>6:
					self.weights = self.weights1
					print("111111111111111111111")
				else:
					if gameState.getAgentState(ghostIndex).scaredTimer<10:
						if gameState.data.timeleft<200 :
							if gameState.getAgentState(self.index).numCarrying>2:
								self.weights = self.weights3
								print("33333333333333333333")
							else:
								self.weights = self.weights2
								print("2222222222222222222222")
						else:
							if gameState.getAgentState(self.index).numCarrying>10:
								if self.red:
									middle = int((gameState.data.layout.width - 2)/2 )
								else:
									middle = int((gameState.data.layout.width - 2)/2 + 1)
								if abs(gameState.getAgentPosition(self.index)[0]-middle) < middle/2:
									self.weights = self.weights3
									print("33333333333333333333")
								else :
									self.weights = self.weights2
									print("2222222222222222222222")
							else:
								self.weights = self.weights2
								print("2222222222222222222222")
					else :
						self.weights = self.weights1
						print("111111111111111111111")

		else :
			if len(ghost) ==0:
				self.weights = self.weights4
				print("44444444444444444444")
			else:
				if gameState.getAgentState(ghostIndex).scaredTimer<10:
					self.weights = self.weights3
					print("33333333333333333333")
				else: 
					self.weights = self.weights4
					print("44444444444444444444")
		legalActions = gameState.getLegalActions(self.index)
		legalActions.remove(Directions.STOP)
		
		action = None

		if len(legalActions) != 0:
			prob = util.flipCoin(self.epsilon)
			if prob:
				action = random.choice(legalActions)

			else:
				if self.weights ==self.weights1:
					action = self.getSafePolicy(gameState)
			
				else:
					action = self.getPolicy(gameState)

		if self.weights == self.weights2:

			food = self.getFeatures(gameState,action)["closest-food"]
			ghost = self.getFeatures(gameState,action)["closest-ghosts"]
			print(food*100,ghost*100)
		if not gameState.getAgentState(self.index).isPacman:
			if self.finish:
				self.mode = 2
		return action
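
chooseOfAction above hands the final decision to getPolicy and getSafePolicy, which are not part of these snippets. In feature-weight agents of this kind the policy step is typically an argmax of the evaluated action values over the legal actions, with random tie-breaking; a small standalone sketch under that assumption, with illustrative names and numbers.

import random

def greedy_action(legal_actions, q_values):
    """Return the action with the highest Q value, breaking ties at random."""
    best_q = max(q_values[a] for a in legal_actions)
    best = [a for a in legal_actions if q_values[a] == best_q]
    return random.choice(best)

# Illustrative numbers only.
q = {"North": 0.4, "South": 0.1, "East": 0.4, "West": -0.2}
print(greedy_action(["North", "South", "East", "West"], q))  # -> North or East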