Example #1
def get_adjacency_list():
    """ Returns list of node neighbours """

    adjacency_list = {}
    grid = get_grid()
    rows, columns = grid.shape

    for i in range(rows):
        for j in range(columns):
            if grid[i, j] == 0:  # cell is a wall, no node to add
                continue

            node_name = get_node_name(i, j)
            adjacency_list[node_name] = []

            # check all four directions the player can move in; every edge has cost 1

            if grid[i - 1, j]:  # i is never 0 here (border row is a wall), so i - 1 is safe
                adjacency_list[node_name].append((get_node_name(i - 1, j), 1))

            if j > 0 and grid[i, j - 1]:
                adjacency_list[node_name].append((get_node_name(i, j - 1), 1))

            if grid[i + 1, j]:  # i is never rows - 1 here (border row is a wall), so i + 1 is safe
                adjacency_list[node_name].append((get_node_name(i + 1, j), 1))

            if j < columns - 1 and grid[i, j + 1]:
                adjacency_list[node_name].append((get_node_name(i, j + 1), 1))

    return adjacency_list
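The returned {node: [(neighbour, cost), ...]} mapping plugs directly into a standard shortest-path search. Below is a minimal Dijkstra sketch over that structure; only get_adjacency_list and the mapping shape come from the snippet above, the dijkstra helper itself is illustrative and not part of the project.

import heapq

def dijkstra(adjacency_list, start, goal):
    """ Shortest path over a {node: [(neighbour, cost), ...]} mapping. """
    dist = {start: 0}
    prev = {}
    heap = [(0, start)]
    while heap:
        d, node = heapq.heappop(heap)
        if node == goal:
            break
        if d > dist.get(node, float('inf')):
            continue  # stale heap entry
        for neighbour, cost in adjacency_list.get(node, []):
            new_dist = d + cost
            if new_dist < dist.get(neighbour, float('inf')):
                dist[neighbour] = new_dist
                prev[neighbour] = node
                heapq.heappush(heap, (new_dist, neighbour))

    if goal not in dist:
        return []  # goal not reachable from start
    path = [goal]
    while path[-1] != start:
        path.append(prev[path[-1]])
    return path[::-1]

Usage would look like path = dijkstra(get_adjacency_list(), start_name, goal_name), where the node names follow whatever format get_node_name produces.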
Example #2
def stateTransformer(player, dots, ghosts, isChasingMode):
    """ Returns a copy of the base grid with the player, dots and ghosts mapped onto it """
    state = np.array(get_grid(), copy=True)

    Environment.mapPlayerOnGrid(player, state)
    Environment.mapDotsOnGrid(dots, state)
    Environment.mapGhostsOnGrid(ghosts, state, isChasingMode)

    return state
    def setPlayerType(self, type):
        self.type = type

        # choose the agent implementation that controls the player
        if type == 'rl':
            self.agent = Agent(np.array(get_grid()).shape, 4)  # state shape, 4 actions
        elif type == 'greedy':
            self.agent = GreedyAgent()
        elif type == 'tree':
            self.agent = TreeAgent()
        else:
            self.agent = None  # human is playing game
    def step(self, action):
        # count how many steps the ghost-eating (chasing) mode has lasted
        self.chasingLast += 1 if self.chasing else 0

        # chasing mode expires after FIVE_SECS steps: ghosts go back to hunting the player with A*
        if self.chasingLast == FIVE_SECS:
            self.chasingLast = 0
            self.chasing = False
            for ghost in self.ghosts:
                ghost.path = []
                ghost.algorithm = AStar(ghost, self.player)

        self.grid = np.array(get_grid(), copy=True)

        reward = 0

        # execute the chosen move; a move into a wall is penalized below
        self.actions.get(action)()
        if self.player.notValid:
            reward += NOT_VALID_MOVE_REWARD

        self.mapPlayerOnGrid(self.player, self.grid)

        eatPillReward = self.player.eatPill(self.dots)
        if eatPillReward == BIG_PILL_REWARD:
            self.chasing = True  # big pill eaten: the player can now chase the ghosts

        reward += eatPillReward

        done = False
        if len(self.dots) == 0:
            done = True

        self.mapDotsOnGrid(self.dots, self.grid)

        reward += self.player.eatGhost(self.ghosts, self.chasing)

        # the big pill was eaten this step: switch the ghosts to frightened behaviour
        if self.chasing and self.chasingLast == 0:
            for ghost in self.ghosts:
                ghost.path = []
                ghost.algorithm = Frightened(ghost)

        moveGhosts(self.ghosts)
        self.mapGhostsOnGrid(self.ghosts, self.grid, self.chasing)

        for ghost in self.ghosts:
            if self.player.caught(ghost):
                reward += PLAYER_DEATH_REWARD
                done = True
                break

        return self.grid, reward, done
    def reset(self):
        """ Re-initializes the grid, dots, player, ghosts and action mapping, and returns the starting grid """
        self.grid = np.array(get_grid(), copy=True)
        self.dots = initDots()
        self.mapDotsOnGrid(self.dots, self.grid)
        self.player = Player()
        self.mapPlayerOnGrid(self.player, self.grid)
        self.ghosts = initGhosts(self.player)
        self.mapGhostsOnGrid(self.ghosts, self.grid, self.chasing)

        self.actions = {
            0: self.player.moveUp,
            1: self.player.moveDown,
            2: self.player.moveLeft,
            3: self.player.moveRight,
        }

        return self.grid
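Since step() returns a (grid, reward, done) triple and reset() returns the starting grid, the environment can be driven by a conventional episode loop. A hedged sketch follows, assuming the class that owns these methods is called Environment and can be constructed without arguments; the random policy and the NUM_EPISODES constant are placeholders, not part of the project.

import random

NUM_EPISODES = 10  # illustrative constant

env = Environment()  # assumed constructor for the class defining step()/reset() above
for episode in range(NUM_EPISODES):
    state = env.reset()
    done = False
    total_reward = 0
    while not done:
        action = random.randrange(4)  # 0-3, matching the self.actions mapping
        state, reward, done = env.step(action)
        total_reward += reward
    print(f"episode {episode}: total reward {total_reward}")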
def checkMovePoint(player):
    """ Checks if player can move by selected (movex, movey) """

    # wrap around the horizontal screen edges
    if player.x + player.movex < 0:
        player.x = WIDTH - CELL_SIZE / 2

    if player.x + player.movex >= WIDTH:
        player.x = CELL_SIZE / 2

    grid = get_grid()
    grid_x = int((player.x + player.movex) // CELL_SIZE)
    grid_y = int((player.y + player.movey) // CELL_SIZE)

    if grid[grid_y][grid_x] != 1.0:  # target cell is not walkable
        player.movex = player.movey = 0
        return False

    return True
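The pixel-to-grid conversion in checkMovePoint is plain integer division by the cell size. A small worked example with an assumed CELL_SIZE of 30 (the real constant is defined elsewhere in the project):

CELL_SIZE = 30  # assumed value, for illustration only

player_x, move_x = 95, 10  # current pixel position and pending horizontal move
grid_x = int((player_x + move_x) // CELL_SIZE)
print(grid_x)  # (95 + 10) // 30 == 3, i.e. the fourth column of the grid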
    def getLastNode(self):
        """ Replays the action codes in self.code from the ghost's current cell, clamped to the grid borders, and returns the name of the node it ends on """
        grid = get_grid()

        ghostI, ghostJ = pixelToGrid((self.ghost.x, self.ghost.y))

        for action in self.code:
            if action == 0:  # up
                if ghostI != 0:
                    ghostI -= 1
            elif action == 1:  # down
                if ghostI != 28:
                    ghostI += 1
            elif action == 2:  # left
                if ghostJ != 0:
                    ghostJ -= 1
            elif action == 3:  # right
                if ghostJ != 29:
                    ghostJ += 1

        return getNodeName((ghostI, ghostJ))
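The action codes replayed above follow the same 0-3 convention as the Environment.actions mapping (0 = up, 1 = down, 2 = left, 3 = right). Written as row/column deltas with the same border clamping, an equivalent helper could look like the sketch below; the names and the 29x30 grid size are assumptions read off the bounds checks above, not project code.

# (row, column) deltas for the 0-3 action codes; illustrative helper only.
ACTION_DELTAS = {
    0: (-1, 0),  # up: row index decreases
    1: (1, 0),   # down: row index increases
    2: (0, -1),  # left: column index decreases
    3: (0, 1),   # right: column index increases
}

def applyAction(i, j, action, rows=29, cols=30):
    """ One clamped move, mirroring the border checks in getLastNode. """
    di, dj = ACTION_DELTAS[action]
    return min(max(i + di, 0), rows - 1), min(max(j + dj, 0), cols - 1)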
import random
import numpy as np

from abc import ABC, abstractmethod

from grid.get_grid import get_grid
from graph import create_graph
from algorithms.help_functions import *

grid = np.array(get_grid(), copy=True)  # movement grid
graph = create_graph()  # movement graph


class AlgorithmInterface(ABC):
    def __init__(self):
        self.goalNameKey = ""
        self.player = None

    def run(self):
        pass

    @abstractmethod
    def getNextStep(self):
        pass

    def getGoal(self, index):
        # Red ghost's goal is player (index = 1)
        if index == 1:
            return pixelToGrid((self.player.x, self.player.y))

        # Lightblue ghost's goal is 4 fields ahead of player's current position (index = 2)