Example 1
    def __init__(self, host, port):
        self.socket = GameSocket(host, port)
        self.state = State()

        self.score_pre = (
            self.state.score
        )  # Storing the last score for designing the reward function
Example 2
 def __init__(self, host, port):
     self.socket = GameSocket(host, port)
     self.state = State()
     self.pre_x = 0
     self.pre_y = 0
     self.pre_energy = 0
     #self.pre_action = ''   
     self.score_pre = self.state.score  # Storing the last score for designing the reward function
Example 3
    def __init__(self, host, port):
        self.socket = GameSocket(host, port)
        self.state = State()

        self.score_pre = self.state.score  #Storing the last score for designing the reward function
        self.decay = 27
        self.area_affect = 3
        self.affect_eff = 0.92
        self.view = None
        self.energy_view = None
        self.current_action = None
        self.gold_map = None
        self.gold_map_origin = None
Example 4
    def test_reset(self):
        socket = GameSocket(None, None)
        socket.reset(['map1', '0', '0', '10', '3', '20'])

        self.assertEqual(0, socket.userMatch.posx)
        self.assertEqual(0, socket.userMatch.posy)
        self.assertEqual(10, socket.userMatch.energy)
        self.assertEqual(3, socket.userMatch.gameinfo.numberOfPlayers)
        self.assertEqual(20, socket.userMatch.gameinfo.steps)
        self.assertEqual(2, socket.userMatch.gameinfo.width)
        self.assertEqual(2, socket.userMatch.gameinfo.height)
        self.assertEqual(1, len(socket.userMatch.gameinfo.golds))
        self.assertEqual(3, len(socket.userMatch.gameinfo.obstacles))
        self.assertEqual(3, len(socket.users))
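For reference, the reset payload exercised here looks like (map name, posx, posy, initial energy, number of players, max steps); the same fields appear comma-joined in the send() calls of the later tests. A small helper for building that request, inferred from these tests rather than from any official spec:

def make_map_request(map_name, posx, posy, energy, players, steps):
    # Field order inferred from test_reset above; treat it as an assumption,
    # not a documented protocol.
    return ",".join(map(str, [map_name, posx, posy, energy, players, steps]))

# make_map_request("map1", 0, 0, 10, 3, 20) -> "map1,0,0,10,3,20"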
Example 5
    def test_craft(self):
        socket = GameSocket(None, None)
        socket.send('map1,0,0,10,4,20')
        socket.send(json.dumps({
            '1': 5,
            '2': 5,
            '3': 5,
            '4': 5,
        }))

        data = json.loads(socket.receive())
        self.assertDictEqual(
            {
                'players': [{
                    'playerId': playerId,
                    'posx': 0,
                    'posy': 0,
                    'score': 50,
                    'energy': 5,
                    'status': 0,
                    'lastAction': 5,
                    'freeCount': 0,
                } for playerId in [1, 2, 3, 4]],
                'golds': [{
                    'posx': 0,
                    'posy': 0,
                    'amount': 250
                }],
                'changedObstacles': []
            }, data)
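A hedged reading of the expected payload: all four players craft (action 5) on the same gold cell, so each scores 50 while the cell drops from its initial 450 (see the reset test in Example 7) to 250, and each player's energy falls from 10 to 5, suggesting a craft costs 5 energy.

# Arithmetic behind the expected values above (my inference, not a spec):
# gold:   450 - 4 players * 50 per craft = 250 remaining
# energy: 10 - 5 (assumed craft cost)    = 5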
Example 6
    def __init__(self, host, port):
        self.socket = GameSocket(host, port)
        self.state = State()

        # define action space
        self.INPUTNUM = 198  # The number of input values for the DQN model
        self.ACTIONNUM = 6  # The number of actions output from the DQN model
        # define state space

        self.gameState = None
        self.reward = 0
        self.terminate = False
        
        self.score_pre = self.state.score   # Storing the last score for designing the reward function
        self.energy_pre = self.state.energy # Storing the last energy for designing the reward function

        self.viewer = None
        self.steps_beyond_done = None
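The INPUTNUM = 198 above is consistent with the flattened state built by get_state in Example 12, assuming a 21 x 9 map and three opponent bots; the breakdown below is my inference, not something stated in the code.

# Hypothetical breakdown of INPUTNUM = 198 (assumes a 21x9 map and 3 bots):
# 21 * 9 map cells          = 189
# agent x, y, energy        =   3
# 3 bots * (posx, posy)     =   6
#                     total = 198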
Example 7
    def test_send_reset_receive(self):
        socket = GameSocket(None, None)
        socket.send('map1,7,9,10,4,20')

        data = json.loads(socket.receive())
        self.assertDictContainsSubset({
            'posx': 7,
            'posy': 9,
            'energy': 10,
        }, data)
        self.assertDictContainsSubset(
            {
                'numberOfPlayers': 4,
                'width': 2,
                'height': 2,
                'steps': 20,
                'golds': [{
                    'posx': 0,
                    'posy': 0,
                    'amount': 450,
                }],
            }, data['gameinfo'])
        self.assertListEqual([{
            'type': 0,
            'posx': 1,
            'posy': 0,
            'value': -1
        }, {
            'type': 2,
            'posx': 0,
            'posy': 1,
            'value': -10
        }, {
            'type': 1,
            'posx': 1,
            'posy': 1,
            'value': 0
        }], data['gameinfo']['obstacles'])
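Cross-referencing the TreeID/TrapID/SwampID constants used in the environment snippets (update_swamp in Example 9 compares against a literal 3 for swamps), the obstacle encoding seems to be the following; this is inferred from the tests, not documented:

# Inferred obstacle encoding (an assumption):
#   type 0: land,  value -1   (moving onto it costs 1 energy)
#   type 1: tree,  value 0    (TreeID)
#   type 2: trap,  value -10  (TrapID)
#   type 3: swamp             (SwampID; values -5/-20/-40/-100 per Example 13)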
Example 8
    def test_mixed_actions(self):
        socket = GameSocket(None, None)
        socket.send('map1,0,0,10,4,20')
        socket.send(json.dumps({
            '1': 0,
            '2': 3,
            '3': 4,
            '4': 5,
        }))

        data = json.loads(socket.receive())
        self.assertDictEqual(
            {
                'players': [
                    {
                        'playerId': 1,
                        'posx': -1,
                        'posy': 0,
                        'score': 0,
                        'energy': 10,
                        'status': 1,
                        'lastAction': 6,
                        'freeCount': 0,
                    },
                    {
                        'playerId': 2,
                        'posx': 0,
                        'posy': 1,
                        'score': 0,
                        'energy': 0,
                        'status': 2,
                        'lastAction': 6,
                        'freeCount': 0,
                    },
                    {
                        'playerId': 3,
                        'posx': 0,
                        'posy': 0,
                        'score': 0,
                        'energy': 10,
                        'status': 0,
                        'lastAction': 4,
                        'freeCount': 1,
                    },
                    {
                        'playerId': 4,
                        'posx': 0,
                        'posy': 0,
                        'score': 50,
                        'energy': 5,
                        'status': 0,
                        'lastAction': 5,
                        'freeCount': 0,
                    },
                ],
                'golds': [{
                    'posx': 0,
                    'posy': 0,
                    'amount': 400
                }],
                'changedObstacles': [{
                    'type': 0,
                    'posx': 0,
                    'posy': 1,
                    'value': -1,
                }]
            }, data)
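A hedged reading of this test: player 1 takes action 0 and moves from (0, 0) to (-1, 0), leaving the map (status 1); player 2 takes action 3, steps down onto the -10 trap and hits energy 0 (status 2), after which the trap at (0, 1) degrades to plain land in changedObstacles; player 3 rests (action 4, hence freeCount 1); player 4 crafts 50 gold (action 5). The lastAction of 6 for the two eliminated players presumably means "no action". From the position changes, the action encoding appears to be:

# Inferred action encoding (1 and 2 are guesses by symmetry, unverified):
# 0: left (x-1), 1: right (x+1)?, 2: up (y-1)?, 3: down (y+1), 4: rest, 5: craft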
Example 9
class MinerEnv:
    def __init__(self, host, port):
        self.socket = GameSocket(host, port)
        self.state = State()

        self.score_pre = self.state.score  #Storing the last score for designing the reward function

    def start(self):  #connect to server
        self.socket.connect()

    def end(self):  #disconnect server
        self.socket.close()

    def send_map_info(self, request):  #tell server which map to run
        self.socket.send(request)

    def reset(self):  #start new game
        self.state_x_pre = self.state.x
        self.state_y_pre = self.state.y
        self.last3position = []
        self.Swamp_position = []
        self.craft_no_gold = 0
        self.in_gold = 0
        self.premindist = 1000
        #self.dmax,_,_ = self.distance_value_trade_off()
        try:
            message = self.socket.receive()  #receive game info from server
            self.state.init_state(message)  #init state
        except Exception as e:
            import traceback
            traceback.print_exc()

    def step(self, action):  #step process
        self.state_x_pre = self.state.x
        self.state_y_pre = self.state.y
        self.last3position.append([self.state.x, self.state.y])
        if len(self.last3position) > 3:
            self.last3position.pop(0)
        #print(self.last3position)
        self.socket.send(action)  #send action to server
        try:
            message = self.socket.receive()  #receive new state from server
            self.state.update_state(message)  #update to local state
        except Exception as e:
            import traceback
            traceback.print_exc()

    # Functions customized by the client
    def get_reward(self):
        # Calculate reward
        reward = 0
        score_action = self.state.score - self.score_pre
        self.score_pre = self.state.score
        # dmax, xgold, ygold = self.distance_value_trade_off()
        # print("come to: ", dmax, xgold, ygold, self.state.mapInfo.gold_amount(xgold, ygold))
        # if dmax >= self.dmax:
        #     reward += 0.1
        # print(self.dmax, self.state.x, self.state.y, self.state.mapInfo.gold_amount(self.state.x, self.state.y))
        # self.dmax = dmax

        if score_action > 0:
            # If the DQN agent crafts gold, it should obtain a positive reward (proportional to score_action)
            reward += score_action / 50 * 10

        # If the DQN agent crashes into obstacles (Tree, Trap, Swamp), it should be punished with a negative reward
        if self.state.mapInfo.get_obstacle(self.state.x,
                                           self.state.y) == TreeID:  # Tree
            reward -= 0.06 * 3
        if self.state.mapInfo.get_obstacle(self.state.x,
                                           self.state.y) == TrapID:  # Trap
            reward -= 0.03 * 3
        if self.state.mapInfo.get_obstacle(self.state.x,
                                           self.state.y) == SwampID:  # Swamp
            if [self.state.x,
                    self.state.y] in self.Swamp_position:  # entered this swamp again
                reward -= 0.5
            else:
                reward -= 0.05  # first visit to a swamp
            #reward -=0.4
        if self.state.mapInfo.gold_amount(self.state.x, self.state.y) >= 50:
            reward += 0.3
        if self.state.mapInfo.gold_amount(
                self.state_x_pre, self.state_y_pre
        ) >= 50 and self.state.lastAction != 5:  # on gold but did not craft
            self.in_gold += 1
            reward -= 0.5
        if self.state.lastAction == 5 and score_action == 0:  # crafted but gained no gold
            self.craft_no_gold += 1
            reward -= 0.5
        # If the agent goes out of the map, it should be punished with a larger negative reward.
        if self.state.status == State.STATUS_ELIMINATED_WENT_OUT_MAP:
            reward -= 20

        # If the agent runs out of energy, it should be punished with a larger negative reward.
        if self.state.status == State.STATUS_ELIMINATED_OUT_OF_ENERGY:
            reward -= 0.7
        if self.state.status == State.STATUS_PLAYING:
            reward += 0.1
        # print ("reward",reward)
        return reward

    def get_reward_complex(self):
        reward = 0
        score_action = self.state.score - self.score_pre
        self.score_pre = self.state.score
        ### reward for gold
        golds = self.state.mapInfo.golds
        miner_posx, miner_posy = self.state.x, self.state.y
        target_x, target_y = miner_posx, miner_posy
        mindist = 1000
        for gold in golds:
            dist = distance([gold["posx"], gold["posy"]],
                            [miner_posx, miner_posy]) - self.reward_gold(
                                [gold["posx"], gold["posy"]])
            if dist < mindist:
                mindist = dist
        if mindist < self.premindist:
            reward += 0.5
        self.premindist = mindist
        ####
        if score_action > 0:
            # If the DQN agent crafts gold, it should obtain a positive reward (proportional to score_action)
            reward += score_action / 50 * 10
        if self.state.mapInfo.get_obstacle(self.state.x,
                                           self.state.y) == TreeID:  # Tree
            reward -= 0.06 * 3
        if self.state.mapInfo.get_obstacle(self.state.x,
                                           self.state.y) == TrapID:  # Trap
            reward -= 0.03 * 3
        if self.state.mapInfo.get_obstacle(self.state.x,
                                           self.state.y) == SwampID:  # Swamp
            if [self.state.x,
                    self.state.y] in self.Swamp_position:  # entered this swamp again
                reward -= 0.5
            else:
                reward -= 0.05  # first visit to a swamp
            #reward -=0.4
        if self.state.mapInfo.gold_amount(
                self.state_x_pre, self.state_y_pre
        ) >= 50 and self.state.lastAction != 5:  # on gold but did not craft
            reward -= 0.5
        if self.state.lastAction == 5 and score_action == 0:  # crafted but gained no gold
            reward -= 0.5
        if self.state.energy >= 45 and self.state.lastAction == 4:
            reward -= 1
        # If the agent goes out of the map, it should be punished with a larger negative reward.
        if self.state.status == State.STATUS_ELIMINATED_WENT_OUT_MAP:
            reward -= 20

        # If the agent runs out of energy, it should be punished with a larger negative reward.
        if self.state.status == State.STATUS_ELIMINATED_OUT_OF_ENERGY:
            reward -= 0.7
        if self.state.status == State.STATUS_PLAYING:
            reward += 0.1
        return reward

    def reward_gold(self, gold_pos):
        x, y = gold_pos[0], gold_pos[1]
        reward = 0
        for stt, (i, j) in enumerate(
                zip([-1, 1, 0, 0, -1, 1, -1, 1], [0, 0, -1, 1, -1, -1, 1, 1])):
            xnew, ynew = x + i, y + j
            if xnew <= self.state.mapInfo.max_x and xnew >=0 \
            and ynew <= self.state.mapInfo.max_y and ynew >= 0:
                amount = self.state.mapInfo.gold_amount(xnew, ynew)
                if amount >= 100 and amount <= 200:
                    reward += 1
                if amount > 200 and amount <= 500:
                    reward += 2
                if amount > 500:
                    reward += 3
                if amount >= 1000:
                    reward += 5
        return reward

    def get_state_tensor(self, scale_map):
        n = scale_map
        view = torch.zeros((7, n * (self.state.mapInfo.max_x + 1), n *
                            (self.state.mapInfo.max_y + 1)),
                           dtype=torch.float)
        for i in range(self.state.mapInfo.max_x + 1):
            for j in range(self.state.mapInfo.max_y + 1):
                if self.state.mapInfo.get_obstacle(
                        i, j) == TreeID:  # Tree     # trap map
                    view[2, n * i:n * i + n, n * j:n * j + n] = -TreeID
                    view[0, n * i:n * i + n, n * j:n * j + n] = -TreeID
                if self.state.mapInfo.get_obstacle(
                        i, j) == TrapID:  # Trap     # trap map
                    view[2, n * i:n * i + n, n * j:n * j + n] = -TrapID
                    view[0, n * i:n * i + n, n * j:n * j + n] = -TrapID
                if self.state.mapInfo.get_obstacle(
                        i, j) == SwampID:  # Swamp    # trap map
                    if [i, j] not in self.Swamp_position:
                        view[2, n * i:n * i + n,
                             n * j:n * j + n] = -SwampID  # -3
                        view[0, n * i:n * i + n, n * j:n * j + n] = -SwampID
                    else:
                        view[2, n * i:n * i + n,
                             n * j:n * j + n] = -SwampID - 3  # -6
                        view[0, n * i:n * i + n,
                             n * j:n * j + n] = -SwampID - 3
                gold_ = self.state.mapInfo.gold_amount(i, j)
                if gold_ > 0:
                    view[1, n * i:n * i + n,
                         n * j:n * j + n] = gold_ / 1000  ##/10 gold map
                    view[0, n * i:n * i + n, n * j:n * j + n] = gold_ / 1000

        index = 3
        playerid_list = []
        for stt, player in enumerate(self.state.players):
            playerid_list.append(player["playerId"])
            if player["playerId"] != self.state.id:
                try:
                    if player["status"] not in [1, 2, 3]:
                        try:
                            view[index + 1,
                                 n * player["posx"]:n * player["posx"] + n,
                                 n * player["posy"]:n * player["posy"] +
                                 n] = player["energy"] / 50
                        except:
                            view[index + 1,
                                 n * player["posx"]:n * player["posx"] + n,
                                 n * player["posy"]:n * player["posy"] + n] = 1
                        index += 1
                except:
                    view[index + 1, n * player["posx"]:n * player["posx"] + n,
                         n * player["posy"]:n * player["posy"] + n] = 1
                    # print(self.state.players)
                    #print(view[player["posx"]: player["posx"]+1, player["posy"]: player["posy"]+1, stt])
                    #print(np.unique(a-view[:,:,stt]))
                    index += 1
            else:
                try:
                    view[3, n * self.state.x:n * self.state.x + n,
                         n * self.state.y:n * self.state.y +
                         n] = self.state.energy / 50
                except:
                    print('out of map')
        if self.state.id not in playerid_list:
            view[3, n * self.state.x:n * self.state.x + n, n *
                 self.state.y:n * self.state.y + n] = self.state.energy / 50
        #print("check: ", np.unique(view[3,:,:]))
        DQNState = view
        return DQNState

    def get_state2(self, limit):
        # Building the map
        view = np.zeros([limit * 2 + 1, limit * 2 + 1], dtype=int)
        max_x, max_y = self.state.mapInfo.max_x, self.state.mapInfo.max_y
        xlimit_below = np.clip(self.state.x - limit, 0, max_x) - np.clip(
            self.state.x + limit - max_x, 0, limit)
        xlimit_up = np.clip(self.state.x + limit, 0, max_x) + np.clip(
            0 - self.state.x + limit, 0, limit)
        ylimit_below = np.clip(self.state.y - limit, 0, max_y) - np.clip(
            self.state.y + limit - max_y, 0, limit)
        ylimit_up = np.clip(self.state.y + limit, 0, max_y) + np.clip(
            0 - self.state.y + limit, 0, limit)

        #print(xlimit_below, xlimit_up, ylimit_below, ylimit_up, self.state.x, self.state.y)

        for i in range(xlimit_below, xlimit_up + 1):
            for j in range(ylimit_below, ylimit_up + 1):
                if self.state.mapInfo.get_obstacle(i, j) == TreeID:  # Tree
                    view[i - xlimit_below, j - ylimit_below] = -TreeID
                if self.state.mapInfo.get_obstacle(i, j) == TrapID:  # Trap
                    view[i - xlimit_below, j - ylimit_below] = -TrapID
                if self.state.mapInfo.get_obstacle(i, j) == SwampID:  # Swamp
                    view[i - xlimit_below, j - ylimit_below] = -SwampID
                if self.state.mapInfo.gold_amount(i, j) > 0:
                    view[i - xlimit_below,
                         j - ylimit_below] = self.state.mapInfo.gold_amount(
                             i, j) / 10
        DQNState = view.flatten().tolist(
        )  #Flattening the map matrix to a vector

        # Add position and energy of agent to the DQNState
        DQNState.append(self.state.x - xlimit_below)
        DQNState.append(self.state.y - ylimit_below)
        DQNState.append(self.state.energy)
        #Add position of bots
        # for player in self.state.players:
        #     if player["playerId"] != self.state.id:
        #         DQNState.append(player["posx"])
        #         DQNState.append(player["posy"])

        #Convert the DQNState from list to array for training
        DQNState = np.array(DQNState)
        return DQNState

    def update_swamp(self):
        for player in self.state.players:
            if self.state.mapInfo.get_obstacle(
                    player["posx"], player["posy"]) == 3 and [
                        player["posx"], player["posy"]
                    ] not in self.Swamp_position:
                self.Swamp_position.append([player["posx"], player["posy"]])

    def check_terminate(self):
        #Checking the status of the game
        #it indicates the game ends or is playing
        return self.state.status != State.STATUS_PLAYING
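A minimal driver loop for this variant might look like the sketch below. The host/port, the map request string, and the random policy are placeholders; the request field order and the six-action space are taken from the tests and constants above, so treat this as an assumption-laden illustration rather than the competition's actual entry point.

# Hypothetical usage sketch for the MinerEnv above; requires a running
# GameSocket server (or the dummy socket from the tests).
from random import randrange

env = MinerEnv("localhost", 1111)      # assumed host/port
env.start()
env.send_map_info("map1,0,0,50,100")   # assumed request: map, x, y, energy, steps
env.reset()
total_reward = 0.0
while not env.check_terminate():
    state = env.get_state2(limit=4)    # local view around the agent
    env.step(str(randrange(6)))        # placeholder random policy
    env.update_swamp()                 # remember visited swamp cells
    total_reward += env.get_reward()
env.end()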
Example 10
class MinerEnv:
    def __init__(self, host, port):
        self.socket = GameSocket(host, port)
        self.state = State()

        self.score_pre = self.state.score  #Storing the last score for designing the reward function

    def start(self):  #connect to server
        self.socket.connect()

    def end(self):  #disconnect server
        self.socket.close()

    def send_map_info(self, request):  #tell server which map to run
        self.socket.send(request)

    def reset(self):  #start new game
        self.state_x_pre = self.state.x
        self.state_y_pre = self.state.y
        self.last3position = []
        self.Swamp_position = []
        try:
            message = self.socket.receive()  #receive game info from server
            self.state.init_state(message)  #init state
        except Exception as e:
            import traceback
            traceback.print_exc()

    def step(self, action):  #step process
        self.state_x_pre = self.state.x
        self.state_y_pre = self.state.y
        self.last3position.append([self.state.x, self.state.y])
        if len(self.last3position) > 3:
            self.last3position.pop(0)
        #print(self.last3position)
        self.socket.send(action)  #send action to server
        try:
            message = self.socket.receive()  #receive new state from server
            self.state.update_state(message)  #update to local state
        except Exception as e:
            import traceback
            traceback.print_exc()

    # Functions customized by the client
    def get_state(self):
        # Building the map
        view = np.zeros(
            [self.state.mapInfo.max_x + 1, self.state.mapInfo.max_y + 1],
            dtype=int)
        for i in range(self.state.mapInfo.max_x + 1):
            for j in range(self.state.mapInfo.max_y + 1):
                if self.state.mapInfo.get_obstacle(i, j) == TreeID:  # Tree
                    view[i, j] = -TreeID
                if self.state.mapInfo.get_obstacle(i, j) == TrapID:  # Trap
                    view[i, j] = -TrapID
                if self.state.mapInfo.get_obstacle(i, j) == SwampID:  # Swamp
                    view[i, j] = -SwampID
                if self.state.mapInfo.gold_amount(i, j) > 0:
                    view[i, j] = self.state.mapInfo.gold_amount(i, j)

        DQNState = view.flatten().tolist(
        )  #Flattening the map matrix to a vector

        # Add position and energy of agent to the DQNState
        DQNState.append(self.state.x)
        DQNState.append(self.state.y)
        DQNState.append(self.state.energy)
        #Add position of bots
        for player in self.state.players:
            if player["playerId"] != self.state.id:
                DQNState.append(player["posx"])
                DQNState.append(player["posy"])

        #Convert the DQNState from list to array for training
        DQNState = np.array(DQNState)
        return DQNState

    def get_reward2(self):
        # Calculate reward
        reward = 0
        score_action = self.state.score - self.score_pre
        self.score_pre = self.state.score
        if score_action > 0:
            # If the DQN agent crafts gold, it should obtain a positive reward (equal to score_action)
            reward += score_action

        # If the DQN agent crashes into obstacles (Tree, Trap, Swamp), it should be punished with a negative reward
        if self.state.mapInfo.get_obstacle(self.state.x,
                                           self.state.y) == TreeID:  # Tree
            reward -= TreeID * 3 * randrange(1, 5)
        if self.state.mapInfo.get_obstacle(self.state.x,
                                           self.state.y) == TrapID:  # Trap
            reward -= TrapID * 3
        if self.state.mapInfo.get_obstacle(self.state.x,
                                           self.state.y) == SwampID:  # Swamp
            if [self.state.x,
                    self.state.y] in self.Swamp_position:  # entered this swamp again
                reward -= 15
            else:
                reward -= SwampID * 3  # first visit to a swamp
            self.Swamp_position.append([self.state.x, self.state.y])
        if self.state.mapInfo.gold_amount(
                self.state_x_pre, self.state_y_pre
        ) >= 50 and self.state.lastAction != 5:  # on gold but did not craft
            reward -= 10
        if self.state.lastAction == 5 and score_action == 0:  # crafted but gained no gold
            reward -= 10
        if len(self.last3position
               ) == 3 and self.state.lastAction != 5:  # back to same position
            if self.last3position[0] == self.last3position[2]:
                reward -= 3
            if self.last3position[1] == self.last3position[2]:
                reward -= 3
        if self.state.energy >= 45 and self.state.lastAction == 4:
            reward -= 7
        # if self.state.status == State.STATUS_PLAYING:
        #     reward += 0.5
        # If the agent goes out of the map, it should be punished with a larger negative reward.
        if self.state.status == State.STATUS_ELIMINATED_WENT_OUT_MAP:
            reward += -40

        # If the agent runs out of energy, it should be punished with a larger negative reward.
        if self.state.status == State.STATUS_ELIMINATED_OUT_OF_ENERGY:
            reward += -20
        # print ("reward",reward)
        return reward

    def get_reward(self):
        # Calculate reward
        reward = 0
        score_action = self.state.score - self.score_pre
        self.score_pre = self.state.score
        if score_action > 0:
            # If the DQN agent crafts gold, it should obtain a positive reward (proportional to score_action)
            reward += score_action / 50

        # If the DQN agent crashes into obstacles (Tree, Trap, Swamp), it should be punished with a negative reward
        if self.state.mapInfo.get_obstacle(self.state.x,
                                           self.state.y) == TreeID:  # Tree
            reward -= 0.03 * randrange(1, 5)
        if self.state.mapInfo.get_obstacle(self.state.x,
                                           self.state.y) == TrapID:  # Trap
            reward -= 0.06 * 3
        if self.state.mapInfo.get_obstacle(self.state.x,
                                           self.state.y) == SwampID:  # Swamp
            if [self.state.x,
                    self.state.y] in self.Swamp_position:  # entered this swamp again
                reward -= 0.15
            else:
                reward -= 0.05  # first visit to a swamp
            self.Swamp_position.append([self.state.x, self.state.y])
        if self.state.mapInfo.gold_amount(
                self.state_x_pre, self.state_y_pre
        ) >= 50 and self.state.lastAction != 5:  # on gold but did not craft
            reward -= 0.55
        if self.state.lastAction == 5 and score_action == 0:  # crafted but gained no gold
            reward -= 0.55
        if len(self.last3position
               ) == 3 and self.state.lastAction != 5:  # back to same position
            if self.last3position[0] == self.last3position[2]:
                reward -= 0.1
            if self.last3position[1] == self.last3position[2]:
                reward -= 0.1
        if self.state.energy >= 45 and self.state.lastAction == 4:
            reward -= 0.3
        # if self.state.status == State.STATUS_PLAYING:
        #     reward += 0.5
        # If the agent goes out of the map, it should be punished with a larger negative reward.
        if self.state.status == State.STATUS_ELIMINATED_WENT_OUT_MAP:
            reward += -10

        # If the agent runs out of energy, it should be punished with a larger negative reward.
        if self.state.status == State.STATUS_ELIMINATED_OUT_OF_ENERGY:
            reward += -5
        # print ("reward",reward)
        return reward

    def get_state_tensor(self, scale_map):
        n = scale_map
        view = np.zeros((n * (self.state.mapInfo.max_x + 1),
                         n * (self.state.mapInfo.max_y + 1), 6))
        for i in range(self.state.mapInfo.max_x + 1):
            for j in range(self.state.mapInfo.max_y + 1):
                if self.state.mapInfo.get_obstacle(
                        i, j) == TreeID:  # Tree     # trap map
                    view[n * i:n * i + n, n * j:n * j + n, 0] = -TreeID
                if self.state.mapInfo.get_obstacle(
                        i, j) == TrapID:  # Trap     # trap map
                    view[n * i:n * i + n, n * j:n * j + n, 0] = -TrapID
                if self.state.mapInfo.get_obstacle(
                        i, j) == SwampID:  # Swamp    # trap map
                    view[n * i:n * i + n, n * j:n * j + n, 0] = -SwampID
                if self.state.mapInfo.gold_amount(i, j) > 0:
                    view[n * i:n * i + n,
                         n * j:n * j + n, 0] = self.state.mapInfo.gold_amount(
                             i, j) / 1000  ##/10 gold map
        for stt, player in enumerate(self.state.players):
            if player["playerId"] != self.state.id:
                try:
                    if player["status"] not in [1, 2, 3]:
                        try:
                            view[n * player["posx"]:n * player["posx"] + n,
                                 n * player["posy"]:n * player["posy"] + n,
                                 stt + 1] = player["energy"] / 50
                        except:
                            view[n * player["posx"]:n * player["posx"] + n,
                                 n * player["posy"]:n * player["posy"] + n,
                                 stt + 1] = 1
                except:
                    view[n * player["posx"]:n * player["posx"] + n,
                         n * player["posy"]:n * player["posy"] + n, stt] = 1
                    # print(self.state.players)
            else:
                try:
                    view[n * self.state.x:n * self.state.x + n,
                         n * self.state.y:n * self.state.y + n,
                         2] = self.state.energy / 50
                except:
                    print('out of map')

        DQNState = np.array(view)
        return DQNState

    def get_state3(self, limit):
        # Building the map
        view = np.zeros([limit * 2 + 1, limit * 2 + 1], dtype=int)
        max_x, max_y = self.state.mapInfo.max_x, self.state.mapInfo.max_y
        xlimit_below = np.clip(self.state.x - limit, 0, max_x) - np.clip(
            self.state.x + limit - max_x, 0, limit)
        xlimit_up = np.clip(self.state.x + limit, 0, max_x) + np.clip(
            0 - self.state.x + limit, 0, limit)
        ylimit_below = np.clip(self.state.y - limit, 0, max_y) - np.clip(
            self.state.y + limit - max_y, 0, limit)
        ylimit_up = np.clip(self.state.y + limit, 0, max_y) + np.clip(
            0 - self.state.y + limit, 0, limit)

        #print(xlimit_below, xlimit_up, ylimit_below, ylimit_up, self.state.x, self.state.y)
        dmax, m, n, exist_gold = -1000, -5, 0.1, False
        x_maxgold, y_maxgold = self.state.x, self.state.y
        for i in range(max_x + 1):
            for j in range(max_y + 1):
                if self.state.mapInfo.gold_amount(i, j) >= 50:
                    exist_gold = True
                    d = m * ((self.state.x - i)**2 + (self.state.y - j)**
                             2) + n * self.state.mapInfo.gold_amount(i, j)
                    if d > dmax:
                        dmax = d
                        x_maxgold, y_maxgold = i, j  # nearest cell with a large gold amount (distance/value trade-off)

                if i in range(xlimit_below, xlimit_up + 1) and j in range(
                        ylimit_below, ylimit_up + 1):
                    if self.state.mapInfo.get_obstacle(i, j) == TreeID:  # Tree
                        view[i - xlimit_below, j - ylimit_below] = -TreeID
                    if self.state.mapInfo.get_obstacle(i, j) == TrapID:  # Trap
                        view[i - xlimit_below, j - ylimit_below] = -TrapID
                    if self.state.mapInfo.get_obstacle(i,
                                                       j) == SwampID:  # Swamp
                        view[i - xlimit_below, j - ylimit_below] = -SwampID
                    if self.state.mapInfo.gold_amount(i, j) > 0:
                        view[i - xlimit_below, j -
                             ylimit_below] = self.state.mapInfo.gold_amount(
                                 i, j) / 10
        DQNState = view.flatten().tolist(
        )  #Flattening the map matrix to a vector

        # Add position and energy of agent to the DQNState
        DQNState.append(self.state.x - xlimit_below)
        DQNState.append(self.state.y - ylimit_below)
        DQNState.append(self.state.energy)
        #Add position of bots
        # for player in self.state.players:
        #     if player["playerId"] != self.state.id:
        #         DQNState.append(player["posx"])
        #         DQNState.append(player["posy"])
        DQNState.append(self.state.x - x_maxgold)
        DQNState.append(self.state.y - y_maxgold)
        if exist_gold == False:
            DQNState.append(0)
        else:
            DQNState.append(
                self.state.mapInfo.gold_amount(x_maxgold, y_maxgold) / 10)
        #Convert the DQNState from list to array for training
        DQNState = np.array(DQNState)
        return DQNState

    def check_terminate(self):
        #Checking the status of the game
        #it indicates the game ends or is playing
        return self.state.status != State.STATUS_PLAYING
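For a sense of scale, a hedged decomposition of the reward terms in get_reward above; the numbers are copied from the code, the scenarios are hypothetical, and the obstacle IDs are assumed to be TreeID=1, TrapID=2, SwampID=3.

# Illustrative reward magnitudes from get_reward (Example 10); not authoritative.
craft_50_gold = 50 / 50        # +1.0 for a successful craft of 50
tree_penalty  = -0.03 * 4      # worst case of randrange(1, 5): -0.12
trap_penalty  = -0.06 * 3      # -0.18
swamp_revisit = -0.15          # re-entering a known swamp cell
out_of_map    = -10            # elimination penalties dominate everything else
out_of_energy = -5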
Example 11
class MinerEnv:
    def __init__(self, host, port):
        self.socket = GameSocket(host, port)
        self.state = State()
        self.pre_x = 0
        self.pre_y = 0
        self.pre_energy = 0
        #self.pre_action = ''   
        self.score_pre = self.state.score  # Storing the last score for designing the reward function

    def start(self): #connect to server
        self.socket.connect()

    def end(self): #disconnect server
        self.socket.close()

    def send_map_info(self, request):#tell server which map to run
        self.socket.send(request)

    def reset(self): #start new game
        try:
            message = self.socket.receive() #receive game info from server
            self.state.init_state(message) #init state
        except:
            import traceback
            traceback.print_exc()

    def step(self, action): #step process
        #self.pre_action = action
        self.pre_energy = self.state.energy
        self.pre_x, self.pre_y = self.state.x, self.state.y  # store the last coordinates
        self.socket.send(action) #send action to server
        try:
            message = self.socket.receive() #receive new state from server
            self.state.update_state(message) #update to local state
        # new_state = str_2_json(message)
        # players = new_state["players"]
        # print('length of players in step', len(players))
        except:
            import traceback
            traceback.print_exc()
        # print(self.state.players)
    # Functions customized by the client
    def get_state(self):
        # Building the map
        #print(self.state.x,self.state.y)
        view = np.zeros((5*(self.state.mapInfo.max_x + 1), 5*(self.state.mapInfo.max_y + 1), 6), dtype=float)  # float, so the /1000 and /50 scalings below are not truncated to zero
        #view[0:3, :] = -10
        #view[-3:, :] = -10
        #view[:, 0:3] = -10
        #view[:, -3:] = -10
        for i in range(self.state.mapInfo.max_x + 1):
            for j in range(self.state.mapInfo.max_y + 1):
                if self.state.mapInfo.get_obstacle(i, j) == TreeID:  # Tree     # trap map
                    view[5*i:5*i+5, 5*j:5*j+5,0] = -TreeID
                if self.state.mapInfo.get_obstacle(i, j) == TrapID:  # Trap     # trap map
                    view[5*i:5*i+5, 5*j:5*j+5,0] = -TrapID
                if self.state.mapInfo.get_obstacle(i, j) == SwampID: # Swamp    # trap map
                    view[5*i:5*i+5, 5*j:5*j+5,0] = -SwampID
                if self.state.mapInfo.gold_amount(i, j) > 0:
                    view[5*i:5*i+5, 5*j:5*j+5,0] = self.state.mapInfo.gold_amount(i, j)/1000  ## gold map
        
        for stt,player in enumerate(self.state.players):
            if player["playerId"] != self.state.id:
                try:
                    if player["status"] not in [1,2,3]:
                        try:
                            view[5*player["posx"]:5*player["posx"]+5,5*player["posy"]:5*player["posy"]+5,stt + 1] = player["energy"]/50
                        except:
                            view[5*player["posx"]:5*player["posx"]+5,5*player["posy"]:5*player["posy"]+5,stt + 1] = 1
                except:
                    view[5*player["posx"]: 5*player["posx"]+5,5*player["posy"]:5*player["posy"]+5,stt]= 1
                    # print(self.state.players)
            else:
                try:
                    view[5*self.state.x:5*self.state.x+5,5*self.state.y:5*self.state.y+5,2]= self.state.energy
                except: 
                    print('out of map')
                
        DQNState = np.array(view)
        return DQNState

    def get_reward(self, action):
        # Calculate reward
        reward = 0
        score_action = self.state.score - self.score_pre
        self.score_pre = self.state.score
        
        pre_x, pre_y = self.pre_x, self.pre_y

        if self.state.energy >= 45 and self.state.lastAction == 4:
            reward += -0.2
        # plus a small bonus if the agent moves to a coordinate that has gold
        if self.state.mapInfo.gold_amount(self.state.x, self.state.y) >= 50:
            reward += 0.2
        # If the DQN agent crafts gold, it should obtain a positive reward (equal to score_action)
        if score_action > 0:
            reward += score_action / 50
        # if still in the map, plus a small bonus
        if self.state.status == State.STATUS_PLAYING:
            reward += 0.1
        # if there is no gold but the agent still crafts, it will be punished
        if self.state.mapInfo.get_obstacle(pre_x, pre_y) < 4 and int(self.state.lastAction) == 5:
            reward += -0.2
        if (self.state.mapInfo.gold_amount(pre_x, pre_y) >= 50 and self.pre_energy > 15) and (int(self.state.lastAction) != 5):
            reward += -0.2

        # If the agent goes out of the map, it should be punished with a larger negative reward.
        #if self.state.status == State.STATUS_ELIMINATED_WENT_OUT_MAP:
        #    reward = -1
            
        # If the agent runs out of energy, it should be punished with a larger negative reward.
        #if self.state.status == State.STATUS_ELIMINATED_OUT_OF_ENERGY:
        #    reward = -1
        # print ("reward",reward)
        #if self.state.status == State.STATUS_STOP_END_STEP:
        #    reward = +2
        return reward

    def check_terminate(self):
        #Checking the status of the game
        #it indicates the game ends or is playing
        return self.state.status != State.STATUS_PLAYING
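The get_state above upsamples each map cell to a 5 x 5 patch across 6 channels; assuming the competition's 21 x 9 board (max_x = 20, max_y = 8, an assumption), the tensor shape works out as:

# Hypothetical shape of the state tensor from get_state (Example 11):
# (5 * (20 + 1), 5 * (8 + 1), 6) == (105, 45, 6)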
Example 12
class MinerEnv:
    def __init__(self, host, port):
        self.socket = GameSocket(host, port)
        self.state = State()

        # define action space
        self.INPUTNUM = 198  # The number of input values for the DQN model
        self.ACTIONNUM = 6  # The number of actions output from the DQN model
        # define state space

        self.gameState = None
        self.reward = 0
        self.terminate = False
        
        self.score_pre = self.state.score   # Storing the last score for designing the reward function
        self.energy_pre = self.state.energy # Storing the last energy for designing the reward function

        self.viewer = None
        self.steps_beyond_done = None

    def start(self):    # connect to server
        self.socket.connect()

    def end(self):  # disconnect server
        self.socket.close()

    def send_map_info(self, request):   # tell server which map to run
        self.socket.send(request)

    def reset(self):    # start new game
        # Choosing a map in the list
        # mapID = np.random.randint(1, 6)  # Choosing a map ID from 5 maps in Maps folder randomly
        mapID = 1
        posID_x = np.random.randint(MAP_MAX_X)  # Choosing an initial position of the DQN agent on the X-axis randomly
        posID_y = np.random.randint(MAP_MAX_Y)  # Choosing an initial position of the DQN agent on the Y-axis randomly
        # Creating a request for initializing a map, initial position, the initial energy, and the maximum number of steps of the DQN agent
        request = ("map" + str(mapID) + "," + str(posID_x) + "," + str(posID_y) + ",50,100")
        # Send the request to the game environment (GAME_SOCKET_DUMMY.py)
        self.send_map_info(request)

        # Initialize the game environment
        try:
            message = self.socket.receive() #receive game info from server
            self.state.init_state(message) #init state
        except Exception as e:
            import traceback
            traceback.print_exc()

        self.gameState = self.get_state()  # Get the state after resetting.
        # This function (get_state()) is an example of creating a state for the DQN model
        self.reward = 0  # The amount of rewards for the entire episode
        self.terminate = False  # The variable indicates that the episode ends
        self.steps_beyond_done = None
        return self.gameState

    def step(self, action):     # step process
        self.socket.send(str(action))   # send action to server
        try:
            message = self.socket.receive()     # receive new state from server
            self.state.update_state(message)    # update to local state
        except Exception as e:
            import traceback
            traceback.print_exc()

        self.gameState = self.get_state()
        self.reward = self.get_reward()
        done = self.check_terminate()
        return self.gameState, self.reward, done, {}

    # Functions customized by the client
    def get_state(self):
        # Building the map
        view = np.zeros([self.state.mapInfo.max_x + 1, self.state.mapInfo.max_y + 1], dtype=int)
        for i in range(self.state.mapInfo.max_x + 1):
            for j in range(self.state.mapInfo.max_y + 1):
                if self.state.mapInfo.get_obstacle(i, j) == TreeID:  # Tree
                    view[i, j] = -20
                if self.state.mapInfo.get_obstacle(i, j) == TrapID:  # Trap
                    view[i, j] = -10
                if self.state.mapInfo.get_obstacle(i, j) == SwampID: # Swamp
                    view[i, j] = self.state.mapInfo.get_obstacle_value(i, j)
                if self.state.mapInfo.gold_amount(i, j) > 0:
                    view[i, j] = self.state.mapInfo.gold_amount(i, j)

        # print(view)
        DQNState = view.flatten().tolist() #Flattening the map matrix to a vector
        
        # Add position and energy of agent to the DQNState
        DQNState.append(self.state.x)
        DQNState.append(self.state.y)
        DQNState.append(self.state.energy)
        me = {"playerId": 1, "energy": self.state.energy, "posx": self.state.x, "posy": self.state.y,
              "lastAction": self.state.lastAction, "score": self.state.score, "status": self.state.status}

        #Add position of bots 
        for player in self.state.players:
            if player["playerId"] != self.state.id:
                DQNState.append(player["posx"])
                DQNState.append(player["posy"])
                
        #Convert the DQNState from list to array for training
        DQNState = np.array(DQNState)

        return DQNState

    def get_reward(self):
        # Calculate reward
        reward = 0
        score_action = self.state.score - self.score_pre
        energy_consume = self.energy_pre - self.state.energy
        self.score_pre = self.state.score
        self.energy_pre = self.state.energy
        reward = score_action - 0.2 * energy_consume


        # if score_action > 0:
        #     #If the DQN agent crafts golds, then it should obtain a positive reward (equal score_action)
        #     reward += score_action
        #
        # # If the DQN agent crashes into obstacles (Tree, Trap, Swamp), it should be punished with a negative reward
        # if self.state.mapInfo.get_obstacle(self.state.x, self.state.y) == TreeID:  # Tree
        #     reward -= TreeID
        # if self.state.mapInfo.get_obstacle(self.state.x, self.state.y) == TrapID:  # Trap
        #     reward -= TrapID
        # if self.state.mapInfo.get_obstacle(self.state.x, self.state.y) == SwampID:  # Swamp
        #     reward -= SwampID

        # If the agent goes out of the map, it should be punished with a larger negative reward.
        if self.state.status == State.STATUS_ELIMINATED_WENT_OUT_MAP:
            reward += -10
            
        # If the agent runs out of energy, it should be punished with a larger negative reward.
        if self.state.status == State.STATUS_ELIMINATED_OUT_OF_ENERGY:
            reward += -10
        # print ("reward",reward)
        return reward

    def check_terminate(self):
        # Checking the status of the game
        # it indicates the game ends or is playing
        return self.state.status != State.STATUS_PLAYING

    def updateObservation(self):
        return

    def render(self, mode='human', close=False):
        return

    def close(self):
        """Override in your subclass to perform any necessary cleanup.
        Environments will automatically close() themselves when
        garbage collected or when the program exits.
        """
        raise NotImplementedError()

    def seed(self, seed=None):
        """Sets the seed for this env's random number generator(s).

        # Returns
            Returns the list of seeds used in this env's random number generators
        """
        raise NotImplementedError()

    def configure(self, *args, **kwargs):
        """Provides runtime configuration to the environment.
        This configuration should consist of data that tells your
        environment how to run (such as an address of a remote server,
        or path to your ImageNet data). It should not affect the
        semantics of the environment.
        """
        raise NotImplementedError()
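Since this variant exposes a Gym-style reset/step interface, a rollout sketch could look like the following; the dummy socket appears to ignore host and port (as in the tests above), and the random policy and loop are illustrative only.

# Hedged rollout sketch for the Gym-style MinerEnv (Example 12).
import numpy as np

env = MinerEnv(None, None)     # assumed: dummy socket ignores host/port
env.start()
state = env.reset()            # builds and sends the map request internally
done = False
while not done:
    action = np.random.randint(env.ACTIONNUM)  # random policy over 6 actions
    state, reward, done, info = env.step(action)
env.end()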
Example 13
class MinerEnv:
    def __init__(self, host, port):
        self.socket = GameSocket(host, port)
        self.state = State()

        self.score_pre = self.state.score  # Storing the last score for designing the reward function
        self.energy_pre = self.state.energy
        #self.x_pre = self.state.x
        #self.y_pre = self.state.y

    def start(self):  # connect to server
        self.socket.connect()

    def end(self):  # disconnect server
        self.socket.close()

    def send_map_info(self, request):  # tell server which map to run
        self.socket.send(request)

    def reset(self):  # start new game
        try:
            message = self.socket.receive()  # receive game info from server
            self.state.init_state(message)  # init state
        except Exception as e:
            import traceback
            traceback.print_exc()

    def step(self, action):  # step process
        self.socket.send(action)  # send action to server
        try:
            message = self.socket.receive()  # receive new state from server
            self.state.update_state(message)  # update to local state
        except Exception as e:
            import traceback
            traceback.print_exc()

    # Functions customized by the client
    def get_state(self, remain_steps, initial_flag=False):
        # update pre position, score, energy
        #self.x_pre = self.state.x
        #self.y_pre = self.state.y
        self.score_pre = self.state.score
        self.energy_pre = self.state.energy

        # depth = 3  # goal, min_energy, max_energy
        depth = 15  # goal, min/max energy, 4 player layers, plus the terrain one-hot layers indexed below
        goal_depth = 0
        min_energy_depth = 1
        max_energy_depth = 2
        my_agent_depth = 3
        bot1_depth = 4
        bot2_depth = 5
        bot3_depth = 6
        goal_pos = 7
        tree_pos = 8
        trap_pos = 9
        swamp_pos_5 = 10
        swamp_pos_20 = 11
        swamp_pos_40 = 12
        swamp_pos_100 = 13
        ground_position = 14

        # len_player_infor = 6 * 4
        len_player_infor = 2 + 8 + 6

        # max_goal = 67 * 50 * 4  # assume 67 steps for mining and 33 steps for relaxing
        max_goal = 1250
        max_energy = 100

        # max_x = self.state.mapInfo.max_x
        # max_y = self.state.mapInfo.max_y
        max_player_energy = 50
        max_score = 3000
        # max_score = 67 * 50
        max_last_action = 6 + 1  # 1 because of None
        max_status = 5

        # Building the map
        view_1 = np.zeros([
            self.state.mapInfo.max_x + 1, self.state.mapInfo.max_y + 1, depth
        ],
                          dtype=float)
        for i in range(self.state.mapInfo.max_x + 1):
            for j in range(self.state.mapInfo.max_y + 1):
                # ground
                view_1[i, j, min_energy_depth] = -1 / max_energy
                view_1[i, j, max_energy_depth] = -1 / max_energy
                view_1[i, j, ground_position] = 1

                goal = self.state.mapInfo.gold_amount(i, j)
                if goal > 0:
                    view_1[i, j, ground_position] = 0
                    view_1[i, j, goal_pos] = 1
                    view_1[i, j, min_energy_depth] = -4 / max_energy
                    view_1[i, j, max_energy_depth] = -4 / max_energy
                    view_1[i, j, goal_depth] = goal / max_goal

        for obstacle in self.state.mapInfo.obstacles:
            i = obstacle["posx"]
            j = obstacle["posy"]
            if obstacle["type"] == TreeID:  # Tree
                view_1[i, j, ground_position] = 0
                view_1[i, j, tree_pos] = 1
                view_1[i, j, min_energy_depth] = -5 / max_energy  # -5 ~ -20
                view_1[i, j, max_energy_depth] = -20 / max_energy  # -5 ~ -20
            elif obstacle["type"] == TrapID:  # Trap
                if obstacle["value"] != 0:
                    view_1[i, j, ground_position] = 0
                    view_1[i, j, trap_pos] = 1
                view_1[i, j, min_energy_depth] = obstacle["value"] / max_energy
                view_1[i, j, max_energy_depth] = obstacle["value"] / max_energy
            elif obstacle["type"] == SwampID:  # Swamp
                view_1[i, j, ground_position] = 0
                view_1[i, j, min_energy_depth] = obstacle[
                    "value"] / max_energy  # -5, -20, -40, -100
                view_1[i, j, max_energy_depth] = obstacle[
                    "value"] / max_energy  # -5, -20, -40, -100
                if obstacle["value"] == -5:
                    view_1[i, j, swamp_pos_5] = 1
                elif obstacle["value"] == -20:
                    view_1[i, j, swamp_pos_20] = 1
                elif obstacle["value"] == -40:
                    view_1[i, j, swamp_pos_40] = 1
                elif obstacle["value"] == -100:
                    view_1[i, j, swamp_pos_100] = 1
        """
        for goal in self.state.mapInfo.golds:
            i = goal["posx"]
            j = goal["posy"]
            view_1[i, j, min_energy_depth] = 4 / max_energy
            view_1[i, j, max_energy_depth] = 4 / max_energy
            view_1[i, j, goal_depth] = goal["amount"] / max_goal
        """

        # Add player's information
        view_2 = np.zeros([len_player_infor * 4 + 1],
                          dtype=float)  # +1 remaining steps

        index_player = 0

        if (0 <= self.state.x <= self.state.mapInfo.max_x) and \
                (0 <= self.state.y <= self.state.mapInfo.max_y):
            view_1[self.state.x, self.state.y, my_agent_depth] = 1
            view_2[index_player * len_player_infor +
                   0] = self.state.energy / max_player_energy
            view_2[index_player * len_player_infor +
                   1] = self.state.score / max_score
            if self.state.lastAction is None:  # 0 step
                view_2[index_player * len_player_infor + 2 +
                       max_last_action] = 1
            else:  # > 1 step
                view_2[index_player * len_player_infor + 2 +
                       self.state.lastAction] = 1
            view_2[index_player * len_player_infor + 2 + max_last_action + 1 +
                   self.state.status] = 1

        bot_depth = my_agent_depth
        for player in self.state.players:
            if player["playerId"] != self.state.id:
                index_player += 1
                bot_depth += 1
                if (0 <= player["posx"] <= self.state.mapInfo.max_x) and \
                        (0 <= player["posy"] <= self.state.mapInfo.max_y):
                    if "energy" in player:  # > 1 step
                        if player["status"] == self.state.STATUS_PLAYING:
                            view_1[player["posx"], player["posy"],
                                   bot_depth] = 1
                            view_2[index_player * len_player_infor +
                                   0] = player["energy"] / max_player_energy
                            view_2[index_player * len_player_infor +
                                   1] = player["score"] / max_score
                            view_2[index_player * len_player_infor + 2 +
                                   player["lastAction"]] = 1  # one hot
                            view_2[index_player * len_player_infor + 2 +
                                   max_last_action + 1 + player["status"]] = 1
                    elif initial_flag:  # 0 step, initial state
                        view_1[player["posx"], player["posy"], bot_depth] = 1
                        view_2[index_player * len_player_infor +
                               0] = 50 / max_player_energy
                        view_2[index_player * len_player_infor +
                               1] = 0 / max_score
                        view_2[index_player * len_player_infor + 2 +
                               max_last_action] = 1  # one hot
                        view_2[index_player * len_player_infor + 2 +
                               max_last_action + 1 +
                               self.state.STATUS_PLAYING] = 1
        view_2[-1] = remain_steps / 100

        # Convert the DQNState from list to array for training
        DQNState_map = np.array(view_1)
        DQNState_users = np.array(view_2)

        return DQNState_map, DQNState_users

    def get_reward(self, num_of_wrong_relax, num_of_wrong_mining):
        # returns roughly -0.01 ~ 0.01
        # the reward must steer the agent toward mining gold

        max_reward = 50
        reward_died = -50  # ~ double max reward
        # reward_died = -25  # worth a try

        reward_enter_goal = max_reward / 20  # 5

        # Calculate reward
        reward = 0  # default for plain moves, since the agent dies at the step limit anyway

        energy_action = self.state.energy - self.energy_pre  # < 0 if not relax
        score_action = self.state.score - self.score_pre  # >= 0

        if score_action > 0:
            reward = score_action / 2500  # max ~2500 / episode
        else:
            # moving
            #if int(self.state.lastAction) < 4:
            #    # enter gold
            #    if self.state.mapInfo.gold_amount(self.state.x, self.state.y) > 0:
            #        reward = reward_enter_goal / 2500
            # mining but cannot get gold
            if (int(self.state.lastAction) == 5) and (score_action == 0):
                #    reward = reward_died / 10 / max_reward
                num_of_wrong_mining += 1
            # relax when energy > 40 or cannot get more energy
            elif int(self.state.lastAction) == 4:
                if self.energy_pre > 40 or energy_action == 0:
                    #        reward = reward_died / 10 / max_reward
                    num_of_wrong_relax += 1

            # at gold but move to ground
            # if (int(self.state.lastAction) < 4) and (self.state.mapInfo.gold_amount(self.x_pre, self.y_pre) > 0) \
            #        and (self.state.mapInfo.gold_amount(self.state.x, self.state.y) == 0):
            #    reward = reward_died

            # relax when energy > 40
            #elif self.energy_pre > 40 and int(self.state.lastAction) == 4:
            #    reward = reward_died / 4

            # relax but cannot get more energy
            #elif int(self.state.lastAction) == 4 and energy_action == 0:
            #    reward = reward_died / 4

        # If out of the map, then the DQN agent should be punished by a larger negative reward.
        #if self.state.status == State.STATUS_ELIMINATED_WENT_OUT_MAP or self.state.status == State.STATUS_ELIMINATED_INVALID_ACTION:
        #    reward = reward_died / max_reward

        #elif self.state.status == State.STATUS_ELIMINATED_OUT_OF_ENERGY or self.state.status == State.STATUS_STOP_EMPTY_GOLD \
        #        or self.state.status == State.STATUS_STOP_END_STEP:

        if self.state.status != State.STATUS_PLAYING:
            if self.state.score == 0:
                reward = reward_died / max_reward  # -1

        if self.state.status == State.STATUS_ELIMINATED_WENT_OUT_MAP or self.state.status == State.STATUS_ELIMINATED_OUT_OF_ENERGY:
            reward = reward_died / max_reward  # -1

        # print ("reward",reward)
        #return reward / max_reward / self.state.mapInfo.maxStep  # 100 steps
        return reward, num_of_wrong_relax, num_of_wrong_mining

    def check_terminate(self):
        # Check the game status: the episode ends once the state leaves STATUS_PLAYING
        return self.state.status != State.STATUS_PLAYING
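
Because this get_reward variant returns the two mistake counters together with the reward, the caller has to thread them through the episode. A minimal usage sketch (env and choose_action are placeholders; env is assumed to expose the same step()/check_terminate() plumbing as the other examples):

num_of_wrong_relax, num_of_wrong_mining = 0, 0
while not env.check_terminate():
    env.step(str(choose_action()))  # choose_action is a hypothetical policy
    reward, num_of_wrong_relax, num_of_wrong_mining = env.get_reward(
        num_of_wrong_relax, num_of_wrong_mining)
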
Example n. 14
class MinerEnv:
    def __init__(self, host, port):
        self.socket = GameSocket(host, port)
        self.state = State()
        self.score_pre = self.state.score  # Storing the last score for designing the reward function

    def start(self):  # connect to server
        self.socket.connect()

    def end(self):  # disconnect server
        self.socket.close()

    def send_map_info(self, request):  # tell server which map to run
        self.socket.send(request)

    def reset(self):  # start new game
        try:
            message = self.socket.receive()  # receive game info from server
            self.state.init_state(message)  # init state
        except Exception as e:
            import traceback
            traceback.print_exc()

    def step(self, action):  # step process
        self.socket.send(action)  # send action to server
        try:
            message = self.socket.receive()  # receive new state from server
            self.state.update_state(message)  # update to local state
        except Exception as e:
            import traceback
            traceback.print_exc()

    # Functions below are customized by the client
    def get_state(self):
        # Building the map
        view = np.zeros(
            [self.state.mapInfo.max_x + 1, self.state.mapInfo.max_y + 1],
            dtype=int)
        for i in range(self.state.mapInfo.max_x + 1):
            for j in range(self.state.mapInfo.max_y + 1):
                if self.state.mapInfo.get_obstacle(i, j) == TreeID:  # Tree
                    view[i, j] = -TreeID
                if self.state.mapInfo.get_obstacle(i, j) == TrapID:  # Trap
                    view[i, j] = -TrapID
                if self.state.mapInfo.get_obstacle(i, j) == SwampID:  # Swamp
                    view[i, j] = -SwampID
                if self.state.mapInfo.gold_amount(i, j) > 0:
                    view[i, j] = self.state.mapInfo.gold_amount(i, j)

        DQNState = view.flatten().tolist()  # Flatten the map matrix to a vector

        # Add position and energy of agent to the DQNState

        next_round_energy = self.get_next_round_energy()
        DQNState.append(next_round_energy)
        DQNState.append(self.state.x)
        DQNState.append(self.state.y)
        DQNState.append(self.state.score)
        DQNState.append(self.state.energy)
        # DQNState.append(self.state)
        # Add position of bots
        for player in self.state.players:
            if player["playerId"] != self.state.id:
                DQNState.append(player["posx"])
                DQNState.append(player["posy"])
                energy = 0
                score = 0
                free_count = 0
                if 'energy' in player:
                    energy = player["energy"]
                if 'score' in player:
                    score = player["score"]
                if 'free_count' in player:
                    free_count = player["free_count"]
                DQNState.append(energy)
                DQNState.append(score)
                DQNState.append(free_count)

        # Convert the DQNState from list to array for training
        DQNState = np.array(DQNState)
        return DQNState

    def get_next_round_energy(self):
        # Estimate the energy available to the agent after its remaining free turns
        free_count = 0
        for p in self.state.players:
            if p['playerId'] == self.state.id:
                free_count = p['freeCount']
        next_e = self.state.energy
        # Compounding rule: e.g. starting from 10 with free_count == 0 this
        # yields 20.0, 40.0, 60.0, 80.0 over the four steps
        for i in range(4 - free_count):
            next_e += next_e / max(i, 1)
        return next_e

    def dig_score(self):
        pass

    def get_reward(self):
        # Calculate reward
        reward = 0
        score_action = self.state.score  # - self.score_pre
        self.score_pre = self.state.score
        if score_action > 0:
            # If the DQN agent crafts gold, then it should obtain a positive reward (equal to score_action)
            reward += score_action
            # print('Craft gold : {}'.format(score_action))
        next_e = self.get_next_round_energy()
        if next_e <= 0:  # About to run out of energy
            reward -= 100

        if next_e >= 50 and self.state.lastAction == 4:  # Do not rest while energy is (near) full :(
            reward -= 100

        # If the DQN agent crashes into obstacles (Tree, Trap, Swamp), then it should be punished by a negative reward
        if self.state.mapInfo.get_obstacle(self.state.x,
                                           self.state.y) == TreeID:  # Tree
            reward -= TreeID
        if self.state.mapInfo.get_obstacle(self.state.x,
                                           self.state.y) == TrapID:  # Trap
            reward -= TrapID
        if self.state.mapInfo.get_obstacle(self.state.x,
                                           self.state.y) == SwampID:  # Swamp
            reward -= SwampID

        # If out of the map, then the DQN agent should be punished by a larger negative reward.
        if self.state.status == State.STATUS_ELIMINATED_WENT_OUT_MAP:
            reward += -100
        # If it runs out of energy, the DQN agent should be punished by a larger negative reward.
        if self.state.status == State.STATUS_ELIMINATED_OUT_OF_ENERGY:
            reward += -100
        return reward / 100.

    def check_terminate(self):
        # Check the game status: the episode ends once the state leaves STATUS_PLAYING
        return self.state.status != State.STATUS_PLAYING
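
A minimal driver sketch for this environment (HOST, PORT, and the map request string are placeholders; the request format and the 0-5 action codes follow the other examples in this document):

env = MinerEnv(HOST, PORT)            # HOST/PORT are placeholders
env.start()
env.send_map_info("map1,0,0,50,100")  # map, start x, start y, initial energy, max steps
env.reset()
while not env.check_terminate():
    action = np.random.randint(0, 6)  # 0-3 move, 4 rest, 5 mine (per the other examples)
    env.step(str(action))
    state = env.get_state()
    reward = env.get_reward()
env.end()
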
Example n. 15
class MinerEnv:
    def __init__(self, host, port):
        self.socket = GameSocket(host, port)
        self.state = State()

        self.score_pre = self.state.score  #Storing the last score for designing the reward function
        self.decay = 27
        self.area_affect = 3
        self.affect_eff = 0.92
        self.view = None
        self.energy_view = None
        self.current_action = None
        self.gold_map = None
        self.gold_map_origin = None

    def start(self):  #connect to server
        self.socket.connect()

    def end(self):  #disconnect server
        self.socket.close()

    def send_map_info(self, request):  #tell server which map to run
        self.socket.send(request)

    def reset(self):  #start new game
        # Choosing a map in the list
        # mapID = np.random.randint(1, 6)  # Choosing a map ID from 5 maps in Maps folder randomly
        mapID = 1
        posID_x = np.random.randint(MAP_MAX_X)  # Choose a random initial x position for the DQN agent
        # posID_x = 12
        posID_y = np.random.randint(MAP_MAX_Y)  # Choose a random initial y position for the DQN agent
        # posID_y = 1
        # Create a request containing the map, the initial position, the initial energy, and the maximum number of steps of the DQN agent
        request = ("map" + str(mapID) + "," + str(posID_x) + "," +
                   str(posID_y) + ",50,100")
        # Send the request to the game environment (GAME_SOCKET_DUMMY.py)
        self.send_map_info(request)

        try:
            message = self.socket.receive()  #receive game info from server
            print(message)
            self.state.init_state(message)  #init state
            print(self.state.score)
        except Exception as e:
            import traceback
            traceback.print_exc()

    def step(self, action):  #step process
        self.socket.send(action)  #send action to server
        try:
            message = self.socket.receive()  #receive new state from server
            #print("New state: ", message)
            self.state.update_state(message)  #update to local state
            print(self.state.score)
        except Exception as e:
            import traceback
            traceback.print_exc()

    # Functions below are customized by the client
    def get_state(self):
        # Building the map
        self.view = np.zeros(
            [self.state.mapInfo.max_x + 1, self.state.mapInfo.max_y + 1],
            dtype=int)
        self.energy_view = np.zeros(
            [self.state.mapInfo.max_x + 1, self.state.mapInfo.max_y + 1],
            dtype=int)
        self.gold_map = np.zeros(
            [self.state.mapInfo.max_x + 1, self.state.mapInfo.max_y + 1],
            dtype=int)
        gold_opt = np.zeros(
            [self.state.mapInfo.max_x + 1, self.state.mapInfo.max_y + 1],
            dtype=int)
        for i in range(self.state.mapInfo.max_x + 1):
            for j in range(self.state.mapInfo.max_y + 1):
                if self.state.mapInfo.get_obstacle(i, j) == TreeID:  # Tree
                    self.view[i, j] = -20
                    self.energy_view[i, j] = -20
                elif self.state.mapInfo.get_obstacle(i, j) == TrapID:  # Trap
                    self.view[i, j] = -10
                    self.energy_view[i, j] = -10
                elif self.state.mapInfo.get_obstacle(i, j) == SwampID:  # Swamp
                    self.view[i, j] = self.state.mapInfo.get_obstacle_value(i, j)
                    self.energy_view[i, j] = self.state.mapInfo.get_obstacle_value(i, j)
                elif self.state.mapInfo.gold_amount(i, j) > 0:
                    self.view[i, j] = self.state.mapInfo.gold_amount(i, j)
                    self.energy_view[i, j] = -4
                    self.gold_map[i, j] = self.state.mapInfo.gold_amount(i, j)
                else:
                    self.view[i, j] = -1
                    self.energy_view[i, j] = -1
        self.gold_map_origin = copy.deepcopy(self.gold_map)
        # print(self.gold_map)
        # player update goldmap
        for player in self.state.players:
            if player["playerId"] != self.state.id:
                x = player["posx"]
                y = player["posy"]
                if 0 <= x <= self.state.mapInfo.max_x and 0 <= y <= self.state.mapInfo.max_y:
                    if self.gold_map[x][y] > 0:
                        if x != self.state.x or y != self.state.y:
                            self.gold_map[x][y] = self.gold_map[x][y] * 0.63
                            self.view[x][y] = self.gold_map[x][y]
                    else:
                        # Discount gold in a diamond neighbourhood around the bot:
                        # cells at ring distance t are scaled by affect_eff ** (area_affect + 1 - t)
                        for t in range(1, self.area_affect + 1):
                            for k in range(-t, t):
                                for nx, ny in ((x + k, y + t - abs(k)),
                                               (x - k, y - t + abs(k))):
                                    if (0 <= nx <= self.state.mapInfo.max_x
                                            and 0 <= ny <= self.state.mapInfo.max_y
                                            and self.gold_map[nx][ny] > 0):
                                        self.gold_map[nx][ny] = self.gold_map[nx][ny] * pow(
                                            self.affect_eff, self.area_affect + 1 - t)
                                        self.view[nx][ny] = self.gold_map[nx][ny]
        print(self.gold_map)
        arr = []
        for i in range(self.state.mapInfo.max_x + 1):
            for j in range(self.state.mapInfo.max_y + 1):
                if self.state.mapInfo.gold_amount(i, j) > 0:
                    gold_est = np.zeros(
                        [self.state.mapInfo.max_x + 1, self.state.mapInfo.max_y + 1],
                        dtype=int)
                    gold_est[i][j] = self.gold_map[i][j]
                    for a in range(0, i):
                        gold_est[i - a - 1][j] = max(
                            gold_est[i - a][j] - self.decay +
                            self.view[i - a - 1][j], 0)
                    for b in range(i + 1, self.state.mapInfo.max_x + 1):
                        gold_est[b][j] = max(
                            gold_est[b - 1][j] - self.decay + self.view[b][j],
                            0)
                    for c in range(0, j):
                        gold_est[i][j - c - 1] = max(
                            gold_est[i][j - c] - self.decay +
                            self.view[i][j - c - 1], 0)
                    for d in range(j + 1, self.state.mapInfo.max_y + 1):
                        gold_est[i][d] = max(
                            gold_est[i][d - 1] - self.decay + self.view[i][d],
                            0)

                    for x in range(0, i):
                        for y in range(0, j):
                            gold_est[i - x - 1][j - y - 1] = max(gold_est[i - x][j - y - 1],
                                                                 gold_est[i - x - 1][j - y]) - self.decay + \
                                                             self.view[i - x - 1][j - y - 1]
                    for x in range(0, i):
                        for y in range(j + 1, self.state.mapInfo.max_y + 1):
                            gold_est[i - x - 1][y] = max(gold_est[i - x][y], gold_est[i - x - 1][y - 1]) - self.decay + \
                                                     self.view[i - x - 1][y]
                    for x in range(i + 1, self.state.mapInfo.max_x + 1):
                        for y in range(0, j):
                            gold_est[x][j - y - 1] = max(gold_est[x][j - y], gold_est[x - 1][j - y - 1]) - self.decay + \
                                                     self.view[x][j - y - 1]
                    for x in range(i + 1, self.state.mapInfo.max_x + 1):
                        for y in range(j + 1, self.state.mapInfo.max_y + 1):
                            gold_est[x][y] = max(
                                gold_est[x - 1][y],
                                gold_est[x][y -
                                            1]) - self.decay + self.view[x][y]

                    # print(i, j, self.state.mapInfo.gold_amount(i, j))
                    # print(gold_est)
                    arr.append(gold_est)
        for est in arr:
            gold_opt = np.maximum(gold_opt, est)  # element-wise max over the per-gold estimates
        # print(gold_opt)
        return np.array(gold_opt)

    def check_terminate(self):
        return self.state.status != State.STATUS_PLAYING
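
A hedged sketch of one way the gold_opt potential map returned above could drive a greedy move policy; the 0-3 action-to-direction mapping is an assumption for illustration:

def greedy_action(gold_opt, x, y):
    # Score the four neighbours of (x, y); out-of-map cells are skipped
    candidates = {0: (x - 1, y), 1: (x + 1, y), 2: (x, y - 1), 3: (x, y + 1)}
    best_action, best_value = None, float("-inf")
    for action, (nx, ny) in candidates.items():
        if 0 <= nx < gold_opt.shape[0] and 0 <= ny < gold_opt.shape[1]:
            if gold_opt[nx, ny] > best_value:
                best_action, best_value = action, gold_opt[nx, ny]
    return best_action
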
Example n. 16
    def test_send_reset(self, mock_reset):
        socket = GameSocket(None, None)
        socket.send('map1,0,0,10,3,20')

        mock_reset.assert_called_with(['map1', '0', '0', '10', '3', '20'])
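
The mock_reset parameter suggests this test sits under a unittest.mock patch decorator that was trimmed from the snippet; a hedged reconstruction of the missing context (the patch target and test-class name are assumptions):

import unittest
from unittest import mock

class GameSocketTest(unittest.TestCase):
    @mock.patch.object(GameSocket, 'reset')  # assumed patch target
    def test_send_reset(self, mock_reset):
        socket = GameSocket(None, None)
        socket.send('map1,0,0,10,3,20')
        mock_reset.assert_called_with(['map1', '0', '0', '10', '3', '20'])
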
Example n. 17
    def test_send_update(self, mock_reset):
        socket = GameSocket(None, None)
        socket.send('1')

        mock_reset.assert_not_called()
Example n. 18
class MinerEnv:
    def __init__(self, host, port):
        self.socket = GameSocket(host, port)
        self.state = State()

        self.score_pre = self.state.score  #Storing the last score for designing the reward function
        self.pos_x_pre = self.state.x
        self.pos_y_pre = self.state.y

    def start(self):  #connect to server
        self.socket.connect()

    def end(self):  #disconnect server
        self.socket.close()

    def send_map_info(self, request):  #tell server which map to run
        self.socket.send(request)

    def reset(self):  #start new game
        try:
            message = self.socket.receive()  #receive game info from server
            self.state.init_state(message)  #init state

            self.score_pre = self.state.score  #Storing the last score for designing the reward function
            self.pos_x_pre = self.state.x
            self.pos_y_pre = self.state.y
        except Exception as e:
            import traceback
            traceback.print_exc()

    def step(self, action):  #step process
        self.socket.send(action)  #send action to server
        try:
            message = self.socket.receive()  #receive new state from server
            self.state.update_state(message)  #update to local state
        except Exception as e:
            import traceback
            traceback.print_exc()

    # Functions below are customized by the client
    def get_state(self):
        #Local view
        view = np.zeros([5, 5])
        for i in range(-2, 3):
            for j in range(-2, 3):
                index_x = self.state.x + i
                index_y = self.state.y + j
                # max_x and max_y are valid map indices, so only strictly larger values are out of the map
                if index_x < 0 or index_y < 0 or index_x > self.state.mapInfo.max_x or index_y > self.state.mapInfo.max_y:
                    view[2 + i, 2 + j] = -1
                else:
                    if self.state.mapInfo.get_obstacle(index_x,
                                                       index_y) == TreeID:
                        view[2 + i, 2 + j] = -1
                    if self.state.mapInfo.get_obstacle(index_x,
                                                       index_y) == TrapID:
                        view[2 + i, 2 + j] = -1
                    if self.state.mapInfo.get_obstacle(index_x,
                                                       index_y) == SwampID:
                        view[2 + i, 2 + j] = -1

        #Create the state
        DQNState = view.flatten().tolist()
        self.pos_x_gold_first = self.state.x
        self.pos_y_gold_first = self.state.y
        if len(self.state.mapInfo.golds) > 0:
            self.pos_x_gold_first = self.state.mapInfo.golds[0]["posx"]
            self.pos_y_gold_first = self.state.mapInfo.golds[0]["posy"]
        DQNState.append(self.pos_x_gold_first - self.state.x)
        DQNState.append(self.pos_y_gold_first - self.state.y)

        #Convert the DQNState from list to array for training
        DQNState = np.array(DQNState)

        return DQNState

    def get_reward(self):
        # Calculate reward
        reward = 0
        goldamount = self.state.mapInfo.gold_amount(self.state.x, self.state.y)
        if goldamount > 0:
            reward += 10  #goldamount
            #remove the gold
            for g in self.socket.stepState.golds:
                if g.posx == self.state.x and g.posy == self.state.y:
                    self.socket.stepState.golds.remove(g)

        # If the DQN agent crashes into obstacles (Tree, Trap, Swamp), then it should be punished by a negative reward
        if self.state.mapInfo.get_obstacle(self.state.x,
                                           self.state.y) == TreeID:  # Tree
            reward -= 0.2
        if self.state.mapInfo.get_obstacle(self.state.x,
                                           self.state.y) == TrapID:  # Trap
            reward -= 0.2
        if self.state.mapInfo.get_obstacle(self.state.x,
                                           self.state.y) == SwampID:  # Swamp
            reward -= 0.2

        dis_pre = np.sqrt((self.pos_x_pre - self.pos_x_gold_first)**2 +
                          (self.pos_y_pre - self.pos_y_gold_first)**2)
        dis_curr = np.sqrt((self.state.x - self.pos_x_gold_first)**2 +
                           (self.state.y - self.pos_y_gold_first)**2)
        if (dis_curr - dis_pre) <= 0:  # The distance to the gold shrank (or held): reward ++
            reward += 0.1
        else:
            reward -= 0.1

        # If out of the map, then the DQN agent should be punished by a larger negative reward.
        if self.state.status == State.STATUS_ELIMINATED_WENT_OUT_MAP:
            reward += -10
        return reward

    def check_terminate(self):
        # Check the game status: the episode ends once the state leaves STATUS_PLAYING
        return self.state.status != State.STATUS_PLAYING
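
The distance-based term above acts as a simple shaping bonus; a standalone sketch of the same rule (positions are plain (x, y) tuples and step_bonus mirrors the 0.1 used in get_reward):

import numpy as np

def shaping_bonus(prev_pos, curr_pos, gold_pos, step_bonus=0.1):
    # +step_bonus when the Euclidean distance to the target gold shrinks (or holds), else -step_bonus
    dis_pre = np.sqrt((prev_pos[0] - gold_pos[0]) ** 2 + (prev_pos[1] - gold_pos[1]) ** 2)
    dis_curr = np.sqrt((curr_pos[0] - gold_pos[0]) ** 2 + (curr_pos[1] - gold_pos[1]) ** 2)
    return step_bonus if dis_curr - dis_pre <= 0 else -step_bonus
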
Example n. 19
class MinerEnv:
    def __init__(self, host, port):
        self.socket = GameSocket(host, port)
        self.state = State()

        self.score_pre = self.state.score  #Storing the last score for designing the reward function

    def start(self):  #connect to server
        self.socket.connect()

    def end(self):  #disconnect server
        self.socket.close()

    def send_map_info(self, request):  #tell server which map to run
        self.socket.send(request)

    def reset(self):  #start new game
        # Choosing a map in the list
        # mapID = np.random.randint(1, 6)  # Choosing a map ID from 5 maps in Maps folder randomly
        mapID = 1
        posID_x = np.random.randint(MAP_MAX_X)  # Choose a random initial x position for the DQN agent
        posID_y = np.random.randint(MAP_MAX_Y)  # Choose a random initial y position for the DQN agent
        # Create a request containing the map, the initial position, the initial energy, and the maximum number of steps of the DQN agent
        request = ("map" + str(mapID) + "," + str(posID_x) + "," +
                   str(posID_y) + ",50,100")
        # Send the request to the game environment (GAME_SOCKET_DUMMY.py)
        self.send_map_info(request)

        try:
            message = self.socket.receive()  #receive game info from server
            print(message)
            self.state.init_state(message)  #init state
            print(self.state.score)
        except Exception as e:
            import traceback
            traceback.print_exc()

    def step(self, action):  #step process
        self.socket.send(action)  #send action to server
        try:
            message = self.socket.receive()  #receive new state from server
            #print("New state: ", message)
            self.state.update_state(message)  #update to local state
            print(self.state.score)
        except Exception as e:
            import traceback
            traceback.print_exc()

    # Functions below are customized by the client
    def get_state(self):
        # Building the map
        view = np.zeros(
            [self.state.mapInfo.max_x + 1, self.state.mapInfo.max_y + 1, 2],
            dtype="float32")
        self.gold_map = np.zeros(
            [self.state.mapInfo.max_x + 1, self.state.mapInfo.max_y + 1],
            dtype=int)
        for i in range(self.state.mapInfo.max_x + 1):
            for j in range(self.state.mapInfo.max_y + 1):
                if self.state.mapInfo.get_obstacle(i, j) == TreeID:  # Tree
                    view[i, j, 0] = -20 * 1.0 / 20
                    # view[i, j, 0] = -TreeID
                elif self.state.mapInfo.get_obstacle(i, j) == TrapID:  # Trap
                    view[i, j, 0] = -10 * 1.0 / 20
                    # view[i, j, 0] = -TrapID
                elif self.state.mapInfo.get_obstacle(i, j) == SwampID:  # Swamp
                    view[i, j, 0] = self.state.mapInfo.get_obstacle_value(
                        i, j) * 1.0 / 20
                    # view[i, j, 0] = -SwampID
                elif self.state.mapInfo.gold_amount(i, j) > 0:
                    view[i, j,
                         0] = self.state.mapInfo.gold_amount(i, j) * 1.0 / 100
                    self.gold_map[i, j] = self.state.mapInfo.gold_amount(
                        i, j) / 50

        if self.state.status == 0:  # 0 == STATUS_PLAYING
            view[self.state.x, self.state.y, 1] = self.state.energy

        # for player in self.state.players:
        #     if player["playerId"] != self.state.id:
        #         view[player["posx"], player["posy"], 1] -= 1

        # Convert the DQNState from list to array for training
        DQNState = np.array(view)

        return DQNState

    def check_terminate(self):
        return self.state.status != State.STATUS_PLAYING
Example n. 20
class MinerEnv:
    def __init__(self, host, port):
        self.socket = GameSocket(host, port)
        self.state = State()

        self.score_pre = (
            self.state.score
        )  # Storing the last score for designing the reward function

    def start(self):  # connect to server
        self.socket.connect()

    def end(self):  # disconnect server
        self.socket.close()

    def send_map_info(self, request):  # tell server which map to run
        self.socket.send(request)

    def reset(self):  # start new game
        try:
            message = self.socket.receive()  # receive game info from server
            self.state.init_state(message)  # init state
        except Exception as e:
            import traceback

            traceback.print_exc()

    def step(self, action):  # step process
        self.socket.send(action)  # send action to server
        try:
            message = self.socket.receive()  # receive new state from server
            self.state.update_state(message)  # update to local state
        except Exception as e:
            import traceback

            traceback.print_exc()

    # Functions below are customized by the client
    def get_state(self):
        # Building the map
        view = np.zeros(
            [self.state.mapInfo.max_x + 1, self.state.mapInfo.max_y + 1],
            dtype=int)
        for i in range(self.state.mapInfo.max_x + 1):
            for j in range(self.state.mapInfo.max_y + 1):
                if self.state.mapInfo.get_obstacle(i, j) == TreeID:  # Tree
                    view[i, j] = -TreeID
                if self.state.mapInfo.get_obstacle(i, j) == TrapID:  # Trap
                    view[i, j] = -TrapID
                if self.state.mapInfo.get_obstacle(i, j) == SwampID:  # Swamp
                    view[i, j] = -SwampID
                if self.state.mapInfo.gold_amount(i, j) > 0:
                    view[i, j] = self.state.mapInfo.gold_amount(i, j)

        DQNState = view.flatten().tolist()  # Flatten the map matrix to a vector

        # Add position and energy of agent to the DQNState
        DQNState.append(self.state.x)
        DQNState.append(self.state.y)
        DQNState.append(self.state.energy)
        # Add position of bots
        for player in self.state.players:
            if player["playerId"] != self.state.id:
                DQNState.append(player["posx"])
                DQNState.append(player["posy"])

        # Convert the DQNState from list to array for training
        DQNState = np.array(DQNState)

        return DQNState

    def get_reward(self):
        # Calculate reward
        reward = 0
        score_action = self.state.score - self.score_pre
        self.score_pre = self.state.score
        if score_action > 0:
            # If the DQN agent crafts gold, then it should obtain a positive reward (equal to score_action)
            reward += score_action

        # If the DQN agent crashes into obstacles (Tree, Trap, Swamp), then it should be punished by a negative reward
        if (self.state.mapInfo.get_obstacle(self.state.x,
                                            self.state.y) == TreeID):  # Tree
            reward -= TreeID
        if (self.state.mapInfo.get_obstacle(self.state.x,
                                            self.state.y) == TrapID):  # Trap
            reward -= TrapID
        if (self.state.mapInfo.get_obstacle(self.state.x,
                                            self.state.y) == SwampID):  # Swamp
            reward -= SwampID

        # If out of the map, then the DQN agent should be punished by a larger negative reward.
        if self.state.status == State.STATUS_ELIMINATED_WENT_OUT_MAP:
            reward += -10

        # If it runs out of energy, the DQN agent should be punished by a larger negative reward.
        if self.state.status == State.STATUS_ELIMINATED_OUT_OF_ENERGY:
            reward += -10
        # print ("reward",reward)
        return reward

    def check_terminate(self):
        # Check the game status: the episode ends once the state leaves STATUS_PLAYING
        return self.state.status != State.STATUS_PLAYING
Example n. 21
class MinerEnv:
    def __init__(self, host, port):
        self.socket = GameSocket(host, port)
        self.state = State()
        self.energy_pre = self.state.energy
        self.score_pre = self.state.score  #Storing the last score for designing the reward function

    def start(self):  #connect to server
        self.socket.connect()

    def end(self):  #disconnect server
        self.socket.close()

    def send_map_info(self, request):  #tell server which map to run
        self.socket.send(request)

    def reset(self):  #start new game
        try:
            message = self.socket.receive()  #receive game info from server
            self.state.init_state(message)  #init state
        except Exception as e:
            import traceback
            traceback.print_exc()

    def step(self, action):  #step process
        self.socket.send(action)  #send action to server
        try:
            message = self.socket.receive()  #receive new state from server
            self.state.update_state(message)  #update to local state
        except Exception as e:
            import traceback
            traceback.print_exc()

    # Functions below are customized by the client
    def get_state(self):
        # Building the map
        channel_1 = np.full(
            [self.state.mapInfo.max_x + 1, self.state.mapInfo.max_y + 1], 0.05)
        for i in range(self.state.mapInfo.max_x + 1):
            for j in range(self.state.mapInfo.max_y + 1):
                obs_id, val = self.state.mapInfo.get_obstacle(i, j)
                if obs_id == TreeID:  # Tree
                    channel_1[i, j] = 0.3
                if obs_id == TrapID:  # Trap
                    channel_1[i, j] = 0.6
                if obs_id == SwampID:  # Swamp
                    if abs(val) == 5:
                        channel_1[i, j] = 0.2
                    if abs(val) == 20:
                        channel_1[i, j] = 0.4
                    if abs(val) > 20:
                        channel_1[i, j] = 0.8

        channel_2 = np.full(
            [self.state.mapInfo.max_x + 1, self.state.mapInfo.max_y + 1], 0.05)
        for i in range(self.state.mapInfo.max_x + 1):
            for j in range(self.state.mapInfo.max_y + 1):
                if self.state.mapInfo.gold_amount(i, j) > 0:
                    if self.state.mapInfo.gold_amount(i, j) < 500:
                        channel_2[i, j] = 0.3
                    if 900 > self.state.mapInfo.gold_amount(i, j) >= 500:
                        channel_2[i, j] = 0.6
                    if self.state.mapInfo.gold_amount(i, j) >= 900:
                        channel_2[i, j] = 1

        channel_3 = np.full(
            [self.state.mapInfo.max_x + 1, self.state.mapInfo.max_y + 1], 0.05)
        # Mark the agent's own position
        if self.state.x in range(21) and self.state.y in range(9):
            channel_3[self.state.x, self.state.y] = 1
        X = []
        Y = []
        for player in self.state.players:
            if player["playerId"] != self.state.id:
                X.append(player["posx"])
                Y.append(player["posy"])

        channel_4 = np.full(
            [self.state.mapInfo.max_x + 1, self.state.mapInfo.max_y + 1], 0.05)
        # Mark the first bot's position (skip if fewer bots are present)
        if len(X) > 0 and X[0] in range(21) and Y[0] in range(9):
            channel_4[X[0], Y[0]] = 1

        channel_5 = np.full(
            [self.state.mapInfo.max_x + 1, self.state.mapInfo.max_y + 1], 0.05)
        # Mark the second bot's position
        if len(X) > 1 and X[1] in range(21) and Y[1] in range(9):
            channel_5[X[1], Y[1]] = 1

        channel_6 = np.full(
            [self.state.mapInfo.max_x + 1, self.state.mapInfo.max_y + 1], 0.05)
        # Mark the third bot's position
        if len(X) > 2 and X[2] in range(21) and Y[2] in range(9):
            channel_6[X[2], Y[2]] = 1

        channel_7 = np.full(
            [self.state.mapInfo.max_x + 1, self.state.mapInfo.max_y + 1], 0.05)
        channel_7[:, :] = self.state.energy / 50.0  # broadcast the normalized energy over the whole channel
        DQNState = np.dstack([
            channel_1, channel_2, channel_3, channel_4, channel_5, channel_6,
            channel_7
        ])
        DQNState = np.rollaxis(DQNState, 2, 0)
        return DQNState

    def get_reward(self):
        # Calculate reward
        reward = 0
        score_action = self.state.score - self.score_pre
        self.score_pre = int(self.state.score)
        if score_action > 0 and self.state.lastAction == 5:
            reward += 6.25
        if score_action <= 0 and self.state.lastAction == 5:
            reward -= 2
        obs_id, value = self.state.mapInfo.get_obstacle(
            self.state.x, self.state.y)
        if obs_id not in [1, 2, 3] and self.state.lastAction != 4:
            reward += 0.5
        if obs_id == TreeID:  # Tree
            reward -= 2
        if obs_id == TrapID:  # Trap
            reward -= 2
        if obs_id == SwampID:  # Swamp
            if abs(value) <= 5:
                reward -= 0.5
            if 15 <= abs(value) <= 40:
                reward -= 4
            if abs(value) > 40:
                reward -= 6

        # if self.state.mapInfo.is_row_has_gold(self.state.x):
        #   if self.state.lastAction in [2,3]:
        #     reward += 1
        #   else:
        #     reward += 0.5
        # if self.state.mapInfo.is_column_has_gold(self.state.x):
        #   if self.state.lastAction in [0,1]:
        #     reward += 1
        #   else:
        #     reward += 0.5
        if self.state.lastAction == 4 and self.state.energy > 40:
            reward -= 4
        if self.state.lastAction == 4:
            reward += 1.75
        if self.state.status == State.STATUS_ELIMINATED_WENT_OUT_MAP:
            reward += -10
        if self.state.status == State.STATUS_ELIMINATED_OUT_OF_ENERGY:
            reward += -5
        return np.sign(reward) * np.log(1 + abs(reward))

    def check_terminate(self):
        # Check the game status: the episode ends once the state leaves STATUS_PLAYING
        return self.state.status != State.STATUS_PLAYING
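
The final return line compresses the raw reward with a signed logarithm, keeping the sign while damping large magnitudes; a standalone sketch with a few sample values:

import numpy as np

def squash(reward):
    # Same transform as the return statement above
    return np.sign(reward) * np.log(1 + abs(reward))

# squash(6.25) ~ 1.98, squash(-10) ~ -2.40, squash(0) == 0.0
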
Example n. 22
class MinerEnv:
    def __init__(self, host, port):
        self.socket = GameSocket(host, port)
        self.state = State()

        # define action space
        # self.INPUT_DIM = (21, 9, 2)  # The number of input values for the DQN model
        self.INPUT_DIM = (21, 9)
        self.ACTIONNUM = 6  # The number of actions output from the DQN model
        # define state space

        self.gameState = None
        self.reward = 0
        self.terminate = False
        self.gold_map = None
        self.dist_gold = None

        self.score_pre = self.state.score  # Storing the last score for designing the reward function
        self.energy_pre = self.state.energy  # Storing the last energy for designing the reward function

        self.viewer = None
        self.steps_beyond_done = None

    def start(self):  # connect to server
        self.socket.connect()

    def end(self):  # disconnect server
        self.socket.close()

    def send_map_info(self, request):  # tell server which map to run
        self.socket.send(request)

    def reset(self):  # start new game
        # Choosing a map in the list
        # mapID = np.random.randint(1, 6)  # Choosing a map ID from 5 maps in Maps folder randomly
        mapID = 1
        posID_x = np.random.randint(MAP_MAX_X)  # Choose a random initial x position for the DQN agent
        posID_y = np.random.randint(MAP_MAX_Y)  # Choose a random initial y position for the DQN agent
        # Create a request containing the map, the initial position, the initial energy, and the maximum number of steps of the DQN agent
        request = ("map" + str(mapID) + "," + str(posID_x) + "," +
                   str(posID_y) + ",50,100")
        # Send the request to the game environment (GAME_SOCKET_DUMMY.py)
        self.send_map_info(request)

        # Initialize the game environment
        try:
            message = self.socket.receive()  #receive game info from server
            self.state.init_state(message)  #init state
        except Exception as e:
            import traceback
            traceback.print_exc()

        self.gameState = self.get_state()  # Get the state after resetting.
        # This function (get_state()) is an example of creating a state for the DQN model
        distance = 500
        for i in range(self.state.mapInfo.max_x + 1):
            for j in range(self.state.mapInfo.max_y + 1):
                if self.gold_map[i][j] > 0:
                    distance_temp = abs(self.state.x - i) + abs(
                        self.state.y - j)  #- self.gold_map[i][j]
                    if distance > distance_temp:
                        distance = distance_temp
        self.dist_gold = distance

        self.score_pre = self.state.score  # Storing the last score for designing the reward function
        self.energy_pre = self.state.energy  # Storing the last energy for designing the reward function

        self.reward = 0  # The amount of rewards for the entire episode
        self.terminate = False  # The variable indicates that the episode ends
        self.steps_beyond_done = None
        return self.gameState

    def step(self, action):  # step process
        self.socket.send(str(action))  # send action to server
        try:
            message = self.socket.receive()  # receive new state from server
            self.state.update_state(message)  # update to local state
        except Exception as e:
            import traceback
            traceback.print_exc()

        self.gameState = self.get_state()
        self.reward = self.get_reward()
        done = self.check_terminate()
        return self.gameState, self.reward, done, {}

    # Functions below are customized by the client
    def get_state(self):
        # Building the map
        view = np.zeros(
            [self.state.mapInfo.max_x + 1, self.state.mapInfo.max_y + 1, 2],
            dtype="float32")
        self.gold_map = np.zeros(
            [self.state.mapInfo.max_x + 1, self.state.mapInfo.max_y + 1],
            dtype=int)
        for i in range(self.state.mapInfo.max_x + 1):
            for j in range(self.state.mapInfo.max_y + 1):
                if self.state.mapInfo.get_obstacle(i, j) == TreeID:  # Tree
                    view[i, j, 0] = -20 * 1.0 / 20
                    # view[i, j, 0] = -TreeID
                elif self.state.mapInfo.get_obstacle(i, j) == TrapID:  # Trap
                    view[i, j, 0] = -10 * 1.0 / 20
                    # view[i, j, 0] = -TrapID
                elif self.state.mapInfo.get_obstacle(i, j) == SwampID:  # Swamp
                    view[i, j, 0] = self.state.mapInfo.get_obstacle_value(
                        i, j) * 1.0 / 20
                    # view[i, j, 0] = -SwampID
                elif self.state.mapInfo.gold_amount(i, j) > 0:
                    view[i, j,
                         0] = self.state.mapInfo.gold_amount(i, j) * 1.0 / 100
                    self.gold_map[i, j] = self.state.mapInfo.gold_amount(
                        i, j) / 50

        if self.state.status == 0:  # 0 == STATUS_PLAYING
            view[self.state.x, self.state.y, 1] = self.state.energy * 1.0 / 10

        # for player in self.state.players:
        #     if player["playerId"] != self.state.id:
        #         view[player["posx"], player["posy"], 1] -= 1

        #Convert the DQNState from list to array for training
        DQNState = np.array(view)

        return DQNState

    def get_reward(self):
        # Calculate reward
        a = self.state.lastAction
        e = self.state.energy
        e_pre = self.energy_pre
        score_action = self.state.score - self.score_pre
        energy_consume = self.energy_pre - self.state.energy
        if energy_consume == 0:
            energy_consume = 20
        elif energy_consume < 0:
            energy_consume = energy_consume * 2
        self.score_pre = self.state.score
        self.energy_pre = self.state.energy

        # calculate distance to gold
        distance = 500
        for i in range(self.state.mapInfo.max_x + 1):
            for j in range(self.state.mapInfo.max_y + 1):
                if self.gold_map[i][j] > 0:
                    distance_temp = abs(self.state.x - i) + abs(
                        self.state.y - j)  # - self.gold_map[i][j]
                    if distance > distance_temp:
                        distance = distance_temp
        move_distance = (self.dist_gold - distance)
        if move_distance > 0:
            score_distance = move_distance * 20
        elif move_distance < 0:
            score_distance = move_distance * 10
        else:
            score_distance = 0
        self.dist_gold = distance
        reward = score_action * 2 + score_distance - energy_consume

        # If out of the map, then the DQN agent should be punished by a larger negative reward.
        if self.state.status == State.STATUS_ELIMINATED_WENT_OUT_MAP:
            reward += -50

        # If it runs out of energy, the DQN agent should be punished by a larger negative reward.
        if self.state.status == State.STATUS_ELIMINATED_OUT_OF_ENERGY:
            reward += -50
        # print ("reward",reward)
        return reward * 0.01

    def check_terminate(self):
        # Check the game status: the episode ends once the state leaves STATUS_PLAYING
        return self.state.status != State.STATUS_PLAYING

    def updateObservation(self):
        return

    def render(self, mode='human', close=False):
        return

    def close(self):
        """Override in your subclass to perform any necessary cleanup.
        Environments will automatically close() themselves when
        garbage collected or when the program exits.
        """
        raise NotImplementedError()

    def seed(self, seed=None):
        """Sets the seed for this env's random number generator(s).

        # Returns
            Returns the list of seeds used in this env's random number generators
        """
        raise NotImplementedError()

    def configure(self, *args, **kwargs):
        """Provides runtime configuration to the environment.
        This configuration should consist of data that tells your
        environment how to run (such as an address of a remote server,
        or path to your ImageNet data). It should not affect the
        semantics of the environment.
        """
        raise NotImplementedError()
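
Example n. 22 exposes a gym-style interface (reset() returning the initial state and step() returning a 4-tuple); a hedged rollout sketch, assuming a game server or the GAME_SOCKET_DUMMY backend is reachable at the placeholder HOST/PORT:

env = MinerEnv(HOST, PORT)   # HOST and PORT are placeholders
env.start()
state = env.reset()          # sends the map request and returns the initial state
done = False
total_reward = 0.0
while not done:
    action = np.random.randint(env.ACTIONNUM)  # random policy, for illustration only
    state, reward, done, _ = env.step(action)
    total_reward += reward
env.end()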