def __init__(self, host, port):
    self.socket = GameSocket(host, port)
    self.state = State()
    # Storing the last score for designing the reward function
    self.score_pre = self.state.score
def __init__(self, id):
    self.state = State()
    self.info = PlayerInfo(id)
    self.isMovingInc = False
    self.isHorizontalMoving = False
def __init__(self, id, estWood=-1, pEnergyToStep=-1, pStepToGold=-1, strategy=-1):
    self.state = State()
    self.info = PlayerInfo(id)
    if estWood == -1:  # randomize the bot's strength
        estWood = 5 + randrange(16)
        pEnergyToStep = (2 + randrange(9)) * 5
        pStepToGold = (1 + randrange(6)) * 50
    self.estWood = estWood
    self.pEnergyToStep = pEnergyToStep
    self.pStepToGold = pStepToGold
    # print("AddG_BOT", estWood, pEnergyToStep, pStepToGold)
    self.tx = -1
    self.ty = -1
    if strategy == -1:
        strategy = np.random.choice(range(6)) - 1
    self.selectTargetOption = strategy
class Bot4:
    ACTION_GO_LEFT = 0
    ACTION_GO_RIGHT = 1
    ACTION_GO_UP = 2
    ACTION_GO_DOWN = 3
    ACTION_FREE = 4
    ACTION_CRAFT = 5

    def __init__(self, id):
        self.state = State()
        self.info = PlayerInfo(id)

    def distance(self, posA, posB):
        # Squared Euclidean distance; only used for comparisons
        return (posA[0] - posB[0]) ** 2 + (posA[1] - posB[1]) ** 2

    def next_action(self):
        # Head greedily towards the nearest gold cell
        golds = self.state.mapInfo.golds
        mindis = 1000
        bot_posx, bot_posy = self.info.posx, self.info.posy
        target_x, target_y = bot_posx, bot_posy  # fall back to the current cell if no gold remains
        for gold in golds:
            dist = self.distance([gold["posx"], gold["posy"]], [bot_posx, bot_posy])
            if dist < mindis:
                mindis = dist
                target_x = gold["posx"]
                target_y = gold["posy"]
        if self.state.mapInfo.gold_amount(self.info.posx, self.info.posy) > 0:
            if self.info.energy >= 6:
                return self.ACTION_CRAFT
        if self.info.energy < 10:
            return self.ACTION_FREE
        if (target_x - bot_posx) < 0:
            return self.ACTION_GO_LEFT
        if (target_x - bot_posx) > 0:
            return self.ACTION_GO_RIGHT
        if (target_y - bot_posy) < 0:
            return self.ACTION_GO_UP
        if (target_y - bot_posy) > 0:
            return self.ACTION_GO_DOWN
        return np.random.randrange(0, 4)  # fixed: was `self.np.random.randrange(0, 4)`

    def new_game(self, data):
        try:
            self.state.init_state(data)
        except Exception:
            import traceback
            traceback.print_exc()

    def new_state(self, data):
        try:
            self.state.update_state(data)
        except Exception:
            import traceback
            traceback.print_exc()
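# Illustrative only: a minimal sketch of how a rule-based bot such as Bot4 can be
# driven. The `initial_message`/`step_message` payloads and the `send_action`
# transport helper are assumptions for demonstration; in this code base the
# messages normally come through GameSocket.
def run_bot_episode(bot, initial_message, send_action):
    """Drive `bot` until its local State reports the game is over."""
    bot.new_game(initial_message)            # parse the initial map/state payload
    while bot.state.status == State.STATUS_PLAYING:
        action = bot.next_action()           # rule-based decision
        step_message = send_action(action)   # hypothetical: send action, receive new state
        bot.new_state(step_message)          # refresh the bot's local State
    return bot.state.status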
def __init__(self, host, port):
    self.socket = GameSocket(host, port)
    self.state = State()
    self.is_moving_right = True  # default: go to the right side
    self.steps = 0
    self.pre_action = 0
def __init__(self, host, port):
    self.socket = GameSocket(host, port)
    self.state = State()
    self.pre_x = 0
    self.pre_y = 0
    self.pre_energy = 0
    # self.pre_action = ''
    # Storing the last score for designing the reward function
    self.score_pre = self.state.score
def __init__(self, host, port):
    self.socket = GameSocket(host, port)
    self.state = State()
    self.score_pre = self.state.score  # Storing the last score for designing the reward function
    self.decay = 27
    self.area_affect = 3
    self.affect_eff = 0.92
    self.view = None
    self.energy_view = None
    self.current_action = None
def __init__(self, id, estWood=-1, pEnergyToStep=-1, pStepToGold=-1):
    self.state = State()
    self.info = PlayerInfo(id)
    if estWood == -1:  # randomize the bot's strength
        estWood = 5 + randrange(16)
        pEnergyToStep = (2 + randrange(9)) * 5
        pStepToGold = (1 + randrange(6)) * 50
    self.estWood = estWood
    self.pEnergyToStep = pEnergyToStep
    self.pStepToGold = pStepToGold
def __init__(self, host, port):
    self.socket = GameSocket(host, port)
    self.state = State()
    self.is_moving_right = True  # default: go to the right side
    self.steps = 0
    self.pre_action = 0
    self.pre_x = -1
    self.pre_y = -1
    self.search_left_right = True
    self.largest_gold_x = -1
    self.largest_gold_y = -1
    self.left_or_right = 2
class Bot1:
    ACTION_GO_LEFT = 0
    ACTION_GO_RIGHT = 1
    ACTION_GO_UP = 2
    ACTION_GO_DOWN = 3
    ACTION_FREE = 4
    ACTION_CRAFT = 5

    def __init__(self, id):
        self.state = State()
        self.info = PlayerInfo(id)

    def next_action(self):
        # Craft whenever standing on gold with enough energy; otherwise
        # sweep left/right depending on the parity of the current row.
        if self.state.mapInfo.gold_amount(self.info.posx, self.info.posy) > 0:
            if self.info.energy >= 6:
                return self.ACTION_CRAFT
            else:
                return self.ACTION_FREE
        if self.info.energy < 5:
            return self.ACTION_FREE
        else:
            action = self.ACTION_GO_UP
            if self.info.posy % 2 == 0:
                if self.info.posx < self.state.mapInfo.max_x:
                    action = self.ACTION_GO_RIGHT
            else:
                if self.info.posx > 0:
                    action = self.ACTION_GO_LEFT
                else:
                    action = self.ACTION_GO_DOWN
            return action

    def new_game(self, data):
        try:
            self.state.init_state(data)
        except Exception:
            import traceback
            traceback.print_exc()

    def new_state(self, data):
        try:
            self.state.update_state(data)
        except Exception:
            import traceback
            traceback.print_exc()
class Bot2:
    ACTION_GO_LEFT = 0
    ACTION_GO_RIGHT = 1
    ACTION_GO_UP = 2
    ACTION_GO_DOWN = 3
    ACTION_FREE = 4
    ACTION_CRAFT = 5

    def __init__(self, id):
        self.state = State()
        self.info = PlayerInfo(id)

    def next_action(self):
        # print("bot2 :", self.info.posx, self.info.posy)
        if self.state.mapInfo.gold_amount(self.info.posx, self.info.posy) > 0:
            if self.info.energy >= 6:
                return self.ACTION_CRAFT
            else:
                return self.ACTION_FREE
        if self.info.energy < 21:
            return self.ACTION_FREE
        else:
            # Random walk; re-sample while the chosen move would leave the map
            action = np.random.randint(0, 4)
            while ((self.info.posx == 0 and action == 0)
                   or (self.info.posx == 28 and action == 1)
                   or (self.info.posy == 0 and action == 3)
                   or (self.info.posy == 28 and action == 2)):
                action = np.random.randint(0, 4)
            return action

    def new_game(self, data):
        try:
            self.state.init_state(data)
        except Exception:
            import traceback
            traceback.print_exc()

    def new_state(self, data):
        try:
            self.state.update_state(data)
        except Exception:
            import traceback
            traceback.print_exc()
def __init__(self, host, port):
    self.socket = GameSocket(host, port)
    self.state = State()
    # define action space
    self.INPUTNUM = 198  # The number of input values for the DQN model
    self.ACTIONNUM = 6  # The number of actions output from the DQN model
    # define state space
    self.gameState = None
    self.reward = 0
    self.terminate = False
    self.score_pre = self.state.score  # Storing the last score for designing the reward function
    self.energy_pre = self.state.energy  # Storing the last energy for designing the reward function
    self.viewer = None
    self.steps_beyond_done = None
def __init__(self, id):
    self.state = State()
    self.info = PlayerInfo(id)
    self.limit = 2
    state_dim = (2 * self.limit + 1) ** 2 + 3 + 3
    action_dim = 6
    max_action = 1.0
    # load model
    kwargs = {
        "state_dim": state_dim,
        "action_dim": action_dim,
        "max_action": max_action,
    }
    policy_file = "newTD3_Miner_0_2_get_state3"
    self.TreeID = 1
    self.TrapID = 2
    self.SwampID = 3
    self.policy = newTD3_bot.TD3(**kwargs)
    self.policy.load(f"./models_newTD3_2/{policy_file}")
def __init__(self, id):
    self.state = State()
    self.info = PlayerInfo(id)
    self.limit = 2
    state_dim = (2 * self.limit + 1) ** 2 + 3
    action_dim = 6
    max_action = 1.0
    # load model
    kwargs = {
        "state_dim": state_dim,
        "action_dim": action_dim,
        "max_action": max_action,
    }
    policy_file = "DDPG_Miner_0_2"
    self.TreeID = 1
    self.TrapID = 2
    self.SwampID = 3
    self.policy = DDPG.DDPG(**kwargs)
    self.policy.load(f"./models_DDPG/{policy_file}")
def __init__(self, id):
    self.state = State()
    self.info = PlayerInfo(id)
class MinerEnv:
    def __init__(self, host, port):
        self.socket = GameSocket(host, port)
        self.state = State()
        self.score_pre = self.state.score  # Storing the last score for designing the reward function

    def start(self):  # connect to server
        self.socket.connect()

    def end(self):  # disconnect server
        self.socket.close()

    def send_map_info(self, request):  # tell server which map to run
        self.socket.send(request)

    def reset(self):  # start new game
        try:
            message = self.socket.receive()  # receive game info from server
            print(message)
            self.state.init_state(message)  # init state
        except Exception:
            import traceback
            traceback.print_exc()

    def step(self, action):  # step process
        self.socket.send(action)  # send action to server
        try:
            message = self.socket.receive()  # receive new state from server
            self.state.update_state(message)  # update to local state
        except Exception:
            import traceback
            traceback.print_exc()

    # Functions are customized by client
    def get_state(self):
        # Local view
        view = np.zeros([5, 5])
        for i in range(-2, 3):
            for j in range(-2, 3):
                index_x = self.state.x + i
                index_y = self.state.y + j
                if (index_x < 0 or index_y < 0
                        or index_x >= self.state.mapInfo.max_x
                        or index_y >= self.state.mapInfo.max_y):
                    view[2 + i, 2 + j] = -1
                else:
                    if self.state.mapInfo.get_obstacle(index_x, index_y) == TreeID:
                        view[2 + i, 2 + j] = -1
                    if self.state.mapInfo.get_obstacle(index_x, index_y) == TrapID:
                        view[2 + i, 2 + j] = -1
                    if self.state.mapInfo.get_obstacle(index_x, index_y) == SwampID:
                        view[2 + i, 2 + j] = -1

        # Create the state
        DQNState = view.flatten().tolist()
        self.pos_x_gold_first = self.state.x
        self.pos_y_gold_first = self.state.y
        if len(self.state.mapInfo.golds) > 0:
            self.pos_x_gold_first = self.state.mapInfo.golds[0]["posx"]
            self.pos_y_gold_first = self.state.mapInfo.golds[0]["posy"]
        DQNState.append(self.pos_x_gold_first - self.state.x)
        DQNState.append(self.pos_y_gold_first - self.state.y)
        # Convert the DQNState from list to array for training
        DQNState = np.array(DQNState)
        return DQNState

    def check_terminate(self):
        # Checking the status of the game: it indicates the game ends or is playing
        return self.state.status != State.STATUS_PLAYING
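# Illustrative only: a minimal sketch of one interaction episode with MinerEnv.
# The host/port defaults, the `agent` object with an `act()` method, and the
# "map1,0,0,50,100" request string (mapID, start x, start y, initial energy,
# max steps -- the format used by reset() in other MinerEnv variants of this
# repo) are assumptions for demonstration.
def run_env_episode(agent, host="localhost", port=1111):
    env = MinerEnv(host, port)
    env.start()                            # open the socket to the game server
    env.send_map_info("map1,0,0,50,100")   # hypothetical map/position/energy/steps request
    env.reset()                            # receive and parse the initial state
    s = env.get_state()
    while not env.check_terminate():
        action = agent.act(s)              # hypothetical agent decision
        env.step(str(action))              # actions are typically sent as strings
        s = env.get_state()
    env.end()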
class Bot1: ACTION_GO_LEFT = 0 ACTION_GO_RIGHT = 1 ACTION_GO_UP = 2 ACTION_GO_DOWN = 3 ACTION_FREE = 4 ACTION_CRAFT = 5 def __init__(self, id, estWood=-1, pEnergyToStep=-1, pStepToGold=-1): self.state = State() self.info = PlayerInfo(id) if (estWood == -1): #random strenght estWood = (5 + randrange(16)) pEnergyToStep = (2 + randrange(9)) * 5 pStepToGold = (1 + randrange(6)) * 50 self.estWood = estWood self.pEnergyToStep = pEnergyToStep self.pStepToGold = pStepToGold #print ("AddG_BOT",estWood,pEnergyToStep,pStepToGold) def next_action(self): if (self.info.status != 0 and self.state.stepCount < 100): print("WTF", self.info.status) countPlayerAtGoldMine = 0 x, y = self.info.posx, self.info.posy r_Action = self.ACTION_FREE #for safe if (self.isKeepFree): self.isKeepFree = False return r_Action # 1st rule. Heighest Priority. Craft & Survive if (valid(y, x)): goldOnGround = self.state.mapInfo.gold_amount(x, y) countPlayerAtGoldMine = 0 for player in self.state.players: px, py = player['posx'], player['posy'] if (px == x and py == y): countPlayerAtGoldMine += 1 if (goldOnGround > 0): if (goldOnGround // countPlayerAtGoldMine > 0 and self.info.energy > 5): r_Action = self.ACTION_CRAFT else: g = Graph(9, 21) g.convertToMap(state=self.state, estWood=self.estWood, botInfo=self.info, isBot=True) g.BFS() target = g.getBFSResult(self.pEnergyToStep, self.pStepToGold) if (target == -1): print("NO TARGET") return self.ACTION_FREE ny, nx = g.traceBack(target) ny, nx = int(ny), int(nx) typeOb = self.state.mapInfo.get_obstacle(nx, ny) nextTrap = g.boardMap[ny, nx] if (typeOb == 1): # WOOOD nextTrap = 20 if (nextTrap >= self.info.energy): r_Action = self.ACTION_FREE else: if (ny == y): if (nx > x): r_Action = self.ACTION_GO_RIGHT elif (nx < x): r_Action = self.ACTION_GO_LEFT else: #nx==x if (ny > y): r_Action = self.ACTION_GO_DOWN elif (ny < y): r_Action = self.ACTION_GO_UP else: print("INVALID WTF") if (r_Action < 4 and self.info.energy <= 13 and self.state.stepCount < 90): self.isKeepFree = True return r_Action def new_game(self, data): try: self.isKeepFree = False self.state.init_state(data) except Exception as e: import traceback traceback.print_exc() def new_state(self, data): # action = self.next_action(); # self.socket.send(action) try: self.state.update_state(data) except Exception as e: import traceback traceback.print_exc() def printInfo(self): print("G_BOT", self.info.playerId, self.estWood, self.pEnergyToStep, self.pStepToGold, self.info.score, self.info.energy)
class MinerEnv: def __init__(self, host, port): self.socket = GameSocket(host, port) self.state = State() self.energy_pre = self.state.energy self.score_pre = self.state.score #Storing the last score for designing the reward function def start(self): #connect to server self.socket.connect() def end(self): #disconnect server self.socket.close() def send_map_info(self, request): #tell server which map to run self.socket.send(request) def reset(self): #start new game try: message = self.socket.receive() #receive game info from server self.state.init_state(message) #init state except Exception as e: import traceback traceback.print_exc() def step(self, action): #step process self.socket.send(action) #send action to server try: message = self.socket.receive() #receive new state from server self.state.update_state(message) #update to local state except Exception as e: import traceback traceback.print_exc() # Functions are customized by client def get_state(self): # Building the map channel_1 = np.full( [self.state.mapInfo.max_x + 1, self.state.mapInfo.max_y + 1], 0.05) for i in range(self.state.mapInfo.max_x + 1): for j in range(self.state.mapInfo.max_y + 1): obs_id, val = self.state.mapInfo.get_obstacle(i, j) if obs_id == TreeID: # Tree channel_1[i, j] = 0.3 if obs_id == TrapID: # Trap channel_1[i, j] = 0.6 if obs_id == SwampID: # Tree if abs(val) == -5: channel_1[i, j] = 0.2 if abs(val) == -20: channel_1[i, j] = 0.4 if abs(val) > 20: channel_1[i, j] = 0.8 channel_2 = np.full( [self.state.mapInfo.max_x + 1, self.state.mapInfo.max_y + 1], 0.05) for i in range(self.state.mapInfo.max_x + 1): for j in range(self.state.mapInfo.max_y + 1): if self.state.mapInfo.gold_amount(i, j) > 0: if self.state.mapInfo.gold_amount(i, j) < 500: channel_2[i, j] = 0.3 if 900 > self.state.mapInfo.gold_amount(i, j) >= 500: channel_2[i, j] = 0.6 if self.state.mapInfo.gold_amount(i, j) >= 900: channel_2[i, j] = 1 channel_3 = np.full( [self.state.mapInfo.max_x + 1, self.state.mapInfo.max_y + 1], 0.05) for i in range(self.state.mapInfo.max_x + 1): for j in range(self.state.mapInfo.max_y + 1): if self.state.x in range(21) and self.state.y in range(9): channel_3[self.state.x, self.state.y] = 1 X = [] Y = [] for player in self.state.players: if player["playerId"] != self.state.id: X.append(player["posx"]) Y.append(player["posy"]) channel_4 = np.full( [self.state.mapInfo.max_x + 1, self.state.mapInfo.max_y + 1], 0.05) for i in range(self.state.mapInfo.max_x + 1): for j in range(self.state.mapInfo.max_y + 1): if X[0] in range(21) and Y[0] in range(9): channel_4[X[0], Y[0]] = 1 channel_5 = np.full( [self.state.mapInfo.max_x + 1, self.state.mapInfo.max_y + 1], 0.05) for i in range(self.state.mapInfo.max_x + 1): for j in range(self.state.mapInfo.max_y + 1): if X[1] in range(21) and Y[1] in range(9): channel_5[X[1], Y[1]] = 1 channel_6 = np.full( [self.state.mapInfo.max_x + 1, self.state.mapInfo.max_y + 1], 0.05) for i in range(self.state.mapInfo.max_x + 1): for j in range(self.state.mapInfo.max_y + 1): if X[2] in range(21) and Y[2] in range(9): channel_6[X[2], Y[2]] = 1 channel_7 = np.full( [self.state.mapInfo.max_x + 1, self.state.mapInfo.max_y + 1], 0.05) for i in range(self.state.mapInfo.max_x + 1): for j in range(self.state.mapInfo.max_y + 1): channel_7[i, j] = float(self.state.energy / 50) DQNState = np.dstack([ channel_1, channel_2, channel_3, channel_4, channel_5, channel_6, channel_7 ]) DQNState = np.rollaxis(DQNState, 2, 0) return DQNState def get_reward(self): # Calculate reward reward = 0 score_action = self.state.score 
- self.score_pre self.score_pre = int(self.state.score) if score_action > 0 and self.state.lastAction == 5: reward += 6.25 if score_action <= 0 and self.state.lastAction == 5: reward -= 2 obs_id, value = self.state.mapInfo.get_obstacle( self.state.x, self.state.y) if obs_id not in [1, 2, 3] and self.state.lastAction != 4: reward += 0.5 if obs_id == TreeID: # Tree reward -= 2 if obs_id == TrapID: # Trap reward -= 2 if obs_id == SwampID: # Swamp if abs(value) <= -5: reward -= 0.5 if 15 <= abs(value) <= 40: reward -= 4 if abs(value) > 40: reward -= 6 # if self.state.mapInfo.is_row_has_gold(self.state.x): # if self.state.lastAction in [2,3]: # reward += 1 # else: # reward += 0.5 # if self.state.mapInfo.is_column_has_gold(self.state.x): # if self.state.lastAction in [0,1]: # reward += 1 # else: # reward += 0.5 if self.state.lastAction == 4 and self.state.energy > 40: reward -= 4 if self.state.lastAction == 4: reward += 1.75 if self.state.status == State.STATUS_ELIMINATED_WENT_OUT_MAP: reward += -10 if self.state.status == State.STATUS_ELIMINATED_OUT_OF_ENERGY: reward += -5 return np.sign(reward) * np.log(1 + abs(reward)) def check_terminate(self): #Checking the status of the game #it indicates the game ends or is playing return self.state.status != State.STATUS_PLAYING
class MinerEnv: def __init__(self, host, port): self.socket = GameSocket(host, port) self.state = State() self.score_pre = self.state.score #Storing the last score for designing the reward function def start(self): #connect to server self.socket.connect() def end(self): #disconnect server self.socket.close() def send_map_info(self, request): #tell server which map to run self.socket.send(request) def reset(self): #start new game self.state_x_pre = self.state.x self.state_y_pre = self.state.y self.last3position = [] self.Swamp_position = [] try: message = self.socket.receive() #receive game info from server self.state.init_state(message) #init state except Exception as e: import traceback traceback.print_exc() def step(self, action): #step process self.state_x_pre = self.state.x self.state_y_pre = self.state.y self.last3position.append([self.state.x, self.state.y]) if len(self.last3position) > 3: self.last3position.pop(0) #print(self.last3position) self.socket.send(action) #send action to server try: message = self.socket.receive() #receive new state from server self.state.update_state(message) #update to local state except Exception as e: import traceback traceback.print_exc() # Functions are customized by client def get_state(self): # Building the map view = np.zeros( [self.state.mapInfo.max_x + 1, self.state.mapInfo.max_y + 1], dtype=int) for i in range(self.state.mapInfo.max_x + 1): for j in range(self.state.mapInfo.max_y + 1): if self.state.mapInfo.get_obstacle(i, j) == TreeID: # Tree view[i, j] = -TreeID if self.state.mapInfo.get_obstacle(i, j) == TrapID: # Trap view[i, j] = -TrapID if self.state.mapInfo.get_obstacle(i, j) == SwampID: # Swamp view[i, j] = -SwampID if self.state.mapInfo.gold_amount(i, j) > 0: view[i, j] = self.state.mapInfo.gold_amount(i, j) DQNState = view.flatten().tolist( ) #Flattening the map matrix to a vector # Add position and energy of agent to the DQNState DQNState.append(self.state.x) DQNState.append(self.state.y) DQNState.append(self.state.energy) #Add position of bots for player in self.state.players: if player["playerId"] != self.state.id: DQNState.append(player["posx"]) DQNState.append(player["posy"]) #Convert the DQNState from list to array for training DQNState = np.array(DQNState) return DQNState def get_reward2(self): # Calculate reward reward = 0 score_action = self.state.score - self.score_pre self.score_pre = self.state.score if score_action > 0: #If the DQN agent crafts golds, then it should obtain a positive reward (equal score_action) reward += score_action #If the DQN agent crashs into obstacels (Tree, Trap, Swamp), then it should be punished by a negative reward if self.state.mapInfo.get_obstacle(self.state.x, self.state.y) == TreeID: # Tree reward -= TreeID * 3 * randrange(1, 5) if self.state.mapInfo.get_obstacle(self.state.x, self.state.y) == TrapID: # Trap reward -= TrapID * 3 if self.state.mapInfo.get_obstacle(self.state.x, self.state.y) == SwampID: # Swamp if [self.state.x, self.state.y] in self.Swamp_position: # go to Swamp again reward -= 15 else: reward -= SwampID * 3 # first time go to swamp self.Swamp_position.append([self.state.x, self.state.y]) if self.state.mapInfo.gold_amount( self.state_x_pre, self.state_y_pre ) >= 50 and self.state.lastAction != 5: # in gold but don't craft reward -= 10 if self.state.lastAction == 5 and score_action < 0: # not in gold but craft reward -= 10 if len(self.last3position ) == 3 and self.state.lastAction != 5: # back to same position if self.last3position[0] == self.last3position[2]: reward -= 3 if 
self.last3position[1] == self.last3position[2]: reward -= 3 if self.state.energy >= 45 and self.state.lastAction == 4: reward -= 7 # if self.state.status == State.STATUS_PLAYING: # reward += 0.5 # If out of the map, then the DQN agent should be punished by a larger nagative reward. if self.state.status == State.STATUS_ELIMINATED_WENT_OUT_MAP: reward += -40 #Run out of energy, then the DQN agent should be punished by a larger nagative reward. if self.state.status == State.STATUS_ELIMINATED_OUT_OF_ENERGY: reward += -20 # print ("reward",reward) return reward def get_reward(self): # Calculate reward reward = 0 score_action = self.state.score - self.score_pre self.score_pre = self.state.score if score_action > 0: #If the DQN agent crafts golds, then it should obtain a positive reward (equal score_action) reward += score_action / 50 #If the DQN agent crashs into obstacels (Tree, Trap, Swamp), then it should be punished by a negative reward if self.state.mapInfo.get_obstacle(self.state.x, self.state.y) == TreeID: # Tree reward -= 0.03 * randrange(1, 5) if self.state.mapInfo.get_obstacle(self.state.x, self.state.y) == TrapID: # Trap reward -= 0.06 * 3 if self.state.mapInfo.get_obstacle(self.state.x, self.state.y) == SwampID: # Swamp if [self.state.x, self.state.y] in self.Swamp_position: # go to Swamp again reward -= 0.15 else: reward -= 0.05 # first time go to swamp self.Swamp_position.append([self.state.x, self.state.y]) if self.state.mapInfo.gold_amount( self.state_x_pre, self.state_y_pre ) >= 50 and self.state.lastAction != 5: # in gold but don't craft reward -= 0.55 if self.state.lastAction == 5 and score_action < 0: # not in gold but craft reward -= 0.55 if len(self.last3position ) == 3 and self.state.lastAction != 5: # back to same position if self.last3position[0] == self.last3position[2]: reward -= 0.1 if self.last3position[1] == self.last3position[2]: reward -= 0.1 if self.state.energy >= 45 and self.state.lastAction == 4: reward -= 0.3 # if self.state.status == State.STATUS_PLAYING: # reward += 0.5 # If out of the map, then the DQN agent should be punished by a larger nagative reward. if self.state.status == State.STATUS_ELIMINATED_WENT_OUT_MAP: reward += -10 #Run out of energy, then the DQN agent should be punished by a larger nagative reward. 
if self.state.status == State.STATUS_ELIMINATED_OUT_OF_ENERGY: reward += -5 # print ("reward",reward) return reward def get_state_tensor(self, scale_map): n = scale_map view = np.zeros((n * (self.state.mapInfo.max_x + 1), n * (self.state.mapInfo.max_y + 1), 6)) for i in range(self.state.mapInfo.max_x + 1): for j in range(self.state.mapInfo.max_y + 1): if self.state.mapInfo.get_obstacle( i, j) == TreeID: # Tree # trap map view[n * i:n * i + n, n * j:n * j + n, 0] = -TreeID if self.state.mapInfo.get_obstacle( i, j) == TrapID: # Trap # trap map view[n * i:n * i + n, n * j:n * j + n, 0] = -TrapID if self.state.mapInfo.get_obstacle( i, j) == SwampID: # Swamp # trap map view[n * i:n * i + n, n * j:n * j + n, 0] = -SwampID if self.state.mapInfo.gold_amount(i, j) > 0: view[n * i:n * i + n, n * j:n * j + n, 0] = self.state.mapInfo.gold_amount( i, j) / 1000 ##/10 gold map for stt, player in enumerate(self.state.players): if player["playerId"] != self.state.id: try: if player["status"] not in [1, 2, 3]: try: view[n * player["posx"]:n * player["posx"] + n, n * player["posy"]:n * player["posy"] + n, stt + 1] = player["energy"] / 50 except: view[n * player["posx"]:n * player["posx"] + n, n * player["posy"]:n * player["posy"] + n, stt + 1] = 1 except: view[n * player["posx"]:n * player["posx"] + n, n * player["posy"]:n * player["posy"] + n, stt] = 1 # print(self.state.players) else: try: view[n * self.state.x:n * self.state.x + n, n * self.state.y:n * self.state.y + n, 2] = self.state.energy / 50 except: print('out of map') DQNState = np.array(view) return DQNState def get_state3(self, limit): # Building the map view = np.zeros([limit * 2 + 1, limit * 2 + 1], dtype=int) max_x, max_y = self.state.mapInfo.max_x, self.state.mapInfo.max_y xlimit_below = np.clip(self.state.x - limit, 0, max_x) - np.clip( self.state.x + limit - max_x, 0, limit) xlimit_up = np.clip(self.state.x + limit, 0, max_x) + np.clip( 0 - self.state.x + limit, 0, limit) ylimit_below = np.clip(self.state.y - limit, 0, max_y) - np.clip( self.state.y + limit - max_y, 0, limit) ylimit_up = np.clip(self.state.y + limit, 0, max_y) + np.clip( 0 - self.state.y + limit, 0, limit) #print(xlimit_below, xlimit_up, ylimit_below, ylimit_up, self.state.x, self.state.y) dmax, m, n, exist_gold = -1000, -5, 0.1, False x_maxgold, y_maxgold = self.state.x, self.state.y for i in range(max_x + 1): for j in range(max_y + 1): if self.state.mapInfo.gold_amount(i, j) >= 50: exist_gold = True d = m * ((self.state.x - i)**2 + (self.state.y - j)** 2) + n * self.state.mapInfo.gold_amount(i, j) if d > dmax: dmax = d x_maxgold, y_maxgold = i, j # position of cell is nearest and much gold if i in range(xlimit_below, xlimit_up + 1) and j in range( ylimit_below, ylimit_up + 1): if self.state.mapInfo.get_obstacle(i, j) == TreeID: # Tree view[i - xlimit_below, j - ylimit_below] = -TreeID if self.state.mapInfo.get_obstacle(i, j) == TrapID: # Trap view[i - xlimit_below, j - ylimit_below] = -TrapID if self.state.mapInfo.get_obstacle(i, j) == SwampID: # Swamp view[i - xlimit_below, j - ylimit_below] = -SwampID if self.state.mapInfo.gold_amount(i, j) > 0: view[i - xlimit_below, j - ylimit_below] = self.state.mapInfo.gold_amount( i, j) / 10 DQNState = view.flatten().tolist( ) #Flattening the map matrix to a vector # Add position and energy of agent to the DQNState DQNState.append(self.state.x - xlimit_below) DQNState.append(self.state.y - ylimit_below) DQNState.append(self.state.energy) #Add position of bots # for player in self.state.players: # if player["playerId"] != 
self.state.id: # DQNState.append(player["posx"]) # DQNState.append(player["posy"]) DQNState.append(self.state.x - x_maxgold) DQNState.append(self.state.y - y_maxgold) if exist_gold == False: DQNState.append(0) else: DQNState.append( self.state.mapInfo.gold_amount(x_maxgold, y_maxgold) / 10) #Convert the DQNState from list to array for training DQNState = np.array(DQNState) return DQNState def check_terminate(self): #Checking the status of the game #it indicates the game ends or is playing return self.state.status != State.STATUS_PLAYING
class Bot3:
    ACTION_GO_LEFT = 0
    ACTION_GO_RIGHT = 1
    ACTION_GO_UP = 2
    ACTION_GO_DOWN = 3
    ACTION_FREE = 4
    ACTION_CRAFT = 5

    def __init__(self, id):
        self.state = State()
        self.info = PlayerInfo(id)

    def next_action(self):
        if self.state.mapInfo.gold_amount(self.info.posx, self.info.posy) > 0:
            if self.info.energy >= 6:
                return self.ACTION_CRAFT
            else:
                return self.ACTION_FREE
        if self.info.energy < 5:
            return self.ACTION_FREE
        else:
            action = self.ACTION_GO_LEFT
            if self.info.posx % 2 == 0:
                if self.info.posy < self.state.mapInfo.max_y:
                    action = self.ACTION_GO_DOWN
            else:
                if self.info.posy > 0:
                    action = self.ACTION_GO_UP
                else:
                    action = self.ACTION_GO_RIGHT
            return action

    def act_sample(self, mystate):
        if self.state.mapInfo.gold_amount(mystate[-3], mystate[-2]) > 0:
            if mystate[-1] >= 6:
                return self.ACTION_CRAFT
            else:
                return self.ACTION_FREE
        if mystate[-1] < 5:
            return self.ACTION_FREE
        else:
            action = self.ACTION_GO_LEFT
            if self.info.posx % 2 == 0:
                if mystate[-2] < self.state.mapInfo.max_y:
                    action = self.ACTION_GO_DOWN
            else:
                if mystate[-2] > 0:
                    action = self.ACTION_GO_UP
                else:
                    action = self.ACTION_GO_RIGHT
            return action

    def new_game(self, data):
        try:
            self.state.init_state(data)
        except Exception:
            import traceback
            traceback.print_exc()

    def new_state(self, data):
        try:
            self.state.update_state(data)
        except Exception:
            import traceback
            traceback.print_exc()
class MinerEnv:
    def __init__(self, host, port):
        self.socket = GameSocket(host, port)
        self.state = State()
        self.score_pre = self.state.score  # Storing the last score for designing the reward function
        self.pos_x_pre = self.state.x
        self.pos_y_pre = self.state.y

    def start(self):  # connect to server
        self.socket.connect()

    def end(self):  # disconnect server
        self.socket.close()

    def send_map_info(self, request):  # tell server which map to run
        self.socket.send(request)

    def reset(self):  # start new game
        try:
            message = self.socket.receive()  # receive game info from server
            self.state.init_state(message)  # init state
            self.score_pre = self.state.score  # Storing the last score for designing the reward function
            self.pos_x_pre = self.state.x
            self.pos_y_pre = self.state.y
        except Exception:
            import traceback
            traceback.print_exc()

    def step(self, action):  # step process
        self.socket.send(action)  # send action to server
        try:
            message = self.socket.receive()  # receive new state from server
            self.state.update_state(message)  # update to local state
        except Exception:
            import traceback
            traceback.print_exc()

    # Functions are customized by client
    def get_state(self):
        # Local view
        view = np.zeros([5, 5])
        for i in range(-2, 3):
            for j in range(-2, 3):
                index_x = self.state.x + i
                index_y = self.state.y + j
                if (index_x < 0 or index_y < 0
                        or index_x >= self.state.mapInfo.max_x
                        or index_y >= self.state.mapInfo.max_y):
                    view[2 + i, 2 + j] = -1
                else:
                    if self.state.mapInfo.get_obstacle(index_x, index_y) == TreeID:
                        view[2 + i, 2 + j] = -1
                    if self.state.mapInfo.get_obstacle(index_x, index_y) == TrapID:
                        view[2 + i, 2 + j] = -1
                    if self.state.mapInfo.get_obstacle(index_x, index_y) == SwampID:
                        view[2 + i, 2 + j] = -1

        # Create the state
        DQNState = view.flatten().tolist()
        self.pos_x_gold_first = self.state.x
        self.pos_y_gold_first = self.state.y
        if len(self.state.mapInfo.golds) > 0:
            self.pos_x_gold_first = self.state.mapInfo.golds[0]["posx"]
            self.pos_y_gold_first = self.state.mapInfo.golds[0]["posy"]
        DQNState.append(self.pos_x_gold_first - self.state.x)
        DQNState.append(self.pos_y_gold_first - self.state.y)
        # Convert the DQNState from list to array for training
        DQNState = np.array(DQNState)
        return DQNState

    def get_reward(self):
        # Calculate reward
        reward = 0
        goldamount = self.state.mapInfo.gold_amount(self.state.x, self.state.y)
        if goldamount > 0:
            reward += 10  # goldamount
            # remove the gold
            for g in self.socket.stepState.golds:
                if g.posx == self.state.x and g.posy == self.state.y:
                    self.socket.stepState.golds.remove(g)
        # If the DQN agent crashes into obstacles (Tree, Trap, Swamp), it is punished by a negative reward
        if self.state.mapInfo.get_obstacle(self.state.x, self.state.y) == TreeID:  # Tree
            reward -= 0.2
        if self.state.mapInfo.get_obstacle(self.state.x, self.state.y) == TrapID:  # Trap
            reward -= 0.2
        if self.state.mapInfo.get_obstacle(self.state.x, self.state.y) == SwampID:  # Swamp
            reward -= 0.2
        # Distance shaping towards the first gold cell
        dis_pre = np.sqrt((self.pos_x_pre - self.pos_x_gold_first) ** 2
                          + (self.pos_y_pre - self.pos_y_gold_first) ** 2)
        dis_curr = np.sqrt((self.state.x - self.pos_x_gold_first) ** 2
                           + (self.state.y - self.pos_y_gold_first) ** 2)
        if (dis_curr - dis_pre) <= 0:  # Reducing the distance, reward ++
            reward += 0.1
        else:
            reward -= 0.1
        # If out of the map, then the DQN agent should be punished by a larger negative reward.
        if self.state.status == State.STATUS_ELIMINATED_WENT_OUT_MAP:
            reward += -10
        return reward

    def check_terminate(self):
        # Checking the status of the game: it indicates the game ends or is playing
        return self.state.status != State.STATUS_PLAYING
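# Illustrative only: the distance-shaping term used in get_reward() above, pulled
# out as a standalone helper so it can be checked in isolation. The function name
# and argument tuples are assumptions; the +/-0.1 bonus mirrors the logic above.
def distance_shaping(prev_pos, curr_pos, gold_pos):
    """Return +0.1 if the agent did not move farther from the target gold, else -0.1."""
    dis_pre = np.sqrt((prev_pos[0] - gold_pos[0]) ** 2 + (prev_pos[1] - gold_pos[1]) ** 2)
    dis_curr = np.sqrt((curr_pos[0] - gold_pos[0]) ** 2 + (curr_pos[1] - gold_pos[1]) ** 2)
    return 0.1 if (dis_curr - dis_pre) <= 0 else -0.1

# Example: moving from (3, 4) to (3, 3) with the target gold at (3, 0) yields +0.1.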
class MinerEnv:
    def __init__(self, host, port):
        self.socket = GameSocket(host, port)
        self.state = State()
        self.score_pre = self.state.score  # Storing the last score for designing the reward function

    def start(self):  # connect to server
        self.socket.connect()

    def end(self):  # disconnect server
        self.socket.close()

    def send_map_info(self, request):  # tell server which map to run
        self.socket.send(request)

    def reset(self):  # start new game
        # Choosing a map in the list
        # mapID = np.random.randint(1, 6)  # Choosing a map ID from the 5 maps in the Maps folder randomly
        mapID = 1
        posID_x = np.random.randint(MAP_MAX_X)  # Choosing an initial x position of the DQN agent randomly
        posID_y = np.random.randint(MAP_MAX_Y)  # Choosing an initial y position of the DQN agent randomly
        # Creating a request for initializing a map, initial position, the initial energy,
        # and the maximum number of steps of the DQN agent
        request = "map" + str(mapID) + "," + str(posID_x) + "," + str(posID_y) + ",50,100"
        # Send the request to the game environment (GAME_SOCKET_DUMMY.py)
        self.send_map_info(request)
        try:
            message = self.socket.receive()  # receive game info from server
            print(message)
            self.state.init_state(message)  # init state
            print(self.state.score)
        except Exception:
            import traceback
            traceback.print_exc()

    def step(self, action):  # step process
        self.socket.send(action)  # send action to server
        try:
            message = self.socket.receive()  # receive new state from server
            self.state.update_state(message)  # update to local state
            print(self.state.score)
        except Exception:
            import traceback
            traceback.print_exc()

    # Functions are customized by client
    def get_state(self):
        # Building the map: channel 0 holds terrain/gold, channel 1 holds the agent's energy
        view = np.zeros([self.state.mapInfo.max_x + 1, self.state.mapInfo.max_y + 1, 2], dtype="float32")
        self.gold_map = np.zeros([self.state.mapInfo.max_x + 1, self.state.mapInfo.max_y + 1], dtype=int)
        for i in range(self.state.mapInfo.max_x + 1):
            for j in range(self.state.mapInfo.max_y + 1):
                if self.state.mapInfo.get_obstacle(i, j) == TreeID:  # Tree
                    view[i, j, 0] = -20 * 1.0 / 20
                elif self.state.mapInfo.get_obstacle(i, j) == TrapID:  # Trap
                    view[i, j, 0] = -10 * 1.0 / 20
                elif self.state.mapInfo.get_obstacle(i, j) == SwampID:  # Swamp
                    view[i, j, 0] = self.state.mapInfo.get_obstacle_value(i, j) * 1.0 / 20
                elif self.state.mapInfo.gold_amount(i, j) > 0:
                    view[i, j, 0] = self.state.mapInfo.gold_amount(i, j) * 1.0 / 100
                    self.gold_map[i, j] = self.state.mapInfo.gold_amount(i, j) / 50
        if self.state.status == 0:
            view[self.state.x, self.state.y, 1] = self.state.energy
        # for player in self.state.players:
        #     if player["playerId"] != self.state.id:
        #         view[player["posx"], player["posy"], 1] -= 1
        # Convert the DQNState from list to array for training
        DQNState = np.array(view)
        return DQNState

    def check_terminate(self):
        return self.state.status != State.STATUS_PLAYING
class MyBot: ACTION_GO_LEFT = 0 ACTION_GO_RIGHT = 1 ACTION_GO_UP = 2 ACTION_GO_DOWN = 3 ACTION_FREE = 4 ACTION_CRAFT = 5 def __init__(self, host, port): self.socket = GameSocket(host, port) self.state = State() self.is_moving_right = True # default: go to right side self.steps = 0 self.pre_action = 0 def start(self): # connect to server self.socket.connect() def end(self): # disconnect server self.socket.close() def send_map_info(self, request): # tell server which map to run self.socket.send(request) def reset(self): # start new game try: message = self.socket.receive() # receive game info from server self.state.init_state(message) # init state except Exception as e: import traceback traceback.print_exc() def step(self, tmp_action): # step process self.socket.send(tmp_action) # send action to server try: message = self.socket.receive() # receive new state from server self.state.update_state(message) # update to local state except Exception as e: import traceback traceback.print_exc() def check_terminate(self): # Checking the status of the game # it indicates the game ends or is playing return self.state.status != State.STATUS_PLAYING def goLeftOrRight(self, my_bot_x, my_bot_y, initial_flag=False): total_gold_left = 0 total_gold_right = 0 for gold in self.state.mapInfo.golds: gold_amount = gold["amount"] if gold_amount > 0: i = gold["posx"] j = gold["posy"] if i >= my_bot_x: total_gold_right += gold_amount if i <= my_bot_x: total_gold_left += gold_amount count_players_left = 0 count_players_right = 0 for player in self.state.players: if "energy" in player: if player["status"] == self.state.STATUS_PLAYING: if player["posx"] >= my_bot_x: count_players_right += 1 if player["posx"] <= my_bot_x: count_players_left += 1 elif initial_flag: # 0 step, initial state if player["posx"] >= my_bot_x: count_players_right += 1 if player["posx"] <= my_bot_x: count_players_left += 1 total_gold_left = total_gold_left / count_players_left total_gold_right = total_gold_right / count_players_right # 1 ==> left; 2 ==> both; 3 ==> right if total_gold_left > total_gold_right: return 1 elif total_gold_left == total_gold_right: return 2 else: return 3 def myGetGoldAmount(self, x, y, initial_flag=False, are_we_here=False): distance = abs(x - self.state.x) + abs(y - self.state.y) gold_on_ground = self.state.mapInfo.gold_amount(x, y) if gold_on_ground == 0: return 0 count_players = 0 for player in self.state.players: if player["posx"] == x and player["posy"] == y: if "energy" in player: if player["status"] == self.state.STATUS_PLAYING: count_players += 1 elif initial_flag: # 0 step, initial state count_players += 1 if are_we_here: return gold_on_ground / count_players else: return gold_on_ground / (count_players + 1) - (distance * 50) - ( 50 * count_players * distance ) # +1 because assuming that we will come here def findLargestGold(self, initial_flag=False, leftOrRight=2): my_bot_x, my_bot_y = self.state.x, self.state.y largest_gold_x = -1 largest_gold_y = -1 max_gold = -100000 for goal in self.state.mapInfo.golds: if leftOrRight == 2: if goal["amount"] > 0: i = goal["posx"] j = goal["posy"] distance = abs(i - self.state.x) + abs(j - self.state.y) count_players = 0 for player in self.state.players: if player["posx"] == i and player["posy"] == j: if "energy" in player: if player[ "status"] == self.state.STATUS_PLAYING: count_players += 1 elif initial_flag: # 0 step, initial state count_players += 1 gold_amount = (goal["amount"] / (count_players + 1)) - ( distance * 50) - (50 * count_players * distance) if gold_amount 
> max_gold: largest_gold_x = i largest_gold_y = j max_gold = gold_amount elif gold_amount == max_gold: prev_distance = (largest_gold_x - my_bot_x) * (largest_gold_x - my_bot_x) + \ (largest_gold_y - my_bot_y) * (largest_gold_y - my_bot_y) new_distance = (i - my_bot_x) * (i - my_bot_x) + ( j - my_bot_y) * (j - my_bot_y) if new_distance < prev_distance: largest_gold_x = i largest_gold_y = j max_gold = gold_amount # only search at left side if leftOrRight == 1: if goal["amount"] > 0: i = goal["posx"] j = goal["posy"] if i <= my_bot_x: distance = abs(i - self.state.x) + abs(j - self.state.y) count_players = 0 for player in self.state.players: if player["posx"] == i and player["posy"] == j: if "energy" in player: if player[ "status"] == self.state.STATUS_PLAYING: count_players += 1 elif initial_flag: # 0 step, initial state count_players += 1 gold_amount = (goal["amount"] / (count_players + 1)) - ( distance * 50) - ( 50 * count_players * distance) if gold_amount > max_gold: largest_gold_x = i largest_gold_y = j max_gold = gold_amount elif gold_amount == max_gold: prev_distance = (largest_gold_x - my_bot_x) * (largest_gold_x - my_bot_x) + \ (largest_gold_y - my_bot_y) * (largest_gold_y - my_bot_y) new_distance = (i - my_bot_x) * (i - my_bot_x) + ( j - my_bot_y) * (j - my_bot_y) if new_distance < prev_distance: largest_gold_x = i largest_gold_y = j max_gold = gold_amount # only search at right side if leftOrRight == 3: if goal["amount"] > 0: i = goal["posx"] j = goal["posy"] if i >= my_bot_x: distance = abs(i - self.state.x) + abs(j - self.state.y) count_players = 0 for player in self.state.players: if player["posx"] == i and player["posy"] == j: if "energy" in player: if player[ "status"] == self.state.STATUS_PLAYING: count_players += 1 elif initial_flag: # 0 step, initial state count_players += 1 gold_amount = (goal["amount"] / (count_players + 1)) - ( distance * 50) - ( 50 * count_players * distance) if gold_amount > max_gold: largest_gold_x = i largest_gold_y = j max_gold = gold_amount elif gold_amount == max_gold: prev_distance = (largest_gold_x - my_bot_x) * (largest_gold_x - my_bot_x) + \ (largest_gold_y - my_bot_y) * (largest_gold_y - my_bot_y) new_distance = (i - my_bot_x) * (i - my_bot_x) + ( j - my_bot_y) * (j - my_bot_y) if new_distance < prev_distance: largest_gold_x = i largest_gold_y = j max_gold = gold_amount return largest_gold_x, largest_gold_y def findLargestGoldInSmallMap(self, des_x, des_y): x, y = self.state.x, self.state.y largest_gold_x = None largest_gold_y = None next_step_x = 0 next_step_y = 0 if x < des_x: next_step_x = 1 else: next_step_x = -1 if y < des_y: next_step_y = 1 else: next_step_y = -1 max_gold = -100000 while x != des_x + next_step_x: while y != des_y + next_step_y: if x != des_x or y != des_y: gold_amount = self.myGetGoldAmount(x, y) if gold_amount > 0: if gold_amount > max_gold: largest_gold_x = x largest_gold_y = y max_gold = gold_amount elif gold_amount == max_gold: prev_distance = (largest_gold_x - self.state.x) * (largest_gold_x - self.state.x) + \ (largest_gold_y - self.state.y) * (largest_gold_y - self.state.y) new_distance = (x - self.state.x) * ( x - self.state.x) + (y - self.state.y) * ( y - self.state.y) if new_distance < prev_distance: largest_gold_x = x largest_gold_y = y max_gold = gold_amount y += next_step_y y = self.state.y x += next_step_x return largest_gold_x, largest_gold_y def getActionBaseOnEnergy(self, action_option_1, action_option_2): my_bot_x, my_bot_y = self.state.x, self.state.y n_action = action_option_1 require_energy = 100 
if action_option_1 == self.ACTION_GO_RIGHT: next_x = my_bot_x + 1 else: next_x = my_bot_x - 1 if action_option_2 == self.ACTION_GO_DOWN: next_y = my_bot_y + 1 else: next_y = my_bot_y - 1 energy_1 = 1 energy_2 = 1 gold = self.state.mapInfo.gold_amount(next_x, my_bot_y) if gold > 0: energy_1 = 4 gold = self.state.mapInfo.gold_amount(my_bot_x, next_y) if gold > 0: energy_2 = 4 for obstacle in self.state.mapInfo.obstacles: i = obstacle["posx"] j = obstacle["posy"] if i == next_x and j == my_bot_y: if obstacle["type"] == 1: # Tree energy_1 = 20 elif obstacle["type"] == 2: # Trap if obstacle["value"] == -10: energy_1 = 10 elif obstacle["type"] == 3: # Swamp energy_1 = -obstacle["value"] if i == my_bot_x and j == next_y: if obstacle["type"] == 1: # Tree energy_2 = 20 elif obstacle["type"] == 2: # Trap if obstacle["value"] == -10: energy_2 = 10 elif obstacle["type"] == 3: # Swamp energy_2 = -obstacle["value"] if energy_1 < energy_2: n_action = action_option_1 require_energy = energy_1 else: n_action = action_option_2 require_energy = energy_2 if self.state.energy <= require_energy: n_action = self.ACTION_FREE #print("require_energy = {0}".format(require_energy)) #print("choose action = {0}".format(n_action)) return n_action def goToTarget(self, des_x, des_y): n_action = self.ACTION_FREE require_energy = 100 my_bot_x, my_bot_y = self.state.x, self.state.y next_my_bot_x = my_bot_x next_my_bot_y = my_bot_y if my_bot_x == des_x: if my_bot_y < des_y: n_action = self.ACTION_GO_DOWN next_my_bot_y += 1 else: n_action = self.ACTION_GO_UP next_my_bot_y -= 1 elif my_bot_y == des_y: if my_bot_x < des_x: n_action = self.ACTION_GO_RIGHT next_my_bot_x += 1 else: n_action = self.ACTION_GO_LEFT next_my_bot_x -= 1 else: if my_bot_x < des_x: action_option_1 = self.ACTION_GO_RIGHT else: action_option_1 = self.ACTION_GO_LEFT if my_bot_y < des_y: action_option_2 = self.ACTION_GO_DOWN else: action_option_2 = self.ACTION_GO_UP n_action = self.getActionBaseOnEnergy(action_option_1, action_option_2) return n_action require_energy = 1 gold_amount = self.state.mapInfo.gold_amount(next_my_bot_x, next_my_bot_y) if gold_amount > 0: require_energy = 4 for obstacle in self.state.mapInfo.obstacles: i = obstacle["posx"] j = obstacle["posy"] if i == next_my_bot_x and j == next_my_bot_y: if obstacle["type"] == 1: # Tree require_energy = 20 elif obstacle["type"] == 2: # Trap if obstacle["value"] == -10: require_energy = 10 elif obstacle["type"] == 3: # Swamp require_energy = -obstacle["value"] if self.state.energy <= require_energy: n_action = self.ACTION_FREE return n_action def next_action(self, initial_flag=False): my_bot_x, my_bot_y = self.state.x, self.state.y n_action = self.ACTION_FREE gold_on_ground = self.myGetGoldAmount(my_bot_x, my_bot_y, initial_flag, are_we_here=True) energy = self.state.energy if gold_on_ground > 0: if energy <= 5: n_action = self.ACTION_FREE elif energy >= (gold_on_ground / 50) * 5: n_action = self.ACTION_CRAFT elif self.pre_action == self.ACTION_FREE and energy < 38: n_action = self.ACTION_FREE else: n_action = self.ACTION_CRAFT else: leftOrRight = 2 if self.steps < 30: leftOrRight = self.goLeftOrRight(my_bot_x, my_bot_y, initial_flag) largest_gold_x, largest_gold_y = self.findLargestGold( initial_flag, leftOrRight) target_x = largest_gold_x target_y = largest_gold_y while True: tmp_x, tmp_y = self.findLargestGoldInSmallMap( target_x, target_y) if (tmp_x is None) or (tmp_y is None): break target_x = tmp_x target_y = tmp_y n_action = self.goToTarget(target_x, target_y) self.steps += 1 self.pre_action = 
n_action return n_action
class MinerEnv:
    def __init__(self, host, port):
        self.socket = GameSocket(host, port)
        self.state = State()
        self.score_pre = self.state.score  # Storing the last score for designing the reward function

    def start(self):  # connect to server
        self.socket.connect()

    def end(self):  # disconnect server
        self.socket.close()

    def send_map_info(self, request):  # tell server which map to run
        self.socket.send(request)

    def reset(self):  # start new game
        try:
            message = self.socket.receive()  # receive game info from server
            print(message)
            self.state.init_state(message)  # init state
        except Exception:
            import traceback
            traceback.print_exc()

    def step(self, action):  # step process
        self.socket.send(action)  # send action to server
        try:
            message = self.socket.receive()  # receive new state from server
            self.state.update_state(message)  # update to local state
        except Exception:
            import traceback
            traceback.print_exc()

    # Functions are customized by client
    def get_state(self):
        # Building the map
        view = np.zeros([self.state.mapInfo.max_x + 1, self.state.mapInfo.max_y + 1], dtype=int)
        for i in range(self.state.mapInfo.max_x + 1):
            for j in range(self.state.mapInfo.max_y + 1):
                if self.state.mapInfo.get_obstacle(i, j) == TreeID:  # Tree
                    view[i, j] = -TreeID
                if self.state.mapInfo.get_obstacle(i, j) == TrapID:  # Trap
                    view[i, j] = -TrapID
                if self.state.mapInfo.get_obstacle(i, j) == SwampID:  # Swamp
                    view[i, j] = -SwampID
                if self.state.mapInfo.gold_amount(i, j) > 0:
                    view[i, j] = self.state.mapInfo.gold_amount(i, j)

        DQNState = view.flatten().tolist()  # Flattening the map matrix to a vector
        # Add position and energy of agent to the DQNState
        DQNState.append(self.state.x)
        DQNState.append(self.state.y)
        DQNState.append(self.state.energy)
        # Add position of bots
        for player in self.state.players:
            if player["playerId"] != self.state.id:
                DQNState.append(player["posx"])
                DQNState.append(player["posy"])
        # Convert the DQNState from list to array
        DQNState = np.array(DQNState)
        return DQNState

    def check_terminate(self):
        return self.state.status != State.STATUS_PLAYING
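# Illustrative only: a sanity check of the flattened state size produced by
# get_state() above. On a 21x9 map with three opponent bots this is
# 21*9 (map) + 3 (x, y, energy) + 3*2 (bot positions) = 198, which matches the
# INPUTNUM = 198 used by the DQN variants of MinerEnv in this repo. The map size
# and bot count are assumptions about the default competition setup.
def expected_state_size(width=21, height=9, num_bots=3):
    return width * height + 3 + 2 * num_bots

assert expected_state_size() == 198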
class MinerEnv: def __init__(self, host, port): self.socket = GameSocket(host, port) self.state = State() self.score_pre = self.state.score # Storing the last score for designing the reward function self.energy_pre = self.state.energy #self.x_pre = self.state.x #self.y_pre = self.state.y def start(self): # connect to server self.socket.connect() def end(self): # disconnect server self.socket.close() def send_map_info(self, request): # tell server which map to run self.socket.send(request) def reset(self): # start new game try: message = self.socket.receive() # receive game info from server self.state.init_state(message) # init state except Exception as e: import traceback traceback.print_exc() def step(self, action): # step process self.socket.send(action) # send action to server try: message = self.socket.receive() # receive new state from server self.state.update_state(message) # update to local state except Exception as e: import traceback traceback.print_exc() # Functions are customized by client def get_state(self, remain_steps, initial_flag=False): # update pre position, score, energy #self.x_pre = self.state.x #self.y_pre = self.state.y self.score_pre = self.state.score self.energy_pre = self.state.energy # depth = 3 # goal, min_energy, max_energy depth = 15 # goal, min_energy, max_energy, 4 player position goal_depth = 0 min_energy_depth = 1 max_energy_depth = 2 my_agent_depth = 3 bot1_depth = 4 bot2_depth = 5 bot3_depth = 6 goal_pos = 7 tree_pos = 8 trap_pos = 9 swamp_pos_5 = 10 swamp_pos_20 = 11 swamp_pos_40 = 12 swamp_pos_100 = 13 ground_position = 14 # len_player_infor = 6 * 4 len_player_infor = 2 + 8 + 6 # max_goal = 67 * 50 * 4 # assume 67 steps for mining and 33 steps for relaxing max_goal = 1250 max_energy = 100 # max_x = self.state.mapInfo.max_x # max_y = self.state.mapInfo.max_y max_player_energy = 50 max_score = 3000 # max_score = 67 * 50 max_last_action = 6 + 1 # 1 because of None max_status = 5 # Building the map view_1 = np.zeros([ self.state.mapInfo.max_x + 1, self.state.mapInfo.max_y + 1, depth ], dtype=float) for i in range(self.state.mapInfo.max_x + 1): for j in range(self.state.mapInfo.max_y + 1): # ground view_1[i, j, min_energy_depth] = -1 / max_energy view_1[i, j, max_energy_depth] = -1 / max_energy view_1[i, j, ground_position] = 1 goal = self.state.mapInfo.gold_amount(i, j) if goal > 0: view_1[i, j, ground_position] = 0 view_1[i, j, goal_pos] = 1 view_1[i, j, min_energy_depth] = -4 / max_energy view_1[i, j, max_energy_depth] = -4 / max_energy view_1[i, j, goal_depth] = goal / max_goal for obstacle in self.state.mapInfo.obstacles: i = obstacle["posx"] j = obstacle["posy"] if obstacle["type"] == TreeID: # Tree view_1[i, j, ground_position] = 0 view_1[i, j, tree_pos] = 1 view_1[i, j, min_energy_depth] = -5 / max_energy # -5 ~ -20 view_1[i, j, max_energy_depth] = -20 / max_energy # -5 ~ -20 elif obstacle["type"] == TrapID: # Trap if obstacle["value"] != 0: view_1[i, j, ground_position] = 0 view_1[i, j, trap_pos] = 1 view_1[i, j, min_energy_depth] = obstacle["value"] / max_energy view_1[i, j, max_energy_depth] = obstacle["value"] / max_energy elif obstacle["type"] == SwampID: # Swamp view_1[i, j, ground_position] = 0 view_1[i, j, min_energy_depth] = obstacle[ "value"] / max_energy # -5, -20, -40, -100 view_1[i, j, max_energy_depth] = obstacle[ "value"] / max_energy # -5, -20, -40, -100 if obstacle["value"] == -5: view_1[i, j, swamp_pos_5] = 1 elif obstacle["value"] == -20: view_1[i, j, swamp_pos_20] = 1 elif obstacle["value"] == -40: view_1[i, j, swamp_pos_40] = 
1 elif obstacle["value"] == -100: view_1[i, j, swamp_pos_100] = 1 """ for goal in self.state.mapInfo.golds: i = goal["posx"] j = goal["posy"] view_1[i, j, min_energy_depth] = 4 / max_energy view_1[i, j, max_energy_depth] = 4 / max_energy view_1[i, j, goal_depth] = goal["amount"] / max_goal """ # Add player's information view_2 = np.zeros([len_player_infor * 4 + 1], dtype=float) # +1 remaining steps index_player = 0 if (0 <= self.state.x <= self.state.mapInfo.max_x) and \ (0 <= self.state.y <= self.state.mapInfo.max_y): view_1[self.state.x, self.state.y, my_agent_depth] = 1 view_2[index_player * len_player_infor + 0] = self.state.energy / max_player_energy view_2[index_player * len_player_infor + 1] = self.state.score / max_score if self.state.lastAction is None: # 0 step view_2[index_player * len_player_infor + 2 + max_last_action] = 1 else: # > 1 step view_2[index_player * len_player_infor + 2 + self.state.lastAction] = 1 view_2[index_player * len_player_infor + 2 + max_last_action + 1 + self.state.status] = 1 bot_depth = my_agent_depth for player in self.state.players: if player["playerId"] != self.state.id: index_player += 1 bot_depth += 1 if (0 <= player["posx"] <= self.state.mapInfo.max_x) and \ (0 <= player["posy"] <= self.state.mapInfo.max_y): if "energy" in player: # > 1 step if player["status"] == self.state.STATUS_PLAYING: view_1[player["posx"], player["posy"], bot_depth] = 1 view_2[index_player * len_player_infor + 0] = player["energy"] / max_player_energy view_2[index_player * len_player_infor + 1] = player["score"] / max_score view_2[index_player * len_player_infor + 2 + player["lastAction"]] = 1 # one hot view_2[index_player * len_player_infor + 2 + max_last_action + 1 + player["status"]] = 1 elif initial_flag: # 0 step, initial state view_1[player["posx"], player["posy"], bot_depth] = 1 view_2[index_player * len_player_infor + 0] = 50 / max_player_energy view_2[index_player * len_player_infor + 1] = 0 / max_score view_2[index_player * len_player_infor + 2 + max_last_action] = 1 # one hot view_2[index_player * len_player_infor + 2 + max_last_action + 1 + self.state.STATUS_PLAYING] = 1 view_2[-1] = remain_steps / 100 # Convert the DQNState from list to array for training DQNState_map = np.array(view_1) DQNState_users = np.array(view_2) return DQNState_map, DQNState_users def get_reward(self, num_of_wrong_relax, num_of_wrong_mining): # return -0.01 ~ 0.01 # reward must target to mine goal max_reward = 50 reward_died = -50 # ~ double max reward # reward_died = -25 # let a try reward_enter_goal = max_reward / 20 # 5 # Calculate reward reward = 0 # moving, because agent will die at the max step energy_action = self.state.energy - self.energy_pre # < 0 if not relax score_action = self.state.score - self.score_pre # >= 0 if score_action > 0: reward = score_action / 2500 # max ~2500 / episode else: # moving #if int(self.state.lastAction) < 4: # # enter gold # if self.state.mapInfo.gold_amount(self.state.x, self.state.y) > 0: # reward = reward_enter_goal / 2500 # mining but cannot get gold if (int(self.state.lastAction) == 5) and (score_action == 0): # reward = reward_died / 10 / max_reward num_of_wrong_mining += 1 # relax when energy > 40 or cannot get more energy elif int(self.state.lastAction) == 4: if self.energy_pre > 40 or energy_action == 0: # reward = reward_died / 10 / max_reward num_of_wrong_relax += 1 # at gold but move to ground # if (int(self.state.lastAction) < 4) and (self.state.mapInfo.gold_amount(self.x_pre, self.y_pre) > 0) \ # and 
(self.state.mapInfo.gold_amount(self.state.x, self.state.y) == 0): # reward = reward_died # relax when energy > 40 #elif self.energy_pre > 40 and int(self.state.lastAction) == 4: # reward = reward_died / 4 # relax but cannot get more energy #elif int(self.state.lastAction) == 4 and energy_action == 0: # reward = reward_died / 4 # If out of the map, then the DQN agent should be punished by a larger negative reward. #if self.state.status == State.STATUS_ELIMINATED_WENT_OUT_MAP or self.state.status == State.STATUS_ELIMINATED_INVALID_ACTION: # reward = reward_died / max_reward #elif self.state.status == State.STATUS_ELIMINATED_OUT_OF_ENERGY or self.state.status == State.STATUS_STOP_EMPTY_GOLD \ # or self.state.status == State.STATUS_STOP_END_STEP: if self.state.status != State.STATUS_PLAYING: if self.state.score == 0: reward = reward_died / max_reward # -1 if self.state.status == State.STATUS_ELIMINATED_WENT_OUT_MAP or self.state.status == State.STATUS_ELIMINATED_OUT_OF_ENERGY: reward = reward_died / max_reward # -1 # print ("reward",reward) #return reward / max_reward / self.state.mapInfo.maxStep # 100 steps return reward, num_of_wrong_relax, num_of_wrong_mining def check_terminate(self): # Checking the status of the game # it indicates the game ends or is playing return self.state.status != State.STATUS_PLAYING
class MinerEnv:
    def __init__(self, host, port):
        self.socket = GameSocket(host, port)
        self.state = State()

        # define action space
        self.INPUTNUM = 198  # The number of input values for the DQN model
        self.ACTIONNUM = 6  # The number of actions output from the DQN model

        # define state space
        self.gameState = None
        self.reward = 0
        self.terminate = False

        self.score_pre = self.state.score  # Storing the last score for designing the reward function
        self.energy_pre = self.state.energy  # Storing the last energy for designing the reward function

        self.viewer = None
        self.steps_beyond_done = None

    def start(self):  # connect to server
        self.socket.connect()

    def end(self):  # disconnect server
        self.socket.close()

    def send_map_info(self, request):  # tell server which map to run
        self.socket.send(request)

    def reset(self):  # start new game
        # Choosing a map in the list
        # mapID = np.random.randint(1, 6)  # Choosing a map ID from 5 maps in Maps folder randomly
        mapID = 1
        posID_x = np.random.randint(MAP_MAX_X)  # Choosing an initial position of the DQN agent on the X-axis randomly
        posID_y = np.random.randint(MAP_MAX_Y)  # Choosing an initial position of the DQN agent on the Y-axis randomly
        # Creating a request for initializing a map, initial position, the initial energy, and the maximum number of steps of the DQN agent
        request = ("map" + str(mapID) + "," + str(posID_x) + "," + str(posID_y) + ",50,100")
        # Send the request to the game environment (GAME_SOCKET_DUMMY.py)
        self.send_map_info(request)

        # Initialize the game environment
        try:
            message = self.socket.receive()  # receive game info from server
            self.state.init_state(message)  # init state
        except Exception as e:
            import traceback
            traceback.print_exc()

        self.gameState = self.get_state()  # Get the state after resetting.
        # This function (get_state()) is an example of creating a state for the DQN model
        self.reward = 0  # The amount of rewards for the entire episode
        self.terminate = False  # The variable indicates that the episode ends
        self.steps_beyond_done = None
        return self.gameState

    def step(self, action):  # step process
        self.socket.send(str(action))  # send action to server
        try:
            message = self.socket.receive()  # receive new state from server
            self.state.update_state(message)  # update to local state
        except Exception as e:
            import traceback
            traceback.print_exc()
        self.gameState = self.get_state()
        self.reward = self.get_reward()
        done = self.check_terminate()
        return self.gameState, self.reward, done, {}

    # Functions are customized by client
    def get_state(self):
        # Building the map
        view = np.zeros([self.state.mapInfo.max_x + 1, self.state.mapInfo.max_y + 1], dtype=int)
        for i in range(self.state.mapInfo.max_x + 1):
            for j in range(self.state.mapInfo.max_y + 1):
                if self.state.mapInfo.get_obstacle(i, j) == TreeID:  # Tree
                    view[i, j] = -20
                if self.state.mapInfo.get_obstacle(i, j) == TrapID:  # Trap
                    view[i, j] = -10
                if self.state.mapInfo.get_obstacle(i, j) == SwampID:  # Swamp
                    view[i, j] = self.state.mapInfo.get_obstacle_value(i, j)
                if self.state.mapInfo.gold_amount(i, j) > 0:
                    view[i, j] = self.state.mapInfo.gold_amount(i, j)
        # print(view)

        DQNState = view.flatten().tolist()  # Flattening the map matrix to a vector
        # Add position and energy of agent to the DQNState
        DQNState.append(self.state.x)
        DQNState.append(self.state.y)
        DQNState.append(self.state.energy)
        me = {"playerId": 1, "energy": self.state.energy, "posx": self.state.x, "posy": self.state.y,
              "lastAction": self.state.lastAction, "score": self.state.score, "status": self.state.status}
        # Add position of bots
        for player in self.state.players:
            if player["playerId"] != self.state.id:
                DQNState.append(player["posx"])
                DQNState.append(player["posy"])
        # Convert the DQNState from list to array for training
        DQNState = np.array(DQNState)
        return DQNState

    def get_reward(self):
        # Calculate reward
        reward = 0
        score_action = self.state.score - self.score_pre
        energy_consume = self.energy_pre - self.state.energy
        self.score_pre = self.state.score
        self.energy_pre = self.state.energy
        reward = score_action - 0.2 * energy_consume
        # if score_action > 0:
        #     # If the DQN agent crafts gold, then it should obtain a positive reward (equal score_action)
        #     reward += score_action
        #
        # # If the DQN agent crashes into obstacles (Tree, Trap, Swamp), then it should be punished by a negative reward
        # if self.state.mapInfo.get_obstacle(self.state.x, self.state.y) == TreeID:  # Tree
        #     reward -= TreeID
        # if self.state.mapInfo.get_obstacle(self.state.x, self.state.y) == TrapID:  # Trap
        #     reward -= TrapID
        # if self.state.mapInfo.get_obstacle(self.state.x, self.state.y) == SwampID:  # Swamp
        #     reward -= SwampID

        # If out of the map, then the DQN agent should be punished by a larger negative reward.
        if self.state.status == State.STATUS_ELIMINATED_WENT_OUT_MAP:
            reward += -10
        # Run out of energy, then the DQN agent should be punished by a larger negative reward.
        if self.state.status == State.STATUS_ELIMINATED_OUT_OF_ENERGY:
            reward += -10
        # print("reward", reward)
        return reward

    def check_terminate(self):
        # Checking the status of the game
        # it indicates the game ends or is playing
        return self.state.status != State.STATUS_PLAYING

    def updateObservation(self):
        return

    def render(self, mode='human', close=False):
        return

    def close(self):
        """Override in your subclass to perform any necessary cleanup.

        Environments will automatically close() themselves when
        garbage collected or when the program exits.
        """
        raise NotImplementedError()

    def seed(self, seed=None):
        """Sets the seed for this env's random number generator(s).

        # Returns
            Returns the list of seeds used in this env's random number generators
        """
        raise NotImplementedError()

    def configure(self, *args, **kwargs):
        """Provides runtime configuration to the environment.

        This configuration should consist of data that tells your
        environment how to run (such as an address of a remote server,
        or path to your ImageNet data). It should not affect the
        semantics of the environment.
        """
        raise NotImplementedError()
class Bot_newTD3:
    ACTION_GO_LEFT = 0
    ACTION_GO_RIGHT = 1
    ACTION_GO_UP = 2
    ACTION_GO_DOWN = 3
    ACTION_FREE = 4
    ACTION_CRAFT = 5

    def __init__(self, id):
        self.state = State()
        self.info = PlayerInfo(id)
        self.limit = 2
        state_dim = (2 * self.limit + 1) ** 2 + 3 + 3
        action_dim = 6
        max_action = 1.0
        # load model
        kwargs = {
            "state_dim": state_dim,
            "action_dim": action_dim,
            "max_action": max_action,
        }
        policy_file = "newTD3_Miner_0_2_get_state3"
        self.TreeID = 1
        self.TrapID = 2
        self.SwampID = 3
        self.policy = newTD3_bot.TD3(**kwargs)
        self.policy.load(f"./models_newTD3_2/{policy_file}")

    def next_action(self):
        s = self.get_state2(self.limit)
        action, _ = self.policy.predict_action(s)
        return int(action)

    def get_state2(self, limit):
        # Building the map
        view = np.zeros([limit * 2 + 1, limit * 2 + 1], dtype=int)
        max_x, max_y = self.state.mapInfo.max_x, self.state.mapInfo.max_y
        xlimit_below = np.clip(self.info.posx - limit, 0, max_x) - np.clip(self.info.posx + limit - max_x, 0, limit)
        xlimit_up = np.clip(self.info.posx + limit, 0, max_x) + np.clip(0 - self.info.posx + limit, 0, limit)
        ylimit_below = np.clip(self.info.posy - limit, 0, max_y) - np.clip(self.info.posy + limit - max_y, 0, limit)
        ylimit_up = np.clip(self.info.posy + limit, 0, max_y) + np.clip(0 - self.info.posy + limit, 0, limit)
        # print(xlimit_below, xlimit_up, ylimit_below, ylimit_up, self.info.posx, self.info.posy)

        dmax, m, n, exist_gold = -1000, -5, 0.1, False
        x_maxgold, y_maxgold = self.state.x, self.state.y
        for i in range(max_x + 1):
            for j in range(max_y + 1):
                if self.state.mapInfo.gold_amount(i, j) >= 50:
                    exist_gold = True
                    d = m * ((self.info.posx - i) ** 2 + (self.info.posy - j) ** 2) + n * self.state.mapInfo.gold_amount(i, j)
                    if d > dmax:
                        dmax = d
                        x_maxgold, y_maxgold = i, j  # position of cell is nearest and much gold
                if i in range(xlimit_below, xlimit_up + 1) and j in range(ylimit_below, ylimit_up + 1):
                    if self.state.mapInfo.get_obstacle(i, j) == self.TreeID:  # Tree
                        view[i - xlimit_below, j - ylimit_below] = -self.TreeID
                    if self.state.mapInfo.get_obstacle(i, j) == self.TrapID:  # Trap
                        view[i - xlimit_below, j - ylimit_below] = -self.TrapID
                    if self.state.mapInfo.get_obstacle(i, j) == self.SwampID:  # Swamp
                        view[i - xlimit_below, j - ylimit_below] = -self.SwampID
                    if self.state.mapInfo.gold_amount(i, j) > 0:
                        view[i - xlimit_below, j - ylimit_below] = self.state.mapInfo.gold_amount(i, j) / 10

        DQNState = view.flatten().tolist()  # Flattening the map matrix to a vector
        # Add position and energy of agent to the DQNState
        DQNState.append(self.info.posx - xlimit_below)
        DQNState.append(self.info.posy - ylimit_below)
        DQNState.append(self.info.energy)
        # Add position of bots
        # for player in self.state.players:
        #     if player["playerId"] != self.state.id:
        #         DQNState.append(player["posx"])
        #         DQNState.append(player["posy"])
        DQNState.append(self.info.posx - x_maxgold)
        DQNState.append(self.info.posy - y_maxgold)
        if exist_gold == False:
            DQNState.append(0)
        else:
            DQNState.append(self.state.mapInfo.gold_amount(x_maxgold, y_maxgold) / 10)
        # Convert the DQNState from list to array for training
        DQNState = np.array(DQNState)
        return DQNState

    def new_game(self, data):
        try:
            self.state.init_state(data)
        except Exception as e:
            import traceback
            traceback.print_exc()

    def new_state(self, data):
        # action = self.next_action()
        # self.socket.send(action)
        try:
            self.state.update_state(data)
        except Exception as e:
            import traceback
            traceback.print_exc()
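# The local view built by get_state2 always has a fixed width of (2 * limit + 1): near a map
# border the window is shifted inward rather than shrunk. A small sketch of an equivalent,
# arguably easier-to-read formulation (window_start is an illustrative helper, not part of
# the bot; the 21-column map width is consistent with the x / 20 normalisation used elsewhere
# in this file):
import numpy as np

def window_start(pos, limit, max_coord):
    # Shift the window so it stays inside [0, max_coord] while keeping width 2 * limit + 1.
    return int(np.clip(pos - limit, 0, max_coord - 2 * limit))

limit, max_x = 2, 20
for posx in range(max_x + 1):
    xlimit_below = np.clip(posx - limit, 0, max_x) - np.clip(posx + limit - max_x, 0, limit)
    assert int(xlimit_below) == window_start(posx, limit, max_x)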
class MinerEnv:
    def __init__(self, host, port):
        self.socket = GameSocket(host, port)
        self.state = State()

        self.score_pre = self.state.score  # Storing the last score for designing the reward function

    def start(self):  # connect to server
        self.socket.connect()

    def end(self):  # disconnect server
        self.socket.close()

    def send_map_info(self, request):  # tell server which map to run
        self.socket.send(request)

    def reset(self):  # start new game
        try:
            message = self.socket.receive()  # receive game info from server
            print(message)
            self.state.init_state(message)  # init state
        except Exception as e:
            import traceback
            traceback.print_exc()

    def step(self, action):  # step process
        self.socket.send(action)  # send action to server
        try:
            message = self.socket.receive()  # receive new state from server
            # print("New state: ", message)
            self.state.update_state(message)  # update to local state
        except Exception as e:
            import traceback
            traceback.print_exc()

    # Functions are customized by client
    # def get_state(self):
    #     obs = self.state
    #
    #     player_channel = np.zeros((4, obs.mapInfo.max_y + 1, obs.mapInfo.max_x + 1), dtype=float)
    #     obstacle_1 = np.zeros([obs.mapInfo.max_y + 1, obs.mapInfo.max_x + 1], dtype=float)
    #     obstacle_random = np.zeros([obs.mapInfo.max_y + 1, obs.mapInfo.max_x + 1], dtype=float)
    #     obstacle_5 = np.zeros([obs.mapInfo.max_y + 1, obs.mapInfo.max_x + 1], dtype=float)
    #     obstacle_10 = np.zeros([obs.mapInfo.max_y + 1, obs.mapInfo.max_x + 1], dtype=float)
    #     obstacle_40 = np.zeros([obs.mapInfo.max_y + 1, obs.mapInfo.max_x + 1], dtype=float)
    #     obstacle_100 = np.zeros([obs.mapInfo.max_y + 1, obs.mapInfo.max_x + 1], dtype=float)
    #     obstacle_value_min = np.zeros([obs.mapInfo.max_y + 1, obs.mapInfo.max_x + 1], dtype=float)
    #     obstacle_value_max = np.zeros([obs.mapInfo.max_y + 1, obs.mapInfo.max_x + 1], dtype=float)
    #
    #     gold = np.zeros([obs.mapInfo.max_y + 1, obs.mapInfo.max_x + 1], dtype=float)
    #     gold_amount = np.zeros([obs.mapInfo.max_y + 1, obs.mapInfo.max_x + 1], dtype=float)
    #
    #     for i in range(obs.mapInfo.max_y + 1):
    #         for j in range(obs.mapInfo.max_x + 1):
    #             type, value = None, None
    #             for cell in obs.mapInfo.obstacles:
    #                 if j == cell["posx"] and i == cell["posy"]:
    #                     type, value = cell["type"], cell["value"]
    #
    #             if value == 0:
    #                 obstacle_random[i, j] = 1
    #             if value == -1:
    #                 obstacle_1[i, j] = 1
    #             if value == -5:
    #                 obstacle_5[i, j] = 1
    #             if value == -10:
    #                 obstacle_10[i, j] = 1
    #             if value == -40:
    #                 obstacle_40[i, j] = 1
    #             if value == -100:
    #                 obstacle_100[i, j] = 1
    #             if value is None:
    #                 gold[i, j] = 1
    #                 value = -4
    #
    #             obstacle_value_min[i, j] = (-value if value != 0 else 5) / constants.MAX_ENERGY
    #             obstacle_value_max[i, j] = (-value if value != 0 else 20) / constants.MAX_ENERGY
    #
    #             gold_amount[i, j] = obs.mapInfo.gold_amount(j, i) / constants.MAX_EXTRACTABLE_GOLD
    #
    #     player_channel[0][obs.y, obs.x] = 1
    #
    #     id = 1
    #     for player in obs.players:
    #         if "status" in player and player["status"] == constants.Status.STATUS_PLAYING.value:
    #             if player["playerId"] == obs.id:
    #                 continue
    #
    #             player_channel[id][player["posy"], player["posx"]] = 1
    #             id += 1
    #
    #     board = np.stack(
    #         [obstacle_random, obstacle_1, obstacle_5, obstacle_10, obstacle_40, obstacle_100, obstacle_value_min,
    #          obstacle_value_max, gold, gold_amount])
    #     board = np.concatenate([player_channel, board])
    #
    #     energy = torch.tensor([max(0, obs.energy) / constants.MAX_ENERGY], dtype=torch.float)
    #     position = torch.clamp(torch.tensor([obs.y / 8 * 2 - 1,
    #                                          obs.x / 20 * 2 - 1], dtype=torch.float), -1, 1)
    #
    #     featurized_obs = {
    #         "obs": {
    #             "conv_features": torch.unsqueeze(torch.tensor(board, dtype=torch.float), 0),
    #             "fc_features": torch.unsqueeze(torch.cat([energy, position]), 0)
    #         }
    #     }
    #
    #     return featurized_obs

    def get_state(self, last_3_actions):
        obs = self.state

        player_channel = np.zeros((4, obs.mapInfo.max_y + 1, obs.mapInfo.max_x + 1), dtype=float)
        obstacle_1 = np.zeros([obs.mapInfo.max_y + 1, obs.mapInfo.max_x + 1], dtype=float)
        obstacle_random = np.zeros([obs.mapInfo.max_y + 1, obs.mapInfo.max_x + 1], dtype=float)
        obstacle_5 = np.zeros([obs.mapInfo.max_y + 1, obs.mapInfo.max_x + 1], dtype=float)
        obstacle_10 = np.zeros([obs.mapInfo.max_y + 1, obs.mapInfo.max_x + 1], dtype=float)
        obstacle_20 = np.zeros([obs.mapInfo.max_y + 1, obs.mapInfo.max_x + 1], dtype=float)
        obstacle_40 = np.zeros([obs.mapInfo.max_y + 1, obs.mapInfo.max_x + 1], dtype=float)
        obstacle_100 = np.zeros([obs.mapInfo.max_y + 1, obs.mapInfo.max_x + 1], dtype=float)
        obstacle_value_min = np.zeros([obs.mapInfo.max_y + 1, obs.mapInfo.max_x + 1], dtype=float)
        obstacle_value_max = np.zeros([obs.mapInfo.max_y + 1, obs.mapInfo.max_x + 1], dtype=float)

        gold = np.zeros([obs.mapInfo.max_y + 1, obs.mapInfo.max_x + 1], dtype=float)
        gold_amount = np.zeros([obs.mapInfo.max_y + 1, obs.mapInfo.max_x + 1], dtype=float)

        for i in range(obs.mapInfo.max_y + 1):
            for j in range(obs.mapInfo.max_x + 1):
                type, value = None, None
                for cell in obs.mapInfo.obstacles:
                    if j == cell["posx"] and i == cell["posy"]:
                        type, value = cell["type"], cell["value"]
                if type is None and value is None:
                    has_gold = False
                    for cell in obs.mapInfo.golds:
                        if j == cell["posx"] and i == cell["posy"]:
                            has_gold = True
                    if not has_gold:
                        value = -1

                if value == 0:  # Forest
                    obstacle_random[i, j] = 1
                if value == -1:  # Land
                    obstacle_1[i, j] = 1
                if value == -5:  # Swamp 1
                    obstacle_5[i, j] = 1
                if value == -10:  # Trap
                    obstacle_10[i, j] = 1
                if value == -20:  # Swamp 2
                    obstacle_20[i, j] = 1
                if value == -40:  # Swamp 3
                    obstacle_40[i, j] = 1
                if value == -100:  # Swamp 4
                    obstacle_100[i, j] = 1
                if value is None:  # Gold spot
                    gold[i, j] = 1
                    value = -4

                obstacle_value_min[i, j] = (-value if value != 0 else 5) / constants.MAX_ENERGY
                obstacle_value_max[i, j] = (-value if value != 0 else 20) / constants.MAX_ENERGY

                gold_amount[i, j] = obs.mapInfo.gold_amount(j, i) / 3000

        player_channel[0][obs.y, obs.x] = 1

        id = 1
        for player in obs.players:
            if player["playerId"] == obs.id:
                continue
            if "status" in player and player["status"] == constants.Status.STATUS_PLAYING.value:
                player_channel[id][player["posy"], player["posx"]] = 1
                id += 1

        board = np.stack([
            obstacle_random, obstacle_1, obstacle_5, obstacle_10, obstacle_20, obstacle_40,
            obstacle_100, obstacle_value_min, obstacle_value_max, gold, gold_amount
        ])

        position = np.clip(np.array([obs.y / 8 * 2 - 1, obs.x / 20 * 2 - 1]), -1, 1)

        one_hot_last_3_actions = np.zeros((3, 6), dtype=np.float32)
        one_hot_last_3_actions[np.arange(3), last_3_actions] = 1
        one_hot_last_3_actions = one_hot_last_3_actions.reshape(-1)

        featurized_obs = {
            "obs": {
                "conv_features": torch.unsqueeze(
                    torch.tensor(np.concatenate([
                        player_channel,
                        board,
                        np.full((1, obs.mapInfo.max_y + 1, obs.mapInfo.max_x + 1),
                                fill_value=max(0, obs.energy / (constants.MAX_ENERGY / 2)))
                    ]), dtype=torch.float), 0),
                "fc_features": torch.unsqueeze(
                    torch.tensor(np.concatenate([position, one_hot_last_3_actions]),
                                 dtype=torch.float), 0)
            }
        }

        return featurized_obs, self.state

    def get_state_v2(self, last_3_actions):
        obs = self.state

        player_channel = np.zeros((4, obs.mapInfo.max_y + 1, obs.mapInfo.max_x + 1), dtype=float)
        obstacle_1 = np.zeros([obs.mapInfo.max_y + 1, obs.mapInfo.max_x + 1], dtype=float)
        obstacle_random = np.zeros([obs.mapInfo.max_y + 1, obs.mapInfo.max_x + 1], dtype=float)
        obstacle_5 = np.zeros([obs.mapInfo.max_y + 1, obs.mapInfo.max_x + 1], dtype=float)
        obstacle_10 = np.zeros([obs.mapInfo.max_y + 1, obs.mapInfo.max_x + 1], dtype=float)
        obstacle_20 = np.zeros([obs.mapInfo.max_y + 1, obs.mapInfo.max_x + 1], dtype=float)
        obstacle_40 = np.zeros([obs.mapInfo.max_y + 1, obs.mapInfo.max_x + 1], dtype=float)
        obstacle_100 = np.zeros([obs.mapInfo.max_y + 1, obs.mapInfo.max_x + 1], dtype=float)
        obstacle_value_min = np.zeros([obs.mapInfo.max_y + 1, obs.mapInfo.max_x + 1], dtype=float)
        obstacle_value_max = np.zeros([obs.mapInfo.max_y + 1, obs.mapInfo.max_x + 1], dtype=float)

        gold = np.zeros([obs.mapInfo.max_y + 1, obs.mapInfo.max_x + 1], dtype=float)
        gold_amount = np.zeros([obs.mapInfo.max_y + 1, obs.mapInfo.max_x + 1], dtype=float)

        for i in range(obs.mapInfo.max_y + 1):
            for j in range(obs.mapInfo.max_x + 1):
                type, value = None, None
                for cell in obs.mapInfo.obstacles:
                    if j == cell["posx"] and i == cell["posy"]:
                        type, value = cell["type"], cell["value"]
                if type is None and value is None:
                    has_gold = False
                    for cell in obs.mapInfo.golds:
                        if j == cell["posx"] and i == cell["posy"]:
                            has_gold = True
                    if not has_gold:
                        value = -1

                if value == 0:  # Forest
                    obstacle_random[i, j] = 1
                if value == -1:  # Land
                    obstacle_1[i, j] = 1
                if value == -5:  # Swamp 1
                    obstacle_5[i, j] = 1
                if value == -10:  # Trap
                    obstacle_10[i, j] = 1
                if value == -20:  # Swamp 2
                    obstacle_20[i, j] = 1
                if value == -40:  # Swamp 3
                    obstacle_40[i, j] = 1
                if value == -100:  # Swamp 4
                    obstacle_100[i, j] = 1
                if value is None:  # Gold spot
                    gold[i, j] = 1
                    value = -4

                obstacle_value_min[i, j] = (-value if value != 0 else 5) / constants.MAX_ENERGY
                obstacle_value_max[i, j] = (-value if value != 0 else 20) / constants.MAX_ENERGY

                gold_amount[i, j] = obs.mapInfo.gold_amount(j, i) / 1250

        scores = [obs.score, 0, 0, 0]
        energies = [obs.energy, 0, 0, 0]

        player_channel[0][obs.y, obs.x] = 1

        id = 1
        for player in obs.players:
            if player["playerId"] == obs.id:
                continue
            if "status" in player and player["status"] == constants.Status.STATUS_PLAYING.value:
                player_channel[id][player["posy"], player["posx"]] = 1
                scores[id] = player["score"]
                energies[id] = player["energy"]
                id += 1

        board = np.stack([
            obstacle_random, obstacle_1, obstacle_5, obstacle_10, obstacle_20, obstacle_40,
            obstacle_100, obstacle_value_min, obstacle_value_max, gold, gold_amount
        ])
        # board = np.concatenate([players, board])

        position = np.clip(np.array([obs.y / 8 * 2 - 1, obs.x / 20 * 2 - 1]), -1, 1)

        one_hot_last_3_actions = np.zeros((3, 6), dtype=np.float32)
        one_hot_last_3_actions[np.arange(3), last_3_actions] = 1
        one_hot_last_3_actions = one_hot_last_3_actions.reshape(-1)

        featurized_obs = {
            "obs": {
                "conv_features": torch.unsqueeze(
                    torch.tensor(np.concatenate([
                        player_channel,
                        np.copy(board),
                        np.full((1, obs.mapInfo.max_y + 1, obs.mapInfo.max_x + 1),
                                fill_value=max(0, obs.energy / (constants.MAX_ENERGY / 2)))
                    ]), dtype=torch.float), 0),
                "fc_features": torch.unsqueeze(
                    torch.tensor(np.concatenate([position, one_hot_last_3_actions, scores, energies]),
                                 dtype=torch.float), 0)
            }
        }

        return featurized_obs, self.state

    def check_terminate(self):
        return self.state.status != State.STATUS_PLAYING
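# A standalone sketch of the auxiliary encoding used by get_state / get_state_v2 above: the
# three most recent actions (values 0..5) become a flattened 3x6 one-hot block appended to
# the fully-connected features, next to the clipped (y, x) position. The example action
# values below are illustrative only.
import numpy as np

last_3_actions = np.array([4, 5, 0])           # e.g. FREE, CRAFT, GO_LEFT
one_hot = np.zeros((3, 6), dtype=np.float32)
one_hot[np.arange(3), last_3_actions] = 1
fc_extra = one_hot.reshape(-1)                 # 18 values appended to fc_features
assert fc_extra.shape == (18,) and fc_extra.sum() == 3
# Together with the 4 player channels, the 11-channel board stack, and the single energy
# plane, conv_features in get_state carries 16 channels per map cell.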
class MinerEnv:
    def __init__(self, host, port):
        self.socket = GameSocket(host, port)
        self.state = State()

        self.score_pre = self.state.score  # Storing the last score for designing the reward function

    def start(self):  # connect to server
        self.socket.connect()

    def end(self):  # disconnect server
        self.socket.close()

    def send_map_info(self, request):  # tell server which map to run
        self.socket.send(request)

    def reset(self):  # start new game
        try:
            message = self.socket.receive()  # receive game info from server
            self.state.init_state(message)  # init state
        except Exception as e:
            import traceback
            traceback.print_exc()

    def step(self, action):  # step process
        self.socket.send(action)  # send action to server
        try:
            message = self.socket.receive()  # receive new state from server
            self.state.update_state(message)  # update to local state
        except Exception as e:
            import traceback
            traceback.print_exc()

    # Functions are customized by client
    def get_state(self):
        # Building the map
        view = np.zeros([self.state.mapInfo.max_x + 1, self.state.mapInfo.max_y + 1], dtype=int)
        for i in range(self.state.mapInfo.max_x + 1):
            for j in range(self.state.mapInfo.max_y + 1):
                if self.state.mapInfo.get_obstacle(i, j) == TreeID:  # Tree
                    view[i, j] = -TreeID
                if self.state.mapInfo.get_obstacle(i, j) == TrapID:  # Trap
                    view[i, j] = -TrapID
                if self.state.mapInfo.get_obstacle(i, j) == SwampID:  # Swamp
                    view[i, j] = -SwampID
                if self.state.mapInfo.gold_amount(i, j) > 0:
                    view[i, j] = self.state.mapInfo.gold_amount(i, j)

        DQNState = view.flatten().tolist()  # Flattening the map matrix to a vector
        # Add position and energy of agent to the DQNState
        DQNState.append(self.state.x)
        DQNState.append(self.state.y)
        DQNState.append(self.state.energy)
        # Add position of bots
        for player in self.state.players:
            if player["playerId"] != self.state.id:
                DQNState.append(player["posx"])
                DQNState.append(player["posy"])
        # Convert the DQNState from list to array for training
        DQNState = np.array(DQNState)
        return DQNState

    def get_reward(self):
        # Calculate reward
        reward = 0
        score_action = self.state.score - self.score_pre
        self.score_pre = self.state.score
        if score_action > 0:
            # If the DQN agent crafts gold, then it should obtain a positive reward (equal score_action)
            reward += score_action

        # If the DQN agent crashes into obstacles (Tree, Trap, Swamp), then it should be punished by a negative reward
        if self.state.mapInfo.get_obstacle(self.state.x, self.state.y) == TreeID:  # Tree
            reward -= TreeID
        if self.state.mapInfo.get_obstacle(self.state.x, self.state.y) == TrapID:  # Trap
            reward -= TrapID
        if self.state.mapInfo.get_obstacle(self.state.x, self.state.y) == SwampID:  # Swamp
            reward -= SwampID

        # If out of the map, then the DQN agent should be punished by a larger negative reward.
        if self.state.status == State.STATUS_ELIMINATED_WENT_OUT_MAP:
            reward += -10
        # Run out of energy, then the DQN agent should be punished by a larger negative reward.
        if self.state.status == State.STATUS_ELIMINATED_OUT_OF_ENERGY:
            reward += -10
        # print("reward", reward)
        return reward

    def check_terminate(self):
        # Checking the status of the game
        # it indicates the game ends or is playing
        return self.state.status != State.STATUS_PLAYING
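# Rough size check for the flattened DQN state built by get_state above: one value per map
# cell, plus (x, y, energy), plus (posx, posy) for each opponent. With a 21x9 map and three
# opponent bots (an assumption consistent with the INPUTNUM = 198 used by the DQN variant of
# MinerEnv earlier in this file) the vector has 198 entries.
def dqn_input_size(max_x, max_y, num_bots):
    return (max_x + 1) * (max_y + 1) + 3 + 2 * num_bots

assert dqn_input_size(20, 8, 3) == 198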
class MinerEnv:
    def __init__(self, host, port):
        self.socket = GameSocket(host, port)
        self.state = State()
        self.pre_x = 0
        self.pre_y = 0
        self.pre_energy = 0
        # self.pre_action = ''
        self.score_pre = self.state.score  # Storing the last score for designing the reward function

    def start(self):  # connect to server
        self.socket.connect()

    def end(self):  # disconnect server
        self.socket.close()

    def send_map_info(self, request):  # tell server which map to run
        self.socket.send(request)

    def reset(self):  # start new game
        try:
            message = self.socket.receive()  # receive game info from server
            self.state.init_state(message)  # init state
        except:
            import traceback
            traceback.print_exc()

    def step(self, action):  # step process
        # self.pre_action = action
        self.pre_energy = self.state.energy
        self.pre_x, self.pre_y = self.state.x, self.state.y  # store the last coordinate
        self.socket.send(action)  # send action to server
        try:
            message = self.socket.receive()  # receive new state from server
            self.state.update_state(message)  # update to local state
            # new_state = str_2_json(message)
            # players = new_state["players"]
            # print('length of players in step', len(players))
        except:
            import traceback
            traceback.print_exc()
        # print(self.state.players)

    # Functions are customized by client
    def get_state(self):
        # Building the map
        # print(self.state.x, self.state.y)
        view = np.zeros((5 * (self.state.mapInfo.max_x + 1), 5 * (self.state.mapInfo.max_y + 1), 6), dtype=int)
        # view[0:3, :] = -10
        # view[-3:, :] = -10
        # view[:, 0:3] = -10
        # view[:, -3:] = -10
        for i in range(self.state.mapInfo.max_x + 1):
            for j in range(self.state.mapInfo.max_y + 1):
                if self.state.mapInfo.get_obstacle(i, j) == TreeID:  # Tree
                    view[5 * i:5 * i + 5, 5 * j:5 * j + 5, 0] = -TreeID  # trap map
                if self.state.mapInfo.get_obstacle(i, j) == TrapID:  # Trap
                    view[5 * i:5 * i + 5, 5 * j:5 * j + 5, 0] = -TrapID  # trap map
                if self.state.mapInfo.get_obstacle(i, j) == SwampID:  # Swamp
                    view[5 * i:5 * i + 5, 5 * j:5 * j + 5, 0] = -SwampID  # trap map
                if self.state.mapInfo.gold_amount(i, j) > 0:
                    view[5 * i:5 * i + 5, 5 * j:5 * j + 5, 0] = self.state.mapInfo.gold_amount(i, j) / 1000  # gold map

        for stt, player in enumerate(self.state.players):
            if player["playerId"] != self.state.id:
                try:
                    if player["status"] not in [1, 2, 3]:
                        try:
                            view[5 * player["posx"]:5 * player["posx"] + 5,
                                 5 * player["posy"]:5 * player["posy"] + 5, stt + 1] = player["energy"] / 50
                        except:
                            view[5 * player["posx"]:5 * player["posx"] + 5,
                                 5 * player["posy"]:5 * player["posy"] + 5, stt + 1] = 1
                except:
                    view[5 * player["posx"]:5 * player["posx"] + 5,
                         5 * player["posy"]:5 * player["posy"] + 5, stt] = 1
                    # print(self.state.players)
            else:
                try:
                    view[5 * self.state.x:5 * self.state.x + 5,
                         5 * self.state.y:5 * self.state.y + 5, 2] = self.state.energy
                except:
                    print('out of map')

        DQNState = np.array(view)
        return DQNState

    def get_reward(self, action):
        # Calculate reward
        reward = 0
        score_action = self.state.score - self.score_pre
        self.score_pre = self.state.score
        pre_x, pre_y = self.pre_x, self.pre_y
        if self.state.energy >= 45 and self.state.lastAction == 4:
            reward += -0.2
        # plus a small bonus if the agent moves to a coordinate that has gold
        if self.state.mapInfo.gold_amount(self.state.x, self.state.y) >= 50:
            reward += 0.2
        # If the DQN agent crafts gold, then it should obtain a positive reward (equal score_action)
        if score_action > 0:
            reward += score_action / 50
        # if still in the map, plus a small bonus
        if self.state.status == State.STATUS_PLAYING:
            reward += 0.1
        # if there is no gold but the agent still crafts, it will be punished
        if self.state.mapInfo.get_obstacle(pre_x, pre_y) < 4 and int(self.state.lastAction) == 5:
            reward += -0.2
        if (self.state.mapInfo.gold_amount(pre_x, pre_y) >= 50 and self.pre_energy > 15) and (int(self.state.lastAction) != 5):
            reward += -0.2
        # If out of the map, then the DQN agent should be punished by a larger negative reward.
        # if self.state.status == State.STATUS_ELIMINATED_WENT_OUT_MAP:
        #     reward = -1
        # Run out of energy, then the DQN agent should be punished by a larger negative reward.
        # if self.state.status == State.STATUS_ELIMINATED_OUT_OF_ENERGY:
        #     reward = -1
        # print("reward", reward)
        # if self.state.status == State.STATUS_STOP_END_STEP:
        #     reward = +2
        return reward

    def check_terminate(self):
        # Checking the status of the game
        # it indicates the game ends or is playing
        return self.state.status != State.STATUS_PLAYING
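# The get_state above writes each per-cell value into a 5x5 block of the (5*W, 5*H, 6) view.
# The same spatial upsampling can be expressed with np.kron; a toy sketch (the 2x2 map and
# its values are illustrative only, not taken from the game):
import numpy as np

cell_map = np.array([[0, -1],
                     [3, 0]])
upsampled = np.kron(cell_map, np.ones((5, 5), dtype=cell_map.dtype))
assert upsampled.shape == (10, 10)
assert (upsampled[0:5, 5:10] == -1).all()  # each source cell becomes a 5x5 block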