def init_bots(self):
    """Create the opponent bots and register their PlayerInfo in the step state.

    All bots spawn with the same position and energy as the human player.
    """
    # Roster: Bot1 plays as id 2, Bot2 as id 3, Bot4 as id 4.
    # NOTE(review): an earlier comment said "bot3(id=4)" but the code
    # instantiates Bot4 — confirm which was intended.
    self.bots = [Bot1(2), Bot2(3), Bot4(4)]

    for opponent in self.bots:
        # Mirror the player's starting state onto each bot.
        info = opponent.info
        info.posx = self.user.posx
        info.posy = self.user.posy
        info.energy = self.user.energy
        info.lastAction = -1          # no action taken yet
        info.status = PlayerInfo.STATUS_PLAYING
        info.score = 0
        self.stepState.players.append(info)

    # Player count = human player (already in players) + all bots.
    self.userMatch.gameinfo.numberOfPlayers = len(self.stepState.players)
    print("numberOfPlayers: ", self.userMatch.gameinfo.numberOfPlayers)
def __init__(self):
    """Set up the pygame window, world, player, enemies, weapon and camera."""
    pygame.init()
    self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
    pygame.display.set_caption("Future")
    self.clock = pygame.time.Clock()
    self.a_map = World_map()
    self.bender = Robot(self)          # the player character
    self.mouse_pos_x, self.mouse_pos_y = 0, 0
    self.bullets = pygame.sprite.Group()

    def make_bot1(x, y):
        # Build a Bot1 with every positional attribute (screen rect,
        # real coordinates and map coordinates) set to the same spawn point.
        # Extracted to remove the copy-pasted 7-line setup stanzas.
        bot = Bot1(self)
        bot.rect.x = x
        bot.rect.y = y
        bot.real_x = x
        bot.real_y = y
        bot.x_on_a_map = x
        bot.y_on_a_map = y
        return bot

    # Two Bot1 enemies at fixed spawn points.
    self.bots1 = pygame.sprite.Group()
    self.bots1.add(make_bot1(904, 282))
    self.bots1.add(make_bot1(104, 292))

    # One Bot2 enemy (kept as an attribute — referenced elsewhere).
    self.bots2 = pygame.sprite.Group()
    self.bot2 = Bot2(self)
    self.bot2.rect.x = 600
    self.bot2.rect.y = 92
    self.bots2.add(self.bot2)

    self.gun1 = Guns(self)
    # Input / weapon state flags.
    self.fire = False
    self.gun1_status = False
    self.move_left = False
    self.move_right = False
    # NOTE(review): camera_configure is not set in this method — presumably a
    # class-level attribute or set elsewhere; confirm before refactoring.
    self.camera = Camera(self.camera_configure, self)
def init_bots(self):
    """Pick a random 3-bot lineup and register each bot's PlayerInfo.

    A single draw from randrange(20) selects the lineup; bots receive the
    fixed player ids 2, 3 and 4 and start with the human player's position
    and energy.
    """
    roll = randrange(20)

    # Range-based cases first (mirrors the original elif ordering):
    if roll == 0 or roll > 15:          # roll in {0, 16, 17, 18, 19}
        lineup = (Bot5, Bot6, Bot2)
    elif roll > 9:                      # roll in {10, ..., 15}
        lineup = (Bot2, Bot5, Bot6)
    else:
        # roll in {1, ..., 9}: exact-value table; 9 falls through to the default.
        lineup = {
            1: (Bot1, Bot2, Bot6),
            2: (Bot5, Bot1, Bot2),
            3: (Bot6, Bot5, Bot1),
            4: (Bot3, Bot6, Bot2),
            5: (Bot2, Bot3, Bot5),
            6: (Bot5, Bot6, Bot3),
            7: (Bot6, Bot2, Bot4),
            8: (Bot4, Bot2, Bot5),
        }.get(roll, (Bot6, Bot5, Bot4))

    # Instantiate with ids 2, 3, 4 respectively.
    self.bots = [bot_cls(bot_id) for bot_cls, bot_id in zip(lineup, (2, 3, 4))]

    for opponent in self.bots:
        # Every bot starts with the player's position and energy.
        info = opponent.info
        info.posx = self.user.posx
        info.posy = self.user.posy
        info.energy = self.user.energy
        info.lastAction = -1            # no action taken yet
        info.status = PlayerInfo.STATUS_PLAYING
        info.score = 0
        self.stepState.players.append(info)

    self.userMatch.gameinfo.numberOfPlayers = len(self.stepState.players)
    print("numberOfPlayers: ", self.userMatch.gameinfo.numberOfPlayers)
# Parameters for training a DQN model N_EPISODE = 100000 #The number of episodes for training MAX_STEP = 1000 #The number of steps for each episode BATCH_SIZE = 32 #The number of experiences for each replay MEMORY_SIZE = 100000 #The size of the batch for storing experiences SAVE_NETWORK = 1000 # After this number of episodes, the DQN model is saved for testing later. INITIAL_REPLAY_SIZE = 10000 #The number of experiences are stored in the memory batch before starting replaying INPUTNUM = (2*limit+1)**2 + 3#198 #The number of input values for the DQN model ACTIONNUM = 6 #The number of actions output from the DQN model MAP_MAX_X = 21 #Width of the Map MAP_MAX_Y = 9 #Height of the Map # Initialize a DQN model and a memory batch for storing experiences DQNAgent = DQN(INPUTNUM, ACTIONNUM) memory = Memory(MEMORY_SIZE) bots = [Bot1(2), Bot2(3), Bot3(4)] #load model to continue training if args.load_model !="": file_name = "TrainedModels/DQNmodel_20200730-1832_ep1000out-30.json" json_file = file_name if args.load_model == "default" else args.load_model DQNAgent.load_model(json_file) # Initialize environment minerEnv = MinerEnv(HOST, PORT) #Creating a communication environment between the DQN model and the game environment (GAME_SOCKET_DUMMY.py) minerEnv.start() # Connect to the game train = False #The variable is used to indicate that the replay starts, and the epsilon starts decrease. #Training Process #the main part of the deep-q learning agorithm for episode_i in range(0, N_EPISODE): print(np.unique(DQNAgent.model.get_weights()[0]))