Example #1
def main(_config):
    env = gym.make(_config.ENV_NAME)

    agent = DQN(env, _config)

    print("[*] --- Begin Emulator Training ---")

    for episode in range(_config.EPISODE):

        obs = env.reset()

        # === Emulator ===
        for i in range(_config.STEP):
            action = agent.pick_action(obs)
            obs_next, reward, done, _ = env.step(action)

            # the agent stores the newest transition in its replay buffer and trains off-policy on mini-batches (see the buffer sketch after this example)
            agent.perceive(obs, action, reward, done)

            if done:
                break

            obs = obs_next

        # == train ==
        agent.train(episode)

        if (episode + 1) % agent.save_every == 0:
            agent.save(step=episode)

        # == test ==
        print("\n[*] === Enter TEST module ===")
        test(env, _config.STEP, agent)

    agent.record()
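
A minimal sketch of the replay-buffer behaviour that agent.perceive() is assumed to build on (the DQN class itself is not shown in this example); the class name, capacity and sample() helper below are illustrative only:

import random
from collections import deque

class SimpleReplayBuffer:
    """Fixed-size FIFO store of (obs, action, reward, next_obs, done) transitions."""

    def __init__(self, capacity=10000):
        self.buffer = deque(maxlen=capacity)  # oldest transitions are evicted automatically

    def add(self, obs, action, reward, next_obs, done):
        self.buffer.append((obs, action, reward, next_obs, done))

    def sample(self, batch_size=32):
        # uniform random sampling is what enables off-policy mini-batch updates
        batch = random.sample(self.buffer, batch_size)
        return map(list, zip(*batch))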
Example #2
class Agent:
	"""
	The intelligent agent of the simulation. Sets the neural network model used and the general parameters.
	It is responsible for selecting actions, optimizing the neural network and managing the models.
	"""

	def __init__(self, action_set, train=True, load_path=None):
		#1. Initialize agent params
		self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
		self.action_set = action_set
		self.action_number = len(action_set)
		self.steps_done = 0
		self.epsilon = Config.EPS_START
		self.episode_durations = []

		#2. Build networks
		self.policy_net = DQN().to(self.device)
		self.target_net = DQN().to(self.device)
		
		self.optimizer = optim.RMSprop(self.policy_net.parameters(), lr=Config.LEARNING_RATE)

		if not train:		
			self.optimizer = optim.RMSprop(self.policy_net.parameters(), lr=0)	
			self.policy_net.load(load_path, optimizer=self.optimizer)
			self.policy_net.eval()

		self.target_net.load_state_dict(self.policy_net.state_dict())
		self.target_net.eval()

		#3. Create the Prioritized Experience Replay memory (a sketch of the assumed interface follows this example)
		self.memory = Memory(Config.MEMORY_SIZE)


	 
	def append_sample(self, state, action, next_state, reward):
		"""
		Save the sample (error, <s, a, s', r>) to the replay memory.
		"""

		# Determine whether this transition ends the episode
		done = next_state is None

		# Compute Q(s_t, a) - the model computes Q(s_t), then we select the columns of actions taken
		state_action_values = self.policy_net(state)
		state_action_values = state_action_values.gather(1, action.view(-1,1))

		
		if not done:
			# Compute argmax Q(s', a; θ)		
			next_state_actions = self.policy_net(next_state).max(1)[1].detach().unsqueeze(1)

			# Compute Q(s', argmax Q(s', a; θ), θ-)
			next_state_values = self.target_net(next_state).gather(1, next_state_actions).squeeze(1).detach()

			# Compute the expected Q values
			expected_state_action_values = (next_state_values * Config.GAMMA) + reward
		else:
			expected_state_action_values = reward


		error = abs(state_action_values - expected_state_action_values).data.cpu().numpy()


		self.memory.add(error, state, action, next_state, reward)

	def select_action(self, state, train=True):
		"""
	Select the best action according to the Q-values output by the neural network

		Parameters
		----------
			state: float ndarray
				The current state on the simulation
			train: bool
				Whether we are training (True) or evaluating (False) the model
		Returns
		-------
			a.max(1)[1]: int
				The action with the highest Q-value
			a.max(0): float
				The Q-value of the action taken
		"""
		global steps_done
		sample = random.random()
		#1. Perform an epsilon-greedy algorithm
		#a. set the value for epsilon
		self.epsilon = Config.EPS_END + (Config.EPS_START - Config.EPS_END) * \
			math.exp(-1. * self.steps_done / Config.EPS_DECAY)
			
		self.steps_done += 1

		#b. make the decision for selecting a random action or selecting an action from the neural network
		if sample > self.epsilon or (not train):
			# select an action from the neural network
			with torch.no_grad():
				# a <- argmax Q(s, theta)
				a = self.policy_net(state)
				return a.max(1)[1].view(1, 1), a.max(0)
		else:
			# select a random action
			print('random action')
			return torch.tensor([[random.randrange(2)]], device=self.device, dtype=torch.long), None

	"""
	def select_action(self, state, train=True):
		
		Select the best action according to the Q-values output by the neural network

		Parameters
		----------
			state: float ndarray
				The current state on the simulation
			train: bool
				Whether we are training (True) or evaluating (False) the model
		Returns
		-------
			a.max(1)[1]: int
				The action with the highest Q-value
			a.max(0): float
				The Q-value of the action taken
		
		global steps_done
		sample = random.random()
		#1. Perform an epsilon-greedy algorithm
		#a. set the value for epsilon
		self.epsilon = Config.EPS_END + (Config.EPS_START - Config.EPS_END) * \
			math.exp(-1. * self.steps_done / Config.EPS_DECAY)
			
		self.steps_done += 1

		#b. make the decision for selecting a random action or selecting an action from the neural network
		if sample > self.epsilon or (not train):
			# select an action from the neural network
			with torch.no_grad():
				# a <- argmax Q(s, theta)
				# setting the network to train mode is important because it enables dropout
				self.policy_net.train()
				output_list = []
				# Run n feedforward passes through the network to build a statistical estimate
				for i in range(Config.STOCHASTIC_PASSES):
					#print(agent.policy_net(data))
					output_list.append(torch.unsqueeze(F.softmax(self.policy_net(state), dim=1), 0))
					#print(output_list[i])

				self.policy_net.eval()
				# The result of the network is the mean of n passes
				output_mean = torch.cat(output_list, 0).mean(0)
				q_value = output_mean.data.cpu().numpy().max()
				action = output_mean.max(1)[1].view(1, 1)

				uncertainty = torch.cat(output_list, 0).var(0).mean().item()
				
				return action, q_value, uncertainty
				
		else:
			# select a random action
			print('random action')
			return torch.tensor([[random.randrange(2)]], device=self.device, dtype=torch.long), None, None

	"""
	def optimize_model(self):
		"""
		Perform one step of optimization on the neural network
		"""

		if self.memory.tree.n_entries < Config.BATCH_SIZE:
			return
		transitions, idxs, is_weights = self.memory.sample(Config.BATCH_SIZE)

		# Transpose the batch (see http://stackoverflow.com/a/19343/3343043 for detailed explanation).
		batch = Transition(*zip(*transitions))

		# Compute a mask of non-final states and concatenate the batch elements
		non_final_mask = torch.tensor(tuple(map(lambda s: s is not None,
											  batch.next_state)), device=self.device, dtype=torch.bool)
		non_final_next_states = torch.cat([s for s in batch.next_state
													if s is not None])
		
		state_batch = torch.cat(batch.state)
		action_batch = torch.cat(batch.action)
		reward_batch = torch.cat(batch.reward)
		
		# Compute Q(s_t, a) - the model computes Q(s_t), then we select the columns of actions taken
		state_action_values = self.policy_net(state_batch).gather(1, action_batch)
		
	
		# Compute argmax Q(s', a; θ)		
		next_state_actions = self.policy_net(non_final_next_states).max(1)[1].detach().unsqueeze(1)

		# Compute Q(s', argmax Q(s', a; θ), θ-)
		next_state_values = torch.zeros(Config.BATCH_SIZE, device=self.device)
		next_state_values[non_final_mask] = self.target_net(non_final_next_states).gather(1, next_state_actions).squeeze(1).detach()

		# Compute the expected Q values
		expected_state_action_values = (next_state_values * Config.GAMMA) + reward_batch

		# Update priorities
		errors = torch.abs(state_action_values.squeeze() - expected_state_action_values).data.cpu().numpy()
		
		# update priority
		for i in range(Config.BATCH_SIZE):
			idx = idxs[i]
			self.memory.update(idx, errors[i])


		# Compute Huber loss
		loss = F.smooth_l1_loss(state_action_values, expected_state_action_values.unsqueeze(1))
		loss_return = loss.item()

		# Optimize the model
		self.optimizer.zero_grad()
		loss.backward()
		for param in self.policy_net.parameters():
			param.grad.data.clamp_(-1, 1)
		self.optimizer.step()

		return loss_return

	def save(self, step, logs_path, label):
		"""
		Save the model on hard disc

		Parameters
		----------
			step: int
				current step on the simulation
			logs_path: string
				path to where we will store the model
			label: string
				label that will be used to store the model
		"""

		os.makedirs(os.path.join(logs_path, label), exist_ok=True)

		full_label = label + str(step) + '.pth'
		logs_path = os.path.join(logs_path, label, full_label)

		self.policy_net.save(logs_path, step=step, optimizer=self.optimizer)
	
	def restore(self, logs_path):
		"""
		Load the model from hard disc

		Parameters
		----------
			logs_path: string
				path from which the model will be loaded
		"""
		self.policy_net.load(logs_path)
		self.target_net.load(logs_path)
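
The example above relies on a Memory class (prioritized experience replay) that is not shown. Below is a hedged, list-based sketch of the interface it is assumed to expose -- add(error, ...), sample() returning (transitions, idxs, is_weights) and update(idx, error); a real implementation would normally use a sum tree (hence the memory.tree.n_entries attribute used in optimize_model()), which is omitted here for brevity:

import random
import numpy as np

class SimplePrioritizedMemory:
    """Toy proportional prioritized replay memory (no sum tree, illustrative only)."""

    def __init__(self, capacity, alpha=0.6, beta=0.4, eps=1e-2):
        self.capacity, self.alpha, self.beta, self.eps = capacity, alpha, beta, eps
        self.data, self.priorities = [], []

    def add(self, error, *transition):
        if len(self.data) >= self.capacity:
            self.data.pop(0)
            self.priorities.pop(0)
        self.data.append(transition)
        self.priorities.append((abs(float(error)) + self.eps) ** self.alpha)

    def sample(self, batch_size):
        probs = np.array(self.priorities) / sum(self.priorities)
        idxs = np.random.choice(len(self.data), batch_size, p=probs)
        # importance-sampling weights correct the bias introduced by non-uniform sampling
        is_weights = (len(self.data) * probs[idxs]) ** (-self.beta)
        is_weights /= is_weights.max()
        return [self.data[i] for i in idxs], idxs, is_weights

    def update(self, idx, error):
        self.priorities[idx] = (abs(float(error)) + self.eps) ** self.alpha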
Example #3
import time
from model import DQN
from rewards import CustomReward

# CONFIG
wave = True
learn_timesteps = 1 * int(1e3)

if wave:
    import arlie

    env = arlie.make("LunarLander", render_mode=False, reward=CustomReward())
else:
    import gym

    env = gym.make("LunarLander-v2")

model = DQN(env)

print("Training...")
_t = time.time()
model.learn(total_timesteps=learn_timesteps)
t = time.time() - _t
str_t = time.strftime("%H h, %M m, %S s", time.gmtime(t))
print("Trained in {} during {} timesteps".format(str_t, learn_timesteps))

model.save("{}-trained-model".format("wave" if wave else "gym"))

env.close()
Example #4
        rewards = np.array(winner_rewards + loser_rewards)
        boards = np.concatenate([boards[winner], boards[loser]])

    else:
        # tie: both players receive zero reward
        one_rewards = [0] * len(boards[1])
        two_rewards = [0] * len(boards[2])

        rewards = np.array(one_rewards + two_rewards)
        boards = np.concatenate([boards[1], boards[2]])
    
    rewards = rewards.reshape(rewards.shape[0], -1)
    model.train(boards, rewards)

    if games % EPOCH == 0:
        gamma *= 1.0
        win_rate.append(test_against_random(model))

        debug_run(model)

    if games % TEST_FRQ == 0:
        print(win_rate)
        plt.plot(win_rate)
        plt.ylabel('Winning Percentage')
        plt.xlabel('Epochs')
        plt.show()
        model.save('./dqn_no/')
        exit()
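
The reshape call above turns the flat reward vector into a column so that each stored board gets a single training target; a quick illustration with plain NumPy:

import numpy as np

rewards = np.array([1.0, -1.0, 1.0])             # one reward per stored board
rewards = rewards.reshape(rewards.shape[0], -1)  # shape (3, 1): one target column per board
print(rewards.shape)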

Example #5
            # Transition: keep a sliding window of the last four states (see the deque sketch after this example)
            transition.append(state)
            if len(transition) > 4:
                transition.pop(0)

            if model.step > model.train_start_step and model.step % model.train_step_interval == 0:
                model.train()
                if model.step % model.target_update_interval == 0:
                    model.update_target()

            if is_render:
                env.render()

            if done:

                writer.add_scalar('reward/accum', accum_reward, model.step)
                writer.add_scalar('data/epsilon', model.epsilon, model.step)
                writer.add_scalar('data/x_pos', info['x_pos'], model.step)
                print(
                    "Episode : %5d\t\tSteps : %10d\t\tReward : %7d\t\tX_step : %4d\t\tEpsilon : %.3f"
                    % (model.episode, model.step, accum_reward, info['x_pos'],
                       model.epsilon))

                if save_model and model.episode % 100 == 0:
                    model.save()

                break

    env.close()
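
The transition list above keeps only the four most recent states by hand; a minimal sketch of the same sliding-window idea with collections.deque (the stack size of 4 mirrors the snippet, everything else is illustrative):

from collections import deque

import numpy as np

frame_stack = deque(maxlen=4)  # appending a 5th frame drops the oldest one automatically

def push_frame(state):
    """Append the newest state and return the stacked observation once four frames exist."""
    frame_stack.append(state)
    if len(frame_stack) == 4:
        return np.stack(frame_stack, axis=0)  # e.g. shape (4, H, W) for image frames
    return None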
Example #6
def main():
    ## arguments ##
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('-d', '--device', default='cuda')
    parser.add_argument(
        '--checkpoint',
        action='store_true',
        help='save model every epoch, ER buffer will only be saved as one file'
    )
    parser.add_argument('--logdir', default='log/dqn')
    # train
    parser.add_argument('--warmup', default=10000, type=int)
    parser.add_argument('--epochs', default=1200, type=int)
    parser.add_argument('--steps',
                        default=1000000,
                        type=int,
                        help='steps per epoch')
    parser.add_argument('--capacity', default=1000000, type=int)
    parser.add_argument('--batch_size', default=32, type=int)
    parser.add_argument('--lr', default=0.00001, type=float)
    parser.add_argument('--momentum', default=0.95, type=float)
    parser.add_argument('--epsilon',
                        default=1000000,
                        type=int,
                        help='decay steps from eps_max to eps_min')
    parser.add_argument('--eps_min', default=.1, type=float)
    parser.add_argument('--gamma', default=.99, type=float)
    parser.add_argument('--freq', default=4, type=int)
    parser.add_argument('--target_freq', default=10000, type=int)
    # test
    parser.add_argument('--test_only', action='store_true')
    parser.add_argument('--test_steps', type=int, default=500000)
    parser.add_argument('--render', action='store_true')
    parser.add_argument('--test_epsilon', default=0.05, type=float)
    # average
    parser.add_argument(
        '-k',
        '--k',
        type=int,
        default=1,
        help='number of average target network, if k = 1, perform vanilla DQN')
    # resume training
    parser.add_argument('--resume',
                        action='store_true',
                        help='resume training')
    parser.add_argument('-m', '--model', default=None, help='model path')
    # DDQN
    parser.add_argument('--ddqn',
                        action='store_true',
                        help='perform Double-DQN')

    args = parser.parse_args()

    ## main ##
    env = gym.make('BreakoutNoFrameskip-v4')

    # frame stack and preprocessing
    env = AtariPreprocessing(env, noop_max=30, frame_skip=4)
    env = FrameStack(env, 4)

    agent = DQN(args, env)
    #writer = SummaryWriter(args.logdir)
    if not args.test_only:
        train(args, env, agent)
        agent.save(args.model)
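
A hedged sketch of the exploration schedule the --epsilon and --eps_min arguments describe (a linear decay over args.epsilon steps); the helper name and the starting value of 1.0 are assumptions, since the DQN class itself is not shown:

def epsilon_by_step(step, eps_max=1.0, eps_min=0.1, decay_steps=1000000):
    """Linearly anneal epsilon from eps_max to eps_min over decay_steps, then hold it."""
    fraction = min(step / decay_steps, 1.0)
    return eps_max + fraction * (eps_min - eps_max)

# epsilon_by_step(0) == 1.0, epsilon_by_step(500000) == 0.55, epsilon_by_step(2000000) == 0.1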
Example #7
class Agent:
    """
    The intelligent agent of the simulation. Sets the neural network model used and the general parameters.
    It is responsible for selecting actions, optimizing the neural network and managing the models.
    """

    def __init__(self, action_set, train=True, load_path=None):
        #1. Initialize agent params
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.action_set = action_set
        self.action_number = len(action_set)
        self.steps_done = 0
        self.epsilon = Config.EPS_START
        self.episode_durations = []

        print('LOAD PATH    --  agent.init:', load_path)
        time.sleep(2)

        #2. Build networks
        self.policy_net = DQN().to(self.device)
        self.target_net = DQN().to(self.device)
        
        self.optimizer = optim.RMSprop(self.policy_net.parameters(), lr=Config.LEARNING_RATE)

        if not train:
            print('entered the not-train branch')
            self.optimizer = optim.RMSprop(self.policy_net.parameters(), lr=0)    
            self.policy_net.load(load_path, optimizer=self.optimizer)
            self.policy_net.eval()

        self.target_net.load_state_dict(self.policy_net.state_dict())
        self.target_net.eval()

        self.memory = ReplayMemory(1000)

        


    def select_action(self, state, train=True):
        """
        Select the best action according to the Q-values output by the neural network

        Parameters
        ----------
            state: float ndarray
                The current state on the simulation
            train: bool
                Whether we are training (True) or evaluating (False) the model
        Returns
        -------
            a.max(1)[1]: int
                The action with the highest Q-value
            a.max(0): float
                The Q-value of the action taken
        """
        global steps_done
        sample = random.random()
        #1. Perform an epsilon-greedy algorithm
        #a. set the value for epsilon
        self.epsilon = Config.EPS_END + (Config.EPS_START - Config.EPS_END) * \
            math.exp(-1. * self.steps_done / Config.EPS_DECAY)
            
        self.steps_done += 1

        #b. make the decision for selecting a random action or selecting an action from the neural network
        if sample > self.epsilon or (not train):
            # select an action from the neural network
            with torch.no_grad():
                # a <- argmax Q(s, theta)
                a = self.policy_net(state)
                return a.max(1)[1].view(1, 1), a.max(0)
        else:
            # select a random action
            print('random action')
            return torch.tensor([[random.randrange(2)]], device=self.device, dtype=torch.long), None

    def optimize_model(self):
        """
        Perform one step of optimization on the neural network
        """

        if len(self.memory) < Config.BATCH_SIZE:
            return
        transitions = self.memory.sample(Config.BATCH_SIZE)

        # Transpose the batch (see http://stackoverflow.com/a/19343/3343043 for detailed explanation).
        batch = Transition(*zip(*transitions))

        # Compute a mask of non-final states and concatenate the batch elements
        non_final_mask = torch.tensor(tuple(map(lambda s: s is not None,
                                              batch.next_state)), device=self.device, dtype=torch.bool)
        non_final_next_states = torch.cat([s for s in batch.next_state
                                                    if s is not None])
        
        state_batch = torch.cat(batch.state)
        action_batch = torch.cat(batch.action)
        reward_batch = torch.cat(batch.reward)
        
        # Compute Q(s_t, a) - the model computes Q(s_t), then we select the columns of actions taken
        state_action_values = self.policy_net(state_batch).gather(1, action_batch)
        
    
        # Compute argmax Q(s', a; θ)        
        next_state_actions = self.policy_net(non_final_next_states).max(1)[1].detach().unsqueeze(1)

        # Compute Q(s', argmax Q(s', a; θ), θ-)
        next_state_values = torch.zeros(Config.BATCH_SIZE, device=self.device)
        next_state_values[non_final_mask] = self.target_net(non_final_next_states).gather(1, next_state_actions).squeeze(1).detach()

        # Compute the expected Q values
        expected_state_action_values = (next_state_values * Config.GAMMA) + reward_batch


        # Compute Huber loss
        loss = F.smooth_l1_loss(state_action_values, expected_state_action_values.unsqueeze(1))
        
        # Optimize the model
        self.optimizer.zero_grad()
        loss.backward()
        for param in self.policy_net.parameters():
            param.grad.data.clamp_(-1, 1)
        self.optimizer.step()

    def save(self, step, logs_path, label):
        """
        Save the model on hard disc

        Parameters
        ----------
            step: int
                current step on the simulation
            logs_path: string
                path to where we will store the model
            label: string
                label that will be used to store the model
        """

        os.makedirs(os.path.join(logs_path, label), exist_ok=True)

        full_label = label + str(step) + '.pth'
        logs_path = os.path.join(logs_path, label, full_label)

        self.policy_net.save(logs_path, step=step, optimizer=self.optimizer)
    
    def restore(self, logs_path):
        """
        Load the model from hard disc

        Parameters
        ----------
            logs_path: string
                path from which the model will be loaded
        """
        self.policy_net.load(logs_path)
        self.target_net.load(logs_path)
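
This example uses a Transition namedtuple and a ReplayMemory class that are not shown; a minimal sketch of what they are assumed to look like (the field order matches the batch.state / batch.action / batch.next_state / batch.reward accesses above):

import random
from collections import namedtuple

Transition = namedtuple('Transition', ('state', 'action', 'next_state', 'reward'))

class ReplayMemory:
    """Fixed-size uniform replay memory of Transition tuples."""

    def __init__(self, capacity):
        self.capacity = capacity
        self.memory = []
        self.position = 0

    def push(self, *args):
        if len(self.memory) < self.capacity:
            self.memory.append(None)
        self.memory[self.position] = Transition(*args)
        self.position = (self.position + 1) % self.capacity  # overwrite the oldest entry

    def sample(self, batch_size):
        return random.sample(self.memory, batch_size)

    def __len__(self):
        return len(self.memory)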
Example #8
class Agent:
    """
    Class representing a learning agent acting in an environment.
    """
    def __init__(self,
                 buffer_size,
                 batch_size,
                 alpha,
                 gamma,
                 epsilon,
                 epsilon_min,
                 epsilon_decay,
                 lr,
                 game="CartPole-v1",
                 mean_bound=5,
                 reward_bound=495.0,
                 sync_model=1000,
                 save_model=10):
        """
        Constructor of the agent class.
            - game="CartPole-v1" : Name of the game environment
            - mean_bound=5 : Number of last acquired rewards considered for mean reward
            - reward_bound=495.0 : Reward acquired for completing an episode properly
            - sync_model=1000 : Interval for synchronizing model and target model
            - save_model=10 : Interval for saving model

            - buffer_size : Replay buffer size of the DQN model
            - batch_size : Batch size of the DQN model
            - alpha : Learning rate for Q-Learning
            - gamma : Discount factor for Q-Learning
            - epsilon : Threshold for taking a random action
            - epsilon_min : Minimal value allowed for epsilon
            - epsilon_decay : Decay rate for epsilon
            - lr : Learning rate for the DQN model
        """

        # Environment variables
        self.game = game
        self.env = gym.make(self.game)
        self.num_states = self.env.observation_space.shape[0]
        self.num_actions = self.env.action_space.n

        # Agent variables
        self.buffer_size = buffer_size
        self.batch_size = batch_size
        self.buffer = ReplayBuffer(self.buffer_size, self.batch_size)
        self.alpha = alpha
        self.gamma = gamma
        self.epsilon = epsilon
        self.epsilon_min = epsilon_min
        self.epsilon_decay = epsilon_decay
        self.mean_bound = mean_bound
        self.reward_bound = reward_bound

        # DQN variables
        self.lr = lr
        self.model = DQN(self.num_states, self.num_actions, self.lr)
        self.target_model = DQN(self.num_states, self.num_actions, self.lr)
        self.target_model.update(self.model)
        self.sync_model = sync_model
        self.save_model = save_model

        # File paths
        dirname = os.path.dirname(__file__)
        self.path_model = os.path.join(dirname, "../models/dqn.h5")
        self.path_plot = os.path.join(dirname, "../plots/dqn.png")

        # Load model, if it already exists
        try:
            self.model.load(self.path_model)
            self.target_model.update(self.model)
        except Exception:
            print("Model does not exist! Creating a new model...")

    def reduce_epsilon(self):
        """
        Reduces epsilon down to a given minimum value; the speed of decay is controlled by the epsilon_decay parameter.
        """

        epsilon = self.epsilon * self.epsilon_decay

        if epsilon >= self.epsilon_min:
            self.epsilon = epsilon
        else:
            self.epsilon = self.epsilon_min

    def get_action(self, state):
        """
        Returns an action for a given state, based on the current policy.
            - state : Current state of the agent
        """

        if np.random.random() < self.epsilon:
            action = self.env.action_space.sample()
        else:
            action = np.argmax(self.model.predict(state))

        return action

    def train(self, num_episodes, report_interval):
        """
        Trains the DQN model for a given number of episodes. Report output is controlled by the given episode interval.
            - num_episodes : Number of episodes to train
            - report_interval : Interval for outputting report information of training
        """

        step = 0
        total_rewards = []

        for episode in range(1, num_episodes + 1):
            if episode % self.save_model == 0:
                self.model.save(self.path_model)

            state = self.env.reset()
            state = state.reshape((1, self.num_states))
            total_reward = 0.0

            while True:
                step += 1

                action = self.get_action(state)
                next_state, reward, done, _ = self.env.step(action)
                next_state = next_state.reshape((1, self.num_states))

                # Penalize the agent if the pole could not be balanced until the end of the episode
                if done and reward < 499.0:
                    reward = -100.0

                self.buffer.remember(state, action, reward, next_state, done)
                self.replay()
                self.reduce_epsilon()

                state = next_state
                total_reward += reward

                if step % self.sync_model == 0:
                    self.target_model.update(self.model)

                if done:
                    total_reward += 100.0
                    total_rewards.append(total_reward)
                    mean_reward = np.mean(total_rewards[-self.mean_bound:])

                    if episode % report_interval == 0:
                        print(f"Episode: {episode}/{num_episodes}"
                              f"\tStep: {step}"
                              f"\tMemory Size: {len(self.memory)}"
                              f"\tEpsilon: {self.epsilon : .3f}"
                              f"\tReward: {total_reward}"
                              f"\tLast 5 Mean: {mean_reward : .2f}")

                        self.plot_rewards(total_rewards)

                    if mean_reward > self.reward_bound:
                        self.model.save(self.path_model)
                        return

                    break

        self.model.save(self.path_model)

    def replay(self):
        """
        Samples training data from the replay buffer and fits the DQN model.
        """

        sample_size, states, actions, rewards, next_states, dones = self.buffer.sample()

        q_values = self.model.predict(states)
        next_q_values = self.target_model.predict(next_states)

        for i in range(sample_size):
            action = actions[i]
            done = dones[i]

            if done:
                q_target = rewards[i]
            else:
                q_target = rewards[i] + self.gamma * np.max(next_q_values[i])

            q_values[i][action] = (1 - self.alpha) * \
                q_values[i][action] + self.alpha * q_target

        self.model.fit(states, q_values)

    def play(self, num_episodes):
        """
        Renders the trained agent for a given number of episodes.
            - num_episodes : Number of episodes to render
        """

        self.epsilon = self.epsilon_min

        for episode in range(1, num_episodes + 1):
            state = self.env.reset()
            state = state.reshape((1, self.num_states))
            total_reward = 0.0

            while True:
                self.env.render()
                action = self.get_action(state)
                next_state, reward, done, _ = self.env.step(action)
                next_state = next_state.reshape((1, self.num_states))
                state = next_state
                total_reward += reward

                if done:
                    print(f"Episode: {episode}/{num_episodes}"
                          f"\tTotal Reward: {total_reward : .2f}")

                    break

    def plot_rewards(self, total_rewards):
        """
        Plots the rewards the agent has acquired during training.
            - total_rewards : Rewards the agent has gained per episode
        """

        x = np.arange(len(total_rewards))
        y = total_rewards

        slope, intercept, _, _, _ = linregress(x, y)

        plt.plot(x, y, linewidth=0.8)
        plt.plot(x, slope * x + intercept, color="red", linestyle="-.")
        plt.xlabel("Episode")
        plt.ylabel("Reward")
        plt.title("DQN-Learning")
        plt.savefig(self.path_plot)
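
Agent.replay() expects ReplayBuffer.sample() to return (sample_size, states, actions, rewards, next_states, dones) as batched arrays; a minimal sketch of that assumed interface (the remember()/sample() names follow the calls above, the internals are illustrative):

import random
from collections import deque

import numpy as np

class ReplayBuffer:
    """Uniform replay buffer matching the remember()/sample() calls used by the agent."""

    def __init__(self, buffer_size, batch_size):
        self.buffer = deque(maxlen=buffer_size)
        self.batch_size = batch_size

    def remember(self, state, action, reward, next_state, done):
        self.buffer.append((state, action, reward, next_state, done))

    def sample(self):
        sample_size = min(len(self.buffer), self.batch_size)
        batch = random.sample(self.buffer, sample_size)
        states, actions, rewards, next_states, dones = zip(*batch)
        # states were stored with shape (1, num_states); stack them into (sample_size, num_states)
        return (sample_size,
                np.concatenate(states),
                np.array(actions),
                np.array(rewards),
                np.concatenate(next_states),
                np.array(dones))

    def __len__(self):
        return len(self.buffer)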
     s_[3])
 # accumulate the episode reward
 reward_counter += r
 if do_train:
     # store the transition in memory
     dqn.store(s, a, r, done, s_)
     # train network
     losses = dqn.train_net()
 s = s_
 # increment the frame counter
 last_frame_counter += 1
 if last_frame_counter >= 200:
     done = True
 # save the model once the save condition is met
 if do_save and do_train:
     dqn.save(save_path)
     do_save = False
 # when the episode ends, print its stats and decide whether to save the model
 if done:
     mean_array[mean_counter % mean_size] = reward_counter
     mean_counter += 1
     # once the mean buffer is full and the mean reward exceeds the target, enable rendering and model saving
     if mean_counter > mean_size and np.mean(
             mean_array) > target_reward and do_train:
         render_show = True
         do_save = True
     # record and print the episode data
     epilist.append(episode)
     meanrdlist.append(np.mean(mean_array))
     rewardlist.append(reward_counter)
     print(
                #     the -abs term encourages the agent not to move the cart;
                #     staying near the middle yields a high reward.
                # 2.  r2 is based on the pole angle:
                #     the smaller the angle from vertical (i.e. the more upright the pole),
                #     the higher the reward.
                # Finally, 0.5 is subtracted so the reward falls roughly within [-1, 1].
                # ----------------------------------------------------
                x, x_dot, theta, theta_dot = s_
                r1 = (env.x_threshold - abs(x)) / env.x_threshold - 0.8
                r2 = (env.theta_threshold_radians -
                      abs(theta)) / env.theta_threshold_radians - 0.5
                r = r1 + r2

            # Store the transition and update the network
            net.store_path(s, a, r, s_)
            net.update()

            # Check whether the episode is finished (and decay epsilon)
            if finish:
                print("Episode: %d \t Total reward: %d \t Eps: %f" %
                      (i, total_reward, net.epsilon))
                reward_list.append(total_reward)
                if net.epsilon > 0.01:
                    net.episodeDecay()
                break

            # Update the current state as the future state of previous state
            s = s_
    net.save()
    plt.plot(range(len(reward_list)), reward_list, '-')
    plt.show()
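
A quick sanity check of the shaped CartPole reward above (the threshold values below are the standard CartPole ones and are assumed here): with the cart centred and the pole upright both fractions equal 1, so r = 0.2 + 0.5 = 0.7; at the position and angle limits both fractions drop to 0, giving r = -0.8 - 0.5 = -1.3.

# numeric check of the shaped reward, with assumed CartPole thresholds
x_threshold, theta_threshold = 2.4, 0.2095
for x, theta in [(0.0, 0.0), (2.4, 0.2095)]:
    r1 = (x_threshold - abs(x)) / x_threshold - 0.8
    r2 = (theta_threshold - abs(theta)) / theta_threshold - 0.5
    print(round(r1 + r2, 2))  # 0.7 when centred and upright, -1.3 at the limits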
Example #11
    targets[1] = [0] * len(rewards[1])
    targets[1][-1] = float(rewards[1][-1])
    for i in reversed(range(len(rewards[1]) - 1)):
        targets[1][i] = float(lam * vals[1][i + 1])

    targets[2] = [0] * len(rewards[2])
    targets[2][-1] = float(rewards[2][-1])
    for i in reversed(range(len(rewards[2]) - 1)):
        targets[2][i] = float(lam * vals[2][i + 1])

    boards = np.array(boards[1] + boards[2])
    targets = np.expand_dims(np.array(targets[1] + targets[2]), 1)

    model.train(boards, targets)

    if games % EPOCH == 0:
        gamma *= .99
        win_rate.append(test_against_random(model))

        debug_run(model)

    if games % TEST_FRQ == 0:
        print(win_rate)
        plt.plot(win_rate)
        plt.ylabel('Winning Percentage')
        plt.xlabel('Epochs')
        plt.show()
        model.save('./temp/')
        exit()
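
A small worked example of the backward target fill above, with illustrative numbers: the last position keeps its game outcome, and every earlier position bootstraps from the value estimate of the position that follows it.

lam = 0.5
rewards = [0, 0, 1]        # only the final board carries the game outcome
vals = [0.2, 0.5, 0.9]     # value estimates for each board position
targets = [0.0] * len(rewards)
targets[-1] = float(rewards[-1])
for i in reversed(range(len(rewards) - 1)):
    targets[i] = float(lam * vals[i + 1])
print(targets)  # [0.25, 0.45, 1.0]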
Example #12
def main(opt):
    dqn = DQN(NUM_STATES, NUM_ACTIONS, opt.eps, opt)
    episodes = opt.num_episodes
    print("Collecting Experience....")
    reward_list = []
    reward_list_mean = []
    win_list = []
    win_list_mean = []
    test_reward_rand = []
    test_win_rand = []
    test_reward_max = []
    test_win_max = []
    test_reward_self = []
    test_win_self = []
    test_reward_exact = []
    test_win_exact = []
    plt.ion()
    fig, ax = plt.subplots()
    fig2, ax2 = plt.subplots()
    fig3, ax3 = plt.subplots()

    for i in range(episodes):
        dqn.ep_decay(episodes, i)
        ep_reward, ep_win = train_ep(dqn, opt.opp_policy)

        r = copy.copy(ep_reward)
        reward_list.append(r)
        reward_list_mean.append(np.mean(reward_list[-10:]))
        win_list.append(ep_win)
        win_list_mean.append(np.mean(win_list[-10:]))
        ax.set_xlim(0, episodes)
        print(
            "episode: {} , the episode reward is {}, average of last 10 eps is {}, win = {}, win_mean = {}"
            .format(i, ep_reward, reward_list_mean[-1], win_list[-1],
                    win_list_mean[-1]))
        if i % 100 == 99 or i == 0:
            if i < episodes - 50:
                test_epsilon = 0.05
            else:
                test_epsilon = 1e-2
            t_reward_rand, t_win_rand = test_ep(dqn, 'random', opt.num_test,
                                                test_epsilon)
            t_reward_max, t_win_max = test_ep(dqn, 'max', opt.num_test,
                                              test_epsilon)
            t_reward_self, t_win_self = test_ep(dqn, 'self', opt.num_test,
                                                test_epsilon)
            t_reward_exact, t_win_exact = test_ep(dqn, 'exact', opt.num_test,
                                                  test_epsilon)
            # t_reward,t_win = test_ep(dqn, 'max', opt.num_test)
            print('[random policy] test: {}, test_reward: {}, test_win: {}'.
                  format(i, t_reward_rand, t_win_rand))
            print(
                '[max policy] test: {}, test_reward: {}, test_win: {}'.format(
                    i, t_reward_max, t_win_max))
            print(
                '[self policy] test: {}, test_reward: {}, test_win: {}'.format(
                    i, t_reward_self, t_win_self))
            print('[exact policy] test: {}, test_reward: {}, test_win: {}'.
                  format(i, t_reward_exact, t_win_exact))
            test_reward_rand.append(t_reward_rand)
            test_win_rand.append(t_win_rand * 100)
            test_reward_max.append(t_reward_max)
            test_win_max.append(t_win_max * 100)
            test_reward_self.append(t_reward_self)
            test_win_self.append(t_win_self * 100)
            test_reward_exact.append(t_reward_exact)
            test_win_exact.append(t_win_exact * 100)

            # SAVE
            s = os.path.join(opt.save_path, 'p1-' + str(i).zfill(5))
            print("saving model at episode %i in save_path=%s" % (i, s))
            dqn.save(s + '.pth')
            np.savez(s + '-test_reward_rand', test_reward_rand)
            np.savez(s + '-test_win_rand', test_win_rand)
            np.savez(s + '-test_reward_max', test_reward_max)
            np.savez(s + '-test_win_max', test_win_max)
            np.savez(s + '-test_reward_self', test_reward_self)
            np.savez(s + '-test_win_self', test_win_self)
            np.savez(s + '-test_reward_exact', test_reward_exact)
            np.savez(s + '-test_win_exact', test_win_exact)
            np.savez(s + '-train_reward', reward_list)
            np.savez(s + '-train_reward_mean', reward_list_mean)

            # PLOT
            fig.suptitle('[Train] Score over Number of Episodes')
            ax.set_xlabel('Number of Training Episodes')
            ax.set_ylabel('Score')
            ax.plot(reward_list, 'g-', label='current')
            ax.plot(reward_list_mean, 'r-', label='mean of last 10')
            # ax.plot(np.array(win_list_mean)*100, 'b-', label='win_rate')

            fig2.suptitle('[Test] Score over Number of Episodes')
            ax2.set_xlabel('Number of Training Episodes (Hundreds)')
            ax2.set_ylabel('Mean Score')
            ax2.plot(test_reward_rand, 'r-', label='vs random policy')
            ax2.plot(test_reward_max, 'g-', label='vs max policy')
            ax2.plot(test_reward_self, 'b-', label='vs self policy')
            ax2.plot(test_reward_exact, 'y-', label='vs exact policy')

            fig3.suptitle('[Test] Win rate of agent')
            ax3.set_xlabel('Number of Training Episodes (Hundreds)')
            ax3.set_ylabel('Win rate (%)')
            ax3.plot(test_win_rand, 'r-', label='vs random policy')
            ax3.plot(test_win_max, 'g-', label='vs max policy')
            ax3.plot(test_win_self, 'b-', label='vs self policy')
            ax3.plot(test_win_exact, 'y-', label='vs exact policy')
            plt.pause(0.001)

            if i == 0:
                fig.legend(loc='lower right', bbox_to_anchor=(0.9, 0.11))
                fig2.legend(loc='lower right', bbox_to_anchor=(0.9, 0.11))
                fig3.legend(loc='lower right', bbox_to_anchor=(0.9, 0.11))

    fig.savefig(os.path.join(opt.save_path, 'p1-train-rewards.png'))
    fig2.savefig(os.path.join(opt.save_path, 'p1-test-rewards.png'))
    fig3.savefig(os.path.join(opt.save_path, 'p1-win-rates.png'))

    print('vs. random policy')
    _, _ = test_ep(dqn, 'random', 1, 1e-2, render=True)
    print('vs. max policy')
    _, _ = test_ep(dqn, 'max', 1, 1e-2, render=True)
    print('vs. self')
    _, _ = test_ep(dqn, 'self', 1, 1e-2, render=True)
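
The metric arrays saved with np.savez above can be read back later with np.load; a minimal sketch (the checkpoint file name is illustrative):

import numpy as np

# np.savez stores a single positional array under the key 'arr_0'
data = np.load('p1-00099-train_reward.npz')
train_reward = data['arr_0']
print(train_reward.shape, train_reward[:5])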