Example no. 1
def dqn_learing(env,
                q_func,
                optimizer_spec,
                exploration,
                stopping_criterion=None,
                replay_buffer_size=1000000,
                batch_size=32,
                gamma=0.99,
                learning_starts=50000,
                learning_freq=4,
                frame_history_len=4,
                target_update_freq=10000):
    """Run Deep Q-learning algorithm.

    You can specify your own convnet using q_func.

    All schedules are w.r.t. total number of steps taken in the environment.

    Parameters
    ----------
    env: gym.Env
        gym environment to train on.
    q_func: function
        Model to use for computing the q function. It should accept the
        following named arguments:
            input_channel: int
                number of channels in the input.
            num_actions: int
                number of actions
    optimizer_spec: OptimizerSpec
        Specifying the constructor and kwargs, as well as learning rate schedule
        for the optimizer
    exploration: Schedule (defined in utils.schedule)
        schedule for the probability of choosing a random action.
    stopping_criterion: (env) -> bool
        should return True when it is OK for the RL algorithm to stop;
        it is called with the env (see the check at the top of the training loop).
    replay_buffer_size: int
        How many memories to store in the replay buffer.
    batch_size: int
        How many transitions to sample each time experience is replayed.
    gamma: float
        Discount Factor
    learning_starts: int
        After how many environment steps to start replaying experiences
    learning_freq: int
        How many steps of environment to take between every experience replay
    frame_history_len: int
        How many past frames to include as input to the model.
    target_update_freq: int
        How many experience replay rounds (not steps!) to perform between
        each update to the target Q network
    """
    assert type(env.observation_space) == gym.spaces.Box
    assert type(env.action_space) == gym.spaces.Discrete

    ###############
    # BUILD MODEL #
    ###############

    if len(env.observation_space.shape) == 1:
        # This means we are running on low-dimensional observations (e.g. RAM)
        input_arg = env.observation_space.shape[0]
    else:
        img_h, img_w, img_c = env.observation_space.shape
        input_arg = frame_history_len * img_c
    num_actions = env.action_space.n

    # Construct an epsilon-greedy policy with the given exploration schedule
    def select_epilson_greedy_action(model, obs, t):
        sample = random.random()
        eps_threshold = exploration.value(t)
        if sample > eps_threshold:
            obs = torch.from_numpy(obs).type(dtype).unsqueeze(0) / 255.0
            # Use volatile = True if variable is only used in inference mode, i.e. don’t save the history
            return model(Variable(obs, volatile=True)).data.max(1)[1].cpu()
        else:
            return torch.IntTensor([[random.randrange(num_actions)]])

    # Initialize target q function and q function, i.e. build the model.
    ######

    model = DQN(in_channels=input_arg, num_actions=num_actions)
    target_Q = DQN(in_channels=input_arg, num_actions=num_actions)

    if USE_CUDA:
        target_Q = target_Q.cuda()
        model = model.cuda()

    ######

    # Construct Q network optimizer function
    optimizer = optimizer_spec.constructor(model.parameters(),
                                           **optimizer_spec.kwargs)

    # Construct the replay buffer
    replay_buffer = ReplayBuffer(replay_buffer_size, frame_history_len)

    ###############
    # RUN ENV     #
    ###############
    num_param_updates = 0
    mean_episode_reward = -float('nan')
    best_mean_episode_reward = -float('inf')
    last_obs = env.reset()
    LOG_EVERY_N_STEPS = 10000

    for t in count():
        ### 1. Check stopping criterion
        if stopping_criterion is not None and stopping_criterion(env):
            break

        ### 2. Step the env and store the transition
        # At this point, "last_obs" contains the latest observation that was
        # recorded from the simulator. Here, your code needs to store this
        # observation and its outcome (reward, next observation, etc.) into
        # the replay buffer while stepping the simulator forward one step.
        # At the end of this block of code, the simulator should have been
        # advanced one step, and the replay buffer should contain one more
        # transition.
        # Specifically, last_obs must point to the new latest observation.
        # Useful functions you'll need to call:
        # obs, reward, done, info = env.step(action)
        # this steps the environment forward one step
        # obs = env.reset()
        # this resets the environment if you reached an episode boundary.
        # Don't forget to call env.reset() to get a new observation if done
        # is true!!
        # Note that you cannot use "last_obs" directly as input
        # into your network, since it needs to be processed to include context
        # from previous frames. You should check out the replay buffer
        # implementation in dqn_utils.py to see what functionality the replay
        # buffer exposes. The replay buffer has a function called
        # encode_recent_observation that will take the latest observation
        # that you pushed into the buffer and compute the corresponding
        # input that should be given to a Q network by appending some
        # previous frames.
        # Don't forget to include epsilon greedy exploration!
        # And remember that the first time you enter this loop, the model
        # may not yet have been initialized (but of course, the first step
        # might as well be random, since you haven't trained your net...)
        ##### OUR CODE
        idx = replay_buffer.store_frame(last_obs)
        encoded_obs = replay_buffer.encode_recent_observation()

        if t > learning_starts:
            action = select_epilson_greedy_action(model, encoded_obs, t)[0]
        else:
            action = random.randrange(num_actions)

        obs, reward, done, info = env.step(action)
        reward = max(-1.0, min(reward, 1.0))

        replay_buffer.store_effect(idx, action, reward, done)
        if done:
            obs = env.reset()
        last_obs = obs
        #####

        # at this point, the environment should have been advanced one step (and
        # reset if done was true), and last_obs should point to the new latest
        # observation

        ### 3. Perform experience replay and train the network.
        # Note that this is only done if the replay buffer contains enough samples
        # for us to learn something useful -- until then, the model will not be
        # initialized and random actions should be taken
        if (t > learning_starts and t % learning_freq == 0
                and replay_buffer.can_sample(batch_size)):

            # Here, you should perform training. Training consists of four steps:
            # 3.a: use the replay buffer to sample a batch of transitions (see the
            # replay buffer code for function definition, each batch that you sample
            # should consist of current observations, current actions, rewards,
            # next observations, and done indicator).
            # Note: move the variables to the GPU if available

            obs_batch, act_batch, rew_batch, next_obs_batch, done_mask = replay_buffer.sample(
                batch_size)

            obs_batch = Variable(
                torch.from_numpy(obs_batch).type(dtype) / 255.0)

            next_obs_batch = Variable(
                torch.from_numpy(next_obs_batch).type(dtype) / 255.0)
            act_batch = Variable(
                torch.Tensor(act_batch).type(torch.LongTensor))
            rew_batch = Variable(torch.from_numpy(rew_batch))

            done_mask = Variable(
                torch.Tensor([1. if val == 0 else 0. for val in done_mask]))
            if USE_CUDA:
                done_mask = done_mask.cuda()
                act_batch = act_batch.cuda()
                rew_batch = rew_batch.cuda()
                obs_batch = obs_batch.cuda()
                next_obs_batch = next_obs_batch.cuda()

            # 3.b: fill in your own code to compute the Bellman error. This requires
            # evaluating the current and next Q-values and constructing the corresponding error.
            # Note: don't forget to clip the error to [-1, 1], multiply it by -1 (since PyTorch minimizes) and
            #       mask out post-terminal Q-values (see ReplayBuffer code).

            # We choose Q based on action taken.
            current_Q_values = model(obs_batch).gather(
                1, act_batch.unsqueeze(1))  #[0, act_batch]
            # Obtain max_a' Q_target(s', a') and set the target value for the chosen action using the Bellman equation.
            next_max_q = target_Q(next_obs_batch).detach().max(1)[0]
            next_Q_values = torch.mul(done_mask, next_max_q)

            target_Q_values = rew_batch + (gamma * next_Q_values)
            if USE_CUDA:
                target_Q_values = target_Q_values.cuda()
            d_error = target_Q_values.unsqueeze(1) - current_Q_values
            d_error = d_error.clamp(-1, 1) * -1

            # 3.c: train the model. To do this, use the Bellman error you calculated previously.
            # PyTorch will differentiate this error for you; to backpropagate the error use the following API:
            #       current.backward(d_error.data.unsqueeze(1))
            # Where "current" is the variable holding current Q Values and d_error is the clipped bellman error.
            # Your code should produce one scalar-valued tensor.
            # Note: don't forget to call optimizer.zero_grad() before the backward call and
            #       optimizer.step() after the backward call.

            optimizer.zero_grad()
            current_Q_values.backward(d_error)
            optimizer.step()
            num_param_updates += 1

            # 3.d: periodically update the target network by loading the current Q network weights into the
            #      target_Q network. see state_dict() and load_state_dict() methods.
            #      you should update every target_update_freq steps, and you may find the
            #      variable num_param_updates useful for this (it was initialized to 0)
            #####
            if num_param_updates % target_update_freq == 0:
                target_Q.load_state_dict(model.state_dict())

            # YOUR CODE HERE

            #####

        ### 4. Log progress and keep track of statistics
        episode_rewards = get_wrapper_by_name(env,
                                              "Monitor").get_episode_rewards()
        if len(episode_rewards) > 0:
            mean_episode_reward = np.mean(episode_rewards[-100:])
            if hasattr(exploration, 'add_reward'):
                exploration.add_reward(episode_rewards)
        if len(episode_rewards) > 100:
            best_mean_episode_reward = max(best_mean_episode_reward,
                                           mean_episode_reward)

        Statistic["mean_episode_rewards"].append(mean_episode_reward)
        Statistic["best_mean_episode_rewards"].append(best_mean_episode_reward)

        if t % LOG_EVERY_N_STEPS == 0 and t > learning_starts:
            print("Timestep %d" % (t, ))
            print("mean reward (100 episodes) %f" % mean_episode_reward)
            print("best mean reward %f" % best_mean_episode_reward)
            print("episodes %d" % len(episode_rewards))
            print("exploration %f" % exploration.value(t))
            sys.stdout.flush()

            # Dump statistics to pickle
            with open('statistics.pkl', 'wb') as f:
                pickle.dump(Statistic, f)
                print("Saved to %s" % 'statistics.pkl')
Example no. 2
def dqn_eval(env, scheduler, optimizer_constructor, model_type, batch_size, rp_start, rp_size, 
	exp_frame, exp_initial, exp_final, gamma, target_update_steps, frames_per_epoch, 
	frames_per_state, output_directory, last_checkpoint, envo):
	
	env.seed(7)
	random.seed(7)
	gym.undo_logger_setup()
	logging.basicConfig(filename=envo+'_'+model_type+'_eval.log',level=logging.INFO)
	num_actions = env.action_space.n
	
	print('No. of actions: ', num_actions)
	print(env.unwrapped.get_action_meanings())

	# initialize action value and target network with the same weights
	model = DQN(num_actions, use_bn=False)

	if use_cuda:
		model.cuda()

	saved_params = None
	directory = None
	index = []

	for (dirpath, dirnames, filenames) in os.walk(output_directory):
		directory = dirpath
		saved_params = filenames

	count = 0 
	counter = 0

	chckpoint_index = get_index_from_checkpoint_path(last_checkpoint)

	for x in saved_params:
		temp = get_index_from_checkpoint_file(x)
		if temp > chckpoint_index:
			index.append(temp)

	index = sorted(index, key=int)

	for w in index:

		path = directory + '/' + model_type + '_weights_' + str(w) + '.pth'
		model.load_state_dict(torch.load(path))
		print(path)
		print('saved weights loaded...')

		eval_epsilon = 0.05
		env.reset()
		total_reward = []
		rewards_per_episode = 0

		action_value = torch.zeros(num_actions)

		current_state, _, _, _ = play_game(env, frames_per_state, action=0, evaluate=True)

		average_action = {k: [] for k in range(num_actions)}

		for i in range(NUM_GAMES):
			for frame in range(int(MAX_FRAMES_PER_GAME/frames_per_state)):

				eval_choice = random.uniform(0,1)
	
				# epsilon greedy algorithm
				if eval_choice <= eval_epsilon:
					action = LongTensor([[random.randrange(num_actions)]])

				else:
					action = get_greedy_action(model, current_state)

				curr_obs, reward, done, _ = play_game(env, frames_per_state, action[0][0], evaluate=True)

				average_action[action[0,0]].append(get_Q_value(model, action.view(1,1), curr_obs))

				current_state = curr_obs
				rewards_per_episode += reward

				if done:
					env.reset()
					total_reward.append(rewards_per_episode)
					rewards_per_episode = 0
					current_state, _, _, _ = play_game(env, frames_per_state, action=0, evaluate=True)
					break

		average_reward = sum(total_reward)/float(len(total_reward))

		total_action = 0
		for i in range(num_actions):
			total_action += sum(average_action[i])/len(average_action[i])

		average_action_value = total_action/num_actions

		#Compute Standard Deviation
		diff = 0
		for x in total_reward:
			diff += (x - average_reward)*(x - average_reward)
		var = diff/len(total_reward)
		std_dev = math.sqrt(var)

		eval_content = 'Average Score: ', average_reward
		eval_std_dev = 'Standard Deviation: ', std_dev
		average_action_value_content = 'Average Action Value: ', average_action_value
		print(average_action_value_content)
		print(eval_content)
		print(eval_std_dev)
		log_content = path + ' ' + str(average_reward) + ' ' + str(average_action_value) + ' ' + str(std_dev)
		logging.info(log_content)

		count += 1

	print(count)
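
dqn_eval orders the saved checkpoints with get_index_from_checkpoint_path and get_index_from_checkpoint_file, which are defined elsewhere. A minimal sketch of what they might do, assuming the <model_type>_weights_<frame>.pth naming used above (these implementations are guesses, not the project's code):

import os
import re

def get_index_from_checkpoint_file(filename):
    # Extract the trailing frame count from e.g. 'DQN_weights_250000.pth'.
    match = re.search(r'(\d+)\.pth$', filename)
    return int(match.group(1)) if match else -1

def get_index_from_checkpoint_path(path):
    # Same extraction applied to a full path.
    return get_index_from_checkpoint_file(os.path.basename(path))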
class Agent_DQN():
    def __init__(self, env, args):
        # Parameters for q-learning

        super(Agent_DQN, self).__init__()

        self.env = env
        state = env.reset()
        state = state.transpose(2, 0, 1)

        self.policy_net = DQN(state.shape,
                              self.env.action_space.n)  # Behavior Q
        self.target_net = DQN(state.shape, self.env.action_space.n)  # Target Q
        self.target_net.load_state_dict(self.policy_net.state_dict())
        #Initial Q

        if USE_CUDA:
            print("Using CUDA . . .     ")
            self.policy_net = self.policy_net.cuda()
            self.target_net = self.target_net.cuda()

        print('hyperparameters and network initialized')

        if args.test_dqn or LOAD == True:
            print('loading trained model')
            checkpoint = torch.load('trainData')
            self.policy_net.load_state_dict(checkpoint['model_state_dict'])

        self.target_net.load_state_dict(self.policy_net.state_dict())

    def init_game_setting(self):
        print('loading trained model')
        checkpoint = torch.load('trainData')
        self.policy_net.load_state_dict(checkpoint['model_state_dict'])

    def push(self, state, action, reward, next_state, done):
        state = np.expand_dims(state, 0)
        next_state = np.expand_dims(next_state, 0)

        memory.append((state, action, reward, next_state, done))

    def replay_buffer(self):
        state, action, reward, next_state, done = zip(
            *random.sample(memory, batch_size))
        return np.concatenate(state), action, reward, np.concatenate(
            next_state), done

    def __len__(self):
        # transitions are kept in the module-level deque `memory`
        return len(memory)

    def make_action(self, observation, test=True):

        observation = observation.transpose(2, 0, 1)

        if np.random.random() > EPSILON or test == True:
            observation = Variable(torch.FloatTensor(
                np.float32(observation)).unsqueeze(0),
                                   volatile=True)
            q_value = self.policy_net.forward(observation)
            action = q_value.max(1)[1].data[0]
            action = int(action.item())
        else:
            action = random.randrange(4)
        return action

    def optimize_model(self):

        states, actions, rewards, next_states, dones = self.replay_buffer()

        states_v = Variable(torch.FloatTensor(np.float32(states)))
        next_states_v = Variable(torch.FloatTensor(np.float32(next_states)),
                                 volatile=True)
        actions_v = Variable(torch.LongTensor(actions))
        rewards_v = Variable(torch.FloatTensor(rewards))
        done = Variable(torch.FloatTensor(dones))

        state_action_values = self.policy_net(states_v).gather(
            1, actions_v.unsqueeze(1)).squeeze(1)
        next_state_values = self.target_net(next_states_v).max(1)[0]
        expected_q_value = rewards_v + next_state_values * GAMMA * (
            1 - done)  #+ rewards_v

        loss = (state_action_values -
                Variable(expected_q_value.data)).pow(2).mean()
        return loss

    def train(self):
        global EPSILON  # EPSILON is read by make_action, so update the module-level value
        optimizer = optim.Adam(self.policy_net.parameters(), lr=ALPHA)

        # Fill the memory with experiences
        print('Gathering experiences ...')
        meanScore = 0
        AvgRewards = []
        AllScores = []
        step = 1
        iEpisode = 0

        while meanScore < 50:

            state = self.env.reset()
            done = False
            EpisodeScore = 0
            tBegin = time.time()
            done = False

            while not done:

                action = self.make_action(state, test=False)
                nextState, reward, done, _ = self.env.step(action)
                self.push(state.transpose(2, 0, 1), action, reward,
                          nextState.transpose(2, 0, 1), done)

                state = nextState

                if len(memory) > StartLearning:
                    loss = self.optimize_model()
                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()
                else:
                    iEpisode = 0
                    continue

                # Update exploration factor
                EPSILON = EPS_END + (EPS_START - EPS_END) * math.exp(
                    -1. * step / EPS_DECAY)
                storeEpsilon.append(EPSILON)
                step += 1

                EpisodeScore += reward

                if step % TARGET_UPDATE == 0:
                    print('Updating Target Network . . .')
                    self.target_net.load_state_dict(
                        self.policy_net.state_dict())

            iEpisode += 1
            AllScores.append(EpisodeScore)
            meanScore = np.mean(AllScores[-100:])
            AvgRewards.append(meanScore)

            if len(memory) > StartLearning:
                print('Episode: ', iEpisode, ' score:', EpisodeScore,
                      ' Avg Score:', meanScore, ' epsilon: ', EPSILON, ' t: ',
                      time.time() - tBegin, ' loss:', loss.item())
            else:
                print('Gathering Data . . .')

            if iEpisode % 500 == 0:
                torch.save(
                    {
                        'epoch': iEpisode,
                        'model_state_dict': self.policy_net.state_dict(),
                        'optimizer_state_dict': optimizer.state_dict(),
                        'loss': loss,
                        'AvgRewards': AvgRewards
                    }, 'trainData')

                os.remove("Rewards.csv")
                with open('Rewards.csv', mode='w') as dataFile:
                    rewardwriter = csv.writer(dataFile,
                                              delimiter=',',
                                              quotechar='"',
                                              quoting=csv.QUOTE_MINIMAL)
                    rewardwriter.writerow(AvgRewards)

        print('======== Complete ========')
        torch.save(
            {
                'epoch': iEpisode,
                'model_state_dict': self.policy_net.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'loss': loss,
                'AvgRewards': AvgRewards
            }, 'trainData')

        with open('Rewards.csv', mode='w') as dataFile:
            rewardwriter = csv.writer(dataFile,
                                      delimiter=',',
                                      quotechar='"',
                                      quoting=csv.QUOTE_MINIMAL)
            rewardwriter.writerow(AvgRewards)
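
This Agent_DQN reads several module-level names (memory, batch_size, GAMMA, ALPHA, EPSILON and its schedule, StartLearning, TARGET_UPDATE, storeEpsilon, USE_CUDA, LOAD) that are not part of the snippet. A plausible setup is sketched below; the values are illustrative assumptions only.

from collections import deque

import torch

USE_CUDA = torch.cuda.is_available()
LOAD = False                    # set True to resume from the 'trainData' checkpoint
batch_size = 32
GAMMA = 0.99
ALPHA = 1e-4                    # Adam learning rate
EPS_START, EPS_END, EPS_DECAY = 1.0, 0.02, 100000
EPSILON = EPS_START             # updated by train(), read by make_action()
TARGET_UPDATE = 5000            # steps between target-network syncs
StartLearning = 5000            # transitions to collect before optimization begins
memory = deque(maxlen=100000)   # global replay buffer used by push()/replay_buffer()
storeEpsilon = []               # epsilon value per step, for plotting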
Example no. 4
def ddqn_train(env, scheduler, optimizer_constructor, model_type, batch_size,
               rp_start, rp_size, exp_frame, exp_initial, exp_final, gamma,
               target_update_steps, frames_per_epoch, frames_per_state,
               output_directory, last_checkpoint, envo):
    """
	Implementation of the training algorithm for DDQN. 
	"""

    gym.undo_logger_setup()
    logging.basicConfig(filename=envo + '_' + model_type + 'ddqn_training.log',
                        level=logging.INFO)
    num_actions = env.action_space.n
    env.reset()

    print('No. of actions: ', num_actions)
    print(env.unwrapped.get_action_meanings())

    # initialize action value and target network with the same weights
    model = DQN(num_actions, use_bn=False)
    target = DQN(num_actions, use_bn=False)

    if use_cuda:
        model.cuda()
        target.cuda()

    exp_replay = None
    episodes_count = 1

    if last_checkpoint:
        model.load_state_dict(torch.load(last_checkpoint))
        print(last_checkpoint)
        print('weights loaded...')

        exp_replay = initialize_replay_resume(env, rp_start, rp_size,
                                              frames_per_state, model)
        episodes_count = get_index_from_checkpoint_path(last_checkpoint)

    else:
        exp_replay = initialize_replay(env, rp_start, rp_size,
                                       frames_per_state)

    target.load_state_dict(model.state_dict())

    # scheduler = Scheduler(exp_frame, exp_initial, exp_final)
    optimizer = optimizer_constructor.type(
        model.parameters(),
        lr=optimizer_constructor.kwargs['lr'],
        alpha=optimizer_constructor.kwargs['alpha'],
        eps=optimizer_constructor.kwargs['eps'])

    frames_count = 1
    frames_per_episode = 1
    epsiodes_durations = []
    rewards_per_episode = 0
    rewards_duration = []
    loss_per_epoch = []

    current_state, _, _, _ = play_game(env, frames_per_state)
    print('Starting training...')

    count = 0

    while True:

        epsilon = scheduler.anneal_linear(frames_count)
        choice = random.uniform(0, 1)

        # epsilon greedy algorithm
        if choice <= epsilon:
            action = LongTensor([[random.randrange(num_actions)]])

        else:
            action = get_greedy_action(model, current_state)

        curr_obs, reward, done, _ = play_game(env, frames_per_state,
                                              action[0][0])

        rewards_per_episode += reward
        reward = Tensor([reward])

        exp_replay.push(current_state, action, reward, curr_obs)

        current_state = curr_obs

        #sample random mini-batch
        obs_sample = exp_replay.sample(batch_size)

        batch = Experience(
            *zip(*obs_sample)
        )  #unpack the batch into states, actions, rewards and next_states

        #compute y
        if len(exp_replay) >= batch_size:

            loss = ddqn_compute_y(batch, batch_size, model, target, gamma)
            optimizer.zero_grad()
            loss.backward()

            for param in model.parameters():
                param.grad.data.clamp_(-1, 1)

            optimizer.step()
            loss_per_epoch.append(loss.data.cpu().numpy())

        frames_count += 1
        frames_per_episode += frames_per_state

        if done:
            rewards_duration.append(rewards_per_episode)
            rewards_per_episode = 0
            frames_per_episode = 1
            episodes_count += 1
            env.reset()
            current_state, _, _, _ = play_game(env, frames_per_state)

            if episodes_count % 100 == 0:
                avg_episode_reward = sum(rewards_duration) / 100.0
                avg_reward_content = 'Episode from', episodes_count - 99, ' to ', episodes_count, ' has an average of ', avg_episode_reward, ' reward and loss of ', sum(
                    loss_per_epoch)
                print(avg_reward_content)
                logging.info(avg_reward_content)
                rewards_duration = []
                loss_per_epoch = []

        # update weights of target network for every TARGET_UPDATE_FREQ steps
        if frames_count % target_update_steps == 0:
            target.load_state_dict(model.state_dict())
            # print('weights updated at frame no. ', frames_count)

        #Save weights every 250k frames
        if frames_count % 250000 == 0:
            util.make_sure_path_exists(output_directory + '/' + envo + '/')
            torch.save(
                model.state_dict(), output_directory + '/' + envo + '/' +
                model_type + '_weights_' + str(frames_count) + '.pth')

        #Print frame count for every 1000000 (one million) frames:
        if frames_count % 1000000 == 0:
            training_update = 'frame count: ', frames_count, 'episode count: ', episodes_count, 'epsilon: ', epsilon
            print(training_update)
            logging.info(training_update)
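
ddqn_compute_y is imported from elsewhere and only called here. For clarity, a sketch of the Double DQN target it presumably implements: the online network selects argmax_a Q(s', a) and the target network evaluates that action, so y = r + gamma * Q_target(s', argmax_a Q_online(s', a)). The helper below is an illustration built on that definition (using the same old-style Variable API as the listing), not the project's actual function; it assumes Experience batches of (state, action, reward, next_state) tensors and does no terminal masking, mirroring the push() calls above.

import torch
import torch.nn.functional as F
from torch.autograd import Variable

def ddqn_compute_y_sketch(batch, batch_size, model, target, gamma):
    states = Variable(torch.cat(batch.state))
    actions = Variable(torch.cat(batch.action)).view(-1, 1)
    rewards = Variable(torch.cat(batch.reward)).view(-1, 1)
    next_states = Variable(torch.cat(batch.next_state), volatile=True)

    # Q(s, a) from the online network for the actions actually taken.
    q_sa = model(states).gather(1, actions)

    # Double DQN: choose the next action with the online network...
    best_actions = model(next_states).max(1)[1].view(-1, 1)
    # ...but evaluate it with the target network.
    next_q = target(next_states).gather(1, best_actions)

    y = rewards + gamma * next_q
    y = Variable(y.data)  # block gradients through the target
    return F.smooth_l1_loss(q_sa, y)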
Example no. 5
class Agent_DQN(Agent):
    def __init__(self, env, args):
        """
        Initialize everything you need here.
        For example: building your model
        """

        super(Agent_DQN, self).__init__(env)
        self.env = env
        self.args = args
        self.episode = 0
        self.n_actions = self.env.action_space.n
        self.epsilon_start = 1.0
        self.epsilon_final = 0.025
        self.epsilon_decay = 3000
        self.epsilon_by_frame = lambda frame_idx: self.epsilon_final + (
            self.epsilon_start - self.epsilon_final) * math.exp(
                -1. * frame_idx / self.epsilon_decay)
        self.epsilon = 0
        self.eval_net = DQN()
        self.target_net = DQN()
        self.target_net.load_state_dict(self.eval_net.state_dict())

        self.criterion = nn.MSELoss()
        #self._model = Net(self.env.observation_space.shape, self.env.action_space.n)
        self._use_cuda = torch.cuda.is_available()
        self.optim = torch.optim.Adam(self.eval_net.parameters(),
                                      lr=self.args.learning_rate)

        if self._use_cuda:
            self.eval_net = self.eval_net.cuda()
            self.target_net = self.target_net.cuda()
            self.criterion = self.criterion.cuda()

#       self.replaybuffer = ReplayBuffer(args.buffer_size)
        self.buffer = deque(maxlen=10000)
        if args.test_dqn:
            #you can load your model here
            print('loading trained model')
            self.eval_net.load_state_dict(torch.load(args.model_dqn))
            self.target_net.load_state_dict(self.eval_net.state_dict())
            if self._use_cuda:
                self.eval_net = self.eval_net.cuda()
                self.target_net = self.target_net.cuda()

        ##################
        # YOUR CODE HERE #
        ##################

    def init_game_setting(self):
        """
        Testing function will call this function at the begining of new game
        Put anything you want to initialize if necessary
        """
        ##################
        # YOUR CODE HERE #
        ##################
        pass

    def push(self, state, action, reward, next_state, done):
        state = np.expand_dims(state, 0)
        next_state = np.expand_dims(next_state, 0)
        self.buffer.append((state, action, reward, next_state, done))

    def replay_buffer(self, batch_size):
        state, action, reward, next_state, done = zip(
            *random.sample(self.buffer, batch_size))
        return np.concatenate(state), action, reward, np.concatenate(
            next_state), done

    def train(self):
        """
        Implement your training algorithm here
        """
        ##################
        # YOUR CODE HERE #
        ##################

        print('begin train...')

        #        if self.args.log_file is not None:
        #        fp_log = open(self.args.log_file, 'w', buffering=1)
        fout = open('dqn_score.log', 'w')
        if os.path.exists('model') == False:
            os.makedirs('model')

        losses = []
        all_rewards = []
        episode_reward = 0
        best_mean_reward = 0
        state = self.env.reset()
        for i_step in range(self.args.max_steps):
            self.epsilon = self.epsilon_by_frame(i_step)
            action = self.make_action(state)
            next_state, reward, done, _ = self.env.step(action)

            self.push(state, action, reward, next_state, done)
            state = next_state
            episode_reward += reward

            if done:
                state = self.env.reset()
                all_rewards.append(episode_reward)
                self.episode += 1
                print('{},{}'.format(self.episode, episode_reward))
                fout.write('Episode{},episode_reward{}\n'.format(
                    self.episode, episode_reward))
                episode_reward = 0

            if len(self.buffer) == self.args.buffer_size:
                if i_step % self.args.eval_net_update_step == 0:
                    loss = self.optimize_model()
                    losses.append(loss)

                if i_step % self.args.target_net_update_step == 0:
                    self.target_net.load_state_dict(self.eval_net.state_dict())

            if i_step % self.args.save_freq == 0:
                mean_reward = \
                    sum(all_rewards[-100:]) / 100
                if best_mean_reward < mean_reward:
                    print('save best model with mean reward = %f' %
                          mean_reward)
                    best_mean_reward = mean_reward
                    torch.save(self.eval_net.state_dict(), self.args.model_dqn)

    def make_action(self, observation, test=True):
        """
        Return predicted action of your agent
        Input:
            observation: np.array
                stack 4 last preprocessed frames, shape: (84, 84, 4)
        Return:
            action: int
                the predicted action from trained model
        """
        ##################
        # YOUR CODE HERE #
        ##################
        observation = torch.cuda.FloatTensor(
            observation.reshape((1, 84, 84, 4))).transpose(1,
                                                           3).transpose(2, 3)
        #        print(type(observation))
        Q_value = self.eval_net.forward(observation).data.cpu().numpy()
        if random.random() > self.epsilon:
            action = np.argmax(Q_value)
        else:
            action = self.env.get_random_action()
        return action

    def optimize_model(self):

        state, action, reward, next_state, done = self.replay_buffer(
            self.args.batch_size)

        state = torch.FloatTensor(np.float32(state)).permute(0, 3, 1, 2)
        next_state = torch.FloatTensor(np.float32(next_state)).permute(
            0, 3, 1, 2)
        action = torch.LongTensor(action)
        reward = torch.FloatTensor(reward)
        done = torch.FloatTensor(np.float32(done))

        if self._use_cuda:
            state = state.cuda()
            next_state = next_state.cuda()
            action = action.cuda()
            reward = reward.cuda()
            done = done.cuda()

        q_values = self.eval_net(state)

        # next_q_values = self.target_net(next_state).detach()

        q_value = q_values.gather(1, action.unsqueeze(1)).squeeze(1)

        next_q_values = self.target_net(next_state).detach()
        next_q_value = next_q_values.max(1)[0]

        expected_q_value = reward + self.args.gamma * next_q_value * (1 - done)

        loss = self.criterion(q_value, expected_q_value.data)

        self.optim.zero_grad()
        loss.backward()
        self.optim.step()

        return loss
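
The agent above takes all of its hyperparameters from an args object. A plausible way to build one for a quick run, using the attribute names referenced in the code (the values themselves are illustrative assumptions):

from argparse import Namespace

args = Namespace(
    test_dqn=False,
    model_dqn='dqn_model.pth',        # where the best model is saved/loaded
    learning_rate=1e-4,
    max_steps=5000000,
    buffer_size=10000,                # matches the deque(maxlen=10000) above
    eval_net_update_step=4,           # optimize every 4 environment steps
    target_net_update_step=1000,      # sync the target network every 1000 steps
    save_freq=10000,                  # check/save the best model every 10k steps
    gamma=0.99,
    batch_size=32,
)

# agent = Agent_DQN(env, args)
# agent.train()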
Example no. 6
class Agent_DQN(Agent):
    def __init__(self, env, args):
        """
        Initialize everything you need here.
        For example:
            parameters for neural network
            initialize Q net and target Q net
            parameters for replay buffer
            parameters for q-learning; decaying epsilon-greedy
            ...
        """

        super(Agent_DQN, self).__init__(env)
        ###########################
        # YOUR IMPLEMENTATION HERE #
        self.env = env
        self.batch_size = BATCH_SIZE
        self.gamma = 0.999
        self.eps_start = EPS_START
        self.eps_decay = EPS_DECAY
        self.TARGET_UPDATE = TARGET_UPDATE

        self.policy_net = DQN(self.env.action_space.n)
        self.target_net = DQN(self.env.action_space.n)
        self.target_net.load_state_dict(self.policy_net.state_dict())
        self.target_net.eval()
        if use_cuda:
            self.policy_net.cuda()
            self.target_net.cuda()

        self.optimizer = optim.RMSprop(self.policy_net.parameters(), lr=1e-5)
        self.memory = deque(maxlen=10000)

        if args.test_dqn:
            # you can load your model here
            print('loading trained model')
            ###########################
            # YOUR IMPLEMENTATION HERE #

    def init_game_setting(self):
        """
        Testing function will call this function at the begining of new game
        Put anything you want to initialize if necessary.
        If no parameters need to be initialized, you can leave it as blank.
        """
        ###########################
        # YOUR IMPLEMENTATION HERE #

        ###########################
        pass

    def make_action(self, observation, test=True):
        """
        Return predicted action of your agent
        Input:
            observation: np.array
                stack 4 last preprocessed frames, shape: (84, 84, 4)
        Return:
            action: int
                the predicted action from trained model
        """
        ###########################
        # YOUR IMPLEMENTATION HERE #
        global steps_done
        self.policy_net.eval()
        sample = random.random()
        eps_threshold = EPS_END + (EPS_START - EPS_END) * \
            math.exp(-1. * steps_done / EPS_DECAY)
        steps_done += 1
        if sample > eps_threshold:
            return self.policy_net(
                Variable(torch.from_numpy(observation),
                         volatile=True).type(FloatTensor)).data.max(1)[1].view(
                             1, 1)
        else:
            return LongTensor([[random.randrange(self.env.action_space.n)]])
        ###########################
        return action

    def push(self, s, a, r, s_, done):
        """ You can add additional arguments as you need.
        Push new data to buffer and remove the old one if the buffer is full.

        Hints:
        -----
            you can consider deque(maxlen = 10000) list
        """
        ###########################
        # YOUR IMPLEMENTATION HERE #
        # self.memory is a deque with maxlen=10000, so the oldest transition
        # is discarded automatically once the buffer is full
        self.memory.append((s, a, r, s_, done))

        ###########################

    def replay_buffer(self):
        """ You can add additional arguments as you need.
        Select batch from buffer.
        """
        ###########################
        # YOUR IMPLEMENTATION HERE #
        #print("memory", len(self.memory), self.BATCH_SIZE)
        minibatch = random.sample(self.memory, self.batch_size)
        minibatch = np.array(minibatch).transpose(0, 3, 1, 2)
        minibatch = torch.tensor(minibatch / 255.0)
        ###########################
        return minibatch

    def optimize_model(self):
        if len(self.memory) < BATCH_SIZE:
            return
        transitions = self.memory.sample(BATCH_SIZE)
        # Transpose the batch (see http://stackoverflow.com/a/19343/3343043 for
        # detailed explanation).
        batch = Transition(*zip(*transitions))

        # Compute a mask of non-final states and concatenate the batch elements
        non_final_mask = ByteTensor(
            tuple(map(lambda s: s is not None, batch.next_state)))
        non_final_next_states = Variable(torch.cat(
            [s for s in batch.next_state if s is not None]),
                                         volatile=True).cuda()
        state_batch = Variable(torch.cat(batch.state)).cuda()
        action_batch = Variable(torch.cat(batch.action)).cuda()
        reward_batch = Variable(torch.cat(batch.reward)).cuda()

        # Compute Q(s_t, a) - the model computes Q(s_t), then we select the
        # columns of actions taken
        self.policy_net.train()
        state_action_values = self.policy_net(state_batch).gather(
            1, action_batch)

        # Compute V(s_{t+1}) for all next states.
        next_state_values = Variable(
            torch.zeros(BATCH_SIZE).type(Tensor)).cuda()
        next_state_values[non_final_mask] = self.target_net(
            non_final_next_states).max(1)[0]
        # Compute the expected Q values
        expected_state_action_values = (next_state_values *
                                        GAMMA) + reward_batch
        # Undo volatility (which was used to prevent unnecessary gradients)
        expected_state_action_values = Variable(
            expected_state_action_values.data).cuda()

        # Compute Huber loss
        loss = F.smooth_l1_loss(state_action_values,
                                expected_state_action_values)

        # Optimize the model
        self.optimizer.zero_grad()
        loss.backward()
        for param in self.policy_net.parameters():
            param.grad.data.clamp_(-1, 1)
        self.optimizer.step()

    def train(self):
        """
        Implement your training algorithm here
        """
        ###########################
        # YOUR IMPLEMENTATION HERE #
        num_episodes = 1400000
        for i_episode in range(num_episodes):
            # Initialize the environment and state
            observation = self.env.reset()
            observation = observation.transpose((2, 0, 1))
            observation = observation[np.newaxis, :]
            state = observation

            for t in count():
                # Select and perform an action
                action = self.make_action(state)
                next_state, reward, done, _ = self.env.step(action[0, 0])
                next_state = next_state.transpose((2, 0, 1))
                next_state = next_state[np.newaxis, :]
                reward = Tensor([reward])

                # Store the transition in memory; None marks a terminal next
                # state so optimize_model's non_final_mask can exclude it
                self.memory.push(torch.from_numpy(state), action,
                                 torch.from_numpy(next_state) if not done else None,
                                 reward)

                # Observe new state
                if not done:
                    state = next_state
                else:
                    state = None

                # Perform one step of the optimization (on the target network)
                self.optimize_model()
                if done:
                    print(
                        'resetting env. episode %d \'s reward total was %d.' %
                        (i_episode + 1, t + 1))
                    break
            # Update the target network
            if i_episode % TARGET_UPDATE == 0:
                self.target_net.load_state_dict(self.policy_net.state_dict())
            if i_episode % 50 == 0:
                checkpoint_path = os.path.join('save_dir', 'model-best.pth')
                torch.save(self.policy_net.state_dict(), checkpoint_path)
                print("model saved to {}".format(checkpoint_path))
Example no. 7
def ddqn_rankWeight_train(env, exploreScheduler, betaScheduler, optimizer_constructor, model_type, batch_size, rp_start, rp_size, 
	exp_frame, exp_initial, exp_final, prob_alpha, gamma, target_update_steps, frames_per_epoch, 
	frames_per_state, output_directory, last_checkpoint, max_frames, envo):

	"""
	Implementation of the training algorithm for DDQN using Rank-based prioritization.
	Information with regards to the algorithm can be found in the paper, 
	"Prioritized Experience Replay" by Tom Schaul, John Quan, Ioannis Antonoglou and
	David Silver. Refer to section 3.3 in the paper for more info. 
	"""
	
	gym.undo_logger_setup()
	logging.basicConfig(filename=envo+'_'+'ddqn_rank_weightedLoss_training.log',level=logging.INFO)
	num_actions = env.action_space.n
	env.reset()
	
	print('No. of actions: ', num_actions)
	print(env.unwrapped.get_action_meanings())

	# initialize action value and target network with the same weights
	model = DQN(num_actions)
	target = DQN(num_actions)

	if use_cuda:
		model.cuda()
		target.cuda()

	frames_count = 1

	if last_checkpoint:
		model.load_state_dict(torch.load(last_checkpoint))
		print(last_checkpoint)
		print('weights loaded...')

		#TODO: Implementation of resume
		# exp_replay = util.initialize_rank_replay_resume(env, rp_start, rp_size, frames_per_state, 
		# 	model, target, gamma, batch_size)
		# frames_count = get_index_from_checkpoint_path(last_checkpoint)

	else:
		exp_replay = util.initialize_rank_replay(env, rp_start, rp_size, frames_per_state, 
			model, target, gamma, prob_alpha)

	target.load_state_dict(model.state_dict())

	optimizer = optimizer_constructor.type(model.parameters(), lr=optimizer_constructor.kwargs['lr'],
		alpha=optimizer_constructor.kwargs['alpha'], eps=optimizer_constructor.kwargs['eps'] )

	episodes_count = 1
	frames_per_episode = 1
	epsiodes_durations = []
	rewards_per_episode = 0
	rewards_duration = []
	loss_per_epoch = []
	wLoss_func = Weighted_Loss()

	
	current_state, _, _, _ = util.play_game(env, frames_per_state)
	print('Starting training...')

	for frames_count in range(1, max_frames):

		epsilon=exploreScheduler.anneal_linear(frames_count)
		beta = betaScheduler.anneal_linear(frames_count)
		choice = random.uniform(0,1)

		# epsilon greedy algorithm
		if choice <= epsilon:
			action = LongTensor([[random.randrange(num_actions)]])

		else:
			action = util.get_greedy_action(model, current_state)

		curr_obs, reward, done, _ = util.play_game(env, frames_per_state, action[0][0])

		rewards_per_episode += reward
		reward = Tensor([[reward]])
		current_state_ex = Variable(current_state, volatile=True)
		curr_obs_ex = Variable(curr_obs, volatile=True)
		action_ex = Variable(action, volatile=True)
		reward_ex = Variable(reward, volatile=True)

		#compute td-error for one sample
		td_error = ddqn_compute_td_error(batch_size=1, state_batch=current_state_ex, reward_batch=reward_ex, action_batch=action_ex, 
			next_state_batch=curr_obs_ex, model=model, target=target, gamma=gamma)

		td_error = torch.pow(torch.abs(td_error)+1e-8, prob_alpha)
		exp_replay.push(current_state, action, reward, curr_obs, td_error)
		current_state = curr_obs

		# compute y 
		if len(exp_replay) >= batch_size:
			# Get batch samples
			obs_samples, obs_ranks, obs_priorityVals = exp_replay.sample(batch_size)
			num_samples_per_batch = len(obs_samples)
			obs_priorityTensor = torch.from_numpy(np.array(obs_priorityVals))
			p_batch = 1/ obs_priorityTensor
			w_batch_raw = (1/len(exp_replay) * p_batch)**beta
			max_weight = exp_replay.get_max_weight(beta)
			w_batch = w_batch_raw/max_weight
			w_batch = w_batch.type(Tensor)
			
			batch = Experience(*zip(*obs_samples))

			loss, new_weights = ddqn_compute_y(batch, num_samples_per_batch, model, target, gamma, w_batch, wLoss_func)
			loss_abs = torch.abs(new_weights)
			exp_replay.update(obs_ranks, loss_abs)

			optimizer.zero_grad()
			loss.backward()

			for param in model.parameters():
				param.grad.data.clamp_(-1,1)

			optimizer.step()
			loss_per_epoch.append(loss.data.cpu().numpy()[0])
		
		frames_per_episode+= frames_per_state

		if done:
			rewards_duration.append(rewards_per_episode)
			rewards_per_episode = 0
			frames_per_episode=1
			episodes_count+=1
			env.reset()
			current_state, _, _, _ = util.play_game(env, frames_per_state)

			if episodes_count % 100 == 0:
				avg_episode_reward = sum(rewards_duration)/100.0
				avg_reward_content = 'Episode from', episodes_count-99, ' to ', episodes_count, ' has an average of ', avg_episode_reward, ' reward and loss of ', sum(loss_per_epoch)
				print(avg_reward_content)
				logging.info(avg_reward_content)
				rewards_duration = []
				loss_per_epoch = []

		# update weights of target network for every TARGET_UPDATE_FREQ steps
		if frames_count % target_update_steps == 0:
			target.load_state_dict(model.state_dict())

		# sort memory replay every half of it's capacity iterations 
		if frames_count % int(rp_size/2) == 0:
			exp_replay.sort()


		#Save weights every 250k frames
		if frames_count % 250000 == 0:
			util.make_sure_path_exists(output_directory+'/'+envo+'/')
			torch.save(model.state_dict(), output_directory+'/'+envo+'/rank_weightedLoss_'+ str(frames_count)+'.pth')


		#Print frame count and sort experience replay for every 1000000 (one million) frames:
		if frames_count % 1000000 == 0:
			training_update = 'frame count: ', frames_count, 'episode count: ', episodes_count, 'epsilon: ', epsilon
			print(training_update)
			logging.info(training_update)
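
The weighting block above applies the importance-sampling correction from the Prioritized Experience Replay paper: given sampling probabilities P(i), each transition gets w_i = (N * P(i))^(-beta), normalized by the largest weight in the batch so updates are only ever scaled down. A small self-contained sketch of that computation on plain tensors (independent of the exp_replay implementation used above):

import torch

def importance_sampling_weights(priorities, replay_size, beta):
    # P(i): normalized sampling probabilities derived from the priorities.
    probs = priorities / priorities.sum()
    # w_i = (N * P(i))^-beta, normalized so the largest weight is 1.
    weights = (replay_size * probs) ** (-beta)
    return weights / weights.max()

# Example: four transitions with rank-based priorities 1/rank.
priorities = torch.Tensor([1.0, 1.0 / 2, 1.0 / 3, 1.0 / 4])
print(importance_sampling_weights(priorities, replay_size=1000, beta=0.5))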
Example no. 8
def ddqn_rank_train(env, exploreScheduler, betaScheduler,
                    optimizer_constructor, model_type, batch_size, rp_start,
                    rp_size, exp_frame, exp_initial, exp_final, prob_alpha,
                    gamma, target_update_steps, frames_per_epoch,
                    frames_per_state, output_directory, last_checkpoint,
                    max_frames, envo):
    """
	Implementation of the training algorithm for DDQN using Rank-based prioritization.
	Information with regards to the algorithm can be found in the paper, 
	"Prioritized Experience Replay" by Tom Schaul, John Quan, Ioannis Antonoglou and
	David Silver. Refer to section 3.3 in the paper for more info. 
	"""

    gym.undo_logger_setup()
    logging.basicConfig(filename=envo + '_' +
                        'ddqn_rank_weighted_training.log',
                        level=logging.INFO)
    num_actions = env.action_space.n
    env.reset()

    print('No. of actions: ', num_actions)
    print(env.unwrapped.get_action_meanings())

    # initialize action value and target network with the same weights
    model = DQN(num_actions)
    target = DQN(num_actions)

    if use_cuda:
        model.cuda()
        target.cuda()

    frames_count = 1

    if last_checkpoint:
        model.load_state_dict(torch.load(last_checkpoint))
        print(last_checkpoint)
        print('weights loaded...')

        #TODO: Implementation of resume
        # exp_replay = util.initialize_rank_replay_resume(env, rp_start, rp_size, frames_per_state,
        # 	model, target, gamma, batch_size)
        # frames_count = get_index_from_checkpoint_path(last_checkpoint)

    else:
        exp_replay = util.initialize_rank_replay(env, rp_start, rp_size,
                                                 frames_per_state, model,
                                                 target, gamma, prob_alpha)

    target.load_state_dict(model.state_dict())

    optimizer = optimizer_constructor.type(
        model.parameters(),
        lr=optimizer_constructor.kwargs['lr'],
        alpha=optimizer_constructor.kwargs['alpha'],
        eps=optimizer_constructor.kwargs['eps'])

    episodes_count = 1
    epsiodes_durations = []
    rewards_per_episode = 0
    rewards_duration = []
    loss_per_epoch = []
    current_state, _, _, _ = util.play_game(env, frames_per_state)
    wLoss_func = Weighted_Loss()

    print('Starting training...')

    for frames_count in range(1, max_frames):

        epsilon = exploreScheduler.anneal_linear(frames_count)
        beta = betaScheduler.anneal_linear(frames_count)
        choice = random.uniform(0, 1)

        # epsilon greedy algorithm
        if choice <= epsilon:
            action = LongTensor([[random.randrange(num_actions)]])

        else:
            action = util.get_greedy_action(model, current_state)

        curr_obs, reward, done, _ = util.play_game(env, frames_per_state,
                                                   action[0][0])

        rewards_per_episode += reward
        reward = Tensor([[reward]])
        td_error = 1

        temp_exp = Experience(current_state, action, reward, curr_obs,
                              td_error)
        current_state = curr_obs

        # compute y
        if len(exp_replay) >= batch_size:
            # Get batch samples

            # start = time.time()

            if frames_count % rp_size == 0:
                obs_samples, obs_priorityVals = exp_replay.sample(batch_size -
                                                                  1,
                                                                  prob_alpha,
                                                                  sort=True)
            else:
                obs_samples, obs_priorityVals = exp_replay.sample(batch_size -
                                                                  1,
                                                                  prob_alpha,
                                                                  sort=False)

            obs_samples.append(temp_exp)
            obs_priorityVals.append(td_error)

            obs_pVals_tensor = torch.from_numpy(np.array(obs_priorityVals))
            # print("P(i): ", obs_pVals_tensor)
            IS_weights = torch.pow((obs_pVals_tensor * rp_size), -beta)
            max_weight = torch.max(IS_weights)

            IS_weights_norm = torch.div(IS_weights, max_weight).type(Tensor)
            IS_weights_norm[-1] = torch.max(IS_weights_norm)

            # print("Norm W(i): ", IS_weights_norm)

            batch = Experience(*zip(*obs_samples))
            loss, new_weights = ddqn_compute_y(batch, batch_size, model,
                                               target, gamma, IS_weights_norm,
                                               wLoss_func)
            new_weights = torch.pow(new_weights, prob_alpha)
            new_exp = Experience(temp_exp.state, temp_exp.action,
                                 temp_exp.reward, temp_exp.next_state,
                                 new_weights[batch_size - 1])
            exp_replay.update(obs_samples, new_weights, new_exp)
            optimizer.zero_grad()
            loss.backward()
            # print("loss: ", loss.data)
            optimizer.step()
            loss_per_epoch.append(loss.data.cpu().numpy()[0])

        else:
            # before training starts there is no updated weight yet, so push the
            # fresh transition (temp_exp) with its initial td_error
            exp_replay.push(temp_exp.state, temp_exp.action, temp_exp.reward,
                            temp_exp.next_state, td_error)

        # end = time.time()

        # duration = end-start

        # print('duration : ', duration)

        if done:
            # print('Game: ', rewards_per_episode)
            rewards_duration.append(rewards_per_episode)
            rewards_per_episode = 0
            episodes_count += 1
            env.reset()
            current_state, _, _, _ = util.play_game(env, frames_per_state)

            if episodes_count % 100 == 0:
                avg_episode_reward = sum(rewards_duration) / 100.0
                avg_reward_content = 'Episode from', episodes_count - 99, ' to ', episodes_count, ' has an average of ', avg_episode_reward, ' reward and loss of ', sum(
                    loss_per_epoch)
                print(avg_reward_content)
                logging.info(avg_reward_content)
                rewards_duration = []
                loss_per_epoch = []

        # update weights of target network for every TARGET_UPDATE_FREQ steps
        if frames_count % target_update_steps == 0:
            target.load_state_dict(model.state_dict())

        #Save weights every 250k frames
        if frames_count % 250000 == 0:
            util.make_sure_path_exists(output_directory + '/' + envo + '/')
            torch.save(
                model.state_dict(), output_directory + '/' + envo +
                '/rank_uniform' + str(frames_count) + '.pth')

        #Print frame count and sort experience replay for every 1000000 (one million) frames:
        if frames_count % 1000000 == 0:
            training_update = 'frame count: ', frames_count, 'episode count: ', episodes_count, 'epsilon: ', epsilon
            print(training_update)
            logging.info(training_update)
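
Both rank-based variants hand a Weighted_Loss() criterion to ddqn_compute_y so each sample's TD error is scaled by its normalized importance-sampling weight. That class is defined elsewhere; a minimal sketch of a weighted MSE criterion of that kind (an assumption about its behaviour, not the project's actual implementation):

import torch.nn as nn

class WeightedMSELoss(nn.Module):
    """Mean squared TD error with a per-sample importance-sampling weight."""

    def forward(self, input, target, weights):
        # weights: normalized IS weights, one per sample in the batch.
        td_error = input - target
        return (weights.view(-1) * td_error.view(-1) ** 2).mean()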
Example no. 9
class Agent_DQN(Agent):
    def __init__(self, env, args):
        """
        Initialize everything you need here.
        For example: 
            parameters for neural network
            initialize Q net and target Q net
            parameters for replay buffer
            parameters for q-learning; decaying epsilon-greedy
            ...
        """

        super(Agent_DQN,self).__init__(env)
        ###########################
        # YOUR IMPLEMENTATION HERE #
        self.epsilon_start = 1
        self.epsilon_end = 0.02
        self.epsilon_decay = 200000
        self.epsilon = self.epsilon_start
        
        self.gamma = 0.99
        self.env = env
        
        self.buffer_size = 30000
        self.buffer = deque(maxlen=30000)
        
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.policy_net = DQN().to(self.device)
        self.target_net = DQN().to(self.device)
        self.target_net.load_state_dict(self.policy_net.state_dict())
        
        self.optimizer = torch.optim.Adam(self.policy_net.parameters(),lr=0.00015)
        self.reward_array = []
        self.reward_x_axis = []
        self.batch_size = 32
        if args.test_dqn:
            #you can load your model here
            print('loading trained model')
            ###########################
            # YOUR IMPLEMENTATION HERE #
            

    def init_game_setting(self):
        """
        Testing function will call this function at the begining of new game
        Put anything you want to initialize if necessary.
        If no parameters need to be initialized, you can leave it as blank.
        """
        ###########################
        # YOUR IMPLEMENTATION HERE #
        self.policy_net.load_state_dict(torch.load('policy_model'))
        self.target_net.load_state_dict(self.policy_net.state_dict()) 
        self.policy_net = self.policy_net.cuda()
        self.target_net = self.target_net.cuda()
        ###########################
        pass
    
    
    def make_action(self, observation, test=True):
        """
        Return predicted action of your agent
        Input:
            observation: np.array
                stack 4 last preprocessed frames, shape: (84, 84, 4)
        Return:
            action: int
                the predicted action from trained model
        """
        ###########################
        # YOUR IMPLEMENTATION HERE #
        if test == True:
            self.epsilon = 0
        observation = torch.FloatTensor(observation.reshape((1, 84, 84, 4))).transpose(1, 3).transpose(2, 3).to(self.device)
        q = self.policy_net(observation).data.cpu().numpy()
        if random.random() > self.epsilon:
            action = np.argmax(q)
        else:
            action = random.randrange(self.env.action_space.n)
        return action
    
    def push(self,data):
        """ You can add additional arguments as you need. 
        Push new data to buffer and remove the old one if the buffer is full.
        
        Hints:
        -----
            you can consider using a deque(maxlen=10000) as the buffer
        """
        ###########################
        # YOUR IMPLEMENTATION HERE #
        self.buffer.append(data)
        ###########################
        
        
    def replay_buffer(self,batch_size):
        """ You can add additional arguments as you need.
        Select batch from buffer.
        """
        ###########################
        # YOUR IMPLEMENTATION HERE #      
        ###########################
        return random.sample(self.buffer,batch_size)
        
    def play_game(self,start_state):
        action = self.make_action(start_state)
        n_s,r,terminal,_ = self.env.step(action)
        self.push((start_state,action,r,n_s,terminal))
        return n_s,r,terminal
    
    def loss_function(self):
        data = self.replay_buffer(self.batch_size)
        s, a, r, n_s, terminal = zip(*data)
        # convert the NHWC frame stacks to NCHW tensors on the training device
        s = torch.FloatTensor(np.float32(s)).permute(0, 3, 1, 2).to(self.device)
        a = torch.LongTensor(a).to(self.device)
        r = torch.FloatTensor(r).to(self.device)
        n_s = torch.FloatTensor(np.float32(n_s)).permute(0, 3, 1, 2).to(self.device)
        terminal = torch.FloatTensor(terminal).to(self.device)
        q = self.policy_net(s).gather(1, a.unsqueeze(1)).squeeze(1)
        n_q = self.target_net(n_s).detach().max(1)[0]
        expected_q = r + self.gamma * n_q * (1 - terminal)
        loss = F.smooth_l1_loss(q, expected_q.detach())
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
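        # Note: the update above uses the standard one-step DQN target,
        #     y = r + gamma * (1 - terminal) * max_a' Q_target(s', a'),
        # evaluated with the frozen target network and kept out of the gradient
        # via detach(); the Huber (smooth L1) loss is then taken between y and
        # Q_policy(s, a) for the sampled actions.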
        
        
    def train(self):
        """
        Implement your training algorithm here
        """
        ###########################
        # YOUR IMPLEMENTATION HERE #
        rewards_array = []
        reward_ = 0
        best_mean = 0
        print_rate = 100
        last_saved = None
        start_state = self.env.reset()
        for frames in range(3500000):
            self.epsilon = self.epsilon_end + (self.epsilon_start - self.epsilon_end) * math.exp(-1. *frames / self.epsilon_decay)
            n_s,r,terminal = self.play_game(start_state)
            start_state = n_s
            reward_ += r
            if terminal:
                start_state = self.env.reset()
                rewards_array.append(reward_)
                if len(rewards_array) % print_rate==0:
                    print('%%%%%%%%%%%%%%%%%%%%%%%%%')
                    print('Frames = ', frames)
                    print('Current Epsilon = ', self.epsilon)
                    print('Episode = ', len(rewards_array))
                    print('Reward = ', np.mean(rewards_array[-100:]))
                    print('Buffer Length = ', len(self.buffer))
                    self.reward_array.append(np.mean(rewards_array[-100:]))
                    self.reward_x_axis.append(len(rewards_array))
                    self.print_graph()
                    if last_saved is not None:
                        print("last saved = ", best_mean)
                    print('%%%%%%%%%%%%%%%%%%%%%%%%%')
                reward_ = 0
                
            # start learning only after the buffer holds at least 10k transitions
            if len(self.buffer) < 10000:
                continue
            if frames % 4 == 0:
                self.loss_function()

            if frames % 1000 == 0:
                print("Target net updated")
                self.target_net.load_state_dict(self.policy_net.state_dict())

            mean_reward = np.mean(rewards_array[-100:]) if rewards_array else 0
            if mean_reward > best_mean and frames % 100 == 0:
                print("Saving model with reward = ", mean_reward)
                best_mean = mean_reward
                last_saved = mean_reward
                torch.save(self.policy_net.state_dict(), 'policy_model_')
        ###########################
    
    def print_graph(self):
        fig = plt.figure()
        ax = plt.subplot(111)
        ax.plot(self.reward_x_axis, self.reward_array, label='mean reward (last 100 episodes) per episode')
        ax.legend()
        fig.savefig('plot.png')
        plt.close(fig)  # avoid accumulating open figures over a long training run
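
# A usage sketch for the Agent_DQN class above (not part of the original
# example). It assumes only what the class itself uses: an environment whose
# reset()/step() return stacked (84, 84, 4) frames with a gym-style 4-tuple
# step result, and an args object with a boolean test_dqn attribute. The helper
# names below are illustrative placeholders.
from argparse import Namespace


def run_training(env):
    """env: any wrapper exposing reset()/step() with (84, 84, 4) observations."""
    args = Namespace(test_dqn=False)
    agent = Agent_DQN(env, args)  # epsilon decays exponentially from 1.0 toward 0.02 (200k-frame time constant)
    agent.train()                 # fills the replay buffer to 10k transitions, then learns every 4th frame


def run_evaluation(env):
    args = Namespace(test_dqn=True)
    agent = Agent_DQN(env, args)
    agent.init_game_setting()     # loads the weights saved by train()
    state = env.reset()
    done = False
    total_reward = 0
    while not done:
        state, reward, done, _ = env.step(agent.make_action(state, test=True))
        total_reward += reward
    return total_reward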
Example no. 10
def ddqn_rankBatch_train(env, scheduler, optimizer_constructor, model_type,
                         batch_size, rp_start, rp_size, exp_frame, exp_initial,
                         exp_final, inital_beta, gamma, target_update_steps,
                         frames_per_epoch, frames_per_state, output_directory,
                         last_checkpoint):
    """
	Implementation of the training algorithm for DDQN using Rank-based prioritization.
	Information with regards to the algorithm can be found in the paper, 
	"Prioritized Experience Replay" by Tom Schaul, John Quan, Ioannis Antonoglou and
	David Silver. Refer to section 3.3 in the paper for more info. 
	"""

    gym.undo_logger_setup()
    logging.basicConfig(filename='ddqn_rank_training.log', level=logging.INFO)
    num_actions = env.action_space.n
    env.reset()

    print('No. of actions: ', num_actions)
    print(env.unwrapped.get_action_meanings())

    # initialize action value and target network with the same weights
    model = DQN(num_actions, use_bn=False)
    target = DQN(num_actions, use_bn=False)

    if use_cuda:
        model.cuda()
        target.cuda()

    frames_count = 1

    if last_checkpoint:
        model.load_state_dict(torch.load(last_checkpoint))
        print(last_checkpoint)
        print('weights loaded...')

        exp_replay = util.initialize_rank_replay_resume(
            env, rp_start, rp_size, frames_per_state, model, target, gamma,
            batch_size)
        frames_count = get_index_from_checkpoint_path(last_checkpoint)

    else:
        exp_replay = util.initialize_rank_replay(env, rp_start, rp_size,
                                                 frames_per_state, model,
                                                 target, gamma)

    target.load_state_dict(model.state_dict())

    optimizer = optimizer_constructor.type(
        model.parameters(),
        lr=optimizer_constructor.kwargs['lr'],
        alpha=optimizer_constructor.kwargs['alpha'],
        eps=optimizer_constructor.kwargs['eps'])

    episodes_count = 1
    frames_per_episode = 1
    episodes_durations = []
    rewards_per_episode = 0
    rewards_duration = []
    loss_per_epoch = []

    current_state, _, _, _ = util.play_game(env, frames_per_state)
    print('Starting training...')

    count = 0

    while True:

        epsilon = scheduler.anneal_linear(frames_count)
        choice = random.uniform(0, 1)

        # epsilon greedy algorithm
        if choice <= epsilon:
            action = LongTensor([[random.randrange(num_actions)]])

        else:
            action = util.get_greedy_action(model, current_state)

        curr_obs, reward, done, _ = util.play_game(env, frames_per_state,
                                                   action[0][0])

        rewards_per_episode += reward
        reward = Tensor([[reward]])
        current_state_ex = Variable(current_state, volatile=True)
        curr_obs_ex = Variable(curr_obs, volatile=True)
        action_ex = Variable(action, volatile=True)
        reward_ex = Variable(reward, volatile=True)

        #compute td-error for one sample
        td_error = ddqn_compute_td_error(batch_size=1,
                                         state_batch=current_state_ex,
                                         reward_batch=reward_ex,
                                         action_batch=action_ex,
                                         next_state_batch=curr_obs_ex,
                                         model=model,
                                         target=target,
                                         gamma=gamma)

        td_error = torch.abs(td_error)
        exp_replay.push(current_state_ex, action_ex, reward_ex, curr_obs_ex,
                        td_error)
        current_state = curr_obs

        # compute y
        if len(exp_replay) >= batch_size:
            # Get batch samples
            obs_samples, obs_ranks, obs_priorityVals = exp_replay.sample(
                batch_size)
            obs_priorityTensor = torch.from_numpy(np.array(obs_priorityVals))
            p_batch = 1 / obs_priorityTensor
            w_batch = (1 / len(exp_replay) * p_batch)**inital_beta
            max_weight = exp_replay.get_max_weight(inital_beta)
            params_grad = []

            for i in range(len(obs_samples)):
                sample = obs_samples[i]
                sample.state.volatile = False
                sample.next_state.volatile = False
                sample.reward.volatile = False
                sample.action.volatile = False
                loss = ddqn_compute_y(batch_size=1,
                                      state_batch=sample.state,
                                      reward_batch=sample.reward,
                                      action_batch=sample.action,
                                      next_state_batch=sample.next_state,
                                      model=model,
                                      target=target,
                                      gamma=gamma)
                loss_abs = torch.abs(loss)
                exp_replay.update(obs_ranks[i], loss_abs)

                for param in model.parameters():
                    if param.grad is not None:
                        param.grad.data.zero_()

                loss.backward()

                #accumulate weight change
                if i == 0:
                    for param in model.parameters():
                        tmp = ((w_batch[i] / max_weight) *
                               loss.data[0]) * param.grad.data
                        params_grad.append(tmp)

                else:
                    paramIndex = 0
                    for param in model.parameters():
                        tmp = ((w_batch[i] / max_weight) *
                               loss.data[0]) * param.grad.data
                        params_grad[paramIndex] = tmp + params_grad[paramIndex]
                        paramIndex += 1

            # update weights
            paramIndex = 0
            for param in model.parameters():
                param.data += params_grad[paramIndex].mul(
                    optimizer_constructor.kwargs['lr']).type(Tensor)
                paramIndex += 1

        frames_count += 1
        frames_per_episode += frames_per_state

        if done:
            rewards_duration.append(rewards_per_episode)
            rewards_per_episode = 0
            frames_per_episode = 1
            episodes_count += 1
            env.reset()
            current_state, _, _, _ = util.play_game(env, frames_per_state)

            if episodes_count % 100 == 0:
                avg_episode_reward = sum(rewards_duration) / 100.0
                avg_reward_content = 'Episodes {} to {} averaged {} reward with a total loss of {}'.format(
                    episodes_count - 99, episodes_count, avg_episode_reward, sum(loss_per_epoch))
                print(avg_reward_content)
                logging.info(avg_reward_content)
                rewards_duration = []
                loss_per_epoch = []

        # update the weights of the target network every target_update_steps steps
        if frames_count % target_update_steps == 0:
            target.load_state_dict(model.state_dict())
            # print('weights updated at frame no. ', frames_count)

        # Save weights every 250k frames
        if frames_count % 250000 == 0:
            util.make_sure_path_exists(output_directory + model_type + '/')
            torch.save(model.state_dict(),
                       output_directory + model_type + '/rank_weights_' +
                       str(frames_count) + '.pth')

        # Print the frame count and sort the experience replay every 1,000,000 (one million) frames:
        if frames_count % 1000000 == 0:
            training_update = 'frame count: {}, episode count: {}, epsilon: {}'.format(
                frames_count, episodes_count, epsilon)
            print(training_update)
            logging.info(training_update)
            exp_replay.sort()
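
# Illustrative note (not part of the example above): the weighting applied in the
# batch loop corresponds to the importance-sampling correction from "Prioritized
# Experience Replay" (Schaul et al.). Each transition i is sampled with a
# rank-based probability P(i), and its update is scaled by
# w_i = (1 / (N * P(i)))**beta, where N is the replay size. A standalone NumPy
# sketch of that computation, with placeholder probability values:
import numpy as np


def importance_sampling_weights(probabilities, replay_size, beta):
    """Unnormalised IS weights w_i = (1 / (N * P(i)))**beta for sampled transitions."""
    p = np.asarray(probabilities, dtype=np.float64)
    return (1.0 / (replay_size * p)) ** beta

# Example: with N = 3, P = [0.5, 0.3, 0.2] and beta = 0.5 the raw weights are
# roughly [0.82, 1.05, 1.29]; dividing by the largest weight (as the code above
# does via exp_replay.get_max_weight) rescales them to about [0.63, 0.82, 1.00],
# so updates are only ever scaled down.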