def __init__(self, state_size, action_size, seed, gamma=0.99, tau=1e-3, lr=5e-4,
             buffer_size=int(1e5), batch_size=64, update_every=4):
    """Initialize an Agent object.

    Params
    ======
        state_size (int): dimension of each state
        action_size (int): dimension of each action
        seed (int): random seed
        gamma: discount factor
        tau: for soft update of target parameters
        lr: learning rate
        buffer_size: replay buffer size
        batch_size: minibatch size
        update_every: how often to update the network
    """
    self.state_size = state_size
    self.action_size = action_size
    self.seed = random.seed(seed)
    self.gamma = gamma
    self.tau = tau
    self.lr = lr
    self.buffer_size = buffer_size
    self.batch_size = batch_size
    self.update_every = update_every

    # Q-Network
    self.qnetwork_local = QNetwork(state_size, action_size, seed).to(device)
    self.qnetwork_target = QNetwork(state_size, action_size, seed).to(device)
    self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=self.lr)

    # Replay memory
    self.memory = ReplayBuffer(action_size, self.buffer_size, self.batch_size, seed)

    # Initialize time step (for updating every update_every steps)
    self.t_step = 0
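# Construction sketch for the parameterized __init__ above. The enclosing class is
# not shown in this snippet, so `Agent` is assumed as its name, and the overridden
# hyperparameter values are illustrative only; the remaining keyword arguments keep
# the defaults documented in the docstring.
agent = Agent(state_size=37, action_size=4, seed=0,
              lr=1e-4, batch_size=128, update_every=8)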
def __init__(self, state_size, action_size, seed, action_dim=False, qnet='DQN'):
    """Initialize an Agent object.

    Params
    ======
        state_size (int): dimension of each state
        action_size (int): dimension of each action
        seed (int): random seed
    """
    self.state_size = state_size
    self.action_size = action_size
    self.seed = random.seed(seed)
    self.action_dim = action_dim

    # Q-Network
    self.qnet = qnet
    if qnet == 'DQN':
        self.qnetwork_local = QNetwork(state_size, action_size, seed).to(device)
        self.qnetwork_target = QNetwork(state_size, action_size, seed).to(device)
    elif qnet == 'DQN-BN':
        self.qnetwork_local = QNetworkBN(state_size, action_size, seed).to(device)
        self.qnetwork_target = QNetworkBN(state_size, action_size, seed).to(device)
    elif qnet == 'DQN-LN':
        self.qnetwork_local = QNetworkLN(state_size, action_size, seed).to(device)
        self.qnetwork_target = QNetworkLN(state_size, action_size, seed).to(device)
    elif qnet == 'DQN-WN':
        self.qnetwork_local = QNetworkWN(state_size, action_size, seed).to(device)
        self.qnetwork_target = QNetworkWN(state_size, action_size, seed).to(device)
    elif qnet == 'DQN-RN':
        self.qnetwork_local = QNetworkRN(state_size, action_size, seed).to(device)
        self.qnetwork_target = QNetworkRN(state_size, action_size, seed).to(device)
    elif qnet == 'DQN-RLN':
        self.qnetwork_local = QNetworkRLN(state_size, action_size, seed).to(device)
        self.qnetwork_target = QNetworkRLN(state_size, action_size, seed).to(device)
    elif qnet == 'DQN-RNLN':
        self.qnetwork_local = QNetworkRNLN(state_size, action_size, seed).to(device)
        self.qnetwork_target = QNetworkRNLN(state_size, action_size, seed).to(device)
    elif qnet == 'DQN-RN-UAM':
        self.qnetwork_local = QNetworkRN_UAM(state_size, action_size, seed).to(device)
        self.qnetwork_target = QNetworkRN_UAM(state_size, action_size, seed).to(device)
    elif qnet == 'DQN-UAM':
        self.qnetwork_local = QNetwork_UAM(state_size, action_size, seed).to(device)
        self.qnetwork_target = QNetwork_UAM(state_size, action_size, seed).to(device)
    # self.qnetwork_target = QNetwork(state_size, action_size, seed).to(device)
    self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=LR)

    self.x_history_stack_local = None
    self.COMP_stack_local = None
    self.x_history_stack_target = None
    self.COMP_stack_target = None

    # Replay memory
    self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)

    # Initialize time step (for updating every UPDATE_EVERY steps)
    self.t_step = 0
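# A possible refactor of the if/elif chain above (a sketch, not from the original
# code): map the `qnet` string to its network class once, then instantiate both the
# local and target networks from that mapping. The class names are exactly the ones
# the original __init__ references.
QNET_CLASSES = {
    'DQN': QNetwork,
    'DQN-BN': QNetworkBN,
    'DQN-LN': QNetworkLN,
    'DQN-WN': QNetworkWN,
    'DQN-RN': QNetworkRN,
    'DQN-RLN': QNetworkRLN,
    'DQN-RNLN': QNetworkRNLN,
    'DQN-RN-UAM': QNetworkRN_UAM,
    'DQN-UAM': QNetwork_UAM,
}

net_cls = QNET_CLASSES[qnet]  # raises KeyError for an unknown variant name
self.qnetwork_local = net_cls(state_size, action_size, seed).to(device)
self.qnetwork_target = net_cls(state_size, action_size, seed).to(device)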
class Playing_Agent(object):
    """An agent who chooses an action from experience."""

    def __init__(self):
        self.qnetwork = QNetwork(state_size=37, action_size=4, seed=0)
        self.qnetwork.load_state_dict(torch.load('checkpoint.pth'))
        self.qnetwork.eval()

    def act(self, state):
        """Return experienced action given state."""
        state = torch.from_numpy(state).float().unsqueeze(0).to('cpu')
        with torch.no_grad():
            q = self.qnetwork(state)  # action values
        return q.argmax().item()
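# Minimal usage sketch for Playing_Agent. The zero vector stands in for a real
# observation from whatever environment produced the 37-dimensional states and
# 4 discrete actions that checkpoint.pth was trained on; that environment is an
# assumption here, not part of this file.
import numpy as np

agent = Playing_Agent()
state = np.zeros(37, dtype=np.float32)   # placeholder for a real observation
action = agent.act(state)                # greedy action index in [0, 4)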
class Agent():
    """Interacts with and learns from the environment.

    The agent is made of several functions:
        __init__ : create an agent
        step     : save experience in memory and decide if it is time to learn
        act      : choose the most relevant action given the environment state
                   and current knowledge
        learn    : learn from past experiences

    The soft_update function is used to update the target Q-network.
    """

    def __init__(self, state_size, action_size, seed):
        """Initialize an Agent object.

        Params
        ======
            state_size (int): dimension of each state
            action_size (int): dimension of each action
            seed (int): random seed
        """
        self.state_size = state_size
        self.action_size = action_size
        self.seed = random.seed(seed)

        # Q-Network
        self.qnetwork_local = QNetwork(state_size, action_size, seed).to(DEVICE)
        self.qnetwork_target = QNetwork(state_size, action_size, seed).to(DEVICE)
        self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=LR)

        # Replay memory
        self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)

        # Initialize time step (for updating every UPDATE_EVERY steps)
        self.t_step = 0

    def step(self, state, action, reward, next_state, done):
        """Trigger the learning phase once enough exploration has been done.

        At each step, record the experience in memory; after collecting
        experiences, learn from them.

        Params
        ======
            state, action, reward, next_state, done: one experience tuple
        """
        # Save experience in replay memory
        self.memory.add(state, action, reward, next_state, done)

        # Learn every UPDATE_EVERY time steps.
        self.t_step = (self.t_step + 1) % UPDATE_EVERY
        if self.t_step == 0:
            # If enough samples are available in memory, get a random subset and learn
            if len(self.memory) > BATCH_SIZE:
                experiences = self.memory.sample()
                self.learn(experiences, GAMMA)

    def act(self, state, eps=0.):
        """Returns actions for given state as per current policy.

        Params
        ======
            state (array_like): current state
            eps (float): epsilon, for epsilon-greedy action selection
        """
        state = torch.from_numpy(state).float().unsqueeze(0).to(DEVICE)
        # switch network into evaluation mode when not training
        self.qnetwork_local.eval()
        # deactivate gradient tracking as we do not need to backpropagate here
        with torch.no_grad():
            action_values = self.qnetwork_local(state)
        # switch network back into training mode
        self.qnetwork_local.train()

        # Epsilon-greedy action selection
        if random.random() > eps:
            return np.argmax(action_values.cpu().data.numpy())
        else:
            return random.choice(np.arange(self.action_size))

    def learn(self, experiences, gamma):
        """Update value parameters using given batch of experience tuples.

        Params
        ======
            experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
            gamma (float): discount factor
        """
        states, actions, rewards, next_states, dones = experiences

        # Get max predicted Q values (for next states) from target model
        Q_targets_next = self.qnetwork_target(next_states).detach().max(1)[0].unsqueeze(1)
        # Compute Q targets for current states
        Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))
        # Get expected Q values from local model
        Q_expected = self.qnetwork_local(states).gather(1, actions)

        # Compute loss using mean squared error
        loss = F.mse_loss(Q_expected, Q_targets)
        # Minimize the loss
        self.optimizer.zero_grad()
        # update network weights
        loss.backward()
        self.optimizer.step()

        # ------------------- update target network ------------------- #
        self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)

    def soft_update(self, local_model, target_model, tau):
        """Soft update model parameters.

        θ_target = τ*θ_local + (1 - τ)*θ_target

        Params
        ======
            local_model (PyTorch model): weights will be copied from
            target_model (PyTorch model): weights will be copied to
            tau (float): interpolation parameter
        """
        for target_param, local_param in zip(target_model.parameters(),
                                             local_model.parameters()):
            target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
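# The QNetwork referenced by the agents in this file is not defined here. The sketch
# below is an assumption: a plain fully connected network with two hidden layers,
# matching the constructor signature QNetwork(state_size, action_size, seed) used
# above. Hidden-layer sizes are illustrative; other snippets in this file make them
# configurable via extra constructor arguments.
import torch
import torch.nn as nn
import torch.nn.functional as F


class QNetwork(nn.Module):
    """Maps a state vector to one Q-value per action."""

    def __init__(self, state_size, action_size, seed, fc1_units=64, fc2_units=64):
        super().__init__()
        self.seed = torch.manual_seed(seed)
        self.fc1 = nn.Linear(state_size, fc1_units)
        self.fc2 = nn.Linear(fc1_units, fc2_units)
        self.fc3 = nn.Linear(fc2_units, action_size)

    def forward(self, state):
        x = F.relu(self.fc1(state))
        x = F.relu(self.fc2(x))
        return self.fc3(x)  # raw Q-values, one per action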
class Agent():
    """Interacts with and learns from the environment."""

    def __init__(self, state_size, action_size, seed):
        """Initialize an Agent object.

        Params
        ======
            state_size (int): dimension of each state
            action_size (int): dimension of each action
            seed (int): random seed
        """
        self.state_size = state_size
        self.action_size = action_size
        self.seed = seed

        # Q-Network
        self.qnetwork_local = QNetwork(state_size, action_size, seed).to(device)
        self.qnetwork_target = QNetwork(state_size, action_size, seed).to(device)
        self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=LR)

        # Replay memory
        self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)
        # Initialize time step (for updating every UPDATE_EVERY steps)
        self.t_step = 0

    def step(self, state, action, reward, next_state, done):
        # Save experience in replay memory
        self.memory.add(state, action, reward, next_state, done)

        # Learn every UPDATE_EVERY time steps.
        self.t_step = (self.t_step + 1) % UPDATE_EVERY
        if self.t_step == 0:
            # If enough samples are available in memory, get random subset and learn
            if len(self.memory) > BATCH_SIZE:
                experiences = self.memory.sample()
                self.learn(experiences, GAMMA)

    def act(self, state, eps=0.):
        """Returns actions for given state as per current policy.

        Params
        ======
            state (array_like): current state
            eps (float): epsilon, for epsilon-greedy action selection
        """
        state = torch.from_numpy(state).float().unsqueeze(0).to(device)
        self.qnetwork_local.eval()
        with torch.no_grad():
            action_values = self.qnetwork_local(state)
        self.qnetwork_local.train()

        # Epsilon-greedy action selection
        if random.random() > eps:
            return np.argmax(action_values.cpu().data.numpy())
        else:
            return random.choice(np.arange(self.action_size))

    def learn(self, experiences, gamma):
        """Update value parameters using given batch of experience tuples.

        Params
        ======
            experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
            gamma (float): discount factor
        """
        states, actions, rewards, next_states, dones = experiences

        # get max predicted Q values (for next states) from target model
        Q_targets_next = self.qnetwork_target(next_states).detach().max(1)[0].unsqueeze(1)
        # compute Q targets for current states
        Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))

        # Get expected Q values from local model
        Q_expected = self.qnetwork_local(states).gather(1, actions)

        # Compute loss
        loss = F.mse_loss(Q_expected, Q_targets)
        # Minimize the loss
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        # ------------------- update target network ------------------- #
        self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)

    def soft_update(self, local_model, target_model, tau):
        """Soft update model parameters.

        θ_target = τ*θ_local + (1 - τ)*θ_target

        Params
        ======
            local_model (PyTorch model): weights will be copied from
            target_model (PyTorch model): weights will be copied to
            tau (float): interpolation parameter
        """
        for target_param, local_param in zip(target_model.parameters(),
                                             local_model.parameters()):
            target_param.data.copy_(tau * local_param.data +
                                    (1.0 - tau) * target_param.data)
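# The ReplayBuffer used by every agent above is also not defined in this file. This
# sketch fills in what the call sites imply: ReplayBuffer(action_size, buffer_size,
# batch_size, seed) with add(), sample() and __len__(), where sample() returns a
# batch of experiences already converted to tensors. The details (deque storage,
# namedtuple records, uint8 cast for dones) are reasonable assumptions, not
# necessarily the original implementation.
import random
from collections import deque, namedtuple

import numpy as np
import torch

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


class ReplayBuffer:
    """Fixed-size buffer to store experience tuples."""

    def __init__(self, action_size, buffer_size, batch_size, seed):
        self.action_size = action_size
        self.memory = deque(maxlen=buffer_size)
        self.batch_size = batch_size
        self.experience = namedtuple("Experience",
                                     ["state", "action", "reward", "next_state", "done"])
        self.seed = random.seed(seed)

    def add(self, state, action, reward, next_state, done):
        """Add a new experience to memory."""
        self.memory.append(self.experience(state, action, reward, next_state, done))

    def sample(self):
        """Randomly sample a batch of experiences and return them as tensors."""
        experiences = random.sample(self.memory, k=self.batch_size)
        states = torch.from_numpy(np.vstack([e.state for e in experiences])).float().to(device)
        actions = torch.from_numpy(np.vstack([e.action for e in experiences])).long().to(device)
        rewards = torch.from_numpy(np.vstack([e.reward for e in experiences])).float().to(device)
        next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences])).float().to(device)
        dones = torch.from_numpy(np.vstack([e.done for e in experiences]).astype(np.uint8)).float().to(device)
        return (states, actions, rewards, next_states, dones)

    def __len__(self):
        """Return the current size of internal memory."""
        return len(self.memory)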
class DQN_Agent():
    """Interacts with and learns from the environment."""

    def __init__(self, state_size, action_size, brain_name, seed,
                 params=default_params, verbose=False, device=None):
        """Initialize an Agent object.

        Params
        ======
            state_size (int): dimension of each state
            action_size (int): dimension of each action
            seed (int): random seed
        """
        params = self._fill_params(params)

        # implementation details and identity
        self.device = device if device is not None else torch.device(
            "cuda:0" if torch.cuda.is_available() else "cpu")
        self.name = params['name']
        self.brain_name = brain_name

        # set environment information
        self.state_size = state_size
        self.action_size = action_size
        self.seed = random.seed(seed)

        # Q-Network
        self.qnetwork_local = QNetwork(state_size, action_size, seed,
                                       layers=params['layers']).to(self.device)
        self.qnetwork_target = QNetwork(state_size, action_size, seed,
                                        layers=params['layers']).to(self.device)
        self.optimizer = optim.Adam(self.qnetwork_local.parameters(),
                                    lr=params['learning_rate'])

        # Replay memory
        self.memory = ReplayBuffer(action_size, params['buffer_size'],
                                   params['batch_size'], seed)

        # Initialize time steps, t_step for updates, c_step for copying weights
        self.t_step = 0
        self.c_step = 0

        # store params for later
        self.params = params

    def _fill_params(self, src_params):
        params = {
            'name': self._get_param_or_default('name', src_params, default_params),
            'layers': self._get_param_or_default('layers', src_params, default_params),
            'buffer_size': self._get_param_or_default('buffer_size', src_params, default_params),
            'batch_size': self._get_param_or_default('batch_size', src_params, default_params),
            'update_every': self._get_param_or_default('update_every', src_params, default_params),
            'copy_every': self._get_param_or_default('copy_every', src_params, default_params),
            'learning_rate': self._get_param_or_default('learning_rate', src_params, default_params),
            'gamma': self._get_param_or_default('gamma', src_params, default_params),
            'tau': self._get_param_or_default('tau', src_params, default_params)
        }
        return params

    def _get_param_or_default(self, key, src_params, default_params):
        if key in src_params:
            return src_params[key]
        else:
            return default_params[key]

    def display_params(self, force_print=False):
        p = '{}: h{}, exp[{}, {}], u,c[{}, {}], g,t,lr[{}, {}, {}]'.format(
            self.params['name'], self.params['layers'],
            self.params['buffer_size'], self.params['batch_size'],
            self.params['update_every'], self.params['copy_every'],
            self.params['gamma'], self.params['tau'], self.params['learning_rate'])
        if force_print:
            print(p)
        return p

    def step(self, state, action, reward, next_state, done):
        # Save experience in replay memory
        self.memory.add(state, action, reward, next_state, done)

        # update timesteps
        self.t_step = (self.t_step + 1) % self.params['update_every']
        self.c_step = (self.c_step + 1) % self.params['copy_every']

        # only learn every `update_every` timesteps
        if self.t_step == 0:
            if len(self.memory) >= self.params['batch_size']:
                # get random subset and learn
                experiences = self.memory.sample()
                self.learn(experiences, self.params['gamma'])

    def act(self, state, eps=0.):
        """Returns actions for given state as per current policy.

        Params
        ======
            state (array_like): current state
            eps (float): epsilon, for epsilon-greedy action selection
        """
        # get action values for state
        state = torch.from_numpy(state).float().unsqueeze(0).to(self.device)
        self.qnetwork_local.eval()
        with torch.no_grad():
            action_values = self.qnetwork_local(state)
        self.qnetwork_local.train()

        # Epsilon-greedy action selection
        if random.random() > eps:
            return np.argmax(action_values.cpu().data.numpy())
        else:
            return random.choice(np.arange(self.action_size))

    def learn(self, experiences, gamma):
        """Update value parameters using given batch of experience tuples.

        Params
        ======
            experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
            gamma (float): discount factor
        """
        states, actions, rewards, next_states, dones = experiences

        # Get max predicted Q values (for next states) from target model
        Q_targets_next = self.qnetwork_target(next_states).detach().max(1)[0].unsqueeze(1)
        # Compute Q targets for current states
        Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))

        # Get expected Q values from local model for evaluation
        Q_expected = self.qnetwork_local(states).gather(1, actions)

        # Minimize the loss
        loss = F.mse_loss(Q_expected, Q_targets)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        # update the weights in the target network every `copy_every` steps
        if self.c_step == 0:
            self.soft_update(self.qnetwork_local, self.qnetwork_target, self.params['tau'])

    def soft_update(self, local_model, target_model, tau):
        """Soft update model parameters.

        θ_target = τ*θ_local + (1 - τ)*θ_target

        Params
        ======
            local_model (PyTorch model): weights will be copied from
            target_model (PyTorch model): weights will be copied to
            tau (float): interpolation parameter (0 = all target, 1 = all local)
        """
        for target_param, local_param in zip(target_model.parameters(),
                                             local_model.parameters()):
            target_param.data.copy_(tau * local_param.data +
                                    (1.0 - tau) * target_param.data)
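# Usage sketch for DQN_Agent. `brain_name`, the state/action sizes, and the
# 'layers' value (assumed to be a list of hidden-layer sizes passed through to
# QNetwork) are hypothetical stand-ins, and only a few keys are overridden here;
# _fill_params() pulls every missing key from the module-level default_params.
my_params = {
    'name': 'dqn-small',
    'layers': [64, 64],
    'batch_size': 32,
    'learning_rate': 5e-4,
}

agent = DQN_Agent(state_size=37, action_size=4, brain_name='BananaBrain',
                  seed=0, params=my_params)
agent.display_params(force_print=True)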
class Agent():
    """Interacts with and learns from the environment."""

    def __init__(self, state_size, action_size, random_seed):
        """Initialize an Agent object.

        Params
        ======
            state_size (int): dimension of each state
            action_size (int): dimension of each action
            random_seed (int): random seed
        """
        self.state_size = state_size
        self.action_size = action_size
        self.seed = random.seed(random_seed)

        # Q-Network
        self.qnetwork_local = QNetwork(state_size, action_size, random_seed).to(device)
        self.qnetwork_target = QNetwork(state_size, action_size, random_seed).to(device)
        self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=LR)

        # Replay memory
        self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, random_seed)
        # Initialize time step (for updating every UPDATE_EVERY steps)
        self.t_step = 0

    def step(self, state, action, reward, next_state, done):
        # Save experience in replay memory
        self.memory.add(state, action, reward, next_state, done)

        # Learn every UPDATE_EVERY time steps.
        self.t_step = (self.t_step + 1) % UPDATE_EVERY
        if self.t_step == 0:
            # If enough samples are available in memory, get random subset and learn
            if len(self.memory) > BATCH_SIZE:
                experiences = self.memory.sample()
                self.learn(experiences, GAMMA)

    def act(self, state, eps=0.):
        """Returns actions for given state as per current policy.

        Params
        ======
            state (array_like): current state
            eps (float): epsilon, for epsilon-greedy action selection
        """
        state = torch.from_numpy(state).float().unsqueeze(0).to(device)
        self.qnetwork_local.eval()
        with torch.no_grad():
            action_values = self.qnetwork_local(state)
        self.qnetwork_local.train()

        # Epsilon-greedy action selection
        if random.random() > eps:
            return np.argmax(action_values.cpu().data.numpy())
        else:
            return random.choice(np.arange(self.action_size))

    def learn(self, experiences, gamma):
        """Update value parameters using given batch of experience tuples.

        Params
        ======
            experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
            gamma (float): discount factor
        """
        states, actions, rewards, next_states, dones = experiences

        # Get max predicted Q values (for next states) from target model
        Q_targets_next = self.qnetwork_target(next_states).detach().max(1)[0].unsqueeze(1)
        # Compute Q targets for current states
        Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))

        # Get expected Q values from local model
        Q_expected = self.qnetwork_local(states).gather(1, actions)

        # Compute loss
        loss = F.mse_loss(Q_expected, Q_targets)
        # Minimize the loss
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        # ------------------- update target network ------------------- #
        self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)

    def soft_update(self, local_model, target_model, tau):
        """Soft update model parameters.

        θ_target = τ*θ_local + (1 - τ)*θ_target

        Params
        ======
            local_model (PyTorch model): weights will be copied from
            target_model (PyTorch model): weights will be copied to
            tau (float): interpolation parameter
        """
        for target_param, local_param in zip(target_model.parameters(),
                                             local_model.parameters()):
            target_param.data.copy_(tau * local_param.data +
                                    (1.0 - tau) * target_param.data)

    def train(self, env, n_episodes=1000, max_t=1000):
        scores_deque = deque(maxlen=100)
        scores = []
        eps = 1.0
        for i_episode in range(1, n_episodes + 1):
            state = env.reset()
            score = 0
            for t in range(max_t):
                action = self.act(state, eps)
                next_state, reward, done, _ = env.step(action)
                self.step(state, action, reward, next_state, done)
                state = next_state
                score += reward
                if eps > EPS_MIN:
                    eps *= EPS_DECAY
                if done:
                    break
            scores_deque.append(score)
            scores.append(score)
            print('\rEpisode {}\tAverage Score: {:.2f}\tScore: {:.2f}'.format(
                i_episode, np.mean(scores_deque), score), end="")
            if i_episode % 100 == 0:
                torch.save(self.qnetwork_local.state_dict(), 'checkpoint.pth')
                print('\rEpisode {}\tAverage Score: {:.2f}\tEps: {:.2f}'.format(
                    i_episode, np.mean(scores_deque), eps))
        return scores

    def load(self):
        self.qnetwork_local.load_state_dict(torch.load('checkpoint.pth'))
        self.qnetwork_local.eval()
        self.qnetwork_target.load_state_dict(torch.load('checkpoint.pth'))
        self.qnetwork_target.eval()
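# The classes in this file reference module-level constants (BUFFER_SIZE, BATCH_SIZE,
# GAMMA, TAU, LR, UPDATE_EVERY, EPS_DECAY, EPS_MIN, device/DEVICE) that are defined
# elsewhere. The values below are a sketch: the first six match the defaults shown in
# the parameterized __init__ at the top of this file, while the epsilon-schedule
# values are typical choices assumed here, not taken from the source.
import torch

BUFFER_SIZE = int(1e5)   # replay buffer size
BATCH_SIZE = 64          # minibatch size
GAMMA = 0.99             # discount factor
TAU = 1e-3               # for soft update of target parameters
LR = 5e-4                # learning rate
UPDATE_EVERY = 4         # how often to update the network
EPS_DECAY = 0.995        # assumed epsilon decay rate
EPS_MIN = 0.01           # assumed minimum epsilon
device = DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Typical training call against a Gym-style environment `env` (hypothetical):
# agent = Agent(state_size=8, action_size=4, random_seed=0)
# scores = agent.train(env, n_episodes=1000, max_t=1000)
# agent.load()   # reload the saved checkpoint into both networks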
class DQNAgent():
    """Interacts with and learns from the environment."""

    def __init__(self, state_size, action_size, seed, hidden_layer1=64, hidden_layer2=108):
        """Initialize an Agent object.

        Params
        ======
            state_size (int): dimension of each state
            action_size (int): dimension of each action
            seed (int): random seed
        """
        self.state_size = state_size
        self.action_size = action_size
        self.seed = random.seed(seed)

        # Q-Network
        self.qnetwork_local = QNetwork(state_size, action_size, seed,
                                       hidden_layer1, hidden_layer2).to(device)
        self.qnetwork_target = QNetwork(state_size, action_size, seed,
                                        hidden_layer1, hidden_layer2).to(device)
        self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=LR)

        # Replay memory
        self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)
        # Initialize time step (for updating every UPDATE_EVERY steps)
        self.t_step = 0

    def step(self, state, action, reward, next_state, done):
        # Save experience in replay memory
        self.memory.add(state, action, reward, next_state, done)

        # Learn every UPDATE_EVERY time steps.
        self.t_step = (self.t_step + 1) % UPDATE_EVERY
        if self.t_step == 0:
            # If enough samples are available in memory, get random subset and learn
            if len(self.memory) > BATCH_SIZE:
                experiences = self.memory.sample()
                self.learn(experiences, GAMMA)

    def act(self, state, eps=0.):
        """Returns actions for given state as per current policy.

        Params
        ======
            state (array_like): current state
            eps (float): epsilon, for epsilon-greedy action selection
        """
        state = torch.from_numpy(state).float().unsqueeze(0).to(device)
        self.qnetwork_local.eval()
        with torch.no_grad():
            action_values = self.qnetwork_local(state)
        self.qnetwork_local.train()

        # Epsilon-greedy action selection
        if random.random() > eps:
            return np.argmax(action_values.cpu().data.numpy())
        else:
            return random.choice(np.arange(self.action_size))

    def save(self, file_name):
        torch.save(self.qnetwork_local.state_dict(), file_name)

    def load(self, file_name):
        self.qnetwork_local.load_state_dict(torch.load(file_name))

    def save_bin(self, file_name):
        # NOTE: this state_dict save is immediately overwritten by the raw
        # float dump below, which is what load_bin() reads back.
        torch.save(self.qnetwork_local.state_dict(), file_name)
        keymap = self.qnetwork_local.state_dict()
        outputfloats = []
        for key, value in keymap.items():
            # ignore keys, order is all that matters
            if isinstance(value, torch.Tensor):
                tup = value.size()
                lv = value.tolist()
                if isinstance(lv, float):
                    outputfloats.append(lv)
                else:
                    for row in lv:
                        if isinstance(row, float):
                            outputfloats.append(row)
                        else:
                            for item in row:
                                outputfloats.append(item)
        # pad to a fixed number of floats
        while len(outputfloats) < 8192:
            outputfloats.append(0.0)
        output_file = open(file_name, 'wb')
        float_array = array('f', outputfloats)
        float_array.tofile(output_file)
        output_file.close()

    def load_bin(self, file_name):
        keymap = self.qnetwork_local.state_dict()
        new_keymap = OrderedDict()
        sz = os.path.getsize(file_name)
        input_file = open(file_name, 'rb')
        n = sz / 4
        buff = input_file.read(sz)
        fmtstr = '{:d}f'.format(trunc(n))
        inputfloats = struct.unpack(fmtstr, buff)
        input_file.close()
        index = 0
        for key, value in keymap.items():
            # ignore keys, order is all that matters
            if isinstance(value, torch.Tensor):
                tup = value.size()
                if len(tup) == 2:
                    dtensor = []
                    for row in range(tup[0]):
                        trow = []
                        for col in range(tup[1]):
                            trow.append(inputfloats[index])
                            index += 1
                        dtensor.append(trow)
                    tensor_from_list = torch.FloatTensor(dtensor)
                    new_keymap[key] = tensor_from_list
                else:
                    dtensor = []
                    for row in range(tup[0]):
                        dtensor.append(inputfloats[index])
                        index += 1
                    tensor_from_list = torch.FloatTensor(dtensor)
                    new_keymap[key] = tensor_from_list
        self.qnetwork_local.load_state_dict(new_keymap)

    def weights(self):
        return self.qnetwork_local.state_dict()

    def fitness(self, episode_reward):
        # How the % fitness is calculated:
        # Episode reward is a value from about -500 to +300
        #   <   0 is a very bad crash
        #   < 100 is a bad crash, 0%
        #   < 150 is a failure
        #   < 200 is poor
        #   > 200 is okay, the pass mark, 50%
        #   > 220 is good
        #   > 240 is very good
        #   > 270 is excellent
        #   > 300 is perfect, 100%
        # Assuming 100% = 300 and 100 = 0%
        fitness = episode_reward - 100
        if fitness < 0:
            fitness = 0  # floor at 0%; anything below is considered 0%
        # Now divide the score by 2 to get a percentage
        fitness = fitness / 2
        if fitness > 100:
            fitness = 100  # cap at 100%; anything above is considered 100%
        # Note that 50% is considered 'okay': a successful landing, the passing mark
        return fitness

    def learn(self, experiences, gamma):
        """Update value parameters using given batch of experience tuples.

        Params
        ======
            experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
            gamma (float): discount factor
        """
        states, actions, rewards, next_states, dones = experiences

        # Get max predicted Q values (for next states) from target model
        Q_targets_next = self.qnetwork_target(next_states).detach().max(1)[0].unsqueeze(1)
        # Compute Q targets for current states
        Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))

        # Get expected Q values from local model
        Q_expected = self.qnetwork_local(states).gather(1, actions)

        # Compute loss
        loss = F.mse_loss(Q_expected, Q_targets)
        # Minimize the loss
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        # ------------------- update target network ------------------- #
        self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)

    def soft_update(self, local_model, target_model, tau):
        """Soft update model parameters.

        θ_target = τ*θ_local + (1 - τ)*θ_target

        Params
        ======
            local_model (PyTorch model): weights will be copied from
            target_model (PyTorch model): weights will be copied to
            tau (float): interpolation parameter
        """
        for target_param, local_param in zip(target_model.parameters(),
                                             local_model.parameters()):
            target_param.data.copy_(tau * local_param.data +
                                    (1.0 - tau) * target_param.data)
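# Usage sketch for DQNAgent's raw-binary round trip and fitness score. The state and
# action sizes below, and the lander-style reward scale described in fitness(), are
# assumptions for illustration rather than documented requirements of this class.
agent = DQNAgent(state_size=8, action_size=4, seed=0)

agent.save_bin('weights.bin')   # flatten weights into a padded float32 file
agent.load_bin('weights.bin')   # read them back into the local network

# fitness() maps an episode reward onto a 0-100% scale:
#   reward 250 -> (250 - 100) / 2 = 75%
#   reward  80 -> floored to 0%
print(agent.fitness(250), agent.fitness(80))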