def __init__(self, history_size, num_layers, units_per_layer, lr, obs_n_shape,
             act_shape, act_type, gumbel_temperature, q_network, agent_index,
             noise, use_ounoise, temporal_mode):
    """
    Implementation of the policy network, with optional gumbel softmax
    activation at the final layer.
    """
    self.num_layers = num_layers
    self.lr = lr
    self.history_size = history_size
    self.obs_n_shape = obs_n_shape
    self.act_shape = act_shape
    self.act_type = act_type
    if act_type is Discrete:
        self.use_gumbel = True
    else:
        self.use_gumbel = False
    self.use_ounoise = use_ounoise
    self.gumbel_temperature = gumbel_temperature
    self.q_network = q_network
    self.agent_index = agent_index
    self.clip_norm = 0.5
    self.noise = noise
    self.noise_mode = OUNoise(act_shape[0], scale=1.0)
    self.temporal_mode = temporal_mode
    self.optimizer = tf.keras.optimizers.Adam(lr=self.lr)

    ### set up network structure
    self.obs_input = tf.keras.layers.Input(
        shape=(self.history_size, self.obs_n_shape[agent_index][0]))

    self.temporal_state = None
    if self.temporal_mode.lower() == "rnn":
        self.temporal_state = tf.keras.layers.GRU(units_per_layer)
    elif self.temporal_mode.lower() == "attention":
        self.temporal_state = SelfAttention(
            activation=tf.keras.layers.LeakyReLU(alpha=0.1))
    else:
        raise RuntimeError(
            "Temporal Information Layer should be rnn or attention but %s found!"
            % self.temporal_mode)

    self.hidden_layers = []
    for idx in range(num_layers):
        layer = tf.keras.layers.Dense(units_per_layer, activation='relu',
                                      name='ag{}pol_hid{}'.format(agent_index, idx))
        self.hidden_layers.append(layer)

    if self.use_gumbel:
        self.output_layer = tf.keras.layers.Dense(
            self.act_shape, activation='linear',
            name='ag{}pol_out{}'.format(agent_index, idx))
    else:
        self.output_layer = tf.keras.layers.Dense(
            self.act_shape, activation='tanh',
            name='ag{}pol_out{}'.format(agent_index, idx))

    # connect layers
    x = self.obs_input
    x = self.temporal_state(x)
    if self.temporal_mode.lower() == "attention":
        # the attention layer returns a full sequence; keep only the last timestep
        x = tf.keras.layers.Lambda(lambda x: x[:, -1])(x)
    for layer in self.hidden_layers:
        x = layer(x)
    x = self.output_layer(x)

    self.model = tf.keras.Model(inputs=[self.obs_input], outputs=[x])
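# --- Illustrative sketch (not part of the original code) ---
# A minimal, self-contained example of how the temporal branch above collapses a
# (history_size, obs_dim) observation window into a single feature vector. It only
# exercises the standard-Keras "rnn" path; the "attention" path uses the external
# SelfAttention layer referenced above, which returns a full sequence and therefore
# needs the Lambda slice x[:, -1] to keep the last timestep. All sizes below are
# made up for the example.
import numpy as np
import tensorflow as tf

history_size, obs_dim, units, act_dim = 4, 10, 64, 5
obs_input = tf.keras.layers.Input(shape=(history_size, obs_dim))
x = tf.keras.layers.GRU(units)(obs_input)            # GRU already returns only the last state
out = tf.keras.layers.Dense(act_dim, activation='tanh')(x)
sketch_model = tf.keras.Model(inputs=[obs_input], outputs=[out])

dummy_history = np.zeros((1, history_size, obs_dim), dtype=np.float32)
print(sketch_model(dummy_history).shape)             # (1, 5)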
def main(arglist):
    global no_actions, no_features, no_agents
    env = u.make_env(arglist.scenario, arglist.no_agents)
    obs_shape_n = env.observation_space
    act_shape_n = env.action_space
    act_shape_n = u.space_n_to_shape_n(act_shape_n)
    no_agents = env.n
    batch_size = arglist.batch_size
    no_neighbors = arglist.no_neighbors
    k_lst = list(range(no_neighbors + 2))[2:]  # [2,3]
    u.create_seed(arglist.seed)

    noise_mode = OUNoise(act_shape_n[0], scale=1.0)
    noise = 0.1
    reduction_noise = 0.999
    # Velocity.x Velocity.y Pos.x Pos.y {Land.Pos.x Land.Pos.y}*10 {Ent.Pos.x Ent.Pos.y}*9
    no_features = obs_shape_n[0].shape[0]
    no_actions = act_shape_n[0][0]

    model, model_t = __build_conf()
    optimizer = AdamW(learning_rate=arglist.lr, weight_decay=1e-5)

    # Results
    episode_rewards = [0.0]  # sum of rewards for all agents
    result_path = os.path.join("results", arglist.exp_name)
    res = os.path.join(result_path, "%s.csv" % arglist.exp_name)
    if not os.path.exists(result_path):
        os.makedirs(result_path)

    replay_buffer = ReplayBuffer(arglist.max_buffer_size)  # Init Buffer
    episode_step = 0
    train_step = 0

    t_start = time.time()
    obs_n = env.reset()
    adj = u.get_adj(obs_n, k_lst, no_agents, is_gcn=True)

    print('Starting iterations...')
    while True:
        episode_step += 1
        terminal = (episode_step >= arglist.max_episode_len)
        if episode_step % 3 == 0:
            adj = u.get_adj(obs_n, k_lst, no_agents, is_gcn=True)

        predictions = get_predictions(u.to_tensor(np.array(obs_n)), adj, model)
        actions = get_actions(predictions, noise, noise_mode)

        # Observe next state, reward and done value
        new_obs_n, rew_n, done_n, _ = env.step(actions)
        done = all(done_n) or terminal
        cooperative_reward = rew_n[0]
        # Store the data in the replay memory
        replay_buffer.add(obs_n, adj, actions, cooperative_reward, new_obs_n, done)
        obs_n = new_obs_n

        episode_rewards[-1] += cooperative_reward

        if done or terminal:
            obs_n = env.reset()
            episode_step = 0
            episode_rewards.append(0)

        # increment global step counter
        train_step += 1

        # for displaying learned policies
        if arglist.display:
            time.sleep(0.1)
            env.render()
            continue

        # Train the models
        train_cond = not arglist.display
        if train_cond and len(replay_buffer) > arglist.batch_size:
            if len(episode_rewards) % arglist.update_rate == 0:  # only update every 30 episodes
                for _ in range(arglist.update_times):
                    state, adj_n, actions, rewards, new_state, dones = replay_buffer.sample(batch_size)
                    noise *= reduction_noise

                    # Calculate TD-target
                    with tf.GradientTape() as tape:
                        target_q_values = model_t([new_state, adj_n])
                        # Apply max(Q) to obtain the TD-target
                        target_q_tot = tf.reduce_max(target_q_values, axis=-1)
                        # Apply VDN to reduce the agent-dimension
                        max_q_tot = tf.reduce_sum(target_q_tot, axis=-1)
                        y = rewards + (1. - dones) * arglist.gamma * max_q_tot

                        # Predictions
                        action_one_hot = tf.one_hot(
                            tf.argmax(actions, axis=2, name='action_one_hot'),
                            no_actions)
                        q_values = model([state, adj_n])
                        q_tot = tf.reduce_sum(q_values * action_one_hot,
                                              axis=-1, name='q_acted')
                        pred = tf.reduce_sum(q_tot, axis=1)

                        if "huber" in arglist.loss_type:
                            loss = tf.reduce_sum(u.huber_loss(pred, tf.stop_gradient(y)))
                        elif "mse" in arglist.loss_type:
                            loss = tf.losses.mean_squared_error(pred, tf.stop_gradient(y))
                        else:
                            raise RuntimeError(
                                "Loss function should be either Huber or MSE. %s found!"
                                % arglist.loss_type)

                    gradients = tape.gradient(loss, model.trainable_variables)
                    local_clipped = u.clip_by_local_norm(gradients, 0.1)
                    optimizer.apply_gradients(zip(local_clipped, model.trainable_variables))

                tf.saved_model.save(model, result_path)

        # display training output
        if train_step % arglist.save_rate == 0:
            # eval_reward = get_eval_reward(env, model)
            with open(res, "a+") as f:
                mes_dict = {
                    "steps": train_step,
                    "episodes": len(episode_rewards),
                    "train_episode_reward": np.round(np.mean(episode_rewards[-arglist.save_rate:]), 3),
                    # "eval_episode_reward": np.round(np.mean(eval_reward), 3),
                    "time": round(time.time() - t_start, 3)
                }
                print(mes_dict)
                for item in list(mes_dict.values()):
                    f.write("%s\t" % item)
                f.write("\n")
            t_start = time.time()

        # train target model
        if arglist.soft_update:
            weights = model.get_weights()
            target_weights = model_t.get_weights()
            for w in range(len(weights)):
                target_weights[w] = arglist.tau * weights[w] + (1 - arglist.tau) * target_weights[w]
            model_t.set_weights(target_weights)
        elif terminal and train_step % 200 == 0:
            model_t.set_weights(model.get_weights())
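# --- Illustrative sketch (not part of the original code) ---
# Standalone check, with made-up tensor sizes, of the VDN target computation used
# in the training loop above: per-agent greedy Q-values are summed over the agent
# dimension into one team value, which then enters the usual TD target.
import tensorflow as tf

batch, agents, actions = 2, 3, 5
gamma = 0.95
target_q_values = tf.random.uniform((batch, agents, actions))  # Q_i(s', a) per agent
rewards = tf.random.uniform((batch,))
dones = tf.zeros((batch,))

target_q_tot = tf.reduce_max(target_q_values, axis=-1)   # greedy per-agent value, shape (batch, agents)
max_q_tot = tf.reduce_sum(target_q_tot, axis=-1)         # VDN sum over agents, shape (batch,)
y = rewards + (1. - dones) * gamma * max_q_tot           # TD target, shape (batch,)
print(y.shape)                                           # (2,)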
class MADDPGPolicyNetwork(object):

    def __init__(self, num_layers, units_per_layer, lr, obs_n_shape, act_shape,
                 act_type, gumbel_temperature, q_network, agent_index, noise,
                 use_ounoise):
        """
        Implementation of the policy network, with optional gumbel softmax
        activation at the final layer.
        """
        self.num_layers = num_layers
        self.lr = lr
        self.obs_n_shape = obs_n_shape
        self.act_shape = act_shape
        self.act_type = act_type
        if act_type is Discrete:
            self.use_gumbel = True
        else:
            self.use_gumbel = False
        self.use_ounoise = use_ounoise
        self.gumbel_temperature = gumbel_temperature
        self.q_network = q_network
        self.agent_index = agent_index
        self.clip_norm = 0.5
        self.noise = noise
        self.noise_mode = OUNoise(act_shape[0], scale=1.0)
        self.optimizer = tf.keras.optimizers.Adam(lr=self.lr)

        ### set up network structure
        self.obs_input = tf.keras.layers.Input(shape=self.obs_n_shape[agent_index])

        self.hidden_layers = []
        for idx in range(num_layers):
            layer = tf.keras.layers.Dense(units_per_layer, activation='relu',
                                          name='ag{}pol_hid{}'.format(agent_index, idx))
            self.hidden_layers.append(layer)

        if self.use_gumbel:
            self.output_layer = tf.keras.layers.Dense(
                self.act_shape, activation='linear',
                name='ag{}pol_out{}'.format(agent_index, idx))
        else:
            self.output_layer = tf.keras.layers.Dense(
                self.act_shape, activation='tanh',
                name='ag{}pol_out{}'.format(agent_index, idx))

        # connect layers
        x = self.obs_input
        for layer in self.hidden_layers:
            x = layer(x)
        x = self.output_layer(x)

        self.model = tf.keras.Model(inputs=[self.obs_input], outputs=[x])

    @classmethod
    def gumbel_softmax_sample(cls, logits):
        """
        Produces Gumbel softmax samples from the input log-probabilities (logits).
        These are used because they are differentiable approximations of the
        distribution of an argmax.
        """
        uniform_noise = tf.random.uniform(tf.shape(logits))
        gumbel = -tf.math.log(-tf.math.log(uniform_noise))
        noisy_logits = gumbel + logits  # / temperature
        return tf.math.softmax(noisy_logits)

    def forward_pass(self, obs):
        """
        Performs a simple forward pass through the NN.
        """
        x = obs
        for idx in range(self.num_layers):
            x = self.hidden_layers[idx](x)
        # log probabilities of the gumbel softmax dist are the output of the network
        outputs = self.output_layer(x)
        return outputs

    @tf.function
    def get_action(self, obs):
        outputs = self.forward_pass(obs)
        if self.use_gumbel:
            outputs = self.gumbel_softmax_sample(outputs)
        elif self.use_ounoise:
            outputs = outputs + self.noise * self.noise_mode.noise()
            outputs = tf.clip_by_value(outputs, -1, 1)
        return outputs

    # @tf.function
    def train(self, obs_n, act_n, adjacency):
        """
        Updates the policy from the states and the actions that were executed in
        the environment by the agents.
        """
        with tf.GradientTape() as tape:
            # linear output layer
            x = self.forward_pass(obs_n[self.agent_index])
            act_n = tf.unstack(act_n)
            if self.use_gumbel:
                # log probabilities of the gumbel softmax dist are the output of the network
                logits = x
                act_n[self.agent_index] = self.gumbel_softmax_sample(logits)
            elif self.use_ounoise:
                act_n[self.agent_index] = x + self.noise * self.noise_mode.noise()
                act_n[self.agent_index] = tf.clip_by_value(act_n[self.agent_index], -1, 1)
            else:
                act_n[self.agent_index] = x

            # q_value = self.q_network._predict_internal(obs_n + act_n)
            concatenated_input = tf.concat([obs_n, act_n], axis=-1)
            concatenated_input = tf.transpose(concatenated_input, [1, 0, 2])
            q_value = self.q_network.model([concatenated_input, adjacency])

            # policy_regularization = tf.math.reduce_mean(tf.math.square(x))
            policy_regularization = tf.math.reduce_mean(x)
            # policy loss (maximize Q) plus a small regularization term
            loss = -tf.math.reduce_mean(q_value) + 1e-3 * policy_regularization

        gradients = tape.gradient(loss, self.model.trainable_variables)  # todo not sure if this really works
        # gradients = tf.clip_by_global_norm(gradients, self.clip_norm)[0]
        local_clipped = clip_by_local_norm(gradients, self.clip_norm)
        self.optimizer.apply_gradients(zip(local_clipped, self.model.trainable_variables))
        return loss
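# --- Illustrative sketch (not part of the original code) ---
# Quick standalone check of the gumbel_softmax_sample classmethod defined above:
# the samples are valid probability vectors and gradients flow back through the
# logits, which is why the sampler is used in place of a hard argmax during
# training. The downstream loss below is arbitrary and only probes the gradient.
import tensorflow as tf

logits = tf.Variable(tf.random.normal((1, 5)))
with tf.GradientTape() as tape:
    sample = MADDPGPolicyNetwork.gumbel_softmax_sample(logits)
    loss = tf.reduce_sum(sample * tf.range(5, dtype=tf.float32))
print(tf.reduce_sum(sample, axis=-1).numpy())       # ~1.0: each row is softmax-normalized
print(tape.gradient(loss, logits) is not None)      # True: differentiable w.r.t. the logits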