def __init__(self, task):
    self.task = task
    self.state_size = task.state_size
    self.action_size = task.action_size
    self.action_low = task.action_low
    self.action_high = task.action_high

    # Actor (Policy) Model
    self.actor_local = Actor(self.state_size, self.action_size, self.action_low, self.action_high)
    self.actor_target = Actor(self.state_size, self.action_size, self.action_low, self.action_high)

    # Critic (Value) Model
    self.critic_local = Critic(self.state_size, self.action_size)
    self.critic_target = Critic(self.state_size, self.action_size)

    # Initialize target model parameters with local model parameters
    self.critic_target.model.set_weights(self.critic_local.model.get_weights())
    self.actor_target.model.set_weights(self.actor_local.model.get_weights())

    # Noise process
    self.exploration_mu = 0
    self.exploration_theta = 0.10  # same direction
    self.exploration_sigma = 0.001  # random noise
    self.noise = OUNoise(self.action_size, self.exploration_mu,
                         self.exploration_theta, self.exploration_sigma)

    # Replay memory
    self.buffer_size = 100000
    self.batch_size = 64
    self.memory = ReplayBuffer(self.buffer_size, self.batch_size)

    # Algorithm parameters
    self.gamma = 0.90  # discount factor
    self.tau = 0.1  # for soft update of target parameters

    # Score tracker
    self.best_score = -np.inf
    self.score = 0
def __init__(self, task):
    self.task = task
    self.state_size = task.state_size
    self.action_size = task.action_size
    self.action_low = task.action_low
    self.action_high = task.action_high

    # Actor (Policy) Model
    self.actor_local = Actor(self.state_size, self.action_size, self.action_low, self.action_high)
    self.actor_target = Actor(self.state_size, self.action_size, self.action_low, self.action_high)

    # Critic (Value) Model
    self.critic_local = Critic(self.state_size, self.action_size)
    self.critic_target = Critic(self.state_size, self.action_size)

    # Initialize target model parameters with local model parameters
    self.critic_target.model.set_weights(self.critic_local.model.get_weights())
    self.actor_target.model.set_weights(self.actor_local.model.get_weights())

    # Noise process
    self.exploration_mu = 0
    self.exploration_theta = 0.15
    self.exploration_sigma = 0.2
    self.noise = OUNoise(self.action_size, self.exploration_mu,
                         self.exploration_theta, self.exploration_sigma)

    # Replay memory
    self.buffer_size = 100000
    self.batch_size = 64
    self.memory = ReplayBuffer(self.buffer_size, self.batch_size)

    # Algorithm parameters
    self.gamma = 0.99  # discount factor
    self.tau = 0.001  # for soft update of target parameters
def __init__(self, task):
    self.task = task
    self.state_size = task.state_size
    self.action_size = task.action_size
    self.action_low = task.action_low
    self.action_high = task.action_high

    # Actor (Policy) Model
    self.actor_local = Actor(self.state_size, self.action_size, self.action_low, self.action_high)
    self.actor_target = Actor(self.state_size, self.action_size, self.action_low, self.action_high)

    # Critic (Value) Model
    self.critic_local = Critic(self.state_size, self.action_size)
    self.critic_target = Critic(self.state_size, self.action_size)

    # Initialize target model parameters with local model parameters
    self.critic_target.model.set_weights(self.critic_local.model.get_weights())
    self.actor_target.model.set_weights(self.actor_local.model.get_weights())

    # Noise process
    self.exploration_mu = 0
    self.exploration_theta = 0.15
    self.exploration_sigma = 0.01
    self.noise = OUNoise(self.action_size, self.exploration_mu,
                         self.exploration_theta, self.exploration_sigma)

    # Replay memory
    self.buffer_size = 100000
    self.batch_size = 64
    self.memory = ReplayBuffer(self.buffer_size, self.batch_size)

    # Algorithm parameters
    self.gamma = 0.99  # discount factor 0.99
    self.tau = 0.1  # for soft update of target parameters 0.01
def __init__(self, task, seed=None, render=False):
    self.env = task.env
    self.total_reward = 0
    self.steps = 0
    self.action_repeat = 3
    self.render = render

    # Score tracker and learning parameters
    self.score = -np.inf
    self.best_w = None
    self.best_score = -np.inf
    self.noise_scale = 0.1

    # counter
    self.count = 0

    # Replay memory
    self.buffer_size = 100000
    self.batch_size = 64
    self.memory = ReplayBuffer(self.buffer_size, self.batch_size)

    # Noise process
    self.exploration_mu = 0
    self.exploration_theta = 0.15
    self.exploration_sigma = 0.2
    self.noise = OUNoise(1, self.exploration_mu,
                         self.exploration_theta, self.exploration_sigma)
def __init__(self, task, buffer_size=100000, batch_size=64, gamma=0.99, tau=0.01):
    self.task = task
    self.state_size = task.state_size
    self.action_size = task.action_size
    self.action_low = task.action_low
    self.action_high = task.action_high

    # Actor (Policy) Model
    self.actor_local = Actor(self.state_size, self.action_size,
                             self.action_low, self.action_high, learning_rate=1e-3)
    self.actor_target = Actor(self.state_size, self.action_size,
                              self.action_low, self.action_high, learning_rate=1e-3)

    # Critic (Value) Model
    self.critic_local = Critic(self.state_size, self.action_size, learning_rate=1e-4)
    self.critic_target = Critic(self.state_size, self.action_size, learning_rate=1e-4)

    # Initialize target model parameters with local model parameters
    self.critic_target.model.set_weights(self.critic_local.model.get_weights())
    self.actor_target.model.set_weights(self.actor_local.model.get_weights())

    # Noise process
    self.noise = OUNoise(size=self.action_size)

    # Replay memory
    self.buffer_size = buffer_size
    self.batch_size = batch_size  # 64
    self.memory = ReplayBuffer(self.buffer_size, self.batch_size)

    # Algorithm parameters
    self.gamma = gamma  # 0.99 discount factor
    self.tau = tau  # 0.01 for soft update of target parameters

    # Initialization
    self.last_state = None
    self.total_reward = 0.0

    # Score tracker and learning parameters
    self.score = 0
    self.best_score = -np.inf
    self.count = 0
def __init__(self, obs_space, act_space, sess, n_agents, name):
    self.obs_space = obs_space
    self.act_space = act_space
    self.n_agents = n_agents
    self.dqn = DQN(sess, obs_space, sup_len, act_space, n_agents, name)
    self.rb = ReplayBuffer(capacity=rb_capacity)
    self.train_cnt = 0
def __init__(self, task, sess, stats):
    self.sess = sess
    self.task = task
    self.stats = stats

    tau = 0.01
    learning_rate = 2e-4

    self.critic_local = QNetwork(sess, task, stats, name='critic_local',
                                 hidden_units=64, dropout_rate=0.2)
    self.critic_target = QNetwork(sess, task, stats, name='critic_target',
                                  hidden_units=64, dropout_rate=0.2)
    self.actor_local = Policy(sess, task, stats, name='actor_local',
                              hidden_units=32, dropout_rate=0.2)
    self.actor_target = Policy(sess, task, stats, name='actor_target',
                               hidden_units=32, dropout_rate=0.2)

    soft_copy_critic_ops = self._create_soft_copy_op('critic_local', 'critic_target', tau=tau)
    soft_copy_actor_ops = self._create_soft_copy_op('actor_local', 'actor_target', tau=tau)
    self._soft_copy_ops = []
    self._soft_copy_ops.extend(soft_copy_critic_ops)
    self._soft_copy_ops.extend(soft_copy_actor_ops)

    self.gamma = 0.99  # reward discount rate

    # Exploration noise process
    exploration_mu = 0
    exploration_theta = 0.15
    exploration_sigma = 0.15
    self.noise = OUNoise(task.action_size, exploration_mu,
                         exploration_theta, exploration_sigma)

    # Replay memory
    self.batch_size = 256
    self.memory = ReplayBuffer(buffer_size=10000, decay_steps=1000)

    self.sess.run(tf.global_variables_initializer())
def __init__(self, obs_space, act_space, sess, n_agents, name):
    self.act_space = act_space
    self.n_agents = n_agents
    self.ped_dqn = DQN(sess, obs_space, sup_len, act_space, n_agents, name)
    self.action_rb = ReplayBuffer(capacity=rb_capacity)
    self.train_cnt = 0
    self.sns_q = None
def __init__(self, task, params={}):
    self.task = task
    self.state_size = task.state_size
    self.action_size = task.action_size
    self.action_low = task.action_low
    self.action_high = task.action_high

    # Actor (Policy) Model
    self.actor_local = Actor(self.state_size, self.action_size,
                             self.action_low, self.action_high, params=params)
    self.actor_target = Actor(self.state_size, self.action_size,
                              self.action_low, self.action_high, params=params)

    # Critic (Value) Model
    self.critic_local = Critic(self.state_size, self.action_size, params)
    self.critic_target = Critic(self.state_size, self.action_size, params)

    # Initialize target model parameters with local model parameters
    self.critic_target.model.set_weights(self.critic_local.model.get_weights())
    self.actor_target.model.set_weights(self.actor_local.model.get_weights())

    # Noise process
    self.exploration_mu = 0
    self.exploration_theta = 0.15  # same direction
    self.exploration_sigma = 0.001  # random noise
    if params.get("sigma"):
        self.exploration_sigma = params.get("sigma")
    self.noise = OUNoise(self.action_size, self.exploration_mu,
                         self.exploration_theta, self.exploration_sigma)

    # Replay memory
    self.buffer_size = 100000
    self.batch_size = 64
    if params.get("batch_size"):
        self.batch_size = params.get("batch_size")
    self.memory = ReplayBuffer(self.buffer_size, self.batch_size)

    # Algorithm parameters
    self.gamma = 0.99  # discount factor
    self.tau = 0.1  # for soft update of target parameters
    # self.gamma = 0.9
    # self.tau = 0.05

    # Statistics
    self.best_score = -np.inf
    self.score = 0
class Agent(object):
    def __init__(self, obs_space, act_space, sess, n_agents, name):
        self.act_space = act_space
        self.n_agents = n_agents
        self.dqn = DQN(sess, obs_space, sup_len, act_space, n_agents, name)
        self.rb = ReplayBuffer(capacity=rb_capacity)
        self.train_cnt = 0

    def act_multi(self, obs, random):
        q_values = self.dqn.get_q_values([obs])[0]
        r_action = np.random.randint(self.act_space, size=(len(obs)))
        action_n = ((random + 1) % 2) * (q_values.argmax(axis=1)) + (random) * r_action
        return action_n

    def incentivize_multi(self, info):
        state, action, reward, next_state, done = info
        return reward

    def add_to_memory(self, exp):
        self.rb.add_to_memory(exp)

    def sync_target(self):
        self.dqn.training_target_qnet()

    def train(self, use_rx):
        data = self.rb.sample_from_memory(minibatch_size)
        state = np.asarray([x[0] for x in data])
        action = np.asarray([x[1] for x in data])
        base_reward = np.asarray([x[2] for x in data])
        next_state = np.asarray([x[3] for x in data])
        done = np.asarray([x[4] for x in data])
        not_done = (done + 1) % 2

        if use_rx:
            rx_inc = np.asarray([x[5] for x in data])
            reward = base_reward + rx_inc
        else:
            reward = base_reward

        td_error, _ = self.dqn.training_qnet(state, action, reward, not_done, next_state)
        self.train_cnt += 1

        if self.train_cnt % (target_update) == 0:
            self.dqn.training_target_qnet()

        return td_error
def __init__(self, task):
    self.task = task
    self.state_size = task.state_size
    self.action_size = task.action_size
    self.action_low = task.action_low
    self.action_high = task.action_high

    # AE: Although OUNoise gives me a convenient set of randomness for each of the rotors, I still need
    # AE: to make a decision myself on how to apply the randomness and how to manage its magnitude
    # AE: (i.e. my explore vs exploit strategy). These variables will do that.
    self.explore_start = 1.0  # AE: exploration probability at start
    self.explore_stop = 0.001  # AE: minimum exploration probability
    self.decay_rate = 0.003  # AE: exponential decay rate for exploration prob
    self.magnitude_coeff = 0.1  # AE: a coefficient to limit randomness

    # Actor (Policy) Model
    self.actor_local = Actor(self.state_size, self.action_size, self.action_low, self.action_high)
    self.actor_target = Actor(self.state_size, self.action_size, self.action_low, self.action_high)

    # Critic (Value) Model
    self.critic_local = Critic(self.state_size, self.action_size)
    self.critic_target = Critic(self.state_size, self.action_size)

    # Initialize target model parameters with local model parameters
    self.critic_target.model.set_weights(self.critic_local.model.get_weights())
    self.actor_target.model.set_weights(self.actor_local.model.get_weights())

    # Noise process
    self.exploration_mu = 0  # AE: additive to the noise. mu * theta will be directly added
    self.exploration_theta = 0.15  # AE: old noise will be multiplied by this
    self.exploration_sigma = 0.2  # AE: new noise will be multiplied by this
    self.noise = OUNoise(self.action_size, self.exploration_mu,
                         self.exploration_theta, self.exploration_sigma)

    # Replay memory
    self.buffer_size = 100000
    self.batch_size = 64
    self.memory = ReplayBuffer(self.buffer_size, self.batch_size)

    # Algorithm parameters
    self.gamma = 0.99  # discount factor
    # AE: The learning rate. How much we trust the new values compared to the old ones.
    self.tau = 0.0001  # for soft update of target parameters

    # AE: current reward in learning procedure (for statistics)
    self.score = -np.inf

    # Episode variables
    self.reset_episode()
class Agent(object):
    def __init__(self, global_state_space, obs_space, act_space, n_agents, sess, name):
        self.act_space = act_space
        self.obs_space = obs_space
        self.n_agents = n_agents
        self.dqn = DQN(sess, global_state_space, obs_space, act_space, n_agents, name)
        self.rb = ReplayBuffer(capacity=rb_capacity)
        self.train_cnt = 0

    def act_multi(self, obs, random):
        if random.all():
            return np.random.randint(self.act_space, size=(len(obs)))
        q_values = self.dqn.get_q_values([obs])[0]
        r_action = np.random.randint(self.act_space, size=(len(obs)))
        action_n = ((random + 1) % 2) * (q_values.argmax(axis=1)) + (random) * r_action
        return action_n

    def add_to_memory(self, exp):
        self.rb.add_to_memory(exp)

    def sync_target(self):
        self.dqn.training_target_qnet()

    def train(self):
        data = self.rb.sample_from_memory(minibatch_size)
        state = np.asarray([x[0] for x in data])
        action = np.asarray([x[1] for x in data])
        reward = np.asarray([x[2] for x in data])
        next_state = np.asarray([x[3] for x in data])
        done = np.asarray([x[4] for x in data])
        global_state = np.asarray([x[5] for x in data])
        next_global_state = np.asarray([x[6] for x in data])
        not_done = (done + 1) % 2

        td_error, _ = self.dqn.training_qnet(global_state, state, action, reward,
                                             not_done, next_global_state, next_state)
        self.train_cnt += 1

        if self.train_cnt % target_update == 0:
            self.dqn.training_target_qnet()

        return td_error
def __init__(self, env, actor_model, critic_model, gamma=0.99, tau=1e-3,
             critic_lr=1e-3, actor_lr=1e-4, critic_decay=0.):
    # Changed this to use generic env instead of Task
    super().__init__(env)
    self.state_size = env.observation_space.shape[0]
    self.action_size = env.action_space.shape[0]
    self.action_low = env.action_space.low
    self.action_high = env.action_space.high

    # Algorithm parameters
    self.gamma = gamma  # discount factor
    self.tau = tau  # for soft update of target parameters
    self.critic_lr = critic_lr
    self.actor_lr = actor_lr

    # Actor (Policy) Model
    self.actor_local = Actor(self.state_size, self.action_size,
                             self.action_low, self.action_high, self.actor_lr)
    self.actor_target = Actor(self.state_size, self.action_size,
                              self.action_low, self.action_high, self.actor_lr)

    # Critic (Value) Model
    self.critic_local = Critic(self.state_size, self.action_size, self.critic_lr)
    self.critic_target = Critic(self.state_size, self.action_size, self.critic_lr)

    # Initialize target model parameters with local model parameters
    self.critic_target.model.set_weights(self.critic_local.model.get_weights())
    self.actor_target.model.set_weights(self.actor_local.model.get_weights())

    # Noise process
    self.exploration_mu = 0
    self.exploration_theta = 0.15
    self.exploration_sigma = 0.2
    self.noise = OUNoise(self.action_size, self.exploration_mu,
                         self.exploration_theta, self.exploration_sigma)

    # Replay memory
    self.buffer_size = 100000
    self.batch_size = 64
    self.memory = ReplayBuffer(self.buffer_size, self.batch_size)
def __init__(self, task, seed=None, render=False):
    self.env = task.env
    self.state_size = task.state_size
    self.action_size = task.action_size
    self.action_low = task.action_low
    self.action_high = task.action_high

    # Actor (Policy) Model
    self.actor_local = Actor(self.state_size, self.action_size, self.action_low, self.action_high)
    self.actor_target = Actor(self.state_size, self.action_size, self.action_low, self.action_high)

    # Critic (Value) Model
    self.critic_local = Critic(self.state_size, self.action_size)
    self.critic_target = Critic(self.state_size, self.action_size)

    # Initialize target model parameters with local model parameters
    self.critic_target.model.set_weights(self.critic_local.model.get_weights())
    self.actor_target.model.set_weights(self.actor_local.model.get_weights())

    self.total_reward = 0
    self.steps = 0
    self.action_repeat = 3
    self.render = render

    # Score tracker and learning parameters
    self.score = -np.inf
    self.best_w = None
    self.best_score = -np.inf
    self.noise_scale = 0.1

    # counter
    self.count = 0

    # Replay memory
    self.buffer_size = 100000
    self.batch_size = 64
    self.memory = ReplayBuffer(self.buffer_size, self.batch_size)

    # Noise process
    self.exploration_mu = 0
    self.exploration_theta = 0.15
    self.exploration_sigma = 0.2
    self.noise = OUNoise(1, self.exploration_mu,
                         self.exploration_theta, self.exploration_sigma)

    # Algorithm parameters
    self.gamma = 0.99  # discount factor
    self.tau = 0.01  # for soft update of target parameters
def __init__(self, actor_model, tgt_actor_model, critic_model, tgt_critic_model,
             action_limits, actor_lr=1e-4, critic_lr=1e-3, critic_decay=1e-2,
             tau=1e-3, gamma=0.99, process=None, rb_size=1e6, minibatch_size=64,
             warmup_episodes=0, episodes_trained=0, train_scores=None,
             test_scores=None, best_train_score=-np.inf):
    # Changed this to use generic env instead of Task
    super().__init__(warmup_episodes, episodes_trained, train_scores,
                     test_scores, best_train_score)

    self.actor = Actor(actor_model, critic_model, lr=actor_lr)
    self.tgt_actor = Actor(tgt_actor_model, tgt_critic_model, lr=actor_lr)
    self.tgt_actor.set_weights(self.actor.get_weights())

    self.critic = Critic(critic_model, lr=critic_lr, decay=critic_decay)
    self.tgt_critic = Critic(tgt_critic_model, lr=critic_lr, decay=critic_decay)
    self.tgt_critic.set_weights(self.critic.get_weights())

    self.action_limits = action_limits
    self.process = process
    self.minibatch_size = minibatch_size
    self.buffer = ReplayBuffer(int(rb_size), self.minibatch_size)
    self.tau = tau
    self.gamma = gamma
    self.state_space = K.int_shape(critic_model.inputs[0])[1]
    self.action_space = K.int_shape(critic_model.inputs[1])[1]
    self.learning_phase = 1

    if process is None:
        self.process = OUNoise(size=self.action_space, theta=0.15, mu=0, sigma=0.2)
    else:
        self.process = process
def __init__(self, task):
    self.task = task
    self.state_size = task.state_size
    self.action_size = task.action_size
    self.action_low = task.action_low
    self.action_high = task.action_high

    # Actor (Policy) Model
    self.actor_local = Actor(self.state_size, self.action_size, self.action_low, self.action_high)
    self.actor_target = Actor(self.state_size, self.action_size, self.action_low, self.action_high)

    # Critic (Value) Model
    self.critic_local = Critic(self.state_size, self.action_size)
    self.critic_target = Critic(self.state_size, self.action_size)

    # Initialize target model parameters with local model parameters
    self.critic_target.model.set_weights(self.critic_local.model.get_weights())
    self.actor_target.model.set_weights(self.actor_local.model.get_weights())

    # Noise process
    self.exploration_mu = 0
    self.exploration_theta = 0.15
    self.exploration_sigma = 0.2
    self.noise = OUNoise(self.action_size, self.exploration_mu,
                         self.exploration_theta, self.exploration_sigma)

    # Replay memory
    self.buffer_size = 100000
    self.batch_size = 64
    self.memory = ReplayBuffer(self.buffer_size, self.batch_size)

    # Algorithm parameters
    self.gamma = 0.99  # discount factor
    self.tau = 0.01  # for soft update of target parameters

    # from policy search
    self.action_range = self.action_high - self.action_low
    self.w = np.random.normal(
        size=(self.state_size, self.action_size),  # weights for simple linear policy: state_space x action_space
        scale=(self.action_range / (2 * self.state_size)))  # start producing actions in a decent range

    # Score tracker and learning parameters
    self.score = -np.inf
    self.best_w = None
    self.best_score = -np.inf
    self.noise_scale = 0.1

    # counter
    self.count = 0
def __init__(self, task, verbose=False):
    self.verbose = verbose
    self.task = task
    self.state_size = task.state_size
    self.action_size = task.action_size
    self.action_low = task.action_low
    self.action_high = task.action_high

    # Actor (Policy) Model
    self.actor_local = Actor(self.state_size, self.action_size, self.action_low, self.action_high)
    self.actor_target = Actor(self.state_size, self.action_size, self.action_low, self.action_high)

    # Critic (Value) Model
    self.critic_local = Critic(self.state_size, self.action_size)
    self.critic_target = Critic(self.state_size, self.action_size)

    # log_path = '/tmp/logs'
    # self.callback = callbacks.TensorBoard(log_dir=log_path, histogram_freq=1,
    #                                       write_images=False, write_grads=True, write_graph=False)
    # self.callback.set_model(self.critic_local.model)
    # log_path = '/tmp/logs'
    # self.writer = tf.summary.FileWriter(log_path)
    # self.learn_counter = 0

    # Initialize target model parameters with local model parameters
    self.critic_target.model.set_weights(self.critic_local.model.get_weights())
    self.actor_target.model.set_weights(self.actor_local.model.get_weights())

    # Noise process
    self.exploration_mu = 0.1
    self.exploration_theta = 0.2
    self.exploration_sigma = 0.2
    self.noise = OUNoise(self.action_size, self.exploration_mu,
                         self.exploration_theta, self.exploration_sigma)

    # Replay memory
    self.buffer_size = 100000
    self.batch_size = 512
    self.memory = ReplayBuffer(self.buffer_size, self.batch_size)

    # Algorithm parameters
    self.gamma = 0.99  # discount factor
    self.tau = 0.015  # for soft update of target parameters
def __init__(self, env_reset, state_size, action_size, action_low, action_high):
    """Params:
        env_reset: callback function to reset environment at end of episode
        state_size: dimension of state space
        action_size: dimension of action space
        action_low: float - minimum action value
        action_high: float - maximum action value
    """
    self.training_steps = 0  # number of training steps run so far
    self.env_reset = env_reset
    self.state_size = state_size
    self.action_size = action_size
    self.action_low = action_low
    self.action_high = action_high

    # Algorithm parameters
    self.gamma = 0.99  # discount factor
    self.tau = 1e-3  # for soft update of target parameters
    self.critic_decay = 1e-2  # L2 weight decay for critic (regularization)
    self.critic_lr = 1e-3  # Learning rate for critic
    self.critic_alpha = 1e-2  # Leaky ReLU alpha for critic
    self.actor_lr = 1e-4  # Learning rate for actor
    self.actor_alpha = 1e-2  # Leaky ReLU alpha for actor

    # Actor (Policy) Model
    self.actor_local = Actor(self.state_size, self.action_size, self.action_low,
                             self.action_high, self.actor_lr, self.actor_alpha)
    self.actor_target = Actor(self.state_size, self.action_size, self.action_low,
                              self.action_high, self.actor_lr, self.actor_alpha)

    # Critic (Value) Model
    self.critic_local = Critic(self.state_size, self.action_size, self.critic_lr,
                               self.critic_decay, self.critic_alpha)
    self.critic_target = Critic(self.state_size, self.action_size, self.critic_lr,
                                self.critic_decay, self.critic_alpha)

    # Initialize target model parameters with local model parameters
    self.critic_target.model.set_weights(self.critic_local.model.get_weights())
    self.actor_target.model.set_weights(self.actor_local.model.get_weights())

    # Noise process
    self.exploration_mu = 0
    self.exploration_theta = 0.15
    self.exploration_sigma = 0.2
    self.noise = OUNoise(self.action_size, self.exploration_mu,
                         self.exploration_theta, self.exploration_sigma)

    # Replay memory
    self.buffer_size = int(1e6)
    self.batch_size = 64
    self.memory = ReplayBuffer(self.buffer_size, self.batch_size)
def __init__(self, task):
    """Initialize DDPG Agent instance."""
    self.task = task
    self.state_size = task.state_size
    self.action_size = task.action_size
    self.action_high = task.action_high
    self.action_low = task.action_low

    # Initializing local and target Actor Models
    # Actor (Policy) Model
    self.actor_local = Actor(self.state_size, self.action_size, self.action_high, self.action_low)
    self.actor_target = Actor(self.state_size, self.action_size, self.action_high, self.action_low)

    # Initializing local and target Critic Models
    # Critic (Value) Model
    self.critic_local = Critic(self.state_size, self.action_size)
    self.critic_target = Critic(self.state_size, self.action_size)

    # Initialize target model parameters with local model parameters
    self.actor_target.model.set_weights(self.actor_local.model.get_weights())
    self.critic_target.model.set_weights(self.critic_local.model.get_weights())

    self.exploration_mu = 0
    self.exploration_theta = 0.15
    self.exploration_sigma = 0.2
    self.noise = OUNoise(self.action_size, self.exploration_mu,
                         self.exploration_theta, self.exploration_sigma)

    # Replay Memory
    self.buffer_size = 100000
    self.batch_size = 64
    self.memory = ReplayBuffer(self.buffer_size, self.batch_size)

    # Algorithm parameters
    self.gamma = 0.99  # discount factor
    self.tau = 0.01  # for soft update of target parameters

    # Additional Parameters
    self.best_score = -np.inf
    self.total_reward = 0.0
    self.count = 0
    self.score = 0
def __init__(self, task):
    self.task = task
    self.state_size = task.state_size
    self.action_size = task.action_size
    self.action_low = task.action_low
    self.action_high = task.action_high

    # Actor (Policy) Model
    self.actor_local = Actor(self.state_size, self.action_size, self.action_low, self.action_high)
    self.actor_target = Actor(self.state_size, self.action_size, self.action_low, self.action_high)

    # Critic (Value) Model
    self.critic_local = Critic(self.state_size, self.action_size)
    self.critic_target = Critic(self.state_size, self.action_size)

    # Initialize target model parameters with local model parameters
    self.critic_target.model.set_weights(self.critic_local.model.get_weights())
    self.actor_target.model.set_weights(self.actor_local.model.get_weights())

    # Noise process
    self.exploration_mu = 0
    self.exploration_theta = 0.15
    # Y.W. changing sigma
    self.exploration_sigma = 0.3  # 0.3 #0.2 # 0.3
    self.noise = OUNoise(self.action_size, self.exploration_mu,
                         self.exploration_theta, self.exploration_sigma)

    # Replay memory
    # Y.W. extending buffer_size
    self.buffer_size = 1000000  # 100000
    self.batch_size = 64
    self.memory = ReplayBuffer(self.buffer_size, self.batch_size)

    # Algorithm parameters
    self.gamma = 0.99  # discount factor
    # self.tau = 0.01  # for soft update of target parameters
    # Y.W.
    self.tau = 0.001  # 0.001

    # simple reward cache
    self.total_reward = 0.0
    self.best_total_reward = -np.inf
def __init__(self, task):
    self.task = task
    self.state_size = task.state_size
    self.action_size = task.action_size
    self.action_low = task.action_low
    self.action_high = task.action_high

    # Actor (Policy) Model
    self.actor_local = Actor(self.state_size, self.action_size, self.action_low, self.action_high)
    self.actor_target = Actor(self.state_size, self.action_size, self.action_low, self.action_high)

    # Critic (Value) Model
    self.critic_local = Critic(self.state_size, self.action_size)
    self.critic_target = Critic(self.state_size, self.action_size)

    # Initialize target model parameters with local model parameters
    self.critic_target.model.set_weights(self.critic_local.model.get_weights())
    self.actor_target.model.set_weights(self.actor_local.model.get_weights())

    # Noise process
    self.exploration_mu = 0
    self.exploration_theta = 0.15
    self.exploration_sigma = 0.2
    self.noise = OUNoise(self.action_size, self.exploration_mu,
                         self.exploration_theta, self.exploration_sigma)

    # Replay memory
    self.buffer_size = 100000
    self.batch_size = 64
    self.memory = ReplayBuffer(self.buffer_size, self.batch_size)

    # Algorithm parameters
    self.gamma = 0.99  # discount factor
    self.tau = 0.01  # for soft update of target parameters

    # ======== Custom amendments: Score tracker and learning parameters ========
    self.best_score = -np.inf
    self.score = 0
    self.total_reward = 0.0
    self.count = 0
    self.best_position = np.zeros(3)
@classmethod
def load_model(cls, filename):
    # Open the pickled agent in binary mode; the Keras models are loaded separately below.
    with open(filename + '.ddpg_agent', 'rb') as f:
        m = pickle.load(f)
    m.actor_local = load_model(filename + '.actor_local')
    m.actor_target = load_model(filename + '.actor_target')
    m.critic_local = load_model(filename + '.critic_local')
    m.critic_target = load_model(filename + '.critic_target')
    m.replay_buffer = ReplayBuffer(m.buffer_size, m.batch_size)
    return m
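# Note: a matching save routine is not shown here. The sketch below is an assumed counterpart,
# not the author's code: it presumes actor_local and friends are plain Keras models (as the
# loader above implies) and that the remaining agent state pickles cleanly once the models and
# replay buffer are stripped out.
def save_model(self, filename):
    # Persist the Keras models separately; they are saved via model.save().
    self.actor_local.save(filename + '.actor_local')
    self.actor_target.save(filename + '.actor_target')
    self.critic_local.save(filename + '.critic_local')
    self.critic_target.save(filename + '.critic_target')
    # Temporarily drop unpicklable members, pickle the rest of the agent, then restore them.
    stashed = (self.actor_local, self.actor_target,
               self.critic_local, self.critic_target, self.replay_buffer)
    self.actor_local = self.actor_target = None
    self.critic_local = self.critic_target = None
    self.replay_buffer = None
    with open(filename + '.ddpg_agent', 'wb') as f:
        pickle.dump(self, f)
    (self.actor_local, self.actor_target,
     self.critic_local, self.critic_target, self.replay_buffer) = stashed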
def set_params(self, mu=0.1, sigma=0.1, theta=0.1, buffer_size=1e+8,
               batch_size=128, gamma=0.99, tau=1e-3):
    self.exploration_mu = mu
    self.exploration_sigma = sigma
    self.exploration_theta = theta
    self.noise = OUNoise(self.action_size, self.exploration_mu,
                         self.exploration_theta, self.exploration_sigma)
    self.buffer_size = int(buffer_size)
    self.batch_size = int(batch_size)
    self.buffer = ReplayBuffer(self.buffer_size)
    self.gamma = gamma
    self.tau = tau
def setup_replay_buffer_(self):
    """Setup a replay buffer.

    :return: None.
    """
    if self.prioritized_replay:
        self.replay_buffer = PrioritizedReplayBuffer(self.buffer_size, alpha=self.pr_alpha)
        self.beta_schedule = LinearSchedule(self.max_timesteps,
                                            initial_p=self.pr_beta,
                                            final_p=self.final_explore)
    else:
        self.replay_buffer = ReplayBuffer(self.buffer_size)
        self.beta_schedule = None
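# For the prioritized branch, beta_schedule is typically consumed at sampling time.
# The loop below is an illustrative sketch, not part of the class above: it assumes a
# baselines-style PrioritizedReplayBuffer.sample(batch_size, beta) that also returns
# importance weights and indices, and a hypothetical agent.train_step(...) that returns
# per-sample TD errors.
import numpy as np

def train_with_replay(agent, max_timesteps, batch_size=64):
    for t in range(max_timesteps):
        if agent.prioritized_replay:
            beta = agent.beta_schedule.value(t)  # anneal beta toward its final value over training
            (states, actions, rewards, next_states, dones,
             weights, idxes) = agent.replay_buffer.sample(batch_size, beta=beta)
            td_errors = agent.train_step(states, actions, rewards, next_states, dones, weights)
            # New priorities proportional to |TD error|; a small epsilon keeps them non-zero.
            agent.replay_buffer.update_priorities(idxes, np.abs(td_errors) + 1e-6)
        else:
            states, actions, rewards, next_states, dones = agent.replay_buffer.sample(batch_size)
            agent.train_step(states, actions, rewards, next_states, dones, np.ones_like(rewards))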
def __init__(self, task):
    self.task = task
    self.state_size = task.state_size
    self.action_size = task.action_size
    self.action_low = task.action_low
    self.action_high = task.action_high

    # Noise process parameters
    self.exploration_mu = 0
    self.exploration_theta = 0.15
    self.exploration_sigma = 0.001

    # Replay memory
    self.buffer_size = 100000
    self.batch_size = 64
    self.memory = ReplayBuffer(self.buffer_size)

    # Algorithm parameters
    self.gamma = 0.99  # discount factor
    self.tau = 0.1  # for soft update of target parameters
    self.learning_rate = 0.0005

    self.noise = OUNoise(self.action_size, self.exploration_mu,
                         self.exploration_theta, self.exploration_sigma)

    # Actor (Policy) and Critic (Value) models
    self.actor_local = Actor(self.state_size, self.action_size, self.action_low,
                             self.action_high, learning_rate=self.learning_rate)
    self.actor_target = Actor(self.state_size, self.action_size, self.action_low,
                              self.action_high, learning_rate=self.learning_rate)
    self.critic_local = Critic(self.state_size, self.action_size, learning_rate=self.learning_rate)
    self.critic_target = Critic(self.state_size, self.action_size, learning_rate=self.learning_rate)
def __init__(self, task):
    self.task = task
    self.state_size = task.state_size
    self.action_size = task.action_size
    self.action_low = task.action_low
    self.action_high = task.action_high

    # Actor model
    self.actor_local = Actor(self.state_size, self.action_size, self.action_low, self.action_high)
    self.actor_target = Actor(self.state_size, self.action_size, self.action_low, self.action_high)

    # Critic model
    self.critic_local = Critic(self.state_size, self.action_size)
    self.critic_target = Critic(self.state_size, self.action_size)

    # Initialize target model params with local params
    self.critic_target.model.set_weights(self.critic_local.model.get_weights())
    self.actor_target.model.set_weights(self.actor_local.model.get_weights())

    # Initialize noise process
    self.exploration_mu = 0
    self.exploration_theta = 0.15
    self.exploration_sigma = 0.2
    self.noise = OUNoise(self.action_size, self.exploration_mu,
                         self.exploration_theta, self.exploration_sigma)

    # Replay memory initialization
    self.buffer_size, self.batch_size = 2000000, 64
    self.memory = ReplayBuffer(self.buffer_size, self.batch_size)

    # Initialize algorithm parameters
    self.gamma, self.tau = 0.95, 0.001

    # Initialize scores
    self.score, self.best_score = -np.inf, -np.inf
def __init__(self, task):
    """Initialize DDPG Agent instance."""
    self.task = task
    self.state_size = task.state_size
    self.action_size = task.action_size
    self.action_high = task.action_high
    self.action_low = task.action_low

    # Initializing local and target Actor Models
    # Actor (Policy) Model
    self.actor_local = Actor(self.state_size, self.action_size, self.action_high, self.action_low)
    self.actor_target = Actor(self.state_size, self.action_size, self.action_high, self.action_low)

    # Initializing local and target Critic Models
    # Critic (Value) Model
    self.critic_local = Critic(self.state_size, self.action_size)
    self.critic_target = Critic(self.state_size, self.action_size)

    # Initialize target model parameters with local model parameters
    self.actor_target.model.set_weights(self.actor_local.model.get_weights())
    self.critic_target.model.set_weights(self.critic_local.model.get_weights())

    self.exploration_mu = 0
    self.exploration_theta = 0.15
    self.exploration_sigma = 0.2
    self.noise = OUNoise(self.action_size, self.exploration_mu,
                         self.exploration_theta, self.exploration_sigma)

    # Replay Memory
    self.buffer_size = 100000
    self.batch_size = 64
    self.memory = ReplayBuffer(self.buffer_size, self.batch_size)

    # Algorithm parameters
    self.gamma = 0.99  # discount factor
    self.tau = 0.01  # for soft update of target parameters

    # Additional Parameters
    self.best_score = -np.inf
    self.total_reward = 0.0
    self.count = 0
    self.score = 0
def __init__(self, action_size):
    # Noise process
    self.exploration_mu = 0
    self.exploration_theta = 0.15  # same direction
    self.exploration_sigma = 0.001  # random noise
    self.noise = OUNoise(action_size, self.exploration_mu,
                         self.exploration_theta, self.exploration_sigma)

    # Replay memory
    self.buffer_size = 100000
    self.batch_size = 64
    self.memory = ReplayBuffer(self.buffer_size, self.batch_size)
    # self.memory = Memory(self.buffer_size, self.batch_size)

    # Algorithm parameters
    self.gamma = 0.99  # discount factor
    self.tau = 0.1  # for soft update of target parameters
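# The constructors above all rely on an OUNoise class that is not shown in this section.
# Below is a minimal sketch of an Ornstein-Uhlenbeck process consistent with how it is called
# here (OUNoise(size, mu, theta, sigma) plus reset() and sample()); the concrete implementation
# in the original projects may differ.
import copy
import numpy as np

class OUNoise:
    """Ornstein-Uhlenbeck process: temporally correlated noise for exploration."""

    def __init__(self, size, mu=0.0, theta=0.15, sigma=0.2):
        self.mu = mu * np.ones(size)
        self.theta = theta
        self.sigma = sigma
        self.reset()

    def reset(self):
        """Reset the internal state to the mean."""
        self.state = copy.copy(self.mu)

    def sample(self):
        """Update the internal state and return it as a noise sample."""
        x = self.state
        dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(len(x))
        self.state = x + dx
        return self.state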
class Agent():
    """Reinforcement Learning agent that learns using DDPG."""

    def __init__(self, task):
        self.task = task
        self.state_size = task.state_size
        self.action_size = task.action_size
        self.action_low = task.action_low
        self.action_high = task.action_high

        # Actor (Policy) Model
        self.actor_local = Actor(self.state_size, self.action_size, self.action_low, self.action_high)
        self.actor_target = Actor(self.state_size, self.action_size, self.action_low, self.action_high)

        # Critic (Value) Model
        self.critic_local = Critic(self.state_size, self.action_size)
        self.critic_target = Critic(self.state_size, self.action_size)

        # Initialize target model parameters with local model parameters
        self.critic_target.model.set_weights(self.critic_local.model.get_weights())
        self.actor_target.model.set_weights(self.actor_local.model.get_weights())

        # Noise process
        self.exploration_mu = 0
        self.exploration_theta = 0.15  # same direction
        self.exploration_sigma = 0.001  # random noise
        # self.exploration_mu = 0
        # self.exploration_theta = 0.15
        # self.exploration_sigma = 0.2
        self.noise = OUNoise(self.action_size, self.exploration_mu,
                             self.exploration_theta, self.exploration_sigma)

        # Replay memory
        self.buffer_size = 100000
        self.batch_size = 64
        self.memory = ReplayBuffer(self.buffer_size, self.batch_size)

        # Algorithm parameters
        self.gamma = 0.99  # discount factor
        self.tau = 0.1  # for soft update of target parameters

        # Compute the ongoing top score
        self.top_score = -np.inf
        self.score = 0

    def reset_episode(self):
        self.noise.reset()
        state = self.task.reset()
        self.last_state = state
        self.score = 0
        return state

    def step(self, action, reward, next_state, done):
        # Save experience / reward
        self.memory.add(self.last_state, action, reward, next_state, done)

        # Learn, if enough samples are available in memory
        if len(self.memory) > self.batch_size:
            experiences = self.memory.sample()
            self.learn(experiences)

        # Roll over last state and action
        self.last_state = next_state

        # stats
        self.score += reward
        if done:
            if self.score > self.top_score:
                self.top_score = self.score

    def act(self, states):
        """Returns actions for given state(s) as per current policy."""
        state = np.reshape(states, [-1, self.state_size])
        action = self.actor_local.model.predict(state)[0]
        return list(action + self.noise.sample())  # add some noise for exploration

    def learn(self, experiences):
        """Update policy and value parameters using given batch of experience tuples."""
        # Convert experience tuples to separate arrays for each element (states, actions, rewards, etc.)
        states = np.vstack([e.state for e in experiences if e is not None])
        actions = np.array([e.action for e in experiences if e is not None]).astype(
            np.float32).reshape(-1, self.action_size)
        rewards = np.array([e.reward for e in experiences if e is not None]).astype(
            np.float32).reshape(-1, 1)
        dones = np.array([e.done for e in experiences if e is not None]).astype(
            np.uint8).reshape(-1, 1)
        next_states = np.vstack([e.next_state for e in experiences if e is not None])

        # Get predicted next-state actions and Q values from target models
        #     Q_targets_next = critic_target(next_state, actor_target(next_state))
        actions_next = self.actor_target.model.predict_on_batch(next_states)
        Q_targets_next = self.critic_target.model.predict_on_batch([next_states, actions_next])

        # Compute Q targets for current states and train critic model (local)
        Q_targets = rewards + self.gamma * Q_targets_next * (1 - dones)
        self.critic_local.model.train_on_batch(x=[states, actions], y=Q_targets)

        # Train actor model (local)
        action_gradients = np.reshape(
            self.critic_local.get_action_gradients([states, actions, 0]),
            (-1, self.action_size))
        self.actor_local.train_fn([states, action_gradients, 1])  # custom training function

        # Soft-update target models
        self.soft_update(self.critic_local.model, self.critic_target.model)
        self.soft_update(self.actor_local.model, self.actor_target.model)

    def soft_update(self, local_model, target_model):
        """Soft update model parameters."""
        local_weights = np.array(local_model.get_weights())
        target_weights = np.array(target_model.get_weights())

        assert len(local_weights) == len(target_weights), \
            "Local and target model parameters must have the same size"

        new_weights = self.tau * local_weights + (1 - self.tau) * target_weights
        target_model.set_weights(new_weights)
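# The agents above also assume a ReplayBuffer with add(state, action, reward, next_state, done),
# sample(), and __len__, whose samples expose .state, .action, .reward, .next_state and .done.
# This is a minimal sketch consistent with that usage; the original projects may store extra
# fields or sample differently.
import random
from collections import deque, namedtuple

class ReplayBuffer:
    """Fixed-size buffer that stores experience tuples and samples them uniformly at random."""

    def __init__(self, buffer_size, batch_size=64):
        self.memory = deque(maxlen=buffer_size)  # oldest experiences are dropped when full
        self.batch_size = batch_size
        self.experience = namedtuple("Experience",
                                     ["state", "action", "reward", "next_state", "done"])

    def add(self, state, action, reward, next_state, done):
        self.memory.append(self.experience(state, action, reward, next_state, done))

    def sample(self, batch_size=None):
        return random.sample(self.memory, k=batch_size or self.batch_size)

    def __len__(self):
        return len(self.memory)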
class DDPG_Agent:
    """Reinforcement learning agent that learns through DDPG."""

    def __init__(self, task):
        """Initialize DDPG Agent instance."""
        self.task = task
        self.state_size = task.state_size
        self.action_size = task.action_size
        self.action_high = task.action_high
        self.action_low = task.action_low

        # Initializing local and target Actor Models
        # Actor (Policy) Model
        self.actor_local = Actor(self.state_size, self.action_size, self.action_high, self.action_low)
        self.actor_target = Actor(self.state_size, self.action_size, self.action_high, self.action_low)

        # Initializing local and target Critic Models
        # Critic (Value) Model
        self.critic_local = Critic(self.state_size, self.action_size)
        self.critic_target = Critic(self.state_size, self.action_size)

        # Initialize target model parameters with local model parameters
        self.actor_target.model.set_weights(self.actor_local.model.get_weights())
        self.critic_target.model.set_weights(self.critic_local.model.get_weights())

        self.exploration_mu = 0
        self.exploration_theta = 0.15
        self.exploration_sigma = 0.2
        self.noise = OUNoise(self.action_size, self.exploration_mu,
                             self.exploration_theta, self.exploration_sigma)

        # Replay Memory
        self.buffer_size = 100000
        self.batch_size = 64
        self.memory = ReplayBuffer(self.buffer_size, self.batch_size)

        # Algorithm parameters
        self.gamma = 0.99  # discount factor
        self.tau = 0.01  # for soft update of target parameters

        # Additional Parameters
        self.best_score = -np.inf
        self.total_reward = 0.0
        self.count = 0
        self.score = 0

    def reset_episode(self):
        """Reset episode to initial state."""
        self.total_reward = 0.0
        self.count = 0
        self.noise.reset()
        state = self.task.reset()
        self.last_state = state
        return state

    def step(self, action, reward, next_state, done):
        """Take a step."""
        self.total_reward += reward
        self.count += 1

        # Save experience/reward
        self.memory.memorize(self.last_state, action, reward, next_state, done)

        # Learn if enough samples are available in memory.
        if len(self.memory) > self.batch_size:
            experiences = self.memory.sample()
            self.learn(experiences)

    def act(self, state):
        """Returns actions for state(s) according to given policy."""
        state = np.reshape(state, [-1, self.state_size])
        action = self.actor_local.model.predict(state)[0]
        # Add some noise to action for exploration and return
        return list(action + self.noise.sample())

    def learn(self, experiences):
        """Update policy and value parameters using given batch of experience tuples."""
        self.score = self.total_reward / float(self.count) if self.count else 0.0
        if self.score > self.best_score:
            self.best_score = self.score

        states = np.vstack([e.state for e in experiences if e is not None])
        actions = np.vstack([e.action for e in experiences if e is not None]).astype(
            np.float32).reshape(-1, self.action_size)
        rewards = np.vstack([e.reward for e in experiences if e is not None]).astype(
            np.float32).reshape(-1, 1)
        dones = np.vstack([e.done for e in experiences if e is not None]).astype(
            np.uint8).reshape(-1, 1)
        next_states = np.vstack([e.next_state for e in experiences if e is not None])

        # Get predicted next-state actions and Q values from target models
        #     Q_targets_next = critic_target(next_state, actor_target(next_state))
        next_actions = self.actor_target.model.predict_on_batch(next_states)
        Q_targets_next = self.critic_target.model.predict_on_batch([next_states, next_actions])

        # Compute Q targets for current states and train critic model (local)
        Q_targets = rewards + self.gamma * Q_targets_next * (1 - dones)
        self.critic_local.model.train_on_batch(x=[states, actions], y=Q_targets)

        # Train actor model (local)
        # [states, actions, 0] -- the 0 selects "no learning phase"
        action_gradients = np.reshape(
            self.critic_local.get_action_gradients([states, actions, 0]),
            (-1, self.action_size))
        self.actor_local.train_fn([states, action_gradients, 1])

        # Soft-update target models
        self.soft_update(self.critic_local.model, self.critic_target.model)
        self.soft_update(self.actor_local.model, self.actor_target.model)

    def soft_update(self, local_model, target_model):
        """Soft update model parameters."""
        local_weights = np.array(local_model.get_weights())
        target_weights = np.array(target_model.get_weights())

        assert len(local_weights) == len(target_weights), \
            "Local and target model parameters must have the same size"

        new_weights = self.tau * local_weights + (1 - self.tau) * target_weights
        target_model.set_weights(new_weights)
class DDPG_Land():
    def __init__(self, task, seed=None, render=False):
        self.env = task.env
        self.state_size = task.state_size
        self.action_size = task.action_size
        self.action_low = task.action_low
        self.action_high = task.action_high

        # Actor (Policy) Model
        self.actor_local = Actor(self.state_size, self.action_size, self.action_low, self.action_high)
        self.actor_target = Actor(self.state_size, self.action_size, self.action_low, self.action_high)

        # Critic (Value) Model
        self.critic_local = Critic(self.state_size, self.action_size)
        self.critic_target = Critic(self.state_size, self.action_size)

        # Initialize target model parameters with local model parameters
        self.critic_target.model.set_weights(self.critic_local.model.get_weights())
        self.actor_target.model.set_weights(self.actor_local.model.get_weights())

        self.total_reward = 0
        self.steps = 0
        self.action_repeat = 3
        self.render = render

        # Score tracker and learning parameters
        self.score = -np.inf
        self.best_w = None
        self.best_score = -np.inf
        self.noise_scale = 0.1

        # counter
        self.count = 0

        # Replay memory
        self.buffer_size = 100000
        self.batch_size = 64
        self.memory = ReplayBuffer(self.buffer_size, self.batch_size)

        # Noise process
        self.exploration_mu = 0
        self.exploration_theta = 0.15
        self.exploration_sigma = 0.2
        self.noise = OUNoise(1, self.exploration_mu,
                             self.exploration_theta, self.exploration_sigma)

        # Algorithm parameters
        self.gamma = 0.99  # discount factor
        self.tau = 0.01  # for soft update of target parameters

    def act(self, s):
        # # print('act')
        # # a = lunder.heuristic(self.env, s)
        # # 1. Testing.
        # # 2. Demonstration rollout.
        # angle_targ = s[0]*0.5 + s[2]*1.0  # angle should point towards center (s[0] is horizontal coordinate, s[2] hor speed)
        # if angle_targ > 0.4: angle_targ = 0.4  # more than 0.4 radians (22 degrees) is bad
        # if angle_targ < -0.4: angle_targ = -0.4
        # hover_targ = 0.55*np.abs(s[0])  # target y should be proportional to horizontal offset
        # # PID controller: s[4] angle, s[5] angularSpeed
        # angle_todo = (angle_targ - s[4])*0.5 - (s[5])*1.0
        # # print("angle_targ=%0.2f, angle_todo=%0.2f" % (angle_targ, angle_todo))
        # # PID controller: s[1] vertical coordinate s[3] vertical speed
        # hover_todo = (hover_targ - s[1])*0.5 - (s[3])*0.5
        # # print("hover_targ=%0.2f, hover_todo=%0.2f" % (hover_targ, hover_todo))
        # if s[6] or s[7]:  # legs have contact
        #     angle_todo = 0
        #     hover_todo = -(s[3])*0.5  # override to reduce fall speed, that's all we need after contact
        # if self.env.continuous:
        #     a = np.array([hover_todo*20 - 1, -angle_todo*20])
        #     a = np.clip(a, -1, +1)
        # else:
        #     a = 0
        #     if hover_todo > np.abs(angle_todo) and hover_todo > 0.05: a = 2
        #     elif angle_todo < -0.05: a = 3
        #     elif angle_todo > +0.05: a = 1
        # return a
        # state = s
        """Returns actions for given state(s) as per current policy."""
        state = np.reshape(s, [-1, 24])
        action = self.actor_local.model.predict(state)[0]
        return list(action + self.noise.sample())

    def step(self, action, reward, next_state, done):
        # print("step")
        # ob, reward, done, info = self.env.step(action)
        # print(ob)
        # next_state = ob

        # Save experience / reward
        reward = np.clip(reward, a_min=-100, a_max=100)
        self.memory.add(self.last_state, action, reward, next_state, done)
        self.count += 1
        self.total_reward += reward

        # Learn, if enough samples are available in memory
        if len(self.memory) > self.batch_size:
            experiences = self.memory.sample()
            self.learn(experiences)

        # Roll over last state and action
        self.last_state = next_state

        # from the tutorial SRC
        self.score += reward
        if done:
            # self.score = np.clip(self.score, a_min=-100, a_max=100)
            if self.score > self.best_score:
                self.best_score = self.score

    def soft_update(self, local_model, target_model):
        """Soft update model parameters."""
        local_weights = np.array(local_model.get_weights())
        target_weights = np.array(target_model.get_weights())

        assert len(local_weights) == len(target_weights), \
            "Local and target model parameters must have the same size"

        new_weights = self.tau * local_weights + (1 - self.tau) * target_weights
        target_model.set_weights(new_weights)

    # # from the tutorial SRC
    # self.score += reward
    # if done:
    #     if self.score > self.best_score:
    #         self.best_score = self.score
    #
    # return ob, reward, done

    def learn(self, experiences):
        """Update policy and value parameters using given batch of experience tuples."""
        # Convert experience tuples to separate arrays for each element (states, actions, rewards, etc.)
        states = np.vstack([e.state for e in experiences if e is not None])
        actions = np.array([e.action for e in experiences if e is not None]).astype(
            np.float32).reshape(-1, self.action_size)
        rewards = np.array([e.reward for e in experiences if e is not None]).astype(
            np.float32).reshape(-1, 1)
        dones = np.array([e.done for e in experiences if e is not None]).astype(
            np.uint8).reshape(-1, 1)
        next_states = np.vstack([e.next_state for e in experiences if e is not None])

        # Get predicted next-state actions and Q values from target models
        #     Q_targets_next = critic_target(next_state, actor_target(next_state))
        actions_next = self.actor_target.model.predict_on_batch(next_states)
        Q_targets_next = self.critic_target.model.predict_on_batch([next_states, actions_next])

        # Compute Q targets for current states and train critic model (local)
        Q_targets = rewards + self.gamma * Q_targets_next * (1 - dones)
        self.critic_local.model.train_on_batch(x=[states, actions], y=Q_targets)

        # Train actor model (local)
        action_gradients = np.reshape(
            self.critic_local.get_action_gradients([states, actions, 0]),
            (-1, self.action_size))
        self.actor_local.train_fn([states, action_gradients, 1])  # custom training function

        # Soft-update target models
        self.soft_update(self.critic_local.model, self.critic_target.model)
        self.soft_update(self.actor_local.model, self.actor_target.model)

        # # from policy search
        # # Learn by random policy search, using a reward-based score
        # # self.score = self.total_reward / float(self.count) if self.count else 0.0
        # # if self.score > self.best_score:
        # #     self.best_score = self.score
        # #     self.best_w = self.w
        # #     self.noise_scale = max(0.5 * self.noise_scale, 0.01)
        # # else:
        # #     self.w = self.best_w
        # #     self.noise_scale = min(2.0 * self.noise_scale, 3.2)
        # # self.w = self.w + self.noise_scale * np.random.normal(size=self.w.shape)  # equal noise in all directions

    def reset(self):
        self.steps = 0
        self.total_reward = 0
        self.count = 0
        self.score = 0
        # self.best_score = 0
        """Reset the sim to start a new episode."""
        ob = self.env.reset()
        state = np.concatenate([ob] * self.action_repeat)
        self.last_state = state
        return state
class Agent():
    def __init__(self, task, sess, stats):
        self.sess = sess
        self.task = task
        self.stats = stats

        tau = 0.01
        learning_rate = 2e-4

        self.critic_local = QNetwork(sess, task, stats, name='critic_local',
                                     hidden_units=64, dropout_rate=0.2)
        self.critic_target = QNetwork(sess, task, stats, name='critic_target',
                                      hidden_units=64, dropout_rate=0.2)
        self.actor_local = Policy(sess, task, stats, name='actor_local',
                                  hidden_units=32, dropout_rate=0.2)
        self.actor_target = Policy(sess, task, stats, name='actor_target',
                                   hidden_units=32, dropout_rate=0.2)

        soft_copy_critic_ops = self._create_soft_copy_op('critic_local', 'critic_target', tau=tau)
        soft_copy_actor_ops = self._create_soft_copy_op('actor_local', 'actor_target', tau=tau)
        self._soft_copy_ops = []
        self._soft_copy_ops.extend(soft_copy_critic_ops)
        self._soft_copy_ops.extend(soft_copy_actor_ops)

        self.gamma = 0.99  # reward discount rate

        # Exploration noise process
        exploration_mu = 0
        exploration_theta = 0.15
        exploration_sigma = 0.15
        self.noise = OUNoise(task.action_size, exploration_mu,
                             exploration_theta, exploration_sigma)

        # Replay memory
        self.batch_size = 256
        self.memory = ReplayBuffer(buffer_size=10000, decay_steps=1000)

        self.sess.run(tf.global_variables_initializer())

    def reset_episode(self):
        self.noise.reset()
        state = self.task.reset()
        self.last_state = state
        self.memory.decay_a()
        return state

    def step(self, action, reward, next_state, done):
        # Save experience
        self._save_experience(self.last_state, action, reward, next_state, done)

        # Learn, if enough samples are available in memory
        self.learn()

        # Roll over last state and action
        self.last_state = next_state

    def act(self, state, explore=False):
        """Returns actions for given state(s) as per current policy."""
        actor = self.actor_local if explore else self.actor_target
        action = actor.act([state], explore)[0]
        assert not np.any(np.isnan(action))

        if explore:
            action = action + self.noise.sample()
            action = np.maximum(action, self.task.action_low)
            action = np.minimum(action, self.task.action_high)

        assert not np.any(np.isnan(action))
        assert np.all(action >= self.task.action_low), \
            "expected at least {:7.3f}, but was {}".format(self.task.action_low, action)
        assert np.all(action <= self.task.action_high)
        return action

    def learn(self):
        """Update policy and value parameters using given batch of experience tuples."""
        if len(self.memory) < self.batch_size:
            return

        # Convert experience tuples to separate arrays for each element (states, actions, rewards, etc.)
        experiences, experience_indexes = self.memory.sample(self.batch_size)
        action_size = self.task.action_size
        states = np.vstack([e.state for e in experiences])
        actions = np.array([e.action for e in experiences]).astype(np.float32).reshape(-1, action_size)
        rewards = np.array([e.reward for e in experiences]).astype(np.float32).reshape(-1, 1)
        dones = np.array([e.done for e in experiences]).astype(np.uint8).reshape(-1, 1)
        next_states = np.vstack([e.next_state for e in experiences])

        # Get predicted next-state actions, Q and V values
        actions_next = self.actor_target.act(next_states)
        Q_targets_next, V_targets_next = self.critic_target.get_q_and_v(next_states, actions_next)

        # Compute Q targets for current states and train critic model (local)
        Q_targets = rewards + self.gamma * Q_targets_next * (1 - dones)
        V_targets = rewards + self.gamma * V_targets_next * (1 - dones)
        td_errs = self.critic_local.learn(states, actions, Q_targets, V_targets)
        self.memory.update_td_err(experience_indexes, td_errs)
        self.memory.scrape_stats(self.stats)

        # Train actor model
        actions = self.actor_target.act(states)
        action_gradients = self.critic_target.get_action_gradients(states, actions)
        self.actor_local.learn(states, action_gradients)

        self._soft_copy()

    def _save_experience(self, state, action, reward, next_state, done):
        """Adds experience into ReplayBuffer.

        As a side effect, also learns the Q network on this sample."""
        # Get predicted next-state actions and Q values
        actions_next = self.actor_local.act([next_state])
        Q_targets_next, _ = self.critic_local.get_q_and_v([next_state], actions_next)
        Q_target_next = Q_targets_next[0]
        Q_target = reward + self.gamma * Q_target_next * (1 - done)
        td_err = self.critic_local.get_td_err([state], [action], [Q_target])
        self.memory.add(Experience(state, action, reward, next_state, done), td_err)

    def _soft_copy(self):
        self.sess.run(self._soft_copy_ops)

    def _create_soft_copy_op(self, scope_src, scope_dst, tau=0.01):
        var_src = tf.trainable_variables(scope=scope_src)
        var_dst = tf.trainable_variables(scope=scope_dst)
        copy_ops = []
        for src, dst in zip(var_src, var_dst):
            mixed = tau * src + (1.0 - tau) * dst
            copy_op = tf.assign(dst, mixed)
            copy_ops.append(copy_op)
        return copy_ops