def testActionIsInRange(self):
    """Sampled (clipped) actions keep the wrapped policy's shape/dtype and obey the spec bounds."""
    gaussian_wrapper = gaussian_policy.GaussianPolicy(self._wrapped_policy)
    action_step = gaussian_wrapper.action(self._time_step_batch)
    # Static checks on the symbolic action tensor.
    self.assertEqual(action_step.action.shape.as_list(), [2, 1])
    self.assertEqual(action_step.action.dtype, tf.float32)
    # Initialize variables, then check the concrete samples stay in range.
    self.evaluate(tf.compat.v1.global_variables_initializer())
    self.evaluate(tf.compat.v1.local_variables_initializer())
    sampled_actions = self.evaluate(action_step.action)
    self.assertTrue(np.all(sampled_actions >= self._action_spec.minimum))
    self.assertTrue(np.all(sampled_actions <= self._action_spec.maximum))
def testActionAddsGaussianNoise(self):
    """Unclipped noisy actions must differ from the wrapped policy's actions."""
    noisy_policy = gaussian_policy.GaussianPolicy(self._wrapped_policy, clip=False)
    noisy_step = noisy_policy.action(self._time_step_batch)
    base_step = self._wrapped_policy.action(self._time_step_batch)
    self.evaluate(tf.compat.v1.global_variables_initializer())
    self.evaluate(tf.compat.v1.local_variables_initializer())
    noisy_actions = self.evaluate(noisy_step.action)
    base_actions = self.evaluate(base_step.action)
    # Nonzero distance => Gaussian noise was actually added.
    self.assertTrue(np.linalg.norm(noisy_actions - base_actions) > 0)
def testActionList(self):
    """A list-valued action spec yields a list of actions respecting the spec bounds."""
    listed_spec = [self._action_spec]
    net = DummyActionNet(self._obs_spec, listed_spec)
    self._wrapped_policy = actor_policy.ActorPolicy(
        time_step_spec=self._time_step_spec,
        action_spec=listed_spec,
        actor_network=net,
        clip=False)
    gaussian_wrapper = gaussian_policy.GaussianPolicy(self._wrapped_policy)
    action_step = gaussian_wrapper.action(self._time_step_batch)
    # The single action in the list keeps the expected shape/dtype.
    self.assertEqual(action_step.action[0].shape.as_list(), [2, 1])
    self.assertEqual(action_step.action[0].dtype, tf.float32)
    self.evaluate(tf.compat.v1.global_variables_initializer())
    self.evaluate(tf.compat.v1.local_variables_initializer())
    sampled = self.evaluate(action_step.action)
    self.assertTrue(np.all(sampled[0] >= self._action_spec.minimum))
    self.assertTrue(np.all(sampled[0] <= self._action_spec.maximum))
def __init__(self,
             time_step_spec,
             action_spec,
             actor_network,
             critic_network,
             actor_optimizer,
             critic_optimizer,
             exploration_noise_std=0.1,
             critic_network_2=None,
             target_actor_network=None,
             target_critic_network=None,
             target_critic_network_2=None,
             target_update_tau=1.0,
             target_update_period=1,
             actor_update_period=1,
             dqda_clipping=None,
             td_errors_loss_fn=None,
             gamma=1.0,
             reward_scale_factor=1.0,
             target_policy_noise=0.2,
             target_policy_noise_clip=0.5,
             gradient_clipping=None,
             debug_summaries=False,
             summarize_grads_and_vars=False,
             train_step_counter=None,
             name=None):
  """Creates a Td3Agent Agent.

  Args:
    time_step_spec: A `TimeStep` spec of the expected time_steps.
    action_spec: A nest of BoundedTensorSpec representing the actions.
    actor_network: A tf_agents.network.Network to be used by the agent. The
      network will be called with call(observation, step_type).
    critic_network: A tf_agents.network.Network to be used by the agent. The
      network will be called with call(observation, action, step_type).
    actor_optimizer: The default optimizer to use for the actor network.
    critic_optimizer: The default optimizer to use for the critic network.
    exploration_noise_std: Scale factor on exploration policy noise.
    critic_network_2: (Optional.) A `tf_agents.network.Network` to be used as
      the second critic network during Q learning. The weights from
      `critic_network` are copied if this is not provided.
    target_actor_network: (Optional.) A `tf_agents.network.Network` to be
      used as the target actor network during Q learning. Every
      `target_update_period` train steps, the weights from `actor_network`
      are copied (possibly with smoothing via `target_update_tau`) to
      `target_actor_network`. If `target_actor_network` is not provided, it
      is created by making a copy of `actor_network`, which initializes a
      new network with the same structure and its own layers and weights.

      Performing a `Network.copy` does not work when the network instance
      already has trainable parameters (e.g., has already been built, or
      when the network is sharing layers with another). In these cases, it
      is up to you to build a copy having weights that are not shared with
      the original `actor_network`, so that this can be used as a target
      network. If you provide a `target_actor_network` that shares any
      weights with `actor_network`, a warning will be logged but no
      exception is thrown.
    target_critic_network: (Optional.) Similar network as
      target_actor_network but for the critic_network. See documentation for
      target_actor_network.
    target_critic_network_2: (Optional.) Similar network as
      target_actor_network but for the critic_network_2. See documentation
      for target_actor_network. Will only be used if 'critic_network_2' is
      also specified.
    target_update_tau: Factor for soft update of the target networks.
    target_update_period: Period for soft update of the target networks.
    actor_update_period: Period for the optimization step on actor network.
    dqda_clipping: A scalar or float clips the gradient dqda element-wise
      between [-dqda_clipping, dqda_clipping]. Default is None representing
      no clipping.
    td_errors_loss_fn: A function for computing the TD errors loss. If None,
      a default value of elementwise huber_loss is used.
    gamma: A discount factor for future rewards.
    reward_scale_factor: Multiplicative scale for the reward.
    target_policy_noise: Scale factor on target action noise.
    target_policy_noise_clip: Value to clip noise.
    gradient_clipping: Norm length to clip gradients.
    debug_summaries: A bool to gather debug summaries.
    summarize_grads_and_vars: If True, gradient and network variable
      summaries will be written during training.
    train_step_counter: An optional counter to increment every time the
      train op is run. Defaults to the global_step.
    name: The name of this agent. All variables in this module will fall
      under that name. Defaults to the class name.
  """
  tf.Module.__init__(self, name=name)
  self._actor_network = actor_network
  actor_network.create_variables()
  if target_actor_network:
    target_actor_network.create_variables()
  # Reuse the provided target network when safe, otherwise copy the online
  # network; the helper warns if the target shares weights with the online net.
  self._target_actor_network = common.maybe_copy_target_network_with_checks(
      self._actor_network, target_actor_network, 'TargetActorNetwork')
  self._critic_network_1 = critic_network
  critic_network.create_variables()
  if target_critic_network:
    target_critic_network.create_variables()
  self._target_critic_network_1 = (
      common.maybe_copy_target_network_with_checks(self._critic_network_1,
                                                   target_critic_network,
                                                   'TargetCriticNetwork1'))
  if critic_network_2 is not None:
    self._critic_network_2 = critic_network_2
  else:
    self._critic_network_2 = critic_network.copy(name='CriticNetwork2')
    # Do not use target_critic_network_2 if critic_network_2 is None.
    target_critic_network_2 = None
  self._critic_network_2.create_variables()
  if target_critic_network_2:
    target_critic_network_2.create_variables()
  self._target_critic_network_2 = (
      common.maybe_copy_target_network_with_checks(self._critic_network_2,
                                                   target_critic_network_2,
                                                   'TargetCriticNetwork2'))
  self._actor_optimizer = actor_optimizer
  self._critic_optimizer = critic_optimizer
  self._exploration_noise_std = exploration_noise_std
  self._target_update_tau = target_update_tau
  self._target_update_period = target_update_period
  self._actor_update_period = actor_update_period
  self._dqda_clipping = dqda_clipping
  self._td_errors_loss_fn = (
      td_errors_loss_fn or common.element_wise_huber_loss)
  self._gamma = gamma
  self._reward_scale_factor = reward_scale_factor
  self._target_policy_noise = target_policy_noise
  self._target_policy_noise_clip = target_policy_noise_clip
  self._gradient_clipping = gradient_clipping
  self._update_target = self._get_target_updater(target_update_tau,
                                                 target_update_period)
  # Greedy (clipped) policy for evaluation; Gaussian-perturbed policy for
  # collection, built on an unclipped actor policy so noise is added before
  # the final clip.
  policy = actor_policy.ActorPolicy(
      time_step_spec=time_step_spec,
      action_spec=action_spec,
      actor_network=self._actor_network,
      clip=True)
  collect_policy = actor_policy.ActorPolicy(
      time_step_spec=time_step_spec,
      action_spec=action_spec,
      actor_network=self._actor_network,
      clip=False)
  collect_policy = gaussian_policy.GaussianPolicy(
      collect_policy, scale=self._exploration_noise_std, clip=True)
  super(Td3Agent, self).__init__(
      time_step_spec,
      action_spec,
      policy,
      collect_policy,
      train_sequence_length=2 if not self._actor_network.state_spec else None,
      debug_summaries=debug_summaries,
      summarize_grads_and_vars=summarize_grads_and_vars,
      train_step_counter=train_step_counter)
def __init__(self,
             time_step_spec,
             action_spec,
             actor_network,
             critic_network,
             actor_optimizer,
             critic_optimizer,
             exploration_noise_std=0.1,
             target_update_tau=1.0,
             target_update_period=1,
             actor_update_period=1,
             dqda_clipping=None,
             td_errors_loss_fn=None,
             gamma=1.0,
             reward_scale_factor=1.0,
             target_policy_noise=0.2,
             target_policy_noise_clip=0.5,
             gradient_clipping=None,
             debug_summaries=False,
             summarize_grads_and_vars=False,
             train_step_counter=None,
             name=None):
  """Creates a Td3Agent Agent.

  Args:
    time_step_spec: A `TimeStep` spec of the expected time_steps.
    action_spec: A nest of BoundedTensorSpec representing the actions.
    actor_network: A tf_agents.network.Network to be used by the agent. The
      network will be called with call(observation, step_type).
    critic_network: A tf_agents.network.Network to be used by the agent. The
      network will be called with call(observation, action, step_type).
    actor_optimizer: The default optimizer to use for the actor network.
    critic_optimizer: The default optimizer to use for the critic network.
    exploration_noise_std: Scale factor on exploration policy noise.
    target_update_tau: Factor for soft update of the target networks.
    target_update_period: Period for soft update of the target networks.
    actor_update_period: Period for the optimization step on actor network.
    dqda_clipping: A scalar or float clips the gradient dqda element-wise
      between [-dqda_clipping, dqda_clipping]. Default is None representing
      no clipping.
    td_errors_loss_fn: A function for computing the TD errors loss. If None,
      a default value of elementwise huber_loss is used.
    gamma: A discount factor for future rewards.
    reward_scale_factor: Multiplicative scale for the reward.
    target_policy_noise: Scale factor on target action noise.
    target_policy_noise_clip: Value to clip noise.
    gradient_clipping: Norm length to clip gradients.
    debug_summaries: A bool to gather debug summaries.
    summarize_grads_and_vars: If True, gradient and network variable
      summaries will be written during training.
    train_step_counter: An optional counter to increment every time the
      train op is run. Defaults to the global_step.
    name: The name of this agent. All variables in this module will fall
      under that name. Defaults to the class name.
  """
  tf.Module.__init__(self, name=name)
  self._actor_network = actor_network
  # Target networks are fresh copies with their own weights; the soft/hard
  # target update op below keeps them tracking the online networks.
  self._target_actor_network = actor_network.copy(
      name='TargetActorNetwork')
  self._critic_network_1 = critic_network
  self._target_critic_network_1 = critic_network.copy(
      name='TargetCriticNetwork1')
  self._critic_network_2 = critic_network.copy(name='CriticNetwork2')
  self._target_critic_network_2 = critic_network.copy(
      name='TargetCriticNetwork2')
  self._actor_optimizer = actor_optimizer
  self._critic_optimizer = critic_optimizer
  self._exploration_noise_std = exploration_noise_std
  self._target_update_tau = target_update_tau
  self._target_update_period = target_update_period
  self._actor_update_period = actor_update_period
  self._dqda_clipping = dqda_clipping
  self._td_errors_loss_fn = (
      td_errors_loss_fn or common.element_wise_huber_loss)
  self._gamma = gamma
  self._reward_scale_factor = reward_scale_factor
  self._target_policy_noise = target_policy_noise
  self._target_policy_noise_clip = target_policy_noise_clip
  self._gradient_clipping = gradient_clipping
  self._update_target = self._get_target_updater(
      target_update_tau, target_update_period)
  # Greedy (clipped) policy for evaluation; Gaussian-noised policy for
  # collection, built on an unclipped actor policy so noise is added before
  # the final clip.
  policy = actor_policy.ActorPolicy(
      time_step_spec=time_step_spec,
      action_spec=action_spec,
      actor_network=self._actor_network,
      clip=True)
  collect_policy = actor_policy.ActorPolicy(
      time_step_spec=time_step_spec,
      action_spec=action_spec,
      actor_network=self._actor_network,
      clip=False)
  collect_policy = gaussian_policy.GaussianPolicy(
      collect_policy, scale=self._exploration_noise_std, clip=True)
  super(Td3Agent, self).__init__(
      time_step_spec,
      action_spec,
      policy,
      collect_policy,
      train_sequence_length=2 if not self._actor_network.state_spec else None,
      debug_summaries=debug_summaries,
      summarize_grads_and_vars=summarize_grads_and_vars,
      train_step_counter=train_step_counter)
def testBuild(self):
    """Wrapping preserves the specs and exposes the wrapped policy's variables."""
    gaussian_wrapper = gaussian_policy.GaussianPolicy(self._wrapped_policy)
    self.assertEqual(gaussian_wrapper.time_step_spec, self._time_step_spec)
    self.assertEqual(gaussian_wrapper.action_spec, self._action_spec)
    # Both variables of the wrapped policy are visible through the wrapper.
    self.assertEqual(len(gaussian_wrapper.variables()), 2)
def __init__(self,
             time_step_spec,
             action_spec,
             actor_network,
             critic_network,
             actor_optimizer=None,
             critic_optimizer=None,
             exploration_noise_stddev=2.0,
             target_actor_network=None,
             target_critic_network=None,
             target_update_tau=0.001,
             target_update_period=1,
             dqda_clipping=None,
             td_errors_loss_fn=tf.math.squared_difference,
             gamma=1.0,
             reward_scale_factor=1.0,
             gradient_clipping=None,
             debug_summaries=False,
             summarize_grads_and_vars=False,
             train_step_counter=None,
             name=None):
  """Creates a DDPG Agent.

  Args:
    time_step_spec: A `TimeStep` spec of the expected time_steps.
    action_spec: A nest of BoundedTensorSpec representing the actions.
    actor_network: A tf_agents.network.Network to be used by the agent. The
      network will be called with call(observation, step_type[,
      policy_state]) and should return (action, new_state).
    critic_network: A tf_agents.network.Network to be used by the agent. The
      network will be called with call((observation, action), step_type[,
      policy_state]) and should return (q_value, new_state).
    actor_optimizer: The optimizer to use for the actor network.
    critic_optimizer: The optimizer to use for the critic network.
    exploration_noise_stddev: Standard deviation of the Gaussian noise added
      to actions in the default collect policy.
    target_actor_network: (Optional.) A `tf_agents.network.Network` to be
      used as the actor target network during Q learning. Every
      `target_update_period` train steps, the weights from `actor_network`
      are copied (possibly with smoothing via `target_update_tau`) to
      `target_actor_network`. If `target_actor_network` is not provided, it
      is created by making a copy of `actor_network`, which initializes a
      new network with the same structure and its own layers and weights.

      Performing a `Network.copy` does not work when the network instance
      already has trainable parameters (e.g., has already been built, or
      when the network is sharing layers with another). In these cases, it
      is up to you to build a copy having weights that are not shared with
      the original `actor_network`, so that this can be used as a target
      network. If you provide a `target_actor_network` that shares any
      weights with `actor_network`, a warning will be logged but no
      exception is thrown.
    target_critic_network: (Optional.) Similar network as
      target_actor_network but for the critic_network. See documentation for
      target_actor_network.
    target_update_tau: Factor for soft update of the target networks.
    target_update_period: Period for soft update of the target networks.
    dqda_clipping: when computing the actor loss, clips the gradient dqda
      element-wise between [-dqda_clipping, dqda_clipping]. Does not perform
      clipping if dqda_clipping == 0.
    td_errors_loss_fn: A function for computing the TD errors loss. If None,
      a default value of elementwise huber_loss is used.
    gamma: A discount factor for future rewards.
    reward_scale_factor: Multiplicative scale for the reward.
    gradient_clipping: Norm length to clip gradients.
    debug_summaries: A bool to gather debug summaries.
    summarize_grads_and_vars: If True, gradient and network variable
      summaries will be written during training.
    train_step_counter: An optional counter to increment every time the
      train op is run. Defaults to the global_step.
    name: The name of this agent. All variables in this module will fall
      under that name. Defaults to the class name.
  """
  tf.Module.__init__(self, name=name)
  self._actor_network = actor_network
  actor_network.create_variables()
  if target_actor_network:
    target_actor_network.create_variables()
  # Reuse the provided target network when safe, otherwise copy the online
  # network; the helper warns if the target shares weights with the online net.
  self._target_actor_network = common.maybe_copy_target_network_with_checks(
      self._actor_network, target_actor_network, 'TargetActorNetwork')
  self._critic_network = critic_network
  critic_network.create_variables()
  if target_critic_network:
    target_critic_network.create_variables()
  self._target_critic_network = common.maybe_copy_target_network_with_checks(
      self._critic_network, target_critic_network, 'TargetCriticNetwork')
  self._actor_optimizer = actor_optimizer
  self._critic_optimizer = critic_optimizer
  self._standard_normal = ds.Normal(0, 1)
  self._target_update_tau = target_update_tau
  self._target_update_period = target_update_period
  self._dqda_clipping = dqda_clipping
  self._td_errors_loss_fn = (td_errors_loss_fn or
                             common.element_wise_huber_loss)
  self._gamma = gamma
  self._reward_scale_factor = reward_scale_factor
  self._gradient_clipping = gradient_clipping
  self._update_target = self._get_target_updater(target_update_tau,
                                                 target_update_period)
  # Evaluation policy samples alpha uniformly in [0.1, 0.3); the collect
  # policy adds Gaussian exploration noise on top of the unclipped policy.
  policy = agents.WcpgPolicy(
      time_step_spec=time_step_spec,
      action_spec=action_spec,
      alpha_sampler=lambda: np.random.uniform(0.1, 0.3),
      actor_network=self._actor_network,
      clip=True)
  collect_policy = agents.WcpgPolicy(
      time_step_spec=time_step_spec,
      action_spec=action_spec,
      actor_network=self._actor_network,
      clip=False)
  collect_policy = gaussian_policy.GaussianPolicy(
      collect_policy, scale=exploration_noise_stddev, clip=True)
  super(ddpg_agent.DdpgAgent, self).__init__(
      time_step_spec,
      action_spec,
      policy,
      collect_policy,
      train_sequence_length=2 if not self._actor_network.state_spec else None,
      debug_summaries=debug_summaries,
      summarize_grads_and_vars=summarize_grads_and_vars,
      train_step_counter=train_step_counter)
def train_and_evaluate_ACagent(tf_agent,
                               train_env=None,
                               eval_env=None,
                               num_iterations=None,
                               batch_size=32,
                               replay_buffer_capacity=1000,
                               name='agent',
                               save_dir=r'C:\Users\DELL\Desktop\Python\\',
                               log_interval=50,
                               eval_interval=50):
    """Trains an actor-critic agent with a Gaussian collect policy and evaluates it.

    Args:
        tf_agent: An initialized-able TF-Agents agent (provides `policy`,
            `collect_policy`, `collect_data_spec`, `train`, `train_step_counter`).
        train_env: TF environment used for data collection and train-return eval.
        eval_env: TF environment used for periodic policy evaluation.
        num_iterations: Number of training iterations to run.
        batch_size: Batch size sampled from the replay buffer per train step.
        replay_buffer_capacity: Maximum number of items in the replay buffer.
        name: Prefix used when saving policy checkpoints.
        save_dir: Directory prefix for saved policies. Defaults to the original
            hard-coded path for backward compatibility.
        log_interval: Steps between loss log lines (was a hard-coded 50).
        eval_interval: Steps between evaluations/policy saves (was a hard-coded 50).

    Returns:
        Tuple of (eval steps, eval returns, name, loss steps, losses,
        train-env returns).

    Raises:
        ValueError: If `train_env`, `eval_env`, or `num_iterations` is None.
    """
    if train_env is None:
        raise ValueError(
            "train_env is None! Environment should be implemented")
    if eval_env is None:
        raise ValueError(
            "eval_env is None! Environment for evaluation should be implemented"
        )
    if num_iterations is None:
        raise ValueError("Number of iterations should be implemented!")
    tf_agent.initialize()
    initial_collect_steps = 1
    collect_steps_per_iteration = 1
    print('Initial collect step is', initial_collect_steps)
    print('collect steps per iteration', collect_steps_per_iteration)
    print('batch size is ', batch_size)
    print('replay buffer capacity is', replay_buffer_capacity)
    eval_policy = tf_agent.policy
    # Wrap the collect policy with Gaussian exploration noise.
    collect_policy = gaussian_policy.GaussianPolicy(tf_agent.collect_policy)
    replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
        data_spec=tf_agent.collect_data_spec,
        batch_size=1,
        max_length=replay_buffer_capacity)
    initial_collect_driver = dynamic_step_driver.DynamicStepDriver(
        train_env,
        collect_policy,
        observers=[replay_buffer.add_batch],
        num_steps=initial_collect_steps)
    initial_collect_driver.run()
    collect_driver = dynamic_step_driver.DynamicStepDriver(
        train_env,
        collect_policy,
        observers=[replay_buffer.add_batch],
        num_steps=collect_steps_per_iteration)
    # Compile the hot paths into TF graphs.
    tf_agent.train = common.function(tf_agent.train)
    collect_driver.run = common.function(collect_driver.run)
    # Reset the train step.
    tf_agent.train_step_counter.assign(0)
    # Evaluate the agent's policy once before training.
    avg_return = compute_avg_return(eval_env, eval_policy)
    train_return = compute_avg_return(train_env, eval_policy)
    returns = [(0, avg_return)]
    losses = []
    train_returns = [train_return]
    # FIX: build the dataset and its iterator once, before the loop. The
    # original rebuilt both every iteration, which is wasteful and defeats
    # the dataset pipeline; sampling still sees newly collected experience.
    dataset = replay_buffer.as_dataset(batch_size, 2)
    iterator = iter(dataset)
    for _ in range(num_iterations):
        # Collect a few steps using collect_policy and save to the replay buffer.
        for _ in range(collect_steps_per_iteration):
            collect_driver.run()
        # Sample a batch of data from the buffer and update the agent's network.
        experience, _ = next(iterator)
        train_loss = tf_agent.train(experience)
        step = tf_agent.train_step_counter.numpy()
        if step % log_interval == 0:
            print('step = {0}: loss = {1}'.format(step, train_loss.loss))
            losses.append((step, train_loss.loss))
        if step % eval_interval == 0:
            avg_return = compute_avg_return(eval_env, eval_policy)
            train_avg_return = compute_avg_return(train_env, eval_policy)
            print('step = {0}: Average Return = {1}'.format(step, avg_return))
            returns.append((step, avg_return))
            train_returns.append(train_avg_return)
            saver = PolicySaver(tf_agent.policy, batch_size=None)
            saver.save(save_dir + name + "policy_%d" % step)
    steps_list = [r[0] for r in returns]
    rewards_list = [r[1] for r in returns]
    loss_steps_list = [l[0] for l in losses]
    loss_list = [l[1] for l in losses]
    return (steps_list, rewards_list, name, loss_steps_list, loss_list,
            train_returns)
def get_env_and_policy(load_dir, env_name, alpha, env_seed=0, tabular_obs=False):
  """Builds a (TF environment, behavior policy) pair for the named task.

  Args:
    load_dir: Root directory containing saved policies/checkpoints.
    alpha: In [0, 1]; higher alpha means a less exploratory (closer to
      optimal) behavior policy, per the per-environment epsilon/noise formulas.
    env_seed: Seed applied to the underlying environment.
    tabular_obs: Whether tabular observations are used (taxi/grid only).

  Returns:
    Tuple of (tf_env, policy).

  Raises:
    ValueError: If `env_name` is not recognized.
  """

  def _wrap_policy(tf_env, policy_fn, policy_info_spec):
    # Shared boilerplate: adapt a raw policy function to a TF-Agents policy.
    return common_lib.TFAgentsWrappedPolicy(
        tf_env.time_step_spec(),
        tf_env.action_spec(),
        policy_fn,
        policy_info_spec,
        emit_log_probability=True)

  if env_name == 'taxi':
    env = taxi.Taxi(tabular_obs=tabular_obs)
    env.seed(env_seed)
    policy_fn, policy_info_spec = taxi.get_taxi_policy(
        load_dir, env, alpha=alpha, py=False)
    tf_env = tf_py_environment.TFPyEnvironment(gym_wrapper.GymWrapper(env))
    policy = _wrap_policy(tf_env, policy_fn, policy_info_spec)
  elif env_name == 'grid':
    env = navigation.GridWalk(tabular_obs=tabular_obs)
    env.seed(env_seed)
    policy_fn, policy_info_spec = navigation.get_navigation_policy(
        env, epsilon_explore=0.1 + 0.6 * (1 - alpha), py=False)
    tf_env = tf_py_environment.TFPyEnvironment(gym_wrapper.GymWrapper(env))
    policy = _wrap_policy(tf_env, policy_fn, policy_info_spec)
  elif env_name == 'tree':
    env = tree.Tree(branching=2, depth=10)
    env.seed(env_seed)
    policy_fn, policy_info_spec = tree.get_tree_policy(
        env, epsilon_explore=0.1 + 0.8 * (1 - alpha), py=False)
    tf_env = tf_py_environment.TFPyEnvironment(gym_wrapper.GymWrapper(env))
    policy = _wrap_policy(tf_env, policy_fn, policy_info_spec)
  elif env_name.startswith('bandit'):
    # Arm count is encoded in the name, e.g. 'bandit5'; bare 'bandit' means 2.
    num_arms = int(env_name[6:]) if len(env_name) > 6 else 2
    env = bandit.Bandit(num_arms=num_arms)
    env.seed(env_seed)
    policy_fn, policy_info_spec = bandit.get_bandit_policy(
        env, epsilon_explore=1 - alpha, py=False)
    tf_env = tf_py_environment.TFPyEnvironment(gym_wrapper.GymWrapper(env))
    policy = _wrap_policy(tf_env, policy_fn, policy_info_spec)
  elif env_name == 'small_tree':
    env = tree.Tree(branching=2, depth=3, loop=True)
    env.seed(env_seed)
    policy_fn, policy_info_spec = tree.get_tree_policy(
        env, epsilon_explore=0.1 + 0.8 * (1 - alpha), py=False)
    tf_env = tf_py_environment.TFPyEnvironment(gym_wrapper.GymWrapper(env))
    policy = _wrap_policy(tf_env, policy_fn, policy_info_spec)
  elif env_name == 'CartPole-v0':
    tf_env, policy = get_env_and_dqn_policy(
        env_name,
        os.path.join(load_dir, 'CartPole-v0', 'train', 'policy'),
        env_seed=env_seed,
        epsilon=0.3 + 0.15 * (1 - alpha))
  elif env_name == 'cartpole':
    # Infinite-horizon cartpole: load the finite-horizon policy, then swap in
    # a non-terminating environment.
    tf_env, policy = get_env_and_dqn_policy(
        'CartPole-v0',
        os.path.join(load_dir, 'CartPole-v0-250', 'train', 'policy'),
        env_seed=env_seed,
        epsilon=0.3 + 0.15 * (1 - alpha))
    env = InfiniteCartPole()
    tf_env = tf_py_environment.TFPyEnvironment(gym_wrapper.GymWrapper(env))
  elif env_name == 'FrozenLake-v0':
    tf_env, policy = get_env_and_dqn_policy(
        'FrozenLake-v0',
        os.path.join(load_dir, 'FrozenLake-v0', 'train', 'policy'),
        env_seed=env_seed,
        epsilon=0.2 * (1 - alpha),
        ckpt_file='ckpt-100000')
  elif env_name == 'frozenlake':
    # Infinite-horizon frozenlake.
    tf_env, policy = get_env_and_dqn_policy(
        'FrozenLake-v0',
        os.path.join(load_dir, 'FrozenLake-v0', 'train', 'policy'),
        env_seed=env_seed,
        epsilon=0.2 * (1 - alpha),
        ckpt_file='ckpt-100000')
    env = InfiniteFrozenLake()
    tf_env = tf_py_environment.TFPyEnvironment(gym_wrapper.GymWrapper(env))
  elif env_name in ['Reacher-v2', 'reacher']:
    if env_name == 'Reacher-v2':
      env = suites.load_mujoco(env_name)
    else:
      env = gym_wrapper.GymWrapper(InfiniteReacher())
    env.seed(env_seed)
    tf_env = tf_py_environment.TFPyEnvironment(env)
    sac_policy = get_sac_policy(tf_env)
    directory = os.path.join(load_dir, 'Reacher-v2', 'train', 'policy')
    policy = load_policy(sac_policy, env_name, directory)
    policy = gaussian_policy.GaussianPolicy(policy, 0.4 - 0.3 * alpha)
  elif env_name == 'HalfCheetah-v2':
    env = suites.load_mujoco(env_name)
    env.seed(env_seed)
    tf_env = tf_py_environment.TFPyEnvironment(env)
    sac_policy = get_sac_policy(tf_env)
    directory = os.path.join(load_dir, env_name, 'train', 'policy')
    policy = load_policy(sac_policy, env_name, directory)
    policy = gaussian_policy.GaussianPolicy(policy, 0.2 - 0.1 * alpha)
  else:
    raise ValueError('Unrecognized environment %s.' % env_name)
  return tf_env, policy