def train(agent, environment, training_loops, steps_per_loop, output_path):
  from datetime import datetime

  optimal_reward_fn = functools.partial(
      env_util.compute_optimal_reward_with_classification_environment,
      environment=environment,
  )
  optimal_action_fn = functools.partial(
      env_util.compute_optimal_action_with_classification_environment,
      environment=environment,
  )
  regret_metric = tf_bandit_metrics.RegretMetric(optimal_reward_fn)
  cumulative_reward = CumulativeRewardMetric()
  suboptimal_arms_metric = tf_bandit_metrics.SuboptimalArmsMetric(
      optimal_action_fn)
  metrics = [regret_metric, suboptimal_arms_metric, cumulative_reward]

  t1 = datetime.now()
  trainer.train(
      root_dir=output_path,
      agent=agent,
      environment=environment,
      training_loops=training_loops,
      steps_per_loop=steps_per_loop,  # 452950//batch_size,
      additional_metrics=metrics,
  )
  t2 = datetime.now()
  print("Training time in minutes:")
  print((t2 - t1).total_seconds() / 60)
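# Illustrative usage of the `train` wrapper above (a sketch, not part of the
# original script): the classification environment and LinUCB agent shown here
# are assumptions; any bandit agent/environment pair with matching specs works,
# but the optimal-reward/action helpers expect a classification environment.
#
#   environment = ce.ClassificationBanditEnvironment(
#       dataset, reward_distribution, batch_size)
#   agent = lin_ucb_agent.LinearUCBAgent(
#       time_step_spec=environment.time_step_spec(),
#       action_spec=environment.action_spec(),
#       dtype=tf.float32)
#   train(agent, environment, training_loops=100, steps_per_loop=2,
#         output_path='/tmp/classification_bandit')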
def main(unused_argv):
  tf.compat.v1.enable_v2_behavior()  # The trainer only runs with V2 enabled.

  means = [0.1, 0.2, 0.3, 0.45, 0.5]
  env = bern_env.BernoulliPyEnvironment(means=means, batch_size=BATCH_SIZE)
  environment = tf_py_environment.TFPyEnvironment(env)

  def optimal_reward_fn(unused_observation):
    return np.max(means)

  def optimal_action_fn(unused_observation):
    return np.int32(np.argmax(means))

  if FLAGS.agent == 'BernTS':
    agent = bern_ts_agent.BernoulliThompsonSamplingAgent(
        time_step_spec=environment.time_step_spec(),
        action_spec=environment.action_spec(),
        dtype=tf.float64,
        batch_size=BATCH_SIZE)
  else:
    raise ValueError('Only BernoulliTS is supported for now.')

  regret_metric = tf_bandit_metrics.RegretMetric(optimal_reward_fn)
  suboptimal_arms_metric = tf_bandit_metrics.SuboptimalArmsMetric(
      optimal_action_fn)

  trainer.train(
      root_dir=FLAGS.root_dir,
      agent=agent,
      environment=environment,
      training_loops=TRAINING_LOOPS,
      steps_per_loop=STEPS_PER_LOOP,
      additional_metrics=[regret_metric, suboptimal_arms_metric],
      save_policy=False)
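# Minimal launch sketch for the Bernoulli example above (illustrative only;
# the script filename is hypothetical, the flag names follow the FLAGS
# references inside the function, typically run via absl.app.run(main)):
#
#   python bernoulli_example.py --root_dir=/tmp/bernoulli_bandit --agent=BernTS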
def main(unused_argv):
  tf.compat.v1.enable_v2_behavior()  # The trainer only runs with V2 enabled.

  data_path = FLAGS.data_path
  if not data_path:
    raise ValueError('Please specify the location of the data file.')
  env = movielens_py_environment.MovieLensPyEnvironment(
      data_path, RANK_K, BATCH_SIZE, num_movies=20)
  environment = tf_py_environment.TFPyEnvironment(env)

  optimal_reward_fn = functools.partial(
      environment_utilities.compute_optimal_reward_with_movielens_environment,
      environment=environment)
  optimal_action_fn = functools.partial(
      environment_utilities.compute_optimal_action_with_movielens_environment,
      environment=environment)

  if FLAGS.agent == 'LinUCB':
    agent = lin_ucb_agent.LinearUCBAgent(
        time_step_spec=environment.time_step_spec(),
        action_spec=environment.action_spec(),
        tikhonov_weight=0.001,
        alpha=AGENT_ALPHA,
        dtype=tf.float32)
  elif FLAGS.agent == 'LinTS':
    agent = lin_ts_agent.LinearThompsonSamplingAgent(
        time_step_spec=environment.time_step_spec(),
        action_spec=environment.action_spec(),
        dtype=tf.float32)
  elif FLAGS.agent == 'epsGreedy':
    network = q_network.QNetwork(
        input_tensor_spec=environment.time_step_spec().observation,
        action_spec=environment.action_spec(),
        fc_layer_params=LAYERS)
    agent = eps_greedy_agent.NeuralEpsilonGreedyAgent(
        time_step_spec=environment.time_step_spec(),
        action_spec=environment.action_spec(),
        reward_network=network,
        optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=LR),
        epsilon=EPSILON)
  elif FLAGS.agent == 'DropoutTS':
    agent = dropout_ts_agent.DropoutThompsonSamplingAgent(
        time_step_spec=environment.time_step_spec(),
        action_spec=environment.action_spec(),
        dropout_rate=DROPOUT_RATE,
        network_layers=LAYERS,
        optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=LR))

  regret_metric = tf_bandit_metrics.RegretMetric(optimal_reward_fn)
  suboptimal_arms_metric = tf_bandit_metrics.SuboptimalArmsMetric(
      optimal_action_fn)

  trainer.train(
      root_dir=FLAGS.root_dir,
      agent=agent,
      environment=environment,
      training_loops=TRAINING_LOOPS,
      steps_per_loop=STEPS_PER_LOOP,
      additional_metrics=[regret_metric, suboptimal_arms_metric])
def main(unused_argv):
  tf.compat.v1.enable_v2_behavior()  # The trainer only runs with V2 enabled.

  class LinearNormalReward(object):

    def __init__(self, theta):
      self.theta = theta

    def __call__(self, x):
      mu = np.dot(x, self.theta)
      return np.random.normal(mu, 1)

  def _global_context_sampling_fn():
    return np.random.randint(-10, 10, [4]).astype(np.float32)

  def _arm_context_sampling_fn():
    return np.random.randint(-2, 3, [5]).astype(np.float32)

  reward_fn = LinearNormalReward(HIDDEN_PARAM)

  env = sspe.StationaryStochasticPerArmPyEnvironment(
      _global_context_sampling_fn,
      _arm_context_sampling_fn,
      NUM_ACTIONS,
      reward_fn,
      batch_size=BATCH_SIZE)
  environment = tf_py_environment.TFPyEnvironment(env)

  obs_spec = environment.observation_spec()
  if FLAGS.network == 'commontower':
    network = (
        global_and_arm_feature_network
        .create_feed_forward_common_tower_network(obs_spec, (4, 3), (3, 4),
                                                  (4, 2)))
  elif FLAGS.network == 'dotproduct':
    network = (
        global_and_arm_feature_network
        .create_feed_forward_dot_product_network(obs_spec, (4, 3, 6),
                                                 (3, 4, 6)))
  agent = neural_epsilon_greedy_agent.NeuralEpsilonGreedyAgent(
      time_step_spec=environment.time_step_spec(),
      action_spec=environment.action_spec(),
      reward_network=network,
      optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=LR),
      epsilon=EPSILON,
      accepts_per_arm_features=True,
      emit_policy_info=policy_utilities.InfoFields.PREDICTED_REWARDS_MEAN)

  optimal_reward_fn = functools.partial(
      optimal_reward, hidden_param=HIDDEN_PARAM)
  optimal_action_fn = functools.partial(
      optimal_action, hidden_param=HIDDEN_PARAM)
  regret_metric = tf_bandit_metrics.RegretMetric(optimal_reward_fn)
  suboptimal_arms_metric = tf_bandit_metrics.SuboptimalArmsMetric(
      optimal_action_fn)

  trainer.train(
      root_dir=FLAGS.root_dir,
      agent=agent,
      environment=environment,
      training_loops=TRAINING_LOOPS,
      steps_per_loop=STEPS_PER_LOOP,
      additional_metrics=[regret_metric, suboptimal_arms_metric])
def main(unused_argv):
  tf.compat.v1.enable_v2_behavior()  # The trainer only runs with V2 enabled.

  with tf.device('/CPU:0'):  # due to b/128333994
    covertype_dataset = dataset_utilities.convert_covertype_dataset(
        FLAGS.covertype_csv)
    covertype_reward_distribution = tfd.Independent(
        tfd.Deterministic(tf.eye(7)), reinterpreted_batch_ndims=2)
    environment = ce.ClassificationBanditEnvironment(
        covertype_dataset, covertype_reward_distribution, BATCH_SIZE)

    optimal_reward_fn = functools.partial(
        env_util.compute_optimal_reward_with_classification_environment,
        environment=environment)

    optimal_action_fn = functools.partial(
        env_util.compute_optimal_action_with_classification_environment,
        environment=environment)

    if FLAGS.agent == 'LinUCB':
      agent = lin_ucb_agent.LinearUCBAgent(
          time_step_spec=environment.time_step_spec(),
          action_spec=environment.action_spec(),
          alpha=AGENT_ALPHA,
          emit_log_probability=False,
          dtype=tf.float32)
    elif FLAGS.agent == 'LinTS':
      agent = lin_ts_agent.LinearThompsonSamplingAgent(
          time_step_spec=environment.time_step_spec(),
          action_spec=environment.action_spec(),
          alpha=AGENT_ALPHA,
          dtype=tf.float32)
    elif FLAGS.agent == 'epsGreedy':
      network = q_network.QNetwork(
          input_tensor_spec=environment.time_step_spec().observation,
          action_spec=environment.action_spec(),
          fc_layer_params=LAYERS)
      agent = eps_greedy_agent.NeuralEpsilonGreedyAgent(
          time_step_spec=environment.time_step_spec(),
          action_spec=environment.action_spec(),
          reward_network=network,
          optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=LR),
          epsilon=EPSILON)

    regret_metric = tf_bandit_metrics.RegretMetric(optimal_reward_fn)
    suboptimal_arms_metric = tf_bandit_metrics.SuboptimalArmsMetric(
        optimal_action_fn)

    trainer.train(
        root_dir=FLAGS.root_dir,
        agent=agent,
        environment=environment,
        training_loops=TRAINING_LOOPS,
        steps_per_loop=STEPS_PER_LOOP,
        additional_metrics=[regret_metric, suboptimal_arms_metric])
def main(unused_argv):
  tf.compat.v1.enable_resource_variables()

  with tf.device('/CPU:0'):  # due to b/128333994
    env = wheel_py_environment.WheelPyEnvironment(DELTA, MU_BASE, STD_BASE,
                                                  MU_HIGH, STD_HIGH,
                                                  BATCH_SIZE)
    environment = tf_py_environment.TFPyEnvironment(env)

    optimal_reward_fn = functools.partial(
        environment_utilities.tf_wheel_bandit_compute_optimal_reward,
        delta=DELTA,
        mu_inside=MU_BASE[0],
        mu_high=MU_HIGH)
    optimal_action_fn = functools.partial(
        environment_utilities.tf_wheel_bandit_compute_optimal_action,
        delta=DELTA)

    if FLAGS.agent == 'LinUCB':
      agent = lin_ucb_agent.LinearUCBAgent(
          time_step_spec=environment.time_step_spec(),
          action_spec=environment.action_spec(),
          alpha=AGENT_ALPHA,
          dtype=tf.float32)
    elif FLAGS.agent == 'LinTS':
      agent = lin_ts_agent.LinearThompsonSamplingAgent(
          time_step_spec=environment.time_step_spec(),
          action_spec=environment.action_spec(),
          alpha=AGENT_ALPHA,
          dtype=tf.float32)
    elif FLAGS.agent == 'epsGreedy':
      network = q_network.QNetwork(
          input_tensor_spec=environment.time_step_spec().observation,
          action_spec=environment.action_spec(),
          fc_layer_params=LAYERS)
      agent = eps_greedy_agent.NeuralEpsilonGreedyAgent(
          time_step_spec=environment.time_step_spec(),
          action_spec=environment.action_spec(),
          reward_network=network,
          optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=LR),
          epsilon=EPSILON)

    regret_metric = tf_bandit_metrics.RegretMetric(optimal_reward_fn)
    suboptimal_arms_metric = tf_bandit_metrics.SuboptimalArmsMetric(
        optimal_action_fn)

    trainer.train(
        root_dir=FLAGS.root_dir,
        agent=agent,
        environment=environment,
        training_loops=TRAINING_LOOPS,
        steps_per_loop=STEPS_PER_LOOP,
        additional_metrics=[regret_metric, suboptimal_arms_metric])
def main(unused_argv):
  tf.compat.v1.enable_v2_behavior()  # The trainer only runs with V2 enabled.

  with tf.device('/CPU:0'):  # due to b/128333994
    mushroom_reward_distribution = (
        dataset_utilities.mushroom_reward_distribution(
            r_noeat=0.0,
            r_eat_safe=5.0,
            r_eat_poison_bad=-35.0,
            r_eat_poison_good=5.0,
            prob_poison_bad=0.5))
    mushroom_dataset = (
        dataset_utilities.convert_mushroom_csv_to_tf_dataset(
            FLAGS.mushroom_csv))
    environment = ce.ClassificationBanditEnvironment(
        mushroom_dataset, mushroom_reward_distribution, BATCH_SIZE)

    optimal_reward_fn = functools.partial(
        env_util.compute_optimal_reward_with_classification_environment,
        environment=environment)

    optimal_action_fn = functools.partial(
        env_util.compute_optimal_action_with_classification_environment,
        environment=environment)

    if FLAGS.agent == 'LinUCB':
      agent = lin_ucb_agent.LinearUCBAgent(
          time_step_spec=environment.time_step_spec(),
          action_spec=environment.action_spec(),
          alpha=AGENT_ALPHA,
          gamma=0.95,
          emit_log_probability=False,
          dtype=tf.float32)
    elif FLAGS.agent == 'LinTS':
      agent = lin_ts_agent.LinearThompsonSamplingAgent(
          time_step_spec=environment.time_step_spec(),
          action_spec=environment.action_spec(),
          alpha=AGENT_ALPHA,
          gamma=0.95,
          dtype=tf.float32)

    regret_metric = tf_bandit_metrics.RegretMetric(optimal_reward_fn)
    suboptimal_arms_metric = tf_bandit_metrics.SuboptimalArmsMetric(
        optimal_action_fn)

    trainer.train(
        root_dir=FLAGS.root_dir,
        agent=agent,
        environment=environment,
        training_loops=TRAINING_LOOPS,
        steps_per_loop=STEPS_PER_LOOP,
        additional_metrics=[regret_metric, suboptimal_arms_metric])
def main(unused_argv):
  tf.compat.v1.enable_v2_behavior()  # The trainer only runs with V2 enabled.

  with tf.device('/CPU:0'):  # due to b/128333994
    observation_shape = [CONTEXT_DIM]
    overall_shape = [BATCH_SIZE] + observation_shape
    observation_distribution = tfd.Normal(
        loc=tf.zeros(overall_shape), scale=tf.ones(overall_shape))
    action_shape = [NUM_ACTIONS]
    observation_to_reward_shape = observation_shape + action_shape
    observation_to_reward_distribution = tfd.Normal(
        loc=tf.zeros(observation_to_reward_shape),
        scale=tf.ones(observation_to_reward_shape))
    drift_distribution = tfd.Normal(loc=DRIFT_MEAN, scale=DRIFT_VARIANCE)
    additive_reward_distribution = tfd.Normal(
        loc=tf.zeros(action_shape),
        scale=(REWARD_NOISE_VARIANCE * tf.ones(action_shape)))
    environment_dynamics = dle.DriftingLinearDynamics(
        observation_distribution,
        observation_to_reward_distribution,
        drift_distribution,
        additive_reward_distribution)
    environment = nse.NonStationaryStochasticEnvironment(environment_dynamics)

    if FLAGS.agent == 'LinUCB':
      agent = lin_ucb_agent.LinearUCBAgent(
          time_step_spec=environment.time_step_spec(),
          action_spec=environment.action_spec(),
          alpha=AGENT_ALPHA,
          gamma=0.95,
          emit_log_probability=False,
          dtype=tf.float32)
    elif FLAGS.agent == 'LinTS':
      agent = lin_ts_agent.LinearThompsonSamplingAgent(
          time_step_spec=environment.time_step_spec(),
          action_spec=environment.action_spec(),
          alpha=AGENT_ALPHA,
          gamma=0.95,
          dtype=tf.float32)

    regret_metric = tf_bandit_metrics.RegretMetric(
        environment.environment_dynamics.compute_optimal_reward)
    suboptimal_arms_metric = tf_bandit_metrics.SuboptimalArmsMetric(
        environment.environment_dynamics.compute_optimal_action)

    trainer.train(
        root_dir=FLAGS.root_dir,
        agent=agent,
        environment=environment,
        training_loops=TRAINING_LOOPS,
        steps_per_loop=STEPS_PER_LOOP,
        additional_metrics=[regret_metric, suboptimal_arms_metric])
def main(unused_argv):
  tf.compat.v1.enable_v2_behavior()  # The trainer only runs with V2 enabled.

  with tf.device('/CPU:0'):  # due to b/128333994
    action_reward_fns = (
        environment_utilities.sliding_linear_reward_fn_generator(
            CONTEXT_DIM, NUM_ACTIONS, REWARD_NOISE_VARIANCE))

    env = sspe.StationaryStochasticPyEnvironment(
        functools.partial(
            environment_utilities.context_sampling_fn,
            batch_size=BATCH_SIZE,
            context_dim=CONTEXT_DIM),
        action_reward_fns,
        batch_size=BATCH_SIZE)
    environment = tf_py_environment.TFPyEnvironment(env)

    optimal_reward_fn = functools.partial(
        environment_utilities.tf_compute_optimal_reward,
        per_action_reward_fns=action_reward_fns)

    optimal_action_fn = functools.partial(
        environment_utilities.tf_compute_optimal_action,
        per_action_reward_fns=action_reward_fns)

    q_net = q_network.QNetwork(
        environment.observation_spec(),
        environment.action_spec(),
        fc_layer_params=(50, 50))

    agent = dqn_agent.DqnAgent(
        environment.time_step_spec(),
        environment.action_spec(),
        q_network=q_net,
        epsilon_greedy=0.1,
        target_update_tau=0.05,
        target_update_period=5,
        optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=1e-2),
        td_errors_loss_fn=common.element_wise_squared_loss)

    regret_metric = tf_bandit_metrics.RegretMetric(optimal_reward_fn)
    suboptimal_arms_metric = tf_bandit_metrics.SuboptimalArmsMetric(
        optimal_action_fn)

    trainer.train(
        root_dir=FLAGS.root_dir,
        agent=agent,
        environment=environment,
        training_loops=TRAINING_LOOPS,
        steps_per_loop=STEPS_PER_LOOP,
        additional_metrics=[regret_metric, suboptimal_arms_metric])
def main(unused_argv):
  tf.compat.v1.enable_resource_variables()

  with tf.device('/CPU:0'):  # due to b/128333994
    action_reward_fns = (
        environment_utilities.sliding_linear_reward_fn_generator(
            CONTEXT_DIM, NUM_ACTIONS, REWARD_NOISE_VARIANCE))

    env = sspe.StationaryStochasticPyEnvironment(
        functools.partial(
            environment_utilities.context_sampling_fn,
            batch_size=BATCH_SIZE,
            context_dim=CONTEXT_DIM),
        action_reward_fns,
        batch_size=BATCH_SIZE)
    environment = tf_py_environment.TFPyEnvironment(env)

    optimal_reward_fn = functools.partial(
        environment_utilities.tf_compute_optimal_reward,
        per_action_reward_fns=action_reward_fns)

    optimal_action_fn = functools.partial(
        environment_utilities.tf_compute_optimal_action,
        per_action_reward_fns=action_reward_fns)

    if FLAGS.agent == 'LinUCB':
      agent = lin_ucb_agent.LinearUCBAgent(
          time_step_spec=environment.time_step_spec(),
          action_spec=environment.action_spec(),
          alpha=AGENT_ALPHA,
          dtype=tf.float32)
    elif FLAGS.agent == 'LinTS':
      agent = lin_ts_agent.LinearThompsonSamplingAgent(
          time_step_spec=environment.time_step_spec(),
          action_spec=environment.action_spec(),
          alpha=AGENT_ALPHA,
          dtype=tf.float32)

    regret_metric = tf_bandit_metrics.RegretMetric(optimal_reward_fn)
    suboptimal_arms_metric = tf_bandit_metrics.SuboptimalArmsMetric(
        optimal_action_fn)

    trainer.train(
        root_dir=FLAGS.root_dir,
        agent=agent,
        environment=environment,
        training_loops=TRAINING_LOOPS,
        steps_per_loop=STEPS_PER_LOOP,
        additional_metrics=[regret_metric, suboptimal_arms_metric])
def testAgentAndEnvironmentRuns(self, environment_name, agent_name):
  batch_size = 8
  training_loops = 3
  steps_per_loop = 2
  (environment, optimal_reward_fn,
   optimal_action_fn) = get_environment_and_optimal_functions_by_name(
       environment_name, batch_size)
  agent = get_agent_by_name(agent_name, environment.time_step_spec(),
                            environment.action_spec())
  regret_metric = tf_bandit_metrics.RegretMetric(optimal_reward_fn)
  suboptimal_arms_metric = tf_bandit_metrics.SuboptimalArmsMetric(
      optimal_action_fn)
  trainer.train(
      root_dir=tempfile.mkdtemp(dir=os.getenv('TEST_TMPDIR')),
      agent=agent,
      environment=environment,
      training_loops=training_loops,
      steps_per_loop=steps_per_loop,
      additional_metrics=[regret_metric, suboptimal_arms_metric])
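# Hypothetical parameterization for the smoke test above (a sketch assuming
# absl.testing.parameterized; the environment/agent name combinations are
# examples, not the original test's list):
#
#   @parameterized.named_parameters(
#       dict(testcase_name='_stationary_linucb',
#            environment_name='stationary_stochastic', agent_name='LinUCB'),
#       dict(testcase_name='_wheel_epsgreedy',
#            environment_name='wheel', agent_name='epsGreedy'),
#   )
#   def testAgentAndEnvironmentRuns(self, environment_name, agent_name):
#     ...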
def main(unused_argv):
  tf.compat.v1.enable_v2_behavior()  # The trainer only runs with V2 enabled.

  data_path = FLAGS.data_path
  if not data_path:
    raise ValueError('Please specify the location of the data file.')
  if FLAGS.per_arm:
    env = movielens_per_arm_py_environment.MovieLensPerArmPyEnvironment(
        data_path,
        RANK_K,
        BATCH_SIZE,
        num_actions=NUM_ACTIONS,
        csv_delimiter='\t')
  else:
    env = movielens_py_environment.MovieLensPyEnvironment(
        data_path,
        RANK_K,
        BATCH_SIZE,
        num_movies=NUM_ACTIONS,
        csv_delimiter='\t')
  environment = tf_py_environment.TFPyEnvironment(env)

  optimal_reward_fn = functools.partial(
      environment_utilities.compute_optimal_reward_with_movielens_environment,
      environment=environment)
  optimal_action_fn = functools.partial(
      environment_utilities.compute_optimal_action_with_movielens_environment,
      environment=environment)

  if FLAGS.agent == 'LinUCB':
    agent = lin_ucb_agent.LinearUCBAgent(
        time_step_spec=environment.time_step_spec(),
        action_spec=environment.action_spec(),
        tikhonov_weight=0.001,
        alpha=AGENT_ALPHA,
        dtype=tf.float32,
        accepts_per_arm_features=FLAGS.per_arm)
  elif FLAGS.agent == 'LinTS':
    agent = lin_ts_agent.LinearThompsonSamplingAgent(
        time_step_spec=environment.time_step_spec(),
        action_spec=environment.action_spec(),
        dtype=tf.float32,
        accepts_per_arm_features=FLAGS.per_arm)
  elif FLAGS.agent == 'epsGreedy':
    if FLAGS.per_arm:
      network = (
          global_and_arm_feature_network
          .create_feed_forward_dot_product_network(
              environment.time_step_spec().observation,
              global_layers=LAYERS,
              arm_layers=LAYERS))
    else:
      network = q_network.QNetwork(
          input_tensor_spec=environment.time_step_spec().observation,
          action_spec=environment.action_spec(),
          fc_layer_params=LAYERS)
    agent = eps_greedy_agent.NeuralEpsilonGreedyAgent(
        time_step_spec=environment.time_step_spec(),
        action_spec=environment.action_spec(),
        reward_network=network,
        optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=LR),
        epsilon=EPSILON,
        emit_policy_info='predicted_rewards_mean',
        info_fields_to_inherit_from_greedy=['predicted_rewards_mean'])
  elif FLAGS.agent == 'DropoutTS':
    train_step_counter = tf.compat.v1.train.get_or_create_global_step()

    def dropout_fn():
      return tf.math.maximum(
          tf.math.reciprocal_no_nan(
              1.01 + tf.cast(train_step_counter, tf.float32)), 0.0003)

    agent = dropout_ts_agent.DropoutThompsonSamplingAgent(
        time_step_spec=environment.time_step_spec(),
        action_spec=environment.action_spec(),
        dropout_rate=dropout_fn,
        network_layers=LAYERS,
        optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=LR))

  regret_metric = tf_bandit_metrics.RegretMetric(optimal_reward_fn)
  suboptimal_arms_metric = tf_bandit_metrics.SuboptimalArmsMetric(
      optimal_action_fn)

  trainer.train(
      root_dir=FLAGS.root_dir,
      agent=agent,
      environment=environment,
      training_loops=TRAINING_LOOPS,
      steps_per_loop=STEPS_PER_LOOP,
      additional_metrics=[regret_metric, suboptimal_arms_metric])
def main(unused_argv):
  tf.compat.v1.enable_v2_behavior()  # The trainer only runs with V2 enabled.

  with tf.device('/CPU:0'):  # due to b/128333994
    env = wheel_py_environment.WheelPyEnvironment(DELTA, MU_BASE, STD_BASE,
                                                  MU_HIGH, STD_HIGH,
                                                  BATCH_SIZE)
    environment = tf_py_environment.TFPyEnvironment(env)

    optimal_reward_fn = functools.partial(
        environment_utilities.tf_wheel_bandit_compute_optimal_reward,
        delta=DELTA,
        mu_inside=MU_BASE[0],
        mu_high=MU_HIGH)
    optimal_action_fn = functools.partial(
        environment_utilities.tf_wheel_bandit_compute_optimal_action,
        delta=DELTA)

    network = q_network.QNetwork(
        input_tensor_spec=environment.time_step_spec().observation,
        action_spec=environment.action_spec(),
        fc_layer_params=LAYERS)

    if FLAGS.agent == 'LinUCB':
      agent = lin_ucb_agent.LinearUCBAgent(
          time_step_spec=environment.time_step_spec(),
          action_spec=environment.action_spec(),
          alpha=AGENT_ALPHA,
          dtype=tf.float32)
    elif FLAGS.agent == 'LinTS':
      agent = lin_ts_agent.LinearThompsonSamplingAgent(
          time_step_spec=environment.time_step_spec(),
          action_spec=environment.action_spec(),
          alpha=AGENT_ALPHA,
          dtype=tf.float32)
    elif FLAGS.agent == 'epsGreedy':
      agent = eps_greedy_agent.NeuralEpsilonGreedyAgent(
          time_step_spec=environment.time_step_spec(),
          action_spec=environment.action_spec(),
          reward_network=network,
          optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=LR),
          epsilon=EPSILON)
    elif FLAGS.agent == 'random':
      agent = eps_greedy_agent.NeuralEpsilonGreedyAgent(
          time_step_spec=environment.time_step_spec(),
          action_spec=environment.action_spec(),
          reward_network=network,
          optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=LR),
          epsilon=1.)
    elif FLAGS.agent == 'Mix':
      emit_policy_info = (
          policy_utilities.InfoFields.PREDICTED_REWARDS_MEAN,)
      agent_epsgreedy = eps_greedy_agent.NeuralEpsilonGreedyAgent(
          time_step_spec=environment.time_step_spec(),
          action_spec=environment.action_spec(),
          reward_network=network,
          optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=LR),
          emit_policy_info=emit_policy_info,
          epsilon=EPSILON)
      agent_linucb = lin_ucb_agent.LinearUCBAgent(
          time_step_spec=environment.time_step_spec(),
          action_spec=environment.action_spec(),
          alpha=AGENT_ALPHA,
          emit_policy_info=emit_policy_info,
          dtype=tf.float32)
      agent_random = eps_greedy_agent.NeuralEpsilonGreedyAgent(
          time_step_spec=environment.time_step_spec(),
          action_spec=environment.action_spec(),
          reward_network=network,
          optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=LR),
          emit_policy_info=emit_policy_info,
          epsilon=1.)
      agent_halfrandom = eps_greedy_agent.NeuralEpsilonGreedyAgent(
          time_step_spec=environment.time_step_spec(),
          action_spec=environment.action_spec(),
          reward_network=network,
          optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=LR),
          emit_policy_info=emit_policy_info,
          epsilon=0.5)
      agent = exp3_mixture_agent.Exp3MixtureAgent(
          (agent_epsgreedy, agent_linucb, agent_random, agent_halfrandom))

    regret_metric = tf_bandit_metrics.RegretMetric(optimal_reward_fn)
    suboptimal_arms_metric = tf_bandit_metrics.SuboptimalArmsMetric(
        optimal_action_fn)

    trainer.train(
        root_dir=FLAGS.root_dir,
        agent=agent,
        environment=environment,
        training_loops=TRAINING_LOOPS,
        steps_per_loop=STEPS_PER_LOOP,
        additional_metrics=[regret_metric, suboptimal_arms_metric])
def main(unused_argv):
  tf.compat.v1.enable_v2_behavior()  # The trainer only runs with V2 enabled.

  with tf.device('/CPU:0'):  # due to b/128333994
    if FLAGS.normalize_reward_fns:
      action_reward_fns = (
          environment_utilities.normalized_sliding_linear_reward_fn_generator(
              CONTEXT_DIM, NUM_ACTIONS, REWARD_NOISE_VARIANCE))
    else:
      action_reward_fns = (
          environment_utilities.sliding_linear_reward_fn_generator(
              CONTEXT_DIM, NUM_ACTIONS, REWARD_NOISE_VARIANCE))

    env = sspe.StationaryStochasticPyEnvironment(
        functools.partial(
            environment_utilities.context_sampling_fn,
            batch_size=BATCH_SIZE,
            context_dim=CONTEXT_DIM),
        action_reward_fns,
        batch_size=BATCH_SIZE)
    mask_split_fn = None
    if FLAGS.num_disabled_actions > 0:
      mask_split_fn = lambda x: (x[0], x[1])
      env = wrappers.ExtraDisabledActionsWrapper(env,
                                                 FLAGS.num_disabled_actions)
    environment = tf_py_environment.TFPyEnvironment(env)

    optimal_reward_fn = functools.partial(
        environment_utilities.tf_compute_optimal_reward,
        per_action_reward_fns=action_reward_fns)
    optimal_action_fn = functools.partial(
        environment_utilities.tf_compute_optimal_action,
        per_action_reward_fns=action_reward_fns)
    network_input_spec = environment.time_step_spec().observation
    if FLAGS.num_disabled_actions > 0:

      def _apply_only_to_observation(fn):

        def result_fn(obs):
          return fn(obs[0])

        return result_fn

      optimal_action_fn = _apply_only_to_observation(optimal_action_fn)
      optimal_reward_fn = _apply_only_to_observation(optimal_reward_fn)
      network_input_spec = network_input_spec[0]

    network = q_network.QNetwork(
        input_tensor_spec=network_input_spec,
        action_spec=environment.action_spec(),
        fc_layer_params=LAYERS)

    if FLAGS.agent == 'LinUCB':
      agent = lin_ucb_agent.LinearUCBAgent(
          time_step_spec=environment.time_step_spec(),
          action_spec=environment.action_spec(),
          alpha=AGENT_ALPHA,
          dtype=tf.float32,
          observation_and_action_constraint_splitter=mask_split_fn)
    elif FLAGS.agent == 'LinTS':
      agent = lin_ts_agent.LinearThompsonSamplingAgent(
          time_step_spec=environment.time_step_spec(),
          action_spec=environment.action_spec(),
          alpha=AGENT_ALPHA,
          dtype=tf.float32,
          observation_and_action_constraint_splitter=mask_split_fn)
    elif FLAGS.agent == 'epsGreedy':
      agent = neural_epsilon_greedy_agent.NeuralEpsilonGreedyAgent(
          time_step_spec=environment.time_step_spec(),
          action_spec=environment.action_spec(),
          reward_network=network,
          optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=LR),
          epsilon=EPSILON,
          observation_and_action_constraint_splitter=mask_split_fn)
    elif FLAGS.agent == 'Mix':
      assert FLAGS.num_disabled_actions == 0, (
          'Extra actions with mixture agent not supported.')
      emit_policy_info = policy_utilities.InfoFields.PREDICTED_REWARDS_MEAN
      agent_linucb = lin_ucb_agent.LinearUCBAgent(
          time_step_spec=environment.time_step_spec(),
          action_spec=environment.action_spec(),
          emit_policy_info=emit_policy_info,
          alpha=AGENT_ALPHA,
          dtype=tf.float32)
      agent_lints = lin_ts_agent.LinearThompsonSamplingAgent(
          time_step_spec=environment.time_step_spec(),
          action_spec=environment.action_spec(),
          emit_policy_info=emit_policy_info,
          alpha=AGENT_ALPHA,
          dtype=tf.float32)
      agent_epsgreedy = neural_epsilon_greedy_agent.NeuralEpsilonGreedyAgent(
          time_step_spec=environment.time_step_spec(),
          action_spec=environment.action_spec(),
          reward_network=network,
          optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=LR),
          emit_policy_info=emit_policy_info,
          epsilon=EPSILON)
      agent = exp3_mixture_agent.Exp3MixtureAgent(
          (agent_linucb, agent_lints, agent_epsgreedy))

    regret_metric = tf_bandit_metrics.RegretMetric(optimal_reward_fn)
    suboptimal_arms_metric = tf_bandit_metrics.SuboptimalArmsMetric(
        optimal_action_fn)

    trainer.train(
        root_dir=FLAGS.root_dir,
        agent=agent,
        environment=environment,
        training_loops=TRAINING_LOOPS,
        steps_per_loop=STEPS_PER_LOOP,
        additional_metrics=[regret_metric, suboptimal_arms_metric])
def main(unused_argv):
  tf.compat.v1.enable_v2_behavior()  # The trainer only runs with V2 enabled.

  class LinearNormalReward(object):

    def __init__(self, theta):
      self.theta = theta

    def __call__(self, x):
      mu = np.dot(x, self.theta)
      return np.random.normal(mu, 1)

  def _global_context_sampling_fn():
    return np.random.randint(-10, 10, [4]).astype(np.float32)

  def _arm_context_sampling_fn():
    return np.random.randint(-2, 3, [5]).astype(np.float32)

  reward_fn = LinearNormalReward(HIDDEN_PARAM)
  observation_and_action_constraint_splitter = None
  num_actions_fn = None
  variable_action_method = bandit_spec_utils.VariableActionMethod.FIXED
  if FLAGS.add_num_actions_feature:
    num_actions_fn = lambda: NUM_ACTIONS
    variable_action_method = (
        bandit_spec_utils.VariableActionMethod.NUM_ACTIONS_FEATURE)

  env = sspe.StationaryStochasticPerArmPyEnvironment(
      _global_context_sampling_fn,
      _arm_context_sampling_fn,
      NUM_ACTIONS,
      reward_fn,
      num_actions_fn,
      batch_size=BATCH_SIZE,
      variable_action_method=variable_action_method)
  environment = tf_py_environment.TFPyEnvironment(env)

  if FLAGS.agent == 'LinUCB':
    agent = lin_ucb_agent.LinearUCBAgent(
        time_step_spec=environment.time_step_spec(),
        action_spec=environment.action_spec(),
        alpha=AGENT_ALPHA,
        accepts_per_arm_features=True,
        dtype=tf.float32)
  elif FLAGS.agent == 'LinTS':
    agent = lin_ts_agent.LinearThompsonSamplingAgent(
        time_step_spec=environment.time_step_spec(),
        action_spec=environment.action_spec(),
        alpha=AGENT_ALPHA,
        observation_and_action_constraint_splitter=(
            observation_and_action_constraint_splitter),
        accepts_per_arm_features=True,
        dtype=tf.float32)
  elif FLAGS.agent == 'epsGreedy':
    obs_spec = environment.observation_spec()
    if FLAGS.network == 'commontower':
      network = (
          global_and_arm_feature_network
          .create_feed_forward_common_tower_network(obs_spec, (40, 30),
                                                    (30, 40), (40, 20)))
    elif FLAGS.network == 'dotproduct':
      network = (
          global_and_arm_feature_network
          .create_feed_forward_dot_product_network(obs_spec, (4, 3, 6),
                                                   (3, 4, 6)))
    agent = neural_epsilon_greedy_agent.NeuralEpsilonGreedyAgent(
        time_step_spec=environment.time_step_spec(),
        action_spec=environment.action_spec(),
        reward_network=network,
        optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=LR),
        epsilon=EPSILON,
        observation_and_action_constraint_splitter=(
            observation_and_action_constraint_splitter),
        accepts_per_arm_features=True,
        emit_policy_info=policy_utilities.InfoFields.PREDICTED_REWARDS_MEAN)
  elif FLAGS.agent == 'NeuralLinUCB':
    obs_spec = environment.observation_spec()
    network = (
        global_and_arm_feature_network
        .create_feed_forward_common_tower_network(obs_spec, (40, 30), (30, 40),
                                                  (40, 20), ENCODING_DIM))
    agent = neural_linucb_agent.NeuralLinUCBAgent(
        time_step_spec=environment.time_step_spec(),
        action_spec=environment.action_spec(),
        encoding_network=network,
        encoding_network_num_train_steps=EPS_PHASE_STEPS,
        encoding_dim=ENCODING_DIM,
        optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=LR),
        alpha=1.0,
        gamma=1.0,
        epsilon_greedy=EPSILON,
        accepts_per_arm_features=True,
        debug_summaries=True,
        summarize_grads_and_vars=True,
        emit_policy_info=policy_utilities.InfoFields.PREDICTED_REWARDS_MEAN)

  def _all_rewards(observation, hidden_param):
    """Outputs rewards for all actions, given an observation."""
    hidden_param = tf.cast(hidden_param, dtype=tf.float32)
    global_obs = observation[bandit_spec_utils.GLOBAL_FEATURE_KEY]
    per_arm_obs = observation[bandit_spec_utils.PER_ARM_FEATURE_KEY]
    num_actions = tf.shape(per_arm_obs)[1]
    tiled_global = tf.tile(
        tf.expand_dims(global_obs, axis=1), [1, num_actions, 1])
    concatenated = tf.concat([tiled_global, per_arm_obs], axis=-1)
    rewards = tf.linalg.matvec(concatenated, hidden_param)
    return rewards

  def optimal_reward(observation, hidden_param):
    return tf.reduce_max(_all_rewards(observation, hidden_param), axis=1)

  def optimal_action(observation, hidden_param):
    return tf.argmax(
        _all_rewards(observation, hidden_param), axis=1, output_type=tf.int32)

  optimal_reward_fn = functools.partial(
      optimal_reward, hidden_param=HIDDEN_PARAM)
  optimal_action_fn = functools.partial(
      optimal_action, hidden_param=HIDDEN_PARAM)
  regret_metric = tf_bandit_metrics.RegretMetric(optimal_reward_fn)
  suboptimal_arms_metric = tf_bandit_metrics.SuboptimalArmsMetric(
      optimal_action_fn)

  if FLAGS.drop_arm_obs:
    drop_arm_feature_fn = functools.partial(
        bandit_spec_utils.drop_arm_observation)
  else:
    drop_arm_feature_fn = None
  trainer.train(
      root_dir=FLAGS.root_dir,
      agent=agent,
      environment=environment,
      training_loops=TRAINING_LOOPS,
      steps_per_loop=STEPS_PER_LOOP,
      additional_metrics=[regret_metric, suboptimal_arms_metric],
      training_data_spec_transformation_fn=drop_arm_feature_fn)
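# Shape walk-through for `_all_rewards` above (illustrative; the feature sizes
# come from the sampling functions defined in this file: 4 global features and
# 5 per-arm features):
#
#   global_obs:   [BATCH_SIZE, 4]        -> tiled to [BATCH_SIZE, num_actions, 4]
#   per_arm_obs:  [BATCH_SIZE, num_actions, 5]
#   concatenated: [BATCH_SIZE, num_actions, 9]
#   matvec with the (4 + 5)-dimensional HIDDEN_PARAM -> rewards of shape
#   [BATCH_SIZE, num_actions]
#
# `optimal_reward` then takes the max and `optimal_action` the argmax over the
# action axis, which is what the regret and suboptimal-arms metrics consume.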
def main(unused_argv):
  tf.compat.v1.enable_v2_behavior()  # The trainer only runs with V2 enabled.

  with tf.device('/CPU:0'):  # due to b/128333994
    action_reward_fns = (
        environment_utilities.structured_linear_reward_fn_generator(
            CONTEXT_DIM, NUM_ACTIONS, REWARD_NOISE_VARIANCE))

    env = sspe.StationaryStochasticPyEnvironment(
        functools.partial(
            environment_utilities.context_sampling_fn,
            batch_size=BATCH_SIZE,
            context_dim=CONTEXT_DIM),
        action_reward_fns,
        batch_size=BATCH_SIZE)
    environment = tf_py_environment.TFPyEnvironment(env)

    optimal_reward_fn = functools.partial(
        environment_utilities.tf_compute_optimal_reward,
        per_action_reward_fns=action_reward_fns)

    optimal_action_fn = functools.partial(
        environment_utilities.tf_compute_optimal_action,
        per_action_reward_fns=action_reward_fns)

    if FLAGS.agent == 'LinUCB':
      agent = lin_ucb_agent.LinearUCBAgent(
          time_step_spec=environment.time_step_spec(),
          action_spec=environment.action_spec(),
          alpha=AGENT_ALPHA,
          dtype=tf.float32)
    elif FLAGS.agent == 'epsGreedy':
      laplacian_matrix = utils.build_laplacian_over_ordinal_integer_actions(
          environment.action_spec())
      network = q_network.QNetwork(
          input_tensor_spec=environment.time_step_spec().observation,
          action_spec=environment.action_spec(),
          fc_layer_params=REWARD_NETWORK_LAYER_PARAMS)
      agent = eps_greedy_agent.NeuralEpsilonGreedyAgent(
          time_step_spec=environment.time_step_spec(),
          action_spec=environment.action_spec(),
          reward_network=network,
          optimizer=tf.compat.v1.train.AdamOptimizer(
              learning_rate=NN_LEARNING_RATE),
          epsilon=EPSILON,
          laplacian_matrix=laplacian_matrix,
          laplacian_smoothing_weight=0.01)
    elif FLAGS.agent == 'LinTS':
      agent = lin_ts_agent.LinearThompsonSamplingAgent(
          time_step_spec=environment.time_step_spec(),
          action_spec=environment.action_spec(),
          alpha=AGENT_ALPHA,
          dtype=tf.float32)

    regret_metric = tf_bandit_metrics.RegretMetric(optimal_reward_fn)
    suboptimal_arms_metric = tf_bandit_metrics.SuboptimalArmsMetric(
        optimal_action_fn)

    trainer.train(
        root_dir=FLAGS.root_dir,
        agent=agent,
        environment=environment,
        training_loops=TRAINING_LOOPS,
        steps_per_loop=STEPS_PER_LOOP,
        additional_metrics=[regret_metric, suboptimal_arms_metric])
def main(unused_argv):
  tf.compat.v1.enable_v2_behavior()  # The trainer only runs with V2 enabled.

  with tf.device('/CPU:0'):  # due to b/128333994
    if FLAGS.normalize_reward_fns:
      action_reward_fns = (
          environment_utilities.normalized_sliding_linear_reward_fn_generator(
              CONTEXT_DIM, NUM_ACTIONS, REWARD_NOISE_VARIANCE))
    else:
      action_reward_fns = (
          environment_utilities.sliding_linear_reward_fn_generator(
              CONTEXT_DIM, NUM_ACTIONS, REWARD_NOISE_VARIANCE))

    env = sspe.StationaryStochasticPyEnvironment(
        functools.partial(
            environment_utilities.context_sampling_fn,
            batch_size=BATCH_SIZE,
            context_dim=CONTEXT_DIM),
        action_reward_fns,
        batch_size=BATCH_SIZE)
    mask_split_fn = None
    if FLAGS.num_disabled_actions > 0:
      mask_split_fn = lambda x: (x[0], x[1])
      env = wrappers.ExtraDisabledActionsWrapper(env,
                                                 FLAGS.num_disabled_actions)
    environment = tf_py_environment.TFPyEnvironment(env)

    optimal_reward_fn = functools.partial(
        environment_utilities.tf_compute_optimal_reward,
        per_action_reward_fns=action_reward_fns)
    optimal_action_fn = functools.partial(
        environment_utilities.tf_compute_optimal_action,
        per_action_reward_fns=action_reward_fns)
    network_input_spec = environment.time_step_spec().observation
    if FLAGS.num_disabled_actions > 0:

      def _apply_only_to_observation(fn):

        def result_fn(obs):
          return fn(obs[0])

        return result_fn

      optimal_action_fn = _apply_only_to_observation(optimal_action_fn)
      optimal_reward_fn = _apply_only_to_observation(optimal_reward_fn)
      network_input_spec = network_input_spec[0]

    network = q_network.QNetwork(
        input_tensor_spec=network_input_spec,
        action_spec=environment.action_spec(),
        fc_layer_params=LAYERS)

    if FLAGS.agent == 'LinUCB':
      agent = lin_ucb_agent.LinearUCBAgent(
          time_step_spec=environment.time_step_spec(),
          action_spec=environment.action_spec(),
          alpha=AGENT_ALPHA,
          dtype=tf.float32,
          observation_and_action_constraint_splitter=mask_split_fn)
    elif FLAGS.agent == 'LinTS':
      agent = lin_ts_agent.LinearThompsonSamplingAgent(
          time_step_spec=environment.time_step_spec(),
          action_spec=environment.action_spec(),
          alpha=AGENT_ALPHA,
          dtype=tf.float32,
          observation_and_action_constraint_splitter=mask_split_fn)
    elif FLAGS.agent == 'epsGreedy':
      agent = neural_epsilon_greedy_agent.NeuralEpsilonGreedyAgent(
          time_step_spec=environment.time_step_spec(),
          action_spec=environment.action_spec(),
          reward_network=network,
          optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=LR),
          epsilon=EPSILON,
          observation_and_action_constraint_splitter=mask_split_fn)
    elif FLAGS.agent == 'Boltzmann':
      train_step_counter = tf.compat.v1.train.get_or_create_global_step()
      boundaries = [500]
      temp_values = [1000.0, TEMPERATURE]
      temp_schedule = tf.keras.optimizers.schedules.PiecewiseConstantDecay(
          boundaries, temp_values)

      def _temperature_fn():
        # Any variable used in the function needs to be saved in the policy.
        # This is true by default for the `train_step_counter`.
        return temp_schedule(train_step_counter)

      agent = neural_boltzmann_agent.NeuralBoltzmannAgent(
          time_step_spec=environment.time_step_spec(),
          action_spec=environment.action_spec(),
          reward_network=network,
          temperature=_temperature_fn,
          optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=LR),
          observation_and_action_constraint_splitter=mask_split_fn,
          train_step_counter=train_step_counter)
      # This is needed, otherwise the PolicySaver complains.
      agent.policy.step = train_step_counter
    elif FLAGS.agent == 'BoltzmannGumbel':
      num_samples_list = [
          tf.compat.v2.Variable(
              0, dtype=tf.int32, name='num_samples_{}'.format(k))
          for k in range(NUM_ACTIONS)
      ]
      agent = neural_boltzmann_agent.NeuralBoltzmannAgent(
          time_step_spec=environment.time_step_spec(),
          action_spec=environment.action_spec(),
          reward_network=network,
          boltzmann_gumbel_exploration_constant=250.0,
          optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=LR),
          observation_and_action_constraint_splitter=mask_split_fn,
          num_samples_list=num_samples_list)
    elif FLAGS.agent == 'Mix':
      assert FLAGS.num_disabled_actions == 0, (
          'Extra actions with mixture agent not supported.')
      emit_policy_info = policy_utilities.InfoFields.PREDICTED_REWARDS_MEAN
      agent_linucb = lin_ucb_agent.LinearUCBAgent(
          time_step_spec=environment.time_step_spec(),
          action_spec=environment.action_spec(),
          emit_policy_info=emit_policy_info,
          alpha=AGENT_ALPHA,
          dtype=tf.float32)
      agent_lints = lin_ts_agent.LinearThompsonSamplingAgent(
          time_step_spec=environment.time_step_spec(),
          action_spec=environment.action_spec(),
          emit_policy_info=emit_policy_info,
          alpha=AGENT_ALPHA,
          dtype=tf.float32)
      agent_epsgreedy = neural_epsilon_greedy_agent.NeuralEpsilonGreedyAgent(
          time_step_spec=environment.time_step_spec(),
          action_spec=environment.action_spec(),
          reward_network=network,
          optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=LR),
          emit_policy_info=emit_policy_info,
          epsilon=EPSILON)
      agent = exp3_mixture_agent.Exp3MixtureAgent(
          (agent_linucb, agent_lints, agent_epsgreedy))

    regret_metric = tf_bandit_metrics.RegretMetric(optimal_reward_fn)
    suboptimal_arms_metric = tf_bandit_metrics.SuboptimalArmsMetric(
        optimal_action_fn)

    trainer.train(
        root_dir=FLAGS.root_dir,
        agent=agent,
        environment=environment,
        training_loops=TRAINING_LOOPS,
        steps_per_loop=STEPS_PER_LOOP,
        additional_metrics=[regret_metric, suboptimal_arms_metric])