Example #1
 def _populate_policy_info(self, arm_observations, chosen_actions,
                           rewards_for_argmax, est_rewards):
     if self._accepts_per_arm_features:
         # Saving the features for the chosen action to the policy_info.
         chosen_arm_features = tf.gather(params=arm_observations,
                                         indices=chosen_actions,
                                         batch_dims=1)
         policy_info = policy_utilities.PerArmPolicyInfo(
             predicted_rewards_sampled=(
                 rewards_for_argmax
                 if policy_utilities.InfoFields.PREDICTED_REWARDS_SAMPLED
                 in self._emit_policy_info else ()),
             predicted_rewards_mean=(
                 tf.stack(est_rewards, axis=-1)
                 if policy_utilities.InfoFields.PREDICTED_REWARDS_MEAN
                 in self._emit_policy_info else ()),
             chosen_arm_features=chosen_arm_features)
     else:
         policy_info = policy_utilities.PolicyInfo(
             predicted_rewards_sampled=(
                 rewards_for_argmax
                 if policy_utilities.InfoFields.PREDICTED_REWARDS_SAMPLED
                 in self._emit_policy_info else ()),
             predicted_rewards_mean=(
                 tf.stack(est_rewards, axis=-1)
                 if policy_utilities.InfoFields.PREDICTED_REWARDS_MEAN
                 in self._emit_policy_info else ()))
     return policy_info
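
The per-arm branch above relies on `tf.gather(..., batch_dims=1)` to pull each batch row's chosen arm out of the `[batch, num_arms, arm_dim]` feature tensor. A minimal standalone sketch of that gather with made-up shapes (plain TensorFlow, not the policy code itself):

import tensorflow as tf

# Hypothetical shapes: batch of 2, 4 arms, 3 features per arm.
arm_observations = tf.reshape(tf.range(24, dtype=tf.float32), [2, 4, 3])
chosen_actions = tf.constant([0, 3], dtype=tf.int32)

# With batch_dims=1, row i of the result is arm_observations[i, chosen_actions[i], :].
chosen_arm_features = tf.gather(
    params=arm_observations, indices=chosen_actions, batch_dims=1)
print(chosen_arm_features)  # shape [2, 3]: arm 0 of row 0, arm 3 of row 1.
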
Example #2
 def testTrainPerArmAgent(self):
     obs_spec = bandit_spec_utils.create_per_arm_observation_spec(2, 3, 4)
     time_step_spec = ts.time_step_spec(obs_spec)
     reward_net = (
         global_and_arm_feature_network.create_feed_forward_per_arm_network(
             obs_spec, (4, 3), (3, 4), (4, 2)))
     optimizer = tf.compat.v1.train.GradientDescentOptimizer(
         learning_rate=0.1)
     agent = greedy_agent.GreedyRewardPredictionAgent(
         time_step_spec,
         self._action_spec,
         reward_network=reward_net,
         accepts_per_arm_features=True,
         optimizer=optimizer)
     observations = {
         bandit_spec_utils.GLOBAL_FEATURE_KEY:
         tf.constant([[1, 2], [3, 4]], dtype=tf.float32),
         bandit_spec_utils.PER_ARM_FEATURE_KEY:
         tf.cast(tf.reshape(tf.range(24), shape=[2, 4, 3]),
                 dtype=tf.float32)
     }
     actions = np.array([0, 3], dtype=np.int32)
     rewards = np.array([0.5, 3.0], dtype=np.float32)
     initial_step, final_step = _get_initial_and_final_steps(
         observations, rewards)
     action_step = policy_step.PolicyStep(
         action=tf.convert_to_tensor(actions),
         info=policy_utilities.PerArmPolicyInfo(
             chosen_arm_features=np.array([[1, 2, 3], [3, 2, 1]],
                                          dtype=np.float32)))
     experience = _get_experience(initial_step, action_step, final_step)
     agent.train(experience, None)
     self.evaluate(tf.compat.v1.initialize_all_variables())
Example #3
 def _populate_policy_info_spec(self, observation_spec,
                                observation_and_action_constraint_splitter):
     predicted_rewards_mean = ()
     if (policy_utilities.InfoFields.PREDICTED_REWARDS_MEAN
             in self._emit_policy_info):
         predicted_rewards_mean = tensor_spec.TensorSpec(
             [self._num_actions], dtype=self._dtype)
     predicted_rewards_sampled = ()
     if (policy_utilities.InfoFields.PREDICTED_REWARDS_SAMPLED
             in self._emit_policy_info):
         predicted_rewards_sampled = tensor_spec.TensorSpec(
             [self._num_actions], dtype=self._dtype)
     if self._accepts_per_arm_features:
         # The features for the chosen arm are saved to policy_info.
         chosen_arm_features_info = (
             policy_utilities.create_chosen_arm_features_info_spec(
                 observation_spec))
         info_spec = policy_utilities.PerArmPolicyInfo(
             predicted_rewards_mean=predicted_rewards_mean,
             predicted_rewards_sampled=predicted_rewards_sampled,
             chosen_arm_features=chosen_arm_features_info)
     else:
         info_spec = policy_utilities.PolicyInfo(
             predicted_rewards_mean=predicted_rewards_mean,
             predicted_rewards_sampled=predicted_rewards_sampled)
     return info_spec
Example #4
 def _populate_policy_info_spec(self, context_spec):
     predicted_rewards_mean = ()
     if (policy_utilities.InfoFields.PREDICTED_REWARDS_MEAN
             in self._emit_policy_info):
         predicted_rewards_mean = tensor_spec.TensorSpec(
             [self._num_actions], dtype=self._dtype)
     predicted_rewards_sampled = ()
     if (policy_utilities.InfoFields.PREDICTED_REWARDS_SAMPLED
             in self._emit_policy_info):
         predicted_rewards_sampled = tensor_spec.TensorSpec(
             [self._num_actions], dtype=self._dtype)
     if self._accepts_per_arm_features:
         # The features for the chosen arm are saved to policy_info.
         arm_spec = context_spec[bandit_spec_utils.PER_ARM_FEATURE_KEY]
         chosen_arm_features_info = tensor_spec.TensorSpec(
             dtype=arm_spec.dtype,
             shape=arm_spec.shape[1:],
             name='chosen_arm_features')
         info_spec = policy_utilities.PerArmPolicyInfo(
             predicted_rewards_mean=predicted_rewards_mean,
             predicted_rewards_sampled=predicted_rewards_sampled,
             chosen_arm_features=chosen_arm_features_info)
     else:
         info_spec = policy_utilities.PolicyInfo(
             predicted_rewards_mean=predicted_rewards_mean,
             predicted_rewards_sampled=predicted_rewards_sampled)
     return info_spec
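
The spec construction above drops the leading `num_actions` dimension (`arm_spec.shape[1:]`), so the stored chosen-arm features describe a single arm rather than the whole padded arm axis. A small sketch of the same slicing on a hypothetical spec:

import tensorflow as tf

# Hypothetical per-arm spec: 4 arms, 3 features per arm.
arm_spec = tf.TensorSpec(shape=[4, 3], dtype=tf.float32)
# Dropping the leading num_actions dimension leaves one arm's feature vector.
chosen_arm_spec = tf.TensorSpec(
    shape=arm_spec.shape[1:], dtype=arm_spec.dtype, name='chosen_arm_features')
print(chosen_arm_spec.shape)  # (3,)
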
Example #5
 def testTrainPerArmAgentVariableActions(self):
   num_actions = 5
   obs_spec = bandit_spec_utils.create_per_arm_observation_spec(
       2, 3, num_actions, add_num_actions_feature=True)
   time_step_spec = time_step.time_step_spec(obs_spec)
   action_spec = tensor_spec.BoundedTensorSpec(
       dtype=tf.int32, shape=(), minimum=0, maximum=num_actions - 1)
   encoding_dim = 10
   encoder = (
       global_and_arm_feature_network.create_feed_forward_common_tower_network(
           obs_spec, (4, 3), (3, 4), (4, 2), encoding_dim))
   agent = neural_linucb_agent.NeuralLinUCBAgent(
       time_step_spec=time_step_spec,
       action_spec=action_spec,
       encoding_network=encoder,
       encoding_network_num_train_steps=10,
       encoding_dim=encoding_dim,
       accepts_per_arm_features=True,
       optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=0.001))
   observations = {
       bandit_spec_utils.GLOBAL_FEATURE_KEY:
           tf.constant([[1, 2], [3, 4]], dtype=tf.float32),
       bandit_spec_utils.PER_ARM_FEATURE_KEY:
           tf.cast(
               tf.reshape(tf.range(30), shape=[2, 5, 3]), dtype=tf.float32),
       bandit_spec_utils.NUM_ACTIONS_FEATURE_KEY:
           tf.constant([3, 4], dtype=tf.int32)
   }
   actions = np.array([0, 3], dtype=np.int32)
   rewards = np.array([0.5, 3.0], dtype=np.float32)
   initial_step = time_step.TimeStep(
       tf.constant(
           time_step.StepType.FIRST,
           dtype=tf.int32,
           shape=[2],
           name='step_type'),
       tf.constant(0.0, dtype=tf.float32, shape=[2], name='reward'),
       tf.constant(1.0, dtype=tf.float32, shape=[2], name='discount'),
       observations)
   final_step = time_step.TimeStep(
       tf.constant(
           time_step.StepType.LAST,
           dtype=tf.int32,
           shape=[2],
           name='step_type'),
       tf.constant(rewards, dtype=tf.float32, name='reward'),
       tf.constant(1.0, dtype=tf.float32, shape=[2], name='discount'),
       observations)
   action_step = policy_step.PolicyStep(
       action=tf.convert_to_tensor(actions),
       info=policy_utilities.PerArmPolicyInfo(
           chosen_arm_features=np.array([[1, 2, 3], [3, 2, 1]],
                                        dtype=np.float32)))
   experience = _get_experience(initial_step, action_step, final_step)
   loss_info, _ = agent.train(experience, None)
   self.evaluate(tf.compat.v1.initialize_all_variables())
   loss_value = self.evaluate(loss_info)
   self.assertGreater(loss_value, 0.0)
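
This test feeds per-row action counts through `NUM_ACTIONS_FEATURE_KEY`; downstream, such counts are typically turned into a 0-1 validity mask over the padded arm axis. A minimal sketch of that conversion in plain TensorFlow (illustrative only, not the agent's internals):

import tensorflow as tf

max_num_actions = 5
num_actions_per_row = tf.constant([3, 4], dtype=tf.int32)  # mirrors the test above

# Row i has its first num_actions_per_row[i] arms valid; the rest are padding.
mask = tf.sequence_mask(num_actions_per_row, maxlen=max_num_actions, dtype=tf.int32)
print(mask)  # [[1 1 1 0 0] [1 1 1 1 0]]
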
Example #6
  def testLinearAgentUpdatePerArmFeatures(self,
                                          batch_size,
                                          context_dim,
                                          exploration_policy,
                                          dtype,
                                          use_eigendecomp=False):
    """Check that the agent updates for specified actions and rewards."""

    # Construct a `Trajectory` for the given action, observation, reward.
    num_actions = 5
    global_context_dim = context_dim
    arm_context_dim = 3
    initial_step, final_step = (
        _get_initial_and_final_steps_with_per_arm_features(
            batch_size, global_context_dim, num_actions, arm_context_dim))
    action = np.random.randint(num_actions, size=batch_size, dtype=np.int32)
    action_step = policy_step.PolicyStep(
        action=tf.convert_to_tensor(action),
        info=policy_utilities.PerArmPolicyInfo(
            chosen_arm_features=np.arange(
                batch_size * arm_context_dim, dtype=np.float32).reshape(
                    [batch_size, arm_context_dim])))
    experience = _get_experience(initial_step, action_step, final_step)

    # Construct an agent and perform the update.
    observation_spec = bandit_spec_utils.create_per_arm_observation_spec(
        context_dim, arm_context_dim, num_actions)
    time_step_spec = time_step.time_step_spec(observation_spec)
    action_spec = tensor_spec.BoundedTensorSpec(
        dtype=tf.int32, shape=(), minimum=0, maximum=num_actions - 1)
    agent = linear_agent.LinearBanditAgent(
        exploration_policy=exploration_policy,
        time_step_spec=time_step_spec,
        action_spec=action_spec,
        use_eigendecomp=use_eigendecomp,
        accepts_per_arm_features=True,
        dtype=dtype)
    self.evaluate(agent.initialize())
    loss_info = agent.train(experience)
    self.evaluate(loss_info)
    final_a = self.evaluate(agent.cov_matrix)
    final_b = self.evaluate(agent.data_vector)

    # Compute the expected updated estimates.
    global_observation = experience.observation[
        bandit_spec_utils.GLOBAL_FEATURE_KEY]
    arm_observation = experience.policy_info.chosen_arm_features
    overall_observation = tf.squeeze(
        tf.concat([global_observation, arm_observation], axis=-1), axis=1)
    rewards = tf.squeeze(experience.reward, axis=1)

    expected_a_new = tf.matmul(
        overall_observation, overall_observation, transpose_a=True)
    expected_b_new = bandit_utils.sum_reward_weighted_observations(
        rewards, overall_observation)
    self.assertAllClose(expected_a_new, final_a[0])
    self.assertAllClose(expected_b_new, final_b[0])
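
The expected values checked here are the usual linear-bandit sufficient statistics: with per-arm features there is a single model, the covariance update is X^T X over the concatenated (global, chosen-arm) features, and the data vector is the reward-weighted sum of those features. A short NumPy sketch of the same arithmetic with made-up numbers:

import numpy as np

# Hypothetical batch of 2: concatenated (global, chosen-arm) features and rewards.
overall_observation = np.array([[1., 2., 1., 2., 3.],
                                [3., 4., 3., 2., 1.]])
rewards = np.array([0.5, 3.0])

expected_a = overall_observation.T @ overall_observation            # X^T X
expected_b = (rewards[:, None] * overall_observation).sum(axis=0)   # sum_i r_i * x_i
print(expected_a.shape, expected_b.shape)  # (5, 5) (5,)
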
Example #7
    def testNumActions(self, dtype):
        if not dtype.is_integer:
            self.skipTest('testNumActions only applies to integer dtypes')

        batch_size = 1000

        # Create action spec, time_step and spec with max_num_arms = 4.
        action_spec = tensor_spec.BoundedTensorSpec((), dtype, 0, 3)
        time_step_spec, time_step_1 = self.create_time_step(
            use_per_arm_features=True, num_arms=2)
        _, time_step_2 = self.create_time_step(use_per_arm_features=True,
                                               num_arms=3)
        # First half of time_step batch will have num_action = 2 and second
        # half will have num_actions = 3.
        half_batch_size = int(batch_size / 2)
        time_step = nest_utils.stack_nested_tensors(
            [time_step_1] * half_batch_size + [time_step_2] * half_batch_size)

        # The features for the chosen arm are saved to policy_info.
        chosen_arm_features_info = (
            policy_utilities.create_chosen_arm_features_info_spec(
                time_step_spec.observation))
        info_spec = policy_utilities.PerArmPolicyInfo(
            chosen_arm_features=chosen_arm_features_info)

        policy = random_tf_policy.RandomTFPolicy(time_step_spec=time_step_spec,
                                                 action_spec=action_spec,
                                                 info_spec=info_spec,
                                                 accepts_per_arm_features=True,
                                                 emit_log_probability=True)

        action_step = policy.action(time_step)
        tf.nest.assert_same_structure(action_spec, action_step.action)

        # Sample from the policy 1000 times, and ensure that actions considered
        # invalid according to the mask are never chosen.
        step = self.evaluate(action_step)
        action_ = step.action
        self.assertTrue(np.all(action_ >= 0))
        self.assertTrue(np.all(action_[:half_batch_size] < 2))
        self.assertTrue(np.all(action_[half_batch_size:] < 3))

        # With num_action valid actions, probabilities should be 1/num_actions.
        self.assertAllClose(
            step.info.log_probability[:half_batch_size],
            tf.constant(np.log(1. / 2), shape=[half_batch_size]))
        self.assertAllClose(
            step.info.log_probability[half_batch_size:],
            tf.constant(np.log(1. / 3), shape=[half_batch_size]))
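
The final assertions use the fact that a uniform random choice over k valid arms assigns probability 1/k to each, so the emitted log-probability must equal -log(k); a two-line check of the constants being compared:

import numpy as np

for k in (2, 3):
    print(k, np.log(1.0 / k))  # -0.693... and -1.098..., matching the assertions
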
Example #8
 def testTrainPerArmAgent(self):
     obs_spec = bandit_spec_utils.create_per_arm_observation_spec(
         2, 3, 4, add_num_actions_feature=True)
     time_step_spec = ts.time_step_spec(observation_spec=obs_spec,
                                        reward_spec=tensor_spec.TensorSpec(
                                            [3], tf.float32))
     action_spec = tensor_spec.BoundedTensorSpec((), tf.int32, 0, 3)
     networks_and_loss_fns = [
         (global_and_arm_feature_network.
          create_feed_forward_common_tower_network(obs_spec, (4, 3), (3, 4),
                                                   (4, 2)),
          tf.compat.v1.losses.mean_squared_error) for _ in range(3)
     ]
     optimizer = tf.compat.v1.train.GradientDescentOptimizer(
         learning_rate=0.01)
     agent = greedy_multi_objective_agent.GreedyMultiObjectiveNeuralAgent(
         time_step_spec,
         action_spec,
         self._scalarizer,
         objective_network_and_loss_fn_sequence=networks_and_loss_fns,
         accepts_per_arm_features=True,
         optimizer=optimizer)
     observations = {
         bandit_spec_utils.GLOBAL_FEATURE_KEY:
         tf.constant([[1, 2], [3, 4]], dtype=tf.float32),
         bandit_spec_utils.PER_ARM_FEATURE_KEY:
         tf.cast(tf.reshape(tf.range(24), shape=[2, 4, 3]),
                 dtype=tf.float32),
         bandit_spec_utils.NUM_ACTIONS_FEATURE_KEY:
         tf.ones([2], dtype=tf.int32)
     }
     actions = np.array([0, 3], dtype=np.int32)
     objectives = np.array([[1, 2, 3], [6, 5, 4]], dtype=np.float32)
     initial_step, final_step = _get_initial_and_final_steps(
         observations, objectives)
     action_step = policy_step.PolicyStep(
         action=tf.convert_to_tensor(actions),
         info=policy_utilities.PerArmPolicyInfo(
             chosen_arm_features=np.array([[1, 2, 3], [3, 2, 1]],
                                          dtype=np.float32)))
     experience = _get_experience(initial_step, action_step, final_step)
     agent.train(experience, None)
     self.evaluate(tf.compat.v1.initialize_all_variables())
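
This agent trains one network per objective; at decision time the per-objective predictions are combined by the scalarizer before a greedy argmax. A rough sketch of that idea using a hand-rolled weighted sum (the weights and shapes are hypothetical, not the test's `self._scalarizer`):

import tensorflow as tf

# Hypothetical predictions: batch of 2, 3 objectives, 4 actions.
objective_values = tf.random.uniform([2, 3, 4])
weights = tf.constant([0.5, 0.3, 0.2])  # hypothetical scalarization weights

# Weighted sum over the objective axis, then a greedy action per batch row.
scalarized = tf.einsum('boa,o->ba', objective_values, weights)
greedy_actions = tf.argmax(scalarized, axis=-1, output_type=tf.int32)
print(greedy_actions)  # shape [2]
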
Example #9
 def testProcessExperiencePerArmFeaturesWithMask(self):
     mask_spec = tensor_spec.BoundedTensorSpec(shape=(5, ),
                                               minimum=0,
                                               maximum=1,
                                               dtype=tf.int32)
     observation_spec = ({
         'global':
         tf.TensorSpec(shape=(4, ), dtype=tf.float32),
         'per_arm': {
             'f1': tf.TensorSpec(shape=(5, ), dtype=tf.string),
             'f2': tf.TensorSpec(shape=(5, 2), dtype=tf.int32)
         }
     }, mask_spec)
     time_step_spec = time_step.time_step_spec(observation_spec)
     policy_info_spec = policy_utilities.PerArmPolicyInfo(
         chosen_arm_features={
             'f1': tf.TensorSpec(shape=(), dtype=tf.string),
             'f2': tf.TensorSpec(shape=(2, ), dtype=tf.int32)
         })
     training_data_spec = trajectory.Trajectory(
         step_type=time_step_spec.step_type,
         observation=time_step_spec.observation,
         action=tensor_spec.BoundedTensorSpec(shape=(),
                                              minimum=0,
                                              maximum=4,
                                              dtype=tf.int32),
         policy_info=policy_info_spec,
         next_step_type=time_step_spec.step_type,
         reward=tensor_spec.BoundedTensorSpec(shape=(),
                                              minimum=0,
                                              maximum=2,
                                              dtype=tf.float32),
         discount=time_step_spec.discount)
     experience = tensor_spec.sample_spec_nest(training_data_spec,
                                               outer_dims=(7, 2))
     observation, action, reward = utils.process_experience_for_neural_agents(
         experience, lambda x: (x[0], x[1]), True, training_data_spec)
     self.assertEqual(
         observation['per_arm']['f1'][0],
         experience.policy_info.chosen_arm_features['f1'][0, 0])
     self.assertAllEqual(action, tf.zeros(14, dtype=tf.int32))
     self.assertEqual(reward[0], experience.reward[0, 0])
Example #10
 def testTrainPerArmAgentWithMask(self):
     num_actions = 4
     obs_spec = bandit_spec_utils.create_per_arm_observation_spec(
         2, 3, num_actions)
     mask_obs_spec = (obs_spec,
                      tensor_spec.BoundedTensorSpec(shape=[num_actions],
                                                    minimum=0,
                                                    maximum=1,
                                                    dtype=tf.float32))
     time_step_spec = ts.time_step_spec(mask_obs_spec)
     reward_net = (global_and_arm_feature_network.
                   create_feed_forward_common_tower_network(
                       obs_spec, (4, 3), (3, 4), (4, 2)))
     optimizer = tf.compat.v1.train.GradientDescentOptimizer(
         learning_rate=0.1)
     agent = greedy_agent.GreedyRewardPredictionAgent(
         time_step_spec,
         self._action_spec,
         reward_network=reward_net,
         observation_and_action_constraint_splitter=lambda x: [x[0], x[1]],
         accepts_per_arm_features=True,
         optimizer=optimizer)
     observations = ({
         bandit_spec_utils.GLOBAL_FEATURE_KEY:
         tf.constant([[1, 2], [3, 4]], dtype=tf.float32),
         bandit_spec_utils.PER_ARM_FEATURE_KEY:
         tf.cast(tf.reshape(tf.range(24), shape=[2, 4, 3]),
                 dtype=tf.float32)
     }, tf.ones([2, num_actions]))
     actions = np.array([0, 3], dtype=np.int32)
     rewards = np.array([0.5, 3.0], dtype=np.float32)
     initial_step, final_step = _get_initial_and_final_steps_with_action_mask(
         observations, rewards)
     action_step = policy_step.PolicyStep(
         action=tf.convert_to_tensor(actions),
         info=policy_utilities.PerArmPolicyInfo(
             chosen_arm_features=np.array([[1, 2, 3], [3, 2, 1]],
                                          dtype=np.float32)))
     experience = _get_experience(initial_step, action_step, final_step)
     agent.train(experience, None)
     self.evaluate(tf.compat.v1.initialize_all_variables())
Example #11
    def __init__(self,
                 time_step_spec: types.TimeStep,
                 action_spec: types.NestedTensorSpec,
                 reward_network: types.Network,
                 observation_and_action_constraint_splitter: Optional[
                     types.Splitter] = None,
                 accepts_per_arm_features: bool = False,
                 constraints: Tuple[constr.NeuralConstraint, ...] = (),
                 emit_policy_info: Tuple[Text, ...] = (),
                 name: Optional[Text] = None):
        """Builds a GreedyRewardPredictionPolicy given a reward tf_agents.Network.

    This policy takes a tf_agents.Network predicting rewards and generates the
    action corresponding to the largest predicted reward.

    Args:
      time_step_spec: A `TimeStep` spec of the expected time_steps.
      action_spec: A nest of BoundedTensorSpec representing the actions.
      reward_network: An instance of a `tf_agents.network.Network`,
        callable via `network(observation, step_type) -> (output, final_state)`.
      observation_and_action_constraint_splitter: A function used for masking
        valid/invalid actions with each state of the environment. The function
        takes in a full observation and returns a tuple consisting of 1) the
        part of the observation intended as input to the network and 2) the
        mask.  The mask should be a 0-1 `Tensor` of shape
        `[batch_size, num_actions]`. This function should also work with a
        `TensorSpec` as input, and should output `TensorSpec` objects for the
        observation and mask.
      accepts_per_arm_features: (bool) Whether the policy accepts per-arm
        features.
      constraints: iterable of constraints objects that are instances of
        `tf_agents.bandits.agents.NeuralConstraint`.
      emit_policy_info: (tuple of strings) what side information we want to get
        as part of the policy info. Allowed values can be found in
        `policy_utilities.PolicyInfo`.
      name: The name of this policy. All variables in this module will fall
        under that name. Defaults to the class name.

    Raises:
      NotImplementedError: If `action_spec` contains more than one
        `BoundedTensorSpec` or the `BoundedTensorSpec` is not valid.
    """
        policy_utilities.check_no_mask_with_arm_features(
            accepts_per_arm_features,
            observation_and_action_constraint_splitter)
        flat_action_spec = tf.nest.flatten(action_spec)
        if len(flat_action_spec) > 1:
            raise NotImplementedError(
                'action_spec can only contain a single BoundedTensorSpec.')

        action_spec = flat_action_spec[0]
        if (not tensor_spec.is_bounded(action_spec)
                or not tensor_spec.is_discrete(action_spec)
                or action_spec.shape.rank > 1
                or action_spec.shape.num_elements() != 1):
            raise NotImplementedError(
                'action_spec must be a BoundedTensorSpec of type int32 and shape (). '
                'Found {}.'.format(action_spec))
        self._expected_num_actions = action_spec.maximum - action_spec.minimum + 1
        self._action_offset = action_spec.minimum
        reward_network.create_variables()
        self._reward_network = reward_network
        self._constraints = constraints

        self._emit_policy_info = emit_policy_info
        predicted_rewards_mean = ()
        if policy_utilities.InfoFields.PREDICTED_REWARDS_MEAN in emit_policy_info:
            predicted_rewards_mean = tensor_spec.TensorSpec(
                [self._expected_num_actions])
        bandit_policy_type = ()
        if policy_utilities.InfoFields.BANDIT_POLICY_TYPE in emit_policy_info:
            bandit_policy_type = (
                policy_utilities.create_bandit_policy_type_tensor_spec(
                    shape=[1]))
        if accepts_per_arm_features:
            # The features for the chosen arm are saved to policy_info.
            chosen_arm_features_info = (
                policy_utilities.create_chosen_arm_features_info_spec(
                    time_step_spec.observation))
            info_spec = policy_utilities.PerArmPolicyInfo(
                predicted_rewards_mean=predicted_rewards_mean,
                bandit_policy_type=bandit_policy_type,
                chosen_arm_features=chosen_arm_features_info)
        else:
            info_spec = policy_utilities.PolicyInfo(
                predicted_rewards_mean=predicted_rewards_mean,
                bandit_policy_type=bandit_policy_type)

        self._accepts_per_arm_features = accepts_per_arm_features

        super(GreedyRewardPredictionPolicy,
              self).__init__(time_step_spec,
                             action_spec,
                             policy_state_spec=reward_network.state_spec,
                             clip=False,
                             info_spec=info_spec,
                             emit_log_probability='log_probability'
                             in emit_policy_info,
                             observation_and_action_constraint_splitter=(
                                 observation_and_action_constraint_splitter),
                             name=name)
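
The `observation_and_action_constraint_splitter` described in the docstring is a user-supplied function that separates the network input from the action mask, and it must also work on specs. A minimal sketch, assuming the observation is a 2-tuple of (features, mask) as in the masked tests above:

import tensorflow as tf

def splitter(obs_and_mask):
    # Only indexes into the tuple structure, so it works both on concrete
    # tensors and on (TensorSpec, TensorSpec) nests, as the docstring requires.
    return obs_and_mask[0], obs_and_mask[1]

observation = (tf.constant([[1., 2.], [3., 4.]]),   # network input
               tf.ones([2, 4], dtype=tf.int32))     # 0-1 action mask
network_input, mask = splitter(observation)
print(network_input.shape, mask.shape)  # (2, 2) (2, 4)
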
Example #12
    def _distribution(self, time_step, policy_state):
        observation = time_step.observation
        if self.observation_and_action_constraint_splitter is not None:
            observation, _ = self.observation_and_action_constraint_splitter(
                observation)

        predictions, policy_state = self._reward_network(
            observation, time_step.step_type, policy_state)
        batch_size = tf.shape(predictions)[0]

        if isinstance(self._reward_network,
                      heteroscedastic_q_network.HeteroscedasticQNetwork):
            predicted_reward_values = predictions.q_value_logits
        else:
            predicted_reward_values = predictions

        predicted_reward_values.shape.with_rank_at_least(2)
        predicted_reward_values.shape.with_rank_at_most(3)
        if predicted_reward_values.shape[
                -1] is not None and predicted_reward_values.shape[
                    -1] != self._expected_num_actions:
            raise ValueError(
                'The number of actions ({}) does not match the reward_network output'
                ' size ({}).'.format(self._expected_num_actions,
                                     predicted_reward_values.shape[-1]))

        mask = constr.construct_mask_from_multiple_sources(
            time_step.observation,
            self._observation_and_action_constraint_splitter,
            self._constraints, self._expected_num_actions)

        # Argmax.
        if mask is not None:
            actions = policy_utilities.masked_argmax(
                predicted_reward_values,
                mask,
                output_type=self.action_spec.dtype)
        else:
            actions = tf.argmax(predicted_reward_values,
                                axis=-1,
                                output_type=self.action_spec.dtype)

        actions += self._action_offset

        bandit_policy_values = tf.fill(
            [batch_size, 1], policy_utilities.BanditPolicyType.GREEDY)

        if self._accepts_per_arm_features:
            # Saving the features for the chosen action to the policy_info.
            def gather_observation(obs):
                return tf.gather(params=obs, indices=actions, batch_dims=1)

            chosen_arm_features = tf.nest.map_structure(
                gather_observation,
                observation[bandit_spec_utils.PER_ARM_FEATURE_KEY])
            policy_info = policy_utilities.PerArmPolicyInfo(
                log_probability=tf.zeros([batch_size], tf.float32)
                if policy_utilities.InfoFields.LOG_PROBABILITY
                in self._emit_policy_info else (),
                predicted_rewards_mean=(
                    predicted_reward_values
                    if policy_utilities.InfoFields.PREDICTED_REWARDS_MEAN
                    in self._emit_policy_info else ()),
                bandit_policy_type=(
                    bandit_policy_values
                    if policy_utilities.InfoFields.BANDIT_POLICY_TYPE
                    in self._emit_policy_info else ()),
                chosen_arm_features=chosen_arm_features)
        else:
            policy_info = policy_utilities.PolicyInfo(
                log_probability=tf.zeros([batch_size], tf.float32)
                if policy_utilities.InfoFields.LOG_PROBABILITY
                in self._emit_policy_info else (),
                predicted_rewards_mean=(
                    predicted_reward_values
                    if policy_utilities.InfoFields.PREDICTED_REWARDS_MEAN
                    in self._emit_policy_info else ()),
                bandit_policy_type=(
                    bandit_policy_values
                    if policy_utilities.InfoFields.BANDIT_POLICY_TYPE
                    in self._emit_policy_info else ()))

        return policy_step.PolicyStep(
            tfp.distributions.Deterministic(loc=actions), policy_state,
            policy_info)
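
`policy_utilities.masked_argmax` is what keeps the greedy choice inside the valid-action set. Its effect can be sketched in plain TensorFlow by pushing masked-out entries to the most negative float before the argmax (an illustration of the behaviour, not the library's implementation):

import tensorflow as tf

predicted_rewards = tf.constant([[1.0, 5.0, 2.0],
                                 [4.0, 0.5, 3.0]])
mask = tf.constant([[1, 0, 1],
                    [1, 1, 0]])

# Invalid actions get the most negative float, so argmax never selects them.
neg_inf = tf.fill(tf.shape(predicted_rewards), predicted_rewards.dtype.min)
masked = tf.where(tf.cast(mask, tf.bool), predicted_rewards, neg_inf)
actions = tf.argmax(masked, axis=-1, output_type=tf.int32)
print(actions)  # [2 0]: the best *valid* action in each row.
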
Example #13
    def __init__(
            self,
            time_step_spec: Optional[ts.TimeStep],
            action_spec: Optional[NestedBoundedTensorSpec],
            scalarizer: multi_objective_scalarizer.Scalarizer,
            objective_networks: Sequence[Network],
            observation_and_action_constraint_splitter: types.Splitter = None,
            accepts_per_arm_features: bool = False,
            emit_policy_info: Tuple[Text] = (),
            name: Optional[Text] = None):
        """Builds a GreedyMultiObjectiveNeuralPolicy based on multiple networks.

    This policy takes an iterable of `tf_agents.Network`, each responsible for
    predicting a specific objective, along with a `Scalarizer` object to
    generate an action by maximizing the scalarized objective, i.e., the output
    of the `Scalarizer` applied to the multiple predicted objectives by the
    networks.

    Args:
      time_step_spec: A `TimeStep` spec of the expected time_steps.
      action_spec: A nest of `BoundedTensorSpec` representing the actions.
      scalarizer: A
       `tf_agents.bandits.multi_objective.multi_objective_scalarizer.Scalarizer`
        object that implements scalarization of multiple objectives into a
        single scalar reward.
      objective_networks: A Sequence of `tf_agents.network.Network` objects to
        be used by the policy. Each network will be called with
        call(observation, step_type) and is expected to provide a prediction for
        a specific objective for all actions.
      observation_and_action_constraint_splitter: A function used for masking
        valid/invalid actions with each state of the environment. The function
        takes in a full observation and returns a tuple consisting of 1) the
        part of the observation intended as input to the network and 2) the
        mask.  The mask should be a 0-1 `Tensor` of shape `[batch_size,
        num_actions]`. This function should also work with a `TensorSpec` as
        input, and should output `TensorSpec` objects for the observation and
        mask.
      accepts_per_arm_features: (bool) Whether the policy accepts per-arm
        features.
      emit_policy_info: (tuple of strings) what side information we want to get
        as part of the policy info. Allowed values can be found in
        `policy_utilities.PolicyInfo`.
      name: The name of this policy. All variables in this module will fall
        under that name. Defaults to the class name.

    Raises:
      NotImplementedError: If `action_spec` contains more than one
        `BoundedTensorSpec` or the `BoundedTensorSpec` is not valid.
      NotImplementedError: If `action_spec` is not a `BoundedTensorSpec` of type
        int32 and shape ().
      ValueError: If `objective_networks` has fewer than two networks.
      ValueError: If `accepts_per_arm_features` is true but `time_step_spec` is
        None.
    """
        flat_action_spec = tf.nest.flatten(action_spec)
        if len(flat_action_spec) > 1:
            raise NotImplementedError(
                'action_spec can only contain a single BoundedTensorSpec.')

        action_spec = flat_action_spec[0]
        if (not tensor_spec.is_bounded(action_spec)
                or not tensor_spec.is_discrete(action_spec)
                or action_spec.shape.rank > 1
                or action_spec.shape.num_elements() != 1):
            raise NotImplementedError(
                'action_spec must be a BoundedTensorSpec of type int32 and shape (). '
                'Found {}.'.format(action_spec))
        self._expected_num_actions = action_spec.maximum - action_spec.minimum + 1
        self._action_offset = action_spec.minimum
        policy_state_spec = []
        for network in objective_networks:
            policy_state_spec.append(network.state_spec)
            network.create_variables()
        self._objective_networks = objective_networks
        self._scalarizer = scalarizer
        self._num_objectives = len(self._objective_networks)
        if self._num_objectives < 2:
            raise ValueError(
                'Number of objectives should be at least two, but found to be {}'
                .format(self._num_objectives))

        self._emit_policy_info = emit_policy_info
        predicted_rewards_mean = ()
        if policy_utilities.InfoFields.PREDICTED_REWARDS_MEAN in emit_policy_info:
            predicted_rewards_mean = tensor_spec.TensorSpec(
                [self._num_objectives, self._expected_num_actions])
        bandit_policy_type = ()
        if policy_utilities.InfoFields.BANDIT_POLICY_TYPE in emit_policy_info:
            bandit_policy_type = (
                policy_utilities.create_bandit_policy_type_tensor_spec(
                    shape=[1]))
        if accepts_per_arm_features:
            if time_step_spec is None:
                raise ValueError(
                    'time_step_spec should not be None for per-arm-features policies, '
                    'but found to be None.')
            # The features for the chosen arm are saved to policy_info.
            chosen_arm_features_info = (
                policy_utilities.create_chosen_arm_features_info_spec(
                    time_step_spec.observation,
                    observation_and_action_constraint_splitter))
            info_spec = policy_utilities.PerArmPolicyInfo(
                predicted_rewards_mean=predicted_rewards_mean,
                bandit_policy_type=bandit_policy_type,
                chosen_arm_features=chosen_arm_features_info)
        else:
            info_spec = policy_utilities.PolicyInfo(
                predicted_rewards_mean=predicted_rewards_mean,
                bandit_policy_type=bandit_policy_type)

        self._accepts_per_arm_features = accepts_per_arm_features

        super(GreedyMultiObjectiveNeuralPolicy,
              self).__init__(time_step_spec,
                             action_spec,
                             policy_state_spec=policy_state_spec,
                             clip=False,
                             info_spec=info_spec,
                             emit_log_probability='log_probability'
                             in emit_policy_info,
                             observation_and_action_constraint_splitter=(
                                 observation_and_action_constraint_splitter),
                             name=name)
Example #14
    def _distribution(
            self, time_step: ts.TimeStep,
            policy_state: Sequence[types.TensorSpec]
    ) -> policy_step.PolicyStep:
        observation = time_step.observation
        if self.observation_and_action_constraint_splitter is not None:
            observation, _ = self.observation_and_action_constraint_splitter(
                observation)
        predicted_objective_values_tensor, policy_state = self._predict(
            observation, time_step.step_type, policy_state)
        scalarized_reward = scalarize_objectives(
            predicted_objective_values_tensor, self._scalarizer)
        batch_size = scalarized_reward.shape[0]
        mask = policy_utilities.construct_mask_from_multiple_sources(
            time_step.observation,
            self._observation_and_action_constraint_splitter, (),
            self._expected_num_actions)

        # Argmax.
        if mask is not None:
            actions = policy_utilities.masked_argmax(
                scalarized_reward, mask, output_type=self.action_spec.dtype)
        else:
            actions = tf.argmax(scalarized_reward,
                                axis=-1,
                                output_type=self.action_spec.dtype)

        actions += self._action_offset

        bandit_policy_values = tf.fill(
            [batch_size, 1], policy_utilities.BanditPolicyType.GREEDY)

        if self._accepts_per_arm_features:
            # Saving the features for the chosen action to the policy_info.
            def gather_observation(obs):
                return tf.gather(params=obs, indices=actions, batch_dims=1)

            chosen_arm_features = tf.nest.map_structure(
                gather_observation,
                observation[bandit_spec_utils.PER_ARM_FEATURE_KEY])
            policy_info = policy_utilities.PerArmPolicyInfo(
                log_probability=tf.zeros([batch_size], tf.float32)
                if policy_utilities.InfoFields.LOG_PROBABILITY
                in self._emit_policy_info else (),
                predicted_rewards_mean=(
                    predicted_objective_values_tensor
                    if policy_utilities.InfoFields.PREDICTED_REWARDS_MEAN
                    in self._emit_policy_info else ()),
                bandit_policy_type=(
                    bandit_policy_values
                    if policy_utilities.InfoFields.BANDIT_POLICY_TYPE
                    in self._emit_policy_info else ()),
                chosen_arm_features=chosen_arm_features)
        else:
            policy_info = policy_utilities.PolicyInfo(
                log_probability=tf.zeros([batch_size], tf.float32)
                if policy_utilities.InfoFields.LOG_PROBABILITY
                in self._emit_policy_info else (),
                predicted_rewards_mean=(
                    predicted_objective_values_tensor
                    if policy_utilities.InfoFields.PREDICTED_REWARDS_MEAN
                    in self._emit_policy_info else ()),
                bandit_policy_type=(
                    bandit_policy_values
                    if policy_utilities.InfoFields.BANDIT_POLICY_TYPE
                    in self._emit_policy_info else ()))

        return policy_step.PolicyStep(
            tfp.distributions.Deterministic(loc=actions), policy_state,
            policy_info)
Example #15
    def __init__(self,
                 encoding_network,
                 encoding_dim,
                 reward_layer,
                 epsilon_greedy,
                 actions_from_reward_layer,
                 cov_matrix,
                 data_vector,
                 num_samples,
                 time_step_spec=None,
                 alpha=1.0,
                 emit_policy_info=(),
                 emit_log_probability=False,
                 accepts_per_arm_features=False,
                 observation_and_action_constraint_splitter=None,
                 name=None):
        """Initializes `NeuralLinUCBPolicy`.

    Args:
      encoding_network: network that encodes the observations.
      encoding_dim: (int) dimension of the encoded observations.
      reward_layer: final layer that predicts the expected reward per arm. In
        case the policy accepts per-arm features, the output of this layer has
        to be a scalar. This is because in the per-arm case, all encoded
        observations have to go through the same computation to get the reward
        estimates. The `num_actions` dimension of the encoded observation is
        treated as a batch dimension in the reward layer.
      epsilon_greedy: (float) representing the probability of choosing a random
        action instead of the greedy action.
      actions_from_reward_layer: (bool) whether to get actions from the reward
        layer or from LinUCB.
      cov_matrix: list of the covariance matrices. There exists one covariance
        matrix per arm, unless the policy accepts per-arm features, in which
        case this list must have a single element.
      data_vector: list of the data vectors. A data vector is a weighted sum
        of the observations, where the weight is the corresponding reward. Each
        arm has its own data vector, unless the policy accepts per-arm features,
        in which case this list must have a single element.
      num_samples: list of number of samples per arm. If the policy accepts per-
        arm features, this is a single-element list counting the number of
        steps.
      time_step_spec: A `TimeStep` spec of the expected time_steps.
      alpha: (float) non-negative weight multiplying the confidence intervals.
      emit_policy_info: (tuple of strings) what side information we want to get
        as part of the policy info. Allowed values can be found in
        `policy_utilities.PolicyInfo`.
      emit_log_probability: (bool) whether to emit log probabilities.
      accepts_per_arm_features: (bool) Whether the policy accepts per-arm
        features.
      observation_and_action_constraint_splitter: A function used for masking
        valid/invalid actions with each state of the environment. The function
        takes in a full observation and returns a tuple consisting of 1) the
        part of the observation intended as input to the bandit policy and 2)
        the mask. The mask should be a 0-1 `Tensor` of shape
        `[batch_size, num_actions]`. This function should also work with a
        `TensorSpec` as input, and should output `TensorSpec` objects for the
        observation and mask.
      name: The name of this policy.
    """
        encoding_network.create_variables()
        self._encoding_network = encoding_network
        self._reward_layer = reward_layer
        self._encoding_dim = encoding_dim

        if accepts_per_arm_features and reward_layer.units != 1:
            raise ValueError(
                'The output dimension of the reward layer must be 1, got'
                ' {}'.format(reward_layer.units))

        if not isinstance(cov_matrix, (list, tuple)):
            raise ValueError(
                'cov_matrix must be a list of matrices (Tensors).')
        self._cov_matrix = cov_matrix

        if not isinstance(data_vector, (list, tuple)):
            raise ValueError(
                'data_vector must be a list of vectors (Tensors).')
        self._data_vector = data_vector

        if not isinstance(num_samples, (list, tuple)):
            raise ValueError(
                'num_samples must be a list of vectors (Tensors).')
        self._num_samples = num_samples

        self._alpha = alpha
        self._actions_from_reward_layer = actions_from_reward_layer
        self._epsilon_greedy = epsilon_greedy
        self._dtype = self._data_vector[0].dtype

        if len(cov_matrix) != len(data_vector):
            raise ValueError(
                'The size of list cov_matrix must match the size of '
                'list data_vector. Got {} for cov_matrix and {} '
                'for data_vector'.format(len(self._cov_matrix),
                                         len((data_vector))))
        if len(num_samples) != len(cov_matrix):
            raise ValueError('The size of num_samples must match the size of '
                             'list cov_matrix. Got {} for num_samples and {} '
                             'for cov_matrix'.format(len(self._num_samples),
                                                     len((cov_matrix))))

        self._accepts_per_arm_features = accepts_per_arm_features
        if observation_and_action_constraint_splitter is not None:
            context_spec, _ = observation_and_action_constraint_splitter(
                time_step_spec.observation)
        else:
            context_spec = time_step_spec.observation
        if accepts_per_arm_features:
            self._num_actions = context_spec[
                bandit_spec_utils.PER_ARM_FEATURE_KEY].shape.as_list()[0]
            self._num_models = 1
        else:
            self._num_actions = len(cov_matrix)
            self._num_models = self._num_actions
        (self._global_context_dim,
         self._arm_context_dim) = bandit_spec_utils.get_context_dims_from_spec(
             context_spec, accepts_per_arm_features)
        cov_matrix_dim = tf.compat.dimension_value(cov_matrix[0].shape[0])
        if self._encoding_dim != cov_matrix_dim:
            raise ValueError('The dimension of matrix `cov_matrix` must match '
                             'encoding dimension {}. '
                             'Got {} for `cov_matrix`.'.format(
                                 self._encoding_dim, cov_matrix_dim))
        data_vector_dim = tf.compat.dimension_value(data_vector[0].shape[0])
        if self._encoding_dim != data_vector_dim:
            raise ValueError(
                'The dimension of vector `data_vector` must match '
                'encoding dimension {}. '
                'Got {} for `data_vector`.'.format(self._encoding_dim,
                                                   data_vector_dim))
        action_spec = tensor_spec.BoundedTensorSpec(shape=(),
                                                    dtype=tf.int32,
                                                    minimum=0,
                                                    maximum=self._num_actions -
                                                    1,
                                                    name='action')

        self._emit_policy_info = emit_policy_info
        predicted_rewards_mean = ()
        if policy_utilities.InfoFields.PREDICTED_REWARDS_MEAN in emit_policy_info:
            predicted_rewards_mean = tensor_spec.TensorSpec(
                [self._num_actions], dtype=tf.float32)
        if accepts_per_arm_features:
            chosen_arm_features_info = tensor_spec.TensorSpec(
                dtype=tf.float32,
                shape=[self._arm_context_dim],
                name='chosen_arm_features')
            info_spec = policy_utilities.PerArmPolicyInfo(
                predicted_rewards_mean=predicted_rewards_mean,
                chosen_arm_features=chosen_arm_features_info)
        else:
            info_spec = policy_utilities.PolicyInfo(
                predicted_rewards_mean=predicted_rewards_mean)

        super(NeuralLinUCBPolicy,
              self).__init__(time_step_spec=time_step_spec,
                             action_spec=action_spec,
                             emit_log_probability=emit_log_probability,
                             observation_and_action_constraint_splitter=(
                                 observation_and_action_constraint_splitter),
                             info_spec=info_spec,
                             name=name)
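
For context on how the quantities stored here (cov_matrix, data_vector, alpha) are typically used: LinUCB estimates weights theta = A^{-1} b and scores an arm by its predicted reward plus an alpha-scaled confidence width. A compact NumPy sketch of that standard scoring rule (illustrative only; it is not this class's `_distribution` code):

import numpy as np

encoding_dim = 4
a = np.eye(encoding_dim) + 0.1 * np.ones((encoding_dim, encoding_dim))  # a cov_matrix entry
b = np.arange(encoding_dim, dtype=np.float64)                           # a data_vector entry
alpha = 1.0

a_inv = np.linalg.inv(a)
theta = a_inv @ b                                       # estimated reward weights
x = np.ones(encoding_dim)                               # an encoded observation
score = x @ theta + alpha * np.sqrt(x @ a_inv @ x)      # mean estimate + confidence width
print(score)
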