def _populate_policy_info_spec(self, observation_spec,
                               observation_and_action_constraint_splitter):
    """Builds the policy info spec from `self._emit_policy_info`.

    For each predicted-rewards info field that was requested in
    `self._emit_policy_info`, a `[num_actions]` spec of `self._dtype` is
    created; fields not requested stay as empty tuples. When the policy
    accepts per-arm features, the chosen arm's feature spec is added too.

    Args:
      observation_spec: Spec of the observations; used to derive the spec of
        the chosen arm features in the per-arm case.
      observation_and_action_constraint_splitter: Accepted for interface
        compatibility with callers.

    Returns:
      A `policy_utilities.PerArmPolicyInfo` when per-arm features are
      accepted, otherwise a `policy_utilities.PolicyInfo`.
    """
    # NOTE(review): `observation_and_action_constraint_splitter` is unused in
    # this method; kept to preserve the call signature — confirm intended.
    del observation_and_action_constraint_splitter

    def _spec_if_requested(field):
        # Per-action reward spec when `field` was requested, else ().
        if field in self._emit_policy_info:
            return tensor_spec.TensorSpec([self._num_actions],
                                          dtype=self._dtype)
        return ()

    predicted_rewards_mean = _spec_if_requested(
        policy_utilities.InfoFields.PREDICTED_REWARDS_MEAN)
    predicted_rewards_optimistic = _spec_if_requested(
        policy_utilities.InfoFields.PREDICTED_REWARDS_OPTIMISTIC)
    predicted_rewards_sampled = _spec_if_requested(
        policy_utilities.InfoFields.PREDICTED_REWARDS_SAMPLED)

    if self._accepts_per_arm_features:
        # The features of the chosen arm are saved to policy_info.
        chosen_arm_features_info = (
            policy_utilities.create_chosen_arm_features_info_spec(
                observation_spec))
        return policy_utilities.PerArmPolicyInfo(
            predicted_rewards_mean=predicted_rewards_mean,
            predicted_rewards_optimistic=predicted_rewards_optimistic,
            predicted_rewards_sampled=predicted_rewards_sampled,
            chosen_arm_features=chosen_arm_features_info)
    return policy_utilities.PolicyInfo(
        predicted_rewards_mean=predicted_rewards_mean,
        predicted_rewards_optimistic=predicted_rewards_optimistic,
        predicted_rewards_sampled=predicted_rewards_sampled)
# Example #2
    def testNumActions(self, dtype):
        """Checks per-example action masking and uniform log-probabilities."""
        if not dtype.is_integer:
            self.skipTest('testNumActions only applies to integer dtypes')

        batch_size = 1000
        half = int(batch_size / 2)

        # Action spec with max_num_arms = 4.
        action_spec = tensor_spec.BoundedTensorSpec((), dtype, 0, 3)

        # Build a batch whose first half has num_actions = 2 and whose second
        # half has num_actions = 3.
        time_step_spec, two_arm_step = self.create_time_step(
            use_per_arm_features=True, num_arms=2)
        _, three_arm_step = self.create_time_step(
            use_per_arm_features=True, num_arms=3)
        time_step = nest_utils.stack_nested_tensors(
            [two_arm_step] * half + [three_arm_step] * half)

        # The chosen arm's features are emitted through policy_info.
        arm_features_spec = (
            policy_utilities.create_chosen_arm_features_info_spec(
                time_step_spec.observation))
        info_spec = policy_utilities.PerArmPolicyInfo(
            chosen_arm_features=arm_features_spec)

        policy = random_tf_policy.RandomTFPolicy(
            time_step_spec=time_step_spec,
            action_spec=action_spec,
            info_spec=info_spec,
            accepts_per_arm_features=True,
            emit_log_probability=True)

        action_step = policy.action(time_step)
        tf.nest.assert_same_structure(action_spec, action_step.action)

        # Sample from the policy 1000 times; actions that are invalid for an
        # example's number of arms must never be chosen.
        step = self.evaluate(action_step)
        actions = step.action
        self.assertTrue(np.all(actions >= 0))
        self.assertTrue(np.all(actions[:half] < 2))
        self.assertTrue(np.all(actions[half:] < 3))

        # With num_actions valid arms, each arm's probability is 1/num_actions.
        self.assertAllClose(
            step.info.log_probability[:half],
            tf.constant(np.log(1. / 2), shape=[half]))
        self.assertAllClose(
            step.info.log_probability[half:],
            tf.constant(np.log(1. / 3), shape=[half]))
# Example #3
    def __init__(self,
                 encoding_network: types.Network,
                 encoding_dim: int,
                 reward_layer: tf.keras.layers.Dense,
                 epsilon_greedy: float,
                 actions_from_reward_layer: types.Bool,
                 cov_matrix: Sequence[types.Float],
                 data_vector: Sequence[types.Float],
                 num_samples: Sequence[types.Int],
                 time_step_spec: types.TimeStep,
                 alpha: float = 1.0,
                 emit_policy_info: Sequence[Text] = (),
                 emit_log_probability: bool = False,
                 accepts_per_arm_features: bool = False,
                 distributed_use_reward_layer: bool = False,
                 observation_and_action_constraint_splitter: Optional[
                     types.Splitter] = None,
                 name: Optional[Text] = None):
        """Initializes `NeuralLinUCBPolicy`.

    Args:
      encoding_network: network that encodes the observations.
      encoding_dim: (int) dimension of the encoded observations.
      reward_layer: final layer that predicts the expected reward per arm. In
        case the policy accepts per-arm features, the output of this layer has
        to be a scalar. This is because in the per-arm case, all encoded
        observations have to go through the same computation to get the reward
        estimates. The `num_actions` dimension of the encoded observation is
        treated as a batch dimension in the reward layer.
      epsilon_greedy: (float) representing the probability of choosing a random
        action instead of the greedy action.
      actions_from_reward_layer: (boolean variable) whether to get actions from
        the reward layer or from LinUCB.
      cov_matrix: list of the covariance matrices. There exists one covariance
        matrix per arm, unless the policy accepts per-arm features, in which
        case this list must have a single element.
      data_vector: list of the data vectors. A data vector is a weighted sum
        of the observations, where the weight is the corresponding reward. Each
        arm has its own data vector, unless the policy accepts per-arm features,
        in which case this list must have a single element.
      num_samples: list of number of samples per arm. If the policy accepts per-
        arm features, this is a single-element list counting the number of
        steps.
      time_step_spec: A `TimeStep` spec of the expected time_steps.
      alpha: (float) non-negative weight multiplying the confidence intervals.
      emit_policy_info: (tuple of strings) what side information we want to get
        as part of the policy info. Allowed values can be found in
        `policy_utilities.PolicyInfo`.
      emit_log_probability: (bool) whether to emit log probabilities.
      accepts_per_arm_features: (bool) Whether the policy accepts per-arm
        features.
      distributed_use_reward_layer: (bool) Whether to pick the actions using
        the network or use LinUCB. This applies only in distributed training
        setting and has a similar role to the `actions_from_reward_layer`
        mentioned above.
      observation_and_action_constraint_splitter: A function used for masking
        valid/invalid actions with each state of the environment. The function
        takes in a full observation and returns a tuple consisting of 1) the
        part of the observation intended as input to the bandit policy and 2)
        the mask. The mask should be a 0-1 `Tensor` of shape
        `[batch_size, num_actions]`. This function should also work with a
        `TensorSpec` as input, and should output `TensorSpec` objects for the
        observation and mask.
      name: The name of this policy.
    """
        # An action mask (via the splitter) cannot be combined with per-arm
        # features; this raises if both are supplied.
        policy_utilities.check_no_mask_with_arm_features(
            accepts_per_arm_features,
            observation_and_action_constraint_splitter)
        # Create the encoder's variables eagerly so they exist before use.
        encoding_network.create_variables()
        self._encoding_network = encoding_network
        self._reward_layer = reward_layer
        self._encoding_dim = encoding_dim

        # In the per-arm case all arms share the reward layer, so it must
        # produce a single scalar per encoded arm observation.
        if accepts_per_arm_features and reward_layer.units != 1:
            raise ValueError(
                'The output dimension of the reward layer must be 1, got'
                ' {}'.format(reward_layer.units))

        # LinUCB sufficient statistics: one entry per model (per arm, or a
        # single shared model when per-arm features are accepted).
        if not isinstance(cov_matrix, (list, tuple)):
            raise ValueError(
                'cov_matrix must be a list of matrices (Tensors).')
        self._cov_matrix = cov_matrix

        if not isinstance(data_vector, (list, tuple)):
            raise ValueError(
                'data_vector must be a list of vectors (Tensors).')
        self._data_vector = data_vector

        if not isinstance(num_samples, (list, tuple)):
            raise ValueError(
                'num_samples must be a list of vectors (Tensors).')
        self._num_samples = num_samples

        self._alpha = alpha
        self._actions_from_reward_layer = actions_from_reward_layer
        self._epsilon_greedy = epsilon_greedy
        # The policy dtype is taken from the first data vector.
        self._dtype = self._data_vector[0].dtype
        self._distributed_use_reward_layer = distributed_use_reward_layer

        # The three statistic lists must be parallel (one entry per model).
        if len(cov_matrix) != len(data_vector):
            raise ValueError(
                'The size of list cov_matrix must match the size of '
                'list data_vector. Got {} for cov_matrix and {} '
                'for data_vector'.format(len(self._cov_matrix),
                                         len((data_vector))))
        if len(num_samples) != len(cov_matrix):
            raise ValueError('The size of num_samples must match the size of '
                             'list cov_matrix. Got {} for num_samples and {} '
                             'for cov_matrix'.format(len(self._num_samples),
                                                     len((cov_matrix))))

        self._accepts_per_arm_features = accepts_per_arm_features
        # Strip the action mask from the observation spec when a splitter is
        # provided; only the context part feeds the policy.
        if observation_and_action_constraint_splitter is not None:
            context_spec, _ = observation_and_action_constraint_splitter(
                time_step_spec.observation)
        else:
            context_spec = time_step_spec.observation
        if accepts_per_arm_features:
            # Per-arm: the number of actions is the leading dimension of the
            # per-arm feature spec, and a single shared model is used.
            self._num_actions = tf.nest.flatten(context_spec[
                bandit_spec_utils.PER_ARM_FEATURE_KEY])[0].shape.as_list()[0]
            self._num_models = 1
        else:
            # Otherwise there is one model (and one covariance matrix) per arm.
            self._num_actions = len(cov_matrix)
            self._num_models = self._num_actions
        # Both statistics must live in the encoding space.
        cov_matrix_dim = tf.compat.dimension_value(cov_matrix[0].shape[0])
        if self._encoding_dim != cov_matrix_dim:
            raise ValueError('The dimension of matrix `cov_matrix` must match '
                             'encoding dimension {}.'
                             'Got {} for `cov_matrix`.'.format(
                                 self._encoding_dim, cov_matrix_dim))
        data_vector_dim = tf.compat.dimension_value(data_vector[0].shape[0])
        if self._encoding_dim != data_vector_dim:
            raise ValueError(
                'The dimension of vector `data_vector` must match '
                'encoding  dimension {}. '
                'Got {} for `data_vector`.'.format(self._encoding_dim,
                                                   data_vector_dim))
        # Actions are integer indices in [0, num_actions - 1].
        action_spec = tensor_spec.BoundedTensorSpec(shape=(),
                                                    dtype=tf.int32,
                                                    minimum=0,
                                                    maximum=self._num_actions -
                                                    1,
                                                    name='action')

        self._emit_policy_info = emit_policy_info
        # Optionally expose per-action reward predictions via policy_info.
        # NOTE(review): these specs use tf.float32 rather than self._dtype —
        # confirm that is intended.
        predicted_rewards_mean = ()
        if policy_utilities.InfoFields.PREDICTED_REWARDS_MEAN in emit_policy_info:
            predicted_rewards_mean = tensor_spec.TensorSpec(
                [self._num_actions], dtype=tf.float32)
        predicted_rewards_optimistic = ()
        if (policy_utilities.InfoFields.PREDICTED_REWARDS_OPTIMISTIC
                in emit_policy_info):
            predicted_rewards_optimistic = tensor_spec.TensorSpec(
                [self._num_actions], dtype=tf.float32)
        if accepts_per_arm_features:
            # The features of the chosen arm are saved to policy_info.
            chosen_arm_features_info_spec = (
                policy_utilities.create_chosen_arm_features_info_spec(
                    time_step_spec.observation))
            info_spec = policy_utilities.PerArmPolicyInfo(
                predicted_rewards_mean=predicted_rewards_mean,
                predicted_rewards_optimistic=predicted_rewards_optimistic,
                chosen_arm_features=chosen_arm_features_info_spec)
        else:
            info_spec = policy_utilities.PolicyInfo(
                predicted_rewards_mean=predicted_rewards_mean,
                predicted_rewards_optimistic=predicted_rewards_optimistic)

        super(NeuralLinUCBPolicy,
              self).__init__(time_step_spec=time_step_spec,
                             action_spec=action_spec,
                             emit_log_probability=emit_log_probability,
                             observation_and_action_constraint_splitter=(
                                 observation_and_action_constraint_splitter),
                             info_spec=info_spec,
                             name=name)
# Example #4
    def __init__(self,
                 time_step_spec: types.TimeStep,
                 action_spec: types.NestedTensorSpec,
                 reward_network: types.Network,
                 temperature: types.FloatOrReturningFloat = 1.0,
                 observation_and_action_constraint_splitter: Optional[
                     types.Splitter] = None,
                 accepts_per_arm_features: bool = False,
                 constraints: Tuple[constr.NeuralConstraint, ...] = (),
                 emit_policy_info: Tuple[Text, ...] = (),
                 name: Optional[Text] = None):
        """Builds a BoltzmannRewardPredictionPolicy given a reward network.

    This policy takes a tf_agents.Network predicting rewards and chooses an
    action with weighted probabilities (i.e., using a softmax over the network
    estimates of value for each action).

    Args:
      time_step_spec: A `TimeStep` spec of the expected time_steps.
      action_spec: A nest of BoundedTensorSpec representing the actions.
      reward_network: An instance of a `tf_agents.network.Network`,
        callable via `network(observation, step_type) -> (output, final_state)`.
      temperature: float or callable that returns a float. The temperature used
        in the Boltzmann exploration.
      observation_and_action_constraint_splitter: A function used for masking
        valid/invalid actions with each state of the environment. The function
        takes in a full observation and returns a tuple consisting of 1) the
        part of the observation intended as input to the network and 2) the
        mask.  The mask should be a 0-1 `Tensor` of shape
        `[batch_size, num_actions]`. This function should also work with a
        `TensorSpec` as input, and should output `TensorSpec` objects for the
        observation and mask.
      accepts_per_arm_features: (bool) Whether the policy accepts per-arm
        features.
      constraints: iterable of constraints objects that are instances of
        `tf_agents.bandits.agents.NeuralConstraint`.
      emit_policy_info: (tuple of strings) what side information we want to get
        as part of the policy info. Allowed values can be found in
        `policy_utilities.PolicyInfo`.
      name: The name of this policy. All variables in this module will fall
        under that name. Defaults to the class name.

    Raises:
      NotImplementedError: If `action_spec` contains more than one
        `BoundedTensorSpec` or the `BoundedTensorSpec` is not valid.
    """
        # An action mask (via the splitter) cannot be combined with per-arm
        # features; this raises if both are supplied.
        policy_utilities.check_no_mask_with_arm_features(
            accepts_per_arm_features,
            observation_and_action_constraint_splitter)
        # Only a single (flat) action spec is supported.
        flat_action_spec = tf.nest.flatten(action_spec)
        if len(flat_action_spec) > 1:
            raise NotImplementedError(
                'action_spec can only contain a single BoundedTensorSpec.')

        self._temperature = temperature
        action_spec = flat_action_spec[0]
        # The spec must describe a single bounded, discrete (scalar) action.
        if (not tensor_spec.is_bounded(action_spec)
                or not tensor_spec.is_discrete(action_spec)
                or action_spec.shape.rank > 1
                or action_spec.shape.num_elements() != 1):
            raise NotImplementedError(
                'action_spec must be a BoundedTensorSpec of type int32 and shape (). '
                'Found {}.'.format(action_spec))
        # Number of actions comes from the spec's inclusive bounds; the offset
        # shifts network outputs when the minimum action is nonzero.
        self._expected_num_actions = action_spec.maximum - action_spec.minimum + 1
        self._action_offset = action_spec.minimum
        # Create the reward network's variables eagerly before first use.
        reward_network.create_variables()
        self._reward_network = reward_network
        self._constraints = constraints

        self._emit_policy_info = emit_policy_info
        # Optional policy_info fields, enabled by name in `emit_policy_info`;
        # fields not requested stay as empty tuples.
        predicted_rewards_mean = ()
        if policy_utilities.InfoFields.PREDICTED_REWARDS_MEAN in emit_policy_info:
            predicted_rewards_mean = tensor_spec.TensorSpec(
                [self._expected_num_actions])
        bandit_policy_type = ()
        if policy_utilities.InfoFields.BANDIT_POLICY_TYPE in emit_policy_info:
            bandit_policy_type = (
                policy_utilities.create_bandit_policy_type_tensor_spec(
                    shape=[1]))
        if accepts_per_arm_features:
            # The features of the chosen arm are saved to policy_info.
            chosen_arm_features_info = (
                policy_utilities.create_chosen_arm_features_info_spec(
                    time_step_spec.observation))
            info_spec = policy_utilities.PerArmPolicyInfo(
                predicted_rewards_mean=predicted_rewards_mean,
                bandit_policy_type=bandit_policy_type,
                chosen_arm_features=chosen_arm_features_info)
        else:
            info_spec = policy_utilities.PolicyInfo(
                predicted_rewards_mean=predicted_rewards_mean,
                bandit_policy_type=bandit_policy_type)

        self._accepts_per_arm_features = accepts_per_arm_features

        # Log probabilities are emitted iff requested via `emit_policy_info`.
        super(BoltzmannRewardPredictionPolicy,
              self).__init__(time_step_spec,
                             action_spec,
                             policy_state_spec=reward_network.state_spec,
                             clip=False,
                             info_spec=info_spec,
                             emit_log_probability='log_probability'
                             in emit_policy_info,
                             observation_and_action_constraint_splitter=(
                                 observation_and_action_constraint_splitter),
                             name=name)
    def __init__(
            self,
            time_step_spec: Optional[ts.TimeStep],
            action_spec: Optional[types.NestedBoundedTensorSpec],
            scalarizer: multi_objective_scalarizer.Scalarizer,
            objective_networks: Sequence[Network],
            observation_and_action_constraint_splitter: Optional[
                types.Splitter] = None,
            accepts_per_arm_features: bool = False,
            emit_policy_info: Tuple[Text, ...] = (),
            name: Optional[Text] = None):
        """Builds a GreedyMultiObjectiveNeuralPolicy based on multiple networks.

    This policy takes an iterable of `tf_agents.Network`, each responsible for
    predicting a specific objective, along with a `Scalarizer` object to
    generate an action by maximizing the scalarized objective, i.e., the output
    of the `Scalarizer` applied to the multiple predicted objectives by the
    networks.

    Args:
      time_step_spec: A `TimeStep` spec of the expected time_steps.
      action_spec: A nest of `BoundedTensorSpec` representing the actions.
      scalarizer: A
       `tf_agents.bandits.multi_objective.multi_objective_scalarizer.Scalarizer`
        object that implements scalarization of multiple objectives into a
        single scalar reward.
      objective_networks: A Sequence of `tf_agents.network.Network` objects to
        be used by the policy. Each network will be called with
        call(observation, step_type) and is expected to provide a prediction for
        a specific objective for all actions.
      observation_and_action_constraint_splitter: A function used for masking
        valid/invalid actions with each state of the environment. The function
        takes in a full observation and returns a tuple consisting of 1) the
        part of the observation intended as input to the network and 2) the
        mask.  The mask should be a 0-1 `Tensor` of shape `[batch_size,
        num_actions]`. This function should also work with a `TensorSpec` as
        input, and should output `TensorSpec` objects for the observation and
        mask.
      accepts_per_arm_features: (bool) Whether the policy accepts per-arm
        features.
      emit_policy_info: (tuple of strings) what side information we want to get
        as part of the policy info. Allowed values can be found in
        `policy_utilities.PolicyInfo`.
      name: The name of this policy. All variables in this module will fall
        under that name. Defaults to the class name.

    Raises:
      NotImplementedError: If `action_spec` contains more than one
        `BoundedTensorSpec` or the `BoundedTensorSpec` is not valid.
      NotImplementedError: If `action_spec` is not a `BoundedTensorSpec` of type
        int32 and shape ().
      ValueError: If `objective_networks` has fewer than two networks.
      ValueError: If `accepts_per_arm_features` is true but `time_step_spec` is
        None.
    """
        # An action mask (via the splitter) cannot be combined with per-arm
        # features; this raises if both are supplied.
        policy_utilities.check_no_mask_with_arm_features(
            accepts_per_arm_features,
            observation_and_action_constraint_splitter)
        # Only a single (flat) action spec is supported.
        flat_action_spec = tf.nest.flatten(action_spec)
        if len(flat_action_spec) > 1:
            raise NotImplementedError(
                'action_spec can only contain a single BoundedTensorSpec.')

        action_spec = flat_action_spec[0]
        # The spec must describe a single bounded, discrete (scalar) action.
        if (not tensor_spec.is_bounded(action_spec)
                or not tensor_spec.is_discrete(action_spec)
                or action_spec.shape.rank > 1
                or action_spec.shape.num_elements() != 1):
            raise NotImplementedError(
                'action_spec must be a BoundedTensorSpec of type int32 and shape (). '
                'Found {}.'.format(action_spec))
        # Number of actions comes from the spec's inclusive bounds; the offset
        # shifts outputs when the minimum action is nonzero.
        self._expected_num_actions = action_spec.maximum - action_spec.minimum + 1
        self._action_offset = action_spec.minimum
        # Collect each objective network's state spec and build its variables.
        policy_state_spec = []
        for network in objective_networks:
            policy_state_spec.append(network.state_spec)
            network.create_variables()
        self._objective_networks = objective_networks
        self._scalarizer = scalarizer
        # Scalarization only makes sense with at least two objectives.
        self._num_objectives = len(self._objective_networks)
        if self._num_objectives < 2:
            raise ValueError(
                'Number of objectives should be at least two, but found to be {}'
                .format(self._num_objectives))

        self._emit_policy_info = emit_policy_info
        # Optional policy_info fields, enabled by name in `emit_policy_info`;
        # fields not requested stay as empty tuples.
        predicted_rewards_mean = ()
        if policy_utilities.InfoFields.PREDICTED_REWARDS_MEAN in emit_policy_info:
            predicted_rewards_mean = tensor_spec.TensorSpec(
                [self._num_objectives, self._expected_num_actions])
        scalarized_predicted_rewards_mean = ()
        if (policy_utilities.InfoFields.
                MULTIOBJECTIVE_SCALARIZED_PREDICTED_REWARDS_MEAN
                in emit_policy_info):
            scalarized_predicted_rewards_mean = tensor_spec.TensorSpec(
                [self._expected_num_actions])
        bandit_policy_type = ()
        if policy_utilities.InfoFields.BANDIT_POLICY_TYPE in emit_policy_info:
            bandit_policy_type = (
                policy_utilities.create_bandit_policy_type_tensor_spec(
                    shape=[1]))
        if accepts_per_arm_features:
            # Per-arm features require a time_step_spec to derive the chosen
            # arm feature spec from.
            if time_step_spec is None:
                raise ValueError(
                    'time_step_spec should not be None for per-arm-features policies, '
                    'but found to be.')
            # The features of the chosen arm are saved to policy_info.
            chosen_arm_features_info = (
                policy_utilities.create_chosen_arm_features_info_spec(
                    time_step_spec.observation))
            info_spec = policy_utilities.PerArmPolicyInfo(
                predicted_rewards_mean=predicted_rewards_mean,
                multiobjective_scalarized_predicted_rewards_mean=
                scalarized_predicted_rewards_mean,
                bandit_policy_type=bandit_policy_type,
                chosen_arm_features=chosen_arm_features_info)
        else:
            info_spec = policy_utilities.PolicyInfo(
                predicted_rewards_mean=predicted_rewards_mean,
                multiobjective_scalarized_predicted_rewards_mean=
                scalarized_predicted_rewards_mean,
                bandit_policy_type=bandit_policy_type)

        self._accepts_per_arm_features = accepts_per_arm_features

        # Log probabilities are emitted iff requested via `emit_policy_info`.
        super(GreedyMultiObjectiveNeuralPolicy,
              self).__init__(time_step_spec,
                             action_spec,
                             policy_state_spec=policy_state_spec,
                             clip=False,
                             info_spec=info_spec,
                             emit_log_probability='log_probability'
                             in emit_policy_info,
                             observation_and_action_constraint_splitter=(
                                 observation_and_action_constraint_splitter),
                             name=name)