# Example #1
# 0
 def test_num_features(self):
   """The feature count reported by `rcfr` for the test game is 13."""
   expected_feature_count = 13
   actual_feature_count = rcfr.num_features(_GAME)
   assert actual_feature_count == expected_feature_count
# Example #2
# 0
    def __init__(self,
                 game,
                 num_hidden_units,
                 num_hidden_layers=1,
                 num_hidden_factors=0,
                 hidden_activation=tf.nn.relu,
                 use_skip_connections=False,
                 regularizer=None,
                 autoencode=False):
        """Creates a new `DeepNeurdModel`.

    Args:
      game: The OpenSpiel game being solved.
      num_hidden_units: The number of units in each hidden layer.
      num_hidden_layers: The number of hidden layers. Defaults to 1.
      num_hidden_factors: The number of hidden factors or the matrix rank of the
        layer. If greater than zero, hidden layers will be split into two
        separate linear transformations, the first with
        `num_hidden_factors`-columns and the second with
        `num_hidden_units`-columns. The result is that the logical hidden layer
        is a rank-`num_hidden_factors` matrix instead of a rank-`num_hidden_units`
        matrix. When `num_hidden_factors < num_hidden_units`, this effectively
        implements weight sharing. Defaults to 0.
      hidden_activation: The activation function to apply over hidden layers.
        Defaults to `tf.nn.relu`.
      use_skip_connections: Whether or not to apply skip connections (layer
        output = layer(x) + x) on hidden layers. Zero padding or truncation is
        used to match the number of columns on layer inputs and outputs.
      regularizer: A regularizer to apply to each layer. Defaults to `None`.
      autoencode: Whether or not to output a reconstruction of the inputs upon
        being called. Defaults to `False`.
    """

        self._autoencode = autoencode
        self._use_skip_connections = use_skip_connections
        self._hidden_are_factored = num_hidden_factors > 0

        self.layers = []
        for _ in range(num_hidden_layers):
            if self._hidden_are_factored:
                # Factored layer: a narrow linear projection (no activation)
                # preceding the full-width layer, bounding the rank of the
                # combined transformation by `num_hidden_factors`.
                self.layers.append(
                    tf.keras.layers.Dense(num_hidden_factors,
                                          use_bias=True,
                                          kernel_regularizer=regularizer))

            self.layers.append(
                tf.keras.layers.Dense(num_hidden_units,
                                      use_bias=True,
                                      activation=hidden_activation,
                                      kernel_regularizer=regularizer))

        # Output layer: 1 unit for the prediction, plus `num_features(game)`
        # extra units for the input reconstruction when autoencoding
        # (autoencode is a bool, so the product is 0 or num_features).
        self.layers.append(
            tf.keras.layers.Dense(1 +
                                  self._autoencode * rcfr.num_features(game),
                                  use_bias=True,
                                  kernel_regularizer=regularizer))

        # Construct variables for all layers by exercising the network.
        # Keras layers create their weights lazily on first call, so a dummy
        # forward pass is needed before the variables below can be collected.
        x = tf.zeros([1, rcfr.num_features(game)])
        for layer in self.layers:
            x = layer(x)

        # Flatten the per-layer variable/loss lists into single lists.
        self.trainable_variables = sum(
            [layer.trainable_variables for layer in self.layers], [])
        self.losses = sum([layer.losses for layer in self.layers], [])