Example #1
  def build_network(self, inputs, phase_train=True, nclass=1001):
    try:
      from official.recommendation import neumf_model  # pylint: disable=g-import-not-at-top
    except ImportError as e:
      if 'neumf_model' not in str(e):
        raise
      raise ImportError('To use the experimental NCF model, you must clone the '
                        'repo https://github.com/tensorflow/models and add '
                        'tensorflow/models to the PYTHONPATH.')
    del nclass

    users, items, _ = inputs
    params = {
        'num_users': _NUM_USERS_20M,
        'num_items': _NUM_ITEMS_20M,
        'model_layers': (256, 256, 128, 64),
        'mf_dim': 64,
        'mf_regularization': 0,
        'mlp_reg_layers': (0, 0, 0, 0),
        'use_tpu': False
    }
    if self.data_type == tf.float32:
      keras_model = neumf_model.construct_model(users, items, params)
      logits = keras_model.output
    else:
      assert self.data_type == tf.float16
      tf.keras.backend.set_floatx('float16')
      # We cannot rely on the variable_scope's fp16 custom getter here, because
      # the NCF model uses keras layers, which ignore variable scopes. So we use
      # a variable_creator_scope instead.
      with tf.variable_creator_scope(_fp16_variable_creator):
        keras_model = neumf_model.construct_model(users, items, params)
      logits = tf.cast(keras_model.output, tf.float32)

    return model.BuildNetworkResult(logits=logits, extra_info=None)
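
The _fp16_variable_creator passed to tf.variable_creator_scope above is defined elsewhere in the benchmark file and is not shown here. A minimal sketch of what such a creator can look like, assuming the usual mixed-precision pattern (store the weights in fp32, hand fp16 casts to the Keras layers):

def _fp16_variable_creator(next_creator, **kwargs):
  """Variable creator: keeps fp16-requested variables in fp32, returns an fp16 cast."""
  dtype = kwargs.get('dtype', None)
  initial_value = kwargs.get('initial_value', None)
  if dtype is None and initial_value is not None and not callable(initial_value):
    dtype = initial_value.dtype
  if dtype == tf.float16:
    # Store the variable in fp32 for numerically stable updates...
    if callable(initial_value):
      kwargs['initial_value'] = lambda: tf.cast(initial_value(), tf.float32)
    else:
      kwargs['initial_value'] = tf.cast(initial_value, tf.float32)
    kwargs['dtype'] = tf.float32
    var = next_creator(**kwargs)
    # ...but expose an fp16 view to the layers that read it.
    return tf.cast(var, dtype=tf.float16)
  return next_creator(**kwargs)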
Example #2
  def build_network(self, inputs, phase_train=True, nclass=1001,
                    data_type=tf.float32):
    try:
      from official.recommendation import neumf_model  # pylint: disable=g-import-not-at-top
    except ImportError as e:
      if 'neumf_model' not in str(e):
        raise
      raise ImportError('To use the experimental NCF model, you must clone the '
                        'repo https://github.com/tensorflow/models and add '
                        'tensorflow/models to the PYTHONPATH.')
    del nclass
    if data_type != tf.float32:
      raise ValueError('NCF model only supports float32 for now.')

    users, items = inputs
    params = {
        'num_users': _NUM_USERS_20M,
        'num_items': _NUM_ITEMS_20M,
        'model_layers': (256, 256, 128, 64),
        'mf_dim': 64,
        'mf_regularization': 0,
        'mlp_reg_layers': (0, 0, 0, 0),
    }
    logits = neumf_model.construct_model(users, items, params)
    return model.BuildNetworkResult(logits=logits, extra_info=None)
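
The _NUM_USERS_20M and _NUM_ITEMS_20M constants used in these NCF examples are module-level values defined elsewhere in the file. An illustrative definition, assuming the MovieLens 20M (ml-20m) dataset dimensions that the official NCF model targets:

# Assumed values: the user/item vocabulary sizes of the ml-20m dataset.
_NUM_USERS_20M = 138493
_NUM_ITEMS_20M = 26744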
Example #3
  def build_network(self, inputs, phase_train=True, nclass=29):
    """Builds the forward pass of the deepspeech2 model.

    Args:
      inputs: The input list of the model.
      phase_train: True during training. False during evaluation.
      nclass: Number of classes that the input spectrogram can belong to.

    Returns:
      A BuildNetworkResult which contains the logits and model-specific extra
        information.
    """
    inputs = inputs[0]  # Get the spectrogram feature.

    # Two cnn layers.
    inputs = self._conv_bn_layer(
        inputs,
        padding=(20, 5),
        filters=DeepSpeech2Model.CONV_FILTERS,
        kernel_size=(41, 11),
        strides=(2, 2),
        layer_id=1,
        training=phase_train)

    inputs = self._conv_bn_layer(
        inputs,
        padding=(10, 5),
        filters=DeepSpeech2Model.CONV_FILTERS,
        kernel_size=(21, 11),
        strides=(2, 1),
        layer_id=2,
        training=phase_train)

    # output of conv_layer2 with the shape of
    # [batch_size (N), times (T), features (F), channels (C)].
    # Convert the conv output to rnn input.

    # batch_size = tf.shape(inputs)[0]
    feat_size = inputs.get_shape().as_list()[2]
    inputs = tf.reshape(
        inputs,
        [self.batch_size, -1, feat_size * DeepSpeech2Model.CONV_FILTERS])

    # RNN layers.
    rnn_cell = DeepSpeech2Model.SUPPORTED_RNNS[self.rnn_type]
    for layer_counter in range(self.num_rnn_layers):
      # No batch normalization on the first layer.
      use_batch_norm = (layer_counter != 0)
      inputs = self._rnn_layer(inputs, rnn_cell, self.rnn_hidden_size,
                               layer_counter + 1, use_batch_norm,
                               self.is_bidirectional, phase_train)

    # FC layer with batch norm.
    inputs = self._batch_norm(inputs, phase_train)
    logits = tf.layers.dense(inputs, nclass, use_bias=self.use_bias)

    return model_lib.BuildNetworkResult(logits=logits, extra_info=None)
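
The _conv_bn_layer helper called twice above belongs to the surrounding DeepSpeech2Model class and is not reproduced here. A rough sketch of such a helper, assuming TF1-style layers (explicit symmetric padding, a bias-free conv2d, then the class's _batch_norm helper):

  def _conv_bn_layer(self, inputs, padding, filters, kernel_size, strides,
                     layer_id, training):
    """Pads the input, applies a 2D convolution, then batch normalization."""
    # Symmetric zero padding along the time (T) and feature (F) dimensions.
    inputs = tf.pad(
        inputs,
        [[0, 0], [padding[0], padding[0]], [padding[1], padding[1]], [0, 0]])
    inputs = tf.layers.conv2d(
        inputs=inputs, filters=filters, kernel_size=kernel_size,
        strides=strides, padding='valid', use_bias=False,
        activation=tf.nn.relu6, name='cnn_{}'.format(layer_id))
    return self._batch_norm(inputs, training)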
Example #4
    def build_network(self,
                      images,
                      phase_train=True,
                      nclass=1001,
                      data_type=tf.float32):
        try:
            from official.recommendation import neumf_model  # pylint: disable=g-import-not-at-top
        except ImportError:
            raise ImportError(
                'To use the experimental NCF model, you must clone the '
                'repo https://github.com/tensorflow/models and add '
                'tensorflow/models to the PYTHONPATH.')
        del nclass
        if data_type != tf.float32:
            raise ValueError('NCF model only supports float32 for now.')
        batch_size = int(images.shape[0])

        # Create synthetic users and items. tf_cnn_benchmarks only passes images to
        # this function, which we cannot use in the NCF model. We use functions as
        # initializers for XLA compatibility.
        def users_init_val():
            return tf.random_uniform((batch_size, ),
                                     minval=0,
                                     maxval=_NUM_USERS_20M,
                                     dtype=tf.int32)

        users = tf.Variable(users_init_val,
                            dtype=tf.int32,
                            trainable=False,
                            collections=[tf.GraphKeys.LOCAL_VARIABLES],
                            name='synthetic_users')

        def items_init_val():
            return tf.random_uniform((batch_size, ),
                                     minval=0,
                                     maxval=_NUM_ITEMS_20M,
                                     dtype=tf.int32)

        items = tf.Variable(items_init_val,
                            dtype=tf.int32,
                            trainable=False,
                            collections=[tf.GraphKeys.LOCAL_VARIABLES],
                            name='synthetic_items')

        params = {
            'num_users': _NUM_USERS_20M,
            'num_items': _NUM_ITEMS_20M,
            'model_layers': (256, 256, 128, 64),
            'mf_dim': 64,
            'mf_regularization': 0,
            'mlp_reg_layers': (0, 0, 0, 0),
        }
        logits = neumf_model.construct_model(users, items, params)
        return model.BuildNetworkResult(logits=logits, extra_info=None)
Example #5
  def manually_compute_losses(self, inputs, num_workers, params):
    with tf.Graph().as_default(), tf.device('/cpu:0'):
      a = tf.Variable(self.VAR_A_INITIAL_VALUE, name='A')
      b = tf.Variable(self.VAR_B_INITIAL_VALUE, name='B')
      inputs_placeholder = tf.placeholder(tf.float32, (None, 1, 1, 1),
                                          name='inputs_placeholder')
      inputs_reshaped = tf.reshape(inputs_placeholder, (-1, 1))
      loss = self.loss_function(
          None,
          model.BuildNetworkResult(logits=inputs_reshaped * a * b,
                                   extra_info=None))
      return manually_compute_losses(inputs, inputs_placeholder, loss,
                                     num_workers, params)
Example #6
  def build_network(self, images, phase_train=True, nclass=1001,
                    data_type=tf.float32):
    # pylint: disable=g-import-not-at-top
    try:
      from official.resnet.r1.imagenet_main import ImagenetModel
    except ImportError:
      tf.logging.fatal('Please add tensorflow/models to the PYTHONPATH.')
      raise
    images = tf.cast(images, data_type)
    model_class = ImagenetModel(resnet_size=self.resnet_size,
                                resnet_version=self.version,
                                # The official model dtype seems to be ignored,
                                # as the dtype it uses is the dtype of the input
                                # images. Doesn't hurt to set it though.
                                dtype=data_type)
    logits = model_class(images, phase_train)
    logits = tf.cast(logits, tf.float32)
    return model_lib.BuildNetworkResult(logits=logits, extra_info=None)