def _create_strategy_and_mid_level(self, optimizer_name):
        strategy = self._get_strategy()

        with strategy.scope():
            if optimizer_name == 'sgd':
                optimizer = tpu_embedding_v2_utils.SGD(learning_rate=0.1)
            elif optimizer_name == 'adagrad':
                optimizer = tpu_embedding_v2_utils.Adagrad(learning_rate=0.1)
            elif optimizer_name == 'adam':
                optimizer = tpu_embedding_v2_utils.Adam(learning_rate=0.1)
            elif optimizer_name == 'ftrl':
                optimizer = tpu_embedding_v2_utils.FTRL(learning_rate=0.1)
            elif optimizer_name == 'adagrad_momentum':
                optimizer = tpu_embedding_v2_utils.AdagradMomentum(
                    learning_rate=0.1,
                    momentum=0.9,
                    use_nesterov=True,
                    exponent=3.0,
                    epsilon=0.1,
                    beta2=0.9)
            else:
                raise ValueError(
                    'optimizer is not recognized: {}'.format(optimizer_name))
            mid_level_api = self._create_mid_level(optimizer=optimizer)

        return strategy, mid_level_api, optimizer
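
These snippets come from TensorFlow's TPU embedding tests and assume a set of internal imports plus two helpers, _get_strategy and _create_mid_level, that the page does not show. Below is a minimal sketch of that assumed context, using public TPU APIs where possible; the resolver address, table sizes, and names are hypothetical placeholders, not the fixtures the real tests use.

import functools

import numpy as np
import tensorflow as tf

from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework.tensor_shape import TensorShape
from tensorflow.python.ops import init_ops_v2
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.tpu import tpu_embedding_v2
from tensorflow.python.tpu import tpu_embedding_v2_utils


def _get_strategy():
    # Hypothetical: connect to a TPU and build a TPUStrategy. 'local' is a
    # placeholder resolver address.
    resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='local')
    tf.config.experimental_connect_to_cluster(resolver)
    tf.tpu.experimental.initialize_tpu_system(resolver)
    return tf.distribute.TPUStrategy(resolver)


def _create_mid_level(optimizer):
    # Hypothetical single-table setup; the real tests build video/user
    # tables from fixture data.
    table = tpu_embedding_v2_utils.TableConfig(
        vocabulary_size=100, dim=8,
        initializer=init_ops_v2.Zeros(), name='table')
    feature_config = (
        tpu_embedding_v2_utils.FeatureConfig(table=table, name='feature'),)
    return tpu_embedding_v2.TPUEmbedding(
        feature_config=feature_config, optimizer=optimizer)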
 def test_optimizer_with_slot_creation_fn_non_partial(self):
   def slot_creation_fn(table, slot_names, _):
     slots = {}
     for slot in slot_names:
       # Note that we don't pass functools.partial here, so on TPU we can't
       # extract the shape. We expect the error below.
       slots[slot] = tf_variables.Variable(
           name='{}_{}'.format(table.name, slot),
           initial_value=init_ops_v2.Zeros()(shape=table.shape,
                                             dtype=dtypes.float32),
           trainable=False)
     return slots
   optimizer = tpu_embedding_v2_utils.Adagrad(
       learning_rate=0.1,
       slot_variable_creation_fn=slot_creation_fn)
   strategy = self._get_strategy()
   with strategy.scope():
     mid_level_api = tpu_embedding_v2.TPUEmbedding(
         feature_config=self.feature_config,
         optimizer=optimizer)
     with self.assertRaisesRegex(ValueError,
                                 'Unable to extract initializer function'):
       # We aren't going to actually run anything, so the batch_size here does
       # not matter.
       mid_level_api.build(self.batch_size)
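
For contrast, the variant that does work on TPU wraps the initializer in functools.partial instead of materializing a tensor eagerly, so TPUEmbedding can recover the callable and re-invoke it with per-shard shapes. This mirrors the parameterized test further down the page.

def slot_creation_fn(table, slot_names, _):
  slots = {}
  for slot in slot_names:
    # The partial keeps the initializer recoverable, so the shape can be
    # supplied later for each TPU shard.
    slots[slot] = tf_variables.Variable(
        name='{}_{}'.format(table.name, slot),
        initial_value=functools.partial(
            init_ops_v2.Zeros(), shape=table.shape, dtype=dtypes.float32),
        trainable=False)
  return slots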
def create_strategy_and_mid_level(optimizer_name):
    strategy = get_strategy()
    with strategy.scope():
        if optimizer_name == 'sgd':
            optimizer = tpu_embedding_v2_utils.SGD(learning_rate=0.1)
        elif optimizer_name == 'adagrad':
            optimizer = tpu_embedding_v2_utils.Adagrad(learning_rate=0.1)
        elif optimizer_name == 'adam':
            optimizer = tpu_embedding_v2_utils.Adam(learning_rate=0.1)
        else:
            raise ValueError(
                'optimizer is not recognized: {}'.format(optimizer_name))
        embedding = create_mid_level(optimizer=optimizer)
    return strategy, embedding, optimizer
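
A minimal driver for the module-level helper above, assuming get_strategy and create_mid_level are defined along the lines of the _get_strategy/_create_mid_level sketch after the first example:

strategy, embedding, optimizer = create_strategy_and_mid_level('adagrad')
print(strategy.num_replicas_in_sync)  # replica count of the TPU topology
print(type(embedding).__name__)       # TPUEmbedding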
 def tpu_embedding_config():
   # Relies on `table_data` (a sequence of (dim, vocab, name) tuples) and
   # `strategy` from the enclosing test scope; `TensorShape` is
   # tensorflow.python.framework.tensor_shape.TensorShape.
   feature_configs = []
   for dim, vocab, name in table_data:
     feature_configs.append(tpu_embedding_v2_utils.FeatureConfig(
         table=tpu_embedding_v2_utils.TableConfig(
             vocabulary_size=int(vocab), dim=int(dim),
             initializer=init_ops_v2.Zeros(), name=name)))
   optimizer = tpu_embedding_v2_utils.Adagrad(
       learning_rate=0.1)
   with strategy.scope():
     mid_level_api = tpu_embedding_v2.TPUEmbedding(
         feature_config=feature_configs,
         optimizer=optimizer)
   mid_level_api._output_shapes = [TensorShape(128)] * len(feature_configs)
   return mid_level_api._create_config_proto()
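
tpu_embedding_config closes over table_data and strategy from its enclosing test and returns the low-level configuration proto that describes the tables to the TPU system. A hypothetical fixture consistent with the `for dim, vocab, name in table_data` unpacking; the values are placeholders:

# Placeholder (dim, vocab, name) triples, kept as strings only because the
# function above casts them with int(...).
table_data = [
    ('8', '100', 'video'),
    ('16', '1000', 'user'),
]
strategy = _get_strategy()  # e.g. as sketched after the first example

print(tpu_embedding_config())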
  def _create_strategy_and_mid_level(self, optimizer_name):
    strategy = self._get_strategy()

    with strategy.scope():
      if optimizer_name == 'sgd':
        optimizer = tpu_embedding_v2_utils.SGD(learning_rate=0.1)
      elif optimizer_name == 'adagrad':
        optimizer = tpu_embedding_v2_utils.Adagrad(learning_rate=0.1)
      elif optimizer_name == 'adam':
        optimizer = tpu_embedding_v2_utils.Adam(learning_rate=0.1)
      elif optimizer_name == 'ftrl':
        optimizer = tpu_embedding_v2_utils.FTRL(learning_rate=0.1)
      else:
        raise ValueError(
            'optimizer is not recognized: {}'.format(optimizer_name))
      mid_level_api = self._create_mid_level(optimizer=optimizer)

    return strategy, mid_level_api, optimizer
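
Once a strategy and mid-level API exist, a training step enqueues the embedding features, dequeues activations inside the replica function, and sends gradients back through apply_gradients. A rough sketch loosely following the pattern in the TPUEmbedding docstring; model and the iterator contents are placeholders:

@tf.function
def training_step(dataset_iterator):

  def tpu_step(tpu_features):
    with tf.GradientTape() as tape:
      activations = mid_level_api.dequeue()
      tape.watch(activations)
      loss = model(activations, tpu_features)  # placeholder model
    gradients = tape.gradient(loss, activations)
    # Route the embedding gradients back to the TPU embedding tables.
    mid_level_api.apply_gradients(gradients)

  embedding_features, tpu_features = next(dataset_iterator)
  mid_level_api.enqueue(embedding_features, training=True)
  strategy.run(tpu_step, args=(tpu_features,))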
  def test_optimizer_with_slot_creation_fn(self, use_tpu):
    def slot_creation_fn(table, slot_names, _):
      slots = {}
      for slot in slot_names:
        slots[slot] = tf_variables.Variable(
            name='{}_{}'.format(table.name, slot),
            initial_value=functools.partial(
                init_ops_v2.Zeros(), shape=table.shape, dtype=dtypes.float32),
            trainable=False)
      return slots
    optimizer = tpu_embedding_v2_utils.Adagrad(
        learning_rate=0.1,
        slot_variable_creation_fn=slot_creation_fn)
    if use_tpu:
      strategy = self._get_strategy()
    else:
      strategy = distribution_strategy_context.get_strategy()
    with strategy.scope():
      mid_level = tpu_embedding_v2.TPUEmbedding(
          feature_config=self.feature_config,
          optimizer=optimizer)
      # We aren't going to actually run anything, so the batch_size here does
      # not matter.
      mid_level.build(self.batch_size)
    video_accumulator = mid_level._variables['video']['accumulators']
    user_accumulator = mid_level._variables['user']['accumulators']
    if use_tpu:
      # To check the table contents (ensure that it is zero rather than the
      # normal initial accumulator value specified in the optimizer config),
      # we need to select the underlying table variable on TPU.
      # We only have one shard on Forge.
      video_accumulator = video_accumulator.variables[0]
      user_accumulator = user_accumulator.variables[0]

    self.assertAllClose(video_accumulator.numpy(),
                        np.zeros((self.table_video.vocabulary_size,
                                  self.table_video.dim)))
    self.assertAllClose(user_accumulator.numpy(),
                        np.zeros((self.table_user.vocabulary_size,
                                  self.table_user.dim)))
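
The use_tpu argument implies this test is parameterized over a TPU strategy and the default strategy. A hypothetical harness using absl's parameterized, which TensorFlow's test suites commonly rely on; the class name is illustrative:

from absl.testing import parameterized
from tensorflow.python.platform import test


class TPUEmbeddingOptimizerTest(parameterized.TestCase, test.TestCase):

  @parameterized.parameters(True, False)
  def test_optimizer_with_slot_creation_fn(self, use_tpu):
    ...  # body as shown above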
    def _create_strategy_and_mid_level(self, optimizer_name):
        strategy = self._get_strategy()

        # Keras optimizers have to be translated into embedding optimizers,
        # with the slot variable creation function properly populated.
        with strategy.scope():
            if optimizer_name == 'sgd':
                optimizer = optimizer_v2.gradient_descent.SGD(
                    learning_rate=0.1)
                embedding_optimizer = tpu_embedding_v2_utils.SGD(
                    learning_rate=0.1)
            elif optimizer_name == 'adagrad':
                optimizer = optimizer_v2.adagrad.Adagrad(learning_rate=0.1)
                embedding_optimizer = tpu_embedding_v2_utils.Adagrad(
                    learning_rate=0.1,
                    slot_variable_creation_fn=(
                        self._get_slot_variable_creation_fn(optimizer)))
            elif optimizer_name == 'adam':
                optimizer = optimizer_v2.adam.Adam(learning_rate=0.1)
                embedding_optimizer = tpu_embedding_v2_utils.Adam(
                    learning_rate=0.1,
                    slot_variable_creation_fn=(
                        self._get_slot_variable_creation_fn(optimizer)))
            elif optimizer_name == 'ftrl':
                optimizer = optimizer_v2.ftrl.Ftrl(learning_rate=0.1)
                embedding_optimizer = tpu_embedding_v2_utils.FTRL(
                    learning_rate=0.1,
                    slot_variable_creation_fn=(
                        self._get_slot_variable_creation_fn(optimizer)))
            else:
                raise ValueError(
                    'optimizer is not recognized: {}'.format(optimizer_name))

            mid_level_api = self._create_mid_level(
                optimizer=embedding_optimizer)

        return strategy, mid_level_api, optimizer
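
_get_slot_variable_creation_fn is referenced above but not shown. A plausible sketch, assuming it returns a closure that delegates slot creation to the Keras optimizer's add_slot so the Keras optimizer tracks the same variables the embedding layer uses; the three-argument signature follows the slot_variable_creation_fn contract that also passes initializers:

    def _get_slot_variable_creation_fn(self, optimizer):
        # Hypothetical sketch: `table` is the table variable and
        # `slot_initializers` lines up with `slot_names`.
        def slot_variable_creation_fn(table, slot_names, slot_initializers):
            slots = {}
            for slot, initializer in zip(slot_names, slot_initializers):
                slots[slot] = optimizer.add_slot(table, slot, initializer)
            return slots

        return slot_variable_creation_fn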
 def test_optimizer_with_slot_creation_fn_non_partial(self):
   def slot_creation_fn(table, slot_names):
     slots = {}
     for slot in slot_names:
       # Note that we don't pass functools.partial here, so on TPU we can't
       # extract the shape. We expect the error below.
       slots[slot] = tf_variables.Variable(
           name='{}_{}'.format(table.name, slot),
           initial_value=init_ops_v2.Zeros()(shape=table.shape,
                                             dtype=dtypes.float32),
           trainable=False)
     return slots
   optimizer = tpu_embedding_v2_utils.Adagrad(
       learning_rate=0.1,
       slot_variable_creation_fn=slot_creation_fn)
   strategy = self._get_strategy()
   num_replicas = strategy.num_replicas_in_sync
   with strategy.scope():
     with self.assertRaisesRegex(ValueError,
                                 'Unable to extract initializer function'):
       tpu_embedding_v2.TPUEmbedding(
           feature_config=self.feature_config,
           batch_size=self.batch_size*num_replicas,
           optimizer=optimizer)