def model_fn(features, labels, mode, params, config):
    """Estimator model_fn for a duration-weighted binary classifier.

    Builds the feature-processing graph, a single network tower and a
    sigmoid output head, then returns the EstimatorSpec appropriate for
    `mode` (predict / eval / train).

    Args:
        features: dict of input tensors (includes varlen sparse features).
        labels: a `(duration, labels)` pair outside PREDICT mode.
        mode: one of `estimator.ModeKeys`.
        params: unused here; part of the Estimator contract.
        config: unused here; part of the Estimator contract.

    Returns:
        `estimator.EstimatorSpec` for the requested mode.
    """
    # `True if cond else False` is redundant; the comparison is already bool.
    is_training = mode == estimator.ModeKeys.TRAIN
    if mode != estimator.ModeKeys.PREDICT:
        # Training/eval inputs carry sequence weights and a packed
        # (duration, labels) pair; PREDICT inputs do not.
        features = self._parse_sequence_weight(features)
        features = self.sparse2dense(features,
                                     self._dataset.varlen_list)
        duration, labels = labels
    features = self._dense2sparse(features, self._dataset.varlen_list)
    network = self._Network(self._flags, self._dataset, 'network')
    dense, embeddings = network.build_features(features)
    network_out = network(dense, embeddings, is_training)
    predictions = tf.keras.layers.Dense(1,
                                        activation=tf.sigmoid,
                                        name='output')(network_out)
    if mode == estimator.ModeKeys.PREDICT:
        outputs = self._build_predict_outputs(features, predictions)
        return estimator.EstimatorSpec(mode, predictions=outputs)
    # Duration-aware metrics and per-example loss weights.
    metrics = self._build_duration_metrics(duration, labels,
                                           predictions)
    weights = self._build_weights(duration, labels)
    loss = self._build_loss(labels, predictions, weights=weights)
    self._build_summary(loss, metrics)
    if mode == estimator.ModeKeys.EVAL:
        return estimator.EstimatorSpec(mode,
                                       loss=loss,
                                       eval_metric_ops=metrics)
    assert mode == estimator.ModeKeys.TRAIN
    train_op = self._build_train_op(loss)
    return estimator.EstimatorSpec(mode=mode,
                                   loss=loss,
                                   train_op=train_op)
示例#2
0
 def model_fn(features, labels, mode):
     """TF1-style model_fn exercising TRAIN and EVAL paths.

     TRAIN: increments the global step (with a control dependency on
     `features` so the input pipeline is consumed) and then assigns
     `w = step + 2` as the train_op.
     EVAL: returns a constant loss and the mean of `w` as a metric.
     """
     _ = labels
     step = training.get_global_step()
     w = variable_scope.get_variable(
         'w',
         shape=[],
         initializer=init_ops.zeros_initializer(),
         dtype=dtypes.int64)
     if estimator_lib.ModeKeys.TRAIN == mode:
         # Control dependency on `features` so the input is consumed.
         with ops.control_dependencies([features]):
             step_inc = state_ops.assign_add(training.get_global_step(),
                                             1)
         # Assign only after the increment has run, so `w` reflects the
         # already-updated step.
         with ops.control_dependencies([step_inc]):
             assign_w_to_step_plus_2 = w.assign(step + 2)
         return estimator_lib.EstimatorSpec(
             mode,
             loss=constant_op.constant(3.),
             train_op=assign_w_to_step_plus_2)
     if estimator_lib.ModeKeys.EVAL == mode:
         # Control dependency on `features` so the input is consumed.
         with ops.control_dependencies([features]):
             loss = constant_op.constant(5.)
         return estimator_lib.EstimatorSpec(
             mode,
             loss=loss,
             # w is constant in each step, so the mean.
             # w = 0 if step==0 else step+2
             eval_metric_ops={'mean_of_const': metrics_lib.mean(w)})
示例#3
0
 def model_fn(features, labels, mode):
     """TF2-compat variant of the control-dependency test model_fn.

     TRAIN: bumps the global step (depending on `features` so the input
     is consumed), then assigns `w = step + 2` as the train_op.
     EVAL: constant loss plus the mean of `w` via a Keras Mean metric.
     """
     _ = labels
     step = tf.compat.v1.train.get_global_step()
     w = tf.compat.v1.get_variable(
         'w',
         shape=[],
         initializer=tf.compat.v1.initializers.zeros(),
         dtype=tf.dtypes.int64)
     if estimator_lib.ModeKeys.TRAIN == mode:
         # Control dependency on `features` so the input is consumed.
         with tf.control_dependencies([features]):
             step_inc = tf.compat.v1.assign_add(
                 tf.compat.v1.train.get_global_step(), 1)
         # Assign only after the increment, so `w` sees the updated step.
         with tf.control_dependencies([step_inc]):
             assign_w_to_step_plus_2 = w.assign(step + 2)
         return estimator_lib.EstimatorSpec(
             mode,
             loss=tf.constant(3.),
             train_op=assign_w_to_step_plus_2)
     if estimator_lib.ModeKeys.EVAL == mode:
         # Control dependency on `features` so the input is consumed.
         with tf.control_dependencies([features]):
             loss = tf.constant(5.)
         mean = metrics_module.Mean()
         mean.update_state(w)
         return estimator_lib.EstimatorSpec(
             mode,
             loss=loss,
             # w is constant in each step, so the mean.
             # w = 0 if step==0 else step+2
             eval_metric_ops={'mean_of_const': mean})
 def model_fn(features, labels, mode, params, config):
     """Estimator model_fn for a non-negative regression head.

     Builds the shared feature graph and a linear output clamped at
     zero, then returns the EstimatorSpec for the requested mode.
     PREDICT exposes both the raw prediction and its `expm1`
     (presumably the label was log1p-transformed upstream -- inferred
     from the use of tf.expm1; confirm against the input pipeline).
     """
     # `True if cond else False` is redundant; the comparison is already bool.
     is_training = mode == estimator.ModeKeys.TRAIN
     if mode != estimator.ModeKeys.PREDICT:
         features = self._parse_sequence_weight(features)
         features = self.sparse2dense(features,
                                      self._dataset.varlen_list)
     features = self._dense2sparse(features, self._dataset.varlen_list)
     network = self._Network(self._flags, self._dataset, 'network')
     dense, embeddings = network.build_features(features)
     network_out = network(dense, embeddings, is_training)
     # Clamp at zero so the regression output is never negative.
     predictions = tf.maximum(
         tf.keras.layers.Dense(1, activation=None,
                               name='output')(network_out), 0.)
     if mode == estimator.ModeKeys.PREDICT:
         outputs = {
             "predictions": predictions,
             self._flags.label_key: tf.expm1(predictions)
         }
         self._output_cols = list(outputs.keys())
         return estimator.EstimatorSpec(mode, predictions=outputs)
     loss = self._build_regression_loss(labels, predictions)
     metrics = self._build_regression_metrics(loss, labels, predictions,
                                              self._flags.label_key)
     self._build_summary(loss, metrics)
     if mode == estimator.ModeKeys.EVAL:
         return estimator.EstimatorSpec(mode,
                                        loss=loss,
                                        eval_metric_ops=metrics)
     assert mode == estimator.ModeKeys.TRAIN
     train_op = self._build_train_op(loss)
     return estimator.EstimatorSpec(mode=mode,
                                    loss=loss,
                                    train_op=train_op)
示例#5
0
 def model_fn(features, labels, mode, params, config):
     """Estimator model_fn for a two-tower PAL-style model.

     A prediction tower scores the right-hand features. During
     training/eval a bias tower scores the left-hand features and the
     two sigmoid outputs are multiplied; PREDICT serves the prediction
     tower's score alone.
     """
     # `True if cond else False` is redundant; the comparison is already bool.
     is_training = mode == estimator.ModeKeys.TRAIN
     if mode != estimator.ModeKeys.PREDICT:
         features = self.sparse2dense(features, self._dataset.varlen_list)
     features = self._dense2sparse(features, self._dataset.varlen_list)
     self.right_features = self._get_features_by_index(features, 1)
     predict_tower = self._Network(self._flags, self._dataset, 'predict')
     p_dense, p_embeddings = predict_tower.build_features(self.right_features)
     p_tower_out = predict_tower(p_dense, p_embeddings, is_training)
     # PEP8: no spaces around `=` in keyword arguments.
     p_out = tf.keras.layers.Dense(1, activation=tf.sigmoid,
                                   name='p_out')(p_tower_out)
     if mode == estimator.ModeKeys.PREDICT:
         # Serving needs only the prediction tower's score.
         outputs = self._build_predict_outputs(features, p_out)
         return estimator.EstimatorSpec(
             mode,
             predictions=outputs
         )
     self.left_features = self._get_features_by_index(features, 0)
     bias_net = network_factory(self._flags.pal_submodel)
     bias_tower = bias_net(self._flags, self._dataset, 'bias',
                           hidden=self._flags.pal_bias)
     b_dense, b_embeddings = bias_tower.build_features(self.left_features)
     b_tower_out = bias_tower(b_dense, b_embeddings, is_training)
     b_out = tf.keras.layers.Dense(1, activation=tf.sigmoid,
                                   name='b_out')(b_tower_out)
     # Training-time probability is the product of bias and prediction.
     predictions = tf.multiply(b_out, p_out, name=self.predictions)
     metrics = self._build_metrics(labels, predictions)
     loss = self._build_loss(labels, predictions)
     self._build_summary(loss, metrics)
     if mode == estimator.ModeKeys.EVAL:
         return estimator.EstimatorSpec(mode, loss=loss, eval_metric_ops=metrics)
     assert mode == estimator.ModeKeys.TRAIN
     train_op = self._build_train_op(loss)
     return estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
示例#6
0
 def model_fn(features, labels, mode):
   """Minimal model_fn: constant losses, features consumed via a dep."""
   del labels  # unused
   if mode == estimator_lib.ModeKeys.TRAIN:
     # Depend on `features` so the input pipeline is actually consumed.
     with ops.control_dependencies([features]):
       increment_step = state_ops.assign_add(training.get_global_step(), 1)
     return estimator_lib.EstimatorSpec(
         mode,
         loss=constant_op.constant(3.),
         train_op=increment_step)
   if mode == estimator_lib.ModeKeys.EVAL:
     metrics = {'mean_of_features': metrics_lib.mean(features)}
     return estimator_lib.EstimatorSpec(
         mode, loss=constant_op.constant(5.), eval_metric_ops=metrics)
示例#7
0
 def model_fn(features, labels, mode, params, config):
     """Two-tower regression model_fn that also exports tower embeddings.

     Left/right towers embed their feature slices; training regresses on
     the combined network output, while PREDICT exports the right-side
     embedding with an extra bias column appended.
     """
     is_training = True if mode == estimator.ModeKeys.TRAIN else False
     if mode != estimator.ModeKeys.PREDICT:
         features = self.sparse2dense(features,
                                      self._dataset.varlen_list)
     features = self._dense2sparse(features, self._dataset.varlen_list)
     self.left_features = self._get_features_by_index(features, 0)
     self.right_features = self._get_features_by_index(features, 1)
     left_network = self._Network(self._flags, self._dataset, 'left')
     right_network = self._Network(self._flags, self._dataset, 'right')
     u_dense, u_embeddings = left_network.build_features(
         self.left_features)
     v_dense, v_embeddings = right_network.build_features(
         self.right_features)
     # Mean-pooled per-tower embeddings; used only by the PREDICT exports.
     left_embedding = tf.reduce_mean(tf.concat(u_embeddings, 1), 1)
     right_embedding = tf.reduce_mean(tf.concat(v_embeddings, 1), 1)
     # NOTE(review): `u_embeddings` is list-like (it feeds tf.concat
     # above), so `+` concatenates the towers' feature lists; confirm
     # u_dense/v_dense are lists too, otherwise this is tensor addition.
     dense = u_dense + v_dense
     embeddings = u_embeddings + v_embeddings
     network_out = left_network(dense, embeddings, is_training)
     predictions = tf.keras.layers.Dense(
         1, activation=None, name='predictions')(network_out)
     predictions = tf.identity(predictions, name=self.predictions)
     if mode == estimator.ModeKeys.PREDICT:
         # Append a constant-1 bias column (mean of ones) to the left side.
         left_bias = tf.reduce_mean(tf.ones_like(left_embedding),
                                    -1,
                                    keepdims=True)
         left_embedding = tf.concat((left_embedding, left_bias),
                                    -1,
                                    name=self.left_output)
         # Right bias: mean of the right tower's own (non-training) output.
         right_bias = tf.reduce_mean(right_network(
             v_dense, v_embeddings, False),
                                     -1,
                                     keepdims=True)
         right_embedding = tf.concat((right_embedding, right_bias),
                                     -1,
                                     name=self.right_output)
         outputs = self._build_predict_outputs(features,
                                               right_embedding)
         return estimator.EstimatorSpec(mode, predictions=outputs)
     loss = self._build_regression_loss(labels, predictions)
     metrics = self._build_regression_metrics(loss, labels, predictions,
                                              'duration')
     self._build_summary(loss, metrics)
     if mode == estimator.ModeKeys.EVAL:
         return estimator.EstimatorSpec(mode,
                                        loss=loss,
                                        eval_metric_ops=metrics)
     assert mode == estimator.ModeKeys.TRAIN
     train_op = self._build_train_op(loss)
     return estimator.EstimatorSpec(mode=mode,
                                    loss=loss,
                                    train_op=train_op)
示例#8
0
        def model_fn(features, labels, mode, params, config):
            """Retrieval-style model_fn with an NCE-trained softmax head.

            A single tower maps features to a user embedding. Training
            uses NCE loss against the item weight matrix; eval scores
            logits over all items; PREDICT returns the user embedding.
            """
            is_training = True if mode == estimator.ModeKeys.TRAIN else False
            if mode != estimator.ModeKeys.PREDICT:
                features = self.sparse2dense(features,
                                             self._dataset.varlen_list)
            features = self._dense2sparse(features, self._dataset.varlen_list)

            # Network
            network = self._Network(self._flags, self._dataset,
                                    'dnn')  # construct the network object
            dense_feat, embedding_feat = network.build_features(
                features)  # embed features, [batch_size, length]
            tf.logging.debug("feature dense: %r" % dense_feat)
            tf.logging.debug("feature embed: %r" % embedding_feat)
            network_out = network(dense_feat, embedding_feat,
                                  is_training)  # model output
            tf.logging.debug("network out: %r" % network_out.shape)

            # user embedding
            user_embedding = tf.keras.layers.Dense(
                self._flags.vector_dim)(network_out)
            tf.logging.debug("user embedding: %r" % user_embedding.shape)

            if mode == estimator.ModeKeys.PREDICT:
                predictions = {"user_embedding": user_embedding}
                return estimator.EstimatorSpec(mode, predictions=predictions)

            # Softmax layer
            nce_weights = tf.Variable(tf.truncated_normal(
                [self.item_num, self._flags.vector_dim],
                stddev=1.0 / tf.math.sqrt(float(self._flags.vector_dim))),
                                      name="nce_weights")
            logits = tf.matmul(user_embedding, tf.transpose(
                nce_weights))  # scores over all items, [batch_size, item_num]
            labels = tf.reshape(
                labels, [-1, self.label_num])  # [batch_size, label_num]

            # NOTE(review): this branch mixes `estimator.ModeKeys` with
            # `tf.estimator.ModeKeys` -- presumably the same module; confirm.
            if mode == tf.estimator.ModeKeys.EVAL:
                eval_loss = self.build_eval_loss(logits, labels)
                metrics = self.build_metrics(logits, labels)
                return tf.estimator.EstimatorSpec(mode,
                                                  loss=eval_loss,
                                                  eval_metric_ops=metrics)

            assert mode == estimator.ModeKeys.TRAIN
            loss = self.build_loss(user_embedding, labels,
                                   nce_weights)  # training uses NCE loss
            train_op = self._build_train_op(loss)
            return estimator.EstimatorSpec(mode=mode,
                                           loss=loss,
                                           train_op=train_op)
示例#9
0
 def model_fn(features, labels, mode, params, config):
     """DSSM-style two-tower model_fn.

     User and item towers produce L2-normalized vectors; the prediction
     is their cosine similarity rescaled from [-1, 1] to [0, 1].
     PREDICT can export item vectors, user vectors, or scores,
     controlled by flags.
     """
     is_training = True if mode == estimator.ModeKeys.TRAIN else False
     if mode != estimator.ModeKeys.PREDICT:
         features = self.sparse2dense(features,
                                      self._dataset.varlen_list)
     features = self._dense2sparse(features, self._dataset.varlen_list)
     # Index 0 feeds the user tower, index 1 the item tower.
     self.left_features = self._get_features_by_index(features, 0)
     self.right_features = self._get_features_by_index(features, 1)
     user_tower = self._Network(self._flags, self._dataset, 'user')
     item_tower = self._Network(self._flags, self._dataset, 'item')
     u_dense, u_embeddings = user_tower.build_features(
         self.left_features)
     v_dense, v_embeddings = item_tower.build_features(
         self.right_features)
     u_tower_out = user_tower(u_dense, u_embeddings, is_training)
     v_tower_out = item_tower(v_dense, v_embeddings, is_training)
     u_vector = tf.nn.l2_normalize(tf.keras.layers.Dense(
         self._flags.vector_dim)(u_tower_out),
                                   axis=-1,
                                   name=self.left_output)
     v_vector = tf.nn.l2_normalize(tf.keras.layers.Dense(
         self._flags.vector_dim)(v_tower_out),
                                   axis=-1,
                                   name=self.right_output)
     # Cosine similarity lies in [-1, 1]; rescale it to [0, 1].
     # NOTE(review): `keep_dims` is the deprecated TF1 spelling of
     # `keepdims` -- fine under TF1, fails under TF2.
     predictions = tf.divide(1. + tf.reduce_sum(
         tf.multiply(u_vector, v_vector), axis=-1, keep_dims=True),
                             2.,
                             name=self.predictions)
     if mode == estimator.ModeKeys.PREDICT:
         # Flag-selected export: item vectors, user vectors, or scores.
         if self._flags.predict_with_emb:
             outputs = self._build_predict_outputs(features, v_vector)
         elif self._flags.predict_with_user:
             outputs = self._build_predict_outputs_user(
                 features, u_vector)
         else:
             outputs = super(DssmModel, self)._build_predict_outputs(
                 features, predictions)
         return estimator.EstimatorSpec(mode, predictions=outputs)
     metrics = self._build_metrics(labels, predictions)
     loss = self._build_loss(labels, predictions)
     self._build_summary(loss, metrics)
     if mode == estimator.ModeKeys.EVAL:
         return estimator.EstimatorSpec(mode,
                                        loss=loss,
                                        eval_metric_ops=metrics)
     assert mode == estimator.ModeKeys.TRAIN
     train_op = self._build_train_op(loss)
     return estimator.EstimatorSpec(mode=mode,
                                    loss=loss,
                                    train_op=train_op)
示例#10
0
 def _serving_ops(self, features):
     """Add ops for serving to the graph.

     Splits the incoming VALUES into a filtering prefix and a prediction
     suffix, cold-start filters the prefix to obtain a model state, then
     predicts the suffix from that state.
     """
     with tf.compat.v1.variable_scope("model", use_resource=True):
         filtering_features = {}
         prediction_features = {}
         # Length of the observed-values prefix; columns past this point
         # belong to the prediction window.
         values_length = tf.compat.v1.shape(
             features[feature_keys.FilteringFeatures.VALUES])[1]
         for key, value in features.items():
             if key == feature_keys.State.STATE_TUPLE:
                 # Ignore state input. The model's default start state is replicated
                 # across the batch.
                 continue
             if key == feature_keys.FilteringFeatures.VALUES:
                 filtering_features[key] = value
             else:
                 # Split exogenous features at the filtering/prediction boundary.
                 filtering_features[key] = value[:, :values_length]
                 prediction_features[key] = value[:, values_length:]
         cold_filtering_outputs = self.model.define_loss(
             features=filtering_features, mode=estimator_lib.ModeKeys.EVAL)
         # Predict from the state produced by cold-start filtering.
         prediction_features[feature_keys.State.STATE_TUPLE] = (
             cold_filtering_outputs.end_state)
     with tf.compat.v1.variable_scope("model", reuse=True):
         prediction_outputs = self.model.predict(
             features=prediction_features)
     return estimator_lib.EstimatorSpec(
         mode=estimator_lib.ModeKeys.PREDICT,
         export_outputs={
             feature_keys.SavedModelLabels.PREDICT:
             _NoStatePredictOutput(prediction_outputs),
         },
         # Likely unused, but it is necessary to return `predictions` to satisfy
         # the Estimator's error checking.
         predictions={})
示例#11
0
 def _serving_ops(self, features):
     """Add ops for serving to the graph.

     Exports three signatures: PREDICT (forecast from the given
     features), FILTER (end state from the loss graph), and
     COLD_START_FILTER (same, with all state inputs stripped).
     """
     with tf.compat.v1.variable_scope("model", use_resource=True):
         prediction_outputs = self.model.predict(features=features)
     with tf.compat.v1.variable_scope("model", reuse=True):
         filtering_outputs = self.create_loss(features,
                                              estimator_lib.ModeKeys.EVAL)
     with tf.compat.v1.variable_scope("model", reuse=True):
         # Drop all state inputs for the cold-start signature.
         no_state_features = {
             k: v
             for k, v in features.items()
             if not k.startswith(feature_keys.State.STATE_PREFIX)
         }
         # Ignore any state management when cold-starting. The model's default
         # start state is replicated across the batch.
         cold_filtering_outputs = self.model.define_loss(
             features=no_state_features, mode=estimator_lib.ModeKeys.EVAL)
     return estimator_lib.EstimatorSpec(
         mode=estimator_lib.ModeKeys.PREDICT,
         export_outputs={
             feature_keys.SavedModelLabels.PREDICT:
             export_lib.PredictOutput(prediction_outputs),
             feature_keys.SavedModelLabels.FILTER:
             export_lib.PredictOutput(
                 state_to_dictionary(filtering_outputs.end_state)),
             feature_keys.SavedModelLabels.COLD_START_FILTER:
             _NoStatePredictOutput(
                 state_to_dictionary(cold_filtering_outputs.end_state))
         },
         # Likely unused, but it is necessary to return `predictions` to satisfy
         # the Estimator's error checking.
         predictions={})
示例#12
0
 def _evaluate_ops(self, features):
     """Add ops for evaluation (aka filtering) to the graph.

     Builds the EVAL loss graph and returns an EstimatorSpec whose
     metrics echo the model's predictions, prediction times and end
     state through identity metrics, plus the mean loss.
     """
     mode = estimator_lib.ModeKeys.EVAL
     with tf.compat.v1.variable_scope("model", use_resource=True):
         model_outputs = self.create_loss(features, mode)
     metrics = {}
     # Just output in-sample predictions for the last chunk seen
     for prediction_key, prediction_value in model_outputs.predictions.items(
     ):
         metrics[prediction_key] = _identity_metric_single(
             prediction_key, prediction_value)
     metrics[feature_keys.FilteringResults.TIMES] = _identity_metric_single(
         feature_keys.FilteringResults.TIMES,
         model_outputs.prediction_times)
     # Expose the (possibly nested) end state through identity metrics.
     metrics[feature_keys.FilteringResults.STATE_TUPLE] = (
         _identity_metric_nested(feature_keys.FilteringResults.STATE_TUPLE,
                                 model_outputs.end_state))
     metrics[metric_keys.MetricKeys.LOSS_MEAN] = tf.compat.v1.metrics.mean(
         model_outputs.loss, name="average_loss")
     return estimator_lib.EstimatorSpec(
         loss=model_outputs.loss,
         mode=mode,
         eval_metric_ops=metrics,
         # needed for custom metrics.
         predictions=model_outputs.predictions)
示例#13
0
 def _predict_ops(self, features):
     """Build the PREDICT-mode graph and wrap it in an EstimatorSpec."""
     with tf.compat.v1.variable_scope("model", use_resource=True):
         outputs = self.model.predict(features=features)
     # Copy the requested prediction times into the output dictionary.
     times = features[feature_keys.PredictionFeatures.TIMES]
     outputs[feature_keys.PredictionResults.TIMES] = times
     return estimator_lib.EstimatorSpec(
         mode=estimator_lib.ModeKeys.PREDICT, predictions=outputs)
示例#14
0
 def model_fn(features, mode):
     """Trivial model_fn: fixed loss/prediction, bumps the global step."""
     del features  # unused
     step = training.get_global_step()
     loss = constant_op.constant([5.])
     predictions = {'x': constant_op.constant([5.])}
     return estimator_lib.EstimatorSpec(
         mode,
         loss=loss,
         predictions=predictions,
         train_op=step.assign_add(1))
示例#15
0
 def model_fn(features, labels, mode):
     """Toy model_fn with constant losses for TRAIN and EVAL."""
     del labels  # unused
     if mode == estimator_lib.ModeKeys.TRAIN:
         # Control dependency forces `features` to be consumed.
         with tf.control_dependencies([features]):
             step_update = tf.compat.v1.assign_add(
                 tf.compat.v1.train.get_global_step(), 1)
         return estimator_lib.EstimatorSpec(
             mode, loss=tf.constant(3.), train_op=step_update)
     if mode == estimator_lib.ModeKeys.EVAL:
         feature_mean = metrics_module.Mean()
         feature_mean.update_state(features)
         metrics = {'mean_of_features': feature_mean}
         return estimator_lib.EstimatorSpec(
             mode, loss=tf.constant(5.), eval_metric_ops=metrics)
示例#16
0
 def model_fn(features, labels, mode):
   """Constant model_fn carrying a Scaffold with an explicit Saver."""
   del features, labels  # unused
   scaffold = training.Scaffold(saver=training.Saver())
   metrics = {
       'mean_of_features': metrics_lib.mean(constant_op.constant(2.))
   }
   return estimator_lib.EstimatorSpec(
       mode,
       loss=constant_op.constant(3.),
       scaffold=scaffold,
       train_op=constant_op.constant(5.),
       eval_metric_ops=metrics)
示例#17
0
 def model_fn(features, labels, mode):
     """Constant model_fn using a Keras Mean metric and an explicit Saver."""
     del features, labels  # unused
     mean_metric = metrics_module.Mean()
     mean_metric.update_state(constant_op.constant(2.))
     scaffold = training.Scaffold(saver=training.Saver())
     return estimator_lib.EstimatorSpec(
         mode,
         loss=constant_op.constant(3.),
         scaffold=scaffold,
         train_op=constant_op.constant(5.),
         eval_metric_ops={'mean_of_features': mean_metric})
示例#18
0
  def _train_ops(self, features):
    """Build the TRAIN-mode loss and optimizer ops for `features`."""
    mode = estimator_lib.ModeKeys.TRAIN
    # Use ResourceVariables to avoid race conditions.
    with tf.compat.v1.variable_scope("model", use_resource=True):
      outputs = self.create_loss(features, mode)

    global_step = tf.compat.v1.train.get_global_step()
    minimize_op = self.optimizer.minimize(outputs.loss,
                                          global_step=global_step)
    return estimator_lib.EstimatorSpec(mode=mode,
                                       loss=outputs.loss,
                                       train_op=minimize_op)
示例#19
0
 def model_fn(features, labels, mode):
     """Constant model_fn (TF2 API): Mean metric plus an explicit Saver."""
     del features, labels  # unused
     metric = tf.keras.metrics.Mean()
     metric.update_state(tf.constant(2.))
     saver = tf.compat.v1.train.Saver()
     return estimator_lib.EstimatorSpec(
         mode,
         loss=tf.constant(3.),
         scaffold=tf.compat.v1.train.Scaffold(saver=saver),
         train_op=tf.constant(5.),
         eval_metric_ops={'mean_of_features': metric})
示例#20
0
    def model_fn(features, labels, mode):
      """Constant model_fn whose Scaffold carries a no-op init_fn."""
      del features, labels  # unused

      def init_fn(scaffold, session):
        # Intentionally does nothing; exercises the init_fn hook only.
        del scaffold, session

      metrics = {
          'mean_of_features': metrics_lib.mean(constant_op.constant(2.))
      }
      return estimator_lib.EstimatorSpec(
          mode,
          loss=constant_op.constant(3.),
          scaffold=training.Scaffold(init_fn=init_fn),
          train_op=constant_op.constant(5.),
          eval_metric_ops=metrics)
示例#21
0
        def model_fn(features, labels, mode):
            """Constant model_fn (TF2 API) with a no-op Scaffold init_fn."""
            del features, labels  # unused

            def init_fn(scaffold, session):
                # No-op: present only to exercise the init_fn hook.
                del scaffold, session

            mean_metric = metrics_module.Mean()
            mean_metric.update_state(tf.constant(2.))
            scaffold = tf.compat.v1.train.Scaffold(init_fn=init_fn)
            return estimator_lib.EstimatorSpec(
                mode,
                loss=tf.constant(3.),
                scaffold=scaffold,
                train_op=tf.constant(5.),
                eval_metric_ops={'mean_of_features': mean_metric})
示例#22
0
 def model_fn(features, labels, mode):
   """Constant model_fn with a saveable variable and an explicit init_op."""
   del features, labels  # unused
   # Non-trainable variable placed in the SAVEABLE_OBJECTS collection.
   saveable = variables.VariableV1(
       initial_value=[0.],
       trainable=False,
       collections=[ops.GraphKeys.SAVEABLE_OBJECTS])
   global_step = training.get_global_step()
   init_op = control_flow_ops.group(
       [saveable.initializer, global_step.initializer])
   metrics = {
       'mean_of_features': metrics_lib.mean(constant_op.constant(2.))
   }
   return estimator_lib.EstimatorSpec(
       mode,
       loss=constant_op.constant(3.),
       scaffold=training.Scaffold(init_op=init_op),
       train_op=constant_op.constant(5.),
       eval_metric_ops=metrics)
示例#23
0
        def model_fn(features, labels, mode):
            """Constant model_fn (TF2 API) with an explicit variable init_op."""
            del features, labels  # unused
            # Non-trainable variable in the SAVEABLE_OBJECTS collection.
            saveable = tf.compat.v1.Variable(
                initial_value=[0.],
                trainable=False,
                collections=[tf.compat.v1.GraphKeys.SAVEABLE_OBJECTS])
            step_var = tf.compat.v1.train.get_global_step()
            init_op = tf.group([saveable.initializer, step_var.initializer])

            mean_metric = metrics_module.Mean()
            mean_metric.update_state(tf.constant(2.))
            return estimator_lib.EstimatorSpec(
                mode,
                loss=tf.constant(3.),
                scaffold=tf.compat.v1.train.Scaffold(init_op=init_op),
                train_op=tf.constant(5.),
                eval_metric_ops={'mean_of_features': mean_metric})
        def model_fn(features, labels, mode, params, config):
            """Two-tower model_fn supporting one or many sampled items.

            Left/right towers produce L2-normalized embeddings. With
            sample_num > 1 the cosine scores are reshaped and trained as
            a softmax over samples; with a single sample a logloss on the
            rescaled cosine is used. PREDICT exports the right-tower
            embedding.
            """
            is_training = True if mode == estimator.ModeKeys.TRAIN else False
            if mode != estimator.ModeKeys.PREDICT:
                features = self.sparse2dense(features,
                                             self._dataset.varlen_list)
            features = self._dense2sparse(features, self._dataset.varlen_list)

            # Left tower
            left_tower = self._Network(self._flags, self._dataset,
                                       'left_tower')  # construct network object
            self.left_features = self._get_features_by_index(features, 0)
            left_dense, left_embeddings = left_tower.build_features(
                self.left_features
            )  # embed features, [batch_size * pre_batch, length]
            tf.logging.debug("LeftTower input dense: %r" % left_dense)
            tf.logging.debug("LeftTower input embed: %r" % left_embeddings)
            left_tower_out = left_tower(left_dense, left_embeddings,
                                        is_training)  # tower output
            left_tower_embedding = tf.nn.l2_normalize(
                tf.keras.layers.Dense(self._flags.vector_dim)(left_tower_out),
                axis=-1,
                name=self.left_output
            )  # normalize, [batch_size * pre_batch, embedding_size]
            tf.logging.debug("LeftTower output: %r" %
                             left_tower_embedding.shape)

            # Right tower
            right_tower = self._Network(self._flags, self._dataset,
                                        'right_tower')  # construct network object
            self.right_features = self._get_features_by_index(features, 1)
            right_dense, right_embeddings = right_tower.build_features(
                self.right_features
            )  # embed features, [batch_size * pre_batch * sample_num, length]
            tf.logging.debug("RightTower input dense: %r" % right_dense)
            tf.logging.debug("RightTower input embed: %r" % right_embeddings)
            right_tower_out = right_tower(right_dense, right_embeddings,
                                          is_training)  # tower output
            right_tower_embedding = tf.nn.l2_normalize(
                tf.keras.layers.Dense(self._flags.vector_dim)(right_tower_out),
                axis=-1,
                name=self.right_output
            )  # normalize, [batch_size * pre_batch * sample_num, embedding_size]
            tf.logging.debug("RightTower output: %r" %
                             right_tower_embedding.shape)

            if mode == estimator.ModeKeys.PREDICT:
                outputs = self._build_predict_outputs(features,
                                                      right_tower_embedding)
                return estimator.EstimatorSpec(mode, predictions=outputs)

            # Loss
            if self.sample_num > 1:  # multiple samples: reshape, softmax classification
                left_tower_embedding = tf.expand_dims(
                    left_tower_embedding,
                    1)  # [batch_size * pre_batch, 1, embedding_size]
                tf.logging.debug("LeftTower embedding: %r" %
                                 left_tower_embedding.shape)
                right_tower_embedding = tf.reshape(
                    right_tower_embedding,
                    [-1, self.sample_num, right_tower_embedding.shape[-1]
                     ])  # [batch_size * pre_batch, sample_num, embedding_size]
                tf.logging.debug("RightTower embedding: %r" %
                                 right_tower_embedding.shape)
                cosine_score = tf.matmul(
                    left_tower_embedding,
                    right_tower_embedding,
                    transpose_b=True
                )  # [batch_size * pre_batch, 1, sample_num]
                cosine_score = tf.squeeze(
                    cosine_score, axis=1, name=self.predictions
                )  # [batch_size * pre_batch, sample_num]
                tf.logging.debug("Cosine: %r" % cosine_score.shape)
                labels = tf.reshape(labels,
                                    [-1, self.sample_num
                                     ])  # [batch_size * pre_batch, sample_num]
                tf.logging.debug("Labels: %r" % labels.shape)
                loss = self.build_loss(labels, cosine_score)
                metrics = self.build_softmax_metric(labels, cosine_score)
            else:  # single sample: use logloss
                # Rescale cosine from [-1, 1] to [0, 1].
                # NOTE(review): `keep_dims` is the deprecated TF1 spelling
                # of `keepdims`; fails under TF2.
                cosine_score = tf.divide(1. + tf.reduce_sum(tf.multiply(
                    left_tower_embedding, right_tower_embedding),
                                                            axis=-1,
                                                            keep_dims=True),
                                         2.,
                                         name=self.predictions)
                loss = self._build_loss(labels, cosine_score)
                metrics = self._build_metrics(labels, cosine_score)

            self._build_summary(loss, metrics)
            if mode == estimator.ModeKeys.EVAL:
                return estimator.EstimatorSpec(mode,
                                               loss=loss,
                                               eval_metric_ops=metrics)
            assert mode == estimator.ModeKeys.TRAIN
            train_op = self._build_train_op(loss)
            return estimator.EstimatorSpec(mode=mode,
                                           loss=loss,
                                           train_op=train_op)