Example #1
# Assumed imports for a TF 1.x environment; build_mmoe, build_deep_layers and
# get_weight_loss are project-specific helpers defined elsewhere.
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops, state_ops
from tensorflow.python.estimator.canned import optimizers
from tensorflow.python.estimator.canned import head as head_v1


def esmm_model_fn(features, labels, mode, params):
  wt = tf.feature_column.input_layer(features, params['weight_columns'])
  mmoe_scope = 'mmoe'
  multi_inputs, weights_shared = build_mmoe(features, params, mmoe_scope)
  hidden_units = params['hidden_units']
  is_dynamic = params['dynamic']
  print("is_dynamic:", is_dynamic)
  reg = 1e-4
  dnn_scope = 'dnn'
  with tf.variable_scope(dnn_scope):
    ctr_logits = build_deep_layers(multi_inputs[0], hidden_units, mode, 'CTR', reg)
    cvr_logits = build_deep_layers(multi_inputs[1], hidden_units, mode, 'CVR', reg)
  ctr_preds = tf.nn.sigmoid(ctr_logits)
  cvr_preds = tf.nn.sigmoid(cvr_logits)
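  # ESMM factorization: pCTCVR = pCTR * pCVR, so the CVR tower is trained over
  # the full impression space rather than only over clicked examples.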
  ctcvr_preds = ctr_preds * cvr_preds
  tf.summary.histogram("esmm/ctr_preds", ctr_preds) 
  tf.summary.histogram("esmm/cvr_preds", cvr_preds) 
  tf.summary.histogram("esmm/ctcvr_preds", ctcvr_preds)
  if mode == tf.estimator.ModeKeys.PREDICT:
    #redundant_items = ctr_preds
    predictions = {
      'prob': tf.concat([cvr_preds, ctr_preds], 1)
    }
    export_outputs = {
      tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: tf.estimator.export.PredictOutput(predictions)  # needed for online serving
    }
    return tf.estimator.EstimatorSpec(mode, predictions=predictions, export_outputs=export_outputs)

  else:
    ctr_labels = labels['ctr']
    ctcvr_labels = labels['ctcvr']
    linear_optimizer = tf.train.FtrlOptimizer(0.01, l1_regularization_strength=0.001, l2_regularization_strength=0.001)
    dnn_optimizer = optimizers.get_optimizer_instance('Adam', params['learning_rate'])
    loss_optimizer = optimizers.get_optimizer_instance('Adam', 0.001)
    ctr_loss = tf.losses.log_loss(ctr_labels, ctr_preds, reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE, weights=wt)
    ctcvr_loss = tf.losses.log_loss(ctcvr_labels, ctcvr_preds, reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE)
    #reg_loss = tf.reduce_sum(ops.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
    ctr_auc = tf.metrics.auc(labels=ctr_labels, predictions=ctr_preds, weights=wt)
    ctcvr_auc = tf.metrics.auc(labels=ctcvr_labels, predictions=ctcvr_preds)
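    # CVR metrics are only meaningful on clicked impressions, so mask the batch
    # down to the rows whose click label equals 1.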
    mask = tf.equal(tf.squeeze(labels['ctr']), 1)
    cvr_preds = tf.boolean_mask(cvr_preds, mask)
    cvr_labels = tf.boolean_mask(labels['ctcvr'], mask)
    cvr_auc = tf.metrics.auc(labels=cvr_labels, predictions=cvr_preds)
    cvr_loss = tf.losses.log_loss(cvr_labels, cvr_preds, reduction=tf.losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
    tf.summary.scalar("cvr_auc", cvr_auc[1])
    tf.summary.scalar("cvr_loss", cvr_loss)
    tf.summary.scalar('ctr_loss', ctr_loss)
    tf.summary.scalar('ctcvr_loss', ctcvr_loss)
    tf.summary.scalar('ctr_auc', ctr_auc[1])
    tf.summary.scalar('ctcvr_auc', ctcvr_auc[1])
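    # get_weight_loss is a project helper (not shown here); when is_dynamic is
    # set, it is expected to return GradNorm-style task weights together with
    # their update ops and a gradient-norm balancing loss.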
    weight_loss, update_list, w_list, loss_gradnorm = get_weight_loss([ctr_loss, ctcvr_loss], is_dynamic, weights_shared)
    #loss = tf.add_n(weight_loss + [reg_loss])
    loss = tf.add_n(weight_loss)
    #loss = weight_loss
    #w_list = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES, scope='grad_weight')
    def _train_op_fn(loss):
      train_ops = []
      global_step = tf.train.get_global_step()
      if params['model'] == 'dnn':
        fm_var_list = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES, scope='fm')
        dnn_var_list = (
            ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES, scope=dnn_scope)
            + ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES, scope='dnn_embed')
            + ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES, scope=mmoe_scope))
        train_ops.append(
          dnn_optimizer.minimize(
            loss,
            var_list=dnn_var_list))
        train_ops.append(
          linear_optimizer.minimize(
            loss,
            var_list=fm_var_list))
      if w_list is not None and loss_gradnorm is not None:
        train_ops.append(
          loss_optimizer.minimize(
              loss_gradnorm,
              var_list=w_list))
      if update_list is not None:
        train_ops.append(update_list)
      train_op = control_flow_ops.group(*train_ops)
      with ops.control_dependencies([train_op]):
        return state_ops.assign_add(global_step, 1).op
    hooks = tf.train.LoggingTensorHook({'ctr_loss':ctr_loss, 'ctcvr_loss':ctcvr_loss, 'cvr_loss':cvr_loss}, every_n_iter=10000)
    train_op = _train_op_fn(loss)
    train_op = head_v1._append_update_ops(train_op)
    metrics = {'ctr_auc': ctr_auc, 'ctcvr_auc': ctcvr_auc, 'cvr_auc': cvr_auc}
    #return _TPUEstimatorSpec(mode, loss=loss, train_op=train_op).as_estimator_spec()
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op, eval_metric_ops=metrics, training_hooks=[hooks])
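
A minimal usage sketch for Example #1 (hedged: the toy input_fn, the feature
names, and the params values below are illustrative assumptions; it still
relies on the project helpers build_mmoe, build_deep_layers and
get_weight_loss):

def toy_input_fn():
  # The model_fn expects labels as a dict with 'ctr' (click) and 'ctcvr'
  # (click-and-convert) entries, each shaped [batch_size, 1].
  features = {'weight': tf.constant([[1.0], [1.0]])}
  labels = {'ctr': tf.constant([[1.0], [0.0]]),
            'ctcvr': tf.constant([[1.0], [0.0]])}
  return features, labels

estimator = tf.estimator.Estimator(
    model_fn=esmm_model_fn,
    params={'weight_columns': [tf.feature_column.numeric_column('weight')],
            'hidden_units': [128, 64], 'dynamic': False, 'model': 'dnn',
            'learning_rate': 0.001})
estimator.train(input_fn=toy_input_fn, max_steps=100)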
Example #2
    def _create_tpu_estimator_spec(self,
                                   features,
                                   mode,
                                   logits,
                                   labels=None,
                                   optimizer=None,
                                   train_op_fn=None,
                                   regularization_losses=None):
        """Returns an `model_fn._TPUEstimatorSpec`.

    Args:
      features: Input `dict` of `Tensor` or `SparseTensor` objects.
      mode: Estimator's `ModeKeys`.
      logits: logits `Tensor` with shape `[D0, D1, ... DN, n_classes]`.
        For many applications, the shape is `[batch_size, n_classes]`.
      labels: Labels with shape matching `logits`. Can be multi-hot `Tensor`
        with shape `[D0, D1, ... DN, n_classes]` or `SparseTensor` with
        `dense_shape` `[D0, D1, ... DN, ?]`. `labels` is required argument when
        `mode` equals `TRAIN` or `EVAL`.
      optimizer: `Optimizer` instance to optimize the loss in TRAIN mode.
        Namely, sets `train_op = optimizer.minimize(loss, global_step)`, which
        updates variables and increments `global_step`.
      train_op_fn: Function that takes a scalar loss `Tensor` and returns
        `train_op`. Used if `optimizer` is `None`.
      regularization_losses: A list of additional scalar losses to be added to
        the training loss, such as regularization losses. These losses are
        usually expressed as a batch average, so for best results users need to
        set `loss_reduction=SUM_OVER_BATCH_SIZE` or
        `loss_reduction=SUM_OVER_NONZERO_WEIGHTS` when creating the head to
        avoid scaling errors.
    Returns:
      `model_fn._TPUEstimatorSpec`.
    Raises:
      ValueError: If both `train_op_fn` and `optimizer` are `None` in TRAIN
        mode, or if both are set.
    """
        with ops.name_scope(self._name, 'head'):
            logits = head_lib._check_logits_final_dim(logits,
                                                      self.logits_dimension)  # pylint:disable=protected-access

            # Predict.
            pred_keys = prediction_keys.PredictionKeys
            with ops.name_scope(None, 'predictions', (logits, )):
                probabilities = math_ops.sigmoid(logits,
                                                 name=pred_keys.PROBABILITIES)
                predictions = {
                    pred_keys.LOGITS: logits,
                    pred_keys.PROBABILITIES: probabilities,
                }
            if mode == model_fn.ModeKeys.PREDICT:
                classifier_output = head_lib._classification_output(  # pylint:disable=protected-access
                    scores=probabilities,
                    n_classes=self._n_classes,
                    label_vocabulary=self._label_vocabulary)
                return model_fn._TPUEstimatorSpec(  # pylint:disable=protected-access
                    mode=model_fn.ModeKeys.PREDICT,
                    predictions=predictions,
                    export_outputs={
                        _DEFAULT_SERVING_KEY: classifier_output,
                        head_lib._CLASSIFY_SERVING_KEY: classifier_output,  # pylint:disable=protected-access
                        head_lib._PREDICT_SERVING_KEY: (  # pylint:disable=protected-access
                            export_output.PredictOutput(predictions))
                    })

            (training_loss, unreduced_loss, weights,
             processed_labels) = self.create_loss(features=features,
                                                  mode=mode,
                                                  logits=logits,
                                                  labels=labels)
            if regularization_losses:
                regularization_loss = math_ops.add_n(regularization_losses)
                regularized_training_loss = math_ops.add_n(
                    [training_loss, regularization_loss])
            else:
                regularization_loss = None
                regularized_training_loss = training_loss

            # Eval.
            if mode == model_fn.ModeKeys.EVAL:
                return model_fn._TPUEstimatorSpec(  # pylint:disable=protected-access
                    mode=model_fn.ModeKeys.EVAL,
                    predictions=predictions,
                    loss=regularized_training_loss,
                    eval_metrics=head_lib._create_eval_metrics_tuple(  # pylint:disable=protected-access
                        self._eval_metric_ops, {
                            'labels': processed_labels,
                            'probabilities': probabilities,
                            'weights': weights,
                            'unreduced_loss': unreduced_loss,
                            'regularization_loss': regularization_loss,
                        }))

            # Train.
            if optimizer is not None:
                if train_op_fn is not None:
                    raise ValueError(
                        'train_op_fn and optimizer cannot both be set.')
                train_op = optimizer.minimize(
                    regularized_training_loss,
                    global_step=training_util.get_global_step())
            elif train_op_fn is not None:
                train_op = train_op_fn(regularized_training_loss)
            else:
                raise ValueError(
                    'train_op_fn and optimizer cannot both be None.')
            train_op = head_lib._append_update_ops(train_op)  # pylint:disable=protected-access
            # Only summarize mean_loss for SUM reduction to preserve backwards
            # compatibility. Otherwise skip it to avoid unnecessary computation.
            if self._loss_reduction == losses.Reduction.SUM:
                example_weight_sum = math_ops.reduce_sum(
                    weights * array_ops.ones_like(unreduced_loss))
                mean_loss = training_loss / example_weight_sum
            else:
                mean_loss = None
        with ops.name_scope(''):
            keys = metric_keys.MetricKeys
            summary.scalar(
                head_lib._summary_key(self._name, keys.LOSS),  # pylint:disable=protected-access
                regularized_training_loss)
            if mean_loss is not None:
                summary.scalar(
                    head_lib._summary_key(self._name, keys.LOSS_MEAN),  # pylint:disable=protected-access
                    mean_loss)
            if regularization_loss is not None:
                summary.scalar(
                    head_lib._summary_key(self._name,
                                          keys.LOSS_REGULARIZATION),  # pylint:disable=protected-access
                    regularization_loss)
        return model_fn._TPUEstimatorSpec(  # pylint:disable=protected-access
            mode=model_fn.ModeKeys.TRAIN,
            predictions=predictions,
            loss=regularized_training_loss,
            train_op=train_op)
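
Note the mutually exclusive `optimizer` / `train_op_fn` contract above; a
minimal `train_op_fn` sketch (the AdamOptimizer choice here is an illustrative
assumption):

def my_train_op_fn(loss):
  # The head passes in the regularized scalar loss; the caller owns both the
  # update rule and the global_step increment.
  opt = tf.train.AdamOptimizer(0.001)
  return opt.minimize(loss, global_step=tf.train.get_global_step())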
Example #3
  def _create_tpu_estimator_spec(
      self, features, mode, logits, labels=None, optimizer=None,
      train_op_fn=None, regularization_losses=None):
    """Returns an `model_fn._TPUEstimatorSpec`.

    Args:
      features: Input `dict` of `Tensor` or `SparseTensor` objects.
      mode: Estimator's `ModeKeys`.
      logits: logits `Tensor` with shape `[D0, D1, ... DN, n_classes]`.
        For many applications, the shape is `[batch_size, n_classes]`.
      labels: Labels with shape matching `logits`. Can be a multi-hot `Tensor`
        with shape `[D0, D1, ... DN, n_classes]` or a `SparseTensor` with
        `dense_shape` `[D0, D1, ... DN, ?]`. `labels` is a required argument
        when `mode` equals `TRAIN` or `EVAL`.
      optimizer: `Optimizer` instance to optimize the loss in TRAIN mode.
        Namely, sets `train_op = optimizer.minimize(loss, global_step)`, which
        updates variables and increments `global_step`.
      train_op_fn: Function that takes a scalar loss `Tensor` and returns
        `train_op`. Used if `optimizer` is `None`.
      regularization_losses: A list of additional scalar losses to be added to
        the training loss, such as regularization losses. These losses are
        usually expressed as a batch average, so for best results users need to
        set `loss_reduction=SUM_OVER_BATCH_SIZE` or
        `loss_reduction=SUM_OVER_NONZERO_WEIGHTS` when creating the head to
        avoid scaling errors.
    Returns:
      `model_fn._TPUEstimatorSpec`.
    Raises:
      ValueError: If both `train_op_fn` and `optimizer` are `None` in TRAIN
        mode, or if both are set.
    """
    with ops.name_scope(self._name, 'head'):
      logits = head_lib._check_logits_final_dim(logits, self.logits_dimension)  # pylint:disable=protected-access

      # Predict.
      pred_keys = prediction_keys.PredictionKeys
      with ops.name_scope(None, 'predictions', (logits,)):
        probabilities = math_ops.sigmoid(logits, name=pred_keys.PROBABILITIES)
        predictions = {
            pred_keys.LOGITS: logits,
            pred_keys.PROBABILITIES: probabilities,
        }
      if mode == model_fn.ModeKeys.PREDICT:
        classifier_output = head_lib._classification_output(  # pylint:disable=protected-access
            scores=probabilities, n_classes=self._n_classes,
            label_vocabulary=self._label_vocabulary)
        return model_fn._TPUEstimatorSpec(  # pylint:disable=protected-access
            mode=model_fn.ModeKeys.PREDICT,
            predictions=predictions,
            export_outputs={
                _DEFAULT_SERVING_KEY: classifier_output,
                head_lib._CLASSIFY_SERVING_KEY: classifier_output,  # pylint:disable=protected-access
                head_lib._PREDICT_SERVING_KEY: (  # pylint:disable=protected-access
                    export_output.PredictOutput(predictions))
            })

      (training_loss, unreduced_loss, weights,
       processed_labels) = self.create_loss(
           features=features, mode=mode, logits=logits, labels=labels)
      if regularization_losses:
        regularization_loss = math_ops.add_n(regularization_losses)
        regularized_training_loss = math_ops.add_n(
            [training_loss, regularization_loss])
      else:
        regularization_loss = None
        regularized_training_loss = training_loss

      # Eval.
      if mode == model_fn.ModeKeys.EVAL:
        return model_fn._TPUEstimatorSpec(  # pylint:disable=protected-access
            mode=model_fn.ModeKeys.EVAL,
            predictions=predictions,
            loss=regularized_training_loss,
            eval_metrics=head_lib._create_eval_metrics_tuple(  # pylint:disable=protected-access
                self._eval_metric_ops, {
                    'labels': processed_labels,
                    'probabilities': probabilities,
                    'weights': weights,
                    'unreduced_loss': unreduced_loss,
                    'regularization_loss': regularization_loss,
                }))

      # Train.
      if optimizer is not None:
        if train_op_fn is not None:
          raise ValueError('train_op_fn and optimizer cannot both be set.')
        train_op = optimizer.minimize(
            regularized_training_loss,
            global_step=training_util.get_global_step())
      elif train_op_fn is not None:
        train_op = train_op_fn(regularized_training_loss)
      else:
        raise ValueError('train_op_fn and optimizer cannot both be None.')
      train_op = head_lib._append_update_ops(train_op)  # pylint:disable=protected-access
      # Only summarize mean_loss for SUM reduction to preserve backwards
      # compatibility. Otherwise skip it to avoid unnecessary computation.
      if self._loss_reduction == losses.Reduction.SUM:
        example_weight_sum = math_ops.reduce_sum(
            weights * array_ops.ones_like(unreduced_loss))
        mean_loss = training_loss / example_weight_sum
      else:
        mean_loss = None
    with ops.name_scope(''):
      keys = metric_keys.MetricKeys
      summary.scalar(
          head_lib._summary_key(self._name, keys.LOSS),  # pylint:disable=protected-access
          regularized_training_loss)
      if mean_loss is not None:
        summary.scalar(
            head_lib._summary_key(self._name, keys.LOSS_MEAN),  # pylint:disable=protected-access
            mean_loss)
      if regularization_loss is not None:
        summary.scalar(
            head_lib._summary_key(self._name, keys.LOSS_REGULARIZATION),  # pylint:disable=protected-access
            regularization_loss)
    return model_fn._TPUEstimatorSpec(  # pylint:disable=protected-access
        mode=model_fn.ModeKeys.TRAIN,
        predictions=predictions,
        loss=regularized_training_loss,
        train_op=train_op)
Example #4
# Imports mirror Example #1, plus `six` and the canned `linear` module
# (`tensorflow.python.estimator.canned.linear`) for `_linear_logit_fn_builder`.
def esmm_model_fn(features, labels, mode, params):
    batch_weight = tf.feature_column.input_layer(features,
                                                 params['weight_columns'])
    inputs, shared_weights = build_input(features, params)
    hidden_units = params['hidden_units']
    linear_parent_scope = 'linear'
    dnn_parent_scope = 'dnn'
    is_dynamic = params['dynamic']
    print("is_dynamic:", is_dynamic)
    reg = 1e-4
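    # One pair of CTR/CVR towers per model type: 'linear' builds two linear
    # logit heads trained with FTRL, 'dnn' builds two deep towers that share
    # the same input layer.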
    if params['model'] == 'linear':
        with tf.variable_scope(linear_parent_scope,
                               values=tuple(six.itervalues(features)),
                               reuse=tf.AUTO_REUSE):
            with tf.variable_scope('linear_ctr'):
                ctr_logit_fn = linear._linear_logit_fn_builder(
                    1, params['linear_columns'])
                ctr_logits = ctr_logit_fn(features=features)
            with tf.variable_scope('linear_cvr'):
                cvr_logit_fn = linear._linear_logit_fn_builder(
                    1, params['linear_columns'])
                cvr_logits = cvr_logit_fn(features=features)
    if params['model'] == 'dnn':
        with tf.variable_scope(dnn_parent_scope):
            with tf.variable_scope('dnn_ctr'):
                ctr_logits = build_deep_layers(inputs, hidden_units, mode,
                                               params['ctr_reg'])
                #ctr_logit_fn = dnn._dnn_logit_fn_builder(1, hidden_units, params['dnn_columns'], tf.nn.relu, None, None, True)
                #ctr_logits = ctr_logit_fn(features=features, mode=mode)
            with tf.variable_scope('dnn_cvr'):
                cvr_logits = build_deep_layers(inputs, hidden_units, mode,
                                               params['cvr_reg'])
                #cvr_logit_fn = dnn._dnn_logit_fn_builder(1, hidden_units, params['dnn_columns'], tf.nn.relu, None, None, True)
                #cvr_logits = cvr_logit_fn(features=features, mode=mode)
    ctr_preds = tf.nn.sigmoid(ctr_logits)
    cvr_preds = tf.nn.sigmoid(cvr_logits)
    #ctcvr_preds = tf.stop_gradient(ctr_preds) * cvr_preds
    ctcvr_preds = ctr_preds * cvr_preds
    tf.summary.histogram("esmm/ctr_preds", ctr_preds)
    tf.summary.histogram("esmm/ctcvr_preds", ctcvr_preds)
    if mode == tf.estimator.ModeKeys.PREDICT:
        #redundant_items = ctr_preds
        predictions = {'prob': tf.concat([ctcvr_preds, ctr_preds], 1)}
        export_outputs = {
            tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
            tf.estimator.export.PredictOutput(predictions)  # needed for online serving
        }
        return tf.estimator.EstimatorSpec(mode,
                                          predictions=predictions,
                                          export_outputs=export_outputs)

    else:
        ctr_labels = labels['ctr']
        ctcvr_labels = labels['ctcvr']
        linear_optimizer = tf.train.FtrlOptimizer(
            0.01,
            l1_regularization_strength=0.001,
            l2_regularization_strength=0.001)
        dnn_optimizer = optimizers.get_optimizer_instance(
            'Adam', params['learning_rate'])
        loss_optimizer = optimizers.get_optimizer_instance('Adam', 0.001)
        ctr_loss = tf.losses.log_loss(
            ctr_labels,
            ctr_preds,
            reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE,
            weights=batch_weight)
        ctcvr_loss = tf.losses.log_loss(
            ctcvr_labels,
            ctcvr_preds,
            reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE)
        #reg_loss = tf.reduce_sum(ops.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
        ctr_auc = tf.metrics.auc(labels=ctr_labels,
                                 predictions=ctr_preds,
                                 weights=batch_weight)
        ctcvr_auc = tf.metrics.auc(labels=ctcvr_labels,
                                   predictions=ctcvr_preds)
        tf.summary.scalar('ctr_loss', ctr_loss)
        tf.summary.scalar('ctcvr_loss', ctcvr_loss)
        tf.summary.scalar('ctr_auc', ctr_auc[1])
        tf.summary.scalar('ctcvr_auc', ctcvr_auc[1])
        weight_loss, update_list, w_list, loss_gradnorm = get_weight_loss(
            [ctr_loss, ctcvr_loss], is_dynamic, shared_weights)
        #loss = tf.add_n(weight_loss + [reg_loss])
        loss = tf.add_n(weight_loss)

        #loss = weight_loss
        #w_list = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES, scope='grad_weight')
        def _train_op_fn(loss):
            train_ops = []
            global_step = tf.train.get_global_step()
            if params['model'] == 'dnn':
                fm_var_list = ops.get_collection(
                    ops.GraphKeys.TRAINABLE_VARIABLES, scope='fm')
                dnn_var_list = ops.get_collection(
                    ops.GraphKeys.TRAINABLE_VARIABLES,
                    scope=dnn_parent_scope) + ops.get_collection(
                        ops.GraphKeys.TRAINABLE_VARIABLES, scope='dnn_embed')
                train_ops.append(
                    dnn_optimizer.minimize(loss, var_list=dnn_var_list))
                train_ops.append(
                    linear_optimizer.minimize(loss, var_list=fm_var_list))
            if params['model'] == 'linear':
                train_ops.append(
                    linear_optimizer.minimize(
                        loss,
                        var_list=ops.get_collection(
                            ops.GraphKeys.TRAINABLE_VARIABLES,
                            scope=linear_parent_scope)))
            if w_list is not None and loss_gradnorm is not None:
                train_ops.append(
                    loss_optimizer.minimize(loss_gradnorm, var_list=w_list))
            if update_list is not None:
                train_ops.append(update_list)
            train_op = control_flow_ops.group(*train_ops)
            with ops.control_dependencies([train_op]):
                return state_ops.assign_add(global_step, 1).op

        train_op = _train_op_fn(loss)
        train_op = head_v1._append_update_ops(train_op)
        metrics = {'ctr_auc': ctr_auc, 'ctcvr_auc': ctcvr_auc}
        #return _TPUEstimatorSpec(mode, loss=loss, train_op=train_op).as_estimator_spec()
        return tf.estimator.EstimatorSpec(mode,
                                          loss=loss,
                                          train_op=train_op,
                                          eval_metric_ops=metrics)
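
Both ESMM variants export a single 'prob' tensor under the default serving
signature; a hedged export sketch (the feature spec inside
serving_input_receiver_fn is an illustrative placeholder):

def serving_input_receiver_fn():
    # A real model would declare its full serving feature spec here.
    inputs = {'weight': tf.placeholder(tf.float32, [None, 1], name='weight')}
    return tf.estimator.export.ServingInputReceiver(inputs, inputs)

# export_saved_model is TF >= 1.13; earlier releases use export_savedmodel.
estimator.export_saved_model('export_dir', serving_input_receiver_fn)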