Example #1
    def metric_fn(y_true, y_pred):
        """Returns the in_top_k metric."""
        softmax_logits = y_pred[0, :]
        logits = tf.slice(softmax_logits, [0, 1], [batch_size, 1])

        # The dup mask should be obtained from the input data, but we have not
        # yet found a good way of getting it with Keras, so we set it to zeros
        # to skip the repetition correction.
        dup_mask = tf.zeros([batch_size, 1])

        _, _, in_top_k, _, _ = (
            neumf_model.compute_eval_loss_and_metrics_helper(
                logits, softmax_logits, dup_mask, params["num_neg"],
                params["match_mlperf"], params["use_xla_for_gpu"]))

        is_training = tf.keras.backend.learning_phase()
        if isinstance(is_training, int):
            is_training = tf.constant(bool(is_training), dtype=tf.bool)

        in_top_k = tf.cond(
            is_training,
            lambda: tf.zeros(shape=in_top_k.shape, dtype=in_top_k.dtype),
            lambda: in_top_k)

        return in_top_k
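
A minimal sketch of how a custom metric with this (y_true, y_pred) signature is wired into a Keras model. The toy model and dummy_metric below are stand-ins for illustration only; the real metric_fn above additionally closes over batch_size, params and neumf_model from its enclosing training script.

import tensorflow as tf

def dummy_metric(y_true, y_pred):
    # Same (y_true, y_pred) signature Keras expects for a custom metric;
    # stands in for metric_fn here.
    return tf.reduce_mean(
        tf.cast(tf.equal(y_true, tf.round(y_pred)), tf.float32))

model = tf.keras.Sequential(
    [tf.keras.layers.Dense(1, activation="sigmoid", input_shape=(8,))])
model.compile(optimizer="adam",
              loss="binary_crossentropy",
              metrics=[dummy_metric])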
Example #2
    def _model_fn(features, labels, mode, params):
        """Model Function for NeuMF estimator."""
        logits = logits_fn(features, feature_columns, params)

        # Softmax with the first column of zeros is equivalent to sigmoid.
        softmax_logits = tf.concat(
            [tf.zeros(logits.shape, dtype=logits.dtype), logits], axis=1)
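        # For a single logit x, softmax([0, x]) is
        # [1 / (1 + e**x), e**x / (1 + e**x)] = [1 - sigmoid(x), sigmoid(x)],
        # so column 1 of softmax_logits equals sigmoid(logits) and the sparse
        # softmax cross-entropy below behaves like a sigmoid cross-entropy on
        # binary labels.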

        if mode == tf.estimator.ModeKeys.EVAL:
            duplicate_mask = tf.cast(features[rconst.DUPLICATE_MASK],
                                     tf.float32)
            cross_entropy, metric_fn, in_top_k, ndcg, metric_weights = (
                neumf_model.compute_eval_loss_and_metrics_helper(
                    logits,
                    softmax_logits,
                    duplicate_mask,
                    params["num_neg"],
                    params["match_mlperf"],
                    use_tpu_spec=params["use_tpu"]))

            return tf.contrib.tpu.TPUEstimatorSpec(
                mode=mode,
                loss=cross_entropy,
                eval_metrics=(metric_fn, [in_top_k, ndcg, metric_weights]))

        elif mode == tf.estimator.ModeKeys.TRAIN:
            labels = tf.cast(labels, tf.int32)
            valid_pt_mask = features[rconst.VALID_POINT_MASK]
            optimizer = tf.train.AdamOptimizer(
                learning_rate=params["learning_rate"],
                beta1=params["beta1"],
                beta2=params["beta2"],
                epsilon=params["epsilon"])
            optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)

            loss = tf.losses.sparse_softmax_cross_entropy(
                labels=labels,
                logits=softmax_logits,
                weights=tf.cast(valid_pt_mask, tf.float32))

            # This tensor is used by logging hooks.
            tf.identity(loss, name="cross_entropy")

            global_step = tf.train.get_global_step()
            tvars = tf.trainable_variables()
            gradients = optimizer.compute_gradients(
                loss, tvars, colocate_gradients_with_ops=True)
            minimize_op = optimizer.apply_gradients(gradients,
                                                    global_step=global_step,
                                                    name="train")
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            train_op = tf.group(minimize_op, update_ops)

            return tf.contrib.tpu.TPUEstimatorSpec(mode=mode,
                                                   loss=loss,
                                                   train_op=train_op)

        else:
            raise NotImplementedError
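
For context, a hedged sketch of how a model_fn like this is typically handed to the TF 1.x TPUEstimator. The TPU name, model directory, hyperparameter values and train_input_fn below are placeholders, not values from the original script.

import tensorflow as tf

resolver = tf.contrib.cluster_resolver.TPUClusterResolver(tpu="my-tpu")  # placeholder TPU
run_config = tf.contrib.tpu.RunConfig(
    cluster=resolver,
    model_dir="/tmp/neumf_model",  # placeholder directory
    tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=100))

estimator = tf.contrib.tpu.TPUEstimator(
    model_fn=_model_fn,
    config=run_config,
    use_tpu=True,
    train_batch_size=1024,
    eval_batch_size=1024,
    params={  # illustrative hyperparameters only
        "learning_rate": 0.001,
        "beta1": 0.9,
        "beta2": 0.999,
        "epsilon": 1e-8,
        "num_neg": 4,
        "match_mlperf": False,
        "use_tpu": True,
    })

# train_input_fn must be supplied by the surrounding training script.
estimator.train(input_fn=train_input_fn, max_steps=10000)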