Code example #1
def train_interactiongraph(subgraph):
    # First-order (linear) term for the first item. The shared
    # "LinearComponent" scope ties these weights to the second branch below.
    linear1 = fully_connected_layer.apply(subgraph["X_1"], [1], subgraph,
        relu_in=False, relu_mid=False, relu_out=False,
        dropout_in=None, dropout_mid=None, dropout_out=None,
        bias_in=True, bias_mid=True, bias_out=True, batch_norm=False,
        train=False, l2_reg=l2_reg, scope="LinearComponent")
    linear1 = tf.reshape(linear1, shape=[-1])  # shaped [None, ]
    # Second-order FM term, shaped [None, factor_dim] before the sum.
    interactive1 = fm_layer.apply(subgraph["X_1"], factor_dim, l2_weight=0.01, scope="InteractiveComponent")
    interactive1 = tf.reshape(tf.math.reduce_sum(interactive1, axis=1), shape=[-1])

    # Same two components for the second item; the identical scopes share weights.
    linear2 = fully_connected_layer.apply(subgraph["X_2"], [1], subgraph,
        relu_in=False, relu_mid=False, relu_out=False,
        dropout_in=None, dropout_mid=None, dropout_out=None,
        bias_in=True, bias_mid=True, bias_out=True, batch_norm=False,
        train=False, l2_reg=l2_reg, scope="LinearComponent")
    linear2 = tf.reshape(linear2, shape=[-1])  # shaped [None, ]
    interactive2 = fm_layer.apply(subgraph["X_2"], factor_dim, l2_weight=0.01, scope="InteractiveComponent")
    interactive2 = tf.reshape(tf.math.reduce_sum(interactive2, axis=1), shape=[-1])

    # Pairwise objective: sigmoid cross-entropy on the score difference.
    dy_tilde = (linear1 + interactive1) - (linear2 + interactive2)
    loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=subgraph["dy"],
                                                   logits=dy_tilde, name="loss")
    mean_loss = tf.reduce_mean(loss)
    subgraph.register_global_loss(mean_loss)
    tf.summary.scalar('loss', mean_loss)
    subgraph.register_global_summary(tf.summary.merge_all())
    subgraph.register_global_output(subgraph["dy"])
    subgraph.register_global_output(dy_tilde)
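
Examples #1 and #4 sum the output of fm_layer.apply over axis 1, which matches the standard factorization-machine decomposition of the pairwise interaction term. The sketch below is a minimal stand-in for that call, assuming fm_layer computes Rendle's second-order term per latent factor; the function name, variable name, and initializer are illustrative (and the l2_weight regularization is omitted), not the library's actual internals.

import tensorflow as tf

def fm_interaction(x, factor_dim, scope="InteractiveComponent"):
    """Per-factor FM interaction, 0.5 * ((xV)^2 - (x^2)(V^2))."""
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        num_features = x.get_shape().as_list()[1]
        v = tf.get_variable("factors",
                            shape=[num_features, factor_dim],
                            initializer=tf.truncated_normal_initializer(stddev=0.01))
        sum_square = tf.square(tf.matmul(x, v))             # (xV)^2,     [None, factor_dim]
        square_sum = tf.matmul(tf.square(x), tf.square(v))  # (x^2)(V^2), [None, factor_dim]
        return 0.5 * (sum_square - square_sum)              # caller sums over axis 1
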
Code example #2
def apply(in_tensor,
          dims,
          subgraph,
          item_bias=None,
          extra=None,
          l2_reg=None,
          labels=None,
          dropout=None,
          train=None,
          scope=None):
    """
        MLP softmax layer
        final layer, registers loss for training and prediction for serving
    """
    with tf.variable_scope(scope,
                           default_name='MLPSoftmax',
                           reuse=tf.AUTO_REUSE):
        # Dropout is applied only between hidden layers and only at
        # training time; otherwise both modes build the same MLP.
        logits = fully_connected_layer.apply(in_tensor=in_tensor,
                                             dims=dims,
                                             subgraph=subgraph,
                                             bias_in=True,
                                             bias_mid=True,
                                             bias_out=False,
                                             dropout_mid=dropout if train else None,
                                             l2_reg=l2_reg,
                                             scope='mlp_reg')

        if item_bias is not None:
            logits += item_bias

        if train:
            loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
                labels=labels, logits=logits)
            subgraph.register_global_loss(tf.reduce_mean(loss))
        else:
            subgraph.register_global_output(logits)
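
A hedged sketch of how this layer might be wired into train and serve subgraphs. subgraph['seq_vec'], subgraph['label'], and the constants below are placeholders for illustration, not names from any particular model.

NUM_ITEMS = 10000  # assumed catalog size, illustration only

def train_graph(subgraph):
    apply(in_tensor=subgraph['seq_vec'],   # [batch, input_dim] input features
          dims=[512, NUM_ITEMS],           # one hidden layer, then a logit per item
          subgraph=subgraph,
          labels=subgraph['label'],        # target item ids, [batch]
          dropout=0.5,
          l2_reg=1e-4,
          train=True,
          scope='MLPSoftmax')

def serve_graph(subgraph):
    # Same scope with train=False: AUTO_REUSE picks up the trained
    # weights and the logits are registered as the serving output.
    apply(in_tensor=subgraph['seq_vec'],
          dims=[512, NUM_ITEMS],
          subgraph=subgraph,
          train=False,
          scope='MLPSoftmax')
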
Code example #3
def train_fusion_graph(subgraph):
    # Score each input with a single linear layer; the shared
    # 'Weights1dTensor' scope ties the weights of the two branches.
    logits_1 = fully_connected_layer.apply(subgraph['X1'], [1],
                                           subgraph,
                                           relu_in=False,
                                           relu_mid=False,
                                           relu_out=False,
                                           dropout_in=None,
                                           dropout_mid=None,
                                           dropout_out=None,
                                           bias_in=True,
                                           bias_mid=True,
                                           bias_out=True,
                                           batch_norm=False,
                                           train=False,
                                           l2_reg=l2_reg,
                                           scope='Weights1dTensor')
    logits_2 = fully_connected_layer.apply(subgraph['X2'], [1],
                                           subgraph,
                                           relu_in=False,
                                           relu_mid=False,
                                           relu_out=False,
                                           dropout_in=None,
                                           dropout_mid=None,
                                           dropout_out=None,
                                           bias_in=True,
                                           bias_mid=True,
                                           bias_out=True,
                                           batch_norm=False,
                                           train=False,
                                           l2_reg=l2_reg,
                                           scope='Weights1dTensor')
    # Pairwise objective on the score difference, shaped [None].
    dy_tilde = tf.squeeze(logits_1 - logits_2)
    loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=subgraph['dy'],
                                                   logits=dy_tilde,
                                                   name='loss')
    mean_loss = tf.reduce_mean(loss)
    subgraph.register_global_loss(mean_loss)
    subgraph.register_global_output(subgraph['dy'])
    subgraph.register_global_output(dy_tilde)
    tf.summary.scalar('loss', mean_loss)
    subgraph.register_global_summary(tf.summary.merge_all())
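
As an aside (assuming the label convention dy = 1 marks that the first item should rank higher, which the source does not state): sigmoid cross-entropy on the score difference then reduces to the BPR objective -log σ(s1 - s2). A quick NumPy check:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def bce_with_logits(label, logit):
    # The quantity tf.nn.sigmoid_cross_entropy_with_logits computes.
    p = sigmoid(logit)
    return -(label * np.log(p) + (1.0 - label) * np.log(1.0 - p))

s1, s2 = 2.0, 0.5                                   # scores of the two items
loss = bce_with_logits(1.0, s1 - s2)                # label dy = 1
assert np.isclose(loss, -np.log(sigmoid(s1 - s2)))  # equals BPR's -log σ(s1 - s2)
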
Code example #4
def serve_interactiongraph(subgraph):
    # Reuses the variables trained in train_interactiongraph via the
    # same "LinearComponent" / "InteractiveComponent" scopes.
    linear = fully_connected_layer.apply(subgraph["X"], [1], subgraph,
        relu_in=False, relu_mid=False, relu_out=False,
        dropout_in=None, dropout_mid=None, dropout_out=None,
        bias_in=True, bias_mid=True, bias_out=True, batch_norm=False,
        train=False, l2_reg=l2_reg, scope="LinearComponent")
    linear = tf.reshape(linear, shape=[-1])  # shaped [None, ]
    interactive = fm_layer.apply(subgraph["X"], factor_dim, l2_weight=0.01, scope="InteractiveComponent")  # shaped [None, factor_dim]
    interactive = tf.reshape(tf.math.reduce_sum(interactive, axis=1), shape=[-1])
    score = linear + interactive  # final FM score per example
    subgraph.register_global_output(score)
Code example #5
def serve_fusion_graph(subgraph):
    # Reuses the 'Weights1dTensor' variables trained in train_fusion_graph
    # and registers the raw score as the serving output.
    logit = fully_connected_layer.apply(subgraph['X'], [1],
                                        subgraph,
                                        relu_in=False,
                                        relu_mid=False,
                                        relu_out=False,
                                        dropout_in=None,
                                        dropout_mid=None,
                                        dropout_out=None,
                                        bias_in=True,
                                        bias_mid=True,
                                        bias_out=True,
                                        batch_norm=False,
                                        train=False,
                                        l2_reg=l2_reg,
                                        scope='Weights1dTensor')
    subgraph.register_global_output(logit)