Example #1
    def _build_training_graph(self):

        with tf.variable_scope(self._scope, reuse=self._reuse):

            if self._extra is not None:
                in_tensor = tf.concat([self._user, self._item, self._extra],
                                      axis=1)
            else:
                in_tensor = tf.concat([self._user, self._item], axis=1)

            reg = MultiLayerFC(in_tensor=in_tensor,
                               dims=self._dims,
                               bias_in=True,
                               bias_mid=True,
                               bias_out=False,
                               dropout_mid=self._dropout,
                               l2_reg=self._l2_reg,
                               scope='mlp_reg',
                               reuse=self._reuse)

            logits = reg.get_outputs()[0]
            if self._item_bias is not None:
                logits += self._item_bias

            labels_float = tf.reshape(tf.to_float(self._labels), (-1, 1))
            self._loss = tf.reduce_sum(
                tf.nn.sigmoid_cross_entropy_with_logits(labels=labels_float,
                                                        logits=logits))
            self._outputs.append(logits)
Example #2
def PointwiseMLPCE(
    user,
    item,
    dims,
    subgraph,
    item_bias=None,
    extra=None,
    l2_reg=None,
    labels=None,
    dropout=None,
    train=None,
    scope=None,
):

    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):

        if extra is not None:
            in_tensor = tf.concat([user, item, extra], axis=1)
        else:
            in_tensor = tf.concat([user, item], axis=1)
        if train:
            reg = MultiLayerFC(
                in_tensor=in_tensor,
                dims=dims,
                subgraph=subgraph,
                bias_in=True,
                bias_mid=True,
                bias_out=False,
                dropout_mid=dropout,
                l2_reg=l2_reg,
                scope="mlp_reg",
            )
        else:
            reg = MultiLayerFC(
                in_tensor=in_tensor,
                dims=dims,
                subgraph=subgraph,
                bias_in=True,
                bias_mid=True,
                bias_out=False,
                l2_reg=l2_reg,
                scope="mlp_reg",
            )

        logits = reg  # this MultiLayerFC variant returns the logits tensor directly
        if item_bias is not None:
            logits += item_bias

        if train:
            labels_float = tf.reshape(tf.to_float(labels), (-1, 1))
            subgraph.register_global_loss(
                tf.reduce_sum(
                    tf.nn.sigmoid_cross_entropy_with_logits(
                        labels=labels_float, logits=logits)))
            subgraph.register_global_output(logits)
        else:
            subgraph.register_global_output(tf.sigmoid(logits))
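For orientation, the same pointwise scoring head can be sketched without OpenRec. The snippet below is a hedged, self-contained TF1 approximation, not part of the example: the 32-d embeddings and the [16, 1] layer widths are hypothetical, and tf.layers.dense stands in for MultiLayerFC.

# Minimal TF1 sketch of the pointwise sigmoid cross-entropy head above.
import tensorflow as tf

user = tf.placeholder(tf.float32, shape=(None, 32), name='user_vec')
item = tf.placeholder(tf.float32, shape=(None, 32), name='item_vec')
labels = tf.placeholder(tf.int32, shape=(None,), name='labels')

in_tensor = tf.concat([user, item], axis=1)           # (batch, 64)
hidden = tf.layers.dense(in_tensor, 16, activation=tf.nn.relu)
logits = tf.layers.dense(hidden, 1, use_bias=False)   # (batch, 1)

labels_float = tf.reshape(tf.to_float(labels), (-1, 1))
loss = tf.reduce_sum(
    tf.nn.sigmoid_cross_entropy_with_logits(labels=labels_float,
                                            logits=logits))
scores = tf.sigmoid(logits)   # serving-time output, as in the else branch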
Example #3
def MLPSoftmax(user,
               item,
               seq_len,
               max_seq_len,
               dims,
               subgraph,
               item_bias=None,
               extra=None,
               l2_reg=None,
               labels=None,
               dropout=None,
               train=None,
               scope=None):

    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):

        # zero out padded positions, then average the item vectors the user
        # interacted with (note: the mean divides by max_seq_len)
        seq_mask = tf.sequence_mask(seq_len, max_seq_len, dtype=tf.float32)
        item = tf.reduce_mean(item * tf.expand_dims(seq_mask, axis=2), axis=1)

        if user is not None:
            in_tensor = tf.concat([user, item], axis=1)
        else:
            in_tensor = item  # tf.concat of a single tensor is a no-op

        if extra is not None:
            in_tensor = tf.concat([in_tensor, extra], axis=1)

        if train:
            logits = MultiLayerFC(in_tensor=in_tensor,
                                  dims=dims,
                                  subgraph=subgraph,
                                  bias_in=True,
                                  bias_mid=True,
                                  bias_out=False,
                                  dropout_mid=dropout,
                                  l2_reg=l2_reg,
                                  scope='mlp_reg')
        else:
            logits = MultiLayerFC(in_tensor=in_tensor,
                                  dims=dims,
                                  subgraph=subgraph,
                                  bias_in=True,
                                  bias_mid=True,
                                  bias_out=False,
                                  l2_reg=l2_reg,
                                  scope='mlp_reg')

        if item_bias is not None:
            logits += item_bias

        if train:
            loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
                labels=labels, logits=logits)
            subgraph.register_global_loss(tf.reduce_mean(loss))
        else:
            subgraph.register_global_output(logits)
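One subtlety in the masking above: tf.reduce_mean divides by max_seq_len, not by the true sequence length, so short sequences are averaged over their padded zeros as well. A small self-contained sketch with made-up shapes shows the difference:

import tensorflow as tf

item = tf.ones((2, 4, 3))                      # (batch=2, max_seq_len=4, dim=3)
seq_len = tf.constant([2, 4])
seq_mask = tf.sequence_mask(seq_len, 4, dtype=tf.float32)   # (2, 4)
masked = item * tf.expand_dims(seq_mask, axis=2)
padded_mean = tf.reduce_mean(masked, axis=1)   # divides both rows by 4
true_mean = tf.reduce_sum(masked, axis=1) / tf.expand_dims(
    tf.to_float(seq_len), axis=1)              # divides by 2 and by 4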
Example #4
    def _build_shared_graph(self):

        with tf.variable_scope(self._scope, reuse=self._reuse):

            _encoder = MultiLayerFC(l2_reg=self._l2_reg,
                                    in_tensor=self._in_tensor,
                                    dims=self._dims[1:],
                                    scope='encoder',
                                    dropout_in=self._dropout,
                                    dropout_mid=self._dropout,
                                    reuse=self._reuse)
            _decoder = MultiLayerFC(l2_reg=self._l2_reg,
                                    in_tensor=_encoder.get_outputs()[0],
                                    dims=self._dims[::-1][1:],
                                    scope='decoder',
                                    relu_in=True,
                                    dropout_in=self._dropout,
                                    relu_mid=True,
                                    dropout_mid=self._dropout,
                                    relu_out=True,
                                    dropout_out=self._dropout,
                                    reuse=self._reuse)

            self._outputs += _encoder.get_outputs()
            self._loss = _encoder.get_loss() + _decoder.get_loss()
            self._loss += self._l2_reconst * tf.nn.l2_loss(
                _decoder.get_outputs()[0] - self._in_tensor)
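The two dims slices build a mirrored encoder/decoder around the bottleneck. A worked example with a hypothetical dims list (input width first):

dims = [784, 256, 64]
encoder_dims = dims[1:]        # [256, 64]  : 784 -> 256 -> 64
decoder_dims = dims[::-1][1:]  # [256, 784] : 64 -> 256 -> 784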
Example #5
    def _build_item_extractions(self, train=True):

        super(VisualBPR, self)._build_item_extractions(train)
        if train:
            self._p_item_vf = MultiLayerFC(in_tensor=self._p_item_vfeature_input,
                                           dropout_mid=self._dropout_rate,
                                           dims=self._dims,
                                           scope='item_visual_embed', reuse=False)
            self._n_item_vf = MultiLayerFC(in_tensor=self._n_item_vfeature_input,
                                           dropout_mid=self._dropout_rate,
                                           dims=self._dims,
                                           scope='item_visual_embed', reuse=True)
            self._loss_nodes += [self._p_item_vf, self._n_item_vf]
        else:
            self._item_vf_serving = MultiLayerFC(in_tensor=self._item_vfeature_serving,
                                                 dims=self._dims,
                                                 scope='item_visual_embed', reuse=True)
Example #6
    def _build_item_extractions(self, train=True):

        super(VisualBPR, self)._build_item_extractions(train)
        if train:
            self._add_module('p_item_vf',
                             MultiLayerFC(in_tensor=self._get_input('p_item_vfeature'),
                                          dropout_mid=self._dropout_rate, train=True,
                                          l2_reg=self._l2_reg_mlp, dims=self._dims,
                                          scope='item_visual_embed', reuse=False))
            self._add_module('n_item_vf',
                             MultiLayerFC(in_tensor=self._get_input('n_item_vfeature'),
                                          dropout_mid=self._dropout_rate, train=True,
                                          l2_reg=self._l2_reg_mlp, dims=self._dims,
                                          scope='item_visual_embed', reuse=True))
        else:
            self._add_module('item_vf',
                             MultiLayerFC(in_tensor=self._get_input('item_vfeature', train=False),
                                          train=False, l2_reg=self._l2_reg_mlp,
                                          dims=self._dims, scope='item_visual_embed',
                                          reuse=True),
                             train=False)
Example #7
def train_add_item_graph(subgraph):
    p_item_vout = MultiLayerFC(in_tensor=subgraph['p_item_vfeature'],
                               l2_reg=l2_reg_mlp,
                               subgraph=subgraph,
                               dims=[dim_user_embed - dim_item_embed],
                               scope='item_MLP')
    n_item_vout = MultiLayerFC(in_tensor=subgraph['n_item_vfeature'],
                               l2_reg=l2_reg_mlp,
                               subgraph=subgraph,
                               dims=[dim_user_embed - dim_item_embed],
                               scope='item_MLP')
    subgraph['p_item_vec'] = tf.concat(
        [subgraph['p_item_vec'], p_item_vout], axis=1)
    subgraph['n_item_vec'] = tf.concat(
        [subgraph['n_item_vec'], n_item_vout], axis=1)
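The MLP width dim_user_embed - dim_item_embed is chosen so that the concatenated item vector matches the user embedding width. A quick check with hypothetical sizes:

# Width bookkeeping for the concat above (hypothetical numbers):
dim_user_embed, dim_item_embed = 60, 40
mlp_out_dim = dim_user_embed - dim_item_embed           # 20-d visual projection
assert dim_item_embed + mlp_out_dim == dim_user_embed   # concat matches user width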
Example #8
    def _build_item_extractions(self, train=True):

        super(ConcatVisualBPR, self)._build_item_extractions(train)

        if train:
            self._loss_nodes.remove(self._p_item_vec)
            self._loss_nodes.remove(self._n_item_vec)

            self._p_item_lf = LatentFactor(
                init='normal',
                l2_reg=self._l2_reg,
                ids=self._p_item_id_input,
                shape=[self._max_item, self._dim_embed - self._dim_ve],
                scope='item',
                reuse=False)
            self._p_item_vf = MultiLayerFC(
                in_tensor=self._p_item_vfeature_input,
                dims=[self._dim_ve],
                scope='item_MLP',
                reuse=False)
            self._n_item_lf = LatentFactor(
                init='normal',
                l2_reg=self._l2_reg,
                ids=self._n_item_id_input,
                shape=[self._max_item, self._dim_embed - self._dim_ve],
                scope='item',
                reuse=True)
            self._n_item_vf = MultiLayerFC(
                in_tensor=self._n_item_vfeature_input,
                dims=[self._dim_ve],
                scope='item_MLP',
                reuse=True)
        else:

            self._item_lf_serving = LatentFactor(
                init='normal',
                l2_reg=self._l2_reg,
                ids=self._item_id_serving,
                shape=[self._max_item, self._dim_embed - self._dim_ve],
                scope='item',
                reuse=True)
            self._item_vf_serving = MultiLayerFC(
                in_tensor=self._item_vfeature_serving,
                dims=[self._dim_ve],
                scope='item_MLP',
                reuse=True)
Example #9
    def _build_serving_graph(self):

        with tf.variable_scope(self._scope, reuse=self._reuse):
            # score every (user, item) pair: repeat each user row once per
            # item, and tile the item matrix once per user
            user_rep = tf.reshape(
                tf.tile(self._user, [1, tf.shape(self._item)[0]]),
                (-1, tf.shape(self._user)[1]))
            item_rep = tf.tile(self._item, (tf.shape(self._user)[0], 1))
            item_bias_rep = tf.tile(self._item_bias,
                                    (tf.shape(self._user)[0], 1))
            in_tensor = tf.concat([user_rep, item_rep], axis=1)
            reg = MultiLayerFC(
                in_tensor=in_tensor,
                dims=self._dims,
                bias_in=True,
                bias_mid=True,
                bias_out=False,
                l2_reg=self._l2_reg,
                scope='mlp_reg',
                reuse=self._reuse)

            self._outputs.append(
                tf.reshape(reg.get_outputs()[0] + item_bias_rep,
                           (tf.shape(self._user)[0], tf.shape(self._item)[0])))
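The tile/reshape pair enumerates the full user-item cross product, so one forward pass scores every pair. A standalone sketch with concrete, made-up sizes:

import tensorflow as tf

user = tf.reshape(tf.range(4, dtype=tf.float32), (2, 2))    # 2 users, dim 2
item = tf.reshape(tf.range(6, dtype=tf.float32), (3, 2))    # 3 items, dim 2
user_rep = tf.reshape(tf.tile(user, [1, tf.shape(item)[0]]),
                      (-1, tf.shape(user)[1]))    # rows: u0,u0,u0,u1,u1,u1
item_rep = tf.tile(item, (tf.shape(user)[0], 1))  # rows: i0,i1,i2,i0,i1,i2
pairs = tf.concat([user_rep, item_rep], axis=1)   # (6, 4): all 2x3 pairs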
Example #10
    def _build_user_extractions(self, train=True):

        super(UserVisualPMF, self)._build_user_extractions(train)
        
        self._add_module('user_f',
                         MultiLayerFC(in_tensor=self._get_input('user_feature', train=train),
                                      train=train, dims=self._dims_user,
                                      l2_reg=self._l2_reg_mlp,
                                      dropout_mid=self._dropout_rate,
                                      scope='user_MLP', reuse=not train),
                         train=train)
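The reuse=not train flag means the training pass creates the 'user_MLP' variables and the serving pass reuses them. A minimal sketch of that variable_scope pattern (names are illustrative):

import tensorflow as tf

def build(train):
    # create variables when training, reuse the same ones when serving
    with tf.variable_scope('user_MLP', reuse=not train):
        return tf.get_variable('w', shape=(4, 4))

w_train = build(train=True)    # creates user_MLP/w
w_serve = build(train=False)   # reuses user_MLP/w
assert w_train is w_serve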
Example #11
    def _build_training_graph(self):

        with tf.variable_scope(self._scope, reuse=self._reuse):
            pointwise_product = tf.multiply(self._user, self._item)
            gdp = MultiLayerFC(in_tensor=pointwise_product,
                               dims=[1],
                               bias_in=False,
                               bias_mid=False,
                               bias_out=False,
                               l2_reg=self._l2_reg,
                               scope='gmf_reg',
                               reuse=self._reuse)

            logits = gdp.get_outputs()[0] + self._item_bias
            labels_float = tf.reshape(tf.to_float(self._labels), (-1, 1))
            self._loss = tf.reduce_sum(
                tf.nn.sigmoid_cross_entropy_with_logits(labels=labels_float,
                                                        logits=logits))
            self._outputs.append(logits)
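With a single output unit, no biases, and (assuming MultiLayerFC applies no output activation here) a linear output layer, the 'gmf_reg' block reduces to a weighted inner product of the user and item vectors, i.e. the GMF form. A plain-TF sketch under those assumptions, with a hypothetical embedding width:

import tensorflow as tf

user = tf.placeholder(tf.float32, (None, 8))
item = tf.placeholder(tf.float32, (None, 8))
w = tf.get_variable('gmf_w', shape=(8, 1))
# sum_k w_k * user_k * item_k per row: a weighted inner product
logits = tf.matmul(tf.multiply(user, item), w)   # (batch, 1)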
Example #12
def serving_add_item_graph(subgraph):
    item_vout = MultiLayerFC(
        in_tensor=subgraph["item_vfeature"],
        l2_reg=l2_reg_mlp,
        subgraph=subgraph,
        dims=[dim_user_embed - dim_item_embed],
        scope="item_MLP",
    )
    subgraph["item_vec"] = tf.concat([subgraph["item_vec"], item_vout],
                                     axis=1)
Example #13
    def _build_item_extractions(self, train=True):

        super(VisualPMF, self)._build_item_extractions(train)

        if train:
            self._item_vf = MultiLayerFC(in_tensor=self._item_vfeature_input,
                                         dims=self._dims,
                                         l2_reg=self._l2_reg_mlp,
                                         dropout_mid=self._dropout_rate,
                                         scope='item_MLP',
                                         reuse=False)
            self._loss_nodes += [self._item_vf]
        else:
            self._item_vf_serving = MultiLayerFC(
                in_tensor=self._item_vfeature_serving,
                dims=self._dims,
                l2_reg=self._l2_reg_mlp,
                dropout_mid=self._dropout_rate,
                scope='item_MLP',
                reuse=True)
Example #14
    def _build_user_extractions(self, train=True):

        super(UserPMF, self)._build_user_extractions(train)

        if train:
            self._loss_nodes.remove(self._user_vec)
            self._user_f = MultiLayerFC(in_tensor=self._user_feature_input,
                                        dims=self._dims,
                                        l2_reg=self._l2_reg_mlp,
                                        dropout_mid=self._dropout_rate,
                                        scope='user_MLP',
                                        reuse=False)
        else:
            self._user_f_serving = MultiLayerFC(
                in_tensor=self._user_feature_serving,
                dims=self._dims,
                l2_reg=self._l2_reg_mlp,
                dropout_mid=self._dropout_rate,
                scope='user_MLP',
                reuse=True)
Example #15
    def _build_serving_graph(self):

        with tf.variable_scope(self._scope, reuse=self._reuse):
            user_rep = tf.reshape(
                tf.tile(self._user, [1, tf.shape(self._item)[0]]),
                (-1, tf.shape(self._user)[1]))
            item_rep = tf.tile(self._item, (tf.shape(self._user)[0], 1))
            item_bias_rep = tf.tile(self._item_bias,
                                    (tf.shape(self._user)[0], 1))
            pointwise_product = tf.multiply(user_rep, item_rep)
            gdp = MultiLayerFC(in_tensor=pointwise_product,
                               dims=[1],
                               bias_in=False,
                               bias_mid=False,
                               bias_out=False,
                               l2_reg=self._l2_reg,
                               scope='gmf_reg',
                               reuse=self._reuse)
            self._outputs.append(
                tf.reshape(gdp.get_outputs()[0] + item_bias_rep,
                           (tf.shape(self._user)[0], tf.shape(self._item)[0])))
Example #16
    def _build_training_graph(self):

        with tf.variable_scope(self._scope, reuse=self._reuse):

            if self._mlp_pretrain:
                self._pretrain_input = tf.placeholder(tf.float32,
                                                      shape=(32,
                                                             self._shape[1]),
                                                      name='pretrain_input')
                trans_embedding = MultiLayerFC(in_tensor=self._pretrain_input,
                                               dims=self._mlp_dims,
                                               batch_norm=True,
                                               scope=self._scope + '/MLP',
                                               train=True,
                                               reuse=True,
                                               l2_reg=self._l2_reg,
                                               relu_out=True)
                identity_loss = tf.nn.l2_loss(
                    trans_embedding.get_outputs()[0] - self._pretrain_input)
                self._pretrain_ops = tf.train.AdamOptimizer(
                    learning_rate=0.001).minimize(identity_loss)
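Taken in isolation, the pretraining op fits the MLP toward the identity map on its input batch; tf.nn.l2_loss is half the summed squared error. A self-contained sketch of that objective with hypothetical sizes, using tf.layers.dense in place of MultiLayerFC:

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=(32, 16), name='pretrain_input')
y = tf.layers.dense(x, 16, activation=tf.nn.relu, name='mlp')
identity_loss = tf.nn.l2_loss(y - x)   # 0.5 * sum((y - x)^2)
pretrain_op = tf.train.AdamOptimizer(
    learning_rate=0.001).minimize(identity_loss)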
Example #17
    def _build_serving_graph(self):

        with tf.variable_scope(self._scope, reuse=self._reuse):
            if self._batch_serving:
                user_rep = tf.reshape(
                    tf.tile(self._user, [1, tf.shape(self._item)[0]]),
                    (-1, tf.shape(self._user)[1]))
                item_rep = tf.tile(self._item, (tf.shape(self._user)[0], 1))
                if self._extra is not None:
                    extra_rep = tf.tile(self._extra,
                                        (tf.shape(self._user)[0], 1))
                    in_tensor = tf.concat([user_rep, item_rep, extra_rep],
                                          axis=1)
                else:
                    in_tensor = tf.concat([user_rep, item_rep], axis=1)
                reg = MultiLayerFC(in_tensor=in_tensor,
                                   dims=self._dims,
                                   bias_in=True,
                                   bias_mid=True,
                                   bias_out=False,
                                   l2_reg=self._l2_reg,
                                   scope='mlp_reg',
                                   reuse=self._reuse)
                if self._item_bias is not None:
                    item_bias_rep = tf.tile(self._item_bias,
                                            (tf.shape(self._user)[0], 1))
                    self._outputs.append(
                        tf.reshape(reg.get_outputs()[0] + item_bias_rep,
                                   (tf.shape(self._user)[0],
                                    tf.shape(self._item)[0])))
                else:
                    self._outputs.append(
                        tf.reshape(reg.get_outputs()[0], (tf.shape(
                            self._user)[0], tf.shape(self._item)[0])))
            else:
                if self._extra is not None:
                    in_tensor = tf.concat(
                        [self._user, self._item, self._extra], axis=1)
                else:
                    in_tensor = tf.concat([self._user, self._item], axis=1)
                reg = MultiLayerFC(in_tensor=in_tensor,
                                   dims=self._dims,
                                   bias_in=True,
                                   bias_mid=True,
                                   bias_out=False,
                                   l2_reg=self._l2_reg,
                                   scope='mlp_reg',
                                   reuse=self._reuse)
                logits = reg.get_outputs()[0]
                if self._item_bias is not None:
                    logits += self._item_bias
                self._outputs.append(tf.sigmoid(logits))
Example #18
    def _build_shared_graph(self):

        with tf.variable_scope(self._scope, reuse=self._reuse):

            self._embedding = tf.get_variable('embedding',
                                              dtype=tf.float32,
                                              shape=self._shape,
                                              trainable=False,
                                              initializer=self._initializer)

            self._flag = tf.get_variable('flag',
                                         dtype=tf.bool,
                                         shape=[self._shape[0]],
                                         trainable=False,
                                         initializer=tf.constant_initializer(
                                             value=False, dtype=tf.bool))
            unique_ids, _ = tf.unique(self._ids)

            with tf.control_dependencies([
                    tf.scatter_update(self._flag, unique_ids,
                                      tf.ones_like(unique_ids, dtype=tf.bool))
            ]):
                trans_embedding = MultiLayerFC(
                    in_tensor=tf.nn.embedding_lookup(self._embedding,
                                                     self._ids),
                    dims=self._mlp_dims,
                    batch_norm=True,
                    scope=self._scope + '/MLP',
                    train=self._train,
                    reuse=self._reuse,
                    l2_reg=self._l2_reg,
                    relu_out=True)

            self._outputs += trans_embedding.get_outputs()
            self._loss += trans_embedding.get_loss()

            update_ids = tf.reshape(tf.where(self._flag), [-1])
            update_embedding = MultiLayerFC(in_tensor=tf.nn.embedding_lookup(
                self._embedding, update_ids),
                                            dims=self._mlp_dims,
                                            batch_norm=True,
                                            scope=self._scope + '/MLP',
                                            train=False,
                                            reuse=True,
                                            l2_reg=self._l2_reg,
                                            relu_out=True)
            self._update_node = tf.scatter_update(
                self._embedding, update_ids,
                update_embedding.get_outputs()[0])
            self._clear_flag = tf.scatter_update(
                self._flag, update_ids, tf.zeros_like(update_ids,
                                                      dtype=tf.bool))
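The flag variable is the bookkeeping that drives the cached-embedding refresh: each batch marks the ids it touched, and the update and clear ops later operate on exactly those rows. A minimal sketch of that scatter pattern with made-up sizes:

import tensorflow as tf

flag = tf.get_variable('seen_flag', shape=[5], dtype=tf.bool, trainable=False,
                       initializer=tf.constant_initializer(value=False,
                                                           dtype=tf.bool))
ids = tf.constant([1, 3, 3])
unique_ids, _ = tf.unique(ids)
mark_seen = tf.scatter_update(flag, unique_ids,
                              tf.ones_like(unique_ids, dtype=tf.bool))
seen_ids = tf.reshape(tf.where(flag), [-1])   # rows flagged for refresh
clear_flag = tf.scatter_update(flag, seen_ids,
                               tf.zeros_like(seen_ids, dtype=tf.bool))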
Example #19
    def _build_item_extractions(self, train=True):

        if train:
            self._add_module(
                'p_item_lf',
                LatentFactor(
                    init='normal',
                    l2_reg=self._l2_reg,
                    ids=self._get_input('p_item_id'),
                    shape=[self._max_item, self._dim_embed - self._dim_ve],
                    scope='item',
                    reuse=False))
            self._add_module(
                'p_item_vf',
                MultiLayerFC(in_tensor=self._get_input('p_item_vfeature'),
                             dims=[self._dim_ve],
                             scope='item_MLP',
                             reuse=False))
            self._add_module(
                'p_item_bias',
                LatentFactor(l2_reg=self._l2_reg,
                             init='zero',
                             ids=self._get_input('p_item_id'),
                             shape=[self._max_item, 1],
                             scope='item_bias',
                             reuse=False))
            self._add_module(
                'n_item_lf',
                LatentFactor(
                    init='normal',
                    l2_reg=self._l2_reg,
                    ids=self._get_input('n_item_id'),
                    shape=[self._max_item, self._dim_embed - self._dim_ve],
                    scope='item',
                    reuse=True))
            self._add_module(
                'n_item_vf',
                MultiLayerFC(in_tensor=self._get_input('n_item_vfeature'),
                             dims=[self._dim_ve],
                             scope='item_MLP',
                             reuse=True))
            self._add_module(
                'n_item_bias',
                LatentFactor(l2_reg=self._l2_reg,
                             init='zero',
                             ids=self._get_input('n_item_id'),
                             shape=[self._max_item, 1],
                             scope='item_bias',
                             reuse=True))
        else:
            self._add_module('item_lf',
                             LatentFactor(init='normal',
                                          l2_reg=self._l2_reg,
                                          ids=self._get_input('item_id',
                                                              train=train),
                                          shape=[
                                              self._max_item,
                                              self._dim_embed - self._dim_ve
                                          ],
                                          scope='item',
                                          reuse=True),
                             train=False)
            self._add_module('item_vf',
                             MultiLayerFC(in_tensor=self._get_input(
                                 'item_vfeature', train=train),
                                          dims=[self._dim_ve],
                                          scope='item_MLP',
                                          reuse=True),
                             train=False)
            self._add_module('item_bias',
                             LatentFactor(l2_reg=self._l2_reg,
                                          init='zero',
                                          ids=self._get_input('item_id',
                                                              train=train),
                                          shape=[self._max_item, 1],
                                          scope='item_bias',
                                          reuse=True),
                             train=False)