Beispiel #1
0
 def train_item_graph(subgraph):
     """Attach embedding vectors and biases for positive and negative items.

     For each polarity ('p' = positive, 'n' = negative) this looks up the
     item ids in the subgraph and stores the resulting embedding and bias
     tensors back under '<prefix>_item_vec' / '<prefix>_item_bias'.
     """
     for prefix in ('p', 'n'):
         item_ids = subgraph[prefix + '_item_id']
         # Embedding vector, shared under the 'item' scope.
         _, subgraph[prefix + '_item_vec'] = LatentFactor(
             l2_reg=l2_reg,
             init='normal',
             id_=item_ids,
             shape=[total_items, dim_embed],
             subgraph=subgraph,
             scope='item')
         # Scalar bias, shared under the 'item_bias' scope.
         _, subgraph[prefix + '_item_bias'] = LatentFactor(
             l2_reg=l2_reg,
             init='zero',
             id_=item_ids,
             shape=[total_items, 1],
             subgraph=subgraph,
             scope='item_bias')
Beispiel #2
0
    def _build_item_extractions(self, train=True):
        """Create the MLP-refined item embedding and the item bias module.

        Training builds fresh variables and registers both modules as loss
        nodes; serving reuses the training variables (reuse=True).
        """
        item_ids = self._item_id_input if train else self._item_id_serving
        item_vec = TemporalLatentFactor(
            l2_reg=self._l2_reg,
            init=self._pretrained_item_embeddings,
            ids=item_ids,
            mlp_dims=self._item_dims,
            shape=[self._max_item, self._dim_embed],
            train=train,
            scope='item',
            reuse=not train)
        item_bias = LatentFactor(
            l2_reg=self._l2_reg,
            init='zero',
            ids=item_ids,
            shape=[self._max_item, 1],
            scope='item_bias',
            reuse=not train)
        if train:
            self._item_vec = item_vec
            self._item_bias = item_bias
            self._loss_nodes += [self._item_vec, self._item_bias]
        else:
            self._item_vec_serving = item_vec
            self._item_bias_serving = item_bias
Beispiel #3
0
 def train_item_graph(subgraph):
     """Attach embeddings and biases for positive ('p') and negative ('n') items."""
     for prefix in ("p", "n"):
         item_ids = subgraph[prefix + "_item_id"]
         # Item embedding under the shared "item" scope.
         _, subgraph[prefix + "_item_vec"] = LatentFactor(
             l2_reg=l2_reg,
             init="normal",
             id_=item_ids,
             shape=[total_items, dim_item_embed],
             subgraph=subgraph,
             scope="item",
         )
         # Scalar item bias under the shared "item_bias" scope.
         _, subgraph[prefix + "_item_bias"] = LatentFactor(
             l2_reg=l2_reg,
             init="zero",
             id_=item_ids,
             shape=[total_items, 1],
             subgraph=subgraph,
             scope="item_bias",
         )
Beispiel #4
0
 def _build_item_extractions(self, train=True):
     """Register the item embedding and item bias modules for the given phase.

     During serving (train=False) the latent factors reuse the variables
     created in the training graph.
     """
     module_specs = (('item_vec', 'normal', self._dim_embed, 'item'),
                     ('item_bias', 'zero', 1, 'item_bias'))
     for name, init_kind, last_dim, scope_name in module_specs:
         self._add_module(name,
                          LatentFactor(l2_reg=self._l2_reg,
                                       init=init_kind,
                                       ids=self._get_input('item_id',
                                                           train=train),
                                       shape=[self._max_item, last_dim],
                                       scope=scope_name,
                                       reuse=not train),
                          train=train)
Beispiel #5
0
    def _build_item_extractions(self, train=True):
        """Create the item embedding and item bias modules.

        Training creates the variables and adds both modules to the loss
        nodes; serving reuses the same variables.
        """
        item_ids = self._item_id_input if train else self._item_id_serving
        item_vec = LatentFactor(l2_reg=self._l2_reg,
                                init='normal',
                                ids=item_ids,
                                shape=[self._max_item, self._dim_embed],
                                scope='item',
                                reuse=not train)
        item_bias = LatentFactor(l2_reg=self._l2_reg,
                                 init='zero',
                                 ids=item_ids,
                                 shape=[self._max_item, 1],
                                 scope='item_bias',
                                 reuse=not train)
        if train:
            self._item_vec = item_vec
            self._item_bias = item_bias
            self._loss_nodes += [self._item_vec, self._item_bias]
        else:
            self._item_vec_serving = item_vec
            self._item_bias_serving = item_bias
 def _add_itemgraph(subgraph):
     """Fetch the full item embedding tables and wire cache-restore ops.

     Passing id_=None presumably returns the whole variable rather than a
     lookup — TODO confirm against LatentFactor. The registered 'cache'
     ops overwrite the tables from the cached values in the subgraph.
     """
     item_vecs, _ = LatentFactor(id_=None,
                                 shape=[total_items, dim_embed],
                                 subgraph=subgraph,
                                 scope='item')
     item_biases, _ = LatentFactor(id_=None,
                                   shape=[total_items, 1],
                                   subgraph=subgraph,
                                   scope='item_bias')
     subgraph.register_global_operation(
         tf.assign(item_vecs, subgraph['item_lf_cache']), 'cache')
     subgraph.register_global_operation(
         tf.assign(item_biases, subgraph['item_bias_cache']), 'cache')
Beispiel #7
0
 def user_graph(subgraph):
     """Embed the user ids into the subgraph's 'user_vec' entry."""
     _, subgraph['user_vec'] = LatentFactor(
         l2_reg=l2_reg,
         init='normal',
         id_=subgraph['user_id'],
         shape=[total_users, dim_user_embed],
         subgraph=subgraph,
         scope='user')
 def _add_usergraph(subgraph):
     """Fetch the user embedding table and register its cache-restore op."""
     # id_=None presumably yields the full table variable — TODO confirm.
     user_table, _ = LatentFactor(id_=None,
                                  shape=[total_users, dim_embed],
                                  subgraph=subgraph,
                                  scope='user')
     subgraph.register_global_operation(
         tf.assign(user_table, subgraph['user_lf_cache']), 'cache')
Beispiel #9
0
    def user_graph(subgraph):
        """Concatenate gender and geo embeddings into the user vector."""
        parts = []
        for feature in ('gender', 'geo'):
            _, vec = LatentFactor(
                l2_reg=l2_reg_embed,
                shape=[user_dict[feature], dim_user_embed[feature]],
                id_=subgraph['user_' + feature],
                subgraph=subgraph,
                init='normal',
                scope='user_' + feature)
            parts.append(vec)
        # Final user representation: [gender | geo] along the feature axis.
        subgraph['user_vec'] = tf.concat(parts, axis=1)
Beispiel #10
0
    def _build_item_extractions(self, train=True):
        """Build visual-BPR item extractions: a latent part plus a visual MLP part.

        The latent factor covers `dim_embed - dim_ve` dimensions and the MLP
        over visual features supplies the remaining `dim_ve`. The plain item
        vectors created by the parent class are removed from the loss nodes
        during training.
        """

        super(ConcatVisualBPR, self)._build_item_extractions(train)

        def _latent(ids_tensor, reuse_flag):
            # Latent slice of the concatenated item representation.
            return LatentFactor(
                init='normal',
                l2_reg=self._l2_reg,
                ids=ids_tensor,
                shape=[self._max_item, self._dim_embed - self._dim_ve],
                scope='item',
                reuse=reuse_flag)

        def _visual(in_tensor, reuse_flag):
            # Visual slice computed from the item's visual features.
            return MultiLayerFC(in_tensor=in_tensor,
                                dims=[self._dim_ve],
                                scope='item_MLP',
                                reuse=reuse_flag)

        if train:
            self._loss_nodes.remove(self._p_item_vec)
            self._loss_nodes.remove(self._n_item_vec)

            self._p_item_lf = _latent(self._p_item_id_input, False)
            self._p_item_vf = _visual(self._p_item_vfeature_input, False)
            self._n_item_lf = _latent(self._n_item_id_input, True)
            self._n_item_vf = _visual(self._n_item_vfeature_input, True)
        else:
            self._item_lf_serving = _latent(self._item_id_serving, True)
            self._item_vf_serving = _visual(self._item_vfeature_serving, True)
Beispiel #11
0
 def censor_user_vec(subgraph):
     """Register an op that censors the user embedding table for this batch's ids."""
     embedding_table, _ = LatentFactor(l2_reg=None,
                                       init='normal',
                                       id_=None,
                                       shape=[total_users, dim_user_embed],
                                       scope='user')
     censor_ops = censor_vec(embedding_table, subgraph['user_id'])
     subgraph.register_global_operation(censor_ops, 'censor_embedding')
Beispiel #12
0
 def item_graph(subgraph):
     """Embed the sequence item ids into the subgraph's 'seq_item_vec' entry."""
     _, subgraph['seq_item_vec'] = LatentFactor(
         init='normal',
         l2_reg=l2_reg_embed,
         id_=subgraph['seq_item_id'],
         shape=[total_items, dim_item_embed],
         subgraph=subgraph,
         scope='item')
Beispiel #13
0
 def item_graph(subgraph):
     """Embed the sequence item ids into subgraph["seq_item_vec"]."""
     _, subgraph["seq_item_vec"] = LatentFactor(
         init="normal",
         l2_reg=l2_reg_embed,
         id_=subgraph["seq_item_id"],
         shape=[total_items, dim_item_embed],
         subgraph=subgraph,
         scope="item")
Beispiel #14
0
 def user_graph(subgraph):
     """Embed the user ids into subgraph["user_vec"]."""
     _, subgraph["user_vec"] = LatentFactor(
         init="normal",
         l2_reg=l2_reg,
         id_=subgraph["user_id"],
         shape=[total_users, dim_user_embed],
         subgraph=subgraph,
         scope="user")
Beispiel #15
0
    def user_graph(subgraph):
        """Build the user vector by concatenating gender and geo embeddings."""
        embedded = []
        for feature in ("gender", "geo"):
            _, vec = LatentFactor(
                l2_reg=l2_reg_embed,
                shape=[user_dict[feature], dim_user_embed[feature]],
                id_=subgraph["user_" + feature],
                subgraph=subgraph,
                init="normal",
                scope="user_" + feature,
            )
            embedded.append(vec)
        # Concatenate per-feature embeddings along the feature axis.
        subgraph["user_vec"] = tf.concat(embedded, axis=1)
Beispiel #16
0
    def _build_user_extractions(self, train=True):
        """Create the user embedding module for the train or serving graph.

        Only the training module participates in the loss; serving reuses
        the training variables.
        """
        user_ids = self._user_id_input if train else self._user_id_serving
        user_vec = LatentFactor(l2_reg=self._l2_reg,
                                init='normal',
                                ids=user_ids,
                                shape=[self._max_user, self._dim_embed],
                                scope='user',
                                reuse=not train)
        if train:
            self._user_vec = user_vec
            self._loss_nodes += [self._user_vec]
        else:
            self._user_vec_serving = user_vec
Beispiel #17
0
 def censor_user_vec(subgraph):
     """Register a censoring op over the full user embedding table."""
     embedding_table, _ = LatentFactor(
         l2_reg=None,
         init="normal",
         id_=None,
         shape=[total_users, dim_user_embed],
         scope="user")
     censor_ops = censor_vec(embedding_table, subgraph["user_id"])
     subgraph.register_global_operation(censor_ops, "censor_embedding")
Beispiel #18
0
    def _build_user_extractions(self, train=True):
        """Register the user embedding module for the given phase."""
        user_ids = self._get_input('user_id', train=train)
        self._add_module(
            'user_vec',
            LatentFactor(l2_reg=self._l2_reg,
                         init='normal',
                         ids=user_ids,
                         shape=[self._max_user, self._dim_embed],
                         scope='user',
                         reuse=(not train)),
            train=train)
Beispiel #19
0
 def censor_item_vec(subgraph):
     """Register a censoring op over the item table for pos/neg item ids."""
     embedding_table, _ = LatentFactor(l2_reg=None,
                                       init='normal',
                                       id_=None,
                                       shape=[total_items, dim_item_embed],
                                       subgraph=subgraph,
                                       scope='item')
     # Censor both positive and negative items in one pass.
     batch_item_ids = tf.concat(
         [subgraph['p_item_id'], subgraph['n_item_id']], axis=0)
     censor_ops = censor_vec(embedding_table, batch_item_ids)
     subgraph.register_global_operation(censor_ops, 'censor_embedding')
Beispiel #20
0
 def censor_item_vec(subgraph):
     """Register a censoring op over the item embedding table."""
     embedding_table, _ = LatentFactor(
         l2_reg=None,
         init="normal",
         id_=None,
         shape=[total_items, dim_item_embed],
         subgraph=subgraph,
         scope="item")
     # Both positive and negative item ids are censored together.
     batch_ids = tf.concat(
         [subgraph["p_item_id"], subgraph["n_item_id"]], axis=0)
     censor_ops = censor_vec(embedding_table, batch_ids)
     subgraph.register_global_operation(censor_ops, "censor_embedding")
Beispiel #21
0
    def _build_extra_extractions(self, train=True):
        """Register timestamp embeddings for positive and negative items.

        Training only; both modules share the 'item' scope variables.
        """
        if train:
            for prefix in ('p', 'n'):
                self._add_module(prefix + '_extra_vec',
                                 LatentFactor(
                                     l2_reg=self._l2_reg,
                                     init='normal',
                                     ids=self._get_input(
                                         prefix + '_item_timestamp',
                                         train=train),
                                     shape=[self._max_item, self._dim_embed],
                                     scope='item',
                                     reuse=not train),
                                 train=train)
Beispiel #22
0
    def _build_item_extractions(self, train=True):
        """Register the MLP-refined item embedding and the item bias modules."""
        reuse_flag = not train  # serving reuses training variables
        self._add_module(
            'item_vec',
            TemporalLatentFactor(l2_reg=self._l2_reg,
                                 init=self._pretrained_item_embeddings,
                                 ids=self._get_input('item_id', train=train),
                                 mlp_dims=self._item_dims,
                                 shape=[self._max_item, self._dim_embed],
                                 train=train,
                                 scope='item',
                                 reuse=reuse_flag),
            train=train)
        self._add_module(
            'item_bias',
            LatentFactor(l2_reg=self._l2_reg,
                         init='zero',
                         ids=self._get_input('item_id', train=train),
                         shape=[self._max_item, 1],
                         scope='item_bias',
                         reuse=reuse_flag),
            train=train)
Beispiel #23
0
class ItrMLP(Recommender):
    """Recommender whose user/item embeddings are refined by per-entity MLPs.

    Pretrained embedding tables seed TemporalLatentFactor modules whose MLPs
    are first pretrained to the identity map (see ``_initialize``); scoring
    uses a sigmoid PointwiseMSE interaction.  NOTE(review): the semantics of
    TemporalLatentFactor / PointwiseMSE / Recommender live in the surrounding
    library — confirm details there.
    """

    def __init__(self,
                 batch_size,
                 dim_embed,
                 max_user,
                 max_item,
                 pretrained_user_embeddings,
                 pretrained_item_embeddings,
                 user_dims,
                 item_dims,
                 test_batch_size=None,
                 l2_reg=None,
                 opt='SGD',
                 sess_config=None):
        """Record embedding/MLP hyper-parameters, then build via Recommender.

        Args:
            batch_size: training mini-batch size.
            dim_embed: dimensionality of user and item embeddings.
            max_user: number of rows in the user embedding table.
            max_item: number of rows in the item embedding table.
            pretrained_user_embeddings: initial user embedding table.
            pretrained_item_embeddings: initial item embedding table.
            user_dims: layer widths of the user refinement MLP.
            item_dims: layer widths of the item refinement MLP.
            test_batch_size: serving batch size (optional).
            l2_reg: L2 regularization strength (optional).
            opt: optimizer name; defaults to 'SGD'.
            sess_config: TensorFlow session configuration (optional).
        """

        self._dim_embed = dim_embed
        self._pretrained_user_embeddings = pretrained_user_embeddings
        self._pretrained_item_embeddings = pretrained_item_embeddings
        self._user_dims = user_dims
        self._item_dims = item_dims

        # Recommender.__init__ drives the _build_* callbacks defined below.
        super(ItrMLP, self).__init__(batch_size=batch_size,
                                     test_batch_size=test_batch_size,
                                     max_user=max_user,
                                     max_item=max_item,
                                     l2_reg=l2_reg,
                                     opt=opt,
                                     sess_config=sess_config)

    def _initialize(self, init_dict):
        """Initialize variables, then pretrain both MLPs to the identity map."""

        super(ItrMLP, self)._initialize(init_dict=init_dict)
        print(colored('[Pretrain user MLP into identity]', 'blue'))
        self._user_vec.pretrain_mlp_as_identity(self._sess)
        print(colored('[Pretrain item MLP into identity]', 'blue'))
        self._item_vec.pretrain_mlp_as_identity(self._sess)

    def update_embeddings(self):
        """Push MLP-refined values back into the embedding tables."""

        self._user_vec.forward_update_embeddings(self._sess)
        self._item_vec.forward_update_embeddings(self._sess)

    def _input_mappings(self, batch_data, train):
        """Map a batch dict onto the training or serving placeholders."""

        if train:
            return {
                self._user_id_input: batch_data['user_id_input'],
                self._item_id_input: batch_data['item_id_input'],
                self._labels: batch_data['labels']
            }
        else:
            # Serving reuses the same batch keys but carries no labels.
            return {
                self._user_id_serving: batch_data['user_id_input'],
                self._item_id_serving: batch_data['item_id_input']
            }

    def _build_user_inputs(self, train=True):
        """Create the int32 user-id placeholder for the given phase."""

        if train:
            self._user_id_input = self._input(dtype='int32',
                                              shape=[self._batch_size],
                                              name='user_id_input')
        else:
            self._user_id_serving = self._input(dtype='int32',
                                                shape=[self._test_batch_size],
                                                name='user_id_serving')

    def _build_item_inputs(self, train=True):
        """Create the int32 item-id placeholder for the given phase."""

        if train:
            self._item_id_input = self._input(dtype='int32',
                                              shape=[self._batch_size],
                                              name='item_id_input')
        else:
            self._item_id_serving = self._input(dtype='int32',
                                                shape=[self._test_batch_size],
                                                name='item_id_serving')

    def _build_extra_inputs(self, train=True):
        """Create the float32 label placeholder (training graph only)."""

        if train:
            self._labels = self._input(dtype='float32',
                                       shape=[self._batch_size],
                                       name='labels')

    def _build_user_extractions(self, train=True):
        """Build the MLP-refined user embedding module for the given phase."""

        if train:
            self._user_vec = TemporalLatentFactor(
                l2_reg=self._l2_reg,
                init=self._pretrained_user_embeddings,
                ids=self._user_id_input,
                mlp_dims=self._user_dims,
                shape=[self._max_user, self._dim_embed],
                train=True,
                scope='user',
                reuse=False)
            self._loss_nodes += [self._user_vec]
        else:
            # Serving shares the training variables via reuse=True.
            self._user_vec_serving = TemporalLatentFactor(
                l2_reg=self._l2_reg,
                init=self._pretrained_user_embeddings,
                ids=self._user_id_serving,
                mlp_dims=self._user_dims,
                shape=[self._max_user, self._dim_embed],
                train=False,
                scope='user',
                reuse=True)

    def _build_item_extractions(self, train=True):
        """Build the item embedding and item bias modules for the given phase."""

        if train:
            self._item_vec = TemporalLatentFactor(
                l2_reg=self._l2_reg,
                init=self._pretrained_item_embeddings,
                ids=self._item_id_input,
                mlp_dims=self._item_dims,
                shape=[self._max_item, self._dim_embed],
                train=True,
                scope='item',
                reuse=False)
            self._item_bias = LatentFactor(l2_reg=self._l2_reg,
                                           init='zero',
                                           ids=self._item_id_input,
                                           shape=[self._max_item, 1],
                                           scope='item_bias',
                                           reuse=False)
            self._loss_nodes += [self._item_vec, self._item_bias]
        else:
            self._item_vec_serving = TemporalLatentFactor(
                l2_reg=self._l2_reg,
                init=self._pretrained_item_embeddings,
                ids=self._item_id_serving,
                mlp_dims=self._item_dims,
                shape=[self._max_item, self._dim_embed],
                train=False,
                scope='item',
                reuse=True)
            self._item_bias_serving = LatentFactor(l2_reg=self._l2_reg,
                                                   init='zero',
                                                   ids=self._item_id_serving,
                                                   shape=[self._max_item, 1],
                                                   scope='item_bias',
                                                   reuse=True)

    def _build_default_interactions(self, train=True):
        """Wire the sigmoid PointwiseMSE interaction for train or serving."""

        if train:
            self._interaction_train = PointwiseMSE(
                user=self._user_vec.get_outputs()[0],
                item=self._item_vec.get_outputs()[0],
                item_bias=self._item_bias.get_outputs()[0],
                labels=self._labels,
                a=1.0,
                b=1.0,
                sigmoid=True,
                train=True,
                scope='PointwiseMSE',
                reuse=False)
            self._loss_nodes.append(self._interaction_train)
        else:
            self._interaction_serve = PointwiseMSE(
                user=self._user_vec_serving.get_outputs()[0],
                item=self._item_vec_serving.get_outputs()[0],
                item_bias=self._item_bias_serving.get_outputs()[0],
                sigmoid=True,
                train=False,
                batch_serving=False,
                scope='PointwiseMSE',
                reuse=True)

    def _build_serving_graph(self):
        """Finish the serving graph and expose scores from the interaction."""

        super(ItrMLP, self)._build_serving_graph()
        self._scores = self._interaction_serve.get_outputs()[0]
Beispiel #24
0
 def _build_extra_extractions(self, train=True):
     """Register embedding modules for the auxiliary context features.

     One LatentFactor per feature, all sharing the 'item' variable scope.
     Training uses tf.AUTO_REUSE so the scope is created on first use;
     serving strictly reuses the training variables.
     """
     if train:
         reuse_flag, module_train = tf.AUTO_REUSE, True
     else:
         reuse_flag, module_train = True, False
     for feature in ('labels', 'source_system_tab',
                     'source_screen_name', 'source_type'):
         self._add_module(feature + '_vec',
                          LatentFactor(l2_reg=self._l2_reg,
                                       init='normal',
                                       ids=self._get_input(feature,
                                                           train=train),
                                       shape=[self._max_item,
                                              self._dim_embed],
                                       scope='item',
                                       reuse=reuse_flag),
                          train=module_train)
Beispiel #25
0
    def _build_item_extractions(self, train=True):
        """Register latent, visual-MLP, and bias item modules.

        Training builds the modules for positive items (creating the
        variables) then negative items (reusing them); serving builds one
        set that reuses the training variables.
        """

        def _latent(ids_tensor, reuse_flag):
            # Latent slice: dim_embed - dim_ve dimensions of the item vector.
            return LatentFactor(
                init='normal',
                l2_reg=self._l2_reg,
                ids=ids_tensor,
                shape=[self._max_item, self._dim_embed - self._dim_ve],
                scope='item',
                reuse=reuse_flag)

        def _visual(in_tensor, reuse_flag):
            # Visual slice: dim_ve dimensions from the feature MLP.
            return MultiLayerFC(in_tensor=in_tensor,
                                dims=[self._dim_ve],
                                scope='item_MLP',
                                reuse=reuse_flag)

        def _bias(ids_tensor, reuse_flag):
            # Scalar item bias term.
            return LatentFactor(l2_reg=self._l2_reg,
                                init='zero',
                                ids=ids_tensor,
                                shape=[self._max_item, 1],
                                scope='item_bias',
                                reuse=reuse_flag)

        if train:
            self._add_module('p_item_lf',
                             _latent(self._get_input('p_item_id'), False))
            self._add_module('p_item_vf',
                             _visual(self._get_input('p_item_vfeature'),
                                     False))
            self._add_module('p_item_bias',
                             _bias(self._get_input('p_item_id'), False))
            self._add_module('n_item_lf',
                             _latent(self._get_input('n_item_id'), True))
            self._add_module('n_item_vf',
                             _visual(self._get_input('n_item_vfeature'),
                                     True))
            self._add_module('n_item_bias',
                             _bias(self._get_input('n_item_id'), True))
        else:
            self._add_module('item_lf',
                             _latent(self._get_input('item_id', train=train),
                                     True),
                             train=False)
            self._add_module('item_vf',
                             _visual(self._get_input('item_vfeature',
                                                     train=train),
                                     True),
                             train=False)
            self._add_module('item_bias',
                             _bias(self._get_input('item_id', train=train),
                                   True),
                             train=False)
Beispiel #26
0
    def _build_user_extractions(self, train=True):
        """Build latent-factor embeddings for every user-side song feature.

        Each feature id input (song_id, artist, genre, language, lyricist,
        composer) is looked up in an embedding table of shape
        ``[self._max_item, self._dim_embed]`` under the shared ``'user'``
        variable scope and registered as module ``'<feature>_vec'``.

        Args:
            train: build the training graph (variables created on demand via
                ``tf.AUTO_REUSE``) when True; build the serving graph
                (strictly reusing the training variables) when False.

        NOTE(fix): the original serving branch used ``scope='item'`` for the
        genre embedding while the training branch used ``scope='user'``,
        so serving would not have reused the trained genre table. All
        features now consistently use ``scope='user'``.
        """
        feature_keys = ('song_id', 'artist', 'genre', 'language',
                        'lyricist', 'composer')
        # Training creates variables on first use; serving must strictly
        # reuse the variables the training graph created.
        reuse = tf.AUTO_REUSE if train else True
        for key in feature_keys:
            self._add_module(key + '_vec',
                             LatentFactor(
                                 l2_reg=self._l2_reg,
                                 init='normal',
                                 ids=self._get_input(key, train=train),
                                 shape=[self._max_item, self._dim_embed],
                                 scope='user',
                                 reuse=reuse),
                             train=train)
Example #27
class PMF(Recommender):
    def __init__(self,
                 batch_size,
                 dim_embed,
                 max_user,
                 max_item,
                 test_batch_size=None,
                 l2_reg=None,
                 opt='SGD',
                 sess_config=None):
        """Probabilistic Matrix Factorization recommender.

        Records the embedding dimensionality, then hands every remaining
        configuration value through to the Recommender base constructor.
        """
        # Must be stored before the base class triggers graph construction.
        self._dim_embed = dim_embed

        base_kwargs = dict(batch_size=batch_size,
                           test_batch_size=test_batch_size,
                           max_user=max_user,
                           max_item=max_item,
                           l2_reg=l2_reg,
                           opt=opt,
                           sess_config=sess_config)
        super(PMF, self).__init__(**base_kwargs)

    def _input_mappings(self, batch_data, train):
        """Translate a raw batch dict into a placeholder feed dict.

        Training feeds user ids, item ids and labels; serving feeds only
        the user-id placeholder.
        """
        if not train:
            return {self._user_id_serving: batch_data['user_id_input']}
        feed = {
            self._user_id_input: batch_data['user_id_input'],
            self._item_id_input: batch_data['item_id_input'],
            self._labels: batch_data['labels'],
        }
        return feed

    def _build_user_inputs(self, train=True):
        """Declare the user-id input placeholder for train or serving."""
        if train:
            # Training batches have a fixed size.
            self._user_id_input = self._input(
                dtype='int32', shape=[self._batch_size], name='user_id_input')
        else:
            # Serving accepts a variable number of users.
            self._user_id_serving = self._input(
                dtype='int32', shape=[None], name='user_id_serving')

    def _build_item_inputs(self, train=True):
        """Declare the item-id input placeholder (training graph only).

        The serving graph scores all items, so it has no item-id input.
        """
        if not train:
            self._item_id_serving = None
            return
        self._item_id_input = self._input(
            dtype='int32', shape=[self._batch_size], name='item_id_input')

    def _build_extra_inputs(self, train=True):
        """Declare the label placeholder consumed by the MSE loss.

        Only the training graph takes labels; serving builds nothing here.
        """
        if not train:
            return
        self._labels = self._input(
            dtype='float32', shape=[self._batch_size], name='labels')

    def _build_user_extractions(self, train=True):
        """Build the user latent-factor lookup for the train/serving graph."""
        user_shape = [self._max_user, self._dim_embed]
        if train:
            self._user_vec = LatentFactor(l2_reg=self._l2_reg,
                                          init='normal',
                                          ids=self._user_id_input,
                                          shape=user_shape,
                                          scope='user',
                                          reuse=False)
            # Registered so its L2 penalty joins the training loss.
            self._loss_nodes += [self._user_vec]
        else:
            self._user_vec_serving = LatentFactor(l2_reg=self._l2_reg,
                                                  init='normal',
                                                  ids=self._user_id_serving,
                                                  shape=user_shape,
                                                  scope='user',
                                                  reuse=True)

    def _build_item_extractions(self, train=True):
        """Build item embedding and per-item bias lookups for train/serving."""
        vec_shape = [self._max_item, self._dim_embed]
        bias_shape = [self._max_item, 1]
        if train:
            ids = self._item_id_input
            self._item_vec = LatentFactor(l2_reg=self._l2_reg,
                                          init='normal',
                                          ids=ids,
                                          shape=vec_shape,
                                          scope='item',
                                          reuse=False)
            self._item_bias = LatentFactor(l2_reg=self._l2_reg,
                                           init='zero',
                                           ids=ids,
                                           shape=bias_shape,
                                           scope='item_bias',
                                           reuse=False)
            # Both lookups contribute L2 terms to the training loss.
            self._loss_nodes += [self._item_vec, self._item_bias]
        else:
            ids = self._item_id_serving
            self._item_vec_serving = LatentFactor(l2_reg=self._l2_reg,
                                                  init='normal',
                                                  ids=ids,
                                                  shape=vec_shape,
                                                  scope='item',
                                                  reuse=True)
            self._item_bias_serving = LatentFactor(l2_reg=self._l2_reg,
                                                   init='zero',
                                                   ids=ids,
                                                   shape=bias_shape,
                                                   scope='item_bias',
                                                   reuse=True)

    def _build_default_interactions(self, train=True):
        """Wire user/item embeddings into the pointwise MSE interaction."""
        if train:
            user_t = self._user_vec.get_outputs()[0]
            item_t = self._item_vec.get_outputs()[0]
            bias_t = self._item_bias.get_outputs()[0]
            self._interaction_train = PointwiseMSE(user=user_t,
                                                   item=item_t,
                                                   item_bias=bias_t,
                                                   labels=self._labels,
                                                   a=1.0,
                                                   b=1.0,
                                                   train=True,
                                                   scope='PointwiseMSE',
                                                   reuse=False)
            # The MSE node is itself part of the training loss.
            self._loss_nodes.append(self._interaction_train)
        else:
            self._interaction_serve = PointwiseMSE(
                user=self._user_vec_serving.get_outputs()[0],
                item=self._item_vec_serving.get_outputs()[0],
                item_bias=self._item_bias_serving.get_outputs()[0],
                train=False,
                scope='PointwiseMSE',
                reuse=True)

    def _build_serving_graph(self):
        """Extend the base serving graph and expose the predicted scores."""
        super(PMF, self)._build_serving_graph()
        # First output of the serving interaction module holds the scores.
        self._scores = self._interaction_serve.get_outputs()[0]