Example No. 1
# Imports assumed: TensorFlow 2.x plus OpenRec's tf2 modules
# (the exact module path below is an assumption).
import tensorflow as tf
from tensorflow.keras import Model
from openrec.tf2.modules import LatentFactor

class UCML(Model):
    def __init__(self,
                 dim_user_embed,
                 dim_item_embed,
                 total_users,
                 total_items,
                 margin=0.5):

        super(UCML, self).__init__()
        self.user_latent_factor = LatentFactor(num_instances=total_users,
                                               dim=dim_user_embed,
                                               name='user_latent_factor')
        self.item_latent_factor = LatentFactor(num_instances=total_items,
                                               dim=dim_item_embed,
                                               name='item_latent_factor')
        self.item_bias = LatentFactor(num_instances=total_items,
                                      dim=1,
                                      name='item_bias')
        self.margin = margin

    def call(self, user_id, p_item_id, n_item_id):

        # Look up embeddings for the user, the observed (positive) item,
        # and the sampled (negative) item
        user_vec = self.user_latent_factor(user_id)
        p_item_vec = self.item_latent_factor(p_item_id)
        p_item_bias = self.item_bias(p_item_id)
        n_item_vec = self.item_latent_factor(n_item_id)
        n_item_bias = self.item_bias(n_item_id)

        # Squared Euclidean distances between the user and each item
        l2_user_pos = tf.math.reduce_sum(tf.math.square(user_vec - p_item_vec),
                                         axis=-1,
                                         keepdims=True)
        l2_user_neg = tf.math.reduce_sum(tf.math.square(user_vec - n_item_vec),
                                         axis=-1,
                                         keepdims=True)

        # Scores are negated distances plus item bias: closer items score higher
        pos_score = (-l2_user_pos) + p_item_bias
        neg_score = (-l2_user_neg) + n_item_bias
        diff = pos_score - neg_score

        # Hinge loss: penalize pairs where the positive item does not beat
        # the negative item by at least the margin
        loss = tf.reduce_sum(tf.maximum(self.margin - diff, 0))
        l2_loss = tf.nn.l2_loss(user_vec) + tf.nn.l2_loss(
            p_item_vec) + tf.nn.l2_loss(n_item_vec)

        return loss, l2_loss

    def censor_vec(self, user_id, p_item_id, n_item_id):

        # Re-project the touched embeddings (e.g. back onto the unit ball),
        # as metric-learning models require after each gradient step
        return (self.user_latent_factor.censor(user_id),
                self.item_latent_factor.censor(p_item_id),
                self.item_latent_factor.censor(n_item_id))

    def inference(self, user_id):

        # Score every item for the given users: negated squared distance
        # to each item embedding, plus the item bias
        user_vec = self.user_latent_factor(user_id)
        item_vecs = self.item_latent_factor.variables[0]
        item_biases = tf.reshape(self.item_bias.variables[0], [-1])
        sq_dist = tf.math.reduce_sum(
            tf.math.square(tf.expand_dims(user_vec, axis=1) - item_vecs),
            axis=-1)
        return -sq_dist + item_biases
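
A minimal training-step sketch for this model (the hyperparameters, optimizer choice, L2 weight, and sample IDs below are illustrative assumptions, not from the source):

model = UCML(dim_user_embed=32, dim_item_embed=32,
             total_users=1000, total_items=5000, margin=0.5)
optimizer = tf.keras.optimizers.Adam(1e-3)

user_id = tf.constant([0, 1])      # batch of user IDs
p_item_id = tf.constant([10, 20])  # observed (positive) items
n_item_id = tf.constant([33, 44])  # sampled negative items

with tf.GradientTape() as tape:
    loss, l2_loss = model(user_id, p_item_id, n_item_id)
    total_loss = loss + 1e-4 * l2_loss  # L2 weight is an assumed value

grads = tape.gradient(total_loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))

# Re-project the updated embeddings, as CML requires
model.censor_vec(user_id, p_item_id, n_item_id)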
Example No. 2
    def __init__(self, dim_user_embed, dim_item_embed, total_users, total_items):

        super(BPR, self).__init__()
        self.user_latent_factor = LatentFactor(num_instances=total_users,
                                               dim=dim_user_embed,
                                               name='user_latent_factor')
        self.item_latent_factor = LatentFactor(num_instances=total_items,
                                               dim=dim_item_embed,
                                               name='item_latent_factor')
        self.item_bias = LatentFactor(num_instances=total_items,
                                      dim=1,
                                      name='item_bias')
        self.pairwise_log_loss = PairwiseLogLoss()
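
The excerpt shows only the constructor. A plausible matching forward pass (a sketch, not the library's code; the PairwiseLogLoss call signature assumed here, positive scores then negative scores, is not confirmed by the excerpt) scores each pair by dot product plus item bias:

    def call(self, user_id, p_item_id, n_item_id):
        user_vec = self.user_latent_factor(user_id)
        # Dot-product score plus item bias, for positive and negative items
        p_score = tf.math.reduce_sum(
            user_vec * self.item_latent_factor(p_item_id),
            axis=-1, keepdims=True) + self.item_bias(p_item_id)
        n_score = tf.math.reduce_sum(
            user_vec * self.item_latent_factor(n_item_id),
            axis=-1, keepdims=True) + self.item_bias(n_item_id)
        # Assumed signature: PairwiseLogLoss()(pos_score, neg_score)
        return self.pairwise_log_loss(p_score, n_score)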
Example No. 3
    def __init__(self, dim_user_embed, dim_item_embed, total_users, total_items, a=1.0, b=1.0):

        super(WRMF, self).__init__()
        self.user_latent_factor = LatentFactor(num_instances=total_users,
                                               dim=dim_user_embed,
                                               name='user_latent_factor')
        self.item_latent_factor = LatentFactor(num_instances=total_items,
                                               dim=dim_item_embed,
                                               name='item_latent_factor')
        self.item_bias = LatentFactor(num_instances=total_items,
                                      dim=1,
                                      name='item_bias')
        self.pointwise_mse_loss = PointwiseMSELoss(a=a, b=b)
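
Only the constructor is shown here as well. One way the forward pass might look (a sketch; the PointwiseMSELoss call signature below, and the reading of a and b as weights on observed vs. unobserved interactions, are assumptions):

    def call(self, user_id, item_id, label):
        user_vec = self.user_latent_factor(user_id)
        item_vec = self.item_latent_factor(item_id)
        item_bias = self.item_bias(item_id)
        # Assumed signature: weighted pointwise MSE over (user, item) pairs
        return self.pointwise_mse_loss(user_vec, item_vec, item_bias, label)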
Example No. 4
    def __init__(self, dim_user_embed, dim_item_embed, total_users, total_items):

        super(GMF, self).__init__()
        self.user_latent_factor = LatentFactor(num_instances=total_users,
                                               dim=dim_user_embed,
                                               name='user_latent_factor')
        self.item_latent_factor = LatentFactor(num_instances=total_items,
                                               dim=dim_item_embed,
                                               name='item_latent_factor')
        self.item_bias = LatentFactor(num_instances=total_items,
                                      dim=1,
                                      name='item_bias')
        self.mlp = MLP(units_list=[1], use_bias=False)
        self._bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)
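The constructor wires a single-unit MLP over the elementwise product of user and item factors, which is the generalized matrix factorization idea. A sketch of the corresponding forward pass (the method's argument layout is an assumption):

    def call(self, user_id, item_id, label):
        user_vec = self.user_latent_factor(user_id)
        item_vec = self.item_latent_factor(item_id)
        # Elementwise product projected to a single logit, plus item bias;
        # from_logits=True in the BCE matches this unactivated output
        logit = tf.reshape(
            self.mlp(user_vec * item_vec) + self.item_bias(item_id), [-1])
        return self._bce(y_true=label, y_pred=logit)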
Example No. 5
    def __init__(self,
                 m_spa,
                 ln_emb,
                 ln_bot,
                 ln_top,
                 arch_interaction_op='dot',
                 arch_interaction_itself=False,
                 sigmoid_bot=False,
                 sigmoid_top=True,
                 loss_func='mse',
                 loss_threshold=0.0):
        '''
        m_spa: dimensionality of the sparse feature embeddings
        ln_emb: number of embedding rows (num_instances) per sparse feature
        ln_bot: layer sizes of the bottom MLP
        ln_top: layer sizes of the top MLP
        '''

        super(DLRM, self).__init__()

        self._loss_threshold = loss_threshold
        self._loss_func = loss_func
        self._latent_factors = [
            LatentFactor(num_instances=num, dim=m_spa) for num in ln_emb
        ]
        self._mlp_bot = MLP(
            units_list=ln_bot,
            out_activation='sigmoid' if sigmoid_bot else 'relu')
        self._mlp_top = MLP(
            units_list=ln_top,
            out_activation='sigmoid' if sigmoid_top else 'relu')

        self._dot_interaction = None
        if arch_interaction_op == 'dot':
            self._dot_interaction = SecondOrderFeatureInteraction(
                self_interaction=arch_interaction_itself)

        elif arch_interaction_op != 'cat':
            # Use the constructor argument directly; the original referenced
            # self._arch_interaction_op, which is never assigned
            sys.exit("ERROR: arch_interaction_op=" +
                     arch_interaction_op + " is not supported")

        if loss_func == 'mse':
            self._loss = tf.keras.losses.MeanSquaredError()
        elif loss_func == 'bce':
            self._loss = tf.keras.losses.BinaryCrossentropy()
        else:
            sys.exit("ERROR: loss_func=" + loss_func + " is not supported")