Example #1
 def call(self, inputs, training=None, mask=None):
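     # Factorization Machine (FM): per-feature linear weights plus
     # second-order feature interactions computed from shared embeddings.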
     transformation_cache = FeatureTransformationCache(inputs)
     linear_tensors = []
     embedding_tensors = []
     for column in self.columns:
         sparse_tensors = column.get_sparse_tensors(transformation_cache,
                                                    None)
         linear_tensor = tf.nn.safe_embedding_lookup_sparse(
             embedding_weights=self.state_manager.get_variable(
                 column, "weights"),
             sparse_ids=sparse_tensors.id_tensor,
             sparse_weights=sparse_tensors.weight_tensor,
             combiner="sum")
         linear_tensors.append(linear_tensor)
         embedding_tensor = tf.nn.safe_embedding_lookup_sparse(
             embedding_weights=self.state_manager.get_variable(
                 column, "embeddings"),
             sparse_ids=sparse_tensors.id_tensor,
             sparse_weights=sparse_tensors.weight_tensor,
             combiner="mean")
         embedding_tensors.append(embedding_tensor)
     linear_logits_no_bias = tf.math.reduce_sum(
         tf.math.add_n(linear_tensors), axis=1, keepdims=True)
     linear_logits = tf.nn.bias_add(linear_logits_no_bias, self.bias)
     embedding_tensor = tf.stack(embedding_tensors, axis=1)
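     # FM pairwise-interaction identity: sum_{i<j} <v_i, v_j> =
     # 0.5 * ((sum_i v_i)^2 - sum_i (v_i)^2), applied element-wise below.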
     embedding_sum_square = tf.math.square(
         tf.math.reduce_sum(embedding_tensor, axis=1))
     embedding_square_sum = tf.math.reduce_sum(
         tf.math.square(embedding_tensor), axis=1)
     embedding_logits = tf.math.reduce_sum(
         embedding_sum_square - embedding_square_sum, axis=1,
         keepdims=True) * 0.5
     logits = linear_logits + embedding_logits
     predict = tf.keras.activations.sigmoid(logits)
     return predict
Example #2
 def call(self, inputs, training=None, mask=None):
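     # Matrix factorization with user-history features (SVD++-style):
     # the user vector is the user embedding plus aggregated history
     # embeddings, scored against the item embedding with bias terms.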
     transformation_cache = FeatureTransformationCache(inputs)
     user_sparse_tensors = self.user_column.get_sparse_tensors(transformation_cache, None)
     user_embedding = tf.nn.safe_embedding_lookup_sparse(
         embedding_weights=self.state_manager.get_variable(self.user_column, "embeddings"),
         sparse_ids=user_sparse_tensors.id_tensor,
         sparse_weights=user_sparse_tensors.weight_tensor)
     self.add_loss(self.regularizer_embedding(user_embedding))
     user_history_embeddings = []
     for user_history_column in self.user_history_columns:
         user_history_sparse_tensors = user_history_column.get_sparse_tensors(transformation_cache, None)
         user_history_embedding = tf.nn.safe_embedding_lookup_sparse(
             embedding_weights=self.state_manager.get_variable(user_history_column, "embeddings"),
             sparse_ids=user_history_sparse_tensors.id_tensor,
             sparse_weights=user_history_sparse_tensors.weight_tensor,
             combiner="sqrtn")
         user_history_embeddings.append(user_history_embedding)
         self.add_loss(self.regularizer_embedding(user_history_embedding))
     user_full_embedding = user_embedding + tf.math.add_n(user_history_embeddings)
     item_sparse_tensors = self.item_column.get_sparse_tensors(transformation_cache, None)
     item_embedding = tf.nn.safe_embedding_lookup_sparse(
         embedding_weights=self.state_manager.get_variable(self.item_column, "embeddings"),
         sparse_ids=item_sparse_tensors.id_tensor,
         sparse_weights=item_sparse_tensors.weight_tensor)
     self.add_loss(self.regularizer_embedding(item_embedding))
     score_no_bias = tf.math.reduce_sum(tf.math.multiply(user_full_embedding, item_embedding), axis=1, keepdims=True)
     bias = self.average_score + self.user_bias + self.item_bias
     self.add_loss(self.regularizer_bias(self.user_bias))
     self.add_loss(self.regularizer_bias(self.item_bias))
     score = tf.nn.bias_add(score_no_bias, bias)
     return score
Example #3
 def call(self, inputs, training=None, mask=None):
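     # Linear (wide) model: per-column weights over categorical, dense,
     # and crossed columns, summed into a single sigmoid score.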
     transformation_cache = FeatureTransformationCache(inputs)
     output_tensors = []
     for column in self.columns:
         column_weight = self.state_manager.get_variable(column, "weights")
         self.add_loss(self.regularizer(column_weight))
         if isinstance(column, CategoricalColumn):
             sparse_tensors = column.get_sparse_tensors(
                 transformation_cache, None)
             output_tensor = tf.nn.safe_embedding_lookup_sparse(
                 embedding_weights=column_weight,
                 sparse_ids=sparse_tensors.id_tensor,
                 sparse_weights=sparse_tensors.weight_tensor,
                 combiner="sum")
             output_tensors.append(output_tensor)
         elif isinstance(column, DenseColumn):
             dense_tensor = column.get_dense_tensor(transformation_cache,
                                                    self.state_manager)
             output_tensor = tf.matmul(dense_tensor, column_weight)
             output_tensors.append(output_tensor)
     for column in self.cross_columns:
         sparse_tensors = column.get_sparse_tensors(transformation_cache,
                                                    None)
         column_weight = self.state_manager.get_variable(column, "weights")
         self.add_loss(self.regularizer(column_weight))
         output_tensor = tf.nn.safe_embedding_lookup_sparse(
             embedding_weights=column_weight,
             sparse_ids=sparse_tensors.id_tensor,
             sparse_weights=sparse_tensors.weight_tensor,
             combiner="sum")
         output_tensors.append(output_tensor)
     logits_no_bias = tf.math.add_n(output_tensors)
     logits = tf.nn.bias_add(logits_no_bias, self.bias)
     predict = tf.keras.activations.sigmoid(logits)
     return predict
Example #4
 def call(self, inputs, training=None, mask=None):
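     # Classic matrix factorization: user/item embedding dot product plus
     # looked-up per-user and per-item biases and a global average score.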
     transformation_cache = FeatureTransformationCache(inputs)
     user_sparse_tensors = self.user_column.get_sparse_tensors(transformation_cache, None)
     user_embedding = tf.nn.safe_embedding_lookup_sparse(
         embedding_weights=self.state_manager.get_variable(self.user_column, "embeddings"),
         sparse_ids=user_sparse_tensors.id_tensor,
         sparse_weights=user_sparse_tensors.weight_tensor)
     self.add_loss(self.regularizer(user_embedding))
     item_sparse_tensors = self.item_column.get_sparse_tensors(transformation_cache, None)
     item_embedding = tf.nn.safe_embedding_lookup_sparse(
         embedding_weights=self.state_manager.get_variable(self.item_column, "embeddings"),
         sparse_ids=item_sparse_tensors.id_tensor,
         sparse_weights=item_sparse_tensors.weight_tensor)
     self.add_loss(self.regularizer(item_embedding))
     score_no_bias = tf.math.reduce_sum(tf.math.multiply(user_embedding, item_embedding), axis=1, keepdims=True)
     user_bias = tf.nn.safe_embedding_lookup_sparse(
         embedding_weights=self.user_bias,
         sparse_ids=user_sparse_tensors.id_tensor)
     item_bias = tf.nn.safe_embedding_lookup_sparse(
         embedding_weights=self.item_bias,
         sparse_ids=item_sparse_tensors.id_tensor)
     self.add_loss(self.regularizer(self.user_bias))
     self.add_loss(self.regularizer(self.item_bias))
     score = tf.nn.bias_add(score_no_bias + user_bias + item_bias, self.average_score)
     return score
Example #5
 def call(self, inputs, **kwargs):
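     # Item tower: item-id and item-feature embeddings, re-weighted by a
     # learned per-feature attention derived from the item id.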
     transformation_cache = FeatureTransformationCache(inputs)
     item_feature_tensors = []
     item_id_sparse_tensors = self.item_id_column.get_sparse_tensors(
         transformation_cache, None)
     item_id_tensor = tf.nn.safe_embedding_lookup_sparse(
         embedding_weights=self.state_manager.get_variable(
             self.item_id_column, "item_id_embedding"),
         sparse_ids=item_id_sparse_tensors.id_tensor,
         sparse_weights=item_id_sparse_tensors.weight_tensor)
     item_feature_tensors.append(item_id_tensor)
     for item_feature_column in self.item_feature_columns:
         item_feature_sparse_tensors = item_feature_column.get_sparse_tensors(
             transformation_cache, None)
         item_feature_tensor = tf.nn.safe_embedding_lookup_sparse(
             embedding_weights=self.state_manager.get_variable(
                 item_feature_column,
                 "%s_embedding" % item_feature_column.name),
             sparse_ids=item_feature_sparse_tensors.id_tensor,
             sparse_weights=item_feature_sparse_tensors.weight_tensor,
             combiner="mean")
         item_feature_tensors.append(item_feature_tensor)
     item_features_tensor = tf.stack(item_feature_tensors, axis=1)
     with tf.name_scope("WeightedItemFeature"):
         item_weights = tf.nn.safe_embedding_lookup_sparse(
             embedding_weights=self.feature_weights,
             sparse_ids=item_id_sparse_tensors.id_tensor)
         item_weights_exp = tf.math.exp(item_weights)
         item_weights = item_weights_exp / tf.math.reduce_sum(
             item_weights_exp, axis=-1, keepdims=True)
     # The source snippet is truncated here; a plausible completion,
     # assuming item_weights is [batch, num_features] and
     # item_features_tensor is [batch, num_features, dim], is a weighted
     # sum of the per-feature embeddings:
     weighted_item_features_tensor = tf.matmul(
         tf.expand_dims(item_weights, axis=1), item_features_tensor)
     return tf.squeeze(weighted_item_features_tensor, axis=1)
Example #6
 def call(self, inputs, training=None, mask=None):
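     # Product-based Neural Network (PNN-style): field embeddings plus a
     # product layer, followed by an MLP with optional dropout.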
     transformation_cache = FeatureTransformationCache(inputs)
     embedding_tensors = []
     for column in self.feature_columns:
         embedding_weights = self.state_manager.get_variable(
             column, "embedding_weights")
         if isinstance(column, DenseColumn):
             dense_tensor = column.get_dense_tensor(transformation_cache,
                                                    self.state_manager)
             input_tensor = tf.matmul(dense_tensor, embedding_weights)
         elif isinstance(column, CategoricalColumn):
             sparse_tensors = column.get_sparse_tensors(
                 transformation_cache, self.state_manager)
             input_tensor = tf.nn.safe_embedding_lookup_sparse(
                 embedding_weights=embedding_weights,
                 sparse_ids=sparse_tensors.id_tensor,
                 sparse_weights=sparse_tensors.weight_tensor,
                 combiner="mean")
         embedding_tensors.append(input_tensor)
     product_tensor = self.product_layer(embedding_tensors)
     net = tf.add_n(embedding_tensors) + product_tensor
     net = tf.nn.bias_add(net, self.product_bias)
     net = tf.keras.activations.relu(net)
     for layer_index in range(len(self.dense_layers)):
         net = self.dense_layers[layer_index](net)
         if self.dropout_layers:
             net = self.dropout_layers[layer_index](net, training=training)
     logits = self.score_layer(net)
     return logits
Example #7
 def call(self, inputs, training=None, mask=None):
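     # Deep Crossing-style model: dense features are stacked and passed
     # through residual layers before the scoring layer.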
     transformation_cache = FeatureTransformationCache(inputs)
     input_tensors = []
     for column in self.feature_columns:
         dense_tensor = column.get_dense_tensor(transformation_cache,
                                                self.state_manager)
         input_tensors.append(dense_tensor)
     tensor = self.stack_layer(input_tensors)
     tensor = self.residual_layer(tensor)
     logits = self.score_layer(tensor)
     return logits
Example #8
 def call(self, inputs, training=None, mask=None):
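     # Field-aware Factorization Machine (FFM): each column keeps one
     # embedding table per other field; cross terms pair the two
     # field-specific embeddings.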
     transformation_cache = FeatureTransformationCache(inputs)
     linear_tensors = []
     for column in self.columns:
         sparse_tensors = column.get_sparse_tensors(transformation_cache,
                                                    None)
         linear_tensor = tf.nn.safe_embedding_lookup_sparse(
             embedding_weights=self.state_manager.get_variable(
                 column, "weights"),
             sparse_ids=sparse_tensors.id_tensor,
             sparse_weights=sparse_tensors.weight_tensor,
             combiner="sum")
         linear_tensors.append(linear_tensor)
     cross_tensors = []
     for i in range(self.columns_count - 1):
         column_i = self.columns[i]
         column_i_embeddings = self.state_manager.get_variable(
             column_i, "embeddings")
         column_i_sparse_tensors = column_i.get_sparse_tensors(
             transformation_cache, None)
         for j in range(i + 1, self.columns_count):
             column_j = self.columns[j]
             column_j_embeddings = self.state_manager.get_variable(
                 column_j, "embeddings")
             column_j_sparse_tensors = column_j.get_sparse_tensors(
                 transformation_cache, None)
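             # Field-aware lookup: field k's slice index is k when k comes
             # before this column and k - 1 after it (own slot skipped).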
             column_ixj_embedding = tf.nn.safe_embedding_lookup_sparse(
                 embedding_weights=tf.squeeze(tf.slice(column_i_embeddings,
                                                       begin=[j - 1, 0, 0],
                                                       size=[1, -1, -1]),
                                              axis=0),
                 sparse_ids=column_i_sparse_tensors.id_tensor,
                 sparse_weights=column_i_sparse_tensors.weight_tensor)
             column_jxi_embedding = tf.nn.safe_embedding_lookup_sparse(
                 embedding_weights=tf.squeeze(tf.slice(column_j_embeddings,
                                                       begin=[i, 0, 0],
                                                       size=[1, -1, -1]),
                                              axis=0),
                 sparse_ids=column_j_sparse_tensors.id_tensor,
                 sparse_weights=column_j_sparse_tensors.weight_tensor)
             cross_tensor = tf.math.multiply(column_ixj_embedding,
                                             column_jxi_embedding)
             cross_tensor = tf.math.reduce_sum(cross_tensor,
                                               axis=1,
                                               keepdims=True)
             cross_tensors.append(cross_tensor)
     logits_no_bias = tf.math.add_n(linear_tensors) + tf.math.add_n(
         cross_tensors)
     logits = tf.nn.bias_add(logits_no_bias, self.bias)
     return logits
Example #9
 def call(self, inputs, training=None, mask=None):
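     # Mixture-style head: per-column softmax and sigmoid branches whose
     # outputs are multiplied and summed into a single logit.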
     transformation_cache = FeatureTransformationCache(inputs)
     softmax_output_tensors = []
     sigmoid_output_tensors = []
     for column in self.columns:
         softmax_weights = self.state_manager.get_variable(
             column, "softmax_weights")
         self.add_loss(self.regularizer(softmax_weights))
         sigmoid_weights = self.state_manager.get_variable(
             column, "sigmoid_weights")
         self.add_loss(self.regularizer(sigmoid_weights))
         if isinstance(column, DenseColumn):
             dense_tensor = column.get_dense_tensor(transformation_cache,
                                                    self.state_manager)
             softmax_output_tensor = tf.matmul(dense_tensor,
                                               softmax_weights)
             sigmoid_output_tensor = tf.matmul(dense_tensor,
                                               sigmoid_weights)
         elif isinstance(column, CategoricalColumn):
             sparse_tensors = column.get_sparse_tensors(
                 transformation_cache, None)
             softmax_output_tensor = tf.nn.safe_embedding_lookup_sparse(
                 embedding_weights=softmax_weights,
                 sparse_ids=sparse_tensors.id_tensor,
                 sparse_weights=sparse_tensors.weight_tensor,
                 combiner="sum")
             sigmoid_output_tensor = tf.nn.safe_embedding_lookup_sparse(
                 embedding_weights=sigmoid_weights,
                 sparse_ids=sparse_tensors.id_tensor,
                 sparse_weights=sparse_tensors.weight_tensor,
                 combiner="sum")
         else:
             raise ValueError(
                 "Unsupported column type: %s" % type(column))
         softmax_output_tensors.append(softmax_output_tensor)
         sigmoid_output_tensors.append(sigmoid_output_tensor)
     softmax_tensor = tf.nn.bias_add(tf.math.add_n(softmax_output_tensors),
                                     self.softmax_bias)
     sigmoid_tensor = tf.nn.bias_add(tf.math.add_n(sigmoid_output_tensors),
                                     self.sigmoid_bias)
     softmax_tensor = tf.nn.softmax(softmax_tensor, axis=1)
     sigmoid_tensor = tf.math.sigmoid(sigmoid_tensor)
     logits = tf.math.reduce_sum(tf.math.multiply(softmax_tensor,
                                                  sigmoid_tensor),
                                 axis=1,
                                 keepdims=True)
     return logits
Example #10
 def call(self, inputs, training=None, mask=None):
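     # Plain DNN classifier over dense features, with optional dropout
     # and batch normalization, ending in a sigmoid prediction.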
     transformation_cache = FeatureTransformationCache(inputs)
     feature_column_tensors = []
     for feature_column in self.feature_columns:
         feature_tensor = feature_column.get_dense_tensor(transformation_cache, self.state_manager)
         feature_column_tensors.append(feature_tensor)
     output_tensor = tf.concat(feature_column_tensors, axis=1)
     for layer_index, dense_layer in enumerate(self.dense_layers):
         output_tensor = dense_layer(output_tensor)
         if self.dropout:
             output_tensor = self.dropout_layers[layer_index](output_tensor, training)
         if self.batch_norm:
             output_tensor = self.batch_norm_layers[layer_index](output_tensor, training)
     logits = self.logits_layer(output_tensor)
     predict = tf.keras.activations.sigmoid(logits)
     return predict
Example #11
 def call(self, inputs, training=None, mask=None):
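     # Neural Collaborative Filtering (NeuMF-style): a GMF branch and an
     # MLP branch over user/item features, concatenated for the score.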
     transformation_cache = FeatureTransformationCache(inputs)
     user_tensors = []
     for user_column in self.user_columns:
         user_tensor = user_column.get_dense_tensor(transformation_cache, self.state_manager)
         user_tensors.append(user_tensor)
     item_tensors = []
     for item_column in self.item_columns:
         item_tensor = item_column.get_dense_tensor(transformation_cache, self.state_manager)
         item_tensors.append(item_tensor)
     user_input_tensor = tf.concat(user_tensors, axis=1)
     item_input_tensor = tf.concat(item_tensors, axis=1)
     gmf_tensor = self.gmf_layer([user_input_tensor, item_input_tensor])
     mlp_tensor = tf.concat([user_input_tensor, item_input_tensor], axis=1)
     for layer_index in range(len(self.denser_layers)):
         mlp_tensor = self.denser_layers[layer_index](mlp_tensor)
         if self.dropout_layers:
             mlp_tensor = self.dropout_layers[layer_index](mlp_tensor, training=training)
     output_tensor = tf.concat([gmf_tensor, mlp_tensor], axis=1)
     logits = self.score_layer(output_tensor)
     return logits
Example #12
 def call(self, inputs, training=None, mask=None):
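     # Wide & Deep-style model: a linear (wide) part over categorical and
     # dense columns and a DNN (deep) part, concatenated for the logits.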
     transformation_cache = FeatureTransformationCache(inputs)
     linear_tensors = []
     for column in self.linear_columns:
         column_weights = self.state_manager.get_variable(
             column, "linear_weights")
         if isinstance(column, CategoricalColumn):
             sparse_tensors = column.get_sparse_tensors(
                 transformation_cache, self.state_manager)
             linear_tensor = tf.nn.safe_embedding_lookup_sparse(
                 embedding_weights=column_weights,
                 sparse_ids=sparse_tensors.id_tensor,
                 sparse_weights=sparse_tensors.weight_tensor,
                 combiner="sum")
         elif isinstance(column, DenseColumn):
             dense_tensor = column.get_dense_tensor(transformation_cache,
                                                    self.state_manager)
             linear_tensor = tf.matmul(dense_tensor, column_weights)
         linear_tensors.append(linear_tensor)
     linear_net = tf.concat(linear_tensors, axis=1)
     dnn_tensors = []
     for column in self.dnn_columns:
         column_weights = self.state_manager.get_variable(
             column, "dnn_weights")
         dnn_tensor = column.get_dense_tensor(transformation_cache,
                                              self.state_manager)
         dnn_tensor = tf.matmul(dnn_tensor, column_weights)
         dnn_tensors.append(dnn_tensor)
     dnn_net = tf.concat(dnn_tensors, axis=1)
     for layer_index in range(len(self.dense_layers)):
         dnn_net = self.dense_layers[layer_index](dnn_net)
         if self.dropout_layers:
             dnn_net = self.dropout_layers[layer_index](dnn_net,
                                                        training=training)
     net = tf.concat([linear_net, dnn_net], axis=1)
     logits = self.logits_layer(net)
     return logits