def _build_shared_graph(self):
    with tf.variable_scope(self._scope, reuse=self._reuse):
        # Encoder: project the input through the layer sizes in self._dims[1:].
        _encoder = MultiLayerFC(l2_reg=self._l2_reg,
                                in_tensor=self._in_tensor,
                                dims=self._dims[1:],
                                scope='encoder',
                                dropout_in=self._dropout,
                                dropout_mid=self._dropout,
                                reuse=self._reuse)
        # Decoder: mirror the encoder dimensions to reconstruct the input.
        _decoder = MultiLayerFC(l2_reg=self._l2_reg,
                                in_tensor=_encoder.get_outputs()[0],
                                dims=self._dims[::-1][1:],
                                scope='decoder',
                                relu_in=True,
                                dropout_in=self._dropout,
                                relu_mid=True,
                                dropout_mid=self._dropout,
                                relu_out=True,
                                dropout_out=self._dropout,
                                reuse=self._reuse)
        # Expose the bottleneck code; the loss combines the encoder/decoder
        # regularizers with a weighted reconstruction term. Note tf.nn.l2_loss(t)
        # returns 0.5 * sum(t ** 2), so the last term is
        # 0.5 * self._l2_reconst * ||decoder(encoder(x)) - x||^2.
        self._outputs += _encoder.get_outputs()
        self._loss = _encoder.get_loss() + _decoder.get_loss()
        self._loss += self._l2_reconst * tf.nn.l2_loss(
            _decoder.get_outputs()[0] - self._in_tensor)
def _build_serving_graph(self):
    with tf.variable_scope(self._scope, reuse=self._reuse):
        if self._batch_serving:
            # Batch mode: score every (user, item) pair by repeating each user
            # row once per item and stacking the item rows once per user.
            user_rep = tf.reshape(
                tf.tile(self._user, [1, tf.shape(self._item)[0]]),
                (-1, tf.shape(self._user)[1]))
            item_rep = tf.tile(self._item, (tf.shape(self._user)[0], 1))
            if self._extra is not None:
                extra_rep = tf.tile(self._extra,
                                    (tf.shape(self._user)[0], 1))
                in_tensor = tf.concat([user_rep, item_rep, extra_rep], axis=1)
            else:
                in_tensor = tf.concat([user_rep, item_rep], axis=1)
            reg = MultiLayerFC(in_tensor=in_tensor,
                               dims=self._dims,
                               bias_in=True,
                               bias_mid=True,
                               bias_out=False,
                               l2_reg=self._l2_reg,
                               scope='mlp_reg',
                               reuse=self._reuse)
            if self._item_bias is not None:
                item_bias_rep = tf.tile(self._item_bias,
                                        (tf.shape(self._user)[0], 1))
                # Fold the flat scores back into a (num_users, num_items) matrix.
                self._outputs.append(
                    tf.reshape(reg.get_outputs()[0] + item_bias_rep,
                               (tf.shape(self._user)[0],
                                tf.shape(self._item)[0])))
            else:
                self._outputs.append(
                    tf.reshape(reg.get_outputs()[0],
                               (tf.shape(self._user)[0],
                                tf.shape(self._item)[0])))
        else:
            # Point mode: score the given (user, item) pairs one-to-one.
            if self._extra is not None:
                in_tensor = tf.concat(
                    [self._user, self._item, self._extra], axis=1)
            else:
                in_tensor = tf.concat([self._user, self._item], axis=1)
            reg = MultiLayerFC(in_tensor=in_tensor,
                               dims=self._dims,
                               bias_in=True,
                               bias_mid=True,
                               bias_out=False,
                               l2_reg=self._l2_reg,
                               scope='mlp_reg',
                               reuse=self._reuse)
            logits = reg.get_outputs()[0]
            if self._item_bias is not None:
                logits += self._item_bias
            self._outputs.append(tf.sigmoid(logits))
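# The batch-serving branch above relies on a tile/reshape cross-join: each of
# the U user rows is repeated I times and the I item rows are stacked U times,
# so the MLP sees U*I pair rows that are later reshaped to a (U, I) score
# matrix. A minimal, self-contained sketch of that cross-join with toy shapes
# (illustration only, not part of the module):
import tensorflow as tf

user = tf.constant([[1., 2.], [3., 4.]])               # 2 users, dim 2
item = tf.constant([[10.], [20.], [30.]])              # 3 items, dim 1
user_rep = tf.reshape(tf.tile(user, [1, tf.shape(item)[0]]),
                      (-1, tf.shape(user)[1]))         # shape (6, 2)
item_rep = tf.tile(item, (tf.shape(user)[0], 1))       # shape (6, 1)
pairs = tf.concat([user_rep, item_rep], axis=1)        # one row per (user, item)
with tf.Session() as sess:
    print(sess.run(pairs))                             # 6 rows = 2 users x 3 items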
def _build_shared_graph(self):
    with tf.variable_scope(self._scope, reuse=self._reuse):
        # Non-trainable embedding table plus a per-row boolean flag recording
        # which rows have been looked up since the last update.
        self._embedding = tf.get_variable('embedding',
                                          dtype=tf.float32,
                                          shape=self._shape,
                                          trainable=False,
                                          initializer=self._initializer)
        self._flag = tf.get_variable('flag',
                                     dtype=tf.bool,
                                     shape=[self._shape[0]],
                                     trainable=False,
                                     initializer=tf.constant_initializer(
                                         value=False, dtype=tf.bool))
        unique_ids, _ = tf.unique(self._ids)
        with tf.control_dependencies([
            tf.scatter_update(self._flag, unique_ids,
                              tf.ones_like(unique_ids, dtype=tf.bool))
        ]):
            # Transform the looked-up embeddings with a batch-normalized MLP;
            # the flag rows for this batch are marked as a side effect.
            trans_embedding = MultiLayerFC(
                in_tensor=tf.nn.embedding_lookup(self._embedding, self._ids),
                dims=self._mlp_dims,
                batch_norm=True,
                scope=self._scope + '/MLP',
                train=self._train,
                reuse=self._reuse,
                l2_reg=self._l2_reg,
                relu_out=True)
            self._outputs += trans_embedding.get_outputs()
            self._loss += trans_embedding.get_loss()

        # Re-run the frozen MLP over all flagged rows, write the results back
        # into the embedding table, and clear the flags.
        update_ids = tf.reshape(tf.where(self._flag), [-1])
        update_embedding = MultiLayerFC(
            in_tensor=tf.nn.embedding_lookup(self._embedding, update_ids),
            dims=self._mlp_dims,
            batch_norm=True,
            scope=self._scope + '/MLP',
            train=False,
            reuse=True,
            l2_reg=self._l2_reg,
            relu_out=True)
        self._update_node = tf.scatter_update(
            self._embedding, update_ids, update_embedding.get_outputs()[0])
        self._clear_flag = tf.scatter_update(
            self._flag, update_ids, tf.zeros_like(update_ids, dtype=tf.bool))
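# The graph above uses tf.scatter_update for mark / update / clear bookkeeping
# on the flag vector. A minimal, self-contained sketch of that pattern with a
# toy 5-row flag (illustration only; variable names here are not the module's):
import tensorflow as tf

flag = tf.Variable(tf.zeros([5], dtype=tf.bool), trainable=False)
ids = tf.constant([1, 3])
mark = tf.scatter_update(flag, ids, tf.ones_like(ids, dtype=tf.bool))   # flag rows seen in a batch
flagged = tf.reshape(tf.where(flag), [-1])                              # indices currently flagged
clear = tf.scatter_update(flag, flagged,
                          tf.zeros_like(flagged, dtype=tf.bool))        # reset after an update pass
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(mark)
    print(sess.run(flagged))   # -> [1 3]
    sess.run(clear)
    print(sess.run(flagged))   # -> []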
def _build_training_graph(self):
    with tf.variable_scope(self._scope, reuse=self._reuse):
        if self._extra is not None:
            in_tensor = tf.concat([self._user, self._item, self._extra],
                                  axis=1)
        else:
            in_tensor = tf.concat([self._user, self._item], axis=1)
        # MLP regression head over the concatenated representations.
        reg = MultiLayerFC(in_tensor=in_tensor,
                           dims=self._dims,
                           bias_in=True,
                           bias_mid=True,
                           bias_out=False,
                           dropout_mid=self._dropout,
                           l2_reg=self._l2_reg,
                           scope='mlp_reg',
                           reuse=self._reuse)
        logits = reg.get_outputs()[0]
        if self._item_bias is not None:
            logits += self._item_bias
        labels_float = tf.reshape(tf.to_float(self._labels), (-1, 1))
        # Pointwise sigmoid cross-entropy summed over the batch.
        self._loss = tf.reduce_sum(
            tf.nn.sigmoid_cross_entropy_with_logits(labels=labels_float,
                                                    logits=logits))
        self._outputs.append(logits)
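# The training loss above sums, over the batch, the binary cross-entropy
# between sigmoid(logit) and the 0/1 label. A tiny numeric check with fixed
# values (illustration only, not part of the module):
import tensorflow as tf

logits = tf.constant([[2.0], [-1.0]])
labels = tf.constant([[1.0], [0.0]])
loss = tf.reduce_sum(
    tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits))
with tf.Session() as sess:
    # -log(sigmoid(2.0)) + -log(1 - sigmoid(-1.0)) ~= 0.127 + 0.313 ~= 0.44
    print(sess.run(loss))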
def _build_serving_graph(self):
    with tf.variable_scope(self._scope, reuse=self._reuse):
        user_rep = tf.reshape(
            tf.tile(self._user, [1, tf.shape(self._item)[0]]),
            (-1, tf.shape(self._user)[1]))
        item_rep = tf.tile(self._item, (tf.shape(self._user)[0], 1))
        item_bias_rep = tf.tile(self._item_bias,
                                (tf.shape(self._user)[0], 1))
        in_tensor = tf.concat([user_rep, item_rep], axis=1)
        reg = MultiLayerFC(in_tensor=in_tensor,
                           dims=self._dims,
                           bias_in=True,
                           bias_mid=True,
                           bias_out=False,
                           l2_reg=self._l2_reg,
                           scope='mlp_reg',
                           reuse=self._reuse)
        self._outputs.append(
            tf.reshape(reg.get_outputs()[0] + item_bias_rep,
                       (tf.shape(self._user)[0], tf.shape(self._item)[0])))
def _build_training_graph(self):
    with tf.variable_scope(self._scope, reuse=self._reuse):
        # Generalized matrix factorization: element-wise product of user and
        # item vectors followed by a single bias-free linear unit.
        pointwise_product = tf.multiply(self._user, self._item)
        gdp = MultiLayerFC(in_tensor=pointwise_product,
                           dims=[1],
                           bias_in=False,
                           bias_mid=False,
                           bias_out=False,
                           l2_reg=self._l2_reg,
                           scope='gmf_reg',
                           reuse=self._reuse)
        logits = gdp.get_outputs()[0] + self._item_bias
        labels_float = tf.reshape(tf.to_float(self._labels), (-1, 1))
        self._loss = tf.reduce_sum(
            tf.nn.sigmoid_cross_entropy_with_logits(labels=labels_float,
                                                    logits=logits))
        self._outputs.append(logits)
def _build_serving_graph(self):
    with tf.variable_scope(self._scope, reuse=self._reuse):
        user_rep = tf.reshape(
            tf.tile(self._user, [1, tf.shape(self._item)[0]]),
            (-1, tf.shape(self._user)[1]))
        item_rep = tf.tile(self._item, (tf.shape(self._user)[0], 1))
        item_bias_rep = tf.tile(self._item_bias,
                                (tf.shape(self._user)[0], 1))
        pointwise_product = tf.multiply(user_rep, item_rep)
        gdp = MultiLayerFC(in_tensor=pointwise_product,
                           dims=[1],
                           bias_in=False,
                           bias_mid=False,
                           bias_out=False,
                           l2_reg=self._l2_reg,
                           scope='gmf_reg',
                           reuse=self._reuse)
        self._outputs.append(
            tf.reshape(gdp.get_outputs()[0] + item_bias_rep,
                       (tf.shape(self._user)[0], tf.shape(self._item)[0])))
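# The 1-unit, bias-free layer above computes a weighted inner product
# sum_k w_k * u_k * v_k of the user and item vectors (generalized matrix
# factorization). If the learned weights w were all ones, this would reduce to
# a plain dot product; a toy NumPy illustration of that special case only:
import numpy as np

u = np.array([[1., 2.], [3., 4.]])            # 2 user vectors
v = np.array([[5., 6.], [7., 8.], [9., 1.]])  # 3 item vectors
scores = u @ v.T                              # (2, 3) user-item score matrix
# The serving graph reaches the same numbers pair by pair: it tiles u and v
# into all 6 combinations, multiplies element-wise, and sums via the 1-unit layer.
print(scores)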
def _build_training_graph(self):
    with tf.variable_scope(self._scope, reuse=self._reuse):
        if self._mlp_pretrain:
            # Pretrain the transformation MLP to approximate the identity
            # mapping on its inputs (fixed pretraining batch size of 32),
            # reusing the variables created under self._scope + '/MLP'.
            self._pretrain_input = tf.placeholder(tf.float32,
                                                  shape=(32, self._shape[1]),
                                                  name='pretrain_input')
            trans_embedding = MultiLayerFC(in_tensor=self._pretrain_input,
                                           dims=self._mlp_dims,
                                           batch_norm=True,
                                           scope=self._scope + '/MLP',
                                           train=True,
                                           reuse=True,
                                           l2_reg=self._l2_reg,
                                           relu_out=True)
            identity_loss = tf.nn.l2_loss(
                trans_embedding.get_outputs()[0] - self._pretrain_input)
            self._pretrain_ops = tf.train.AdamOptimizer(
                learning_rate=0.001).minimize(identity_loss)