def _optimize_store(self, node, node_embeddings):
    """Consume the accumulated store gradients and apply an optimizer step.

    For every gradient store, the accumulated gradient rows for `node` are
    read, the store rows are zeroed, and a surrogate loss
    `sum(node_embedding * stored_gradient)` is formed; differentiating that
    surrogate w.r.t. the embedding variables reproduces the stored gradient.

    NOTE(review): this definition appears to be shadowed by a later
    `_optimize_store` in the same scope (which also returns the loss
    tensor); only the later definition is effective at runtime — confirm
    which one is intended to survive.

    Args:
        node: integer id tensor used to index rows of each gradient store.
        node_embeddings: one embedding tensor per gradient store, aligned
            with `self.gradient_stores`.

    Returns:
        The `store_optimizer` minimize op, or `tf.no_op()` when there are
        no gradient stores.
    """
    if not self.gradient_stores:
        return tf.no_op()
    surrogate_losses = []
    for grad_store, embedding in zip(self.gradient_stores, node_embeddings):
        stored_grad = tf.nn.embedding_lookup(grad_store, node)
        # Zero the store rows only after the gradient has been read out.
        with tf.control_dependencies([stored_grad]):
            reset_op = utils_embedding.embedding_update(grad_store, node, 0)
        # Build the surrogate loss after the reset so the clear is forced
        # to run whenever the loss is evaluated.
        with tf.control_dependencies([reset_op]):
            surrogate_losses.append(tf.reduce_sum(embedding * stored_grad))
    return self.store_optimizer.minimize(tf.add_n(surrogate_losses))
def _optimize_store(self, node, node_embeddings):
    """Consume the accumulated store gradients and apply an optimizer step.

    Reads the accumulated gradient rows for `node` out of every gradient
    store, forms the surrogate loss `sum(node_embedding * stored_gradient)`
    per store, and minimizes the total. The store rows are zeroed once the
    gradients have been read; the minimize op is built under a control
    dependency on all of the clears.

    Args:
        node: integer id tensor used to index rows of each gradient store.
        node_embeddings: one embedding tensor per gradient store, aligned
            with `self.gradient_stores`.

    Returns:
        A `(store_loss, train_op)` pair; `(tf.zeros([]), tf.no_op())` when
        there are no gradient stores.
    """
    if not self.gradient_stores:
        return tf.zeros([]), tf.no_op()
    surrogate_losses = []
    reset_ops = []
    for grad_store, embedding in zip(self.gradient_stores, node_embeddings):
        stored_grad = tf.nn.embedding_lookup(grad_store, node)
        # Zero the store rows only after the gradient has been read out.
        with tf.control_dependencies([stored_grad]):
            reset_ops.append(
                utils_embedding.embedding_update(grad_store, node, 0))
        surrogate_losses.append(tf.reduce_sum(tf.multiply(embedding,
                                                          stored_grad)))
    store_loss = tf.add_n(surrogate_losses)
    # Gate the optimizer step on every clear so stale gradients can never
    # survive a training step.
    with tf.control_dependencies(reset_ops):
        return store_loss, self.store_optimizer.minimize(store_loss)
def _update_store(self, node, node_embeddings):
    """Write the given embeddings into every store at the rows for `node`.

    Args:
        node: integer id tensor selecting the rows to overwrite.
        node_embeddings: one embedding tensor per store, aligned with
            `self.stores`.

    Returns:
        A single grouped op that performs all of the store writes.
    """
    write_ops = [
        utils_embedding.embedding_update(store, node, embedding)
        for store, embedding in zip(self.stores, node_embeddings)
    ]
    return tf.group(*write_ops)