Example #1
 def _define_embed_graph(self):
     with tf.name_scope('triple_placeholder'):
         self.pos_hs = tf.placeholder(tf.int32, shape=[None])
         self.pos_rs = tf.placeholder(tf.int32, shape=[None])
         self.pos_ts = tf.placeholder(tf.int32, shape=[None])
         self.neg_hs = tf.placeholder(tf.int32, shape=[None])
         self.neg_rs = tf.placeholder(tf.int32, shape=[None])
         self.neg_ts = tf.placeholder(tf.int32, shape=[None])
     with tf.name_scope('triple_lookup'):
         phs = tf.nn.embedding_lookup(self.ent_embeds, self.pos_hs)
         prs = tf.nn.embedding_lookup(self.rel_embeds, self.pos_rs)
         pts = tf.nn.embedding_lookup(self.ent_embeds, self.pos_ts)
         nhs = tf.nn.embedding_lookup(self.ent_embeds, self.neg_hs)
         nrs = tf.nn.embedding_lookup(self.rel_embeds, self.neg_rs)
         nts = tf.nn.embedding_lookup(self.ent_embeds, self.neg_ts)
     with tf.name_scope('triple_loss'):
         self.triple_loss = limited_loss(
             phs,
             prs,
             pts,
             nhs,
             nrs,
             nts,
             self.args.pos_margin,
             self.args.neg_margin,
             self.args.loss_norm,
             balance=self.args.neg_margin_balance)
         self.triple_optimizer = generate_optimizer(self.triple_loss,
                                                    self.args.learning_rate,
                                                    opt=self.args.optimizer)
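The snippet above calls two helpers that are not shown here, limited_loss and generate_optimizer. Below is a minimal sketch of what they might look like, assuming the limit-based (bounded-margin) TransE-style loss used by BootEA and a thin optimizer factory; the bodies are an approximation for illustration, not the library's actual code.

 import tensorflow as tf

 def limited_loss(phs, prs, pts, nhs, nrs, nts,
                  pos_margin, neg_margin, loss_norm='L2', balance=1.0):
     # Translation-based score: ||h + r - t|| under the chosen norm.
     pos_distance = phs + prs - pts
     neg_distance = nhs + nrs - nts
     if loss_norm.upper() == 'L1':
         pos_score = tf.reduce_sum(tf.abs(pos_distance), axis=1)
         neg_score = tf.reduce_sum(tf.abs(neg_distance), axis=1)
     else:
         pos_score = tf.reduce_sum(tf.square(pos_distance), axis=1)
         neg_score = tf.reduce_sum(tf.square(neg_distance), axis=1)
     # Limit-based loss: push positive scores below pos_margin and negative
     # scores above neg_margin, with a balance weight on the negative term.
     pos_loss = tf.reduce_sum(tf.nn.relu(pos_score - pos_margin))
     neg_loss = tf.reduce_sum(tf.nn.relu(neg_margin - neg_score))
     return pos_loss + balance * neg_loss

 def generate_optimizer(loss, learning_rate, opt='SGD'):
     # Map the option string to a TF1 optimizer and return its minimize op.
     if opt == 'Adagrad':
         optimizer = tf.train.AdagradOptimizer(learning_rate)
     elif opt == 'Adam':
         optimizer = tf.train.AdamOptimizer(learning_rate)
     else:
         optimizer = tf.train.GradientDescentOptimizer(learning_rate)
     return optimizer.minimize(loss)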
Example #2
 def _define_embed_graph(self):
     with tf.name_scope('triple_placeholder'):
         # Create placeholders for the positive and negative triples: head, relation and tail ids.
         self.pos_hs = tf.placeholder(tf.int32, shape=[None])
         self.pos_rs = tf.placeholder(tf.int32, shape=[None])
         self.pos_ts = tf.placeholder(tf.int32, shape=[None])
         self.neg_hs = tf.placeholder(tf.int32, shape=[None])
         self.neg_rs = tf.placeholder(tf.int32, shape=[None])
         self.neg_ts = tf.placeholder(tf.int32, shape=[None])
     with tf.name_scope('triple_lookup'):
         # Look up the embedding vectors for the given ids in the entity/relation
         # embedding matrices; shape=[None] on the placeholders just means the
         # batch size is not fixed at graph-construction time.
         phs = tf.nn.embedding_lookup(self.ent_embeds, self.pos_hs)
         prs = tf.nn.embedding_lookup(self.rel_embeds, self.pos_rs)
         pts = tf.nn.embedding_lookup(self.ent_embeds, self.pos_ts)
         nhs = tf.nn.embedding_lookup(self.ent_embeds, self.neg_hs)
         nrs = tf.nn.embedding_lookup(self.rel_embeds, self.neg_rs)
         nts = tf.nn.embedding_lookup(self.ent_embeds, self.neg_ts)
     with tf.name_scope('triple_loss'):
         # Compute the limit-based loss and build the optimizer op (Adagrad for BootEA) that minimizes it.
         self.triple_loss = limited_loss(phs, prs, pts, nhs, nrs, nts,
                                         self.args.pos_margin, self.args.neg_margin,
                                         self.args.loss_norm, balance=self.args.neg_margin_balance)
         self.triple_optimizer = generate_optimizer(self.triple_loss, self.args.learning_rate, opt=self.args.optimizer)
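For context, a hypothetical training step would feed batches of positive and negative triple ids into these placeholders and run the optimizer op; the names model, session, pos_batch and neg_batch below are assumptions for illustration, not part of the snippets above.

 # pos_batch and neg_batch are assumed to be int arrays of shape [batch_size, 3]
 # holding (head, relation, tail) ids; model and session are assumed to exist.
 feed_dict = {
     model.pos_hs: pos_batch[:, 0], model.pos_rs: pos_batch[:, 1], model.pos_ts: pos_batch[:, 2],
     model.neg_hs: neg_batch[:, 0], model.neg_rs: neg_batch[:, 1], model.neg_ts: neg_batch[:, 2],
 }
 _, loss_value = session.run([model.triple_optimizer, model.triple_loss], feed_dict=feed_dict)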