def _define_embed_graph(self):
    """Build the triple-loss graph plus an alignment loss that pulls the
    embeddings of pre-aligned entity pairs together (summed squared
    distance). Creates placeholders, losses and optimizers on ``self``."""
    with tf.name_scope('triple_placeholder'):
        # Positive / negative triple id batches (equal length per batch).
        self.pos_hs = tf.placeholder(tf.int32, shape=[None])
        self.pos_rs = tf.placeholder(tf.int32, shape=[None])
        self.pos_ts = tf.placeholder(tf.int32, shape=[None])
        self.neg_hs = tf.placeholder(tf.int32, shape=[None])
        self.neg_rs = tf.placeholder(tf.int32, shape=[None])
        self.neg_ts = tf.placeholder(tf.int32, shape=[None])
        # Known-aligned entity pairs (same index = same pair).
        self.aligned_ents1 = tf.placeholder(tf.int32, shape=[None])
        self.aligned_ents2 = tf.placeholder(tf.int32, shape=[None])
    with tf.name_scope('triple_lookup'):
        phs = tf.nn.embedding_lookup(self.ent_embeds, self.pos_hs)
        prs = tf.nn.embedding_lookup(self.rel_embeds, self.pos_rs)
        pts = tf.nn.embedding_lookup(self.ent_embeds, self.pos_ts)
        nhs = tf.nn.embedding_lookup(self.ent_embeds, self.neg_hs)
        nrs = tf.nn.embedding_lookup(self.rel_embeds, self.neg_rs)
        nts = tf.nn.embedding_lookup(self.ent_embeds, self.neg_ts)
        ents1 = tf.nn.embedding_lookup(self.ent_embeds, self.aligned_ents1)
        ents2 = tf.nn.embedding_lookup(self.ent_embeds, self.aligned_ents2)
    with tf.name_scope('triple_loss'):
        # Loss form and optimizer choice are delegated to args-driven helpers.
        self.triple_loss = get_loss_func(phs, prs, pts, nhs, nrs, nts, self.args)
        self.triple_optimizer = generate_optimizer(self.triple_loss, self.args.learning_rate,
                                                   opt=self.args.optimizer)
    with tf.name_scope('align_loss'):
        # Squared L2 distance between aligned pairs, summed over the batch.
        self.align_loss = tf.reduce_sum(
            tf.reduce_sum(tf.pow(ents1 - ents2, 2), 1))
        self.align_optimizer = generate_optimizer(self.align_loss, self.args.learning_rate,
                                                  opt=self.args.optimizer)
def _define_embed_graph(self):
    """Build three objectives: (1) a relation-triple loss, (2) an
    attribute-triple loss over character-composed literal values, and
    (3) a joint loss pulling each entity's two embeddings (structure view
    vs. character/attribute view) together."""
    with tf.name_scope('triple_placeholder'):
        # Relation triples (h, r, t), positive and negative.
        self.pos_hs = tf.placeholder(tf.int32, shape=[None])
        self.pos_rs = tf.placeholder(tf.int32, shape=[None])
        self.pos_ts = tf.placeholder(tf.int32, shape=[None])
        self.neg_hs = tf.placeholder(tf.int32, shape=[None])
        self.neg_rs = tf.placeholder(tf.int32, shape=[None])
        self.neg_ts = tf.placeholder(tf.int32, shape=[None])
        # Attribute triples (entity, attribute, literal-value id).
        self.pos_es = tf.placeholder(tf.int32, shape=[None])
        self.pos_as = tf.placeholder(tf.int32, shape=[None])
        self.pos_vs = tf.placeholder(tf.int32, shape=[None])
        self.neg_es = tf.placeholder(tf.int32, shape=[None])
        self.neg_as = tf.placeholder(tf.int32, shape=[None])
        self.neg_vs = tf.placeholder(tf.int32, shape=[None])
        # Entities trained under both views.
        self.joint_ents = tf.placeholder(tf.int32, shape=[None])
    with tf.name_scope('triple_lookup'):
        phs = tf.nn.embedding_lookup(self.ent_embeds, self.pos_hs)
        prs = tf.nn.embedding_lookup(self.rel_embeds, self.pos_rs)
        pts = tf.nn.embedding_lookup(self.ent_embeds, self.pos_ts)
        nhs = tf.nn.embedding_lookup(self.ent_embeds, self.neg_hs)
        nrs = tf.nn.embedding_lookup(self.rel_embeds, self.neg_rs)
        nts = tf.nn.embedding_lookup(self.ent_embeds, self.neg_ts)
        pes = tf.nn.embedding_lookup(self.ent_embeds_ce, self.pos_es)
        pas = tf.nn.embedding_lookup(self.attr_embeds, self.pos_as)
        # Literal values: value id -> char-id sequence -> char embeddings.
        pvs = tf.nn.embedding_lookup(self.char_embeds,
                                     tf.nn.embedding_lookup(self.value_id_char_ids, self.pos_vs))
        nes = tf.nn.embedding_lookup(self.ent_embeds_ce, self.neg_es)
        nas = tf.nn.embedding_lookup(self.attr_embeds, self.neg_as)
        nvs = tf.nn.embedding_lookup(self.char_embeds,
                                     tf.nn.embedding_lookup(self.value_id_char_ids, self.neg_vs))
        # Compose the char embeddings of each literal into one dim-sized
        # vector; negatives carry neg_triple_num samples per positive,
        # hence the larger effective batch size.
        pvs = n_gram_compositional_func(pvs, self.args.literal_len, self.args.batch_size,
                                        self.args.dim)
        nvs = n_gram_compositional_func(nvs, self.args.literal_len,
                                        self.args.batch_size * self.args.neg_triple_num,
                                        self.args.dim)
        ents_se = tf.nn.embedding_lookup(self.ent_embeds, self.joint_ents)
        ents_ce = tf.nn.embedding_lookup(self.ent_embeds_ce, self.joint_ents)
    with tf.name_scope('triple_loss'):
        self.triple_loss = get_loss_func(phs, prs, pts, nhs, nrs, nts, self.args)
        self.triple_optimizer = generate_optimizer(self.triple_loss, self.args.learning_rate,
                                                   opt=self.args.optimizer)
        self.triple_loss_ce = get_loss_func(pes, pas, pvs, nes, nas, nvs, self.args)
        self.triple_optimizer_ce = generate_optimizer(self.triple_loss_ce, self.args.learning_rate,
                                                      opt=self.args.optimizer)
        # NOTE(review): despite the name, this is a plain dot product — the
        # vectors are not normalized here. Presumably they are L2-normalized
        # upstream; confirm before relying on a [-1, 1] range.
        cos_sim = tf.reduce_sum(tf.multiply(ents_se, ents_ce), 1, keep_dims=True)
        self.joint_loss = tf.reduce_sum(1 - cos_sim)
        self.optimizer_joint = generate_optimizer(self.joint_loss, self.args.learning_rate,
                                                  opt=self.args.optimizer)
def _define_relation_view_graph(self):
    """Build the relation-view objective: a logistic loss over positive and
    negative relation triples, embedded with the relation-view entity table."""
    with tf.name_scope('relation_triple_placeholder'):
        self.rel_pos_hs = tf.placeholder(tf.int32, shape=[None])
        self.rel_pos_rs = tf.placeholder(tf.int32, shape=[None])
        self.rel_pos_ts = tf.placeholder(tf.int32, shape=[None])
        self.rel_neg_hs = tf.placeholder(tf.int32, shape=[None])
        self.rel_neg_rs = tf.placeholder(tf.int32, shape=[None])
        self.rel_neg_ts = tf.placeholder(tf.int32, shape=[None])
    with tf.name_scope('relation_triple_lookup'):
        def ent(ids):
            # Entities come from the relation-view embedding table.
            return tf.nn.embedding_lookup(self.rv_ent_embeds, ids)

        def rel(ids):
            return tf.nn.embedding_lookup(self.rel_embeds, ids)

        pos_h, pos_r, pos_t = ent(self.rel_pos_hs), rel(self.rel_pos_rs), ent(self.rel_pos_ts)
        neg_h, neg_r, neg_t = ent(self.rel_neg_hs), rel(self.rel_neg_rs), ent(self.rel_neg_ts)
    with tf.name_scope('relation_triple_loss'):
        self.relation_loss = relation_logistic_loss(pos_h, pos_r, pos_t, neg_h, neg_r, neg_t)
        self.relation_optimizer = generate_optimizer(self.relation_loss,
                                                     self.args.learning_rate,
                                                     opt=self.args.optimizer)
def _define_embed_graph(self):
    """Margin-based triple objective with relation-specific entity
    transformation: entities are transformed by ``_calc`` using each
    relation's normal vector (hyperplane-projection style — TODO confirm
    against ``_calc``) before scoring."""
    with tf.name_scope('triple_placeholder'):
        self.pos_hs = tf.placeholder(tf.int32, shape=[None])
        self.pos_rs = tf.placeholder(tf.int32, shape=[None])
        self.pos_ts = tf.placeholder(tf.int32, shape=[None])
        self.neg_hs = tf.placeholder(tf.int32, shape=[None])
        self.neg_rs = tf.placeholder(tf.int32, shape=[None])
        self.neg_ts = tf.placeholder(tf.int32, shape=[None])
    with tf.name_scope('triple_lookup'):
        phs = tf.nn.embedding_lookup(self.ent_embeds, self.pos_hs)
        prs = tf.nn.embedding_lookup(self.rel_embeds, self.pos_rs)
        pts = tf.nn.embedding_lookup(self.ent_embeds, self.pos_ts)
        nhs = tf.nn.embedding_lookup(self.ent_embeds, self.neg_hs)
        nrs = tf.nn.embedding_lookup(self.rel_embeds, self.neg_rs)
        nts = tf.nn.embedding_lookup(self.ent_embeds, self.neg_ts)
        # Per-relation normal vectors indexed by the triple's relation.
        pos_norm_vec = tf.nn.embedding_lookup(self.normal_vector, self.pos_rs)
        neg_norm_vec = tf.nn.embedding_lookup(self.normal_vector, self.neg_rs)
        # Transform head/tail embeddings with the relation's normal vector.
        phs = self._calc(phs, pos_norm_vec)
        pts = self._calc(pts, pos_norm_vec)
        nhs = self._calc(nhs, neg_norm_vec)
        nts = self._calc(nts, neg_norm_vec)
    with tf.name_scope('triple_loss'):
        self.triple_loss = margin_loss(phs, prs, pts, nhs, nrs, nts, self.args.margin,
                                       self.args.loss_norm)
        self.triple_optimizer = generate_optimizer(self.triple_loss, self.args.learning_rate,
                                                   opt=self.args.optimizer)
def _define_embed_graph(self):
    """Triple objective with learned transfer vectors: each entity and
    relation has a second "transfer" embedding that ``_calc`` uses to
    project entities into a relation-specific space (TransD-style — TODO
    confirm against ``_calc``) before the loss."""
    with tf.name_scope('triple_placeholder'):
        self.pos_hs = tf.placeholder(tf.int32, shape=[None])
        self.pos_rs = tf.placeholder(tf.int32, shape=[None])
        self.pos_ts = tf.placeholder(tf.int32, shape=[None])
        self.neg_hs = tf.placeholder(tf.int32, shape=[None])
        self.neg_rs = tf.placeholder(tf.int32, shape=[None])
        self.neg_ts = tf.placeholder(tf.int32, shape=[None])
    with tf.name_scope('triple_lookup'):
        # Naming: *e = embedding, *t = transfer vector.
        phe = tf.nn.embedding_lookup(self.ent_embeds, self.pos_hs)
        pte = tf.nn.embedding_lookup(self.ent_embeds, self.pos_ts)
        pre = tf.nn.embedding_lookup(self.rel_embeds, self.pos_rs)
        pht = tf.nn.embedding_lookup(self.ent_transfer, self.pos_hs)
        ptt = tf.nn.embedding_lookup(self.ent_transfer, self.pos_ts)
        prt = tf.nn.embedding_lookup(self.rel_transfer, self.pos_rs)
        nhe = tf.nn.embedding_lookup(self.ent_embeds, self.neg_hs)
        nte = tf.nn.embedding_lookup(self.ent_embeds, self.neg_ts)
        nre = tf.nn.embedding_lookup(self.rel_embeds, self.neg_rs)
        nht = tf.nn.embedding_lookup(self.ent_transfer, self.neg_hs)
        ntt = tf.nn.embedding_lookup(self.ent_transfer, self.neg_ts)
        nrt = tf.nn.embedding_lookup(self.rel_transfer, self.neg_rs)
    with tf.name_scope('projection'):
        # Project each entity with its own and the relation's transfer vector.
        phe = self._calc(phe, pht, prt)
        pte = self._calc(pte, ptt, prt)
        nhe = self._calc(nhe, nht, nrt)
        nte = self._calc(nte, ntt, nrt)
    with tf.name_scope('triple_loss'):
        self.triple_loss = get_loss_func(phe, pre, pte, nhe, nre, nte, self.args)
        self.triple_optimizer = generate_optimizer(self.triple_loss, self.args.learning_rate,
                                                   opt=self.args.optimizer)
def _define_embed_graph(self):
    """Triple objective using ``limited_loss``: positives and negatives are
    bounded by separate margins, balanced by ``neg_margin_balance``."""
    with tf.name_scope('triple_placeholder'):
        self.pos_hs = tf.placeholder(tf.int32, shape=[None])
        self.pos_rs = tf.placeholder(tf.int32, shape=[None])
        self.pos_ts = tf.placeholder(tf.int32, shape=[None])
        self.neg_hs = tf.placeholder(tf.int32, shape=[None])
        self.neg_rs = tf.placeholder(tf.int32, shape=[None])
        self.neg_ts = tf.placeholder(tf.int32, shape=[None])
    with tf.name_scope('triple_lookup'):
        pos_h = tf.nn.embedding_lookup(self.ent_embeds, self.pos_hs)
        pos_r = tf.nn.embedding_lookup(self.rel_embeds, self.pos_rs)
        pos_t = tf.nn.embedding_lookup(self.ent_embeds, self.pos_ts)
        neg_h = tf.nn.embedding_lookup(self.ent_embeds, self.neg_hs)
        neg_r = tf.nn.embedding_lookup(self.rel_embeds, self.neg_rs)
        neg_t = tf.nn.embedding_lookup(self.ent_embeds, self.neg_ts)
    with tf.name_scope('triple_loss'):
        self.triple_loss = limited_loss(pos_h, pos_r, pos_t,
                                        neg_h, neg_r, neg_t,
                                        self.args.pos_margin,
                                        self.args.neg_margin,
                                        self.args.loss_norm,
                                        balance=self.args.neg_margin_balance)
        self.triple_optimizer = generate_optimizer(self.triple_loss,
                                                   self.args.learning_rate,
                                                   opt=self.args.optimizer)
def _define_embed_graph(self):
    """Neural triple model: batch-normed head and relation embeddings are
    combined by a linear layer into a query vector that is trained against
    the true tail entities with sampled NCE loss (no explicit negative
    placeholders — negatives come from NCE sampling)."""
    with tf.name_scope('triple_placeholder'):
        self.pos_hs = tf.placeholder(tf.int32, shape=[None])
        self.pos_rs = tf.placeholder(tf.int32, shape=[None])
        self.pos_ts = tf.placeholder(tf.int32, shape=[None])
    with tf.name_scope('triple_lookup'):
        phs = tf.nn.embedding_lookup(self.ent_embeds, self.pos_hs)
        prs = tf.nn.embedding_lookup(self.rel_embeds, self.pos_rs)
    with tf.variable_scope('input_bn', reuse=tf.AUTO_REUSE):
        bn_phs = tf.contrib.layers.batch_norm(phs, scope='bn')
        # Reuses the same 'bn' parameters as the head input above.
        bn_prs = tf.contrib.layers.batch_norm(prs, reuse=True, scope='bn')
    with tf.variable_scope('mlp', reuse=tf.AUTO_REUSE):
        # NOTE(review): under AUTO_REUSE both get_variable('mlp_w') calls
        # resolve to the SAME variable, so head and relation share one
        # weight vector — confirm this sharing is intentional.
        out_prs = bn_phs * tf.get_variable('mlp_w', [self.args.dim]) + \
                  bn_prs * tf.get_variable('mlp_w', [self.args.dim]) + \
                  tf.get_variable('mlp_bias', [self.args.dim])
    with tf.variable_scope('output_bn', reuse=tf.AUTO_REUSE):
        bn_out_prs = tf.contrib.layers.batch_norm(out_prs, scope='bn')
    with tf.name_scope('triple_loss'):
        # True tail vs dnn_neg_nums sampled entities.
        triple_loss = tf.nn.nce_loss(
            weights=self.entity_w,
            biases=self.entity_b,
            labels=tf.reshape(self.pos_ts, [-1, 1]),
            inputs=bn_out_prs,
            num_sampled=self.args.dnn_neg_nums,
            num_classes=self.kgs.entities_num,
            partition_strategy='div')
        self.triple_loss = tf.reduce_sum(triple_loss)
        self.triple_optimizer = generate_optimizer(self.triple_loss, self.args.learning_rate,
                                                   opt=self.args.optimizer)
def _define_embed_graph(self):
    """Triple + relation-path objective: besides positive/negative triples,
    two-hop relation paths (rx, ry) are trained against a direct relation r
    (PTransE-style — TODO confirm against ``_generate_loss``), with a
    per-path weight."""
    self.pos_hs = tf.placeholder(tf.int32, shape=[None])
    self.pos_rs = tf.placeholder(tf.int32, shape=[None])
    self.pos_ts = tf.placeholder(tf.int32, shape=[None])
    self.neg_hs = tf.placeholder(tf.int32, shape=[None])
    self.neg_rs = tf.placeholder(tf.int32, shape=[None])
    self.neg_ts = tf.placeholder(tf.int32, shape=[None])
    # Path components: (rx, ry) is a relation path, r the direct relation.
    self.pos_rx = tf.placeholder(tf.int32, shape=[None])
    self.pos_ry = tf.placeholder(tf.int32, shape=[None])
    self.pos_r = tf.placeholder(tf.int32, shape=[None])
    self.neg_rx = tf.placeholder(tf.int32, shape=[None])
    self.neg_ry = tf.placeholder(tf.int32, shape=[None])
    self.neg_r = tf.placeholder(tf.int32, shape=[None])
    # Per-path weight (float), e.g. path reliability — TODO confirm source.
    self.path_weight = tf.placeholder(tf.float32, shape=[None])
    phs = tf.nn.embedding_lookup(self.ent_embeds, self.pos_hs)
    prs = tf.nn.embedding_lookup(self.rel_embeds, self.pos_rs)
    pts = tf.nn.embedding_lookup(self.ent_embeds, self.pos_ts)
    nhs = tf.nn.embedding_lookup(self.ent_embeds, self.neg_hs)
    nrs = tf.nn.embedding_lookup(self.rel_embeds, self.neg_rs)
    nts = tf.nn.embedding_lookup(self.ent_embeds, self.neg_ts)
    prx = tf.nn.embedding_lookup(self.rel_embeds, self.pos_rx)
    pry = tf.nn.embedding_lookup(self.rel_embeds, self.pos_ry)
    pr = tf.nn.embedding_lookup(self.rel_embeds, self.pos_r)
    nrx = tf.nn.embedding_lookup(self.rel_embeds, self.neg_rx)
    nry = tf.nn.embedding_lookup(self.rel_embeds, self.neg_ry)
    nr = tf.nn.embedding_lookup(self.rel_embeds, self.neg_r)
    # Combined triple + path loss is delegated to the model.
    self.train_loss = self._generate_loss(phs, prs, pts, nhs, nrs, nts,
                                          prx, pry, pr, nrx, nry, nr, self.path_weight)
    self.optimizer = generate_optimizer(self.train_loss, self.args.learning_rate,
                                        opt=self.args.optimizer)
def _define_only_attribute_graph(self):
    """Attribute-view objective for numeric literals: each raw value is
    encoded with a Gaussian RBF layer (per-attribute centers and widths),
    linearly projected into the embedding space, and scored against the
    (entity, attribute) pair; logistic loss on positives only."""
    with tf.name_scope('attribute_triple_placeholder'):
        self.attr_pos_hs = tf.placeholder(tf.int32, shape=[None])
        self.attr_pos_as = tf.placeholder(tf.int32, shape=[None])
        # Raw numeric literal values (floats), not ids.
        self.attr_pos_vs = tf.placeholder(tf.float32, shape=[None])
    with tf.name_scope('attribute_triple_lookup'):
        attr_phs = tf.nn.embedding_lookup(self.av_ent_embeds, self.attr_pos_hs)
        attr_pas = tf.nn.embedding_lookup(self.attr_embeds, self.attr_pos_as)
        # Per-attribute RBF centers and widths.
        attr_pc = tf.nn.embedding_lookup(self.av_c, self.attr_pos_as)
        attr_pdelta = tf.nn.embedding_lookup(self.av_delta, self.attr_pos_as)
    with tf.variable_scope('attribute_cnn'):
        a_pos_vs = tf.reshape(self.attr_pos_vs, [-1, 1])
        # RBF activations: exp(-(v - c)^2 / delta^2), one per rbf dimension.
        dist = -tf.square(tf.subtract(tf.tile(a_pos_vs, [1, self.args.rbf_dim]), attr_pc))
        delta2 = tf.square(attr_pdelta)
        RBF_out = tf.exp(tf.divide(dist, delta2))
        # Linear projection of RBF features into the embedding space.
        attr_pvs = tf.matmul(RBF_out, self.av_W) + self.av_b
        pos_score = attr_conv(attr_phs, attr_pas, attr_pvs, self.args.dim)
        # Logistic loss over positive triples only.
        pos_score = tf.log(1 + tf.exp(-pos_score))
        pos_loss = tf.reduce_sum(pos_score)
        self.attribute_loss = pos_loss
        tf.summary.scalar('attr.loss', self.attribute_loss)
        self.attribute_optimizer = generate_optimizer(self.attribute_loss, self.args.learning_rate,
                                                      opt=self.args.optimizer)
def _define_embed_graph(self):
    """Triple objective that minimizes the squared translation distance of
    positives while subtracting the (neg_alpha-weighted) distance of
    negatives."""
    with tf.name_scope('triple_placeholder'):
        self.pos_hs = tf.placeholder(tf.int32, shape=[None])
        self.pos_rs = tf.placeholder(tf.int32, shape=[None])
        self.pos_ts = tf.placeholder(tf.int32, shape=[None])
        self.neg_hs = tf.placeholder(tf.int32, shape=[None])
        self.neg_rs = tf.placeholder(tf.int32, shape=[None])
        self.neg_ts = tf.placeholder(tf.int32, shape=[None])
    with tf.name_scope('triple_lookup'):
        pos_h = tf.nn.embedding_lookup(self.ent_embeds, self.pos_hs)
        pos_r = tf.nn.embedding_lookup(self.rel_embeds, self.pos_rs)
        pos_t = tf.nn.embedding_lookup(self.ent_embeds, self.pos_ts)
        neg_h = tf.nn.embedding_lookup(self.ent_embeds, self.neg_hs)
        neg_r = tf.nn.embedding_lookup(self.rel_embeds, self.neg_rs)
        neg_t = tf.nn.embedding_lookup(self.ent_embeds, self.neg_ts)
    with tf.name_scope('triple_loss'):
        with tf.name_scope('jape_loss_distance'):
            # Translation residuals h + r - t.
            pos_diff = pos_h + pos_r - pos_t
            neg_diff = neg_h + neg_r - neg_t
        with tf.name_scope('jape_loss_score'):
            # Per-triple squared L2 distances, then batch totals.
            pos_total = tf.reduce_sum(tf.reduce_sum(tf.square(pos_diff), axis=1))
            neg_total = tf.reduce_sum(tf.reduce_sum(tf.square(neg_diff), axis=1))
        self.triple_loss = pos_total - self.args.neg_alpha * neg_total
        self.triple_optimizer = generate_optimizer(self.triple_loss,
                                                   self.args.learning_rate,
                                                   opt=self.args.optimizer)
def _define_embed_graph(self):
    """Margin-based ranking objective; when several negatives are sampled
    per positive, their scores are averaged before the hinge."""
    with tf.name_scope('triple_placeholder'):
        self.pos_hs = tf.placeholder(tf.int32, shape=[None])
        self.pos_rs = tf.placeholder(tf.int32, shape=[None])
        self.pos_ts = tf.placeholder(tf.int32, shape=[None])
        self.neg_hs = tf.placeholder(tf.int32, shape=[None])
        self.neg_rs = tf.placeholder(tf.int32, shape=[None])
        self.neg_ts = tf.placeholder(tf.int32, shape=[None])
    with tf.name_scope('triple_lookup'):
        phs = tf.nn.embedding_lookup(self.ent_embeds, self.pos_hs)
        prs = tf.nn.embedding_lookup(self.rel_embeds, self.pos_rs)
        pts = tf.nn.embedding_lookup(self.ent_embeds, self.pos_ts)
        nhs = tf.nn.embedding_lookup(self.ent_embeds, self.neg_hs)
        nrs = tf.nn.embedding_lookup(self.rel_embeds, self.neg_rs)
        nts = tf.nn.embedding_lookup(self.ent_embeds, self.neg_ts)
    with tf.name_scope('triple_loss'):
        # _calc scores each triple; the hinge below treats lower as better
        # for positives — TODO confirm sign convention against _calc.
        pos_score = self._calc(phs, pts, prs)
        neg_score = self._calc(nhs, nts, nrs)
        if self.args.neg_triple_num > 1:
            # Average the neg_triple_num negatives of each positive so the
            # hinge compares one value per positive triple.
            neg_score = tf.reshape(neg_score, [-1, self.args.neg_triple_num])
            neg_score = tf.reduce_mean(neg_score, 1, keep_dims=True)
        self.triple_loss = tf.reduce_sum(tf.nn.relu(
            tf.constant(self.args.margin) + pos_score - neg_score), name='margin_loss')
        self.triple_optimizer = generate_optimizer(self.triple_loss, self.args.learning_rate,
                                                   opt=self.args.optimizer)
def _define_embed_graph(self):
    """Bidirectional triple objective: each triple (h, r, t) is embedded
    forward (rel_embeds1) and reversed as (t, r, h) (rel_embeds2), with
    separate head-role and tail-role entity tables."""
    with tf.name_scope('triple_placeholder'):
        self.pos_hs = tf.placeholder(tf.int32, shape=[None])
        self.pos_rs = tf.placeholder(tf.int32, shape=[None])
        self.pos_ts = tf.placeholder(tf.int32, shape=[None])
        self.neg_hs = tf.placeholder(tf.int32, shape=[None])
        self.neg_rs = tf.placeholder(tf.int32, shape=[None])
        self.neg_ts = tf.placeholder(tf.int32, shape=[None])
    with tf.name_scope('triple_lookup'):
        # Forward direction: h in head role, t in tail role.
        phs1 = tf.nn.embedding_lookup(self.head_ent_embeds, self.pos_hs)
        prs1 = tf.nn.embedding_lookup(self.rel_embeds1, self.pos_rs)
        pts1 = tf.nn.embedding_lookup(self.tail_ent_embeds, self.pos_ts)
        # Reverse direction: t in head role, h in tail role.
        phs2 = tf.nn.embedding_lookup(self.head_ent_embeds, self.pos_ts)
        prs2 = tf.nn.embedding_lookup(self.rel_embeds2, self.pos_rs)
        pts2 = tf.nn.embedding_lookup(self.tail_ent_embeds, self.pos_hs)
        nhs1 = tf.nn.embedding_lookup(self.head_ent_embeds, self.neg_hs)
        nrs1 = tf.nn.embedding_lookup(self.rel_embeds1, self.neg_rs)
        nts1 = tf.nn.embedding_lookup(self.tail_ent_embeds, self.neg_ts)
        nhs2 = tf.nn.embedding_lookup(self.head_ent_embeds, self.neg_ts)
        nrs2 = tf.nn.embedding_lookup(self.rel_embeds2, self.neg_rs)
        nts2 = tf.nn.embedding_lookup(self.tail_ent_embeds, self.neg_hs)
    with tf.name_scope('triple_loss'):
        # Positive and negative contributions are produced by the same
        # helper, switched by the `pos` flag.
        self.triple_loss = self._generate_loss(phs1, prs1, pts1, phs2, prs2, pts2, pos=True) + \
                           self._generate_loss(nhs1, nrs1, nts1, nhs2, nrs2, nts2, pos=False)
        self.triple_optimizer = generate_optimizer(self.triple_loss, self.args.learning_rate,
                                                   opt=self.args.optimizer)
def _define_space_mapping_graph(self):
    """Learn orthogonality-regularized mappings from each view space (name,
    relation, attribute) into the combined entity space; only variables
    whose name starts with "shared" are optimized."""
    with tf.name_scope('final_entities_placeholder'):
        # Fixed-size entity batches (required by the mapping losses —
        # TODO confirm why a static batch size is needed).
        self.entities = tf.placeholder(tf.int32, shape=[self.args.entity_batch_size, ])
    with tf.name_scope('multi_view_entities_lookup'):
        final_ents = tf.nn.embedding_lookup(self.ent_embeds, self.entities)
        nv_ents = tf.nn.embedding_lookup(self.name_embeds, self.entities)
        rv_ents = tf.nn.embedding_lookup(self.rv_ent_embeds, self.entities)
        av_ents = tf.nn.embedding_lookup(self.av_ent_embeds, self.entities)
    with tf.name_scope('mapping_loss'):
        # One mapping loss per view, each regularized toward orthogonality
        # via eye_mat and orthogonal_weight.
        nv_space_mapping_loss = space_mapping_loss(nv_ents, final_ents, self.nv_mapping,
                                                   self.eye_mat, self.args.orthogonal_weight)
        rv_space_mapping_loss = space_mapping_loss(rv_ents, final_ents, self.rv_mapping,
                                                   self.eye_mat, self.args.orthogonal_weight)
        av_space_mapping_loss = space_mapping_loss(av_ents, final_ents, self.av_mapping,
                                                   self.eye_mat, self.args.orthogonal_weight)
        self.shared_comb_loss = nv_space_mapping_loss + rv_space_mapping_loss + av_space_mapping_loss
        # Restrict optimization to the shared mapping variables.
        opt_vars = [v for v in tf.trainable_variables() if v.name.startswith("shared")]
        self.shared_comb_optimizer = generate_optimizer(self.shared_comb_loss,
                                                        self.args.learning_rate,
                                                        var_list=opt_vars,
                                                        opt=self.args.optimizer)
def _loss_optimizer(self):
    """Autoencoder reconstruction objective: mean squared error between the
    decoder output and the input batch."""
    code = self.encoder(self.batch)
    if self.args.encoder_normalize:
        # Optionally constrain the code to unit L2 norm.
        code = tf.nn.l2_normalize(code)
    reconstruction = self.decoder(code)
    squared_error = tf.pow(reconstruction - self.batch, 2)
    self.loss = tf.reduce_mean(squared_error)
    self.optimizer = generate_optimizer(self.loss,
                                        self.args.learning_rate,
                                        opt=self.args.optimizer)
def _define_cross_kg_attribute_reference_graph(self):
    """Weighted logistic loss over cross-KG attribute references: each
    (entity, attribute, literal) triple is scored by ``conv`` and weighted
    by a per-triple float weight."""
    with tf.name_scope('cross_kg_attribute_reference_placeholder'):
        self.ckga_attr_pos_hs = tf.placeholder(tf.int32, shape=[None])
        self.ckga_attr_pos_as = tf.placeholder(tf.int32, shape=[None])
        self.ckga_attr_pos_vs = tf.placeholder(tf.int32, shape=[None])
        # Per-triple weight — presumably a reference confidence; confirm
        # against the feeding code.
        self.ckga_attr_pos_ws = tf.placeholder(tf.float32, shape=[None])
    with tf.name_scope('cross_kg_attribute_reference_lookup'):
        ckga_attr_phs = tf.nn.embedding_lookup(self.av_ent_embeds, self.ckga_attr_pos_hs)
        ckga_attr_pas = tf.nn.embedding_lookup(self.attr_embeds, self.ckga_attr_pos_as)
        ckga_attr_pvs = tf.nn.embedding_lookup(self.literal_embeds, self.ckga_attr_pos_vs)
    with tf.name_scope('cross_kg_attribute_reference_loss'):
        pos_score = conv(ckga_attr_phs, ckga_attr_pas, ckga_attr_pvs, self.args.dim)
        # Logistic loss on positives, then weighted per triple.
        pos_score = tf.log(1 + tf.exp(-pos_score))
        pos_score = tf.multiply(pos_score, self.ckga_attr_pos_ws)
        pos_loss = tf.reduce_sum(pos_score)
        self.ckga_attribute_loss = pos_loss
        # self.ckga_attribute_loss = tf.reduce_sum(tf.log(1 + tf.exp(-pos_score)))
        self.ckga_attribute_optimizer = generate_optimizer(self.ckga_attribute_loss,
                                                           self.args.learning_rate,
                                                           opt=self.args.optimizer)
def _define_embed_graph(self):
    """Score positive and negative triples through the model's six-way
    lookup (``lookup_all``) and combine the scores with its own loss."""
    with tf.name_scope('triple_placeholder'):
        self.pos_hs = tf.placeholder(tf.int32, shape=[None])
        self.pos_rs = tf.placeholder(tf.int32, shape=[None])
        self.pos_ts = tf.placeholder(tf.int32, shape=[None])
        self.neg_hs = tf.placeholder(tf.int32, shape=[None])
        self.neg_rs = tf.placeholder(tf.int32, shape=[None])
        self.neg_ts = tf.placeholder(tf.int32, shape=[None])
    with tf.name_scope('triple_lookup'):
        # Each call yields the six embedding components the scorer expects.
        pos_parts = self.lookup_all(self.pos_hs, self.pos_rs, self.pos_ts)
        neg_parts = self.lookup_all(self.neg_hs, self.neg_rs, self.neg_ts)
    with tf.name_scope('triple_loss'):
        pos_scores = self._generate_scores(*pos_parts, pos=True)
        neg_scores = self._generate_scores(*neg_parts, pos=False)
        self.triple_loss = self._generate_loss(pos_scores, neg_scores)
        self.triple_optimizer = generate_optimizer(self.triple_loss,
                                                   self.args.learning_rate,
                                                   opt=self.args.optimizer)
def _define_embed_graph(self):
    """Build the triple-loss graph plus supervised and semi-supervised
    mapping losses between the two KG embedding spaces.

    Labeled pairs are mapped across spaces with ``mapping_mat_1`` and
    ``mapping_mat_2``; unlabeled entities are regularized by a round-trip
    consistency constraint through both matrices.
    """
    with tf.name_scope('triple_placeholder'):
        self.pos_hs = tf.placeholder(tf.int32, shape=[None])
        self.pos_rs = tf.placeholder(tf.int32, shape=[None])
        self.pos_ts = tf.placeholder(tf.int32, shape=[None])
        self.neg_hs = tf.placeholder(tf.int32, shape=[None])
        self.neg_rs = tf.placeholder(tf.int32, shape=[None])
        self.neg_ts = tf.placeholder(tf.int32, shape=[None])
    with tf.name_scope('triple_lookup'):
        phs = tf.nn.embedding_lookup(self.ent_embeds, self.pos_hs)
        prs = tf.nn.embedding_lookup(self.rel_embeds, self.pos_rs)
        pts = tf.nn.embedding_lookup(self.ent_embeds, self.pos_ts)
        nhs = tf.nn.embedding_lookup(self.ent_embeds, self.neg_hs)
        nrs = tf.nn.embedding_lookup(self.rel_embeds, self.neg_rs)
        nts = tf.nn.embedding_lookup(self.ent_embeds, self.neg_ts)
    with tf.name_scope('triple_loss'):
        self.triple_loss = get_loss_func(phs, prs, pts, nhs, nrs, nts, self.args)
        self.triple_optimizer = generate_optimizer(self.triple_loss, self.args.learning_rate,
                                                   opt=self.args.optimizer)
    with tf.name_scope('seed_links_placeholder'):
        # Labeled: entities with known alignments; unlabeled: without.
        self.labeled_entities1 = tf.placeholder(tf.int32, shape=[None])
        self.labeled_entities2 = tf.placeholder(tf.int32, shape=[None])
        self.unlabeled_entities1 = tf.placeholder(tf.int32, shape=[None])
        self.unlabeled_entities2 = tf.placeholder(tf.int32, shape=[None])
    with tf.name_scope('seed_links_lookup'):
        labeled_embeds1 = tf.nn.embedding_lookup(self.ent_embeds, self.labeled_entities1)
        labeled_embeds2 = tf.nn.embedding_lookup(self.ent_embeds, self.labeled_entities2)
        unlabeled_embeds1 = tf.nn.embedding_lookup(self.ent_embeds, self.unlabeled_entities1)
        unlabeled_embeds2 = tf.nn.embedding_lookup(self.ent_embeds, self.unlabeled_entities2)
    with tf.name_scope('sup_mapping_loss'):
        # Map each labeled entity into the other space and penalize the
        # squared distance to its known counterpart.
        mapped_12 = tf.nn.l2_normalize(tf.matmul(labeled_embeds1, self.mapping_mat_1))
        mapped_21 = tf.nn.l2_normalize(tf.matmul(labeled_embeds2, self.mapping_mat_2))
        map_loss_12 = tf.reduce_sum(tf.reduce_sum(tf.pow(labeled_embeds2 - mapped_12, 2), 1))
        map_loss_21 = tf.reduce_sum(tf.reduce_sum(tf.pow(labeled_embeds1 - mapped_21, 2), 1))
    with tf.name_scope('semi_sup_mapping_loss'):
        # Round-trip constraint: space1 -> space2 -> space1 (and vice
        # versa) should land back on the original embedding.
        semi_mapped_121 = tf.nn.l2_normalize(tf.matmul(tf.matmul(unlabeled_embeds1, self.mapping_mat_1),
                                                       self.mapping_mat_2))
        semi_mapped_212 = tf.nn.l2_normalize(tf.matmul(tf.matmul(unlabeled_embeds2, self.mapping_mat_2),
                                                       self.mapping_mat_1))
        map_loss_11 = tf.reduce_sum(tf.reduce_sum(tf.pow(unlabeled_embeds1 - semi_mapped_121, 2), 1))
        map_loss_22 = tf.reduce_sum(tf.reduce_sum(tf.pow(unlabeled_embeds2 - semi_mapped_212, 2), 1))
    # alpha_1 weighs the supervised terms; alpha_2 the semi-supervised ones.
    self.mapping_loss = self.args.alpha_1 * (map_loss_12 + map_loss_21) + \
                        self.args.alpha_2 * (map_loss_11 + map_loss_22)
    self.mapping_optimizer = generate_optimizer(self.mapping_loss, self.args.learning_rate,
                                                opt=self.args.optimizer)
def _define_alignment_graph(self):
    """Alignment objective over newly-labeled triples.

    Each triple is scored by the negative squared translation distance
    -||h + r - t||^2; the loss is the negative log-likelihood of
    sigmoid(score), summed over the batch.

    Fix: use ``tf.log_sigmoid`` instead of ``tf.log(tf.sigmoid(...))`` —
    the chained form underflows to log(0) = -inf for large distances,
    while log_sigmoid computes the same value in a numerically stable way.
    """
    self.new_h = tf.placeholder(tf.int32, shape=[None])
    self.new_r = tf.placeholder(tf.int32, shape=[None])
    self.new_t = tf.placeholder(tf.int32, shape=[None])
    phs = tf.nn.embedding_lookup(self.ent_embeds, self.new_h)
    prs = tf.nn.embedding_lookup(self.rel_embeds, self.new_r)
    pts = tf.nn.embedding_lookup(self.ent_embeds, self.new_t)
    # Negative squared TransE distance per triple.
    scores = -tf.reduce_sum(tf.pow(phs + prs - pts, 2), 1)
    self.alignment_loss = -tf.reduce_sum(tf.log_sigmoid(scores))
    self.alignment_optimizer = generate_optimizer(self.alignment_loss, self.args.learning_rate,
                                                  opt=self.args.optimizer)
def _define_mapping_graph_new(self):
    """Mapping objective over newly-added seed links, scaled by the
    ``new_param`` weight."""
    with tf.name_scope('seed_links_placeholder_new'):
        self.seed_entities1_new = tf.placeholder(tf.int32, shape=[None])
        self.seed_entities2_new = tf.placeholder(tf.int32, shape=[None])
    with tf.name_scope('seed_links_lookup_new'):
        emb1 = tf.nn.embedding_lookup(self.ent_embeds, self.seed_entities1_new)
        emb2 = tf.nn.embedding_lookup(self.ent_embeds, self.seed_entities2_new)
    with tf.name_scope('mapping_loss_new'):
        raw_loss = mapping_loss(emb1, emb2, self.mapping_mat, self.eye_mat)
        self.mapping_loss_new = self.args.new_param * raw_loss
        self.mapping_optimizer_new = generate_optimizer(self.mapping_loss_new,
                                                        self.args.learning_rate,
                                                        opt=self.args.optimizer)
def _define_unify_entity_mapping_graph(self):
    """Alignment loss pulling seed entity pairs together in the unified
    embedding space."""
    with tf.name_scope('entity_seed_links_placeholder'):
        self.seed_entities1 = tf.placeholder(tf.int32, shape=[None])
        self.seed_entities2 = tf.placeholder(tf.int32, shape=[None])
    with tf.name_scope('entity_seed_links_lookup'):
        seed_embeds1 = tf.nn.embedding_lookup(self.ent_embeds, self.seed_entities1)
        seed_embeds2 = tf.nn.embedding_lookup(self.ent_embeds, self.seed_entities2)
    with tf.name_scope('entity_mapping_loss'):
        self.entity_mapping_loss = alignment_loss(seed_embeds1, seed_embeds2)
        self.entity_mapping_optimizer = generate_optimizer(self.entity_mapping_loss,
                                                           self.args.learning_rate,
                                                           opt=self.args.optimizer)
def _define_alignment_graph(self):
    """Alignment objective over newly-labeled triples, scored through the
    model's six-way lookup and positive scorer.

    Fix: use ``tf.log_sigmoid(scores)`` instead of the chained
    ``tf.log(tf.sigmoid(scores))`` — the chained form underflows to
    log(0) = -inf for strongly negative scores; log_sigmoid is the
    numerically stable equivalent.
    """
    self.new_h = tf.placeholder(tf.int32, shape=[None])
    self.new_r = tf.placeholder(tf.int32, shape=[None])
    self.new_t = tf.placeholder(tf.int32, shape=[None])
    prh, prr, prt, pih, pir, pit = self.lookup_all(self.new_h, self.new_r, self.new_t)
    pos_scores = self._generate_scores(prh, prr, prt, pih, pir, pit, pos=True)
    # Negative log-likelihood of sigmoid(score), summed over the batch.
    self.alignment_loss = -tf.reduce_sum(tf.log_sigmoid(pos_scores))
    self.alignment_optimizer = generate_optimizer(self.alignment_loss, self.args.learning_rate,
                                                  opt=self.args.optimizer)
def _define_likelihood_graph(self):
    """Likelihood objective over a candidate-alignment matrix: maximize the
    weighted log-sigmoid similarity between entity pairs.

    The likelihood matrix spans validation links plus test entities and is
    fed in slices of ``likelihood_slice`` rows.

    Fix: use ``tf.log_sigmoid`` instead of ``tf.log(tf.sigmoid(...))`` —
    the chained form underflows to -inf for strongly negative
    similarities; log_sigmoid computes the same value stably.
    """
    self.entities1 = tf.placeholder(tf.int32, shape=[None])
    self.entities2 = tf.placeholder(tf.int32, shape=[None])
    dim = len(self.kgs.valid_links) + len(self.kgs.test_entities1)
    dim1 = self.args.likelihood_slice
    self.likelihood_mat = tf.placeholder(tf.float32, shape=[dim1, dim])
    ent1_embed = tf.nn.embedding_lookup(self.ent_embeds, self.entities1)
    ent2_embed = tf.nn.embedding_lookup(self.ent_embeds, self.entities2)
    # Pairwise log-sigmoid similarities, weighted by the likelihood matrix.
    mat = tf.log_sigmoid(tf.matmul(ent1_embed, ent2_embed, transpose_b=True))
    self.likelihood_loss = -tf.reduce_sum(tf.multiply(mat, self.likelihood_mat))
    self.likelihood_optimizer = generate_optimizer(self.likelihood_loss, self.args.learning_rate,
                                                   opt=self.args.optimizer)
def add_mapping_module(model):
    """Attach a seed-link mapping loss and its optimizer to ``model`` in
    place (placeholders, lookups, loss and optimizer become attributes)."""
    with tf.name_scope('seed_links_placeholder'):
        model.seed_entities1 = tf.placeholder(tf.int32, shape=[None])
        model.seed_entities2 = tf.placeholder(tf.int32, shape=[None])
    with tf.name_scope('seed_links_lookup'):
        seed_embeds1 = tf.nn.embedding_lookup(model.ent_embeds, model.seed_entities1)
        seed_embeds2 = tf.nn.embedding_lookup(model.ent_embeds, model.seed_entities2)
    with tf.name_scope('mapping_loss'):
        raw_loss = mapping_loss(seed_embeds1, seed_embeds2, model.mapping_mat, model.eye_mat)
        model.mapping_loss = model.args.alpha * raw_loss
        model.mapping_optimizer = generate_optimizer(model.mapping_loss,
                                                     model.args.learning_rate,
                                                     opt=model.args.optimizer)
def _define_embed_graph(self):
    """Positive-only triple objective using ``positive_loss`` with the 'L2'
    norm (no negative sampling)."""
    with tf.name_scope('triple_placeholder'):
        self.pos_hs = tf.placeholder(tf.int32, shape=[None])
        self.pos_rs = tf.placeholder(tf.int32, shape=[None])
        self.pos_ts = tf.placeholder(tf.int32, shape=[None])
    with tf.name_scope('triple_lookup'):
        h_embeds = tf.nn.embedding_lookup(self.ent_embeds, self.pos_hs)
        r_embeds = tf.nn.embedding_lookup(self.rel_embeds, self.pos_rs)
        t_embeds = tf.nn.embedding_lookup(self.ent_embeds, self.pos_ts)
    with tf.name_scope('triple_loss'):
        self.triple_loss = positive_loss(h_embeds, r_embeds, t_embeds, 'L2')
        self.triple_optimizer = generate_optimizer(self.triple_loss,
                                                   self.args.learning_rate,
                                                   opt=self.args.optimizer)
def _define_embed_graph(self):
    """Softplus logistic objective over labeled triples.

    ``label`` is presumably +1 for positive and -1 for negative triples —
    confirm against the feeding code.
    """
    with tf.name_scope('triple_placeholder'):
        self.hs = tf.placeholder(tf.int32, shape=[None])
        self.rs = tf.placeholder(tf.int32, shape=[None])
        self.ts = tf.placeholder(tf.int32, shape=[None])
        self.label = tf.placeholder(tf.float32, shape=[None])
    with tf.name_scope('triple_lookup'):
        phs = tf.nn.embedding_lookup(self.ent_embeds, self.hs)
        prs = tf.nn.embedding_lookup(self.rel_embeds, self.rs)
        pts = tf.nn.embedding_lookup(self.ent_embeds, self.ts)
    with tf.name_scope('triple_loss'):
        # Per-triple scalar score from the model's scoring function.
        res = tf.reduce_sum(self._calc(phs, pts, prs), 1, keep_dims=False)
        self.triple_loss = tf.reduce_mean(tf.nn.softplus(- self.label * res))
        # NOTE(review): optimizer is hard-coded to 'Adagrad' here, unlike
        # sibling graphs that honor self.args.optimizer — confirm intent.
        self.triple_optimizer = generate_optimizer(self.triple_loss, self.args.learning_rate,
                                                   opt='Adagrad')
def _define_common_space_learning_graph(self):
    """Align each entity's combined embedding with its relation-, image-
    and attribute-view embeddings, each term scaled by its view's loss
    weight; the total is further scaled by ``cv_weight`` for optimization."""
    with tf.name_scope('cross_name_view_placeholder'):
        self.cn_hs = tf.placeholder(tf.int32, shape=[None])
    with tf.name_scope('cross_name_view_lookup'):
        final_cn_phs = tf.nn.embedding_lookup(self.ent_embeds, self.cn_hs)
        cr_hs = tf.nn.embedding_lookup(self.rv_ent_embeds, self.cn_hs)
        ci_hs = tf.nn.embedding_lookup(self.iv_ent_embeds, self.cn_hs)
        ca_hs = tf.nn.embedding_lookup(self.av_ent_embeds, self.cn_hs)
    with tf.name_scope('cross_name_view_loss'):
        self.cross_name_loss = self.args.relation_loss_weight * alignment_loss(final_cn_phs, cr_hs)
        self.cross_name_loss += self.args.image_loss_weight * alignment_loss(final_cn_phs, ci_hs)
        self.cross_name_loss += self.args.attr_loss_weight * alignment_loss(final_cn_phs, ca_hs)
        # tf.summary.scalar('common space loss', self.cross_name_loss)
        self.cross_name_optimizer = generate_optimizer(self.args.cv_weight * self.cross_name_loss,
                                                       self.args.ITC_learning_rate,
                                                       opt=self.args.optimizer)
def _define_embed_graph(self):
    """Convolutional triple model: head and relation embeddings are
    reshaped into 2-D maps, stacked, passed through a conv layer plus a
    fully-connected projection, and trained against the true tails with
    sampled NCE loss.

    Fix: the fully-connected projection referenced ``tfcontrib.layers``,
    which is a NameError typo; it now calls
    ``tf.contrib.layers.fully_connected``.
    """
    with tf.name_scope('triple_placeholder'):
        self.pos_hs = tf.placeholder(tf.int32, shape=[None])
        self.pos_rs = tf.placeholder(tf.int32, shape=[None])
        self.pos_ts = tf.placeholder(tf.int32, shape=[None])
    with tf.name_scope('triple_lookup'):
        # Factor the embedding dim into an x*y 2-D map for the conv input.
        x, y = dim_factorization(self.args.dim)
        phs = tf.reshape(tf.nn.embedding_lookup(self.ent_embeds, self.pos_hs), [-1, 1, x, y])
        prs = tf.reshape(tf.nn.embedding_lookup(self.rel_embeds, self.pos_rs), [-1, 1, x, y])
        # Stack head over relation along the height axis (channels_first).
        stacked_inputs = tf.concat([phs, prs], 2)
        stacked_inputs = tf.layers.batch_normalization(stacked_inputs)
        stacked_inputs = tf.nn.dropout(stacked_inputs, self.args.output_keep_prob)
    with tf.variable_scope('cnn'):
        ocnn = tf.layers.conv2d(stacked_inputs, self.args.filter_num, self.kernel_size,
                                padding='same', use_bias=True, data_format='channels_first')
        ocnn = tf.layers.batch_normalization(ocnn, axis=1)  # channel axis for channels_first
        ocnn = tf.nn.relu(ocnn)
        ocnn = tf.nn.dropout(ocnn, self.args.output_keep_prob)
        ocnn = tf.reshape(ocnn, [-1, self.args.filter_num * self.args.dim * 2])
        # Fixed typo: was `tfcontrib.layers.fully_connected` (NameError).
        ocnn = tf.contrib.layers.fully_connected(ocnn, self.args.dim)
        ocnn = tf.layers.batch_normalization(ocnn)
    with tf.name_scope('triple_loss'):
        # True tail vs dnn_neg_nums sampled entities.
        triple_loss = tf.nn.nce_loss(
            weights=self.entity_w,
            biases=self.entity_b,
            labels=tf.reshape(self.pos_ts, [-1, 1]),
            inputs=ocnn,
            num_sampled=self.args.dnn_neg_nums,
            num_classes=self.kgs.entities_num,
            partition_strategy='div',
        )
        self.triple_loss = tf.reduce_sum(triple_loss)
        self.triple_optimizer = generate_optimizer(self.triple_loss, self.args.learning_rate,
                                                   opt=self.args.optimizer)
def _define_common_space_learning_graph(self):
    """Pull each entity's combined embedding toward its name-, relation-
    and attribute-view embeddings.

    Fix: the relation-view term used ``=`` instead of ``+=`` and silently
    discarded the ``cv_name_weight``-scaled name-view loss computed just
    before it; the sibling graph accumulates all terms with ``+=``.
    """
    with tf.name_scope('cross_name_view_placeholder'):
        self.cn_hs = tf.placeholder(tf.int32, shape=[None])
    with tf.name_scope('cross_name_view_lookup'):
        final_cn_phs = tf.nn.embedding_lookup(self.ent_embeds, self.cn_hs)
        cn_hs_names = tf.nn.embedding_lookup(self.name_embeds, self.cn_hs)
        cr_hs = tf.nn.embedding_lookup(self.rv_ent_embeds, self.cn_hs)
        ca_hs = tf.nn.embedding_lookup(self.av_ent_embeds, self.cn_hs)
    with tf.name_scope('cross_name_view_loss'):
        self.cross_name_loss = self.args.cv_name_weight * alignment_loss(final_cn_phs, cn_hs_names)
        # Was '=' here, which dropped the weighted name-view term above.
        self.cross_name_loss += alignment_loss(final_cn_phs, cr_hs)
        self.cross_name_loss += alignment_loss(final_cn_phs, ca_hs)
        self.cross_name_optimizer = generate_optimizer(self.args.cv_weight * self.cross_name_loss,
                                                       self.args.ITC_learning_rate,
                                                       opt=self.args.optimizer)
def _define_only_image_graph(self):
    """Image-view objective: project pre-trained image features into the
    embedding space with a dense tanh layer and pull each entity's
    image-view embedding toward the projection (logistic loss on
    positives)."""
    with tf.name_scope('images_embedding_placeholder'):
        self.img_p_es = tf.placeholder(tf.int32, shape=[None])
    with tf.name_scope('images_embedding_lookup'):
        i_pes = tf.nn.embedding_lookup(self.iv_ent_embeds, self.img_p_es)
        i_pis = tf.nn.embedding_lookup(self.image_embeds, self.img_p_es)
    with tf.variable_scope('images_cnn'):
        output_layer = tf.layers.dense(inputs=i_pis, units=self.args.dim, activation=tf.nn.tanh)
        # NOTE(review): tf.layers.dropout defaults to training=False, so
        # this is a no-op unless a training flag is wired in — confirm.
        dense = tf.layers.dropout(output_layer, rate=0.5)
        dense = tf.nn.l2_normalize(dense)  # important!!
        # Negative squared distance as score; logistic loss on positives.
        pos_score = -tf.reduce_sum(tf.square(i_pes - dense), 1)
        pos_score = tf.log(1 + tf.exp(-pos_score))
        self.only_image_loss = tf.reduce_sum(pos_score)
        # add loss weight
        self.only_image_loss *= self.args.only_image_loss_weight
        self.only_image_optimizer = generate_optimizer(self.only_image_loss, self.args.learning_rate,
                                                       opt=self.args.optimizer)
def _define_embed_graph(self):
    """Skip-gram-style attribute objective: predict co-occurring attributes
    from an input attribute embedding with sampled NCE loss."""
    with tf.name_scope('attribute_placeholder'):
        self.train_inputs = tf.placeholder(tf.int32, shape=[self.args.batch_size])
        self.train_labels = tf.placeholder(tf.int32, shape=[self.args.batch_size])
    with tf.name_scope('attribute_lookup'):
        self.train_inputs_embed = tf.nn.embedding_lookup(self.embeds, self.train_inputs)
    with tf.name_scope('attribute_nce_loss'):
        # NOTE(review): this overwrites the placeholder attribute with the
        # reshaped tensor, so later feeds of self.train_labels target the
        # reshape op and must supply a [batch_size, 1]-compatible array —
        # confirm against the feeding code.
        self.train_labels = tf.reshape(self.train_labels, [-1, 1])
        # Positional nce_loss args: weights, biases, labels, inputs,
        # num_sampled, num_classes.
        self.loss = tf.reduce_mean(
            tf.nn.nce_loss(self.nce_weights, self.nce_biases, self.train_labels,
                           self.train_inputs_embed, self.num_sampled_negs,
                           self.kgs.attributes_num))
        self.optimizer = generate_optimizer(self.loss, self.args.learning_rate, opt=self.opt)