Example #1
import tensorflow as tf
from utils import layers  # project-local attention layers, as in the HAN codebase


def encoder(inputs_list,
            nb_nodes,
            training,
            attn_drop,
            ffd_drop,
            bias_mat_local_list,
            bias_mat_global_list,
            hid_units,
            n_heads,
            mp_att_size=16,
            activation=tf.nn.elu,
            residual=False):
    embed_list = []
    # Node-level attention: one multi-head sparse attention block per meta-path.
    for inputs, bias_mat_local, bias_mat_global in zip(
            inputs_list, bias_mat_local_list, bias_mat_global_list):
        attns = []
        for _ in range(n_heads):
            attn_temp = layers.sp_attn_head(inputs,
                                            adj_mat_local=bias_mat_local,
                                            adj_mat_global=bias_mat_global,
                                            out_sz=hid_units[0],
                                            activation=activation,
                                            in_drop=ffd_drop,
                                            coef_drop=attn_drop,
                                            residual=False)
            attns.append(attn_temp)
        h_1 = tf.concat(attns, axis=-1)  # concatenate the head outputs
        # 1 x N x D ==> N x 1 x D (assumes batch size 1)
        embed_list.append(tf.expand_dims(tf.squeeze(h_1), axis=1))
    # Semantic-level attention fuses the per-meta-path embeddings.
    multi_embed = tf.concat(embed_list, axis=1)
    final_embed, alpha = layers.SimpleAttLayer(multi_embed,
                                               mp_att_size,
                                               time_major=False,
                                               return_alphas=True)
    return final_embed  # alpha (the meta-path weights) is discarded here
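
All four examples fuse the per-meta-path embeddings with layers.SimpleAttLayer, whose implementation is not shown on this page. Below is a minimal sketch of the semantic-level attention such a layer typically computes in HAN-style codebases; the names simple_att_layer, w_omega, b_omega and u_omega are illustrative assumptions, not taken from the source.

import tensorflow as tf  # assumes TensorFlow 1.x

def simple_att_layer(inputs, attention_size, time_major=False, return_alphas=False):
    # inputs: nodes x num_metapaths x hidden when time_major is False
    if time_major:
        inputs = tf.transpose(inputs, [1, 0, 2])
    hidden_size = inputs.shape[-1].value

    # One-layer MLP scores each meta-path embedding against a context vector.
    w_omega = tf.Variable(tf.random_normal([hidden_size, attention_size], stddev=0.1))
    b_omega = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
    u_omega = tf.Variable(tf.random_normal([attention_size], stddev=0.1))

    v = tf.tanh(tf.tensordot(inputs, w_omega, axes=1) + b_omega)
    vu = tf.tensordot(v, u_omega, axes=1)   # nodes x num_metapaths
    alphas = tf.nn.softmax(vu)              # one attention weight per meta-path

    # Weighted sum over the meta-path axis.
    output = tf.reduce_sum(inputs * tf.expand_dims(alphas, -1), 1)
    return (output, alphas) if return_alphas else output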
Example #2
    def inference(inputs, nb_classes, nb_nodes, training, attn_drop, ffd_drop,
                  bias_mat_list, hid_units, n_heads, activation=tf.nn.elu,
                  residual=False, mp_att_size=128):
        embed_list = []
        # Node-level attention: one multi-head GAT block per meta-path.
        for bias_mat in bias_mat_list:
            attns = []
            for _ in range(n_heads[0]):
                attns.append(layers.attn_head(inputs, bias_mat=bias_mat,
                                              out_sz=hid_units[0], activation=activation,
                                              in_drop=ffd_drop, coef_drop=attn_drop,
                                              residual=False))
            h_1 = tf.concat(attns, axis=-1)
            # Optional deeper layers: one block per extra entry in hid_units.
            for i in range(1, len(hid_units)):
                attns = []
                for _ in range(n_heads[i]):
                    attns.append(layers.attn_head(h_1,
                                                  bias_mat=bias_mat,
                                                  out_sz=hid_units[i],
                                                  activation=activation,
                                                  in_drop=ffd_drop,
                                                  coef_drop=attn_drop,
                                                  residual=residual))
                h_1 = tf.concat(attns, axis=-1)
            # 1 x N x D ==> N x 1 x D (assumes batch size 1)
            embed_list.append(tf.expand_dims(tf.squeeze(h_1), axis=1))

        # Semantic-level attention fuses the per-meta-path embeddings.
        multi_embed = tf.concat(embed_list, axis=1)
        final_embed, att_val = layers.SimpleAttLayer(multi_embed, mp_att_size,
                                                     time_major=False,
                                                     return_alphas=True)
        # Classification head: average n_heads[-1] dense projections.
        out = []
        for _ in range(n_heads[-1]):
            out.append(tf.layers.dense(final_embed, nb_classes, activation=None))
        logits = tf.add_n(out) / n_heads[-1]
        logits = tf.expand_dims(logits, axis=0)  # restore the batch dimension
        return logits, final_embed, att_val
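
layers.attn_head is likewise not defined on this page. Assuming the usual GAT conventions (dense inputs of shape 1 x N x F; bias_mat of shape 1 x N x N with 0 on edges and a large negative value elsewhere), a minimal sketch might look like the following; this attn_head is a hypothetical stand-in, not the project's actual implementation.

import tensorflow as tf  # assumes TensorFlow 1.x

def attn_head(seq, bias_mat, out_sz, activation, in_drop=0.0,
              coef_drop=0.0, residual=False):
    # seq: 1 x N x F node features; bias_mat: 1 x N x N mask (0 on edges, -1e9 off).
    if in_drop != 0.0:
        seq = tf.nn.dropout(seq, 1.0 - in_drop)

    seq_fts = tf.layers.conv1d(seq, out_sz, 1, use_bias=False)  # shared linear map W

    # Additive attention a^T [Wh_i || Wh_j], split into two per-node scores.
    f_1 = tf.layers.conv1d(seq_fts, 1, 1)
    f_2 = tf.layers.conv1d(seq_fts, 1, 1)
    logits = f_1 + tf.transpose(f_2, [0, 2, 1])
    coefs = tf.nn.softmax(tf.nn.leaky_relu(logits) + bias_mat)  # masked softmax

    if coef_drop != 0.0:
        coefs = tf.nn.dropout(coefs, 1.0 - coef_drop)
    if in_drop != 0.0:
        seq_fts = tf.nn.dropout(seq_fts, 1.0 - in_drop)

    vals = tf.matmul(coefs, seq_fts)              # aggregate transformed neighbours
    ret = vals + tf.Variable(tf.zeros([out_sz]))  # learnable output bias

    if residual:
        if seq.shape[-1].value != ret.shape[-1].value:
            ret = ret + tf.layers.conv1d(seq, ret.shape[-1].value, 1)
        else:
            ret = ret + seq
    return activation(ret)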
Example #3
    def inference(inputs_list, nb_classes, nb_nodes, training, attn_drop, ffd_drop,
                  bias_mat_list, hid_units, n_heads, features, labels,
                  activation=tf.nn.elu, residual=False,
                  mp_att_size=200, feature_size=100):
        # Metric learning: project the raw features to feature_size dimensions.
        temp = inputs_list[0]
        MetricInputs = tf.layers.dense(temp, feature_size, activation=None)
        inputs_list = [MetricInputs]

        embed_list = []
        # Node-level attention: one multi-head GAT block per meta-path.
        for inputs, bias_mat in zip(inputs_list, bias_mat_list):
            attns = []
            for _ in range(n_heads[0]):
                attns.append(layers.attn_head(inputs, bias_mat=bias_mat,
                                              out_sz=hid_units[0], activation=activation,
                                              in_drop=ffd_drop, coef_drop=attn_drop,
                                              residual=False))
            h_1 = tf.concat(attns, axis=-1)

            # Optional deeper layers: one block per extra entry in hid_units.
            for i in range(1, len(hid_units)):
                attns = []
                for _ in range(n_heads[i]):
                    attns.append(layers.attn_head(h_1, bias_mat=bias_mat,
                                                  out_sz=hid_units[i],
                                                  activation=activation,
                                                  in_drop=ffd_drop,
                                                  coef_drop=attn_drop,
                                                  residual=residual))
                h_1 = tf.concat(attns, axis=-1)
            # 1 x N x D ==> N x 1 x D (assumes batch size 1)
            embed_list.append(tf.expand_dims(tf.squeeze(h_1), axis=1))

        # Semantic-level attention fuses the per-meta-path embeddings.
        multi_embed = tf.concat(embed_list, axis=1)
        final_embed, att_val = layers.SimpleAttLayer(multi_embed, mp_att_size,
                                                     time_major=False,
                                                     return_alphas=True)

        # One center per class, computed from the fused embeddings and labels.
        centers_embed = HeteGAT_multi.getCenters(len(set(labels)), feature_size,
                                                 labels, final_embed)
        centers_embed = tf.transpose(centers_embed)

        # Classification head: average n_heads[-1] dense projections.
        out = []
        for _ in range(n_heads[-1]):
            out.append(tf.layers.dense(final_embed, nb_classes, activation=None))
        logits = tf.add_n(out) / n_heads[-1]
        logits = tf.expand_dims(logits, axis=0)  # restore the batch dimension

        test_final_embed = tf.reduce_sum(MetricInputs, 0)
        return logits, final_embed, att_val, centers_embed, test_final_embed
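
HeteGAT_multi.getCenters is project-specific and not shown here. Under the natural reading of the call site (one center per class, derived from the labelled nodes' fused embeddings), a hypothetical stand-in could be:

import tensorflow as tf  # assumes TensorFlow 1.x

def get_centers(num_classes, feature_size, labels, embeddings):
    # Hypothetical stand-in for HeteGAT_multi.getCenters: the center of each
    # class is the mean embedding of that class's labelled nodes.
    # feature_size is kept only for signature parity with the call site.
    labels = tf.convert_to_tensor(labels, dtype=tf.int32)
    sums = tf.unsorted_segment_sum(embeddings, labels, num_classes)
    counts = tf.unsorted_segment_sum(tf.ones_like(embeddings[:, :1]), labels, num_classes)
    return sums / tf.maximum(counts, 1.0)  # num_classes x embedding_dim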
Example #4
File: gat.py Project: FrankXuCN/HAN
    def inference(inputs_list,
                  attn_drop,
                  ffd_drop,
                  bias_mat_list,
                  hid_units,
                  n_heads,
                  activation=tf.nn.elu,
                  residual=False,
                  mp_att_size=128):
        embed_list = []
        # Node-level attention: one multi-head GAT block per meta-path.
        for inputs, bias_mat in zip(inputs_list, bias_mat_list):
            attns = []
            for _ in range(n_heads[0]):  # e.g. 8 heads
                attns.append(
                    layers.attn_head(inputs,
                                     bias_mat=bias_mat,
                                     out_sz=hid_units[0],
                                     activation=activation,
                                     in_drop=ffd_drop,
                                     coef_drop=attn_drop,
                                     residual=False))
            h_1 = tf.concat(attns, axis=-1)  # 1 x X x (8*8)

            # Optional deeper layers: one block per extra entry in hid_units.
            for i in range(1, len(hid_units)):
                attns = []
                for _ in range(n_heads[i]):
                    attns.append(
                        layers.attn_head(h_1,
                                         bias_mat=bias_mat,
                                         out_sz=hid_units[i],
                                         activation=activation,
                                         in_drop=ffd_drop,
                                         coef_drop=attn_drop,
                                         residual=residual))
                h_1 = tf.concat(attns, axis=-1)
            # h_1: 1*X*64 ==> X*1*64. Uses tf.transpose rather than the
            # squeeze/expand_dims idiom of the other examples because X != 1.
            embed_list.append(tf.transpose(h_1, [1, 0, 2]))

        # len(inputs_list) tensors of X*1*64 ==> X*L*64
        multi_embed = tf.concat(embed_list, axis=1)
        final_embed, att_val = layers.SimpleAttLayer(multi_embed,
                                                     mp_att_size,
                                                     time_major=False,
                                                     return_alphas=True)
        # No classification head: this variant returns the fused embeddings
        # and the meta-path attention weights directly.
        return final_embed, att_val
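
A toy shape check for Example #4, assuming the method sits on a GAT class as in HAN-style repositories and that bias matrices follow the 0 / -1e9 masking convention; all sizes here are made up.

import numpy as np
import tensorflow as tf  # assumes TensorFlow 1.x

N, F = 5, 16  # toy graph: N nodes, F input features, two meta-paths
inputs_list = [tf.constant(np.random.rand(1, N, F), dtype=tf.float32)
               for _ in range(2)]
# Each mask keeps only the diagonal: 0 on self-edges, -1e9 elsewhere.
bias_mat_list = [tf.constant(-1e9 * (1.0 - np.eye(N))[np.newaxis], dtype=tf.float32)
                 for _ in range(2)]

final_embed, att_val = GAT.inference(inputs_list,
                                     attn_drop=0.0, ffd_drop=0.0,
                                     bias_mat_list=bias_mat_list,
                                     hid_units=[8], n_heads=[8, 1])

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    emb, att = sess.run([final_embed, att_val])
    print(emb.shape)  # (N, 64): 8 heads x 8 hidden units per node
    print(att.shape)  # (N, 2): one attention weight per meta-path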