Example #1
 import tensorflow as tf
 # `layers` is assumed to be the repository's own module providing sp_attn_head.

 def inference(inputs, nb_classes, nb_nodes, training, attn_drop, ffd_drop,
         bias_mat, hid_units, n_heads, activation=tf.nn.elu, 
         residual=False):
     attns = []
     for _ in range(n_heads[0]):
         attns.append(layers.sp_attn_head(inputs,  
             adj_mat=bias_mat,
             out_sz=hid_units[0], activation=activation, nb_nodes=nb_nodes,
             in_drop=ffd_drop, coef_drop=attn_drop, residual=False))
     h_1 = tf.concat(attns, axis=-1)
     h_2 = h_1  # snapshot of the first hidden layer's output, returned alongside the logits
     for i in range(1, len(hid_units)):
         h_old = h_1  # unused here; leftover from variants with an explicit residual connection
         attns = []
         for _ in range(n_heads[i]):
             attns.append(layers.sp_attn_head(h_1,  
                 adj_mat=bias_mat,
                 out_sz=hid_units[i], activation=activation, nb_nodes=nb_nodes,
                 in_drop=ffd_drop, coef_drop=attn_drop, residual=residual))
         h_1 = tf.concat(attns, axis=-1)
     out = []
     for i in range(n_heads[-1]):
         out.append(layers.sp_attn_head(h_1, adj_mat=bias_mat,
             out_sz=nb_classes, activation=lambda x: x, nb_nodes=nb_nodes,
             in_drop=ffd_drop, coef_drop=attn_drop, residual=False))
     logits = tf.add_n(out) / n_heads[-1]
 
     return logits, h_2
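All of these examples delegate the per-head computation to layers.sp_attn_head. As a rough mental model of what one head computes (a minimal dense sketch under that assumption; the repository's version is sparse and additionally handles dropout and residuals):

import tensorflow as tf

def dense_attn_head(h, bias_mat, out_sz, activation=tf.nn.elu):
    # h: (N, F) node features; bias_mat: (N, N) with 0 on edges and a large
    # negative value (e.g. -1e9) on non-edges, as in the examples here.
    f = int(h.shape[-1])
    w = tf.Variable(tf.random.normal([f, out_sz], stddev=0.1))
    seq = tf.matmul(h, w)                                   # (N, out_sz)
    a_src = tf.Variable(tf.random.normal([out_sz, 1], stddev=0.1))
    a_dst = tf.Variable(tf.random.normal([out_sz, 1], stddev=0.1))
    # Additive attention: e_ij = LeakyReLU(a_src . Wh_i + a_dst . Wh_j).
    logits = tf.matmul(seq, a_src) + tf.transpose(tf.matmul(seq, a_dst))
    coefs = tf.nn.softmax(tf.nn.leaky_relu(logits) + bias_mat)  # mask non-edges
    return activation(tf.matmul(coefs, seq))                # aggregate neighbours

Each hidden layer concatenates n_heads[i] such outputs along the feature axis, so the next layer receives n_heads[i] * hid_units[i] features per node.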
Example #2
    import tensorflow as tf
    # `layers` is assumed to be the repository's own module providing sp_attn_head.

    def inference(inputs,
                  nb_classes,
                  nb_nodes,
                  training,
                  attn_drop,
                  ffd_drop,
                  bias_mat,
                  adj_all_mat,
                  adj_neig_mat,
                  N_target_mat,
                  hid_units,
                  n_heads,
                  activation=tf.nn.elu,
                  residual=False):
        out = []
        for i in range(n_heads[-1]):
            out.append(
                layers.sp_attn_head(inputs,
                                    adj_mat=bias_mat,
                                    adj_all_mat=adj_all_mat,
                                    adj_neig_mat=adj_neig_mat,
                                    N_target_mat=N_target_mat,
                                    out_sz=nb_classes,
                                    activation=lambda x: x,
                                    nb_nodes=nb_nodes,
                                    in_drop=ffd_drop,
                                    coef_drop=attn_drop,
                                    residual=False))
        logits = tf.add_n(out) / n_heads[-1]

        return logits
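This variant is a single output layer: hid_units and training are accepted but unused, and each head maps the inputs directly to nb_classes logits. Output heads are averaged rather than concatenated so the class dimension is preserved; tf.add_n(out) / n_heads[-1] is just an elementwise mean over the heads:

import tensorflow as tf

heads = [tf.constant([[1., 2.]]), tf.constant([[3., 4.]])]
mean_a = tf.add_n(heads) / len(heads)                      # [[2., 3.]]
mean_b = tf.reduce_mean(tf.stack(heads, axis=0), axis=0)   # identical result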
Example #3
 import tensorflow as tf
 # `layers` is assumed to provide sp_attn_head and SimpleAttLayer.

 def encoder(inputs_list,
             nb_nodes,
             training,
             attn_drop,
             ffd_drop,
             bias_mat_local_list,
             bias_mat_global_list,
             hid_units,
             n_heads,
             mp_att_size=16,
             activation=tf.nn.elu,
             residual=False):
     embed_list = []
     for inputs, bias_mat_local, bias_mat_global in zip(
             inputs_list, bias_mat_local_list, bias_mat_global_list):
         attns = []
         for _ in range(n_heads):
             attn_temp = layers.sp_attn_head(inputs,
                                             adj_mat_local=bias_mat_local,
                                             adj_mat_global=bias_mat_global,
                                             out_sz=hid_units[0],
                                             activation=activation,
                                             in_drop=ffd_drop,
                                             coef_drop=attn_drop,
                                             residual=False)
             attns.append(attn_temp)
         h_1 = tf.concat(attns, axis=-1)
         embed_list.append(tf.expand_dims(tf.squeeze(h_1), axis=1))
     multi_embed = tf.concat(embed_list, axis=1)
     final_embed, alpha = layers.SimpleAttLayer(multi_embed,
                                                mp_att_size,
                                                time_major=False,
                                                return_alphas=True)
     return final_embed  # alpha (the per-view attention weights) is discarded here
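layers.SimpleAttLayer fuses the per-metapath embeddings in multi_embed (views stacked on axis 1) with a learned attention over the views. A plausible minimal version, assuming the common one-layer-MLP scoring followed by a softmax over views (a hypothetical reimplementation, not necessarily this repository's exact code):

import tensorflow as tf

def simple_att_layer(inputs, attention_size, return_alphas=False):
    # inputs: (batch, num_views, D) stacked per-metapath embeddings.
    d = int(inputs.shape[-1])
    w = tf.Variable(tf.random.normal([d, attention_size], stddev=0.1))
    b = tf.Variable(tf.random.normal([attention_size], stddev=0.1))
    u = tf.Variable(tf.random.normal([attention_size], stddev=0.1))
    v = tf.tanh(tf.tensordot(inputs, w, axes=1) + b)  # (batch, views, A)
    scores = tf.tensordot(v, u, axes=1)               # (batch, views)
    alphas = tf.nn.softmax(scores)                    # attention over views
    fused = tf.reduce_sum(inputs * tf.expand_dims(alphas, -1), axis=1)
    return (fused, alphas) if return_alphas else fused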
Example #4
 import tensorflow as tf
 # `layers` is assumed to be the repository's own module providing sp_attn_head.

 def attn_h_1(inputs,
              nb_classes,
              nb_nodes,
              training,
              attn_drop,
              ffd_drop,
              bias_mat,
              hid_units,
              n_heads,
              activation=tf.nn.elu,
              residual=False):
     attns = []
     for _ in range(n_heads[0]):
         attns.append(
             layers.sp_attn_head(inputs,
                                 adj_mat=bias_mat,
                                 out_sz=hid_units[0],
                                 activation=activation,
                                 nb_nodes=nb_nodes,
                                 in_drop=ffd_drop,
                                 coef_drop=attn_drop,
                                 residual=False))
     h_1 = tf.concat(attns, axis=-1)
     for i in range(1, len(hid_units)):
         h_old = h_1  # unused; see the residual note after this example
         attns = []
         for _ in range(n_heads[i]):
             attns.append(
                 layers.sp_attn_head(h_1,
                                     adj_mat=bias_mat,
                                     out_sz=hid_units[i],
                                     activation=activation,
                                     nb_nodes=nb_nodes,
                                     in_drop=ffd_drop,
                                     coef_drop=attn_drop,
                                     residual=residual))
         h_1 = tf.concat(attns, axis=-1)
     return h_1
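Note that h_old = h_1 is dead code here, as in Example #1: it is a leftover from variants that feed the previous layer's output into a residual connection, whereas in these snippets the residual flag is handled inside sp_attn_head itself. For reference, a minimal sketch of that residual rule under the usual match-or-project assumption (illustrative only, not this repository's exact code):

import tensorflow as tf

def add_residual(h_old, h_new):
    # h_old, h_new: (N, F) node features.
    # Add directly when widths match; otherwise project h_old first.
    if int(h_old.shape[-1]) == int(h_new.shape[-1]):
        return h_new + h_old
    w = tf.Variable(tf.random.normal([int(h_old.shape[-1]),
                                      int(h_new.shape[-1])], stddev=0.1))
    return h_new + tf.matmul(h_old, w)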
Example #5
    import tensorflow as tf
    # `layers` is assumed to provide sp_attn_head and Bilinear; FLAGS.alpha
    # must be defined elsewhere in the repository (see the note after this
    # example).

    def inference(inputs, nb_classes, nb_nodes, training, attn_drop, ffd_drop,
                  bias_mat, adj_hop1_all_mat, adj_hop2_all_mat,
                  adj_hop1_neig_mat, adj_hop2_neig_mat,
                  N_hop1_all_mat, N_hop2_all_mat,
                  hid_units, n_heads, activation=tf.nn.elu,
                  residual=False):
        # Hidden layer: n_heads[0] sparse attention heads, concatenated.
        attns = []
        for _ in range(n_heads[0]):
            attns.append(layers.sp_attn_head(
                inputs, adj_mat=bias_mat,
                adj_hop1_all_mat=adj_hop1_all_mat,
                adj_hop2_all_mat=adj_hop2_all_mat,
                adj_hop1_neig_mat=adj_hop1_neig_mat,
                adj_hop2_neig_mat=adj_hop2_neig_mat,
                N_hop1_all_mat=N_hop1_all_mat,
                N_hop2_all_mat=N_hop2_all_mat,
                out_sz=hid_units[0], activation=activation, nb_nodes=nb_nodes,
                in_drop=ffd_drop, coef_drop=attn_drop, residual=False))
        h_1 = tf.concat(attns, axis=-1)

        # Output layer: n_heads[-1] linear heads, averaged into class logits.
        out = []
        for i in range(n_heads[-1]):
            out.append(layers.sp_attn_head(
                h_1, adj_mat=bias_mat,
                adj_hop1_all_mat=adj_hop1_all_mat,
                adj_hop2_all_mat=adj_hop2_all_mat,
                adj_hop1_neig_mat=adj_hop1_neig_mat,
                adj_hop2_neig_mat=adj_hop2_neig_mat,
                N_hop1_all_mat=N_hop1_all_mat,
                N_hop2_all_mat=N_hop2_all_mat,
                out_sz=nb_classes, activation=lambda x: x, nb_nodes=nb_nodes,
                in_drop=ffd_drop, coef_drop=attn_drop, residual=False))
        logits = tf.add_n(out) / n_heads[-1]

        # Bilinear branch: a second set of class logits from the raw inputs.
        bi = []
        bi.append(layers.Bilinear(
            inputs, adj_mat=bias_mat,
            adj_hop1_all_mat=adj_hop1_all_mat,
            adj_hop2_all_mat=adj_hop2_all_mat,
            adj_hop1_neig_mat=adj_hop1_neig_mat,
            adj_hop2_neig_mat=adj_hop2_neig_mat,
            N_hop1_all_mat=N_hop1_all_mat,
            N_hop2_all_mat=N_hop2_all_mat,
            out_sz=nb_classes, activation=lambda x: x, nb_nodes=nb_nodes,
            in_drop=ffd_drop, coef_drop=attn_drop, residual=False))
        bi_logits = tf.add_n(bi)

        # Convex combination of the attention and bilinear logits.
        output = (1. - FLAGS.alpha) * logits + FLAGS.alpha * bi_logits
        return output
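FLAGS.alpha, the weight that mixes the bilinear logits into the attention logits, must be defined elsewhere in the repository. In TF1-style code this is typically declared with tf.app.flags; for example (the flag name matches the snippet, the default value is illustrative):

import tensorflow as tf

flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_float('alpha', 0.5, 'Weight of the bilinear logits in the output mix.')

With alpha = 0 the output reduces to the plain attention logits; with alpha = 1 only the bilinear branch is used.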
Example #6
    import tensorflow as tf
    # `layers` is assumed to be the repository's own module providing sp_attn_head.

    def inference(inputs,
                  nb_classes,
                  nb_nodes,
                  training,
                  attn_drop,
                  ffd_drop,
                  nnz,
                  bias_mat,
                  hid_units,
                  n_heads,
                  activation=tf.nn.elu,
                  intra_drop=None,
                  intra_activation=None,
                  scheme_norm=None,
                  scheme_init_std=None,
                  residual=False,
                  use_bias=True):
        attns = []
        for _ in range(n_heads[0]):
            attns.append(
                layers.sp_attn_head(inputs,
                                    adj_mat=bias_mat,
                                    out_sz=hid_units[0],
                                    activation=activation,
                                    nb_nodes=nb_nodes,
                                    in_drop=ffd_drop,
                                    coef_drop=attn_drop,
                                    residual=False,
                                    intra_drop=intra_drop,
                                    intra_activation=intra_activation,
                                    scheme_norm=scheme_norm,
                                    scheme_init_std=scheme_init_std,
                                    use_bias=use_bias))
        h_1 = tf.concat(attns, axis=-1)
        for i in range(1, len(hid_units)):
            h_old = h_1  # unused; leftover from residual variants
            attns = []
            for _ in range(n_heads[i]):
                attns.append(
                    layers.sp_attn_head(h_1,
                                        adj_mat=bias_mat,
                                        out_sz=hid_units[i],
                                        activation=activation,
                                        nb_nodes=nb_nodes,
                                        in_drop=ffd_drop,
                                        coef_drop=attn_drop,
                                        residual=residual,
                                        intra_drop=intra_drop,
                                        intra_activation=intra_activation,
                                        scheme_norm=scheme_norm,
                                        scheme_init_std=scheme_init_std,
                                        use_bias=use_bias))
            h_1 = tf.concat(attns, axis=-1)
        out = []
        for i in range(n_heads[-1]):
            out.append(
                layers.sp_attn_head(h_1,
                                    adj_mat=bias_mat,
                                    out_sz=nb_classes,
                                    activation=lambda x: x,
                                    nb_nodes=nb_nodes,
                                    in_drop=ffd_drop,
                                    coef_drop=attn_drop,
                                    residual=False,
                                    intra_drop=intra_drop,
                                    intra_activation=intra_activation,
                                    scheme_norm=scheme_norm,
                                    scheme_init_std=scheme_init_std,
                                    use_bias=use_bias))
        logits = tf.add_n(out) / n_heads[-1]

        return logits
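These inference functions only build the graph; the features, the bias matrix, and the dropout rates are fed at run time through TF1 placeholders. A sketch of the typical plumbing around them (names and shapes are illustrative assumptions, not taken from this repository):

import tensorflow as tf  # TensorFlow 1.x graph mode

# Hypothetical placeholder setup (Cora-sized numbers, purely illustrative).
nb_nodes, ft_size, nb_classes = 2708, 1433, 7
ftr_in = tf.placeholder(tf.float32, shape=(1, nb_nodes, ft_size))
bias_in = tf.sparse_placeholder(tf.float32)            # sparse neighbourhood mask
attn_drop = tf.placeholder_with_default(0., shape=())
ffd_drop = tf.placeholder_with_default(0., shape=())
is_train = tf.placeholder_with_default(False, shape=())
# logits = inference(ftr_in, nb_classes, nb_nodes, is_train, attn_drop,
#                    ffd_drop, nnz, bias_in, hid_units=[8], n_heads=[8, 1])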