Example #1
def encoder(inputs_list, nb_nodes, training, attn_drop, ffd_drop,
            bias_mat_list, inter_mat_list, hid_units, n_heads,
            activation=tf.nn.elu, residual=False, mp_att_size=512):
    # Placeholder defaults; both are overwritten below.
    coefs = tf.constant([1, 2])
    att_val = tf.constant([1, 2])
    embed_list_temp = []
    embed_list = []
    # One pass per view: each view supplies its own node features, attention
    # bias matrix, and interaction (adjacency) matrix.
    for inputs, bias_mat, inter_mat in zip(inputs_list, bias_mat_list,
                                           inter_mat_list):
        attns = []
        for _ in range(n_heads[0]):
            # GCN step: propagate features over the interaction matrix.
            inputs = layers.gcn_layer(inputs, inter_mat)
            # GAT step: one attention head over the propagated features.
            attns_temp, coefs = layers.attn_head(
                inputs_list[0], inputs, bias_mat=bias_mat,
                out_sz=hid_units[0], activation=activation,
                in_drop=ffd_drop, coef_drop=attn_drop)
            attns.append(attns_temp)
        # Concatenate all attention heads for this view.
        h_1 = tf.concat(attns, axis=-1)
        embed_list_temp.append(h_1)
        embed_list.append(tf.expand_dims(tf.squeeze(h_1), axis=1))
    # Stack the per-view embeddings and fuse them with a semantic-level
    # attention layer.
    multi_embed = tf.concat(embed_list, axis=1)
    final_embed, att_val = layers.SimpleAttLayer(
        embed_list_temp, inputs_list[0], multi_embed, mp_att_size,
        return_alphas=True)

    return final_embed, att_val, embed_list_temp
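
For context, a minimal sketch of how this encoder might be invoked on two metapath-based views. All shapes, placeholder constructions, and hyperparameter values below are illustrative assumptions, not part of the original source:

# Hypothetical driver; assumes the same `tf` and `layers` modules as above.
inputs_list = [tf.placeholder(tf.float32, [1, 3000, 128]) for _ in range(2)]
bias_mat_list = [tf.placeholder(tf.float32, [1, 3000, 3000]) for _ in range(2)]
inter_mat_list = [tf.placeholder(tf.float32, [1, 3000, 3000]) for _ in range(2)]
final_embed, att_val, per_view_embeds = encoder(
    inputs_list, nb_nodes=3000, training=True, attn_drop=0.5, ffd_drop=0.5,
    bias_mat_list=bias_mat_list, inter_mat_list=inter_mat_list,
    hid_units=[64], n_heads=[8], mp_att_size=128)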
Example #2
def __init__(self, ipi, opi, opo):
    # ipi/opi/opo are likely the input, hidden, and output feature dims.
    super(gcn, self).__init__()
    self.layer1 = gcn_layer(ipi, opi)
    self.layer2 = gcn_layer(opi, opo)
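
The snippet shows only the constructor. A plausible forward pass for such a two-layer module, assuming `gcn` is a `torch.nn.Module` and that each `gcn_layer` takes node features plus an adjacency matrix (an assumption, since the snippet does not show it), could look like:

import torch.nn.functional as F

def forward(self, x, adj):
    # First graph convolution, followed by a ReLU nonlinearity.
    x = F.relu(self.layer1(x, adj))
    # Second graph convolution produces the output features/logits.
    return self.layer2(x, adj)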
Example #3
def model_architecture(hyperparameters):
    """Sets the hyperparameters for the model."""

    input_pc = tf.placeholder(
        tf.float32,
        [None, hyperparameters.num_points, hyperparameters.num_features])
    input_graph = tf.placeholder(
        tf.float32,
        [None, hyperparameters.num_points * hyperparameters.num_points])
    output_label = tf.placeholder(tf.float32)

    scaled_laplacian = tf.reshape(
        input_graph,
        [-1, hyperparameters.num_points, hyperparameters.num_points])

    weights = tf.placeholder(tf.float32, [None])
    learning_rate = tf.placeholder(tf.float32)
    keep_prob_1 = tf.placeholder(tf.float32)
    keep_prob_2 = tf.placeholder(tf.float32)

    # first layer: graph convolution
    gcn_1 = layers.gcn_layer(input_pc, scaled_laplacian,
                             hyperparameters.num_points,
                             hyperparameters.num_features,
                             hyperparameters.num_gcn_1_output_features,
                             hyperparameters.chebyshev_1_order)
    gcn_1_output = tf.nn.dropout(gcn_1, rate=1 - keep_prob_1)
    gcn_1_pool = layers.global_pooling(
        gcn_1_output, hyperparameters.num_gcn_1_output_features)

    # second layer: graph convolution on the output of gcn_1 before pooling
    gcn_2 = layers.gcn_layer(gcn_1_output, scaled_laplacian,
                             hyperparameters.num_points,
                             hyperparameters.num_gcn_1_output_features,
                             hyperparameters.num_gcn_2_output_features,
                             hyperparameters.chebyshev_2_order)
    gcn_2_output = tf.nn.dropout(gcn_2, rate=1 - keep_prob_1)
    gcn_2_pool = layers.global_pooling(
        gcn_2_output, hyperparameters.num_gcn_2_output_features)

    # concatenate global features between gcn_1 and gcn_2
    global_features = tf.concat([gcn_1_pool, gcn_2_pool], axis=1)
    global_features = tf.nn.dropout(global_features, rate=1 - keep_prob_2)
    num_global_features = 2 * (hyperparameters.num_gcn_1_output_features +
                               hyperparameters.num_gcn_2_output_features)

    # first fully connected layer at the end
    fc_1 = layers.fully_connected(global_features, num_global_features,
                                  hyperparameters.num_fc_1_output_features)
    fc_1 = tf.nn.relu(fc_1)
    fc_1 = tf.nn.dropout(fc_1, rate=1 - keep_prob_2)

    # second fully connected layer
    fc_2 = layers.fully_connected(fc_1,
                                  hyperparameters.num_fc_1_output_features,
                                  hyperparameters.num_fc_2_output_features)

    # =========================================================================================================
    # LOSS AND BACKPROPAGATION
    # =========================================================================================================

    # hard prediction: threshold the sigmoid output at 0.5
    predict_label = tf.nn.sigmoid(fc_2) >= 0.5

    # weighted sigmoid cross-entropy loss
    loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=fc_2,
                                                   labels=output_label)
    loss = tf.reduce_mean(tf.multiply(loss, weights))

    train_vars = tf.trainable_variables()
    loss_reg = tf.add_n(
        [tf.nn.l2_loss(v) for v in train_vars if 'bias' not in v.name]) * 8e-6
    loss_total = loss + loss_reg

    # Python `==` on a TF1 tensor compares object identity, so use tf.equal.
    correct_prediction = tf.equal(predict_label, tf.equal(output_label, 1.0))
    accuracy = tf.cast(correct_prediction, tf.float32)
    accuracy = tf.reduce_mean(accuracy)

    train = tf.train.AdamOptimizer(
        learning_rate=learning_rate).minimize(loss_total)

    train_operation = {
        'train': train,
        'loss': loss,
        'loss_reg': loss_reg,
        'loss_total': loss_total,
        'accuracy': accuracy,
        'input_pc': input_pc,
        'input_graph': input_graph,
        'output_label': output_label,
        'weights': weights,
        'predict_label': predict_label,
        'keep_prob_1': keep_prob_1,
        'keep_prob_2': keep_prob_2,
        'learning_rate': learning_rate
    }

    return train_operation
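
A sketch of how this graph might be driven for one training step. The SimpleNamespace container, all hyperparameter values, and the random stand-in batches are assumptions made for illustration only:

import numpy as np
from types import SimpleNamespace

hp = SimpleNamespace(
    num_points=1024, num_features=3,
    num_gcn_1_output_features=64, num_gcn_2_output_features=128,
    chebyshev_1_order=3, chebyshev_2_order=3,
    num_fc_1_output_features=256, num_fc_2_output_features=1)
ops = model_architecture(hp)

# Random stand-in batch, just to show the feed_dict wiring.
B = 2
pc_batch = np.random.rand(B, 1024, 3).astype(np.float32)
graph_batch = np.random.rand(B, 1024 * 1024).astype(np.float32)
label_batch = np.random.randint(0, 2, size=(B, 1)).astype(np.float32)
weight_batch = np.ones(B, dtype=np.float32)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    _, batch_loss = sess.run(
        [ops['train'], ops['loss']],
        feed_dict={ops['input_pc']: pc_batch,
                   ops['input_graph']: graph_batch,
                   ops['output_label']: label_batch,
                   ops['weights']: weight_batch,
                   ops['learning_rate']: 1e-3,
                   ops['keep_prob_1']: 0.9,
                   ops['keep_prob_2']: 0.5})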
Example #4
def build_model(placeholders, info, batch_size=4, adj_channel_num=1,
                embedding_dim=10):
    in_adjs = placeholders["adjs"]
    features = placeholders["features"]
    in_nodes = placeholders["nodes"]
    labels = placeholders["labels"]
    mask = placeholders["mask"]
    dropout_rate = placeholders["dropout_rate"]
    wd_b = None
    wd_w = 0.1

    layer = features
    input_dim = info.feature_dim
    # If no input features are provided, learn a node embedding instead.
    if features is None:
        layer = emmbeding_layer("embeding", in_nodes, info.all_node_num,
                                embedding_dim, init_params_flag=True,
                                params=None)
        input_dim = embedding_dim
    # layer: batch_size x graph_node_num x dim

    # Block 1: graph conv -> ReLU -> graph max pooling -> batch norm -> dropout
    with tf.variable_scope("gcn_1") as scope:
        output_dim = 64
        layer = layers.gcn_layer("graph_conv", layer, in_adjs, input_dim,
                                 output_dim, adj_channel_num=adj_channel_num,
                                 node_num=info.graph_node_num,
                                 batch_size=batch_size)
        layer = tf.nn.relu(layer)
        input_dim = output_dim
    with tf.variable_scope("pooling_1") as scope:
        layer = layers.graph_max_pooling_layer(
            layer, in_adjs, input_dim, adj_channel_num=adj_channel_num,
            node_num=info.graph_node_num, batch_size=batch_size)
    with tf.variable_scope("bn_1") as scope:
        layer = layers.graph_batch_normalization(
            "bn", layer, input_dim, info.graph_node_num,
            init_params_flag=True, params=None)
    with tf.variable_scope("do_1") as scope:
        layer = layers.graph_dropout_layer(layer, info.graph_node_num,
                                           input_dim, dropout_rate)

    # Block 2: same structure, sigmoid activation, 128 output channels.
    with tf.variable_scope("gcn_2") as scope:
        output_dim = 128
        layer = layers.gcn_layer("graph_conv", layer, in_adjs, input_dim,
                                 output_dim, adj_channel_num=adj_channel_num,
                                 node_num=info.graph_node_num,
                                 batch_size=batch_size)
        layer = tf.sigmoid(layer)
        input_dim = output_dim
    with tf.variable_scope("pooling_2") as scope:
        layer = layers.graph_max_pooling_layer(
            layer, in_adjs, input_dim, adj_channel_num=adj_channel_num,
            node_num=info.graph_node_num, batch_size=batch_size)
    with tf.variable_scope("bn_2") as scope:
        layer = layers.graph_batch_normalization(
            "bn", layer, input_dim, info.graph_node_num,
            init_params_flag=True, params=None)
    with tf.variable_scope("do_2") as scope:
        layer = layers.graph_dropout_layer(layer, info.graph_node_num,
                                           input_dim, dropout_rate)

    # Block 3: same structure as block 2.
    with tf.variable_scope("gcn_3") as scope:
        output_dim = 128
        layer = layers.gcn_layer("graph_conv", layer, in_adjs, input_dim,
                                 output_dim, adj_channel_num=adj_channel_num,
                                 node_num=info.graph_node_num,
                                 batch_size=batch_size)
        layer = tf.sigmoid(layer)
        input_dim = output_dim
    with tf.variable_scope("pooling_3") as scope:
        layer = layers.graph_max_pooling_layer(
            layer, in_adjs, input_dim, adj_channel_num=adj_channel_num,
            node_num=info.graph_node_num, batch_size=batch_size)
    with tf.variable_scope("bn_3") as scope:
        layer = layers.graph_batch_normalization(
            "bn", layer, input_dim, info.graph_node_num,
            init_params_flag=True, params=None)
    with tf.variable_scope("do_3") as scope:
        layer = layers.graph_dropout_layer(layer, info.graph_node_num,
                                           input_dim, dropout_rate)

    # Per-node fully connected layer, then gather the node features into a
    # graph-level representation, then the final classification layer.
    with tf.variable_scope("fc4") as scope:
        output_dim = 64
        layer = layers.graph_fc_layer(
            "fc", layer, input_dim, output_dim, info.graph_node_num,
            init_params_flag=True, params=None, wd_w=wd_w, wd_b=wd_b,
            activate=tf.sigmoid, with_bn=False)
        input_dim = output_dim
    with tf.variable_scope("gathering") as scope:
        layer = layers.graph_gathering_layer(layer)
    with tf.variable_scope("fc5") as scope:
        output_dim = 2
        model = layers.fc_layer(
            "fc3", layer, input_dim, output_dim, init_params_flag=True,
            params=None, wd_w=wd_w, wd_b=wd_b, activate=None, with_bn=False)

    prediction = tf.nn.softmax(model)
    # Masked cross-entropy cost and metrics.
    cost = mask * tf.nn.softmax_cross_entropy_with_logits(labels=labels,
                                                          logits=model)
    cost_opt = tf.reduce_mean(cost)

    metrics = {}
    cost_sum = tf.reduce_sum(cost)

    correct_count = mask * tf.cast(
        tf.equal(tf.argmax(prediction, 1), tf.argmax(labels, 1)), tf.float32)
    metrics["correct_count"] = tf.reduce_sum(correct_count)
    return model, prediction, cost_opt, cost_sum, metrics
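
build_model only defines the graph and the cost. A minimal training step around it, assuming the placeholders dict, the info object, and the feed batches are built elsewhere, and that a plain Adam optimizer is an acceptable choice (the original does not show one), might look like:

model, prediction, cost_opt, cost_sum, metrics = build_model(placeholders, info)
train_step = tf.train.AdamOptimizer(1e-3).minimize(cost_opt)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # `feed` maps each entry of `placeholders` to a batch; its construction
    # is outside the scope of this example.
    _, loss_value, n_correct = sess.run(
        [train_step, cost_opt, metrics["correct_count"]], feed_dict=feed)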
Example #5
# The opening `with open(...)` for this first load is truncated in the
# original snippet; `labels_save_filename` is an assumed name that follows
# the pattern of the loads below. Pickle files are opened in binary mode.
with open(labels_save_filename, 'rb') as f:
    labels_all = pkl.load(f)
    print("labels load done")

with open(labels_for_test_save_filename, 'rb') as f:
    # Note: the training labels are loaded from the "test" filename; this
    # naming mismatch is present in the original source.
    labels_for_train = pkl.load(f)
    print("labels for test load done")

with open(sparse_save_filename, 'rb') as f:
    sparse = pkl.load(f)
    print("sparse load done")

# Normalize the adjacency matrix for GCN propagation.
sparse_matrix = preprocess_adj(sparse)

gcn = model.Model()
hidden_num = 15
layer1 = layers.gcn_layer("1", (features.shape[1], hidden_num),
                          sparse_matrix, 0)
layer2 = layers.gcn_layer("2", (hidden_num, 3), sparse_matrix, 1)
gcn.build_layer((layer1, layer2))

epochs = 500

init = tf.global_variables_initializer()  # initialize_all_variables() is deprecated
sess = tf.Session()
sess.run(init)

# Build the training op once so each iteration does not keep adding new
# optimizer ops to the graph.
train_op = gcn.train(0.001)
for i in range(epochs):
    sess.run(train_op,
             feed_dict={
                 gcn.inputs: features,
                 gcn.labels_for_train: labels_for_train
             })
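
After training, one might read predictions back out of the session. This assumes the Model class exposes its output tensor as `gcn.outputs`, which is a hypothetical attribute name not shown in the snippet:

# Hypothetical evaluation step; `gcn.outputs` is an assumed attribute.
pred = sess.run(tf.argmax(gcn.outputs, 1),
                feed_dict={gcn.inputs: features})
print("predicted classes:", pred)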