Example #1
0
def TGCN(_X, weights, biases):
    """Build a T-GCN graph with a self-attention readout over time.

    Args:
        _X: input tensor whose axis 1 is the time dimension; it is unstacked
            into per-step tensors for static_rnn. Presumably shaped
            (batch, seq_len, num_nodes * features) -- TODO confirm at caller.
        weights: dict providing the 'out' projection matrix.
        biases: dict providing the 'out' bias vector.

    Returns:
        Tuple (output, outputs, states, alpha):
        output  -- prediction tensor reshaped to (-1, num_nodes),
        outputs -- the per-step RNN outputs,
        states  -- the final RNN states,
        alpha   -- attention weights produced by self_attention1.

    Relies on module-level globals: tgcnCell, gru_units, adj, num_nodes,
    seq_len, pre_len, weight_att, bias_att, self_attention1.
    """
    cell_1 = tgcnCell(gru_units, adj, num_nodes=num_nodes)
    cell = tf.nn.rnn_cell.MultiRNNCell([cell_1], state_is_tuple=True)
    # Split the time axis into a Python list of per-step tensors.
    _X = tf.unstack(_X, axis=1)
    outputs, states = tf.nn.static_rnn(cell, _X, dtype=tf.float32)

    # Stack all step outputs, then reorder to
    # (batch, seq_len, num_nodes, gru_units) for the attention layer.
    out = tf.concat(outputs, axis=0)
    out = tf.reshape(out, shape=[seq_len, -1, num_nodes, gru_units])
    out = tf.transpose(out, perm=[1, 0, 2, 3])

    # Attention-weighted summary over the time dimension.
    last_output, alpha = self_attention1(out, weight_att, bias_att)

    # Project each node's seq_len-dimensional summary to pre_len predictions,
    # then flatten time-major to rows of num_nodes values.
    output = tf.reshape(last_output, shape=[-1, seq_len])
    output = tf.matmul(output, weights['out']) + biases['out']
    output = tf.reshape(output, shape=[-1, num_nodes, pre_len])
    output = tf.transpose(output, perm=[0, 2, 1])
    output = tf.reshape(output, shape=[-1, num_nodes])

    return output, outputs, states, alpha
Example #2
0
File: main.py  Project: troyyxk/GcnLstm
def TGCN(_X, _weights, _biases):
    """Build a T-GCN graph and project the last RNN step to predictions.

    Args:
        _X: input tensor whose axis 1 is the time dimension; unstacked into
            per-step tensors for static_rnn.
        _weights: dict providing the 'out' projection matrix.
        _biases: dict providing the 'out' bias vector.

    Returns:
        Tuple (output, m, states):
        output -- prediction tensor reshaped to (-1, num_nodes),
        m      -- per-step outputs flattened to (-1, gru_units),
        states -- the final RNN states.

    Relies on module-level globals: tgcnCell, gru_units, adj, num_nodes,
    num_features, pre_len.
    """
    cell_1 = tgcnCell(gru_units,
                      adj,
                      num_nodes=num_nodes,
                      num_features=num_features)
    cell = tf.nn.rnn_cell.MultiRNNCell([cell_1], state_is_tuple=True)
    _X = tf.unstack(_X, axis=1)
    outputs, states = tf.nn.static_rnn(cell, _X, dtype=tf.float32)
    # Flatten each step's output to (-1, gru_units). The original's
    # intermediate reshape to [-1, num_nodes, gru_units] was a no-op
    # (its own comment said removing it "makes no difference") and the
    # leftover debug prints have been dropped.
    m = [tf.reshape(step, shape=[-1, gru_units]) for step in outputs]
    last_output = m[-1]
    # Keep only the first GRU unit per row.
    # NOTE(review): this yields a rank-1 tensor feeding tf.matmul below,
    # which requires rank >= 2 -- confirm this path actually runs.
    last_output = last_output[:, 0]
    output = tf.matmul(last_output, _weights['out']) + _biases['out']
    # Reshape to (batch, num_nodes, pre_len), then to pre_len-major rows.
    output = tf.reshape(output, shape=[-1, num_nodes, pre_len])
    output = tf.transpose(output, perm=[0, 2, 1])
    output = tf.reshape(output, shape=[-1, num_nodes])
    return output, m, states
Example #3
0
File: main.py  Project: Advitya17/T-GCN
 def TGCN(_X, _weights, _biases):
     """Run the stacked T-GCN cell over time and project the final step.

     Returns (output, m, states): the (-1, num_nodes) prediction tensor,
     the flattened per-step outputs, and the final RNN states.
     """
     gcn_cell = tgcnCell(gru_units, adj, num_nodes=num_nodes)
     stacked_cell = tf.nn.rnn_cell.MultiRNNCell([gcn_cell], state_is_tuple=True)
     # One tensor per time step, as static_rnn expects.
     time_steps = tf.unstack(_X, axis=1)
     outputs, states = tf.nn.static_rnn(stacked_cell, time_steps, dtype=tf.float32)
     # Flatten every step's output down to (-1, gru_units).
     m = [
         tf.reshape(
             tf.reshape(step, shape=[-1, num_nodes, gru_units]),
             shape=[-1, gru_units],
         )
         for step in outputs
     ]
     last_output = m[-1]
     output = tf.matmul(last_output, _weights['out']) + _biases['out']
     output = tf.reshape(output, shape=[-1, num_nodes, pre_len])
     output = tf.transpose(output, perm=[0, 2, 1])
     output = tf.reshape(output, shape=[-1, num_nodes])
     return output, m, states
def TGCN_att(_X, weights, biases, tmp_adj, keep_rate):
    """T-GCN variant taking a dense adjacency and dropout keep rate,
    with a self-attention readout over the time dimension.

    Returns (output, outputs, states): the (-1, num_nodes) prediction
    tensor, the per-step RNN outputs, and the final RNN states.
    """
    # The cell consumes a sparse adjacency; convert from dense up front.
    tmp_adj = tf.sparse.from_dense(tmp_adj)
    base_cell = tgcnCell(gru_units, tmp_adj, keep_rate, num_nodes=num_nodes)
    rnn = tf.nn.rnn_cell.MultiRNNCell([base_cell], state_is_tuple=True)
    time_steps = tf.unstack(_X, axis=1)

    outputs, states = tf.nn.static_rnn(rnn, time_steps, dtype=tf.float32)

    # Stack step outputs and reorder to (batch, seq_len, num_nodes, gru_units).
    history = tf.concat(outputs, axis=0)
    history = tf.reshape(history, shape=[seq_len, -1, num_nodes, gru_units])
    history = tf.transpose(history, perm=[1, 0, 2, 3])

    # Attention-weighted summary over time (alpha is discarded here).
    last_output, alpha = self_attention1(history, weight_att, bias_att)

    # Project each node's seq_len summary to pre_len predictions, then
    # flatten to rows of num_nodes values.
    projected = tf.reshape(last_output, shape=[-1, seq_len])
    projected = tf.matmul(projected, weights['out']) + biases['out']
    projected = tf.reshape(projected, shape=[-1, num_nodes, pre_len])
    projected = tf.transpose(projected, perm=[0, 2, 1])
    projected = tf.reshape(projected, shape=[-1, num_nodes])

    return projected, outputs, states