Code example #1
    def __init__(self,
                 units,
                 support=1,
                 activation=None,
                 use_bias=True,
                 adj=None,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'), )
        super(GraphConvolution1, self).__init__(**kwargs)
        # Normalize the adjacency matrix once at construction time and keep a
        # dense copy for use by the layer. tf.sparse_tensor_to_dense is the TF1
        # name; TF2 exposes the same op as tf.sparse.to_dense.
        self.adj1 = calculate_laplacian(adj)
        self.adj = tf.sparse_tensor_to_dense(self.adj1)
        self.units = units
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.supports_masking = True

        # Number of adjacency supports (basis matrices) combined by the layer.
        self.support = support
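Every example on this page normalizes the adjacency matrix with calculate_laplacian before using it. For reference, below is a minimal sketch of the symmetric normalization D^-1/2 (A + I) D^-1/2 that such a helper typically computes. The body, the use of scipy.sparse, and the tf.SparseTensor return type are assumptions inferred from the call sites (example #1 densifies the result with tf.sparse_tensor_to_dense); example #4 calls .toarray() on its result, so the projects these snippets come from evidently use slightly different variants.

import numpy as np
import scipy.sparse as sp
import tensorflow as tf

def calculate_laplacian_sketch(adj):
    # Hypothetical sketch, not the implementation used by the examples on this page.
    adj = sp.coo_matrix(adj) + sp.eye(adj.shape[0])           # A + I (self-loops)
    degree = np.array(adj.sum(axis=1)).flatten()              # node degrees
    d_inv_sqrt = np.power(degree, -0.5)
    d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.0                    # guard isolated nodes
    d_mat = sp.diags(d_inv_sqrt)
    normalized = (d_mat @ adj @ d_mat).tocoo()                # D^-1/2 (A+I) D^-1/2
    indices = np.column_stack((normalized.row, normalized.col)).astype(np.int64)
    return tf.SparseTensor(indices, normalized.data.astype(np.float32),
                           normalized.shape)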
Code example #2
    def __init__(self, num_units, adj, num_nodes, input_size=None,
                 act=tf.nn.tanh, reuse=None):
        super(tgcnCell, self).__init__(_reuse=reuse)
        self._act = act
        self._nodes = num_nodes
        self._units = num_units
        # Pre-compute the normalized Laplacian once; the cell reuses it at every step.
        self._adj = []
        self._adj.append(calculate_laplacian(adj))
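A minimal usage sketch for the cell above, assuming the TF1 RNNCell machinery implied by the super() call; the unit count and the names adj, num_nodes and inputs are placeholders standing in for whatever the surrounding model defines.

# Hypothetical wiring (values and tensor names are placeholders).
gru_units = 64
cell = tgcnCell(gru_units, adj, num_nodes=num_nodes)
outputs, states = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)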
Code example #3
    def __init__(self, num_units, adj, inputs, output_dim, activation=tf.nn.tanh,
                 input_size=None, num_proj=None, reuse=None, **kwargs):
        super(GCN, self).__init__(**kwargs)
        if input_size is not None:
            logging.warning("%s: The input_size parameter is deprecated.", self)
        self._num_units = num_units
        self._output_dim = output_dim
        self._inputs = inputs
        # inputs is laid out as (batch, seq_len, num_nodes).
        self._num_nodes = inputs.get_shape()[2].value
        self._input_dim = inputs.get_shape()[1].value  # seq_len
        self._batch_size = tf.shape(inputs)[0]
        self._adj = []
        self._adj.append(calculate_laplacian(adj))
        self._activation = activation
        self._gconv()
Code example #4

def evaluation(y_true, y_pred):
    # The function and argument names and the metric computations below are
    # assumptions, reconstructed to match the return statement. They follow the
    # usual T-GCN conventions: RMSE, MAE, accuracy (1 - relative Frobenius
    # error), R^2 and explained variance.
    rmse = np.sqrt(np.mean((y_true - y_pred) ** 2))
    mae = np.mean(np.abs(y_true - y_pred))
    F_norm = np.linalg.norm(y_true - y_pred, 'fro') / np.linalg.norm(y_true, 'fro')
    r2 = 1 - ((y_true - y_pred) ** 2).sum() / ((y_true - y_true.mean()) ** 2).sum()
    var = 1 - np.var(y_true - y_pred) / np.var(y_true)
    return rmse, mae, 1 - F_norm, r2, var


x_axe, batch_loss, batch_rmse, batch_pred = [], [], [], []
test_loss, test_rmse, test_mae, test_acc, test_r2, test_var, test_pred = [], [], [], [], [], [], []

# STGCN: Chebyshev polynomial approximation of the scaled graph Laplacian.
n = num_nodes
n_his = seq_len
keep_rate = 1
total_W = np.array(adj_sampler.adj.todense()).astype(np.float32)
L = scaled_laplacian(total_W)
total_LK = cheb_poly_approx(L, Ks, n)

# TGCN or TGCN_att: symmetrically normalized Laplacian of the full adjacency.
TGCN_total_adj = calculate_laplacian(adj_sampler.adj.todense()).toarray()

for epoch in range(training_epoch):
    # Re-sample the graph each epoch: drop vertices in 'node' mode, drop edges
    # in 'edge' mode, or keep the full adjacency in 'out' mode.
    if dropmode == 'node':
        tmp_adj = adj_sampler.randomvertex_sampler(ne_keep_prob)
    elif dropmode == 'edge':
        tmp_adj = adj_sampler.randomedge_sampler(ne_keep_prob)
    elif dropmode == 'out':
        tmp_adj = adj_sampler.adj.todense()

    W = np.array(tmp_adj).astype(np.float32)
    L = scaled_laplacian(W)
    # Alternative approximation method: 1st approx - first_approx(W, n).
    Lk = cheb_poly_approx(L, Ks, n)
    # tf.add_to_collection(name='graph_kernel', value=tf.cast(tf.constant(Lk), tf.float32))
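The commented-out tf.add_to_collection call above suggests the Chebyshev kernel was originally baked into the TensorFlow graph as a constant. Because Lk is recomputed from a freshly sampled adjacency every epoch here, feeding it through a placeholder is the usual alternative; the placeholder name, the kernel shape and the training-step call below are assumptions rather than part of the source.

# Hypothetical wiring (name and shape are assumptions): declare the kernel as a
# placeholder once, before the epoch loop, so the sampled graph can change
# between epochs without rebuilding the TensorFlow graph.
graph_kernel = tf.placeholder(tf.float32, shape=[n, Ks * n], name='graph_kernel')

# Inside the epoch loop, each training step then feeds the current kernel:
#     sess.run(train_op, feed_dict={graph_kernel: Lk, x: x_batch, y: y_batch})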
Code example #5
def eval_model():
    pass


if __name__ == "__main__":
    args = parser()
    scaler = MinMaxScaler(feature_range=(0, 1))

    ###### load data ######

    train_data, test_data, adj = get_taxi_demand(file_name)
    num_nodes = train_data.shape[1]

    #### normalization
    adj = calculate_laplacian(adj)
    train_data = scaler.fit_transform(train_data)
    # Reuse the statistics fitted on the training split; refitting the scaler on
    # the test split would leak test statistics into the normalization.
    test_data = scaler.transform(test_data)

    X_train, y_train1, y_train2, X_test, y_test1, y_test2 = preprocess_data(
        train_data, test_data, args.lag)

    print('trainX', X_train.shape)
    print('trainY', y_train1.shape)
    print('testX', X_test.shape)
    print('y_test1', y_test1.shape)
    print('y_test2', y_test2.shape)

    start = time.perf_counter()  # time.clock() was removed in Python 3.8
    y_predict = train_model(X_train, y_train2, X_test, y_test2)
    data = pd.DataFrame(y_predict)
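The predictions returned by train_model are still in the MinMax-scaled space the inputs were transformed into above. A minimal follow-up sketch, assuming y_predict keeps the same (samples, nodes) layout the scaler was fitted on; the output file name is a placeholder.

    # Hypothetical post-processing (file name is a placeholder): undo the MinMax
    # scaling before saving or evaluating the predictions.
    y_predict_rescaled = scaler.inverse_transform(y_predict)
    pd.DataFrame(y_predict_rescaled).to_csv('y_predict.csv', index=False)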