Example #1
# tf.add_to_collection(name='graph_kernel', value=tf.cast(tf.constant(Lk), tf.float32))
#
# # Data Preprocessing
# data_file = f'PeMSD7_V_{n}.csv'
# n_train, n_val, n_test = 34, 5, 5
# PeMS = data_gen(pjoin('./dataset', data_file), (n_train, n_val, n_test), n, n_his + n_pred)
# print(f'>> Loading dataset with Mean: {PeMS.mean:.2f}, STD: {PeMS.std:.2f}')

# Load weighted adjacency matrix W
if args.graph == 'default':
    W = weight_matrix(pjoin('./dataset', 'W_228.csv'))
else:
    # load customized graph weight matrix
    W = weight_matrix(pjoin('./dataset', args.graph))

# Calculate graph kernel
L = scaled_laplacian(W)
# Alternative: first-order Chebyshev approximation via first_approx(W, n).
Lk = cheb_poly_approx(L, Ks, n)
tf.add_to_collection(name='graph_kernel', value=tf.cast(tf.constant(Lk), tf.float32))
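# Editorial note (assumption): scaled_laplacian(W) is expected to return the rescaled
# Laplacian L~ = 2 L / lambda_max - I_n, and cheb_poly_approx(L, Ks, n) to concatenate
# the Chebyshev polynomials T_0(L~), ..., T_{Ks-1}(L~) along the last axis. A quick
# sanity check on that layout:
assert Lk.shape == (n, Ks * n), f'unexpected graph kernel shape: {Lk.shape}'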

# Data Preprocessing
data_file = 'V_228.csv'
n_train, n_val, n_test = 34, 5, 5
PeMS = data_gen(pjoin('./dataset', data_file), (n_train, n_val, n_test), n, n_his + n_pred)
print(f'>> Loading dataset with Mean: {PeMS.mean:.2f}, STD: {PeMS.std:.2f}')
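# Editorial note (assumption): data_gen is expected to return a Dataset-like object
# holding z-score statistics plus per-split tensors shaped
# [num_frames, n_his + n_pred, n_route, 1]. A quick look at the splits:
for split in ('train', 'val', 'test'):
    print(f'>> {split} shape: {PeMS.get_data(split).shape}')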

if __name__ == '__main__':
    model_train(PeMS, blocks, args)
    model_test(PeMS, PeMS.get_len('test'), n_his, n_pred, args.inf_mode)
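For reference, below is a minimal NumPy sketch of what the two graph-kernel helpers used above (scaled_laplacian and cheb_poly_approx) are assumed to compute, following the Chebyshev-polynomial formulation of STGCN. The project's own math_graph utilities are authoritative; the _sketch names, the symmetric-W assumption, and the exact normalisation below are editorial assumptions, not the repository's implementation.

import numpy as np


def scaled_laplacian_sketch(W):
    """Rescaled graph Laplacian L~ = 2 L / lambda_max - I_n (sketch)."""
    n = W.shape[0]
    d = W.sum(axis=1)
    L = np.diag(d) - W                                    # combinatorial Laplacian
    with np.errstate(divide='ignore'):
        d_inv_sqrt = np.where(d > 0, 1.0 / np.sqrt(d), 0.0)
    L = L * np.outer(d_inv_sqrt, d_inv_sqrt)              # symmetric normalisation
    lambda_max = np.linalg.eigvalsh(L).max()
    return 2 * L / lambda_max - np.identity(n)


def cheb_poly_approx_sketch(L, Ks, n):
    """Stack Chebyshev polynomials T_0(L), ..., T_{Ks-1}(L) into an [n, Ks * n] kernel (sketch)."""
    T = [np.identity(n), L.copy()]
    for _ in range(2, Ks):
        T.append(2 * L @ T[-1] - T[-2])                   # T_k = 2 L T_{k-1} - T_{k-2}
    return np.concatenate(T[:Ks], axis=-1)

Under those assumptions, cheb_poly_approx_sketch(scaled_laplacian_sketch(W), Ks, n) yields the same [n, Ks * n] kernel layout that the call above stores in the 'graph_kernel' collection.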
Example #2
data_file = 'traffic_data.csv'
n_train, n_val, n_test = 3, 3, 3  # 61 in total
step_train, step_val, step_test = 8, 8, 8

PeMS = data_gen(pjoin('./qtraffic', data_file), (n_train, n_val, n_test),
                (step_train, step_val, step_test),
                n,
                n_his + n_pred,
                day_slot=96)
print(f'>> Loading dataset with Mean: {PeMS.mean:.2f}, STD: {PeMS.std:.2f}')
train_shape = PeMS.get_data('train').shape
val_shape = PeMS.get_data('val').shape
test_shape = PeMS.get_data('test').shape
print(f'>> Data shapes: train: {train_shape}, val: {val_shape}, test: {test_shape}')
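# Editorial note (assumption): day_slot=96 corresponds to 96 observations per day,
# i.e. a 15-minute sampling interval; n_train/n_val/n_test are read here as day counts
# (out of the 61 days noted above) and step_* as per-split sampling strides. Assuming
# frames are laid out as [num_frames, n_his + n_pred, n_route, C_0]:
assert train_shape[2] == n, 'route dimension should equal n_route'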

subset = read_pickle('qtraffic/road_subset.pk')
W = read_pickle('qtraffic/W.pk')
print(f'W.shape={W.shape}')

# Calculate graph kernel
L = scaled_laplacian(W)
# Alternative: first-order Chebyshev approximation via first_approx(W, n).
Lk = cheb_poly_approx(L, Ks, n)
tf.add_to_collection(name='graph_kernel',
                     value=tf.cast(tf.constant(Lk), tf.float32))
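# Editorial note: the pickled W is used directly as the weighted adjacency matrix, so
# its dimensions must match n_route; a cheap guard (assuming W is a dense NumPy array):
assert W.shape == (n, n), f'adjacency matrix {W.shape} does not match n_route={n}'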

if __name__ == '__main__':
    model_train(PeMS, blocks, args, load=False)
    model_test(PeMS, 100, n_his, n_pred, args.inf_mode)
    # os.system("shutdown -s -t 0");
    pass
Example #3
print(f'>> Loading dataset with Mean: {data.mean:.2f}, STD: {data.std:.2f}')

n, n_his, n_pred = args.n_route, args.n_his, args.n_pred
Ks, Kt = args.ks, args.kt
# blocks: channel sizes of the st_conv_blocks (bottleneck design)
#blocks = [[2, 32, 64], [64, 32, 128]]
blocks = [[2, 32, 64]]
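# Editorial note (assumption): each entry is one ST-Conv block given as
# [c_in, c_hidden, c_out]; the leading 2 suggests two input features per node, and the
# commented-out variant above stacks a second block in the paper's bottleneck style.
assert all(len(channels) == 3 for channels in blocks), 'each block needs [c_in, c_hidden, c_out]'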

# Load weighted adjacency matrix W
if args.trained_adj_mx:
    L = tf.get_variable('weight_matrix', shape=(n, n), dtype=tf.float32)
    Lk = cheb_poly_approx_tf(L, Ks, n)
    #W = weight_matrix(pjoin('./dataset', f'PeMSD7_W_{n}.csv'))
    tf.add_to_collection(name='graph_kernel', value=Lk)
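    # Editorial note (assumption): cheb_poly_approx_tf presumably mirrors
    # cheb_poly_approx with TensorFlow ops so that gradients can flow back into the
    # learned weight_matrix variable, which is why Lk is stored without tf.constant.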
else:
    # load customized graph weight matrix
    #W = weight_matrix(pjoin('./dataset', args.graph))
    w = np.load(data_folder + 'w.npy')
    #w = np.array(w, dtype=np.float32)
    W = get_rescaled_W(w, delta=args.delta, epsilon=args.epsilon)
    # Calculate graph kernel
    L = scaled_laplacian(W)
    # Alternative: first-order Chebyshev approximation via first_approx(W, n).
    Lk = cheb_poly_approx(L, Ks, n)
    tf.add_to_collection(name='graph_kernel',
                         value=tf.cast(tf.constant(Lk), tf.float32))
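    # Editorial note (assumption): get_rescaled_W presumably builds the thresholded
    # Gaussian kernel from the STGCN paper,
    #   W_ij = exp(-d_ij^2 / sigma^2) if i != j and the value exceeds a cut-off, else 0,
    # with delta controlling the kernel width, epsilon the sparsity cut-off, and
    # w.npy holding the raw pairwise distances d_ij.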

if __name__ == '__main__':
    model_train(data, blocks, args)
    model_test(data, data.get_len('test'), n_his, n_pred, args.inf_mode)
raw_data_path = pjoin(
    data_path, 'train_val'
)  # the folder containing the train and validation data used in the paper

indices = utcPlus3
if args.city == 'Berlin':
    indices = utcPlus2

traffic4cast_data = data_gen_traffic4cast(raw_data_path,
                                          process_data_dir,
                                          node_pos,
                                          args.seq_len,
                                          args.horizon,
                                          data_start=0,
                                          val_indices=indices,
                                          train_ratios=0.8,
                                          val_ratios=0.1)
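# Editorial note (assumption): train_ratios=0.8 with val_ratios=0.1 presumably leaves
# the remaining ~10% of frames for testing, and utcPlus2/utcPlus3 are read as the
# Traffic4cast evaluation time slots shifted to each city's offset from UTC.
# A quick look at the resulting split sizes:
for split in ('train', 'val', 'test'):
    print(f'>> {split}: {traffic4cast_data.get_len(split)} frames')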
# Data Preprocessing
# data_file = f'PeMSD7_V_{n}.csv'
# n_train, n_val, n_test = 34, 5, 5
# PeMS = data_gen(pjoin('./dataset', data_file), (n_train, n_val, n_test), n, n_his + n_pred)
print(
    f'>> Loading dataset with Mean: {traffic4cast_data.mean:.2f}, STD: {traffic4cast_data.std:.2f}'
)

if __name__ == '__main__':
    model_train(traffic4cast_data, blocks, args, output_dim=3)
    model_test(traffic4cast_data, traffic4cast_data.get_len('test'),
               args.seq_len, args.horizon, args.inf_mode)