Example #1
# tf.add_to_collection(name='graph_kernel', value=tf.cast(tf.constant(Lk), tf.float32))
#
# # Data Preprocessing
# data_file = f'PeMSD7_V_{n}.csv'
# n_train, n_val, n_test = 34, 5, 5
# PeMS = data_gen(pjoin('./dataset', data_file), (n_train, n_val, n_test), n, n_his + n_pred)
# print(f'>> Loading dataset with Mean: {PeMS.mean:.2f}, STD: {PeMS.std:.2f}')

# Load weighted adjacency matrix W
if args.graph == 'default':
    W = weight_matrix(pjoin('./dataset', 'W_228.csv'))
else:
    # load customized graph weight matrix
    W = weight_matrix(pjoin('./dataset', args.graph))

# Calculate graph kernel
L = scaled_laplacian(W)
# Alternative approximation method: 1st approx - first_approx(W, n).
Lk = cheb_poly_approx(L, Ks, n)
tf.add_to_collection(name='graph_kernel', value=tf.cast(tf.constant(Lk), tf.float32))

# Data Preprocessing
data_file = 'V_228.csv'
n_train, n_val, n_test = 34, 5, 5
PeMS = data_gen(pjoin('./dataset', data_file), (n_train, n_val, n_test), n, n_his + n_pred)
print(f'>> Loading dataset with Mean: {PeMS.mean:.2f}, STD: {PeMS.std:.2f}')

if __name__ == '__main__':
    model_train(PeMS, blocks, args)
    model_test(PeMS, PeMS.get_len('test'), n_his, n_pred, args.inf_mode)
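
For reference, the graph-kernel helpers used in these snippets follow the standard ChebNet construction: scaled_laplacian rescales the symmetric normalized Laplacian to [-1, 1] via L~ = 2L/lambda_max - I, and cheb_poly_approx stacks the Chebyshev polynomials T_k(L~) built with the recurrence T_k(x) = 2x*T_{k-1}(x) - T_{k-2}(x). A minimal NumPy sketch of that behavior (the actual implementations may differ in details such as how lambda_max is estimated):

import numpy as np

def scaled_laplacian_sketch(W):
    """Rescaled symmetric normalized Laplacian: L~ = 2L/lambda_max - I."""
    n = W.shape[0]
    d = np.sum(W, axis=1)
    d_inv_sqrt = 1.0 / np.sqrt(np.maximum(d, 1e-12))  # guard isolated nodes
    L = np.identity(n) - d_inv_sqrt[:, None] * W * d_inv_sqrt[None, :]
    lambda_max = np.linalg.eigvalsh(L).max()
    return 2.0 * L / lambda_max - np.identity(n)

def cheb_poly_approx_sketch(L, Ks, n):
    """Concatenate T_0(L)..T_{Ks-1}(L) into an n x (Ks*n) graph kernel."""
    T = [np.identity(n), L]
    for _ in range(2, Ks):
        T.append(2.0 * L @ T[-1] - T[-2])  # T_k = 2*L*T_{k-1} - T_{k-2}
    return np.concatenate(T[:Ks], axis=1)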
Example #2
File: main.py Project: zzz2010/Contrib
def main(args):
    """main"""
    # PeMS = data_gen_mydata(args.input_file, args.label_file, args.n_route, args.n_his,
    # args.n_pred, (args.n_val, args.n_test))
    PeMS = data_gen_custom(args.input_file, args.label_file, args.city_file,
                           args.n_route, args.n_his, args.n_pred,
                           (args.n_val, args.n_test))

    log.info(PeMS.get_stats())
    log.info(PeMS.get_len('train'))

    gf = GraphFactory(args)

    place = fluid.CUDAPlace(0) if args.use_cuda else fluid.CPUPlace()
    train_program = fluid.Program()
    startup_program = fluid.Program()

    with fluid.program_guard(train_program, startup_program):
        gw = pgl.graph_wrapper.GraphWrapper("gw",
                                            place,
                                            node_feat=[('norm', [None, 1],
                                                        "float32")],
                                            edge_feat=[('weights', [None, 1],
                                                        "float32")])

        model = STGCNModel(args, gw)
        train_loss, y_pred = model.forward()

    infer_program = train_program.clone(for_test=True)

    with fluid.program_guard(train_program, startup_program):
        epoch_step = int(PeMS.get_len('train') / args.batch_size) + 1
        lr = fl.exponential_decay(learning_rate=args.lr,
                                  decay_steps=5 * epoch_step,
                                  decay_rate=0.7,
                                  staircase=True)
        if args.opt == 'RMSProp':
            train_op = fluid.optimizer.RMSPropOptimizer(lr).minimize(
                train_loss)
        elif args.opt == 'ADAM':
            train_op = fluid.optimizer.Adam(lr).minimize(train_loss)

    exe = fluid.Executor(place)
    exe.run(startup_program)

    if args.inf_mode == 'sep':
        # for inference mode 'sep', the type of step index is int.
        step_idx = args.n_pred - 1
        tmp_idx = [step_idx]
        min_val = min_va_val = np.array([4e1, 1e5, 1e5])
    elif args.inf_mode == 'merge':
        # for inference mode 'merge', the type of step index is np.ndarray.
        step_idx = tmp_idx = np.arange(3, args.n_pred + 1, 3) - 1
        # one (MAPE, MAE, RMSE) triple per evaluated step: tile the list,
        # don't scale the values
        min_val = min_va_val = np.array([4e1, 1e5, 1e5] * len(step_idx))
    else:
        raise ValueError(f'ERROR: test mode "{args.inf_mode}" is not defined.')

    step = 0
    for epoch in range(1, args.epochs + 1):
        for idx, x_batch in enumerate(
                gen_batch(PeMS.get_data('train'),
                          args.batch_size,
                          dynamic_batch=True,
                          shuffle=True)):

            x = np.array(x_batch[:, 0:args.n_his, :, :], dtype=np.float32)
            graph = gf.build_graph(x)
            feed = gw.to_feed(graph)
            feed['input'] = np.array(x_batch[:, 0:args.n_his + 1, :, :],
                                     dtype=np.float32)
            b_loss, b_lr = exe.run(train_program,
                                   feed=feed,
                                   fetch_list=[train_loss, lr])

            if idx % 5 == 0:
                log.info("epoch %d | step %d | lr %.6f | loss %.6f" %
                         (epoch, idx, b_lr[0], b_loss[0]))

        min_va_val, min_val = model_inference(exe, gw, gf, infer_program,
                                              y_pred, PeMS, args, step_idx,
                                              min_va_val, min_val)

        for ix in tmp_idx:
            va, te = min_va_val[ix - 2:ix + 1], min_val[ix - 2:ix + 1]
            print(f'Time Step {ix + 1}: '
                  f'MAPE {va[0]:7.3%}, {te[0]:7.3%}; '
                  f'MAE  {va[1]:4.3f}, {te[1]:4.3f}; '
                  f'RMSE {va[2]:6.3f}, {te[2]:6.3f}.')

        if epoch % 5 == 0:
            model_test(exe, gw, gf, infer_program, y_pred, PeMS, args)
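
A note on the batching above: gen_batch is called with dynamic_batch=True so the final, shorter batch is kept rather than dropped. A minimal sketch of that iteration pattern, assuming inputs is a NumPy array (the real helper lives in the repo's data utilities):

import numpy as np

def gen_batch_sketch(inputs, batch_size, dynamic_batch=False, shuffle=False):
    """Yield mini-batches over axis 0; a sketch of the gen_batch used above.

    With dynamic_batch=True the final, shorter batch is yielded instead of
    being dropped, so the training loop consumes every sample.
    """
    idx = np.arange(len(inputs))
    if shuffle:
        np.random.shuffle(idx)
    for start in range(0, len(inputs), batch_size):
        end = start + batch_size
        if end > len(inputs):
            if not dynamic_batch:
                break
            end = len(inputs)
        yield inputs[idx[start:end]]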
Example #3
print(f'>> Loading dataset with Mean: {data.mean:.2f}, STD: {data.std:.2f}')

n, n_his, n_pred = args.n_route, args.n_his, args.n_pred
Ks, Kt = args.ks, args.kt
# blocks: settings of channel size in st_conv_blocks / bottleneck design
#blocks = [[2, 32, 64], [64, 32, 128]]
blocks = [[2, 32, 64]]

# Load weighted adjacency matrix W
if args.trained_adj_mx:
    L = tf.get_variable('weight_matrix', shape=(n, n), dtype=tf.float32)
    Lk = cheb_poly_approx_tf(L, Ks, n)
    #W = weight_matrix(pjoin('./dataset', f'PeMSD7_W_{n}.csv'))
    tf.add_to_collection(name='graph_kernel', value=Lk)
else:
    # load customized graph weight matrix
    #W = weight_matrix(pjoin('./dataset', args.graph))
    w = np.load(data_folder + 'w.npy')
    #w = np.array(w, dtype=np.float32)
    W = get_rescaled_W(w, delta=args.delta, epsilon=args.epsilon)
    # Calculate graph kernel
    L = scaled_laplacian(W)
    # Alternative approximation method: 1st approx - first_approx(W, n).
    Lk = cheb_poly_approx(L, Ks, n)
    tf.add_to_collection(name='graph_kernel',
                         value=tf.cast(tf.constant(Lk), tf.float32))

if __name__ == '__main__':
    model_train(data, blocks, args)
    model_test(data, data.get_len('test'), n_his, n_pred, args.inf_mode)
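
get_rescaled_W above is assumed to mirror weight_matrix from the reference STGCN code: a Gaussian kernel over pairwise distances, thresholded so that w_ij = exp(-d_ij^2 / sigma^2) when that value reaches epsilon (and i != j), else 0, with delta playing the role of sigma^2. A hedged sketch; the /10000 distance rescaling is an assumption carried over from the reference weight_matrix:

import numpy as np

def get_rescaled_W_sketch(dist, delta=10, epsilon=0.5):
    """Gaussian-kernel adjacency from a distance matrix, thresholded at epsilon."""
    n = dist.shape[0]
    d = dist / 10000.0            # assumed distance rescaling
    W = np.exp(-d * d / delta)    # w_ij = exp(-d_ij^2 / sigma^2), sigma^2 = delta
    mask = (W >= epsilon) * (np.ones((n, n)) - np.identity(n))  # zero diagonal
    return W * mask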
Example #4
data_file = 'traffic_data.csv'
n_train, n_val, n_test = 3, 3, 3  # 61 in total
step_train, step_val, step_test = 8, 8, 8

PeMS = data_gen(pjoin('./qtraffic', data_file), (n_train, n_val, n_test),
                (step_train, step_val, step_test),
                n,
                n_his + n_pred,
                day_slot=96)
print(f'>> Loading dataset with Mean: {PeMS.mean:.2f}, STD: {PeMS.std:.2f}')
train_shape = PeMS.get_data('train').shape
val_shape = PeMS.get_data('val').shape
test_shape = PeMS.get_data('test').shape
print(f'Data shapes: train: {train_shape}, valid: {val_shape}, test: {test_shape}')

subset = read_pickle('qtraffic/road_subset.pk')
W = read_pickle('qtraffic/W.pk')
print(f'W.shape={W.shape}')

# Calculate graph kernel
L = scaled_laplacian(W)
# Alternative approximation method: 1st approx - first_approx(W, n).
Lk = cheb_poly_approx(L, Ks, n)
tf.add_to_collection(name='graph_kernel',
                     value=tf.cast(tf.constant(Lk), tf.float32))

if __name__ == '__main__':
    model_train(PeMS, blocks, args, load=False)
    model_test(PeMS, 100, n_his, n_pred, args.inf_mode)
    # os.system("shutdown -s -t 0");
    pass
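
day_slot=96 implies 15-minute slots (24 * 4 per day), and data_gen is assumed to window each day into overlapping clips of n_his + n_pred frames. A sketch of that windowing, modeled on the reference seq_gen helper (the function name and the single-channel assumption are illustrative):

import numpy as np

def seq_gen_sketch(data, n_days, offset, n_frame, n_route, day_slot=96):
    """Window each day of `day_slot` steps into overlapping n_frame-step clips.

    data: array of shape [total_steps, n_route]; every day contributes
    day_slot - n_frame + 1 clips, so no clip crosses a day boundary.
    """
    n_slot = day_slot - n_frame + 1
    seq = np.zeros((n_days * n_slot, n_frame, n_route, 1))
    for i in range(n_days):
        for j in range(n_slot):
            sta = (i + offset) * day_slot + j
            seq[i * n_slot + j] = data[sta:sta + n_frame].reshape(n_frame,
                                                                  n_route, 1)
    return seq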
Example #5
# Load weighted adjacency matrix W
if args.graph == 'default':
    W = weight_matrix(pjoin('./dataset', 'w.csv'))
else:
    # load customized graph weight matrix
    W = weight_matrix(pjoin('./dataset', args.graph))

# Calculate graph kernel
L = scaled_laplacian(W)
# Alternative approximation method: 1st approx - first_approx(W, n).
Lk = cheb_poly_approx(L, Ks, n)
tf.add_to_collection(name='graph_kernel',
                     value=tf.cast(tf.constant(Lk), tf.float32))

# Data Preprocessing
data_file = 'v.csv'
n_train, n_val, n_test = 34, 5, 5
TaiAn = data_gen(pjoin('./dataset', data_file), (n_train, n_val, n_test), n,
                 n_his + n_pred)
print(f'>> Loading dataset with Mean: {TaiAn.mean:.2f}, STD: {TaiAn.std:.2f}')

if __name__ == '__main__':
    # model_train(TaiAn, blocks, args,path='./output60min/')
    model_test(TaiAn,
               TaiAn.get_len('test'),
               n_his,
               n_pred,
               args.inf_mode,
               path='./output60min/')
raw_data_path = pjoin(
    data_path, 'train_val'
)  # the folder containing the train and validation data used in the paper

indices = utcPlus3
if args.city == 'Berlin':
    indices = utcPlus2

traffic4cast_data = data_gen_traffic4cast(raw_data_path,
                                          process_data_dir,
                                          node_pos,
                                          args.seq_len,
                                          args.horizon,
                                          data_start=0,
                                          val_indices=indices,
                                          train_ratios=0.8,
                                          val_ratios=0.1)
# Data Preprocessing
# data_file = f'PeMSD7_V_{n}.csv'
# n_train, n_val, n_test = 34, 5, 5
# PeMS = data_gen(pjoin('./dataset', data_file), (n_train, n_val, n_test), n, n_his + n_pred)
print(
    f'>> Loading dataset with Mean: {traffic4cast_data.mean:.2f}, STD: {traffic4cast_data.std:.2f}'
)

if __name__ == '__main__':
    model_train(traffic4cast_data, blocks, args, output_dim=3)
    model_test(traffic4cast_data, traffic4cast_data.get_len('test'),
               args.seq_len, args.horizon, args.inf_mode)
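
Several snippets above mention the alternative kernel "1st approx - first_approx(W, n)". That is the first-order Chebyshev approximation in the Kipf & Welling style; a sketch of what it is assumed to compute:

import numpy as np

def first_approx_sketch(W, n):
    """First-order approximation: I + D^{-1/2} (W + I) D^{-1/2} (a sketch)."""
    A = W + np.identity(n)                     # add self-loops
    d_inv_sqrt = 1.0 / np.sqrt(np.sum(A, axis=1))
    return np.identity(n) + d_inv_sqrt[:, None] * A * d_inv_sqrt[None, :]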