Example 1
def test_predict2():
    from model.astgcn import ASTGCN
    from model.model_config import get_backbones
    import mxnet as mx
    import numpy as np
    from mxnet import nd
    ctx = mx.cpu()
    all_backbones = get_backbones('configurations/PEMS08.conf',
                                  'data/PEMS08/distance.csv', ctx)

    net = ASTGCN(12, all_backbones)
    net.initialize(ctx=ctx)
    # inputs: (batch, num_of_vertices, num_of_features, num_of_timesteps)
    test_w = nd.random_uniform(shape=(8, 170, 3, 12), ctx=ctx)
    test_d = nd.random_uniform(shape=(8, 170, 3, 12), ctx=ctx)
    test_r = nd.random_uniform(shape=(8, 170, 3, 36), ctx=ctx)
    # output: (batch, num_of_vertices, num_for_predict)
    output = net([test_w, test_d, test_r])
    assert output.shape == (8, 170, 12)
    assert type(output.mean().asscalar()) == np.float32
Example 2
def test_predict2():
    from model.astgcn import ASTGCN
    from model.model_config import get_backbones
    import numpy as np
    import torch
    device = torch.device('cpu')
    all_backbones = get_backbones('configurations/PEMS08.conf',
                                  'data/PEMS08/distance.csv', device)

    # 170 vertices, 3 features, and the per-component (week / day / recent)
    # timestep configuration (cf. num_of_timesteps in Example 4)
    net = ASTGCN(12, all_backbones, 170, 3, [[12, 12], [12, 12], [36, 12]],
                 device)

    test_w = torch.randn(8, 170, 3, 12).to(device)
    test_d = torch.randn(8, 170, 3, 12).to(device)
    test_r = torch.randn(8, 170, 3, 36).to(device)
    output = net([test_w, test_d, test_r])
    assert output.shape == (8, 170, 12)
    assert type(output.detach().numpy().mean()) == np.float32
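The [[12, 12], [12, 12], [36, 12]] argument mirrors the num_of_timesteps list built in Example 4. A minimal sketch of how those pairs could be derived, assuming the usual PEMS setup of 12 samples per hour and the one-week / one-day / three-hour component lengths implied by the input shapes above (the variable names follow Example 4; the concrete values are assumptions, not taken from the original config):

points_per_hour = 12                                # assumed sampling rate (5-minute bins)
num_of_weeks, num_of_days, num_of_hours = 1, 1, 3   # assumed, matches the 12/12/36 input widths

num_of_timesteps = [
    [points_per_hour * num_of_weeks, points_per_hour],   # week component
    [points_per_hour * num_of_days, points_per_hour],    # day component
    [points_per_hour * num_of_hours, points_per_hour],   # recent component
]
assert num_of_timesteps == [[12, 12], [12, 12], [36, 12]]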
Example 3
    # save Z-score mean and std
    transformer_data = {}
    for type_ in ['week', 'day', 'recent']:
        transformer = all_data['transformer'][type_]
        transformer_data[type_ + '_mean'] = transformer.mean_
        transformer_data[type_ + '_std'] = transformer.scale_
    np.savez_compressed(
        os.path.join(params_path, 'transformer_data'),
        **transformer_data
    )
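    # Sketch (assumption, not part of the original script): these saved stats can
    # later be reloaded to undo the Z-score normalization on predictions, e.g.
    # for the 'recent' component:
    #     stats = np.load(os.path.join(params_path, 'transformer_data.npz'))
    #     prediction = normalized_prediction * stats['recent_std'] + stats['recent_mean']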

    # loss function: gluon's L2Loss (0.5 * squared error)
    loss_function = gluon.loss.L2Loss()

    # build the model from the backbone configuration
    all_backbones = get_backbones(args.config, adj_filename, ctx)

    net = model(num_for_predict, all_backbones)
    net.initialize(ctx=ctx)

    # run one forward pass so gluon's deferred initialization can infer all
    # parameter shapes, then re-initialize with the custom initializer
    for val_w, val_d, val_r, val_t in val_loader:
        net([val_w, val_d, val_r])
        break
    net.initialize(ctx=ctx, init=MyInit(), force_reinit=True)

    # create a trainer for the model's parameters
    trainer = gluon.Trainer(net.collect_params(), optimizer,
                            {'learning_rate': learning_rate})

    # SummaryWriter writes training logs into the params directory
    sw = SummaryWriter(logdir=params_path, flush_secs=5)

    # compute validation loss before training
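The snippet above stops right before the training loop. A minimal sketch of the gluon training step such a setup typically leads into, assuming train_loader yields (train_w, train_d, train_r, train_t) batches with the same layout as val_loader (the loop is an illustration, not the original training code):

from mxnet import autograd

for train_w, train_d, train_r, train_t in train_loader:    # assumed loader layout
    with autograd.record():
        output = net([train_w, train_d, train_r])           # forward pass on the three components
        l = loss_function(output, train_t)                  # L2Loss against the targets
    l.backward()                                            # back-propagate
    trainer.step(train_w.shape[0])                          # parameter update, scaled by batch size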
Example 4
    # save Z-score mean and std
    # stats_data = {}
    # for type_ in ['week', 'day', 'recent']:
    #     stats = all_data['stats'][type_]
    #     stats_data[type_ + '_mean'] = stats['mean']
    #     stats_data[type_ + '_std'] = stats['std']
    #
    # np.savez_compressed(
    #     os.path.join(params_path, 'stats_data'),
    #     **stats_data
    # )

    loss_function = torch.nn.MSELoss()

    all_backbones = get_backbones(args.config, adj_filename, device)
    # print(all_backbones[0][0]['cheb_polynomials'])

    num_of_features = 3
    num_of_timesteps = [[points_per_hour * num_of_weeks, points_per_hour],
                        [points_per_hour * num_of_days, points_per_hour],
                        [points_per_hour * num_of_hours, points_per_hour]]
    net = model(num_for_predict, all_backbones, num_of_vertices,
                num_of_features, num_of_timesteps, device)  # build the network

    net = net.to(device)
    # nn.Module.to() moves the parameters in place (unlike Tensor.to(), which
    # returns a new tensor), so plain net.to(device) would also work

    optimizer = optim.Adam(
        net.parameters(),
Example 5
# -*- coding:utf-8 -*-
import mxnet as mx
from mxnet import nd

from model.astgcn import ASTGCN
from model.model_config import get_backbones

ctx = mx.cpu()
all_backbones = get_backbones('configurations/PEMS04.conf',
                              'data/PEMS04/distance.csv', ctx)

net = ASTGCN(12, all_backbones)
net.initialize(ctx=ctx)

# PEMS04: 307 vertices, 3 features, 12 / 12 / 36 timesteps for week / day / recent
test_w = nd.random_uniform(shape=(16, 307, 3, 12), ctx=ctx)
test_d = nd.random_uniform(shape=(16, 307, 3, 12), ctx=ctx)
test_r = nd.random_uniform(shape=(16, 307, 3, 36), ctx=ctx)
print(net([test_w, test_d, test_r]).shape)