Esempio n. 1
0
        num_timesteps_output=num_timesteps_output,
        means=means,
        stds=stds)
    test_input, test_target, test_mean_t, test_std_t = generate_dataset(
        test_original_data,
        num_timesteps_input=num_timesteps_input,
        num_timesteps_output=num_timesteps_output,
        means=means,
        stds=stds)

    # Build the normalized adjacency matrix once and move it to the target
    # device; it is reused unchanged by every forward pass.
    A_wave = get_normalized_adj(A)
    A_wave = torch.from_numpy(A_wave)
    # A_wave = torch.memory_format(padding)
    A_wave = A_wave.to(device=args.device)

    # Model: num_nodes taken from the adjacency matrix, feature count from the
    # input tensor's last axis (assumes training_input is 4-D, e.g.
    # (batch, nodes, timesteps, features) — TODO confirm against generate_dataset).
    net = STGCN(A_wave.shape[0], training_input.shape[3], num_timesteps_input,
                num_timesteps_output).to(device=args.device)

    optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)
    loss_criterion = nn.MSELoss()

    # Per-epoch metric histories collected over the whole run.
    training_losses = []
    validation_losses = []
    validation_maes = []
    for epoch in range(epochs):
        # train_epoch iterates mini-batches of size batch_size and returns the
        # epoch's training loss (defined elsewhere in this file).
        loss = train_epoch(training_input,
                           training_target,
                           batch_size=batch_size)
        training_losses.append(loss)

        # Run validation
        with torch.no_grad():
Esempio n. 2
0
print(
    f' num_pred:{num_pred}\t dataset:{dataset}\n net_name:{net_name}\t device:{device}'
)

# Load the (x, y, e) tensors for the chosen dataset and push them to the
# compute device up front.
x, y, e = utils.get_data(f"data/{dataset}/{dataset}.npz", num_his, num_pred)
x, y, e = x.to(device), y.to(device), e.to(device)
# n: number of graph nodes (assumes axis 2 of x indexes nodes — TODO confirm).
n = x.shape[2]
adj = utils.get_adj("data/{}/distance.csv".format(dataset), n).to(device)
# adj = None

# NOTE(review): check-then-create is race-prone; os.makedirs(..., exist_ok=True)
# would be safer if this script can run concurrently.
if not os.path.exists(f'experiment/{net_name}'):
    os.mkdir(f'experiment/{net_name}')
# NOTE(review): AG_JNet is always constructed (consuming RNG draws for its
# parameter init) even when it is immediately replaced by STGCN below; kept
# as-is because changing it would shift the random state of the run.
net = AG_JNet(in_feature, dim_exp, layers_sm, layers_tm, n, num_his, num_pred,
              device).to(device)
if net_name == "STGCN":
    net = STGCN(n, in_feature, num_his, num_pred).to(device)

# Report the total number of trainable-parameter elements.
num_params = sum(param.numel() for param in net.parameters())
print('模型参数量:', num_params)
utils.init_net(net)

criterion = nn.MSELoss().to(device)
opt = Adam(net.parameters(), lr=lr)

# Split samples into train/val/test by fractions of the sample axis and
# normalize; mean/std are returned for de-normalizing predictions later.
num_sample = x.shape[0]
split_index = int(train_split * num_sample)
split_index1 = int(val_split * num_sample)
train_x, train_y, val_x, val_y, test_x, test_y, train_e, val_e, test_e, mean, std \
    = utils.normalization(x, y, e, split_index, split_index1)

# The dataset must be shuffled, otherwise the loss will not go down.
Esempio n. 3
0
# -*- coding: utf-8 -*-
'''
@Author: william
@Description:
@time:2020/7/1 10:25
'''

import torch
from data_load import Data_load
from stgcn import STGCN
from utils import get_normalized_adj

if __name__ == '__main__':
    num_timesteps_input = 12
    num_timesteps_output = 9

    # The checkpoint stores the whole pickled model, so the freshly built
    # STGCN instance that used to be created here was discarded immediately
    # by torch.load; load the checkpoint directly instead.
    net = torch.load('./checkpoints/params_400.pkl')
    net = net.cuda()  # keep the model on the GPU, matching the inputs below
    net.eval()  # inference mode: disable dropout / use running BN statistics

    A, means, stds, training_input, training_target, val_input, val_target, test_input, test_target = Data_load(
        num_timesteps_input, num_timesteps_output)

    A_wave = get_normalized_adj(A)
    A_wave = torch.from_numpy(A_wave)
    # BUG FIX: A_wave previously stayed on the CPU while val_input was moved
    # to CUDA, so the forward pass below crashed with a device mismatch.
    A_wave = A_wave.cuda()
    val_input = val_input.cuda()

    # Pure forward pass: no gradient bookkeeping needed.
    with torch.no_grad():
        out = net(A_wave, val_input)

    print('')
Esempio n. 4
0
if __name__ == '__main__':
    # Fixed seed so the randperm batch order below is reproducible.
    torch.manual_seed(7)
    A, X, means, stds = load_metr_la_data()
    # Use the last 20% of the time axis as the held-out test split.
    split_line2 = int(X.shape[2] * 0.8)
    test_original_data = X[:, :, split_line2:]
    test_input, test_target = generate_dataset(
        test_original_data,
        num_timesteps_input=num_timesteps_input,
        num_timesteps_output=num_timesteps_output)
    print("INFO: Test data load finish!")
    A_wave = get_normalized_adj(A)
    A_wave = torch.from_numpy(A_wave)

    A_wave = A_wave.to(device=args.device)

    # Feature count from the input's last axis (assumes test_input is 4-D,
    # e.g. (batch, nodes, timesteps, features) — TODO confirm).
    net = STGCN(A_wave.shape[0], test_input.shape[3], num_timesteps_input,
                num_timesteps_output).to(device=args.device)

    # Restore trained weights from disk before evaluating.
    net.load_state_dict(torch.load('parameter.pkl'))
    print("INFO: Load model finish!")

    loss_criterion = nn.MSELoss()

    # Shuffled batch order over the test set (harmless for aggregate metrics).
    permutation = torch.randperm(test_input.shape[0])
    epoch_test_losses = []
    epoch_test_mae = []
    for i in range(0, test_input.shape[0], batch_size):
        # NOTE(review): net.eval() only needs to be called once before the
        # loop, and the evaluation is not wrapped in torch.no_grad(), so
        # autograd state is tracked unnecessarily here.
        net.eval()
        indices = permutation[i:i + batch_size]
        X_batch, y_batch = test_input[indices], test_target[indices]
        X_batch = X_batch.to(device=args.device)
        y_batch = y_batch.to(device=args.device)
Esempio n. 5
0
    train_original_data,
    num_timesteps_input=num_timesteps_input,
    num_timesteps_output=num_timesteps_output)
# Windowed (input, target) pairs for the validation and test splits.
val_input, val_target = generate_dataset(
    val_original_data,
    num_timesteps_input=num_timesteps_input,
    num_timesteps_output=num_timesteps_output)
test_input, test_target = generate_dataset(
    test_original_data,
    num_timesteps_input=num_timesteps_input,
    num_timesteps_output=num_timesteps_output)

A_wave = get_normalized_adj(A)
A_wave = torch.from_numpy(A_wave)

# Feature count from the input's last axis (assumes training_input is 4-D,
# e.g. (batch, nodes, timesteps, features) — TODO confirm).
net = STGCN(A_wave.shape[0], training_input.shape[3], num_timesteps_input,
            num_timesteps_output)

training_losses = []
validation_losses = []
validation_maes = []

loss_criterion = nn.MSELoss()
optimizer = torch.optim.Adam(net.parameters(), lr=lr)
for epoch in range(epochs + 1):
    # NOTE(review): this re-creates the Adam optimizer on EVERY epoch past 50,
    # which resets Adam's moment estimates each time; a lr scheduler (or
    # building the reduced-lr optimizer once at epoch 51) is likely intended.
    if epoch > 50:
        optimizer = torch.optim.Adam(net.parameters(), lr=lr / 10)

    loss = train_epoch(training_input, training_target, batch_size=batch_size)
    training_losses.append(loss)

    # Run validation
Esempio n. 6
0
    # val_input, val_target, val_mean_t, val_std_t = generate_dataset(val_original_data,
    #                                          num_timesteps_input=num_timesteps_input,
    #                                          num_timesteps_output=num_timesteps_output,
    #                                          means=val_mean,
    #                                          stds=val_std)
    # test_input, test_target, test_mean_t, test_std_t = generate_dataset(test_original_data,
    #                                            num_timesteps_input=num_timesteps_input,
    #                                            num_timesteps_output=num_timesteps_output,
    #                                            means=test_mean,
    #                                            stds=test_std)
    # Adjacency matrix read from a spreadsheet; cast to float32 for torch.
    A = pd.read_excel('AdjacencyMatrix.xls').values.astype(np.float32)
    A_wave = get_normalized_adj(A)
    A_wave = torch.from_numpy(A_wave)
    A_wave = A_wave.to(device=args.device)

    net = STGCN(A_wave.shape[0], feature_num, num_timesteps_input,
                num_timesteps_output).to(device=args.device)

    optimizer = torch.optim.Adam(net.parameters(), lr=1e-5)
    loss_criterion = nn.MSELoss()
    #
    training_losses = []
    epoch_training_losses = []
    validation_losses = []
    validation_maes = []
    for epoch in range(epochs):
        # Mini-batches come from a DataLoader whose samples are dicts
        # (presumably keys "train_input"/"train_output"/"mean"/"std" — the
        # commented print above suggests so; verify against the Dataset class).
        for batch_dix, sample in enumerate(loader):
            #print(f'sample:[train_input:{sample["train_input"].shape} train_output:{sample["train_output"].shape} mean:{sample["mean"].shape}]'
            #     f'std:{sample["std"].shape}')
            logger.info(f'idx: {batch_dix}')
            # np.array copy first, then tensor conversion and device move.
            training_input = torch.from_numpy(np.array(
                sample["train_input"])).to(device=args.device)
Esempio n. 7
0
File: main.py Progetto: RMPLUV/TAGCN
    # Fix the (CPU) RNG seed: with a fixed seed, every subsequent random draw
    # in the run is reproducible.
    torch.manual_seed(7)

    # A, max_value, training_input, training_target, val_input, val_target, test_input, test_target = Data_load(num_timesteps_input, num_timesteps_output)

    # A, means, stds, training_input, training_target, val_input, val_target, test_input, test_target, train_weather, Weather_val = Data_load(num_timesteps_input, num_timesteps_output)
    A, means, stds, training_input, training_target, val_input, val_target, test_input, test_target = \
        Data_load(num_timesteps_input, num_timesteps_output)
    # A, max_X, min_X, training_input, training_target, val_input, val_target, test_input, test_target = Data_load(num_timesteps_input, num_timesteps_output)

    torch.cuda.empty_cache()  # free cuda memory
    A_wave = get_normalized_adj(A)
    A_wave = torch.from_numpy(A_wave)
    if torch.cuda.is_available():
        A_wave = A_wave.cuda()

    # NOTE(review): feature count is taken from axis 2 here, whereas sibling
    # scripts in this family use shape[3] — confirm this Data_load's layout.
    net = STGCN(A_wave.shape[0], training_input.shape[2], num_timesteps_input,
                num_timesteps_output)
    print('number of parameters: ' +
          str(sum(param.numel() for param in net.parameters())))

    # Move the model to CUDA so later batches run on the GPU.
    if torch.cuda.is_available():
        net.cuda()  # .cuda() allocates the parameters in GPU memory (when available)
    optimizer = torch.optim.Adam(net.parameters(), lr=1e-4)
    loss_criterion = nn.MSELoss()
    # loss_criterion = nn.SmoothL1Loss()

    # Per-epoch metric histories collected over the whole run.
    training_losses = []
    validation_losses = []
    validation_MAE = []
    validation_RMSE = []
    validation_MAPE = []
Esempio n. 8
0
File: main.py Progetto: tungk/STGCN
        num_timesteps_input=num_timesteps_input,
        num_timesteps_output=num_timesteps_output)
    # Windowed (input, target) pairs for the validation split.
    val_input, val_target = generate_dataset(
        val_original_data,
        num_timesteps_input=num_timesteps_input,
        num_timesteps_output=num_timesteps_output)
    # test_input, test_target = generate_dataset(test_original_data,
    #                                            num_timesteps_input=num_timesteps_input,
    #                                            num_timesteps_output=num_timesteps_output)
    print("INFO: Load data finish!")
    A_wave = get_normalized_adj(A)
    A_wave = torch.from_numpy(A_wave)

    A_wave = A_wave.to(device=args.device)

    # Feature count from the input's last axis (assumes training_input is 4-D,
    # e.g. (batch, nodes, timesteps, features) — TODO confirm).
    net = STGCN(A_wave.shape[0], training_input.shape[3], num_timesteps_input,
                num_timesteps_output).to(device=args.device)

    optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)
    loss_criterion = nn.MSELoss()

    training_losses = []
    validation_losses = []
    validation_maes = []
    for epoch in tqdm(range(epochs)):
        # train_epoch iterates mini-batches and returns the epoch's training
        # loss (defined elsewhere in this file).
        loss = train_epoch(training_input,
                           training_target,
                           batch_size=batch_size)
        training_losses.append(loss)
        torch.cuda.empty_cache()
        # Run validation
        # Shuffled order over validation samples for batching below.
        permutation = torch.randperm(val_input.shape[0])