def Data_load(num_timesteps_input, num_timesteps_output):
    """Load METR-LA data and build (input, target) window tensors.

    Training and test windows are generated from the full ``X`` array; the
    validation windows come from a separately loaded ``X_val`` array.

    Args:
        num_timesteps_input: number of consecutive timesteps per input window.
        num_timesteps_output: number of consecutive timesteps per target window.

    Returns:
        Tuple ``(A, means, stds, training_input, training_target,
        val_input, val_target, test_input, test_target)`` where ``A`` is the
        adjacency matrix and ``means``/``stds`` are the normalization stats
        produced by ``load_metr_la_data``.
    """
    A, X, means, stds, X_val = load_metr_la_data()

    train_original_data = X
    # Validation uses the separately loaded X_val split.
    val_original_data = X_val
    # NOTE(review): test windows are generated from the same array as the
    # training windows, so the "test" metrics are in-sample — confirm this
    # overlap is intentional.
    test_original_data = X

    training_input, training_target = generate_dataset(
        train_original_data,
        num_timesteps_input=num_timesteps_input,
        num_timesteps_output=num_timesteps_output)
    val_input, val_target = generate_dataset(
        val_original_data,
        num_timesteps_input=num_timesteps_input,
        num_timesteps_output=num_timesteps_output)
    test_input, test_target = generate_dataset(
        test_original_data,
        num_timesteps_input=num_timesteps_input,
        num_timesteps_output=num_timesteps_output)

    return A, means, stds, training_input, training_target, val_input, val_target, test_input, test_target
def Data_load(num_timesteps_input, num_timesteps_output):
    """Load METR-LA data and build (input, target) tensors with a 60/20/20 split.

    The raw array ``X`` is split chronologically along its last axis
    (axis 2, the time dimension) into train (first 60%), validation
    (next 20%), and test (final 20%) segments before windowing.

    NOTE(review): this re-defines ``Data_load`` — if the other variant of
    this function is present in the same module, whichever is defined later
    shadows the earlier one. Consider keeping only one.

    Args:
        num_timesteps_input: number of consecutive timesteps per input window.
        num_timesteps_output: number of consecutive timesteps per target window.

    Returns:
        Tuple ``(A, means, stds, training_input, training_target,
        val_input, val_target, test_input, test_target, nodes)`` where ``A``
        is the adjacency matrix, ``means``/``stds`` are normalization stats,
        and ``nodes`` is whatever node metadata ``load_metr_la_data`` returns.
    """
    A, X, means, stds, nodes = load_metr_la_data()

    # Chronological split points along the time axis.
    split_line1 = int(X.shape[2] * 0.6)
    split_line2 = int(X.shape[2] * 0.8)

    train_original_data = X[:, :, :split_line1]
    val_original_data = X[:, :, split_line1:split_line2]
    test_original_data = X[:, :, split_line2:]

    training_input, training_target = generate_dataset(
        train_original_data,
        num_timesteps_input=num_timesteps_input,
        num_timesteps_output=num_timesteps_output)
    val_input, val_target = generate_dataset(
        val_original_data,
        num_timesteps_input=num_timesteps_input,
        num_timesteps_output=num_timesteps_output)
    test_input, test_target = generate_dataset(
        test_original_data,
        num_timesteps_input=num_timesteps_input,
        num_timesteps_output=num_timesteps_output)

    return A, means, stds, training_input, training_target, val_input, val_target, test_input, test_target, nodes
X_batch = X_batch.to(device=args.device) y_batch = y_batch.to(device=args.device) out = net(A_wave, X_batch) loss = loss_criterion(out, y_batch) print(f'{i}/{training_input.shape[0]}: step loss:{loss.item()}') loss.backward() optimizer.step() epoch_training_losses.append(loss.detach().cpu().numpy()) return sum(epoch_training_losses) / len(epoch_training_losses) if __name__ == '__main__': torch.manual_seed(7) print(args.device) A, X, means, stds = load_metr_la_data() print(A.shape) split_line1 = int(len(X) * 0.6) split_line2 = int(len(X) * 0.8) # train_original_data = X[:split_line1] val_original_data = X[split_line1:split_line2] # val_mean, val_std = means[split_line1:split_line2], stds[split_line1:split_line2] test_original_data = X[split_line2:] # test_mean, test_std = means[split_line2:], stds[split_line2:] training_input, training_target, train_mean_t, train_std_t = generate_dataset( train_original_data, num_timesteps_input=num_timesteps_input,