Example #1
import os

import torch
from torch.utils.data import DataLoader

# Project-local modules; these module names are assumptions.
from dataset import TSDataset
from model import DSANet

def main(args):
    data_path = os.path.join(args.dataset_path, args.dataset)
    train_data = TSDataset(data_path + '-train.csv',
                           args.windows, args.horizon)
    # persist the scaler fitted on the training data so evaluation can reuse it
    torch.save(train_data.scaler, 'scaler.pt')
    val_data = TSDataset(data_path + '-val.csv', args.windows,
                         args.horizon, train_data.scaler)
    # test_data = TSDataset(data_path + '-test.csv', args.windows, args.horizon)
    train_loader = DataLoader(train_data, args.batch_size, shuffle=True)
    # no need to shuffle validation batches
    val_loader = DataLoader(val_data, args.batch_size)

    
    # number of input series: the feature dimension of one input window
    D = train_data[0][0].shape[-1]

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    net = DSANet(D, args.windows, args.horizon,
                 args.n_global, args.n_local, args.n_local_filter,
                 args.n_global_head, args.n_global_hidden, args.n_global_stack,
                 args.n_local_head, args.n_local_hidden, args.n_local_stack,
                 args.dropout)
    net = net.to(device)
    loss_fn = torch.nn.MSELoss()
    optimizer = torch.optim.Adam(net.parameters(), lr=args.lr)

    for e in range(1, args.epochs + 1):
        # train for one epoch
        net.train()
        train_loss = 0.0
        for X, y in train_loader:
            optimizer.zero_grad()

            yhat = net(X.type(torch.float32).to(device))
            loss = loss_fn(yhat, y.type(torch.float32).to(device))
            train_loss += loss.item()
            loss.backward()
            optimizer.step()
        # validate without gradient tracking; eval() disables dropout
        net.eval()
        val_loss = 0.0
        with torch.no_grad():
            for X, y in val_loader:
                yhat = net(X.type(torch.float32).to(device))
                loss = loss_fn(yhat, y.type(torch.float32).to(device))
                val_loss += loss.item()
        train_loss /= len(train_loader)
        val_loss /= len(val_loader)
        print('Epoch %d: train loss is %.2f, val loss is %.2f' % (e, train_loss, val_loss))
    
        # checkpoint after every epoch; the filename records epoch and val loss
        torch.save(net.state_dict(), 'net-%d-%.2f.pt' % (e, val_loss))
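The training entry point above pulls every setting from an `args` namespace, but the example omits the CLI wiring. Below is a minimal argparse sketch inferred from the attributes `main` accesses; the flag defaults and the dataset name are placeholders, not values from the source.

if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset_path', default='data')
    parser.add_argument('--dataset', default='electricity')     # hypothetical default
    parser.add_argument('--windows', type=int, default=64)      # input window length
    parser.add_argument('--horizon', type=int, default=1)       # forecast horizon
    parser.add_argument('--batch_size', type=int, default=32)
    parser.add_argument('--lr', type=float, default=1e-3)
    parser.add_argument('--epochs', type=int, default=100)
    parser.add_argument('--dropout', type=float, default=0.1)
    # DSANet architecture hyperparameters (defaults are placeholders)
    parser.add_argument('--n_global', type=int, default=5)
    parser.add_argument('--n_local', type=int, default=3)
    parser.add_argument('--n_local_filter', type=int, default=32)
    parser.add_argument('--n_global_head', type=int, default=8)
    parser.add_argument('--n_global_hidden', type=int, default=64)
    parser.add_argument('--n_global_stack', type=int, default=2)
    parser.add_argument('--n_local_head', type=int, default=8)
    parser.add_argument('--n_local_hidden', type=int, default=64)
    parser.add_argument('--n_local_stack', type=int, default=2)
    main(parser.parse_args())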
Example #2
import os

import numpy as np
import torch
from torch.utils.data import DataLoader

# Project-local modules; these module names are assumptions.
from dataset import TSDataset
from metrics import MAE, MAPE, MSE, RMSE, RRSE
from model import DSANet

def main(args):
    data_path = os.path.join(args.dataset_path, args.dataset)
    scaler = torch.load(args.scaler)
    test_data = TSDataset(data_path + '-test.csv', args.windows, args.horizon,
                          scaler)
    test_loader = DataLoader(test_data, args.batch_size)
    # number of input series: the feature dimension of one input window
    D = test_data[0][0].shape[-1]

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    net = DSANet(D, args.windows, args.horizon, args.n_global, args.n_local,
                 args.n_local_filter, args.n_global_head, args.n_global_hidden,
                 args.n_global_stack, args.n_local_head, args.n_local_hidden,
                 args.n_local_stack, args.dropout)
    # map metric names to functions; unknown names fall back to NaN
    metric_fns = {'RMSE': RMSE, 'MSE': MSE, 'MAE': MAE,
                  'RRSE': RRSE, 'MAPE': MAPE}
    loss_fns = [metric_fns.get(metric, lambda yhat, y: np.nan)
                for metric in args.metrics]

    net.load_state_dict(torch.load(args.model, map_location=device))
    net = net.to(device)
    net.eval()  # disable dropout for inference
    test_losses = [0.0] * len(loss_fns)

    with torch.no_grad():
        for (X, y) in test_loader:
            yhat = net(X.type(torch.float32).to(device)).to('cpu').numpy()
            y = y.to('cpu').numpy()
            for i, loss_fn in enumerate(loss_fns):
                loss = loss_fn(yhat, y)
                test_losses[i] += loss
    # report each metric averaged over the number of test batches
    for metric, loss in zip(args.metrics, test_losses):
        print('%s: %.2f' % (metric, loss / len(test_loader)))
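The evaluation script calls RMSE, MSE, MAE, RRSE, and MAPE on numpy arrays without defining them. A minimal sketch of these metrics under their standard definitions; only the `fn(yhat, y)` call signature comes from the code above, and the `eps` guard in MAPE is an added assumption.

import numpy as np

def MSE(yhat, y):
    # mean squared error
    return np.mean((yhat - y) ** 2)

def RMSE(yhat, y):
    # root mean squared error
    return np.sqrt(MSE(yhat, y))

def MAE(yhat, y):
    # mean absolute error
    return np.mean(np.abs(yhat - y))

def RRSE(yhat, y):
    # root relative squared error: squared error normalized by the
    # squared error of predicting the mean of the targets
    return np.sqrt(np.sum((yhat - y) ** 2) / np.sum((y - np.mean(y)) ** 2))

def MAPE(yhat, y, eps=1e-8):
    # mean absolute percentage error; eps guards against division by zero
    return np.mean(np.abs((yhat - y) / (y + eps))) * 100

Note that averaging per-batch metric values over batches, as the test loop does, only approximates the dataset-level value for nonlinear metrics such as RMSE and RRSE; computing each metric once over the concatenated predictions would be exact.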