def main():
    """Load a trained gwnet checkpoint and evaluate it on the test set.

    Prints per-horizon and average MAE/MAPE/RMSE, optionally appends the
    horizon table to an Excel workbook, optionally plots the learned adaptive
    adjacency as a heatmap, and dumps example real/predicted series for node
    99 to wave.csv. All configuration comes from the module-level ``args``
    namespace (argparse).
    """
    device = torch.device(args.device)

    # Precomputed adjacency supports for the graph convolutions.
    _, _, adj_mx = util.load_adj(args.adjdata, args.adjtype)
    supports = [torch.tensor(i).to(device) for i in adj_mx]
    if args.randomadj:
        adjinit = None
    else:
        adjinit = supports[0]

    if args.aptonly:
        # Adaptive-adjacency-only mode: drop the precomputed supports.
        supports = None

    model = gwnet(device, args.num_nodes, args.dropout, supports=supports,
                  gcn_bool=args.gcn_bool, addaptadj=args.addaptadj,
                  aptinit=adjinit)
    model.to(device)
    model.load_state_dict(torch.load(args.checkpoint))
    model.eval()

    print('model load successfully')

    dataloader = util.load_dataset(args.data, args.batch_size, args.batch_size, args.batch_size)
    scaler = dataloader['scaler']
    outputs = []
    # Ground truth: (B, T, N, F) -> (B, N, T), keeping only feature 0.
    realy = torch.Tensor(dataloader['y_test']).to(device)
    realy = realy.transpose(1,3)[:,0,:,:]

    for _, (x, y) in enumerate(dataloader['test_loader'].get_iterator()):
        testx = torch.Tensor(x).to(device)
        testx = testx.transpose(1,3)
        with torch.no_grad():
            preds = model(testx).transpose(1,3)
        outputs.append(preds.squeeze())

    yhat = torch.cat(outputs,dim=0)
    # The loader pads its last batch; trim predictions to the true length.
    yhat = yhat[:realy.size(0),...]

    amae = []
    amape = []
    armse = []
    for i in range(12):
        # Predictions are de-normalized; realy is used as-is (assumed to be
        # stored in raw units by util.load_dataset — TODO confirm).
        pred = scaler.inverse_transform(yhat[:,:,i])
        real = realy[:,:,i]
        metrics = util.metric(pred,real)
        log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
        print(log.format(i+1, metrics[0], metrics[1], metrics[2]))
        amae.append(metrics[0])
        amape.append(metrics[1])
        armse.append(metrics[2])

    log = 'On average over 12 horizons, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
    print(log.format(np.mean(amae),np.mean(amape),np.mean(armse)))

    # Per-horizon table plus a trailing 'average' row.
    idx_list = list(range(1, 13)) + ['average']
    amae.append(np.mean(amae))
    amape.append(np.mean(amape))
    armse.append(np.mean(armse))
    df = pd.DataFrame(list(zip(idx_list, amae, amape, armse)), columns=['horizon', 'MAE', 'MAPE', 'RMSE'])

    if args.savehorizon == 'True':
        excel_dir = 'result.xlsx'
        sheet_name = args.sheetname

        if os.path.isfile(excel_dir):
            # Append a new sheet. mode='a' replaces the old pattern of
            # assigning writer.book / writer.sheets and calling
            # writer.save() inside the `with` block — that API is
            # deprecated/removed in recent pandas and caused a double save.
            with pd.ExcelWriter(excel_dir, engine='openpyxl', mode='a') as writer:
                df.to_excel(writer, sheet_name=sheet_name, index=False)
        else:
            df.to_excel(excel_dir, sheet_name=sheet_name, index=False)

    if args.plotheatmap == "True":
        # Learned adaptive adjacency, row-normalized with softmax.
        adp = F.softmax(F.relu(torch.mm(model.nodevec1, model.nodevec2)), dim=1)
        # (Removed the former no-op `adp.to(device)`: Tensor.to returns a new
        # tensor and its result was discarded; .cpu() below suffices.)
        adp = adp.cpu().detach().numpy()
        adp = adp*(1/np.max(adp))  # scale so the largest entry is 1
        df = pd.DataFrame(adp)
        sns.heatmap(df, cmap="RdYlBu")
        plt.savefig("./emb"+ '.pdf')

    # print(realy.shape) #torch.Size([6850, 207, 12]) (:, #node, window)

    # Example traces for node 99 at horizons 12 and 3 (0-based 11 and 2).
    y12 = realy[:,99,11].cpu().detach().numpy()
    yhat12 = scaler.inverse_transform(yhat[:,99,11]).cpu().detach().numpy()

    y3 = realy[:,99,2].cpu().detach().numpy()
    yhat3 = scaler.inverse_transform(yhat[:,99,2]).cpu().detach().numpy()

    df2 = pd.DataFrame({'real12':y12,'pred12':yhat12, 'real3': y3, 'pred3':yhat3})
    df2.to_csv('./wave.csv',index=False)
Esempio n. 2
0
def main():
    """Train a Graph WaveNet model, validate every epoch, then evaluate the
    best checkpoint (lowest mean validation loss) on the test set.

    Reads all configuration from the module-level ``args`` namespace, saves a
    checkpoint per epoch, and re-saves the best model at the end.
    """
    #set seed
    #torch.manual_seed(args.seed)
    #np.random.seed(args.seed)
    #load data
    device = torch.device(args.device)
    sensor_ids, sensor_id_to_ind, adj_mx = util.load_adj(args.adjdata,args.adjtype)
    dataloader = util.load_dataset(args.data, args.batch_size, args.batch_size, args.batch_size)
    scaler = dataloader['scaler']
    # Precomputed adjacency supports for the graph convolutions.
    supports = [torch.tensor(i).to(device) for i in adj_mx]

    print(args)

    if args.randomadj:
        adjinit = None
    else:
        adjinit = supports[0]

    if args.aptonly:
        # Adaptive-adjacency-only mode: drop the precomputed supports.
        supports = None



    engine = trainer(scaler, args.in_dim, args.seq_length, args.num_nodes, args.nhid, args.dropout,
                         args.learning_rate, args.weight_decay, device, supports, args.gcn_bool, args.addaptadj,
                         adjinit)


    print("start training...",flush=True)
    his_loss =[]
    val_time = []
    train_time = []
    for i in range(1,args.epochs+1):
        #if i % 10 == 0:
            #lr = max(0.000002,args.learning_rate * (0.1 ** (i // 10)))
            #for g in engine.optimizer.param_groups:
                #g['lr'] = lr
        train_loss = []
        train_mape = []
        train_rmse = []
        t1 = time.time()
        dataloader['train_loader'].shuffle()
        for iter, (x, y) in enumerate(dataloader['train_loader'].get_iterator()):
            trainx = torch.Tensor(x).to(device)
            # Move the feature axis next to the batch axis: (B, T, N, F) -> (B, F, N, T).
            trainx= trainx.transpose(1, 3)
            trainy = torch.Tensor(y).to(device)
            trainy = trainy.transpose(1, 3)
            # Only feature 0 of the target is supervised.
            metrics = engine.train(trainx, trainy[:,0,:,:])
            train_loss.append(metrics[0])
            train_mape.append(metrics[1])
            train_rmse.append(metrics[2])
            if iter % args.print_every == 0 :
                log = 'Iter: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}'
                print(log.format(iter, train_loss[-1], train_mape[-1], train_rmse[-1]),flush=True)
        t2 = time.time()
        train_time.append(t2-t1)
        #validation
        valid_loss = []
        valid_mape = []
        valid_rmse = []


        s1 = time.time()
        for iter, (x, y) in enumerate(dataloader['val_loader'].get_iterator()):
            testx = torch.Tensor(x).to(device)
            testx = testx.transpose(1, 3)
            testy = torch.Tensor(y).to(device)
            testy = testy.transpose(1, 3)
            metrics = engine.eval(testx, testy[:,0,:,:])
            valid_loss.append(metrics[0])
            valid_mape.append(metrics[1])
            valid_rmse.append(metrics[2])
        s2 = time.time()
        log = 'Epoch: {:03d}, Inference Time: {:.4f} secs'
        print(log.format(i,(s2-s1)))
        val_time.append(s2-s1)
        mtrain_loss = np.mean(train_loss)
        mtrain_mape = np.mean(train_mape)
        mtrain_rmse = np.mean(train_rmse)

        mvalid_loss = np.mean(valid_loss)
        mvalid_mape = np.mean(valid_mape)
        mvalid_rmse = np.mean(valid_rmse)
        # Mean validation loss per epoch; used below to pick the best checkpoint.
        his_loss.append(mvalid_loss)

        log = 'Epoch: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}, Valid Loss: {:.4f}, Valid MAPE: {:.4f}, Valid RMSE: {:.4f}, Training Time: {:.4f}/epoch'
        print(log.format(i, mtrain_loss, mtrain_mape, mtrain_rmse, mvalid_loss, mvalid_mape, mvalid_rmse, (t2 - t1)),flush=True)
        # Checkpoint every epoch; the filename encodes epoch number and rounded valid loss.
        torch.save(engine.model.state_dict(), args.save+"_epoch_"+str(i)+"_"+str(round(mvalid_loss,2))+".pth")
    print("Average Training Time: {:.4f} secs/epoch".format(np.mean(train_time)))
    print("Average Inference Time: {:.4f} secs".format(np.mean(val_time)))

    #testing
    # Reload the epoch with the lowest validation loss (on-disk epochs are 1-based).
    bestid = np.argmin(his_loss)
    engine.model.load_state_dict(torch.load(args.save+"_epoch_"+str(bestid+1)+"_"+str(round(his_loss[bestid],2))+".pth"))


    outputs = []
    # Ground truth: (B, T, N, F) -> (B, N, T), keeping only feature 0.
    realy = torch.Tensor(dataloader['y_test']).to(device)
    realy = realy.transpose(1,3)[:,0,:,:]

    for iter, (x, y) in enumerate(dataloader['test_loader'].get_iterator()):
        testx = torch.Tensor(x).to(device)
        testx = testx.transpose(1,3)
        with torch.no_grad():
            preds = engine.model(testx).transpose(1,3)
        outputs.append(preds.squeeze())

    yhat = torch.cat(outputs,dim=0)
    # The test loader pads its last batch; trim back to the true sample count.
    yhat = yhat[:realy.size(0),...]


    print("Training finished")
    print("The valid loss on best model is", str(round(his_loss[bestid],4)))


    amae = []
    amape = []
    armse = []
    for i in range(12):
        # Predictions are de-normalized; realy is used as-is (assumed to be
        # stored in raw units by util.load_dataset — TODO confirm).
        pred = scaler.inverse_transform(yhat[:,:,i])
        real = realy[:,:,i]
        metrics = util.metric(pred,real)
        log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
        print(log.format(i+1, metrics[0], metrics[1], metrics[2]))
        amae.append(metrics[0])
        amape.append(metrics[1])
        armse.append(metrics[2])

    log = 'On average over 12 horizons, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
    print(log.format(np.mean(amae),np.mean(amape),np.mean(armse)))
    torch.save(engine.model.state_dict(), args.save+"_exp"+str(args.expid)+"_best_"+str(round(his_loss[bestid],2))+".pth")
Esempio n. 3
0
def main():
    """Load a trained gwnet checkpoint and evaluate it on the test set.

    Prints per-horizon and average MAE/MAPE/RMSE, optionally plots the
    learned adaptive adjacency as a heatmap, and writes example real vs.
    predicted series for node 99 to wave.csv. Configuration comes from the
    module-level ``args`` namespace.
    """
    device = torch.device(args.device)

    # Precomputed adjacency supports for the graph convolutions.
    _, _, adj_mx = util.load_adj(args.adjdata, args.adjtype)
    supports = [torch.tensor(i).to(device) for i in adj_mx]
    if args.randomadj:
        adjinit = None
    else:
        adjinit = supports[0]

    if args.aptonly:
        # Adaptive-adjacency-only mode: drop the precomputed supports.
        supports = None

    model = gwnet(device,
                  args.num_nodes,
                  args.dropout,
                  supports=supports,
                  gcn_bool=args.gcn_bool,
                  addaptadj=args.addaptadj,
                  aptinit=adjinit)
    model.to(device)
    model.load_state_dict(torch.load(args.checkpoint))
    model.eval()

    print('model load successfully')

    dataloader = util.load_dataset(args.data, args.batch_size, args.batch_size,
                                   args.batch_size)
    scaler = dataloader['scaler']
    outputs = []
    # Ground truth: (B, T, N, F) -> (B, N, T), keeping only feature 0.
    realy = torch.Tensor(dataloader['y_test']).to(device)
    realy = realy.transpose(1, 3)[:, 0, :, :]

    for _, (x, y) in enumerate(dataloader['test_loader'].get_iterator()):
        testx = torch.Tensor(x).to(device)
        testx = testx.transpose(1, 3)
        with torch.no_grad():
            preds = model(testx).transpose(1, 3)
        outputs.append(preds.squeeze())

    yhat = torch.cat(outputs, dim=0)
    # The loader pads its last batch; trim predictions to the true length.
    yhat = yhat[:realy.size(0), ...]

    amae = []
    amape = []
    armse = []
    for i in range(12):
        # NOTE(review): both prediction and target are de-normalized here,
        # i.e. y_test is assumed to be stored in normalized units for this
        # data variant — confirm against util.load_dataset.
        pred = scaler.inverse_transform(yhat[:, :, i])
        real = scaler.inverse_transform(realy[:, :, i])
        metrics = util.metric(pred, real)
        log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
        print(log.format(i + 1, metrics[0], metrics[1], metrics[2]))
        amae.append(metrics[0])
        amape.append(metrics[1])
        armse.append(metrics[2])

    log = 'On average over 12 horizons, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
    print(log.format(np.mean(amae), np.mean(amape), np.mean(armse)))

    if args.plotheatmap == "True":
        # Learned adaptive adjacency, row-normalized with softmax.
        adp = F.softmax(F.relu(torch.mm(model.nodevec1, model.nodevec2)),
                        dim=1)
        # (Removed the former no-op `adp.to(device)`: Tensor.to returns a new
        # tensor and its result was discarded; .cpu() below suffices.)
        adp = adp.cpu().detach().numpy()
        adp = adp * (1 / np.max(adp))  # scale so the largest entry is 1
        df = pd.DataFrame(adp)
        sns.heatmap(df, cmap="RdYlBu")
        plt.savefig("./emb" + '.pdf')

    # Example traces for node 99 at horizons 12 and 3 (0-based 11 and 2).
    # Use .cpu().detach().numpy() instead of np.array(tensor) so this also
    # works when evaluating on GPU (np.array() on a CUDA tensor raises).
    y12 = scaler.inverse_transform(realy[:, 99, 11]).cpu().detach().numpy()
    yhat12 = scaler.inverse_transform(yhat[:, 99, 11]).cpu().detach().numpy()

    y3 = scaler.inverse_transform(realy[:, 99, 2]).cpu().detach().numpy()
    yhat3 = scaler.inverse_transform(yhat[:, 99, 2]).cpu().detach().numpy()

    df2 = pd.DataFrame({
        'real12': y12,
        'pred12': yhat12,
        'real3': y3,
        'pred3': yhat3
    })
    df2.to_csv('./wave.csv', index=False)
Esempio n. 4
0
def main():
    """Train the (optionally error-recurrent, eRec) Graph WaveNet variant,
    validate every epoch, then evaluate the best checkpoint on the test set.

    Also computes elementwise MSE/MAE tensors over the test set and writes a
    detailed loss breakdown (per sensor, per sequence step) to
    results/loss_evaluation.txt. Configuration comes from the module-level
    ``args`` namespace; depends on module-level helpers such as
    count_parameters and the print_loss* family.
    """
    # set seed
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    # load data
    device = torch.device(args.device)
    sensor_ids, sensor_id_to_ind, adj_mx = util.load_adj(
        args.adjdata, args.adjtype)
    # suffix = '_filtered_we'  # _filtered_we, _filtered_ew
    eR_seq_size = 24  # 24  # recurrent-error window length (eRec mode)
    error_size = 6
    dataloader = util.load_dataset(args.data,
                                   args.batch_size,
                                   args.batch_size,
                                   args.batch_size,
                                   eRec=args.eRec,
                                   eR_seq_size=eR_seq_size,
                                   suffix=args.suffix)
    scaler = dataloader['scaler']

    if args.retrain:
        # When retraining, the scaler must come from the original training
        # distribution, not the new data.
        dl_train = util.load_dataset(args.data,
                                     args.batch_size,
                                     args.batch_size,
                                     args.batch_size,
                                     eRec=args.eRec,
                                     eR_seq_size=eR_seq_size,
                                     suffix=args.suffix_train)
        scaler = dl_train['scaler']

    blocks = int(dataloader[f'x_train{args.suffix}'].shape[-3] /
                 3)  # Every block reduce the input sequence size by 3.
    print(f'blocks = {blocks}')

    # Precomputed adjacency supports for the graph convolutions.
    supports = [torch.tensor(i).to(device) for i in adj_mx]

    print(args)

    if args.randomadj:
        adjinit = None
    else:
        adjinit = supports[0]

    if args.aptonly:
        # Adaptive-adjacency-only mode: drop the precomputed supports.
        supports = None

    engine = trainer(scaler,
                     args.in_dim,
                     args.seq_length,
                     args.num_nodes,
                     args.nhid,
                     args.dropout,
                     args.learning_rate,
                     args.weight_decay,
                     device,
                     supports,
                     args.gcn_bool,
                     args.addaptadj,
                     adjinit,
                     blocks,
                     eRec=args.eRec,
                     retrain=args.retrain,
                     checkpoint=args.checkpoint,
                     error_size=error_size)

    if args.retrain:
        # Retraining validates on the training split itself.
        dataloader['val_loader'] = dataloader['train_loader']

    print("start training...", flush=True)
    his_loss = []
    val_time = []
    train_time = []
    for i in range(1, args.epochs + 1):
        #if i % 10 == 0:
        #lr = max(0.000002,args.learning_rate * (0.1 ** (i // 10)))
        #for g in engine.optimizer.param_groups:
        #g['lr'] = lr
        train_loss = []
        train_mape = []
        train_rmse = []
        t1 = time.time()
        dataloader['train_loader'].shuffle()
        for iter, (x,
                   y) in enumerate(dataloader['train_loader'].get_iterator()):
            trainx = torch.Tensor(x).to(device)
            trainy = torch.Tensor(y).to(device)
            if args.eRec:
                # eRec batches carry an extra leading recurrence axis;
                # move it in front of the batch axis.
                trainx = trainx.transpose(0, 1)
                trainy = trainy.transpose(0, 1)
            trainx = trainx.transpose(-3, -1)
            trainy = trainy.transpose(-3, -1)
            # print(f'trainx.shape = {trainx.shape}')
            # print(f'trainy.shape = {trainy.shape}')
            # print(f'trainy.shape final = {trainy[:,0,:,:].shape}')
            if args.eRec:
                metrics = engine.train(trainx, trainy[:, :, 0, :, :])
            else:
                # Only feature 0 of the target is supervised.
                metrics = engine.train(trainx, trainy[:, 0, :, :])
            train_loss.append(metrics[0])
            train_mape.append(metrics[1])
            train_rmse.append(metrics[2])
            if iter % args.print_every == 0:
                log = 'Iter: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}'
                print(log.format(iter, train_loss[-1], train_mape[-1],
                                 train_rmse[-1]),
                      flush=True)
        t2 = time.time()
        train_time.append(t2 - t1)
        #validation
        valid_loss = []
        valid_mape = []
        valid_rmse = []

        s1 = time.time()
        for iter, (x, y) in enumerate(dataloader['val_loader'].get_iterator()):
            testx = torch.Tensor(x).to(device)
            testy = torch.Tensor(y).to(device)
            if args.eRec:
                testx = testx.transpose(0, 1)
                testy = testy.transpose(0, 1)
            testx = testx.transpose(-3, -1)
            testy = testy.transpose(-3, -1)
            if args.eRec:
                metrics = engine.eval(testx, testy[:, :, 0, :, :])
            else:
                metrics = engine.eval(testx, testy[:, 0, :, :])
            valid_loss.append(metrics[0])
            valid_mape.append(metrics[1])
            valid_rmse.append(metrics[2])
        s2 = time.time()
        log = 'Epoch: {:03d}, Inference Time: {:.4f} secs'
        print(log.format(i, (s2 - s1)))
        val_time.append(s2 - s1)
        mtrain_loss = np.mean(train_loss)
        mtrain_mape = np.mean(train_mape)
        mtrain_rmse = np.mean(train_rmse)

        mvalid_loss = np.mean(valid_loss)
        mvalid_mape = np.mean(valid_mape)
        mvalid_rmse = np.mean(valid_rmse)
        # Mean validation loss per epoch; used below to pick the best checkpoint.
        his_loss.append(mvalid_loss)

        log = 'Epoch: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}, Valid Loss: {:.4f}, Valid MAPE: {:.4f}, Valid RMSE: {:.4f}, Training Time: {:.4f}/epoch'
        print(log.format(i, mtrain_loss, mtrain_mape, mtrain_rmse, mvalid_loss,
                         mvalid_mape, mvalid_rmse, (t2 - t1)),
              flush=True)
        # Checkpoint every epoch; filename encodes epoch number and rounded valid loss.
        torch.save(
            engine.model.state_dict(), args.save + "_epoch_" + str(i) + "_" +
            str(round(mvalid_loss, 2)) + ".pth")
    print("Average Training Time: {:.4f} secs/epoch".format(
        np.mean(train_time)))
    print("Average Inference Time: {:.4f} secs".format(np.mean(val_time)))

    #testing
    # NOTE(review): the next assignment is dead code — it is immediately
    # overwritten by np.argmin on the following line (leftover from a
    # hand-picked checkpoint experiment).
    bestid = 82  # 24 hay que sumarle 1 para obtener el ID del modelo
    bestid = np.argmin(his_loss)
    engine.model.load_state_dict(
        torch.load(args.save + "_epoch_" + str(bestid + 1) + "_" +
                   str(round(his_loss[bestid], 2)) + ".pth"))
    # engine.model.load_state_dict(torch.load(args.save + f"_id_25_2.6_best_model.pth"))
    # engine.model.load_state_dict(torch.load(args.save + f"_exp1_best_2.6.pth"))

    #torch.save(engine.model.state_dict(), args.save + f"_id_{bestid+1}_best_model.pth")
    print(f'best_id = {bestid+1}')

    outputs = []
    realy = torch.Tensor(dataloader[f'y_test{args.suffix}']).to(device)
    #print(f'realy: {realy.shape}')
    if args.eRec:
        # Keep only the last recurrence step of the ground truth.
        realy = realy.transpose(0, 1)
        realy = realy.transpose(-3, -1)[-1, :, 0, :, :]
        #print(f'realy2: {realy.shape}')
    else:
        realy = realy.transpose(-3, -1)[:, 0, :, :]
        #print(f'realy2: {realy.shape}')
    # Elementwise (unreduced) losses for the detailed breakdown below.
    criterion = nn.MSELoss(reduction='none')  # L2 Norm
    criterion2 = nn.L1Loss(reduction='none')
    loss_mse_list = []
    loss_mae_list = []

    for iter, (x, y) in enumerate(dataloader['test_loader'].get_iterator()):
        testx = torch.Tensor(x).to(device)
        testy = torch.Tensor(y).to(device)
        if args.eRec:
            testx = testx.transpose(0, 1)
            testy = testy.transpose(0, 1)
        testx = testx.transpose(-3, -1)
        testy = testy.transpose(-3, -1)
        with torch.no_grad():
            if args.eRec:
                preds = engine.model(testx, testy[:, :, 0:1, :, :],
                                     scaler).transpose(1, 3)
            else:
                preds = engine.model(testx).transpose(1, 3)

        #print(f'preds: {scaler.inverse_transform(torch.squeeze(preds.transpose(-3, -1))).shape}')
        #print(f'testy: {torch.squeeze(testy[:, 0:1, :, :].transpose(-3, -1)).shape}')
        if args.eRec:
            loss_mse = criterion(
                scaler.inverse_transform(torch.squeeze(preds.transpose(-3,
                                                                       -1))),
                torch.squeeze(testy[-1, :, 0:1, :, :].transpose(-3, -1)))
            loss_mae = criterion2(
                scaler.inverse_transform(torch.squeeze(preds.transpose(-3,
                                                                       -1))),
                torch.squeeze(testy[-1, :, 0:1, :, :].transpose(-3, -1)))
        else:
            loss_mse = criterion(
                scaler.inverse_transform(torch.squeeze(preds.transpose(-3,
                                                                       -1))),
                torch.squeeze(testy[:, 0:1, :, :].transpose(-3, -1)))
            loss_mae = criterion2(
                scaler.inverse_transform(torch.squeeze(preds.transpose(-3,
                                                                       -1))),
                torch.squeeze(testy[:, 0:1, :, :].transpose(-3, -1)))

        loss_mse_list.append(loss_mse)
        loss_mae_list.append(loss_mae)

        outputs.append(preds.squeeze())

    # Drop the final batch's losses — presumably because the loader pads the
    # last batch with repeated samples (TODO confirm against the loader).
    loss_mse_list.pop(-1)
    loss_mae_list.pop(-1)
    loss_mse = torch.cat(loss_mse_list, 0)
    loss_mae = torch.cat(loss_mae_list, 0)
    #loss_mse = torch.squeeze(loss_mse).cpu()
    #loss_mae = torch.squeeze(loss_mae).cpu()
    loss_mse = loss_mse.cpu()
    loss_mae = loss_mae.cpu()
    print(f'loss_mae: {loss_mae.shape}')
    print(f'loss_mse: {loss_mae.shape}')

    # Redirect stdout into the report file while printing the breakdown,
    # then restore it and echo the file to the console.
    res_folder = 'results/'
    original_stdout = sys.stdout
    with open(res_folder + f'loss_evaluation.txt', 'w') as filehandle:
        sys.stdout = filehandle  # Change the standard output to the file we created.
        count_parameters(engine.model)
        # loss_mae.shape --> (batch_size, seq_size, n_detect)
        print(' 1. ***********')
        print_loss('MSE', loss_mse)
        print(' 2. ***********')
        print_loss('MAE', loss_mae)
        print(' 3. ***********')
        print_loss_sensor('MAE', loss_mae)
        print(' 5. ***********')
        print_loss_seq('MAE', loss_mae)
        print(' 6. ***********')
        print_loss_sensor_seq('MAE', loss_mae)

        sys.stdout = original_stdout  # Reset the standard output to its original value

    with open(res_folder + f'loss_evaluation.txt', 'r') as filehandle:
        print(filehandle.read())

    yhat = torch.cat(outputs, dim=0)
    #print(f'yhat: {yhat.shape}')
    # The loader pads its last batch; trim predictions to the true length.
    yhat = yhat[:realy.size(0), ...]
    #print(f'yhat2: {yhat.shape}')

    print("Training finished")
    #print("The valid loss on best model is", str(round(his_loss[bestid],4)))

    amae = []
    amape = []
    armse = []
    for i in range(args.seq_length):
        pred = scaler.inverse_transform(yhat[:, :, i])
        real = realy[:, :, i]
        metrics = util.metric(pred, real)
        log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
        print(log.format(i + 1, metrics[0], metrics[1], metrics[2]))
        amae.append(metrics[0])
        amape.append(metrics[1])
        armse.append(metrics[2])

    # NOTE(review): '{:.4f}' renders the integer horizon count as e.g.
    # '12.0000'; '{:d}' would be the natural spec here.
    log = 'On average over {:.4f} horizons, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
    print(
        log.format(args.seq_length, np.mean(amae), np.mean(amape),
                   np.mean(armse)))
    torch.save(
        engine.model.state_dict(), args.save + "_exp" + str(args.expid) +
        "_best_" + str(round(np.min(his_loss), 2)) + ".pth")
Esempio n. 5
0
def main():
    """Train Graph WaveNet with a step LR schedule and TensorBoard logging,
    then evaluate the best (lowest validation MAE) checkpoint on the test set.

    Saves per-step ground truth and predictions to .npy files and the best
    model weights to disk. Relies on the module-level ``args``, ``adj_path``,
    ``outflow_path``, ``num_nodes``, ``save_path``, ``logger`` and the
    TensorBoard writers ``train_writer``/``val_writer``.
    """
    # set seed
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    np.random.seed(args.seed)

    # load data
    device = f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu'
    adj_mx = util.load_adj(adj_path, args.adjtype)
    dataloader = util.load_dataset(outflow_path, args.batch_size,
                                   args.batch_size, args.batch_size)
    scaler = dataloader['scaler']
    # Precomputed adjacency supports for the graph convolutions.
    supports = [torch.tensor(i).to(device) for i in adj_mx]

    if args.randomadj:
        adjinit = None
    else:
        adjinit = supports[0]

    if args.aptonly:
        # Adaptive-adjacency-only mode: drop the precomputed supports.
        supports = None

    engine = trainer(scaler, args.in_dim, args.seq_length, num_nodes,
                     args.nhid, args.dropout, args.learning_rate,
                     args.weight_decay, device, supports, args.gcn_bool,
                     args.addaptadj, adjinit)

    logger.write("start training...")
    his_loss = []
    val_time = []
    train_time = []
    for i in range(1, args.epochs + 1):
        # learning rate schedule: decay by 0.9 every 10 epochs, with a floor.
        if i % 10 == 0:
            lr = max(0.000002, args.learning_rate * (0.9**(i // 10)))
            for g in engine.optimizer.param_groups:
                g['lr'] = lr

        # train
        train_mae = []
        train_rmse = []
        train_mape = []
        t1 = time.time()
        dataloader['train_loader'].shuffle()
        for _, (x,
                y) in enumerate(dataloader['train_loader'].get_iterator()):
            trainx = torch.Tensor(x).to(device)
            trainx = trainx.transpose(1, 3)
            trainy = torch.Tensor(y).to(device)
            trainy = trainy.transpose(1, 3)
            # NOTE: B, T, V, F, F=2, but we noly need speed for label: y[:, 0, ...]
            metrics = engine.train(trainx, trainy[:, 0, :, :])
            train_mae.append(metrics[0])
            train_rmse.append(metrics[1])
            train_mape.append(metrics[2])
        # log results of training set.
        mtrain_mae = np.mean(train_mae)
        mtrain_rmse = np.mean(train_rmse)
        mtrain_mape = np.mean(train_mape) * 100
        train_writer.add_scalar('train/mae', mtrain_mae, i)
        train_writer.add_scalar('train/rmse', mtrain_rmse, i)
        train_writer.add_scalar('train/mape', mtrain_mape, i)

        # validation
        with torch.no_grad():
            valid_mae = []
            valid_mape = []
            valid_rmse = []
            s1 = time.time()
            for _, (x,
                    y) in enumerate(dataloader['val_loader'].get_iterator()):
                testx = torch.Tensor(x).to(device)
                testx = testx.transpose(1, 3)
                testy = torch.Tensor(y).to(device)
                testy = testy.transpose(1, 3)
                metrics = engine.eval(testx, testy[:, 0, :, :])
                valid_mae.append(metrics[0])
                valid_rmse.append(metrics[1])
                valid_mape.append(metrics[2])
            # log results of validation set.
            s2 = time.time()
            val_time.append(s2 - s1)
            mvalid_mae = np.mean(valid_mae)
            mvalid_mape = np.mean(valid_mape) * 100
            mvalid_rmse = np.mean(valid_rmse)
            # Mean validation MAE per epoch; used below to pick the best checkpoint.
            his_loss.append(mvalid_mae)
            val_writer.add_scalar('val/mae', mvalid_mae, i)
            val_writer.add_scalar('val/rmse', mvalid_rmse, i)
            val_writer.add_scalar('val/mape', mvalid_mape, i)

        t2 = time.time()
        train_time.append(t2 - t1)
        if i % args.print_every == 0:
            logger.write(
                f'Epoch: {i:03d}, MAE: {mtrain_mae:.2f}, RMSE: {mtrain_rmse:.2f}, MAPE: {mtrain_mape:.2f}, Valid MAE: {mvalid_mae:.2f}, RMSE: {mvalid_rmse:.2f}, MAPE: {mvalid_mape:.2f}'
            )
        # Checkpoint every epoch; filename encodes epoch number and rounded valid MAE.
        torch.save(
            engine.model.state_dict(), save_path + "_epoch_" + str(i) + "_" +
            str(round(mvalid_mae, 2)) + ".pth")

    logger.write("Average Training Time: {:.4f} secs/epoch".format(
        np.mean(train_time)))
    # logger.write("Average Inference Time: {:.4f} secs".format(np.mean(val_time)))

    # Reload the epoch with the lowest validation MAE (on-disk epochs are 1-based).
    bestid = np.argmin(his_loss)
    engine.model.load_state_dict(
        torch.load(save_path + "_epoch_" + str(bestid + 1) + "_" +
                   str(round(his_loss[bestid], 2)) + ".pth"))

    logger.write("Training finished")
    logger.write(
        f"The valid loss on best model is {str(round(his_loss[bestid],4))}")

    # test
    outputs = []
    # Ground truth: (B, T, N, F) -> (B, N, T), keeping only feature 0.
    realy = torch.Tensor(dataloader['y_test']).to(device)
    realy = realy.transpose(1, 3)[:, 0, :, :]

    with torch.no_grad():
        t1 = time.time()
        for _, (x, y) in enumerate(dataloader['test_loader'].get_iterator()):
            testx = torch.Tensor(x).to(device)
            testx = testx.transpose(1, 3)
            preds = engine.model(testx).transpose(1, 3)
            outputs.append(preds.squeeze())

        t2 = time.time()
        logger.write(f'Inference time: {t2-t1:.4f}')
        yhat = torch.cat(outputs, dim=0)
        # The loader pads its last batch; trim predictions to the true length.
        yhat = yhat[:realy.size(0), ...]

        # calculate metrics and save predictions
        preds = []
        reals = []
        logger.write('Step i, Test MAE, Test RMSE, Test MAPE')
        for i in range(args.seq_length):
            # prediction of step i
            pred = scaler.inverse_transform(yhat[:, :, i])
            real = realy[:, :, i]
            metrics = util.metric(pred.cpu().detach().numpy(),
                                  real.cpu().detach().numpy())
            logger.write(
                f'{metrics[0]:.2f}, {metrics[1]:.2f}, {metrics[2]*100:.2f}')

            preds.append(pred.tolist())
            reals.append(real.tolist())

    reals = np.array(reals)
    preds = np.array(preds)
    # BUG FIX: ground truth and predictions were previously written to the
    # SAME file, so the second np.save silently overwrote the first. Save
    # them under distinct names.
    np.save(f'test_real_{args.city}_{args.tinterval}.npy', reals)
    np.save(f'test_pred_{args.city}_{args.tinterval}.npy', preds)
    torch.save(
        engine.model.state_dict(), save_path + "_exp" + str(args.expid) +
        "_best_" + str(round(his_loss[bestid], 2)) + ".pth")
Esempio n. 6
0
def main():
    """Train a GWNet-style model with input imputation, validate each epoch,
    and evaluate the best checkpoint on the test set.

    Depends on module-level globals not visible in this block: ``args``,
    ``util``, ``trainer``, ``wandb`` and ``tqdm``.
    """
    # Set seed: draw a random uint32 seed when none was supplied on the CLI.
    args.seed = args.seed if args.seed else \
        np.random.randint(0, np.iinfo("uint32").max, size=1)[-1]
    torch.manual_seed(args.seed)
    # Force deterministic cuDNN kernels for reproducibility (may be slower).
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    np.random.seed(args.seed)

    # update run_name & save_dir
    args.run_name += "_".join(
        [args.data_type, str(args.seq_length),
         str(args.seed)])
    args.save += args.run_name + "/"
    # NOTE(review): os.makedirs raises if the directory already exists --
    # presumably intentional, to avoid clobbering a previous run's checkpoints.
    os.makedirs(args.save)
    wandb.init(config=args, project=args.project_name, name=args.run_name)

    #load data
    device = torch.device(args.device)
    sensor_ids, sensor_id_to_ind, adj_mx = util.load_adj(
        args.adjdata, args.adjtype)
    dataloader = util.load_dataset(args.data, args.batch_size, args.batch_size,
                                   args.batch_size)
    scaler = dataloader['scaler']
    # One dense support tensor per adjacency variant returned by load_adj.
    supports = [torch.tensor(i).to(device) for i in adj_mx]

    print(args)

    if args.randomadj:
        adjinit = None
    else:
        adjinit = supports[0]

    if args.aptonly:
        supports = None

    engine = trainer(scaler, args.in_dim, args.seq_length, args.num_nodes,
                     args.nhid, args.dropout, args.learning_rate,
                     args.weight_decay, device, supports, args.gcn_bool,
                     args.addaptadj, adjinit, args.impute_type)

    print("start training...", flush=True)
    his_loss = []
    val_time = []
    train_time = []
    for i in tqdm(range(1, args.epochs + 1)):
        train_loss = []
        train_mape = []
        train_rmse = []
        t1 = time.time()
        if i > 1:
            # Skip shuffling for 1st epoch for data imputation
            dataloader['train_loader'].shuffle()
        for iter, (x,
                   y) in enumerate(dataloader['train_loader'].get_iterator()):
            # Impute missing values on epoch 1 (and always for GCN imputers).
            # Assumes the loader yields x as a tensor with layout (B, T, N, C),
            # transposed here to (B, C, N, T) -- TODO confirm against loader.
            if i == 1 or engine.imputer.type == "GCN":
                trainx = engine.imputer(x.transpose(1, 3),
                                        engine.model.get_supports())
            else:
                trainx = x.transpose(1, 3)
            trainx = trainx.to(device)
            trainy = torch.Tensor(y).to(device)
            trainy = trainy.transpose(1, 3)
            # Only feature channel 0 is the prediction target.
            metrics = engine.train(trainx, trainy[:, 0, :, :])
            train_loss.append(metrics[0])
            train_mape.append(metrics[1])
            train_rmse.append(metrics[2])
            if iter % args.print_every == 0:
                log = 'Iter: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}'
                print(log.format(iter, train_loss[-1], train_mape[-1],
                                 train_rmse[-1]),
                      flush=True)
        t2 = time.time()
        train_time.append(t2 - t1)
        #validation
        valid_loss = []
        valid_mape = []
        valid_rmse = []

        s1 = time.time()
        for iter, (x, y) in enumerate(dataloader['val_loader'].get_iterator()):
            # Same imputation policy as the training loop above.
            if i == 1 or engine.imputer.type == "GCN":
                testx = engine.imputer(x.transpose(1, 3),
                                       engine.model.get_supports())
            else:
                testx = x.transpose(1, 3)
            testx = testx.to(device)
            testy = torch.Tensor(y).to(device)
            testy = testy.transpose(1, 3)
            metrics = engine.eval(testx, testy[:, 0, :, :])
            valid_loss.append(metrics[0])
            valid_mape.append(metrics[1])
            valid_rmse.append(metrics[2])
        s2 = time.time()
        log = 'Epoch: {:03d}, Inference Time: {:.4f} secs'
        print(log.format(i, (s2 - s1)))
        val_time.append(s2 - s1)
        mtrain_loss = np.mean(train_loss)
        mtrain_mape = np.mean(train_mape)
        mtrain_rmse = np.mean(train_rmse)

        mvalid_loss = np.mean(valid_loss)
        mvalid_mape = np.mean(valid_mape)
        mvalid_rmse = np.mean(valid_rmse)
        # Epoch-level validation loss drives best-checkpoint selection below.
        his_loss.append(mvalid_loss)

        log = 'Epoch: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}, Valid Loss: {:.4f}, Valid MAPE: {:.4f}, Valid RMSE: {:.4f}, Training Time: {:.4f}/epoch'
        print(log.format(i, mtrain_loss, mtrain_mape, mtrain_rmse, mvalid_loss,
                         mvalid_mape, mvalid_rmse, (t2 - t1)),
              flush=True)
        # Checkpoint every epoch; the filename encodes epoch and rounded loss
        # and is reconstructed below when reloading the best model.
        torch.save(
            engine.model.state_dict(), args.save + "epoch_" + str(i) + "_" +
            str(round(mvalid_loss, 2)) + ".pth")
        wandb.log(
            {
                "Train MAE": mtrain_loss,
                "Train MAPE": mtrain_mape,
                "Train RMSE": mtrain_rmse,
                "Validation MAE": mvalid_loss,
                "Validation MAPE": mvalid_mape,
                "Validation RMSE": mvalid_rmse
            },
            step=i)
    print("Average Training Time: {:.4f} secs/epoch".format(
        np.mean(train_time)))
    print("Average Inference Time: {:.4f} secs".format(np.mean(val_time)))

    #testing
    # Reload the checkpoint with the lowest mean validation loss.
    bestid = np.argmin(his_loss)
    engine.model.load_state_dict(
        torch.load(args.save + "epoch_" + str(bestid + 1) + "_" +
                   str(round(his_loss[bestid], 2)) + ".pth"))

    outputs = []
    realy = torch.Tensor(dataloader['y_test']).to(device)
    # (B, T, N, C) -> (B, C, N, T), then keep target channel 0: (B, N, T).
    realy = realy.transpose(1, 3)[:, 0, :, :]

    for iter, (x, y) in enumerate(dataloader['test_loader'].get_iterator()):
        with torch.no_grad():
            testx = engine.imputer(x.transpose(1, 3),
                                   engine.model.get_supports())
            testx = testx.to(device)
            preds = engine.model(testx).transpose(1, 3)
        outputs.append(preds.squeeze(1))

    yhat = torch.cat(outputs, dim=0)
    # Drop padded samples the loader may append to fill the last batch.
    yhat = yhat[:realy.size(0), ...]

    print("Training finished")
    print("The valid loss on best model is", str(round(his_loss[bestid], 4)))

    amae = []
    amape = []
    armse = []
    for i in range(args.seq_length):
        # Per-horizon metrics (horizon is 1-based in the log line).
        pred = scaler.inverse_transform(yhat[:, :, i])
        real = realy[:, :, i]
        metrics = util.metric(pred, real)
        log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
        print(log.format(i + 1, metrics[0], metrics[1], metrics[2]))
        amae.append(metrics[0])
        amape.append(metrics[1])
        armse.append(metrics[2])

        # wandb steps continue past the training epochs to keep a single
        # monotonically increasing step axis.
        wandb.log(
            {
                "Test MAE": metrics[0],
                "Test MAPE": metrics[1],
                "Test RMSE": metrics[2]
            },
            step=i + args.epochs + 1)

    log = 'On average over horizons, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
    print(log.format(np.mean(amae), np.mean(amape), np.mean(armse)))
    wandb.log({
        "Avg Test MAE": np.mean(amae),
        "Avg Test MAPE": np.mean(amape),
        "Avg Test RMSE": np.mean(armse)
    })
    torch.save(engine.model.state_dict(),
               args.save + "best_" + str(round(his_loss[bestid], 2)) + ".pth")
Esempio n. 7
0
def main():
    """Train one of several baseline traffic-forecasting models, validate
    each epoch, then evaluate the best checkpoint per horizon and save the
    prediction artifacts (prediction, attention, learned adjacency).

    Depends on module-level globals not visible in this block: ``args``,
    ``util`` and the ``trainer1`` .. ``trainer11`` wrapper classes.
    """
    #load data
    device = torch.device(args.device)
    sensor_ids, sensor_id_to_ind, adj_mx = util.load_adj(
        args.adjdata, args.adjtype)
    dataloader = util.load_dataset(args.data, args.batch_size, args.batch_size,
                                   args.batch_size)
    supports = [torch.tensor(i).to(device) for i in adj_mx]

    print(args)

    # All trainer wrappers share the same constructor signature, so dispatch
    # through a table instead of a duplicated if/elif chain. Fail fast on an
    # unknown model name: the original chain had no `else`, which left
    # `engine` unbound and crashed later with a confusing NameError.
    trainer_classes = {
        'gwnet': trainer1,
        'ASTGCN_Recent': trainer2,
        'GRCN': trainer3,
        'Gated_STGCN': trainer4,
        'H_GCN_wh': trainer5,
        'OGCRNN': trainer8,
        'OTSGGCN': trainer9,
        'LSTM': trainer10,
        'GRU': trainer11,
    }
    if args.model not in trainer_classes:
        raise ValueError("Unknown model: %s" % args.model)
    engine = trainer_classes[args.model](args.in_dim, args.seq_length,
                                         args.num_nodes, args.nhid,
                                         args.dropout, args.learning_rate,
                                         args.weight_decay, device, supports,
                                         args.decay)

    # check parameters file
    params_path = args.save + "/" + args.model
    if os.path.exists(params_path) and not args.force:
        raise SystemExit(
            "Params folder exists! Select a new params path please!")
    else:
        if os.path.exists(params_path):
            shutil.rmtree(params_path)
        os.makedirs(params_path)
        print('Create params directory %s' % (params_path))

    print("start training...", flush=True)
    his_loss = []
    val_time = []
    train_time = []
    for i in range(1, args.epochs + 1):
        train_loss = []
        train_mae = []
        train_mape = []
        train_rmse = []
        t1 = time.time()
        dataloader['train_loader'].shuffle()
        for iter, (x,
                   y) in enumerate(dataloader['train_loader'].get_iterator()):
            # Loader yields (B, T, N, C); the models expect (B, C, N, T).
            trainx = torch.Tensor(x).to(device)
            trainx = trainx.transpose(1, 3)
            trainy = torch.Tensor(y).to(device)
            trainy = trainy.transpose(1, 3)
            # Feature channel 0 is the prediction target.
            metrics = engine.train(trainx, trainy[:, 0, :, :])
            train_loss.append(metrics[0])
            train_mae.append(metrics[1])
            train_mape.append(metrics[2])
            train_rmse.append(metrics[3])
        t2 = time.time()
        train_time.append(t2 - t1)
        #validation
        valid_loss = []
        valid_mae = []
        valid_mape = []
        valid_rmse = []

        s1 = time.time()

        for iter, (x, y) in enumerate(dataloader['val_loader'].get_iterator()):
            testx = torch.Tensor(x).to(device)
            testx = testx.transpose(1, 3)
            testy = torch.Tensor(y).to(device)
            testy = testy.transpose(1, 3)
            metrics = engine.eval(testx, testy[:, 0, :, :])
            valid_loss.append(metrics[0])
            valid_mae.append(metrics[1])
            valid_mape.append(metrics[2])
            valid_rmse.append(metrics[3])
        s2 = time.time()
        log = 'Epoch: {:03d}, Inference Time: {:.4f} secs'
        print(log.format(i, (s2 - s1)))
        val_time.append(s2 - s1)
        mtrain_loss = np.mean(train_loss)
        mtrain_mae = np.mean(train_mae)
        mtrain_mape = np.mean(train_mape)
        mtrain_rmse = np.mean(train_rmse)

        mvalid_loss = np.mean(valid_loss)
        mvalid_mae = np.mean(valid_mae)
        mvalid_mape = np.mean(valid_mape)
        mvalid_rmse = np.mean(valid_rmse)
        # Epoch-level validation loss drives best-checkpoint selection below.
        his_loss.append(mvalid_loss)

        log = 'Epoch: {:03d}, Train Loss: {:.4f}, Train MAE: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}, Valid Loss: {:.4f}, Valid MAE: {:.4f}, Valid MAPE: {:.4f}, Valid RMSE: {:.4f}, Training Time: {:.4f}/epoch'
        print(log.format(i, mtrain_loss, mtrain_mae, mtrain_mape, mtrain_rmse,
                         mvalid_loss, mvalid_mae, mvalid_mape, mvalid_rmse,
                         (t2 - t1)),
              flush=True)
        # Checkpoint every epoch; the filename encodes epoch and rounded loss
        # and is reconstructed below when reloading the best model.
        torch.save(
            engine.model.state_dict(), params_path + "/" + args.model +
            "_epoch_" + str(i) + "_" + str(round(mvalid_loss, 2)) + ".pth")
    print("Average Training Time: {:.4f} secs/epoch".format(
        np.mean(train_time)))
    print("Average Inference Time: {:.4f} secs".format(np.mean(val_time)))

    #testing
    # Reload the checkpoint with the lowest mean validation loss.
    bestid = np.argmin(his_loss)
    engine.model.load_state_dict(
        torch.load(params_path + "/" + args.model + "_epoch_" +
                   str(bestid + 1) + "_" + str(round(his_loss[bestid], 2)) +
                   ".pth"))
    engine.model.eval()

    outputs = []
    realy = torch.Tensor(dataloader['y_test']).to(device)
    # (B, T, N, C) -> (B, C, N, T), then keep target channel 0: (B, N, T).
    realy = realy.transpose(1, 3)[:, 0, :, :]

    for iter, (x, y) in enumerate(dataloader['test_loader'].get_iterator()):
        testx = torch.Tensor(x).to(device)
        testx = testx.transpose(1, 3)
        with torch.no_grad():
            # This model family returns (prediction, spatial attention,
            # learned adjacency); the last batch's attention tensors are the
            # ones persisted below -- presumably intentional, TODO confirm.
            preds, spatial_at, parameter_adj = engine.model(testx)
            preds = preds.transpose(1, 3)
        outputs.append(preds.squeeze())

    yhat = torch.cat(outputs, dim=0)
    # Drop padded samples the loader may append to fill the last batch.
    yhat = yhat[:realy.size(0), ...]

    print("Training finished")
    print("The valid loss on best model is", str(round(his_loss[bestid], 4)))

    amae = []
    amape = []
    armse = []
    prediction = yhat
    for i in range(12):
        # NOTE(review): metrics are computed on normalized values here (no
        # scaler.inverse_transform), unlike the sibling scripts -- confirm.
        pred = prediction[:, :, i]
        real = realy[:, :, i]
        metrics = util.metric(pred, real)
        log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
        print(log.format(i + 1, metrics[0], metrics[1], metrics[2]))
        amae.append(metrics[0])
        amape.append(metrics[1])
        armse.append(metrics[2])

    log = 'On average over 12 horizons, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
    print(log.format(np.mean(amae), np.mean(amape), np.mean(armse)))
    torch.save(
        engine.model.state_dict(), params_path + "/" + args.model + "_exp" +
        str(args.expid) + "_best_" + str(round(his_loss[bestid], 2)) + ".pth")
    prediction_path = params_path + "/" + args.model + "_prediction_results"
    ground_truth = realy.cpu().detach().numpy()
    prediction = prediction.cpu().detach().numpy()
    spatial_at = spatial_at.cpu().detach().numpy()
    parameter_adj = parameter_adj.cpu().detach().numpy()
    np.savez_compressed(os.path.normpath(prediction_path),
                        prediction=prediction,
                        spatial_at=spatial_at,
                        parameter_adj=parameter_adj,
                        ground_truth=ground_truth)
Esempio n. 8
0
def main():
    """Train GWaveNet/STGCN/LSTM on traffic data, track the best validation
    loss, then evaluate the best checkpoint per horizon on the test set.

    Depends on module-level globals not visible in this block: ``args``,
    ``device``, ``util``, ``evaluate`` and the model classes.
    """
    print("*" * 10)
    print(args)
    print("*" * 10)
    dataloader = util.load_dataset(device, args.data_path, args.batch_size, args.batch_size, args.batch_size)
    scaler = dataloader['scaler']
    print("scaler: ", scaler)
    model_type = "GWaveNet"    # HA / SVR / ARIMA / STGCN / GWaveNet / LSTM

    sensor_ids, sensor_id_to_ind, adj_mx = util.load_adj(args.adj_path, args.adjtype)
    supports = [torch.tensor(i).to(device) for i in adj_mx]

    _, _, A = util.load_pickle(args.adj_path)
    A_wave = util.get_normalized_adj(A)
    A_wave = torch.from_numpy(A_wave).to(device)
    # Threshold for "new best" validation loss (training losses are expected
    # to be well below 100). Removed the dead best_path assignment that was
    # unconditionally overwritten before use below.
    best_mae = 100

    if args.randomadj:
        adjinit = None
    else:
        adjinit = supports[0]
    if args.aptonly:
        supports = None

    if model_type == "GWaveNet":
        print("=========Model:GWaveNet=========")
        print("with scaler")
        model = GWNET(device, args.num_nodes, args.dropout, supports=supports, gcn_bool=args.gcn_bool,
                      addaptadj=args.addaptadj, aptinit=adjinit, in_dim=args.in_dim, out_dim=args.seq_length,
                      residual_channels=args.nhid, dilation_channels=args.nhid, skip_channels=args.nhid * 8,
                      end_channels=args.nhid * 16)

    if model_type == "STGCN":
        print("=========Model:STGCN=========")
        print("with scaler")
        model = STGCN(A_wave.shape[0], 2, num_timesteps_input=12, num_timesteps_output=12)

    if model_type == "LSTM":
        print("=========Model:LSTM=========")
        input_dim = 2
        hidden_dim = 2
        output_dim = 2
        model = LSTM(input_dim, hidden_dim, output_dim)

    best_path = f'{args.save}/{model_type}.pkl'
    record = []
    model.to(device)
    model.zero_grad()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    optimizer.zero_grad()
    loss_MSE = torch.nn.MSELoss()
    loss_gwnet = util.masked_mae
    loss_stgcn = util.masked_mae

    print("============Begin Training============")
    his_loss = []
    val_time = []
    train_time = []
    # Fix: initialize the early-stopping counters before the epoch loop. The
    # original only assigned epochs_since_best_mae inside the "new best"
    # branch, so `epochs_since_best_mae += 1` raised NameError whenever the
    # first epoch failed to beat best_mae.
    epochs_since_best_mae = 0
    best_epoch = 0
    for epoch in range(args.num_epochs):
        print('-' * 10)
        print('Epoch {}/{}'.format(epoch, args.num_epochs))
        train_loss, train_mape, train_rmse, train_mae = [], [], [], []
        t1 = time.time()
        t = time.time()
        dataloader['train_loader'].shuffle()
        for iter, (x, y) in enumerate(dataloader['train_loader'].get_iterator()):
            trainx = torch.Tensor(x).to(device)  # x: (64, 24, 207, 2)
            trainy = torch.Tensor(y).to(device)  # y: (64, 12, 207, 2)
            # Drop the ragged last batch; some models assume a fixed size.
            if trainx.shape[0] != args.batch_size:
                continue

            if model_type == "GWaveNet":
                trainx = trainx.transpose(1, 3)
                trainy = trainy.transpose(1, 3)
                trainy = trainy[:, 0, :, :]
                trainy = torch.unsqueeze(trainy, dim=1)
                # Left-pad the time axis by one step for the causal convs.
                trainx = nn.functional.pad(trainx, (1, 0, 0, 0))

                pred = model.forward(trainx)
                pred = pred.transpose(1, 3)
                pred = scaler.inverse_transform(pred)
                loss_train = loss_gwnet(pred, trainy, 0.0)

            if model_type == "STGCN":
                # (batch_size,num_timesteps,num_nodes,num_features=in_channels)
                # ->(batch_size,num_nodes,num_timesteps,num_features=in_channels)
                trainx = trainx.permute(0, 2, 1, 3)
                trainy = trainy[:, :, :, 0].permute(0, 2, 1)
                pred = model(A_wave, trainx)
                loss_train = loss_stgcn(pred, trainy, 0.0)

            if model_type == "rnn":
                [batch_size, step_size, num_of_vertices, fea_size] = trainx.size()
                trainx = trainx.permute(0, 2, 1, 3)
                trainx = trainx.reshape(-1, step_size, fea_size)
                trainy = trainy.reshape(-1, 1, fea_size)
                trainy = trainy[:, 0, :]
                pred = model.loop(trainx)
                loss_train = loss_MSE(pred, trainy)

            # NOTE(review): Y_size leaks out of this loop and sizes the test
            # horizon loop below -- fragile, confirm it matches seq_length.
            Y_size = trainy.shape

            if iter == 0:
                print("trainy:", trainy.shape)

            optimizer.zero_grad()
            loss_train.backward()
            clip = 5
            if clip is not None:
                torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
            optimizer.step()

            evaluation = evaluate(pred, trainy)
            train_loss.append(loss_train.item())
            train_mape.append(evaluation[0])
            train_rmse.append(evaluation[1])
            train_mae.append(evaluation[2])

            if iter % args.interval == 0:
                log = 'Iter: {:03d}|Train Loss: {:.4f}|Train MAPE: {:.4f}|Train RMSE: {:.4f}|Train MAE: {:.4f}|Time: ' \
                      '{:.4f} '
                print(log.format(iter, train_loss[-1], train_mape[-1], train_rmse[-1], train_mae[-1], time.time() - t),
                      flush=True)
                t = time.time()
        t2 = time.time()
        train_time.append(t2 - t1)
        # validation
        valid_loss, valid_mape, valid_rmse, valid_mae = [], [], [], []
        s1 = time.time()
        for iter, (x_val, y_val) in enumerate(dataloader['val_loader'].get_iterator()):
            # validation data loader iterator init
            inputs_val = torch.Tensor(x_val).to(device)  # x: (64, 24, 207, 2)
            labels_val = torch.Tensor(y_val).to(device)

            if model_type == "GWaveNet":
                inputs_val = inputs_val.transpose(1, 3)
                labels_val = labels_val.transpose(1, 3)
                labels_val = labels_val[:, 0, :, :]
                labels_val = torch.unsqueeze(labels_val, dim=1)

                inputs_val = nn.functional.pad(inputs_val, (1, 0, 0, 0))
                pred_val = model.forward(inputs_val)
                pred_val = pred_val.transpose(1, 3)
                pred_val = scaler.inverse_transform(pred_val)
                loss_valid = loss_gwnet(pred_val, labels_val, 0.0)

            if model_type == "STGCN":
                inputs_val = inputs_val.permute(0, 2, 1, 3)
                labels_val = labels_val[:, :, :, 0].permute(0, 2, 1)
                pred_val = model(A_wave, inputs_val)
                loss_valid = loss_stgcn(pred_val, labels_val, 0.0)

            if model_type == "rnn":
                [batch_size, step_size, num_of_vertices, fea_size] = trainx.size()
                inputs_val = inputs_val.permute(0, 2, 1, 3)
                inputs_val = inputs_val.reshape(-1, step_size, fea_size)
                labels_val = labels_val.reshape(-1, 1, fea_size)
                labels_val = labels_val[:, 0, :]
                pred_val = model.loop(inputs_val)
                loss_valid = loss_MSE(pred_val, labels_val)

            # No backward pass here: validation only scores the model.
            optimizer.zero_grad()
            evaluation = evaluate(pred_val, labels_val)

            valid_loss.append(loss_valid.item())
            valid_mape.append(evaluation[0])
            valid_rmse.append(evaluation[1])
            valid_mae.append(evaluation[2])
        s2 = time.time()
        log = 'Epoch: {:03d}, Inference Time: {:.4f} secs'
        print(log.format(epoch, (s2 - s1)))
        val_time.append(s2 - s1)
        mtrain_loss = np.mean(train_loss)
        mtrain_mape = np.mean(train_mape)
        mtrain_rmse = np.mean(train_rmse)
        mtrain_mae = np.mean(train_mae)

        mvalid_loss = np.mean(valid_loss)
        mvalid_mape = np.mean(valid_mape)
        mvalid_rmse = np.mean(valid_rmse)
        mvalid_mae = np.mean(valid_mae)
        his_loss.append(mvalid_loss)

        message = dict(train_loss=mtrain_loss, train_mape=mtrain_mape, train_rmse=mtrain_rmse,
                       valid_loss=mvalid_loss, valid_mape=mvalid_mape, valid_rmse=mvalid_rmse)
        message = pd.Series(message)
        record.append(message)
        # save model parameters when validation loss improves
        if message.valid_loss < best_mae:
            torch.save(model.state_dict(), best_path)
            best_mae = message.valid_loss
            epochs_since_best_mae = 0
            best_epoch = epoch
        else:
            epochs_since_best_mae += 1

        # Persist the running epoch record every epoch (cheap, crash-safe).
        record_df = pd.DataFrame(record)
        record_df.round(3).to_csv(f'{args.save}/record.csv')

        log = 'Epoch: {:03d}, Training Time: {:.4f}/epoch,\n' \
              'Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}, Train MAE: {:.4f}, \n' \
              'Valid Loss: {:.4f}, Valid MAPE: {:.4f}, Valid RMSE: {:.4f}, Valid MAE: {:.4f},'
        print(log.format(epoch, (t2 - t1),
                         mtrain_loss, mtrain_mape, mtrain_rmse, mtrain_mae,
                         mvalid_loss, mvalid_mape, mvalid_rmse, mvalid_mae), flush=True)
        print("#" * 20)

    print("=" * 10)
    print("Average Train Time: {:.4f} secs/epoch".format(np.mean(train_time)))
    print("Average Valid Time: {:.4f} secs".format(np.mean(val_time)))
    print("=" * 10)

    # Testing: reload the best checkpoint saved during training.
    bestid = np.argmin(his_loss)
    print("bestid: ", bestid)
    model.load_state_dict(torch.load(best_path))

    outputs = []
    target = torch.Tensor(dataloader['y_test']).to(device)
    if model_type == "GWaveNet":
        target = target.transpose(1, 3)[:, 0, :, :]
    if model_type == "STGCN":
        target = target[:, :, :, 0]
        target = target.transpose(1, 2)

    for iter, (x, y) in enumerate(dataloader['test_loader'].get_iterator()):
        testx = torch.Tensor(x).to(device)  # x: (64, 24, 207, 2)
        testy = torch.Tensor(y).to(device)  # x: (64, 24, 207, 2)

        if model_type == "GWaveNet":
            with torch.no_grad():
                testx = testx.transpose(1, 3)
                pred = model.forward(testx)
                pred = pred.transpose(1, 3)
            outputs.append(pred.squeeze())

        if model_type == "STGCN":
            with torch.no_grad():
                testx = testx.permute(0, 2, 1, 3)
                testy = testy[:, :, :, 0].permute(0, 2, 1)
                pred = model(A_wave, testx)     # (64, 207, 12)
            outputs.append(pred)

    yhat = torch.cat(outputs, dim=0)
    # Drop padded samples the loader may append to fill the last batch.
    yhat = yhat[:target.size(0), ...]
    amae, amape, armse, test_record = [], [], [], []
    print("=" * 10)
    print("yhat:", yhat.shape)      # yhat: torch.Size([6850, 207, 12])
    print("target:", target.shape)  # target: torch.Size([6850, 207, 12])
    for i in range(Y_size[-1]):
        pred = scaler.inverse_transform(yhat[:, :, i])
        real_target = target[:, :, i]
        evaluation = evaluate(pred, real_target)
        log = 'Evaluate on test data for horizon {:d}, Test MAPE: {:.4f}, Test RMSE: {:.4f}, Test MAE: {:.4f}'
        print(log.format(i + 1, evaluation[0], evaluation[1], evaluation[2]))
        amape.append(evaluation[0])
        armse.append(evaluation[1])
        amae.append(evaluation[2])
        test_record.append([x for x in evaluation])

    test_record_df = pd.DataFrame(test_record, columns=['mape', 'rmse', 'mae']).rename_axis('t')
    test_record_df.round(3).to_csv(f'{args.save}/test_record.csv')

    log = 'On average over 12 horizons, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
    print(log.format(np.mean(amae), np.mean(amape), np.mean(armse)))
    print("=" * 10)
Esempio n. 9
0
def run_demo(best_path, record_save_path, model_type):
    """Load a trained checkpoint and report per-horizon test metrics.

    Parameters
    ----------
    best_path : str
        Path of the ``state_dict`` checkpoint to evaluate.
    record_save_path : str
        Directory where ``test_record.csv`` is written.
    model_type : str
        One of "GWaveNet", "STGCN", "LSTM"; selects which model to build.
        Note the inference loop below follows the GWaveNet input layout.

    Removed in review: ``run_gconv``, ``lr_decay_rate``, ``best_mae``,
    the Adam optimizer and its LambdaLR scheduler -- all were created but
    never used in this evaluation-only routine.
    """
    print("============Begin Testing============")
    test_record_path = f'{record_save_path}/test_record.csv'
    dataloader = util.load_dataset(device, args.data_path, args.batch_size,
                                   args.batch_size, args.batch_size)
    # NOTE(review): the returned graph is unused here; the call is kept in
    # case add_nodes_edges has side effects -- confirm and drop if not.
    g_temp = util.add_nodes_edges(adj_filename=args.adj_path,
                                  num_of_vertices=args.num_nodes)
    scaler = dataloader['scaler']

    sensor_ids, sensor_id_to_ind, adj_mx = util.load_adj(
        args.adj_path_forbase, args.adjtype)
    supports = [torch.tensor(i).to(device) for i in adj_mx]

    _, _, A = util.load_pickle(args.adj_path_forbase)
    A_wave = util.get_normalized_adj(A)
    A_wave = torch.from_numpy(A_wave).to(device)

    if args.randomadj:
        adjinit = None
    else:
        adjinit = supports[0]
    if args.aptonly:
        supports = None

    if model_type == "GWaveNet":
        print("=========Model:GWaveNet=========")
        print("with scaler")
        model = GWNET(device,
                      args.num_nodes,
                      args.dropout,
                      supports=supports,
                      gcn_bool=args.gcn_bool,
                      addaptadj=args.addaptadj,
                      aptinit=adjinit,
                      in_dim=args.in_dim,
                      out_dim=args.seq_length,
                      residual_channels=args.nhid,
                      dilation_channels=args.nhid,
                      skip_channels=args.nhid * 8,
                      end_channels=args.nhid * 16)

    if model_type == "STGCN":
        print("=========Model:STGCN=========")
        print("with scaler")
        model = STGCN(A_wave.shape[0],
                      2,
                      num_timesteps_input=12,
                      num_timesteps_output=12)

    if model_type == "LSTM":
        print("=========Model:LSTM=========")
        input_dim = 2
        hidden_dim = 2
        output_dim = 2
        model = LSTM(input_dim, hidden_dim, output_dim)

    model.to(device)
    model.zero_grad()

    # Map GPU-trained weights onto the CPU when CUDA is unavailable.
    if torch.cuda.is_available():
        model.load_state_dict(torch.load(best_path))
    else:
        model.load_state_dict(torch.load(best_path, map_location='cpu'))

    outputs = []
    target = torch.Tensor(dataloader['y_test']).to(device)
    # Keep only feature channel 0 (the prediction target): (B, T, N).
    target = target[:, :, :, 0]
    print("201 y_test:", target.shape)

    for iter, (x, y) in enumerate(dataloader['test_loader'].get_iterator()):
        # (B, T, N, C) -> (B, C, N, T), then left-pad the time axis by one
        # step for the causal convolutions.
        testx = torch.Tensor(x).to(device).transpose(1, 3)
        testx = nn.functional.pad(testx, (1, 0, 0, 0))
        with torch.no_grad():
            pred = model.forward(testx).squeeze(3)
        print("iter: ", iter)
        print("pred: ", pred.shape)
        outputs.append(pred)

    yhat = torch.cat(outputs, dim=0)
    # Drop padded samples the loader may append to fill the last batch.
    yhat = yhat[:target.size(0), ...]
    test_record, amape, armse, amae = [], [], [], []

    # De-normalize once, then score each of the 12 forecast horizons.
    pred = scaler.inverse_transform(yhat)
    for i in range(12):
        pred_t = pred[:, i, :]
        real_target = target[:, i, :]
        evaluation = evaluate_all(pred_t, real_target)
        log = 'test for horizon {:d}, Test MAPE: {:.4f}, Test RMSE: {:.4f}, Test MAE: {:.4f}'
        print(log.format(i + 1, evaluation[0], evaluation[1], evaluation[2]))
        amape.append(evaluation[0])
        armse.append(evaluation[1])
        amae.append(evaluation[2])
        test_record.append([x for x in evaluation])
    test_record_df = pd.DataFrame(test_record,
                                  columns=['mape', 'rmse',
                                           'mae']).rename_axis('t')
    test_record_df.round(3).to_csv(test_record_path)
    log = 'On average over 12 horizons, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
    print(log.format(np.mean(amae), np.mean(amape), np.mean(armse)))
    print("=" * 10)
Esempio n. 10
0
def main():
    """Train an H-GCN-style model on clustered traffic data, then evaluate
    the best checkpoint (lowest validation loss) over 12 prediction horizons
    and dump predictions/attention maps to disk.

    Relies on module-level ``args`` (argparse namespace), ``util``,
    ``trainer6``/``trainer7``, and the usual numpy/torch/os/shutil/time imports.
    """
    #load data
    device = torch.device(args.device)
    # Node-level and cluster-level adjacency matrices share the same adjtype.
    sensor_ids, sensor_id_to_ind, adj_mx = util.load_adj(
        args.adjdata, args.adjtype)
    sensor_ids_cluster, sensor_id_to_ind_cluster, adj_mx_cluster = util.load_adj(
        args.adjdatacluster, args.adjtype)
    # Same batch size for train/val/test loaders.
    dataloader = util.load_dataset_cluster(args.data, args.batch_size,
                                           args.batch_size, args.batch_size)
    #scaler = dataloader['scaler']
    supports = [torch.tensor(i).to(device) for i in adj_mx]
    supports_cluster = [torch.tensor(i).to(device) for i in adj_mx_cluster]
    # Transmit matrix maps nodes to clusters; loaded from a CSV text file.
    transmit_np = np.float32(np.loadtxt(args.transmit, delimiter=','))
    transmit = torch.tensor(transmit_np).to(device)

    print(args)

    # Pick the trainer variant by model name; both share the same signature.
    if args.model == 'H_GCN':
        engine = trainer7(args.in_dim, args.in_dim_cluster, args.seq_length,
                          args.num_nodes, args.cluster_nodes, args.nhid,
                          args.dropout, args.learning_rate, args.weight_decay,
                          device, supports, supports_cluster, transmit,
                          args.decay)
    elif args.model == 'H_GCN_wdf':
        engine = trainer6(args.in_dim, args.in_dim_cluster, args.seq_length,
                          args.num_nodes, args.cluster_nodes, args.nhid,
                          args.dropout, args.learning_rate, args.weight_decay,
                          device, supports, supports_cluster, transmit,
                          args.decay)
    # NOTE: if args.model matches neither branch, `engine` is undefined and
    # the code below raises NameError.
    # check parameters file
    params_path = args.save + "/" + args.model
    if os.path.exists(params_path) and not args.force:
        raise SystemExit(
            "Params folder exists! Select a new params path please!")
    else:
        # --force: wipe any previous run's checkpoints before recreating.
        if os.path.exists(params_path):
            shutil.rmtree(params_path)
        os.makedirs(params_path)
        print('Create params directory %s' % (params_path))

    print("start training...", flush=True)
    his_loss = []
    val_time = []
    train_time = []
    for i in range(1, args.epochs + 1):
        train_loss = []
        train_mae = []
        train_mape = []
        train_rmse = []
        t1 = time.time()

        dataloader['train_loader_cluster'].shuffle()

        for iter, (x, y, x_cluster, y_cluster) in enumerate(
                dataloader['train_loader_cluster'].get_iterator()):

            # Batches arrive as numpy arrays; transpose(1, 3) swaps the
            # feature and time axes to the layout the model expects
            # (presumably (batch, feature, node, time) — TODO confirm).
            trainx = torch.Tensor(x).to(device)
            trainx = trainx.transpose(1, 3)
            trainy = torch.Tensor(y).to(device)
            trainy = trainy.transpose(1, 3)
            trainx_cluster = torch.Tensor(x_cluster).to(device)
            trainx_cluster = trainx_cluster.transpose(1, 3)
            trainy_cluster = torch.Tensor(y_cluster).to(device)
            trainy_cluster = trainy_cluster.transpose(1, 3)
            # Only feature 0 of the node-level target is supervised.
            metrics = engine.train(trainx, trainx_cluster, trainy[:, 0, :, :],
                                   trainy_cluster)
            # metrics = (loss, mae, mape, rmse)
            train_loss.append(metrics[0])
            train_mae.append(metrics[1])
            train_mape.append(metrics[2])
            train_rmse.append(metrics[3])

        #engine.scheduler.step()
        t2 = time.time()
        train_time.append(t2 - t1)
        #validation
        valid_loss = []
        valid_mae = []
        valid_mape = []
        valid_rmse = []

        s1 = time.time()

        for iter, (x, y, x_cluster, y_cluster) in enumerate(
                dataloader['val_loader_cluster'].get_iterator()):
            validx = torch.Tensor(x).to(device)
            validx = validx.transpose(1, 3)
            validy = torch.Tensor(y).to(device)
            validy = validy.transpose(1, 3)
            validx_cluster = torch.Tensor(x_cluster).to(device)
            validx_cluster = validx_cluster.transpose(1, 3)
            validy_cluster = torch.Tensor(y_cluster).to(device)
            validy_cluster = validy_cluster.transpose(1, 3)
            metrics = engine.eval(validx, validx_cluster, validy[:, 0, :, :],
                                  validy_cluster)
            valid_loss.append(metrics[0])
            valid_mae.append(metrics[1])
            valid_mape.append(metrics[2])
            valid_rmse.append(metrics[3])

        s2 = time.time()
        log = 'Epoch: {:03d}, Inference Time: {:.4f} secs'
        print(log.format(i, (s2 - s1)))
        val_time.append(s2 - s1)
        mtrain_loss = np.mean(train_loss)
        mtrain_mae = np.mean(train_mae)
        mtrain_mape = np.mean(train_mape)
        mtrain_rmse = np.mean(train_rmse)

        mvalid_loss = np.mean(valid_loss)
        mvalid_mae = np.mean(valid_mae)
        mvalid_mape = np.mean(valid_mape)
        mvalid_rmse = np.mean(valid_rmse)
        # Validation loss history drives best-checkpoint selection below.
        his_loss.append(mvalid_loss)

        log = 'Epoch: {:03d}, Train Loss: {:.4f}, Train MAE: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}, Valid Loss: {:.4f}, Valid MAE: {:.4f}, Valid MAPE: {:.4f}, Valid RMSE: {:.4f}, Training Time: {:.4f}/epoch'
        print(log.format(i, mtrain_loss, mtrain_mae, mtrain_mape, mtrain_rmse,
                         mvalid_loss, mvalid_mae, mvalid_mape, mvalid_rmse,
                         (t2 - t1)),
              flush=True)
        # Checkpoint filename embeds epoch AND rounded valid loss; the reload
        # below must reconstruct the exact same string.
        torch.save(
            engine.model.state_dict(), params_path + "/" + args.model +
            "_epoch_" + str(i) + "_" + str(round(mvalid_loss, 2)) + ".pth")
    print("Average Training Time: {:.4f} secs/epoch".format(
        np.mean(train_time)))
    print("Average Inference Time: {:.4f} secs".format(np.mean(val_time)))

    #testing
    # Reload the epoch with the lowest mean validation loss (epochs are
    # 1-based in the filename, hence bestid + 1).
    bestid = np.argmin(his_loss)
    engine.model.load_state_dict(
        torch.load(params_path + "/" + args.model + "_epoch_" +
                   str(bestid + 1) + "_" + str(round(his_loss[bestid], 2)) +
                   ".pth"))
    engine.model.eval()

    outputs = []
    realy = torch.Tensor(dataloader['y_test']).to(device)

    # Keep only feature 0 of the ground truth, matching the training target.
    realy = realy.transpose(1, 3)[:, 0, :, :]
    #print(realy.shape)
    for iter, (x, y, x_cluster, y_cluster) in enumerate(
            dataloader['test_loader_cluster'].get_iterator()):
        testx = torch.Tensor(x).to(device)
        testx = testx.transpose(1, 3)
        testx_cluster = torch.Tensor(x_cluster).to(device)
        testx_cluster = testx_cluster.transpose(1, 3)
        with torch.no_grad():
            preds, _, _ = engine.model(testx, testx_cluster)
            preds = preds.transpose(1, 3)
        outputs.append(preds.squeeze())
    # Second pass over the test loader solely to capture the attention map
    # and learned adjacency from the FIRST batch (note the break).
    for iter, (x, y, x_cluster, y_cluster) in enumerate(
            dataloader['test_loader_cluster'].get_iterator()):
        testx = torch.Tensor(x).to(device)
        testx = testx.transpose(1, 3)
        testx_cluster = torch.Tensor(x_cluster).to(device)
        testx_cluster = testx_cluster.transpose(1, 3)
        with torch.no_grad():
            _, spatial_at, parameter_adj = engine.model(testx, testx_cluster)
        break

    yhat = torch.cat(outputs, dim=0)
    # The last batch may be padded; trim predictions to the true test length.
    yhat = yhat[:realy.size(0), ...]

    #print(yhat.shape)
    print("Training finished")
    print("The valid loss on best model is", str(round(his_loss[bestid], 4)))

    amae = []
    amape = []
    armse = []
    prediction = yhat
    # Per-horizon metrics; note predictions are NOT inverse-transformed here
    # (the scaler use is commented out) — presumably the model outputs are
    # already in the target scale. TODO confirm.
    for i in range(12):
        pred = prediction[:, :, i]
        #pred = scaler.inverse_transform(yhat[:,:,i])
        #prediction.append(pred)
        real = realy[:, :, i]
        metrics = util.metric(pred, real)
        log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
        print(log.format(i + 1, metrics[0], metrics[1], metrics[2]))
        amae.append(metrics[0])
        amape.append(metrics[1])
        armse.append(metrics[2])

    log = 'On average over 12 horizons, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
    print(log.format(np.mean(amae), np.mean(amape), np.mean(armse)))
    # Persist the best weights again under a stable "_best_" name.
    torch.save(
        engine.model.state_dict(), params_path + "/" + args.model + "_exp" +
        str(args.expid) + "_best_" + str(round(his_loss[bestid], 2)) + ".pth")
    prediction_path = params_path + "/" + args.model + "_prediction_results"
    ground_truth = realy.cpu().detach().numpy()
    prediction = prediction.cpu().detach().numpy()
    spatial_at = spatial_at.cpu().detach().numpy()
    parameter_adj = parameter_adj.cpu().detach().numpy()
    np.savez_compressed(os.path.normpath(prediction_path),
                        prediction=prediction,
                        spatial_at=spatial_at,
                        parameter_adj=parameter_adj,
                        ground_truth=ground_truth)
# Esempio n. 11
# 0
def main():
    """Evaluate a pretrained gwnet (or eRGwnet) checkpoint on the test set.

    Loads adjacency + datasets per module-level ``args``, restores the
    checkpoint, reports per-horizon MAE/MAPE/RMSE, prints/plots loss
    summaries, optionally saves an adaptive-adjacency heatmap, and writes
    selected horizon traces to ``./wave.csv``.

    Fixes vs. the original: the average-metrics message previously
    hard-coded "12 horizons" even though the loop runs ``args.seq_length``
    horizons; and ``adp.to(device)`` discarded its result (tensor ``.to`` is
    not in-place) — the redundant no-op is removed since ``.cpu()`` follows.
    """
    device = torch.device(args.device)

    _, _, adj_mx = util.load_adj(args.adjdata, args.adjtype)
    supports = [torch.tensor(i).to(device) for i in adj_mx]
    # Adaptive adjacency is either randomly initialised or seeded with the
    # first support matrix.
    if args.randomadj:
        adjinit = None
    else:
        adjinit = supports[0]

    if args.aptonly:
        supports = None

    # Sequence length of the error-recurrent wrapper's history window.
    eR_seq_size = 24
    # First load: fit/obtain the scaler on the training suffix...
    dataloader = util.load_dataset(args.data,
                                   args.batch_size,
                                   args.batch_size,
                                   args.batch_size,
                                   eRec=args.eRec,
                                   eR_seq_size=eR_seq_size,
                                   suffix=args.suffix_train)
    scaler = dataloader['scaler']

    # ...then reload the evaluation suffix reusing that same scaler so the
    # normalisation matches training.
    dataloader = util.load_dataset(args.data,
                                   args.batch_size,
                                   args.batch_size,
                                   args.batch_size,
                                   eRec=args.eRec,
                                   eR_seq_size=eR_seq_size,
                                   suffix=args.suffix,
                                   scaler=scaler)

    blocks = int(dataloader[f'x_test{args.suffix}'].shape[1] /
                 3)  # Every block reduce the input sequence size by 3.
    print(f'blocks = {blocks}')

    if args.eRec:
        # Error-recurrent variant additionally feeds back prediction errors.
        error_size = 6
        model = eRGwnet(device,
                        args.num_nodes,
                        args.dropout,
                        supports=supports,
                        gcn_bool=args.gcn_bool,
                        addaptadj=args.addaptadj,
                        adjinit=adjinit,
                        in_dim=args.in_dim,
                        out_dim=args.seq_length,
                        residual_channels=args.nhid,
                        dilation_channels=args.nhid,
                        skip_channels=args.nhid * 8,
                        end_channels=args.nhid * 16,
                        blocks=blocks,
                        error_size=error_size)
    else:
        model = gwnet(device,
                      args.num_nodes,
                      args.dropout,
                      supports=supports,
                      gcn_bool=args.gcn_bool,
                      addaptadj=args.addaptadj,
                      adjinit=adjinit,
                      in_dim=args.in_dim,
                      out_dim=args.seq_length,
                      residual_channels=args.nhid,
                      dilation_channels=args.nhid,
                      skip_channels=args.nhid * 8,
                      end_channels=args.nhid * 16,
                      blocks=blocks)
    model.to(device)
    # map_location lets a CUDA-trained checkpoint load on CPU and vice versa.
    model.load_state_dict(
        torch.load(args.checkpoint, map_location=torch.device(device)))
    model.eval()

    print('model load successfully')
    outputs = []
    realy = torch.Tensor(dataloader[f'y_test{args.suffix}']).to(device)
    if args.eRec:
        # Keep only the last step of the error-recurrence dimension.
        realy = realy.transpose(0, 1)[-1, :, :, :, :]
    # Keep feature 0 only; layout presumably (sample, node, horizon) after
    # this — TODO confirm against util.load_dataset.
    realy = realy.transpose(1, 3)[:, 0, :, :]
    for iter, (x, y) in enumerate(dataloader['test_loader'].get_iterator()):
        testx = torch.Tensor(x).to(device)
        testy = torch.Tensor(y).to(device)
        if args.eRec:
            testx = testx.transpose(0, 1)
            testy = testy.transpose(0, 1)
        testx = testx.transpose(-3, -1)
        testy = testy.transpose(-3, -1)
        # print(f'testx.shape = {testx.shape}')
        with torch.no_grad():
            if args.eRec:
                # eRGwnet needs the target's feature-0 channel and the scaler
                # to compute the recurrent error signal.
                preds = model(testx, testy[:, :, 0:1, :, :],
                              scaler).transpose(1, 3)
            else:
                preds = model(testx).transpose(1, 3)
        outputs.append(preds.squeeze())

    yhat = torch.cat(outputs, dim=0)
    print(f'yhat before shape = {yhat.shape}')
    # Trim padding from the final (possibly partial) batch.
    yhat = yhat[:realy.size(0), ...]
    print(f'yhat shape = {yhat.shape}')

    amae = []
    amape = []
    armse = []
    for i in range(args.seq_length):
        pred = scaler.inverse_transform(yhat[:, :, i])
        real = realy[:, :, i]
        metrics = util.metric(pred, real)
        log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
        print(log.format(i + 1, metrics[0], metrics[1], metrics[2]))
        amae.append(metrics[0])
        amape.append(metrics[1])
        armse.append(metrics[2])

    # Report the actual number of horizons evaluated (was hard-coded "12").
    log = 'On average over {:d} horizons, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
    print(log.format(args.seq_length, np.mean(amae), np.mean(amape),
                     np.mean(armse)))

    # Per-element losses, kept unreduced for the plotting helpers below.
    mse = nn.MSELoss(reduction='none')
    mae = nn.L1Loss(reduction='none')
    pred = scaler.inverse_transform(yhat)
    loss_mse = mse(pred, realy).transpose(1, 2)
    loss_mae = mae(pred, realy).transpose(1, 2)
    print(f'loss_mae shape = {loss_mae.shape}')
    if not args.eRec:
        model_name = f'gwnet{args.suffix}'
    else:
        model_name = f'eRgwnet{args.suffix}'
    print_results(model_name, loss_mse, loss_mae)
    plot_results(model_name, loss_mae, detector=1)

    if args.plotheatmap == "True":
        # Learned adaptive adjacency, row-normalised for visualisation.
        adp = F.softmax(F.relu(torch.mm(model.nodevec1, model.nodevec2)),
                        dim=1)
        adp = adp.cpu().detach().numpy()
        adp = adp * (1 / np.max(adp))
        df = pd.DataFrame(adp)
        sns.heatmap(df, cmap="RdYlBu")
        plt.savefig("./emb" + '.pdf')

    # Dump real vs. predicted traces for the last node at horizons 12 and 3.
    y12 = realy[:, -1, -1].cpu().detach().numpy()
    yhat12 = scaler.inverse_transform(yhat[:, -1, -1]).cpu().detach().numpy()

    y3 = realy[:, -1, 2].cpu().detach().numpy()
    yhat3 = scaler.inverse_transform(yhat[:, -1, 2]).cpu().detach().numpy()

    df2 = pd.DataFrame({
        'real12': y12,
        'pred12': yhat12,
        'real3': y3,
        'pred3': yhat3
    })
    df2.to_csv('./wave.csv', index=False)
# Esempio n. 12
# 0
def main(args, **model_kwargs):
    """Build one of Static/LSTM/Graph-WaveNet models from ``args``, load an
    optional checkpoint, and run visualisation + per-horizon evaluation on
    the lazily-loaded test set.

    ``model_kwargs`` is forwarded to the StaticNet/LSTMNet factory methods.
    """
    # Train on subset of sensors (faster for isolated pred)
    # incl_sensors = list(range(207)) #[17, 111, 12, 80, 200]
    # args.num_sensors = len(incl_sensors)
    device = torch.device(args.device)
    # WARN Careful! Graph wavenet has been trained without fill zeroes in its scalar
    data = util.lazy_load_dataset(args.data,
                                  args.batch_size,
                                  args.batch_size,
                                  args.batch_size,
                                  n_obs=args.n_obs,
                                  fill_zeroes=args.fill_zeroes)
    scaler = data['scaler']
    # Graph inputs default to empty; only the gwnet branch rebuilds them.
    supports = []
    aptinit = 0
    # aptinit, supports = util.make_graph_inputs(args, device)

    # Length of the prediction
    args.seq_length = data['y_val'].shape[1]
    args.num_sensors = data['x_val'].shape[2]
    if args.static:
        print('Selected static prediction')
        model = StaticNet.from_args(args, device, supports, aptinit,
                                    **model_kwargs)
    elif args.lstm:
        print('Selected LSTM-FC model')
        # NOTE(review): these overrides clobber whatever the user passed on
        # the command line for nhid/weight_decay/learning_rate.
        args.nhid = 256
        args.weight_decay = 0.0005
        args.learning_rate = 0.001
        model = LSTMNet.from_args(args, device, supports, aptinit,
                                  **model_kwargs)
    else:
        print('Selected Graph Wavenet model')
        # Params: ---graph_wavenet --data data/METR-LA --checkpoint pretrained/graph_wavenet_repr.pth --nhid 32 --do_graph_conv --addaptadj --device cuda:0 --in_dim=2 --save experiment
        sensor_ids, sensor_id_to_ind, adj_mx = util.load_adj(
            args.adjdata, args.adjtype)
        supports = [torch.tensor(i).to(device) for i in adj_mx]

        # Adaptive adjacency: random init, or seeded with the first support.
        if args.randomadj:
            adjinit = None
        else:
            adjinit = supports[0]

        if args.aptonly:
            supports = None

        model = gwnet(device,
                      num_nodes=args.num_nodes,
                      dropout=args.dropout,
                      supports=supports,
                      gcn_bool=args.do_graph_conv,
                      addaptadj=args.addaptadj,
                      aptinit=adjinit,
                      in_dim=args.in_dim,
                      out_dim=args.seq_length,
                      residual_channels=args.nhid,
                      dilation_channels=args.nhid,
                      skip_channels=args.nhid * 8,
                      end_channels=args.nhid * 16)

    print(args)

    # Checkpoint is optional; without one the model evaluates untrained.
    if args.checkpoint:
        model.load_checkpoint(torch.load(args.checkpoint))
    model.to(device)
    model.eval()
    print(scaler)

    # Only the speeds?
    # Feature 0 of y_test after the transpose — presumably the speed
    # channel; layout (sample, node, horizon). TODO confirm.
    realy = torch.Tensor(data['y_test']).transpose(1, 3)[:, 0, :, :].to(device)
    print('visualising frames')
    visualise_metrics(model, device, data['test_loader'], scaler, realy,
                      args.save)
    evaluate_multiple_horizon(model, device, data, args.seq_length)