Code example #1
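A minimal import sketch for this launcher script; the standard-library and torch imports are required by the code below, while the module paths for the project-specific symbols (cfg, mkdir, setup_logger, trainer, tester) are assumptions, not confirmed by the source:

import os
import argparse
import torch

# Project-specific imports (assumed paths; adjust to the actual repo layout):
# from config import cfg
# from utils import mkdir, setup_logger
# from engine import trainer, tester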
def main():
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    parser = argparse.ArgumentParser(
        description="PyTorch Query Localization in Videos Training")
    parser.add_argument(
        "--config-file",
        default="experiments/charades_sta_train.yaml",
        # default="experiments/anet_cap_train.yaml",
        # default="experiments/tacos_train.yaml",
        metavar="FILE",
        help="path to config file",
        type=str,
    )
    args = parser.parse_args()

    experiment_name = args.config_file.split("/")[-1]
    log_directory = args.config_file.replace(experiment_name, "logs/")
    vis_directory = args.config_file.replace(experiment_name, "visualization/")
    experiment_name = experiment_name.replace(".yaml", "")
    cfg.merge_from_list([
        'EXPERIMENT_NAME', experiment_name, 'LOG_DIRECTORY', log_directory,
        "VISUALIZATION_DIRECTORY", vis_directory
    ])
    cfg.merge_from_file(args.config_file)

    output_dir = "./{}".format(cfg.LOG_DIRECTORY)

    if output_dir:
        mkdir(output_dir)
    mkdir("./checkpoints/{}".format(cfg.EXPERIMENT_NAME))

    logger = setup_logger("mlnlp", output_dir, cfg.EXPERIMENT_NAME + ".txt", 0)
    logger.info("Starting moment localization with dynamic filters")
    logger.info(cfg.EXPERIMENT_NAME)

    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    # torch.backends.cudnn.enabled = False
    if cfg.ENGINE_STAGE == "TRAINER":
        print('#######')
        print(cfg.DYNAMIC_FILTER.LSTM_VIDEO.DROPOUT)
        trainer(cfg)
    elif cfg.ENGINE_STAGE == "TESTER":
        tester(cfg)
Code example #2
def main():
    parser = argparse.ArgumentParser(description="PyTorch Object Detection Training")
    parser.add_argument(
        "--config-file",
        default="",
        metavar="FILE",
        help="path to config file",
        type=str,
    )
    args = parser.parse_args()

    experiment_name = args.config_file.split("/")[-1]
    log_directory   = args.config_file.replace(experiment_name,"logs/")
    vis_directory   = args.config_file.replace(experiment_name,"visualization/")
    experiment_name = experiment_name.replace(".yaml","")
    cfg.merge_from_list(['EXPERIMENT_NAME', experiment_name, 'LOG_DIRECTORY', log_directory, "VISUALIZATION_DIRECTORY", vis_directory])
    cfg.merge_from_file(args.config_file)

    output_dir = "./{}".format(cfg.LOG_DIRECTORY)

    if output_dir:
        mkdir(output_dir)
    mkdir("./checkpoints/{}".format(cfg.EXPERIMENT_NAME))

    logger = setup_logger("mlnlp", output_dir, cfg.EXPERIMENT_NAME + ".txt", 0)
    logger.info("Starting moment localization with dynamic filters")
    logger.info(cfg.EXPERIMENT_NAME)

    # reproducibility
    np.random.seed(0)
    torch.manual_seed(0)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    if cfg.ENGINE_STAGE == "TRAINER":
        trainer(cfg)
    elif cfg.ENGINE_STAGE == "TESTER":
        tester(cfg)
Code example #3
def main():

    #load data
    device = torch.device(args.device)
    # sensor_ids, sensor_id_to_ind, adj_mx = util.load_adj(args.adjdata,args.adjtype)
    # supports = [torch.tensor(i).to(device) for i in adj_mx]

    adjinit = None
    supports = None
    engine = trainer(args.in_dim, args.seq_length, args.num_nodes, args.nhid, args.dropout,
                         args.learning_rate, args.weight_decay, device, supports, args.gcn_bool, args.addaptadj,
                         adjinit)
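    # map_location remaps tensors saved on cuda:1 so the checkpoint loads onto cuda:0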
    engine.model.load_state_dict(torch.load(args.load, map_location={'cuda:1': 'cuda:0'}))
    # engine.model.load_state_dict(torch.load(args.load))
    print("start testing...", flush=True)

    val_time = []

    data = dataLoader(bs=args.batch_size, sl=args.seq_length, fd_number=args.fd_number)

    #test
    test_loss = []
    test_asf = []
    test_rmse = []

    s1 = time.time()
    for iter in range(data.test_batches):
        testx, testy = data.testLoader.nextBatch()
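        # add a channel axis, then permute to (batch, channel, node, time) as the model expects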
        testx = testx[:, :, :, np.newaxis]
        testx = torch.Tensor(testx).to(device)
        testx = testx.permute((0, 3, 2, 1))
        testy = torch.Tensor(testy).to(device)
        metrics = engine.eval(testx, testy)
        test_loss.append(metrics[0])
        test_rmse.append(metrics[1])
        test_asf.append(metrics[2])
    s2 = time.time()
    val_time.append(s2-s1)


    test_loss = np.mean(test_loss)
    test_rmse = np.mean(test_rmse)
    test_asf = np.mean(test_asf)


    log = 'Test Loss: {:.4f}, Test RMSE: {:.4f}, Test ASF: {:.4f}, Inference Time: {:.4f} secs'
    print(log.format(test_loss, test_rmse, test_asf, (s2 - s1)), flush=True)
Code example #4
def build_model(args):
    device = torch.device(args.device)
    adj_mx = load_adj(args.adjdata, args.adjtype)
    dataloader = load_dataset(args.data, args.batch_size, args.batch_size,
                              args.batch_size)
    scaler = dataloader['scaler']
    supports = [torch.tensor(i).to(device) for i in adj_mx]

    print(args)

    if args.randomadj:
        adjinit = None
    else:
        adjinit = supports[0]

    if args.aptonly:
        supports = None

    engine = trainer(scaler, args.in_dim, args.seq_length, args.num_nodes,
                     args.nhid, args.dropout, args.learning_rate,
                     args.weight_decay, device, supports, args.gcn_bool,
                     args.addaptadj, adjinit)
    return engine, scaler, dataloader, adj_mx
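A hypothetical usage sketch, not part of the source; args is the argparse namespace these scripts normally build at module level:

engine, scaler, dataloader, adj_mx = build_model(args)
# engine.model is the forecasting network; scaler is the same object as dataloader['scaler']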
Code example #5
def test_sample(sample_num):
    device = torch.device(args.device)
    # sensor_ids, sensor_id_to_ind, adj_mx = util.load_adj(args.adjdata,args.adjtype)
    # supports = [torch.tensor(i).to(device) for i in adj_mx]
    print(args)
    adjinit = None
    supports = None
    engine = trainer(args.in_dim, args.seq_length, args.num_nodes, args.nhid, args.dropout,
                         args.learning_rate, args.weight_decay, device, supports, args.gcn_bool, args.addaptadj,
                         adjinit)
    engine.model.load_state_dict(torch.load(args.load, map_location={'cuda:1': 'cuda:0'}))
    data = dataLoader(bs=args.batch_size, sl=args.seq_length, fd_number=args.fd_number)
    test_feature, test_label = data.get_one_piece(sample_num)


    test_feature_b = np.reshape(test_feature, [len(test_feature), args.seq_length, 18])
    test_feature_b = test_feature_b[:, :, :, np.newaxis]
    test_feature_b = torch.Tensor(test_feature_b).to(device)
    test_feature_b = test_feature_b.permute((0, 3, 2, 1))
    result = engine.eval2(test_feature_b)
    # bc = len(test_feature)//args.batch_size
    # result = []
    # for i in range(bc):
    #     test_feature_b =  np.reshape(test_feature[bc*args.batch_size, (bc+1)*args.batch_size], [args.batch_size, args.seq_length, -1])
    #     result = result +  engine.model.eval2(test_feature_b)
    leng = len(result)
    plt.plot(range(leng), result, marker='|', color='coral', linewidth=1.0, linestyle='--', label='Prediction')
    plt.plot(range(leng), test_label, linestyle='-', label='Label')

    plt.xlabel("Time(Cycle)")
    plt.ylabel("RUL(Cycle)")
    plt.title(r"RUL prediction sample")
    plt.legend()
    # plt.savefig(path)
    plt.show()
Code example #6
def main():
    #set seed
    #torch.manual_seed(args.seed)
    #np.random.seed(args.seed)
    #load data
    device = torch.device(args.device)
    sensor_ids, sensor_id_to_ind, adj_mx = util.load_adj(args.adjdata,args.adjtype)
    dataloader = util.load_dataset(args.data, args.batch_size, args.batch_size, args.batch_size)
    scaler = dataloader['scaler']
    supports = [torch.tensor(i).to(device) for i in adj_mx]

    print(args)

    if args.randomadj:
        adjinit = None
    else:
        adjinit = supports[0]

    if args.aptonly:
        supports = None



    engine = trainer(scaler, args.in_dim, args.seq_length, args.num_nodes, args.nhid, args.dropout,
                         args.learning_rate, args.weight_decay, device, supports, args.gcn_bool, args.addaptadj,
                         adjinit)


    print("start training...",flush=True)
    his_loss =[]
    val_time = []
    train_time = []
    for i in range(1,args.epochs+1):
        #if i % 10 == 0:
            #lr = max(0.000002,args.learning_rate * (0.1 ** (i // 10)))
            #for g in engine.optimizer.param_groups:
                #g['lr'] = lr
        train_loss = []
        train_mape = []
        train_rmse = []
        t1 = time.time()
        dataloader['train_loader'].shuffle()
        for iter, (x, y) in enumerate(dataloader['train_loader'].get_iterator()):
            trainx = torch.Tensor(x).to(device)
        trainx = trainx.transpose(1, 3)
            trainy = torch.Tensor(y).to(device)
            trainy = trainy.transpose(1, 3)
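            # y is (batch, feature, node, time) after the transpose; train on feature channel 0 only (e.g., speed)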
            metrics = engine.train(trainx, trainy[:,0,:,:])
            train_loss.append(metrics[0])
            train_mape.append(metrics[1])
            train_rmse.append(metrics[2])
            if iter % args.print_every == 0:
                log = 'Iter: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}'
                print(log.format(iter, train_loss[-1], train_mape[-1], train_rmse[-1]), flush=True)
        t2 = time.time()
        train_time.append(t2-t1)
        #validation
        valid_loss = []
        valid_mape = []
        valid_rmse = []


        s1 = time.time()
        for iter, (x, y) in enumerate(dataloader['val_loader'].get_iterator()):
            testx = torch.Tensor(x).to(device)
            testx = testx.transpose(1, 3)
            testy = torch.Tensor(y).to(device)
            testy = testy.transpose(1, 3)
            metrics = engine.eval(testx, testy[:,0,:,:])
            valid_loss.append(metrics[0])
            valid_mape.append(metrics[1])
            valid_rmse.append(metrics[2])
        s2 = time.time()
        log = 'Epoch: {:03d}, Inference Time: {:.4f} secs'
        print(log.format(i, (s2 - s1)))
        val_time.append(s2-s1)
        mtrain_loss = np.mean(train_loss)
        mtrain_mape = np.mean(train_mape)
        mtrain_rmse = np.mean(train_rmse)

        mvalid_loss = np.mean(valid_loss)
        mvalid_mape = np.mean(valid_mape)
        mvalid_rmse = np.mean(valid_rmse)
        his_loss.append(mvalid_loss)

        log = 'Epoch: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}, Valid Loss: {:.4f}, Valid MAPE: {:.4f}, Valid RMSE: {:.4f}, Training Time: {:.4f}/epoch'
        print(log.format(i, mtrain_loss, mtrain_mape, mtrain_rmse, mvalid_loss, mvalid_mape, mvalid_rmse, (t2 - t1)), flush=True)
        torch.save(engine.model.state_dict(), args.save+"_epoch_"+str(i)+"_"+str(round(mvalid_loss,2))+".pth")
    print("Average Training Time: {:.4f} secs/epoch".format(np.mean(train_time)))
    print("Average Inference Time: {:.4f} secs".format(np.mean(val_time)))

    #testing
    bestid = np.argmin(his_loss)
    engine.model.load_state_dict(torch.load(args.save+"_epoch_"+str(bestid+1)+"_"+str(round(his_loss[bestid],2))+".pth"))


    outputs = []
    realy = torch.Tensor(dataloader['y_test']).to(device)
    realy = realy.transpose(1,3)[:,0,:,:]
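    # realy is now (samples, nodes, horizons); the loop further below scores each of the 12 horizons separately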

    for iter, (x, y) in enumerate(dataloader['test_loader'].get_iterator()):
        testx = torch.Tensor(x).to(device)
        testx = testx.transpose(1,3)
        with torch.no_grad():
            preds = engine.model(testx).transpose(1,3)
        outputs.append(preds.squeeze())

    yhat = torch.cat(outputs,dim=0)
    yhat = yhat[:realy.size(0),...]


    print("Training finished")
    print("The valid loss on best model is", str(round(his_loss[bestid],4)))


    amae = []
    amape = []
    armse = []
    for i in range(12):
        pred = scaler.inverse_transform(yhat[:,:,i])
        real = realy[:,:,i]
        metrics = util.metric(pred,real)
        log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
        print(log.format(i+1, metrics[0], metrics[1], metrics[2]))
        amae.append(metrics[0])
        amape.append(metrics[1])
        armse.append(metrics[2])

    log = 'On average over 12 horizons, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
    print(log.format(np.mean(amae),np.mean(amape),np.mean(armse)))
    torch.save(engine.model.state_dict(), args.save+"_exp"+str(args.expid)+"_best_"+str(round(his_loss[bestid],2))+".pth")
Code example #7
def main():
    if not os.path.exists(args.save):
        os.makedirs(args.save)
    #load data
    device = torch.device(args.device)
    # sensor_ids, sensor_id_to_ind, adj_mx = util.load_adj(args.adjdata,args.adjtype)
    # supports = [torch.tensor(i).to(device) for i in adj_mx]
    print(args)
    adjinit = None
    supports = None
    engine = trainer(args.in_dim, args.seq_length, args.num_nodes, args.nhid,
                     args.dropout, args.learning_rate, args.weight_decay,
                     device, supports, args.gcn_bool, args.addaptadj, adjinit)
    print("start training...", flush=True)
    train_his_loss = []
    his_loss = []
    val_time = []
    train_time = []
    data = dataLoader(bs=args.batch_size,
                      sl=args.seq_length,
                      fd_number=args.fd_number)

    for i in range(1, args.epochs + 1):
        #if i % 10 == 0:
        #lr = max(0.000002,args.learning_rate * (0.1 ** (i // 10)))
        #for g in engine.optimizer.param_groups:
        #g['lr'] = lr
        train_loss = []
        train_asf = []
        train_rmse = []

        t1 = time.time()
        for iter in range(data.train_batches):
            trainx, trainy = data.trainLoader.nextBatch()
            trainx = trainx[:, :, :, np.newaxis]
            trainx = torch.Tensor(trainx).to(device)
            trainx = trainx.permute((0, 3, 2, 1))
            trainy = torch.Tensor(trainy).to(device)
            metrics = engine.train(trainx, trainy)
            # mc = engine.eval(trainx,trainy)
            train_loss.append(metrics[0])
            train_asf.append(metrics[2])
            train_rmse.append(metrics[1])
            if iter % args.print_every == 0:
                log = 'Iter: {:03d}, Train Loss: {:.4f}, Train RMSE: {:.4f}, Train ASF: {:.4f}'
                print(log.format(iter, train_loss[-1], train_rmse[-1],
                                 train_asf[-1]),
                      flush=True)

        t2 = time.time()
        train_time.append(t2 - t1)
        #validation
        valid_loss = []
        valid_asf = []
        valid_rmse = []

        s1 = time.time()
        for iter in range(data.test_batches):
            testx, testy = data.testLoader.nextBatch()
            testx = testx[:, :, :, np.newaxis]
            testx = torch.Tensor(testx).to(device)
            testx = testx.permute((0, 3, 2, 1))
            testy = torch.Tensor(testy).to(device)
            metrics = engine.eval(testx, testy)
            valid_loss.append(metrics[0])
            valid_rmse.append(metrics[1])
            valid_asf.append(metrics[2])
        s2 = time.time()
        log = 'Epoch: {:03d}, Inference Time: {:.4f} secs'
        print(log.format(i, (s2 - s1)))
        val_time.append(s2 - s1)
        mtrain_loss = np.mean(train_loss)
        mtrain_rmse = np.mean(train_rmse)
        mtrain_asf = np.mean(train_asf)

        mvalid_loss = np.mean(valid_loss)
        mvalid_rmse = np.mean(valid_rmse)
        mvalid_asf = np.mean(valid_asf)
        his_loss.append(mvalid_loss)
        train_his_loss.append(train_loss)
        log = 'Epoch: {:03d}, Train Loss: {:.4f}, Train RMSE: {:.4f}, Train ASF: {:.4f}, Valid Loss: {:.4f}, Valid RMSE: {:.4f}, Valid ASF: {:.4f}, Training Time: {:.4f}/epoch'
        print(log.format(i, mtrain_loss, mtrain_rmse, mtrain_asf, mvalid_loss,
                         mvalid_rmse, mvalid_asf, (t2 - t1)),
              flush=True)
        torch.save(
            engine.model.state_dict(), args.save + "epoch_" + str(i) + "_" +
            str(round(mvalid_loss, 2)) + ".pth")
        for k in range(5):
            test_sample(k + 2, engine, data, device,
                        args.save + "epoch_" + str(i) + '_' + str(k))
    print("Average Training Time: {:.4f} secs/epoch".format(
        np.mean(train_time)))
    print("Average Inference Time: {:.4f} secs".format(np.mean(val_time)))
    np.save(args.save + "valiloss.npy", his_loss)
    np.save(args.save + "trainloss.npy", train_his_loss)
Code example #8
def main():
    #set seed
    #torch.manual_seed(args.seed)
    #np.random.seed(args.seed)
    #load data

    frequencies = np.array([
        8.176, 8.662, 9.177, 9.723, 10.301, 10.913, 11.562, 12.250, 12.978,
        13.750, 14.568, 15.434, 16.352, 17.324, 18.354, 19.445, 20.601, 21.826,
        23.124, 24.499, 25.956, 27.500, 29.135, 30.867, 32.703, 34.648, 36.708,
        38.890, 41.203, 43.653, 46.249, 48.999, 51.913, 55.000, 58.270, 61.735,
        65.406, 69.295, 73.416, 77.781, 82.406, 87.307, 92.499, 97.998, 103.82,
        110.00, 116.54, 123.47, 130.81, 138.59, 146.83, 155.56, 164.81, 174.61,
        184.99, 195.99, 207.65, 220.00, 233.08, 246.94, 261.63, 277.18, 293.66,
        311.13, 329.63, 349.23, 369.99, 391.99, 415.31, 440.00, 466.16, 493.88,
        523.25, 554.37, 587.33, 622.25, 659.26, 698.46, 739.99, 783.99, 830.61,
        880.00, 932.32, 987.77, 1046.5, 1108.7, 1174.7, 1244.5, 1318.5, 1396.9,
        1480.0, 1568.0, 1661.2, 1760.0, 1864.7, 1975.5, 2093.0, 2217.5, 2349.3,
        2489.0, 2637.0, 2793.8, 2960.0, 3136.0, 3322.4, 3520.0, 3729.3, 3951.1,
        4186.0, 4434.9, 4698.6, 4978.0, 5274.0, 5587.7, 5919.9, 6271.9, 6644.9,
        7040.0, 7458.6, 7902.1, 8372.0, 8869.8, 9397.3, 9956.1, 10548.1,
        11175.3, 11839.8, 12543.9
    ])
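    # Pairwise-difference adjacency over the 128 MIDI note frequencies: entry [r, c] = frequencies[c] - frequencies[r]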
    piano_adj = np.zeros((128, 128))
    for row in range(128):
        piano_adj[row] = frequencies - frequencies[row]
    print(piano_adj[10:20, 10:20])

    device = torch.device(args.device)
    adj_mx = util.load_piano_adj(piano_adj, args.adjtype)
    dataloader = util.load_dataset(args.data, args.batch_size, args.batch_size,
                                   args.batch_size)
    scaler = dataloader['scaler']
    supports = [torch.tensor(i).to(device) for i in adj_mx]

    print(type(adj_mx))
    print(len(adj_mx))
    for elem in adj_mx:
        print(type(elem))
        print(elem[10:20, 10:20])
        print(elem.shape)
    print(args)


    if args.randomadj:
        adjinit = None
    else:
        adjinit = supports[0]

    if args.aptonly:
        supports = None


    engine = trainer(scaler, args.in_dim, args.seq_length, args.num_nodes,
                     args.nhid, args.dropout, args.learning_rate,
                     args.weight_decay, device, supports, args.gcn_bool,
                     args.addaptadj, adjinit)

    print("start training...", flush=True)
    his_loss = []
    val_time = []
    train_time = []
    for i in range(1, args.epochs + 1):
        #if i % 10 == 0:
        #lr = max(0.000002,args.learning_rate * (0.1 ** (i // 10)))
        #for g in engine.optimizer.param_groups:
        #g['lr'] = lr
        train_loss = []
        train_mape = []
        train_rmse = []
        t1 = time.time()
        dataloader['train_loader'].shuffle()
        for iter, (x,
                   y) in enumerate(dataloader['train_loader'].get_iterator()):
            trainx = torch.Tensor(x).to(device)
            trainx = trainx.transpose(1, 3)
            trainy = torch.Tensor(y).to(device)
            trainy = trainy.transpose(1, 3)
            metrics = engine.train(trainx, trainy[:, 0, :, :])
            train_loss.append(metrics[0])
            train_mape.append(metrics[1])
            train_rmse.append(metrics[2])
            if iter % args.print_every == 0:
                log = 'Iter: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}'
                print(log.format(iter, train_loss[-1], train_mape[-1],
                                 train_rmse[-1]),
                      flush=True)
        t2 = time.time()
        train_time.append(t2 - t1)
        #validation
        valid_loss = []
        valid_mape = []
        valid_rmse = []

        s1 = time.time()
        for iter, (x, y) in enumerate(dataloader['val_loader'].get_iterator()):
            testx = torch.Tensor(x).to(device)
            testx = testx.transpose(1, 3)
            testy = torch.Tensor(y).to(device)
            testy = testy.transpose(1, 3)
            metrics = engine.eval(testx, testy[:, 0, :, :])
            valid_loss.append(metrics[0])
            valid_mape.append(metrics[1])
            valid_rmse.append(metrics[2])
        s2 = time.time()
        log = 'Epoch: {:03d}, Inference Time: {:.4f} secs'
        print(log.format(i, (s2 - s1)))
        val_time.append(s2 - s1)
        mtrain_loss = np.mean(train_loss)
        mtrain_mape = np.mean(train_mape)
        mtrain_rmse = np.mean(train_rmse)

        mvalid_loss = np.mean(valid_loss)
        mvalid_mape = np.mean(valid_mape)
        mvalid_rmse = np.mean(valid_rmse)
        his_loss.append(mvalid_loss)

        log = 'Epoch: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}, Valid Loss: {:.4f}, Valid MAPE: {:.4f}, Valid RMSE: {:.4f}, Training Time: {:.4f}/epoch'
        print(log.format(i, mtrain_loss, mtrain_mape, mtrain_rmse, mvalid_loss,
                         mvalid_mape, mvalid_rmse, (t2 - t1)),
              flush=True)
        torch.save(
            engine.model.state_dict(), args.save + "_epoch_" + str(i) + "_" +
            str(round(mvalid_loss, 2)) + ".pth")
    print("Average Training Time: {:.4f} secs/epoch".format(
        np.mean(train_time)))
    print("Average Inference Time: {:.4f} secs".format(np.mean(val_time)))

    #testing
    bestid = np.argmin(his_loss)
    engine.model.load_state_dict(
        torch.load(args.save + "_epoch_" + str(bestid + 1) + "_" +
                   str(round(his_loss[bestid], 2)) + ".pth"))

    outputs = []
    realy = torch.Tensor(dataloader['y_test']).to(device)
    realy = realy.transpose(1, 3)[:, 0, :, :]

    for iter, (x, y) in enumerate(dataloader['test_loader'].get_iterator()):
        testx = torch.Tensor(x).to(device)
        testx = testx.transpose(1, 3)
        with torch.no_grad():
            preds = engine.model(testx).transpose(1, 3)
        outputs.append(preds.squeeze())

    yhat = torch.cat(outputs, dim=0)
    yhat = yhat[:realy.size(0), ...]

    print("Training finished")
    print("The valid loss on best model is", str(round(his_loss[bestid], 4)))

    amae = []
    amape = []
    armse = []
    for i in range(12):
        pred = scaler.inverse_transform(yhat[:, :, i])
        real = realy[:, :, i]
        metrics = util.metric(pred, real)
        log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
        print(log.format(i + 1, metrics[0], metrics[1], metrics[2]))
        amae.append(metrics[0])
        amape.append(metrics[1])
        armse.append(metrics[2])

    log = 'On average over 12 horizons, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
    print(log.format(np.mean(amae), np.mean(amape), np.mean(armse)))
    torch.save(
        engine.model.state_dict(), args.save + "_exp" + str(args.expid) +
        "_best_" + str(round(his_loss[bestid], 2)) + ".pth")
Code example #9
File: train.py  Project: JiahuiSun/Exp-Graph-WaveNet
def main():
    # set seed
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    np.random.seed(args.seed)

    # load data
    device = f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu'
    adj_mx = util.load_adj(adj_path, args.adjtype)
    dataloader = util.load_dataset(outflow_path, args.batch_size,
                                   args.batch_size, args.batch_size)
    scaler = dataloader['scaler']
    supports = [torch.tensor(i).to(device) for i in adj_mx]

    if args.randomadj:
        adjinit = None
    else:
        adjinit = supports[0]

    if args.aptonly:
        supports = None

    engine = trainer(scaler, args.in_dim, args.seq_length, num_nodes,
                     args.nhid, args.dropout, args.learning_rate,
                     args.weight_decay, device, supports, args.gcn_bool,
                     args.addaptadj, adjinit)

    logger.write("start training...")
    his_loss = []
    val_time = []
    train_time = []
    for i in range(1, args.epochs + 1):
        # learning rate schedule
        if i % 10 == 0:
            lr = max(0.000002, args.learning_rate * (0.9**(i // 10)))
            for g in engine.optimizer.param_groups:
                g['lr'] = lr

        # train
        train_mae = []
        train_rmse = []
        train_mape = []
        t1 = time.time()
        dataloader['train_loader'].shuffle()
        for iter, (x,
                   y) in enumerate(dataloader['train_loader'].get_iterator()):
            trainx = torch.Tensor(x).to(device)
            trainx = trainx.transpose(1, 3)
            trainy = torch.Tensor(y).to(device)
            trainy = trainy.transpose(1, 3)
            # NOTE: shape is (B, T, V, F) with F=2, but we only need speed for the label: y[:, 0, ...]
            metrics = engine.train(trainx, trainy[:, 0, :, :])
            train_mae.append(metrics[0])
            train_rmse.append(metrics[1])
            train_mape.append(metrics[2])
        # log results of training set.
        mtrain_mae = np.mean(train_mae)
        mtrain_rmse = np.mean(train_rmse)
        mtrain_mape = np.mean(train_mape) * 100
        train_writer.add_scalar('train/mae', mtrain_mae, i)
        train_writer.add_scalar('train/rmse', mtrain_rmse, i)
        train_writer.add_scalar('train/mape', mtrain_mape, i)

        # validation
        with torch.no_grad():
            valid_mae = []
            valid_mape = []
            valid_rmse = []
            s1 = time.time()
            for _, (x,
                    y) in enumerate(dataloader['val_loader'].get_iterator()):
                testx = torch.Tensor(x).to(device)
                testx = testx.transpose(1, 3)
                testy = torch.Tensor(y).to(device)
                testy = testy.transpose(1, 3)
                metrics = engine.eval(testx, testy[:, 0, :, :])
                valid_mae.append(metrics[0])
                valid_rmse.append(metrics[1])
                valid_mape.append(metrics[2])
            # log results of validation set.
            s2 = time.time()
            val_time.append(s2 - s1)
            mvalid_mae = np.mean(valid_mae)
            mvalid_mape = np.mean(valid_mape) * 100
            mvalid_rmse = np.mean(valid_rmse)
            his_loss.append(mvalid_mae)
            val_writer.add_scalar('val/mae', mvalid_mae, i)
            val_writer.add_scalar('val/rmse', mvalid_rmse, i)
            val_writer.add_scalar('val/mape', mvalid_mape, i)

        t2 = time.time()
        train_time.append(t2 - t1)
        if i % args.print_every == 0:
            logger.write(
                f'Epoch: {i:03d}, MAE: {mtrain_mae:.2f}, RMSE: {mtrain_rmse:.2f}, MAPE: {mtrain_mape:.2f}, Valid MAE: {mvalid_mae:.2f}, RMSE: {mvalid_rmse:.2f}, MAPE: {mvalid_mape:.2f}'
            )
        torch.save(
            engine.model.state_dict(), save_path + "_epoch_" + str(i) + "_" +
            str(round(mvalid_mae, 2)) + ".pth")

    logger.write("Average Training Time: {:.4f} secs/epoch".format(
        np.mean(train_time)))
    # logger.write("Average Inference Time: {:.4f} secs".format(np.mean(val_time)))

    bestid = np.argmin(his_loss)
    engine.model.load_state_dict(
        torch.load(save_path + "_epoch_" + str(bestid + 1) + "_" +
                   str(round(his_loss[bestid], 2)) + ".pth"))

    logger.write("Training finished")
    logger.write(
        f"The valid loss on best model is {str(round(his_loss[bestid],4))}")

    # test
    outputs = []
    realy = torch.Tensor(dataloader['y_test']).to(device)
    realy = realy.transpose(1, 3)[:, 0, :, :]

    with torch.no_grad():
        t1 = time.time()
        for _, (x, y) in enumerate(dataloader['test_loader'].get_iterator()):
            testx = torch.Tensor(x).to(device)
            testx = testx.transpose(1, 3)
            preds = engine.model(testx).transpose(1, 3)
            outputs.append(preds.squeeze())

        t2 = time.time()
        logger.write(f'Inference time: {t2-t1:.4f}')
        yhat = torch.cat(outputs, dim=0)
        yhat = yhat[:realy.size(0), ...]

        # calculate metrics and save predictions
        preds = []
        reals = []
        logger.write('Step i, Test MAE, Test RMSE, Test MAPE')
        for i in range(args.seq_length):
            # prediction of step i
            pred = scaler.inverse_transform(yhat[:, :, i])
            real = realy[:, :, i]
            metrics = util.metric(pred.cpu().detach().numpy(),
                                  real.cpu().detach().numpy())
            logger.write(
                f'{metrics[0]:.2f}, {metrics[1]:.2f}, {metrics[2]*100:.2f}')

            preds.append(pred.tolist())
            reals.append(real.tolist())

    reals = np.array(reals)
    preds = np.array(preds)
    # The original wrote both arrays to the same file, so the predictions
    # overwrote the ground truth; the distinct file names here are assumed.
    np.save(f'test_real_{args.city}_{args.tinterval}.npy', reals)
    np.save(f'test_pred_{args.city}_{args.tinterval}.npy', preds)
    torch.save(
        engine.model.state_dict(), save_path + "_exp" + str(args.expid) +
        "_best_" + str(round(his_loss[bestid], 2)) + ".pth")
Code example #10
def main():
    # set seed
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    # load data
    device = torch.device(args.device)
    sensor_ids, sensor_id_to_ind, adj_mx = util.load_adj(
        args.adjdata, args.adjtype)
    # suffix = '_filtered_we'  # _filtered_we, _filtered_ew
    eR_seq_size = 24  # 24
    error_size = 6
    dataloader = util.load_dataset(args.data,
                                   args.batch_size,
                                   args.batch_size,
                                   args.batch_size,
                                   eRec=args.eRec,
                                   eR_seq_size=eR_seq_size,
                                   suffix=args.suffix)
    scaler = dataloader['scaler']

    if args.retrain:
        dl_train = util.load_dataset(args.data,
                                     args.batch_size,
                                     args.batch_size,
                                     args.batch_size,
                                     eRec=args.eRec,
                                     eR_seq_size=eR_seq_size,
                                     suffix=args.suffix_train)
        scaler = dl_train['scaler']

    blocks = int(dataloader[f'x_train{args.suffix}'].shape[-3] /
                 3)  # Every block reduce the input sequence size by 3.
    print(f'blocks = {blocks}')

    supports = [torch.tensor(i).to(device) for i in adj_mx]

    print(args)

    if args.randomadj:
        adjinit = None
    else:
        adjinit = supports[0]

    if args.aptonly:
        supports = None

    engine = trainer(scaler,
                     args.in_dim,
                     args.seq_length,
                     args.num_nodes,
                     args.nhid,
                     args.dropout,
                     args.learning_rate,
                     args.weight_decay,
                     device,
                     supports,
                     args.gcn_bool,
                     args.addaptadj,
                     adjinit,
                     blocks,
                     eRec=args.eRec,
                     retrain=args.retrain,
                     checkpoint=args.checkpoint,
                     error_size=error_size)

    if args.retrain:
        dataloader['val_loader'] = dataloader['train_loader']

    print("start training...", flush=True)
    his_loss = []
    val_time = []
    train_time = []
    for i in range(1, args.epochs + 1):
        #if i % 10 == 0:
        #lr = max(0.000002,args.learning_rate * (0.1 ** (i // 10)))
        #for g in engine.optimizer.param_groups:
        #g['lr'] = lr
        train_loss = []
        train_mape = []
        train_rmse = []
        t1 = time.time()
        dataloader['train_loader'].shuffle()
        for iter, (x,
                   y) in enumerate(dataloader['train_loader'].get_iterator()):
            trainx = torch.Tensor(x).to(device)
            trainy = torch.Tensor(y).to(device)
            if args.eRec:
                trainx = trainx.transpose(0, 1)
                trainy = trainy.transpose(0, 1)
            trainx = trainx.transpose(-3, -1)
            trainy = trainy.transpose(-3, -1)
            # print(f'trainx.shape = {trainx.shape}')
            # print(f'trainy.shape = {trainy.shape}')
            # print(f'trainy.shape final = {trainy[:,0,:,:].shape}')
            if args.eRec:
                metrics = engine.train(trainx, trainy[:, :, 0, :, :])
            else:
                metrics = engine.train(trainx, trainy[:, 0, :, :])
            train_loss.append(metrics[0])
            train_mape.append(metrics[1])
            train_rmse.append(metrics[2])
            if iter % args.print_every == 0:
                log = 'Iter: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}'
                print(log.format(iter, train_loss[-1], train_mape[-1],
                                 train_rmse[-1]),
                      flush=True)
        t2 = time.time()
        train_time.append(t2 - t1)
        #validation
        valid_loss = []
        valid_mape = []
        valid_rmse = []

        s1 = time.time()
        for iter, (x, y) in enumerate(dataloader['val_loader'].get_iterator()):
            testx = torch.Tensor(x).to(device)
            testy = torch.Tensor(y).to(device)
            if args.eRec:
                testx = testx.transpose(0, 1)
                testy = testy.transpose(0, 1)
            testx = testx.transpose(-3, -1)
            testy = testy.transpose(-3, -1)
            if args.eRec:
                metrics = engine.eval(testx, testy[:, :, 0, :, :])
            else:
                metrics = engine.eval(testx, testy[:, 0, :, :])
            valid_loss.append(metrics[0])
            valid_mape.append(metrics[1])
            valid_rmse.append(metrics[2])
        s2 = time.time()
        log = 'Epoch: {:03d}, Inference Time: {:.4f} secs'
        print(log.format(i, (s2 - s1)))
        val_time.append(s2 - s1)
        mtrain_loss = np.mean(train_loss)
        mtrain_mape = np.mean(train_mape)
        mtrain_rmse = np.mean(train_rmse)

        mvalid_loss = np.mean(valid_loss)
        mvalid_mape = np.mean(valid_mape)
        mvalid_rmse = np.mean(valid_rmse)
        his_loss.append(mvalid_loss)

        log = 'Epoch: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}, Valid Loss: {:.4f}, Valid MAPE: {:.4f}, Valid RMSE: {:.4f}, Training Time: {:.4f}/epoch'
        print(log.format(i, mtrain_loss, mtrain_mape, mtrain_rmse, mvalid_loss,
                         mvalid_mape, mvalid_rmse, (t2 - t1)),
              flush=True)
        torch.save(
            engine.model.state_dict(), args.save + "_epoch_" + str(i) + "_" +
            str(round(mvalid_loss, 2)) + ".pth")
    print("Average Training Time: {:.4f} secs/epoch".format(
        np.mean(train_time)))
    print("Average Inference Time: {:.4f} secs".format(np.mean(val_time)))

    #testing
    # bestid = 82  # add 1 to this index to obtain the model ID (manual override, superseded by argmin below)
    bestid = np.argmin(his_loss)
    engine.model.load_state_dict(
        torch.load(args.save + "_epoch_" + str(bestid + 1) + "_" +
                   str(round(his_loss[bestid], 2)) + ".pth"))
    # engine.model.load_state_dict(torch.load(args.save + f"_id_25_2.6_best_model.pth"))
    # engine.model.load_state_dict(torch.load(args.save + f"_exp1_best_2.6.pth"))

    #torch.save(engine.model.state_dict(), args.save + f"_id_{bestid+1}_best_model.pth")
    print(f'best_id = {bestid+1}')

    outputs = []
    realy = torch.Tensor(dataloader[f'y_test{args.suffix}']).to(device)
    #print(f'realy: {realy.shape}')
    if args.eRec:
        realy = realy.transpose(0, 1)
        realy = realy.transpose(-3, -1)[-1, :, 0, :, :]
        #print(f'realy2: {realy.shape}')
    else:
        realy = realy.transpose(-3, -1)[:, 0, :, :]
        #print(f'realy2: {realy.shape}')
    criterion = nn.MSELoss(reduction='none')  # L2 Norm
    criterion2 = nn.L1Loss(reduction='none')
    loss_mse_list = []
    loss_mae_list = []

    for iter, (x, y) in enumerate(dataloader['test_loader'].get_iterator()):
        testx = torch.Tensor(x).to(device)
        testy = torch.Tensor(y).to(device)
        if args.eRec:
            testx = testx.transpose(0, 1)
            testy = testy.transpose(0, 1)
        testx = testx.transpose(-3, -1)
        testy = testy.transpose(-3, -1)
        with torch.no_grad():
            if args.eRec:
                preds = engine.model(testx, testy[:, :, 0:1, :, :],
                                     scaler).transpose(1, 3)
            else:
                preds = engine.model(testx).transpose(1, 3)

        #print(f'preds: {scaler.inverse_transform(torch.squeeze(preds.transpose(-3, -1))).shape}')
        #print(f'testy: {torch.squeeze(testy[:, 0:1, :, :].transpose(-3, -1)).shape}')
        if args.eRec:
            loss_mse = criterion(
                scaler.inverse_transform(torch.squeeze(preds.transpose(-3,
                                                                       -1))),
                torch.squeeze(testy[-1, :, 0:1, :, :].transpose(-3, -1)))
            loss_mae = criterion2(
                scaler.inverse_transform(torch.squeeze(preds.transpose(-3,
                                                                       -1))),
                torch.squeeze(testy[-1, :, 0:1, :, :].transpose(-3, -1)))
        else:
            loss_mse = criterion(
                scaler.inverse_transform(torch.squeeze(preds.transpose(-3,
                                                                       -1))),
                torch.squeeze(testy[:, 0:1, :, :].transpose(-3, -1)))
            loss_mae = criterion2(
                scaler.inverse_transform(torch.squeeze(preds.transpose(-3,
                                                                       -1))),
                torch.squeeze(testy[:, 0:1, :, :].transpose(-3, -1)))

        loss_mse_list.append(loss_mse)
        loss_mae_list.append(loss_mae)

        outputs.append(preds.squeeze())

    loss_mse_list.pop(-1)
    loss_mae_list.pop(-1)
    loss_mse = torch.cat(loss_mse_list, 0)
    loss_mae = torch.cat(loss_mae_list, 0)
    #loss_mse = torch.squeeze(loss_mse).cpu()
    #loss_mae = torch.squeeze(loss_mae).cpu()
    loss_mse = loss_mse.cpu()
    loss_mae = loss_mae.cpu()
    print(f'loss_mae: {loss_mae.shape}')
    print(f'loss_mse: {loss_mse.shape}')

    res_folder = 'results/'
    original_stdout = sys.stdout
    with open(res_folder + 'loss_evaluation.txt', 'w') as filehandle:
        sys.stdout = filehandle  # Change the standard output to the file we created.
        count_parameters(engine.model)
        # loss_mae.shape --> (batch_size, seq_size, n_detect)
        print(' 1. ***********')
        print_loss('MSE', loss_mse)
        print(' 2. ***********')
        print_loss('MAE', loss_mae)
        print(' 3. ***********')
        print_loss_sensor('MAE', loss_mae)
        print(' 5. ***********')
        print_loss_seq('MAE', loss_mae)
        print(' 6. ***********')
        print_loss_sensor_seq('MAE', loss_mae)

        sys.stdout = original_stdout  # Reset the standard output to its original value

    with open(res_folder + 'loss_evaluation.txt', 'r') as filehandle:
        print(filehandle.read())

    yhat = torch.cat(outputs, dim=0)
    #print(f'yhat: {yhat.shape}')
    yhat = yhat[:realy.size(0), ...]
    #print(f'yhat2: {yhat.shape}')

    print("Training finished")
    #print("The valid loss on best model is", str(round(his_loss[bestid],4)))

    amae = []
    amape = []
    armse = []
    for i in range(args.seq_length):
        pred = scaler.inverse_transform(yhat[:, :, i])
        real = realy[:, :, i]
        metrics = util.metric(pred, real)
        log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
        print(log.format(i + 1, metrics[0], metrics[1], metrics[2]))
        amae.append(metrics[0])
        amape.append(metrics[1])
        armse.append(metrics[2])

    log = 'On average over {:d} horizons, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
    print(
        log.format(args.seq_length, np.mean(amae), np.mean(amape),
                   np.mean(armse)))
    torch.save(
        engine.model.state_dict(), args.save + "_exp" + str(args.expid) +
        "_best_" + str(round(np.min(his_loss), 2)) + ".pth")
Code example #11
def main():
    #set seed
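    # if no seed is given, draw a random 32-bit one; it is embedded in run_name below, so the run stays reproducible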
    args.seed = args.seed if args.seed else \
        np.random.randint(0, np.iinfo("uint32").max, size=1)[-1]
    torch.manual_seed(args.seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    np.random.seed(args.seed)

    # update run_name & save_dir
    args.run_name += "_".join(
        [args.data_type, str(args.seq_length),
         str(args.seed)])
    args.save += args.run_name + "/"
    os.makedirs(args.save)
    wandb.init(config=args, project=args.project_name, name=args.run_name)

    #load data
    device = torch.device(args.device)
    sensor_ids, sensor_id_to_ind, adj_mx = util.load_adj(
        args.adjdata, args.adjtype)
    dataloader = util.load_dataset(args.data, args.batch_size, args.batch_size,
                                   args.batch_size)
    scaler = dataloader['scaler']
    supports = [torch.tensor(i).to(device) for i in adj_mx]

    print(args)

    if args.randomadj:
        adjinit = None
    else:
        adjinit = supports[0]

    if args.aptonly:
        supports = None

    engine = trainer(scaler, args.in_dim, args.seq_length, args.num_nodes,
                     args.nhid, args.dropout, args.learning_rate,
                     args.weight_decay, device, supports, args.gcn_bool,
                     args.addaptadj, adjinit, args.impute_type)

    print("start training...", flush=True)
    his_loss = []
    val_time = []
    train_time = []
    for i in tqdm(range(1, args.epochs + 1)):
        train_loss = []
        train_mape = []
        train_rmse = []
        t1 = time.time()
        if i > 1:
            # Skip shuffling for 1st epoch for data imputation
            dataloader['train_loader'].shuffle()
        for iter, (x,
                   y) in enumerate(dataloader['train_loader'].get_iterator()):
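            # impute missing inputs on the first epoch; the GCN imputer runs every epoch since it uses the model's current supports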
            if i == 1 or engine.imputer.type == "GCN":
                trainx = engine.imputer(x.transpose(1, 3),
                                        engine.model.get_supports())
            else:
                trainx = x.transpose(1, 3)
            trainx = trainx.to(device)
            trainy = torch.Tensor(y).to(device)
            trainy = trainy.transpose(1, 3)
            metrics = engine.train(trainx, trainy[:, 0, :, :])
            train_loss.append(metrics[0])
            train_mape.append(metrics[1])
            train_rmse.append(metrics[2])
            if iter % args.print_every == 0:
                log = 'Iter: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}'
                print(log.format(iter, train_loss[-1], train_mape[-1],
                                 train_rmse[-1]),
                      flush=True)
        t2 = time.time()
        train_time.append(t2 - t1)
        #validation
        valid_loss = []
        valid_mape = []
        valid_rmse = []

        s1 = time.time()
        for iter, (x, y) in enumerate(dataloader['val_loader'].get_iterator()):
            if i == 1 or engine.imputer.type == "GCN":
                testx = engine.imputer(x.transpose(1, 3),
                                       engine.model.get_supports())
            else:
                testx = x.transpose(1, 3)
            testx = testx.to(device)
            testy = torch.Tensor(y).to(device)
            testy = testy.transpose(1, 3)
            metrics = engine.eval(testx, testy[:, 0, :, :])
            valid_loss.append(metrics[0])
            valid_mape.append(metrics[1])
            valid_rmse.append(metrics[2])
        s2 = time.time()
        log = 'Epoch: {:03d}, Inference Time: {:.4f} secs'
        print(log.format(i, (s2 - s1)))
        val_time.append(s2 - s1)
        mtrain_loss = np.mean(train_loss)
        mtrain_mape = np.mean(train_mape)
        mtrain_rmse = np.mean(train_rmse)

        mvalid_loss = np.mean(valid_loss)
        mvalid_mape = np.mean(valid_mape)
        mvalid_rmse = np.mean(valid_rmse)
        his_loss.append(mvalid_loss)

        log = 'Epoch: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}, Valid Loss: {:.4f}, Valid MAPE: {:.4f}, Valid RMSE: {:.4f}, Training Time: {:.4f}/epoch'
        print(log.format(i, mtrain_loss, mtrain_mape, mtrain_rmse, mvalid_loss,
                         mvalid_mape, mvalid_rmse, (t2 - t1)),
              flush=True)
        torch.save(
            engine.model.state_dict(), args.save + "epoch_" + str(i) + "_" +
            str(round(mvalid_loss, 2)) + ".pth")
        wandb.log(
            {
                "Train MAE": mtrain_loss,
                "Train MAPE": mtrain_mape,
                "Train RMSE": mtrain_rmse,
                "Validation MAE": mvalid_loss,
                "Validation MAPE": mvalid_mape,
                "Validation RMSE": mvalid_rmse
            },
            step=i)
    print("Average Training Time: {:.4f} secs/epoch".format(
        np.mean(train_time)))
    print("Average Inference Time: {:.4f} secs".format(np.mean(val_time)))

    #testing
    bestid = np.argmin(his_loss)
    engine.model.load_state_dict(
        torch.load(args.save + "epoch_" + str(bestid + 1) + "_" +
                   str(round(his_loss[bestid], 2)) + ".pth"))

    outputs = []
    realy = torch.Tensor(dataloader['y_test']).to(device)
    realy = realy.transpose(1, 3)[:, 0, :, :]

    for iter, (x, y) in enumerate(dataloader['test_loader'].get_iterator()):
        with torch.no_grad():
            testx = engine.imputer(x.transpose(1, 3),
                                   engine.model.get_supports())
            testx = testx.to(device)
            preds = engine.model(testx).transpose(1, 3)
        outputs.append(preds.squeeze(1))

    yhat = torch.cat(outputs, dim=0)
    yhat = yhat[:realy.size(0), ...]

    print("Training finished")
    print("The valid loss on best model is", str(round(his_loss[bestid], 4)))

    amae = []
    amape = []
    armse = []
    for i in range(args.seq_length):
        pred = scaler.inverse_transform(yhat[:, :, i])
        real = realy[:, :, i]
        metrics = util.metric(pred, real)
        log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
        print(log.format(i + 1, metrics[0], metrics[1], metrics[2]))
        amae.append(metrics[0])
        amape.append(metrics[1])
        armse.append(metrics[2])

        wandb.log(
            {
                "Test MAE": metrics[0],
                "Test MAPE": metrics[1],
                "Test RMSE": metrics[2]
            },
            step=i + args.epochs + 1)

    log = 'On average over horizons, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
    print(log.format(np.mean(amae), np.mean(amape), np.mean(armse)))
    wandb.log({
        "Avg Test MAE": np.mean(amae),
        "Avg Test MAPE": np.mean(amape),
        "Avg Test RMSE": np.mean(armse)
    })
    torch.save(engine.model.state_dict(),
               args.save + "best_" + str(round(his_loss[bestid], 2)) + ".pth")
Code example #12
def main(model_name=None,
         syn_file='syn_diffG.pkl'
         ):  # directly loading trained model/ generated syn data
    #set seed
    #torch.manual_seed(args.seed)
    #np.random.seed(args.seed)
    #load data
    same_G = False
    device = torch.device(args.device)

    if args.data == 'syn':
        if os.path.isfile(syn_file):
            with open(syn_file, 'rb') as handle:
                pkl_data = pickle.load(handle)
            nTrain, nValid, nTest, num_timestep = pkl_data['nTrain'], pkl_data['nValid'],\
                                                  pkl_data['nTest'], pkl_data['num_timestep']
            dataloader, adj_mx, F_t, G = pkl_data['dataloader'], pkl_data['adj_mx'],\
                                         pkl_data['F_t'], pkl_data['G']
            print('synthetic data loaded')
        else:
            nTrain = 80  # Number of training samples
            nValid = int(0.25 * nTrain)  # Number of validation samples
            nTest = int(0.05 * nTrain)  # Number of testing samples
            num_timestep = 1000  # 1000
            dataloader, adj_mx, F_t, G = util.load_dataset_syn(args.adjtype,
                                                               args.num_nodes,
                                                               nTrain,
                                                               nValid,
                                                               nTest,
                                                               num_timestep,
                                                               args.seq_length,
                                                               args.batch_size,
                                                               args.batch_size,
                                                               args.batch_size,
                                                               same_G=same_G)
            # pkl_data = {'nTrain': nTrain, 'nValid': nValid, 'nTest': nTest,
            #             'num_timestep': num_timestep, 'dataloader': dataloader,
            #             'adj_mx': adj_mx, 'F_t': F_t, 'G':G}
            # with open(syn_file, 'wb') as handle:
            #     pickle.dump(pkl_data, handle, protocol=pickle.HIGHEST_PROTOCOL)

    elif args.data == 'CRASH':
        util.load_dataset_CRASH(args.adjtype, args.batch_size, args.batch_size,
                                args.batch_size)
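        # NOTE: the return value is discarded, so dataloader/adj_mx are never set on this branch; the CRASH path looks unfinished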

    else:
        sensor_ids, sensor_id_to_ind, adj_mx = util.load_adj(
            args.adjdata, args.adjtype)
        dataloader = util.load_dataset_metr(args.data, args.batch_size,
                                            args.batch_size, args.batch_size)

    if args.data == 'syn' and not same_G:  # different graph structure for each sample
        assert len(adj_mx) == nTrain + nValid + nTest

        # separate adj matrices into train-val-test samples
        adj_train = [[], []]
        for a in adj_mx[:nTrain]:
            adj_train[0].append(a[0])
            adj_train[1].append(a[1])
        adj_train = [np.stack(np.asarray(i)) for i in adj_train]

        adj_val = [[], []]
        for a in adj_mx[nTrain:-nTest]:
            adj_val[0].append(a[0])
            adj_val[1].append(a[1])
        adj_val = [np.stack(np.asarray(i)) for i in adj_val]

        adj_test = [[], []]
        for a in adj_mx[-nTest:]:
            adj_test[0].append(a[0])
            adj_test[1].append(a[1])
        adj_test = [np.stack(np.asarray(i)) for i in adj_test]

        scaler = dataloader['scaler']
        print(args)
        supports = {}
        supports['train'] = [torch.tensor(i).to(device) for i in adj_train]
        supports['val'] = [torch.tensor(i).to(device) for i in adj_val]
        supports['test'] = [torch.tensor(i).to(device) for i in adj_test]

        adjinit = {}
        if args.randomadj:
            adjinit['train'] = adjinit['val'] = adjinit['test'] = None
        else:
            adjinit['train'] = supports['train'][0]
            adjinit['val'] = supports['val'][0]
            adjinit['test'] = supports['test'][0]

        if args.aptonly:
            supports['train'] = supports['val'] = supports['test'] = None

        engine = trainer(scaler, args.in_dim, args.seq_length, args.num_nodes,
                         args.nhid, args.dropout, args.learning_rate,
                         args.weight_decay, device, supports, args.gcn_bool,
                         args.addaptadj, adjinit, args.blocks, args.layers)

        if model_name is None:
            print("start training...", flush=True)

            his_loss = []
            val_time = []
            train_time = []
            for i in range(1, args.epochs + 1):
                #if i % 10 == 0:
                #lr = max(0.000002,args.learning_rate * (0.1 ** (i // 10)))
                #for g in engine.optimizer.param_groups:
                #g['lr'] = lr
                train_loss = []
                train_mape = []
                train_rmse = []
                t1 = time.time()
                dataloader['train_loader'].shuffle()
                engine.set_state('train')

                for iter, (x, y, adj_idx) in enumerate(
                        dataloader['train_loader'].get_iterator()):
                    trainx = torch.Tensor(x).to(
                        device)  # torch.Size([64, 15, 80, 2])
                    trainx = trainx.transpose(1,
                                              3)  # torch.Size([64, 2, 80, 15])
                    trainy = torch.Tensor(y).to(device)
                    trainy = trainy.transpose(1, 3)

                    metrics = engine.train_syn(trainx, trainy, F_t, G['train'],
                                               adj_idx)
                    train_loss.append(metrics[0])
                    train_mape.append(metrics[1])
                    train_rmse.append(metrics[2])
                    if iter % args.print_every == 0:
                        log = 'Iter: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}'
                        print(log.format(iter, train_loss[-1], train_mape[-1],
                                         train_rmse[-1]),
                              flush=True)

                t2 = time.time()
                train_time.append(t2 - t1)

                #validation
                valid_loss = []
                valid_mape = []
                valid_rmse = []

                s1 = time.time()
                engine.set_state('val')
                for iter, (x, y, adj_idx) in enumerate(
                        dataloader['val_loader'].get_iterator()):
                    testx = torch.Tensor(x).to(device)
                    testx = testx.transpose(1, 3)
                    testy = torch.Tensor(y).to(device)
                    testy = testy.transpose(1, 3)
                    # [64, 2, 80, 15]
                    metrics = engine.eval_syn(testx, testy, F_t, G['val'],
                                              adj_idx)
                    valid_loss.append(metrics[0])
                    valid_mape.append(metrics[1])
                    valid_rmse.append(metrics[2])
                s2 = time.time()
                log = 'Epoch: {:03d}, Inference Time: {:.4f} secs'
                print(log.format(i, (s2 - s1)))
                val_time.append(s2 - s1)
                mtrain_loss = np.mean(train_loss)
                mtrain_mape = np.mean(train_mape)
                mtrain_rmse = np.mean(train_rmse)

                mvalid_loss = np.mean(valid_loss)
                mvalid_mape = np.mean(valid_mape)
                mvalid_rmse = np.mean(valid_rmse)
                his_loss.append(mvalid_loss)

                log = 'Epoch: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}, Valid Loss: {:.4f}, Valid MAPE: {:.4f}, Valid RMSE: {:.4f}, Training Time: {:.4f}/epoch'
                print(log.format(i, mtrain_loss, mtrain_mape, mtrain_rmse,
                                 mvalid_loss, mvalid_mape, mvalid_rmse,
                                 (t2 - t1)),
                      flush=True)
                torch.save(
                    engine.model.state_dict(), args.save + "_epoch_" + str(i) +
                    "_" + str(round(mvalid_loss, 2)) + ".pth")
            print("Average Training Time: {:.4f} secs/epoch".format(
                np.mean(train_time)))
            print("Average Inference Time: {:.4f} secs".format(
                np.mean(val_time)))

    else:
        scaler = dataloader['scaler']
        supports = [torch.tensor(i).to(device) for i in adj_mx]
        print(args)

        if args.randomadj:
            adjinit = None
        else:
            adjinit = supports[0]

        if args.aptonly:
            supports = None

        engine = trainer(scaler, args.in_dim, args.seq_length, args.num_nodes,
                         args.nhid, args.dropout, args.learning_rate,
                         args.weight_decay, device, supports, args.gcn_bool,
                         args.addaptadj, adjinit, args.blocks, args.layers)

        if model_name is None:
            print("start training...", flush=True)
            his_loss = []
            val_time = []
            train_time = []
            for i in range(1, args.epochs + 1):
                # optional step-decay learning-rate schedule (left disabled):
                # if i % 10 == 0:
                #     lr = max(0.000002, args.learning_rate * (0.1 ** (i // 10)))
                #     for g in engine.optimizer.param_groups:
                #         g['lr'] = lr
                train_loss = []
                train_mape = []
                train_rmse = []
                t1 = time.time()
                dataloader['train_loader'].shuffle()
                for iter, (x, y) in enumerate(
                        dataloader['train_loader'].get_iterator()):
                    trainx = torch.Tensor(x).to(
                        device)  # torch.Size([64, 12, 207, 2])
                    trainx = trainx.transpose(
                        1, 3)  # torch.Size([64, 2, 207, 12])
                    trainy = torch.Tensor(y).to(device)
                    trainy = trainy.transpose(1, 3)
                    if args.data == 'syn':
                        metrics = engine.train_syn(trainx, trainy, F_t, G)
                    else:
                        metrics = engine.train(trainx, trainy[:, 0, :, :])
                    train_loss.append(metrics[0])
                    train_mape.append(metrics[1])
                    train_rmse.append(metrics[2])
                    if iter % args.print_every == 0:
                        log = 'Iter: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}'
                        print(log.format(iter, train_loss[-1], train_mape[-1],
                                         train_rmse[-1]),
                              flush=True)

                t2 = time.time()
                train_time.append(t2 - t1)
                # validation
                valid_loss = []
                valid_mape = []
                valid_rmse = []

                s1 = time.time()
                for iter, (x, y) in enumerate(
                        dataloader['val_loader'].get_iterator()):
                    testx = torch.Tensor(x).to(device)
                    testx = testx.transpose(1, 3)
                    testy = torch.Tensor(y).to(device)
                    testy = testy.transpose(1, 3)
                    if args.data == 'syn':
                        metrics = engine.eval_syn(testx, testy, F_t, G)
                    else:
                        metrics = engine.eval(testx, testy[:, 0, :, :])
                    valid_loss.append(metrics[0])
                    valid_mape.append(metrics[1])
                    valid_rmse.append(metrics[2])

                s2 = time.time()
                log = 'Epoch: {:03d}, Inference Time: {:.4f} secs'
                print(log.format(i, (s2 - s1)))
                val_time.append(s2 - s1)
                mtrain_loss = np.mean(train_loss)
                mtrain_mape = np.mean(train_mape)
                mtrain_rmse = np.mean(train_rmse)

                mvalid_loss = np.mean(valid_loss)
                mvalid_mape = np.mean(valid_mape)
                mvalid_rmse = np.mean(valid_rmse)
                his_loss.append(mvalid_loss)

                log = 'Epoch: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}, Valid Loss: {:.4f}, Valid MAPE: {:.4f}, Valid RMSE: {:.4f}, Training Time: {:.4f}/epoch'
                print(log.format(i, mtrain_loss, mtrain_mape, mtrain_rmse,
                                 mvalid_loss, mvalid_mape, mvalid_rmse,
                                 (t2 - t1)),
                      flush=True)
                torch.save(
                    engine.model.state_dict(), args.save + "_epoch_" + str(i) +
                    "_" + str(round(mvalid_loss, 2)) + ".pth")
            print("Average Training Time: {:.4f} secs/epoch".format(
                np.mean(train_time)))
            print("Average Inference Time: {:.4f} secs".format(
                np.mean(val_time)))

    ################################ TESTING ################################
    if model_name is None:
        # reload the checkpoint from the epoch with the lowest mean validation loss
        bestid = np.argmin(his_loss)
        print("Best epoch index:", bestid)

        print("Training finished")
        print("The valid loss on best model is",
              str(round(his_loss[bestid], 4)))

        engine.model.load_state_dict(
            torch.load(args.save + "_epoch_" + str(bestid + 1) + "_" +
                       str(round(his_loss[bestid], 2)) + ".pth"))
    else:
        engine.model.load_state_dict(torch.load(model_name))
    amae = []
    amape = []
    armse = []

    if args.data == 'syn':
        if same_G:
            for iter, (x, y) in enumerate(
                    dataloader['test_loader'].get_iterator()):
                testx = torch.Tensor(x).to(device)
                testx = testx.transpose(1, 3)
                testy = torch.Tensor(y).to(device)
                testy = testy.transpose(1, 3)
                # shape after transpose: [batch, feature, num_nodes, horizon], e.g. [64, 2, 80, 15]
                metrics = engine.eval_syn(testx, testy, F_t, G)
                amae.append(metrics[0])
                amape.append(metrics[1])
                armse.append(metrics[2])

        else:
            engine.set_state('test')
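            # unseen test graphs: alongside the scalar metrics, eval_syn is
            # expected to return the predicted F and E channels (metrics[3]/[4])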
            reals = []
            pred_Fs = []
            pred_Es = []
            for iter, (x, y, adj_idx) in enumerate(
                    dataloader['test_loader'].get_iterator()):
                testx = torch.Tensor(x).to(device)
                testx = testx.transpose(1, 3)
                testy = torch.Tensor(y).to(device)
                testy = testy.transpose(1, 3)
                # shape after transpose: [batch, feature, num_nodes, horizon], e.g. [64, 2, 80, 15]
                # assumes the graph dict carries a 'test' split to match
                # engine.set_state('test') above
                metrics = engine.eval_syn(testx, testy, F_t, G['test'], adj_idx)
                amae.append(metrics[0])
                amape.append(metrics[1])
                armse.append(metrics[2])

                reals.append(testy)
                pred_Fs.append(metrics[3])
                pred_Es.append(metrics[4])

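            # flatten the per-batch tensors into single arrays; torch.stack
            # assumes every batch has the same size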
            reals = torch.stack(reals).cpu().numpy()
            reals = reals.reshape(-1, *reals.shape[2:])
            pred_Fs = torch.stack(pred_Fs).cpu().numpy()
            pred_Fs = pred_Fs.reshape(-1, *pred_Fs.shape[2:]).squeeze()
            pred_Es = torch.stack(pred_Es).cpu().numpy()
            pred_Es = pred_Es.reshape(-1, *pred_Es.shape[2:]).squeeze()
            # reals shape: (1984, 2, 80, 15); pred_Fs/pred_Es shape: (1984, 80, 15)

            # reverse sliding window --> results: (num_nodes, total_timesteps)
            ret = util.reverse_sliding_window([
                reals[:, 0, :, :].squeeze(), reals[:, 1, :, :].squeeze(),
                pred_Fs, pred_Es
            ])
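
            # A minimal sketch of what util.reverse_sliding_window plausibly does,
            # assuming consecutive windows advance by one step and overlapping
            # positions are averaged (hypothetical reconstruction, not the actual
            # util implementation):
            #
            #   def reverse_sliding_window(arrays):
            #       outs = []
            #       for a in arrays:              # a: (num_windows, num_nodes, win_len)
            #           n_win, n_nodes, w = a.shape
            #           total = n_win + w - 1     # timeline length implied by the windows
            #           acc = np.zeros((n_nodes, total))
            #           cnt = np.zeros((n_nodes, total))
            #           for s in range(n_win):    # add each window at its offset
            #               acc[:, s:s + w] += a[s]
            #               cnt[:, s:s + w] += 1
            #           outs.append(acc / cnt)    # average the overlaps
            #       return outs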

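            # visual sanity check: overlay the real and predicted F/E series for
            # one node over the reconstructed timeline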
            viz_node_idx = 0
            plt.figure()
            plt.plot(ret[0][viz_node_idx, :], label='real F')
            plt.plot(ret[1][viz_node_idx, :], label='real E')
            plt.plot(ret[2][viz_node_idx, :], label='pred F')
            plt.plot(ret[3][viz_node_idx, :], label='pred E')
            plt.legend()
            plt.show()

        if model_name is None:
            log = 'On average over seq_length horizons, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
            print(log.format(np.mean(amae), np.mean(amape), np.mean(armse)))
            torch.save(
                engine.model.state_dict(),
                args.save + "_exp" + str(args.expid) + "_best_" +
                str(round(his_loss[bestid], 2)) + ".pth")

    else:
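        # non-synthetic evaluation: run the trained model over the test loader,
        # then score each forecasting horizon separately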
        outputs = []
        realy = torch.Tensor(dataloader['y_test']).to(device)
        realy = realy.transpose(1, 3)[:, 0, :, :]

        for iter, (x, y) in enumerate(dataloader['test_loader'].get_iterator()):
            testx = torch.Tensor(x).to(device)
            testx = testx.transpose(1, 3)
            with torch.no_grad():
                preds = engine.model(testx).transpose(1, 3)
            outputs.append(preds.squeeze())

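        # concatenate batch outputs and trim to the true number of test samples
        # (the slice suggests the loader pads its final batch)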
        yhat = torch.cat(outputs, dim=0)
        yhat = yhat[:realy.size(0), ...]

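        # per-horizon metrics on de-normalised predictions; the running lists
        # feed the averaged summary printed after the loop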
        for i in range(args.seq_length):
            pred = scaler.inverse_transform(yhat[:, :, i])
            real = realy[:, :, i]
            metrics = util.metric(pred, real)
            log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
            print(log.format(i + 1, metrics[0], metrics[1], metrics[2]))
            amae.append(metrics[0])
            amape.append(metrics[1])
            armse.append(metrics[2])

        log = 'On average over seq_length horizons, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
        print(log.format(np.mean(amae), np.mean(amape), np.mean(armse)))
        # his_loss and bestid only exist when training ran in this process, so
        # skip the best-checkpoint re-save when a pretrained model was loaded
        if model_name is None:
            torch.save(
                engine.model.state_dict(), args.save + "_exp" + str(args.expid)
                + "_best_" + str(round(his_loss[bestid], 2)) + ".pth")