Example #1
0
def res(model, valX, valTE, valY, mean, std):
    """Evaluate `model` on a validation/test split and log per-step metrics.

    Runs the model batch-wise under `torch.no_grad()`, de-normalizes the
    predictions with (mean, std), then computes MAE / RMSE / MAPE for each
    prediction horizon step and for the overall average.

    Args:
        model: trained torch.nn.Module; called as ``model(X, te)``.
        valX: numpy array of inputs, shape (num_samples, ...).
        valTE: numpy array of temporal embeddings, batched alongside valX.
        valY: numpy array of ground-truth targets (already in original scale).
        mean, std: normalization statistics used to de-normalize predictions.

    Returns:
        Tuple of three 1-D numpy arrays (maes, rmses, mapes), each of length
        ``num_steps + 1`` — one entry per horizon step plus the overall value.
    """
    model.eval()  # evaluation mode: disables dropout etc.
    num_val = valX.shape[0]
    pred = []
    label = []
    num_batch = math.ceil(num_val / args.batch_size)
    with torch.no_grad():
        for batch_idx in range(num_batch):
            start_idx = batch_idx * args.batch_size
            end_idx = min(num_val, (batch_idx + 1) * args.batch_size)

            X = torch.from_numpy(
                valX[start_idx:end_idx]).float().to(device)
            y = valY[start_idx:end_idx]
            te = torch.from_numpy(valTE[start_idx:end_idx]).to(device)

            y_hat = model(X, te)

            # De-normalize predictions back to the original data scale;
            # labels are already un-normalized.
            pred.append(y_hat.cpu().numpy() * std + mean)
            label.append(y)

    pred = np.concatenate(pred, axis=0)
    label = np.concatenate(label, axis=0)

    maes = []
    rmses = []
    mapes = []

    # Per-horizon-step metrics (generalized from the hard-coded 12 steps;
    # axis 1 is assumed to be the prediction horizon — TODO confirm shape).
    num_steps = pred.shape[1]
    for i in range(num_steps):
        mae, rmse, mape = metric(pred[:, i, :], label[:, i, :])
        maes.append(mae)
        rmses.append(rmse)
        mapes.append(mape)
        log_string(
            log, 'step %d, mae: %.4f, rmse: %.4f, mape: %.4f' %
            (i + 1, mae, rmse, mape))

    # Overall metrics across all horizon steps.
    mae, rmse, mape = metric(pred, label)
    maes.append(mae)
    rmses.append(rmse)
    mapes.append(mape)
    log_string(
        log, 'average, mae: %.4f, rmse: %.4f, mape: %.4f' % (mae, rmse, mape))

    return np.stack(maes, 0), np.stack(rmses, 0), np.stack(mapes, 0)
Example #2
0
# --- Command-line configuration: METR-LA dataset file paths ---
parser.add_argument('--SE_file',
                    default='data/METR-LA/SE(METR).txt',
                    help='spatial embedding file')  # fixed typo "emebdding"
parser.add_argument('--model_file',
                    default='data/METR-LA/METR',
                    help='save the model to disk')
parser.add_argument('--log_file',
                    default='data/METR-LA/log(METR)',
                    help='log file')
args = parser.parse_args()

# NOTE(review): this handle stays open for the process lifetime and is never
# explicitly closed; logging helpers below write through it.
log = open(args.log_file, 'w')

# Pin to GPU 6 when CUDA is available, otherwise fall back to CPU.
device = torch.device("cuda:6" if torch.cuda.is_available() else "cpu")

log_string(log, "loading data....")

# Load train/val/test splits, spatial embedding, and normalization stats.
trainX, trainTE, trainY, valX, valTE, valY, testX, testTE, testY, SE, mean, std = utils.loadData(
    args)

log_string(log, "loading end....")


def res(model, valX, valTE, valY, mean, std):
    model.eval()  # 评估模式, 这会关闭dropout
    # it = test_iter.get_iterator()
    num_val = valX.shape[0]
    pred = []
    label = []
Example #3
0
def train(model, trainX, trainTE, trainY, valX, valTE, valY, mean, std):
    """Train `model` with Adam + MultiStepLR, validating after every epoch.

    Shuffles the training set each epoch, optimizes a de-normalized loss with
    gradient-norm clipping, logs epoch statistics, and saves the full model to
    ``args.model_file`` whenever the validation average MAE improves.

    Args:
        model: torch.nn.Module called as ``model(X, te)``.
        trainX/trainTE/trainY: numpy training inputs, temporal embeddings,
            and targets (targets in original scale).
        valX/valTE/valY: validation split, passed through to ``res``.
        mean, std: normalization statistics; predictions are de-normalized
            with them before the loss.
    """
    num_train = trainX.shape[0]
    min_loss = float('inf')  # best validation MAE seen so far
    optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=[5, 6, 7, 8], gamma=0.2)

    for epoch in tqdm(range(1, args.max_epoch + 1)):
        # BUG FIX: res() switches the model into eval mode every epoch, so
        # training mode (dropout etc.) must be re-enabled at the start of
        # each epoch — not just once before the loop.
        model.train()
        train_l_sum, batch_count, start = 0.0, 0, time.time()

        # Fresh random shuffle of the training set each epoch.
        permutation = np.random.permutation(num_train)
        trainX = trainX[permutation]
        trainTE = trainTE[permutation]
        trainY = trainY[permutation]

        num_batch = math.ceil(num_train / args.batch_size)
        with tqdm(total=num_batch) as pbar:
            for batch_idx in range(num_batch):
                start_idx = batch_idx * args.batch_size
                end_idx = min(num_train, (batch_idx + 1) * args.batch_size)

                X = torch.from_numpy(
                    trainX[start_idx:end_idx]).float().to(device)
                y = torch.from_numpy(
                    trainY[start_idx:end_idx]).float().to(device)
                te = torch.from_numpy(trainTE[start_idx:end_idx]).to(device)

                optimizer.zero_grad()

                y_hat = model(X, te)

                # Loss is computed in the original data scale: de-normalize
                # the prediction before comparing with the target.
                loss = _compute_loss(y, y_hat * std + mean)

                loss.backward()
                # Clip gradient norm to stabilize training.
                nn.utils.clip_grad_norm_(model.parameters(), 5)
                optimizer.step()

                train_l_sum += loss.cpu().item()
                batch_count += 1
                pbar.update(1)

        log_string(
            log, 'epoch %d, lr %.6f, loss %.4f, time %.1f sec' %
            (epoch, optimizer.param_groups[0]['lr'], train_l_sum / batch_count,
             time.time() - start))

        # Validate; res returns per-step metrics with the overall average
        # appended last, so mae[-1] is the average validation MAE.
        mae, rmse, mape = res(model, valX, valTE, valY, mean, std)
        lr_scheduler.step()
        if mae[-1] < min_loss:
            min_loss = mae[-1]
            torch.save(model, args.model_file)
Example #4
0
# --- Command-line configuration: METR-LA dataset file paths ---
parser.add_argument('--SE_file',
                    default='data/METR-LA/SE(METR).txt',
                    help='spatial embedding file')  # fixed typo "emebdding"
parser.add_argument('--model_file',
                    default='data/METR-LA/METR',
                    help='save the model to disk')
parser.add_argument('--log_file',
                    default='data/METR-LA/log(METR)',
                    help='log file')
args = parser.parse_args()

# NOTE(review): this handle stays open for the process lifetime and is never
# explicitly closed; logging helpers below write through it.
log = open(args.log_file, 'w')

# Pin to GPU 2 when CUDA is available, otherwise fall back to CPU.
device = torch.device("cuda:2" if torch.cuda.is_available() else "cpu")

log_string(log, "loading data....")

# Load train/val/test splits, spatial embedding, and normalization stats
# (time-aware loader variant).
trainX, trainTE, trainY, valX, valTE, valY, testX, testTE, testY, SE, mean, std = loadDatatime(
    args)

log_string(log, "loading end....")


def res(model, valX, valTE, valY, mean, std):
    model.eval()  # 评估模式, 这会关闭dropout
    # it = test_iter.get_iterator()
    num_val = valX.shape[0]
    pred = []