Example #1
def main():
    loss_func = nn.MSELoss(reduction='mean')
    data_bases, bike_sta = dataprocess.load_data_bases(train_days,
                                                       baseH5path,
                                                       0,
                                                       8,
                                                       width,
                                                       height,
                                                       reduce,
                                                       normalize=False)
    data_bases = data_bases.reshape((-1, width * height))
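    # NMF factorizes the (time, width*height) matrix as data_bases ≈ W_train @ H,
    # where H holds K nonnegative spatial basis maps and W_train the per-step weights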
    model_1 = NMF(n_components=K, init='random')
    W_train = model_1.fit_transform(data_bases)
    H = model_1.components_  # bases: K x (width*height)

    # derive the weight sequence: least-squares weights of each frame in basis H
    data = dataprocess.load_data_cluster(91, dataH5path, 0, width,
                                         height).reshape((-1, width * height))
    W = np.dot(data, np.linalg.pinv(H))
    model_2 = Lstm3(K, K,
                    torch.from_numpy(H).float().to(device), height, width,
                    device).to(device)
    opt2 = optim.Adam(model_2.parameters(), lr=lr)

    X_recent = []
    Y_ = []
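    # input window: the weight vectors of the 8 preceding timesteps (oldest first)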
    depends = [8, 7, 6, 5, 4, 3, 2, 1]
    first = depends[0]
    for i in range(first, len(W)):
        x_recent = [W[i - j] for j in depends]
        y_ = data[i]

        X_recent.append(x_recent)
        Y_.append(y_)

    X_recent = torch.from_numpy(np.asarray(X_recent)).float().to(device)
    Y_ = torch.from_numpy(np.asarray(Y_)).float().to(device)
    X_train, X_test = X_recent[:-len_test], X_recent[-len_test:]
    Y_train, Y_test = Y_[:-len_test], Y_[-len_test:]
    X_train, X_valid = X_train[:-len_val], X_train[-len_val:]
    Y_train, Y_valid = Y_train[:-len_val], Y_train[-len_val:]

    train_ds = TensorDataset(X_train, Y_train)
    train_dl = DataLoader(train_ds, batch_size=bs, shuffle=True)
    valid_ds = TensorDataset(X_valid, Y_valid)
    valid_dl = DataLoader(valid_ds, batch_size=bs * 2, shuffle=True)
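    # (valid_dl is built but unused below; validation runs on the full X_valid/Y_valid tensors)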

    best_loss, best_model_wts, best_epoch = 999, copy.deepcopy(
        model_2.state_dict()), -1
    for epoch in range(epochs):
        model_2.train()
        for xb, yb in train_dl:
            yb_pred = model_2(xb)
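            # RMSE: square root of the batch MSE; the epsilon keeps the sqrt gradient finite at zero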
            loss = (loss_func(yb_pred, yb) + 1e-6)**0.5
            opt2.zero_grad()
            loss.backward()
            opt2.step()

        model_2.eval()
        with torch.no_grad():
            valid_loss = (loss_func(model_2(X_valid), Y_valid) + 1e-6)**0.5

        # loss is already an RMSE here; data is loaded with normalize=False, so "true loss" equals the loss
        print(
            'Epoch {}/{}, train loss:{:.4f}  true loss:{:.4f}      valid loss:{:.4f}  val trueloss:{:.4f}'
            .format(epoch + 1, epochs, loss, loss, valid_loss, valid_loss))

        if valid_loss <= best_loss:
            best_loss, best_model_wts, best_epoch = valid_loss, copy.deepcopy(
                model_2.state_dict()), epoch + 1

    print(
        'Finished Training     best val loss:{:.4f}   true loss:{:.4f}      best epoch:{}'
        .format(best_loss, best_loss, best_epoch))
    model_2.load_state_dict(best_model_wts)

    test_ds = TensorDataset(X_test, Y_test)
    test_dl = DataLoader(test_ds, batch_size=bs)
    model_2.eval()

    data_pred = []
    with torch.no_grad():  # no gradients needed at test time
        for xb, yb in test_dl:
            yb_pred = model_2(xb)
            data_pred.append(yb_pred)
    data_pred = torch.cat(data_pred, 0).float().to(device)
    data_real = Y_test
    loss = loss_func(data_pred, data_real)**0.5

    fwrite = open(outputpath, 'a+')
    fwrite.write('K: {}      loss: {:.4f}  true loss:{:.4f}\n'.format(
        K, loss, loss))
    fwrite.close()
    print('K: {}      loss: {:.4f}  true loss:{:.4f}'.format(K, loss, loss))
Example #2

import tensorly as tl
import torch.nn as nn
import torch.optim as optim
from tensorly.decomposition import tucker

import dataprocess           # local data-loading helpers (assumed available)
from model import Lstm5      # local model definition (module name assumed)

K = 50
width = 20
height = 20
attnode = 1024
lr = 0.001
epochs = 100
len_test = 48*14
len_val = 48*10
bs = 32
device = "cuda:2"
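# run tensorly on the PyTorch backend so tucker() returns torch tensors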
tl.set_backend('pytorch')

loss_func = nn.MSELoss(reduction='mean')
data = dataprocess.load_data_cluster(91, "bikeV2.h5", 0)

X = tl.tensor(data)
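# Tucker decomposition: X is approximated by a small core tensor multiplied by
# one factor matrix per mode; factors[0] (the time mode) is the weight sequence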
core, factors = tucker(X, rank=[K, 15, 15])
model_2 = Lstm5(K, K, (core, factors), height, width, device).to(device)
opt2 = optim.Adam(model_2.parameters(), lr=lr)


data_weight = factors[0].cpu().detach().numpy()
X_recent = []
Y_ = []
depends = [8, 7, 6, 5, 4, 3, 2, 1]
first = depends[0]
for i in range(first, len(data_weight)):
    x_recent = [data_weight[i - j] for j in depends]
    y_ = data[i]

    X_recent.append(x_recent)
    Y_.append(y_)
Example #3
def get_model(bases):
    model_1 = SoftAttention(width, height, K, attnode)
    model_2 = Lstm2(K, K, bases, height, width)
    return model_1, model_2, optim.Adam(
        model_1.parameters(), lr=lr), optim.Adam(model_2.parameters(), lr=lr)


def get_bases(Filename):
    with h5py.File("output/" + Filename + ".h5", 'r') as f:  # open read-only
        return f['data'][:]


# def loss_func():
#     return nn.MSELoss()

loss_func = nn.MSELoss(reduction='mean')  # size_average is deprecated in favor of reduction
data = torch.from_numpy(dataprocess.load_data_cluster(91, "bikeV2.h5",
                                                      0)).float().to(device)
bases = torch.from_numpy(get_bases("clusterNYC" + str(K) +
                                   "and100")).to(device)
model_1, model_2, opt1, opt2 = get_model(bases)
model_1.to(device)
model_2.to(device)
# A = []
# for epoch in range(len(data)):
#     lossmin = 9999
#     for i in range(20):
#         model_1 = SoftAttention(width,height,K,attnode).to(device)
#         opt1 = optim.Adam(model_1.parameters(), lr=lr)
#         loss = torch.ones([1]).to(device)
#         loss1 = torch.zeros([1]).to(device)
#         while loss.item() != loss1.item():
#             loss1 = loss
Example #4
def main():
    loss_func = nn.MSELoss(reduction='mean')
    data_bases, bikesta = dataprocess.load_data_bases(train_days, baseH5path,
                                                      0, 8, width, height,
                                                      reduce)
    data_bases = torch.from_numpy(data_bases).view(-1, width *
                                                   height).float().to(device)
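
    # DenseAutoencoder is defined elsewhere; from the calls below it must expose
    # .encoder and .decoder submodules. A hypothetical minimal sketch:
    #
    #     class DenseAutoencoder(nn.Module):
    #         def __init__(self):
    #             super().__init__()
    #             self.encoder = nn.Sequential(nn.Linear(width * height, K), nn.ReLU())
    #             self.decoder = nn.Linear(K, width * height)
    #         def forward(self, x):
    #             return self.decoder(self.encoder(x))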

    model_1 = DenseAutoencoder().to(device)
    print(model_1)
    opt1 = optim.Adam(model_1.parameters(), lr=lr)

    ae_ds = TensorDataset(data_bases, data_bases)
    ae_dl = DataLoader(ae_ds, batch_size=bs * 2, shuffle=True)

    # best_loss, best_model_wts, best_epoch = 999, copy.deepcopy(model_1.state_dict()), -1
    for epoch in range(epochs_ae):
        for xb, yb in ae_dl:
            yb_pred = model_1(xb)
            loss = (loss_func(yb_pred, yb) + 1e-6)**0.5  # RMSE, matching the convention used elsewhere
            opt1.zero_grad()
            loss.backward()
            opt1.step()
        print("loss:{:.4f}   true loss:{:.4f}    epochs:{}".format(
            loss, loss * bikesta.std, epoch))

        # if best_loss>= loss

    torch.save(model_1.encoder, "output/bases/bikepp_en")
    torch.save(model_1.decoder, "output/bases/bikepp_de")
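    # the submodules are pickled whole, so reloading requires the DenseAutoencoder
    # class to be importable; saving state_dicts would be the more portable choice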

    encoder = torch.load("output/bases/bikepp_en")
    decoder = torch.load("output/bases/bikepp_de")

    # derive the weight sequence
    data = dataprocess.load_data_cluster(91, dataH5path, 0, width,
                                         height).reshape(-1, height * width)
    print(data.shape)
    data = (data - bikesta.mean) / bikesta.std
    W = data
    # W = encoder(torch.from_numpy(data).float().to(device)).cpu().detach().numpy()
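    # the raw normalized frames are fed as weights here; Lstm7 receives the
    # trained encoder/decoder, so the projection presumably happens inside it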
    model_2 = Lstm7(K, K, encoder, decoder, lstm_height, lstm_width,
                    device).to(device)
    opt2 = optim.Adam(model_2.parameters(), lr=lr)

    # for name,para in model_2.named_parameters():
    #     print(name)
    #     if name=="decoder":
    #         para.requires_grad=False
    #         print("yes")
    for name, param in model_2.named_parameters():
        if param.requires_grad:
            print(name)

    X_recent = []
    Y_ = []
    depends = [8, 7, 6, 5, 4, 3, 2, 1]
    first = depends[0]
    for i in range(first, len(W)):
        x_recent = [W[i - j] for j in depends]
        y_ = data[i]

        X_recent.append(x_recent)
        Y_.append(y_)

    X_recent = torch.from_numpy(np.asarray(X_recent)).float().to(device)
    Y_ = torch.from_numpy(np.asarray(Y_)).float().to(device)
    X_train, X_test = X_recent[:-len_test], X_recent[-len_test:]
    Y_train, Y_test = Y_[:-len_test], Y_[-len_test:]
    X_train, X_valid = X_train[:-len_val], X_train[-len_val:]
    Y_train, Y_valid = Y_train[:-len_val], Y_train[-len_val:]

    train_ds = TensorDataset(X_train, Y_train)
    train_dl = DataLoader(train_ds, batch_size=bs, shuffle=True)
    valid_ds = TensorDataset(X_valid, Y_valid)
    valid_dl = DataLoader(valid_ds, batch_size=bs * 2, shuffle=True)

    best_loss, best_model_wts, best_epoch = 999, copy.deepcopy(
        model_2.state_dict()), -1
    for epoch in range(epochs):
        model_2.train()
        for xb, yb in train_dl:
            yb_pred = model_2(xb)
            loss = loss_func(yb_pred, yb)
            opt2.zero_grad()
            loss.backward()
            opt2.step()

        model_2.eval()
        with torch.no_grad():
            valid_loss = (loss_func(model_2(X_valid), Y_valid) + 1e-6)**0.5
        print(
            'Epoch {}/{}, train loss:{:.4f}  true loss:{:.4f}      valid loss:{:.4f}  val trueloss:{:.4f}'
            .format(epoch + 1, epochs, (loss + 1e-6)**0.5,
                    (loss + 1e-6)**0.5 * bikesta.std, valid_loss,
                    valid_loss * bikesta.std))
        if valid_loss <= best_loss:
            best_loss, best_model_wts, best_epoch = valid_loss, copy.deepcopy(
                model_2.state_dict()), epoch + 1
    print(
        "Finished Training     best val loss:{:.4f}   true loss:{:.4f}      best epoch:{}"
        .format(best_loss, best_loss * bikesta.std, best_epoch))
    model_2.load_state_dict(best_model_wts)

    test_ds = TensorDataset(X_test, Y_test)
    test_dl = DataLoader(test_ds, batch_size=bs)
    model_2.eval()
    data_pred = []

    with torch.no_grad():  # no gradients needed at test time
        for xb, yb in test_dl:
            yb_pred = model_2(xb)
            data_pred.append(yb_pred)
    data_pred = torch.cat(data_pred, 0).float().to(device)
    data_real = Y_test
    loss = loss_func(data_pred, data_real)**0.5
    fwrite = open(outputpath, "a+")
    fwrite.write("K: {}      loss: {:.4f}  true loss:{:.4f}\n".format(
        K, loss, loss * bikesta.std))
    fwrite.close()
    print("K: {}      loss: {:.4f}  true loss:{:.4f}".format(
        K, loss, loss * bikesta.std))
Example #5
def main():
    loss_func = nn.MSELoss(reduction='mean')
    data_bases, bikesta = dataprocess.load_data_bases(91, baseH5path, 0, 8,
                                                      width, height, reduce)

    # 求出权重序列
    data = dataprocess.load_data_cluster(91, dataH5path, 0, width, height)
    data = (data - bikesta.mean) / bikesta.std
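
    # NOTE: H (spatial bases) and W (weight sequence) are used below but not
    # computed in this snippet; presumably they come from an NMF step on
    # data_bases as in Example #1 (H = model.components_, W = data @ pinv(H))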

    model_2 = Lstm3(K, K,
                    torch.from_numpy(H).float().to(device), height, width,
                    device).to(device)
    opt2 = optim.Adam(model_2.parameters(), lr=lr)

    X_recent = []
    Y_ = []
    depends = [8, 7, 6, 5, 4, 3, 2, 1]
    first = depends[0]
    for i in range(first, len(W)):
        x_recent = [W[i - j] for j in depends]
        y_ = data[i]

        X_recent.append(x_recent)
        Y_.append(y_)

    # seed = 20
    # random.seed(seed)
    # random.shuffle(X_recent)
    # random.seed(seed)
    # random.shuffle(Y_)

    X_recent = torch.from_numpy(np.asarray(X_recent)).float().to(device)
    Y_ = torch.from_numpy(np.asarray(Y_)).float().to(device)
    X_train, X_test = X_recent[:-len_test], X_recent[-len_test:]
    Y_train, Y_test = Y_[:-len_test], Y_[-len_test:]
    X_train, X_valid = X_train[:-len_val], X_train[-len_val:]
    Y_train, Y_valid = Y_train[:-len_val], Y_train[-len_val:]

    train_ds = TensorDataset(X_train, Y_train)
    train_dl = DataLoader(train_ds, batch_size=bs, shuffle=True)
    valid_ds = TensorDataset(X_valid, Y_valid)
    valid_dl = DataLoader(valid_ds, batch_size=bs * 2, shuffle=True)

    best_loss, best_model_wts, best_epoch = 999, copy.deepcopy(
        model_2.state_dict()), -1
    for epoch in range(epochs):
        model_2.train()
        for xb, yb in train_dl:
            yb_pred = model_2(xb)
            loss = loss_func(yb_pred, yb)
            opt2.zero_grad()
            loss.backward()
            opt2.step()

        model_2.eval()
        with torch.no_grad():
            valid_loss = (loss_func(model_2(X_valid), Y_valid) + 1e-6)**0.5
        print(
            'Epoch {}/{}, train loss:{:.4f}  true loss:{:.4f}      valid loss:{:.4f}  val trueloss:{:.4f}'
            .format(epoch + 1, epochs, (loss + 1e-6)**0.5,
                    (loss + 1e-6)**0.5 * bikesta.std, valid_loss,
                    valid_loss * bikesta.std))
        if valid_loss <= best_loss:
            best_loss, best_model_wts, best_epoch = valid_loss, copy.deepcopy(
                model_2.state_dict()), epoch + 1
    print(
        "Finished Training     best val loss:{:.4f}   true loss:{:.4f}      best epoch:{}"
        .format(best_loss, best_loss * bikesta.std, best_epoch))
    model_2.load_state_dict(best_model_wts)

    test_ds = TensorDataset(X_test, Y_test)
    test_dl = DataLoader(test_ds, batch_size=bs)
    model_2.eval()
    data_pred = []

    with torch.no_grad():  # no gradients needed at test time
        for xb, yb in test_dl:
            yb_pred = model_2(xb)
            data_pred.append(yb_pred)
    data_pred = torch.cat(data_pred, 0).float().to(device)
    data_real = Y_test
    loss = loss_func(data_pred, data_real)**0.5
    fwrite = open(outputpath, "a+")
    fwrite.write("K: {}      loss: {:.4f}  true loss:{:.4f}\n".format(
        K, loss, loss * bikesta.std))
    fwrite.close()
    print("K: {}      loss: {:.4f}  true loss:{:.4f}".format(
        K, loss, loss * bikesta.std))