Example #1
def evaluate(model, val_data, batch_size, seq_len):
    # Relies on the globals `device`, `adj`, and `loss_fn` defined elsewhere.
    model.eval()
    loss_all = []
    with torch.no_grad():  # no gradients are needed during evaluation
        for xs_seq in yield_data_time(val_data, batch_size, seq_len):
            xs_seq = torch.FloatTensor(xs_seq).to(device)
            out, mu, logvar = model(xs_seq, adj)
            loss = loss_fn(xs_seq, out, mu, logvar, [0, 1])
            loss_all.append(loss.item())
    return np.mean(loss_all)
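
Every example on this page consumes yield_data_time from data_process, which is not shown here. The sketch below is only a plausible reading of its behaviour, assuming it yields batches of length-seq_len sliding windows and that the fourth argument toggles shuffling; the real implementation may differ.

import numpy as np

def yield_data_time(data, batch_size, seq_len, shuffle=True):
    # Hypothetical sketch: `data` has shape (T, ...); each yielded batch
    # stacks `batch_size` windows of `seq_len` consecutive time steps.
    starts = np.arange(len(data) - seq_len + 1)
    if shuffle:
        np.random.shuffle(starts)
    for i in range(0, len(starts) - batch_size + 1, batch_size):
        batch = [data[s:s + seq_len] for s in starts[i:i + batch_size]]
        yield np.stack(batch)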
Example #2
def train_epoch(data,
                batch_size,
                seq_len,
                val_data=None,
                show_iter=None,
                show_forward_info='',
                device=device):
    # Relies on the globals `trainer`, `optimizer`, `adj`, and `loss_fn`.
    loss_data = []
    val_loss = None  # stays None when no validation data is supplied
    max_i = (len(data) - 1) // batch_size

    trainer.train(True)
    for i, xs_seq in enumerate(yield_data_time(data, batch_size, seq_len)):
        xs_seq = torch.FloatTensor(xs_seq).to(device)
        out, mu, logvar = trainer(xs_seq, adj)
        loss = loss_fn(xs_seq, out, mu, logvar, [0, 1])
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        loss_data.append(loss.item())
        if show_iter is not None and i % show_iter == 0:
            print('{} b:{}/{}  loss:{}'.format(show_forward_info, i, max_i,
                                               loss_data[-1]))

    if val_data is not None:
        del xs_seq, out, loss
        torch.cuda.empty_cache()
        trainer.eval()
        eval_loss_all = []
        with torch.no_grad():  # validation needs no gradients
            for xs_seq in yield_data_time(val_data, batch_size, seq_len):
                xs_seq = torch.FloatTensor(xs_seq).to(device)
                out, mu, logvar = trainer(xs_seq, adj)
                loss = loss_fn(xs_seq, out, mu, logvar, [0, 1])
                eval_loss_all.append(loss.item())
        val_loss = np.mean(eval_loss_all)
        print('{}  val_loss:{}'.format(show_forward_info, val_loss))

    return np.array(loss_data), val_loss
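
The loss_fn shared by these examples takes mu together with a second distribution parameter (named logvar here, std in Examples #4 and #6), which suggests a VAE-style objective with a closed-form KL term. Since loss_fn itself is not shown on this page, here is a reference sketch of the standard diagonal-Gaussian KL in both common parameterisations (function names are illustrative):

import torch

def kl_from_logvar(mu, logvar):
    # KL(N(mu, exp(logvar)) || N(0, I)) for a diagonal Gaussian.
    return -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())

def kl_from_std(mu, std):
    # The same divergence when the encoder outputs a standard deviation.
    var = std.pow(2)
    return -0.5 * torch.sum(1 + torch.log(var) - mu.pow(2) - var)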
Example #3
import numpy as np
import matplotlib.pyplot as plt
from data_process import yield_data_time

from mpl_toolkits.mplot3d import Axes3D
from sklearn.manifold import TSNE

# Load the 10-dimensional encodings produced by the trained model;
# `encode10dim` is required by the t-SNE fits below.
encode10dim = np.load('result/rnn_new/encode10_seqlen/fin_delnegative/fin_delnegative_e77_encode.npy')

origin_data = np.load('data/new/data2.npy')
label = np.load('data/new/label2.npy')
label = label.T

# Sum the labels inside each length-10 window and binarise at a
# threshold of 50 to get one label per window.
seq_label = []
for seq in yield_data_time(label, 1, 10, False):
    seq_label.append(np.sum(seq))
seq_label = np.array(seq_label)
seq_label = (seq_label > 50).astype('int')

#%%

tsne = TSNE(n_components=2)
fit2dim = tsne.fit_transform(encode10dim)
np.save('result/rnn_new/encode10_seqlen/fin_delnegative/tsne_fit2dim.npy', fit2dim)

tsne = TSNE(n_components=3)
fit3dim = tsne.fit_transform(encode10dim)
np.save('result/rnn_new/encode10_seqlen/fin_delnegative/tsne_fit3dim.npy', fit3dim)

#%%
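
The matplotlib and Axes3D imports suggest the saved embeddings are visualised in a later cell that is not included here. A minimal sketch of such a plot, colouring points by seq_label (this assumes fit2dim/fit3dim and seq_label have matching lengths, which the snippet itself does not guarantee):

fig = plt.figure(figsize=(10, 5))

ax2d = fig.add_subplot(1, 2, 1)
ax2d.scatter(fit2dim[:, 0], fit2dim[:, 1], c=seq_label, s=2, cmap='coolwarm')
ax2d.set_title('t-SNE, 2 components')

ax3d = fig.add_subplot(1, 2, 2, projection='3d')
ax3d.scatter(fit3dim[:, 0], fit3dim[:, 1], fit3dim[:, 2],
             c=seq_label, s=2, cmap='coolwarm')
ax3d.set_title('t-SNE, 3 components')

plt.show()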
Example #4
# Restore a trained model from disk; `load_name`, `device`, `adj`, `data`,
# `batch_size`, and `seq_len` are assumed to be defined in earlier cells.
with open(load_name + '.model', 'rb') as f:
    trainer = torch.load(f).to(device)

encoder = trainer.encoder
decoder = trainer.decoder

adj = torch.FloatTensor(adj).to(device)
adj.requires_grad = False

result = []
mu_result = []
std_result = []

start = time.time()
max_i = (len(data) - seq_len) // batch_size
with torch.no_grad():  # inference only; no gradients needed
    for i, x in enumerate(yield_data_time(data, batch_size, seq_len, False)):
        x = torch.FloatTensor(x).to(device)
        encode, mu, std = encoder(x, adj)
        result.append(encode.cpu().numpy())
        mu_result.append(mu.cpu().numpy())
        std_result.append(std.cpu().numpy())
        print("{}/{}".format(i, max_i))
    
end = time.time()
print('time:{}'.format(end - start))

#result = np.array(result)
#result = np.concatenate(result)
#np.save(load_name+'_encode.npy', result)
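
The commented-out lines above persist only the encodings. If the means and standard deviations are wanted as well, the same pattern extends directly; a sketch (the '_mu' and '_std' file names are hypothetical):

# Stitch the per-batch outputs together (batches arrive in order, since
# shuffling is off) and save all three arrays.
encode_all = np.concatenate(result)
mu_all = np.concatenate(mu_result)
std_all = np.concatenate(std_result)
np.save(load_name + '_encode.npy', encode_all)
np.save(load_name + '_encode_mu.npy', mu_all)
np.save(load_name + '_encode_std.npy', std_all)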

#%%
Example #5
encoder = Denses(encoder_list)
# Mirror the layer sizes so the decoder is the structural reverse of the encoder.
decoder = Denses(list(reversed(encoder_list)))
trainer = Trainer(encoder, decoder).to(device)
optimizer = optim.Adam(trainer.parameters())
loss_fn = MSE()
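
Denses and Trainer are defined elsewhere in this repository. As a rough illustration only, a Denses built from a list of layer widths might look like the following; this is a hypothetical sketch, not the project's actual class, and the unused adj argument merely mirrors the call signature used below:

import torch.nn as nn

class Denses(nn.Module):
    # Hypothetical sketch: an MLP assembled from a list of layer widths.
    def __init__(self, sizes):
        super().__init__()
        layers = []
        for d_in, d_out in zip(sizes[:-1], sizes[1:]):
            layers += [nn.Linear(d_in, d_out), nn.ReLU()]
        self.net = nn.Sequential(*layers[:-1])  # drop the final activation

    def forward(self, x, adj=None):  # adj accepted but unused in this sketch
        return self.net(x)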

loss_log = []
all_len = len(train_data) // batch_size + 1

for e in range(epochs):
    loss_ = []
    trainer.train()
    for i, x in enumerate(yield_data_time(train_data, batch_size, 1, True)):
        x = x.reshape(x.shape[1], -1)
        x = torch.FloatTensor(x).to(device)
        out = trainer(x, None)
        loss = loss_fn(x, out)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        loss_.append(loss.item())
        if i % 10 == 0:
            print('e:{}/{}  {}/{}  loss:{}'.format(e, epochs, i, all_len,
                                                   loss_[-1]))

    trainer.eval()
    loss_eval_ = []
    with torch.no_grad():
        for i, x in enumerate(yield_data_time(val_data, batch_size, 1, True)):
            # Assumed completion (the source snippet is truncated here):
            # mirror the training pass without gradient updates.
            x = torch.FloatTensor(x.reshape(x.shape[1], -1)).to(device)
            loss_eval_.append(loss_fn(x, trainer(x, None)).item())
    loss_log.append((np.mean(loss_), np.mean(loss_eval_)))
Example #6
encoder = trainer.encoder
decoder = trainer.decoder

adj = torch.FloatTensor(adj).to(device)
adj.requires_grad = False

result = []
mu_result = []
std_result = []

decoder_mu = []
decoder_std = []

start = time.time()
max_i = (len(data) - seq_len) // batch_size
with torch.no_grad():  # inference only; no gradients needed
    for i, x in enumerate(yield_data_time(data, batch_size, seq_len, False)):
        x = torch.FloatTensor(x).to(device)
        encode, mu, std = encoder(x, adj)
        # Decode the distribution parameters through the mirrored decoder.
        mu_output = decoder(mu, adj.transpose(1, 0))
        std_output = decoder(std, adj.transpose(1, 0))

        decoder_mu.append(mu_output.cpu().numpy())
        decoder_std.append(std_output.cpu().numpy())

        result.append(encode.cpu().numpy())
        mu_result.append(mu.cpu().numpy())
        std_result.append(std.cpu().numpy())

        print("{}/{}".format(i, max_i))
    
end = time.time()
print('time:{}'.format(end - start))
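
Note that this example decodes mu and std separately, producing the parameters of the reconstruction rather than a sample from it. For comparison, the usual VAE reconstruction first draws a latent sample with the reparameterisation trick; a sketch (variable names are illustrative):

# Draw z ~ N(mu, std^2) via the reparameterisation trick, then decode the
# sample instead of the raw distribution parameters.
eps = torch.randn_like(std)
z = mu + std * eps
sample_out = decoder(z, adj.transpose(1, 0))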