Example #1
# (The original snippet begins mid-call: momentum_vecs and interaction_vecs are
# assumed precomputed from data[:-1] upstream, as in Example #4.)

out = model.log_likelihood(data,
                           momentum_vecs=momentum_vecs,
                           interaction_vecs=interaction_vecs)
print(out)

##################### training ############################

num_iters = 10
losses, opt = model.fit(data,
                        num_iters=num_iters,
                        lr=0.001,
                        momentum_vecs=momentum_vecs,
                        interaction_vecs=interaction_vecs)

##################### sampling ############################
print("start sampling")
sample_z, sample_x = model.sample(30)

#################### inference ###########################
print("inferiring most likely states...")
z = model.most_likely_states(data,
                             momentum_vecs=momentum_vecs,
                             interaction_vecs=interaction_vecs)

#print("k step prediction")
#x_predict = k_step_prediction_for_coupled_momentum_model(model, z, data, momentum_vecs=momentum_vecs, features=features)
#x_predict = k_step_prediction(model, z, data, 10)
# TODO: need to revise the k-step prediction, specifically the way to calculate the momentum
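A quick sanity check on training, used explicitly by the later examples, is to plot the per-iteration losses returned by fit:

import matplotlib.pyplot as plt

plt.figure()
plt.plot(losses)
plt.xlabel("iteration")
plt.ylabel("loss")
plt.show()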
Example #2
import matplotlib.pyplot as plt
# (torch, numpy, and the model classes are assumed imported in the original script)

torch.manual_seed(0)
np.random.seed(0)

# test fitting

K = 3
D = 2
lags = 5

trans1 = LinearTransformation(K=K, D=D, lags=lags)
obs1 = ARGaussianObservation(K=K, D=D, transformation=trans1)
model1 = HMM(K=K, D=D, observation=obs1)

T = 100
sample_z, sample_x = model1.sample(T)

model2 = HMM(K=K, D=D, observation='gaussian', observation_kwargs=dict(lags=lags))

lls, opt = model2.fit(sample_x, num_iters=2000, lr=0.001)

z_infer = model2.most_likely_states(sample_x)

x_predict = k_step_prediction(model2, z_infer, sample_x)

plt.figure()
plt.plot(x_predict[:,0], label='prediction')
plt.plot(sample_x[:,0], label='truth')
plt.show()
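Beyond the visual overlay, the per-dimension mean absolute error used by the later examples applies here too (assuming, as there, that k_step_prediction returns a NumPy array aligned with the data):

err = np.mean(np.abs(x_predict - sample_x.numpy()), axis=0)
print("per-dimension MAE:", err)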
Example #3
# (snippet begins mid-script: T, D, and K are assumed defined upstream)
data = torch.randn(T, D, dtype=torch.float64)

lags = 1

bounds = np.array([[-2, 2], [0, 1], [-2, 2], [0, 1]])

As = np.array([
    np.column_stack([np.identity(D),
                     np.zeros((D, (lags - 1) * D))]) for _ in range(K)
])

torch.manual_seed(0)
np.random.seed(0)

tran = LinearTransformation(K=K, D=D, lags=lags, As=As)
observation = ARTruncatedNormalObservation(K=K,
                                           D=D,
                                           M=0,
                                           transformation=tran,
                                           bounds=bounds)

model = HMM(K=K, D=D, M=0, observation=observation)

lls = model.log_likelihood(data)
print(lls)

#losses_1, optimizer_1 = model_1.fit(data_1, method='adam', num_iters=2000, lr=0.001)

z_1 = model.most_likely_states(data)

x_predict_arr_lag1 = k_step_prediction(model, z_1, data)
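The As construction above gives each of the K states a D x (lags * D) autoregressive matrix with an identity block for one lag and zeros elsewhere, so the AR mean initially just copies that lag. A standalone sketch for lags > 1 (which lag the identity block targets depends on the library's column ordering, so treat that as an assumption):

import numpy as np

D, lags = 2, 3
A = np.column_stack([np.identity(D), np.zeros((D, (lags - 1) * D))])
print(A.shape)  # (2, 6): one D x D block per lag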
Example #4
# (snippet begins mid-script: data, momentum_lags, and model are assumed defined upstream)
momentum_vecs = MomentumTransformation._compute_momentum_vecs(
    data[:-1], lags=momentum_lags)

out = model.log_likelihood(data, momentum_vecs=momentum_vecs)
print(out)

##################### training ############################

num_iters = 10
losses, opt = model.fit(data,
                        num_iters=num_iters,
                        lr=0.001,
                        momentum_vecs=momentum_vecs)

##################### sampling ############################
print("start sampling")
sample_z, sample_x = model.sample(30)

#################### inference ###########################
print("inferiring most likely states...")
z = model.most_likely_states(data, momentum_vecs=momentum_vecs)

print("k step prediction")
x_predict = k_step_prediction_for_momentum_model(model,
                                                 z,
                                                 data,
                                                 momentum_vecs=momentum_vecs)
#x_predict = k_step_prediction(model, z, data, 10)
# TODO: need to revise the k-step prediction, specifically the way to calculate the momentum
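MomentumTransformation._compute_momentum_vecs is library-internal; as a rough illustration only (an assumption, not the library's actual code), a momentum vector could be the mean of the most recent displacements:

import torch

def compute_momentum_vecs_sketch(x, lags):
    """x: (T, D). Returns (T-1, D): mean of up to `lags` recent displacements."""
    diffs = x[1:] - x[:-1]
    out = torch.zeros_like(diffs)
    for t in range(diffs.shape[0]):
        lo = max(0, t - lags + 1)
        out[t] = diffs[lo:t + 1].mean(dim=0)
    return out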
Example #5
def test_model():
    torch.manual_seed(0)
    np.random.seed(0)

    T = 100
    D = 4

    # data = np.array([[1.0, 1.0, 1.0, 6.0], [3.0, 6.0, 8.0, 6.0],
    #                [4.0, 7.0, 8.0, 5.0], [6.0, 7.0, 5.0, 6.0], [8.0, 2.0, 6.0, 1.0]])
    data = np.random.randn(T, D)
    data = torch.tensor(data, dtype=torch.float64)

    xmax = max(np.max(data[:, 0].numpy()), np.max(data[:, 2].numpy()))
    xmin = min(np.min(data[:, 0].numpy()), np.min(data[:, 2].numpy()))
    ymax = max(np.max(data[:, 1].numpy()), np.max(data[:, 3].numpy()))
    ymin = min(np.min(data[:, 1].numpy()), np.min(data[:, 3].numpy()))
    bounds = np.array([[xmin - 1, xmax + 1], [ymin - 1, ymax + 1],
                       [xmin - 1, xmax + 1], [ymin - 1, ymax + 1]])

    def toy_feature_vec_func(s):
        """
        :param s: self, (T, 2)
        :return: features, (T, Df, 2)
        """
        corners = torch.tensor([[0, 0], [0, 8], [10, 0], [10, 8]],
                               dtype=torch.float64)
        return feature_direction_vec(s, corners)

    K = 3

    Df = 4
    lags = 1

    tran = UniLSTMTransformation(K=K,
                                 D=D,
                                 Df=Df,
                                 feature_vec_func=toy_feature_vec_func,
                                 lags=lags,
                                 dh=10)

    # observation
    obs = ARTruncatedNormalObservation(K=K,
                                       D=D,
                                       lags=lags,
                                       bounds=bounds,
                                       transformation=tran)

    # model
    model = HMM(K=K, D=D, observation=obs)

    print("calculating log likelihood")
    feature_vecs_a = toy_feature_vec_func(data[:-1, 0:2])
    feature_vecs_b = toy_feature_vec_func(data[:-1, 2:4])
    feature_vecs = (feature_vecs_a, feature_vecs_b)
    packed_data = get_packed_data(data[:-1], lags=lags)

    model.log_likelihood(data,
                         feature_vecs=feature_vecs,
                         packed_data=packed_data)

    # fit
    losses, _ = model.fit(data,
                          optimizer=None,
                          method="adam",
                          num_iters=50,
                          feature_vecs=feature_vecs,
                          packed_data=packed_data)

    plt.figure()
    plt.plot(losses)
    plt.show()

    # most-likely-z
    print("Most likely z...")
    z = model.most_likely_states(data,
                                 feature_vecs=feature_vecs,
                                 packed_data=packed_data)

    # prediction

    print("0 step prediction")
    if data.shape[0] <= 1000:
        data_to_predict = data
    else:
        data_to_predict = data[-1000:]
    x_predict = k_step_prediction_for_lstm_model(model,
                                                 z,
                                                 data_to_predict,
                                                 feature_vecs=feature_vecs)
    x_predict_err = np.mean(np.abs(x_predict - data_to_predict.numpy()),
                            axis=0)

    print("10 step prediction")
    x_predict_2 = k_step_prediction(model, z, data_to_predict, k=10)
    x_predict_2_err = np.mean(np.abs(x_predict_2 -
                                     data_to_predict[10:].numpy()),
                              axis=0)

    # samples
    print("sampling...")
    sample_T = 5
    sample_z, sample_x = model.sample(sample_T)
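The data.shape[0] <= 1000 truncation before prediction is written out inline in several of these examples; a small helper (hypothetical name) would remove the repetition:

def last_n_steps(data, n=1000):
    """Keep at most the last n time steps for prediction."""
    return data if data.shape[0] <= n else data[-n:]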
Example #6
def test_model():
    torch.manual_seed(0)
    np.random.seed(0)

    T = 5
    x_grids = np.array([0.0, 5.0, 10.0])
    y_grids = np.array([0.0, 4.0, 8.0])

    data = np.array([[1.0, 1.0, 1.0, 6.0], [3.0, 6.0, 8.0, 6.0],
                     [4.0, 7.0, 8.0, 5.0], [6.0, 7.0, 5.0, 6.0],
                     [8.0, 2.0, 6.0, 1.0]])
    data = torch.tensor(data, dtype=torch.float64)

    def toy_feature_vec_func(s):
        """
        :param s: self, (T, 2)
        :return: features, (T, Df, 2)
        """
        corners = torch.tensor([[0, 0], [0, 8], [10, 0], [10, 8]],
                               dtype=torch.float64)
        return feature_direction_vec(s, corners)

    K = 3
    D = 4
    M = 0

    Df = 4

    bounds = np.array([[0.0, 10.0], [0.0, 8.0], [0.0, 10.0], [0.0, 8.0]])
    tran = LinearGridTransformation(K=K,
                                    D=D,
                                    x_grids=x_grids,
                                    y_grids=y_grids,
                                    Df=Df,
                                    feature_vec_func=toy_feature_vec_func)
    obs = ARTruncatedNormalObservation(K=K,
                                       D=D,
                                       M=0,
                                       lags=1,
                                       bounds=bounds,
                                       transformation=tran)

    model = HMM(K=K, D=D, M=M, transition="stationary", observation=obs)
    model.observation.mus_init = data[0] * torch.ones(
        K, D, dtype=torch.float64)

    # calculate memory
    gridpoints_idx_a = tran.get_gridpoints_idx_for_batch(data[:-1, 0:2])
    gridpoints_idx_b = tran.get_gridpoints_idx_for_batch(data[:-1, 2:4])
    gridpoints_a = tran.get_gridpoints_for_batch(gridpoints_idx_a)
    gridpoints_b = tran.get_gridpoints_for_batch(gridpoints_idx_b)
    feature_vecs_a = toy_feature_vec_func(data[:-1, 0:2])
    feature_vecs_b = toy_feature_vec_func(data[:-1, 2:4])

    gridpoints_idx = (gridpoints_idx_a, gridpoints_idx_b)
    gridpoints = (gridpoints_a, gridpoints_b)
    feature_vecs = (feature_vecs_a, feature_vecs_b)

    # fit
    losses, opt = model.fit(data,
                            optimizer=None,
                            method='adam',
                            num_iters=100,
                            lr=0.01,
                            pbar_update_interval=10,
                            gridpoints=gridpoints,
                            gridpoints_idx=gridpoints_idx,
                            feature_vecs=feature_vecs)

    plt.figure()
    plt.plot(losses)
    plt.show()

    # most-likely-z
    print("Most likely z...")
    z = model.most_likely_states(data,
                                 gridpoints_idx=gridpoints_idx,
                                 feature_vecs=feature_vecs)

    # prediction
    print("0 step prediction")
    if data.shape[0] <= 1000:
        data_to_predict = data
    else:
        data_to_predict = data[-1000:]
    x_predict = k_step_prediction_for_lineargrid_model(
        model,
        z,
        data_to_predict,
        gridpoints_idx=gridpoints_idx,
        feature_vecs=feature_vecs)
    x_predict_err = np.mean(np.abs(x_predict - data_to_predict.numpy()),
                            axis=0)

    print("2 step prediction")
    x_predict_2 = k_step_prediction(model, z, data_to_predict, k=2)
    x_predict_2_err = np.mean(np.abs(x_predict_2 -
                                     data_to_predict[2:].numpy()),
                              axis=0)

    # samples
    sample_T = 5
    sample_z, sample_x = model.sample(sample_T)
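Example #6's pattern is to precompute the expensive per-timestep quantities once and thread them through every call. Bundling them in a dict keeps the call sites uniform (a style suggestion, not a library requirement):

cache = dict(gridpoints=gridpoints,
             gridpoints_idx=gridpoints_idx,
             feature_vecs=feature_vecs)

losses, opt = model.fit(data, num_iters=100, lr=0.01, **cache)
z = model.most_likely_states(data, **cache)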
Example #7
def test_model():
    torch.manual_seed(0)
    np.random.seed(0)

    T = 5
    x_grids = np.array([0.0, 10.0])
    y_grids = np.array([0.0, 8.0])

    bounds = np.array([[0.0, 10.0], [0.0, 8.0], [0.0, 10.0], [0.0, 8.0]])
    data = np.array([[1.0, 1.0, 1.0, 6.0], [3.0, 6.0, 8.0, 6.0],
                     [4.0, 7.0, 8.0, 5.0], [6.0, 7.0, 5.0, 6.0],
                     [8.0, 2.0, 6.0, 1.0]])
    data = torch.tensor(data, dtype=torch.float64)

    K = 3
    D = 4
    M = 0

    obs = GPObservation(K=K,
                        D=D,
                        x_grids=x_grids,
                        y_grids=y_grids,
                        bounds=bounds,
                        train_rs=True)

    correct_kerneldist_gg = torch.tensor(
        [[0., 0., 64., 64., 100., 100., 164., 164.],
         [0., 0., 64., 64., 100., 100., 164., 164.],
         [64., 64., 0., 0., 164., 164., 100., 100.],
         [64., 64., 0., 0., 164., 164., 100., 100.],
         [100., 100., 164., 164., 0., 0., 64., 64.],
         [100., 100., 164., 164., 0., 0., 64., 64.],
         [164., 164., 100., 100., 64., 64., 0., 0.],
         [164., 164., 100., 100., 64., 64., 0., 0.]],
        dtype=torch.float64)
    assert torch.all(torch.eq(correct_kerneldist_gg,
                              obs.kernel_distsq_gg)), obs.kernel_distsq_gg

    log_prob_nocache = obs.log_prob(data)
    print("log_prob_nocache = {}".format(log_prob_nocache))

    kernel_distsq_xg_a = kernel_distsq_doubled(data[:-1, 0:2],
                                               obs.inducing_points)
    kernel_distsq_xg_b = kernel_distsq_doubled(data[:-1, 2:4],
                                               obs.inducing_points)

    correct_kernel_distsq_xg_a = torch.tensor(
        [[2., 2., 50., 50., 82., 82., 130., 130.],
         [2., 2., 50., 50., 82., 82., 130., 130.],
         [45., 45., 13., 13., 85., 85., 53., 53.],
         [45., 45., 13., 13., 85., 85., 53., 53.],
         [65., 65., 17., 17., 85., 85., 37., 37.],
         [65., 65., 17., 17., 85., 85., 37., 37.],
         [85., 85., 37., 37., 65., 65., 17., 17.],
         [85., 85., 37., 37., 65., 65., 17., 17.]],
        dtype=torch.float64)
    assert torch.all(torch.eq(correct_kernel_distsq_xg_a,
                              kernel_distsq_xg_a)), kernel_distsq_xg_a

    memory_kwargs = dict(kernel_distsq_xg_a=kernel_distsq_xg_a,
                         kernel_distsq_xg_b=kernel_distsq_xg_b)

    log_prob = obs.log_prob(data, **memory_kwargs)
    print("log_prob = {}".format(log_prob))

    assert torch.all(torch.eq(log_prob_nocache, log_prob))

    Sigma_a, A_a = obs.get_gp_cache(data[:-1, 0:2], 0, **memory_kwargs)
    Sigma_b, A_b = obs.get_gp_cache(data[:-1, 2:4], 1, **memory_kwargs)
    memory_kwargs_2 = dict(Sigma_a=Sigma_a, A_a=A_a, Sigma_b=Sigma_b, A_b=A_b)

    print("calculating log prob 2...")
    log_prob2 = obs.log_prob(data, **memory_kwargs_2)
    assert torch.all(torch.eq(log_prob, log_prob2))

    model = HMM(K=K, D=D, M=M, transition="stationary", observation=obs)
    model.observation.mus_init = data[0] * torch.ones(
        K, D, dtype=torch.float64)

    # fit
    losses, opt = model.fit(data,
                            optimizer=None,
                            method='adam',
                            num_iters=100,
                            lr=0.01,
                            pbar_update_interval=10,
                            **memory_kwargs)

    plt.figure()
    plt.plot(losses)
    plt.show()

    # most-likely-z
    print("Most likely z...")
    z = model.most_likely_states(data, **memory_kwargs)

    # prediction
    print("0 step prediction")
    if data.shape[0] <= 1000:
        data_to_predict = data
    else:
        data_to_predict = data[-1000:]
    x_predict = k_step_prediction_for_gpmodel(model, z, data_to_predict,
                                              **memory_kwargs)
    x_predict_err = np.mean(np.abs(x_predict - data_to_predict.numpy()),
                            axis=0)

    print("2 step prediction")
    x_predict_2 = k_step_prediction(model, z, data_to_predict, k=2)
    x_predict_2_err = np.mean(np.abs(x_predict_2 -
                                     data_to_predict[2:].numpy()),
                              axis=0)

    # samples
    sample_T = 5
    sample_z, sample_x = model.sample(sample_T)
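For intuition about the asserted kernel_distsq_gg matrix: it is the pairwise squared Euclidean distance matrix over the four grid corners, with every point doubled (presumably one copy per output coordinate), hence the repeated rows and columns. The undoubled version is one torch.cdist call:

import torch

grid = torch.tensor([[0.0, 0.0], [0.0, 8.0], [10.0, 0.0], [10.0, 8.0]],
                    dtype=torch.float64)
print(torch.cdist(grid, grid) ** 2)  # 0 / 64 / 100 / 164, matching the matrix above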
Example #8
# (snippet begins mid-script: K, D, M, momentum_lags, bounds, tran, masks_a/masks_b,
#  m_kwargs_a/m_kwargs_b, and T are assumed defined upstream)
obs = ARTruncatedNormalObservation(K=K, D=D, M=M, lags=momentum_lags, bounds=bounds, transformation=tran)

# model
model = HMM(K=K, D=D, M=M, observation=obs)

log_prob = model.log_likelihood(data, masks=(masks_a, masks_b),
                                memory_kwargs_a=m_kwargs_a, memory_kwargs_b=m_kwargs_b)
log_prob_2 = model.log_likelihood(data)
assert torch.eq(log_prob, log_prob_2)
print(log_prob)

# training
print("start training...")
num_iters = 10
losses, opt = model.fit(data, num_iters=num_iters, lr=0.001, masks=(masks_a, masks_b),
                        memory_kwargs_a=m_kwargs_a, memory_kwargs_b=m_kwargs_b)

# sampling
print("start sampling")
sample_z, sample_x = model.sample(T)

# inference
print("inferiring most likely states...")
z = model.most_likely_states(data, masks=(masks_a, masks_b),
                             memory_kwargs_a=m_kwargs_a, memory_kwargs_b=m_kwargs_b)

print("0 step prediction")
x_predict = k_step_prediction_for_grid_model(model, z, data, memory_kwargs_a=m_kwargs_a, memory_kwargs_b=m_kwargs_b)

print("k step prediction")
x_predict_10 = k_step_prediction(model, z, data, 10)
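One caution on the equality assert above: torch.eq demands bit-exact agreement, which holds only if the cached and uncached paths perform identical arithmetic. If they agree merely up to round-off, torch.allclose is the safer check:

assert torch.allclose(log_prob, log_prob_2)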
Example #9
# (snippet begins mid-script: model, data, momentum_vecs, and features are assumed defined upstream)
out = model.log_likelihood(data,
                           momentum_vecs=momentum_vecs,
                           features=features)
print(out)

##################### training ############################

num_iters = 10
losses, opt = model.fit(data,
                        num_iters=num_iters,
                        lr=0.001,
                        momentum_vecs=momentum_vecs,
                        features=features)

##################### sampling ############################
print("start sampling")
sample_z, sample_x = model.sample(30)

#################### inference ###########################
print("inferiring most likely states...")
z = model.most_likely_states(data,
                             momentum_vecs=momentum_vecs,
                             features=features)

print("k step prediction")
x_predict = k_step_prediction_for_momentum_feature_model(
    model, z, data, momentum_vecs=momentum_vecs, features=features)
#x_predict = k_step_prediction(model, z, data, 10)
# TODO: need to revise the k-step prediction, specifically the way to calculate the momentum
Example #10
# (snippet begins mid-script: model, data, num_iters, z, and K are assumed defined upstream)
from tqdm import tqdm

optimizer = torch.optim.Adam(model.params, lr=0.001)
pbar = tqdm(total=num_iters)  # pbar was used but never defined in the original fragment

losses = []
for i in np.arange(num_iters):

    optimizer.zero_grad()

    loss = model.loss(data)
    loss.backward(retain_graph=True)
    optimizer.step()

    loss = loss.detach().numpy()
    losses.append(loss)

    if i % 10 == 0:
        pbar.set_description('iter {} loss {:.2f}'.format(i, loss))
        pbar.update(10)

# check reconstruction
x_reconstruct = model.sample_condition_on_zs(z, data[0])

# infer the latent states
infer_z = model.most_likely_states(data)

perm = find_permutation(z.numpy(), infer_z, K1=K, K2=K)

model.permute(perm)
hmm_z = model.most_likely_states(data)

# check prediction
x_predict_cond_z = k_step_prediction(model, z, data)
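find_permutation aligns the fitted model's arbitrary state labels with the ground-truth labels before comparison. A minimal sketch of that idea (an assumption about its internals, not the library's code) solves an assignment problem on state co-occurrence counts:

import numpy as np
from scipy.optimize import linear_sum_assignment

def find_permutation_sketch(z_true, z_inferred, K):
    overlap = np.zeros((K, K), dtype=int)
    for a, b in zip(z_true, z_inferred):
        overlap[a, b] += 1
    _, perm = linear_sum_assignment(-overlap)  # maximize total overlap
    return perm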