Code example #1
import numpy as np
import matplotlib.pyplot as plt
from sklearn.mixture import BayesianGaussianMixture
from sklearn.utils import check_random_state
from gmr import GMM, kmeansplusplus_initialization, covariance_initialization

# X is assumed to hold demonstration trajectories with shape
# (n_demonstrations, n_steps, n_task_dims); its definition is not part of
# this excerpt.
# Per-step statistics of the raw demonstrations, for later comparison.
steps = X[:, :, 0].mean(axis=0)
expected_mean = X[:, :, 1].mean(axis=0)
expected_std = X[:, :, 1].std(axis=0)

# Attach a normalized time value (column 0) to every sample and flatten the
# demonstrations into a (n_demonstrations * n_steps, n_task_dims + 1) array.
n_demonstrations, n_steps, n_task_dims = X.shape
X_train = np.empty((n_demonstrations, n_steps, n_task_dims + 1))
X_train[:, :, 1:] = X
t = np.linspace(0, 1, n_steps)
X_train[:, :, 0] = t
X_train = X_train.reshape(n_demonstrations * n_steps, n_task_dims + 1)

# Fit a Bayesian GMM on the flattened training data and copy its parameters
# into a gmr GMM so that it can be conditioned on time later.
random_state = check_random_state(0)
n_components = 4
# k-means++ means and distance-based covariances (initialization helpers;
# not used further in this excerpt).
initial_means = kmeansplusplus_initialization(X_train, n_components,
                                              random_state)
initial_covs = covariance_initialization(X_train, n_components)
bgmm = BayesianGaussianMixture(n_components=n_components,
                               max_iter=100).fit(X_train)
gmm = GMM(n_components=n_components,
          priors=bgmm.weights_,
          means=bgmm.means_,
          covariances=bgmm.covariances_,
          random_state=random_state)

plt.figure(figsize=(10, 5))
plt.subplot(121)
plt.title("Confidence Interval from GMM")

# All demonstrated trajectories, first task dimension vs. second.
plt.plot(X[:, :, 0].T, X[:, :, 1].T, c="k", alpha=0.1)

means_over_time = []
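
The first example breaks off right after means_over_time is initialized. Below is a minimal sketch of one plausible continuation, assuming gmr's GMM.condition(indices, x) and its priors/means/covariances attributes; the moment-matching step, stds_over_time, and the 95% band are illustrative additions, not part of the original example.

# Condition the joint model over (time, task dims) on each time step and
# moment-match the conditional mixture to a single Gaussian.
stds_over_time = []
for step in t:
    conditional = gmm.condition([0], np.array([step]))
    mean = conditional.priors.dot(conditional.means)
    second_moment = sum(
        w * (c + np.outer(m, m))
        for w, m, c in zip(conditional.priors, conditional.means,
                           conditional.covariances))
    cov = second_moment - np.outer(mean, mean)
    means_over_time.append(mean)
    stds_over_time.append(np.sqrt(np.diag(cov)))
means_over_time = np.asarray(means_over_time)
stds_over_time = np.asarray(stds_over_time)

# Mean trajectory with an approximate 95% band on the second task dimension.
plt.plot(means_over_time[:, 0], means_over_time[:, 1], c="r", lw=2)
plt.fill_between(means_over_time[:, 0],
                 means_over_time[:, 1] - 1.96 * stds_over_time[:, 1],
                 means_over_time[:, 1] + 1.96 * stds_over_time[:, 1],
                 color="r", alpha=0.3)
plt.show()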
Code example #2
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_equal
from gmr import covariance_initialization

def test_initialize_two_covariances():
    cov = covariance_initialization(np.array([[0], [1], [2]]), 2)
    assert_equal(len(cov), 2)
    assert_array_almost_equal(cov, np.array([[[2.0 / 3.0]], [[2.0 / 3.0]]])**2)
Code example #3
def test_initialize_2d_covariance():
    cov = covariance_initialization(np.array([[0, 0], [3, 4]]), 1)
    assert_equal(len(cov), 1)
    assert_array_almost_equal(cov, np.array([[[9.0, 0.0], [0.0, 16.0]]]))
Code example #4
def test_initialize_one_covariance():
    cov = covariance_initialization(np.array([[0], [1]]), 1)
    assert_equal(len(cov), 1)
    assert_array_almost_equal(cov, np.array([[[1.0]]]))
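
Taken together, these three tests pin down the behavior of covariance_initialization: one diagonal covariance per component, whose i-th diagonal entry is the mean pairwise distance of the samples along feature i, divided by the number of components and squared. A minimal reference sketch of that rule follows; the name reference_covariance_initialization and the use of scipy's pdist are illustrative, not the library's implementation.

import numpy as np
from scipy.spatial.distance import pdist

def reference_covariance_initialization(X, n_components):
    # Mean pairwise distance of the samples along each feature.
    n_features = X.shape[1]
    avg_dists = np.array(
        [np.mean(pdist(X[:, i:i + 1])) for i in range(n_features)])
    # Diagonal covariance shared by all components:
    # cov[k, i, i] = (avg_dists[i] / n_components) ** 2
    cov = np.diag((avg_dists / n_components) ** 2)
    return np.tile(cov, (n_components, 1, 1))

# Matches test_initialize_2d_covariance above: diag(9.0, 16.0) for one component.
print(reference_covariance_initialization(np.array([[0, 0], [3, 4]]), 1))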