def safe_sample(self, alpha):
    """Draw one sample from the mixture, restricted to a confidence region.

    A component is selected from the mixture priors (after suppressing
    low-weight components), then samples are drawn from that component
    until one falls inside its alpha-confidence region.

    Parameters
    ----------
    alpha : float
        Confidence level in (0, 1) bounding the accepted sample.

    Returns
    -------
    sample : array
        A single sample from the selected mixture component.
    """
    self._check_initialized()

    # Safe prior sampling: zero out components whose prior is below the
    # uniform weight 1/n_components, then renormalize before drawing a
    # component index.
    priors = self.priors.copy()
    priors[priors < 1.0 / self.n_components] = 0.0
    priors /= priors.sum()
    assert abs(priors.sum() - 1.0) < 1e-5
    mvn_index = self.random_state.choice(self.n_components, size=1,
                                         p=priors)[0]

    # Allow only samples from alpha-confidence region: rejection-sample
    # from the chosen component until a draw passes the threshold test.
    mvn = MVN(mean=self.means[mvn_index],
              covariance=self.covariances[mvn_index],
              random_state=self.random_state)
    sample = mvn.sample(1)[0]
    # NOTE(review): the squared Mahalanobis distance of a d-dimensional
    # Gaussian sample follows a chi-squared distribution with d degrees of
    # freedom, so chi2(len(sample)) — not len(sample) - 1 — looks like the
    # intended threshold. Also, if mahalanobis_distance returns the
    # non-squared distance (as scipy's helper does), the comparison needs a
    # square/sqrt to be consistent. TODO: confirm against the definition of
    # mahalanobis_distance before changing.
    while (mahalanobis_distance(sample, mvn) >
           chi2(len(sample) - 1).ppf(alpha)):
        sample = mvn.sample(1)[0]
    return sample
Example #2
0
def test_estimate_moments():
    """Check that MVN.from_samples recovers the moments of its input.

    An MVN is fitted to draws from a known bivariate Gaussian, then a
    second MVN is fitted to draws from the first; both must reproduce
    the ground-truth mean and covariance within a small tolerance.
    """
    rng = check_random_state(0)
    true_mean = np.array([0.0, 1.0])
    true_cov = np.array([[0.5, -1.0], [-1.0, 5.0]])

    # Fit directly to samples from the ground-truth distribution.
    samples = rng.multivariate_normal(true_mean, true_cov, size=(100000,))
    fitted = MVN(random_state=rng)
    fitted.from_samples(samples)
    assert_less(np.linalg.norm(fitted.mean - true_mean), 0.02)
    assert_less(np.linalg.norm(fitted.covariance - true_cov), 0.02)

    # Resample from the fitted model and fit again; the second fit adds
    # sampling noise, hence the slightly looser tolerance.
    resampled = fitted.sample(n_samples=100000)
    refitted = MVN(random_state=rng)
    refitted.from_samples(resampled)
    assert_less(np.linalg.norm(refitted.mean - true_mean), 0.03)
    assert_less(np.linalg.norm(refitted.covariance - true_cov), 0.03)
Example #3
0
def test_estimate_moments():
    """Verify moment recovery by MVN.from_samples, directly and after resampling."""
    random_state = check_random_state(0)
    expected_mean = np.array([0.0, 1.0])
    expected_covariance = np.array([[0.5, -1.0], [-1.0, 5.0]])

    # First pass: estimate moments from ground-truth samples.
    data = random_state.multivariate_normal(
        expected_mean, expected_covariance, size=(100000,))
    estimator = MVN(random_state=random_state)
    estimator.from_samples(data)
    mean_error = np.linalg.norm(estimator.mean - expected_mean)
    cov_error = np.linalg.norm(estimator.covariance - expected_covariance)
    assert_less(mean_error, 0.02)
    assert_less(cov_error, 0.02)

    # Second pass: moments estimated from the model's own samples must
    # also stay close to the ground truth (looser bound: extra noise).
    regenerated = estimator.sample(n_samples=100000)
    estimator2 = MVN(random_state=random_state)
    estimator2.from_samples(regenerated)
    assert_less(np.linalg.norm(estimator2.mean - expected_mean), 0.03)
    assert_less(np.linalg.norm(estimator2.covariance - expected_covariance),
                0.03)
Example #4
0
# Demo: sampling from a 2D Gaussian, unconstrained vs. restricted to a
# confidence region. NOTE(review): the snippet appears truncated after the
# third subplot's title.
import numpy as np
import matplotlib.pyplot as plt
from gmr import MVN, plot_error_ellipse

# Fixed seed so the scatter plots are reproducible.
random_state = np.random.RandomState(100)
# Bivariate Gaussian centered at the origin with strong correlation.
mvn = MVN(mean=np.array([0.0, 0.0]),
          covariance=np.array([[1.0, 2.0], [2.0, 9.0]]),
          random_state=random_state)

n_samples = 1000

plt.figure(figsize=(15, 5))

# (1) Plain sampling: points may fall arbitrarily far from the mean.
ax = plt.subplot(131)
ax.set_title("Unconstrained Sampling")
samples = mvn.sample(n_samples)
ax.scatter(samples[:, 0], samples[:, 1], alpha=0.9, s=1, label="Samples")
# Overlay 1-sigma and 2-sigma error ellipses for reference.
plot_error_ellipse(ax, mvn, factors=(1.0, 2.0), color="orange")
ax.set_xlim((-5, 5))
ax.set_ylim((-10, 10))

# (2) Samples restricted to the 2-sigma (95.45 %) confidence region.
ax = plt.subplot(132)
ax.set_title(r"95.45 % Confidence Region ($2\sigma$)")
samples = mvn.sample_confidence_region(n_samples, 0.9545)
ax.scatter(samples[:, 0], samples[:, 1], alpha=0.9, s=1, label="Samples")
plot_error_ellipse(ax, mvn, factors=(1.0, 2.0), color="orange")
ax.set_xlim((-5, 5))
ax.set_ylim((-10, 10))

# (3) 1-sigma (68.27 %) region — sampling/plotting code for this panel is
# missing; the source is cut off after the title.
ax = plt.subplot(133)
ax.set_title(r"68.27 % Confidence Region ($\sigma$)")
Example #5
0
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from sklearn.utils import check_random_state
from gmr import MVN, plot_error_ellipse


if __name__ == "__main__":
    random_state = check_random_state(0)
    mvn = MVN(random_state=random_state)
    X = random_state.multivariate_normal([0.0, 1.0], [[0.5, -2.0], [-2.0, 5.0]],
                                         size=(100,))
    mvn.from_samples(X)
    X_sampled = mvn.sample(n_samples=100)

    plt.figure(figsize=(15, 5))
    plt.subplot(1, 3, 1)
    plt.xlim((-10, 10))
    plt.ylim((-10, 10))
    plot_error_ellipse(plt.gca(), mvn)
    plt.scatter(X[:, 0], X[:, 1], c="g", label="Training data")
    plt.scatter(X_sampled[:, 0], X_sampled[:, 1], c="r", label="Samples")
    plt.title("Bivariate Gaussian")
    plt.legend(loc="best")

    x = np.linspace(-10, 10, 100)
    plt.subplot(1, 3, 2)
    plt.xticks(())
    marginalized = mvn.marginalize(np.array([1]))
Example #6
0
distributions.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from gmr.utils import check_random_state
from gmr import MVN, plot_error_ellipse


# Fit an MVN to 100 samples from a known bivariate Gaussian and draw new
# samples from the fitted model for comparison.
random_state = check_random_state(0)
mvn = MVN(random_state=random_state)
X = random_state.multivariate_normal([0.0, 1.0], [[0.5, 1.5], [1.5, 5.0]],
                                     size=(100,))
mvn.from_samples(X)
X_sampled = mvn.sample(n_samples=100)

# Panel 1: training data, fitted samples, and the error ellipse.
plt.figure(figsize=(15, 5))
plt.subplot(1, 3, 1)
plt.xlim((-10, 10))
plt.ylim((-10, 10))
plot_error_ellipse(plt.gca(), mvn)
plt.scatter(X[:, 0], X[:, 1], c="g", label="Training data")
plt.scatter(X_sampled[:, 0], X_sampled[:, 1], c="r", label="Samples")
plt.title("Bivariate Gaussian")
plt.legend(loc="best")

# Panel 2: marginal over dimension index 1. NOTE(review): `marginalized`
# is never used below — the scrape appears to splice a second, unrelated
# example (sigma-point / unscented-transform demo) in at this point.
x = np.linspace(-10, 10, 100)
plt.subplot(1, 3, 2)
plt.xticks(())
marginalized = mvn.marginalize(np.array([1]))
# Unscented-transform tuning parameters for sigma-point generation.
alpha = 1e-3
beta = 2.0  # lower values give better estimates
kappa = 0.0

# (1) A correlated Gaussian in Cartesian coordinates, with samples.
ax = plt.subplot(131)
ax.set_title("(1) Cartesian coordinates")
ax.set_xlabel("$x_1$")
ax.set_ylabel("$x_2$")
ax.set_xlim((-8, 8))
ax.set_ylim((-8, 8))
mvn_cartesian = MVN(
    mean=np.array([2.5, 1.3]),
    covariance=np.array([[1.0, -1.5], [-1.5, 4.0]]),
    random_state=0)
plot_error_ellipse(ax, mvn_cartesian)
samples_cartesian = mvn_cartesian.sample(1000)
ax.scatter(samples_cartesian[:, 0], samples_cartesian[:, 1], s=1)

# (2) Propagate the Gaussian through the nonlinear Cartesian-to-polar map
# via sigma points, then re-estimate a Gaussian in polar coordinates.
# NOTE(review): "$\phi$" contains the invalid escape `\p` (Python keeps the
# backslash but warns); a raw string r"$\phi$" would be cleaner.
ax = plt.subplot(132)
ax.set_title("(2) Polar coordinates")
ax.set_xlabel("$r$")
ax.set_ylabel("$\phi$")
ax.set_xlim((-8, 8))
ax.set_ylim((-8, 8))
sigma_points_cartesian = mvn_cartesian.sigma_points(alpha=alpha, kappa=kappa)
sigma_points_polar = cartesian_to_polar(sigma_points_cartesian)
mvn_polar = mvn_cartesian.estimate_from_sigma_points(sigma_points_polar, alpha=alpha, beta=beta, kappa=kappa)
plot_error_ellipse(ax, mvn_polar)
# Transform the raw samples too, to compare against the sigma-point estimate.
samples_polar = cartesian_to_polar(samples_cartesian)
ax.scatter(samples_polar[:, 0], samples_polar[:, 1], s=1)