def show_sigma_selections():
    ax = plt.gca()
    ax.axes.get_xaxis().set_visible(False)
    ax.axes.get_yaxis().set_visible(False)

    x = np.array([2, 5])
    P = np.array([[3, 1.1], [1.1, 4]])

    points = MerweScaledSigmaPoints(2, .05, 2., 1.)
    sigmas = points.sigma_points(x, P)
    plot_covariance_ellipse(x, P, facecolor='b', alpha=0.6, variance=[.5])
    plt.scatter(sigmas[:, 0], sigmas[:, 1], c='k', s=50)

    x = np.array([5, 5])
    points = MerweScaledSigmaPoints(2, .15, 2., 1.)
    sigmas = points.sigma_points(x, P)
    plot_covariance_ellipse(x, P, facecolor='b', alpha=0.6, variance=[.5])
    plt.scatter(sigmas[:, 0], sigmas[:, 1], c='k', s=50)

    x = np.array([8, 5])
    points = MerweScaledSigmaPoints(2, .4, 2., 1.)
    sigmas = points.sigma_points(x, P)
    plot_covariance_ellipse(x, P, facecolor='b', alpha=0.6, variance=[.5])
    plt.scatter(sigmas[:, 0], sigmas[:, 1], c='k', s=50)

    plt.axis('equal')
    plt.xlim(0, 10)
    plt.ylim(0, 10)
    plt.show()
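The three point clusters above differ only in alpha: for Merwe points the offset of each point from the mean scales as alpha * sqrt(n + kappa). A minimal numeric check of that scaling, using MerweScaledSigmaPoints the same way as above:

import numpy as np
from filterpy.kalman import MerweScaledSigmaPoints

x = np.array([0., 0.])
P = np.eye(2)
for alpha in (.05, .15, .4):
    points = MerweScaledSigmaPoints(2, alpha, 2., 1.)
    s = points.sigma_points(x, P)
    # max offset from the mean grows linearly with alpha
    print(alpha, np.abs(s - x).max())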
def plot_sigma_points():
    x = np.array([0, 0])
    P = np.array([[4, 2], [2, 4]])

    sigmas = MerweScaledSigmaPoints(n=2, alpha=.3, beta=2., kappa=1.)
    S0 = sigmas.sigma_points(x, P)
    Wm0, Wc0 = sigmas.Wm, sigmas.Wc  # weights are attributes in recent filterpy

    sigmas = MerweScaledSigmaPoints(n=2, alpha=1., beta=2., kappa=1.)
    S1 = sigmas.sigma_points(x, P)
    Wm1, Wc1 = sigmas.Wm, sigmas.Wc

    def plot_sigmas(s, w, **kwargs):
        min_w = min(abs(w))
        scale_factor = 100 / min_w
        return plt.scatter(s[:, 0],
                           s[:, 1],
                           s=abs(w) * scale_factor,
                           alpha=.5,
                           **kwargs)

    plt.subplot(121)
    plot_sigmas(S0, Wc0, c='b')
    plot_covariance_ellipse(x, P, facecolor='g', alpha=0.2, variance=[1, 4])
    plt.title('alpha=0.3')
    plt.subplot(122)
    plot_sigmas(S1, Wc1, c='b', label='alpha=1')
    plot_covariance_ellipse(x, P, facecolor='g', alpha=0.2, variance=[1, 4])
    plt.title('alpha=1')
    plt.show()
    print(sum(Wc0))
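The weight sums printed above follow Van der Merwe's scaled formulation: lambda = alpha**2 * (n + kappa) - n, Wm[0] = lambda / (n + lambda), Wc[0] = Wm[0] + 1 - alpha**2 + beta, and all other weights 1 / (2 * (n + lambda)). A sketch that checks these formulas against filterpy's Wm/Wc attributes:

import numpy as np
from filterpy.kalman import MerweScaledSigmaPoints

n, alpha, beta, kappa = 2, .3, 2., 1.
lambda_ = alpha**2 * (n + kappa) - n
Wm = np.full(2*n + 1, 1. / (2*(n + lambda_)))
Wc = Wm.copy()
Wm[0] = lambda_ / (n + lambda_)
Wc[0] = Wm[0] + 1. - alpha**2 + beta

points = MerweScaledSigmaPoints(n=n, alpha=alpha, beta=beta, kappa=kappa)
assert np.allclose(Wm, points.Wm) and np.allclose(Wc, points.Wc)
# mean weights always sum to 1; covariance weights need not
print(sum(Wm), sum(Wc))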
def show_sigma_selections():
    ax = plt.gca()
    ax.axes.get_xaxis().set_visible(False)
    ax.axes.get_yaxis().set_visible(False)

    x = np.array([2, 5])
    P = np.array([[3, 1.1], [1.1, 4]])

    points = MerweScaledSigmaPoints(2, .09, 2., 1.)
    sigmas = points.sigma_points(x, P)
    Wm, Wc = points.Wm, points.Wc
    plot_covariance_ellipse(x, P, facecolor='b', alpha=.3, variance=[.5])
    _plot_sigmas(sigmas, Wc, alpha=1.0, facecolor='k')

    x = np.array([5, 5])
    points = MerweScaledSigmaPoints(2, .15, 1., .15)
    sigmas = points.sigma_points(x, P)
    Wm, Wc = points.Wm, points.Wc
    plot_covariance_ellipse(x, P, facecolor='b', alpha=0.3, variance=[.5])
    _plot_sigmas(sigmas, Wc, alpha=1.0, facecolor='k')

    x = np.array([8, 5])
    points = MerweScaledSigmaPoints(2, .2, 3., 10)
    sigmas = points.sigma_points(x, P)
    Wm, Wc = points.Wm, points.Wc
    plot_covariance_ellipse(x, P, facecolor='b', alpha=0.3, variance=[.5])
    _plot_sigmas(sigmas, Wc, alpha=1.0, facecolor='k')

    plt.axis('equal')
    plt.xlim(0, 10)
    plt.ylim(0, 10)
    plt.show()
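This variant varies beta and kappa as well as alpha. Common defaults are beta = 2, which is optimal for Gaussian priors, and Julier's heuristic kappa = 3 - n; a quick look at the weights that choice produces:

from filterpy.kalman import MerweScaledSigmaPoints

n = 2
points = MerweScaledSigmaPoints(n, alpha=.2, beta=2., kappa=3 - n)
print('Wm:', points.Wm)
print('Wc:', points.Wc)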
def show_sigma_transform(with_text=False):
    fig = plt.figure()
    ax = fig.gca()

    x = np.array([0, 5])
    P = np.array([[4, -2.2], [-2.2, 3]])

    plot_covariance_ellipse(x, P, facecolor='b', alpha=0.6, variance=9)
    sigmas = MerweScaledSigmaPoints(2, alpha=.5, beta=2., kappa=0.)

    S = sigmas.sigma_points(x=x, P=P)
    plt.scatter(S[:, 0], S[:, 1], c='k', s=80)

    x = np.array([15, 5])
    P = np.array([[3, 1.2], [1.2, 6]])
    plot_covariance_ellipse(x, P, facecolor='g', variance=9, alpha=0.3)

    ax.add_artist(arrow(S[0, 0], S[0, 1], 11, 4.1, 0.6))
    ax.add_artist(arrow(S[1, 0], S[1, 1], 13, 7.7, 0.6))
    ax.add_artist(arrow(S[2, 0], S[2, 1], 16.3, 0.93, 0.6))
    ax.add_artist(arrow(S[3, 0], S[3, 1], 16.7, 10.8, 0.6))
    ax.add_artist(arrow(S[4, 0], S[4, 1], 17.7, 5.6, 0.6))

    ax.axes.get_xaxis().set_visible(False)
    ax.axes.get_yaxis().set_visible(False)

    if with_text:
        plt.text(2.5, 1.5, r"$\chi$", fontsize=32)
        plt.text(13, -1, r"$\mathcal{Y}$", fontsize=32)

    #plt.axis('equal')
    plt.show()
def print_sigmas(n=1, mean=5, cov=3, alpha=.1, beta=2., kappa=2):
    points = MerweScaledSigmaPoints(n, alpha, beta, kappa)
    print('sigmas: ', points.sigma_points(mean, cov).T[0])
    Wm, Wc = points.Wm, points.Wc
    print('mean weights:', Wm)
    print('cov weights:', Wc)
    print('lambda:', alpha**2 * (n + kappa) - n)
    print('sum cov', sum(Wc))
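The point locations themselves are mu and mu plus/minus the columns of sqrt((n + lambda) * P); in the scalar case the matrix square root reduces to a plain sqrt. A hand-rolled check using the same defaults as print_sigmas:

import numpy as np
from filterpy.kalman import MerweScaledSigmaPoints

n, alpha, beta, kappa = 1, .1, 2., 2
mean, cov = 5, 3
lambda_ = alpha**2 * (n + kappa) - n
spread = np.sqrt((n + lambda_) * cov)
by_hand = [mean, mean + spread, mean - spread]

points = MerweScaledSigmaPoints(n, alpha, beta, kappa)
assert np.allclose(by_hand, points.sigma_points(mean, cov).T[0])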
Example #11
def test_sigma_plot():
    """ Test to make sure sigma's correctly mirror the shape and orientation
    of the covariance array."""

    x = np.array([[1, 2]])
    P = np.array([[2, 1.2], [1.2, 2]])
    kappa = .1

    # if kappa is larger, the points should be spread farther apart

    sp0 = JulierSigmaPoints(n=2, kappa=kappa)
    sp1 = JulierSigmaPoints(n=2, kappa=kappa * 1000)
    sp2 = MerweScaledSigmaPoints(n=2, kappa=0, beta=2, alpha=1e-3)
    sp3 = SimplexSigmaPoints(n=2)

    # test __repr__ doesn't crash
    str(sp0)
    str(sp1)
    str(sp2)
    str(sp3)

    # weights are exposed as the Wm/Wc attributes in recent filterpy
    w0 = sp0.Wm
    w1 = sp1.Wm
    w2 = sp2.Wm
    w3 = sp3.Wm

    Xi0 = sp0.sigma_points(x, P)
    Xi1 = sp1.sigma_points(x, P)
    Xi2 = sp2.sigma_points(x, P)
    Xi3 = sp3.sigma_points(x, P)

    assert max(Xi1[:, 0]) > max(Xi0[:, 0])
    assert max(Xi1[:, 1]) > max(Xi0[:, 1])

    if DO_PLOT:
        plt.figure()
        for i in range(Xi0.shape[0]):
            plt.scatter((Xi0[i, 0] - x[0, 0]) * w0[i] + x[0, 0],
                        (Xi0[i, 1] - x[0, 1]) * w0[i] + x[0, 1],
                        color='blue',
                        label=r'Julier low $\kappa$')

        for i in range(Xi1.shape[0]):
            plt.scatter((Xi1[i, 0] - x[0, 0]) * w1[i] + x[0, 0],
                        (Xi1[i, 1] - x[0, 1]) * w1[i] + x[0, 1],
                        color='green',
                        label=r'Julier high $\kappa$')
        # for i in range(Xi2.shape[0]):
        #     plt.scatter((Xi2[i, 0] - x[0, 0]) * w2[i] + x[0, 0],
        #                 (Xi2[i, 1] - x[0, 1]) * w2[i] + x[0, 1],
        #                 color='red')
        for i in range(Xi3.shape[0]):
            plt.scatter((Xi3[i, 0] - x[0, 0]) * w3[i] + x[0, 0],
                        (Xi3[i, 1] - x[0, 1]) * w3[i] + x[0, 1],
                        color='black',
                        label='Simplex')

        stats.plot_covariance_ellipse([1, 2], P)
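Whatever the parameterization, the weighted sigma points must reconstruct the input mean and covariance; a quick sanity check with the same x and P, assuming the attribute-style weights used above:

import numpy as np
from filterpy.kalman import JulierSigmaPoints

x = np.array([1., 2.])
P = np.array([[2., 1.2], [1.2, 2.]])
sp = JulierSigmaPoints(n=2, kappa=.1)
Xi = sp.sigma_points(x, P)
assert np.allclose(np.dot(sp.Wm, Xi), x)  # weighted mean recovers x
resid = Xi - x
# weighted outer products recover P
assert np.allclose(np.einsum('i,ij,ik->jk', sp.Wc, resid, resid), P)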
def plot_ukf_vs_mc(alpha=0.001, beta=3., kappa=1.):

    def fx(x):
        return x**3

    def dfx(x):
        return 3*x**2

    mean = 1
    var = .1
    std = math.sqrt(var)

    data = normal(loc=mean, scale=std, size=50000)
    d_t = fx(data)


    points = MerweScaledSigmaPoints(1, alpha, beta, kappa)
    Wm, Wc = points.Wm, points.Wc
    sigmas = points.sigma_points(mean, var)

    sigmas_f = np.zeros((3, 1))
    for i in range(3):
        sigmas_f[i] = fx(sigmas[i, 0])

    ### pass through unscented transform
    ukf_mean, ukf_cov = unscented_transform(sigmas_f, Wm, Wc)
    ukf_mean = ukf_mean[0]
    ukf_std = math.sqrt(ukf_cov[0])

    norm = scipy.stats.norm(ukf_mean, ukf_std)
    xs = np.linspace(-3, 5, 200)
    plt.plot(xs, norm.pdf(xs), ls='--', lw=2, color='b')
    try:
        plt.hist(d_t, bins=200, density=True, histtype='step', lw=2, color='g')
    except Exception:
        # older versions of matplotlib don't have the density keyword
        plt.hist(d_t, bins=200, normed=True, histtype='step', lw=2, color='g')

    actual_mean = d_t.mean()
    plt.axvline(actual_mean, lw=2, color='g', label='Monte Carlo')
    plt.axvline(ukf_mean, lw=2, ls='--', color='b', label='UKF')
    plt.legend()
    plt.show()

    print('actual mean={:.2f}, std={:.2f}'.format(d_t.mean(), d_t.std()))
    print('UKF    mean={:.2f}, std={:.2f}'.format(ukf_mean, ukf_std))
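unscented_transform itself is just the weighted sample mean and covariance of the transformed points. A minimal re-implementation for intuition; filterpy's version additionally accepts a noise covariance and custom mean/residual functions:

import numpy as np

def unscented_transform_sketch(sigmas_f, Wm, Wc):
    mean = np.dot(Wm, sigmas_f)                        # sum_i Wm[i] * Y_i
    resid = sigmas_f - mean
    cov = np.einsum('i,ij,ik->jk', Wc, resid, resid)   # sum_i Wc[i] * y_i y_i^T
    return mean, cov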
Example #15
class SRA_epistemic(SRA_baseclass):
    """
    SRA with finite dimensional epistemic and aleatory uncertainties
    """
    def __init__(self, X_dist, E_dist, E_conservative):
        """
        Input:
        
        X_dist           -  Aleatory random variable (MultivariateDistribution)
        E_dist           -  Epistemic random variable (MultivariateDistribution)
        E_conservative   -  List [e1, e2, ..] of conservative (bad) values of E for use in MPP search
        """
        super().__init__(X_dist)
        self.E_dist = E_dist
        self.compute_epistemic_sigma()  # Sigma points for E_dist
        self.E_conservative = E_conservative

        self.pruned_samples_X = None
        self.e = None  # a single realization of the epistemic variable E

    def G(self, X):
        return self.G_e(X, self.e)

    def G_e(self, X, e):
        """
        Limit state 
        
        Input: 
        
        X = N * self.X_dist.dim array of N points in X-space
        e = a single realization of the epistemic variable E
        
        Output: N-dimensional numpy array with G(X) values
        """

        raise NotImplementedError(
            'Need to implement custom limit state G(X, e)')

    def UT_pof_moments(self):
        """
        Estimate E[pof] and var[pof] with respect to epistemic uncertainties using UT
        """

        # PoF for each sigma point
        pof = np.array([self.pof_MCIS(e) for e in self.E_sigma])

        # Estimate moments
        pof_mean, pof_var = unscented_transform(pof.reshape(-1, 1),
                                                self.E_UT_points.Wm,
                                                self.E_UT_points.Wc)

        return pof_mean[0], pof_var[0][0]

    def UT_g_moments(self, X):
        """
        Estimate E[g(X)], Var[g(X)] with respect to epistemic uncertainties using UT
        """
        G = np.array([self.G_e(X, e) for e in self.E_sigma])

        m, v = unscented_transform(G, self.E_UT_points.Wm, self.E_UT_points.Wc)
        v = v.diagonal()

        return m, v

    def pof_MCIS(self, e):
        """
        Estimate pof for given e using MCIS
        """

        # Evaluate limit state
        g = self.G_e(self.pruned_samples_X, e)
        I = (g < 0) * 1

        # Estimate pof
        pof = self.Sampler.compute_expectation(I)

        return pof

    def compute_epistemic_sigma(self):
        """
        Set sigma points used in UT for epistemic variables
        """
        self.E_UT_points = MerweScaledSigmaPoints(self.E_dist.dim,
                                                  alpha=0.9,
                                                  beta=2,
                                                  kappa=3 - self.E_dist.dim)
        self.E_sigma = self.E_UT_points.sigma_points(np.zeros(self.E_dist.dim),
                                                     np.eye(self.E_dist.dim))

    def generate_samples(self,
                         n_MPP=2,
                         n_MPP_tries=100,
                         n_warmup=1000,
                         n_max=100):
        """
        Generate samples used to estimate acquisition functions
        
        Input:
        
        n_MPP        -  try to find this number of MPP's
        n_MPP_tries  -  max number of tries in search for new MPP 
        n_warmup     -  warm up samples before pruning 
        n_max        -  max number of samples after pruning
        
        """

        # Find some center points for Gaussian mixture
        if n_MPP > 0:
            U_MPP = self.find_conservative_MPPS(n_MPP, n_MPP_tries)
            if len(U_MPP) > 0:
                U_MPP = np.append(U_MPP,
                                  np.zeros((1, self.X_dist.dim)),
                                  axis=0)
            else:
                U_MPP = np.zeros((1, self.X_dist.dim))
        else:
            U_MPP = np.zeros((1, self.X_dist.dim))

        # Define sampling distribution
        self.Sampler = PrunedSampler(U_MPP, n_warmup)

        # Evaluate pruning criterion
        X = self.X_dist.U_to_X(self.Sampler.samples_warmup)
        include_list, default_values = self.pruning_criterion(X)

        # Perform pruning
        self.Sampler.prune(include_list, default_values, n_max)

        # HOLD: catch this..
        if self.Sampler.N_pruned == 0:
            print('No samples generated!!!!')

        # Map samples to X-space and store
        self.pruned_samples_X = self.X_dist.U_to_X(self.Sampler.samples_pruned)

    def find_conservative_MPPS(self, n_MPP=2, n_MPP_tries=100, err_g_max=0.2):
        """
        Search for MPPs
        
        Input:
        
        n_MPP        -  try to find this number of MPP's
        n_MPP_tries  -  max number of tries in search for new MPP 
        err_g_max    -  convergence criterion in MPP search
        """

        k = 0
        U_MPP = []
        for i in range(n_MPP_tries):
            u0 = random.normal(size=(self.X_dist.dim))
            self.e = self.E_conservative[random.randint(
                len(self.E_conservative))]

            conv, u_MPP = self.MPP_search(u0=u0,
                                          N_max=100,
                                          err_g_max=err_g_max)
            if conv:
                k += 1
                U_MPP.append(u_MPP)
                if k >= n_MPP: break

        return np.array(U_MPP)

    def pruning_criterion(self, X):
        """
        Evaluate each input in X with pruning criterion
        """

        # Estimate E[g(X)], Var[g(X)] with respect to epistemic
        # uncertainty for each x in X
        m, v = self.UT_g_moments(X)

        include_list = np.abs(m) / np.sqrt(v) < 3
        I = (m < 0) * 1

        return include_list, I

    def bernoulli_var(self):
        """
        Estimate criteria based on Bernoulli variance 
        
        E[gamma] and E[sqrt(gamma)]**2
        """
        gamma = self.gamma(self.pruned_samples_X)

        # E[gamma]
        expectation_1 = self.Sampler.ratio_pruned * (
            (gamma * self.Sampler.q_pruned).sum() / self.Sampler.N_pruned)

        # E[sqrt(gamma)]
        expectation_2 = self.Sampler.ratio_pruned * (
            (np.sqrt(gamma) * self.Sampler.q_pruned).sum() /
            self.Sampler.N_pruned)

        return expectation_1, expectation_2**2

    def gamma(self, X):
        """
        gamma = p*(1-p) for p = P[g(x) < 0 | x]        
        """
        m, v = self.UT_g_moments(X)

        z = m / np.sqrt(v)
        p = stats.norm.cdf(-z)  # p = P[g(x) < 0 | x] under the Gaussian UT approximation
        gamma = p * (1 - p)

        return gamma
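The pattern in UT_pof_moments (evaluate a scalar quantity at each epistemic sigma point, then take UT moments) works for any smooth function of E. A toy sketch with a hypothetical f and a 1-D standard normal E, using the same kappa = 3 - dim choice as compute_epistemic_sigma:

import numpy as np
from filterpy.kalman import MerweScaledSigmaPoints, unscented_transform

dim = 1
pts = MerweScaledSigmaPoints(dim, alpha=0.9, beta=2, kappa=3 - dim)
E_sigma = pts.sigma_points(np.zeros(dim), np.eye(dim))

def f(e):
    return e[0]**2  # hypothetical scalar quantity of interest

vals = np.array([f(e) for e in E_sigma]).reshape(-1, 1)
m, v = unscented_transform(vals, pts.Wm, pts.Wc)
print(m[0], v[0][0])  # UT estimates of E[f(E)] and Var[f(E)]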
Example #16
noise = Q_discrete_white_noise(dim=2,
                               dt=dt,
                               var=0.000001**2,
                               block_size=3,
                               order_by_dim=False)
noise_fp = Q_discrete_white_noise_fp(dim=2,
                                     dt=dt,
                                     var=0.000001**2,
                                     block_size=3,
                                     order_by_dim=False)
Test6c = np.allclose(noise, noise_fp)
assert Test6c, "Failed Test 6c: Q_discrete_white_noise"

# check sigma points
lambda_ = alpha**2 * (dim_x + kappa) - dim_x
points = sigma_points(x, P, lambda_, dim_x)
Test6d = np.allclose(points, points_fp.sigma_points(x, P))
assert Test6d, "Failed Test 6d: sigma points"

if np.all((Test6a, Test6b, Test6c, Test6d)):
    print("Test 6: Successful: sigmas points, noise function, and weights")
'''
# check unscented transform
ut = unscented_transform(points, Wm, Wc, noise)
ut_fp = unscented_transform_fp(points, Wm, Wc, noise)
assert np.allclose(ut[0], ut_fp[0]), print("Failed Test 6e: mean from UT")
assert np.allclose(ut[1], ut_fp[1]), print("Failed Test 6f: covariance from UT")

This test was removed because it was found that when the UT matched the UT in the code,
the predict function did not correctly calculate the mean. Could use some looking into.
'''
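For reference, the dim=2 single-block matrix that Q_discrete_white_noise builds is the standard piecewise white-noise form; block_size and order_by_dim only tile and interleave copies of it. A quick check of the base case, with dt and var chosen arbitrarily:

import numpy as np
from filterpy.common import Q_discrete_white_noise

dt, var = 0.1, 0.000001**2
Q = Q_discrete_white_noise(dim=2, dt=dt, var=var)
expected = var * np.array([[.25*dt**4, .5*dt**3],
                           [ .5*dt**3,    dt**2]])
assert np.allclose(Q, expected)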
Example #17
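The fragments below assume a nonlinear map f_nonlinear_xy defined earlier in their source, and SigmaPoints is presumably an alias of MerweScaledSigmaPoints. A plausible stand-in so the snippets run (the exact function in the original may differ), plus the imports they rely on:

import numpy as np
import matplotlib.pyplot as plt
from numpy.random import multivariate_normal
from filterpy.kalman import MerweScaledSigmaPoints, unscented_transform
from filterpy.kalman import MerweScaledSigmaPoints as SigmaPoints

def f_nonlinear_xy(x, y):
    # assumed form; any smooth R^2 -> R^2 map illustrates the transform
    return np.array([x + y, .1 * x**2 + y * y])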
# initial mean and covariance
mean = (0, 0)
p = np.array([[32., 15], [15., 40.]])

# compute the linearized mean (the nonlinear function evaluated at the mean)
mean_fx = f_nonlinear_xy(*mean)

# generate random points
xs, ys = multivariate_normal(mean=mean, cov=p, size=10000).T

# create sigma points - we will learn about this later
points = SigmaPoints(n=2, alpha=.1, beta=2., kappa=1.)
Wm, Wc = points.Wm, points.Wc  # weights are attributes in recent filterpy
sigmas = points.sigma_points(mean, p)

### pass through nonlinear function
sigmas_f = np.empty((5, 2))
for i in range(5):
    sigmas_f[i] = f_nonlinear_xy(sigmas[i, 0], sigmas[i, 1])

### use unscented transform to get new mean and covariance
ukf_mean, ukf_cov = unscented_transform(sigmas_f, Wm, Wc)

#generate random points
np.random.seed(100)
xs, ys = multivariate_normal(mean=mean, cov=p, size=5000).T

plt.figure()
#plot_monte_carlo_mean(xs, ys, f_nonlinear_xy, ukf_mean, 'Unscented Mean')
# state-transition function used below; reconstructed from the stray
# "return State**2" residue (assumed, the original def was truncated)
def function(State):
    return State**2

State = np.array([0.1, 0.5, 0.75, 1.0])

ukf_mean = State
ukf_cov = np.identity(State.size) * 0.1

#sigmas_f = np.empty((Iterations,sigmas.shape[0],sigmas.shape[1]))
points = MerweScaledSigmaPoints(n=State.size, alpha=0.3, beta=2., kappa=0)
#points = JulierSigmaPoints(n=State.size,kappa=3-State.size)
#points =  JulierSigmaPoints(n=State.size)
means = []
covs = []
states = []
for i in range(100):
    sigmas = points.sigma_points(ukf_mean, ukf_cov)
    sigmas_f = np.empty(sigmas.shape)
    for k in range(sigmas.shape[0]):
        sigmas_f[k] = function(sigmas[k])
    ukf_mean, ukf_cov = unscented_transform(sigmas_f, points.Wm, points.Wc)
    #ukf_cov+=1e-9
    State = function(State)
    means.append(ukf_mean)
    covs.append(ukf_cov)
    states.append(State)
#	ukf_cov = nearestPD(ukf_cov)

means = np.array(means)
covs = np.array(covs)[:, 0, 0]
states = np.array(states)
Example #19
    plt.ylim([-10, 200])
    plt.xlim([-100, 100])
    plt.legend(loc='best', scatterpoints=1)
    print('Difference in mean x={:.3f}, y={:.3f}'.format(
          computed_mean_x - mean_fx[0], computed_mean_y - mean_fx[1]))
    
# -------------------------------------------------------------------------------------------
mean = [0, 0]               # Mean of the N-dimensional distribution.
cov = [[32, 15], [15, 40]]  # Covariance matrix of the distribution.

# create sigma points (2n+1 points in total)
# uses 3 parameters to control how the sigma points are distributed and weighted
points = SigmaPoints(n=2, alpha=.1, beta=2., kappa=1.)  
# Wm, Wc = points.weights()
Wm, Wc = points.Wm, points.Wc
sigmas = points.sigma_points(mean, cov)

# pass through nonlinear function
sigmas_f = np.empty((5, 2))
for i in range(5):  
    sigmas_f[i] = f_nonlinear_xy(sigmas[i, 0], sigmas[i, 1])

# use unscented transform to get new mean and covariance
ukf_mean, ukf_cov = unscented_transform(sigmas_f, Wm, Wc)

# generate random points
xs, ys = multivariate_normal(mean, cov, size=1000).T  # draw 1000 samples from the 2-D normal distribution
plot2(xs, ys, f_nonlinear_xy, ukf_mean)

# plot the sigma points
plt.xlim(-30, 30)
plt.ylim(0, 90)