Example #1
    def reduce_data(self, mode=1, plot=True):

        print "Reducing data..."

        K = 500
        X = self.D[:, :self.state_dim]

        # Normalize
        # x_max_X = np.max(X, axis=0)
        # x_min_X = np.min(X, axis=0)
        # for i in range(self.state_action_dim):
        #     X[:,i] = (X[:,i]-x_min_X[i])/(x_max_X[i]-x_min_X[i])

        kdt = KDTree(X, leaf_size=20, metric='euclidean')
        df = DiffusionMap(sigma=1, embedding_dim=2, k=K)

        Xreduced = []
        for i in range(X.shape[0]):
            if not (i % 100):
                print(i, X.shape[0])

            idx = kdt.query(X[i, :].reshape(1, -1), k=K, return_distance=False)
            X_nn = X[idx, :].reshape(K, self.state_dim)

            V, _ = df.fit_transform(X_nn, density_normalize=False)
            Xreduced.append(V[0, :])

        Xreduced = np.array(Xreduced)

        if plot:
            plt.scatter(Xreduced[:, 0], Xreduced[:, 1])
            plt.show()

        return Xreduced
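
Example #1 wraps DiffusionMap in a per-point nearest-neighbour loop. For reference, a minimal standalone call is sketched below; the synthetic two-cluster data is an assumption made for illustration, while the (embedding, eigenvalues) return signature of fit_transform follows Examples #3 and #7.

import numpy as np
from diffusionMaps import DiffusionMap  # module name as imported in Example #17

# Toy data: two Gaussian clusters (illustrative only, not from the original example)
X = np.vstack([np.random.normal(0.0, 1.0, size=(100, 4)),
               np.random.normal(5.0, 1.0, size=(100, 4))])

df = DiffusionMap(sigma=1, embedding_dim=2)
X_embed, lam = df.fit_transform(X)  # (200, 2) embedding and its two leading eigenvalues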
Example #2
def test__density_normalize__colsum(mixtureNormals):

    df = DiffusionMap(sigma=1, embedding_dim=2, verbose=False)
    K = scipy.sparse.rand(100, 100, density=0.1)
    K_norm = df.__density_normalize__(K)

    np.testing.assert_allclose(K_norm.toarray().sum(0), 1)
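
The test examples on this page all receive a mixtureNormals pytest fixture that is not shown here. A plausible sketch, assuming the fixture simply samples a mixture of two well-separated Gaussians (its real shape and parameters are unknown):

import numpy as np
import pytest

@pytest.fixture
def mixtureNormals():
    # Hypothetical fixture: two Gaussian clusters in 2-D; the fixture actually
    # used by these tests may differ in dimensionality and parameters.
    rng = np.random.RandomState(0)
    return np.vstack([rng.normal(0.0, 1.0, size=(100, 2)),
                      rng.normal(10.0, 1.0, size=(100, 2))])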
Example #3
def test_DiffusionMap_fit_transform_eigenvalue_ordering(mixtureNormals):
    "must return the largest first"
    X = mixtureNormals

    embDim = 2
    df = DiffusionMap(sigma=1, embedding_dim=embDim)
    X_embed, lam = df.fit_transform(X)
    assert (lam[0] > lam[1])
Example #4
def test_DiffusionMap_fit_transform_eigenvalue_ordering(mixtureNormals):
    "must return the largest first"
    X = mixtureNormals

    embDim = 2
    df = DiffusionMap(sigma=1, embedding_dim=embDim)
    X_embed, lam = df.fit_transform(X)
    assert lam[0] > lam[1]
Example #5
def test_DiffusionMap_get_kernel_matrix_symmetry(mixtureNormals):
    "make sure the kernel matrix is symmetric"
    X = mixtureNormals
    df = DiffusionMap(sigma=1, embedding_dim=2, verbose=False)
    K = df.__get_kernel_matrix__(X, k=2)

    Q = (K - K.T).toarray()  # np.all doesn't work on sparse matrices
    assert np.all(Q == 0), "returned kernel matrix is not symmetric"
Example #6
def test_DiffusionMap_get_kernel_matrix_symmetry(mixtureNormals):
    "make sure the kernel matrix is symmetric"
    X = mixtureNormals
    df = DiffusionMap(sigma=1, embedding_dim=2)
    K = df._get_kernel_matrix(X, k=2)

    Q = (K - K.T).toarray()  # np.all doesn't work on sparse matrices
    assert np.all(Q == 0), 'returned kernel matrix is not symmetric'
Example #7
def test_DiffusionMap_fit_transform_output_dimensions(mixtureNormals):
    X = mixtureNormals

    embDim = 2
    df = DiffusionMap(sigma=1, embedding_dim=embDim)
    X_embed, lam = df.fit_transform(X)

    assert X_embed.shape == (X.shape[0], embDim), "returns wrong dimensionality"
    assert lam.shape[0] == X_embed.shape[1], "must return as many eigenvalues as embedded dimensions"
Example #8
def test_DiffusionMap_fit_transform_output_dimensions(mixtureNormals):
    X = mixtureNormals

    embDim = 2
    df = DiffusionMap(sigma=1, embedding_dim=embDim)
    X_embed, lam = df.fit_transform(X)

    assert X_embed.shape == (X.shape[0], embDim), "returns wrong dimensionality"
    assert lam.shape[0] == X_embed.shape[1], "must return as many eigenvalues as embedded dimensions"
Example #9
def test_DiffusionMap_nearestNeighbour_number_of_neighbours(mixtureNormals):
    X = mixtureNormals
    embDim = 2
    df = DiffusionMap(sigma=1, embedding_dim=embDim)

    kNN = 4
    distances, indices = df.__get_NN__(X, k=kNN)

    assert distances.shape == (X.shape[0], kNN)
    assert indices.shape == (X.shape[0], kNN)
Example #10
def test_DiffusionMap_nearestNeighbour_number_of_neighbours(mixtureNormals):
    X = mixtureNormals
    embDim = 2
    df = DiffusionMap(sigma=1, embedding_dim=embDim)

    kNN = 4
    distances, indices = df._get_NN(X, k=kNN)

    assert distances.shape == (X.shape[0], kNN)
    assert indices.shape == (X.shape[0], kNN)
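
Examples #9 and #10 pin down the shape contract of the nearest-neighbour helper: one row per sample with kNN distances and kNN indices. The same contract can be verified against scikit-learn's NearestNeighbors, shown here only as a reference sketch, not as the implementation the helper necessarily wraps:

import numpy as np
from sklearn.neighbors import NearestNeighbors

X = np.random.normal(size=(30, 2))
kNN = 4
distances, indices = NearestNeighbors(n_neighbors=kNN).fit(X).kneighbors(X)
assert distances.shape == (X.shape[0], kNN)
assert indices.shape == (X.shape[0], kNN)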
Example #11
    def __init__(self):
        # Number of NN
        if useDiffusionMaps:
            dim = 3
            self.K = 1000
            self.K_manifold = 100
            sigma = 2.0
            if diffORspec == 'diff':
                # self.df = DiffusionMap(sigma=sigma, embedding_dim=dim)
                self.df = DiffusionMap(sigma=10, embedding_dim=dim, k=self.K)
                print(
                    '[gp_transition] Using diffusion maps with dimension %d, K: (%d, %d) and sigma=%f.'
                    % (dim, self.K_manifold, self.K, sigma))
            else:
                self.embedding = spectralEmbed(embedding_dim=dim)
                print(
                    '[gp_transition] Using spectral embedding with dimension %d.'
                    % (dim))
            data_load.__init__(self,
                               simORreal=simORreal,
                               discreteORcont=discreteORcont,
                               K=self.K,
                               K_manifold=self.K_manifold,
                               sigma=sigma,
                               dim=dim,
                               dr='diff')
        else:
            self.K = 100
            print('[gp_transition] No diffusion maps used, K=%d.' % self.K)
            data_load.__init__(self,
                               simORreal=simORreal,
                               discreteORcont=discreteORcont,
                               K=self.K,
                               dr='spec')

        svm_failure.__init__(
            self,
            simORreal=simORreal,
            discrete=(True if discreteORcont == 'discrete' else False))
        mean_shift.__init__(self)

        rospy.Service('/gp/transition', batch_transition, self.GetTransition)
        rospy.Service('/gp/transitionOneParticle', one_transition,
                      self.GetTransitionOneParticle)
        rospy.Service('/gp/transitionRepeat', batch_transition_repeat,
                      self.GetTransitionRepeat)
        rospy.Service('/gp/batchSVMcheck', batch_transition,
                      self.batch_svm_check_service)
        rospy.Service('/gp/set_K', setk, self.setK)
        rospy.Service('/gp/set_new_kdtree', setKD, self.setKDtree)
        rospy.init_node('gp_transition', anonymous=True)
        print('[gp_transition] Ready.')

        rospy.spin()
Example #12
def test_DiffusionMap_density_normalization(mixtureNormals):

    df = DiffusionMap(sigma=1, embedding_dim=2)

    A = np.random.normal(0, 1, size=(10, 2))
    K = np.dot(A, A.T)  # 10x10 symmetric kernel-matrix
    K = csr_matrix(K)

    K_norm = df.__density_normalize__(K)
    theSum = K_norm.sum(0)
    np.testing.assert_allclose(theSum, 1)
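
Examples #2 and #12 both assert that the normalized kernel has unit column sums. A minimal stand-in that satisfies the same check, assuming the method amounts to rescaling each column by its sum (the real __density_normalize__ may implement the full diffusion-maps density correction):

import numpy as np
from scipy.sparse import csr_matrix

def column_normalize(K):
    # Hypothetical stand-in for __density_normalize__: divide every column by
    # its sum so that K_norm.sum(0) is all ones, as the tests above expect.
    col_sums = np.asarray(K.sum(axis=0)).ravel()
    col_sums[col_sums == 0] = 1.0  # guard against empty columns
    return csr_matrix(K.multiply(1.0 / col_sums.reshape(1, -1)))

K = csr_matrix(np.abs(np.random.normal(size=(10, 10))))
np.testing.assert_allclose(np.asarray(column_normalize(K).sum(axis=0)).ravel(), 1.0)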
Example #13
def test_DiffusionMap_get_kernel_matrix_number_of_neighbours(mixtureNormals):
    """actually we would like to test for the exact number of neighvours
    but due tot the symmetrizing, it can exceed the kNN"""
    X = mixtureNormals
    embDim = 2
    df = DiffusionMap(sigma=1, embedding_dim=embDim)

    kNN = 4
    K = df._get_kernel_matrix(X, k=kNN)
    assert K.shape == (X.shape[0], X.shape[0])

    nonzero_elements_per_row = np.sum(K.toarray() != 0, 1)
    print(nonzero_elements_per_row)
    # the number of nonzero elements must be kNN or larger (due to the symmetrizing)
    assert np.all(nonzero_elements_per_row >= kNN)
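
Examples #13 and #14 can only check a lower bound on the neighbour count because a k-NN kernel is usually symmetrized, which may add entries to a row. One common construction of a symmetric Gaussian k-NN kernel, given here as an illustrative assumption rather than what DiffusionMap necessarily does internally:

import numpy as np
from sklearn.neighbors import kneighbors_graph

def symmetric_knn_kernel(X, k, sigma):
    # Gaussian weights on the k-NN graph, symmetrized with the element-wise
    # maximum of the matrix and its transpose; symmetrizing can add nonzeros
    # to a row, so a row may end up with more than k entries.
    K = kneighbors_graph(X, k, mode='distance')
    K.data = np.exp(-K.data**2 / (2.0 * sigma**2))
    return K.maximum(K.T)

X = np.random.normal(size=(50, 3))
K = symmetric_knn_kernel(X, k=4, sigma=1.0)
assert (K != K.T).nnz == 0                       # symmetric
assert np.all((K.toarray() != 0).sum(1) >= 4)    # at least k neighbours per row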
Example #14
def test_DiffusionMap_get_kernel_matrix_number_of_neighbours(mixtureNormals):
    """actually we would like to test for the exact number of neighvours
    but due tot the symmetrizing, it can exceed the kNN"""
    X = mixtureNormals
    embDim = 2
    df = DiffusionMap(sigma=1, embedding_dim=embDim)

    kNN = 4
    K = df.__get_kernel_matrix__(X, k=kNN)
    assert K.shape == (X.shape[0], X.shape[0])

    nonzero_elements_per_row = np.sum(K.toarray() != 0, 1)
    print(nonzero_elements_per_row)
    # the number of nonzero elements must be kNN or larger (due to the symmetrizing)
    assert np.all(nonzero_elements_per_row >= kNN)
Example #15
    def __init__(self):
        svm_failure.__init__(
            self, discrete=(True if discreteORcont == 'discrete' else False))
        data_load.__init__(self,
                           simORreal=simORreal,
                           discreteORcont=discreteORcont)

        # Number of NN
        if useDiffusionMaps:
            self.K = 1000
            self.K_manifold = 100
            self.df = DiffusionMap(sigma=1, embedding_dim=3, k=self.K)
        else:
            self.K = 100

        rospy.Service('/gpup/transition', gpup_transition, self.GetTransition)
        rospy.Service('/gpup/transitionRepeat', gpup_transition_repeat,
                      self.GetTransitionRepeat)
        rospy.init_node('gpup_transition', anonymous=True)
        print('[gpup_transition] Ready.')

        rate = rospy.Rate(15)  # 15hz
        while not rospy.is_shutdown():
            rospy.spin()
Example #16
    def setK(self, msg):
        V = np.array(msg.data)

        if V[0] < self.state_dim:
            useDiffusionMaps = True
            dim = int(V[0])
            self.K_manifold = int(V[1])
            self.K = int(V[2])
            if diffORspec == 'diff':
                sigma = V[3]
                self.df = DiffusionMap(sigma=sigma, embedding_dim=dim)
                if V[4]:
                    self.dr = 'diff'
                    self.precompute_hyperp(K=self.K,
                                           K_manifold=self.K_manifold,
                                           sigma=sigma,
                                           dim=dim)
                print(
                    '[gp_transition] Using diffusion maps with dimension %d, K: (%d, %d) and sigma=%f.'
                    % (dim, self.K_manifold, self.K, sigma))
            elif diffORspec == 'spec':
                self.embedding = spectralEmbed(embedding_dim=dim)
                if V[4]:
                    self.dr = 'spec'
                    self.precompute_hyperp(K=self.K,
                                           K_manifold=self.K_manifold,
                                           dim=dim)
                print(
                    '[gp_transition] Using spectral embedding with dimension %d.'
                    % (dim))
        else:
            useDiffusionMaps = False
            self.K = int(V[1])
            if V[4]:
                self.precompute_hyperp(K=self.K)
            print('[gp_transition] No diffusion maps used, K=%d.' % self.K)
Example #17
import gpflow
import time
from scipy.io import loadmat
import pickle
import math
from diffusionMaps import DiffusionMap

saved = False

discrete = True
useDiffusionMaps = True

# Number of NN
if useDiffusionMaps:
    K = 5000
    df = DiffusionMap(sigma=1, embedding_dim=3, k=K)
else:
    K = 100

print('Loading data...')
if discrete:
    Q = loadmat('../../data/sim_data_discrete.mat')
    Qtrain = Q['D']
    is_start = Q['is_start'][0][0]
    is_end = Q['is_end'][0][0] - 180
    Qtest = Qtrain[is_start:is_end, :]
    Qtrain = np.concatenate((Qtrain[:is_start, :], Qtrain[is_end:, :]), axis=0)
else:
    Q = loadmat('../../data/sim_data_cont.mat')
    Qtrain = Q['D']
    is_start = Q['is_start'][0][0]
Example #18
def test__get_kernel_matrix_sparse(mixtureNormals):
    df = DiffusionMap(sigma=1, embedding_dim=2, verbose=False)
    K = df.__get_kernel_matrix__(mixtureNormals, k=10)
    assert issparse(K)
Example #19
class Spin_gp(data_load, mean_shift, svm_failure):

    time_nn = 0.0
    num_checks_nn = 0

    def __init__(self):
        # Number of NN
        if useDiffusionMaps:
            dim = 3
            self.K = 1000
            self.K_manifold = 100
            sigma = 2.0
            if diffORspec == 'diff':
                # self.df = DiffusionMap(sigma=sigma, embedding_dim=dim)
                self.df = DiffusionMap(sigma=10, embedding_dim=dim, k=self.K)
                print(
                    '[gp_transition] Using diffusion maps with dimension %d, K: (%d, %d) and sigma=%f.'
                    % (dim, self.K_manifold, self.K, sigma))
            else:
                self.embedding = spectralEmbed(embedding_dim=dim)
                print(
                    '[gp_transition] Using spectral embedding with dimension %d.'
                    % (dim))
            data_load.__init__(self,
                               simORreal=simORreal,
                               discreteORcont=discreteORcont,
                               K=self.K,
                               K_manifold=self.K_manifold,
                               sigma=sigma,
                               dim=dim,
                               dr='diff')
        else:
            self.K = 100
            print('[gp_transition] No diffusion maps used, K=%d.' % self.K)
            data_load.__init__(self,
                               simORreal=simORreal,
                               discreteORcont=discreteORcont,
                               K=self.K,
                               dr='spec')

        svm_failure.__init__(
            self,
            simORreal=simORreal,
            discrete=(True if discreteORcont == 'discrete' else False))
        mean_shift.__init__(self)

        rospy.Service('/gp/transition', batch_transition, self.GetTransition)
        rospy.Service('/gp/transitionOneParticle', one_transition,
                      self.GetTransitionOneParticle)
        rospy.Service('/gp/transitionRepeat', batch_transition_repeat,
                      self.GetTransitionRepeat)
        rospy.Service('/gp/batchSVMcheck', batch_transition,
                      self.batch_svm_check_service)
        rospy.Service('/gp/set_K', setk, self.setK)
        rospy.Service('/gp/set_new_kdtree', setKD, self.setKDtree)
        rospy.init_node('gp_transition', anonymous=True)
        print('[gp_transition] Ready.')

        rospy.spin()

    def setK(self, msg):
        V = np.array(msg.data)

        if V[0] < self.state_dim:
            useDiffusionMaps = True
            dim = int(V[0])
            self.K_manifold = int(V[1])
            self.K = int(V[2])
            if diffORspec == 'diff':
                sigma = V[3]
                self.df = DiffusionMap(sigma=sigma, embedding_dim=dim)
                if V[4]:
                    self.dr = 'diff'
                    self.precompute_hyperp(K=self.K,
                                           K_manifold=self.K_manifold,
                                           sigma=sigma,
                                           dim=dim)
                print(
                    '[gp_transition] Using diffusion maps with dimension %d, K: (%d, %d) and sigma=%f.'
                    % (dim, self.K_manifold, self.K, sigma))
            elif diffORspec == 'spec':
                self.embedding = spectralEmbed(embedding_dim=dim)
                if V[4]:
                    self.dr = 'spec'
                    self.precompute_hyperp(K=self.K,
                                           K_manifold=self.K_manifold,
                                           dim=dim)
                print(
                    '[gp_transition] Using spectral embedding with dimension %d.'
                    % (dim))
        else:
            useDiffusionMaps = False
            self.K = int(V[1])
            if V[4]:
                self.precompute_hyperp(K=self.K)
            print('[gp_transition] No diffusion maps used, K=%d.' % self.K)

        # return EmptyResponse()

    def setKDtree(self, msg):
        N = msg.data
        self.set_new_kdtree(N)

        print('[gp_transition] Change kd-tree to have %d points.' % N)

        return True

    # Particles prediction
    def batch_predict(self, SA):
        sa = np.mean(SA, 0)
        # Theta, K = self.get_theta(sa) # Get hyper-parameters for this query point

        K = 1

        idx = self.kdt.query(sa.reshape(1, -1), k=K, return_distance=False)
        X_nn = self.Xtrain[idx, :].reshape(K, self.state_action_dim)
        Y_nn = self.Ytrain[idx, :].reshape(K, self.state_dim)

        if useDiffusionMaps:
            X_nn, Y_nn = self.reduction(sa, X_nn, Y_nn)

        dS_next = np.zeros((SA.shape[0], self.state_dim))
        std_next = np.zeros((SA.shape[0], self.state_dim))
        for i in range(self.state_dim):
            gp_est = GaussianProcess(X_nn[:, :self.state_action_dim],
                                     Y_nn[:, i],
                                     optimize=True,
                                     theta=None,
                                     algorithm='Matlab')
            mm, vv = gp_est.batch_predict(SA[:, :self.state_action_dim])
            dS_next[:, i] = mm
            std_next[:, i] = np.sqrt(np.diag(vv))

        S_next = SA[:, :self.state_dim] + dS_next  # np.random.normal(dS_next, std_next)

        return S_next

    # Particles prediction
    def batch_predict_iterative(self, SA):

        S_next = []
        while SA.shape[0]:
            sa = np.copy(SA[np.random.randint(SA.shape[0]), :])
            Theta, K = self.get_theta(sa)  # Get hyper-parameters for this query point
            D, idx = self.kdt.query(sa.reshape(1, -1),
                                    k=K,
                                    return_distance=True)
            r = np.max(D) * 1.1
            X_nn = self.Xtrain[idx, :].reshape(K, self.state_action_dim)
            Y_nn = self.Ytrain[idx, :].reshape(K, self.state_dim)

            neigh = NearestNeighbors(radius=r)
            neigh.fit(SA)
            idx_local = neigh.radius_neighbors(sa.reshape(1, -1),
                                               return_distance=False)[0]
            SA_local = np.copy(SA[idx_local, :])
            SA = np.delete(SA, idx_local, axis=0)

            if useDiffusionMaps:
                X_nn, Y_nn = self.reduction(sa, X_nn, Y_nn)

            dS_next = np.zeros((SA_local.shape[0], self.state_dim))
            std_next = np.zeros((SA_local.shape[0], self.state_dim))
            for i in range(self.state_dim):
                gp_est = GaussianProcess(X_nn[:, :self.state_action_dim],
                                         Y_nn[:, i],
                                         optimize=False,
                                         theta=Theta[i],
                                         algorithm='Matlab')
                mm, vv = gp_est.batch_predict(
                    SA_local[:, :self.state_action_dim])
                dS_next[:, i] = mm
                std_next[:, i] = np.sqrt(np.diag(vv))

            S_next_local = SA_local[:, :self.state_dim] + np.random.normal(
                dS_next, std_next)

            for s in S_next_local:
                S_next.append(s)

        return np.array(S_next)

    def one_predict(self, sa):
        Theta, _ = self.get_theta(sa)  # Get hyper-parameters for this query point

        K = self.K

        idx = self.kdt.query(sa.reshape(1, -1), k=K, return_distance=False)
        X_nn = self.Xtrain[idx, :].reshape(K, self.state_action_dim)
        Y_nn = self.Ytrain[idx, :].reshape(K, self.state_dim)

        if useDiffusionMaps:
            X_nn, Y_nn = self.reduction(sa, X_nn, Y_nn)

        ds_next = np.zeros((self.state_dim, ))
        std_next = np.zeros((self.state_dim, ))
        for i in range(self.state_dim):
            gp_est = GaussianProcess(X_nn[:, :self.state_action_dim],
                                     Y_nn[:, i],
                                     optimize=False,
                                     theta=Theta[i],
                                     algorithm='Matlab')
            mm, vv = gp_est.predict(sa[:self.state_action_dim])
            ds_next[i] = mm
            std_next[i] = np.sqrt(vv)

        s_next = sa[:self.state_dim] + ds_next  # np.random.normal(ds_next, std_next)
        if self.state_dim == 5:
            s_next[4] += 1.0 if s_next[4] < 0.0 else 0.0
            s_next[4] -= 1.0 if s_next[4] > 1.0 else 0.0

        return s_next

    def reduction(self, sa, X, Y):
        if diffORspec == 'diff':
            inx = self.df.ReducedClosestSetIndices(sa,
                                                   X,
                                                   k_manifold=self.K_manifold)
        elif diffORspec == 'spec':
            inx = self.embedding.ReducedClosestSetIndices(
                sa, X, k_manifold=self.K_manifold)

        return X[inx, :][0], Y[inx, :][0]

    def batch_propa(self, S, a):
        SA = np.concatenate((S, np.tile(a, (S.shape[0], 1))), axis=1)

        SA = self.normz_batch(SA)
        SA_normz = self.batch_predict(SA)
        # SA_normz = self.batch_predict_iterative(SA)
        S_next = self.denormz_batch(SA_normz)

        return S_next

    def batch_svm_check(self, S, a):
        failed_inx = []
        for i in range(S.shape[0]):
            p = self.probability(S[i, :], a[:2])  # Probability of failure
            prob_fail = np.random.uniform(0, 1)
            if prob_fail <= p:
                failed_inx.append(i)

        return failed_inx

    def batch_svm_check_service(self, req):

        S = np.array(req.states).reshape(-1, self.state_dim)
        a = np.array(req.action)

        failed_inx = []
        for i in range(S.shape[0]):
            p = self.probability(S[i, :], a)  # Probability of failure
            prob_fail = np.random.uniform(0, 1)
            if prob_fail <= p:
                failed_inx.append(i)

        node_probability = 1.0 - float(len(failed_inx)) / float(S.shape[0])

        return {'node_probability': node_probability}

    # Predicts the next step by calling the GP class
    def GetTransition(self, req):

        S = np.array(req.states).reshape(-1, self.state_dim)
        a = np.array(req.action)

        collision_probability = 0.0

        if (len(S) == 1):
            p = self.probability(S[0, :], a[:2])
            node_probability = 1.0 - p
            sa = np.concatenate((S[0, :], a), axis=0)
            sa = self.normz(sa)
            sa_normz = self.one_predict(sa)
            s_next = self.denormz(sa_normz)

            return {
                'next_states': s_next,
                'mean_shift': s_next,
                'node_probability': node_probability,
                'collision_probability': collision_probability
            }
        else:

            # Check which particles failed
            failed_inx = self.batch_svm_check(S, a[:2])
            node_probability = 1.0 - float(len(failed_inx)) / float(S.shape[0])

            # Remove failed particles by duplicating good ones
            bad_action = np.array([0., 0.])
            if len(failed_inx):
                good_inx = np.delete(np.array(range(S.shape[0])), failed_inx)
                if len(good_inx) == 0:  # All particles failed
                    S_next = []
                    mean = [0, 0]
                    return {
                        'next_states': S_next,
                        'mean_shift': mean,
                        'node_probability': node_probability,
                        'bad_action': np.array([0., 0.]),
                        'collision_probability': 1.0
                    }

                # Find main direction of fail
                S_failed_mean = np.mean(S[failed_inx, :], axis=0)
                S_mean = np.mean(S, axis=0)
                ang = np.rad2deg(
                    np.arctan2(S_failed_mean[1] - S_mean[1],
                               S_failed_mean[0] - S_mean[0]))
                if ang <= 45. and ang >= -45.:
                    bad_action = np.array([1., -1.])
                elif ang >= 135. or ang <= -135.:
                    bad_action = np.array([-1., 1.])
                elif ang > 45. and ang < 135.:
                    bad_action = np.array([1., 1.])
                elif ang < -45. and ang > -135.:
                    bad_action = np.array([-1., -1.])

                dup_inx = good_inx[np.random.choice(len(good_inx),
                                                    size=len(failed_inx),
                                                    replace=True)]
                S[failed_inx, :] = S[dup_inx, :]

            # Propagate
            S_next = self.batch_propa(S, a)

            mean = np.mean(S_next, 0)  #self.get_mean_shift(S_next)

            return {
                'next_states': S_next.reshape((-1, )),
                'mean_shift': mean,
                'node_probability': node_probability,
                'bad_action': bad_action,
                'collision_probability': collision_probability
            }

    # Predicts the next step by calling the GP class - repeats the same action 'n' times
    def GetTransitionRepeat(self, req):

        S = np.array(req.states).reshape(-1, self.state_dim)
        a = np.array(req.action)
        n = req.num_repeat

        for _ in range(n):
            S_next = self.batch_propa(S, a)
            S = S_next

        mean = self.get_mean_shift(S_next)

        return {'next_states': S_next.reshape((-1, )), 'mean_shift': mean}

    # Predicts the next step by calling the GP class
    def GetTransitionOneParticle(self, req):

        s = np.array(req.state)
        a = np.array(req.action)

        # Check which particles failed
        p = 0  #self.probability(s, a[:2])
        node_probability = 1.0 - p

        # Propagate
        sa = np.concatenate((s, a), axis=0)
        # sa = self.normz( sa )
        st = rospy.get_time()
        sa_normz = self.one_predict(self.normz(sa))
        self.time_nn += rospy.get_time() - st
        self.num_checks_nn += 1
        s_next = self.denormz(sa_normz)

        # print(self.time_nn / self.num_checks_nn)

        return {'next_state': s_next, 'node_probability': node_probability}
Example #20
def test__get_kernel_matrix_sparse(mixtureNormals):
    df = DiffusionMap(sigma=1, embedding_dim=2)
    K = df._get_kernel_matrix(mixtureNormals, k=10)
    assert issparse(K)
Example #21
def test__density_normalize__sparse(mixtureNormals):

    df = DiffusionMap(sigma=1, embedding_dim=2, verbose=False)
    K = csr_matrix([[1, 1], [0, 1]])
    assert issparse(df.__density_normalize__(K)), "returned matrix is not sparse after normalization"
Example #22
class Spin_gpup(data_load, svm_failure):
    def __init__(self):
        svm_failure.__init__(
            self, discrete=(True if discreteORcont == 'discrete' else False))
        data_load.__init__(self,
                           simORreal=simORreal,
                           discreteORcont=discreteORcont)

        # Number of NN
        if useDiffusionMaps:
            self.K = 1000
            self.K_manifold = 100
            self.df = DiffusionMap(sigma=1, embedding_dim=3, k=self.K)
        else:
            self.K = 100

        rospy.Service('/gpup/transition', gpup_transition, self.GetTransition)
        rospy.Service('/gpup/transitionRepeat', gpup_transition_repeat,
                      self.GetTransitionRepeat)
        rospy.init_node('gpup_transition', anonymous=True)
        print('[gpup_transition] Ready.')

        rate = rospy.Rate(15)  # 15hz
        while not rospy.is_shutdown():
            rospy.spin()
            # rate.sleep()

    def predict(self, s_mean, s_var):
        sa = s_mean.reshape(-1, 1)
        idx = self.kdt.query(sa.T, k=self.K, return_distance=False)
        X_nn = self.Xtrain[idx, :].reshape(self.K, self.state_action_dim)
        Y_nn = self.Ytrain[idx, :].reshape(self.K, self.state_dim)

        if useDiffusionMaps:
            X_nn, Y_nn = self.reduction(sa, X_nn, Y_nn)

        m = np.zeros(self.state_dim)
        v = np.zeros(self.state_dim)
        for i in range(self.state_dim):
            gpup_est = UncertaintyPropagation(X_nn[:, :self.state_dim],
                                              Y_nn[:, i],
                                              optimize=False,
                                              theta=self.get_theta(sa))
            m[i], v[i] = gpup_est.predict(
                sa[:self.state_dim].reshape(1, -1)[0], s_var[:self.state_dim])

        return m, np.sqrt(v)

    def reduction(self, sa, X, Y):
        inx = self.df.ReducedClosestSetIndices(sa,
                                               X,
                                               k_manifold=self.K_manifold)

        return X[inx, :][0], Y[inx, :][0]

    # Predicts the next step by calling the GP class
    def GetTransition(self, req):

        s_mean = np.array(req.mean)
        s_std = np.array(req.std)
        a = np.array(req.action)

        node_probability = 1.0 - self.svm_check(s_mean, s_std, a)

        s_mean_normz = self.normz(np.concatenate((s_mean, a), axis=0))
        # self.normz(s_mean + s_std) - s_mean_normz[:self.state_dim]
        s_std_normz = self.normz_change(s_std)

        ds_mean_next_normz, ds_std_next_normz = self.predict(
            s_mean_normz, s_std_normz**2)

        s_mean_next = self.denormz(s_mean_normz[:self.state_dim] +
                                   ds_mean_next_normz)
        # self.denormz(s_mean_normz[:self.state_dim] + ds_mean_next_normz + s_std_normz + ds_std_next_normz) - s_mean_next
        s_std_next = np.sqrt(self.denormz_change(ds_std_next_normz)**2 + s_std**2)

        return {
            'next_mean': s_mean_next,
            'next_std': s_std_next,
            'node_probability': node_probability
        }

    # Predicts the next step by calling the GP class - repeats the same action 'n' times
    def GetTransitionRepeat(self, req):

        s_mean = np.array(req.mean)
        s_std = np.array(req.std)
        a = np.array(req.action)
        n = req.num_repeat

        for _ in range(n):
            s_mean_normz = self.normz(np.concatenate((s_mean, a), axis=0))
            # self.normz(s_mean + s_std) - s_mean_normz[:self.state_dim]
            s_std_normz = self.normz_change(s_std)

            ds_mean_next_normz, ds_std_next_normz = self.predict(
                s_mean_normz, s_std_normz**2)

            s_mean_next = self.denormz(s_mean_normz[:self.state_dim] +
                                       ds_mean_next_normz)
            # self.denormz(s_mean_normz[:self.state_dim] + ds_mean_next_normz + s_std_normz + ds_std_next_normz) - s_mean_next
            s_std_next = np.sqrt(self.denormz_change(ds_std_next_normz)**2 + s_std**2)

            s_mean = s_mean_next
            s_std = s_std_normz

        return {'next_mean': s_mean_next, 'next_std': s_std_next}

    def svm_check(self, mean, std, a):
        N = 100

        S = np.random.normal(mean, std, size=(N, len(mean)))

        failed_inx = []
        for i in range(S.shape[0]):
            p, _ = self.probability(S[i, :], a)  # Probability of failure
            prob_fail = np.random.uniform(0, 1)
            if prob_fail <= p:
                failed_inx.append(i)

        return float(len(failed_inx)) / float(N)