Example #1
    def evaluate_MSE(self, true_func, res=30):
        data = generate_grid(-2.0, 2.0, res)
        self.model = GP(self.x_train[2], self.y_train[2], self.kernel)
        m_pred, s_pred = self.model.predict(data)
        m_true = np.apply_along_axis(true_func, axis=1,
                                     arr=data).reshape(-1, 1)

        return np.sum((m_pred - m_true)**2) / (res * res)
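Note: generate_grid is not defined in any of these snippets. From the way it is called, it presumably returns a res * res array of 2-D points covering [-2, 2] x [-2, 2] in a shape GP.predict accepts. A minimal sketch of such a helper (the name and return layout are assumptions, not the original implementation):

import numpy as np

def generate_grid(low, high, res):
    # Build a res x res lattice of 2-D points over [low, high] x [low, high],
    # flattened to shape (res * res, 2) so it can be passed to GP.predict.
    axis = np.linspace(low, high, res)
    xx, yy = np.meshgrid(axis, axis)
    return np.column_stack([xx.ravel(), yy.ravel()])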
Example #2
    def compute_entropy(self, res=21):
        #print(self.x_train[2], self.y_train[2])
        self.model = GP(self.x_train[2], self.y_train[2], self.kernel)

        x_candidates = generate_grid(-2.0, 2.0, res)
        mean, var = self.model.predict(x_candidates)
        self.entropy = np.sum(np.log(var))
        #self.entropy = mean + 0.3 * var

        #print(self.entropy)
        return self.entropy
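compute_entropy scores the current belief about feature 2 by summing log predictive variances over a grid. For independent Gaussian predictions the differential entropy per point is 0.5 * log(2 * pi * e * var), an increasing affine function of log(var), so ranking candidate beliefs by the sum of log variances gives the same order as ranking by total entropy. A small illustrative check (the variance values below are made up):

import numpy as np

var = np.array([0.9, 0.5, 0.1])                        # hypothetical predictive variances
proxy = np.sum(np.log(var))                            # score used by compute_entropy
exact = np.sum(0.5 * np.log(2 * np.pi * np.e * var))   # sum of Gaussian differential entropies
# exact == 0.5 * proxy + len(var) * 0.5 * np.log(2 * np.pi * np.e),
# so for a fixed grid both quantities rank candidate beliefs identically.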
Example #3
    def compute_belief_variance(self):
        priors = [[] for i in range(self.num_features)]
        for i in range(self.num_features):
            priors[i] = GP(self.x_train[i], self.y_train[i], self.kernel)
            #plotGP(priors[i], x_train[i], 'Feature '+str(i))

        m_obs = [[] for i in range(self.num_features)]
        m_exp = [[] for i in range(self.num_features)]
        s_obs = [[] for i in range(self.num_features)]

        ### sample variable distributions
        #for x in range(len(x_candidates)):
        for i in range(self.num_features):
            pred_obs = priors[2].predict(self.x_train[i])
            m_obs[i], s_obs[i] = pred_obs[0].flatten(), pred_obs[1].flatten()
            m_exp[i] = self.y_train[i]
            #print(pred[0].flatten())

        self.count = np.ones(self.num_features)

        for i in range(self.num_features):
            for j in range(len(self.x_train[i])):
                # If we are sufficiently confident about the feature of interest
                # where a side feature has been observed, we know more about the
                # relationship between those features.
                if i != 2 and s_obs[i][j] < 0.8:
                    self.count[i] += 1

        self.feature_var = np.array([1.0 / math.sqrt(n) for n in self.count])
Example #4
    def infer_joint_distribution(self, res):
        ### train GP priors
        priors = [[] for i in range(self.num_features)]
        for i in range(self.num_features):
            priors[i] = GP(self.x_train[i], self.y_train[i], self.kernel)
            #plotGP(priors[i], x_train[i], 'Feature '+str(i))

        m = np.empty((self.num_features, res * res))
        s = np.empty((self.num_features, res * res))

        x_candidates = generate_grid(-2, 2, res)

        ### sample variable distributions
        for x in range(len(x_candidates)):
            for i in range(self.num_features):
                pred = priors[i].predict(np.array([x_candidates[x]]))
                m[i, x], s[i, x] = pred[0][0][0], pred[1][0][0]

        ### compute weighted covariance
        W = self.generate_weights(s)
        X = np.array([m[i] - np.mean(m[i]) for i in range(self.num_features)])

        w_cov = np.cov(X, aweights=W)

        ### use lasso to estimate a sparse precision matrix? (GGM model estimation)

        ### construct correlated joint distribution GP
        joint_distribution = MOGP(self.x_train, self.y_train, w_cov,
                                  self.kernel)
        #plotMOGP(joint_distribution, x_train, 2, 'Output 2')

        return joint_distribution
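The weighted covariance step leans on np.cov's aweights argument: with the default rowvar=True, each row of X is a variable (one per feature) and each column a grid point, and W down-weights grid points where the feature-of-interest prediction is uncertain (generate_weights uses W = 1 / sqrt(s[2, :])). A standalone sketch of that call, with stand-in random data replacing the GP outputs:

import numpy as np

num_features, n_points = 3, 5
m = np.random.rand(num_features, n_points)         # stand-in for the predicted means
s = np.random.rand(num_features, n_points) + 0.1   # stand-in for the predicted variances

W = np.reciprocal(np.sqrt(s[2, :]))                # same weighting as generate_weights
X = np.array([m[i] - np.mean(m[i]) for i in range(num_features)])

w_cov = np.cov(X, aweights=W)                      # (num_features, num_features) weighted covariance
print(w_cov)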
Example #5
    def infer_joint_distribution(self, res):
        ### train GP priors
        priors = [[] for i in range(self.num_features)]
        for i in range(self.num_features):
            priors[i] = GP(self.x_train[i], self.y_train[i], self.kernel)
            #plotGP(priors[i], x_train[i], 'Feature '+str(i))

        m_obs = [[] for i in range(self.num_features)]
        m_exp = [[] for i in range(self.num_features)]
        s_obs = [[] for i in range(self.num_features)]

        x_candidates = generate_grid(-2, 2, res)

        ### sample variable distributions
        #for x in range(len(x_candidates)):
        for i in range(self.num_features):
            pred_obs = priors[2].predict(self.x_train[i])
            m_obs[i], s_obs[i] = pred_obs[0].flatten(), pred_obs[1].flatten()
            m_exp[i] = self.y_train[i]
            #print(pred[0].flatten())

        self.count = np.ones(self.num_features)  # start at one so feature_var below stays finite

        m_obs_trust = [[] for i in range(self.num_features)]
        m_exp_trust = [[] for i in range(self.num_features)]

        for i in range(self.num_features):
            for j in range(len(self.x_train[i])):
                # If we are sufficiently confident about the feature of interest
                # where a side feature has been observed, we know more about the
                # relationship between those features.
                if s_obs[i][j] < 0.3:
                    self.count[i] += 1

        self.feature_var = np.array([1.0 / math.sqrt(n) for n in self.count])

        m = np.empty((self.num_features, res * res))
        s = np.empty((self.num_features, res * res))

        for i in range(self.num_features):
            pred = priors[i].predict(x_candidates)
            m[i], s[i] = pred[0].flatten(), pred[1].flatten()

        ### compute weighted covariance
        W = self.generate_weights(s)
        X = np.array([m[i] - np.mean(m[i]) for i in range(self.num_features)])

        w_cov = np.cov(X, aweights=W)

        ### use lasso to estimate a sparse precision matrix? (GGM model estimation)

        ### construct correlated joint distribution GP
        joint_distribution = MOGP(self.x_train, self.y_train, w_cov,
                                  self.kernel)
        #plotMOGP(joint_distribution, x_train, 2, 'Output 2')

        return joint_distribution
Example #6
    def infer_joint_distribution(self, res):
        ### train GP priors
        priors = [[] for i in range(self.num_features)]
        for i in range(self.num_features):
            priors[i] = GP(self.x_train[i], self.y_train[i], self.kernel)
            #plotGP(priors[i], x_train[i], 'Feature '+str(i))


        ### construct correlated joint distribution GP
        joint_distribution = MOGP(self.x_train, self.y_train, self.cov, self.kernel)
        #plotMOGP(joint_distribution, x_train, 2, 'Output 2')

        return joint_distribution
Example #7
    def infer_independent_distribution(self, feature, res):
        independent_distribution = GP(self.x_train[feature],
                                      self.y_train[feature], self.kernel)
        return independent_distribution
Example #8
class GaussianProcessBeliefModel():  # InferenceModel
    def __init__(self):
        self.res = 20
        self.kernel = generate_rbfkern(2, 1.0, 0.3)
        self.entropy = 0

    def copy(self):
        newMe = GaussianProcessBeliefModel()
        newMe.x_train = self.x_train.copy()
        newMe.y_train = self.y_train.copy()
        newMe.num_features = self.num_features
        newMe.entropy = self.entropy

        return newMe

    def load_environment(self, env, start_loc=[0, 0]):
        self.x_train, self.y_train, self.num_features = env.load_prior_data(
            start_loc)
        #self.entropy = -1 * len(self.x_train)
        #print(self.entropy)

    def update(self, x, y, feature):
        #print(np.min(abs(np.sum(self.x_train[feature] - [x[0:2]], axis=1))))
        #print('a')
        #print([x[0:2]])
        #print(self.x_train[feature])
        #print(self.x_train[feature] - [x[0:2]])
        #print(np.sum(abs(self.x_train[feature] - [x[0:2]]), axis=1))
        self.x_train[feature] = np.append(self.x_train[feature], [x[0:2]],
                                          axis=0)
        self.y_train[feature] = np.append(self.y_train[feature], y)

    def update_static(self, x, y, feature):
        self.x_train[feature] = np.append(self.x_train[feature], [x[0:2]],
                                          axis=0)
        self.y_train[feature] = np.append(self.y_train[feature], y)

    def observe(self, x):
        feature = int(x[0][2])
        #if x[0][0:2] in self.x_train[feature]:
        #    print(np.where(self.x_train[feature] == x[0][0:2]))
        #    return self.y_train[np.where(self.x_train[feature] == x[0][0:2])]
        #else:
        return 0

    def infer_independent_distribution(self, feature, res):
        independent_distribution = GP(self.x_train[feature],
                                      self.y_train[feature], self.kernel)
        return independent_distribution

    def display(self, feature, title):
        independent_distribution = self.infer_independent_distribution(
            feature=feature, res=20)
        plotGP(independent_distribution,
               self.x_train[feature],
               title=title,
               res=20)

    def generate_weights(self, S):
        W = np.reciprocal(np.sqrt(S[2, :]))
        return W

    def compute_entropy(self, res=21):
        #print(self.x_train[2], self.y_train[2])
        self.model = GP(self.x_train[2], self.y_train[2], self.kernel)

        x_candidates = generate_grid(-2.0, 2.0, res)
        mean, var = self.model.predict(x_candidates)
        self.entropy = np.sum(np.log(var))
        #self.entropy = mean + 0.3 * var

        #print(self.entropy)
        return self.entropy

    def evaluate_MSE(self, true_func, res=30):
        data = generate_grid(-2.0, 2.0, res)
        self.model = GP(self.x_train[2], self.y_train[2], self.kernel)
        m_pred, s_pred = self.model.predict(data)
        m_true = np.apply_along_axis(true_func, axis=1,
                                     arr=data).reshape(-1, 1)

        return np.sum((m_pred - m_true)**2) / (res * res)
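A rough end-to-end usage sketch of GaussianProcessBeliefModel follows. The env object, true_func, and the GP/plotGP/generate_rbfkern helpers are not shown in these examples, so this only illustrates the assumed call order and interfaces (update expects x[0:2] to be the 2-D location); it is not runnable without those pieces:

model = GaussianProcessBeliefModel()
model.load_environment(env, start_loc=[0, 0])      # env is assumed to expose load_prior_data

model.update(x=[0.5, -0.5, 2], y=1.2, feature=2)   # record a new sample of feature 2
print(model.compute_entropy(res=21))               # belief uncertainty over the grid
print(model.evaluate_MSE(true_func, res=30))       # true_func: callable on a 2-D grid point
model.display(feature=2, title='Feature 2 belief')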