Example #1
    def randomize(self, seed):

        np.random.seed(seed)

        # Fit a near-noiseless GP to i.i.d. N(0, 1) targets on a coarse grid,
        # then draw posterior samples on a finer grid to obtain smooth random fields.
        kernel = generate_rbfkern(2, 1.0, 1.5)
        xs = generate_grid(lb=-2.0, ub=2.0, res=self.res)
        x_disc = generate_grid(lb=-2.0, ub=2.0, res=self.true_res)

        ys = np.random.normal(size=(self.res * self.res, 1))
        model = GPy.models.GPRegression(xs, ys, kernel, noise_var=1e-10)
        self.truesignal = model.posterior_samples(x_disc, size=1).reshape(
            self.true_res, self.true_res)

        ys = np.random.normal(size=(self.res * self.res, 1))
        model = GPy.models.GPRegression(xs, ys, kernel, noise_var=1e-10)
        self.randsignal = model.posterior_samples(x_disc, size=1).reshape(
            self.true_res, self.true_res)

        ys = np.random.normal(size=(self.res * self.res, 1))
        model = GPy.models.GPRegression(xs, ys, kernel, noise_var=1e-10)
        self.randsignal2 = model.posterior_samples(x_disc, size=1).reshape(
            self.true_res, self.true_res)

        ys = np.random.normal(size=(self.res * self.res, 1))
        model = GPy.models.GPRegression(xs, ys, kernel, noise_var=1e-10)
        self.randsignal3 = model.posterior_samples(x_disc, size=1).reshape(
            self.true_res, self.true_res)
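The snippet above draws smooth random fields by fitting a near-noiseless GP to i.i.d. N(0, 1) targets on a coarse grid and sampling the posterior on a finer one. A minimal self-contained sketch of the same trick, with generate_grid/generate_rbfkern expanded into explicit NumPy and GPy calls (coarse_res and fine_res are illustrative values, not from the original):

import numpy as np
import GPy

def sample_random_field(seed, coarse_res=8, fine_res=41):
    np.random.seed(seed)
    axis = np.linspace(-2.0, 2.0, coarse_res)
    xs = np.array([[x, y] for x in axis for y in axis])
    ys = np.random.normal(size=(coarse_res * coarse_res, 1))

    # Near-noiseless GP regression on random targets; one posterior sample
    # on a finer grid yields a smooth random field.
    kernel = GPy.kern.RBF(input_dim=2, variance=1.0, lengthscale=1.5)
    model = GPy.models.GPRegression(xs, ys, kernel, noise_var=1e-10)
    fine_axis = np.linspace(-2.0, 2.0, fine_res)
    x_disc = np.array([[x, y] for x in fine_axis for y in fine_axis])
    return model.posterior_samples(x_disc, size=1).reshape(fine_res, fine_res)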
Example #2
    def load_prior_data(self, start_loc=[0, 0]):
        ''' inputs:
                start_loc: the agent's starting location, used as the single
                           initial sample of the feature of interest
            outputs:
                x_task: a numpy array of input values for each sample of each function
                y_task: a numpy array of output values for each sample of each function
                num_funcs: the number of state variables in the dataset
        '''
        n_sample = [13**2, 13**2, 1, 1]
        num_funcs = len(self.func)
        x_task = np.empty(num_funcs, dtype=object)
        y_task = np.empty(num_funcs, dtype=object)

        ### generate training data from functions
        for i in range(num_funcs):
            if i == 2:
                # the feature of interest starts with a single sample at the start location
                x_task[i] = np.array([[start_loc[0]], [start_loc[1]]]).T
            elif i == 1:
                real = (0, 1.8)
                x_task[i] = generate_grid(real[0], real[1], int(math.sqrt(n_sample[i])))
            else:
                x_task[i] = generate_grid(-1.8, 1.8, int(math.sqrt(n_sample[i])))
            y_task[i] = np.array([self.func[i](xp) for xp in x_task[i]])
            
        return x_task, y_task, num_funcs
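A hypothetical usage sketch (the env handle and start location are assumptions; x_task and y_task come back as object arrays indexed by feature):

x_task, y_task, num_funcs = env.load_prior_data(start_loc=[0.5, -0.5])
for i in range(num_funcs):
    print(i, x_task[i].shape, y_task[i].shape)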
Example #3
def plotMOGP(model, x_train, output, title, res=30):

    X_guess = np.concatenate(
        [generate_grid(-2.0, 2.0, res),
         np.ones((res * res, 1)) * output],
        axis=1)
    Y_pred = model.predict(X_guess)

    plt.figure()
    CS = plt.contour(np.linspace(-2, 2, res), np.linspace(-2, 2, res),
                     Y_pred[0].reshape(res, res))
    plt.clabel(CS, inline=1, fontsize=10)
    plt.plot(x_train[output].T[0], x_train[output].T[1])
    plt.title('Mean, ' + title)

    # `points` is assumed to be a module-level global of landmark locations
    if 'signal field' in title or 'hardness field' in title:
        plt.scatter(points.T[0], points.T[1], c="r")

    plt.figure()
    CS = plt.contour(np.linspace(-2, 2, res), np.linspace(-2, 2, res),
                     np.sqrt(Y_pred[1].reshape(res, res)))
    plt.clabel(CS, inline=1, fontsize=10)
    plt.scatter(x_train[output].T[0], x_train[output].T[1])
    plt.title('Stdev, ' + title)
Example #4
    def evaluate_MSE(self, true_func, res=41):
        data = np.concatenate([generate_grid(-2.0, 2.0, res), np.ones((res*res, 1))*2], axis=1)
        model = self.infer_joint_distribution(res=res)
        m_pred, s_pred = model.predict(data)
        m_true = np.apply_along_axis(true_func, axis=1, arr=data).reshape(-1, 1)

        # mean squared error over the res x res evaluation grid
        return np.sum((m_pred - m_true)**2) / (res * res)

    def infer_joint_distribution(self, res):
        ### train GP priors
        priors = [[] for i in range(self.num_features)]
        for i in range(self.num_features):
            priors[i] = GP(self.x_train[i], self.y_train[i], self.kernel)
            #plotGP(priors[i], x_train[i], 'Feature '+str(i))

        m = np.empty((self.num_features, res * res))
        s = np.empty((self.num_features, res * res))

        x_candidates = generate_grid(-2, 2, res)

        ### sample variable distributions (vectorized over the whole grid)
        for i in range(self.num_features):
            pred = priors[i].predict(x_candidates)
            m[i], s[i] = pred[0].flatten(), pred[1].flatten()

        ### compute weighted covariance
        W = self.generate_weights(s)
        X = np.array([m[i] - np.mean(m[i]) for i in range(self.num_features)])

        w_cov = np.cov(X, aweights=W)

        ### use lasso to estimate a sparse precision matrix? (GGM model estimation)

        ### construct correlated joint distribution GP
        joint_distribution = MOGP(self.x_train, self.y_train, w_cov,
                                  self.kernel)
        #plotMOGP(joint_distribution, x_train, 2, 'Output 2')

        return joint_distribution
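The weighted covariance step leans on NumPy's aweights argument, which weights the observations (here, grid points), not the features. A toy sketch of the call with made-up shapes to show the convention:

import numpy as np

m = np.random.rand(3, 5)                   # 3 features x 5 grid points
W = np.array([1.0, 0.5, 2.0, 1.0, 0.25])   # one weight per grid point
X = m - m.mean(axis=1, keepdims=True)      # center each feature (row)
w_cov = np.cov(X, aweights=W)              # (3, 3) weighted feature covariance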
Example #6
    def policy(self, alpha, inference_model, loc):

        candidates = generate_grid(-2.0, 2.0, self.res)

        ### Entropy minimization acquisition function
        '''
        min_entropy = inference_model.compute_entropy()
        print('initial entropy')
        print(inference_model.compute_entropy())
        for candidate in candidates:
            print(candidate)
            predictedBelief = inference_model.copy()
            observation = predictedBelief.observe(np.array([[candidate[0], candidate[1], self.observed_feature]]))
            predictedBelief.update(np.array([candidate[0], candidate[1], self.observed_feature]), observation, self.observed_feature)
            print(predictedBelief.compute_entropy())
            if predictedBelief.compute_entropy() <= min_entropy:
                min_entropy = predictedBelief.compute_entropy()
                nextSample = candidate
        '''

        # UCB acquisition function, discounted by distance from the current location
        mean, var = inference_model.infer_independent_distribution(feature=2, res=30).predict(candidates)
        dist = np.linalg.norm(candidates - loc, axis=1)
        dist_corrected = np.maximum(dist, 0.3).reshape(-1, 1)  # clamp small distances
        ucb = (mean + 2.0 * var) / dist_corrected  # note: weights by variance, not stddev
        # alternative, expected improvement with incumbent 0.0:
        #ucb = (mean - 0.0) * norm.cdf((mean - 0.0) / np.sqrt(var)) + np.sqrt(var) * norm.pdf((mean - 0.0) / np.sqrt(var))
        best = candidates[np.argmax(ucb)]
        # take a fixed-length step from loc toward the best candidate
        nextSample = (best - loc) / (np.linalg.norm(best - loc) * 3) + loc

        return nextSample
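As written, the score scales the mean by the predictive variance; the more common GP-UCB form uses the standard deviation. A minimal sketch of that variant under the same mean, var, and dist_corrected arrays (beta is an assumed exploration weight, not from the original code):

import numpy as np

def ucb_scores(mean, var, dist_corrected, beta=2.0):
    # Textbook GP-UCB bonus uses the predictive standard deviation;
    # beta is an illustrative exploration weight, not from the original.
    return (mean + beta * np.sqrt(var)) / dist_corrected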
Example #7
    def infer_joint_distribution(self, res):
        ### train GP priors
        priors = [[] for i in range(self.num_features)]
        for i in range(self.num_features):
            priors[i] = GP(self.x_train[i], self.y_train[i], self.kernel)
            #plotGP(priors[i], x_train[i], 'Feature '+str(i))

        m_obs = [[] for i in range(self.num_features)]
        m_exp = [[] for i in range(self.num_features)]
        s_obs = [[] for i in range(self.num_features)]

        x_candidates = generate_grid(-2, 2, res)

        ### evaluate the feature-of-interest prior at each feature's training inputs
        for i in range(self.num_features):
            pred_obs = priors[2].predict(self.x_train[i])
            m_obs[i], s_obs[i] = pred_obs[0].flatten(), pred_obs[1].flatten()
            m_exp[i] = self.y_train[i]

        ### count samples where the prior is confident about the feature of interest
        self.count = np.zeros(self.num_features)
        for i in range(self.num_features):
            for j in range(len(self.x_train[i])):
                # if sufficiently confident about the feature of interest where
                # this side feature has been observed, we know more about the
                # relationship between those features
                if s_obs[i][j] < 0.3:
                    self.count[i] += 1

        # guard against zero counts to avoid division by zero
        self.feature_var = np.array(
            [1.0 / math.sqrt(max(n, 1)) for n in self.count])

        ### sample variable distributions over the candidate grid
        m = np.empty((self.num_features, res * res))
        s = np.empty((self.num_features, res * res))
        for i in range(self.num_features):
            pred = priors[i].predict(x_candidates)
            m[i], s[i] = pred[0].flatten(), pred[1].flatten()

        ### compute weighted covariance
        W = self.generate_weights(s)
        X = np.array([m[i] - np.mean(m[i]) for i in range(self.num_features)])

        w_cov = np.cov(X, aweights=W)

        ### use lasso to estimate a sparse precision matrix? (GGM model estimation)

        ### construct correlated joint distribution GP
        joint_distribution = MOGP(self.x_train, self.y_train, w_cov,
                                  self.kernel)
        #plotMOGP(joint_distribution, x_train, 2, 'Output 2')

        return joint_distribution
Example #8
    def evaluate_MSE(self, true_func, res=30):
        data = generate_grid(-2.0, 2.0, res)
        self.model = GP(self.x_train[2], self.y_train[2], self.kernel)
        m_pred, s_pred = self.model.predict(data)
        m_true = np.apply_along_axis(true_func, axis=1,
                                     arr=data).reshape(-1, 1)

        return np.sum((m_pred - m_true)**2) / (res * res)

    def exploit(self, model):

        joint_distribution = model.infer_joint_distribution(res=self.res)
        independent_distribution = model.infer_independent_distribution(feature=2, res=self.res)

        x_candidates = np.concatenate([generate_grid(-2.0, 2.0, self.res), np.ones((self.res*self.res, 1))*2], axis=1)
        utilities = self.compute_utilities(joint_distribution, independent_distribution, x_candidates)
        nextSample = x_candidates[np.argmax(utilities)]

        return nextSample #, joint_distribution, independent_distribution
Example #10
    def __init__(self, points):
        self.points = points
        self.func = [self.hardness_field, self.depth_field, self.signal_field, self.random_field]
        self.func_names = ['hardness field', 'depth field', 'signal field', 'random field']

        self.res = 8
        self.true_res = 41

        # Fit a near-noiseless GP to i.i.d. N(0, 1) targets on a coarse grid,
        # then draw a posterior sample on a finer grid to get a smooth random field.
        kernel = generate_rbfkern(2, 1.0, 1.5)
        xs = generate_grid(lb=-2.0, ub=2.0, res=self.res)
        x_disc = generate_grid(lb=-2.0, ub=2.0, res=self.true_res)

        ys = np.random.normal(size=(self.res * self.res, 1))
        model = GPy.models.GPRegression(xs, ys, kernel, noise_var=1e-10)
        self.truesignal = model.posterior_samples(x_disc, size=1).reshape(self.true_res, self.true_res)

        ys = np.random.normal(size=(self.res * self.res, 1))
        model = GPy.models.GPRegression(xs, ys, kernel, noise_var=1e-10)
        self.randsignal = model.posterior_samples(x_disc, size=1).reshape(self.true_res, self.true_res)
Example #11
    def compute_entropy(self, res=21):
        self.model = GP(self.x_train[2], self.y_train[2], self.kernel)

        x_candidates = generate_grid(-2.0, 2.0, res)
        mean, var = self.model.predict(x_candidates)
        # sum of log predictive variances: the differential entropy of
        # independent Gaussians, up to an additive constant and scale
        self.entropy = np.sum(np.log(var))
        return self.entropy
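For reference, the exact differential entropy of a Gaussian with variance sigma^2 is 0.5 * log(2*pi*e*sigma^2), so for a fixed grid size the sum of log-variances above is an affine transform of the true entropy and ranks candidates identically. A minimal sketch:

import numpy as np

def gaussian_entropy(var):
    # Differential entropy of independent Gaussians with variances `var`:
    # 0.5 * sum(log(2*pi*e*var)). For a fixed number of grid points this is
    # 0.5 * sum(log(var)) plus a constant, so argmax/argmin decisions agree.
    return 0.5 * np.sum(np.log(2.0 * np.pi * np.e * np.asarray(var)))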
Example #12
    def compute_entropy(self, res=21):
        model = self.infer_independent_distribution(2, res=res)
        x_candidates = generate_grid(-2.0, 2.0, res)
        mean, var = model.predict(x_candidates)
        entropy = np.sum(np.log(var))
        return entropy

    def explore(self, model):
        joint_distribution = model.infer_joint_distribution(res=self.res)
        independent_distribution = model.infer_independent_distribution(feature=2, res=self.res)

        ### compute predicted novelty metric and pick the most novel training input
        novelties = self.compute_novelties(joint_distribution, model.x_train)
        nextSample = model.x_train[1][np.argmax(novelties)]

        ### Sample and update model
        return nextSample #, joint_distribution, independent_distribution
Example #14
    def load_environment(self, env, start_loc=[0, 0]):
        self.x_train, self.y_train, self.num_features = env.load_prior_data(start_loc)

        # compute true covariance
        xs = generate_grid(-2.0, 2.0, res=self.res)
        y_true = np.zeros((self.num_features, self.res*self.res))
        
        for feature in range(self.num_features):
            # apply_along_axis over the (n, 2) grid already returns a flat (n,) vector
            y_true[feature, :] = np.apply_along_axis(env.func[feature], axis=1, arr=xs)

        Y_true = np.array([y_true[i] - np.mean(y_true[i]) for i in range(self.num_features)])
        self.cov = np.cov(Y_true)
        self.env = env
Example #15
def plot_environment():
    env = SideInformationEnvironmentRandomGP(points=[])
    num_features = len(env.func)
    res = 20

    x_test = generate_grid(lb=-2.0, ub=2.0, res=res)

    y_test = [[] for i in range(num_features)]

    for i in range(num_features):
        y_test[i] = np.array([env.observe(xo, i) for xo in x_test])

    for i in range(num_features):
        plt.figure()
        CS = plt.contour(np.linspace(-2, 2, res), np.linspace(-2, 2, res),
                         y_test[i].reshape(res, res))
        plt.clabel(CS, inline=1, fontsize=10)
        plt.title(env.func_names[i])

    plt.show()
Example #16
    def compute_entropy(self, res=21):
        self.compute_belief_variance()

        model = self.infer_independent_distribution(2, res=res)
        x_candidates = generate_grid(-2.0, 2.0, res)
        mean, var = model.predict(x_candidates)

        # map entropy plus a beta-weighted knowledge-model entropy term
        beta = 100
        map_entropy = np.sum(np.log(var))
        knowledge_entropy = beta * np.sum(np.log(self.feature_var))
        entropy = map_entropy + knowledge_entropy

        print('Map entropy', map_entropy)
        print('Knowledge model entropy', knowledge_entropy)
        return entropy
Example #17
def plot_environment():
    env = SideInformationEnvironmentRandomGP(points=[])
    num_features = len(env.func)
    res = 20

    x_test = generate_grid(lb=-2.0, ub=2.0, res=res)

    y_test = [[] for i in range(num_features)]

    for i in range(num_features):
        y_test[i] = np.array([env.observe(xo, i) for xo in x_test])


    titles = ["Temperature", "Sea Floor Depth", "Luminosity", "Ocean Current Intensity"]
    plt.figure()
    for i in range(num_features):
        plt.subplot(1, num_features, i+1)
        CS = plt.contourf(np.linspace(0, 600, res), np.linspace(0, 600, res), y_test[i].reshape(res, res))
        plt.title(titles[i])

    plt.show()
Example #18
def plotGP(model, x_train, title, res=30):

    X_guess = generate_grid(-2.0, 2.0, res)
    Y_pred = model.predict(X_guess)

    plt.figure()
    CS = plt.contour(np.linspace(-2, 2, res), np.linspace(-2, 2, res),
                     Y_pred[0].reshape(res, res))
    plt.clabel(CS, inline=1, fontsize=10)
    plt.plot(x_train[:, 0], x_train[:, 1])
    plt.title('Mean, ' + title)

    # `points` is assumed to be a module-level global of landmark locations
    if 'signal field' in title or 'hardness field' in title:
        plt.scatter(points.T[0], points.T[1], c="r")

    plt.figure()
    CS = plt.contour(np.linspace(-2, 2, res), np.linspace(-2, 2, res),
                     np.sqrt(Y_pred[1].reshape(res, res)))
    plt.clabel(CS, inline=1, fontsize=10)
    plt.plot(x_train[:, 0], x_train[:, 1])
    plt.title('Stdev, ' + title)
Example #19
    def load_prior_data(self, start_loc=[0, 0]):
        ''' inputs:
                start_loc: the agent's starting location, used as the single
                           initial sample of the feature of interest
            outputs:
                x_task: a numpy array of input values for each sample of each function
                y_task: a numpy array of output values for each sample of each function
                num_funcs: the number of state variables in the dataset
        '''
        n_sample = [100, 100, 1, 10]
        num_funcs = len(self.func)
        x_task = np.empty(num_funcs, dtype=object)
        y_task = np.empty(num_funcs, dtype=object)

        ### generate training data from functions
        for i in range(num_funcs):
            if i == 2:
                # the feature of interest starts with a single sample at the start location
                x_task[i] = np.array([[start_loc[0]], [start_loc[1]]]).T
            else:
                x_task[i] = generate_grid(-2.0, 2.0,
                                          int(math.sqrt(n_sample[i])))
            y_task[i] = np.array([self.func[i](xp) for xp in x_task[i]])

        return x_task, y_task, num_funcs