Example no. 1
def valid(loghyper,
          covfunc,
          X,
          id,
          y,
          X_val,
          y_val,
          R_tr,
          w,
          R_tr_val,
          R_val,
          trafo=None):
    '''Validate on the validation dataset while
       tuning the weight parameter of the SUM of the
       parameterized cov **function** AND the cov **matrix**.'''

    ## TRAINING the parameters
    logtheta = gpr.gp_train(loghyper, covfunc, X, y, R_tr, w)

    ## GET self covariances for the test cases (needed in covMatrix)
    d = diag(R_val).transpose()
    R_tr_val = vstack((R_tr_val, d))

    ## PREDICTION
    [MU, S2] = gpr.gp_pred(logtheta, covfunc, X, y, X_val, R_tr, w, R_tr_val)

    ## VALIDATION for predictions on validation dataset
    ## DO NOT rescale predictions - rescaling is done in valid_fig if needed!
    M = valid_fig(y_val, MU, trafo=trafo)
    print(w)
    print(M)
    return hstack((M, logtheta))
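
A minimal driver sketch for valid(), mirroring the commented-out grid search in Example no. 8 below; X, y, X_val, y_val, id_lab, R_tr, R_tr_val, R_val and loghyper are placeholders assumed to be prepared elsewhere, and valid_fig is assumed to return three metrics:

import numpy as np

covfunc = ['kernels.covSum', ['kernels.covSEiso', 'kernels.covNoise']]
w_grid = np.linspace(0.0, 1.0, 11)                # candidate weights for the cov sum
M = np.zeros((w_grid.size, 3 + loghyper.size))    # rows: [r_sq, RMSE, MAE, logtheta...]
for i, w in enumerate(w_grid):
    M[i, :] = valid(loghyper, covfunc, X, id_lab, y.copy(), X_val, y_val,
                    R_tr, w, R_tr_val, R_val)
best = M[:, 0].argmax()                           # highest R^2 wins
print('selected weight:', w_grid[best])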
def gp_correct_poses(gt_poses_train,
                     poses_train,
                     state_train,
                     poses_test,
                     state_test,
                     logtheta=None):
    print "using gp poses"
    pose_error = calc_pose_error(gt_poses_train, poses_train)

    n_test = len(poses_test)
    n_task_vars = 6

    MU = np.empty((n_test, n_task_vars))
    S2 = np.empty((n_test, n_task_vars))

    for i_task_var in range(n_task_vars):
        ## data from a noisy GP
        X = state_train

        ## DEFINE parameterized covariance function
        covfunc = ['kernels.covSum', ['kernels.covSEiso', 'kernels.covNoise']]

        #print 'hyperparameters: ', np.exp(logtheta)

        ### sample observations from the GP
        y = np.array([pose_error[:, i_task_var]]).reshape((-1, 1))

        ### TEST POINTS
        Xstar = state_test

        ## LEARN hyperparameters if not provided; note that training runs only
        ## for the first task variable, after which the learned logtheta is reused
        if logtheta is None:
            ### TRAINING GP
            print()
            print('GP: ...training')
            ### INITIALIZE (hyper)parameters by -1
            d = X.shape[1]
            init = -1.0 * np.ones((d, 1))
            loghyper = np.array([[-1.0], [-1.0]])
            loghyper = np.vstack((init, loghyper))[:, 0]
            print('initial hyperparameters: ', np.exp(loghyper))
            ### TRAINING of (hyper)parameters
            logtheta = gpr.gp_train(loghyper, covfunc, X, y)
            print('trained hyperparameters: ', np.exp(logtheta))

        ## PREDICTION
        print('GP: ...prediction')
        results = gpr.gp_pred(logtheta, covfunc, X, y,
                              Xstar)  # get predictions for unlabeled data ONLY
        MU[:, i_task_var] = results[0][:, 0]
        S2[:, i_task_var:i_task_var + 1] = results[1]

    est_gt_poses_test = apply_pose_error(poses_test, MU)
    return est_gt_poses_test
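
A hypothetical call site with invented shapes (poses as 4x4 homogeneous matrices, states as feature vectors); calc_pose_error and apply_pose_error come from the surrounding project:

import numpy as np

n_train, n_test, d = 50, 10, 7
gt_poses_train = [np.eye(4) for _ in range(n_train)]   # placeholder poses
poses_train = [np.eye(4) for _ in range(n_train)]
state_train = np.random.randn(n_train, d)              # placeholder state features
poses_test = [np.eye(4) for _ in range(n_test)]
state_test = np.random.randn(n_test, d)

# Trains hyperparameters on the first task variable, then predicts a
# six-component pose correction for every test pose.
est_gt_poses_test = gp_correct_poses(gt_poses_train, poses_train, state_train,
                                     poses_test, state_test)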
Example no. 4
def gp_correct_poses(gt_poses_train, poses_train, state_train, poses_test, state_test, hy_params=None):
    pose_error = calc_pose_error(gt_poses_train, poses_train)

    n_test = len(poses_test)
    n_task_vars = 6

    MU = np.empty((n_test, n_task_vars))
    S2 = np.empty((n_test, n_task_vars))

    for i_task_var in range(n_task_vars):
        X = state_train

        ## DEFINE parameterized covariance function
        covfunc = ['kernels.covSum', ['kernels.covSEiso', 'kernels.covNoise']]

        ## SET (hyper)parameters: fixed defaults unless per-task values are given
        if hy_params is None:
            logtheta = np.array([np.log(0.1), np.log(1), np.log(np.sqrt(0.5))])
        else:
            logtheta = hy_params[i_task_var, :]
        #print('hyperparameters: ', np.exp(logtheta))

        ### observations from the GP
        y = pose_error[:, i_task_var]

        ### TEST POINTS
        Xstar = state_test

        ## PREDICTION
        print('GP: ...prediction')
        results = gpr.gp_pred(logtheta, covfunc, X, y, Xstar)  # get predictions for unlabeled data ONLY
        MU[:, i_task_var] = results[0]
        S2[:, i_task_var:i_task_var + 1] = results[1]

    est_gt_poses_test = [pose_to_tf(pose_diff).dot(pose) for (pose_diff, pose) in zip(MU, poses_test)]
    return est_gt_poses_test
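
Decoding the fixed fallback, under the assumption (not stated in the source) that covSEiso and covNoise use the usual GPML-style parameterization logtheta = [log(length-scale), log(signal std), log(noise std)]:

import numpy as np

logtheta = np.array([np.log(0.1), np.log(1), np.log(np.sqrt(0.5))])
ell, sf, sn = np.exp(logtheta)
print(ell, sf, sn ** 2)   # length-scale 0.1, signal std 1.0, noise variance 0.5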
Example no. 6
def k_fold_cv(k, loghyper, covfunc, X, id, y, R, w, trafo=None):
    '''Do k-fold cross validation for
       tuning the weight parameter of the SUM of the
       parameterized cov **function** AND the cov **matrix**.'''
    
    ## TRAINING the parameters
    logtheta = gpr.gp_train(loghyper, covfunc, X, y, R, w)
    #print exp(logtheta)
        
    MU = zeros((y.shape))
    S2 = zeros((y.shape))
    
    n = id.shape[0]
    s = n // k  # integer fold size; floor(n/k) returns a float, which breaks slicing
    for j in range(0,k):
        # prepare X, y and R according to fold
        if j==0:    # first fold
            X_tr     = X[s:n,:]
            y_tr     = y[s:n,:]
            id_tr    = id[s:n,:]              
            X_val    = X[0:s,:]
            id_val   = id[0:s,:]
        elif 0<j<k-1:
            X_tr     = X[0:j*s,:]
            X_tr     = vstack((X_tr,X[j*s+s:n,:]))
            y_tr     = y[0:j*s,:]
            y_tr     = vstack((y_tr,y[j*s+s:n,:]))
            id_tr    = id[0:j*s,:]
            id_tr    = vstack((id_tr,id[j*s+s:n,:])) 
            X_val    = X[j*s:j*s+s,:]
            id_val   = id[j*s:j*s+s,:]         
        else:   # last fold
            s1 = s + n % k  # the last fold absorbs the remainder
            X_tr     = X[0:n-s1,:]
            y_tr     = y[0:n-s1,:]
            id_tr    = id[0:n-s1,:]   
            X_val    = X[n-s1:n,:]
            id_val   = id[n-s1:n,:]
        

        loc_tr = general.get_index_vec(id_tr[:,0], id[:,0])
        R_tr = R.take(loc_tr.astype(int),axis=0).take(loc_tr.astype(int),axis=1)
        
        loc_val = general.get_index_vec(id_val[:,0], id[:,0])
        R_tr_val = R.take(loc_tr.astype(int),axis=0).take(loc_val.astype(int),axis=1)

        ## GET self covariances for the test cases (needed in covMatrix)
        R_val = R.take(loc_val.astype(int),axis=0).take(loc_val.astype(int),axis=1)
        d = diag(R_val).transpose()   
        R_tr_val = vstack((R_tr_val, d))
        
        ## PREDICTION 
        [MU_temp, S2_temp] = gpr.gp_pred(logtheta, covfunc, X_tr, y_tr, X_val, R_tr, w, R_tr_val)
        
        if j==0:  # first fold
            MU[0:s,:] = MU_temp
            S2[0:s,:] = S2_temp
        elif 0<j<k-1:
            MU[j*s:j*s+s,:] = MU_temp
            S2[j*s:j*s+s,:] = S2_temp      
        else:     # last fold
            MU[n-s1:n,:] = MU_temp
            S2[n-s1:n,:] = S2_temp 
            
    ## VALIDATION of the cross-validated predictions against the full y
    ## DO NOT rescale predictions - rescaling is done in valid_fig if needed!
    M = valid_fig(y, MU, trafo=trafo)
    print(w)
    print(M)
    return hstack((M, logtheta))
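
A quick sanity check of the fold bookkeeping above, with hypothetical sizes: the first k-1 folds each cover s rows and the last fold absorbs the remainder n % k:

n, k = 10, 3
s = n // k                       # 3 rows per regular fold
s1 = s + n % k                   # 4 rows: the last fold absorbs the remainder
folds = [(0, s)] + [(j * s, j * s + s) for j in range(1, k - 1)] + [(n - s1, n)]
print(folds)                     # [(0, 3), (3, 6), (6, 10)]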
Example no. 7
#d = X.shape[1]
#init = -1*ones((d,1))
#loghyper = array([[-1], [-1]])
#loghyper = vstack((init, loghyper))[:,0]
#print 'initial hyperparameters: ', exp(loghyper)
### TRAINING of (hyper)parameters
#logtheta = gpr.gp_train(loghyper, covfunc, X, y)
#print 'trained hyperparameters: ',exp(logtheta)


## to GET prior covariance of Standard GP use:
#[Kss, Kstar] = general.feval(covfunc, logtheta, X, Xstar)    # Kss = self covariances of test cases, 
#                                                             # Kstar = cov between train and test cases


## PREDICTION 
print('GP: ...prediction')
results = gpr.gp_pred(logtheta, covfunc, X, y, Xstar) # get predictions for unlabeled data ONLY
MU = results[0]
S2 = results[1]

#print MU


## plot results
pyplot.suptitle('logtheta:', fontsize=12)
pyplot.title(logtheta)

pyplot.plot(Xstar,MU, 'g^', X,y, 'ro')
pyplot.show()
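
S2 is computed above but never plotted; a hedged extension (assuming one-dimensional inputs and that S2 holds predictive variances) shades a two-sigma band around the predictive mean:

import numpy as np

order = np.argsort(np.ravel(Xstar))
xs = np.ravel(Xstar)[order]
mu = np.ravel(MU)[order]
sd = np.sqrt(np.ravel(S2)[order])
pyplot.fill_between(xs, mu - 2 * sd, mu + 2 * sd, alpha=0.3)  # ~95% band
pyplot.plot(xs, mu, 'g-')
pyplot.show()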
Example no. 8
# loghyper = array([[-1], [-1]])
# loghyper = vstack((init, loghyper))[:,0]
# print 'initial hyperparameters: ', exp(loghyper)
#
# w = array([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1])  # set vector for grid search
# M = zeros((size(w),3+size(loghyper)))  # matrix to collect [r_sq_B, RMSE_B, MAE_B] for all weights w(i)
# print 'XGP: ...training (PT)'
# print 'XGP: ...tune weight parameter w (via CV)'
# for i in range(0,size(w)):
#    M[i,:] = validation.k_fold_cv(3, loghyper, covfunc, X, id_lab, y.copy(), A, w[i])
#
## SELECT weight and corresponding hyperparameters according to highest R_sq value
# loc = M[:,0].argmax(0)
# w_used = w[loc]
# print 'selected weight: '+str(w_used)
# logtheta = array(M[loc,3:])


# XGP PREDICTION
print "XGP: ...prediction (PT)"
[MU, S2] = gpr.gp_pred(logtheta, covfunc, X, y, Xstar, A, w_used, B)

# print MU

## plot results
pyplot.suptitle("logtheta:", fontsize=12)
pyplot.title(logtheta)

pyplot.plot(Xstar, MU, "g^", X, y, "ro")
pyplot.show()
Example no. 10
    def predict_2(self, X, y, x_star):
        prediction = gpr.gp_pred(self.logtheta, self.covfunc, X, y, x_star)
        return prediction
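
A sketch of the enclosing class predict_2 implies: it only needs trained hyperparameters and a covariance function stored on self (the wrapper name is invented for illustration):

class GPWrapper(object):
    '''Hypothetical holder for a trained GP's state.'''
    def __init__(self, logtheta, covfunc):
        self.logtheta = logtheta
        self.covfunc = covfunc

    def predict_2(self, X, y, x_star):
        prediction = gpr.gp_pred(self.logtheta, self.covfunc, X, y, x_star)
        return prediction

# MU, S2 = GPWrapper(logtheta, covfunc).predict_2(X, y, x_star)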