Example #1
    def __gpr_precompute(self, x_gt, x_noisy, state_train):
        """
        Compute the alphas and hyper-params on the training-data.
    
        x_gt   : ground-truth positions (nx3) matrix.
        x_noisy: noisy positions (nx3) matrix.
        state_train : the states corresponding to x_noisy (nx6) matrix (the state is xyzrpy). 
        """
        N = x_gt.shape[0]
        assert x_noisy.shape[0] == N == state_train.shape[0]
    
        x_errors  = x_gt - x_noisy

        alphas      = []
        hparams     = np.empty((3,3))
        covfunc     = ['kernels.covSum', ['kernels.covSEiso','kernels.covNoise']]
        n_task_vars = 3

        for i_task_var in range(n_task_vars):
            print 'GP training variable : ', (i_task_var+1), '/', n_task_vars
            logtheta = np.array([-1,-1,-1])
            X        = state_train
            y        = x_errors[:, i_task_var]
            y        = np.reshape(y, (N,1))
            hparams[i_task_var,:] = gpr.gp_train(logtheta, covfunc, X, y)
            alphas.append(self.__precompute_alpha(hparams[i_task_var,:], covfunc, X, y))

        return [hparams, alphas]
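
A minimal usage sketch for the method above (hedged: the data below is illustrative, the repo's gpr/kernels modules must be importable, and since the method is name-mangled private it would normally be called from inside its own class):

import numpy as np

n = 200
x_gt        = np.random.randn(n, 3)                # ground-truth positions
x_noisy     = x_gt + 0.01 * np.random.randn(n, 3)  # noisy positions
state_train = np.random.randn(n, 6)                # xyzrpy states

# from inside the defining class:
#     hparams, alphas = self.__gpr_precompute(x_gt, x_noisy, state_train)
# hparams is a 3x3 matrix (one row of kernel log-params per axis) and
# alphas holds one precomputed alpha vector per axis.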
Example #2
def valid(loghyper,
          covfunc,
          X,
          id,
          y,
          X_val,
          y_val,
          R_tr,
          w,
          R_tr_val,
          R_val,
          trafo=None):
    '''Validate on a held-out dataset while tuning the weight
       parameter of the SUM of a parameterized cov **function**
       AND a cov **matrix**.'''

    ## TRAINING the parameters
    logtheta = gpr.gp_train(loghyper, covfunc, X, y, R_tr, w)

    ## GET self covariances for the test cases (needed in covMatrix)
    d = diag(R_val).transpose()
    R_tr_val = vstack((R_tr_val, d))

    ## PREDICTION
    [MU, S2] = gpr.gp_pred(logtheta, covfunc, X, y, X_val, R_tr, w, R_tr_val)

    ## VALIDATION for predictions on validation dataset
    ## DO NOT rescale predictions - rescaling is done in valid_fig if needed!
    M = valid_fig(y_val, MU, trafo=trafo)
    print w
    print M
    return hstack((M, logtheta))
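
A hedged driver for valid() might look like the following; every array is an illustrative assumption shaped to match what the function body expects (R_tr_val is n_tr x n_val so that vstack-ing the R_val diagonal works), and gpr, kernels, and valid_fig are assumed to come from this repo:

import numpy as np

n_tr, n_val, d = 80, 20, 5
X     = np.random.randn(n_tr, d)
y     = np.random.randn(n_tr, 1)
X_val = np.random.randn(n_val, d)
y_val = np.random.randn(n_val, 1)
ids   = np.arange(n_tr).reshape(-1, 1)      # sample identifiers

R_tr     = np.eye(n_tr)                     # cov *matrix* part, training cases
R_tr_val = np.zeros((n_tr, n_val))          # train/validation cross terms
R_val    = np.eye(n_val)                    # validation self covariances

covfunc  = ['kernels.covSum', ['kernels.covSEiso', 'kernels.covNoise']]
loghyper = -1.0 * np.ones(3)                # covSEiso (2) + covNoise (1)

M_theta = valid(loghyper, covfunc, X, ids, y, X_val, y_val,
                R_tr, 0.5, R_tr_val, R_val)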
Example #3
def gp_correct_poses_precompute(gt_poses_train, poses_train, state_train, train_hyper=True, hyper_seed=None, subsample=1):
    # pose errors between ground-truth and estimated training poses
    pose_error = calc_pose_error(gt_poses_train, poses_train)
    n_task_vars = pose_error.shape[1]
    alphas = []
    loghyper = hyper_seed
    
    if train_hyper:
        loghyper = []
    if hyper_seed is None:
        hyper_seed = [-1] * n_task_vars
    
    # sample X for hyperparam training
    n = state_train.shape[0]
    d = state_train.shape[1]

    ## data from a noisy GP
    X = state_train
    X_subsample = np.empty((0, d))
    y_subsample = np.empty((0, n_task_vars))
    for i in range(n):
        if i % subsample == 0:
            X_subsample = np.r_[X_subsample, state_train[i:(i+1),:]]
            y_subsample = np.r_[y_subsample, pose_error[i:(i+1),:]]
            
    ## DEFINE parameterized covariance function
    covfunc = ['kernels.covSum', ['kernels.covSEiso','kernels.covNoise']]
            
    # sample y for y training
    for i_task_var in range(n_task_vars):
        ### sample observations from the GP
        y_task_var = y_subsample[:,i_task_var]
        y = pose_error[:,i_task_var]
        
        ## SET (hyper)parameters (HACK: skip the rotation angles for now - training them gains nothing because those measurements are so noisy)
        if train_hyper and i_task_var < 3:
            ## LEARN the hyperparameters if none are provided
            print 'GP: ...training variable ', i_task_var
            ### INITIALIZE (hyper)parameters by -1
            d = X.shape[1]
                
            h = hyper_seed[i_task_var]
            init = h * np.ones((d,1))
            loghyper_var_i = np.array([[h], [h]])
            loghyper_var_i = np.vstack((init, loghyper_var_i))[:,0]
            print 'initial hyperparameters: ', np.exp(loghyper_var_i)
            ### TRAINING of (hyper)parameters
            loghyper_var_i = gpr.gp_train(loghyper_var_i, covfunc, X_subsample, y_task_var)
            print 'trained hyperparameters: ',np.exp(loghyper_var_i)
            loghyper.append(loghyper_var_i)
        else:
            h = hyper_seed[i_task_var]
            init = h * np.ones((1,d)) 
            loghyper.append(init)

        ## PREDICTION precomputation
        # IPython.embed()
        alphas.append(gp_pred_precompute_alpha(loghyper[i_task_var], covfunc, X, y))
    return alphas, loghyper
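
A sketch of how the precompute step might be consumed (the pose containers are whatever calc_pose_error accepts; subsample=10 is an arbitrary choice that trains hyperparameters on every 10th sample only):

alphas, loghyper = gp_correct_poses_precompute(gt_poses_train, poses_train,
                                               state_train, train_hyper=True,
                                               subsample=10)
# alphas[i] and loghyper[i] can then be cached and reused at prediction
# time for task variable i without re-running gp_train.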
Example #4
def gp_correct_poses(gt_poses_train,
                     poses_train,
                     state_train,
                     poses_test,
                     state_test,
                     logtheta=None):
    print "using gp poses"
    pose_error = calc_pose_error(gt_poses_train, poses_train)

    n_test = len(poses_test)
    n_task_vars = 6

    MU = np.empty((n_test, n_task_vars))
    S2 = np.empty((n_test, n_task_vars))

    for i_task_var in range(n_task_vars):
        ## data from a noisy GP
        X = state_train

        ## DEFINE parameterized covariance function
        covfunc = ['kernels.covSum', ['kernels.covSEiso', 'kernels.covNoise']]

        #print 'hyperparameters: ', np.exp(logtheta)

        ### sample observations from the GP
        y = np.array([pose_error[:, i_task_var]]).reshape((-1, 1))

        ### TEST POINTS
        Xstar = state_test

        ## LEARN hyperparameters if not provided
        if logtheta is None:
            ### TRAINING GP
            print
            print 'GP: ...training'
            ### INITIALIZE (hyper)parameters by -1
            d = X.shape[1]
            init = -1.0 * np.ones((d, 1))
            loghyper = np.array([[-1.0], [-1.0]])
            loghyper = np.vstack((init, loghyper))[:, 0]
            print 'initial hyperparameters: ', np.exp(loghyper)
            ### TRAINING of (hyper)parameters
            logtheta = gpr.gp_train(loghyper, covfunc, X, y)
            print 'trained hyperparameters: ', np.exp(logtheta)

        ## PREDICTION
        print 'GP: ...prediction'
        results = gpr.gp_pred(logtheta, covfunc, X, y,
                              Xstar)  # get predictions for unlabeled data ONLY
        MU[:, i_task_var] = results[0][:, 0]
        S2[:, i_task_var:i_task_var + 1] = results[1]

    est_gt_poses_test = apply_pose_error(poses_test, MU)
    return est_gt_poses_test
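
An illustrative call (the pose containers and state matrices are assumptions; they must match whatever calc_pose_error and apply_pose_error expect). Note a quirk of the loop above: once logtheta is trained for the first DOF it is no longer None, so the same hyperparameters are silently reused for the remaining five DOFs.

# Train hyperparameters on the first DOF, then correct the test poses:
est_poses = gp_correct_poses(gt_poses_train, poses_train, state_train,
                             poses_test, state_test)
# Passing a previously trained logtheta skips the costly gp_train call:
est_poses = gp_correct_poses(gt_poses_train, poses_train, state_train,
                             poses_test, state_test, logtheta=logtheta)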
Example #5
def gp_correct_poses(gt_poses_train, poses_train, state_train, poses_test, state_test, logtheta=None):
    print "using gp poses"
    pose_error = calc_pose_error(gt_poses_train, poses_train)

    n_test = len(poses_test)
    n_task_vars = 6

    MU = np.empty((n_test, n_task_vars))
    S2 = np.empty((n_test, n_task_vars))

    for i_task_var in range(n_task_vars):
        ## data from a noisy GP
        X = state_train

        ## DEFINE parameterized covariance function
        covfunc = ['kernels.covSum', ['kernels.covSEiso','kernels.covNoise']]

        #print 'hyperparameters: ', np.exp(logtheta)

        ### sample observations from the GP
        y = np.array([pose_error[:,i_task_var]]).reshape((-1,1))

        ### TEST POINTS
        Xstar = state_test

        ## LEARN hyperparameters if not provided
        if logtheta is None:
            ### TRAINING GP
            print
            print 'GP: ...training'
            ### INITIALIZE (hyper)parameters by -1
            d = X.shape[1]
            init = -1.0*np.ones((d,1))
            loghyper = np.array([[-1.0], [-1.0]])
            loghyper = np.vstack((init, loghyper))[:,0]
            print 'initial hyperparameters: ', np.exp(loghyper)
            ### TRAINING of (hyper)parameters
            logtheta = gpr.gp_train(loghyper, covfunc, X, y)
            print 'trained hyperparameters: ',np.exp(logtheta)

        ## PREDICTION 
        print 'GP: ...prediction'
        results = gpr.gp_pred(logtheta, covfunc, X, y, Xstar) # get predictions for unlabeled data ONLY
        MU[:,i_task_var] = results[0][:,0]
        S2[:,i_task_var:i_task_var+1] = results[1]

    est_gt_poses_test = apply_pose_error(poses_test, MU)
    return est_gt_poses_test
Example #6
def train_hyperparams(gt_poses_train, poses_train, state_train):
    """
    Returns the tuned hyperparameters for 6 GPs as a 6x3 matrix
    (3 params per DOF).

    gt_poses_train, poses_train, state_train are as in the
    function gp_correct_poses.
    """
    covfunc = ['kernels.covSum', ['kernels.covSEiso', 'kernels.covNoise']]
    pose_error = calc_pose_error(gt_poses_train, poses_train)
    hparams = np.empty((6, 3))
    n_task_vars = 6

    for i_task_var in range(n_task_vars):
        print i_task_var
        X = state_train
        logtheta = np.array([-1, -1, -1])
        y = pose_error[:, i_task_var]
        hparams[i_task_var, :] = gpr.gp_train(logtheta, covfunc, X,
                                              np.reshape(y, (y.shape[0], 1)))
    print 'Tuned hyper-params: ', hparams
    return hparams
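
A short usage sketch (inputs are the same assumed pose/state containers as in gp_correct_poses):

hparams = train_hyperparams(gt_poses_train, poses_train, state_train)
# hparams[i, :] holds the 3 tuned log hyperparameters for DOF i,
# e.g. hparams[2, :] for z given the xyzrpy ordering.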
Example #7
def valid(loghyper, covfunc, X, id, y, X_val, y_val, R_tr, w, R_tr_val, R_val, trafo=None):
    '''Validate on a held-out dataset while tuning the weight
       parameter of the SUM of a parameterized cov **function**
       AND a cov **matrix**.'''
    
    ## TRAINING the parameters
    logtheta = gpr.gp_train(loghyper, covfunc, X, y, R_tr, w)

    ## GET self covariances for the test cases (needed in covMatrix)
    d = diag(R_val).transpose()   
    R_tr_val = vstack((R_tr_val, d))
    
    ## PREDICTION 
    [MU, S2] = gpr.gp_pred(logtheta, covfunc, X, y, X_val, R_tr, w, R_tr_val)
        
            
    ## VALIDATION for predictions on validation dataset
    ## DO NOT rescale predictions - rescaling is done in valid_fig if needed!
    M = valid_fig(y_val, MU, trafo=trafo)
    print w
    print M
    return hstack((M, logtheta))
Example #8
def k_fold_cv(k, loghyper, covfunc, X, id, y, R, w, trafo=None):
    '''Do k-fold cross-validation for tuning the weight
       parameter of the SUM of a parameterized cov **function**
       AND a cov **matrix**.'''
    
    ## TRAINING the parameters
    logtheta = gpr.gp_train(loghyper, covfunc, X, y, R, w)
    #print exp(logtheta)
        
    MU = zeros((y.shape))
    S2 = zeros((y.shape))
    
    n = id.shape[0]
    s = int(floor(n / k))  # fold size; cast so it can be used as a slice index
    for j in range(0,k):
        # prepare X, y and R according to fold
        if j==0:    # first fold
            X_tr     = X[s:n,:]
            y_tr     = y[s:n,:]
            id_tr    = id[s:n,:]              
            X_val    = X[0:s,:]
            id_val   = id[0:s,:]
        elif 0<j<k-1:
            X_tr     = X[0:j*s,:]
            X_tr     = vstack((X_tr,X[j*s+s:n,:]))
            y_tr     = y[0:j*s,:]
            y_tr     = vstack((y_tr,y[j*s+s:n,:]))
            id_tr    = id[0:j*s,:]
            id_tr    = vstack((id_tr,id[j*s+s:n,:])) 
            X_val    = X[j*s:j*s+s,:]
            id_val   = id[j*s:j*s+s,:]         
        else:   # last fold
            s1 = int(s + fmod(n, k))
            X_tr     = X[0:n-s1,:]
            y_tr     = y[0:n-s1,:]
            id_tr    = id[0:n-s1,:]   
            X_val    = X[n-s1:n,:]
            id_val   = id[n-s1:n,:]
        

        loc_tr = general.get_index_vec(id_tr[:,0], id[:,0])
        R_tr = R.take(loc_tr.astype(int),axis=0).take(loc_tr.astype(int),axis=1)
        
        loc_val = general.get_index_vec(id_val[:,0], id[:,0])
        R_tr_val = R.take(loc_tr.astype(int),axis=0).take(loc_val.astype(int),axis=1)

        ## GET self covariances for the test cases (needed in covMatrix)
        R_val = R.take(loc_val.astype(int),axis=0).take(loc_val.astype(int),axis=1)
        d = diag(R_val).transpose()   
        R_tr_val = vstack((R_tr_val, d))
        
        ## PREDICTION 
        [MU_temp, S2_temp] = gpr.gp_pred(logtheta, covfunc, X_tr, y_tr, X_val, R_tr, w, R_tr_val)
        
        if j==0:  # first fold
            MU[0:s,:] = MU_temp
            S2[0:s,:] = S2_temp
        elif 0<j<k-1:
            MU[j*s:j*s+s,:] = MU_temp
            S2[j*s:j*s+s,:] = S2_temp      
        else:     # last fold
            MU[n-s1:n,:] = MU_temp
            S2[n-s1:n,:] = S2_temp 
            
    ## VALIDATION for predictions on validation dataset
    ## DO NOT rescale predictions - rescaling is done in valid_fig if needed!
    M = valid_fig(y, MU, trafo=trafo)
    print w
    print M
    return hstack((M, logtheta))
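
A hypothetical weight sweep around k_fold_cv (every name below is an illustrative assumption, shaped to match what the function expects):

import numpy as np

n, d = 100, 4
X   = np.random.randn(n, d)
y   = np.random.randn(n, 1)
ids = np.arange(n).reshape(-1, 1)          # sample identifiers
R   = np.eye(n)                            # parameterized cov *matrix*
covfunc  = ['kernels.covSum', ['kernels.covSEiso', 'kernels.covNoise']]
loghyper = -1.0 * np.ones(3)               # covSEiso (2) + covNoise (1)

for w in (0.0, 0.25, 0.5, 0.75, 1.0):      # candidate mixture weights
    M_theta = k_fold_cv(5, loghyper, covfunc, X, ids, y, R, w)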
Example #9
def gp_correct_poses_precompute(gt_poses_train,
                                poses_train,
                                state_train,
                                train_hyper=True,
                                hyper_seed=None,
                                subsample=1):
    # pose errors between ground-truth and estimated training poses
    pose_error = calc_pose_error(gt_poses_train, poses_train)
    n_task_vars = pose_error.shape[1]
    alphas = []
    loghyper = hyper_seed

    if train_hyper:
        loghyper = []
    if hyper_seed is None:
        hyper_seed = [-1] * n_task_vars

    # sample X for hyperparam training
    n = state_train.shape[0]
    d = state_train.shape[1]

    ## data from a noisy GP
    X = state_train
    X_subsample = np.empty((0, d))
    y_subsample = np.empty((0, n_task_vars))
    for i in range(n):
        if i % subsample == 0:
            X_subsample = np.r_[X_subsample, state_train[i:(i + 1), :]]
            y_subsample = np.r_[y_subsample, pose_error[i:(i + 1), :]]

    ## DEFINE parameterized covariance function
    covfunc = ['kernels.covSum', ['kernels.covSEiso', 'kernels.covNoise']]

    # sample y for y training
    for i_task_var in range(n_task_vars):
        ### sample observations from the GP
        y_task_var = y_subsample[:, i_task_var]
        y = pose_error[:, i_task_var]

        ## SET (hyper)parameters (HACK: skip the rotation angles for now - training them gains nothing because those measurements are so noisy)
        if train_hyper and i_task_var < 3:
            ## LEARN the hyperparameters if none are provided
            print 'GP: ...training variable ', i_task_var
            ### INITIALIZE (hyper)parameters by -1
            d = X.shape[1]

            h = hyper_seed[i_task_var]
            init = h * np.ones((d, 1))
            loghyper_var_i = np.array([[h], [h]])
            loghyper_var_i = np.vstack((init, loghyper_var_i))[:, 0]
            print 'initial hyperparameters: ', np.exp(loghyper_var_i)
            ### TRAINING of (hyper)parameters
            loghyper_var_i = gpr.gp_train(loghyper_var_i, covfunc, X_subsample,
                                          y_task_var)
            print 'trained hyperparameters: ', np.exp(loghyper_var_i)
            loghyper.append(loghyper_var_i)
        else:
            h = hyper_seed[i_task_var]
            init = h * np.ones((1, d))
            loghyper.append(init)

        ## PREDICTION precomputation
        # IPython.embed()
        alphas.append(
            gp_pred_precompute_alpha(loghyper[i_task_var], covfunc, X, y))
    return alphas, loghyper
Example #10
# ***UNCOMMENT THE FOLLOWING LINES TO DO TRAINING OF HYPERPARAMETERS***
### TRAINING GP
#print 'GP: ...training'
### INITIALIZE (hyper)parameters by -1
#d = X.shape[1]
#init = -1*ones((d,1))
#loghyper = array([[-1], [-1]])
#loghyper = vstack((init, loghyper))[:,0]
#print 'initial hyperparameters: ', exp(loghyper)
### TRAINING of (hyper)parameters
y = array([x * x for x in X])
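# NOTE (assumption about library vintage): preprocessing.Scaler is the old
# scikit-learn name; recent releases expose the same centering behaviour as
# preprocessing.StandardScaler(with_std=False).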
outputScaler = preprocessing.Scaler(with_std=False).fit(y)
scaledy = outputScaler.transform(y)   

logtheta = gpr.gp_train(loghyper, covfunc, X, scaledy)
#print 'trained hyperparameters: ',exp(logtheta)


## to GET prior covariance of Standard GP use:
#[Kss, Kstar] = general.feval(covfunc, logtheta, X, Xstar)    # Kss = self covariances of test cases, 
#                                                             # Kstar = cov between train and test cases

print "loghyper :",loghyper

## PREDICTION 
print 'GP: ...prediction'
results = gpr.gp_pred(logtheta, covfunc, X, scaledy, Xstar) # get predictions for unlabeled data ONLY

MU = outputScaler.inverse_transform(results[0])
S2 = results[1]
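
The fragment above assumes X, Xstar, covfunc, and loghyper already exist; a minimal setup that would make it runnable might look like this (values are illustrative, and the loghyper construction mirrors the commented-out lines in the fragment):

from numpy import array, ones, vstack, linspace

X     = linspace(-3.0, 3.0, 61).reshape(-1, 1)   # 1-D training inputs
Xstar = linspace(-3.5, 3.5, 101).reshape(-1, 1)  # unlabeled test inputs
covfunc = ['kernels.covSum', ['kernels.covSEiso', 'kernels.covNoise']]
d = X.shape[1]
loghyper = vstack((-1 * ones((d, 1)), array([[-1], [-1]])))[:, 0]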
Example #11
def k_fold_cv(k, loghyper, covfunc, X, id, y, R, w, trafo=None):
    '''Do k-fold cross-validation for tuning the weight
       parameter of the SUM of a parameterized cov **function**
       AND a cov **matrix**.'''

    ## TRAINING the parameters
    logtheta = gpr.gp_train(loghyper, covfunc, X, y, R, w)
    #print exp(logtheta)

    MU = zeros((y.shape))
    S2 = zeros((y.shape))

    n = id.shape[0]
    s = int(floor(n / k))  # fold size; cast so it can be used as a slice index
    for j in range(0, k):
        # prepare X, y and R according to fold
        if j == 0:  # first fold
            X_tr = X[s:n, :]
            y_tr = y[s:n, :]
            id_tr = id[s:n, :]
            X_val = X[0:s, :]
            id_val = id[0:s, :]
        elif 0 < j < k - 1:
            X_tr = X[0:j * s, :]
            X_tr = vstack((X_tr, X[j * s + s:n, :]))
            y_tr = y[0:j * s, :]
            y_tr = vstack((y_tr, y[j * s + s:n, :]))
            id_tr = id[0:j * s, :]
            id_tr = vstack((id_tr, id[j * s + s:n, :]))
            X_val = X[j * s:j * s + s, :]
            id_val = id[j * s:j * s + s, :]
        else:  # last fold
            s1 = int(s + fmod(n, k))
            X_tr = X[0:n - s1, :]
            y_tr = y[0:n - s1, :]
            id_tr = id[0:n - s1, :]
            X_val = X[n - s1:n, :]
            id_val = id[n - s1:n, :]

        loc_tr = general.get_index_vec(id_tr[:, 0], id[:, 0])
        R_tr = R.take(loc_tr.astype(int), axis=0).take(loc_tr.astype(int),
                                                       axis=1)

        loc_val = general.get_index_vec(id_val[:, 0], id[:, 0])
        R_tr_val = R.take(loc_tr.astype(int), axis=0).take(loc_val.astype(int),
                                                           axis=1)

        ## GET self covariances for the test cases (needed in covMatrix)
        R_val = R.take(loc_val.astype(int), axis=0).take(loc_val.astype(int),
                                                         axis=1)
        d = diag(R_val).transpose()
        R_tr_val = vstack((R_tr_val, d))

        ## PREDICTION
        [MU_temp, S2_temp] = gpr.gp_pred(logtheta, covfunc, X_tr, y_tr, X_val,
                                         R_tr, w, R_tr_val)

        if j == 0:  # first fold
            MU[0:s, :] = MU_temp
            S2[0:s, :] = S2_temp
        elif 0 < j < k - 1:
            MU[j * s:j * s + s, :] = MU_temp
            S2[j * s:j * s + s, :] = S2_temp
        else:  # last fold
            MU[n - s1:n, :] = MU_temp
            S2[n - s1:n, :] = S2_temp

    ## VALIDATION for predictions on validation dataset
    ## DO NOT rescale predictions - rescaling is done in valid_fig if needed!
    M = valid_fig(y, MU, trafo=trafo)
    print w
    print M
    return hstack((M, logtheta))
Example #12
### TRAINING GP
#print 'GP: ...training'
### INITIALIZE (hyper)parameters by -1
d = X.shape[1]
init = -1*ones((d,1))
loghyper = array([[-1], [-1]])
loghyper = vstack((init, loghyper))[:,0]
print 'LOG-HYPER: ',loghyper
print 'INIT : ',init
print 'initial hyperparameters: ', exp(loghyper)
## TRAINING of (hyper)parameters
print loghyper.shape
print X.shape
print y.shape

logtheta = gpr.gp_train(loghyper, covfunc, X, y)
print 'trained shape: ', logtheta.shape

print 'trained hyperparameters: ',exp(logtheta)


## to GET prior covariance of Standard GP use:
#[Kss, Kstar] = general.feval(covfunc, logtheta, X, Xstar)    # Kss = self covariances of test cases, 
#                                                             # Kstar = cov between train and test cases


## PREDICTION 
print 'GP: ...prediction'
results = gpr.gp_pred(logtheta, covfunc, X, y, Xstar) # get predictions for unlabeled data ONLY
MU = results[0]
S2 = results[1]
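
One common way to consume these outputs (hedged: this assumes, as in gpml-style code, that S2 holds the predictive variances produced above):

import numpy as np

# 95% confidence band around the predictive mean
lower = MU - 1.96 * np.sqrt(S2)
upper = MU + 1.96 * np.sqrt(S2)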