Example #1
 def state_integrate(self):
   rate = rospy.Rate(100)
   while not rospy.is_shutdown():
     rate.sleep()
     #For integration model
     Q_k = np.eye(9)*.2
     w = np.random.multivariate_normal(np.zeros(9), Q_k)  # process-noise sample (unused below)
     #For sensor measurement
     R_k = np.eye(3)*.01
     v = np.random.multivariate_normal(np.zeros(3), R_k)
     #Predict
     s_k = np.transpose(np.dot(self.A, self.s))
     P_k = np.add(np.dot(np.dot(self.A, self.P), np.transpose(self.A)), Q_k)
     #Update
     if self.update_unfilt:
       y_k = np.subtract(np.transpose(np.add(self.z, v)), np.dot(self.H, s_k))
       S_K = np.add(np.dot(np.dot(self.H, P_k), np.transpose(self.H)), R_k)  # innovation covariance H P H' + R
       K = np.dot(P_k, np.dot(np.transpose(self.H), np.linalg.pinv(S_K)))
       self.s = np.add(s_k, np.dot(K, y_k))
       self.P = np.dot(np.subtract(np.eye(9), np.dot(K, self.H)), P_k)
       #This could result in a race condition; will need to test
       self.update_unfilt = False
     else:
       self.s = s_k
       self.P = P_k
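A quick standalone check of the gain computation above, with synthetic H, P and R (not the node's actual state): when the innovation covariance S_K is invertible, the pseudo-inverse gain equals the solve-based one, so pinv only changes behavior when S_K is singular.

import numpy as np

rng = np.random.default_rng(0)
H = rng.standard_normal((3, 9))          # measurement matrix
P = np.eye(9)                            # state covariance
R = 0.01 * np.eye(3)                     # measurement noise covariance
S = H @ P @ H.T + R                      # innovation covariance
K_pinv = P @ H.T @ np.linalg.pinv(S)
K_solve = np.linalg.solve(S, H @ P).T    # same gain without forming an inverse
print(np.allclose(K_pinv, K_solve))      # True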
Example #3
File: nimsphysio.py  Project: while321/nims
 def denoise_image(self, d, regressors):
     """
     Correct the image data slice-wise: regress the physio regressors out of
     each voxel time series and report the percent variance removed.
     d : 4-D image array of shape (npix_x, npix_y, nslc, nframes)
     FIXME: NOT TESTED
     """
     nslc = d.shape[2]
     self.nframes = d.shape[3]
     npix_x = d.shape[0]
     npix_y = d.shape[1]
     PCT_VAR_REDUCED = np.zeros((npix_x, npix_y, nslc))
     d_corrected = np.zeros(d.shape)
     for jj in range(nslc):
         slice_data = np.squeeze(d[:,:,jj,:])
         Y_slice = slice_data.reshape((npix_x*npix_y, self.nframes)).transpose() #ntime x nvox
         t = np.arange(self.nframes)
         # design matrix: linear trend, quadratic trend, physio regressors
         XX = np.column_stack((t, t**2., regressors[:,:,jj]))
         XX = (XX - XX.mean(axis=0)) / XX.std(axis=0)  # z-score each column
         XX = np.concatenate((np.ones((XX.shape[0], 1)), XX), axis=1)
         Betas = np.dot(np.linalg.pinv(XX), Y_slice)
         Y_slice_corr = Y_slice - np.dot(XX[:,3:], Betas[3:,:]) # keep the intercept and trends
         # calculate percent variance reduction
         var_reduced = (np.var(Y_slice, axis=0) - np.var(Y_slice_corr, axis=0)) / np.var(Y_slice, axis=0)
         PCT_VAR_REDUCED[:,:,jj] = var_reduced.transpose().reshape((npix_x, npix_y))
         # fill corrected volume
         V_slice_corr = Y_slice_corr.transpose()
         for ii in range(self.nframes):
             d_corrected[:,:,jj,ii] = V_slice_corr[:,ii].reshape((npix_x,npix_y))
     return d_corrected, PCT_VAR_REDUCED
Example #4
def GMM(PCA, NCLASS=2):
    """
    PDF = Pr(G) (2 pi)^(-k/2) |S|^(-1/2) exp[-1/2 (x-mu)' S^(-1) (x-mu)]
    log PDF = log Pr(G) - k/2 log(2 pi) - 1/2 log|S| - 1/2 (x-mu)' S^(-1) (x-mu)
    Pr is monotonic in log Pr(G) - log|S| - (x-mu)' S^(-1) (x-mu)
    """
    N = PCA.shape[1]
    initsize = N // NCLASS
    classes = np.zeros((N,))
    oldclasses = np.zeros((N,))
    Pr = np.zeros((N, NCLASS))
    partition = (2 * np.pi) ** (-0.5 * NCLASS)
    for i in range(NCLASS):
        classes[i * initsize : (i + 1) * initsize] = i
    for ii in range(2000):
        for i in range(NCLASS):
            c = PCA[:, classes == i]
            Mu = np.mean(c, 1)
            Cm = np.cov((c.T - Mu).T)
            k = np.shape(c)[1]
            Pm = np.linalg.pinv(Cm)
            center = PCA.T - Mu
            normalize = partition * k / (N + 1.0) / np.sqrt(np.linalg.det(Cm))
            Pr[:, i] = np.exp(-0.5 * np.array([np.dot(x, np.dot(Pm, x.T)) for x in center])) * normalize
        oldclasses[:] = classes
        classes = np.argmax(Pr, 1)
        if np.all(oldclasses == classes):
            break
    classification = (Pr.T / np.sum(Pr, 1)).T
    return classification, Pr
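A hedged usage sketch for GMM above: it expects the data as a (features x samples) array and hard-assigns each sample by the larger class probability. Two synthetic blobs:

import numpy as np

rng = np.random.default_rng(2)
blob_a = rng.normal(-2.0, 0.5, size=(2, 100))
blob_b = rng.normal(+2.0, 0.5, size=(2, 100))
data = np.concatenate((blob_a, blob_b), axis=1)   # shape (2, 200)
classification, Pr = GMM(data, NCLASS=2)
labels = np.argmax(classification, axis=1)
print(labels[:100].mean(), labels[100:].mean())   # each blob lands in one class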
Example #5
    def local_IK(self):
        #Find the change in joint angles that moves the EE in the delta_x direction
        J = self.kin.jacobian06(self.kin.jangles)
        J_pinv = np.linalg.pinv(J)
        delta_q = np.dot(J_pinv, self.delta_x) * .001

        #Command the new joint angles
        q = self.kin.jangles + delta_q
        return q
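The same pseudo-inverse step on a concrete arm, as a self-contained sketch with a hypothetical 2-link planar Jacobian (jacobian06 and the surrounding class are project-specific):

import numpy as np

def planar_jacobian(q, l1=1.0, l2=1.0):
    # position Jacobian of a 2-link planar arm
    s1, c1 = np.sin(q[0]), np.cos(q[0])
    s12, c12 = np.sin(q[0] + q[1]), np.cos(q[0] + q[1])
    return np.array([[-l1 * s1 - l2 * s12, -l2 * s12],
                     [ l1 * c1 + l2 * c12,  l2 * c12]])

q = np.array([0.3, 0.8])
delta_x = np.array([0.01, 0.0])                   # desired end-effector step
delta_q = np.dot(np.linalg.pinv(planar_jacobian(q)), delta_x)
print(q + delta_q)                                # new joint command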
Example #7
def ADMM_l2Axnb_l1x_1(A, b, Nite, step, l1_r, rho):
    # pf: the project's proximal-operator module (not shown in this excerpt)
    z = np.linalg.pinv(A).dot(b)
    u = np.zeros(z.shape)
    # iteration
    for _ in range(Nite):
        # soft threshold
        x = pf.prox_l2_Axnb(A, b, z - u, rho)
        z = pf.prox_l1_soft_thresh(x + u, l1_r / rho)
        u = u + step * (x - z)
        print(np.linalg.norm(x - z))
    return x
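pf is not shown in this excerpt; its l1 prox is presumably the usual elementwise soft threshold. A sketch of what it likely computes (an assumption, not the project's actual code):

import numpy as np

def prox_l1_soft_thresh(x, thresh):
    # argmin_z thresh*||z||_1 + 0.5*||z - x||^2, applied elementwise
    return np.sign(x) * np.maximum(np.abs(x) - thresh, 0.0)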
Example #8
    def combine_fuse(self, other):
        # information-form fusion: the precisions (inverse covariances) add
        result_covar_mat_inv = self.covar_mat_inv + other.covar_mat_inv
        result_covar_mat = np.linalg.pinv(result_covar_mat_inv)

        own_mean_weight = np.matmul(self.covar_mat_inv, self.mean)
        other_mean_weight = np.matmul(other.covar_mat_inv, other.mean)
        total_mean_weight = own_mean_weight + other_mean_weight

        result_mean = np.matmul(result_covar_mat, total_mean_weight)

        return Schmear(self.space, result_mean, result_covar_mat)
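A one-dimensional sanity check of the fusion rule above, with made-up numbers: the precisions add, and the fused mean is the precision-weighted average of the means.

var_a, mu_a = 4.0, 0.0
var_b, mu_b = 1.0, 5.0
fused_var = 1.0 / (1.0 / var_a + 1.0 / var_b)
fused_mean = fused_var * (mu_a / var_a + mu_b / var_b)
print(fused_var, fused_mean)   # 0.8 4.0 -- pulled toward the tighter estimate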
Example #9
def estimate_it(X, y, Yprev, qbetas):
    """ The function that ACTUALLY estimates beta's -- the rest is all upkeep & maintenance
    """

    inv = np.linalg.pinv(np.dot(X.T, X))

    betas = np.dot(np.dot(inv, X.T), y)  # (X'X)^+ X' y; np.dot multiplies two arrays at a time

    diff = Yprev - np.dot(X, betas)

    qbetas.put((betas, diff))
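A quick check on synthetic data that the pinv-based normal equations above match numpy's least-squares solver:

import numpy as np

rng = np.random.default_rng(3)
X = rng.standard_normal((50, 4))
y = np.dot(X, [1.0, -2.0, 0.5, 0.0]) + 0.01 * rng.standard_normal(50)
betas = np.dot(np.dot(np.linalg.pinv(np.dot(X.T, X)), X.T), y)
print(np.allclose(betas, np.linalg.lstsq(X, y, rcond=None)[0]))   # True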
Example #10
 def log_like_match_hess(self, pr, pu, ps, mov=0):
     #no! I need some fancier data structures here!
     pr1 = self.vec2squaremat(pr)
     pr2 = pr1.T
     pu1 = self.vec2squaremat(pu)
     pu2 = pu1.T
     ps1 = self.vec2squaremat(ps)
     ps2 = ps1.T
     mov1 = self.vec2squaremat(mov)
     mov2 = mov1.T
     F = self.dd_log_Pp(ps1 + ps2) + self.dd_log_Pg(pr1, pr2, mov1)
     return np.diagonal(np.linalg.pinv(-F))  # parameter variances from the inverse negative Hessian
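The last line above is the Laplace-approximation trick: for a (locally) quadratic log-likelihood, the diagonal of the inverse negative Hessian gives per-parameter variances. A toy two-parameter case:

import numpy as np

F = np.array([[-4.0, 0.0],
              [ 0.0, -1.0]])                  # Hessian of a log-likelihood at its peak
print(np.diagonal(np.linalg.pinv(-F)))        # [0.25 1.  ] -- curvier axis, smaller variance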
Example #11
def ADMM_l2Axnb_l1x_2(A, b, Nite, step, l1_r, rho):
    z = np.linalg.pinv(A).dot(b)
    u = np.zeros(z.shape)
    Q_dot, A_T_b = prox_l2_Axnb_precomputpart(A, b, rho)
    # iteration
    for _ in range(Nite):
        # soft threshold
        x = Q_dot(A_T_b + rho * (z - u))  # prox_l2_Axnb_iterpart(Q_dot, A_T_b, z-u, rho)
        z = pf.prox_l1_soft_thresh(x + u, l1_r / rho)
        u = u + step * (x - z)
        print(np.linalg.norm(x - z))
    return x
Example #12
def find_solutions(model, method):

    target_feature = np.loadtxt("target_feature.dat")
    target_feature_err = np.loadtxt("target_feature_err.dat")
    sim_feature = np.loadtxt("sim_feature.dat")
    sim_feature_err = np.loadtxt("sim_feature_err.dat")
    Jacobian = np.loadtxt("Jacobian.dat")
    Jacobian_err = np.loadtxt("Jacobian_err.dat")
    J = Jacobian

    epsilons = model.contact_epsilons
    norm_eps = np.linalg.norm(epsilons)

    ## Normalize the target step. MAYBE NOT NECESSARY
    df = target_feature - sim_feature
    df /= np.linalg.norm(df)

    u,s,v = np.linalg.svd(J)   # numpy returns V^T in the last slot, so v here is V^T
    temp  = list(0.5*(s[:-1] + s[1:])) + [0.0]
    temp.reverse()
    cutoffs = np.array(temp)

    nrm_soln = []
    nrm_resd = []
    condition_number = []
    solutions = []
    Taus = []
    for i in range(len(cutoffs)):
        S = np.zeros(J.shape) 
        tau = cutoffs[i] 
        s_use = s[s > tau]
        S[np.arange(len(s_use)),np.arange(len(s_use))] = 1./s_use
        x_soln = np.dot(v.T,np.dot(S.T,np.dot(u.T,df)))

        residual = np.dot(J,x_soln) - df

        nrm_soln.append(np.linalg.norm(x_soln))
        nrm_resd.append(np.linalg.norm(residual))
        solutions.append(x_soln)
        Taus.append(tau)

        J_use = np.dot(v.T,np.dot(S.T,u.T))
        J_inv_use = np.linalg.pinv(J_use)
        cond_num = np.linalg.norm(J_use)*np.linalg.norm(J_inv_use)
        condition_number.append(cond_num)

    save_solution_data(solutions,Taus,nrm_soln,nrm_resd,norm_eps,condition_number,s)
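The truncation loop above matches numpy's built-in cutoff: a sketch on random data showing that zeroing singular values below tau is what np.linalg.pinv's rcond argument does (it drops singular values at or below rcond times the largest):

import numpy as np

rng = np.random.default_rng(4)
J = rng.standard_normal((6, 4))
df = rng.standard_normal(6)
u, s, vt = np.linalg.svd(J)
tau = 0.5 * (s[1] + s[2])                     # midpoint cutoff, keeps two modes
S = np.zeros(J.shape)
s_use = s[s > tau]
S[np.arange(len(s_use)), np.arange(len(s_use))] = 1. / s_use
x_trunc = np.dot(vt.T, np.dot(S.T, np.dot(u.T, df)))
x_pinv = np.dot(np.linalg.pinv(J, rcond=tau / s[0]), df)
print(np.allclose(x_trunc, x_pinv))           # True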
Example #13
def NIPALS(X, adj_matrix, mode, scheme, not_normed = [], prox_op = prox_op.ProxOp(),
           max_iter = MAX_ITER, tolerance = TOLERANCE, **kwargs):
    """Inner loop of the NIPALS algorithm.

    Performs the NIPALS algorithm on the supplied tuple or list of numpy
    arrays in X. This method applies for 1, 2 or more blocks.

    One block would result in e.g. PCA; two blocks would result in e.g.
    SVD or CCA; and multiblock (n > 2) would be for instance GCCA,
    PLS-PM or MAXDIFF.

    This function uses Wold's procedure (based on Gauss-Seidel iteration)
    for fast convergence.

    Parameters
    ----------
    X          : A tuple or list with n numpy arrays of shape [M, N_i],
                 i=1,...,n. These are the training set.

    adj_matrix : Adjacency matrix that is a numpy array of shape [n, n].
                 If an element in position adj_matrix[i,j] is 1, then blocks
                 i and j are connected, and 0 otherwise. If adj_matrix is
                 None, then all matrices are assumed to be connected.

    mode       : A tuple or list with n elements with the mode to use for
                 a matrix. The mode is represented by a string: "A", "B"
                 or "NewA". If mode = None, then "NewA" is used.

    scheme     : The inner weighting scheme to use in the algorithm. The
                 scheme may be "Horst", "Centroid" or "Factorial". If
                 scheme = None, then Horst's scheme is used (the inner
                 weighting scheme is the identity).

    max_iter   : The number of iterations before the algorithm is forced
                 to stop. The default number of iterations is 500.

    tolerance  : The level below which we treat numbers as zero. This is
                 used as the stop criterion in the algorithm. A smaller
                 value gives more accurate results but takes longer to
                 compute. The default tolerance is 5E-07.

    not_normed : In some algorithms, e.g. PLS regression, the weights or
                 loadings of some matrices (e.g. Y) are not normalised.
                 This tuple or list contains the indices in X of those
                 matrices that should not be normalised. Thus, for PLS
                 regression, this argument would be not_normed = (2,). If
                 not_normed = None, then all matrices are subject to
                 normalisation of either weights or scores depending on
                 the modes used.

    Returns
    -------
    W          : A list with n numpy arrays of weights of shape [N_i, 1].
    """

    n = len(X)

    mode           = make_list(mode, n, NEWA)
    scheme         = make_list(scheme, n, HORST)
#    soft_threshold = make_list(soft_threshold, n, 0)

    W = []
    for Xi in X:
        w = _start_vector(Xi, largest = True)
        W.append(w)

    # Main NIPALS loop
    iterations = 0
    while True:
        converged = True
        for i in range(n):
            Xi = X[i]
            ti = dot(Xi, W[i])
            ui = np.zeros(ti.shape)
            for j in range(n):
                Xj = X[j]
                wj = W[j]
                tj = dot(Xj, wj)

                # Determine weighting scheme and compute weight
                if scheme[i] == HORST:
                    eij = 1
                elif scheme[i] == CENTROID:
                    eij = sign(corr(ti, tj))
                elif scheme[i] == FACTORIAL:
                    eij = corr(ti, tj)

                # Internal estimation using connected matrices' score vectors
                if adj_matrix[i,j] != 0 or adj_matrix[j,i] != 0:
                    ui += eij*tj

            # External estimation
            if mode[i] == NEWA or mode[i] == A:
                # TODO: Ok with division here?
                wi = dot(Xi.T, ui) / dot(ui.T, ui)
            elif mode[i] == B:
                wi = dot(np.linalg.pinv(Xi), ui) # TODO: Precompute to speed up!

            # Apply proximal operator
#            if soft_threshold[i] > 0:
            wi  = prox_op.prox(wi, i)
#                wi = _soft_threshold(wi, soft_threshold[i], copy = False)

            # Normalise weight vectors according to their weighting scheme
            if mode[i] == NEWA and i not in not_normed:
                # Normalise weight vector wi to unit variance
                wi /= norm(wi)
            elif (mode[i] == A or mode[i] == B) and i not in not_normed:
                # Normalise score vector ti to unit variance
                wi /= norm(dot(Xi, wi))
                wi *= np.sqrt(wi.shape[0])

            # Check convergence for each weight vector. They all have to leave
            # converged = True in order for the algorithm to stop.
            diff = wi - W[i]
            if dot(diff.T, diff) > tolerance:
                converged = False

            # Save updated weight vector
            W[i] = wi

        if converged:
            break

        if iterations >= max_iter:
            warnings.warn('Maximum number of iterations reached '
                          'before convergence')
            break

        iterations += 1

    return W
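A hypothetical two-block call (module constants such as NEWA and HORST, make_list, and the default prox_op are assumed to be in scope as in the source file); per the docstring, with two connected blocks and the default mode this behaves like a CCA/SVD-style inner loop:

import numpy as np

X1 = np.random.randn(100, 10)
X2 = np.random.randn(100, 5)
adj = np.array([[0, 1],
                [1, 0]])           # the two blocks are connected to each other
W = NIPALS((X1, X2), adj, mode=None, scheme=None)
print(W[0].shape, W[1].shape)      # one weight vector of shape [N_i, 1] per block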
Example #14
import rospy
import numpy as npy

number_objects = 5
number_actions = 4
number_scenes = 5

# object_list = npy.zeros((number_objects,1))
action_list = npy.zeros((number_actions, 1))
search_objs = npy.zeros((number_objects, 1))
search_order = npy.zeros((number_objects, 1))

scene_obj_matrix = npy.zeros((number_scenes, number_objects))
scene_act_matrix = npy.zeros((number_scenes, number_actions))
obj_aff_matrix = npy.zeros((number_actions, number_objects))

# in_scene_obj(k, i) / in_scene_act(k, j) are assumed to be defined elsewhere
for k in range(number_scenes):
	for i in range(number_objects):
		if in_scene_obj(k, i):
			scene_obj_matrix[k][i] = 1
	for j in range(number_actions):
		if in_scene_act(k, j):
			scene_act_matrix[k][j] = 1

# least-squares affordances: solve scene_obj_matrix * aff = scene_act_matrix[:, j]
for j in range(number_actions):
	obj_aff_matrix[j] = npy.dot(npy.linalg.pinv(scene_obj_matrix), scene_act_matrix[:, j])
	obj_aff_matrix[j] = obj_aff_matrix[j] / sum(obj_aff_matrix[j])

print("Object Affordance Matrix:")
print(obj_aff_matrix)
Example #15
 def __init__(self, space, mean, covariance_mat):
     self.space = space
     self.mean = mean
     self.covar_mat = covariance_mat
     self.covar_mat_inv = np.linalg.pinv(covariance_mat)
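A short note on why pinv rather than inv here: a rank-deficient covariance has no ordinary inverse, but its pseudo-inverse is still defined, so a degenerate (flat) distribution can still be represented.

import numpy as np

cov = np.array([[1.0, 1.0],
                [1.0, 1.0]])            # rank 1: np.linalg.inv would raise LinAlgError
print(np.linalg.pinv(cov))              # [[0.25 0.25] [0.25 0.25]]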