def getOrientationStep(self, q_h_orient, q_g_orient, max_ang_step):
    ang = ut.quat_angle(q_h_orient, q_g_orient)
    ang_mag = abs(ang)
    step_fraction = 0.001
    if step_fraction * ang_mag > max_ang_step:
        # this is pretty much always true, can clean up the code.
        step_fraction = max_ang_step / ang_mag
    interp_q_goal = tr.tft.quaternion_slerp(q_h_orient, q_g_orient, step_fraction)
    return interp_q_goal
def goalOrientationInQuat(q_h_orient, q_g_orient, max_ang_step):
    ang = ut.quat_angle(q_h_orient, q_g_orient)
    ang_mag = abs(ang)
    step_fraction = 0.001
    if step_fraction * ang_mag > max_ang_step:
        # this is pretty much always true, can clean up the code.
        step_fraction = max_ang_step / ang_mag
    interp_q_goal = tr.tft.quaternion_slerp(q_h_orient, q_g_orient, step_fraction)
    delta_q_des = tr.tft.quaternion_multiply(interp_q_goal, tr.tft.quaternion_inverse(q_h_orient))
    return np.matrix(delta_q_des[0:3]).T
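#------------------------------------------------------------------------------#
# Illustrative example (not part of the original controller). A minimal,
# self-contained sketch of the slerp-step logic above, assuming the [x, y, z, w]
# quaternion convention used by tf.transformations. quat_angle_demo and
# slerp_demo are stand-ins for ut.quat_angle and tr.tft.quaternion_slerp; the
# point is that capping the slerp fraction at max_ang_step / ang_mag limits the
# commanded rotation to max_ang_step per control step.
import numpy as np

def quat_angle_demo(q_a, q_b):
    # Rotation angle between two unit quaternions.
    d = min(1.0, abs(np.dot(q_a, q_b)))
    return 2.0 * np.arccos(d)

def slerp_demo(q_a, q_b, fraction):
    # Plain spherical linear interpolation between unit quaternions.
    q_a = np.asarray(q_a, dtype=float)
    q_b = np.asarray(q_b, dtype=float)
    d = np.dot(q_a, q_b)
    if d < 0.0:  # take the short way around
        q_b, d = -q_b, -d
    theta = np.arccos(min(1.0, d))
    if theta < 1e-8:
        return q_a
    return (np.sin((1.0 - fraction) * theta) * q_a
            + np.sin(fraction * theta) * q_b) / np.sin(theta)

if __name__ == '__main__':
    q_h = np.array([0., 0., 0., 1.])                                  # current orientation (identity)
    q_g = np.array([0., 0., np.sin(np.pi / 4.), np.cos(np.pi / 4.)])  # goal: 90 deg about z
    max_ang_step = np.radians(1.0)
    ang_mag = abs(quat_angle_demo(q_h, q_g))
    interp_q_goal = slerp_demo(q_h, q_g, max_ang_step / ang_mag)
    print np.degrees(quat_angle_demo(q_h, interp_q_goal))             # ~1.0 deg per step
#------------------------------------------------------------------------------#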
def goalOrientationInQuat(self, q_h_orient, q_g_orient, ang_dist_g):
    # If the goal orientation is invalid, use the current end effector orientation.
    if not q_g_orient:
        q_g_orient = q_h_orient

    ang = ut.quat_angle(q_h_orient, q_g_orient)
    ang_mag = abs(ang)
    if ang_mag < 1e-6:
        return np.matrix([0., 0., 0.]).T

    if ang_mag > self.orientation_step_scaling_radius:
        # Move the fraction equal to the max angular velocity per step.
        slerp_fraction = ang_dist_g / ang_mag
    else:
        slerp_fraction = ang_dist_g / self.orientation_step_scaling_radius

    interp_q_goal = tr.tft.quaternion_slerp(q_h_orient, q_g_orient, slerp_fraction)
    delta_q_des = tr.tft.quaternion_multiply(interp_q_goal, tr.tft.quaternion_inverse(q_h_orient))
    return np.matrix(delta_q_des[0:3]).T
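#------------------------------------------------------------------------------#
# Illustrative example (not part of the original controller). Hedged numeric
# sketch of the slerp-fraction scaling above: outside the scaling radius the hand
# rotates a constant ang_dist_g per step; inside it the step shrinks linearly
# with the remaining angle, damping the approach to the goal. The radius and
# step values below are made up, not the controller defaults.
import numpy as np

ang_dist_g = np.radians(0.5)          # per-step angular travel at full speed
scaling_radius = np.radians(10.0)     # assumed orientation_step_scaling_radius
for remaining in np.radians([45.0, 10.0, 5.0, 1.0]):
    if remaining > scaling_radius:
        slerp_fraction = ang_dist_g / remaining        # constant-speed regime
    else:
        slerp_fraction = ang_dist_g / scaling_radius   # decelerating regime
    print np.degrees(remaining), np.degrees(slerp_fraction * remaining)  # commanded step (deg)
#------------------------------------------------------------------------------#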
def deltaQpJepGen(self, mpc_dat):
    # If we have invalid control data, do nothing.
    if mpc_dat is None:
        return None

    # If we're within the deadzone band, do nothing.
    dist_to_goal = np.linalg.norm(mpc_dat.x_h - mpc_dat.x_g)
    ang_to_goal = ut.quat_angle(mpc_dat.q_h_orient, mpc_dat.q_g_orient)

    dist_g = mpc_dat.dist_g  # The computed step based on the controller frequency and desired cartesian velocity.
    ang_dist_g = mpc_dat.ang_dist_g

    # Deadzone on pose motion.
    in_deadzone = False
    if (mpc_dat.orient_weight > 0.0 and mpc_dat.position_weight > 0.0):  # Position and orientation, most common scenario
        if abs(dist_to_goal) < self.deadzone_distance and abs(ang_to_goal) < self.deadzone_angle:
            in_deadzone = True
    elif mpc_dat.position_weight > 0.0:  # Position only
        if abs(dist_to_goal) < self.deadzone_distance:
            in_deadzone = True
    elif mpc_dat.orient_weight > 0.0:  # Orientation only
        if abs(ang_to_goal) < self.deadzone_angle:
            in_deadzone = True

    # Don't command any pose motion.
    if in_deadzone:
        dist_g = 0.0
        ang_dist_g = 0.0
        if self.currently_in_deadzone == False:
            if self.verbose:
                rospy.loginfo("MPC entered deadzone: pos %s (%s); orient %s (%s)"
                              % (str(dist_to_goal), str(self.deadzone_distance),
                                 str(np.degrees(ang_to_goal)), str(np.degrees(self.deadzone_angle))))
            self.currently_in_deadzone = True

        # If we're in the deadzone and have no forces, return zeroes.
        if len(mpc_dat.loc_l) == 0:
            return [0.0] * len(self.joint_angles)
    else:
        self.currently_in_deadzone = False

    # Generate the position/orientation deltas. These are slewed to set step size.
    delta_pos_g = self.goalMotionForHand(mpc_dat.x_h, mpc_dat.x_g, dist_g)
    delta_orient_g = self.goalOrientationInQuat(mpc_dat.q_h_orient, mpc_dat.q_g_orient, ang_dist_g)

    # Build the delta_x_g vector for the controller.
    # if mpc_dat.position_weight == 0.:
    #     delta_x_g = delta_orient_g
    #     J_h = mpc_dat.Je[3:]
    #     T_quat = 0.5 * (mpc_dat.q_h_orient[3]
    #                     * np.matrix(np.eye(3))
    #                     - haptic_mpc_util.getSkewMatrix(mpc_dat.q_h_orient[0:3]))
    #     J_h = T_quat*J_h * np.sqrt(mpc_dat.orient_weight)
    #
    # elif mpc_dat.orient_weight == 0.:
    #     delta_x_g = delta_pos_g
    #     J_h = mpc_dat.Je[0:3] * np.sqrt(mpc_dat.position_weight)
    # else:
    #     delta_x_g = np.vstack((delta_pos_g*np.sqrt(mpc_dat.position_weight),
    #                            delta_orient_g*np.sqrt(mpc_dat.orient_weight)))
    #
    #     J_h = mpc_dat.Je
    #     T_quat = 0.5 * (mpc_dat.q_h_orient[3]
    #                     * np.matrix(np.eye(3))
    #                     - haptic_mpc_util.getSkewMatrix(mpc_dat.q_h_orient[0:3]))
    #
    #     J_h[3:] = T_quat*J_h[3:]
    #     J_h[0:3] = J_h[0:3] * np.sqrt(mpc_dat.position_weight)
    #     J_h[3:] = J_h[3:] * np.sqrt(mpc_dat.orient_weight)
    # TODO - NEW MATHS. Explicitly apply weights - it's much easier to understand.

    # Combine position/orientation goals.
    delta_x_g = np.vstack((delta_pos_g, delta_orient_g))

    J_h = mpc_dat.Je
    T_quat = 0.5 * (mpc_dat.q_h_orient[3] * np.matrix(np.eye(3))
                    - haptic_mpc_util.getSkewMatrix(mpc_dat.q_h_orient[0:3]))
    J_h[3:] = T_quat * J_h[3:]
    J_h[0:3] = J_h[0:3]  # * np.sqrt(mpc_dat.position_weight)
    J_h[3:] = J_h[3:]  # * np.sqrt(mpc_dat.orient_weight)

    # If both position and orientation are used (dim = 6, not 3), there are more competing
    # terms in the cost function; increase the weight on force reduction to compensate.
    if delta_x_g.shape[0] == 6:
        mpc_dat.force_weight *= 10

    n_joints = mpc_dat.K_j.shape[0]
    J_h[:, mpc_dat.control_point_joint_num:] = 0.
    J_h = np.matrix(J_h[:, 0:n_joints])  # comes into play with Cody and the wrist cover

    # If torque constraints are violated - i.e. Q_des is far from Q - then reset Q_des to Q.
    q_diff = np.array(mpc_dat.jep) - np.array(mpc_dat.q)
    max_q_diff = np.max(np.abs(q_diff))
    if max_q_diff > self.angle_reset_threshold:
        # Reset JEP to Q.
        if self.verbose:
            rospy.loginfo("JEPs too far from current position - capping them to limit")
        #self.publishDesiredJointAngles(self.getJointAngles())
        new_q_des = np.clip(mpc_dat.jep,
                            mpc_dat.q - self.angle_reset_threshold,
                            mpc_dat.q + self.angle_reset_threshold)
        self.publishDesiredJointAngles(new_q_des.tolist())
        return None

    # If the force limit is exceeded, exit and shut down. There may be a better behavior.
    if len(mpc_dat.f_l) != 0:
        if np.max(mpc_dat.f_l) > self.stopping_force:
            rospy.logwarn("Haptic MPC: MAXIMUM ALLOWABLE FORCE EXCEEDED. SHUTTING DOWN!")
            if self.verbose:
                print np.max(mpc_dat.f_l)
            rospy.signal_shutdown("Haptic MPC: MAXIMUM ALLOWABLE FORCE EXCEEDED. SHUTTING DOWN!")

    # Postural term input.
    delta_theta_des = self.goalDeltaPosture(mpc_dat.goal_posture, mpc_dat.q,
                                            self.max_theta_step, self.theta_step_scale)
    # Trim to the number of DOFs being used - not necessarily the number of joints present, eg Cody 5DOF.
    if len(delta_theta_des) > n_joints:
        delta_theta_des = delta_theta_des[0:n_joints]

    cost_quadratic_matrices, cost_linear_matrices, \
        constraint_matrices, \
        constraint_vectors, lb, ub = esm.convert_to_qp_posture(J_h,  # 6xDOF end effector jacobian
                                                               mpc_dat.Jc_l,  # list of contacts
                                                               mpc_dat.K_j,  # joint stiffnesses (DOFxDOF)
                                                               mpc_dat.Kc_l,  # list of 3x3s
                                                               mpc_dat.Rc_l,  # list of 3x3s
                                                               mpc_dat.delta_f_min,  # num contacts * 1
                                                               mpc_dat.delta_f_max,
                                                               mpc_dat.phi_curr,  # DOF
                                                               delta_x_g,
                                                               mpc_dat.f_n,
                                                               mpc_dat.q,
                                                               self.joint_limits_min,  # joint limits (used to be kinematics object)
                                                               self.joint_limits_max,
                                                               mpc_dat.jerk_opt_weight,  # between 0.000000001 and .0000000001, cool things happened
                                                               mpc_dat.max_force_mag,
                                                               delta_theta_des,
                                                               mpc_dat.posture_weight,
                                                               mpc_dat.position_weight,
                                                               mpc_dat.orient_weight,
                                                               mpc_dat.force_weight,
                                                               mpc_dat.force_reduction_goal,
                                                               self.angle_constraint_threshold)

    # Trim the lower/upper bounds to the number of joints being used (may be less than DOF - eg Cody).
    lb = lb[0:n_joints]
    ub = ub[0:n_joints]

    delta_phi_opt, opt_error, feasible = esm.solve_qp(cost_quadratic_matrices,
                                                      cost_linear_matrices,
                                                      constraint_matrices,
                                                      constraint_vectors,
                                                      lb, ub,
                                                      debug_qp=False,
                                                      verbose=self.verbose)

    # Updated joint positions.
    delta_phi = np.zeros(len(self.joint_names))
    delta_phi[0:n_joints] = delta_phi_opt.T

    # Warn if JEP goes beyond joint limits.
    #if self.robot_kinematics.within_joint_limits(mpc_dat.jep) == False:
    #    rospy.logwarn('Outside joint limits. They will be clamped later...')
    #    rospy.logwarn("Limits: %s" % str(self.robot_kinematics.joint_lim_dict))
    #    rospy.logwarn("Current JEP: %s" % str(mpc_dat.jep))

    return delta_phi  # Return a joint position delta
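#------------------------------------------------------------------------------#
# Illustrative example (not part of the original controller). Hedged sketch of
# the quaternion-rate map used in deltaQpJepGen above: with q_h_orient = [v, w]
# in [x, y, z, w] order, T_quat = 0.5 * (w * I - skew(v)) maps a spatial angular
# velocity to the vector part of the quaternion derivative, which is why the
# bottom three Jacobian rows are premultiplied by it. skew_demo is a stand-in for
# haptic_mpc_util.getSkewMatrix, written from the standard definition.
import numpy as np

def skew_demo(v):
    return np.matrix([[0., -v[2], v[1]],
                      [v[2], 0., -v[0]],
                      [-v[1], v[0], 0.]])

q = np.array([0., 0., np.sin(0.2), np.cos(0.2)])            # some unit quaternion
T_quat = 0.5 * (q[3] * np.matrix(np.eye(3)) - skew_demo(q[0:3]))
omega = np.matrix([[0.], [0.], [1.]])                       # 1 rad/s about z
print T_quat * omega                                        # vector part of q_dot
#------------------------------------------------------------------------------#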
def initMPCData(self):
    # Copy the latest data received from the robot haptic state.
    # To ensure the data is synchronised, all necessary data is copied with the lock
    # rather than using the accessor functions.
    with self.state_lock:
        q = copy.copy(self.joint_angles)
        q_des = copy.copy(self.desired_joint_angles)
        Jc = copy.copy(self.Jc)
        skin_data = copy.copy(self.skin_data)
        ee_pos = copy.copy(self.end_effector_pos)
        ee_orient_quat = copy.copy(self.end_effector_orient_quat)
        joint_stiffness = copy.copy(self.joint_stiffness)
        Je = copy.copy(self.Je)

    # Copy the goal parameters.
    with self.goal_lock:
        goal_pos = copy.copy(self.goal_pos)
        goal_orient_quat = copy.copy(self.goal_orient_quat)

    with self.posture_lock:
        goal_posture = copy.copy(self.goal_posture)

    n_l = haptic_mpc_util.getNormals(skin_data)
    f_l = haptic_mpc_util.getValues(skin_data)
    loc_l = haptic_mpc_util.getLocations(skin_data)

    # Control params
    k_default = self.static_contact_stiffness_estimate
    dist_g = self.time_step * self.goal_velocity_for_hand
    ang_dist_g = self.time_step * self.goal_ang_velocity_for_hand

    # Compute various matrices.
    Kc_l = self.contactStiffnessMatrix(n_l, k_default)  # Using default k_est_min and k_est_max
    Rc_l = self.contactForceTransformationMatrix(n_l)

    # Calculate the normal components of the current contact forces.
    tmp_l = [R_ci * f_ci for R_ci, f_ci in it.izip(Rc_l, f_l)]
    f_n = np.matrix([tmp_i[0, 0] for tmp_i in tmp_l]).T

    delta_f_min, delta_f_max = self.deltaForceBounds(f_n,
                                                     max_pushing_force=self.allowable_contact_force,
                                                     max_pulling_force=self.allowable_contact_force,
                                                     max_pushing_force_increase=self.max_delta_force_mag,
                                                     max_pushing_force_decrease=self.max_delta_force_mag,
                                                     min_decrease_when_over_max_force=0.,
                                                     max_decrease_when_over_max_force=10.0)

    q_des_matrix = (np.matrix(q_des)[0:len(q_des)]).T

    K_j = np.diag(joint_stiffness)

    # Je could potentially be a list of end effector jacobians.
    # We only care about the first one in this implementation.
    if len(Je) >= 1:
        Je = Je[0]

    # Calculate MPC orientation/position weight.
    # This is a bad hack to essentially damp the response as it approaches the goal.
    with self.gain_lock:
        orient_weight = self.orient_weight
        position_weight = self.pos_weight
        posture_weight = self.posture_weight
    planar = False

    # Evaluate current goals to give the controller some meaningful input.
    # If the goal is non-existent, use the current position/orientation/posture as the goal.
    # Position
    x_h = ee_pos  # numpy array
    x_g = goal_pos
    if x_g is None:
        x_g = x_h
    # Orientation
    q_h_orient = ee_orient_quat
    q_g_orient = goal_orient_quat
    if q_g_orient is None:
        q_g_orient = q_h_orient
    # Posture
    if goal_posture is None:
        goal_posture = q

    dist_goal_2D = np.linalg.norm((x_g - x_h)[0:2])
    dist_goal_3D = np.linalg.norm(x_g - x_h)
    #if planar:
    #    dist_goal = dist_goal_2D
    #else:
    dist_goal = dist_goal_3D

    angle_error = ut.quat_angle(q_h_orient, q_g_orient)

    jerk_opt_weight = self.jerk_opt_weight

    if orient_weight != 0:
        pos_weight_scale = 1.
        orient_weight_scale = 1.
        if dist_g < self.position_weight_scaling_radius:
            pos_weight_scale = dist_g / self.position_weight_scaling_radius
            position_weight *= pos_weight_scale
        if ang_dist_g < self.orientation_weight_scaling_radius:
            orient_weight_scale = ang_dist_g / self.orientation_weight_scaling_radius
            orient_weight *= orient_weight_scale
        # Reduce the weight of the jerk reduction term so that it doesn't dominate.
        # This function is arbitrary, gives good behavior on PR2.
        jerk_opt_weight *= np.mean([pos_weight_scale, orient_weight_scale])
        # Original re-weighting function used for scaling jerk_opt_weight
        #jerk_opt_weight = max(jerk_opt_weight * position_weight * 2, jerk_opt_weight)

    # Initialise and return the mpc data structure.
    mpc_data = MPCData(q=q,
                       x_h=x_h,  # numpy array
                       x_g=x_g,  # numpy array
                       dist_g=dist_g,
                       ang_dist_g=ang_dist_g,
                       goal_posture=goal_posture,
                       q_h_orient=q_h_orient,
                       q_g_orient=q_g_orient,
                       position_weight=position_weight,
                       orient_weight=orient_weight,
                       posture_weight=posture_weight,
                       force_weight=self.force_weight,
                       force_reduction_goal=self.force_reduction_goal,  # default desired delta for force reduction
                       control_point_joint_num=len(q),  # number of joints
                       Kc_l=Kc_l,
                       Rc_l=Rc_l,
                       Jc_l=Jc,
                       Je=Je,  # NB: Je is a list of end effector jacobians (to use the same utils functions as Jc)
                       delta_f_min=delta_f_min,
                       delta_f_max=delta_f_max,
                       phi_curr=q_des_matrix,
                       K_j=K_j,
                       loc_l=loc_l,  # Contact locations from taxels. From skin client.
                       n_l=n_l,  # Normals for force locations
                       f_l=f_l,  # Cartesian force components for contacts
                       f_n=f_n,  # Normal force for contacts
                       jerk_opt_weight=jerk_opt_weight,  # From control params
                       max_force_mag=self.allowable_contact_force,  # From control params
                       jep=q_des,
                       time_step=self.time_step,
                       stop=False)
    return mpc_data
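#------------------------------------------------------------------------------#
# Illustrative example (not part of the original controller). Hedged sketch of
# the normal-force extraction in initMPCData above: each Rc_i is assumed to
# rotate a contact force into a frame whose first axis is the contact normal, so
# the first component of Rc_i * f_i is the normal force f_n used in the contact
# constraints. The rotation and force below are made-up values for a normal
# pointing along world +y.
import numpy as np
import itertools as it

Rc_l = [np.matrix([[0., 1., 0.],     # first row = contact normal direction
                   [-1., 0., 0.],
                   [0., 0., 1.]])]
f_l = [np.matrix([[0.5], [2.0], [0.1]])]   # contact force in the world frame (N)
tmp_l = [R_ci * f_ci for R_ci, f_ci in it.izip(Rc_l, f_l)]
f_n = np.matrix([tmp_i[0, 0] for tmp_i in tmp_l]).T
print f_n                                  # [[2.0]]: the component along the normal
#------------------------------------------------------------------------------#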
def angularDistanceBetweenPoses(self, poseA, poseB):
    quatA = [poseA.orientation.x, poseA.orientation.y, poseA.orientation.z, poseA.orientation.w]
    quatB = [poseB.orientation.x, poseB.orientation.y, poseB.orientation.z, poseB.orientation.w]
    ang_diff = ut.quat_angle(quatA, quatB)
    return ang_diff
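#------------------------------------------------------------------------------#
# Illustrative example (not part of the original code). Minimal usage sketch for
# angularDistanceBetweenPoses, assuming a ROS workspace where geometry_msgs is
# available. Two poses differing by a 90-degree yaw should give an angular
# distance of about pi/2 radians.
import numpy as np
from geometry_msgs.msg import Pose

poseA = Pose()
poseA.orientation.w = 1.0                      # identity orientation
poseB = Pose()
poseB.orientation.z = np.sin(np.pi / 4.)
poseB.orientation.w = np.cos(np.pi / 4.)       # 90 deg about z
# print self.angularDistanceBetweenPoses(poseA, poseB)   # expected: ~1.571 rad
#------------------------------------------------------------------------------#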
def test(self, raw_data):
    print 'Start clustering.'
    print raw_data.shape

    N = 1000

    #-----------------------------------------------------------#
    ## Initialization
    raw_pos = np.zeros((N, 3))  # array
    raw_quat = np.zeros((N, 4))

    #-----------------------------------------------------------#
    ## Decompose data into pos, quat pairs
    for i in xrange(N):
        raw_pos[i, :] = np.array([0, 0, 0])

    ## raw_quat = qt.quat_random( N )
    quat_mean = np.array([1., 0., 0., 1.5])
    raw_quat = qt.quat_QuTem(quat_mean / np.linalg.norm(quat_mean), N, [0.03, 0.3, 0.3, 1.0])

    ## quat_mean = np.array([0.,1.,0.,-1.5])
    ## raw_quat2 = qt.quat_QuTem( quat_mean/np.linalg.norm(quat_mean), N/2.0, [0.1,1.0,0.1,1.0] )
    ## raw_quat = np.vstack([raw_quat1,raw_quat2])

    ## raw_quat1 = np.array([[1., 0., 0., 0.],
    ##                       [1., 0.1, 0., 0.],
    ##                       [1., 0., 0.1, 0.],
    ##                       [1., 0., 0., 0.1],
    ##                       [1., 0.2, 0., 0.],
    ##                       [1., 0., 0.2, 0.],
    ##                       [1., 0., 0., 0.2],
    ##                       [1.1, 0.1, 0., 0.],
    ##                       [1.1, 0., 0.1, 0.],
    ##                       [1.1, 0., 0., 0.1]])
    ## raw_quat2 = np.array([[0., 0., 1., 0.],
    ##                       [0.1, 0., 1.1, 0.],
    ##                       [0.1, 0.1, 1., 0.],
    ##                       [0., 1., 0., 0.],
    ##                       [0.1, 1., 0.1, 0.],
    ##                       [0.1, 1.1, 0., 0.],
    ##                       [0.1, 1., 0.4, 0.],
    ##                       [0.1, 1., 1.1, 0.2],
    ##                       [0.1, 1., 1.4, 0.],
    ##                       [1.1, 1., 0.1, 0.2],
    ##                       [1.1, 1.1, 0.1, 0.1]])
    ## raw_quat = np.vstack([raw_quat1,raw_quat2])

    for i in xrange(len(raw_quat)):
        raw_quat[i, :] /= np.linalg.norm(raw_quat[i, :])

    #-----------------------------------------------------------#
    pos_clustered_group = []
    raw_group = np.hstack([raw_pos, raw_quat])
    pos_clustered_group.append(raw_group)

    print "Number of pos groups: ", len(pos_clustered_group)

    #-----------------------------------------------------------#
    ## Grouping by orientation
    clustered_group = []
    for group in pos_clustered_group:
        # samples
        X = group[:, 3:]

        # Clustering parameters
        nQuatCluster = self.nQuatCluster
        kmsample = nQuatCluster  # 0: random centres, > 0: kmeanssample
        kmdelta = .001
        kmiter = 10
        metric = "quaternion"  # "chebyshev" = max, "cityblock" L1, Lqmetric

        # the number of clusters should be smaller than the number of samples
        if nQuatCluster > len(X):
            nQuatCluster = len(X)
            kmsample = len(X)

        # Clustering
        while True:
            centres, xtoc, dist = km.kmeanssample(X, nQuatCluster, nsample=kmsample,
                                                  delta=kmdelta, maxiter=kmiter,
                                                  metric=metric, verbose=0)

            # co-distance matrix
            bReFit = False
            co_pos_mat = np.zeros((nQuatCluster, nQuatCluster))
            for i in xrange(nQuatCluster):
                # For refitting
                if bReFit == True:
                    break
                for j in xrange(i, nQuatCluster):
                    if i == j:
                        co_pos_mat[i, j] = 1000000  # to avoid minimum check
                        continue
                    co_pos_mat[i, j] = co_pos_mat[j, i] = ut.quat_angle(centres[i], centres[j])
                    if co_pos_mat[i, j] < self.fMinQuatDist:
                        bReFit = True
                        break

            if bReFit == True:
                nQuatCluster -= 1
                ## print "New # of clusters ", nQuatCluster, " in a sub group "
                continue
            else:
                break

        ## for i in xrange(nQuatCluster):
        ##     raw_group = []
        ##     for j in xrange(len(group)):
        ##         if xtoc[j] == i:
        ##             if raw_group == []:
        ##                 raw_group = np.array([group[j,:]])
        ##             else:
        ##                 raw_group = np.vstack([raw_group, group[j,:]])
        ##     clustered_group.append(raw_group)

    print "Number of pos+quat groups: ", len(clustered_group)
    self.q_image_axis_cluster(X, xtoc)
def clustering(self, raw_data):
    print 'Start clustering.'
    print raw_data.shape

    #-----------------------------------------------------------#
    ## Initialization
    raw_pos, raw_quat = self.mat_to_pos_quat(raw_data)

    #-----------------------------------------------------------#
    ## K-means clustering by position
    raw_pos_index = self.pos_clustering(raw_pos)

    pos_clustered_group = []
    for i in xrange(self.nCluster):
        raw_group = []
        for j in xrange(len(raw_data)):
            if raw_pos_index[j] == i:
                if raw_group == []:
                    raw_group = np.array([np.hstack([raw_pos[j], raw_quat[j]])])
                else:
                    raw_group = np.vstack([raw_group, np.hstack([raw_pos[j], raw_quat[j]])])
        pos_clustered_group.append(raw_group)

    print "Number of pos groups: ", len(pos_clustered_group)

    #-----------------------------------------------------------#
    ## Grouping by orientation
    clustered_group = []
    for group in pos_clustered_group:
        # samples
        X = group[:, 3:]
        ## print "Total X: ", X.shape[0], len(X)

        # Clustering parameters
        nQuatCluster = self.nQuatCluster
        kmsample = nQuatCluster  # 0: random centres, > 0: kmeanssample
        kmdelta = .001
        kmiter = 10
        metric = "quaternion"  # "chebyshev" = max, "cityblock" L1, Lqmetric

        # the number of clusters should be smaller than the number of samples
        if nQuatCluster > len(X):
            nQuatCluster = len(X)
            kmsample = len(X)

        # Clustering
        while True:
            centres, xtoc, dist = km.kmeanssample(X, nQuatCluster, nsample=kmsample,
                                                  delta=kmdelta, maxiter=kmiter,
                                                  metric=metric, verbose=0)

            # co-distance matrix
            bReFit = False
            co_pos_mat = np.zeros((nQuatCluster, nQuatCluster))
            for i in xrange(nQuatCluster):
                # For refitting
                if bReFit == True:
                    break
                for j in xrange(i, nQuatCluster):
                    if i == j:
                        co_pos_mat[i, j] = 1000000  # to avoid minimum check
                        continue
                    co_pos_mat[i, j] = co_pos_mat[j, i] = ut.quat_angle(centres[i], centres[j])
                    if co_pos_mat[i, j] < self.fMinQuatDist:
                        bReFit = True
                        break

            if bReFit == True:
                nQuatCluster -= 1
                ## print "New # of clusters ", nQuatCluster, " in a sub group "
                continue
            else:
                break

        for i in xrange(nQuatCluster):
            raw_group = []
            for j in xrange(len(group)):
                if xtoc[j] == i:
                    if raw_group == []:
                        raw_group = np.array([group[j, :]])
                    else:
                        raw_group = np.vstack([raw_group, group[j, :]])
            clustered_group.append(raw_group)

    print "Number of pos+quat groups: ", len(clustered_group)

    #-----------------------------------------------------------#
    ## Averaging
    avg_clustered_data = []
    num_clustered_data = []
    count = 0
    for i, g in enumerate(clustered_group):
        if len(g) == 0:
            continue
        count += len(g)
        ## print "Number of sub samples: ", len(g)

        # Position
        pos_sum = np.array([0., 0., 0.])
        for j, s in enumerate(g):
            pos_sum += s[0:3]
            if j == 0:
                quat_array = np.array([s[3:]])
            else:
                quat_array = np.vstack([quat_array, s[3:]])
        pos_avg = pos_sum / float(len(g))

        # Quaternion
        quat_avg = qt.quat_avg(quat_array)

        avg_clustered_data.append([pos_avg, quat_avg])
        num_clustered_data.append([len(g)])
    ## print "total: ", count

    # Reshape the pairs into transformation matrices
    for i, g in enumerate(avg_clustered_data):
        mat = tft.quaternion_matrix(g[1])
        mat[0, 3] = g[0][0]
        mat[1, 3] = g[0][1]
        mat[2, 3] = g[0][2]
        if i == 0:
            clustered_data = np.array([mat])
        else:
            clustered_data = np.vstack([clustered_data, np.array([mat])])

    print "Final clustered data: ", clustered_data.shape, len(num_clustered_data)

    return clustered_data, num_clustered_data, len(pos_clustered_group)
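#------------------------------------------------------------------------------#
# Illustrative example (not part of the original code). Hedged sketch of the
# final re-packing step in clustering() above: tft.quaternion_matrix builds a 4x4
# homogeneous transform from a unit quaternion (with zero translation), and the
# averaged cluster position is written into the last column. Values are made up;
# tft refers to tf.transformations.
import numpy as np
import tf.transformations as tft

pos_avg = np.array([0.1, 0.2, 0.3])
quat_avg = np.array([0., 0., 0., 1.])   # identity orientation, [x, y, z, w]
mat = tft.quaternion_matrix(quat_avg)
mat[0, 3] = pos_avg[0]
mat[1, 3] = pos_avg[1]
mat[2, 3] = pos_avg[2]
print mat   # rotation block from quat_avg, translation column from pos_avg
#------------------------------------------------------------------------------#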