Example 1
 def test_absolute_distance(self):
     q = Quaternion(scalar=0, vector=[1,0,0])
     p = Quaternion(scalar=0, vector=[0,1,0])
     self.assertEqual((q-p).norm, Quaternion.absolute_distance(q,p))
     q = Quaternion(angle=pi/2, axis=[1,0,0])
     p = Quaternion(angle=pi/2, axis=[0,1,0])
     self.assertEqual((q-p).norm, Quaternion.absolute_distance(q,p))
     q = Quaternion(scalar=0, vector=[1,0,0])
     p = Quaternion(scalar=-1, vector=[0,-1,0])
     # with p negated, the |q + p| branch of the sign-ambiguous distance is the smaller one
     self.assertEqual((q+p).norm, Quaternion.absolute_distance(q,p))
     q = Quaternion(scalar=1, vector=[1,1,1])
     p = Quaternion(scalar=-1, vector=[-1,-1,-1])
     p._normalise()
     q._normalise()
     # q and -q represent the same rotation, so their distance is (numerically) zero
     self.assertAlmostEqual(0, Quaternion.absolute_distance(q,p), places=8)
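
A minimal standalone sketch of the API these tests exercise, assuming the pyquaternion package: Quaternion.absolute_distance(q0, q1) is a static method returning the smaller of |q0 - q1| and |q0 + q1|, which is exactly what the assertions above check.

from math import pi
from pyquaternion import Quaternion

q = Quaternion(axis=[1, 0, 0], angle=pi / 2)
p = Quaternion(axis=[0, 1, 0], angle=pi / 2)

print((q - p).norm)                        # Euclidean norm of the 4-vector difference
print(Quaternion.absolute_distance(q, p))  # min(|q - p|, |q + p|)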
Example 2
def test(rwdobj: DetailsForReward):
    """MOVE AS MUCH AS POSSIBLE"""
    dis_traveled = euclidean(rwdobj.new_obj_pos, rwdobj.init_obj_pos)
    or_traveled = Quaternion.absolute_distance(Quaternion(rwdobj.new_obj_or),
                                               Quaternion(rwdobj.init_obj_or))
    grip_strength = np.round(np.sum(np.abs(rwdobj.tactile_state[-1])), 2)
    obj_fell = grip_strength < 0.004 * rwdobj.action_space
    if obj_fell:
        # grip too weak: object dropped, end the episode with the extrinsic reward
        return rwdobj.extrinsic_reward, True
    else:
        # reward how far the object was moved and rotated
        return dis_traveled + or_traveled, False
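
As a usage sketch, the reward above can be smoke-tested with a stand-in object. The field names mirror those used in the function, but the stub (SimpleNamespace in place of the real DetailsForReward) is purely illustrative, and euclidean is assumed to be scipy.spatial.distance.euclidean.

import numpy as np
from types import SimpleNamespace

rwdobj = SimpleNamespace(
    new_obj_pos=np.array([0.1, 0.0, 0.0]),
    init_obj_pos=np.zeros(3),
    new_obj_or=np.array([1.0, 0.0, 0.0, 0.0]),   # (w, x, y, z)
    init_obj_or=np.array([1.0, 0.0, 0.0, 0.0]),
    tactile_state=[np.ones(8)],                  # firm grip
    action_space=8,
    extrinsic_reward=-1.0,
)
reward, done = test(rwdobj)  # -> (0.1, False): moved 0.1 m, no rotation, still held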
Example 3
 def objF(self, x):
     """
     Blackbox optimization objective: sums the squared error, over all transforms
     in the marker buffer, in both translation and rotation.
     :param x: candidate pose as [tx, ty, tz, qx, qy, qz, qw]
     """
     total = 0
     for pose in self.marker_buffer[self.optimize_id]:
         # translation error (first three components)
         total += np.sum(np.square(np.subtract(x[0:3], pose[0:3])))
         # rotation error, scaled by pi
         x_quat = Quaternion(x[6], x[3], x[4], x[5])
         pose_quat = Quaternion(pose[6], pose[3], pose[4], pose[5])
         dist = Quaternion.absolute_distance(x_quat, pose_quat) / np.pi
         total += dist * dist
     return total
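
A usage sketch: an objective of this shape can be handed directly to a derivative-free optimizer. The 7-vector layout [tx, ty, tz, qx, qy, qz, qw] is inferred from the indexing above; calib is a hypothetical instance of the surrounding class.

import numpy as np
from scipy.optimize import minimize

x0 = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0])   # zero translation, identity rotation
res = minimize(calib.objF, x0, method="Nelder-Mead")  # calib: hypothetical instance
print(res.x, res.fun)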
Example 4
def stabilize_quat(mpu, t, dist):
    """Return a stabilized quaternion: read the IMU for at least `t` seconds
    and until two successive readings are closer than `dist`, then return the
    last quaternion together with the gravity vector."""
    q0 = Quaternion((0, 0, 0, 0))  # zero sentinel so the first distance is large
    t0 = time.time()
    tmp_dist = 10000
    while (time.time() - t0 < t) or tmp_dist > dist:
        try:
            mpu.read_value()
            rotate_vect = mpu.quaternion
            q1 = Quaternion(rotate_vect)
            tmp_dist = Quaternion.absolute_distance(q1, q0)
            q0 = q1
        except Exception:
            continue  # transient read error: retry
    g = mpu.read_gravity()
    return q1, g
Example 5
 def precond(self, state_str):
     state = State.create_from_serialized_string(state_str)
     robot_pos = state.get_values_as_vec([robot_pos_fqn])
     robot_orn = np.array(state.get_values_as_vec([robot_orn_fqn]))
     block_pos = state.get_values_as_vec([block_pos_fqn])
     delta_side = state.get_values_as_vec(["constants/block_width"])[0] / 2
     # gripper close to the chosen block side and in the correct orientation
     robot_des_pos = np.array([
         block_pos[0] + delta_side * np.sin(dir_to_rpy[self.sidenum][2]),
         block_pos[1],
         block_pos[2] + delta_side * np.cos(dir_to_rpy[self.sidenum][2])
     ])
     gripper_pos_close = np.linalg.norm(robot_des_pos - robot_pos) < 0.03
     des_quat = quat_to_np(rpy_to_quat(np.array(dir_to_rpy[self.sidenum])),
                           format="wxyz")
     orn_dist = Quaternion.absolute_distance(Quaternion(des_quat),
                                             Quaternion(robot_orn))
     orn_close = orn_dist < 0.01
     return gripper_pos_close and orn_close
Example 6
    def get_closest_ee_goals(self,
                             shape_goal,
                             shape_class=Rectangle,
                             grasp_offset=0.055):
        # Find the grasp symmetry that minimizes the distance between shape_goal and the current ee pose.
        curr_quat = Quaternion(p.getLinkState(self.robot, self.grasp_joint)[1])
        goal_quat = Quaternion(shape_goal[1])
        syms = shape_class.grasp_symmetries()
        transformation_lib_quats = [
            quaternion_about_axis(sym, (0, 0, 1)) for sym in syms
        ]
        sym_quats = [
            Quaternion(np.hstack([lib_quat[-1], lib_quat[0:3]]))
            for lib_quat in transformation_lib_quats
        ]
        best_sym_idx = np.argmin([
            Quaternion.absolute_distance(
                curr_quat,
                Quaternion(matrix=np.dot(sym_quat.rotation_matrix,
                                         goal_quat.rotation_matrix)))
            for sym_quat in sym_quats
        ])
        best_sym = syms[best_sym_idx]
        #best_quat = Quaternion(quaternion_about_axis(best_sym, (0,0,1))).rotate(goal_quat)
        #assert(Quaternion.absolute_distance(curr_quat, best_quat) < 0.01)
        best_quat = curr_quat  #hack until I can get the angle stuff working

        #self.grasp_offset = grasp_offset+self.block_height*0.5
        grasp_translation = np.array([0, 0, self.grasp_offset])
        ee_trans = grasp_translation + shape_goal[0]
        grasp_rot = np.dot(curr_quat.rotation_matrix,
                           np.linalg.inv(best_quat.rotation_matrix))
        # hack until I can get the angle stuff working;
        # should be Quaternion(matrix=grasp_rot).elements
        grasp_quat = np.array([1, 0, 0, 0])
        grasp = (grasp_translation, grasp_quat)
        ee_pose = (ee_trans, best_quat.elements)
        return ee_pose, grasp
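
The np.hstack([lib_quat[-1], lib_quat[0:3]]) step above reorders an (x, y, z, w) quaternion (the tf.transformations convention returned by quaternion_about_axis) into the (w, x, y, z) order pyquaternion expects. A small named helper makes that intent explicit (a sketch, not part of the original class):

from pyquaternion import Quaternion

def quat_from_xyzw(xyzw):
    """Build a pyquaternion Quaternion from an (x, y, z, w) sequence."""
    x, y, z, w = xyzw
    return Quaternion(w, x, y, z)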
Example 7
 def goto_pose(self,
               des_quat,
               des_ee_trans,
               env_index,
               env_ptr,
               transformed_pt,
               max_t=None):
     pos_tol = 0.005
     quat_tol = 1.5
     self._frankas[env_index].set_ee_transform(env_ptr, env_index,
                                               self._franka_name,
                                               transformed_pt)
     if max_t is None:
         max_t = self.max_steps_per_movement
     for i in range(int(max_t)):
         self._scene.step()
         if i % 10 == 0:
             self.render(custom_draws=custom_draws)
         if i % 20 == 0:
             ee_pose = self._frankas[env_index].get_ee_transform(
                 env_ptr, self._franka_name)
             np_pose = transform_to_np(ee_pose, format="wxyz")
             pos_err = np.linalg.norm(des_ee_trans - np_pose[:3])
             quat_err = Quaternion.absolute_distance(
                 Quaternion(quat_to_np(des_quat, format="wxyz")),
                 Quaternion(np_pose[3:]))
             if pos_err < pos_tol and quat_err < quat_tol:
                 # settle for a few extra steps once the pose is reached
                 for _ in range(10):
                     self._scene.step()
                     self.render(custom_draws=custom_draws)
                 break
         if i == max_t - 1:
             print("Could not reach pose in time, distance still {} after "
                   "time {}".format(pos_err, i))
     return i
Example 8
                    Metric[name]['orientation gt'] = Metric[
                        opt.object]['orientation gt']

        # evaluation
        if not opt.real_world:
            for name in Metric:
                if Metric[name]['location'] is None:
                    Metric[name]['location error'] = 1000
                    Metric[name]['orientation error'] = 1000
                else:
                    Metric[name]['location error'] = np.linalg.norm(
                        Metric[name]['location'] - Metric[name]['location gt'])
                    q1 = Quaternion(Metric[name]['orientation'])
                    q2 = Quaternion(Metric[name]['orientation gt'])
                    Metric[name][
                        'orientation error'] = Quaternion.absolute_distance(
                            q1, q2)

        Names.append(img_name)
        Metrics.append(Metric)
        print(Metric)

        if not opt.headless:
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
            cv2.imshow('DOPE', np.array(out_img)[..., ::-1])
            if opt.showbelief:
                # TODO: replace the [0] with a loop once multiple objects are supported.
                cv2.imshow('DOPE BELIEFS', np.array(out_beliefs[0])[..., ::-1])
                # print(img_name)
                cv2.imwrite(opt.outf + 'DOPE_BELIEFS_' + img_name,
                            np.array(out_beliefs[0])[..., ::-1])
Example 9
def get_R_degree_error(quaternion_1, quaternion_2):
    """Orientation error between two (w, x, y, z) quaternions, in degrees."""
    rad = Quaternion.absolute_distance(Quaternion(quaternion_1),
                                       Quaternion(quaternion_2))
    return np.rad2deg(rad)
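
One caveat worth knowing: absolute_distance is a chord length on the unit 4-sphere, not an angle in radians, so np.rad2deg of it is only a proxy for angular error. For two unit quaternions separated by a rotation of theta radians the value is 2*sin(theta/4), roughly theta/2 for small errors. A quick check:

import numpy as np
from pyquaternion import Quaternion

theta = np.deg2rad(10.0)
q1 = Quaternion()                             # identity
q2 = Quaternion(axis=[0, 0, 1], angle=theta)
d = Quaternion.absolute_distance(q1, q2)
print(d, 2 * np.sin(theta / 4))               # equal
print(np.rad2deg(d))                          # ~5, not 10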
Example 10
def _quatclose(orn1, orn2, atol):
    """ Indicate whether two orientations encode nearly the same rotation,
        taking into account the fact that q and -q encode the same rotation. """
    # orn1/orn2 are (x, y, z, w); pyquaternion's constructor takes (w, x, y, z)
    q1 = Quaternion(orn1[3], *orn1[:3])
    q2 = Quaternion(orn2[3], *orn2[:3])
    return Quaternion.absolute_distance(q1, q2) < atol
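
A quick hypothetical check of the docstring's claim, with orientations in the same (x, y, z, w) order the function assumes: q and -q encode the same rotation, so _quatclose accepts them.

import numpy as np

orn = np.array([0.0, 0.0, 0.383, 0.924])  # ~45 deg about z, (x, y, z, w)
assert _quatclose(orn, -orn, atol=1e-6)   # absolute_distance(q, -q) is exactly 0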
Example 11
def cameraCallback(depth_image_msg, rgb_image_msg):
    global frame_count, processed_frame_count, record, tracking_frame, relative_frame, tf_listener
    global color_images, depth_images, rgb_poses, intrinsics, prev_pose_rot, prev_pose_tran
    global tsdf_integration_data, live_integration, integration_done, mesh_pub

    if record:
        try:
            # Convert your ROS Image message to OpenCV2
            # TODO: Generalize image type
            cv2_depth_img = bridge.imgmsg_to_cv2(depth_image_msg, "16UC1")
            cv2_rgb_img = bridge.imgmsg_to_cv2(rgb_image_msg, "bgr8")
        except CvBridgeError as e:
            print(e)
            return
        else:
            # Get camera intrinsic from camera info
            if frame_count == 0:
                camera_info = rospy.wait_for_message(camera_info_topic,
                                                     CameraInfo)
                intrinsics = getIntrinsicsFromMsg(camera_info)

            sensor_data.append([
                o3d.geometry.Image(cv2_depth_img),
                o3d.geometry.Image(cv2_rgb_img), rgb_image_msg.header.stamp
            ])
            if (frame_count > 30):
                data = sensor_data.popleft()
                try:
                    (rgb_t, rgb_r) = tf_listener.lookupTransform(
                        relative_frame, tracking_frame, data[2])
                except (tf.LookupException, tf.ConnectivityException,
                        tf.ExtrapolationException):
                    return  # no transform available for this stamp yet
                rgb_t = np.array(rgb_t)
                rgb_r = np.array(rgb_r)

                tran_dist = np.linalg.norm(rgb_t - prev_pose_tran)
                rot_dist = Quaternion.absolute_distance(
                    Quaternion(prev_pose_rot), Quaternion(rgb_r))

                # TODO: evaluate this heuristic: require a minimum pose jump before accepting a frame
                if (tran_dist > translation_distance) or (rot_dist >
                                                          rotational_distance):
                    prev_pose_tran = rgb_t
                    prev_pose_rot = rgb_r
                    rgb_pose = tf.transformations.quaternion_matrix(rgb_r)
                    rgb_pose[0, 3] = rgb_t[0]
                    rgb_pose[1, 3] = rgb_t[1]
                    rgb_pose[2, 3] = rgb_t[2]

                    depth_images.append(data[0])
                    color_images.append(data[1])
                    rgb_poses.append(rgb_pose)
                    if live_integration:
                        integration_done = False
                        rgbd = o3d.geometry.RGBDImage.create_from_color_and_depth(
                            data[1], data[0], depth_scale, depth_trunc, False)
                        tsdf_volume.integrate(rgbd, intrinsics,
                                              np.linalg.inv(rgb_pose))
                        integration_done = True
                        if processed_frame_count % 50 == 0:
                            mesh = tsdf_volume.extract_triangle_mesh()
                            mesh_msg = meshToRos(mesh)
                            mesh_msg.header.stamp = rospy.get_rostime()
                            mesh_msg.header.frame_id = relative_frame
                            mesh_pub.publish(mesh_msg)
                    else:
                        tsdf_integration_data.append(
                            [data[0], data[1], rgb_pose])
                    processed_frame_count += 1

            frame_count += 1
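
The core of the callback is a keyframe gate: a frame is integrated only when the camera has translated or rotated enough since the last accepted pose. Distilled into a standalone sketch (the default thresholds are illustrative; the module-level translation_distance / rotational_distance above play the same role):

import numpy as np
from pyquaternion import Quaternion

def is_new_keyframe(tran, rot_xyzw, prev_tran, prev_rot_xyzw,
                    min_tran=0.05, min_rot=0.1):
    """Accept a frame only if it moved or rotated enough since the last keyframe.
    Rotations are (x, y, z, w), as returned by tf's lookupTransform."""
    tran_dist = np.linalg.norm(np.asarray(tran) - np.asarray(prev_tran))
    rot_dist = Quaternion.absolute_distance(
        Quaternion(prev_rot_xyzw[3], *prev_rot_xyzw[:3]),
        Quaternion(rot_xyzw[3], *rot_xyzw[:3]))
    return tran_dist > min_tran or rot_dist > min_rot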
Example 12
    def grapsPoseGen(
            self, targetPosition,
            objectOrientation):  # position: x,y,z + objectOrientation: w,x,y,z

        planedOrientation = np.zeros([4])

        positionInDatabase = (np.round(targetPosition, 2) * 100 +
                              self.center).astype(int)
        [numSolution, solutionStartIndex
         ] = self.poseSolutionsIndexInSerialization[positionInDatabase[0],
                                                    positionInDatabase[1],
                                                    positionInDatabase[2], :]

        # if grasping an object with a specified orientation, the object's y axis is aligned with its major axis
        if (np.abs(np.linalg.norm(objectOrientation) - 1) < 1e-1):

            # rotate about the object's y axis to find the best grasp orientation
            step = 1  # degree
            angleLoss = 10000
            for i in range(0, 360, step):
                with self.env:
                    while True:
                        # if not self.robot.CheckSelfCollision():
                        if True:
                            T = matrixFromQuat(
                                quatMult(
                                    objectOrientation,
                                    quatFromAxisAngle([0, 1, 0],
                                                      np.deg2rad(i)))
                            )  # matrixFromQuat(np.array([w,x,y,z]))
                            T[0:3, 3] = targetPosition
                            # solutions = self.robotIkmodel.manip.FindIKSolutions(T,IkFilterOptions.CheckEnvCollisions)
                            solutions = self.robotIkmodel.manip.FindIKSolutions(
                                T, 0)

                            if solutions is not None and len(solutions) > 0:
                                for j in range(len(solutions)):
                                    tempLoss = np.array(
                                        [1, 1, 1, 1, 1, 1, 1]).dot(
                                            np.abs((solutions[j, :] -
                                                    self.jointsCenter) /
                                                   self.jointsRange))
                                    if (np.sum(solutions[j, :] >
                                               self.jointsUpLimit) == 0
                                            and np.sum(solutions[j, :] < self.
                                                       jointsDownLimit) == 0
                                            and tempLoss < angleLoss):
                                        angleLoss = tempLoss
                                        planedOrientation = quatMult(
                                            objectOrientation,
                                            quatFromAxisAngle([0, 1, 0],
                                                              np.deg2rad(i)))

                        break

            # if no valid grasp orientation was found along the object's y axis,
            # select the orientation from the database closest to the object
            if angleLoss > 9999:
                orientationLoss = -10000
                yAxisOfObjectFromQuaterion = matrixFromQuat(objectOrientation)[
                    0:3, 1]  # matrixFromQuat expects (w, x, y, z)
                for i in range(solutionStartIndex.astype(int),
                               (solutionStartIndex + numSolution).astype(int),
                               1):
                    with self.env:
                        while True:
                            # if not self.robot.CheckSelfCollision():
                            if True:
                                T = matrixFromQuat(
                                    self.reachabilitySet[i, :]
                                )  # matrixFromQuat(np.array([w,x,y,z]))
                                T[0:3, 3] = targetPosition
                                # solutions = self.robotIkmodel.manip.FindIKSolutions(T,IkFilterOptions.CheckEnvCollisions)
                                solutions = self.robotIkmodel.manip.FindIKSolutions(
                                    T, 0)
                                if solutions is not None and len(
                                        solutions) > 0:
                                    yAxisOfOrientationCandidate = matrixFromQuat(
                                        self.reachabilitySet[i, :])[0:3, 1]
                                    tempLoss = yAxisOfOrientationCandidate.dot(
                                        yAxisOfObjectFromQuaterion)

                                    if (tempLoss > orientationLoss):
                                        orientationLoss = tempLoss
                                        planedOrientation = self.reachabilitySet[
                                            i, :]
                            break

        # grasp a ball: no orientation constraint
        else:
            mixLoss = 10000
            for i in range(solutionStartIndex.astype(int),
                           (solutionStartIndex + numSolution).astype(int), 1):
                with self.env:
                    while True:
                        # if not self.robot.CheckSelfCollision():
                        if True:
                            T = matrixFromQuat(self.reachabilitySet[i, :])
                            T[0:3, 3] = targetPosition
                            # solutions = self.robotIkmodel.manip.FindIKSolutions(T,IkFilterOptions.CheckEnvCollisions)
                            solutions = self.robotIkmodel.manip.FindIKSolutions(
                                T, 0)
                            if solutions is not None and len(solutions) > 0:
                                for j in range(len(solutions)):
                                    tempLoss = np.array(
                                        [1, 1, 1, 1, 1, 1, 1]).dot(
                                            np.abs((solutions[j, :] -
                                                    self.jointsCenter) /
                                                   self.jointsRange))
                                    tempLoss += np.array([0, 1, 0]).dot(
                                        matrixFromQuat(
                                            self.reachabilitySet[i, :])[0:3,
                                                                        2])

                                    if (np.sum(solutions[j, :] >
                                               self.jointsUpLimit) == 0
                                            and np.sum(solutions[j, :] < self.
                                                       jointsDownLimit) == 0
                                            and tempLoss < mixLoss):
                                        mixLoss = tempLoss
                                        planedOrientation = self.reachabilitySet[
                                            i, :]
                        break

        # calculate the grasp position with offset
        graspOffset = 0.02  # unit: m
        planedOrientationWithOffset = np.zeros([4])
        planedJointsAngleWithOffset = np.zeros([7])  # grasp joints angle

        graspPosition = targetPosition + matrixFromQuat(planedOrientation)[
            0:3, 2] * graspOffset - matrixFromQuat(planedOrientation)[
                0:3, 0] * graspOffset

        self.graspPositionWithOffset = graspPosition

        positionInDatabase = (np.round(graspPosition, 2) * 100 +
                              self.center).astype(int)
        [numSolution, solutionStartIndex
         ] = self.poseSolutionsIndexInSerialization[positionInDatabase[0],
                                                    positionInDatabase[1],
                                                    positionInDatabase[2], :]

        # if grasping an object with a specified orientation, the object's y axis is aligned with its major axis
        if (np.abs(np.linalg.norm(objectOrientation) - 1) < 1e-1):
            # rotate about the object's y axis to find the best grasp orientation
            step = 1  # degree
            angleLoss = 10000
            for i in range(0, 360, step):
                with self.env:
                    while True:
                        # if not self.robot.CheckSelfCollision():
                        if True:
                            T = matrixFromQuat(
                                quatMult(
                                    objectOrientation,
                                    quatFromAxisAngle([0, 1, 0],
                                                      np.deg2rad(i)))
                            )  # matrixFromQuat(np.array([w,x,y,z]))
                            T[0:3, 3] = graspPosition
                            # solutions = self.robotIkmodel.manip.FindIKSolutions(T,IkFilterOptions.CheckEnvCollisions)
                            solutions = self.robotIkmodel.manip.FindIKSolutions(
                                T, 0)

                            if solutions is not None and len(solutions) > 0:
                                for j in range(len(solutions)):
                                    tempLoss = np.array(
                                        [1, 1, 1, 1, 1, 1, 1]).dot(
                                            np.abs((solutions[j, :] -
                                                    self.jointsCenter) /
                                                   self.jointsRange))
                                    if (np.sum(solutions[j, :] >
                                               self.jointsUpLimit) == 0
                                            and np.sum(solutions[j, :] < self.
                                                       jointsDownLimit) == 0
                                            and tempLoss < angleLoss):
                                        angleLoss = tempLoss
                                        planedOrientationWithOffset = quatMult(
                                            objectOrientation,
                                            quatFromAxisAngle([0, 1, 0],
                                                              np.deg2rad(i)))
                                        planedJointsAngleWithOffset = solutions[
                                            j, :]

                        break

            # if no valid grasp orientation was found along the object's y axis,
            # select the orientation from the database closest to the object
            if angleLoss > 9999:
                orientationLoss = -10000
                yAxisOfObjectFromQuaterion = matrixFromQuat(objectOrientation)[
                    0:3, 1]  # matrixFromQuat expects (w, x, y, z)

                for i in range(solutionStartIndex.astype(int),
                               (solutionStartIndex + numSolution).astype(int),
                               1):
                    with self.env:
                        while True:
                            # if not self.robot.CheckSelfCollision():
                            if True:
                                T = matrixFromQuat(
                                    self.reachabilitySet[i, :]
                                )  # matrixFromQuat(np.array([w,x,y,z]))
                                T[0:3, 3] = graspPosition
                                # solutions = self.robotIkmodel.manip.FindIKSolutions(T,IkFilterOptions.CheckEnvCollisions)
                                solutions = self.robotIkmodel.manip.FindIKSolutions(
                                    T, 0)
                                if solutions is not None and len(
                                        solutions) > 0:
                                    yAxisOfOrientationCandidate = matrixFromQuat(
                                        self.reachabilitySet[i, :])[0:3, 1]
                                    tempLoss = yAxisOfOrientationCandidate.dot(
                                        yAxisOfObjectFromQuaterion)
                                    if (tempLoss > orientationLoss):
                                        orientationLoss = tempLoss
                                        planedOrientationWithOffset = self.reachabilitySet[
                                            i, :]
                                        angleLoss1 = 10000
                                        for j in range(len(solutions)):
                                            tempLoss1 = np.array([
                                                1, 1, 1, 1, 1, 1, 1
                                            ]).dot(
                                                np.abs((solutions[j, :] -
                                                        self.jointsCenter) /
                                                       self.jointsRange))
                                            if (np.sum(solutions[j, :] >
                                                       self.jointsUpLimit) == 0
                                                    and np.sum(
                                                        solutions[j, :] < self.
                                                        jointsDownLimit) == 0
                                                    and
                                                    tempLoss1 < angleLoss1):
                                                angleLoss1 = tempLoss1
                                                planedJointsAngleWithOffset = solutions[
                                                    j, :]

                            break

        # grasp a ball: no orientation constraint
        else:
            mixLoss = 10000
            planedOrientationQuat = pyQuat(planedOrientation)
            for i in range(solutionStartIndex.astype(int),
                           (solutionStartIndex + numSolution).astype(int), 1):
                with self.env:
                    while True:
                        # if not self.robot.CheckSelfCollision():
                        if True:
                            T = matrixFromQuat(self.reachabilitySet[i, :])
                            T[0:3, 3] = graspPosition
                            # solutions = self.robotIkmodel.manip.FindIKSolutions(T,IkFilterOptions.CheckEnvCollisions)
                            solutions = self.robotIkmodel.manip.FindIKSolutions(
                                T, 0)
                            if solutions is not None and len(solutions) > 0:
                                for j in range(len(solutions)):
                                    tempQuat = pyQuat(
                                        self.reachabilitySet[i, :])
                                    tempLoss = pyQuat.absolute_distance(
                                        tempQuat, planedOrientationQuat)
                                    if (np.sum(solutions[j, :] >
                                               self.jointsUpLimit) == 0
                                            and np.sum(solutions[j, :] < self.
                                                       jointsDownLimit) == 0
                                            and tempLoss < mixLoss):
                                        mixLoss = tempLoss
                                        planedOrientationWithOffset = self.reachabilitySet[
                                            i, :]
                                        planedJointsAngleWithOffset = solutions[
                                            j, :]
                        break

        self.graspOrientation = planedOrientationWithOffset
        self.graspJointsAngle = planedJointsAngleWithOffset
        return planedOrientationWithOffset
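
Stripped of the IK machinery, the rule in the no-orientation branch is simply: among reachable candidate orientations, keep the one closest to the planned one under absolute_distance. As an isolated sketch (pyQuat is the pyquaternion.Quaternion alias used above):

from pyquaternion import Quaternion as pyQuat

def closest_orientation(candidates, planned):
    """candidates: iterable of (w, x, y, z) arrays; planned: (w, x, y, z)."""
    planned_q = pyQuat(planned)
    return min(candidates,
               key=lambda c: pyQuat.absolute_distance(pyQuat(c), planned_q))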
Example 13
    def preGrasp(self,
                 targetPose):  # position: x,y,z + objectOrientation: w,x,y,z

        targetPosition = targetPose[0:3]
        objectOrientation = targetPose[3:7]

        calibOffSet = 0.12  # unit: m

        planedOrientation = self.grapsPoseGen(targetPosition,
                                              objectOrientation)
        T = matrixFromQuat(planedOrientation)

        if (np.abs(np.linalg.norm(objectOrientation) - 1) < 1e-1):
            calibPosition = self.graspPositionWithOffset + T[
                0:3, 2] * calibOffSet * 0.6 - T[
                    0:3,
                    0] * calibOffSet * 0.6  # grasp object with orientation
        else:
            calibPosition = self.graspPositionWithOffset + T[
                0:3, 2] * calibOffSet  # grasp ball

        positionInDatabase = (np.round(calibPosition, 2) * 100 +
                              self.center).astype(int)
        [numSolution, solutionStartIndex
         ] = self.poseSolutionsIndexInSerialization[positionInDatabase[0],
                                                    positionInDatabase[1],
                                                    positionInDatabase[2], :]
        preGraspOrientation = np.zeros([1, 4])
        preGraspOrientationLoss = 10000
        preGraspAnglesLoss = 10000

        planedOrientationQuat = pyQuat(planedOrientation)
        for i in range(solutionStartIndex.astype(int),
                       (solutionStartIndex + numSolution).astype(int), 1):
            with self.env:
                while True:
                    # if not self.robot.CheckSelfCollision():
                    if True:
                        T = matrixFromQuat(self.reachabilitySet[
                            i, :])  # matrixFromQuat(np.array([w,x,y,z]))
                        T[0:3, 3] = calibPosition
                        # solutions = self.robotIkmodel.manip.FindIKSolutions(T,IkFilterOptions.CheckEnvCollisions)
                        solutions = self.robotIkmodel.manip.FindIKSolutions(
                            T, 0)
                        if solutions is not None and len(solutions) > 0:
                            calibOrientationQuat = pyQuat(
                                self.reachabilitySet[i, :])
                            tempLoss = pyQuat.absolute_distance(
                                planedOrientationQuat, calibOrientationQuat)
                            if (tempLoss < preGraspOrientationLoss):
                                preGraspOrientationLoss = tempLoss
                                preGraspOrientation = self.reachabilitySet[
                                    i, :]
                    break

        with self.env:
            while True:
                # if not self.robot.CheckSelfCollision():
                if True:
                    T = matrixFromQuat(preGraspOrientation
                                       )  # matrixFromQuat(np.array([w,x,y,z]))
                    T[0:3, 3] = calibPosition
                    # solutions = self.robotIkmodel.manip.FindIKSolutions(T,IkFilterOptions.CheckEnvCollisions)
                    solutions = self.robotIkmodel.manip.FindIKSolutions(T, 0)
                    if solutions is not None and len(solutions) > 0:
                        for j in range(len(solutions)):
                            tempLoss = np.array([0, 0, 0, 0, 1, 1, 1]).dot(
                                np.abs((solutions[j, :] - self.jointsCenter) /
                                       self.jointsRange))
                            if (np.sum(solutions[j, :] > self.jointsUpLimit)
                                    == 0 and np.sum(
                                        solutions[j, :] < self.jointsDownLimit)
                                    == 0 and tempLoss < preGraspAnglesLoss):
                                preGraspAnglesLoss = tempLoss
                                preGraspAngles = solutions[j, :]
                break

        self.preGraspJointsAngle = preGraspAngles

        tempAngle = self.currentJointError + self.preGraspJointsAngle
        if (np.sum(tempAngle > self.jointsUpLimit) == 0
                and np.sum(tempAngle < self.jointsDownLimit) == 0):
            return self.currentJointError + self.preGraspJointsAngle
        else:
            print "compensation out of joint limit"
            return self.preGraspJointsAngle
Example 14
def follow_traj(fa, path, cart_gain=2000, z_cart_gain=2000, rot_cart_gain=300,
                expect_contact=False, pb_world=None, monitor_execution=True,
                traj_type="joint", dt=8):
    cart_poses = []
    if monitor_execution:
        for pt in path:
            if traj_type != "cart":
                cart_pt = pb_world.fk_rigidtransform(pt)
            else:
                cart_pt = pt
            if in_region_with_modelfailure(cart_pt):
                #import ipdb; ipdb.set_trace()
                print("expected model failure")
                #return "expected_model_failure"
    for pt, i in zip(path, range(len(path))):
        if traj_type == "cart":
            new_pos = pt
        else:
            new_pos = pb_world.fk_rigidtransform(pt)
        cart_poses.append(new_pos)
        if traj_type == "cart":
            fa.goto_pose_with_cartesian_control(new_pos, cartesian_impedances=[cart_gain, cart_gain, z_cart_gain,rot_cart_gain, rot_cart_gain, rot_cart_gain]) #one of these but with impedance control? compliance comes from the matrix so I think that's good enough
        else:
            #print("Curr joints", np.round(fa.get_joints(),2))
            #print("proposed joints", np.round(pt,2))
            #input("Are these joints ok?")
            joints_list = np.array(pt).tolist()
            if not fa.is_joints_reachable(joints_list):
                print("Joints not reachable")
                import ipdb; ipdb.set_trace()
            else:
                fa.goto_joints(np.array(pt).tolist(), duration=dt)
        force = feelforce()
        model_deviation = False
        cart_to_sigma = lambda cart: np.exp(-0.00026255 * cart - 4.14340759)
        sigma_cart = cart_to_sigma(np.array([cart_gain, cart_gain, z_cart_gain]))
        sigma_cart = 0.008  # empirical override of the fitted model above
        rot_sigma = cart_to_sigma(rot_cart_gain)
        if monitor_execution:
            try:
                curr_pose = fa.get_pose()
                if (np.abs(curr_pose.translation - new_pos.translation)
                        > 1.96 * sigma_cart).any():
                    model_deviation = True
                    print("Farther than expected. Expected " +
                          str(np.round(new_pos.translation, 2)) + " but got " +
                          str(np.round(curr_pose.translation, 2)))
                if Quaternion.absolute_distance(
                        Quaternion(curr_pose.quaternion),
                        Quaternion(new_pos.quaternion)) > 1.96 * rot_sigma:
                    model_deviation = True
                    print("Farther than expected. Expected " +
                          str(np.round(new_pos.quaternion, 2)) + " but got " +
                          str(np.round(curr_pose.quaternion, 2)))
            except ValueError:
                input("fa.get_pose() failed. Continue?")

            if force < CONTACT_THRESHOLD and not expect_contact:
                print("unexpected contact")
                model_deviation = True
            elif force > CONTACT_THRESHOLD and expect_contact:
                print("expected contact but none detected")
                model_deviation = True
            if model_deviation and len(cart_poses) >= 2:
                new_bad_state = np.hstack(
                    [cart_poses[-2].translation, cart_poses[-2].quaternion])
                if len(bad_model_states) == 0:
                    bad_model_states = new_bad_state.reshape(1, -1)
                else:
                    bad_model_states = np.vstack(
                        [bad_model_states, new_bad_state])
            elif not model_deviation and len(cart_poses) >= 2:
                new_good_state = np.hstack(
                    [cart_poses[-2].translation, cart_poses[-2].quaternion])
                if len(good_model_states) == 0:
                    good_model_states = new_good_state.reshape(1, -1)
                else:
                    good_model_states = np.vstack(
                        [good_model_states, new_good_state])
            np.save("data/door_bad_model_states.npy", bad_model_states)
            np.save("data/door_good_model_states.npy", good_model_states)
            if model_deviation:
                print("Ending MB policy due to model deviation")
                return("model_failure")
    if monitor_execution:
        return "expected_good"
Example 15
	def execute(self, action):
		# Apply motor forces
		target_positions = [self.motor_limit[i][0] + action[i] * (self.motor_limit[i][1] - self.motor_limit[i][0]) for i in range(len(action))]
		#pb.setJointMotorControlArray(self.robotId, self.motors,controlMode=pb.POSITION_CONTROL, targetPositions = target_positions, forces=np.array(self.motor_power), physicsClientId = self.pybullet_client_ID)
		offset = 0
		for joint_index in range(pb.getNumJoints(self.robotId, physicsClientId = self.pybullet_client_ID)):
			joint_data = pb.getJointInfo(self.robotId, joint_index, physicsClientId = self.pybullet_client_ID)
			if joint_data[2] == pb.JOINT_FIXED:
				offset += 1
				continue
			pb.resetJointState(self.robotId, joint_index, target_positions[joint_index - offset], physicsClientId = self.pybullet_client_ID)

		# Step simulation twice
		# TODO : This is now the bottleneck
		#pb.stepSimulation(physicsClientId = self.pybullet_client_ID)
		#pb.stepSimulation(physicsClientId = self.pybullet_client_ID)
		self.cur_step += 2

		if self.GUI:
			time.sleep(0.01)
			distance=5
			yaw = 0
			humanPos, humanOrn = pb.getBasePositionAndOrientation(self.robotId, physicsClientId = self.pybullet_client_ID)
			pb.resetDebugVisualizerCamera(distance, yaw, -20, humanPos, physicsClientId = self.pybullet_client_ID)

		# Observe state
		state = self.collect_observations()

		# Compute reward
		# Compute imitation reward
		weight_imitation = 1.0
		reward_imitation = 0

		# Determine current motion data frame
		work_frame = self.init_frame + self.cur_step
		frame_data = self.data[work_frame]  # alternatively: self.data[self.init_frame]

		# Quaternion Difference
		angle_sum = 0
		for (label_index, label) in enumerate(self.reward_links):
			jointState = pb.getJointState(self.robotId, label_index, physicsClientId = self.pybullet_client_ID)
			target_qwxyz = Quaternion(frame_data['qwxyz_'+label])
			current_qwxyz = Quaternion(self.prev_jstate[4 + label_index*7:4 + label_index*7 +4])
			#print(label, Quaternion.absolute_distance(target_qwxyz, current_qwxyz), '\n[' ,current_qwxyz, '], \n[',  target_qwxyz, ']\n[',target_qwxyz / current_qwxyz,']')
			dist = Quaternion.absolute_distance(target_qwxyz, current_qwxyz)
			angle_sum += dist*dist

		reward_imitation += 0.65 * np.exp(-2 * angle_sum)
		# Angular Velocities
		ang_vel_sum = 0
		'''
		index_offset = 1 + 10*(1 + len(self.reward_links))
		for (label_index, label) in enumerate(['root'] + self.reward_links):
			target_ang_vel = np.array(frame_data['ang_vel_'+ label])
			current_ang_vel = np.array(state[index_offset + label_index*3])
			dist = np.linalg.norm(target_ang_vel - current_ang_vel)
			ang_vel_sum += dist * dist

		#print('Angle:', angle_sum, np.exp(-2 * angle_sum))
		# TODO: Fix this
		reward_imitation += 0.1 * np.exp(-0.1 * ang_vel_sum)
		#print('Vel  :',ang_vel_sum, np.exp(-0.1 * ang_vel_sum))
		

		# TODO : Optimize reward evaluation, this is fast
		# End effector matching
		ee_deviation = 0
		for ee_label in self.end_effectors:
			ee_index = self.label2index[ee_label]
			if ee_index in self.link_states.keys():
				ee_pos = self.link_states[ee_index]
			else:
				ee_pos = pb.getLinkState(self.robotId, ee_index, physicsClientId = self.pybullet_client_ID)[0]
			ee_target = frame_data['ee_' + ee_label] # TODO : Error: This is local
			#print(ee_label,'EE Pos:',ee_pos, ee_target)
			ee_diff = np.linalg.norm(ee_pos - ee_target)
			ee_deviation += ee_diff * ee_diff

		reward_imitation += 0.15 * np.exp(-40 * ee_deviation)

		# Center of Mass deviation
		# TODO : Optimize reward evaluation, this is very slow. Find a better way to determine CoM
		
		base_mass = self.link_masses['root']
		total_mass = base_mass
		CoM = np.array(state[1:4]) * base_mass
		
		for link_label in self.reward_links:
			link_mass = self.link_masses[link_label]
			link_pos  = self.link_states[self.label2index[link_label]]#pb.getLinkState(self.robotId, self.label2index[link_label])[0] # self.prev_jstate[8 + (link_index)*7:8 + (link_index)*7 + 3]
			CoM += np.array(link_pos) * link_mass
			total_mass += link_mass
		
		CoM = CoM / total_mass
		
		#CoM = pb.getBasePositionAndOrientation(self.robotId, physicsClientId = self.pybullet_client_ID)[0]
		CoM_deviation = np.linalg.norm(frame_data['CoM'] - CoM)
		reward_imitation += 0.1 * np.exp(-10 * CoM_deviation*CoM_deviation)
		'''
		if False:
			print('ANG: ', angle_sum)
			print('CoM: ', CoM_deviation)
			print('EE: ', ee_deviation)
		
		#print('CoM  :',CoM_deviation, np.exp(-10 * CoM_deviation*CoM_deviation))
		#print(CoM, np.array(state[0:3]), self.data[work_frame]['CoM'])

		# Compute reward for achieving the goal
		weight_goal = 1
		reward_goal = 0
		# Nothing here for the static test
		
		# Compute final reward
		reward = weight_imitation * reward_imitation + weight_goal * reward_goal
		#print(weight_imitation * reward_imitation, weight_goal * reward_goal)

		# Return state, terminal bool and reward
		self.link_states = dict()
		
		# TODO : Implement early termination procedures
		#self.Early_Termination |= CoM_deviation > 1.0
		reward = reward * (not self.Early_Termination)
		
		done = (self.cur_step + 2 >= self.n_steps) or (self.init_frame + self.cur_step + 2 >= self.nframes)  # or Early_Termination
		return (state, done, reward)
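
The quaternion term of the imitation reward is a squared-exponential kernel over the summed squared orientation distances: it contributes 0.65 when every link matches its reference orientation exactly and decays toward 0 as links drift. In isolation (a sketch; quaternions are (w, x, y, z), as in frame_data['qwxyz_...']):

import numpy as np
from pyquaternion import Quaternion

def quat_imitation_term(target_quats, current_quats, weight=0.65, scale=2.0):
    """Squared-exponential kernel over summed squared quaternion distances."""
    angle_sum = sum(
        Quaternion.absolute_distance(Quaternion(t), Quaternion(c)) ** 2
        for t, c in zip(target_quats, current_quats))
    return weight * np.exp(-scale * angle_sum)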