def imu_state_update(state: np.ndarray,
                     input: np.ndarray,
                     time_interval=0.01) -> np.ndarray:
    '''
    State transition update based on the transforms3d library.
    :param state: 9-vector [position (3), velocity (3), euler angles (3)]
    :param input: 6-vector [body-frame acceleration (3), angular rate (3)]
    :param time_interval: integration step in seconds
    :return: the updated state
    '''
    q = euler.euler2quat(state[6], state[7], state[8])
    w = input[3:6] * time_interval
    r_q = euler.euler2quat(w[0], w[1], w[2])
    # compose with the incremental rotation; plain * would be elementwise
    q = quaternions.qmult(q, r_q)

    # rotate body-frame acceleration into the world frame and remove gravity
    acc = quaternions.rotate_vector(input[0:3], q) + np.asarray((0, 0, -9.8))
    state[0:3] = state[0:3] + state[3:6] * time_interval
    state[3:6] = state[3:6] + acc * time_interval
    state[6:9] = euler.quat2euler(q)

    return state
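
A minimal usage sketch (hypothetical values), assuming the layout the code
implies: state = [x, y, z, vx, vy, vz, roll, pitch, yaw], input =
[ax, ay, az, wx, wy, wz] in the body frame:

import numpy as np
from transforms3d import euler, quaternions

state = np.zeros(9)                             # at rest, level attitude
imu = np.array([0.0, 0.0, 9.8, 0.0, 0.0, 0.1])  # hover reading + slow yaw rate
for _ in range(100):                            # integrate 1 s at dt = 0.01
    state = imu_state_update(state, imu, time_interval=0.01)
print(state[6:9])                               # yaw accumulates to ~0.1 rad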
Example #2
def allocentric_to_egocentric(allo_pose, src_type="mat", dst_type="mat"):
    """Given an allocentric (object-centric) pose, compute new camera-centric
    pose Since we do detection on the image plane and our kernels are
    2D-translationally invariant, we need to ensure that rendered objects
    always look identical, independent of where we render them.

    Since objects further away from the optical center undergo skewing,
    we try to visually correct by rotating back the amount between
    optical center ray and object centroid ray. Another way to solve
    that might be translational variance
    (https://arxiv.org/abs/1807.03247)
    """
    # Compute rotation between ray to object centroid and optical center ray
    cam_ray = np.asarray([0, 0, 1.0])
    if src_type == "mat":
        trans = allo_pose[:3, 3]
    elif src_type == "quat":
        trans = allo_pose[4:7]
    else:
        raise ValueError(
            "src_type should be mat or quat, got: {}".format(src_type))
    obj_ray = trans.copy() / np.linalg.norm(trans)
    angle = math.acos(cam_ray.dot(obj_ray))

    # Rotate back by that amount
    if angle > 0:
        if dst_type == "mat":
            ego_pose = np.zeros((3, 4), dtype=allo_pose.dtype)
            ego_pose[:3, 3] = trans
            rot_mat = axangle2mat(axis=np.cross(cam_ray, obj_ray), angle=angle)
            if src_type == "mat":
                ego_pose[:3, :3] = np.dot(rot_mat, allo_pose[:3, :3])
            elif src_type == "quat":
                ego_pose[:3, :3] = np.dot(rot_mat, quat2mat(allo_pose[:4]))
        elif dst_type == "quat":
            ego_pose = np.zeros((7, ), dtype=allo_pose.dtype)
            ego_pose[4:7] = trans
            rot_q = axangle2quat(np.cross(cam_ray, obj_ray), angle)
            if src_type == "quat":
                ego_pose[:4] = qmult(rot_q, allo_pose[:4])
            elif src_type == "mat":
                ego_pose[:4] = qmult(rot_q, mat2quat(allo_pose[:3, :3]))
        else:
            raise ValueError(
                "dst_type should be mat or quat, got: {}".format(dst_type))
    else:  # angle == 0: nothing to correct, only convert the format
        if src_type == "mat" and dst_type == "quat":
            ego_pose = np.zeros((7, ), dtype=allo_pose.dtype)
            ego_pose[:4] = mat2quat(allo_pose[:3, :3])
            ego_pose[4:7] = allo_pose[:3, 3]
        elif src_type == "quat" and dst_type == "mat":
            ego_pose = np.zeros((3, 4), dtype=allo_pose.dtype)
            ego_pose[:3, :3] = quat2mat(allo_pose[:4])
            ego_pose[:3, 3] = allo_pose[4:7]
        else:
            ego_pose = allo_pose.copy()
    return ego_pose
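
A minimal call sketch, assuming the imports the snippet relies on (math,
numpy, and the transforms3d helpers it references):

import math
import numpy as np
from transforms3d.axangles import axangle2mat
from transforms3d.quaternions import axangle2quat, mat2quat, qmult, quat2mat

allo = np.hstack([np.eye(3), np.array([[1.0], [0.0], [5.0]])])  # off-axis object
ego = allocentric_to_egocentric(allo, src_type="mat", dst_type="mat")
print(ego[:3, 3])    # translation is unchanged
print(ego[:3, :3])   # rotation now carries the viewpoint correction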
Example #3
    def build_cameras(self):
        b = self.scene.create_actor_builder()
        b.add_visual_from_file('./assets/camera.dae')
        cam_front = b.build(True, name="camera")
        pitch = np.deg2rad(-15)
        cam_front.set_pose(
            Pose([1, 0, 1],
                 qmult(axangle2quat([0, 0, 1], np.pi),
                       axangle2quat([0, -1, 0], pitch))))
        c = self.scene.add_mounted_camera('front_camera', cam_front, Pose(),
                                          640, 480, 0, np.deg2rad(45), 0.1, 10)
        self.front_camera = Camera(c)
        b = self.scene.create_actor_builder()
        b.add_visual_from_file('./assets/tripod.dae', Pose([0, 0, -1]))
        b.build(True, name="tripod").set_pose(Pose([1, 0, 1]))

        b = self.scene.create_actor_builder()
        b.add_visual_from_file('./assets/camera.dae')
        cam_left = b.build(True, name="camera")
        pitch = np.deg2rad(-15)
        cam_left.set_pose(
            Pose([0, 1.5, 1],
                 qmult(axangle2quat([0, 0, 1], -np.pi / 2),
                       axangle2quat([0, -1, 0], pitch))))
        c = self.scene.add_mounted_camera('left_camera', cam_left, Pose(), 640,
                                          480, 0, np.deg2rad(45), 0.1, 10)
        self.left_camera = Camera(c)
        b = self.scene.create_actor_builder()
        b.add_visual_from_file('./assets/tripod.dae', Pose([0, 0, -1]))
        b.build(True, name="tripod").set_pose(Pose([0, 1.5, 1]))

        b = self.scene.create_actor_builder()
        b.add_visual_from_file('./assets/camera.dae')
        cam_right = b.build(True, name="camera")
        pitch = np.deg2rad(-15)
        cam_right.set_pose(
            Pose([0, -1.5, 1],
                 qmult(axangle2quat([0, 0, 1], np.pi / 2),
                       axangle2quat([0, -1, 0], pitch))))
        c = self.scene.add_mounted_camera('right_camera', cam_right, Pose(),
                                          640, 480, 0, np.deg2rad(45), 0.1, 10)
        self.right_camera = Camera(c)
        b = self.scene.create_actor_builder()
        b.add_visual_from_file('./assets/tripod.dae', Pose([0, 0, -1]))
        b.build(True, name="tripod").set_pose(Pose([0, -1.5, 1]))

        b = self.scene.create_actor_builder()
        b.add_visual_from_file('./assets/camera.dae')
        cam_top = b.build(True, name="camera")
        pitch = np.deg2rad(-90)
        cam_top.set_pose(Pose([-0.5, 0, 3], axangle2quat([0, -1, 0], pitch)))
        c = self.scene.add_mounted_camera('top_camera', cam_top, Pose(), 640,
                                          480, 0, np.deg2rad(45), 0.1, 10)
        self.top_camera = Camera(c)
Example #4
def findQuaternionMean(sigmas):
    errors = np.zeros((len(sigmas), 3))
    oldMean = sigmas[0, :]
    for i in range(1, len(sigmas)):
        currentQ = sigmas[i, :]
        # error rotation of this sigma point relative to the current mean
        errorQ = quat.qmult(currentQ, quat.qinverse(oldMean))
        vectorError = quatToAxisAngle(errorQ)
        errors[i, :] = vectorError
    averageErrorVector = np.mean(errors, axis=0)
    quatError = axisAngleToQuat(averageErrorVector)
    newMean = quat.qmult(quatError, oldMean)
    return newMean, errors, averageErrorVector
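
The function assumes quatToAxisAngle and axisAngleToQuat helpers that are not
shown. A plausible sketch of them (an assumption, not the original code):

import numpy as np
from transforms3d import quaternions as quat

def quatToAxisAngle(q):
    """Map a unit quaternion to a rotation vector (axis * angle)."""
    axis, angle = quat.quat2axangle(q)
    return axis * angle

def axisAngleToQuat(v):
    """Map a rotation vector back to a unit quaternion."""
    angle = np.linalg.norm(v)
    if angle < 1e-12:
        return np.array([1.0, 0.0, 0.0, 0.0])
    return quat.axangle2quat(v / angle, angle)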
Example #5
def egocentric_to_allocentric(ego_pose,
                              src_type="mat",
                              dst_type="mat",
                              cam_ray=(0, 0, 1.0)):
    # Compute rotation between ray to object centroid and optical center ray
    cam_ray = np.asarray(cam_ray)
    if src_type == "mat":
        trans = ego_pose[:3, 3]
    elif src_type == "quat":
        trans = ego_pose[4:7]
    else:
        raise ValueError(
            "src_type should be mat or quat, got: {}".format(src_type))
    obj_ray = trans.copy() / np.linalg.norm(trans)
    angle = math.acos(cam_ray.dot(obj_ray))

    # Rotate back by that amount
    if angle > 0:
        if dst_type == "mat":
            allo_pose = np.zeros((3, 4), dtype=ego_pose.dtype)
            allo_pose[:3, 3] = trans
            rot_mat = axangle2mat(axis=np.cross(cam_ray, obj_ray),
                                  angle=-angle)
            if src_type == "mat":
                allo_pose[:3, :3] = np.dot(rot_mat, ego_pose[:3, :3])
            elif src_type == "quat":
                allo_pose[:3, :3] = np.dot(rot_mat, quat2mat(ego_pose[:4]))
        elif dst_type == "quat":
            allo_pose = np.zeros((7, ), dtype=ego_pose.dtype)
            allo_pose[4:7] = trans
            rot_q = axangle2quat(np.cross(cam_ray, obj_ray), -angle)
            if src_type == "quat":
                allo_pose[:4] = qmult(rot_q, ego_pose[:4])
            elif src_type == "mat":
                allo_pose[:4] = qmult(rot_q, mat2quat(ego_pose[:3, :3]))
        else:
            raise ValueError(
                "dst_type should be mat or quat, got: {}".format(dst_type))
    else:  # angle == 0: nothing to correct, only convert the format
        if src_type == "mat" and dst_type == "quat":
            allo_pose = np.zeros((7, ), dtype=ego_pose.dtype)
            allo_pose[:4] = mat2quat(ego_pose[:3, :3])
            allo_pose[4:7] = ego_pose[:3, 3]
        elif src_type == "quat" and dst_type == "mat":
            allo_pose = np.zeros((3, 4), dtype=ego_pose.dtype)
            allo_pose[:3, :3] = quat2mat(ego_pose[:4])
            allo_pose[:3, 3] = ego_pose[4:7]
        else:
            allo_pose = ego_pose.copy()
    return allo_pose
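
A consistency sketch: converting ego -> allo -> ego with the two functions
above should be a (near-exact) round trip, since both apply the same angle
with opposite signs:

import numpy as np

ego = np.hstack([np.eye(3), np.array([[0.5], [-0.2], [3.0]])])
allo = egocentric_to_allocentric(ego, src_type="mat", dst_type="mat")
ego_back = allocentric_to_egocentric(allo, src_type="mat", dst_type="mat")
print(np.allclose(ego, ego_back))  # expected True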
Example #6
def ego_pose_to_allo_pose_v2(ego_pose, rot_type="mat"):
    assert rot_type in ["quat", "mat"], rot_type
    if rot_type == "mat":
        trans = ego_pose[:3, 3]
    else:
        trans = ego_pose[4:7]

    dx = np.arctan2(trans[0], trans[2])
    dy = np.arctan2(trans[1], trans[2])
    # print(dx, dy)
    euler_order = "sxyz"

    if rot_type == "quat":
        rot = ego_pose[:4]
        quat = euler2quat(-dy, -dx, 0, axes=euler_order)
        quat = qmult(quat, rot)
        return np.concatenate([quat, trans], axis=0)
    elif rot_type == "mat":
        rot = ego_pose[:3, :3]
        mat = euler2mat(-dy, -dx, 0, axes=euler_order)
        mat = mat.dot(rot)
        return np.hstack([mat, trans.reshape(3, 1)])
    else:
        raise ValueError(
            "Unknown rot_type: {}, should be mat or quat".format(rot_type))
Example #7
    def reset_random_pos(self):
        '''Add randomness to the initial position on reset
        '''
        if not self.config["random"]["random_initial_pose"]:
            return

        pos = self.robot_body.get_position()
        orn = self.robot_body.get_orientation()

        x_range = self.config["random"]["random_init_x_range"]
        y_range = self.config["random"]["random_init_y_range"]
        z_range = self.config["random"]["random_init_z_range"]
        r_range = self.config["random"]["random_init_rot_range"]

        new_pos = [
            pos[0] + self.np_random.uniform(low=x_range[0], high=x_range[1]),
            pos[1] + self.np_random.uniform(low=y_range[0], high=y_range[1]),
            pos[2] + self.np_random.uniform(low=z_range[0], high=z_range[1])
        ]
        new_orn = quaternions.qmult(
            quaternions.axangle2quat([1, 0, 0],
                                     self.np_random.uniform(low=r_range[0],
                                                            high=r_range[1])),
            orn)

        self.robot_body.reset_orientation(new_orn)
        self.robot_body.reset_position(new_pos)
Example #8
    def reset_random_target(self):
        '''
        Add randomness to the target pose on reset
        '''
        if not self.config["random"]["random_target_pose"]:
            return

        pos = self.config["target_pos"]
        orn = self.config["target_orn"]

        x_range = self.config["random"]["random_init_x_range"]
        y_range = self.config["random"]["random_init_y_range"]
        z_range = self.config["random"]["random_init_z_range"]
        r_range = self.config["random"]["random_init_rot_range"]

        new_pos = [
            pos[0] + self.np_random.uniform(low=x_range[0], high=x_range[1]),
            pos[1] + self.np_random.uniform(low=y_range[0], high=y_range[1]),
            pos[2] + self.np_random.uniform(low=z_range[0], high=z_range[1])
        ]
        new_orn = quaternions.qmult(
            quaternions.axangle2quat([1, 0, 0],
                                     self.np_random.uniform(low=r_range[0],
                                                            high=r_range[1])),
            orn)

        self.config["target_pos"] = new_pos
        self.config["target_orn"] = new_orn
Example #9
    def reset_base_position(self, enabled, delta_orn=np.pi / 9, delta_pos=0.2):
        #print("Reset base enabled", enabled)
        if not enabled:
            return
        pos = self.robot_body.current_position()
        orn = self.robot_body.current_orientation()

        #print("collision", len(p.getContactPoints(self.robot_body.bodyIndex)))

        #while True:
        new_pos = [
            pos[0] + self.np_random.uniform(
                low=self.env.config["random_init_x_range"][0],
                high=self.env.config["random_init_x_range"][1]),
            pos[1] + self.np_random.uniform(
                low=self.env.config["random_init_y_range"][0],
                high=self.env.config["random_init_y_range"][1]),
            pos[2] + self.np_random.uniform(
                low=self.env.config["random_init_z_range"][0],
                high=self.env.config["random_init_z_range"][1])
        ]
        new_orn = quat.qmult(
            quat.axangle2quat(
                [1, 0, 0],
                self.np_random.uniform(
                    low=self.env.config["random_init_rot_range"][0],
                    high=self.env.config["random_init_rot_range"][1])), orn)
        self.robot_body.reset_orientation(new_orn)
        self.robot_body.reset_position(new_pos)
Example #10
def quater2ang_v(quater_1, quater_2, dt):
    delta_q = qmult(quater_2, qconjugate(quater_1))
    delta_q_len = np.linalg.norm(delta_q[1:])
    delta_q_angle = 2 * np.arctan2(delta_q_len, delta_q[0])
    # normalize the vector part to a unit axis before scaling by angle / dt
    axis = delta_q[1:] / delta_q_len if delta_q_len > 1e-12 else delta_q[1:]
    w = axis * delta_q_angle / dt

    return w
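
A sanity sketch: two orientations 0.1 rad apart about z over dt = 0.1 s should
give an angular velocity of roughly [0, 0, 1] rad/s:

import numpy as np
from transforms3d.quaternions import axangle2quat, qconjugate, qmult

q1 = axangle2quat([0, 0, 1], 0.0)
q2 = axangle2quat([0, 0, 1], 0.1)
print(quater2ang_v(q1, q2, dt=0.1))  # ~ [0, 0, 1]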
Example #11
def getRelativeQuaternionAtoB(quat_a, quat_b):
    """
    Given two quaternions A and B, determines the quaternion that rotates A to B
    (multiplying A by q_rel will give B).
    """
    q_rel = qmult(qinverse(quat_a), quat_b)
    return q_rel
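
A quick check of the stated property (multiplying A by q_rel gives B):

import numpy as np
from transforms3d.quaternions import axangle2quat, qinverse, qmult

quat_a = axangle2quat([0, 0, 1], 0.3)
quat_b = axangle2quat([0, 1, 0], 0.7)
q_rel = getRelativeQuaternionAtoB(quat_a, quat_b)
print(np.allclose(qmult(quat_a, q_rel), quat_b))  # expected True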
Example #12
def mocap_set_action(sim, action):
    """The action controls the robot using mocaps. Specifically, bodies
    on the robot (for example the gripper wrist) is controlled with
    mocap bodies. In this case the action is the desired difference
    in position and orientation (quaternion), in world coordinates,
    of the target body. The mocap is positioned relative to
    the target body according to the delta, and the MuJoCo equality
    constraint optimizer tries to center the welded body on the mocap.
    """
    if sim.model.nmocap > 0:
        action, _ = np.split(action, (sim.model.nmocap * 7, ))
        action = action.reshape(sim.model.nmocap, 7)

        pos_delta = action[:, :3].ravel()
        quat_delta = action[:, 3:].ravel()

        reset_mocap2body_xpos(sim)

        print("pos_delta", pos_delta.shape)
        sim.data.mocap_pos[:] = sim.data.mocap_pos + pos_delta

        # upright
        default = np.array([1, 0, 1, 0.0])
        default /= np.linalg.norm(default)
        sim.data.mocap_quat[:] = quaternions.qmult(default, quat_delta)
Example #13
	def _set_action(self, action):
		assert action.shape == (self.n_actions,)
		action = action.copy()  # ensure that we don't change the action outside of this scope

		if self.block_gripper:
			pos_ctrl = action
		else:
			pos_ctrl, gripper_ctrl = action[:3], action[3]
			gripper_ctrl = (gripper_ctrl+1.0)/2.0

		pos_ee, quat_ee = self.psm.getPoseAtEE()
		pos_ee = pos_ee + pos_ctrl*0.001  # the maximum change in position is 0.1cm

		#Get table information to constrain orientation and position
		pos_table, q_table = self.table.getPose(self.psm.base_handle)

		#Make sure tool tip is not in the table by checking tt and which side of the table it is on

		#DH parameters to find tt position
		ct = np.cos(0)
		st = np.sin(0)

		ca = np.cos(-np.pi/2.0)
		sa = np.sin(-np.pi/2.0)

		T_x = np.array([[1,  0,  0, 0],
		               [0, ca, -sa, 0 ],
		               [0, sa,  ca, 0 ],
		               [0, 0, 0,    1 ]])
		T_z = np.array([[ct, -st, 0, 0],
		                [st,  ct, 0, 0],
		                [0,    0, 1, 0.0102],
		                [0,    0, 0, 1]])

		ee_T_tt = np.dot(T_x, T_z)

		pos_tt, quat_tt = self.psm.matrix2posquat(np.dot(self.psm.posquat2Matrix(pos_ee,quat_ee), ee_T_tt))

		pos_tt_on_table, distanceFromTable = self._project_point_on_table(pos_tt)

		#if the distance from the table is negative, then we need to project pos_tt onto the table top.
		#Or if two dim only are enabled
		if distanceFromTable < 0 or self.two_dimension_only:
			pos_ee, _ = self.psm.matrix2posquat(np.dot(self.psm.posquat2Matrix(pos_tt_on_table, quat_tt), np.linalg.inv(ee_T_tt)))


		#Get the constrained orientation of the ee
		temp_q =  quaternions.qmult([q_table[3], q_table[0], q_table[1], q_table[2]], [ 0.5, -0.5, -0.5,  0.5])
		rot_ctrl = np.array([temp_q[1], temp_q[2], temp_q[3], temp_q[0]])

		if self.block_gripper:
			gripper_ctrl = 0

		# #Make sure the new pos doesn't go out of bounds!!!
		upper_bound = self.initial_pos + self.target_range + 0.01
		lower_bound = self.initial_pos - self.target_range - 0.01

		pos_ee = np.clip(pos_ee, lower_bound, upper_bound)

		self.psm.setPoseAtEE(pos_ee, rot_ctrl, gripper_ctrl)
Example #14
    def diff_drive(self, robot, index, target_pose):
        """
        this diff drive is very hacky
        it tries to transport the target pose to match an end pose
        by computing the pose difference between current pose and target pose
        then it estimates a cartesian velocity for the end effector to follow.
        It uses differential IK to compute the required joint velocity, and set
        the joint velocity as current step target velocity.
        This technique makes the trajectory very unstable but it still works some times.
        """
        pf = robot.get_compute_functions()['passive_force'](True, True, False)
        max_v = 0.1
        max_w = np.pi
        qpos, qvel, poses = robot.get_observation()
        current_pose: Pose = poses[index]
        delta_p = target_pose.p - current_pose.p
        delta_q = qmult(target_pose.q, qinverse(current_pose.q))

        axis, theta = quat2axangle(delta_q)
        if (theta > np.pi):
            theta -= np.pi * 2

        t1 = np.linalg.norm(delta_p) / max_v
        t2 = theta / max_w
        t = max(np.abs(t1), np.abs(t2), 0.001)
        thres = 0.1
        if t < thres:
            k = (np.exp(thres) - 1) / thres
            t = np.log(k * t + 1)
        v = delta_p / t
        w = theta / t * axis
        target_qvel = robot.get_compute_functions()['cartesian_diff_ik'](
            np.concatenate((v, w)), 9)
        robot.set_action(qpos, target_qvel, pf)
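
The core of the heuristic without the robot API, as a standalone sketch (a
re-derivation from the code above, not the original implementation):

import numpy as np
from transforms3d.quaternions import axangle2quat, quat2axangle

def pose_error_to_twist(delta_p, delta_q, max_v=0.1, max_w=np.pi, thres=0.1):
    axis, theta = quat2axangle(delta_q)
    if theta > np.pi:
        theta -= np.pi * 2
    t = max(np.abs(np.linalg.norm(delta_p) / max_v), np.abs(theta / max_w), 0.001)
    if t < thres:  # soften t near the goal instead of dividing by a tiny value
        k = (np.exp(thres) - 1) / thres
        t = np.log(k * t + 1)
    return delta_p / t, theta / t * axis

v, w = pose_error_to_twist(np.array([0.05, 0.0, 0.0]), axangle2quat([0, 0, 1], 0.2))
print(v, w)  # linear velocity capped at max_v, angular velocity scaled to match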
Example #15
    def diff_drive2(self, robot, index, target_pose, js1, joint_target, js2):
        """
        This is a hackier version of diff_drive.
        It uses specified joints to achieve the target pose of the end
        effector while asking some other specified joints to match a global
        pose.
        pf = robot.get_compute_functions()['passive_force'](True, True, False)
        max_v = 0.1
        max_w = np.pi
        qpos, qvel, poses = robot.get_observation()
        current_pose: Pose = poses[index]
        delta_p = target_pose.p - current_pose.p
        delta_q = qmult(target_pose.q, qinverse(current_pose.q))

        axis, theta = quat2axangle(delta_q)
        if (theta > np.pi):
            theta -= np.pi * 2

        t1 = np.linalg.norm(delta_p) / max_v
        t2 = theta / max_w
        t = max(np.abs(t1), np.abs(t2), 0.001)
        thres = 0.1
        if t < thres:
            k = (np.exp(thres) - 1) / thres
            t = np.log(k * t + 1)
        v = delta_p / t
        w = theta / t * axis
        target_qvel = robot.get_compute_functions()['cartesian_diff_ik'](
            np.concatenate((v, w)), 9)
        for j, target in zip(js2, joint_target):
            qpos[j] = target
        robot.set_action(qpos, target_qvel, pf)
Example #16
def optimize_poses(pred_poses, vos=None, fc_vos=False, target_poses=None,
                   sax=1, saq=1, srx=1, srq=1):
    """
    optimizes poses using either the VOs or the target poses (calculates VOs
    from them)
    :param pred_poses: N x 7
    :param vos: (N-1) x 7
    :param fc_vos: whether to use relative transforms between all frames in a fully
    connected manner, not just consecutive frames
    :param target_poses: N x 7
    :param sax: covariance of pose translation (1 number)
    :param saq: covariance of pose rotation (1 number)
    :param srx: covariance of VO translation (1 number)
    :param srq: covariance of VO rotation (1 number)
    :return:
    """
    pgo = PoseGraphFC() if fc_vos else PoseGraph()
    if vos is None:
        if target_poses is not None:
            # calculate the VOs (in the pred_poses frame)
            vos = np.zeros((len(target_poses) - 1, 7))
            for i in range(len(vos)):
                vos[i, :3] = target_poses[i + 1, :3] - target_poses[i, :3]
                q0 = target_poses[i, 3:]
                q1 = target_poses[i + 1, 3:]
                vos[i, 3:] = txq.qmult(txq.qinverse(q0), q1)
        else:
            print('Specify either VO or target poses')
            return None
    optim_poses = pgo.optimize(poses=pred_poses, vos=vos, sax=sax, saq=saq,
                               srx=srx, srq=srq)
    return optim_poses
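
A sketch of the VO convention used above: the relative rotation is expressed
in the frame of the earlier pose, q_vo = q0^-1 * q1 (txq is assumed to be
transforms3d.quaternions, as in the snippet):

import numpy as np
import transforms3d.quaternions as txq

q0 = txq.axangle2quat([0, 0, 1], 0.2)
q1 = txq.axangle2quat([0, 0, 1], 0.5)
q_vo = txq.qmult(txq.qinverse(q0), q1)
print(txq.quat2axangle(q_vo))  # ~ ([0, 0, 1], 0.3)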
Example #17
def process_poses_quaternion(xyz_in, q_in, mean_t, std_t, align_R, align_t, align_s):
    """
    processes the 1x12 raw pose from dataset by aligning and then normalizing
    :param poses_in: N x 12
    :param mean_t: 3
    :param std_t: 3
    :param align_R: 3 x 3
    :param align_t: 3
    :param align_s: 1
    :return: processed poses (translation + quaternion) N x 7
    """
    poses_out = np.zeros((len(xyz_in), 6))
    poses_out[:, 0:3] = xyz_in
    align_q = txq.mat2quat(align_R)
    
    # align
    for i in range(len(poses_out)):
        q = txq.qmult(align_q, q_in[i])
        q *= np.sign(q[0])  # constrain to hemisphere
        q = qlog(q)
        poses_out[i, 3:] = q
        t = poses_out[i, :3] - align_t
        poses_out[i, :3] = align_s * \
            np.dot(align_R, t[:, np.newaxis]).squeeze()

    # normalize translation
    poses_out[:, :3] -= mean_t
    poses_out[:, :3] /= std_t
    return poses_out
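
The snippet calls a qlog helper that is not shown. A common definition, and an
assumption here, is the quaternion log map to a 3-vector:

import numpy as np

def qlog(q):
    """Log map of a unit quaternion [w, x, y, z] to a 3-vector."""
    if np.all(q[1:] == 0):
        return np.zeros(3)
    return np.arccos(np.clip(q[0], -1.0, 1.0)) * q[1:] / np.linalg.norm(q[1:])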
Example #18
def random_tilt(rob_arm, obj_name_list, min_tilt_degree, max_tilt_degree):
    ### method 1
    #rot_dir = np.random.normal(size=(3,))
    #rot_dir = rot_dir / np.linalg.norm(rot_dir)

    ### method 2
    u = random.uniform(0, 1)
    v = random.uniform(0, 1)
    theta = 2 * math.pi * u
    phi = math.acos(2 * v - 1)
    x = math.sin(theta) * math.sin(phi)
    y = math.cos(theta) * math.sin(phi)
    z = math.cos(phi)
    rot_dir = np.array([x, y, z])
    rot_dir = rot_dir / np.linalg.norm(rot_dir)
    tilt_degree = random.uniform(min_tilt_degree, max_tilt_degree)
    print('rot_dir:', rot_dir)
    print('tilt degree:', tilt_degree)
    w = math.cos(math.radians(tilt_degree / 2))
    x = math.sin(math.radians(tilt_degree / 2)) * rot_dir[0]
    y = math.sin(math.radians(tilt_degree / 2)) * rot_dir[1]
    z = math.sin(math.radians(tilt_degree / 2)) * rot_dir[2]
    rot_quat = [w, x, y, z]
    for obj_name in obj_name_list:
        obj_quat = rob_arm.get_object_quat(obj_name)  # [x,y,z,w]
        obj_quat = [obj_quat[3], obj_quat[0], obj_quat[1],
                    obj_quat[2]]  # change to [w,x,y,z]
        obj_quat = qmult(rot_quat, obj_quat)  # [w,x,y,z]
        obj_quat = [obj_quat[1], obj_quat[2], obj_quat[3],
                    obj_quat[0]]  # change to [x,y,z,w]
        rob_arm.set_object_quat(obj_name, obj_quat)
Example #19
def _distance_from_fixed_angle(angle, fixed_angle):
    """Designed to be mapped, this function finds the smallest rotation between
    two rotations. It assumes a no-symmettry system.

    The algorithm involves converting angles to quarternions, then finding the
    appropriate misorientation. It is tested against the slower more complete
    version finding the joining rotation.

    Parameters
    ----------
    angle : np.array()
        Euler angles representing rotation of interest.
    fixed_angle : np.array()
        Euler angles representing fixed reference rotation.

    Returns
    -------
    theta : np.array()
        Rotation angle between angle and fixed_angle.

    """
    angle = angle[0]
    q_data = euler2quat(*np.deg2rad(angle), axes='rzxz')
    q_fixed = euler2quat(*np.deg2rad(fixed_angle), axes='rzxz')
    if np.abs(2 * (np.dot(q_data, q_fixed))**2 - 1) < 1:  # arccos will work
        # https://math.stackexchange.com/questions/90081/quaternion-distance
        theta = np.arccos(2 * (np.dot(q_data, q_fixed))**2 - 1)
    else:  # slower, but also good
        q_from_mode = qmult(qinverse(q_fixed), q_data)
        axis, theta = quat2axangle(q_from_mode)
        theta = np.abs(theta)

    return np.rad2deg(theta)
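
A worked check of the arccos shortcut against the axis-angle route; for
misorientations below pi the two branches agree:

import numpy as np
from transforms3d.euler import euler2quat
from transforms3d.quaternions import qinverse, qmult, quat2axangle

q1 = euler2quat(*np.deg2rad([10.0, 20.0, 30.0]), axes='rzxz')
q2 = euler2quat(*np.deg2rad([12.0, 18.0, 33.0]), axes='rzxz')
theta_fast = np.arccos(2 * np.dot(q1, q2) ** 2 - 1)
_, theta_slow = quat2axangle(qmult(qinverse(q2), q1))
print(np.rad2deg(theta_fast), np.rad2deg(abs(theta_slow)))  # should agree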
Example #20
def obtain_v_dqs(ndat, delta, q):
    out = np.zeros((ndat, 3))
    for i in range(ndat):
        j = i + delta
        # Since everything is squared, does quat_reduce matter? No, but do so anyway.
        dq = qs.quat_reduce(qops.qmult(qops.qinverse(q[i]), q[j]))
        out[i] = dq[1:4]
    return out
Example #21
    def turn_right(self, delta=0.03):
        """
        Rotate robot base right without physics simulation

        :param delta: delta angle to rotate the base right
        """
        orn = self.robot_body.get_orientation()
        new_orn = qmult(euler2quat(delta, 0, 0), orn)
        self.robot_body.set_orientation(new_orn)
Example #22
def test_qmult_vectorisation(random_quats):
    q1 = np.asarray([2, 3, 4, 5])
    fast = vectorised_qmult(q1, random_quats)

    stored_quat = np.ones_like(random_quats)
    for i, row in enumerate(random_quats):
        stored_quat[i] = qmult(q1, row)

    assert np.allclose(fast, stored_quat)
Example #23
    def rotate_quat_by_euler(xyzw, e_x, e_y, e_z):
        """
        wxyz: transforms3d array format
        xyzw: pybullet format
        """
        wxyz = PhysicsObject.quatXyzwToWxyz(xyzw)
        # euler2mat returns a rotation matrix, but qmult needs a quaternion;
        # convert the euler rotation to a quaternion first
        rot_q = euler.euler2quat(e_x, e_y, e_z)
        wxyz = quaternions.qmult(rot_q, wxyz)
        return PhysicsObject.quatWxyzToXyzw(wxyz)
Example #24
def inv_transform(pose_b, frame_b_a):
    """Inverse transforms  a quaternion pose between reference frames.
        
    Transforms a pose in reference frame B to a pose in reference frame
    A (B is expressed relative to A).
    """
    pos_a = rotate_vector(pose_b[:3], frame_b_a[3:]) + frame_b_a[:3]
    rot_a = qmult(frame_b_a[3:], pose_b[3:])
    pose_a = np.concatenate((pos_a, rot_a))
    return pose_a
Example #25
def transform(pose_a, frame_b_a):
    """Transforms a quaternion pose between reference frames.
        
    Transforms a pose in reference frame A to a pose in reference frame
    B (B is expressed relative to reference frame A).
    """
    pos_b = rotate_vector(pose_a[:3] - frame_b_a[:3], qinverse(frame_b_a[3:]))
    rot_b = qmult(qinverse(frame_b_a[3:]), pose_a[3:])
    pose_b = np.concatenate((pos_b, rot_b))
    return pose_b
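
A round-trip sketch for the pair above: transforming into frame B and back
recovers the original pose:

import numpy as np
from transforms3d.quaternions import axangle2quat, qinverse, qmult, rotate_vector

frame_b_a = np.concatenate([[1.0, 2.0, 0.0], axangle2quat([0, 0, 1], 0.5)])
pose_a = np.concatenate([[0.3, -0.1, 0.2], axangle2quat([0, 1, 0], 0.4)])
pose_b = transform(pose_a, frame_b_a)
print(np.allclose(inv_transform(pose_b, frame_b_a), pose_a))  # expected True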
Example #26
def transformRovioQuaternionToViconCF(rovio_quat):
    """
    Rotates a rovio imu quaternion (in the imu world frame)
    into the vicon world coordinate frame
    """
    rot = np.array([[0, 0, 1], [0, 1, 0], [-1, 0, 0]])
    rel_quat = mat2quat(rot)
    new_quat = qmult(rovio_quat, rel_quat)
    #print "Eulers of imu in vicon world:", [math.degrees(i) for i in quat2euler(new_quat)]
    return new_quat
Example #27
    def _step(self, a):
        self.nframe += 1

        if not self.scene.multiplayer:  # if multiplayer, action first applied to all robots, then global step() called, then _step() for all robots with the same actions
            self.robot.apply_action(a)
            self.scene.global_step()

        state = self.robot.calc_state()  # also calculates self.joints_at_limit
        #self.rewards, done = self.calc_rewards_and_done(a, state)
        self.rewards = self._rewards(a)
        done = self._termination(state)
        debugmode = 0
        if (debugmode):
            print("rewards=")
            print(self.rewards)
            print("sum rewards")
            print(sum(self.rewards))

        self.HUD(state, a, done)
        self.reward += sum(self.rewards)
        self.eps_reward += sum(self.rewards)

        debugmode = 0
        if debugmode:
            print("Eps frame {} reward {}".format(self.nframe, self.reward))
            print("position", self.robot.get_position())
        if self.gui:
            pos = self.robot.get_scaled_position()
            orn = self.robot.get_orientation()
            pos = (pos[0], pos[1], pos[2] + self.tracking_camera['z_offset'])
            p.resetDebugVisualizerCamera(self.tracking_camera['distance'],
                                         self.tracking_camera['yaw'],
                                         self.tracking_camera['pitch'], pos)

        eye_pos = self.robot.eyes.current_position()
        debugmode = 0
        if debugmode:
            print("Camera env eye position", eye_pos)
        x, y, z, w = self.robot.eyes.current_orientation()
        eye_quat = quaternions.qmult([w, x, y, z], self.robot.eye_offset_orn)

        debugmode = 0
        if (debugmode):
            print("episode rewards", sum(self.rewards), "steps", self.nframe)

        #print(self.reward, self.rewards, self.robot.walk_target_dist_xyz)
        episode = None
        if done:
            episode = {'r': self.reward, 'l': self.nframe}
            debugmode = 0
            if debugmode:
                print("return episode:", episode)
        return state, sum(self.rewards), bool(done), dict(eye_pos=eye_pos,
                                                          eye_quat=eye_quat,
                                                          episode=episode)
Example #28
    def get_allocentric_poses(self):
        poses = self.get_poses()
        poses_allocentric = []
        for pose in poses:
            dx = np.arctan2(pose[0], -pose[2])
            dy = np.arctan2(pose[1], -pose[2])
            quat = euler2quat(-dy, -dx, 0, axes='sxyz')
            quat = qmult(qinverse(quat), pose[3:])
            poses_allocentric.append(np.concatenate([pose[:3], quat]))
            #print(quat, pose[3:], pose[:3])
        return poses_allocentric
Example #29
def get_distance_between_two_angles_longform(angle_1, angle_2):
    """
    Using the long form to find the distance between two angles in euler form
    """
    q1 = euler2quat(*np.deg2rad(angle_1), axes='rzxz')
    q2 = euler2quat(*np.deg2rad(angle_2), axes='rzxz')
    # now assume transform of the form MODAL then Something = TOTAL
    # so we want to calculate MODAL^{-1} TOTAL

    q_from_mode = qmult(qinverse(q2), q1)
    axis, angle = quat2axangle(q_from_mode)
    return np.rad2deg(angle)
Example #30
def test_qnorm():
    qi = tq.qeye()
    assert tq.qnorm(qi) == 1
    assert tq.qisunit(qi)
    qi[1] = 0.2
    assert not tq.qisunit(qi)
    # Test norm is sqrt of scalar for multiplication with conjugate.
    # https://en.wikipedia.org/wiki/Quaternion#Conjugation,_the_norm,_and_reciprocal
    for q in quats:
        q_c = tq.qconjugate(q)
        exp_norm = np.sqrt(tq.qmult(q, q_c)[0])
        assert np.allclose(tq.qnorm(q), exp_norm)
Example #32
def test_qmult():
    # Test that quaternion multiplication matches matrix multiplication
    for M1, q1 in eg_pairs[0::4]:
        for M2, q2 in eg_pairs[1::4]:
            q21 = tq.qmult(q2, q1)
            assert_array_almost_equal(np.dot(M2,M1), tq.quat2mat(q21))