Example #1
    def process_marker_transform(markers):
        sum_mat = None
        num = 0.0
        t_latest = rospy.Time(0)
        #print("Got some markers: %s" %markers)
        for marker in markers.data:
            marker_id = "/Marker%s" % marker
            try:
                to = listener.getLatestCommonTime(marker_id, 'odom')

                t = to
                (w_mat, tw) = getMarkerTFFromMap(marker)
                #(w_mat_tr, tw) = getWorldMarkerMatrixFromTF(marker)
                #(w_mat, tw) = getWorldMarkerMatrixFromTF(marker)

                # if marker==412:
                #     print("Marker%s" %marker)
                #     print(w_mat)
                #     print(w_mat_tr)

                if w_mat is None:
                    continue
                if tw is not None:
                    t = tw if tw > to else to

                t_latest = t if t > t_latest else t_latest

                (trans_o,
                 rot_o) = listener.lookupTransform(marker_id, 'odom', to)

                t_o = numpy.dot(tr.translation_matrix(trans_o),
                                tr.quaternion_matrix(rot_o))

                o_mat_i = tr.inverse_matrix(t_o)  # need odom wrt marker
                mat3 = numpy.dot(o_mat_i, w_mat)  # odom wrt world
                mat3 = tr.inverse_matrix(mat3)  # world wrt odom
                num = num + 1

                if sum_mat is None:
                    sum_mat = mat3
                else:
                    sum_mat = sum_mat + mat3
            except Exception:
                # transform lookups can fail for markers that are not
                # currently visible; skip those markers
                continue
        if sum_mat is None:
            return
        avg_mat = sum_mat / num

        transform["transform"] = avg_mat
        transform["time"] = t_latest
Example #3
    def __init__(self,
                 width,
                 height,
                 centered=False,
                 texture=None,
                 rotation=None):

        module3d.Object3D.__init__(self, 'rectangle_%s' % texture)

        self.centered = centered
        self.coord_rotation = rotation
        if rotation is None:
            self.inv_coord_rotation = None
        else:
            self.inv_coord_rotation = tm.inverse_matrix(rotation)

        # create group
        fg = self.createFaceGroup('rectangle')

        # The 4 vertices
        v = self._getVerts(width, height)

        # The 4 uv values
        uv = ((0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0))

        # The face
        fv = [(0, 1, 2, 3)]
        fuv = [(0, 1, 2, 3)]

        self.setCoords(v)
        self.setUVs(uv)
        self.setFaces(fv, fuv, fg.idx)

        self.setCameraProjection(1)
        self.updateIndexBuffer()
Example #4
def duDpw(fx, fy, p_w, T_w_c):
    """
    u = proj(Tcw * p_w)
    """
    T_c_w = tfs.inverse_matrix(T_w_c)
    p_c = tu.transformPt(T_c_w, p_w)

    return np.dot(duDpc(fx, fy, p_c), dpt(T_c_w))
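
`duDpc`, `dpt` and `tu.transformPt` are project helpers not shown here, so as a self-contained sanity check (my sketch, with a stand-in `transform_pt`) the dp_c/dp_w factor of this chain rule can be verified numerically: since p_c = R_c_w * p_w + t_c_w, that Jacobian is exactly the rotation block of T_c_w. This mirrors the central-difference pattern of Example #18 below.

import numpy as np
import transformations as tfs

def transform_pt(T, p):
    # apply a 4x4 homogeneous transform to a 3-vector (stand-in for tu.transformPt)
    return np.dot(T[:3, :3], p) + T[:3, 3]

T_w_c = tfs.random_rotation_matrix()
T_w_c[:3, 3] = np.random.rand(3)
T_c_w = tfs.inverse_matrix(T_w_c)

p_w = np.random.rand(3)
eps = 1e-6
jac = np.zeros((3, 3))
for i in range(3):
    dp = np.zeros(3)
    dp[i] = eps
    jac[:, i] = (transform_pt(T_c_w, p_w + dp) -
                 transform_pt(T_c_w, p_w - dp)) / (2 * eps)

# the analytic Jacobian of p_c w.r.t. p_w is the rotation block of T_c_w
assert np.allclose(jac, T_c_w[:3, :3], atol=1e-5)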
Example #5
	def build_transforms(self):		
		self.idTw = {}			# id->tf_w	  -- transforms from world/odom to tag, aka poses from aruco detector 
		self.engTw = None		
		self.engTid = {}		# id->tf_eng  -- defines the interfeature transforms from id->engine
		
		for m in self.engine_model.markers:
			self.idTw[str(m.id)] = th.pose_stamp_to_tf(m.pose)

		# target_frame is assumed to be defined in the enclosing scope
		# (e.g. a module-level constant or ROS parameter)
		self.engTw = self.idTw[target_frame]

		for m in self.engine_model.markers:
			self.engTid[str(m.id)] = np.dot(self.engTw, transformations.inverse_matrix(self.idTw[str(m.id)]))
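
The naming here follows the aTb convention (pose of frame b expressed in frame a), so engTid = engTw * inverse(idTw) = engTw * wTid. A tiny self-contained sketch (my addition) of that chaining identity:

import numpy as np
import transformations

engTw = transformations.euler_matrix(0.1, 0.2, 0.3)
engTw[:3, 3] = (1.0, 0.0, 0.5)
idTw = transformations.euler_matrix(-0.2, 0.0, 0.4)
idTw[:3, 3] = (0.0, 2.0, 0.0)

engTid = np.dot(engTw, transformations.inverse_matrix(idTw))
# composing engTid with idTw must give back engTw
assert np.allclose(np.dot(engTid, idTw), engTw)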
Example #6
def relative_to_reference(reference, target):
	if len(reference) != len(target):
		raise ValueError("reference and target should be of same length! len(reference): %d, len(target): %d" % (len(reference), len(target)) )

	F_ref = to_homogeneous(reference)
	F_tar = to_homogeneous(target)

	F_ref_inv = inverse_matrix(F_ref)

	T_tar_ref = concatenate_matrices(F_ref_inv, F_tar)

	return from_homogeneous(T_tar_ref, len(reference))
Example #7
def compound(reference, diff, inv_diff = False):
	if len(reference) != len(diff):
		raise ValueError("reference and diff should be of same length! len(reference): %d, len(diff): %d" % (len(reference), len(diff)) )

	F_ref = to_homogeneous(reference)
	T_d =   to_homogeneous(diff)

	if inv_diff:
		T_d = inverse_matrix(T_d)

	F_d = concatenate_matrices( F_ref, T_d )

	return from_homogeneous(F_d, len(reference))
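
`to_homogeneous`/`from_homogeneous` are helpers from the same project and are not shown; `relative_to_reference` and `compound` are inverse operations. A sketch (mine) of the underlying identity, using only `transformations` primitives:

from transformations import (concatenate_matrices, euler_matrix,
                             inverse_matrix, is_same_transform,
                             translation_matrix)

# two poses as homogeneous matrices (standing in for to_homogeneous)
F_ref = concatenate_matrices(translation_matrix((1.0, 2.0, 0.5)),
                             euler_matrix(0.1, -0.2, 0.3))
F_tar = concatenate_matrices(translation_matrix((0.0, 1.0, 1.5)),
                             euler_matrix(-0.3, 0.0, 0.2))

# relative_to_reference: T_tar_ref = F_ref^-1 . F_tar
T_tar_ref = concatenate_matrices(inverse_matrix(F_ref), F_tar)
# compound: F_ref . T_tar_ref recovers the target pose
assert is_same_transform(concatenate_matrices(F_ref, T_tar_ref), F_tar)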
Example #8
    def calc_legs(self, pose: ndarray, base_pose: ndarray,
                  left_side: bool) -> Dict[str, float]:
        if len(self.base_to_hip) == 0:
            self.init_transforms()
        names = []
        positions = []

        legs_positions = self.triangle(pose, left_side)
        legs = ['l1', 'r2', 'l3'] if left_side else ['r1', 'l2', 'r3']

        for leg, position in zip(legs, legs_positions):
            base_to_hip = self.base_to_hip[leg]
            relative = tf.concatenate_matrices(tf.inverse_matrix(base_to_hip),
                                               tf.inverse_matrix(base_pose),
                                               position)
            point = tf.translation_from_matrix(relative)
            a, b, g = self.inverse_kinematics(*point)
            positions += [a, b, g]
            names += [
                f'coxa_joint_{leg}', f'femur_joint_{leg}', f'tibia_joint_{leg}'
            ]

        return {name: position for name, position in zip(names, positions)}
Example #9
def errors(reference, target):
	if len(reference) != len(target):
		raise ValueError("reference and target should be of same length! len(reference): %d, len(target): %d" % (len(reference), len(target)) )

	F_ref = to_homogeneous(reference)
	F_tar = to_homogeneous(target)

	F_ref_inv = inverse_matrix(F_ref)

	T_tar_ref = concatenate_matrices(F_ref_inv, F_tar)

	trans = translation_from_matrix(T_tar_ref)
	ang, _, _ = rotation_from_matrix(T_tar_ref)

	return (numpy.sum( numpy.square(trans) ), ang*ang)
Example #11
File: top.py Project: jkotur/spinning_top
    def ode(self, t, Q):
        w = Q[:3]
        q = Q[3:]
        q = q / np.linalg.norm(q)

        qm = tr.inverse_matrix(tr.quaternion_matrix(q))

        if self.drawstate & GRAVITY:
            self.G = np.resize(np.dot(qm, self.g), 3)
            N = np.cross(self.r, self.G)
        else:
            N = np.zeros(3)

#        print self.G , N , np.linalg.norm(self.G) , np.linalg.norm(w)

        QP = np.empty(7, np.float64)
        QP[:3] = np.dot(self.Mi, (N + np.cross(np.dot(self.M, w), w)))
        qw = np.empty(4, np.float64)
        qw[0] = 0
        qw[1:] = w
        QP[3:] = tr.quaternion_multiply(q, qw) / 2.0

        return QP
Example #12
def main(out_file):
    bundleout = os.path.join(os.path.dirname(out_file),
                             'bundler-tmp/bundle/bundle.out')
    configfile = os.path.join(os.path.dirname(out_file), 'calib.yml')

    lines = NumpyTxt().load(out_file)

    Tlistgroundtruth = generate_ground_truth()
    outliers = []  #[0, 2, 3, 21, 25, 29, 32, 33, 34]
    # filter both lists with the same (pre-filtering) indices
    keep = [i for i in range(len(lines)) if i not in outliers]
    lines = [lines[i] for i in keep]
    Tlistgroundtruth = [Tlistgroundtruth[i] for i in keep]
    Tlistartk = [transform_from_quat(ta, qa)
               for tag, if0, d0, if1, d1, ta, qa, ti, qi, tm, qm in lines]
    Tlistartkinv = [tf.inverse_matrix(transform_from_quat(ti, qi))
               for tag, if0, d0, if1, d1, ta, qa, ti, qi, tm, qm in lines]
    cameraattrs = filewriters.BundlerReader().load(bundleout)
    Tlistbundler = [transform(ca['R'], ca['t']) 
                    for i, ca in enumerate(cameraattrs)
                    if i not in outliers]
    Tlistmutloc = [transform_from_quat(tm, qm)
               for tag, if0, d0, if1, d1, ta, qa, ti, qi, tm, qm in lines]
    conf = mutloc.config.Config(open(configfile))
    Tlistmutloc_comp = [mutloc.compute_best_transform(d0, d1, conf)
               for tag, if0, d0, if1, d1, ta, qa, ti, qi, tm, qm in lines]
    Tlistmutloc = Tlistmutloc_comp

    cmptk.plot_error_bars(Tlistgroundtruth, Tlistartk, Tlistmutloc,
                          Tlistbundler,
                          "media/tiles_turtlebots_artkvsmutloc_%s_errorbar_%%s.pdf",
                         scaling=True)
    cmptk.plot_error_bars(Tlistgroundtruth, Tlistartkinv, Tlistmutloc,
                          Tlistbundler,
                          "media/tiles_turtlebots_artkinv_vs_mutloc_%s_errorbar_%%s.pdf",
                         scaling=True)
    Tlistmutloc = cmptk.scale_and_translate(Tlistgroundtruth, Tlistmutloc)
    Tlistartk   = cmptk.scale_and_translate(Tlistgroundtruth, Tlistartk)
    mplot(Tlistgroundtruth, Tlistmutloc, Tlistartk)
Example #13
def process_markers(markers):
    global listener, br, marker_trans_mat

    sum_mat = None
    num = 0
    t_latest = rospy.Time(0)
    for marker in markers.markers:
        print("Seeing marker %s" % marker.id)
        marker_link = '/Marker' + str(marker.id) + "__link"
        marker_id = "/Marker" + str(marker.id)
        tw = listener.getLatestCommonTime(marker_link, 'world')
        to = listener.getLatestCommonTime(marker_id, 'odom')
        t = tw if tw > to else to
        t_latest = t if t > t_latest else t_latest
        (trans_w, rot_w) = listener.lookupTransform(marker_link, 'world', tw)
        (trans_o, rot_o) = listener.lookupTransform(marker_id, 'odom', to)

        w_mat = numpy.dot(tr.translation_matrix(trans_w),
                          tr.quaternion_matrix(rot_w))
        t_o = tr.concatenate_matrices(tr.translation_matrix(trans_o),
                                      tr.quaternion_matrix(rot_o))
        # Need to do the transform equivalent to 0.25 0 0 0 0.5 -0.5 -0.5 0.5 on marker_id
        #t_o = numpy.dot(t_o, marker_trans_mat);
        o_mat_i = tr.inverse_matrix(t_o)
        mat3 = numpy.dot(w_mat, o_mat_i)
        mat3 = tr.inverse_matrix(mat3)  # was numpy.inverse_matrix, which does not exist
        if sum_mat is None:
            sum_mat = mat3
        else:
            sum_mat = sum_mat + mat3
        num = num + 1

    if sum_mat is None:
        # no markers were processed; nothing to broadcast
        return
    avg_mat = sum_mat / num

    trans = tr.translation_from_matrix(avg_mat)
    rot = tr.quaternion_from_matrix(avg_mat)
    br.sendTransform(trans, rot, t_latest, "odom", "map")
Example #14
    def load(self, di_dict, predictor):
        di = DatasetItem(di_dict)
        self.df['subject'] = pd.Series(data=di.get_subject(), index=di.get_stamps())
        self.df['scenario'] = di.get_scenario()
        self.df['humanhash'] = di.get_humanhash()
        
        for stamp in di.get_stamps():
            T_camdriver_head = di.get_T_camdriver_head(stamp)
            
            assert T_camdriver_head is not None
            
            T_headfrontal_head = T_headfrontal_camdriver.dot(T_camdriver_head)
            self.df.at[stamp, 'gt_roll'], self.df.at[stamp, 'gt_pitch'], self.df.at[stamp, 'gt_yaw'] = tr.euler_from_matrix(T_headfrontal_head)
            self.df.at[stamp, 'gt_x'], self.df.at[stamp, 'gt_y'], self.df.at[stamp, 'gt_z'] = T_camdriver_head[0:3,3]
            
            gt_angle_from_zero, _, _ = tr.rotation_from_matrix(T_headfrontal_head)
            self.df.at[stamp, 'gt_angle_from_zero'] = abs(gt_angle_from_zero)

            self.df.at[stamp, 'occlusion_state'] = di.get_occlusion_state(stamp)
            
            hypo_T_headfrontal_head = predictor.get_T_headfrontal_head(stamp)
            if hypo_T_headfrontal_head is None:
                self.df.at[stamp, 'hypo_roll'] = None
                self.df.at[stamp, 'hypo_pitch'] = None
                self.df.at[stamp, 'hypo_yaw'] = None
                self.df.at[stamp, 'angle_diff'] = None
                self.df.at[stamp, 'hypo_x'] = None
                self.df.at[stamp, 'hypo_y'] = None
                self.df.at[stamp, 'hypo_z'] = None
            else:
                self.df.at[stamp, 'hypo_roll'], self.df.at[stamp, 'hypo_pitch'], self.df.at[stamp, 'hypo_yaw'] = tr.euler_from_matrix(hypo_T_headfrontal_head)

                angle_difference, _, _ = tr.rotation_from_matrix(tr.inverse_matrix(T_headfrontal_head).dot(hypo_T_headfrontal_head))
                self.df.at[stamp, 'angle_diff'] = abs(angle_difference)

                self.df.at[stamp, 'hypo_x'], self.df.at[stamp, 'hypo_y'], self.df.at[stamp, 'hypo_z'] = predictor.get_t_camdriver_head(stamp)
Example #15
File: affine.py Project: loqoman/Vision
    def invert(self):
        self.matrix = xform.inverse_matrix(self.matrix)
        return self  # chainable
Example #16
    def controls_3d(self, dx, dy, zooming_one_shot=False):
        """ Orbiting the camera is implemented the following way:

        - the rotation is split into a rotation around the *world* Z axis
          (controlled by the horizontal mouse motion along X) and a
          rotation around the *X* axis of the camera (pitch) *shifted to
          the focal origin* (the world origin for now). This is controlled
          by the vertical motion of the mouse (Y axis).
        - as a result, the resulting transformation of the camera in the
          world frame C' is:
            C' = (T · Rx · T⁻¹ · (Rz · C)⁻¹)⁻¹
          where:
          - C is the original camera transformation in the world frame,
          - Rz is the rotation along the Z axis (in the world frame)
          - T is the translation camera -> world (ie, the inverse of the
            translation part of C),
          - Rx is the rotation around X in the (translated) camera frame """

        CAMERA_TRANSLATION_FACTOR = 0.01
        CAMERA_ROTATION_FACTOR = 0.01

        if not (self.is_rotating or self.is_panning or self.is_zooming):
            return

        current_pos = self.current_cam.transformation[:3, 3].copy()
        distance = numpy.linalg.norm(self.focal_point - current_pos)

        if self.is_rotating:
            rotation_camera_x = dy * CAMERA_ROTATION_FACTOR
            rotation_world_z = dx * CAMERA_ROTATION_FACTOR
            world_z_rotation = transformations.euler_matrix(0, 0, rotation_world_z)
            cam_x_rotation = transformations.euler_matrix(rotation_camera_x, 0, 0)

            after_world_z_rotation = numpy.dot(world_z_rotation, self.current_cam.transformation)

            inverse_transformation = transformations.inverse_matrix(after_world_z_rotation)

            translation = transformations.translation_matrix(
                transformations.decompose_matrix(inverse_transformation)[3])
            inverse_translation = transformations.inverse_matrix(translation)

            new_inverse = numpy.dot(inverse_translation, inverse_transformation)
            new_inverse = numpy.dot(cam_x_rotation, new_inverse)
            new_inverse = numpy.dot(translation, new_inverse)

            self.current_cam.transformation = transformations.inverse_matrix(new_inverse).astype(numpy.float32)

        if self.is_panning:
            tx = -dx * CAMERA_TRANSLATION_FACTOR * distance
            ty = dy * CAMERA_TRANSLATION_FACTOR * distance
            cam_transform = transformations.translation_matrix((tx, ty, 0)).astype(numpy.float32)
            self.current_cam.transformation = numpy.dot(self.current_cam.transformation, cam_transform)

        if self.is_zooming:
            tz = dy * CAMERA_TRANSLATION_FACTOR * distance
            cam_transform = transformations.translation_matrix((0, 0, tz)).astype(numpy.float32)
            self.current_cam.transformation = numpy.dot(self.current_cam.transformation, cam_transform)

        if zooming_one_shot:
            self.is_zooming = False

        self.update_view_camera()
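
A quick property check of the docstring's formula C' = (T · Rx · T⁻¹ · (Rz · C)⁻¹)⁻¹ (my sketch, not from the source): since both rotations pivot through the focal origin, the orbit must preserve the camera's distance to it.

import numpy
import transformations

# a random camera pose C in the world frame
C = transformations.random_rotation_matrix()
C[:3, 3] = numpy.random.rand(3) + 1.0

Rz = transformations.euler_matrix(0, 0, 0.3)  # yaw around world Z
Rx = transformations.euler_matrix(0.2, 0, 0)  # pitch around camera X

inverse_transformation = transformations.inverse_matrix(numpy.dot(Rz, C))
T = transformations.translation_matrix(
    transformations.decompose_matrix(inverse_transformation)[3])

# C' = (T . Rx . T^-1 . (Rz . C)^-1)^-1, as in the docstring
new_inverse = transformations.concatenate_matrices(
    T, Rx, transformations.inverse_matrix(T), inverse_transformation)
C_prime = transformations.inverse_matrix(new_inverse)

# orbiting leaves the distance to the focal origin unchanged
assert numpy.isclose(numpy.linalg.norm(C_prime[:3, 3]),
                     numpy.linalg.norm(C[:3, 3]))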
Example #17
File: affine.py Project: loqoman/Vision
    def asInverse(self):
        return Affine3(xform.inverse_matrix(self.matrix))
Example #18
    # (excerpt from a numeric-Jacobian test script; pt, T, eps, dpt_ana and
    # cam are defined earlier in the original file)
    jac = np.zeros((3, 3))
    for i in range(3):
        pt[i] = pt[i] + eps
        ptp = tu.transformPt(T, pt)
        pt[i] = pt[i] - 2 * eps
        ptm = tu.transformPt(T, pt)
        pt[i] = pt[i] + eps
        jac[:, i] = (ptp - ptm) / (2 * eps)

    print("Ana vs Num for point jacobian:\n{0}\n{1}".format(dpt_ana, jac))

    #%% test jacobian of image coordinates
    T_w_c = tu.randomTranformation()
    p_c = np.hstack((np.random.rand(2) - 0.5, np.random.rand() + 2))
    T_c_w = tfs.inverse_matrix(T_w_c)
    p_w = tu.transformPt(T_w_c, p_c)
    u = cam.project3d(p_c)
    assert u is not None

    du_dse3_global_ana = duDse3_global(cam.fx, cam.fy, p_w, T_w_c)
    jac_global = np.zeros((2, 6))
    for i in range(6):
        eps_vec = np.zeros(6)
        eps_vec[i] = eps
        # global
        Tp = np.dot(tu.exp_se3(eps_vec), T_w_c)
        ptp = tu.transformPt(tfs.inverse_matrix(Tp), p_w)
        up = cam.project3d(ptp)
        Tm = np.dot(tu.exp_se3(-eps_vec), T_w_c)
        ptm = tu.transformPt(tfs.inverse_matrix(Tm), p_w)
Example #19
def main(args):
    # load object urdf file names, tower poses, and block dimensions from csv
    csv_data = []
    poses_path = os.path.join(args.tower_dir, 'obj_poses.csv')
    with open(poses_path) as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        for row in csv_reader:
            pos = [float(v) for v in row[1:4]]
            orn = [float(v) for v in row[4:8]]
            dims = [float(d) for d in row[8:11]]
            row_info = (row[0], pos, orn, dims)
            csv_data.append(row_info)

    # copy urdf files to where pb_robot expects them (will remove them later)
    for (urdf_filename, _, _, _) in csv_data:
        pb_robot_path = os.path.join(os.getcwd(), 'pb_robot/' + urdf_filename)
        urdf_path = os.path.join(os.path.join(args.tower_dir, 'urdfs'),
                                 urdf_filename)
        copyfile(urdf_path, pb_robot_path)

    # start pybullet
    utils.connect(use_gui=True)
    utils.disable_real_time()
    utils.set_default_camera()
    utils.enable_gravity()

    # Add floor object
    plane_id = p.loadURDF("plane_files/plane.urdf", (0., 0., 0.))

    # Create robot object and get transform of robot hand in EE frame
    robot = pb_robot.panda.Panda()
    hand_id = 10
    p_hand_world, q_hand_world = p.getLinkState(robot.id, hand_id)[:2]
    p_ee_world, q_ee_world = robot.arm.eeFrame.get_link_pose()
    p_hand_ee = transformation(p_hand_world,
                               p_ee_world,
                               q_ee_world,
                               inverse=True)
    q_hand_ee = quat_math(q_ee_world, q_hand_world, True, False)
    # the hand frame is a little inside the hand, want it to be at the point
    # where the hand and finger meet
    p_offset = [0., 0., 0.01]
    trans_hand_ee = compose_matrix(translate=np.add(p_hand_ee, p_offset),
                                   angles=euler_from_quaternion(q_hand_ee))

    # params
    num_blocks = len(csv_data)
    tower_offset = (0.3, 0.0, 0.0)  # tower center in x-y plane
    block_spread_y = 0.6  # width of block initial placement area in y dimension
    delta_y = block_spread_y / num_blocks  # distance between blocks in y dimension
    min_y = -block_spread_y / 2  # minimum block y dimension
    xp_com_world_init = 0.6  # initial position of blocks in x dimension

    for n, row in enumerate(csv_data):
        # unpack csv data
        urdf_filename = row[0]
        p_com_world_goal = row[1]
        q_com_world_goal = row[2]
        dims = row[3]

        # place block in world (on floor at goal orientation)
        block = pb_robot.body.createBody(urdf_filename)
        p_com_cog = p.getDynamicsInfo(block.id, -1)[3]
        up = get_up_axis(q_com_world_goal)
        zp_com_world_init = dims[up] / 2 + p_com_cog[up]
        yp_com_world_init = min_y + delta_y * n
        p_com_world_init = [
            xp_com_world_init, yp_com_world_init, zp_com_world_init
        ]
        block.set_pose((p_com_world_init, q_com_world_goal))

        # transformation from robot EE to object com when grasped (top down grasp)
        # needed when defining Grab()
        q_cog_hand = quat_math(q_com_world_goal, q_hand_world, True, False)
        trans_cog_hand = compose_matrix(translate=[0., 0., dims[up] / 2],
                                        angles=[0., np.pi,
                                                np.pi])  #angles=q_cog_hand)
        trans_cog_ee = concatenate_matrices(trans_hand_ee, trans_cog_hand)
        trans_com_cog = compose_matrix(translate=p_com_cog,
                                       angles=[0., 0., 0., 1.])
        trans_com_ee = concatenate_matrices(trans_cog_ee, trans_com_cog)

        # transformation of ee in world frame (at initial block position)
        trans_com_world_init = compose_matrix(translate=p_com_world_init,
                                              angles=q_com_world_goal)
        trans_ee_world_init = concatenate_matrices(trans_com_world_init,
                                                   trans_com_ee)

        # grasp block
        robot.arm.hand.Open()
        grasp_q = robot.arm.ComputeIK(trans_ee_world_init)
        if grasp_q is not None:
            utils.wait_for_user("Move to desired pose?")
            robot.arm.SetJointValues(grasp_q)
        else:
            print("Cannot find valid config")
        robot.arm.hand.Close()
        robot.arm.Grab(block, inverse_matrix(trans_com_ee))

        utils.wait_for_user(
            'Just grasped. Going to move to grasp pose. Ready?')
        robot.arm.SetJointValues(grasp_q)

        # transformation of EE in world frame (at final block position)
        p_com_world_goal = np.add(p_com_world_goal, tower_offset)
        trans_com_world_goal = compose_matrix(
            translate=p_com_world_goal,
            angles=euler_from_quaternion(q_com_world_goal))
        trans_ee_world_goal = concatenate_matrices(
            trans_com_world_goal,
            trans_com_ee)  # should be the other way around

        # move block to goal pose
        goal_q = robot.arm.ComputeIK(trans_ee_world_goal)
        if goal_q is not None:
            utils.wait_for_user("Move to desired pose?")
            robot.arm.SetJointValues(goal_q)
        else:
            print("Cannot find valid config")

        # release
        utils.wait_for_user('Going to release. Ready?')
        robot.arm.hand.Open()
        robot.arm.Release(block)

    for _ in range(1000):
        p.stepSimulation()
    utils.wait_for_user("Done?")
    utils.disconnect()

    # remove urdf files from pb_robot
    for (urdf_filename, _, _, _) in csv_data:
        pb_robot_path = os.path.join(os.getcwd(), 'pb_robot/' + urdf_filename)
        os.remove(pb_robot_path)
Example #20
# H0_2 = H0_1.dot(H1_3).dot(H3_2)
#
# With SLAM flight enabled and GPS disabled, NED and body frame are equivalent, since the
# UAV has no idea of true north. NED origin is defined based on the starting orientation of the UAV.
if REALSENSE_ORIENTATION == 0:

    # Upside down, rotated 40 degrees, sideways mount
    # H3_2 = H3_A.dot(HA_B).dot(HB_2)
    # This breaks down into a 180 deg. flip about z (A), a 40 deg. rotation about x (B) and
    # a left hand to right hand coord. system change.

    H0_1 = np.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, -1, 0, 0], [0, 0, 0, 1]])

    HA_3 = np.array([[-1, 0, 0, 0], [0, -1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])

    H3_A = tf.inverse_matrix(HA_3)

    HA_B = np.array(
        [[1, 0, 0, 0],
         [0, np.cos(40 * np.pi / 180), -np.sin(40 * np.pi / 180), 0],
         [0, np.sin(40 * np.pi / 180),
          np.cos(40 * np.pi / 180), 0], [0, 0, 0, 1]])

    HB_2 = np.array([[1, 0, 0, 0], [0, 0, -1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])

    H3_2 = H3_A.dot(HA_B).dot(HB_2)

elif REALSENSE_ORIENTATION == 1:

    # Forward-facing, USB port to the right
    H0_1 = np.array([[0, 0, -1, 0], [1, 0, 0, 0], [0, -1, 0, 0], [0, 0, 0, 1]])
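
Two side checks on the matrices above (my addition): HA_B is a pure 40-degree rotation about x, so tf.euler_matrix reproduces it, and HA_3, a 180-degree flip about z, is its own inverse, which makes the H3_A = tf.inverse_matrix(HA_3) step easy to sanity-check.

import numpy as np
import transformations as tf

HA_B = np.array(
    [[1, 0, 0, 0],
     [0, np.cos(40 * np.pi / 180), -np.sin(40 * np.pi / 180), 0],
     [0, np.sin(40 * np.pi / 180), np.cos(40 * np.pi / 180), 0],
     [0, 0, 0, 1]])
# a rotation about x by 40 degrees, expressed via euler_matrix
assert np.allclose(tf.euler_matrix(40 * np.pi / 180, 0, 0), HA_B)

HA_3 = np.array([[-1, 0, 0, 0], [0, -1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
# a 180 degree flip about z is involutory: H3_A equals HA_3
assert np.allclose(tf.inverse_matrix(HA_3), HA_3)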
Example #21
def world2camera_from_map(camera2world_map) -> np.ndarray:
    camera2world = camera2world_from_map(camera2world_map)
    return inverse_matrix(camera2world)
Example #23
import math

import numpy as np

import transformations
from points import *

NOM = NOM.T
#NOM_A = NOM_A.T
MEAS = MEAS.T
#MEAS_A = MEAS_A.T

print("\nNOMINAL\n\n", NOM, "\n\nMEASURED\n\n", MEAS, "\n")
#print("\nNOMINAL_A\n\n", NOM_A, "\n\nMEASURED_A\n\n", MEAS_A, "\n")

# Rotation matrix may be pre- or post-multiplied (changing between a
# right-handed system and a left-handed system).
R = transformations.superimposition_matrix(MEAS, NOM, usesvd=True)
#R = transformations.inverse_matrix(R)
print("Rotation matrix calculated from points difference\n\n", R, "\n")
rot = transformations.inverse_matrix(R)
rob = transformations.euler_matrix(math.radians(OLDBASE[3]),
                                   math.radians(OLDBASE[4]),
                                   math.radians(OLDBASE[5]), axes='sxyz')
tob = transformations.translation_matrix(OLDBASE[0:3])
robn = np.dot(tob, rob)  # transform of the old base
print("Rotation matrix from old base\n\n", robn, "\n")
robnew = np.dot(rot, robn)
print("Rotation matrix RESULT\n\n", robnew, "\n")
scale, shear, angles, translate, perspective = transformations.decompose_matrix(robnew)

roll = math.degrees(angles[0])
pitch = math.degrees(angles[1])
yaw = math.degrees(angles[2])

print("OLD BASE(%.3f,%.3f,%.3f,%.3f,%.3f,%.3f)\n" % (OLDBASE[0], OLDBASE[1], OLDBASE[2], OLDBASE[3], OLDBASE[4], OLDBASE[5]))
print("NEW BASE(%.3f,%.3f,%.3f,%.3f,%.3f,%.3f)\n" % (translate[0], translate[1], translate[2], roll, pitch, yaw))
print("----- COPY & PASTE -----\n\n[Base]")