Example #1
    def transformPlanToBDIFrame(self, plan):
        if (self.pose_bdi is None):
            # print "haven't received POSE_BDI"
            return

        # TODO: This transformation should be rewritten using the LOCAL_TO_LOCAL_BDI frame
        # instead of using FK here

        t_bodybdi  = transformUtils.transformFromPose(self.pose_bdi.pos, self.pose_bdi.orientation)
        t_bodybdi.PostMultiply()

        current_pose = self.jointController.q
        t_bodymain = transformUtils.transformFromPose( current_pose[0:3]  , transformUtils.rollPitchYawToQuaternion(current_pose[3:6])   )
        t_bodymain.PostMultiply()

        # iterate and transform
        self.bdi_plan = plan.decode( plan.encode() ) # decode and encode ensures deepcopy
        for i, footstep in enumerate(self.bdi_plan.footsteps):
            step = footstep.pos

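            # express the footstep relative to the planner's body pose, then re-anchor it at the BDI-estimated body pose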
            t_step = frameFromPositionMessage(step)
            t_body_to_step = vtk.vtkTransform()
            t_body_to_step.DeepCopy(t_step)
            t_body_to_step.PostMultiply()
            t_body_to_step.Concatenate(t_bodymain.GetLinearInverse())

            t_stepbdi = vtk.vtkTransform()
            t_stepbdi.DeepCopy(t_body_to_step)
            t_stepbdi.PostMultiply()
            t_stepbdi.Concatenate(t_bodybdi)
            footstep.pos = positionMessageFromFrame(t_stepbdi)

        if (self.showBDIPlan is True):
            self.drawBDIFootstepPlan()
Example #2
    def assignFrames(self):

        # foot to box
        self.footToBox = transformUtils.transformFromPose(np.array([-0.6436723 ,  0.18848073, -1.13987699]),
                                             np.array([ 0.99576385,  0.        ,  0.        , -0.09194753]))

        # self.palmToBox = transformUtils.transformFromPose(np.array([-0.13628039, -0.12582009,  0.33638863]), np.array([-0.69866187,  0.07267815,  0.70683338,  0.08352274]))

        self.palmToBox = transformUtils.transformFromPose(np.array([-0.13516451, -0.12463758,  0.25173153]), np.array([-0.69867721,  0.07265162,  0.70682793,  0.08346358]))

        pinchToBox = self.getPinchToPalmFrame()
        pinchToBox.PostMultiply()
        pinchToBox.Concatenate(self.palmToBox)

        self.pinchToBox = pinchToBox
Example #3
def loadObjectMeshes(affordanceManager, registrationResultFilename,
                     firstFrameToWorldTransform):
    """
    Loads the object meshes from the registration_result.yaml file
    :param affordanceManager:
    :param registrationResultFilename: filename of registration_result.yaml, should be an absolute path
    :param firstFrameToWorldTransform: vtkTransform taking the first camera frame to the world frame
    :return: None
    """

    stream = file(registrationResultFilename)
    registrationResult = yaml.load(stream)

    for objName, data in registrationResult.iteritems():
        objectMeshFilename = data[
            'filename']  # should be relative to getLabelFusionDataDir()
        if len(objectMeshFilename) == 0:
            objectMeshFilename = getObjectMeshFilename(objName)
        else:
            objectMeshFilename = os.path.join(getLabelFusionDataDir(),
                                              objectMeshFilename)

        # figure out object pose in world frame
        # we have stored object pose in first camera frame
        objectToFirstFrame = transformUtils.transformFromPose(
            data['pose'][0], data['pose'][1])
        objectToWorld = transformUtils.concatenateTransforms(
            [objectToFirstFrame, firstFrameToWorldTransform])
        pose = transformUtils.poseFromTransform(objectToWorld)

        loadAffordanceModel(affordanceManager,
                            name=objName,
                            filename=objectMeshFilename,
                            pose=pose)
Example #4
    def testEncodeDecode(self, fields):

        encoded = json.dumps(fields, cls=ikconstraintencoder.ConstraintEncoder)
        decoded = json.loads(encoded, object_hook=ikconstraintencoder.ConstraintDecoder)

        del decoded['class']
        fields = FieldContainer(**decoded)

        del fields.options['class']
        fields.options = ikparameters.IkParameters(**fields.options)

        constraints = []

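        # rebuild each constraint object from its decoded dict, using the stored 'class' name to pick the type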
        for c in fields.constraints:
            objClass = getattr(ikconstraints, c['class'])
            del c['class']
            obj = objClass()
            constraints.append(obj)

            for attr, value in c.iteritems():
                if isinstance(value, dict) and 'position' in value and 'quaternion' in value:
                    value = transformUtils.transformFromPose(value['position'], value['quaternion'])
                setattr(obj, attr, value)

        fields.constraints = constraints

        return fields
Example #5
    def update(self):

        previousTargetFrame = transformUtils.transformFromPose(self.lastTargetPosition, self.lastTargetQuaternion)
        self.storeTargetPose()

        cameraToTarget, focalDistance = self.getCameraToTargetTransform(previousTargetFrame)

        targetToWorld = self.targetFrame.transform
        # cameraToTarget = self.boomTransform
        cameraToWorld = transformUtils.concatenateTransforms([cameraToTarget, targetToWorld])

        c = self.camera

        focalPoint = cameraToWorld.TransformPoint([self.focalDistance, 0, 0])
        focalPoint = targetToWorld.GetPosition()

        # print 'focal distance:', self.focalDistance
        # print 'cameraToTarget pos:', cameraToTarget.GetPosition()
        # print 'cameraToWorld pos:', cameraToWorld.GetPosition()
        # print 'targetToWorld pos:', targetToWorld.GetPosition()
        # print 'focal pos:', focalPoint

        c.SetPosition(cameraToWorld.GetPosition())
        c.SetFocalPoint(focalPoint)
        self.view.render()
Example #6
 def transformGeometry(polyDataList, geom):
     t = transformUtils.transformFromPose(geom.position, geom.quaternion)
     polyDataList = [
         filterUtils.transformPolyData(polyData, t)
         for polyData in polyDataList
     ]
     return polyDataList
Example #7
 def ROSPoseToVtkTransform(pose):
     pos = [pose.position.x, pose.position.y, pose.position.z]
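     # transformFromPose expects the quaternion as [w, x, y, z]; ROS geometry_msgs poses store the scalar term last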
     quat = [
         pose.orientation.w, pose.orientation.x, pose.orientation.y,
         pose.orientation.z
     ]
     return transformUtils.transformFromPose(pos, quat)
Example #8
    def loadObjectMeshes(self):
        om = self.globalsDict['om']
        objData = cutils.getResultsConfig()['object-registration']
        dataDir = cutils.getCorlDataDir()

        stream = file(self.pathDict['registrationResult'])
        registrationResult = yaml.load(stream)

        cameraToWorld = cutils.getDefaultCameraToWorld()

        folder = om.getOrCreateContainer('data files')
        for objName, data in registrationResult.iteritems():

            filename = data['filename']
            if len(filename) == 0:
                filename = cutils.getObjectMeshFilename(objName)

            polyData = ioUtils.readPolyData(filename)
            color = vis.getRandomColor()
            obj = vis.showPolyData(polyData,
                                   name=objName,
                                   parent=folder,
                                   color=color)
            self.storedColors[objName] = color

            objToWorld = transformUtils.transformFromPose(*data['pose'])
            self.objectToWorld[objName] = objToWorld
            #objToCamera = transformUtils.concatenateTransforms([objToWorld, cameraToWorld.GetLinearInverse()])
            obj.actor.SetUserTransform(objToWorld)
Example #9
    def setupConfig(self):
        self.config = dict()
        self.config['scan'] = dict()
        self.config['scan']['pose_list'] = ['scan_left', 'scan_right']
        self.config['scan']['joint_speed'] = 60
        self.config['grasp_speed'] = 20
        self.config['home_pose_name'] = 'above_table_pre_grasp'
        self.config['grasp_nominal_direction'] = np.array([1, 0,
                                                           0])  # x forwards
        self.config['grasp_to_ee'] = dict()

        self.config['grasp_to_ee']['translation'] = dict()
        self.config['grasp_to_ee']['translation']['x'] = 9.32362425e-02
        self.config['grasp_to_ee']['translation']['y'] = 0
        self.config['grasp_to_ee']['translation']['z'] = 0

        self.config['grasp_to_ee']['orientation'] = dict()
        self.config['grasp_to_ee']['orientation']['w'] = 0.97921432
        self.config['grasp_to_ee']['orientation']['x'] = -0.20277454
        self.config['grasp_to_ee']['orientation']['y'] = 0.00454233
        self.config['grasp_to_ee']['orientation']['z'] = -0.00107904

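        # note: spartanUtils.transformFromPose takes the nested translation/orientation dict built above,
        # while transformUtils.transformFromPose (used below) takes separate position and quaternion arguments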
        self.graspToIiiwaLinkEE = spartanUtils.transformFromPose(
            self.config['grasp_to_ee'])
        self.iiwaLinkEEToGraspFrame = self.graspToIiiwaLinkEE.GetLinearInverse()

        pos = [-0.15, 0, 0]
        quat = [1, 0, 0, 0]
        self.preGraspToGraspTransform = transformUtils.transformFromPose(
            pos, quat)
Example #10
    def update(self):

        previousTargetFrame = transformUtils.transformFromPose(
            self.lastTargetPosition, self.lastTargetQuaternion)
        self.storeTargetPose()

        cameraToTarget, focalDistance = self.getCameraToTargetTransform(
            previousTargetFrame)

        targetToWorld = self.targetFrame.transform
        #cameraToTarget = self.boomTransform
        cameraToWorld = transformUtils.concatenateTransforms(
            [cameraToTarget, targetToWorld])

        c = self.camera

        focalPoint = cameraToWorld.TransformPoint([self.focalDistance, 0, 0])
        focalPoint = targetToWorld.GetPosition()

        #print 'focal distance:', self.focalDistance
        #print 'cameraToTarget pos:', cameraToTarget.GetPosition()
        #print 'cameraToWorld pos:', cameraToWorld.GetPosition()
        #print 'targetToWorld pos:', targetToWorld.GetPosition()
        #print 'focal pos:', focalPoint

        c.SetPosition(cameraToWorld.GetPosition())
        c.SetFocalPoint(focalPoint)
        self.view.render()
Example #11
    def _handleRigidBodies(self, bodies):

        if not bodies:
            return

        folder = self.getRootFolder()

        for body in bodies:
            name = 'Body ' + str(body.id)

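            # reorder the incoming (x, y, z, w) quaternion into the (w, x, y, z) order expected by transformFromPose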
            x,y,z,w = body.quat
            quat = (w,x,y,z)
            bodyToOptitrack = transformUtils.transformFromPose(body.xyz, quat)

            bodyToWorld = transformUtils.concatenateTransforms((bodyToOptitrack, self.optitrackToWorld))


            obj = folder.findChild(name)
            if not obj:
                geometry = self._makeMarkers(body.marker_xyz)
                geometry = filterUtils.transformPolyData(geometry, bodyToOptitrack.GetLinearInverse())
                obj = vis.showPolyData(geometry, name, parent=folder, color=[1,0,0])
                frameObj = vis.addChildFrame(obj)
                frameObj.setProperty('Scale', 0.2)
                frameObj.setProperty('Visible', True)

            obj.getChildFrame().copyFrame(bodyToWorld)
Example #12
    def getCameraPoseAtUTime(self, utime):
        idx = np.searchsorted(self.poseTimes, utime, side='left')
        if idx == len(self.poseTimes):
            idx = len(self.poseTimes) - 1

        (pos, quat) = self.poses[idx]
        return transformUtils.transformFromPose(pos, quat)
Example #13
def frameFromRigidTransformMessage(msg):
    '''
    Given a bot_core.rigid_transform_t message, returns a vtkTransform
    '''
    trans = msg.trans
    quat = msg.quat
    return transformUtils.transformFromPose(trans, quat)
Example #14
    def loadObjectMeshes(self):
        stream = file(self.pathDict['registrationResult'])
        registrationResult = yaml.load(stream)

        folder = om.getOrCreateContainer('data files')
        for objName, data in registrationResult.iteritems():

            filename = data['filename']
            if len(filename) == 0:
                filename = utils.getObjectMeshFilename(self.objectData,
                                                       objName)
            else:
                filename = os.path.join(utils.getLabelFusionDataDir(),
                                        filename)

            polyData = ioUtils.readPolyData(filename)
            color = self.getColorFromIndex(objName)
            obj = vis.showPolyData(polyData,
                                   name=objName,
                                   parent=folder,
                                   color=color)
            self.storedColors[objName] = color

            objToWorld = transformUtils.transformFromPose(*data['pose'])
            self.objectToWorld[objName] = objToWorld
            obj.actor.SetUserTransform(objToWorld)
Example #15
    def loadStoredObjects(self):
        if not os.path.isfile(self.pathDict['registrationResult']):
            return

        stream = file(self.pathDict['registrationResult'])
        registrationResult = yaml.load(stream)

        for objName, data in registrationResult.iteritems():
            objectMeshFilename = data[
                'filename']  # should be relative to getLabelFusionDataDir()
            if len(objectMeshFilename) == 0:
                objectMeshFilename = utils.getObjectMeshFilename(
                    self.objectData, objName)
            else:
                objectMeshFilename = os.path.join(
                    utils.getLabelFusionDataDir(), objectMeshFilename)

            objectToFirstFrame = transformUtils.transformFromPose(
                data['pose'][0], data['pose'][1])
            objectToWorld = transformUtils.concatenateTransforms(
                [objectToFirstFrame, self.firstFrameToWorldTransform])

            polyData = ioUtils.readPolyData(objectMeshFilename)
            polyDataWorld = filterUtils.transformPolyData(
                polyData, objectToWorld)

            d = dict()
            d['transform'] = objectToWorld
            d['polyData'] = polyDataWorld
            self.storedObjects[objName] = d
Example #16
def frameFromRigidTransformMessage(msg):
    '''
    Given a bot_core.rigid_transform_t message, returns a vtkTransform
    '''
    trans = msg.trans
    quat = msg.quat
    return transformUtils.transformFromPose(trans, quat)
Example #17
    def testEncodeDecode(self, fields):

        encoded = json.dumps(fields, cls=ikconstraintencoder.ConstraintEncoder)
        decoded = json.loads(encoded,
                             object_hook=ikconstraintencoder.ConstraintDecoder)

        del decoded['class']
        fields = FieldContainer(**decoded)

        del fields.options['class']
        fields.options = ikparameters.IkParameters(**fields.options)

        constraints = []

        for c in fields.constraints:
            objClass = getattr(ikconstraints, c['class'])
            del c['class']
            obj = objClass()
            constraints.append(obj)

            for attr, value in c.iteritems():
                if isinstance(
                        value, dict
                ) and 'position' in value and 'quaternion' in value:
                    value = transformUtils.transformFromPose(
                        value['position'], value['quaternion'])
                setattr(obj, attr, value)

        fields.constraints = constraints

        return fields
Example #18
 def test(self):
     pos = [2.00213459e-01, -1.93298822e-12, 8.99913227e-01]
     quat = [
         9.99999938e-01, -1.16816462e-12, 3.51808464e-04, -5.36917849e-12
     ]
     targetFrame = transformUtils.transformFromPose(pos, quat)
     return self.runIK(targetFrame)
Example #19
    def setup_horizontal_mug_grasp(self):
        self.load_object_model()

        name = "horizontal_mug_grasp"
        pos = self._config[name]['pos']
        quat = self._config[name]['quat']
        self._T_gripper_object = transformUtils.transformFromPose(pos, quat)
        self.update()
Example #20
 def setTransform(self, pos, quat):
     self.transform = transformUtils.transformFromPose(pos, quat)
     for g in self.geometry:
         childFrame = g.polyDataItem.getChildFrame()
         if childFrame:
             childFrame.copyFrame(self.transform)
         else:
             g.polyDataItem.actor.SetUserTransform(self.transform)
Example #21
    def setPolyData(self, polyData):

        if polyData.GetNumberOfPoints():
            originPose = self.getProperty('Origin')
            pos, quat = originPose[:3], originPose[3:]
            t = transformUtils.transformFromPose(pos, quat)
            polyData = filterUtils.transformPolyData(polyData, t.GetLinearInverse())
        PolyDataItem.setPolyData(self, polyData)
Example #22
 def setTransform(self, pos, quat):
     self.transform = transformUtils.transformFromPose(pos, quat)
     for g in self.geometry:
         childFrame = g.polyDataItem.getChildFrame()
         if childFrame:
             childFrame.copyFrame(self.transform)
         else:
             g.polyDataItem.actor.SetUserTransform(self.transform)
Example #23
def transformFromROSPoseMsg(msg):
    pos = [msg.position.x, msg.position.y, msg.position.z]
    quat = [
        msg.orientation.w, msg.orientation.x, msg.orientation.y,
        msg.orientation.z
    ]

    return transformUtils.transformFromPose(pos, quat)
Example #24
    def setPolyData(self, polyData):

        if polyData.GetNumberOfPoints():
            originPose = self.getProperty('Origin')
            pos, quat = originPose[:3], originPose[3:]
            t = transformUtils.transformFromPose(pos, quat)
            polyData = filterUtils.transformPolyData(polyData, t.GetLinearInverse())
        PolyDataItem.setPolyData(self, polyData)
Example #25
 def getCameraPoseAtTime(t):
     ind = poseTimes.searchsorted(t)
     if ind == len(poseTimes):
         ind = len(poseTimes)-1
     pose = poses[ind]
     pos = pose[:3]
     quat = pose[6], pose[3], pose[4], pose[5] # quat data from file is ordered as x, y, z, w
     return transformUtils.transformFromPose(pos, quat)
Example #26
    def assignFrames(self):

        # foot to box
        self.footToBox = transformUtils.transformFromPose(
            np.array([-0.6436723, 0.18848073, -1.13987699]),
            np.array([0.99576385, 0., 0., -0.09194753]))

        # self.palmToBox = transformUtils.transformFromPose(np.array([-0.13628039, -0.12582009,  0.33638863]), np.array([-0.69866187,  0.07267815,  0.70683338,  0.08352274]))

        self.palmToBox = transformUtils.transformFromPose(
            np.array([-0.13516451, -0.12463758, 0.25173153]),
            np.array([-0.69867721, 0.07265162, 0.70682793, 0.08346358]))

        pinchToBox = self.getPinchToPalmFrame()
        pinchToBox.PostMultiply()
        pinchToBox.Concatenate(self.palmToBox)

        self.pinchToBox = pinchToBox
Example #27
    def getPinchToHandFrame(self):
        pinchToHand = transformUtils.transformFromPose(
            np.array([-1.22270636e-07, -3.11575498e-01, 0.00000000e+00]),
            np.array([
                3.26794897e-07, -2.42861455e-17, -1.85832253e-16,
                1.00000000e+00
            ]))

        return pinchToHand
Example #28
    def setupConfig(self):
        self.config = dict()
        self.config['base_frame_id'] = "base"
        self.config['end_effector_frame_id'] = "iiwa_link_ee"
        self.config[
            'pick_up_distance'] = 0.25  # distance to move above the table after grabbing the object

        self.config["sleep_time_for_sensor_collect"] = 0.1
        self.config['scan'] = dict()
        self.config['scan']['pose_list'] = [
            'scan_left_close', 'scan_above_table', 'scan_right'
        ]
        self.config['scan']['joint_speed'] = 45
        self.config['grasp_speed'] = 20

        normal_speed = 30
        self.config['speed'] = dict()
        self.config['speed']['stow'] = normal_speed
        self.config['speed']['pre_grasp'] = normal_speed
        self.config['speed']['grasp'] = 10

        self.config['home_pose_name'] = 'above_table_pre_grasp'
        self.config['grasp_nominal_direction'] = np.array([1, 0,
                                                           0])  # x forwards
        self.config['grasp_to_ee'] = dict()

        self.config['grasp_to_ee']['translation'] = dict()
        # self.config['grasp_to_ee']['translation']['x'] = 9.32362425e-02
        self.config['grasp_to_ee']['translation']['x'] = 0.085
        self.config['grasp_to_ee']['translation']['y'] = 0
        self.config['grasp_to_ee']['translation']['z'] = 0

        self.config['grasp_to_ee']['orientation'] = dict()
        self.config['grasp_to_ee']['orientation']['w'] = 0.97921432
        self.config['grasp_to_ee']['orientation']['x'] = -0.20277454
        self.config['grasp_to_ee']['orientation']['y'] = 0.00454233
        self.config['grasp_to_ee']['orientation']['z'] = -0.00107904

        self.config["object_interaction"] = dict()
        self.config["object_interaction"]["speed"] = 10
        self.config["object_interaction"]["rotate_speed"] = 30
        self.config["object_interaction"]["pickup_distance"] = 0.15
        # self.config["object_interaction"]["drop_distance_above_grasp"] = 0.035 # good for shoes
        self.config["object_interaction"][
            "drop_distance_above_grasp"] = 0.002  # good for mugs
        self.config["object_interaction"]["drop_location"] = [
            0.65, 0, 0.5
        ]  # z coordinate is overwritten later

        self.graspToIiwaLinkEE = spartanUtils.transformFromPose(
            self.config['grasp_to_ee'])
        self.iiwaLinkEEToGraspFrame = self.graspToIiwaLinkEE.GetLinearInverse()

        pos = [-0.15, 0, 0]
        quat = [1, 0, 0, 0]
        self.preGraspToGraspTransform = transformUtils.transformFromPose(
            pos, quat)
Example #29
def getFirstFrameToWorldTransform(transformsFile):
    if os.path.isfile(transformsFile):
        print("using user specified transform")
        stream = file(transformsFile)
        transformYaml = yaml.load(stream)
        pose = transformYaml['firstFrameToWorld']
        transform = transformUtils.transformFromPose(pose[0], pose[1])
        return transform
    else:
        return vtk.vtkTransform()
Example #30
    def loadSavedData(self):
        if not os.path.exists(self.savedTransformFilename):
            return

        d = CorlUtils.getDictFromYamlFilename(self.savedTransformFilename)
        if 'table frame' not in d:
            return
        (pos, quat) = d['table frame']
        t = transformUtils.transformFromPose(pos, quat)
        self.tableFrame = vis.updateFrame(t, 'table frame', scale=0.15)
Example #31
def setBoxGraspTarget(position, rpy, dimensions):
    rot_quat = transformUtils.rollPitchYawToQuaternion(
        [math.radians(x) for x in rpy])
    t = transformUtils.transformFromPose(position, rot_quat)

    om.removeFromObjectModel(om.findObjectByName('box'))
    obj = makeBox()
    obj.setProperty('Dimensions', dimensions)
    obj.getChildFrame().copyFrame(t)
    obj.setProperty('Surface Mode', 'Wireframe')
    obj.setProperty('Color', [1, 0, 0])
Example #32
def frameFromPositionMessage(positionMessage):
    '''
    Given an lcmdrc.position_t message, returns a vtkTransform
    '''
    trans = positionMessage.translation
    quat = positionMessage.rotation

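    # build the position list and the [w, x, y, z] quaternion list in the order transformFromPose expects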
    trans = [trans.x, trans.y, trans.z]
    quat = [quat.w, quat.x, quat.y, quat.z]

    return transformUtils.transformFromPose(trans, quat)
Example #33
def frameFromPositionMessage(msg):
    '''
    Given a bot_core.position_t message, returns a vtkTransform
    '''
    trans = msg.translation
    quat = msg.rotation

    trans = [trans.x, trans.y, trans.z]
    quat = [quat.w, quat.x, quat.y, quat.z]

    return transformUtils.transformFromPose(trans, quat)
Example #34
def frameFromPositionMessage(positionMessage):
    '''
    Given a bot_core.position_t message, returns a vtkTransform
    '''
    trans = positionMessage.translation
    quat = positionMessage.rotation

    trans = [trans.x, trans.y, trans.z]
    quat = [quat.w, quat.x, quat.y, quat.z]

    return transformUtils.transformFromPose(trans, quat)
Example #35
def onICPCorrection(m):

    t = transformUtils.transformFromPose(m.trans, m.quat)

    _icpTransforms.append(t)
    print 'appending icp transform %d' % len(_icpTransforms)

    if len(_icpTransforms) == 1:
        storeInitialTransform()

    for func in _icpCallbacks:
        func(t)
Example #36
    def assign_defaults(self):
        """
        Assign some default values for testing
        :return:
        """

        # T_goal_object
        pos = np.array([1.05195929e+00, -4.72744658e-01, -8.71958505e-04])
        quat = np.array([0.38988508, -0.0049511, -0.0066051, 0.92082646])

        T_goal_obs = transformUtils.transformFromPose(pos, quat)
        self._T_goal_object = T_goal_obs

        # T_W_G
        pos = np.array([0.74018759, 0.01560964, 0.09624007])
        quat = np.array([0.70365365, -0.16242853, 0.68819834, 0.06979651])
        self.T_W_G = transformUtils.transformFromPose(pos, quat)

        grasp_data = GraspData(self.T_W_G)
        grasp_data.gripper = Gripper.make_schunk_gripper()
        self.grasp_data = grasp_data
Example #37
    def getPlanningFrame(self):
        platformToWorld = self.platform.getChildFrame().transform
        worldToPlatform = platformToWorld.GetLinearInverse()
        f = self.robotSystem.footstepsDriver.getFeetMidPoint(self.robotSystem.robotStateModel)
        footPosition = f.GetPosition()
        footPosInPlatformFrame = worldToPlatform.TransformPoint(footPosition)

        planFramePosInPlatformFrame = self.dimensions/2.0
        planFramePosInPlatformFrame[1] = footPosInPlatformFrame[1]
        planFramePosInWorld = platformToWorld.TransformPoint(planFramePosInPlatformFrame)
        # now we want to find the homogeneous transform for the planning Frame
        _,quat = transformUtils.poseFromTransform(platformToWorld)
        self.planToWorld = transformUtils.transformFromPose(planFramePosInWorld,quat)
Example #38
def transformFromPose(d):
    pos = [0]*3
    pos[0] = d['translation']['x']
    pos[1] = d['translation']['y']
    pos[2] = d['translation']['z']

    quat = [0] * 4
    quat[0] = d['quaternion']['w']
    quat[1] = d['quaternion']['x']
    quat[2] = d['quaternion']['y']
    quat[3] = d['quaternion']['z']

    return transformUtils.transformFromPose(pos, quat)
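
A minimal usage sketch (illustrative values, not from the source) showing the dict layout this helper expects:

pose = {
    'translation': {'x': 0.5, 'y': 0.0, 'z': 0.2},
    'quaternion': {'w': 1.0, 'x': 0.0, 'y': 0.0, 'z': 0.0},  # identity rotation, scalar term first
}
t = transformFromPose(pose)  # vtkTransform translated to (0.5, 0.0, 0.2)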
Example #39
    def testMoveToFrame(self):
        pos = [0.51148583, 0.0152224, 0.50182436]
        quat = [0.68751512, 0.15384615, 0.69882778, -0.12366916]
        targetFrame = transformUtils.transformFromPose(pos, quat)
        poseDict = spartanUtils.poseFromTransform(targetFrame)
        poseMsg = rosUtils.ROSPoseMsgFromPose(poseDict)

        poseStamped = geometry_msgs.msg.PoseStamped()
        poseStamped.pose = poseMsg
        poseStamped.header.frame_id = "base"
        self.poseStamped = poseStamped

        self.robotService.moveToCartesianPosition(poseStamped, 30)
Example #40
    def handle_message(self, msg):
        if set(self._link_name_published) != set(msg.link_name):
            # Removes the folder completely.
            self.remove_folder()
            self._link_name_published = msg.link_name

        folder = self._get_folder()

        for i in range(0, msg.num_links):
            name = msg.link_name[i]
            transform = transformUtils.transformFromPose(
                msg.position[i], msg.quaternion[i])
            # `vis.updateFrame` will either create or update the frame
            # according to its name within its parent folder.
            vis.updateFrame(transform, name, parent=folder, scale=0.1)
Example #41
def testEuler():
    '''
    Test some euler conversions
    '''
    quat = transformations.random_quaternion()
    pos = np.random.rand(3)
    frame = transformUtils.transformFromPose(pos, quat)
    mat = transformUtils.getNumpyFromTransform(frame)

    rpy = transformUtils.rollPitchYawFromTransform(frame)
    rpy2 = transformations.euler_from_matrix(mat)

    print rpy
    print rpy2
    assert np.allclose(rpy, rpy2)
Example #42
def reconstructConstraints(constraints):
    '''
    Convert dicts (decoded from json) back to the original
    constraint classes using the 'class' information in the dict
    '''
    objs = []

    for c in constraints:
        objClass = getattr(ikconstraints, c['class'])
        del c['class']
        obj = objClass()
        objs.append(obj)

        for attr, value in c.iteritems():
            if isinstance(value, dict) and 'position' in value and 'quaternion' in value:
                value = transformUtils.transformFromPose(value['position'], value['quaternion'])
            setattr(obj, attr, value)

    return objs
Example #43
def transformFromPose(d):
    """
    Returns a transform from a standard encoding in dict format
    :param d:
    :return:
    """
    pos = [0]*3
    pos[0] = d['translation']['x']
    pos[1] = d['translation']['y']
    pos[2] = d['translation']['z']

    quatDict = utils.getQuaternionFromDict(d)
    quat = [0]*4
    quat[0] = quatDict['w']
    quat[1] = quatDict['x']
    quat[2] = quatDict['y']
    quat[3] = quatDict['z']

    return transformUtils.transformFromPose(pos, quat)
Example #44
def testTransform():
    '''
    test transformFromPose --> getAxesFromTransform is same as quat --> matrix
    '''

    quat = transformations.random_quaternion()
    pos = np.random.rand(3)

    frame = transformUtils.transformFromPose(pos, quat)
    axes = transformUtils.getAxesFromTransform(frame)
    mat = transformUtils.getNumpyFromTransform(frame)

    assert np.allclose(mat[:3,:3], np.array(axes).transpose())

    mat2 = transformations.quaternion_matrix(quat)
    mat2[:3,3] = pos

    print mat
    print mat2
    assert np.allclose(mat, mat2)
Example #45
def transformFromDict(pose_data):
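    # missing fields fall back to the identity pose: zero translation and a [w, x, y, z] = [1, 0, 0, 0] quaternion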
    return transformUtils.transformFromPose(
        pose_data.get("translation", [0, 0, 0]),
        pose_data.get("quaternion", [1, 0, 0, 0]))
Example #46
 def get_camera_pose(self, idx):
     pos, quat = self.poses[idx]
     transform = transformUtils.transformFromPose(pos, quat)
     return pos, quat, transform
Example #47
    def __init__(self):

        pose = transformUtils.poseFromTransform(vtk.vtkTransform())


        self.pointcloud  = ioUtils.readPolyData(director.getDRCBaseDir() + '/software/models/rehearsal_pointcloud.vtp')
        self.pointcloudPD = vis.showPolyData(self.pointcloud, 'coursemodel', parent=None)
        segmentation.makeMovable(self.pointcloudPD, transformUtils.transformFromPose(array([0, 0, 0]), array([ 1.0,  0.        ,  0.        , 0.0])))

        self.originFrame = self.pointcloudPD.getChildFrame()

        t = transformUtils.transformFromPose(array([-4.39364111, -0.51507392, -0.73125563]), array([ 0.93821625,  0.        ,  0.        , -0.34604951]))
        self.valveWalkFrame  = vis.updateFrame(t, 'ValveWalk', scale=0.2,visible=True, parent=self.pointcloudPD)

        t = transformUtils.transformFromPose(array([-3.31840048,  0.36408685, -0.67413123]), array([ 0.93449475,  0.        ,  0.        , -0.35597691]))
        self.drillPreWalkFrame = vis.updateFrame(t, 'DrillPreWalk', scale=0.2,visible=True, parent=self.pointcloudPD)

        t = transformUtils.transformFromPose(array([-2.24553758, -0.52990939, -0.73255338]), array([ 0.93697004,  0.        ,  0.        , -0.34940972]))
        self.drillWalkFrame  = vis.updateFrame(t, 'DrillWalk', scale=0.2,visible=True, parent=self.pointcloudPD)

        t = transformUtils.transformFromPose(array([-2.51306835, -0.92994004, -0.74173541 ]), array([-0.40456572,  0.        ,  0.        ,  0.91450893]))
        self.drillWallWalkFarthestSafeFrame  = vis.updateFrame(t, 'DrillWallWalkFarthestSafe', scale=0.2,visible=True, parent=self.pointcloudPD)

        t = transformUtils.transformFromPose(array([-2.5314524 , -0.27401861, -0.71302976]), array([ 0.98691519,  0.        ,  0.        , -0.16124022]))
        self.drillWallWalkBackFrame  = vis.updateFrame(t, 'DrillWallWalkBack', scale=0.2,visible=True, parent=self.pointcloudPD)

        t = transformUtils.transformFromPose(array([-1.16122318,  0.04723203, -0.67493468]), array([ 0.93163145,  0.        ,  0.        , -0.36340451]))
        self.surprisePreWalkFrame  = vis.updateFrame(t, 'SurprisePreWalk', scale=0.2,visible=True, parent=self.pointcloudPD)

        t = transformUtils.transformFromPose(array([-0.5176186 , -1.00151554, -0.70650799]), array([ 0.84226497,  0.        ,  0.        , -0.53906374]))
        self.surpriseWalkFrame  = vis.updateFrame(t, 'SurpriseWalk', scale=0.2,visible=True, parent=self.pointcloudPD)

        t = transformUtils.transformFromPose(array([-0.69100097, -0.43713269, -0.68495922]), array([ 0.98625075,  0.        ,  0.        , -0.16525575]))
        self.surpriseWalkBackFrame  = vis.updateFrame(t, 'SurpriseWalkBack', scale=0.2,visible=True, parent=self.pointcloudPD)

        t = transformUtils.transformFromPose(array([ 0.65827322, -0.08028796, -0.77370834]), array([ 0.94399977,  0.        ,  0.        , -0.3299461 ]))
        self.terrainPreWalkFrame = vis.updateFrame(t, 'TerrainPreWalk', scale=0.2,visible=True, parent=self.pointcloudPD)

        t = transformUtils.transformFromPose(array([ 5.47126425, -0.09790393, -0.70504679]), array([ 1.,  0.,  0.,  0.]))
        self.stairsPreWalkFrame = vis.updateFrame(t, 'StairsPreWalk', scale=0.2,visible=True, parent=self.pointcloudPD)

        self.frameSync = vis.FrameSync()
        self.frameSync.addFrame(self.originFrame)
        self.frameSync.addFrame(self.pointcloudPD.getChildFrame(), ignoreIncoming=True)
        self.frameSync.addFrame(self.valveWalkFrame, ignoreIncoming=True)
        self.frameSync.addFrame(self.drillPreWalkFrame, ignoreIncoming=True)
        self.frameSync.addFrame(self.drillWalkFrame, ignoreIncoming=True)
        self.frameSync.addFrame(self.drillWallWalkFarthestSafeFrame, ignoreIncoming=True)
        self.frameSync.addFrame(self.drillWallWalkBackFrame, ignoreIncoming=True)
        self.frameSync.addFrame(self.surprisePreWalkFrame, ignoreIncoming=True)
        self.frameSync.addFrame(self.surpriseWalkFrame, ignoreIncoming=True)
        self.frameSync.addFrame(self.surpriseWalkBackFrame, ignoreIncoming=True)
        self.frameSync.addFrame(self.terrainPreWalkFrame, ignoreIncoming=True)
        self.frameSync.addFrame(self.stairsPreWalkFrame, ignoreIncoming=True)
Example #48
 def repositionFromDescription(self, desc):
     position, quat = desc['pose']
     t = transformUtils.transformFromPose(position, quat)
     self.getChildFrame().copyFrame(t)
Example #49
 def setTransform(self, pos, quat):
     self.transform = transformUtils.transformFromPose(pos, quat)
     for g in self.geometry:
         g.polyDataItem.getChildFrame().copyFrame(self.transform)
Example #50
 def onBDIPose(self, m):
     t = transformUtils.transformFromPose(m.pos, m.orientation)
     if self.timer.elapsed() > 1.0:
         self.transforms.append((self.timer.now(), t))
         self.timer.reset()
Example #51
    def __init__(self):
        self.aprilTagSubsciber = lcmUtils.addSubscriber('APRIL_TAG_TO_CAMERA_LEFT', lcmbotcore.rigid_transform_t, self.onAprilTag)
        pose = transformUtils.poseFromTransform(vtk.vtkTransform())
        desc = dict(classname='MeshAffordanceItem', Name='polaris',
                    Filename='software/models/polaris/polaris_cropped.vtp', pose=pose)
        self.pointcloudAffordance = segmentation.affordanceManager.newAffordanceFromDescription(desc)
        self.originFrame = self.pointcloudAffordance.getChildFrame()
        self.originToAprilTransform = transformUtils.transformFromPose(np.array([-0.038508  , -0.00282131, -0.01000079]),
            np.array([  9.99997498e-01,  -2.10472556e-03,  -1.33815696e-04, 7.46246794e-04])) # offset for  . . . who knows why

        # t = transformUtils.transformFromPose(np.array([ 0.14376024,  0.95920689,  0.36655712]), np.array([ 0.28745842,  0.90741428, -0.28822068,  0.10438304]))

        t = transformUtils.transformFromPose(np.array([ 0.10873244,  0.93162364,  0.40509084]),
            np.array([ 0.32997378,  0.88498408, -0.31780588,  0.08318602]))
        self.leftFootEgressStartFrame  = vis.updateFrame(t, 'left foot start', scale=0.2,visible=True, parent=self.pointcloudAffordance)


        t = transformUtils.transformFromPose(np.array([ 0.265,  0.874,  0.274]),
                                             np.array([ 0.35290731,  0.93443693, -0.04181263,  0.02314636]))
        self.leftFootEgressMidFrame  = vis.updateFrame(t, 'left foot mid', scale=0.2,visible=True, parent=self.pointcloudAffordance)

        t = transformUtils.transformFromPose(np.array([ 0.54339115,  0.89436275,  0.26681047]),
                                             np.array([ 0.34635985,  0.93680077, -0.04152008,  0.02674412]))
        self.leftFootEgressOutsideFrame  = vis.updateFrame(t, 'left foot outside', scale=0.2,visible=True, parent=self.pointcloudAffordance)


        # pose = [np.array([-0.78962299,  0.44284877, -0.29539116]), np.array([ 0.54812954,  0.44571517, -0.46063251,  0.53731713])] #old location
        # pose = [np.array([-0.78594663,  0.42026626, -0.23248139]), np.array([ 0.54812954,  0.44571517, -0.46063251,  0.53731713])] # updated location

        pose = [np.array([-0.78594663,  0.42026626, -0.23248139]), np.array([ 0.53047159,  0.46554963, -0.48086192,  0.52022615])] # update orientation

        desc = dict(classname='CapsuleRingAffordanceItem', Name='Steering Wheel', uuid=newUUID(), pose=pose,
                    Color=[1, 0, 0], Radius=float(0.18), Segments=20)
        self.steeringWheelAffordance = segmentation.affordanceManager.newAffordanceFromDescription(desc)


        pose = [np.array([-0.05907324,  0.80460545,  0.45439687]), np.array([ 0.14288327,  0.685944  , -0.703969  ,  0.11615873])]

        desc = dict(classname='BoxAffordanceItem', Name='pedal', Dimensions=[0.12, 0.33, 0.04], pose=pose, Color=[0,1,0])
        self.pedalAffordance = segmentation.affordanceManager.newAffordanceFromDescription(desc)


        # t = transformUtils.transformFromPose(np.array([ 0.04045136,  0.96565326,  0.25810111]),
        #     np.array([ 0.26484648,  0.88360091, -0.37065556, -0.10825996]))

        # t = transformUtils.transformFromPose(np.array([ -4.34908919e-04,   9.24901627e-01,   2.65614116e-01]),
        #     np.array([ 0.25022251,  0.913271  , -0.32136359, -0.00708626]))

        t = transformUtils.transformFromPose(np.array([ 0.0384547 ,  0.89273742,  0.24140762]),
            np.array([ 0.26331831,  0.915796  , -0.28055337,  0.11519963]))

        self.leftFootPedalSwingFrame = vis.updateFrame(t,'left foot pedal swing', scale=0.2, visible=True, parent=self.pointcloudAffordance)

        t = transformUtils.transformFromPose(np.array([-0.9012598 , -0.05709763,  0.34897024]),
            np.array([ 0.03879584,  0.98950919,  0.03820214,  0.13381721]))

        self.leftFootDrivingFrame = vis.updateFrame(t,'left foot driving', scale=0.2, visible=True, parent=self.pointcloudAffordance)
        # t = transformUtils.transformFromPose(np.array([-0.12702725,  0.92068409,  0.27209386]),
        #     np.array([ 0.2062255 ,  0.92155886, -0.30781119,  0.11598529]))


        # t = transformUtils.transformFromPose(np.array([-0.14940375,  0.90347275,  0.23812658]),
        #     np.array([ 0.27150909,  0.91398724, -0.28877386,  0.0867167 ]))

        # t = transformUtils.transformFromPose(np.array([-0.17331227,  0.87879312,  0.25645152]),
        #     np.array([ 0.26344489,  0.91567196, -0.28089824,  0.11505581]))
        # self.leftFootDrivingKneeInFrame = vis.updateFrame(t,'left foot driving knee in', scale=0.2, visible=True, parent=self.pointcloudAffordance)


        t = transformUtils.transformFromPose(np.array([-0.12702725,  0.92068409,  0.27209386]),
            np.array([ 0.2062255 ,  0.92155886, -0.30781119,  0.11598529]))
        self.leftFootDrivingKneeInFrame = vis.updateFrame(t,'left foot driving knee in', scale=0.2, visible=True, parent=self.pointcloudAffordance)

        t = transformUtils.transformFromPose(np.array([-0.13194951,  0.89871423,  0.24956246]),
            np.array([ 0.21589082,  0.91727326, -0.30088849,  0.14651633]))
        self.leftFootOnPedal = vis.updateFrame(t,'left foot on pedal', scale=0.2, visible=True, parent=self.pointcloudAffordance)

        t = transformUtils.transformFromPose(np.array([ 0.17712239,  0.87619935,  0.27001509]),
            np.array([ 0.33484372,  0.88280787, -0.31946488,  0.08044963]))

        self.leftFootUpFrame = vis.updateFrame(t,'left foot up frame', scale=0.2, visible=True, parent=self.pointcloudAffordance)


        t = transformUtils.transformFromPose(np.array([ 0.47214939, -0.04856998,  0.01375837]),
            np.array([  6.10521653e-03,   4.18621358e-04,   4.65520611e-01,
         8.85015882e-01]))
        self.rightHandGrabFrame = vis.updateFrame(t,'right hand grab bar', scale=0.2, visible=True, parent=self.pointcloudAffordance)

        self.frameSync = vis.FrameSync()
        self.frameSync.addFrame(self.originFrame)
        self.frameSync.addFrame(self.pointcloudAffordance.getChildFrame(), ignoreIncoming=True)
        self.frameSync.addFrame(self.leftFootEgressStartFrame, ignoreIncoming=True)
        self.frameSync.addFrame(self.leftFootEgressMidFrame, ignoreIncoming=True)
        self.frameSync.addFrame(self.leftFootEgressOutsideFrame, ignoreIncoming=True)
        self.frameSync.addFrame(self.steeringWheelAffordance.getChildFrame(), ignoreIncoming=True)
        self.frameSync.addFrame(self.pedalAffordance.getChildFrame(), ignoreIncoming=True)
        self.frameSync.addFrame(self.leftFootPedalSwingFrame, ignoreIncoming=True)
        self.frameSync.addFrame(self.leftFootDrivingFrame, ignoreIncoming=True)
        self.frameSync.addFrame(self.leftFootDrivingKneeInFrame, ignoreIncoming=True)
        self.frameSync.addFrame(self.leftFootOnPedal, ignoreIncoming=True)
        self.frameSync.addFrame(self.leftFootUpFrame, ignoreIncoming=True)
        self.frameSync.addFrame(self.rightHandGrabFrame, ignoreIncoming=True)
Example #52
 def setTransform(self, pos, quat):
     trans = transformUtils.transformFromPose(pos, quat)
     self.transform.SetMatrix(trans.GetMatrix())
     self.transform.Modified()
Example #53
    def getPinchToHandFrame(self):
        pinchToHand = transformUtils.transformFromPose(
            np.array([-1.22270636e-07, -3.11575498e-01, 0.00000000e+00]),
            np.array([3.26794897e-07, -2.42861455e-17, -1.85832253e-16, 1.00000000e+00]))

        return pinchToHand
Example #54
    def drawFootstepPlan(self, msg, folder, left_color=None, right_color=None, alpha=1.0):
        for step in folder.children():
            om.removeFromObjectModel(step)
        allTransforms = []
        volFolder = getWalkingVolumesFolder()
        map(om.removeFromObjectModel, volFolder.children())
        slicesFolder = getTerrainSlicesFolder()
        map(om.removeFromObjectModel, slicesFolder.children())


        for i, footstep in enumerate(msg.footsteps):
            trans = footstep.pos.translation
            trans = [trans.x, trans.y, trans.z]
            quat = footstep.pos.rotation
            quat = [quat.w, quat.x, quat.y, quat.z]

            footstepTransform = transformUtils.transformFromPose(trans, quat)

            allTransforms.append(footstepTransform)


            if i < 2:
                continue

            if footstep.is_right_foot:
                mesh = getRightFootMesh()
                if (right_color is None):
                    color = getRightFootColor()
                else:
                    color = right_color
            else:
                mesh = getLeftFootMesh()
                if (left_color is None):
                    color = getLeftFootColor()
                else:
                    color = left_color

            # add gradual shading to steps to indicate destination
            frac = float(i)/ float(msg.num_steps-1)
            this_color = [0,0,0]
            this_color[0] = 0.25*color[0] + 0.75*frac*color[0]
            this_color[1] = 0.25*color[1] + 0.75*frac*color[1]
            this_color[2] = 0.25*color[2] + 0.75*frac*color[2]


            if self.show_contact_slices:
                self.drawContactVolumes(footstepTransform, color)

            contact_pts_left, contact_pts_right = FootstepsDriver.getContactPts()
            if footstep.is_right_foot:
                sole_offset = np.mean(contact_pts_right, axis=0)
            else:
                sole_offset = np.mean(contact_pts_left, axis=0)

            t_sole_prev = frameFromPositionMessage(msg.footsteps[i-2].pos)
            t_sole_prev.PreMultiply()
            t_sole_prev.Translate(sole_offset)
            t_sole = transformUtils.copyFrame(footstepTransform)
            t_sole.Translate(sole_offset)
            yaw = np.arctan2(t_sole.GetPosition()[1] - t_sole_prev.GetPosition()[1],
                             t_sole.GetPosition()[0] - t_sole_prev.GetPosition()[0])
            T_terrain_to_world = transformUtils.frameFromPositionAndRPY([t_sole_prev.GetPosition()[0], t_sole_prev.GetPosition()[1], 0],
                                                                        [0, 0, math.degrees(yaw)])
            path_dist = np.array(footstep.terrain_path_dist)
            height = np.array(footstep.terrain_height)
            # if np.any(height >= trans[2]):
            terrain_pts_in_local = np.vstack((path_dist, np.zeros(len(footstep.terrain_path_dist)), height))
            d = DebugData()
            for j in range(terrain_pts_in_local.shape[1]-1):
                d.addLine(terrain_pts_in_local[:,j], terrain_pts_in_local[:,j+1], radius=0.01)
            obj = vis.showPolyData(d.getPolyData(), 'terrain slice', parent=slicesFolder, visible=slicesFolder.getProperty('Visible'), color=[.8,.8,.3])
            obj.actor.SetUserTransform(T_terrain_to_world)

            renderInfeasibility = False
            if renderInfeasibility and footstep.infeasibility > 1e-6:
                d = DebugData()
                start = allTransforms[i-1].GetPosition()
                end = footstepTransform.GetPosition()
                d.addArrow(start, end, 0.02, 0.005,
                           startHead=True,
                           endHead=True)
                vis.showPolyData(d.getPolyData(), 'infeasibility %d -> %d' % (i-2, i-1), parent=folder, color=[1, 0.2, 0.2])

            stepName = 'step %d' % (i-1)

            obj = vis.showPolyData(mesh, stepName, color=this_color, alpha=alpha, parent=folder)
            obj.setIcon(om.Icons.Feet)
            frameObj = vis.showFrame(footstepTransform, stepName + ' frame', parent=obj, scale=0.3, visible=False)
            obj.actor.SetUserTransform(footstepTransform)
            obj.addProperty('Support Contact Groups', footstep.params.support_contact_groups, attributes=om.PropertyAttributes(enumNames=['Whole Foot', 'Front 2/3', 'Back 2/3']))
            obj.properties.setPropertyIndex('Support Contact Groups', 0)
            obj.footstep_index = i
            obj.footstep_property_callback = obj.properties.connectPropertyChanged(functools.partial(self.onFootstepPropertyChanged, obj))

            self.drawContactPts(obj, footstep, color=this_color)
Example #55
 def transformGeometry(polyDataList, geom):
     t = transformUtils.transformFromPose(geom.position, geom.quaternion)
     polyDataList = [filterUtils.transformPolyData(polyData, t) for polyData in polyDataList]
     return polyDataList
Example #56
 def setTransform(self, pos, quat):
     self.transform = transformUtils.transformFromPose(pos, quat)
     for g in self.geometry:
         g.polyDataItem.actor.SetUserTransform(self.transform)