Example #1
def makeGraspFrames(obj, graspOffset, pregraspOffset=(-0.08, 0, 0), suffix=''):

    pos, rpy = graspOffset
    objectToWorld = obj.getChildFrame().transform
    graspToObject = transformUtils.frameFromPositionAndRPY(pos, rpy)
    preGraspToGrasp = transformUtils.frameFromPositionAndRPY(
        pregraspOffset, [0, 0, 0])
    graspToWorld = transformUtils.concatenateTransforms(
        [graspToObject, objectToWorld])
    preGraspToWorld = transformUtils.concatenateTransforms(
        [preGraspToGrasp, graspToWorld])

    graspFrameName = 'grasp to world%s' % suffix
    pregraspFrameName = 'pregrasp to world%s' % suffix

    om.removeFromObjectModel(om.findObjectByName(graspFrameName))
    om.removeFromObjectModel(om.findObjectByName(pregraspFrameName))

    graspFrame = vis.showFrame(graspToWorld,
                               graspFrameName,
                               scale=0.1,
                               parent=obj,
                               visible=False)
    preGraspFrame = vis.showFrame(preGraspToWorld,
                                  pregraspFrameName,
                                  scale=0.1,
                                  parent=obj,
                                  visible=False)

    obj.getChildFrame().getFrameSync().addFrame(graspFrame,
                                                ignoreIncoming=True)
    graspFrame.getFrameSync().addFrame(preGraspFrame, ignoreIncoming=True)
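The convention in every example here is that a transform named aToB maps coordinates expressed in frame a into frame b, and transformUtils.concatenateTransforms applies its list left to right, so [aToB, bToC] yields aToC. Below is a minimal sketch of that behavior, assuming director's vtk-backed transformUtils composes via vtkTransform post-multiplication; the frames and numbers are illustrative only:

import numpy as np
import vtk
from director import transformUtils

# graspToObject maps grasp-frame points into the object frame;
# objectToWorld maps object-frame points into the world frame.
graspToObject = transformUtils.frameFromPositionAndRPY([0, 0, 0.1], [0, 90, 0])
objectToWorld = transformUtils.frameFromPositionAndRPY([0.5, 0, 0], [0, 0, 45])

# Left-to-right composition: apply graspToObject first, then objectToWorld.
graspToWorld = transformUtils.concatenateTransforms([graspToObject, objectToWorld])

# Equivalent explicit vtk formulation using post-multiplication.
check = vtk.vtkTransform()
check.PostMultiply()
check.Concatenate(graspToObject)
check.Concatenate(objectToWorld)
assert np.allclose(graspToWorld.GetPosition(), check.GetPosition())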
Example #2
    def computeGraspPlan(self,
                         targetFrame,
                         graspToHandFrame,
                         inLine=False,
                         ikParameters=None):

        startPose = self.getPlanningStartPose()
        endPose, constraintSet = self.computeGraspPose(startPose, targetFrame)
        if ikParameters:
            constraintSet.ikParameters = ikParameters

        constraintSet.ikParameters.usePointwise = False

        if inLine:

            handLinkName = self.ikPlanner.getHandLink(self.graspingHand)
            graspToHand = graspToHandFrame

            handToWorld1 = self.ikPlanner.getLinkFrameAtPose(
                handLinkName, startPose)
            handToWorld2 = self.ikPlanner.getLinkFrameAtPose(
                handLinkName, endPose)

            handToWorld1 = transformUtils.concatenateTransforms(
                [graspToHand, handToWorld1])
            handToWorld2 = transformUtils.concatenateTransforms(
                [graspToHand, handToWorld2])

            motionVector = np.array(handToWorld2.GetPosition()) - np.array(
                handToWorld1.GetPosition())
            motionTargetFrame = transformUtils.getTransformFromOriginAndNormal(
                np.array(handToWorld2.GetPosition()), motionVector)

            #vis.updateFrame(motionTargetFrame, 'motion target frame', scale=0.1)
            #d = DebugData()
            #d.addLine(np.array(handToWorld2.GetPosition()), np.array(handToWorld2.GetPosition()) - motionVector)
            #vis.updatePolyData(d.getPolyData(), 'motion vector', visible=False)

            p = self.ikPlanner.createLinePositionConstraint(
                handLinkName,
                graspToHand,
                motionTargetFrame,
                lineAxis=2,
                bounds=[-np.linalg.norm(motionVector), 0.001],
                positionTolerance=0.001)
            p.tspan = np.linspace(0, 1, 5)

            constraintSet.constraints.append(p)
            newPlan = constraintSet.runIkTraj()

        else:

            newPlan = self.ikPlanner.computePostureGoal(startPose, endPose)

        return newPlan
Example #3
    def attemptGrasp(self, graspFrame):

        preGraspFrame = transformUtils.concatenateTransforms([self.preGraspToGraspTransform, graspFrame])

        graspLocationData = self.graspingParams[self.state.graspingLocation]
        above_table_pre_grasp = graspLocationData['poses']['above_table_pre_grasp']
        preGraspFramePoseStamped = self.makePoseStampedFromGraspFrame(preGraspFrame)
        preGrasp_ik_response = self.robotService.runIK(preGraspFramePoseStamped, seedPose=above_table_pre_grasp, nominalPose=above_table_pre_grasp)

        if not preGrasp_ik_response.success:
            rospy.loginfo("pre grasp pose ik failed, returning")
            return False

        graspFramePoseStamped = self.makePoseStampedFromGraspFrame(graspFrame)
        preGraspPose = preGrasp_ik_response.joint_state.position

        grasp_ik_response = self.robotService.runIK(graspFramePoseStamped, seedPose=preGraspPose, nominalPose=preGraspPose)

        if not grasp_ik_response.success:
            rospy.loginfo("grasp pose not reachable, returning")
            return False


        graspPose = grasp_ik_response.joint_state.position
        # store for future use
        self.preGraspFrame = preGraspFrame
        self.graspFrame = graspFrame
        self.gripperDriver.sendOpenGripperCommand()
        rospy.sleep(0.5) # wait for the gripper to open
        self.robotService.moveToJointPosition(preGraspPose, maxJointDegreesPerSecond=self.graspingParams['speed']['pre_grasp'])
        self.robotService.moveToJointPosition(graspPose, maxJointDegreesPerSecond=self.graspingParams['speed']['grasp'])

        objectInGripper = self.gripperDriver.closeGripper()
        return objectInGripper
Example #4
    def planStandUp(self):
        startPose = self.getPlanningStartPose()
        startPoseName = 'q_egress_start'
        self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
        endPoseName = 'q_egress_end'
        pelvisFrame = self.robotSystem.ikPlanner.getLinkFrameAtPose('pelvis', startPose)
        t = transformUtils.frameFromPositionAndRPY([self.pelvisLiftX, 0, self.pelvisLiftZ], [0, 0, 0])
        liftFrame = transformUtils.concatenateTransforms([t, pelvisFrame])

        constraints = []
        utorsoFrame = self.robotSystem.ikPlanner.getLinkFrameAtPose('utorso', startPose)
        g = self.createUtorsoGazeConstraints([1.0, 1.0])
        constraints.append(g[1])
        p = ik.PositionConstraint(linkName='pelvis', referenceFrame=liftFrame,
                                  lowerBound=np.array([0.0, -np.inf, 0.0]),
                                  upperBound=np.array([np.inf, np.inf, 0.0]))
        constraints.append(p)
        constraints.append(ik.QuasiStaticConstraint(leftFootEnabled=True, rightFootEnabled=True, pelvisEnabled=False,
                                                    shrinkFactor=self.quasiStaticShrinkFactor))
        constraints.append(self.robotSystem.ikPlanner.createXYZMovingBasePostureConstraint(startPoseName))
        constraints.append(self.robotSystem.ikPlanner.createLockedLeftArmPostureConstraint(startPoseName))
        constraints.append(self.robotSystem.ikPlanner.createLockedRightArmPostureConstraint(startPoseName))
        constraints.extend(self.robotSystem.ikPlanner.createFixedFootConstraints(startPoseName))
        constraints.append(self.robotSystem.ikPlanner.createKneePostureConstraint([0.7, 2.5]))
        constraintSet = ConstraintSet(self.robotSystem.ikPlanner, constraints, endPoseName, startPoseName)
        constraintSet.ikParameters = IkParameters(usePointwise=True, maxBaseMetersPerSecond=0.02)

        constraintSet.runIk()
        keyFramePlan = constraintSet.planEndPoseGoal(feetOnGround=True)
        poseTimes, poses = planplayback.PlanPlayback.getPlanPoses(keyFramePlan)
        ts = [poseTimes[0]]
        supportsList = [['r_foot', 'l_foot']]
        plan = self.publishPlanWithSupports(keyFramePlan, supportsList, ts, True)
        self.addPlan(plan)
        return plan
Example #5
 def onAprilTag(self, msg):
     t = vtk.vtkTransform()
     cameraview.imageManager.queue.getTransform('april_tag_car_beam',
                                                'local', msg.utime, t)
     self.originFrame.copyFrame(
         transformUtils.concatenateTransforms(
             [self.originToAprilTransform, t]))
Example #6
    def loadStoredObjects(self):
        if not os.path.isfile(self.pathDict['registrationResult']):
            return

        stream = open(self.pathDict['registrationResult'])
        registrationResult = yaml.load(stream)

        for objName, data in registrationResult.iteritems():
            objectMeshFilename = data[
                'filename']  # should be relative to getLabelFusionDataDir()
            if len(objectMeshFilename) == 0:
                objectMeshFilename = utils.getObjectMeshFilename(
                    self.objectData, objName)
            else:
                objectMeshFilename = os.path.join(
                    utils.getLabelFusionDataDir(), objectMeshFilename)

            objectToFirstFrame = transformUtils.transformFromPose(
                data['pose'][0], data['pose'][1])
            objectToWorld = transformUtils.concatenateTransforms(
                [objectToFirstFrame, self.firstFrameToWorldTransform])

            polyData = ioUtils.readPolyData(objectMeshFilename)
            polyDataWorld = filterUtils.transformPolyData(
                polyData, objectToWorld)

            d = dict()
            d['transform'] = objectToWorld
            d['polyData'] = polyDataWorld
            self.storedObjects[objName] = d
Example #7
    def setupImage(self, imageNumber, saveLabeledImages=False):
        """
        Loads the given imageNumber as background.
        Also updates the poses of the objects to match the image
        """
        baseName = cutils.getImageBasenameFromImageNumber(
            imageNumber, self.pathDict)
        imageFilename = baseName + "_rgb.png"
        if not os.path.exists(imageFilename):
            return False

        utimeFile = open(baseName + "_utime.txt", 'r')
        utime = int(utimeFile.read())

        # update camera transform
        cameraToCameraStart = self.getCameraPoseAtUTime(utime)
        t = transformUtils.concatenateTransforms(
            [cameraToCameraStart,
             cutils.getDefaultCameraToWorld()])
        vis.updateFrame(t, 'camera pose')
        setCameraTransform(self.globalsDict['view'].camera(), t)

        cameraPose = self.globalsDict['om'].findObjectByName('camera pose')
        cameraPose.setProperty('Visible', False)

        self.loadBackgroundImage(imageFilename)
        self.globalsDict['view'].forceRender()  # render it again

        if saveLabeledImages:
            self.saveImages(baseName)

        return True
Example #8
    def _handleRigidBodies(self, bodies):

        if not bodies:
            return

        folder = self.getRootFolder()

        for body in bodies:
            name = 'Body ' + str(body.id)

            x, y, z, w = body.quat
            quat = (w, x, y, z)  # reorder to the (w, x, y, z) convention used by transformFromPose
            bodyToOptitrack = transformUtils.transformFromPose(body.xyz, quat)

            bodyToWorld = transformUtils.concatenateTransforms((bodyToOptitrack, self.optitrackToWorld))

            obj = folder.findChild(name)
            if not obj:
                geometry = self._makeMarkers(body.marker_xyz)
                geometry = filterUtils.transformPolyData(geometry, bodyToOptitrack.GetLinearInverse())
                obj = vis.showPolyData(geometry, name, parent=folder, color=[1,0,0])
                frameObj = vis.addChildFrame(obj)
                frameObj.setProperty('Scale', 0.2)
                frameObj.setProperty('Visible', True)

            obj.getChildFrame().copyFrame(bodyToWorld)
Example #9
    def saveRegistrationResults(self, filename=None):
        registrationResultDict = dict()
        if os.path.isfile(self.pathDict['registrationResult']):
            registrationResultDict = utils.getDictFromYamlFilename(
                self.pathDict['registrationResult'])

        affordanceFolder = om.getOrCreateContainer('affordances')
        affordanceList = affordanceFolder.children()

        for affordance in affordanceList:
            objectName = affordance.getProperty('Name')
            modelToWorld = affordance.getChildFrame().transform
            modelToFirstFrame = transformUtils.concatenateTransforms([
                modelToWorld,
                self.firstFrameToWorldTransform.GetLinearInverse()
            ])
            pose = transformUtils.poseFromTransform(modelToFirstFrame)
            poseAsList = [pose[0].tolist(), pose[1].tolist()]
            d = dict()
            meshFilename = affordance.getProperty('Filename')
            relPathToDataDir = os.path.relpath(meshFilename,
                                               utils.getLabelFusionDataDir())

            d['filename'] = relPathToDataDir
            d['pose'] = poseAsList
            registrationResultDict[objectName] = d

        if filename is None:
            filename = self.pathDict['registrationResult']

        utils.saveDictToYaml(registrationResultDict, filename)
Example #10
def initRobotTeleopCameraFrame(robotSystem):
    endEffectorToWorld = robotSystem.teleopRobotModel.getLinkFrame(
        'iiwa_link_ee')
    frameObj = vis.updateFrame(endEffectorToWorld,
                               'iiwa_link_ee_teleop',
                               parent='debug',
                               scale=0.15,
                               visible=False)
    cameraToEE = getCameraToKukaEndEffectorFrame()
    cameraToWorld = transformUtils.concatenateTransforms(
        [cameraToEE, endEffectorToWorld])
    obj = vis.updateFrame(cameraToWorld,
                          'camera frame teleop',
                          parent=frameObj,
                          scale=0.15,
                          visible=False)
    frameObj.getFrameSync().addFrame(obj, ignoreIncoming=True)

    def updateFrame(model):
        EEToWorld = model.getLinkFrame('iiwa_link_ee')
        frameObj = vis.updateFrame(EEToWorld,
                                   'iiwa_link_ee_teleop',
                                   parent='debug',
                                   scale=0.15,
                                   visible=False)

    # setup the callback so it updates when we move the teleop model
    robotSystem.teleopRobotModel.connectModelChanged(updateFrame)
Example #11
def loadObjectMeshes(affordanceManager, registrationResultFilename,
                     firstFrameToWorldTransform):
    """
    Loads the object meshes from the registration_result.yaml file
    :param affordanceManager:
    :param registrationResultFilename: filename of registration_result.yaml, should be an absolute path
    :param firstFrameToWorldTransform: vtkTransform that takes the first camera frame to world
    :return: None
    """

    stream = open(registrationResultFilename)
    registrationResult = yaml.load(stream)

    for objName, data in registrationResult.iteritems():
        objectMeshFilename = data[
            'filename']  # should be relative to getLabelFusionDataDir()
        if len(objectMeshFilename) == 0:
            objectMeshFilename = getObjectMeshFilename(objName)
        else:
            objectMeshFilename = os.path.join(getLabelFusionDataDir(),
                                              objectMeshFilename)

        # figure out object pose in world frame
        # we have stored object pose in first camera frame
        objectToFirstFrame = transformUtils.transformFromPose(
            data['pose'][0], data['pose'][1])
        objectToWorld = transformUtils.concatenateTransforms(
            [objectToFirstFrame, firstFrameToWorldTransform])
        pose = transformUtils.poseFromTransform(objectToWorld)

        loadAffordanceModel(affordanceManager,
                            name=objName,
                            filename=objectMeshFilename,
                            pose=pose)
Example #12
    def update(self):

        previousTargetFrame = transformUtils.transformFromPose(self.lastTargetPosition, self.lastTargetQuaternion)
        self.storeTargetPose()

        cameraToTarget, focalDistance = self.getCameraToTargetTransform(previousTargetFrame)

        targetToWorld = self.targetFrame.transform
        # cameraToTarget = self.boomTransform
        cameraToWorld = transformUtils.concatenateTransforms([cameraToTarget, targetToWorld])

        c = self.camera

        focalPoint = cameraToWorld.TransformPoint([self.focalDistance, 0, 0])
        focalPoint = targetToWorld.GetPosition()

        # print 'focal distance:', self.focalDistance
        # print 'cameraToTarget pos:', cameraToTarget.GetPosition()
        # print 'cameraToWorld pos:', cameraToWorld.GetPosition()
        # print 'targetToWorld pos:', targetToWorld.GetPosition()
        # print 'focal pos:', focalPoint

        c.SetPosition(cameraToWorld.GetPosition())
        c.SetFocalPoint(focalPoint)
        self.view.render()
Example #14
    def saveObjectPoses(self, imageFilename, cameraToCameraStart, baseName):
        # count pixels
        img = scipy.misc.imread(imageFilename)
        assert img.dtype == np.uint8

        if img.ndim in (3, 4):
            img = img[:, :, 0]
        else:
            assert img.ndim == 2

        labels, counts = np.unique(img, return_counts=True)
        labelToCount = dict(zip(labels, counts))

        num_pixels_per_class = np.array([])

        for i in xrange(0, img.max() + 1):
            num_pixels = labelToCount.get(i, 0)
            num_pixels_per_class = np.append(num_pixels_per_class, num_pixels)

        pose_file_name = baseName + "_poses.yaml"
        target = open(pose_file_name, 'w')
        print 'writing:', pose_file_name

        # iterate through each class with 1 or more pixel and save pose...
        for index, val in enumerate(num_pixels_per_class):
            if index == 0:
                # don't save for background class
                continue
            if val > 0:
                cameraStartToCamera = cameraToCameraStart.GetLinearInverse()
                objectName = utils.getObjectName(self.objectData, index)
                target.write(objectName + ":")
                target.write("\n")
                target.write("  label: " + str(index))
                target.write("\n")
                target.write("  num_pixels: " + str(val))
                target.write("\n")
                objToCameraStart = self.objectToWorld[objectName]
                objToCamera = transformUtils.concatenateTransforms(
                    [objToCameraStart, cameraStartToCamera])
                pose = transformUtils.poseFromTransform(objToCamera)

                # write rotation (cam_R_m2c) then translation (cam_t_m2c)
                q7 = Quaternion(pose[1])
                R = q7.rotation_matrix
                target.write("  cam_R_m2c: " + str(R.flatten().tolist()))
                target.write("\n")

                print "cam_R_m2c:\n"
                print R

                target.write("  cam_t_m2c: " + str(pose[0].tolist()))
                target.write("\n")

                print "cam_t_m2c:\n"
                print pose[0]
                print "\n"

        target.close()
Example #15
    def getCameraToTargetTransform(self, targetFrame):

        targetToWorld = transformUtils.copyFrame(targetFrame)
        cameraToWorld = self.getCameraTransform()

        cameraToTarget = transformUtils.concatenateTransforms([cameraToWorld, targetToWorld.GetLinearInverse()])
        focalDistance = np.linalg.norm(np.array(self.camera.GetFocalPoint()) - np.array(self.camera.GetPosition()))
        return cameraToTarget, focalDistance
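Several of these examples invert one leg of the chain with GetLinearInverse(): since targetToWorld.GetLinearInverse() is worldToTarget, the chain [cameraToWorld, worldToTarget] re-expresses the camera pose in the target frame. A small self-contained check, under the same assumptions as the sketch after Example #1; the poses are made up for illustration:

import numpy as np
from director import transformUtils

cameraToWorld = transformUtils.frameFromPositionAndRPY([1.0, 0.0, 0.5], [0, 0, 0])
targetToWorld = transformUtils.frameFromPositionAndRPY([2.0, 0.0, 0.0], [0, 0, 0])

# [cameraToWorld, worldToTarget] maps camera-frame points into the target frame.
cameraToTarget = transformUtils.concatenateTransforms(
    [cameraToWorld, targetToWorld.GetLinearInverse()])

# With identity rotations, the camera sits one meter behind the target along x.
assert np.allclose(cameraToTarget.GetPosition(), [-1.0, 0.0, 0.5])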
Example #16
def setRobotPoseFromOptitrack(robotToBodyOffset=[0.0, -0.035, 0.0, -90, -90.5, 0], optitrackBodyName='Body 1'):
    bodyToWorld = om.findObjectByName(optitrackBodyName + ' frame').transform
    robotToBody = transformUtils.frameFromPositionAndRPY(robotToBodyOffset[:3], robotToBodyOffset[3:])
    robotToWorld = transformUtils.concatenateTransforms([robotToBody, bodyToWorld])
    pos, quat = transformUtils.poseFromTransform(robotToWorld)
    rpy = transformUtils.quaternionToRollPitchYaw(quat)
    robotSystem.robotStateJointController.q[:6] = np.hstack((pos,rpy))
    robotSystem.robotStateJointController.push()
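Example #16 also relies on the pose round trip used to seed a floating base: poseFromTransform returns a (position, quaternion) pair with the quaternion in (w, x, y, z) order, and quaternionToRollPitchYaw converts that quaternion to Euler angles in radians. A short sketch, assuming those director conventions hold; the pose values are illustrative:

import numpy as np
from director import transformUtils

robotToWorld = transformUtils.frameFromPositionAndRPY([0.3, -0.1, 0.0], [0, 0, 90])
pos, quat = transformUtils.poseFromTransform(robotToWorld)  # quat is (w, x, y, z)
rpy = transformUtils.quaternionToRollPitchYaw(quat)         # radians

# Floating-base configuration layout: [x, y, z, roll, pitch, yaw].
q_base = np.hstack((pos, rpy))
assert np.isclose(q_base[5], np.radians(90.0))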
Example #18
 def getCameraToLocal(self):
     '''
     Returns cameraToLocal.  cameraToHead is pulled from bot frames while
     headToLocal is pulled from the robot model forward kinematics.
     '''
     headToLocal = self.robotModel.getLinkFrame( self.robotModel.getHeadLink() )
     cameraToHead = vtk.vtkTransform()
     self.imageManager.queue.getTransform(self.cameraName, self.robotModel.getHeadLink(), 0, cameraToHead)
     return transformUtils.concatenateTransforms([cameraToHead, headToLocal])
Example #19
def setTfRootForCamera():

    tfVis.setTfToWorld(vtk.vtkTransform())

    opticalToRoot = transformUtils.copyFrame(om.findObjectByName('camera_depth_optical_frame').transform)
    rootToOptical = opticalToRoot.GetLinearInverse()
    cameraToWorld = getCameraToWorld()
    t = transformUtils.concatenateTransforms([rootToOptical, cameraToWorld])
    tfVis.setTfToWorld(t)
Example #20
    def moveToGraspFrame(self, graspFrame):
        preGraspFrame = transformUtils.concatenateTransforms(
            [self.preGraspToGraspTransform, graspFrame])
        vis.updateFrame(preGraspFrame, 'pre grasp frame', scale=0.15)
        vis.updateFrame(graspFrame, 'grasp frame', scale=0.15)

        self.moveToFrame(preGraspFrame)
        # rospy.sleep(1.0)
        self.moveToFrame(graspFrame)
Example #22
    def planFootOut(self, startPose=None, finalFootHeight=0.05):

        if startPose is None:
            startPose = self.getPlanningStartPose()

        startPoseName = 'q_egress_start'
        self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
        endPoseName = 'q_egress_end'

        utorsoFrame = self.robotSystem.ikPlanner.getLinkFrameAtPose('utorso', startPose)
        finalLeftFootFrame = self.computeLeftFootOverPlatformFrame(startPose, finalFootHeight)

        constraints = []
        constraints.extend(self.createUtorsoGazeConstraints([0.0, 1.0]))
        constraints.append(ik.QuasiStaticConstraint(leftFootEnabled=False, rightFootEnabled=True,
                                                    pelvisEnabled=False, shrinkFactor=0.01))
        constraints.append(self.robotSystem.ikPlanner.createMovingBaseSafeLimitsConstraint())
        constraints.append(self.robotSystem.ikPlanner.createLockedLeftArmPostureConstraint(startPoseName))
        constraints.append(self.robotSystem.ikPlanner.createLockedRightArmPostureConstraint(startPoseName))
        #constraints.append(self.robotSystem.ikPlanner.createLockedBackPostureConstraint(startPoseName))
        constraints.append(self.robotSystem.ikPlanner.createKneePostureConstraint([0.7, 2.5]))
        constraints.append(self.robotSystem.ikPlanner.createFixedLinkConstraints(startPoseName, 'r_foot'))
        constraints.extend(self.createLeftFootPoseConstraint(finalLeftFootFrame, tspan=[1,1]))

        constraintSet = ConstraintSet(self.robotSystem.ikPlanner, constraints, endPoseName, startPoseName)
        constraintSet.ikParameters = IkParameters(usePointwise=True, maxBaseRPYDegreesPerSecond=10,
                                                  rescaleBodyNames=['l_foot'],
                                                  rescaleBodyPts=[0.0, 0.0, 0.0],
                                                  maxBodyTranslationSpeed=self.maxFootTranslationSpeed)
        #constraintSet.seedPoseName = 'q_start'
        #constraintSet.nominalPoseName = 'q_start'

        constraintSet.runIk()

        footFrame = self.robotSystem.ikPlanner.getLinkFrameAtPose('l_foot', startPose)
        liftHeight = (self.polaris.leftFootEgressOutsideFrame.transform.GetPosition()[2]
                      - footFrame.GetPosition()[2])
        t = transformUtils.frameFromPositionAndRPY([0, 0, liftHeight], [0, 0, 0])
        liftFrame = transformUtils.concatenateTransforms([footFrame, t])
        vis.updateFrame(liftFrame, 'lift frame')

        c = ik.WorldFixedOrientConstraint()
        c.linkName = 'l_foot'
        c.tspan = [0.0, 0.1, 0.2]
        constraints.append(c)
        constraints.extend(self.createLeftFootPoseConstraint(liftFrame, tspan=[0.2, 0.2]))
        constraints.extend(self.createLeftFootPoseConstraint(self.polaris.leftFootEgressMidFrame, tspan=[0.5, 0.5]))

        constraints.extend(self.createLeftFootPoseConstraint(self.polaris.leftFootEgressOutsideFrame, tspan=[0.8, 0.8]))

        #plan = constraintSet.planEndPoseGoal(feetOnGround=False)
        keyFramePlan = constraintSet.runIkTraj()
        poseTimes, poses = planplayback.PlanPlayback.getPlanPoses(keyFramePlan)
        ts = [poseTimes[0]]
        supportsList = [['r_foot']]
        plan = self.publishPlanWithSupports(keyFramePlan, supportsList, ts, False)
        self.addPlan(plan)
        return plan
Example #23
    def reset(self):
        self.storeTargetPose()

        targetToWorld = transformUtils.copyFrame(self.targetFrame.transform)
        cameraToWorld = self.getCameraTransform()

        cameraToTarget = transformUtils.concatenateTransforms([cameraToWorld, targetToWorld.GetLinearInverse()])

        self.boomTransform = cameraToTarget
        self.focalDistance = np.linalg.norm(np.array(self.camera.GetFocalPoint()) - np.array(self.camera.GetPosition()))
Example #24
 def showRGBCameraFrame(self):
     p = om.getOrCreateContainer('Camera Transform')
     linkFrame = self.robotStateModel.getLinkFrame(
         self.referenceLinkName)  # this is a vtkTransform object
     cameraToWorld = transformUtils.concatenateTransforms(
         [self.rgbCameraToLinkTransform, linkFrame])
     vis.updateFrame(cameraToWorld,
                     'rgb camera frame',
                     scale=0.15,
                     parent=p)
Example #26
 def getCameraToLocal(self):
     '''
     Returns cameraToLocal.  cameraToHead is pulled from bot frames while
     headToLocal is pulled from the robot model forward kinematics.
     '''
     headToLocal = self.robotModel.getLinkFrame( self.headLink )
     cameraToHead = vtk.vtkTransform()
     # TODO: 'head' should match self.headLink
     self.imageManager.queue.getTransform(self.cameraName, 'head', 0, cameraToHead)
     return transformUtils.concatenateTransforms([cameraToHead, headToLocal])
Example #27
    def get_object_pose_relative_to_gripper(self):
        T_world_object = self._object.actor.GetUserTransform()
        T_world_gripper = self._robotStateModel.getLinkFrame(
            self._gripper_link_frame_name)

        T_gripper_object = transformUtils.concatenateTransforms(
            [T_world_object,
             T_world_gripper.GetLinearInverse()])

        print(transformUtils.poseFromTransform(T_gripper_object))
        return T_gripper_object
Example #28
    def _updateMarkerCollection(self, prefix, folder, marker_ids,
                                positions, base_transform=None):
        markers = folder.children()
        if len(markers) != len(positions):
            for obj in markers:
                om.removeFromObjectModel(obj)
            modelColor = vis.getRandomColor()
            markers = self.createMarkerObjects(
                marker_ids, folder, prefix, modelColor)
        if len(markers):
            modelColor = markers[0].getProperty('Color')

        for i, pos in enumerate(positions):
            marker_frame = transformUtils.transformFromPose(pos, (1, 0, 0, 0))
            marker_frame = transformUtils.concatenateTransforms(
                [marker_frame, self.optitrackToWorld])
            if base_transform is not None:
                marker_frame = transformUtils.concatenateTransforms(
                    [marker_frame, base_transform])
            markers[i].getChildFrame().copyFrame(marker_frame)
Example #29
    def loadReconstructedPointCloud(self):
        utime = self.openniDepthPointCloud.lastUtime
        cameraToFirstFrame = self.cameraPoses.getCameraPoseAtUTime(utime)
        cameraToWorld = om.findObjectByName('camera frame').transform
        firstFrameToCamera = cameraToFirstFrame.GetLinearInverse()
        firstFrameToWorld = transformUtils.concatenateTransforms(
            [firstFrameToCamera, cameraToWorld])

        self.firstFrameToWorld = firstFrameToWorld
        CorlUtils.loadElasticFusionReconstruction(
            self.pathDict['reconstruction'], transform=firstFrameToWorld)
Example #31
    def computeSingleCameraPose(self, targetLocationWorld=[1,0,0], cameraFrameLocation=[0.22, 0, 0.89], flip=False):
        cameraAxis = [0,0,1]

        linkName = self.handFrame
        linkName = 'iiwa_link_7'
        linkFrame = self.robotSystem.robotStateModel.getLinkFrame(linkName)
        cameraFrame = self.robotSystem.robotStateModel.getLinkFrame(self.handFrame)

        cameraToLinkFrame = transformUtils.concatenateTransforms([cameraFrame, linkFrame.GetLinearInverse()])
        ikPlanner = self.robotSystem.ikPlanner
        startPoseName = 'q_nom'
        endPoseName = 'reach_end'
        seedPoseName = 'q_nom'

        if flip:
            print "FLIPPING startPoseName"
            startPoseName = 'q_nom_invert_7th_joint'
            seedPoseName = 'q_nom_invert_7th_joint'
            pose = np.asarray([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                               -0.68, 1.0, -1.688, 1.0, -0.5635, 1.0])
            ikPlanner.addPose(pose, startPoseName)
        else:
            print "NOT flipped"

        constraints = []
        constraints.append(ikPlanner.createPostureConstraint(startPoseName, robotstate.matchJoints('base_')))

        positionConstraint = HandEyeCalibration.createPositionConstraint(targetPosition=cameraFrameLocation, linkName=linkName, linkOffsetFrame=cameraToLinkFrame, positionTolerance=0.05)

        cameraGazeConstraint = HandEyeCalibration.createCameraGazeTargetConstraint(linkName=linkName, cameraToLinkFrame=cameraToLinkFrame, cameraAxis=cameraAxis, worldPoint=targetLocationWorld, coneThresholdDegrees=5.0)

        constraints.append(positionConstraint)
        constraints.append(cameraGazeConstraint)

        constraintSet = ConstraintSet(ikPlanner, constraints, 'reach_end',
                                      startPoseName)

        if flip:
            constraintSet.ikPlanner.addPose(pose, startPoseName)

        constraintSet.ikParameters = IkParameters()
        constraintSet.seedPoseName = seedPoseName

        endPose, info = constraintSet.runIk()
        returnData = dict()
        returnData['info'] = info
        returnData['endPose'] = endPose

        return returnData
Example #32
def initRobotKinematicsCameraFrame(robotSystem):
    endEffectorToWorld = robotSystem.robotStateModel.getLinkFrame('iiwa_link_ee')
    frameObj = vis.updateFrame(endEffectorToWorld, 'iiwa_link_ee', parent='debug', scale=0.15, visible=False)
    cameraToEE = getCameraToKukaEndEffectorFrame()
    cameraToWorld = transformUtils.concatenateTransforms([cameraToEE, endEffectorToWorld])
    obj = vis.updateFrame(cameraToWorld, 'camera frame', parent=frameObj, scale=0.15)
    frameObj.getFrameSync().addFrame(obj, ignoreIncoming=True)

    def onCameraFrameModified(f):
        setCameraToWorld(f.transform)

    obj.connectFrameModified(onCameraFrameModified)
Example #33
    def computeSingleCameraPose(self,
                                targetLocationWorld=[1, 0, 0],
                                cameraFrameLocation=[0.22, 0, 0.89]):
        cameraAxis = [0, -1, 0]  # assuming we are using 'palm' as the link frame

        linkName = self.handFrame
        linkName = 'iiwa_link_7'
        linkFrame = self.robotSystem.robotStateModel.getLinkFrame(linkName)
        cameraFrame = self.robotSystem.robotStateModel.getLinkFrame(
            self.handFrame)

        cameraToLinkFrame = transformUtils.concatenateTransforms(
            [cameraFrame, linkFrame.GetLinearInverse()])
        ikPlanner = self.robotSystem.ikPlanner
        startPoseName = 'q_nom'
        endPoseName = 'reach_end'
        seedPoseName = 'q_nom'

        constraints = []
        constraints.append(
            ikPlanner.createPostureConstraint(startPoseName,
                                              robotstate.matchJoints('base_')))

        positionConstraint = HandEyeCalibration.createPositionConstraint(
            targetPosition=cameraFrameLocation,
            linkName=linkName,
            linkOffsetFrame=cameraToLinkFrame,
            positionTolerance=0.05)

        cameraGazeConstraint = HandEyeCalibration.createCameraGazeTargetConstraint(
            linkName=linkName,
            cameraToLinkFrame=cameraToLinkFrame,
            cameraAxis=cameraAxis,
            worldPoint=targetLocationWorld,
            coneThresholdDegrees=5.0)

        constraints.append(positionConstraint)
        constraints.append(cameraGazeConstraint)

        constraintSet = ConstraintSet(ikPlanner, constraints, 'reach_end',
                                      startPoseName)
        constraintSet.ikParameters = IkParameters()

        constraintSet.seedPoseName = seedPoseName

        endPose, info = constraintSet.runIk()
        returnData = dict()
        returnData['info'] = info
        returnData['endPose'] = endPose

        return returnData
Example #34
    def update(self):
        """
        Updates the location of all objects. Use a `connectFrameModified` callback
        :return:
        :rtype:
        """
        if self._object is None:
            return

        T_world_gripper = self._robotStateModel.getLinkFrame(
            self._gripper_link_frame_name)
        T_world_object = transformUtils.concatenateTransforms(
            [self._T_gripper_object, T_world_gripper])
        self._object.getChildFrame().copyFrame(T_world_object)
Example #35
def onAprilTagMessage(msg, channel):
    tagToCamera = lcmframe.frameFromRigidTransformMessage(msg)
    vis.updateFrame(tagToCamera, channel, visible=False)

    cameraToTag = tagToCamera.GetLinearInverse()
    tagToWorld = getTagToWorld()
    cameraToWorld = transformUtils.concatenateTransforms([cameraToTag, tagToWorld])

    cameraToWorldMsg = lcmframe.rigidTransformMessageFromFrame(cameraToWorld)
    lcmUtils.publish('OPENNI_FRAME_LEFT_TO_LOCAL', cameraToWorldMsg)

    vis.updateFrame(vtk.vtkTransform(), 'world', visible=False)
    vis.updateFrame(cameraToWorld, 'camera to world', visible=False)
    vis.updateFrame(tagToWorld, 'tag to world', visible=False)
Example #36
    def compute_transforms(self):
        """
        Make sure that grasp_data is set before you call this function
        :return:
        """

        # gripper palm to world
        # this also happens to be gripper palm to object
        if self.T_W_G is None:
            self.T_W_G = transformUtils.copyFrame(self.grasp_data.grasp_frame)

        T_W_Gn = transformUtils.concatenateTransforms(
            [self.T_W_G, self._T_goal_object])

        self._T_W_Gn = T_W_Gn
Example #37
 def onCameraModified():
     cameraToWorld = getCameraTransform(depthScanner.view.camera())
     depthScanner.pointCloudObj.actor.SetUserTransform(
         transformUtils.concatenateTransforms([pointCloudToCamera, cameraToWorld]))
     cameraFrame.copyFrame(cameraToWorld)
Example #38
    def get_camera_to_world_pose(self, idx):
        _, _, camera_to_first_frame = self.get_camera_pose(idx)
        camera_to_world = transformUtils.concatenateTransforms([camera_to_first_frame,
                                                                self.first_frame_to_world])

        return camera_to_world