Code example #1
File: pfgrasp.py Project: rdeits/director
    def planDeltaMove(self, Direction, LocalOrWorld, Amount):
        linkMap = { 'left' : 'l_hand_face', 'right': 'r_hand_face'}
        linkName = linkMap[self.graspingHand]

        #handToWorld = self.robotModel.getLinkFrame(linkName)

        if Direction == 'X':
            delta = transformUtils.frameFromPositionAndRPY([Amount,0,0],[0,0,0])
        elif Direction == 'Y':
            delta = transformUtils.frameFromPositionAndRPY([0,Amount,0],[0,0,0])
        else:
            delta = transformUtils.frameFromPositionAndRPY([0,0,Amount],[0,0,0])

        startPose = self.getPlanningStartPose() 
        constraintSet = self.ikPlanner.planEndEffectorDelta(startPose, self.graspingHand, 
                        delta.GetPosition(), constraints=None, LocalOrWorldDelta=LocalOrWorld)
        handfaceToWorld = self.ikPlanner.getLinkFrameAtPose(linkName, self.getPlanningStartPose())
        # constrain orientation
        p,q = self.ikPlanner.createPositionOrientationGraspConstraints(self.graspingHand,handfaceToWorld)
        q.tspan=[0.5,np.inf]
        
        constraintSet.constraints.append(q)
        ##
        endPose, info = constraintSet.runIk()    
        if info>10:
            return None    
        graspPlan = constraintSet.runIkTraj()
        return graspPlan
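
Every example on this page ultimately calls transformUtils.frameFromPositionAndRPY(position, rpy), which returns a vtkTransform built from a position 3-vector and roll/pitch/yaw given in degrees (hence the np.degrees() conversions seen in several snippets). Below is a minimal standalone sketch of the single-axis delta pattern used in planDeltaMove above; deltaAlongAxis is a hypothetical helper, and the import assumes the director package layout (older forks use the ddapp namespace).

from director import transformUtils

def deltaAlongAxis(direction, amount):
    # Pure translation along one axis of the hand frame; direction is 'X', 'Y' or 'Z'.
    offsets = {'X': [amount, 0, 0], 'Y': [0, amount, 0], 'Z': [0, 0, amount]}
    return transformUtils.frameFromPositionAndRPY(offsets.get(direction, [0, 0, amount]), [0, 0, 0])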
Code example #2
File: bihandeddemo.py Project: wxmerkt/director
    def computeBoardStanceFrame(self):
        objectTransform = transformUtils.copyFrame(
            self.graspLeftFrame.transform)
        self.relativeStanceTransform = transformUtils.copyFrame(
            transformUtils.frameFromPositionAndRPY(self.relativeStanceXYZ,
                                                   self.relativeStanceRPY))
        robotStance = self.b.computeRobotStanceFrame(
            objectTransform, self.relativeStanceTransform)
        self.stanceFrame = vis.updateFrame(robotStance,
                                           'board stance',
                                           parent=self.affordance,
                                           visible=False,
                                           scale=0.2)
        self.stanceFrame.addToView(app.getDRCView())

        objectTransform = transformUtils.copyFrame(
            self.graspLeftFrame.transform)
        self.relativeAsymmetricStanceTransform = transformUtils.copyFrame(
            transformUtils.frameFromPositionAndRPY(
                self.relativeAsymmetricStanceXYZ, self.relativeStanceRPY))
        robotAsymmetricStance = self.b.computeRobotStanceFrame(
            objectTransform, self.relativeAsymmetricStanceTransform)
        self.asymmetricStanceFrame = vis.updateFrame(robotAsymmetricStance,
                                                     'board Asymmetric stance',
                                                     parent=self.affordance,
                                                     visible=False,
                                                     scale=0.2)
        self.asymmetricStanceFrame.addToView(app.getDRCView())
Code example #3
File: bihandeddemo.py Project: wxmerkt/director
    def computeBoardReachFrames(self):
        ''' Reach ~10cm short of the grasp frame '''
        reachLeftXYZ = copy.deepcopy(self.graspLeftHandFrameXYZ)
        reachLeftXYZ[0] = reachLeftXYZ[0] - self.reachDepth
        reachLeftRPY = copy.deepcopy(self.graspLeftHandFrameRPY)

        tl = transformUtils.frameFromPositionAndRPY(reachLeftXYZ, reachLeftRPY)
        tl.Concatenate(self.frame.transform)
        self.reachLeftFrame = vis.updateFrame(tl,
                                              'reach left frame',
                                              parent=self.affordance,
                                              visible=False,
                                              scale=0.2)
        self.reachLeftFrame.addToView(app.getDRCView())

        reachRightXYZ = copy.deepcopy(self.graspRightHandFrameXYZ)
        reachRightXYZ[0] = reachRightXYZ[0] - self.reachDepth
        reachRightRPY = copy.deepcopy(self.graspRightHandFrameRPY)

        tr = transformUtils.frameFromPositionAndRPY(reachRightXYZ,
                                                    reachRightRPY)
        tr.Concatenate(self.frame.transform)
        self.reachRightFrame = vis.updateFrame(tr,
                                               'reach right frame',
                                               parent=self.affordance,
                                               visible=False,
                                               scale=0.2)
        self.reachRightFrame.addToView(app.getDRCView())
Code example #4
File: bihandeddemo.py Project: rdeits/director
    def computeBoardStanceFrame(self):
        objectTransform = transformUtils.copyFrame( self.graspLeftFrame.transform )
        self.relativeStanceTransform = transformUtils.copyFrame( transformUtils.frameFromPositionAndRPY( self.relativeStanceXYZ , self.relativeStanceRPY ) )
        robotStance = self.b.computeRobotStanceFrame( objectTransform, self.relativeStanceTransform )
        self.stanceFrame = vis.updateFrame(robotStance, 'board stance', parent=self.affordance, visible=False, scale=0.2)
        self.stanceFrame.addToView(app.getDRCView())

        objectTransform = transformUtils.copyFrame( self.graspLeftFrame.transform )
        self.relativeAsymmetricStanceTransform = transformUtils.copyFrame( transformUtils.frameFromPositionAndRPY( self.relativeAsymmetricStanceXYZ , self.relativeStanceRPY ) )
        robotAsymmetricStance = self.b.computeRobotStanceFrame( objectTransform, self.relativeAsymmetricStanceTransform )
        self.asymmetricStanceFrame = vis.updateFrame(robotAsymmetricStance, 'board Asymmetric stance', parent=self.affordance, visible=False, scale=0.2)
        self.asymmetricStanceFrame.addToView(app.getDRCView())
Code example #5
File: bihandeddemo.py Project: rdeits/director
 def computeBoardGraspFrames(self):
     t = transformUtils.frameFromPositionAndRPY( self.graspLeftHandFrameXYZ , self.graspLeftHandFrameRPY )
     t_copy = transformUtils.copyFrame(t)
     t_copy.Concatenate(self.frame.transform)
     self.graspLeftFrame = vis.updateFrame(t_copy, 'grasp left frame', parent=self.affordance, visible=False, scale=0.2)
     self.graspLeftFrame.addToView(app.getDRCView())
     
     t = transformUtils.frameFromPositionAndRPY( self.graspRightHandFrameXYZ , self.graspRightHandFrameRPY )
     t_copy = transformUtils.copyFrame(t)
     t_copy.Concatenate(self.frame.transform)
     self.graspRightFrame = vis.updateFrame(t_copy, 'grasp right frame', parent=self.affordance, visible=False, scale=0.2)
     self.graspRightFrame.addToView(app.getDRCView())
Code example #6
File: tabledemo.py Project: edowson/director
    def computeTableStanceFrame(self):
        assert self.tableData

        zGround = 0.0
        tableHeight = self.tableData.frame.GetPosition()[2] - zGround

        t = transformUtils.copyFrame(self.tableData.frame)
        t.PreMultiply()
        t1 = transformUtils.frameFromPositionAndRPY([-x/2 for x in self.tableData.dims],[0,0,0])
        t.Concatenate(t1)
        t2 = transformUtils.frameFromPositionAndRPY([-0.35, self.tableData.dims[1]*0.5, -tableHeight],[0,0,0])
        t.Concatenate(t2)

        self.tableStanceFrame = vis.showFrame(t, 'table stance frame', parent=self.tableObj, scale=0.2)
Code example #7
File: valvedemo.py Project: mlab-upenn/arch-apex
    def spawnValveAffordance(self):
        radius = 0.10
        tubeRadius = 0.02
        position = [0, 0, 1.2]
        rpy = [0, 0, 0]
        t_feet_mid = self.footstepPlanner.getFeetMidPoint(self.robotModel)
        t = transformUtils.frameFromPositionAndRPY(position, rpy)
        t_grasp = self.computeRelativeGraspTransform()
        t_grasp.Concatenate(t)
        t_stance = self.computeRobotStanceFrame(t_grasp, self.computeRelativeStanceTransform())
        t_valve = t_stance.GetInverse()

        # This is necessary to get the inversion to actually happen. We don't know why.
        t_valve.GetMatrix()

        t_valve.Concatenate(t)
        t_valve.Concatenate(t_feet_mid)
        pose = transformUtils.poseFromTransform(t_valve)
        desc = dict(classname='CapsuleRingAffordanceItem', Name='valve', uuid=newUUID(), pose=pose,
                    Color=[0, 1, 0], Radius=float(radius), Segments=20)
        desc['Tube Radius'] = tubeRadius

        import affordancepanel
        obj = affordancepanel.panel.affordanceFromDescription(desc)
        obj.params = dict(radius=radius)
Code example #8
File: valvedemo.py Project: mlab-upenn/arch-apex
 def computeRelativeGraspTransform(self):
     t = transformUtils.copyFrame(transformUtils.frameFromPositionAndRPY(self.graspFrameXYZ,
                                                                         self.graspFrameRPY))
     t.PostMultiply()
     t.RotateX(180)
     t.RotateY(-90)
     return t
Code example #9
File: bihandeddemo.py Project: rdeits/director
    def spawnBoardAffordance(self, randomize=False):
        self.boardLength = 1.5

        if randomize:

            position = [random.uniform(0.5, 0.8), random.uniform(-0.2, 0.2), random.uniform(0.5, 0.8)]
            rpy = [random.choice((random.uniform(-35, 35), random.uniform(70, 110))), random.uniform(-10, 10),  random.uniform(-5, 5)]
            zwidth = random.uniform(0.5, 1.0)

        else:
            if self.b.val:
                position = [0.4, 0.0, 1.]
            else:
                position = [0.6, 0.0, 1.]
                
            rpy = [90, 1, 0]
            zwidth = self.boardLength

        xwidth = 3.75 * .0254
        ywidth = 1.75 * .0254
        t = transformUtils.frameFromPositionAndRPY(position, rpy)
        t.Concatenate(self.b.computeGroundFrame(self.b.robotModel))
        xaxis = [1,0,0]
        yaxis = [0,1,0]
        zaxis = [0,0,1]
        for axis in (xaxis, yaxis, zaxis):
            t.TransformVector(axis, axis)

        self.affordance = segmentation.createBlockAffordance(t.GetPosition(), xaxis, yaxis, zaxis, xwidth, ywidth, zwidth, 'board', parent='affordances')
        self.affordance.setProperty('Color', QtGui.QColor(200, 150, 100))
        t = self.affordance.actor.GetUserTransform()
        self.frame = vis.showFrame(t, 'board frame', parent=self.affordance, visible=False, scale=0.2)
Code example #10
    def makeDebugRegions(self):

        stepWidth = (15 + 3/8.0) * 0.0254
        stepLength = (15 + 5/8.0) * 0.0254
        stepHeight = (5 + 5/8.0) * 0.0254

        stepPoints = np.array([
          [-stepLength/2.0, -stepWidth/2.0, 0.0],
          [-stepLength/2.0, stepWidth/2.0, 0.0],
          [stepLength/2.0, stepWidth/2.0, 0.0],
          [stepLength/2.0, -stepWidth/2.0, 0.0]
        ])

        t = vtk.vtkTransform()
        t.Translate(0.0, 0.0, 0.0)
        t.RotateZ(4.5)

        for i in xrange(len(stepPoints)):
            stepPoints[i] = np.array(t.TransformPoint(stepPoints[i]))

        stepOffset = np.array([stepLength, 0.0, stepHeight])

        numSteps = 5

        goalFrame = transformUtils.frameFromPositionAndRPY([0.4, 0.0, 0.1], [0,0,0])
        vis.showFrame(goalFrame, 'goal frame', scale=0.2)

        rpySeed = np.radians(goalFrame.GetOrientation())
        for i in xrange(numSteps):

            step = stepPoints + (i+1)*stepOffset
            self.convertStepToSafeRegion(step, rpySeed)

        self.footstepsPanel.onNewWalkingGoal(goalFrame)
Code example #11
File: affordancepanel.py Project: rdeits/director
 def getSpawnFrame(self):
     pos = self.jointController.q[:3]
     rpy = np.degrees(self.jointController.q[3:6])
     frame = transformUtils.frameFromPositionAndRPY(pos, rpy)
     frame.PreMultiply()
     frame.Translate(0.5, 0.0, 0.3)
     return frame
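
The PreMultiply() call above makes the following Translate act in the frame's own coordinates, so the spawn point ends up 0.5 m in front of and 0.3 m above the pelvis instead of being offset along the world axes. A small sketch of the difference, using only plain VTK calls (hypothetical variable names):

import vtk

base = vtk.vtkTransform()
base.RotateZ(90)                        # local X axis now points along world Y

localOffset = vtk.vtkTransform()
localOffset.SetMatrix(base.GetMatrix())
localOffset.PreMultiply()
localOffset.Translate(0.5, 0.0, 0.3)    # applied in the rotated frame -> position (0, 0.5, 0.3)

worldOffset = vtk.vtkTransform()
worldOffset.SetMatrix(base.GetMatrix())
worldOffset.PostMultiply()
worldOffset.Translate(0.5, 0.0, 0.3)    # applied in world coordinates -> position (0.5, 0, 0.3)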
Code example #12
    def findFarRightCorner(self, polyData, linkFrame):
        '''
        Within a point cloud, find the point farthest to the right of the link.
        The input is typically the 4 corners of a minimum bounding box.
        '''

        diagonalTransform = transformUtils.frameFromPositionAndRPY([0,0,0], [0,0,45])
        diagonalTransform.Concatenate(linkFrame)
        vis.updateFrame(diagonalTransform, 'diagonal frame', parent='cont debug', visible=False)

        #polyData = shallowCopy(polyData)
        points = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
        #vtkNumpy.addNumpyToVtk(polyData, points[:,0].copy(), 'x')
        #vtkNumpy.addNumpyToVtk(polyData, points[:,1].copy(), 'y')
        #vtkNumpy.addNumpyToVtk(polyData, points[:,2].copy(), 'z')

        viewOrigin = diagonalTransform.TransformPoint([0.0, 0.0, 0.0])
        viewX = diagonalTransform.TransformVector([1.0, 0.0, 0.0])
        viewY = diagonalTransform.TransformVector([0.0, 1.0, 0.0])
        viewZ = diagonalTransform.TransformVector([0.0, 0.0, 1.0])
        #polyData = segmentation.labelPointDistanceAlongAxis(polyData, viewX, origin=viewOrigin, resultArrayName='distance_along_foot_x')
        polyData = segmentation.labelPointDistanceAlongAxis(polyData, viewY, origin=viewOrigin, resultArrayName='distance_along_foot_y')
        #polyData = segmentation.labelPointDistanceAlongAxis(polyData, viewZ, origin=viewOrigin, resultArrayName='distance_along_foot_z')

        vis.updatePolyData( polyData, 'cornerPoints', parent='cont debug', visible=False)
        farRightIndex = vtkNumpy.getNumpyFromVtk(polyData, 'distance_along_foot_y').argmin()
        points = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
        return points[farRightIndex,:]
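
The selection above amounts to projecting the corner points onto the Y axis of the 45-degree rotated link frame and taking the minimum. A numpy-only illustration of that projection (farRightPoint is a hypothetical helper, not the segmentation.labelPointDistanceAlongAxis API itself):

import numpy as np

def farRightPoint(points, viewOrigin, viewY):
    # points: (N, 3) array of corner points in world coordinates
    distanceAlongY = np.dot(points - np.asarray(viewOrigin), np.asarray(viewY))
    return points[np.argmin(distanceAlongY)]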
Code example #13
    def planReach(self):
        startPose = self.getPlanningStartPose()

        reachFrame = vtk.vtkTransform()
        reachFrame.Identity()
        reachFrame.PostMultiply()
        if self.graspingHand == 'right':
            reachFrame.Concatenate(
                transformUtils.frameFromPositionAndRPY([0, 0, 0], [0, 180, 0]))
        reachFrame.Concatenate(
            transformUtils.copyFrame(
                om.findObjectByName('reach frame').actor.GetUserTransform()))
        constraintSet = self.ikPlanner.planEndEffectorGoal(startPose,
                                                           self.graspingHand,
                                                           reachFrame,
                                                           lockBase=False,
                                                           lockBack=True)

        ## broken robot arm has a new joint limit
        if self.graspingHand == 'left':
            constraintSet.constraints.append(self.createBrokenArmConstraint())

        endPose, info = constraintSet.runIk()
        #print endPose

        if info > 10:
            self.log("in Target received: Bad movement")
            return
        reachPlan = constraintSet.runIkTraj()
Code example #14
File: pfgrasp.py Project: rdeits/director
    def moveDrill(self,Pos=[0,0,0],RPY=[0,0,0],Style='Local'):
        linkMap = { 'left' : 'l_hand_face', 'right': 'r_hand_face'}
        linkName = linkMap[self.graspingHand]
        
        affordance = om.findObjectByName('drill')
        affordanceReach = om.findObjectByName('reach frame')
        frame = om.findObjectByName('drill frame')

        
        drillTransform = affordance.actor.GetUserTransform()
        reach = transformUtils.copyFrame(affordanceReach.actor.GetUserTransform())
        drillTransformCopy = transformUtils.copyFrame(affordance.actor.GetUserTransform())
        drillToReach = vtk.vtkTransform()
        drillToReach.Identity()
        drillToReach.PostMultiply()
        drillToReach.Concatenate(drillTransformCopy)
        drillToReach.Concatenate(reach.GetLinearInverse())
        
        handfaceToWorld = self.ikPlanner.getLinkFrameAtPose(linkName, self.getPlanningStartPose())
        
        # find a transform that moves forward wrt the hand palm
        delta = transformUtils.frameFromPositionAndRPY(Pos, RPY)
        drillTransform.Identity()
        drillTransform.PostMultiply()
        drillTransform.Concatenate(drillToReach)
        drillTransform.Concatenate(delta)
        drillTransform.Concatenate(handfaceToWorld)
Code example #15
File: pfgrasp.py Project: rdeits/director
    def planGraspLineMotion(self):
        self.turnPointwiseOffSlow()
        startPose = self.getPlanningStartPose()

        graspFrame = vtk.vtkTransform()
        graspFrame.Identity()
        graspFrame.PostMultiply()
        if self.graspingHand == 'right':
            graspFrame.Concatenate(transformUtils.frameFromPositionAndRPY([0,0,0], [0,180,0]))
        graspFrame.Concatenate(transformUtils.copyFrame(om.findObjectByName('grasp frame').actor.GetUserTransform()))

        constraintSet = self.ikPlanner.planEndEffectorGoal(startPose, self.graspingHand, graspFrame, \
                                                            lockBase=False, lockBack=True)
        # constrain orientation
        p,q = self.ikPlanner.createPositionOrientationGraspConstraints(self.graspingHand, graspFrame)
        q.tspan=[0.5,1]
        
        constraintSet.constraints.append(q)
        
        # constrain the line axis
        positionConstraint, orientationConstraint, axisConstraint = self.ikPlanner.createMoveOnLineConstraints(startPose, graspFrame)
        
        ## broken robot arm has a new joint limit
        if self.graspingHand == 'left':
            constraintSet.constraints.append(self.createBrokenArmConstraint())
        
        constraintSet.constraints.append(axisConstraint)
        constraintSet.constraints[-1].tspan = [0.5,np.inf]
        endPose, info = constraintSet.runIk()
        #print endPose
        if info > 10:
            self.log("in Target received: Bad movement")
            return
        graspPlan = constraintSet.runIkTraj()
Code example #16
    def makeGoalFrames(self):

        for linkName, positionGoal, _ in self.positionCosts:
            orientationGoal = [0.0, 0.0, 0.0]
            t = transformUtils.frameFromPositionAndRPY(positionGoal, orientationGoal)
            f = vis.showFrame(t, '%s position goal' % linkName)
            f.connectFrameModified(self.onGoalFrameModified)
Code example #17
File: egressplanner.py Project: edowson/director
    def planStandUp(self):
        startPose = self.getPlanningStartPose()
        startPoseName = 'q_egress_start'
        self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
        endPoseName = 'q_egress_end'
        pelvisFrame = self.robotSystem.ikPlanner.getLinkFrameAtPose('pelvis', startPose)
        t = transformUtils.frameFromPositionAndRPY([self.pelvisLiftX, 0, self.pelvisLiftZ], [0, 0, 0])
        liftFrame = transformUtils.concatenateTransforms([t, pelvisFrame])

        constraints = []
        utorsoFrame = self.robotSystem.ikPlanner.getLinkFrameAtPose('utorso', startPose)
        g = self.createUtorsoGazeConstraints([1.0, 1.0])
        constraints.append(g[1])
        p = ik.PositionConstraint(linkName='pelvis', referenceFrame=liftFrame,
                                  lowerBound=np.array([0.0, -np.inf, 0.0]),
                                  upperBound=np.array([np.inf, np.inf, 0.0]))
        constraints.append(p)
        constraints.append(ik.QuasiStaticConstraint(leftFootEnabled=True, rightFootEnabled=True, pelvisEnabled=False,
                                                    shrinkFactor=self.quasiStaticShrinkFactor))
        constraints.append(self.robotSystem.ikPlanner.createXYZMovingBasePostureConstraint(startPoseName))
        constraints.append(self.robotSystem.ikPlanner.createLockedLeftArmPostureConstraint(startPoseName))
        constraints.append(self.robotSystem.ikPlanner.createLockedRightArmPostureConstraint(startPoseName))
        constraints.extend(self.robotSystem.ikPlanner.createFixedFootConstraints(startPoseName))
        constraints.append(self.robotSystem.ikPlanner.createKneePostureConstraint([0.7, 2.5]))
        constraintSet = ConstraintSet(self.robotSystem.ikPlanner, constraints, endPoseName, startPoseName)
        constraintSet.ikParameters = IkParameters(usePointwise=True, maxBaseMetersPerSecond=0.02)

        constraintSet.runIk()
        keyFramePlan = constraintSet.planEndPoseGoal(feetOnGround=True)
        poseTimes, poses = planplayback.PlanPlayback.getPlanPoses(keyFramePlan)
        ts = [poseTimes[0]]
        supportsList = [['r_foot', 'l_foot']]
        plan = self.publishPlanWithSupports(keyFramePlan, supportsList, ts, True)
        self.addPlan(plan)
        return plan
Code example #18
File: viewbehaviors.py Project: edowson/director
def toggleFootstepWidget(displayPoint, view, useHorizontalWidget=False):

    obj, _ = vis.findPickedObject(displayPoint, view)

    if not obj:
        return False

    name = obj.getProperty('Name')

    if name in ('footstep widget', 'footstep widget frame'):
        om.removeFromObjectModel(om.findObjectByName('footstep widget'))
        return True

    match = re.match('^step (\d+)$', name)
    if not match:
        return False

    stepIndex = int(match.group(1))

    existingWidget = om.findObjectByName('footstep widget')
    if existingWidget:
        previousStep = existingWidget.stepIndex
        print 'have existing widget for step:', stepIndex

        om.removeFromObjectModel(existingWidget)
        if previousStep == stepIndex:
            print 'returning because widget was for selected step'
            return True


    footMesh = shallowCopy(obj.polyData)
    footFrame = transformUtils.copyFrame(obj.getChildFrame().transform)

    if useHorizontalWidget:
        rpy = [0.0, 0.0, transformUtils.rollPitchYawFromTransform(footFrame)[2]]
        footFrame = transformUtils.frameFromPositionAndRPY(footFrame.GetPosition(), np.degrees(rpy))

    footObj = vis.showPolyData(footMesh, 'footstep widget', parent='planning', alpha=0.2)
    footObj.stepIndex = stepIndex
    frameObj = vis.showFrame(footFrame, 'footstep widget frame', parent=footObj, scale=0.2)
    footObj.actor.SetUserTransform(frameObj.transform)
    footObj.setProperty('Color', obj.getProperty('Color'))
    frameObj.setProperty('Edit', True)

    rep = frameObj.widget.GetRepresentation()
    rep.SetTranslateAxisEnabled(2, False)
    rep.SetRotateAxisEnabled(0, False)
    rep.SetRotateAxisEnabled(1, False)
    frameObj.widget.HandleRotationEnabledOff()

    walkGoal = om.findObjectByName('walking goal')
    if walkGoal:
        walkGoal.setProperty('Edit', False)


    def onFootWidgetChanged(frame):
        footstepsDriver.onStepModified(stepIndex - 1, frame)

    frameObj.connectFrameModified(onFootWidgetChanged)
    return True
Code example #19
    def projectDrillDemoInCamera():
        q = om.findObjectByName('camera overlay')
        om.removeFromObjectModel(q)

        imageView = cameraview.views['CAMERA_LEFT']
        imageView.imageActor.SetOpacity(.2)

        drawFrameInCamera(drillDemo.drill.frame.transform, 'drill frame',visible=False)

        tf = transformUtils.copyFrame( drillDemo.drill.frame.transform )
        tf.PreMultiply()
        tf.Concatenate( drillDemo.drill.drillToButtonTransform )
        drawFrameInCamera(tf, 'drill button')


        tf2 = transformUtils.copyFrame( tf )
        tf2.PreMultiply()
        tf2.Concatenate( transformUtils.frameFromPositionAndRPY( [0,0,0] , [180,0,0] ) )
        drawFrameInCamera(tf2, 'drill button flip')

        drawObjectInCamera('drill',visible=False)

        drawObjectInCamera('sensed pointer tip')
        obj = om.findObjectByName('sensed pointer tip frame')
        if (obj is not None):
            drawFrameInCamera(obj.transform, 'sensed pointer tip frame',visible=False)

        #drawObjectInCamera('left robotiq',visible=False)
        #drawObjectInCamera('right pointer',visible=False)

        v = imageView.view
        v.render()
Code example #20
 def __init__(self, jointController):
     self.jointController = jointController
     pos, rpy = jointController.q[:3], jointController.q[3:6]
     t = transformUtils.frameFromPositionAndRPY(pos, np.degrees(rpy))
     self.frame = vis.showFrame(t, 'mover widget', scale=0.3)
     self.frame.setProperty('Edit', True)
     self.frame.connectFrameModified(self.onFrameModified)
Code example #21
    def spawnValveAffordance(self):
        radius = 0.10
        tubeRadius = 0.02
        position = [0, 0, 1.2]
        rpy = [0, 0, 0]
        t_feet_mid = self.footstepPlanner.getFeetMidPoint(self.robotModel)
        t = transformUtils.frameFromPositionAndRPY(position, rpy)
        t_grasp = self.computeRelativeGraspTransform()
        t_grasp.Concatenate(t)
        t_stance = self.computeRobotStanceFrame(
            t_grasp, self.computeRelativeStanceTransform())
        t_valve = t_stance.GetInverse()

        # This is necessary to get the inversion to actually happen. We don't know why.
        t_valve.GetMatrix()

        t_valve.Concatenate(t)
        t_valve.Concatenate(t_feet_mid)
        pose = transformUtils.poseFromTransform(t_valve)
        desc = dict(classname='CapsuleRingAffordanceItem',
                    Name='valve',
                    uuid=newUUID(),
                    pose=pose,
                    Color=[0, 1, 0],
                    Radius=float(radius),
                    Segments=20)
        desc['Tube Radius'] = tubeRadius

        import affordancepanel
        obj = affordancepanel.panel.affordanceFromDescription(desc)
        obj.params = dict(radius=radius)
Code example #22
    def makeGoalFrames(self):

        for linkName, positionGoal, _ in self.positionCosts:
            orientationGoal = [0.0, 0.0, 0.0]
            t = transformUtils.frameFromPositionAndRPY(positionGoal,
                                                       orientationGoal)
            f = vis.showFrame(t, '%s position goal' % linkName)
            f.connectFrameModified(self.onGoalFrameModified)
Code example #23
 def computeRelativeGraspTransform(self):
     t = transformUtils.copyFrame(
         transformUtils.frameFromPositionAndRPY(self.graspFrameXYZ,
                                                self.graspFrameRPY))
     t.PostMultiply()
     t.RotateX(180)
     t.RotateY(-90)
     return t
Code example #24
    def getRoomSweepFrames(self, rotateHandFrame=False):
        topFrame = transformUtils.frameFromPositionAndRPY([0.65, 0.0, 0.8],
                                                          [160, 0, 90])
        yawFrame = transformUtils.frameFromPositionAndRPY(
            [0, 0.0, 0], [0, 0, self.currentYawDegrees])
        if rotateHandFrame:
            fixHandFrame = transformUtils.frameFromPositionAndRPY([0, 0.0, 0],
                                                                  [0, -90, 0])
            topFrame.PreMultiply()
            topFrame.Concatenate(fixHandFrame)
        topFrame.PostMultiply()
        topFrame.Concatenate(yawFrame)

        bottomFrame = transformUtils.frameFromPositionAndRPY([0.6, 0.0, 0.4],
                                                             [210, 0, 90])
        yawFrame = transformUtils.frameFromPositionAndRPY(
            [0, 0.0, 0], [0, 0, self.currentYawDegrees])
        if rotateHandFrame:
            bottomFrame.PreMultiply()
            bottomFrame.Concatenate(fixHandFrame)
        bottomFrame.PostMultiply()
        bottomFrame.Concatenate(yawFrame)

        if (self.fromTop):
            self.startFrame = vis.showFrame(topFrame,
                                            'frame start',
                                            visible=False,
                                            scale=0.1,
                                            parent=self.mapFolder)
            self.endFrame = vis.showFrame(bottomFrame,
                                          'frame end',
                                          visible=False,
                                          scale=0.1,
                                          parent=self.mapFolder)
        else:
            self.startFrame = vis.showFrame(bottomFrame,
                                            'frame start',
                                            visible=False,
                                            scale=0.1,
                                            parent=self.mapFolder)
            self.endFrame = vis.showFrame(topFrame,
                                          'frame end',
                                          visible=False,
                                          scale=0.1,
                                          parent=self.mapFolder)
Code example #25
File: egressplanner.py Project: edowson/director
    def planFootOut(self, startPose=None, finalFootHeight=0.05):

        if startPose is None:
            startPose = self.getPlanningStartPose()

        startPoseName = 'q_egress_start'
        self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
        endPoseName = 'q_egress_end'

        utorsoFrame = self.robotSystem.ikPlanner.getLinkFrameAtPose('utorso', startPose)
        finalLeftFootFrame = self.computeLeftFootOverPlatformFrame(startPose, finalFootHeight)

        constraints = []
        constraints.extend(self.createUtorsoGazeConstraints([0.0, 1.0]))
        constraints.append(ik.QuasiStaticConstraint(leftFootEnabled=False, rightFootEnabled=True,
                                                    pelvisEnabled=False, shrinkFactor=0.01))
        constraints.append(self.robotSystem.ikPlanner.createMovingBaseSafeLimitsConstraint())
        constraints.append(self.robotSystem.ikPlanner.createLockedLeftArmPostureConstraint(startPoseName))
        constraints.append(self.robotSystem.ikPlanner.createLockedRightArmPostureConstraint(startPoseName))
        #constraints.append(self.robotSystem.ikPlanner.createLockedBackPostureConstraint(startPoseName))
        constraints.append(self.robotSystem.ikPlanner.createKneePostureConstraint([0.7, 2.5]))
        constraints.append(self.robotSystem.ikPlanner.createFixedLinkConstraints(startPoseName, 'r_foot'))
        constraints.extend(self.createLeftFootPoseConstraint(finalLeftFootFrame, tspan=[1,1]))

        constraintSet = ConstraintSet(self.robotSystem.ikPlanner, constraints, endPoseName, startPoseName)
        constraintSet.ikParameters = IkParameters(usePointwise=True, maxBaseRPYDegreesPerSecond=10,
                                                  rescaleBodyNames=['l_foot'],
                                                  rescaleBodyPts=[0.0, 0.0, 0.0],
                                                  maxBodyTranslationSpeed=self.maxFootTranslationSpeed)
        #constraintSet.seedPoseName = 'q_start'
        #constraintSet.nominalPoseName = 'q_start'

        constraintSet.runIk()

        footFrame = self.robotSystem.ikPlanner.getLinkFrameAtPose('l_foot', startPose)
        t = transformUtils.frameFromPositionAndRPY([0, 0, self.polaris.leftFootEgressOutsideFrame.transform.GetPosition()[2]-footFrame.GetPosition()[2]], [0, 0, 0])
        liftFrame = transformUtils.concatenateTransforms([footFrame, t])
        vis.updateFrame(liftFrame, 'lift frame')

        c = ik.WorldFixedOrientConstraint()
        c.linkName = 'l_foot'
        c.tspan = [0.0, 0.1, 0.2]
        constraints.append(c)
        constraints.extend(self.createLeftFootPoseConstraint(liftFrame, tspan=[0.2, 0.2]))
        constraints.extend(self.createLeftFootPoseConstraint(self.polaris.leftFootEgressMidFrame, tspan=[0.5, 0.5]))

        constraints.extend(self.createLeftFootPoseConstraint(self.polaris.leftFootEgressOutsideFrame, tspan=[0.8, 0.8]))

        #plan = constraintSet.planEndPoseGoal(feetOnGround=False)
        keyFramePlan = constraintSet.runIkTraj()
        poseTimes, poses = planplayback.PlanPlayback.getPlanPoses(keyFramePlan)
        ts = [poseTimes[0]]
        supportsList = [['r_foot']]
        plan = self.publishPlanWithSupports(keyFramePlan, supportsList, ts, False)
        self.addPlan(plan)
        return plan
Code example #26
 def handleStatus(self, msg):
     if msg.plan_type == msg.STANDING:
         goal = transformUtils.frameFromPositionAndRPY(
                  np.array([robotStateJointController.q[0] + 2 * self.max_distance_per_plan * (np.random.random() - 0.5),
                            robotStateJointController.q[1] + 2 * self.max_distance_per_plan * (np.random.random() - 0.5),
                            robotStateJointController.q[2] - 0.84]),
                  [0, 0, robotStateJointController.q[5] + 2 * np.degrees(np.pi) * (np.random.random() - 0.5)])
         request = footstepsDriver.constructFootstepPlanRequest(robotStateJointController.q, goal)
         request.params.max_num_steps = 18
         footstepsDriver.sendFootstepPlanRequest(request)
Code example #27
File: bihandeddemo.py Project: rdeits/director
    def computeBoardReachFrames(self):
        ''' Reach ~10cm short of the grasp frame '''
        reachLeftXYZ = copy.deepcopy( self.graspLeftHandFrameXYZ )
        reachLeftXYZ[0] = reachLeftXYZ[0] - self.reachDepth
        reachLeftRPY = copy.deepcopy ( self.graspLeftHandFrameRPY )

        tl = transformUtils.frameFromPositionAndRPY( reachLeftXYZ , reachLeftRPY )
        tl.Concatenate(self.frame.transform)
        self.reachLeftFrame = vis.updateFrame(tl, 'reach left frame', parent=self.affordance, visible=False, scale=0.2)
        self.reachLeftFrame.addToView(app.getDRCView())
        
        reachRightXYZ = copy.deepcopy( self.graspRightHandFrameXYZ )
        reachRightXYZ[0] = reachRightXYZ[0] - self.reachDepth
        reachRightRPY = copy.deepcopy ( self.graspRightHandFrameRPY )

        tr = transformUtils.frameFromPositionAndRPY( reachRightXYZ , reachRightRPY )
        tr.Concatenate(self.frame.transform)
        self.reachRightFrame = vis.updateFrame(tr, 'reach right frame', parent=self.affordance, visible=False, scale=0.2)
        self.reachRightFrame.addToView(app.getDRCView())
Code example #28
File: bihandeddemo.py Project: rdeits/director
    def planGraspLift(self):
        t3 = transformUtils.frameFromPositionAndRPY( [0.00,0.0, 0.04] , [0,0,0] )
        liftTransform = transformUtils.copyFrame(self.board.graspFrame.transform)
        liftTransform.Concatenate(t3)

        self.board.liftFrame = vis.updateFrame(liftTransform, 'lift frame', parent="affordances", visible=False, scale=0.2)
        startPose = self.getPlanningStartPose()
        constraintSet = self.ikPlanner.planEndEffectorGoal(startPose, self.graspingHand, self.board.liftFrame, lockBase=self.lockBase, lockBack=self.lockBack)
        endPose, info = constraintSet.runIk()
        liftPlan = constraintSet.runIkTraj()
        self.addPlan(liftPlan)
Code example #29
File: bihandeddemo.py Project: wxmerkt/director
    def spawnBoardAffordance(self, randomize=False):
        self.boardLength = 1.5

        if randomize:

            position = [
                random.uniform(0.5, 0.8),
                random.uniform(-0.2, 0.2),
                random.uniform(0.5, 0.8)
            ]
            rpy = [
                random.choice((random.uniform(-35,
                                              35), random.uniform(70, 110))),
                random.uniform(-10, 10),
                random.uniform(-5, 5)
            ]
            zwidth = random.uniform(0.5, 1.0)

        else:
            if self.b.val:
                position = [0.4, 0.0, 1.]
            else:
                position = [0.6, 0.0, 1.]

            rpy = [90, 1, 0]
            zwidth = self.boardLength

        xwidth = 3.75 * .0254
        ywidth = 1.75 * .0254
        t = transformUtils.frameFromPositionAndRPY(position, rpy)
        t.Concatenate(self.b.computeGroundFrame(self.b.robotModel))
        xaxis = [1, 0, 0]
        yaxis = [0, 1, 0]
        zaxis = [0, 0, 1]
        for axis in (xaxis, yaxis, zaxis):
            t.TransformVector(axis, axis)

        self.affordance = segmentation.createBlockAffordance(
            t.GetPosition(),
            xaxis,
            yaxis,
            zaxis,
            xwidth,
            ywidth,
            zwidth,
            'board',
            parent='affordances')
        self.affordance.setProperty('Color', QtGui.QColor(200, 150, 100))
        t = self.affordance.actor.GetUserTransform()
        self.frame = vis.showFrame(t,
                                   'board frame',
                                   parent=self.affordance,
                                   visible=False,
                                   scale=0.2)
Code example #30
 def computeNextRoomFrame(self):
     assert self.targetAffordance
     t = transformUtils.frameFromPositionAndRPY(self.nextPosition,
                                                [0, 0, 0])
     self.faceTransformLocal = transformUtils.copyFrame(t)  # copy required
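     # note: Concatenate() below modifies t in place, so the local copy must be taken first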
     t.Concatenate(self.targetFrame)
     self.faceFrameDesired = vis.showFrame(t,
                                           'target frame desired',
                                           parent=self.targetAffordance,
                                           visible=False,
                                           scale=0.2)
Code example #31
File: footstepsdriver.py Project: manuelli/director
    def getFeetMidPoint(model, useWorldZ=True):
        '''
        Returns a frame in the world coordinate system whose position is the average of
        the left and right foot reference point positions, whose yaw is the average of
        the left and right foot yaws, and whose Z axis is aligned with world Z.
        The foot reference point is the average of the foot contact points in the foot frame.
        '''
        contact_pts_left, contact_pts_right = FootstepsDriver.getContactPts()

        contact_pts_mid_left = np.mean(contact_pts_left, axis=0) # mid point on foot relative to foot frame
        contact_pts_mid_right = np.mean(contact_pts_right, axis=0) # mid point on foot relative to foot frame

        t_lf_mid = model.getLinkFrame(_leftFootLink)
        t_lf_mid.PreMultiply()
        t_lf_mid.Translate(contact_pts_mid_left)

        t_rf_mid = model.getLinkFrame(_rightFootLink)
        t_rf_mid.PreMultiply()
        t_rf_mid.Translate(contact_pts_mid_right)

        if (_robotType == 0): # Atlas
            t_feet_mid = transformUtils.frameInterpolate(t_lf_mid, t_rf_mid, 0.5)
        elif (_robotType == 1):
            # Valkyrie v1 Foot orientation is silly
            l_foot_sole = transformUtils.frameFromPositionAndRPY([0,0,0], [180.0, 82.5, 0])
            l_foot_sole.PostMultiply()
            l_foot_sole.Concatenate(t_lf_mid)
            r_foot_sole = transformUtils.frameFromPositionAndRPY([0,0,0], [0.0, -82.5, 0])
            r_foot_sole.PostMultiply()
            r_foot_sole.Concatenate(t_rf_mid)
            t_feet_mid = transformUtils.frameInterpolate(l_foot_sole, r_foot_sole, 0.5)
        elif (_robotType == 2):
            # Valkyrie v2 is better
            t_feet_mid = transformUtils.frameInterpolate(t_lf_mid, t_rf_mid, 0.5)


        if useWorldZ:
            rpy = [0.0, 0.0, np.degrees(transformUtils.rollPitchYawFromTransform(t_feet_mid)[2])]
            return transformUtils.frameFromPositionAndRPY(t_feet_mid.GetPosition(), rpy)
        else:
            return t_feet_mid
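
A rough numpy sketch of the mid-point idea described in the docstring above (average the two foot positions and their yaws, keep Z aligned with world Z). The real code relies on transformUtils.frameInterpolate, which interpolates the orientations properly; naiveFeetMidPoint is only an illustration:

import numpy as np

def naiveFeetMidPoint(t_lf_mid, t_rf_mid):
    posMid = 0.5 * (np.array(t_lf_mid.GetPosition()) + np.array(t_rf_mid.GetPosition()))
    yawLeft = transformUtils.rollPitchYawFromTransform(t_lf_mid)[2]
    yawRight = transformUtils.rollPitchYawFromTransform(t_rf_mid)[2]
    yawMid = np.degrees(0.5 * (yawLeft + yawRight))   # naive average; ignores angle wrap-around
    return transformUtils.frameFromPositionAndRPY(posMid, [0.0, 0.0, yawMid])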
Code example #32
    def placeStepsOnBlocks(self, blocks, groundPlane, standingFootName, standingFootFrame, removeFirstLeftStep = True):

        footsteps = []
        for i, block in enumerate(blocks):
            # move back less for stereo:
            # lidar: -0.27 and -0.23
            if self.processContinuousStereo or self.processRawStereo:
                nextLeftTransform = transformUtils.frameFromPositionAndRPY([-0.24,0.29,0.08], [0,0,0])
                nextRightTransform = transformUtils.frameFromPositionAndRPY([-0.20,0.1,0.08], [0,0,0])
            else:
                nextLeftTransform = transformUtils.frameFromPositionAndRPY([-0.27,0.29,0.08], [0,0,0])
                nextRightTransform = transformUtils.frameFromPositionAndRPY([-0.23,0.1,0.08], [0,0,0])

            nextLeftTransform.Concatenate(block.cornerTransform)
            footsteps.append(Footstep(nextLeftTransform,False))

            nextRightTransform.Concatenate(block.cornerTransform)
            footsteps.append(Footstep(nextRightTransform,True))

        #footOnGround = False
        #if (groundPlane):
        #    # TODO: 0.08 is distance from foot frames to sole. remove hard coding!
        #    distOffGround = abs(groundPlane.cornerTransform.GetPosition()[2]-standingFootFrame.GetPosition()[2] + 0.08)
        #    #print "distOffGround",distOffGround
        #    footOnGround = (distOffGround < 0.05)
        #    if (footOnGround):
        #        # the robot is standing on the ground plane
        #        nextRightTransform = transformUtils.frameFromPositionAndRPY([(-0.23-0.38),0.1,0.08-0.13], [0,0,0])
        #        nextRightTransform.Concatenate(blocks[0].cornerTransform)
        #        footsteps = [Footstep(nextRightTransform,True)] + footsteps

        #if (footOnGround is False):
        #  # if we are standing on right foot, we can see the next block.
        #  # but the next left step has been committed - so remove it from the list

        if (removeFirstLeftStep is True):
            if (standingFootName is self.ikPlanner.rightFootLink ):
                footsteps = footsteps[1:]
              #print "removing the first left step"

        return footsteps
Code example #33
File: walkingtestdemo.py Project: rdeits/director
    def __init__(self, robotModel, playbackRobotModel, teleopRobotModel,
                 footstepPlanner, manipPlanner, ikPlanner, lhandDriver,
                 rhandDriver, atlasDriver, multisenseDriver,
                 sensorJointController, planPlaybackFunction,
                 showPoseFunction):
        self.robotModel = robotModel
        self.playbackRobotModel = playbackRobotModel  # not used inside the demo
        self.teleopRobotModel = teleopRobotModel  # not used inside the demo
        self.footstepPlanner = footstepPlanner
        self.manipPlanner = manipPlanner
        self.ikPlanner = ikPlanner
        self.lhandDriver = lhandDriver
        self.rhandDriver = rhandDriver
        self.atlasDriver = atlasDriver
        self.multisenseDriver = multisenseDriver
        self.sensorJointController = sensorJointController
        self.planPlaybackFunction = planPlaybackFunction
        self.showPoseFunction = showPoseFunction

        self.goalTransform1 = transformUtils.frameFromPositionAndRPY([1, 0, 0],
                                                                     [0, 0, 0])
        self.goalTransform2 = transformUtils.frameFromPositionAndRPY(
            [2, 0, 0], [0, 0, 10])
        #self.goalTransform2 = transformUtils.frameFromPositionAndRPY([1,1,0],[0,0,90])

        self.visOnly = False  # True for development, False for operation
        self.planFromCurrentRobotState = True  # False for development, True for operation
        useDevelopment = False
        if (useDevelopment):
            self.visOnly = True  # True for development, False for operation
            self.planFromCurrentRobotState = False  # False for development, True for operation

        self.optionalUserPromptEnabled = False
        self.requiredUserPromptEnabled = True

        self.constraintSet = None

        self.plans = []

        self._setupSubscriptions()
Code example #34
File: robottasks.py Project: simalpha/director
    def run(self):

        polyData = self.getPointCloud()
        annotation = self.getAnnotationInput()
        annotationPoint = annotation.annotationPoints[0]

        mesh = segmentation.fitShelfItem(polyData, annotationPoint, clusterTolerance=self.properties.getProperty('Cluster tolerance'))

        annotation.setProperty('Visible', False)
        om.removeFromObjectModel(om.findObjectByName('shelf item'))
        obj = vis.showPolyData(mesh, 'shelf item', color=[0,1,0])
        t = transformUtils.frameFromPositionAndRPY(segmentation.computeCentroid(mesh), [0,0,0])
        segmentation.makeMovable(obj, t)
Code example #35
File: bihandeddemo.py Project: wxmerkt/director
    def computeBoardGraspFrames(self):
        t = transformUtils.frameFromPositionAndRPY(self.graspLeftHandFrameXYZ,
                                                   self.graspLeftHandFrameRPY)
        t_copy = transformUtils.copyFrame(t)
        t_copy.Concatenate(self.frame.transform)
        self.graspLeftFrame = vis.updateFrame(t_copy,
                                              'grasp left frame',
                                              parent=self.affordance,
                                              visible=False,
                                              scale=0.2)
        self.graspLeftFrame.addToView(app.getDRCView())

        t = transformUtils.frameFromPositionAndRPY(self.graspRightHandFrameXYZ,
                                                   self.graspRightHandFrameRPY)
        t_copy = transformUtils.copyFrame(t)
        t_copy.Concatenate(self.frame.transform)
        self.graspRightFrame = vis.updateFrame(t_copy,
                                               'grasp right frame',
                                               parent=self.affordance,
                                               visible=False,
                                               scale=0.2)
        self.graspRightFrame.addToView(app.getDRCView())
Code example #36
def updateCamera(msg):
    T = transformUtils.frameFromPositionAndRPY(msg.q[:3],np.degrees(msg.q[3:]))
    axes = transformUtils.getAxesFromTransform(T)
   
    #vis.updateFrame(T, 'vicon camera')
    #return

    camera = app.view.camera()
    camera.SetPosition(T.GetPosition())
    camera.SetFocalPoint(np.array(T.GetPosition())+axes[0])
    camera.SetViewUp(axes[2])
    camera.SetViewAngle(122.6)
    app.view.render()
Code example #37
def updateCamera(msg):
    T = transformUtils.frameFromPositionAndRPY(msg.q[:3], np.degrees(msg.q[3:]))
    axes = transformUtils.getAxesFromTransform(T)

    # vis.updateFrame(T, 'vicon camera')
    # return

    camera = app.view.camera()
    camera.SetPosition(T.GetPosition())
    camera.SetFocalPoint(np.array(T.GetPosition()) + axes[0])
    camera.SetViewUp(axes[2])
    camera.SetViewAngle(122.6)
    app.view.render()
Code example #38
File: takktilevis.py Project: mlab-upenn/arch-apex
    def createSpheres(self, sensorValues):
        d = DebugData()

        for key in sensorValues.keys():
            frame, pos, rpy = self.sensorLocations[key]

            t = transformUtils.frameFromPositionAndRPY(pos, rpy)
            t.PostMultiply()
            t.Concatenate(self.frames[frame])
            d.addSphere(t.GetPosition(),
                        radius=0.005,
                        color=self.getColor(sensorValues[key], key),
                        resolution=8)
        vis.updatePolyData(d.getPolyData(), self.name, colorByName='RGB255')
Code example #39
File: tabledemo.py Project: edowson/director
    def planReachToTableObject(self, side='left'):

        obj, frame = self.getNextTableObject(side)
        startPose = self.getPlanningStartPose()

        if self.ikPlanner.fixedBaseArm: # includes reachDist hack instead of in ikPlanner (TODO!)
            f = transformUtils.frameFromPositionAndRPY( np.array(frame.transform.GetPosition())-np.array([self.reachDist,0,0]), [0,0,-90] )
            f.PreMultiply()
            f.RotateY(90)
            f.Update()
            self.constraintSet = self.ikPlanner.planEndEffectorGoal(startPose, side, f, lockBase=False, lockBack=True)
            #newFrame = vis.FrameItem('reach_item', f, self.view)
            #self.constraintSet = self.ikPlanner.planGraspOrbitReachPlan(startPose, side, newFrame, constraints=None, dist=self.reachDist, lockBase=self.lockBase, lockBack=self.lockBack, lockArm=False)
        else:
            self.constraintSet = self.ikPlanner.planGraspOrbitReachPlan(startPose, side, frame, constraints=None, dist=self.reachDist, lockBase=self.lockBase, lockBack=self.lockBack, lockArm=False)
            loweringSide = 'left' if side == 'right' else 'right'
            armPose = self.getLoweredArmPose(startPose, loweringSide)
            armPoseName = 'lowered_arm_pose'
            self.ikPlanner.ikServer.sendPoseToServer(armPose, armPoseName)

            loweringSideJoints = []
            if (loweringSide == 'left'):
              loweringSideJoints += self.ikPlanner.leftArmJoints
            else:
              loweringSideJoints += self.ikPlanner.rightArmJoints

            reachingSideJoints = []
            if (side == 'left'):
                reachingSideJoints += self.ikPlanner.leftArmJoints
            else:
                reachingSideJoints += self.ikPlanner.rightArmJoints


            armPostureConstraint = self.ikPlanner.createPostureConstraint(armPoseName, loweringSideJoints)
            armPostureConstraint.tspan = np.array([1.0, 1.0])
            self.constraintSet.constraints.append(armPostureConstraint)
        
        self.constraintSet.runIk()

        #armPose = self.getRaisedArmPose(startPose, side)
        #armPoseName = 'raised_arm_pose'
        #self.ikPlanner.ikServer.sendPoseToServer(armPose, armPoseName)
        #armPostureConstraint = self.ikPlanner.createPostureConstraint(armPoseName, reachingSideJoints)
        #armPostureConstraint.tspan = np.array([0.5, 0.5])
        #self.constraintSet.constraints.append(armPostureConstraint)

        print 'planning reach to'
        plan = self.constraintSet.runIkTraj()
        self.addPlan(plan)
Code example #40
File: walkingtestdemo.py Project: rdeits/director
    def __init__(self, robotModel, playbackRobotModel, teleopRobotModel, footstepPlanner, manipPlanner, ikPlanner,
                 lhandDriver, rhandDriver, atlasDriver, multisenseDriver, sensorJointController,
                 planPlaybackFunction, showPoseFunction):
        self.robotModel = robotModel
        self.playbackRobotModel = playbackRobotModel # not used inside the demo
        self.teleopRobotModel = teleopRobotModel # not used inside the demo
        self.footstepPlanner = footstepPlanner
        self.manipPlanner = manipPlanner
        self.ikPlanner = ikPlanner
        self.lhandDriver = lhandDriver
        self.rhandDriver = rhandDriver
        self.atlasDriver = atlasDriver
        self.multisenseDriver = multisenseDriver
        self.sensorJointController = sensorJointController
        self.planPlaybackFunction = planPlaybackFunction
        self.showPoseFunction = showPoseFunction

        self.goalTransform1 = transformUtils.frameFromPositionAndRPY([1,0,0],[0,0,0])
        self.goalTransform2 = transformUtils.frameFromPositionAndRPY([2,0,0],[0,0,10])
        #self.goalTransform2 = transformUtils.frameFromPositionAndRPY([1,1,0],[0,0,90])

        self.visOnly = False # True for development, False for operation
        self.planFromCurrentRobotState = True # False for development, True for operation
        useDevelopment = False
        if (useDevelopment):
            self.visOnly = True # True for development, False for operation
            self.planFromCurrentRobotState = False # False for development, True for operation

        self.optionalUserPromptEnabled = False
        self.requiredUserPromptEnabled = True

        self.constraintSet = None

        self.plans = []

        self._setupSubscriptions()
Code example #41
def testEulerToFrame():
    '''
    Test some Euler conversions
    '''
    rpy = transformations.euler_from_quaternion(
        transformations.random_quaternion())

    frame = transformUtils.frameFromPositionAndRPY([0, 0, 0], np.degrees(rpy))
    mat = transformUtils.getNumpyFromTransform(frame)

    mat2 = transformations.euler_matrix(rpy[0], rpy[1], rpy[2])

    print mat
    print mat2
    assert np.allclose(mat, mat2)
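
The unit convention is the main thing this test exercises: transformations.euler_from_quaternion returns radians, while frameFromPositionAndRPY expects degrees, hence the np.degrees() call. A quick round trip using only functions that already appear on this page (sketch):

rpyDegrees = [30.0, 0.0, 90.0]
frame = transformUtils.frameFromPositionAndRPY([0, 0, 0], rpyDegrees)
recovered = transformUtils.rollPitchYawFromTransform(frame)   # returns radians
assert np.allclose(np.degrees(recovered), rpyDegrees)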
Code example #42
File: mappingdemo.py Project: mlab-upenn/arch-apex
    def getRoomSweepFrames(self, rotateHandFrame=False):
        topFrame = transformUtils.frameFromPositionAndRPY([0.65,0.0,0.8],[160,0,90])
        yawFrame = transformUtils.frameFromPositionAndRPY([0,0.0,0],[0,0,self.currentYawDegrees])
        if rotateHandFrame:
            fixHandFrame = transformUtils.frameFromPositionAndRPY([0,0.0,0],[0,-90,0])
            topFrame.PreMultiply()
            topFrame.Concatenate( fixHandFrame )
        topFrame.PostMultiply()
        topFrame.Concatenate( yawFrame )

        bottomFrame = transformUtils.frameFromPositionAndRPY([0.6,0.0,0.4],[210,0,90])
        yawFrame = transformUtils.frameFromPositionAndRPY([0,0.0,0],[0,0,self.currentYawDegrees])
        if rotateHandFrame:
            bottomFrame.PreMultiply()
            bottomFrame.Concatenate( fixHandFrame )
        bottomFrame.PostMultiply()
        bottomFrame.Concatenate( yawFrame )

        if (self.fromTop):
            self.startFrame = vis.showFrame(topFrame, 'frame start', visible=False, scale=0.1,parent=self.mapFolder)
            self.endFrame = vis.showFrame(bottomFrame, 'frame end', visible=False, scale=0.1,parent=self.mapFolder)
        else:
            self.startFrame = vis.showFrame(bottomFrame, 'frame start', visible=False, scale=0.1,parent=self.mapFolder)
            self.endFrame = vis.showFrame(topFrame, 'frame end', visible=False, scale=0.1,parent=self.mapFolder)
Code example #43
    def findSafeRegion(self, pose):
        pose = np.asarray(pose)
        tformForProjection = transformUtils.frameFromPositionAndRPY([0,0,0], pose[3:] * 180 / np.pi)
        tform = transformUtils.frameFromPositionAndRPY(pose[:3], pose[3:] * 180 / np.pi)

        contact_pts_on_plane = np.zeros((2, self.bot_pts.shape[1]))
        for j in range(self.bot_pts.shape[1]):
            contact_pts_on_plane[:,j] = tformForProjection.TransformPoint([self.bot_pts[0,j], self.bot_pts[1,j], 0])[:2]

        Rdot = np.array([[0, -1], [1, 0]])
        contact_vel_in_world = Rdot.dot(contact_pts_on_plane)

        c_region = {'A': [], 'b': []}

        for i in range(self.planar_polyhedron.A.shape[0]):
            ai = self.planar_polyhedron.A[i,:]
            n = np.linalg.norm(ai)
            ai = ai / n
            bi = self.planar_polyhedron.b[i] / n

            p = ai.dot(contact_pts_on_plane)
            v = ai.dot(contact_vel_in_world)

            mask = np.logical_or(p >= 0, v >= 0)
            for j, tf in enumerate(mask):
                if tf:
                    c_region['A'].append(np.hstack((ai, v[j])))
                    c_region['b'].append([bi - p[j]])

        A = np.vstack(c_region['A'])
        b = np.hstack(c_region['b'])

        b = b + A.dot(np.array([0,0,pose[5]]))

        self.c_space_polyhedron = Hrep(A, b)
        return SafeTerrainRegion(A, b, [], [], tform)