Code example #1
File: cameraview.py Project: mlab-upenn/arch-apex
    def showFusedMaps(self):
        om.removeFromObjectModel(om.findObjectByName('stereo'))
        om.getOrCreateContainer('stereo')

        q = imageManager.queue
        cameraToLocalNow = vtk.vtkTransform()
        utime = q.getCurrentImageTime('CAMERA_TSDF')

        q.getTransform('CAMERA_LEFT', 'local', utime, cameraToLocalNow)
        cameraToLocalFusedNow = vtk.vtkTransform()
        q.getTransform('CAMERA_LEFT_ALT', 'local', utime, cameraToLocalFusedNow)

        for i in range(len(self.pointClouds)):

            fusedNowToLocalNow = vtk.vtkTransform()
            fusedNowToLocalNow.PreMultiply()
            fusedNowToLocalNow.Concatenate(cameraToLocalNow)
            fusedNowToLocalNow.Concatenate(cameraToLocalFusedNow.GetLinearInverse())

            fusedTransform = vtk.vtkTransform()
            fusedTransform.PreMultiply()
            fusedTransform.Concatenate(fusedNowToLocalNow)
            fusedTransform.Concatenate(self.cameraToLocalFusedTransforms[i])

            pd = filterUtils.transformPolyData(self.pointClouds[i], fusedTransform)
            vis.showFrame(fusedTransform, ('cloud frame ' + str(i)), visible=True, scale=0.2, parent='stereo')
            vis.showPolyData(pd, ('stereo ' + str(i)), parent='stereo', colorByName='rgb_colors')
Code example #2
File: affordanceupdater.py Project: wxmerkt/director
    def updateObjectInCamera(self, obj, cameraObj):

        imageView = self.imageView

        objToLocalT = transformUtils.copyFrame(obj.actor.GetUserTransform()
                                               or vtk.vtkTransform())

        localToCameraT = vtk.vtkTransform()
        self.imageQueue.getTransform('local', imageView.imageName,
                                     localToCameraT)

        t = vtk.vtkTransform()
        t.PostMultiply()
        t.Concatenate(objToLocalT)
        t.Concatenate(localToCameraT)

        pd = filterUtils.transformPolyData(obj.polyData, t)
        '''
        normals = pd.GetPointData().GetNormals()
        cameraToImageT = vtk.vtkTransform()
        imageQueue.getCameraProjectionTransform(imageView.imageName, cameraToImageT)
        pd = filterUtils.transformPolyData(pd, cameraToImageT)
        pts = vnp.getNumpyFromVtk(pd, 'Points')
        pts[:,0] /= pts[:,2]
        pts[:,1] /= pts[:,2]
        pd.GetPointData().SetNormals(normals)
        '''

        self.imageQueue.projectPoints(imageView.imageName, pd)

        cameraObj.setPolyData(pd)

        self.addActorToImageOverlay(cameraObj, imageView)
Code example #3
File: footstepsdriver.py Project: manuelli/director
    def transformPlanToBDIFrame(self, plan):
        if (self.pose_bdi is None):
            # print "haven't received POSE_BDI"
            return

        # TODO: This transformation should be rewritten using the LOCAL_TO_LOCAL_BDI frame
        # instead of using FK here

        t_bodybdi = transformUtils.transformFromPose(self.pose_bdi.pos, self.pose_bdi.orientation)
        t_bodybdi.PostMultiply()

        current_pose = self.jointController.q
        t_bodymain = transformUtils.transformFromPose(current_pose[0:3], botpy.roll_pitch_yaw_to_quat(current_pose[3:6]))
        t_bodymain.PostMultiply()

        # iterate and transform
        self.bdi_plan = plan.decode(plan.encode())  # decode and encode ensures deepcopy
        for i, footstep in enumerate(self.bdi_plan.footsteps):
            step = footstep.pos

            t_step = transformUtils.frameFromPositionMessage(step)
            t_body_to_step = vtk.vtkTransform()
            t_body_to_step.DeepCopy(t_step)
            t_body_to_step.PostMultiply()
            t_body_to_step.Concatenate(t_bodymain.GetLinearInverse())

            t_stepbdi = vtk.vtkTransform()
            t_stepbdi.DeepCopy(t_body_to_step)
            t_stepbdi.PostMultiply()
            t_stepbdi.Concatenate(t_bodybdi)
            footstep.pos = transformUtils.positionMessageFromFrame(t_stepbdi)

        if (self.showBDIPlan is True):
            self.drawBDIFootstepPlan()
Code example #4
File: cameraview.py Project: steinachim/director
    def onViewDoubleClicked(self, displayPoint):

        obj, pickedPoint = vis.findPickedObject(displayPoint, self.view)

        if pickedPoint is None or not obj:
            return

        imageName = obj.getProperty('Name')
        imageUtime = self.imageManager.getUtime(imageName)

        cameraToLocal = vtk.vtkTransform()
        self.imageManager.queue.getTransform(imageName, 'local', imageUtime,
                                             cameraToLocal)

        utorsoToLocal = vtk.vtkTransform()
        self.imageManager.queue.getTransform('utorso', 'local', imageUtime,
                                             utorsoToLocal)

        p = [0.0, 0.0, 0.0]  # output buffer filled by TransformPoint
        utorsoToLocal.TransformPoint(pickedPoint, p)

        ray = np.array(p) - np.array(cameraToLocal.GetPosition())
        ray /= np.linalg.norm(ray)

        if self.rayCallback:
            self.rayCallback(np.array(cameraToLocal.GetPosition()), ray)
Code example #5
    def transformPlanToBDIFrame(self, plan):
        if (self.pose_bdi is None):
            # print "haven't received POSE_BDI"
            return

        # TODO: This transformation should be rewritten using the LOCAL_TO_LOCAL_BDI frame
        # instead of using FK here

        t_bodybdi = transformUtils.transformFromPose(self.pose_bdi.pos, self.pose_bdi.orientation)
        t_bodybdi.PostMultiply()

        current_pose = self.jointController.q
        t_bodymain = transformUtils.transformFromPose(current_pose[0:3], transformUtils.rollPitchYawToQuaternion(current_pose[3:6]))
        t_bodymain.PostMultiply()

        # iterate and transform
        self.bdi_plan = plan.decode(plan.encode())  # decode and encode ensures deepcopy
        for i, footstep in enumerate(self.bdi_plan.footsteps):
            step = footstep.pos

            t_step = transformUtils.frameFromPositionMessage(step)
            t_body_to_step = vtk.vtkTransform()
            t_body_to_step.DeepCopy(t_step)
            t_body_to_step.PostMultiply()
            t_body_to_step.Concatenate(t_bodymain.GetLinearInverse())

            t_stepbdi = vtk.vtkTransform()
            t_stepbdi.DeepCopy(t_body_to_step)
            t_stepbdi.PostMultiply()
            t_stepbdi.Concatenate(t_bodybdi)
            footstep.pos = transformUtils.positionMessageFromFrame(t_stepbdi)

        if (self.showBDIPlan is True):
            self.drawBDIFootstepPlan()
Code example #6
File: cameraview.py Project: steinachim/director
    def showFusedMaps(self):
        om.removeFromObjectModel(om.findObjectByName('stereo'))
        om.getOrCreateContainer('stereo')

        q = imageManager.queue
        cameraToLocalNow = vtk.vtkTransform()
        utime = q.getCurrentImageTime('CAMERA_TSDF')

        q.getTransform('CAMERA_LEFT', 'local', utime, cameraToLocalNow)
        cameraToLocalFusedNow = vtk.vtkTransform()
        q.getTransform('CAMERA_LEFT_ALT', 'local', utime,
                       cameraToLocalFusedNow)

        for i in range(len(self.pointClouds)):

            fusedNowToLocalNow = vtk.vtkTransform()
            fusedNowToLocalNow.PreMultiply()
            fusedNowToLocalNow.Concatenate(cameraToLocalNow)
            fusedNowToLocalNow.Concatenate(
                cameraToLocalFusedNow.GetLinearInverse())

            fusedTransform = vtk.vtkTransform()
            fusedTransform.PreMultiply()
            fusedTransform.Concatenate(fusedNowToLocalNow)
            fusedTransform.Concatenate(self.cameraToLocalFusedTransforms[i])

            pd = filterUtils.transformPolyData(self.pointClouds[i],
                                               fusedTransform)
            vis.showFrame(fusedTransform, ('cloud frame ' + str(i)),
                          visible=True,
                          scale=0.2,
                          parent='stereo')
            vis.showPolyData(pd, ('stereo ' + str(i)),
                             parent='stereo',
                             colorByName='rgb_colors')
Code example #7
    def updateObjectInCamera(self, obj, cameraObj):

        imageView = self.imageView

        objToLocalT = transformUtils.copyFrame(obj.actor.GetUserTransform() or vtk.vtkTransform())

        localToCameraT = vtk.vtkTransform()
        self.imageQueue.getTransform('local', imageView.imageName, localToCameraT)

        t = vtk.vtkTransform()
        t.PostMultiply()
        t.Concatenate(objToLocalT)
        t.Concatenate(localToCameraT)

        pd = filterUtils.transformPolyData(obj.polyData, t)

        '''
        normals = pd.GetPointData().GetNormals()
        cameraToImageT = vtk.vtkTransform()
        imageQueue.getCameraProjectionTransform(imageView.imageName, cameraToImageT)
        pd = filterUtils.transformPolyData(pd, cameraToImageT)
        pts = vnp.getNumpyFromVtk(pd, 'Points')
        pts[:,0] /= pts[:,2]
        pts[:,1] /= pts[:,2]
        pd.GetPointData().SetNormals(normals)
        '''

        self.imageQueue.projectPoints(imageView.imageName, pd)

        cameraObj.setPolyData(pd)

        self.addActorToImageOverlay(cameraObj, imageView)
Code example #8
File: cameraview.py Project: mlab-upenn/arch-apex
    def getStereoPointCloudElapsed(self, decimation=4, imagesChannel='CAMERA', cameraName='CAMERA_LEFT', removeSize=0):
        q = imageManager.queue

        utime = q.getCurrentImageTime(cameraName)
        if utime == 0:
            return None, None, None

        if (utime - self.lastUtime < 1E6):
            return None, None, None

        p = vtk.vtkPolyData()
        cameraToLocalFused = vtk.vtkTransform()
        q.getTransform('CAMERA_LEFT_ALT', 'local', utime, cameraToLocalFused)
        cameraToLocal = vtk.vtkTransform()
        q.getTransform('CAMERA_LEFT', 'local', utime, cameraToLocal)
        prevToCurrentCameraTransform = vtk.vtkTransform()
        prevToCurrentCameraTransform.PostMultiply()
        prevToCurrentCameraTransform.Concatenate(cameraToLocal)
        prevToCurrentCameraTransform.Concatenate(self.lastCameraToLocal.GetLinearInverse())
        distTravelled = np.linalg.norm(prevToCurrentCameraTransform.GetPosition())

        # 0.2 heavy overlap
        # 0.5 quite a bit of overlap
        # 1.0 is good
        if (distTravelled < 0.2):
            return None, None, None

        q.getPointCloudFromImages(imagesChannel, p, decimation, removeSize)

        self.lastCameraToLocal = cameraToLocal
        self.lastUtime = utime
        return p, cameraToLocalFused, cameraToLocal
Code example #9
File: visualization.py Project: wxmerkt/director
def computeAToB(a, b):

    t = vtk.vtkTransform()
    t.PostMultiply()
    t.Concatenate(b)
    t.Concatenate(a.GetLinearInverse())
    tt = vtk.vtkTransform()
    tt.SetMatrix(t.GetMatrix())
    return tt
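
A minimal usage sketch (not from the director source; it assumes only that vtk is importable and that computeAToB above is in scope). Because the concatenation runs in PostMultiply mode, the returned matrix equals a.GetLinearInverse() * b, i.e. the pose of frame b expressed in frame a's coordinates:

import vtk

# Two hypothetical frame poses, given in world coordinates.
aToWorld = vtk.vtkTransform()
aToWorld.Translate(1.0, 0.0, 0.0)

bToWorld = vtk.vtkTransform()
bToWorld.Translate(1.0, 2.0, 0.0)

relative = computeAToB(aToWorld, bToWorld)
print(relative.GetPosition())  # prints (0.0, 2.0, 0.0)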
Code example #10
    def drawFrameInCamera(t, frameName='new frame', visible=True):

        v = imageView.view
        q = cameraview.imageManager.queue
        localToCameraT = vtk.vtkTransform()
        q.getTransform('local', 'CAMERA_LEFT', localToCameraT)

        res = vis.showFrame(vtk.vtkTransform(), 'temp', view=v, visible=True, scale=0.2)
        om.removeFromObjectModel(res)
        pd = res.polyData
        pd = filterUtils.transformPolyData(pd, t)
        pd = filterUtils.transformPolyData(pd, localToCameraT)
        q.projectPoints('CAMERA_LEFT', pd)
        vis.showPolyData(pd, ('overlay ' + frameName), view=v, colorByName='Axes', parent='camera overlay', visible=visible)
Code example #11
File: splinewidget.py Project: mlab-upenn/arch-apex
    def interpolateReach(self):

        t = vtk.vtkTransform()
        handObj, handFrame = self.handFactory.placeHandModelWithTransform(t, self.view, side=self.side, name='grasp interpolation', parent='debug')
        handObj.setProperty('Alpha', 0.2)
        handFrame.setProperty('Visible', True)

        sliderMax = 100.0

        def sliderChanged(sliderValue):
            sliderValue = sliderValue/float(sliderMax)
            t = self.splineInterp(sliderValue)
            handFrame.copyFrame(t)

            reachGoal = self.getReachGoalFrame(self.side)
            if reachGoal:
                reachGoal.copyFrame(t)


        self.slider = QtGui.QSlider(QtCore.Qt.Horizontal)
        self.slider.connect('valueChanged(int)', sliderChanged)
        self.slider.setMaximum(sliderMax)
        self.slider.show()
        self.slider.resize(500, 30)
        sliderChanged(sliderMax)
Code example #12
    def promotePolyDataItem(cls, obj):
        parent = obj.parent()
        view = obj.views[0]
        name = obj.getProperty('Name')
        polyData = obj.polyData
        props = obj.properties._properties
        childFrame = obj.getChildFrame()
        if childFrame:
            t = transformUtils.copyFrame(childFrame.transform)
        else:
            t = vtk.vtkTransform()
            t.PostMultiply()
            t.Translate(filterUtils.computeCentroid(polyData))
            polyData = filterUtils.transformPolyData(polyData, t.GetLinearInverse())

        children = [c for c in obj.children() if c is not childFrame]

        meshId = cls.getMeshManager().add(polyData)

        om.removeFromObjectModel(obj)
        obj = MeshAffordanceItem(name, view)
        obj.setProperty('Filename', meshId)
        om.addToObjectModel(obj, parentObj=parent)
        frame = vis.addChildFrame(obj)
        frame.copyFrame(t)

        for child in children:
            om.addToObjectModel(child, parentObj=obj)

        obj.syncProperties(props)
        return obj
Code example #13
File: visualization.py Project: wxmerkt/director
def showGrid(view, cellSize=0.5, numberOfCells=25, name='grid', parent='sensors', color=[1,1,1], alpha=0.05, gridTransform=None):

    grid = vtk.vtkGridSource()
    grid.SetScale(cellSize)
    grid.SetGridSize(numberOfCells)
    grid.SetSurfaceEnabled(True)
    grid.Update()

    gridObj = showPolyData(grid.GetOutput(), 'grid', view=view, alpha=alpha, color=color, visible=True, parent=parent)
    gridObj.gridSource = grid
    gridObj.actor.GetProperty().LightingOff()
    gridObj.actor.SetPickable(False)

    gridTransform = gridTransform or vtk.vtkTransform()
    gridObj.actor.SetUserTransform(gridTransform)
    showFrame(gridTransform, 'grid frame', scale=0.2, visible=False, parent=gridObj, view=view)

    gridObj.setProperty('Surface Mode', 'Wireframe')

    def computeViewBoundsNoGrid():
        if not gridObj.getProperty('Visible'):
            return

        gridObj.actor.SetUseBounds(False)
        bounds = view.renderer().ComputeVisiblePropBounds()
        gridObj.actor.SetUseBounds(True)
        if vtk.vtkMath.AreBoundsInitialized(bounds):
            view.addCustomBounds(bounds)
        else:
            view.addCustomBounds([-1, 1, -1, 1, -1, 1])

    view.connect('computeBoundsRequest(ddQVTKWidgetView*)', computeViewBoundsNoGrid)
    return gridObj
Code example #14
    def drawObjectInCamera(objectName, visible=True):
        v = imageView.view
        q = cameraview.imageManager.queue
        localToCameraT = vtk.vtkTransform()
        q.getTransform('local', 'CAMERA_LEFT', localToCameraT)

        obj = om.findObjectByName(objectName)
        if obj is None:
            return
        objToLocalT = transformUtils.copyFrame(obj.actor.GetUserTransform() or vtk.vtkTransform())
        objPolyDataOriginal = obj.polyData
        pd = objPolyDataOriginal
        pd = filterUtils.transformPolyData(pd, objToLocalT)
        pd = filterUtils.transformPolyData(pd, localToCameraT)
        q.projectPoints('CAMERA_LEFT', pd)
        vis.showPolyData(pd, ('overlay ' + objectName), view=v, color=[0, 1, 0], parent='camera overlay', visible=visible)
Code example #15
    def buildRobot(x=0, y=0):
        #print "building robot"
        polyData = ioUtils.readPolyData('celica.obj')

        scale = 0.04
        t = vtk.vtkTransform()
        t.RotateZ(90)
        t.Scale(scale, scale, scale)
        polyData = filterUtils.transformPolyData(polyData, t)

        #d = DebugData()
        #d.addCone((x,y,0), (1,0,0), height=0.2, radius=0.1)
        #polyData = d.getPolyData()

        obj = vis.showPolyData(polyData, 'robot')
        robotFrame = vis.addChildFrame(obj)

        # this makes sure we can only rotate it in two dimensions
        robotFrame.setProperty('Scale', 3)
        robotFrame.setProperty('Edit', True)
        robotFrame.widget.HandleRotationEnabledOff()
        rep = robotFrame.widget.GetRepresentation()
        rep.SetTranslateAxisEnabled(2, False)
        rep.SetRotateAxisEnabled(0, False)
        rep.SetRotateAxisEnabled(1, False)
        return obj, robotFrame
Code example #16
File: visualization.py Project: wxmerkt/director
def addChildFrame(obj, initialTransform=None):
    '''
    Adds a child frame to the given PolyDataItem.  If initialTransform is given,
    the object's polydata is transformed using the inverse of initialTransform
    and then a child frame is assigned to the object to maintain its original
    position.
    '''
    pd = obj.polyData
    t = initialTransform

    if t is None:
        t = vtk.vtkTransform()
        t.PostMultiply()
    else:
        pd = transformPolyData(pd, t.GetLinearInverse())
        obj.setPolyData(pd)

    frame = obj.getChildFrame()
    if frame:
        frame.copyFrame(t)
    else:
        frame = showFrame(t, obj.getProperty('Name') + ' frame', parent=obj, scale=0.2, visible=False)
        obj.actor.SetUserTransform(t)

    return frame
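
A hedged usage sketch (not from the project source; 'table.obj' and the pose values are placeholders, and a running director session providing vis, ioUtils and vtk is assumed):

# Show a mesh, then attach a child frame so it can be repositioned later.
obj = vis.showPolyData(ioUtils.readPolyData('table.obj'), 'table')
frame = vis.addChildFrame(obj)

# Moving the child frame moves the actor without rewriting its polydata.
t = vtk.vtkTransform()
t.PostMultiply()
t.RotateZ(45)
t.Translate(0.5, 0.0, 0.9)
frame.copyFrame(t)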
Code example #17
File: cameraview.py Project: steinachim/director
    def getWorldPositionAndRay(self, imagePixel, imageUtime=None):
        '''
        Given an XY image pixel, computes an equivalent ray in the world
        coordinate system using the camera to local transform at the given
        imageUtime.  If imageUtime is None, then the utime of the most recent
        image is used.

        Returns the camera xyz position in world, and a ray unit vector.
        '''
        if imageUtime is None:
            imageUtime = self.imageManager.getUtime(self.imageName)

        # input is pixel u,v, output is unit x,y,z in camera coordinates
        cameraPoint = self.imageManager.queue.unprojectPixel(
            self.imageName, imagePixel[0], imagePixel[1])

        cameraToLocal = vtk.vtkTransform()
        self.imageManager.queue.getTransform(self.imageName, 'local',
                                             imageUtime, cameraToLocal)

        p = np.array(cameraToLocal.TransformPoint(cameraPoint))
        cameraPosition = np.array(cameraToLocal.GetPosition())
        ray = p - cameraPosition
        ray /= np.linalg.norm(ray)

        return cameraPosition, ray
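
A small follow-on sketch (not part of the original class; pixelToGroundPoint and cameraView are hypothetical names) showing how the returned position and ray can be intersected with the ground plane z = 0, e.g. to turn an image click into a point on the floor:

import numpy as np

def pixelToGroundPoint(cameraView, imagePixel):
    cameraPosition, ray = cameraView.getWorldPositionAndRay(imagePixel)
    if abs(ray[2]) < 1e-6:
        return None  # ray is (nearly) parallel to the ground plane
    t = -cameraPosition[2] / ray[2]
    if t < 0:
        return None  # the ground plane is behind the camera
    return cameraPosition + t * ray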
Code example #18
File: cameraview.py Project: mlab-upenn/arch-apex
def applyCameraTexture(obj, imageManager, imageName='CAMERA_LEFT'):

    imageUtime = imageManager.getUtime(imageName)
    if not imageUtime:
        return

    cameraToLocal = vtk.vtkTransform()
    imageManager.queue.getTransform(imageName, 'local', imageUtime, cameraToLocal)

    pd = filterUtils.transformPolyData(obj.polyData, obj.actor.GetUserTransform())
    pd = filterUtils.transformPolyData(pd, cameraToLocal.GetLinearInverse())

    imageManager.queue.computeTextureCoords(imageName, pd)

    tcoordsArrayName = 'tcoords_%s' % imageName
    tcoords = pd.GetPointData().GetArray(tcoordsArrayName)
    assert tcoords

    obj.polyData.GetPointData().SetTCoords(None)
    obj.polyData.GetPointData().SetTCoords(tcoords)
    obj._updateColorByProperty()

    obj.actor.SetTexture(imageManager.getTexture(imageName))
    obj.actor.GetProperty().LightingOff()
    obj.actor.GetProperty().SetColor([1,1,1])
Code example #19
File: roboturdf.py Project: wxmerkt/director
    def newPolyData(self, name, view, parent=None):
        polyData = self.getNewHandPolyData()

        if isinstance(parent, str):
            parent = om.getOrCreateContainer(parent)

        color = [1.0, 1.0, 0.0]
        if self.side == 'right':
            color = [0.33, 1.0, 0.0]
        obj = vis.showPolyData(polyData,
                               name,
                               view=view,
                               color=color,
                               visible=False,
                               parent=parent)
        obj.side = self.side
        frame = vtk.vtkTransform()
        frame.PostMultiply()
        obj.actor.SetUserTransform(frame)
        frameObj = vis.showFrame(frame,
                                 '%s frame' % name,
                                 view=view,
                                 scale=0.2,
                                 visible=False,
                                 parent=obj)
        return obj
Code example #20
    def promotePolyDataItem(cls, obj):
        parent = obj.parent()
        view = obj.views[0]
        name = obj.getProperty('Name')
        polyData = obj.polyData
        props = obj.properties._properties
        childFrame = obj.getChildFrame()
        if childFrame:
            t = transformUtils.copyFrame(childFrame.transform)
        else:
            t = vtk.vtkTransform()
            t.PostMultiply()
            t.Translate(filterUtils.computeCentroid(polyData))
            polyData = filterUtils.transformPolyData(polyData,
                                                     t.GetLinearInverse())

        children = [c for c in obj.children() if c is not childFrame]

        meshId = cls.getMeshManager().add(polyData)

        om.removeFromObjectModel(obj)
        obj = MeshAffordanceItem(name, view)
        obj.setProperty('Filename', meshId)
        om.addToObjectModel(obj, parentObj=parent)
        frame = vis.addChildFrame(obj)
        frame.copyFrame(t)

        for child in children:
            om.addToObjectModel(child, parentObj=obj)

        obj.syncProperties(props)
        return obj
Code example #21
File: cameraview.py Project: steinachim/director
def applyCameraTexture(obj, imageManager, imageName='CAMERA_LEFT'):

    imageUtime = imageManager.getUtime(imageName)
    if not imageUtime:
        return

    cameraToLocal = vtk.vtkTransform()
    imageManager.queue.getTransform(imageName, 'local', imageUtime,
                                    cameraToLocal)

    pd = filterUtils.transformPolyData(obj.polyData,
                                       obj.actor.GetUserTransform())
    pd = filterUtils.transformPolyData(pd, cameraToLocal.GetLinearInverse())

    imageManager.queue.computeTextureCoords(imageName, pd)

    tcoordsArrayName = 'tcoords_%s' % imageName
    tcoords = pd.GetPointData().GetArray(tcoordsArrayName)
    assert tcoords

    obj.polyData.GetPointData().SetTCoords(None)
    obj.polyData.GetPointData().SetTCoords(tcoords)
    obj._updateColorByProperty()

    obj.actor.SetTexture(imageManager.getTexture(imageName))
    obj.actor.GetProperty().LightingOff()
    obj.actor.GetProperty().SetColor([1, 1, 1])
Code example #22
    def getLinkFrame(self, linkName):
        t = vtk.vtkTransform()
        t.PostMultiply()
        if self.model.getLinkToWorld(linkName, t):
            return t
        else:
            return None
Code example #23
File: roboturdf.py Project: steinachim/director
    def getLinkFrame(self, linkName):
        t = vtk.vtkTransform()
        t.PostMultiply()
        if self.model.getLinkToWorld(linkName, t):
            return t
        else:
            return None
Code example #24
File: roboturdf.py Project: liutaotao369/director
    def newPolyData(self, name, view, parent=None):
        self.handModel.model.setJointPositions(
            np.zeros(self.handModel.model.numberOfJoints()))
        polyData = vtk.vtkPolyData()
        self.handModel.model.getModelMesh(polyData)
        polyData = filterUtils.transformPolyData(polyData, self.modelToPalm)

        if isinstance(parent, str):
            parent = om.getOrCreateContainer(parent)

        color = [1.0, 1.0, 0.0]
        if self.side == 'right':
            color = [0.33, 1.0, 0.0]
        obj = vis.showPolyData(polyData,
                               name,
                               view=view,
                               color=color,
                               visible=False,
                               parent=parent)
        obj.side = self.side
        frame = vtk.vtkTransform()
        frame.PostMultiply()
        obj.actor.SetUserTransform(frame)
        frameObj = vis.showFrame(frame,
                                 '%s frame' % name,
                                 view=view,
                                 scale=0.2,
                                 visible=False,
                                 parent=obj)
        return obj
Code example #25
File: cameraview.py Project: steinachim/director
    def __init__(self):

        self.lastUtime = 0
        self.lastCameraToLocal = vtk.vtkTransform()

        self.cameraToLocalFusedTransforms = []
        self.cameraToLocalTransforms = []
        self.pointClouds = []
Code example #26
File: roboturdf.py Project: steinachim/director
    def moveToGraspFrame(self, frame):

        t = vtk.vtkTransform()
        t.PostMultiply()
        t.Concatenate(self.modelToPalm)
        t.Concatenate(frame)

        self.moveHandModelToFrame(self.handModel, t)
Code example #27
    def scaleGeometry(polyDataList, geom):
        scale = geom.float_data[0]
        if scale != 1.0:
            t = vtk.vtkTransform()
            t.Scale(scale, scale, scale)
            polyDataList = [filterUtils.transformPolyData(polyData, t) for polyData in polyDataList]

        return polyDataList
Code example #28
File: cameraview.py Project: mlab-upenn/arch-apex
    def __init__(self):

        self.lastUtime = 0
        self.lastCameraToLocal = vtk.vtkTransform()

        self.cameraToLocalFusedTransforms = []
        self.cameraToLocalTransforms = []
        self.pointClouds = []
Code example #29
    def moveToGraspFrame(self, frame):

        t = vtk.vtkTransform()
        t.PostMultiply()
        t.Concatenate(self.modelToPalm)
        t.Concatenate(frame)

        self.moveHandModelToFrame(self.handModel, t)
Code example #30
File: drakevisualizer.py Project: wxmerkt/director
    def __init__(self, link):
        self.transform = vtk.vtkTransform()

        self.geometry = []
        for g in link.geom:
            self.geometry.extend(
                Geometry.createGeometry(link.name + ' geometry data', g,
                                        self.transform))
Code example #31
File: robottasks.py Project: simalpha/director
    def getGroundFrame(self):
        return vtk.vtkTransform()  # early return: the code below is never reached

        robotModel = robotSystem.robotStateModel
        baseLinkFrame = robotModel.model.getLinkFrame(robotModel.model.getLinkNames()[0])
        #baseLinkFrame.PostMultiply()
        #baseLinkFrame.Translate(0,0,-baseLinkFrame.GetPosition()[2])
        return baseLinkFrame
Code example #32
File: robottasks.py Project: liutaotao369/director
    def getGroundFrame(self):
        return vtk.vtkTransform()  # early return: the code below is never reached

        robotModel = robotSystem.robotStateModel
        baseLinkFrame = robotModel.model.getLinkFrame(
            robotModel.model.getLinkNames()[0])
        #baseLinkFrame.PostMultiply()
        #baseLinkFrame.Translate(0,0,-baseLinkFrame.GetPosition()[2])
        return baseLinkFrame
Code example #33
 def getLinkToLinkTransform(model, linkA, linkB):
     linkAToWorld = model.getLinkFrame(linkA)
     linkBToWorld = model.getLinkFrame(linkB)
     assert linkAToWorld
     assert linkBToWorld
     t = vtk.vtkTransform()
     t.PostMultiply()
     t.Concatenate(linkAToWorld)
     t.Concatenate(linkBToWorld.GetLinearInverse())
     return t
Code example #34
File: cameraview.py Project: mlab-upenn/arch-apex
    def getCameraToLocal(self):
        '''
        Returns cameraToLocal.  cameraToHead is pulled from bot frames while
        headToLocal is pulled from the robot model forward kinematics.
        '''
        headToLocal = self.robotModel.getLinkFrame(self.headLink)
        cameraToHead = vtk.vtkTransform()
        # TODO: 'head' should match self.headLink
        self.imageManager.queue.getTransform(self.cameraName, 'head', 0, cameraToHead)
        return transformUtils.concatenateTransforms([cameraToHead, headToLocal])
Code example #35
File: cameraview.py Project: mlab-upenn/arch-apex
    def updateSphereGeometry(self):

        for imageName in self.sphereImages:
            sphereObj = self.getSphereGeometry(imageName)
            if not sphereObj:
                continue

            transform = vtk.vtkTransform()
            self.imageManager.queue.getBodyToCameraTransform(imageName, transform)
            sphereObj.actor.SetUserTransform(transform.GetLinearInverse())
Code example #36
    def updateGrid(self, model):
        pos = self.jointController.q[:3]

        # snap to 10 m grid cells in x/y (integer division; Python 2 semantics)
        x = int(np.round(pos[0])) / 10
        y = int(np.round(pos[1])) / 10
        z = int(np.round(pos[2] - 0.85)) / 1

        t = vtk.vtkTransform()
        t.Translate((x * 10, y * 10, z))
        self.gridFrame.copyFrame(t)
Code example #37
File: roboturdf.py Project: steinachim/director
 def getLinkToLinkTransform(model, linkA, linkB):
     linkAToWorld = model.getLinkFrame(linkA)
     linkBToWorld = model.getLinkFrame(linkB)
     assert linkAToWorld
     assert linkBToWorld
     t = vtk.vtkTransform()
     t.PostMultiply()
     t.Concatenate(linkAToWorld)
     t.Concatenate(linkBToWorld.GetLinearInverse())
     return t
Code example #38
    def getPalmToWorldTransform(self, robotModel):

        handLinkToWorld = robotModel.getLinkFrame(self.handLinkName)

        t = vtk.vtkTransform()
        t.PostMultiply()
        t.Concatenate(self.palmToHandLink)
        t.Concatenate(handLinkToWorld)

        return t
Code example #39
File: roboturdf.py Project: steinachim/director
    def getPalmToWorldTransform(self, robotModel):

        handLinkToWorld = robotModel.getLinkFrame(self.handLinkName)

        t = vtk.vtkTransform()
        t.PostMultiply()
        t.Concatenate(self.palmToHandLink)
        t.Concatenate(handLinkToWorld)

        return t
Code example #40
File: tabledemo.py Project: edowson/director
    def computeBinStanceFrame(self):
        assert self.binFrame

        zGround = 0.0
        binHeight = self.binFrame.transform.GetPosition()[2] - zGround

        t = vtk.vtkTransform()
        t.PostMultiply()
        t.Translate(-0.45, 0.1, -binHeight)
        t.Concatenate(self.binFrame.transform)

        self.binStanceFrame = vis.showFrame(t, 'bin stance frame', parent=None, scale=0.2)

        t = vtk.vtkTransform()
        t.PostMultiply()
        t.RotateZ(30)
        t.Translate(-0.8, 0.4, -binHeight)
        t.Concatenate(self.binFrame.transform)

        self.startStanceFrame = vis.showFrame(t, 'start stance frame', parent=None, scale=0.2)
Code example #41
    def setFrameToState(self, state=None):
        if state is None:
            state = self.state

        x = state[self.idx['x']]
        y = state[self.idx['y']]
        theta = state[self.idx['theta']]
        t = vtk.vtkTransform()
        t.Translate(x, y, 0.0)
        t.RotateZ(np.degrees(theta))
        self.frame.copyFrame(t)
Code example #42
File: cameraview.py Project: steinachim/director
    def updateSphereGeometry(self):

        for imageName in self.sphereImages:
            sphereObj = self.getSphereGeometry(imageName)
            if not sphereObj:
                continue

            transform = vtk.vtkTransform()
            self.imageManager.queue.getBodyToCameraTransform(
                imageName, transform)
            sphereObj.actor.SetUserTransform(transform.GetLinearInverse())
Code example #43
File: visualization.py Project: wxmerkt/director
    def _moveFrame(self, frameId, modifiedFrameId):

        frameData = self.frames[frameId]
        modifiedFrameData = self.frames[modifiedFrameId]

        t = vtk.vtkTransform()
        t.PostMultiply()
        t.Concatenate(frameData.baseTransform)
        t.Concatenate(modifiedFrameData.baseTransform.GetLinearInverse())
        t.Concatenate(modifiedFrameData.ref().transform)
        frameData.ref().copyFrame(t)
Code example #44
File: roboturdf.py Project: steinachim/director
    def moveToRobot(self, robotModel):

        handLinkToWorld = robotModel.getLinkFrame(self.handLinkName)

        t = vtk.vtkTransform()
        t.PostMultiply()
        t.Concatenate(self.modelToPalm)
        t.Concatenate(self.palmToHandLink)
        t.Concatenate(handLinkToWorld)

        self.moveHandModelToFrame(self.handModel, t)
        vis.updateFrame(self.getPalmToWorldTransform(), '%s palm' % self.side)
Code example #45
    def moveToRobot(self, robotModel):

        handLinkToWorld = robotModel.getLinkFrame(self.handLinkName)

        t = vtk.vtkTransform()
        t.PostMultiply()
        t.Concatenate(self.modelToPalm)
        t.Concatenate(self.palmToHandLink)
        t.Concatenate(handLinkToWorld)

        self.moveHandModelToFrame(self.handModel, t)
        vis.updateFrame(self.getPalmToWorldTransform(), '%s palm' % self.side)
Code example #46
File: cameraview.py Project: steinachim/director
    def getCameraToLocal(self):
        '''
        Returns cameraToLocal.  cameraToHead is pulled from bot frames while
        headToLocal is pulled from the robot model forward kinematics.
        '''
        headToLocal = self.robotModel.getLinkFrame(self.headLink)
        cameraToHead = vtk.vtkTransform()
        # TODO: 'head' should match self.headLink
        self.imageManager.queue.getTransform(self.cameraName, 'head', 0, cameraToHead)
        return transformUtils.concatenateTransforms([cameraToHead, headToLocal])
Code example #47
File: ikconstraints.py Project: mlab-upenn/arch-apex
    def __init__(self, **kwargs):

        self._add_fields(
            linkName = '',
            referenceFrame = vtk.vtkTransform(),
            pointInLink    = np.zeros(3),
            positionTarget = np.zeros(3),
            positionOffset = np.zeros(3),
            lowerBound     = np.zeros(3),
            upperBound     = np.zeros(3),
            )

        ConstraintBase.__init__(self, **kwargs)
Code example #48
File: footstepsdriver.py Project: manuelli/director
def loadFootMeshes():
    meshes = []
    for i in range(0, 2):
        d = DebugData()

        for footMeshFile in _footMeshFiles[i]:
            d.addPolyData(ioUtils.readPolyData(footMeshFile, computeNormals=True))

        t = vtk.vtkTransform()
        t.Scale(0.98, 0.98, 0.98)
        pd = filterUtils.transformPolyData(d.getPolyData(), t)
        meshes.append(pd)
    return meshes
Code example #49
File: pointcloudlcm.py Project: simalpha/director
    def _updateSource(self):
        p = vtk.vtkPolyData()
        utime = self.PointCloudQueue.getPointCloudFromPointCloud(p)

        if not p.GetNumberOfPoints():
            return

        sensorToLocalFused = vtk.vtkTransform()
        self.queue.getTransform('VELODYNE', 'local', utime, sensorToLocalFused)
        p = filterUtils.transformPolyData(p, sensorToLocalFused)
        self.polyDataObj.setPolyData(p)

        if not self.polyDataObj.initialized:
            self.polyDataObj.initialized = True
Code example #50
File: footstepsdriver.py Project: manuelli/director
    def constructFootstepPlanRequest(self, pose, goalFrame=None):

        msg = lcmdrc.footstep_plan_request_t()
        msg.utime = getUtime()
        state_msg = robotstate.drakePoseToRobotState(pose)
        msg.initial_state = state_msg

        if goalFrame is None:
            goalFrame = vtk.vtkTransform()
        msg.goal_pos = transformUtils.positionMessageFromFrame(goalFrame)

        msg = self.applyParams(msg)
        msg = self.applySafeRegions(msg)
        return msg
Code example #51
File: ikconstraints.py Project: mlab-upenn/arch-apex
    def __init__(self, **kwargs):

        self._add_fields(
            bodyNameA = '',
            bodyNameB = '',
            pointInBodyA    = np.zeros(3),
            frameInBodyB = vtk.vtkTransform(),
            positionTarget = np.zeros(3),
            positionOffset = np.zeros(3),
            lowerBound     = np.zeros(3),
            upperBound     = np.zeros(3),
            )

        ConstraintBase.__init__(self, **kwargs)