def launchViewer(self): playButtonFps = 1.0/self.options['Sim']['dt'] print "playButtonFPS", playButtonFps self.playTimer = TimerCallback(targetFps=playButtonFps) self.playTimer.callback = self.playTimerCallback panel = QtGui.QWidget() l = QtGui.QHBoxLayout(panel) playButton = QtGui.QPushButton('Play/Pause') playButton.connect('clicked()', self.onPlayButton) l.addWidget(playButton) w = QtGui.QWidget() l = QtGui.QVBoxLayout(w) l.addWidget(self.view) l.addWidget(panel) w.showMaximized() self.carFrame.connectFrameModified(self.updateDrawIntersection) self.updateDrawIntersection(self.carFrame) applogic.resetCamera(viewDirection=[0.2,0,-1]) self.view.showMaximized() self.view.raise_() self.app.start()
def __init__(self):
    """Build the main Drake Visualizer window: render view, grid, dock panels, shortcuts."""
    om.init()
    self.view = PythonQt.dd.ddQVTKWidgetView()

    # init grid
    self.gridObj = vis.showGrid(self.view, parent='scene')
    self.gridObj.setProperty('Surface Mode', 'Surface with edges')
    self.gridObj.setProperty('Color', [0,0,0])
    self.gridObj.setProperty('Alpha', 0.1)

    # init view options (background gradient)
    self.viewOptions = vis.ViewOptionsItem(self.view)
    om.addToObjectModel(self.viewOptions, parentObj=om.findObjectByName('scene'))
    self.viewOptions.setProperty('Background color', [0.3, 0.3, 0.35])
    self.viewOptions.setProperty('Background color 2', [0.95,0.95,1])

    # setup camera
    applogic.setCameraTerrainModeEnabled(self.view, True)
    applogic.resetCamera(viewDirection=[-1, 0, -0.3], view=self.view)

    # add view behaviors (standard mouse/key interaction) and make this
    # view the application-wide default render view
    viewBehaviors = viewbehaviors.ViewBehaviors(self.view)
    applogic._defaultRenderView = self.view

    # Main window sized to 16:9 at 768px tall.
    self.mainWindow = QtGui.QMainWindow()
    self.mainWindow.setCentralWidget(self.view)
    self.mainWindow.resize(768 * (16/9.0), 768)
    self.mainWindow.setWindowTitle('Drake Visualizer')
    self.mainWindow.setWindowIcon(QtGui.QIcon(':/images/drake_logo.png'))
    self.mainWindow.show()

    self.drakeVisualizer = DrakeVisualizer(self.view)
    # lcmgl bindings are optional; leave the manager unset when unavailable.
    self.lcmglManager = lcmgl.LCMGLManager(self.view) if lcmgl.LCMGL_AVAILABLE else None

    # Dock panels start hidden; toggled via the F-key shortcuts below.
    self.screenGrabberPanel = ScreenGrabberPanel(self.view)
    self.screenGrabberDock = self.addWidgetToDock(self.screenGrabberPanel.widget, QtCore.Qt.RightDockWidgetArea)
    self.screenGrabberDock.setVisible(False)

    self.cameraBookmarksPanel = camerabookmarks.CameraBookmarkWidget(self.view)
    self.cameraBookmarksDock = self.addWidgetToDock(self.cameraBookmarksPanel.widget, QtCore.Qt.RightDockWidgetArea)
    self.cameraBookmarksDock.setVisible(False)

    # Scene browser / properties panel come from the shared object model.
    model = om.getDefaultObjectModel()
    model.getTreeWidget().setWindowTitle('Scene Browser')
    model.getPropertiesPanel().setWindowTitle('Properties Panel')
    model.setActiveObject(self.viewOptions)

    self.sceneBrowserDock = self.addWidgetToDock(model.getTreeWidget(), QtCore.Qt.LeftDockWidgetArea)
    self.propertiesDock = self.addWidgetToDock(self.wrapScrollArea(model.getPropertiesPanel()), QtCore.Qt.LeftDockWidgetArea)
    self.sceneBrowserDock.setVisible(False)
    self.propertiesDock.setVisible(False)

    applogic.addShortcut(self.mainWindow, 'Ctrl+Q', self.applicationInstance().quit)
    applogic.addShortcut(self.mainWindow, 'F1', self._toggleObjectModel)
    applogic.addShortcut(self.mainWindow, 'F2', self._toggleScreenGrabber)
    applogic.addShortcut(self.mainWindow, 'F3', self._toggleCameraBookmarks)
    applogic.addShortcut(self.mainWindow, 'F8', applogic.showPythonConsole)
def setupPlayback(self): self.timer = TimerCallback(targetFps=30) self.timer.callback = self.tick playButtonFps = 1.0 / self.dt print "playButtonFPS", playButtonFps self.playTimer = TimerCallback(targetFps=playButtonFps) self.playTimer.callback = self.playTimerCallback self.sliderMovedByPlayTimer = False panel = QtGui.QWidget() l = QtGui.QHBoxLayout(panel) playButton = QtGui.QPushButton('Play/Pause') playButton.connect('clicked()', self.onPlayButton) slider = QtGui.QSlider(QtCore.Qt.Horizontal) slider.connect('valueChanged(int)', self.onSliderChanged) self.sliderMax = self.numTimesteps slider.setMaximum(self.sliderMax) self.slider = slider l.addWidget(playButton) l.addWidget(slider) w = QtGui.QWidget() l = QtGui.QVBoxLayout(w) l.addWidget(self.view) l.addWidget(panel) w.showMaximized() self.frame.connectFrameModified(self.updateDrawIntersection) self.updateDrawIntersection(self.frame) applogic.resetCamera(viewDirection=[0.2, 0, -1]) self.view.showMaximized() self.view.raise_() panel = screengrabberpanel.ScreenGrabberPanel(self.view) panel.widget.show() elapsed = time.time() - self.startSimTime simRate = self.counter / elapsed print "Total run time", elapsed print "Ticks (Hz)", simRate print "Number of steps taken", self.counter self.app.start()
def setupPlayback(self): self.timer = TimerCallback(targetFps=30) self.timer.callback = self.tick playButtonFps = 1.0 / self.dt print "playButtonFPS", playButtonFps self.playTimer = TimerCallback(targetFps=playButtonFps) self.playTimer.callback = self.playTimerCallback self.sliderMovedByPlayTimer = False panel = QtGui.QWidget() l = QtGui.QHBoxLayout(panel) playButton = QtGui.QPushButton("Play/Pause") playButton.connect("clicked()", self.onPlayButton) slider = QtGui.QSlider(QtCore.Qt.Horizontal) slider.connect("valueChanged(int)", self.onSliderChanged) self.sliderMax = self.numTimesteps slider.setMaximum(self.sliderMax) self.slider = slider l.addWidget(playButton) l.addWidget(slider) w = QtGui.QWidget() l = QtGui.QVBoxLayout(w) l.addWidget(self.view) l.addWidget(panel) w.showMaximized() self.frame.connectFrameModified(self.updateDrawIntersection) self.updateDrawIntersection(self.frame) applogic.resetCamera(viewDirection=[0.2, 0, -1]) self.view.showMaximized() self.view.raise_() panel = screengrabberpanel.ScreenGrabberPanel(self.view) panel.widget.show() elapsed = time.time() - self.startSimTime simRate = self.counter / elapsed print "Total run time", elapsed print "Ticks (Hz)", simRate print "Number of steps taken", self.counter self.app.start()
def createView(self, useGrid=True):
    """Construct and return a configured VTK render view.

    useGrid: when True, also create the ground grid and store it on self.gridObj.
    Side effects: sets self.viewOptions and registers the view as the app default.
    """
    renderView = PythonQt.dd.ddQVTKWidgetView()
    renderView.resize(600, 400)
    applogic.setCameraTerrainModeEnabled(renderView, True)

    if useGrid:
        self.gridObj = vis.showGrid(renderView, parent='scene')

    self.viewOptions = vis.ViewOptionsItem(renderView)
    om.addToObjectModel(self.viewOptions, parentObj=om.findObjectByName('scene'))

    applogic.resetCamera(viewDirection=[-1, -1, -0.3], view=renderView)
    behaviors = viewbehaviors.ViewBehaviors(renderView)
    applogic._defaultRenderView = renderView

    # Keyboard shortcuts: quit, python console, object model.
    for keySequence, handler in (('Ctrl+Q', self.quit),
                                 ('F8', self.showPythonConsole),
                                 ('F1', self.showObjectModel)):
        applogic.addShortcut(renderView, keySequence, handler)

    renderView.setWindowIcon(om.Icons.getIcon(om.Icons.Robot))
    renderView.setWindowTitle('View')
    return renderView
def createView(self, useGrid=True):
    """Build a ddQVTKWidgetView with camera, grid, options item, and shortcuts; return it.

    When useGrid is False the ground grid is skipped. Stores the grid and view
    options items on self and makes this view the application default.
    """
    newView = PythonQt.dd.ddQVTKWidgetView()
    newView.resize(600, 400)
    applogic.setCameraTerrainModeEnabled(newView, True)

    if useGrid:
        self.gridObj = vis.showGrid(newView, parent='scene')

    self.viewOptions = vis.ViewOptionsItem(newView)
    om.addToObjectModel(self.viewOptions,
                        parentObj=om.findObjectByName('scene'))

    applogic.resetCamera(viewDirection=[-1, -1, -0.3], view=newView)

    # Installing ViewBehaviors hooks up the standard mouse/key interaction.
    viewBehaviors = viewbehaviors.ViewBehaviors(newView)
    applogic._defaultRenderView = newView

    applogic.addShortcut(newView, 'Ctrl+Q', self.quit)
    applogic.addShortcut(newView, 'F8', self.showPythonConsole)
    applogic.addShortcut(newView, 'F1', self.showObjectModel)

    newView.setWindowIcon(om.Icons.getIcon(om.Icons.Robot))
    newView.setWindowTitle('View')
    return newView
def init(self, view=None, globalsDict=None):
    """Bootstrap the robot system: drivers, models, planners, and optional UI panels.

    view: render view to attach models to; defaults to the current render view.
    globalsDict: if given, every constructed component is also injected into it.
    Returns a FieldContainer holding all components created here.

    NOTE: robotSystemArgs is built from locals() near the end, so every local
    variable created in this function becomes a field of the returned
    container -- do not rename or remove locals casually.
    """
    view = view or applogic.getCurrentRenderView()

    # Feature flags for the subsystems to bring up.
    useRobotState = True
    usePerception = True
    useFootsteps = True
    useHands = True
    usePlanning = True
    useAtlasDriver = True
    useAtlasConvexHull = False
    useWidgets = False

    directorConfig = self.getDirectorConfig()
    neckPitchJoint = 'neck_ay'

    if useAtlasDriver:
        atlasDriver = atlasdriver.init(None)

    if useRobotState:
        # Gray "estimated state" model, updated live from EST_ROBOT_STATE.
        robotStateModel, robotStateJointController = roboturdf.loadRobotModel('robot state model', view, urdfFile=directorConfig['urdfConfig']['robotState'], parent='sensors', color=roboturdf.getRobotGrayColor(), visible=True)
        robotStateJointController.setPose('EST_ROBOT_STATE', robotStateJointController.getPose('q_nom'))
        roboturdf.startModelPublisherListener([(robotStateModel, robotStateJointController)])
        robotStateJointController.addLCMUpdater('EST_ROBOT_STATE')
        segmentationroutines.SegmentationContext.initWithRobot(robotStateModel)

    if usePerception:
        multisenseDriver, mapServerSource = perception.init(view)

        # Current neck pitch angle read out of the robot state pose vector.
        def getNeckPitch():
            return robotStateJointController.q[robotstate.getDrakePoseJointNames().index( neckPitchJoint )]

        neckDriver = perception.NeckDriver(view, getNeckPitch)

        # Returns (time in seconds, hokuyo spindle angle); (0, 0) until a
        # state message containing the hokuyo joint has been received.
        def getSpindleAngleFunction():
            if (robotStateJointController.lastRobotStateMessage):
                if ('hokuyo_joint' in robotStateJointController.lastRobotStateMessage.joint_name):
                    index = robotStateJointController.lastRobotStateMessage.joint_name.index('hokuyo_joint')
                    return (float(robotStateJointController.lastRobotStateMessage.utime)/(1e6), robotStateJointController.lastRobotStateMessage.joint_position[index])
            return (0, 0)

        spindleMonitor = perception.SpindleMonitor(getSpindleAngleFunction)
        robotStateModel.connectModelChanged(spindleMonitor.onRobotStateChanged)

    if useHands:
        rHandDriver = handdriver.RobotiqHandDriver(side='right')
        lHandDriver = handdriver.RobotiqHandDriver(side='left')

    if useFootsteps:
        footstepsDriver = footstepsdriver.FootstepsDriver(robotStateJointController)
        irisDriver = irisdriver.IRISDriver(robotStateJointController, footstepsDriver.params)
        raycastDriver = raycastdriver.RaycastDriver()

    if usePlanning:
        # Hidden model used only by the IK planner; removed from the scene tree.
        ikRobotModel, ikJointController = roboturdf.loadRobotModel('ik model', urdfFile=directorConfig['urdfConfig']['ik'], parent=None)
        om.removeFromObjectModel(ikRobotModel)
        ikJointController.addPose('q_end', ikJointController.getPose('q_nom'))
        ikJointController.addPose('q_start', ikJointController.getPose('q_nom'))

        if 'leftFootLink' in directorConfig:
            ikServer = ik.AsyncIKCommunicator(directorConfig['urdfConfig']['ik'], directorConfig['fixedPointFile'], directorConfig['leftFootLink'], directorConfig['rightFootLink'], directorConfig['pelvisLink'])
        else:
            # assume that robot has no feet e.g. fixed base arm
            ikServer = ik.AsyncIKCommunicator(directorConfig['urdfConfig']['ik'], directorConfig['fixedPointFile'], '', '', '')

        # Deliberately not called at startup; kept available for manual use.
        def startIkServer():
            ikServer.startServerAsync()
            ikServer.comm.writeCommandsToLogFile = True

        #startIkServer()

        # Blue, initially-hidden models used for plan playback and teleop preview.
        playbackRobotModel, playbackJointController = roboturdf.loadRobotModel('playback model', view, urdfFile=directorConfig['urdfConfig']['playback'], parent='planning', color=roboturdf.getRobotBlueColor(), visible=False)
        teleopRobotModel, teleopJointController = roboturdf.loadRobotModel('teleop model', view, urdfFile=directorConfig['urdfConfig']['teleop'], parent='planning', color=roboturdf.getRobotBlueColor(), visible=False)

        if useAtlasConvexHull:
            # NOTE(review): reads `urdfConfig['chull']`, but only
            # `directorConfig` is in scope here -- this branch (disabled by the
            # flag above) would raise NameError if enabled. Verify intended name.
            chullRobotModel, chullJointController = roboturdf.loadRobotModel('convex hull atlas', view, urdfFile=urdfConfig['chull'], parent='planning', color=roboturdf.getRobotOrangeColor(), visible=False)
            playbackJointController.models.append(chullRobotModel)

        planPlayback = planplayback.PlanPlayback()

        # Build hand loaders for however many hands this robot has (0, 1, or 2).
        if (roboturdf.numberOfHands == 1):
            handFactory = roboturdf.HandFactory(robotStateModel)
            handModels = [handFactory.getLoader(side) for side in ['left']]
        elif (roboturdf.numberOfHands == 2):
            handFactory = roboturdf.HandFactory(robotStateModel)
            handModels = [handFactory.getLoader(side) for side in ['left', 'right']]
        else:
            handFactory = None
            handModels = []

        ikPlanner = ikplanner.IKPlanner(ikServer, ikRobotModel, ikJointController, handModels)
        manipPlanner = robotplanlistener.ManipulationPlanDriver(ikPlanner)
        affordanceManager = affordancemanager.AffordanceObjectModelManager(view)
        # Echo requests prompt any running server to resend current collections.
        affordanceitems.MeshAffordanceItem.getMeshManager().collection.sendEchoRequest()
        affordanceManager.collection.sendEchoRequest()
        segmentation.affordanceManager = affordanceManager
        plannerPub = plannerPublisher.PlannerPublisher(ikPlanner,affordanceManager)
        ikPlanner.setPublisher(plannerPub)

        # This joint angle is mapped to the Multisense panel
        neckPitchJoint = ikPlanner.neckPitchJoint

    applogic.resetCamera(viewDirection=[-1,0,0], view=view)

    if useWidgets:
        playbackPanel = playbackpanel.PlaybackPanel(planPlayback, playbackRobotModel, playbackJointController, robotStateModel, robotStateJointController, manipPlanner)
        teleopPanel = teleoppanel.TeleopPanel(robotStateModel, robotStateJointController, teleopRobotModel, teleopJointController, ikPlanner, manipPlanner, playbackPanel.setPlan, playbackPanel.hidePlan)
        footstepsDriver.walkingPlanCallback = playbackPanel.setPlan
        manipPlanner.connectPlanReceived(playbackPanel.setPlan)

    # Capture every local built above as the robot system's fields,
    # excluding the two arguments that are not components.
    robotSystemArgs = dict(locals())
    for arg in ['globalsDict', 'self']:
        del robotSystemArgs[arg]

    if globalsDict is not None:
        globalsDict.update(robotSystemArgs)

    robotSystem = FieldContainer(**robotSystemArgs)
    viewbehaviors.ViewBehaviors.addRobotBehaviors(robotSystem)
    return robotSystem
# NOTE(review): this chunk begins mid-function -- the two statements below are
# the tail of an enclosing def whose header is outside this view (presumably
# enableArmEncoders(), mirroring disableArmEncoders() below; utime=1 enables).
msg.utime = 1
lcmUtils.publish('ENABLE_ENCODERS', msg)


def disableArmEncoders():
    """Publish a utime of -1 on ENABLE_ENCODERS to disable the arm encoders."""
    msg = lcmdrc.utime_t()
    msg.utime = -1
    lcmUtils.publish('ENABLE_ENCODERS', msg)


def sendDesiredPumpPsi(desiredPsi):
    """Forward the desired pump pressure (psi) to the atlas driver."""
    atlasDriver.sendDesiredPumpPsi(desiredPsi)


app.setCameraTerrainModeEnabled(view, True)
app.resetCamera(viewDirection=[-1,0,0], view=view)
viewBehaviors = viewbehaviors.ViewBehaviors(view)


# Drill Demo Functions for in-image rendering:
useDrillDemo = False
if useDrillDemo:
    def spawnHandAtCurrentLocation(side='left'):
        # Place a hand model at the current left-face / right-pointer link frame.
        # NOTE(review): `side is 'left'` is an identity comparison, not
        # equality; it happens to work for interned literals in CPython but
        # should be `side == 'left'`.
        if (side is 'left'):
            tf = transformUtils.copyFrame( getLinkFrame( 'l_hand_face') )
            handFactory.placeHandModelWithTransform( tf , app.getCurrentView(), 'left')
        else:
            tf = transformUtils.copyFrame( getLinkFrame( 'right_pointer_tip') )
            handFactory.placeHandModelWithTransform( tf , app.getCurrentView(), 'right')
# NOTE(review): this chunk starts mid-script; angleMin, numRays, buildWorld,
# buildRobot, buildCellLocator and updateIntersection are defined outside
# this view.
angleMax = np.pi/2
angleGrid = np.linspace(angleMin, angleMax, numRays)

# Unit ray directions in the x-y plane, one column per ray.
rays = np.zeros((3,numRays))
rays[0,:] = np.cos(angleGrid)
rays[1,:] = np.sin(angleGrid)

app = ConsoleApp()
view = app.createView()

world = buildWorld()
robot = buildRobot()
locator = buildCellLocator(world.polyData)

# Make the robot's frame interactively editable, restricted to planar motion:
# z translation and x/y rotation handles are disabled.
frame = robot.getChildFrame()
frame.setProperty('Edit', True)
frame.widget.HandleRotationEnabledOff()
rep = frame.widget.GetRepresentation()
rep.SetTranslateAxisEnabled(2, False)
rep.SetRotateAxisEnabled(0, False)
rep.SetRotateAxisEnabled(1, False)

# Recompute the ray intersections whenever the frame is moved.
frame.connectFrameModified(updateIntersection)
updateIntersection(frame)

applogic.resetCamera(viewDirection=[0.2,0,-1])
view.showMaximized()
view.raise_()
app.start()
def init(self, view=None, globalsDict=None):
    """Bootstrap the robot system: drivers, models, planners, and optional UI panels.

    view: render view to attach models to; defaults to the current render view.
    globalsDict: if given, every constructed component is also injected into it.
    Returns a FieldContainer holding all components created here.

    NOTE: robotSystemArgs is built from locals() near the end, so every local
    variable created in this function becomes a field of the returned
    container -- do not rename or remove locals casually.
    """
    view = view or applogic.getCurrentRenderView()

    # Feature flags for the subsystems to bring up.
    useRobotState = True
    usePerception = True
    useFootsteps = True
    useHands = True
    usePlanning = True
    useAtlasDriver = True
    useAtlasConvexHull = False
    useWidgets = False

    directorConfig = self.getDirectorConfig()
    neckPitchJoint = 'neck_ay'

    if useAtlasDriver:
        atlasDriver = atlasdriver.init(None)

    if useRobotState:
        # Gray "estimated state" model, updated live from EST_ROBOT_STATE.
        robotStateModel, robotStateJointController = roboturdf.loadRobotModel(
            'robot state model',
            view,
            urdfFile=directorConfig['urdfConfig']['robotState'],
            parent='sensors',
            color=roboturdf.getRobotGrayColor(),
            visible=True)
        robotStateJointController.setPose(
            'EST_ROBOT_STATE', robotStateJointController.getPose('q_nom'))
        roboturdf.startModelPublisherListener([
            (robotStateModel, robotStateJointController)
        ])
        robotStateJointController.addLCMUpdater('EST_ROBOT_STATE')
        segmentationroutines.SegmentationContext.initWithRobot(
            robotStateModel)

    if usePerception:
        multisenseDriver, mapServerSource = perception.init(view)

        # Current neck pitch angle read out of the robot state pose vector.
        def getNeckPitch():
            return robotStateJointController.q[
                robotstate.getDrakePoseJointNames().index(neckPitchJoint)]

        neckDriver = perception.NeckDriver(view, getNeckPitch)

        # Returns (time in seconds, hokuyo spindle angle); (0, 0) until a
        # state message containing the hokuyo joint has been received.
        def getSpindleAngleFunction():
            if (robotStateJointController.lastRobotStateMessage):
                if ('hokuyo_joint' in robotStateJointController.
                        lastRobotStateMessage.joint_name):
                    index = robotStateJointController.lastRobotStateMessage.joint_name.index(
                        'hokuyo_joint')
                    return (float(robotStateJointController.
                                  lastRobotStateMessage.utime) / (1e6),
                            robotStateJointController.
                            lastRobotStateMessage.joint_position[index])
            return (0, 0)

        spindleMonitor = perception.SpindleMonitor(getSpindleAngleFunction)
        robotStateModel.connectModelChanged(
            spindleMonitor.onRobotStateChanged)

    if useHands:
        rHandDriver = handdriver.RobotiqHandDriver(side='right')
        lHandDriver = handdriver.RobotiqHandDriver(side='left')

    if useFootsteps:
        footstepsDriver = footstepsdriver.FootstepsDriver(
            robotStateJointController)
        irisDriver = irisdriver.IRISDriver(robotStateJointController,
                                           footstepsDriver.params)
        raycastDriver = raycastdriver.RaycastDriver()

    if usePlanning:
        # Hidden model used only by the IK planner; removed from the scene tree.
        ikRobotModel, ikJointController = roboturdf.loadRobotModel(
            'ik model',
            urdfFile=directorConfig['urdfConfig']['ik'],
            parent=None)
        om.removeFromObjectModel(ikRobotModel)
        ikJointController.addPose('q_end',
                                  ikJointController.getPose('q_nom'))
        ikJointController.addPose('q_start',
                                  ikJointController.getPose('q_nom'))

        if 'leftFootLink' in directorConfig:
            ikServer = ik.AsyncIKCommunicator(
                directorConfig['urdfConfig']['ik'],
                directorConfig['fixedPointFile'],
                directorConfig['leftFootLink'],
                directorConfig['rightFootLink'],
                directorConfig['pelvisLink'])
        else:
            # assume that robot has no feet e.g. fixed base arm
            ikServer = ik.AsyncIKCommunicator(
                directorConfig['urdfConfig']['ik'],
                directorConfig['fixedPointFile'], '', '', '')

        # Deliberately not called at startup; kept available for manual use.
        def startIkServer():
            ikServer.startServerAsync()
            ikServer.comm.writeCommandsToLogFile = True

        #startIkServer()

        # Blue, initially-hidden models used for plan playback and teleop preview.
        playbackRobotModel, playbackJointController = roboturdf.loadRobotModel(
            'playback model',
            view,
            urdfFile=directorConfig['urdfConfig']['playback'],
            parent='planning',
            color=roboturdf.getRobotBlueColor(),
            visible=False)
        teleopRobotModel, teleopJointController = roboturdf.loadRobotModel(
            'teleop model',
            view,
            urdfFile=directorConfig['urdfConfig']['teleop'],
            parent='planning',
            color=roboturdf.getRobotBlueColor(),
            visible=False)

        if useAtlasConvexHull:
            # NOTE(review): reads `urdfConfig['chull']`, but only
            # `directorConfig` is in scope here -- this branch (disabled by
            # the flag above) would raise NameError if enabled.
            chullRobotModel, chullJointController = roboturdf.loadRobotModel(
                'convex hull atlas',
                view,
                urdfFile=urdfConfig['chull'],
                parent='planning',
                color=roboturdf.getRobotOrangeColor(),
                visible=False)
            playbackJointController.models.append(chullRobotModel)

        planPlayback = planplayback.PlanPlayback()

        # Build hand loaders for however many hands this robot has (0, 1, or 2).
        if (roboturdf.numberOfHands == 1):
            handFactory = roboturdf.HandFactory(robotStateModel)
            handModels = [handFactory.getLoader(side) for side in ['left']]
        elif (roboturdf.numberOfHands == 2):
            handFactory = roboturdf.HandFactory(robotStateModel)
            handModels = [
                handFactory.getLoader(side) for side in ['left', 'right']
            ]
        else:
            handFactory = None
            handModels = []

        ikPlanner = ikplanner.IKPlanner(ikServer, ikRobotModel,
                                        ikJointController, handModels)
        manipPlanner = robotplanlistener.ManipulationPlanDriver(ikPlanner)
        affordanceManager = affordancemanager.AffordanceObjectModelManager(
            view)
        # Echo requests prompt any running server to resend current collections.
        affordanceitems.MeshAffordanceItem.getMeshManager(
        ).collection.sendEchoRequest()
        affordanceManager.collection.sendEchoRequest()
        segmentation.affordanceManager = affordanceManager
        plannerPub = plannerPublisher.PlannerPublisher(
            ikPlanner, affordanceManager)
        ikPlanner.setPublisher(plannerPub)

        # This joint angle is mapped to the Multisense panel
        neckPitchJoint = ikPlanner.neckPitchJoint

    applogic.resetCamera(viewDirection=[-1, 0, 0], view=view)

    if useWidgets:
        playbackPanel = playbackpanel.PlaybackPanel(
            planPlayback, playbackRobotModel, playbackJointController,
            robotStateModel, robotStateJointController, manipPlanner)
        teleopPanel = teleoppanel.TeleopPanel(
            robotStateModel, robotStateJointController, teleopRobotModel,
            teleopJointController, ikPlanner, manipPlanner,
            playbackPanel.setPlan, playbackPanel.hidePlan)
        footstepsDriver.walkingPlanCallback = playbackPanel.setPlan
        manipPlanner.connectPlanReceived(playbackPanel.setPlan)

    # Capture every local built above as the robot system's fields,
    # excluding the two arguments that are not components.
    robotSystemArgs = dict(locals())
    for arg in ['globalsDict', 'self']:
        del robotSystemArgs[arg]

    if globalsDict is not None:
        globalsDict.update(robotSystemArgs)

    robotSystem = FieldContainer(**robotSystemArgs)
    viewbehaviors.ViewBehaviors.addRobotBehaviors(robotSystem)
    return robotSystem