def main(args): app = Application(name="detect_net_inference") # Load subgraph and get interface node app.load("packages/detect_net/apps/detect_net_inference.subgraph.json", prefix="detect_net_inference") detect_net_inferface = app.nodes["detect_net_inference.subgraph"]\ .components["interface"] # Load configuration app.load(args.config) # Configure detection model detection_model = app.nodes["detect_net_inference.tensor_r_t_inference"]\ .components["isaac.ml.TensorRTInference"] if args.detection_model_file_path is not None: detection_model.config.model_file_path = args.detection_model_file_path if args.etlt_password is not None: detection_model.config.etlt_password = args.etlt_password # Configure detection decoder decoder = app.nodes["detect_net_inference.detection_decoder"]\ .components["isaac.detect_net.DetectNetDecoder"] decoder.config.output_scale = [args.rows, args.cols] if args.confidence_threshold is not None: decoder.config.confidence_threshold = args.confidence_threshold if args.nms_threshold is not None: decoder.config.non_maximum_suppression_threshold = args.nms_threshold if args.mode == 'cask': # Load replay subgraph and configure interface node app.load("packages/cask/apps/replay.subgraph.json", prefix="replay") replay_interface = app.nodes["replay.interface"].components["output"] replay_interface.config.cask_directory = args.cask_directory # Connect the output of the replay subgraph to the detection subgraph app.connect(replay_interface, "color", detect_net_inferface, "image") elif args.mode == 'sim': # Load simulation subgraph and get interface node app.load("packages/navsim/apps/navsim_training.subgraph.json",\ prefix="simulation") simulation_interface = app.nodes["simulation.interface"].components[ "output"] # Connect the output of the simulation with user-specified channel to the detection subgraph app.connect(simulation_interface, args.image_channel, detect_net_inferface, "image") elif args.mode == 'realsense': app.load_module('realsense') # Create and configure realsense camera codelet camera = app.add("camera").add(app.registry.isaac.RealsenseCamera) camera.config.rows = args.rows camera.config.cols = args.cols camera.config.color_framerate = args.fps camera.config.depth_framerate = args.fps camera.config.enable_ir_stereo = False # Connect the output of the camera node to the detection subgraph app.connect(camera, "color", detect_net_inferface, "image") elif args.mode == 'v4l': app.load_module('sensors:v4l2_camera') # Create and configure V4L camera codelet camera = app.add("camera").add(app.registry.isaac.V4L2Camera) camera.config.device_id = 0 camera.config.rows = args.rows camera.config.cols = args.cols camera.config.rate_hz = args.fps # Connect the output of the camera node to the detection subgraph app.connect(camera, "frame", detect_net_inferface, "image") elif args.mode == 'image': app.load_module('message_generators') # Create feeder node feeder = app.add("feeder").add( app.registry.isaac.message_generators.ImageLoader) feeder.config.color_glob_pattern = args.image_directory feeder.config.tick_period = "1Hz" # Connect the output of the image feeder node to the detection subgraph app.connect(feeder, "color", detect_net_inferface, "image") else: raise ValueError('Not supported mode {}'.format(args.mode)) app.run()
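# A minimal argument-parser sketch for the app above. The flag names mirror the
# attributes that main() reads; the defaults and help strings are illustrative
# assumptions, not the values shipped with the SDK.
import argparse

def populate_parser():
    parser = argparse.ArgumentParser(description='Runs DetectNet inference on various input sources')
    parser.add_argument('--mode', choices=['cask', 'sim', 'realsense', 'v4l', 'image'],
                        default='cask', help='Input source for the detection subgraph')
    parser.add_argument('--config', help='Additional config JSON to load')
    parser.add_argument('--detection_model_file_path', help='Path to the detection model file')
    parser.add_argument('--etlt_password', help='Password for an .etlt model file')
    parser.add_argument('--confidence_threshold', type=float, help='Decoder confidence threshold')
    parser.add_argument('--nms_threshold', type=float, help='Non-maximum suppression threshold')
    parser.add_argument('--rows', type=int, default=720, help='Vertical image resolution')
    parser.add_argument('--cols', type=int, default=1280, help='Horizontal image resolution')
    parser.add_argument('--fps', type=int, default=15, help='Camera framerate')
    parser.add_argument('--cask_directory', help='Cask directory for cask mode')
    parser.add_argument('--image_channel', default='color', help='Simulation image channel')
    parser.add_argument('--image_directory', help='Glob pattern for image mode')
    return parser

if __name__ == '__main__':
    main(populate_parser().parse_args())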
if __name__ == "__main__": parser = populate_parser() args = parser.parse_args() # Create april_tag_python application app = Application(name="nvapril_tag_vs_april_tag3", modules=[ "//packages/fiducials:april_tags", "//packages/fiducials:april_tags3", "sensors:v4l2_camera", "message_generators", "viewers", "sight", ]) # Setup april tag node nv_tags_detection = app.add('nv_april_tags_detection').add( app.registry.isaac.fiducials.AprilTagsDetection) nv_tags_detection.config.max_tags = 50 fiducials_viewer = app.nodes['nv_april_tags_detection'].add( app.registry.isaac.viewers.FiducialsViewer) # Setup april tag 3 node tags_detection3 = app.add('april_tags3_detection').add( app.registry.isaac.fiducials.AprilTags3Detection) fiducials_viewer3 = app.nodes['april_tags3_detection'].add( app.registry.isaac.viewers.FiducialsViewer) # Setup image viewer node image_viewer = app.add('image_viewers').add( app.registry.isaac.viewers.ImageViewer) # Create camera_intrinsics node
and proprietary rights in and to this software, related documentation and any
modifications thereto. Any use, reproduction, disclosure or distribution of this
software and related documentation without an express license agreement from
NVIDIA CORPORATION is strictly prohibited.
'''
from isaac import Application, Node

if __name__ == '__main__':
    app = Application(name="svo_realsense", modules=['realsense'])

    app.load("packages/visual_slam/apps/stereo_visual_odometry_grayscale.subgraph.json", "svo")
    svo_interface = app.nodes["svo.subgraph"].components["interface"]

    camera = app.add('camera').add(app.registry.isaac.RealsenseCamera)
    camera.config.align_to_color = False
    camera.config.auto_exposure_priority = False
    camera.config.enable_color = False
    camera.config.cols = 640
    camera.config.rows = 360
    camera.config.enable_depth = False
    camera.config.enable_depth_laser = False
    camera.config.enable_ir_stereo = True
    camera.config.ir_framerate = 30

    tracker = app.nodes['svo.tracker'].components['StereoVisualOdometry']
    tracker.config.horizontal_stereo_camera = True
    tracker.config.process_imu_readings = False
    tracker.config.lhs_camera_frame = "left_ir_camera"
    tracker.config.rhs_camera_frame = "right_ir_camera"
def main(args):
    # Check that the provided directories are different; image and ground truth
    # casks must be written to separate locations.
    if args.base_directory_images == args.base_directory_gt:
        print("Base directory for image and ground truth logs must not be the same.")
        return

    app = Application(name="record_sim_ground_truth", modules=["viewers"])
    app_uuid = app.uuid

    # Load simulation subgraph and get interface node
    app.load("packages/navsim/apps/navsim_training.subgraph.json",
             prefix="simulation")
    simulation_interface = app.nodes["simulation.interface"].components["output"]

    # Load record subgraph for images
    app.load("packages/cask/apps/record.subgraph.json", prefix="image_record")
    image_record_interface = app.nodes["image_record.interface"].components["input"]
    image_record_interface.config.base_directory = args.base_directory_images

    # Load record subgraph for ground truth
    app.load("packages/cask/apps/record.subgraph.json", prefix="ground_truth_record")
    ground_truth_record_interface = app.nodes["ground_truth_record.interface"].components["input"]
    ground_truth_record_interface.config.base_directory = args.base_directory_gt

    # Create viewer codelets
    image_viewer = app.add("image_viewer").add(app.registry.isaac.viewers.ImageViewer)
    image_viewer.config.camera_name = "camera"

    # Connect the image and intrinsics channels to the record interface and viewer
    app.connect(simulation_interface, args.image_channel, image_record_interface, "color")
    app.connect(simulation_interface, args.image_channel, image_viewer, "image")
    app.connect(simulation_interface, args.intrinsics_channel, image_record_interface,
                "color_intrinsics")
    app.connect(simulation_interface, args.intrinsics_channel, image_viewer, "intrinsics")

    # Set the scenario manager config options if a scene name is provided
    if args.scenario_scene is not None:
        scenario_manager = app.nodes["simulation.scenario_manager"].components["scenario_manager"]
        scenario_manager.config.scene = args.scenario_scene
        if args.scenario_robot_prefab is not None:
            scenario_manager.config.robot_prefab = args.scenario_robot_prefab

    # Connect ground truth data channels according to user input
    mode = args.mode
    if mode == 'bounding_box' or mode == 'all':
        detections_viewer = app.add("detections_viewer").add(
            app.registry.isaac.viewers.DetectionsViewer)
        bbox_conversion = app.nodes["simulation.bounding_boxes"].components["conversion"]
        app.connect(simulation_interface, args.segmentation_channel + "_class",
                    bbox_conversion, "class_segmentation")
        app.connect(simulation_interface, args.segmentation_channel + "_instance",
                    bbox_conversion, "instance_segmentation")
        app.connect(simulation_interface, args.segmentation_channel + "_labels",
                    bbox_conversion, "class_labels")
        app.connect(simulation_interface, "bounding_boxes",
                    ground_truth_record_interface, "bounding_boxes")
        app.connect(simulation_interface, "bounding_boxes", detections_viewer, "detections")
    if mode == 'pose' or mode == 'all':
        app.load_module('ml')
        detections3_viewer = app.add("detections3_viewer").add(
            app.registry.isaac.viewers.Detections3Viewer)
        pose_bodies = app.add("rigid_body_to_detections3").add(
            app.registry.isaac.ml.RigidbodyToDetections3)
        detections3_viewer.config.frame = "camera"
        if args.detections3viewer_box_dimensions is not None:
            # Enable 3D detections in the viewer
            detections3_viewer.config.box_dimensions = eval(args.detections3viewer_box_dimensions)
            detections3_viewer.config.object_T_box_center = eval(
                args.detections3viewer_object_T_box_center)
        app.connect(simulation_interface, "bodies", pose_bodies, "rigid_bodies")
app.connect(pose_bodies, "detections", ground_truth_record_interface, "gt_poses") app.connect(pose_bodies, "detections", detections3_viewer, "detections") app.connect(simulation_interface, args.intrinsics_channel, ground_truth_record_interface, "image_intrinsics") # Run application app.run(args.runtime) # Write metadata to JSON data per output cask. The metadata servers to associate # corresponding image and ground truth casks. As per RACI evaluation workflow # and data management, image casks and ground truth casks are stored in separate # directories. if args.raci_metadata: # Populate image cask metadata image_metadata_json = {} image_metadata_json["Robot_Name"] = "" image_metadata_json["Location/Scene"] = "" image_metadata_json["Domain"] = "simulation" image_metadata_json["Recording_Time"] = str(datetime.datetime.now().strftime("%Y-%m-%d")) # Write image cask metadata image_metadata_path = os.path.join(args.base_directory_images, app_uuid + "_md.json") with open(image_metadata_path, 'w') as f: json.dump(image_metadata_json, f, indent=2) # Populate ground truth cask metadata ground_truth_metadata_json = {} ground_truth_metadata_json["Image_Cask_File"] = app.uuid ground_truth_metadata_json["Data_Source"] = "ground_truth" # Write ground truth cask metadata ground_truth_metadata_path = os.path.join(args.base_directory_gt, app_uuid + "_md.json") with open(ground_truth_metadata_path, 'w') as f: json.dump(ground_truth_metadata_json, f, indent=2)
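# A sketch of the matching argument parser for the recorder above. The flag names
# follow the attributes main() reads; defaults and help strings are illustrative
# assumptions.
import argparse

def populate_parser():
    parser = argparse.ArgumentParser(description='Records simulated images and ground truth casks')
    parser.add_argument('--base_directory_images', required=True,
                        help='Output directory for the image cask')
    parser.add_argument('--base_directory_gt', required=True,
                        help='Output directory for the ground truth cask')
    parser.add_argument('--mode', choices=['bounding_box', 'pose', 'all'], default='bounding_box',
                        help='Which ground truth channels to record')
    parser.add_argument('--image_channel', default='color', help='Simulation image channel')
    parser.add_argument('--intrinsics_channel', default='color_intrinsics',
                        help='Simulation intrinsics channel')
    parser.add_argument('--segmentation_channel', default='segmentation',
                        help='Prefix of the segmentation channels')
    parser.add_argument('--scenario_scene', help='Scene name for the scenario manager')
    parser.add_argument('--scenario_robot_prefab', help='Robot prefab for the scenario manager')
    parser.add_argument('--detections3viewer_box_dimensions',
                        help='Box dimensions for the Detections3Viewer, e.g. "[0.9, 0.9, 0.2]"')
    parser.add_argument('--detections3viewer_object_T_box_center',
                        help='Pose of the box center in the object frame, e.g. "[1, 0, 0, 0, 0, 0, 0]"')
    parser.add_argument('--runtime', type=float, default=20.0, help='Recording duration in seconds')
    parser.add_argument('--raci_metadata', action='store_true', help='Write per-cask JSON metadata')
    return parser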
    parser.add_argument(
        '--no-imu',
        dest='imu',
        action='store_false',
        help='Disables the support for the on-board camera IMU.')
    parser.set_defaults(imu=False)
    args, _ = parser.parse_known_args()

    app = Application(name="svo_zed", modules=["zed"])

    app.load(
        "packages/visual_slam/apps/stereo_visual_odometry_grayscale.subgraph.json",
        "svo")
    svo_interface = app.nodes["svo.subgraph"].components["interface"]

    camera = app.add('camera').add(app.registry.isaac.ZedCamera)
    camera.config.enable_factory_rectification = True
    camera.config.enable_imu = args.imu
    camera.config.camera_fps = 60
    camera.config.resolution = "672x376"
    camera.config.gray_scale = True
    camera.config.rgb = False

    tracker = app.nodes['svo.tracker'].components['StereoVisualOdometry']
    tracker.config.horizontal_stereo_camera = True
    tracker.config.process_imu_readings = args.imu
    tracker.config.lhs_camera_frame = "zed_left_camera"
    tracker.config.rhs_camera_frame = "zed_right_camera"
    tracker.config.imu_frame = "zed_imu"

    if args.imu:
distribution of this software and related documentation without an express
license agreement from NVIDIA CORPORATION is strictly prohibited.
'''
from isaac import Application
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Measures recording performance')
    args = parser.parse_args()

    app = Application(name="write_speed_test",
                      modules=[
                          "message_generators",
                          "cask",
                          "sight",
                      ])

    img_gen_node = app.add("img_gen")
    img_gen = img_gen_node.add(app.registry.isaac.message_generators.CameraGenerator)
    img_gen.config.rows = 720
    img_gen.config.cols = 1080
    img_gen.config.tick_period = "15 Hz"

    recorder_node = app.add("recorder")
    recorder = recorder_node.add(app.registry.isaac.cask.Recorder)
    recorder.config.use_compression = True

    app.connect(img_gen, "color_left", recorder, "img1")
    app.connect(img_gen, "color_right", recorder, "img2")

    app.start_wait_stop(duration=10.0)
args, _ = parser.parse_known_args() app = Application(name="record_realsense", modules=["cask", "realsense", "sight"]) if args.mode == '720p': rows = 720 cols = 1280 elif args.mode == '640x480': rows = 640 cols = 480 else: raise ValueError('Not supported camera resolution') # Create recorder node recorder = app.add("recorder").add(app.registry.isaac.cask.Recorder) recorder.config.base_directory = args.base_directory # Create realsense camera codelet camera = app.add("cam").add(app.registry.isaac.RealsenseCamera) camera.config.rows = rows camera.config.cols = cols camera.config.color_framerate = args.fps camera.config.depth_framerate = args.fps app.connect(camera, "color", recorder, "color") app.connect(camera, "color_intrinsics", recorder, "color_intrinsics") app.connect(camera, "depth", recorder, "depth") app.connect(camera, "depth_intrinsics", recorder, "depth_intrinsics") app.run()
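# A minimal sketch of playing back a cask recorded by the app above, reusing the
# replay-subgraph pattern from the inference apps in this collection. The cask
# path and viewer wiring are assumptions for illustration.
from isaac import Application

if __name__ == '__main__':
    app = Application(name="replay_realsense", modules=["viewers"])
    app.load("packages/cask/apps/replay.subgraph.json", prefix="replay")
    replay_interface = app.nodes["replay.interface"].components["output"]
    replay_interface.config.cask_directory = "/tmp/realsense_recording"    # assumed path
    # View the replayed color stream in Sight
    viewer = app.add("viewer").add(app.registry.isaac.viewers.ImageViewer)
    app.connect(replay_interface, "color", viewer, "image")
    app.run()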
        app, args.arm, args.end_effector, args.pose, args.speed, args.acceleration)

    # Load sim subgraph or driver codelet
    if args.mode == 'sim':
        app.load("packages/navsim/apps/navsim_tcp.subgraph.json", prefix="simulation")
        sim_in = app.nodes["simulation.interface"]["input"]
        sim_out = app.nodes["simulation.interface"]["output"]
        app.connect(sim_out, "joint_state", lqr_interface, "joint_state")
        app.connect(sim_out, "joint_state", cartesian_planner, "joint_state")
        app.connect(lqr_interface, "joint_command", sim_in, "joint_position")
    elif args.mode == 'hardware':
        if args.arm == 'kinova':
            app.load_module("kinova_jaco")
            arm = app.add("arm").add(app.registry.isaac.kinova_jaco.KinovaJaco)
            arm.config.kinematic_tree = "lqr.kinematic_tree"
            arm.config.kinova_jaco_sdk_path = args.kinova_api
            arm.config.tick_period = "10ms"
            # For Kinova hardware we use speed control, since it is much smoother. Angle
            # control seems to only be able to move one joint at a time and is thus very jerky.
            arm.config.control_mode = "joint velocity"
            controller = app.nodes["lqr.controller"]["MultiJointController"]
            controller.config.control_mode = "speed"
            controller.config.command_delay = 0.1
        else:
            raise ValueError("Arm {0} not supported on hardware. Use sim mode.".format(args.arm))
        app.connect(arm, "arm_state", lqr_interface, "joint_state")
        app.connect(arm, "arm_state", cartesian_planner, "joint_state")
class TestApplication(unittest.TestCase):
    '''
    Test loading subgraphs via the application API
    '''

    @classmethod
    def setUpClass(cls):
        # Runs once before any test is run
        pass

    @classmethod
    def tearDownClass(cls):
        # Runs once after all tests have run
        pass

    def setUp(self):
        # Runs before each test
        return super().setUp()

    def tearDown(self):
        # Runs after each test
        return super().tearDown()

    def test_get_component(self):
        self._app = Application()
        self._app.add('node')
        node = self._app.nodes['node']
        self.assertIsNotNone(node)

        # Get None if no component has such a name
        component = node['ledger']
        self.assertIsNone(component)

        component = node['MessageLedger']
        self.assertIsNotNone(component)

        self._app.start()
        self._app.stop()

    def test_load_subgraph(self):
        self._app = Application()
        self._app.add('node')

        # Loads the subgraph and checks that the node/component are created and the config is good
        self._app.load('packages/pyalice/tests/application_test.subgraph.json', 'foo')
        node = self._app.nodes['foo.camera_viewer']
        component = node['ImageViewer']
        self.assertIsNotNone(component)

        fps_readback = self._app.nodes['foo.camera_viewer'].components[
            'ImageViewer'].config['target_fps']
        self.assertEqual(fps_readback, 11)
        camera_name_readback = self._app.nodes['foo.camera_viewer'].components[
            'ImageViewer'].config['camera_name']
        self.assertEqual(camera_name_readback, 'bar')

        # Loads the subgraph again with a different prefix name
        self._app.load('packages/pyalice/tests/application_test.subgraph.json', 'foo1')
        node = self._app.nodes['foo1.camera_viewer']
        component = node['ImageViewer']
        self.assertIsNotNone(component)

        fps_readback = self._app.nodes['foo1.camera_viewer'].components[
            'ImageViewer'].config['target_fps']
        self.assertEqual(fps_readback, 11)
        camera_name_readback = self._app.nodes[
            'foo1.camera_viewer'].components['ImageViewer'].config['camera_name']
        self.assertEqual(camera_name_readback, 'bar')

        self._app.start()
        self._app.stop()

    def test_load_bogus_subgraph(self):
        self._app = Application()
        with self.assertRaises(ValueError):
            self._app.load('/no/such/file')
        # Keep the temporary file alive while loading so the failure comes from
        # parsing the bogus JSON rather than from a missing file
        with tempfile.NamedTemporaryFile('w') as f:
            f.write('this is bogus json')
            f.flush()
            with self.assertRaises(ValueError):
                self._app.load(f.name)

    def test_load_subgraph_in_asset_path(self):
        self._app = Application()
        self._app.add('node')

        # Override the asset path to point to our test location
        self._app._asset_path = 'packages/pyalice/tests/mock_asset_path'

        # Loads the subgraph and checks that the node/component are created and the config is good
        self._app.load('mock_installed_application_test.subgraph.json', 'foo')
        pynode = self._app.nodes['foo.camera_viewer']
        self.assertIsNotNone(pynode)
        component = pynode['ImageViewer']
        self.assertIsNotNone(component)

        fps_readback = self._app.nodes['foo.camera_viewer'].components[
            'ImageViewer'].config['target_fps']
        self.assertEqual(fps_readback, 11)
        camera_name_readback = self._app.nodes['foo.camera_viewer'].components[
            'ImageViewer'].config['camera_name']
        self.assertEqual(camera_name_readback, 'bar')

        self._app.start()
        self._app.stop()

    def test_module_explorer(self):
        self._app = Application()
        self._app.load_module('json')
        # Accessing an existing component type should not raise
        self._app.registry.isaac.json.JsonToProto
        with self.assertRaises(RuntimeError):
            self._app.registry.isaac.foo

    def test_node_accessor(self):
        self._app = Application()
        self._app.load_module('json')
        node = self._app.add('foo1')
        self.assertIsNotNone(node)
        component = node.add(self._app.registry.isaac.json.JsonToProto, 'bar1')
        self.assertIsNotNone(component)

        node = self._app.add('foo2')
        self.assertIsNotNone(node)
        component = node.add(self._app.registry.isaac.json.ProtoToJson, 'bar2')
        self.assertIsNotNone(component)

        self.assertIsNotNone(self._app.nodes['foo1'])
        self.assertIsNotNone(self._app.nodes['foo2'])
        self.assertIsNone(self._app.nodes['foo3'])

        self._app.start()
        self._app.stop()

    def test_load_module(self):
        self._app = Application()
        result = self._app.load_module('message_generators')
        self.assertTrue(result)
        component = self._app.registry.isaac.message_generators.ImageLoader
        self.assertIsNotNone(component)
        self.assertEqual(str(component), "['isaac::message_generators::ImageLoader']")

    def test_clock(self):
        self._app = Application()
        self._app.start()
        clock = self._app.clock
        self.assertIsNotNone(clock)
        self.assertIsNotNone(clock.time)
        cur_time = clock.time
        self.assertGreater(cur_time, 0.0)
        self.assertGreater(clock.time, cur_time)
        self._app.stop()

    def test_pose(self):
        self._app = Application()
        self._app.start()
        self.assertTrue(
            self._app.atlas.set_pose(
                'foo', 'bar', 1.0,
                [np.quaternion(1.0, 0.0, 0.0, 0.0),
                 np.array([0.0, 0.0, 0.0])]))
        self.assertTrue(
            self._app.atlas.set_pose(
                'foo', 'bar', 2.0,
                [np.quaternion(1.0, 0.0, 0.0, 0.0),
                 np.array([1.0, 2.0, 3.0])]))
        read_pose = self._app.atlas.pose('foo', 'bar', 1.5)
        self.assertIsNotNone(read_pose)
        self.assertEqual(len(read_pose), 2)
        q = read_pose[0]
        t = read_pose[1]
        # The pose interpolated at t=1.5 should have identity rotation and half the translation
        self.assertLess(abs(q.w - 1.0), 1e-6)
        self.assertLess(abs(q.x - 0.0), 1e-6)
        self.assertLess(abs(q.y - 0.0), 1e-6)
        self.assertLess(abs(q.z - 0.0), 1e-6)
        self.assertLess(abs(t[0] - 0.5), 1e-6)
        self.assertLess(abs(t[1] - 1.0), 1e-6)
        self.assertLess(abs(t[2] - 1.5), 1e-6)
        self._app.stop()

    def test_node_start_stop(self):
        self._app = Application()
        result = self._app.load_module('message_generators')
        self.assertTrue(result)

        node = self._app.add('src')
        self.assertIsNotNone(node)
        component = node.add(
            self._app.registry.isaac.message_generators.PanTiltStateGenerator,
            'pantilt')
        component.config['tick_period'] = '20 Hz'
        self.assertIsNotNone(component)

        node_sink = self._app.add('sink')
        self.assertIsNotNone(node_sink)

        self._app.connect(component, 'target', node_sink.components['MessageLedger'], 'rcv')

        self._app.start()
        time.sleep(0.1)
        msg = self._app.receive('sink', 'MessageLedger', 'rcv')
        self.assertIsNotNone(msg)

        # After stopping the source node, no new messages should arrive
        node.stop()
        msg = self._app.receive('sink', 'MessageLedger', 'rcv')
        time.sleep(0.1)
        msg = self._app.receive('sink', 'MessageLedger', 'rcv')
        self.assertIsNone(msg)
        time.sleep(0.1)
        msg = self._app.receive('sink', 'MessageLedger', 'rcv')
        self.assertIsNone(msg)

        # Restarting the node resumes message flow
        node.start()
        time.sleep(0.1)
        msg = self._app.receive('sink', 'MessageLedger', 'rcv')
        self.assertIsNotNone(msg)

        self._app.stop()
        with self.assertRaises(RuntimeError):
            self._app.stop()

    def test_wait_for_node(self):
        self._app = Application()
        self._app.load_module('behavior_tree')
        self._app.add('node').add(
            self._app.registry.isaac.behavior_tree.TimerBehavior)
        self._app.start()
        status = self._app.wait_for_node('node')
        self._app.stop()
        self.assertEqual(str(status), 'Status.Success',
                         'Should reach Status.Success in 1 second.')

    def test_wait_for_node_timeout(self):
        self._app = Application()
        self._app.load_module('behavior_tree')
        self._app.add('node').add(
            self._app.registry.isaac.behavior_tree.TimerBehavior)
        self._app.start()
        status = self._app.wait_for_node('node', duration=0.1)
        self._app.stop()
        self.assertEqual(str(status), 'Status.Running',
                         'Should still be in Status.Running')

    def test_run_until_succeed(self):
        now_secs = time.time()
        self._app = Application()
        self._app.add('success')
        node_success = self._app.nodes['success']
        self.assertIsNotNone(node_success)
        self._app.nodes['success'].add(SucceedLaterCodelet)
        self._app.run('success')
        delta_secs = time.time() - now_secs
        self.assertGreater(delta_secs, 0.4)

    def test_run_until_failure(self):
        now_secs = time.time()
        self._app = Application()
        self._app.add('failure')
        node_failure = self._app.nodes['failure']
        self.assertIsNotNone(node_failure)
        self._app.nodes['failure'].add(FailureLaterCodelet)
        self._app.run('failure')
        delta_secs = time.time() - now_secs
        self.assertGreater(delta_secs, 0.4)

    def test_perf_report(self):
        PERF_REPORT_PATH = '/tmp/perf_report'
        if os.path.exists(PERF_REPORT_PATH):
            os.remove(PERF_REPORT_PATH)
        self._app = Application(
            argv=['--performance_report_out', PERF_REPORT_PATH])
        self._app.load_module('behavior_tree')
        self._app.add('node').add(
            self._app.registry.isaac.behavior_tree.TimerBehavior)
        self._app.run(0.2)
        self.assertTrue(os.path.exists(PERF_REPORT_PATH))

    def test_max_duration(self):
        time_now = time.time()
        self._app = Application(argv=['--max_duration', '0.5s'])
        self._app.load_module('behavior_tree')
        self._app.add('node').add(
            self._app.registry.isaac.behavior_tree.TimerBehavior)
        self._app.run(2.0)
        time_dt = time.time() - time_now
        self.assertLess(time_dt, 1.0)

        time_now = time.time()
        self._app = Application(argv=['--max_duration', '0.5s'])
        self._app.load_module('behavior_tree')
        self._app.add('node').add(
            self._app.registry.isaac.behavior_tree.TimerBehavior)
        self._app.run(2)
        time_dt = time.time() - time_now
        self.assertLess(time_dt, 1.0)

    def test_print_node(self):
        self._app = Application()
        self._app.add('foo')
        node_names = self._app.nodes._names
        self.assertTrue(isinstance(node_names, list) and 'foo' in node_names)
        self._app.add('bar')
        node_names = self._app.nodes._names
        self.assertTrue(
            isinstance(node_names, list) and 'foo' in node_names
            and 'bar' in node_names)

        foo_node = None
        bar_node = None
        for n in self._app.nodes:
            if n.name == 'foo':
                foo_node = n
            if n.name == 'bar':
                bar_node = n
        self.assertIsNotNone(foo_node)
        self.assertIsNotNone(bar_node)

    def test_expand_asset(self):
        self._app = Application()
        ws, path = self._app._app.expand_asset_path('@workspace_//foo/bar')
        self.assertEqual(path, 'external/workspace_/foo/bar')
        self.assertEqual(ws, 'workspace_')

        ws1 = self._app.home_workspace_name
        self.assertEqual(len(ws1), 0)
        self._app.home_workspace_name = 'workspace_'
        ws2 = self._app.home_workspace_name
        self.assertEqual(ws2, 'workspace_')

        ws3, path3 = self._app._app.expand_asset_path('@workspace_//foo/bar')
        self.assertEqual(ws3, 'workspace_')
        self.assertEqual(path3, 'foo/bar')
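# The tests above reference SucceedLaterCodelet and FailureLaterCodelet without
# showing them. A plausible sketch using the pyalice Codelet API, assuming
# report_success() and report_failure() are available on Codelet; the exact
# definitions live elsewhere in the test module.
from isaac import Codelet

class SucceedLaterCodelet(Codelet):
    def start(self):
        self.tick_periodically(0.1)
        self._ticks = 0

    def tick(self):
        self._ticks += 1
        # Succeed after roughly half a second, matching the 0.4 s lower bound asserted above
        if self._ticks >= 5:
            self.report_success()

class FailureLaterCodelet(Codelet):
    def start(self):
        self.tick_periodically(0.1)
        self._ticks = 0

    def tick(self):
        self._ticks += 1
        # Fail after roughly half a second so app.run() returns with a failure status
        if self._ticks >= 5:
            self.report_failure()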
    # Parse arguments
    parser = argparse.ArgumentParser(
        description="Converts casks with deprecated types")
    parser.add_argument(
        '--input_cask_path',
        required=True,
        help='Path to the cask file to read with deprecated message types.')
    parser.add_argument(
        '--output_cask_path',
        required=True,
        help='Path to the cask file to write with new message types.')
    args = parser.parse_args()

    # Create and start an application that has a node with a MessageLedger
    app = Application()
    app.add('node')
    app.start()
    node = app.nodes['node']
    component = node['MessageLedger']

    # Create cask objects for input and output
    input_cask = Cask(args.input_cask_path)
    output_cask = Cask(args.output_cask_path, writable=True)

    # For each input channel, either convert or pass through the messages
    for input_channel in input_cask.channels:
        series = input_cask[input_channel.name]
        if series[0].type_id == Message.create_message_builder(
                'ColorCameraProto').proto.schema.node.id:
            image_channel = output_cask.open_channel(component, input_channel.name)
            intrinsics_channel = output_cask.open_channel(
parser.add_argument("--sim_port", type=int, help="Tcp port to license to simulation.", default=44998) args = parser.parse_args() app = Application(name='transport_mission', more_jsons=args.map, modules=[ 'map', 'navigation', 'engine_tcp_udp', 'sight', 'utils', 'viewers', 'navigation', 'planner' ]) app.nodes["websight"]["WebsightServer"].config.port = args.sight_port mission_server = MissionServer(port=args.mission_port) task_manager_node = app.add("task_manager") transporter_manager = task_manager_node.add(RobotManager, 'TransporterManager') transporter_manager.config.idle_threshold = 3 transporter_manager.config.robot_name = "transporter" station = task_manager_node.add(VanillaManager, "DummyStationManager") station.config.num_assets = args.num_stations station.config.type = 'station' task_manager_node.add(MissionCoordinator, 'MissionCoordinator') scenario = Scenario(["station", "transporter"]) # Set scenario and mission server (if applicable) for all PyCodelets for _, frontend in app._pycodelet_frontends.items(): frontend.scenario = scenario if isinstance(frontend, RobotManager):
    svo_tracker.config.disable_automatic_start = True
    svo_config = svo_tracker.components["StereoVisualOdometry"].config
    svo_config.horizontal_stereo_camera = True
    svo_config.process_imu_readings = False
    # The left and right camera poses and the IMU pose are published by a simulator
    svo_config.lhs_camera_frame = "color_cam_L"
    svo_config.rhs_camera_frame = "color_cam_R"
    svo_config.imu_frame = "imu_gt"
    svo_config.num_points = 300
    app.nodes["svo.svo_grayscale.tracking_behavior"].config[
        "disable_automatic_start"] = True

    # The pose visualization is performed in parallel to the SVIO tracking
    navigate_and_track = app.add('navigate_and_track', [
        app.registry.isaac.behavior_tree.NodeGroup,
        app.registry.isaac.behavior_tree.ParallelBehavior
    ])
    navigate_and_track.components['NodeGroup'].config.node_names = [
        "svo.svo_grayscale.tracking_behavior", "pose_visualization_behavior"
    ]
    navigate_and_track.config["disable_automatic_start"] = True

    # Add a delay before the start of the SVIO tracker
    delay_tracking = app.add('delay_tracking', [
        app.registry.isaac.behavior_tree.NodeGroup,
        app.registry.isaac.behavior_tree.TimerBehavior
    ])
    delay_tracking.config["disable_automatic_start"] = True
    delay_tracking.components["TimerBehavior"].config.delay = 1.

    # The pose mapping and visualization is performed in parallel to SVIO tracking
type=int, help="The TCP port to listen for robot connections on", default=9998) parser.add_argument( "--sim_port", type=int, help="Port to publish message to simulator", default=44999) parser.add_argument("--num_cubes", type=int, help="Number of stations.", default=4) parser.add_argument( "--robot_host", type=str, help="Tcp host to license to the robots.", default='localhost') parser.add_argument("--sight_port", type=int, help="Port for websight", default=2999) args = parser.parse_args() app = Application(name='station_mission', modules=['engine_tcp_udp', 'sight']) app.nodes["websight"]["WebsightServer"].config.port = args.sight_port # Add TCP publisher for task simulation_node = app.add("simulation") simulation_node.add(app.registry.isaac.alice.TimeSynchronizer) tcp_publisher = simulation_node.add(app.registry.isaac.alice.TcpPublisher) tcp_publisher.config.port = args.sim_port task_manager_node = app.add("task_manager") cube_manager = task_manager_node.add(TeleportManager, 'CubeManager') cube_manager.config.num_assets = args.num_cubes cube_manager.config.type = 'cube' cube_manager.config.name = "/environments/cubes/green_block_{0:02d}/Cube" app.connect(cube_manager, "teleport", tcp_publisher, "teleport") mission_server = MissionServer(port=args.mission_port) station_manager = task_manager_node.add(RobotManager, 'StationManager') station_manager.config.idle_threshold = 3 station_manager.config.robot_name = "station"
        RelinkDestinationPoseCodelet)
    destination.config.root_frame = "world"

    # Task flow control
    app.nodes['done_checker'].add(AllTasksDoneChecker)
    app.nodes['in_task_done_checker'].add(TasksRemainingChecker)
    app.nodes['done_signal'].add(TaskDoneSignal)

    # Set task manager for all PyCodelets
    for _, frontend in app._pycodelet_frontends.items():
        frontend.task_planner = task_planner

    if args.groundtruth:
        # Use the detections3 generator to unblock the WaitUntilDetection and
        # DetectionsToPoseTree nodes
        perception_interface = app.add("detection_pose_estimation").add(
            app.registry.isaac.utils.RigidBodiesToDetections,
            "RigidBodiesToDetections")
        app.connect(driver_out, "bodies", perception_interface, 'bodies')
        app.connect(
            perception_interface, 'detections',
            app.nodes['pick_task.perceive_object']['WaitUntilDetection'],
            'detections')
        app.connect(
            perception_interface, 'detections',
            app.nodes['pick_task.detections_to_pose_tree']['DetectionsToPoseTree'],
            'detections')
        app.nodes['pick_task.detections_to_pose_tree'][
            'DetectionsToPoseTree'].config.detection_frame = 'world'
    else:
        # Pose estimation
        if args.arm == 'franka':
class TestComponent(unittest.TestCase):
    '''
    Test component creation and config access via the application API
    '''

    @classmethod
    def setUpClass(cls):
        # Runs once before any test is run
        pass

    @classmethod
    def tearDownClass(cls):
        # Runs once after all tests have run
        pass

    def setUp(self):
        # Runs before each test
        return super().setUp()

    def tearDown(self):
        # Runs after each test
        return super().tearDown()

    def test_component_config(self):
        self._app = Application()
        self._app.add('node')
        node = self._app.nodes['node']
        self.assertIsNotNone(node)
        result = self._app.load_module('viewers')
        self.assertTrue(result)

        component = node.add(self._app.registry.isaac.viewers.ImageViewer)
        self.assertTrue(isinstance(component, Component.Component))
        self.assertIsNotNone(component)
        self.assertIsNotNone(component.config)

        # Attribute access and item access are interchangeable for config parameters
        component.config.reduce_scale = 2.0
        self.assertEqual(component.config.reduce_scale, 2.0)
        self.assertEqual(component.config['reduce_scale'], 2.0)

        component.config['reduce_scale'] = 3.0
        self.assertEqual(component.config['reduce_scale'], 3.0)
        self.assertEqual(component.config.reduce_scale, 3.0)

        self._app.start()
        self._app.stop()

    def test_component_access(self):
        self._app = Application()
        self._app.add('node')
        node = self._app.nodes['node']
        self.assertIsNotNone(node)
        result = self._app.load_module('viewers')
        self.assertTrue(result)
        node.add(self._app.registry.isaac.viewers.ImageViewer)

        component = node.components['ImageViewer']
        self.assertTrue(isinstance(component, Component.Component))
        self.assertIsNotNone(component)
        self.assertEqual(node.components['ImageViewer'].config['target_fps'], 30.0)

        # Repeated access returns a valid handle with consistent config values
        component = node.components['ImageViewer']
        self.assertTrue(isinstance(component, Component.Component))
        self.assertIsNotNone(component)
        self.assertEqual(node.components['ImageViewer'].config['target_fps'], 30.0)

        node.components['ImageViewer'].config['target_fps'] = 45.0
        self.assertEqual(node.components['ImageViewer'].config['target_fps'], 45.0)
def main(args): app = Application(name="detection_pose_estimation_inference") # Load subgraph and get interface node app.load("packages/object_pose_estimation/apps/pose_cnn_decoder/"\ "detection_pose_estimation_cnn_inference.subgraph.json", prefix="detection_pose_estimation") detection_pose_estimation_interface = app.nodes["detection_pose_estimation.interface"]\ .components["Subgraph"] # Load configuration app.load(args.config) # Configure detection model detection_model = app.nodes["detection_pose_estimation.object_detection.tensor_r_t_inference"]\ .components["isaac.ml.TensorRTInference"] if args.detection_model_file_path is not None: detection_model.config.model_file_path = args.detection_model_file_path if args.etlt_password is not None: detection_model.config.etlt_password = args.etlt_password # Configure pose estimation model pose_estimation_model = app.nodes\ ["detection_pose_estimation.object_pose_estimation.pose_encoder"]\ .components["TensorRTInference"] if args.pose_estimation_model_file_path is not None: pose_estimation_model.config.model_file_path = args.pose_estimation_model_file_path # Configure detection decoder decoder = app.nodes["detection_pose_estimation.object_detection.detection_decoder"]\ .components["isaac.detect_net.DetectNetDecoder"] decoder.config.output_scale = [args.rows, args.cols] if args.confidence_threshold is not None: decoder.config.confidence_threshold = args.confidence_threshold if args.nms_threshold is not None: decoder.config.non_maximum_suppression_threshold = args.nms_threshold # Configure bounding box encoder bbox_encoder = app.nodes\ ["detection_pose_estimation.object_pose_estimation.detection_convertor"]\ .components["BoundingBoxEncoder"] bbox_encoder.config.image_dimensions = [args.rows, args.cols] # Configure detections filter detections_filter = app.nodes\ ["detection_pose_estimation.object_pose_estimation.detection_filter"]\ .components["FilterDetections"] detections_filter.config.image_cols = args.cols if args.mode == 'cask': # Load replay subgraph and configure interface node app.load("packages/cask/apps/replay.subgraph.json", prefix="replay") replay_interface = app.nodes["replay.interface"].components["output"] replay_interface.config.cask_directory = args.cask_directory # Connect the output of the replay subgraph to the detection subgraph app.connect(replay_interface, "color", detection_pose_estimation_interface, "color") app.connect(replay_interface, "color_intrinsics", detection_pose_estimation_interface, "intrinsics") elif args.mode == 'sim': # Load simulation subgraph and get interface node app.load("packages/object_pose_estimation/apps/pose_cnn_decoder"\ "/pose_estimation_sim.subgraph.json",\ prefix="simulation") simulation_interface = app.nodes["simulation.interface"].components[ "TcpSubscriber"] # Connect the output of the simulation with user-specified channel to the detection subgraph app.connect(simulation_interface, args.image_channel, detection_pose_estimation_interface, "color") app.connect(simulation_interface, args.intrinsics_channel, detection_pose_estimation_interface, "intrinsics") # Set the scenario manager config options if scene name is provided if (args.scenario_scene is not None): scenario_manager = app.nodes[ "simulation.scenario_manager"].components["ScenarioManager"] scenario_manager.config.scene = args.scenario_scene if (args.scenario_robot_prefab is not None): scenario_manager.config.robot_prefab = args.scenario_robot_prefab elif args.mode == 'realsense': app.load_module('realsense') # Create and configure 
realsense camera codelet camera = app.add("camera").add(app.registry.isaac.RealsenseCamera) camera.config.rows = args.rows camera.config.cols = args.cols camera.config.color_framerate = args.fps camera.config.depth_framerate = args.fps camera.config.enable_ir_stereo = False # Connect the output of the camera node to the detection subgraph app.connect(camera, "color", detection_pose_estimation_interface, "color") app.connect(camera, "color_intrinsics", detection_pose_estimation_interface, "intrinsics") elif args.mode == 'v4l': app.load_module('sensors:v4l2_camera') # Create and configure V4L camera codelet camera = app.add("camera").add(app.registry.isaac.V4L2Camera) camera.config.device_id = 0 camera.config.rows = args.rows camera.config.cols = args.cols camera.config.rate_hz = args.fps # Connect the output of the camera node to the detection subgraph app.connect(camera, "frame", detection_pose_estimation_interface, "color") app.connect(camera, "intrinsics", detection_pose_estimation_interface, "intrinsics") elif args.mode == 'image': app.load_module('message_generators') # Create feeder node feeder = app.add("feeder").add( app.registry.isaac.message_generators.ImageLoader) feeder.config.color_glob_pattern = args.image_directory feeder.config.tick_period = "1Hz" # Create intrinsic node intrinsic = app.add("intrinsic").add( app.registry.isaac.message_generators.CameraIntrinsicsGenerator) intrinsic.config.focal_length = [args.focal_length, args.focal_length] intrinsic.config.optical_center = [ args.optical_center_rows, args.optical_center_cols ] intrinsic.config.distortion_coefficients = [ 0.01, 0.01, 0.01, 0.01, 0.01 ] # Connect the output of the image feeder node to the detection subgraph app.connect(feeder, "color", detection_pose_estimation_interface, "color") # Connect the output of the intrinsic node to the detection subgraph app.connect(feeder, "color", intrinsic, "image") app.connect(intrinsic, "intrinsics", detection_pose_estimation_interface, "intrinsics") else: raise ValueError('Not supported mode {}'.format(args.mode)) app.run()
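# Hypothetical invocations of the app above; the script name and paths are
# assumptions for illustration:
#   python3 detection_pose_estimation_inference.py --mode cask --cask_directory /data/my_cask
#   python3 detection_pose_estimation_inference.py --mode realsense --rows 720 --cols 1280 --fps 15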
def main(args): app = Application(name="dope_inference") # Load dope inference subgraph app.load( "packages/object_pose_estimation/apps/dope/dope_inference.subgraph.json", "detection_pose_estimation") detection_pose_estimation_interface = app.nodes[ "detection_pose_estimation.interface"]["subgraph"] if args.mode == 'cask': # Load replay subgraph and configure interface node app.load("packages/cask/apps/replay.subgraph.json", prefix="replay") replay = app.nodes["replay.interface"].components["output"] replay.config.cask_directory = args.file app.connect(replay, "color", detection_pose_estimation_interface, "color") app.connect(replay, "color_intrinsics", detection_pose_estimation_interface, "intrinsics") elif args.mode == "realsense": app.load_module('realsense') # Create and configure realsense camera codelet camera = app.add("camera").add(app.registry.isaac.RealsenseCamera) camera.config.rows = 360 camera.config.cols = 640 camera.config.color_framerate = 15 camera.config.depth_framerate = 15 camera.config.enable_ir_stereo = False app.connect(camera, "color", detection_pose_estimation_interface, "color") app.connect(camera, "color_intrinsics", detection_pose_estimation_interface, "intrinsics") elif args.mode == "sim": app.load(filename='packages/navsim/apps/navsim_tcp.subgraph.json', prefix='simulation') sim = app.nodes["simulation.interface"]["output"] app.connect(sim, "color", detection_pose_estimation_interface, "color") app.connect(sim, "color_intrinsics", detection_pose_estimation_interface, "intrinsics") elif args.mode == "image": app.load_module("message_generators") image_loader = app.add("image").add( app.registry.isaac.message_generators.ImageLoader, "ImageLoader") image_loader.config.color_filename = args.file image_loader.config.tick_period = "1Hz" app.connect(image_loader, "color", detection_pose_estimation_interface, "color") # Create intrinsic node intrinsic = app.add("intrinsic").add( app.registry.isaac.message_generators.CameraIntrinsicsGenerator) intrinsic.config.focal_length = [args.focal, args.focal] intrinsic.config.optical_center = args.optical_center intrinsic.config.distortion_coefficients = [ 0.01, 0.01, 0.01, 0.01, 0.01 ] app.connect(image_loader, "color", intrinsic, "image") app.connect(intrinsic, "intrinsics", detection_pose_estimation_interface, "intrinsics") # TensorRT inference trt = app.nodes["detection_pose_estimation.inference"]["TensorRTInference"] trt.config.model_file_path = args.model dir, filename = os.path.split(args.model) trt.config.engine_file_path = "{0}/{1}.plan".format( dir, os.path.splitext(filename)[0]) # Dope decoder decoder = app.nodes["detection_pose_estimation.decoder"]["DopeDecoder"] decoder.config.box_dimensions = args.box decoder.config.label = args.label # Detections3Viewer detection3_viewer = app.nodes["detection_pose_estimation.viewers"][ "Detections3Viewer"] detection3_viewer.config.box_dimensions = args.box if args.camera_pose is not None: app.load_module("utils") pose_node = app.add("pose") detection_injector = pose_node.add( app.registry.isaac.utils.DetectionsToPoseTree) detection_injector.config.detection_frame = "camera" app.connect(decoder, "output_poses", detection_injector, "detections") camera_initializer = pose_node.add( app.registry.isaac.alice.PoseInitializer) camera_initializer.config.lhs_frame = "world" camera_initializer.config.rhs_frame = "camera" camera_initializer.config.pose = args.camera_pose app.run()
    ur_driver.config.tool_digital_out_names = ["valve", "pump"]
    ur_driver.config.tool_digital_in_names = ["unknown", "gripper"]

    # Edges
    app.connect(ur_interface, "arm_state", behavior_interface, "joint_state")
    app.connect(ur_interface, "io_state", behavior_interface, "io_state")
    app.connect(behavior_interface, "io_command", ur_interface, "io_command")
    app.connect(behavior_interface, "joint_target", ur_interface, "joint_target")

    # Sequence nodes
    sequence_behavior = app.nodes["behavior.sequence_behavior"]
    repeat_behavior = app.nodes["behavior.repeat_behavior"]
    nodes_stopped = True

    # Enable Sight widget for the digital I/O channels
    widget = app.add("sight").add(app.registry.isaac.sight.SightWidget, "IO")
    widget.config.type = "plot"
    widget.config.channels = [
        {
            "name": "ur.universal_robots/UniversalRobots/gripper"
        },
        {
            "name": "ur.universal_robots/UniversalRobots/pump"
        },
        {
            "name": "ur.universal_robots/UniversalRobots/valve"
        }
    ]

    # Start app
    app.start()
        dest='rows',
        type=int,
        default=720,
        help='The vertical resolution of the camera images')
    parser.add_argument('--cols',
                        dest='cols',
                        type=int,
                        default=1280,
                        help='The horizontal resolution of the camera images')
    args, _ = parser.parse_known_args()

    app = Application(name="record_dummy", modules=["message_generators", "cask"])
    app.load_module('sight')

    # Create recorder node
    recorder = app.add("recorder").add(app.registry.isaac.cask.Recorder)
    recorder.config.base_directory = args.base_directory

    # Create dummy camera codelet
    camera = app.add("cam").add(
        app.registry.isaac.message_generators.CameraGenerator)
    camera.config.rows = args.rows
    camera.config.cols = args.cols
    camera.config.tick_period = str(args.fps) + "Hz"

    app.connect(camera, "color_left", recorder, "color")
    app.connect(camera, "depth", recorder, "depth")

    app.run()
        default=10,
        help='Number of virtual lidar nodes to run')
    parser.add_argument('--points_per_message',
                        dest='points_per_message',
                        default=10,
                        help='Number of virtual lidar points to generate at each tick per node')
    parser.add_argument('--node_tick_rate',
                        dest='node_tick_rate',
                        default='50Hz',
                        help='Rate at which each node should generate sets of points')
    args, _ = parser.parse_known_args()

    app = Application(name="record_small_point_clouds_test",
                      modules=["message_generators", "cask", "sight"])

    recorder = app.add("recorder").add(app.registry.isaac.cask.Recorder)
    recorder.config.base_directory = args.base_directory

    generators = list()
    for i in range(int(args.num_nodes)):
        pcd = app.add("cam" + str(i)).add(
            app.registry.isaac.message_generators.PointCloudGenerator)
        pcd.config.point_count = 100000000
        pcd.config.point_per_message = int(args.points_per_message)
        pcd.config.tick_period = args.node_tick_rate
        generators.append(pcd)
        # Record each generator on its own channel
        app.connect(pcd, "point_cloud", recorder, "point_cloud" + str(i))

    app.run()
"/detection_pose_estimation_cnn_inference.subgraph.json", prefix="detection_pose_estimation") perception_interface = app.nodes[ "detection_pose_estimation.interface"]["Subgraph"] app.load( "apps/samples/manipulation/shuffle_box_detection_pose_estimation.config.json" ) # load sim tsubgraph for tcp connection. app.load("packages/navsim/apps/navsim_tcp.subgraph.json", "simulation") sim_in = app.nodes["simulation.interface"]["input"] sim_out = app.nodes["simulation.interface"]["output"] app.connect(sim_out, "joint_state", lqr_interface, "joint_state") app.connect(sim_out, "joint_state", behavior_interface, "joint_state") app.connect(sim_out, "io_state", behavior_interface, "io_state") app.connect(lqr_interface, "joint_command", sim_in, "joint_position") app.connect(behavior_interface, "io_command", sim_in, "io_command") if use_perception: app.connect(sim_out, "color", perception_interface, "color") app.connect(sim_out, "color_intrinsics", perception_interface, "intrinsics") viewers = app.add("viewers") depth_viewer = viewers.add(app.registry.isaac.viewers.DepthCameraViewer, "DepthViewer") app.connect(sim_out, "depth", depth_viewer, "depth") app.connect(sim_out, "depth_intrinsics", depth_viewer, "intrinsics") depth_viewer.config.max_visualization_depth = 3 # run app app.run()