from engine.pyalice import Application
from pprint import pprint

# `Odometry` is assumed to be a Python codelet class defined or imported elsewhere
# in this file.

def main():
    app = Application(app_filename="apps/rm/realsense_gmap/realsense_gmap.app.json")

    # Add an Odometry codelet to the existing "odometry" node.
    odometry = app.nodes["odometry"].add(Odometry, "Odometry")

    # Feed the visual odometry pose into the odometry codelet, and the resulting
    # odometry estimate into GMapping.
    app.connect(app.nodes["visual_odometry_tracker"]["StereoVisualOdometry"],
                "left_camera_pose", odometry, "pose")
    app.connect(odometry, "odometry", app.nodes["gmapping"]["GMapping"], "odometry")

    # Print the current GMapping configuration.
    gmap = app.nodes["gmapping"]["GMapping"]
    pprint(gmap.config.get_config())

    app.run()
from engine.pyalice import Application
from pprint import pprint

# `PoseToEdge` is assumed to be a Python codelet class defined or imported elsewhere
# in this file.

def main():
    app = Application(
        app_filename="apps/rm/realsense_cartographer/realsense_cartographer.app.json")

    # Add a PoseToEdge codelet to the existing "pose_tree_injector" node.
    pose_to_edge = app.nodes["pose_tree_injector"].add(PoseToEdge, "PoseToEdge")

    # Convert the visual odometry pose into a pose tree edge and inject it.
    app.connect(app.nodes["visual_odometry_tracker"]["StereoVisualOdometry"],
                "left_camera_pose", pose_to_edge, "pose")
    app.connect(pose_to_edge, "edge",
                app.nodes["pose_tree_injector"]["PoseMessageInjector"], "pose")

    # Print the current visual odometry configuration.
    comp = app.nodes["visual_odometry_tracker"]["StereoVisualOdometry"]
    pprint(comp.config.get_config())

    app.run()
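Both snippets above only read the configuration with config.get_config(). The same config accessor also supports assignment, so individual parameters can be overridden from Python before app.run() starts the application (the detect_net example below relies on the same mechanism). The following is a minimal sketch of that pattern; "example_parameter" is a hypothetical name used purely for illustration, not a documented StereoVisualOdometry setting.

from engine.pyalice import Application
from pprint import pprint

app = Application(
    app_filename="apps/rm/realsense_cartographer/realsense_cartographer.app.json")

# Retrieve a component handle and inspect its current configuration...
comp = app.nodes["visual_odometry_tracker"]["StereoVisualOdometry"]
pprint(comp.config.get_config())

# ...then override a single parameter before starting the application.
# "example_parameter" is a hypothetical name used only to show the pattern.
comp.config.example_parameter = 0.5

app.run()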
from engine.pyalice import Application

def main(args):
    app = Application(name="detect_net_inference")

    # Load subgraph and get interface node
    app.load("packages/detect_net/apps/detect_net_inference.subgraph.json",
             prefix="detect_net_inference")
    detect_net_interface = app.nodes["detect_net_inference.subgraph"]\
        .components["interface"]

    # Load configuration
    app.load(args.config)

    # Configure detection model
    detection_model = app.nodes["detect_net_inference.tensor_r_t_inference"]\
        .components["isaac.ml.TensorRTInference"]
    if args.detection_model_file_path is not None:
        detection_model.config.model_file_path = args.detection_model_file_path
    if args.etlt_password is not None:
        detection_model.config.etlt_password = args.etlt_password

    # Configure detection decoder
    decoder = app.nodes["detect_net_inference.detection_decoder"]\
        .components["isaac.detect_net.DetectNetDecoder"]
    decoder.config.output_scale = [args.rows, args.cols]
    if args.confidence_threshold is not None:
        decoder.config.confidence_threshold = args.confidence_threshold
    if args.nms_threshold is not None:
        decoder.config.non_maximum_suppression_threshold = args.nms_threshold

    if args.mode == 'cask':
        # Load replay subgraph and configure interface node
        app.load("packages/record_replay/apps/replay.subgraph.json", prefix="replay")
        replay_interface = app.nodes["replay.interface"].components["output"]
        replay_interface.config.cask_directory = args.cask_directory
        # Connect the output of the replay subgraph to the detection subgraph
        app.connect(replay_interface, "color", detect_net_interface, "image")
    elif args.mode == 'sim':
        # Load simulation subgraph and get interface node
        app.load("packages/navsim/apps/navsim_training.subgraph.json",
                 prefix="simulation")
        simulation_interface = app.nodes["simulation.interface"].components["output"]
        # Connect the user-specified simulation channel to the detection subgraph
        app.connect(simulation_interface, args.image_channel, detect_net_interface,
                    "image")
    elif args.mode == 'realsense':
        app.load_module('realsense')
        # Create and configure realsense camera codelet
        camera = app.add("camera").add(app.registry.isaac.RealsenseCamera)
        camera.config.rows = args.rows
        camera.config.cols = args.cols
        camera.config.color_framerate = args.fps
        camera.config.depth_framerate = args.fps
        camera.config.enable_ir_stereo = False
        # Connect the output of the camera node to the detection subgraph
        app.connect(camera, "color", detect_net_interface, "image")
    elif args.mode == 'v4l':
        app.load_module('sensors:v4l2_camera')
        # Create and configure V4L camera codelet
        camera = app.add("camera").add(app.registry.isaac.V4L2Camera)
        camera.config.device_id = 0
        camera.config.rows = args.rows
        camera.config.cols = args.cols
        camera.config.rate_hz = args.fps
        # Connect the output of the camera node to the detection subgraph
        app.connect(camera, "frame", detect_net_interface, "image")
    elif args.mode == 'image':
        app.load_module('message_generators')
        # Create feeder node
        feeder = app.add("feeder").add(
            app.registry.isaac.message_generators.ImageLoader)
        feeder.config.color_glob_pattern = args.image_directory
        feeder.config.tick_period = "1Hz"
        feeder.config.focal_length = [args.focal_length, args.focal_length]
        feeder.config.optical_center = [
            args.optical_center_rows, args.optical_center_cols
        ]
        feeder.config.distortion_coefficients = [0.01, 0.01, 0.01, 0.01, 0.01]
        # Connect the output of the image feeder node to the detection subgraph
        app.connect(feeder, "color", detect_net_interface, "image")
    else:
        raise ValueError('Unsupported mode: {}'.format(args.mode))

    app.run()
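main(args) above consumes an argparse namespace whose parser is not shown here. The sketch below is one way to build a parser that supplies every attribute the function reads; the flag names, defaults, and choice of required arguments are assumptions for illustration, not the script's actual command-line interface.

import argparse

if __name__ == '__main__':
    # Flag names, defaults, and required arguments are illustrative assumptions;
    # the shipped script may differ.
    parser = argparse.ArgumentParser(description='Run DetectNet inference')
    parser.add_argument('--mode', default='sim',
                        choices=['cask', 'sim', 'realsense', 'v4l', 'image'])
    parser.add_argument('--config', required=True)
    parser.add_argument('--rows', type=int, default=720)
    parser.add_argument('--cols', type=int, default=1280)
    parser.add_argument('--fps', type=int, default=30)
    parser.add_argument('--detection_model_file_path', default=None)
    parser.add_argument('--etlt_password', default=None)
    parser.add_argument('--confidence_threshold', type=float, default=None)
    parser.add_argument('--nms_threshold', type=float, default=None)
    parser.add_argument('--cask_directory', default=None)
    parser.add_argument('--image_channel', default='color')
    parser.add_argument('--image_directory', default=None)
    parser.add_argument('--focal_length', type=float, default=700.0)
    parser.add_argument('--optical_center_rows', type=float, default=360.0)
    parser.add_argument('--optical_center_cols', type=float, default=640.0)
    main(parser.parse_args())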
Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.

NVIDIA CORPORATION and its licensors retain all intellectual property
and proprietary rights in and to this software, related documentation
and any modifications thereto. Any use, reproduction, disclosure or
distribution of this software and related documentation without an express
license agreement from NVIDIA CORPORATION is strictly prohibited.
'''
from engine.pyalice import Application

import argparse
import random
import sys

DEMOS = ["demo_1", "demo_2", "demo_3", "demo_4"]

if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='flatsim is a flat-world simulator for navigation')
    parser.add_argument('--demo',
                        dest='demo',
                        help='The scenario which will be used for flatsim')
    args, _ = parser.parse_known_args()

    demo = args.demo if args.demo is not None else random.choice(DEMOS)

    app = Application(name="flatsim")
    app.load("packages/flatsim/apps/flatsim.subgraph.json", prefix="flatsim")
    app.load("packages/flatsim/apps/{}.json".format(demo))

    app.run()