def initialize_app(self):
    app = Application(name="rm_isaac_bridge")
    app.load(filename="packages/navsim/apps/navsim_tcp.subgraph.json", prefix="simulation")
    if hasattr(self, 'arm'):
        app = self.arm.connect_app(app)
    if hasattr(self, 'camera'):
        app = self.camera.connect_app(app)
    if hasattr(self, 'effector'):
        app = self.effector.connect_app(app)
    self._app = app
def setup_app():
    app = Application(name="composite_atlas", modules=["composite"])
    atlas = app.add("atlas").add(app.registry.isaac.composite.CompositeAtlas)
    atlas.config["cask"] = "packages/composite/tests/waypoints"
    pub_node = app.add("pub")
    pub = pub_node.add(app.registry.isaac.composite.CompositePublisher,
                       name="CompositePublisher")
    pub.config.tick_period = "20Hz"
    pub.config.atlas = "atlas/CompositeAtlas"
    pub.config.path = ["cart", "dolly"]
    return app, pub
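# A minimal sketch of driving the function above, assuming no further wiring
# is needed before starting the app:
if __name__ == '__main__':
    app, pub = setup_app()
    app.run()    # blocks until the application stops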
def main():
    app = Application(app_filename="apps/rm/realsense_gmap/realsense_gmap.app.json")
    odometry = app.nodes["odometry"].add(Odometry, "Odometry")
    app.connect(app.nodes["visual_odometry_tracker"]["StereoVisualOdometry"],
                "left_camera_pose", odometry, "pose")
    app.connect(odometry, "odometry", app.nodes["gmapping"]["GMapping"], "odometry")
    gmap = app.nodes["gmapping"]["GMapping"]
    pprint(gmap.config.get_config())
    app.run()
def main():
    app = Application(
        app_filename="apps/rm/realsense_cartographer/realsense_cartographer.app.json")
    pose_to_edge = app.nodes["pose_tree_injector"].add(PoseToEdge, "PoseToEdge")
    app.connect(app.nodes["visual_odometry_tracker"]["StereoVisualOdometry"],
                "left_camera_pose", pose_to_edge, "pose")
    app.connect(pose_to_edge, "edge",
                app.nodes["pose_tree_injector"]["PoseMessageInjector"], "pose")
    comp = app.nodes["visual_odometry_tracker"]["StereoVisualOdometry"]
    pprint(comp.config.get_config())
    app.run()
def setup_app():
    app = Application(name="follow_path", modules=["composite"])
    atlas = app.add("atlas").add(app.registry.isaac.composite.CompositeAtlas)
    atlas.config["cask"] = "packages/composite/tests/waypoints"
    pub_node = app.add("pub")
    pub = pub_node.add(app.registry.isaac.composite.CompositePublisher,
                       name="CompositePublisher")
    pub.config.tick_period = "20Hz"
    pub.config.atlas = "atlas/CompositeAtlas"
    pub.config.path = ["cart", "dolly"]
    follow_node = app.add("follow")
    follow = follow_node.add(app.registry.isaac.composite.FollowPath)
    follow.config.tick_period = "10Hz"
    follow.config.wait_time = 0.05
    follow_node.add(app.registry.isaac.composite.CompositeMetric)
    app.connect(pub, "path", follow, "path")
    app.connect(follow, "goal", follow, "state")
    return app, follow_node
parser.add_argument('--base_directory',
                    dest='base_directory',
                    default='/tmp',
                    help='The directory in which log files will be stored')
parser.add_argument('--fps',
                    dest='fps',
                    type=int,
                    default=30,
                    help='The framerate at which to record')
parser.add_argument('--mode',
                    dest='mode',
                    choices=['720p', '640x480'],
                    default='720p',
                    help='The resolution of the camera images')
args, _ = parser.parse_known_args()

app = Application(name="record_realsense", modules=["realsense"])

if args.mode == '720p':
    rows = 720
    cols = 1280
elif args.mode == '640x480':
    # 640x480 is width x height, i.e. 640 columns by 480 rows
    rows = 480
    cols = 640
else:
    raise ValueError('Unsupported camera resolution')

# Create recorder node
recorder = app.add("recorder").add(app.registry.isaac.alice.Recorder)
recorder.config.base_directory = args.base_directory

# Create realsense camera codelet
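# The excerpt ends at the comment above. A plausible continuation is sketched
# below: the RealsenseCamera config names are borrowed from the other realsense
# snippets in this section, and the recorder channel names mirror the
# record_dummy app below; treat both as assumptions.
camera = app.add("camera").add(app.registry.isaac.RealsenseCamera)
camera.config.rows = rows
camera.config.cols = cols
camera.config.color_framerate = args.fps
camera.config.depth_framerate = args.fps
app.connect(camera, "color", recorder, "color")
app.connect(camera, "depth", recorder, "depth")
app.run()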
from engine.pyalice import Application

app = Application(name="mybot")
app.load_module('message_generators')
app.load_module('viewers')
app.load('packages/navsim/apps/navsim_tcp.subgraph.json', 'simulation')

node_view = app.add("viewer")
component_view = node_view.add(app.registry.isaac.viewers.ColorCameraViewer,
                               'ColorCameraViewer')
node_view = app.add("depth_viewer")
node_view.add(app.registry.isaac.viewers.DepthCameraViewer, 'DepthCameraViewer')

app.connect('simulation.interface/output', 'color',
            'viewer/ColorCameraViewer', 'color_listener')
app.connect('simulation.interface/output', 'depth',
            'depth_viewer/DepthCameraViewer', 'depth_listener')

app.run()
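# Note: app.connect above uses the string form 'node/component' plus channel
# name. The same wiring can also be expressed with component handles, as the
# other snippets in this section do; a sketch of the equivalent first connection:
app.connect(app.nodes['simulation.interface']['output'], 'color',
            component_view, 'color_listener')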
'''
Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.

NVIDIA CORPORATION and its licensors retain all intellectual property and
proprietary rights in and to this software, related documentation and any
modifications thereto. Any use, reproduction, disclosure or distribution of
this software and related documentation without an express license agreement
from NVIDIA CORPORATION is strictly prohibited.
'''
from engine.pyalice import Application
import argparse
import random
import sys

DEMOS = ["demo_1", "demo_2", "demo_3", "demo_4"]

if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='flatsim is a flat-world simulator for navigation')
    parser.add_argument('--demo',
                        dest='demo',
                        help='The scenario which will be used for flatsim')
    args, _ = parser.parse_known_args()
    demo = args.demo if args.demo is not None else random.choice(DEMOS)

    app = Application(name="flatsim")
    app.load("packages/flatsim/apps/flatsim.subgraph.json", prefix="flatsim")
    app.load("packages/flatsim/apps/{}.json".format(demo))
    app.run()
def main(args):
    app = Application(name="detect_net_inference")

    # Load subgraph and get interface node
    app.load("packages/detect_net/apps/detect_net_inference.subgraph.json",
             prefix="detect_net_inference")
    detect_net_interface = app.nodes["detect_net_inference.subgraph"]\
        .components["interface"]

    # Load configuration
    app.load(args.config)

    # Configure detection model
    detection_model = app.nodes["detect_net_inference.tensor_r_t_inference"]\
        .components["isaac.ml.TensorRTInference"]
    if args.detection_model_file_path is not None:
        detection_model.config.model_file_path = args.detection_model_file_path
    if args.etlt_password is not None:
        detection_model.config.etlt_password = args.etlt_password

    # Configure detection decoder
    decoder = app.nodes["detect_net_inference.detection_decoder"]\
        .components["isaac.detect_net.DetectNetDecoder"]
    decoder.config.output_scale = [args.rows, args.cols]
    if args.confidence_threshold is not None:
        decoder.config.confidence_threshold = args.confidence_threshold
    if args.nms_threshold is not None:
        decoder.config.non_maximum_suppression_threshold = args.nms_threshold

    if args.mode == 'cask':
        # Load replay subgraph and configure interface node
        app.load("packages/record_replay/apps/replay.subgraph.json", prefix="replay")
        replay_interface = app.nodes["replay.interface"].components["output"]
        replay_interface.config.cask_directory = args.cask_directory
        # Connect the output of the replay subgraph to the detection subgraph
        app.connect(replay_interface, "color", detect_net_interface, "image")
    elif args.mode == 'sim':
        # Load simulation subgraph and get interface node
        app.load("packages/navsim/apps/navsim_training.subgraph.json",
                 prefix="simulation")
        simulation_interface = app.nodes["simulation.interface"].components["output"]
        # Connect the simulation output on the user-specified channel to the
        # detection subgraph
        app.connect(simulation_interface, args.image_channel,
                    detect_net_interface, "image")
    elif args.mode == 'realsense':
        app.load_module('realsense')
        # Create and configure realsense camera codelet
        camera = app.add("camera").add(app.registry.isaac.RealsenseCamera)
        camera.config.rows = args.rows
        camera.config.cols = args.cols
        camera.config.color_framerate = args.fps
        camera.config.depth_framerate = args.fps
        camera.config.enable_ir_stereo = False
        # Connect the output of the camera node to the detection subgraph
        app.connect(camera, "color", detect_net_interface, "image")
    elif args.mode == 'v4l':
        app.load_module('sensors:v4l2_camera')
        # Create and configure V4L camera codelet
        camera = app.add("camera").add(app.registry.isaac.V4L2Camera)
        camera.config.device_id = 0
        camera.config.rows = args.rows
        camera.config.cols = args.cols
        camera.config.rate_hz = args.fps
        # Connect the output of the camera node to the detection subgraph
        app.connect(camera, "frame", detect_net_interface, "image")
    elif args.mode == 'image':
        app.load_module('message_generators')
        # Create feeder node
        feeder = app.add("feeder").add(
            app.registry.isaac.message_generators.ImageLoader)
        feeder.config.color_glob_pattern = args.image_directory
        feeder.config.tick_period = "1Hz"
        feeder.config.focal_length = [args.focal_length, args.focal_length]
        feeder.config.optical_center = [
            args.optical_center_rows, args.optical_center_cols
        ]
        feeder.config.distortion_coefficients = [0.01, 0.01, 0.01, 0.01, 0.01]
        # Connect the output of the image feeder node to the detection subgraph
        app.connect(feeder, "color", detect_net_interface, "image")
    else:
        raise ValueError('Unsupported mode {}'.format(args.mode))

    app.run()
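# main(args) above reads many attributes from args. A hypothetical parser
# covering exactly those fields is sketched below; flag names mirror the
# attribute names and all defaults are illustrative assumptions.
import argparse

parser = argparse.ArgumentParser(description='DetectNet inference')
parser.add_argument('--config', help='Additional configuration file to load')
parser.add_argument('--mode', choices=['cask', 'sim', 'realsense', 'v4l', 'image'],
                    default='cask', help='Source of the input images')
parser.add_argument('--detection_model_file_path', help='Path to the detection model file')
parser.add_argument('--etlt_password', help='Password for an .etlt model, if used')
parser.add_argument('--rows', type=int, default=720, help='Vertical image resolution')
parser.add_argument('--cols', type=int, default=1280, help='Horizontal image resolution')
parser.add_argument('--fps', type=int, default=30, help='Camera framerate')
parser.add_argument('--confidence_threshold', type=float, help='Detection confidence threshold')
parser.add_argument('--nms_threshold', type=float, help='Non-maximum suppression threshold')
parser.add_argument('--cask_directory', help='Cask to replay in cask mode')
parser.add_argument('--image_channel', default='color', help='Simulation channel in sim mode')
parser.add_argument('--image_directory', help='Glob pattern for images in image mode')
parser.add_argument('--focal_length', type=float, default=700.0, help='Focal length (image mode)')
parser.add_argument('--optical_center_rows', type=float, default=360.0,
                    help='Optical center row (image mode)')
parser.add_argument('--optical_center_cols', type=float, default=640.0,
                    help='Optical center column (image mode)')

if __name__ == '__main__':
    main(parser.parse_args())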
parser.add_argument(
    "--mission_host",
    help="The IP address or hostname of the host to connect to and receive missions from",
    default="localhost")
parser.add_argument(
    "--mission_port",
    help="The TCP port to connect to the mission server",
    type=int,
    default=9998)
args = parser.parse_args()

# Create and start the app
more_jsons = args.map_json
more_jsons += ",packages/navsim/robots/str4.json"
app = Application(
    app_filename="packages/cart_delivery/apps/cart_delivery.app.json",
    more_jsons=more_jsons)
app.load("packages/cart_delivery/apps/navigation.config.json",
         prefix=args.demo_subgraph_name)
app.load("packages/cart_delivery/apps/detection_pose_estimation.config.json",
         prefix=args.demo_subgraph_name)
if args.pose2_planner:
    app.load("packages/cart_delivery/apps/pose2_planner.config.json",
             prefix=args.demo_subgraph_name)
if args.mission_robot_name:
    # Load the mission subgraph and set the config based on the input parameters
    app.load("packages/behavior_tree/apps/missions.graph.json")
    app.nodes["tcp_client"].components["JsonTcpClient"].config[
        "host"] = args.mission_host
    app.nodes["tcp_client"].components["JsonTcpClient"].config[
        "port"] = args.mission_port
'''
Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.

NVIDIA CORPORATION and its licensors retain all intellectual property and
proprietary rights in and to this software, related documentation and any
modifications thereto. Any use, reproduction, disclosure or distribution of
this software and related documentation without an express license agreement
from NVIDIA CORPORATION is strictly prohibited.
'''
from engine.pyalice import Application, Node

if __name__ == '__main__':
    app = Application(name="svo_realsense",
                      modules=[
                          'perception:stereo_visual_odometry',
                          'realsense',
                          'utils',
                          'viewers',
                      ])

    camera = app.add('camera').add(app.registry.isaac.RealsenseCamera)
    camera.config.align_to_color = False
    camera.config.auto_exposure_priority = False
    camera.config.enable_color = False
    camera.config.cols = 640
    camera.config.rows = 360
    camera.config.enable_depth = False
    camera.config.enable_depth_laser = False
    camera.config.enable_ir_stereo = True
    camera.config.ir_framerate = 30
"--mission_host", help= "The ip address or hostname of the host to connect to and receive missions from", default="localhost") parser.add_argument("--mission_port", help="The TCP port to connect to the mission server", type=int, default=9998) args = parser.parse_args() # Create and start the app more_jsons = args.map_json + "," + args.robot_json if args.more: more_jsons += "," + args.more app_path = "apps/navsim/navsim_navigate.app.json" app = Application(app_filename=app_path, more_jsons=more_jsons) if args.mission_robot_name: # Load the mission subgraph and set the config based on the input parameters app.load("packages/behavior_tree/apps/missions.graph.json") app.nodes["tcp_client"].components["JsonTcpClient"].config[ "host"] = args.mission_host app.nodes["tcp_client"].components["JsonTcpClient"].config[ "port"] = args.mission_port app.nodes["mission_control"].components["NodeGroup"].config["node_names"] = \ ["goals.goal_behavior"] app.nodes["robot_name"].components["JsonMockup"].config["json_mock"] = \ {"text":args.mission_robot_name} run_on_start = app.nodes["goals.run_on_start"] # Change the start behavior to the mission behavior nodes = run_on_start.components["NodeGroup"].config["node_names"]
# Get kinematic file and joints
kinematic_file = "apps/assets/kinematic_trees/{}.kinematic.json".format(args.arm)
joints = []
with open(kinematic_file, 'r') as fd:
    kt = json.load(fd)
    for link in kt['links']:
        if 'motor' in link and link['motor']['type'] != 'constant':
            joints.append(link['name'])

# Create composite atlas
create_composite_atlas(args.cask, args.arm, joints)

# Create and start the app
app = Application(name="Shuffle Box")

# Load behavior subgraph. This contains the sequence behavior to move the arm
# between waypoints and control suction on/off.
app.load("apps/samples/manipulation/shuffle_box_behavior.subgraph.json",
         prefix="behavior")
behavior_interface = app.nodes["behavior.interface"]["subgraph"]
app.nodes["behavior.atlas"]["CompositeAtlas"].config.cask = args.cask

# Load multi-joint LQR control subgraph
app.load("packages/planner/apps/multi_joint_lqr_control.subgraph.json",
         prefix="lqr")
lqr_interface = app.nodes["lqr.subgraph"]["interface"]
kinematic_tree = app.nodes["lqr.kinematic_tree"]["KinematicTree"]
lqr_planner = app.nodes["lqr.local_plan"]["MultiJointLqrPlanner"]
app.connect(behavior_interface, "joint_target", lqr_interface, "joint_target")
parser.add_argument(
    '--resolution',
    dest='resolution',
    action='store',
    default='1280x720',
    help='Camera resolution')
parser.add_argument(
    '--framerate',
    dest='framerate',
    action='store',
    type=int,
    default=60,
    help='Camera framerate')
parser.add_argument(
    '--device_id',
    dest='device_id',
    action='store',
    type=int,
    help='Camera device id')
args = parser.parse_args()

# Create april_tag_python application
app = Application(
    name="april_tags_python",
    modules=[
        "//packages/perception:april_tags", "realsense", "sensors:v4l2_camera",
        "viewers", "zed"
    ])

# Setup camera node
camera = None
if args.camera == "zed":
    camera = app.add('input_images').add(app.registry.isaac.ZedCamera)
    camera.config.resolution = args.resolution
    camera.config.tick_period = '{tick_period}Hz'.format(tick_period=args.framerate)
    camera_out_channel = "left_camera_rgb"
elif args.camera == "realsense":
    camera = app.add('input_images').add(app.registry.isaac.RealsenseCamera)
    camera.config.cols, camera.config.rows = tuple(
        [int(arg) for arg in args.resolution.split('x')])
    camera.config.color_framerate = args.framerate
    camera_out_channel = "color"
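# The modules list loads sensors:v4l2_camera, so a V4L branch presumably
# follows the two branches above. A sketch modeled on the V4L2Camera setup in
# the detect_net snippet; the branch and its channel name are assumptions.
elif args.camera == "v4l2":
    camera = app.add('input_images').add(app.registry.isaac.V4L2Camera)
    camera.config.device_id = args.device_id
    camera.config.cols, camera.config.rows = tuple(
        [int(arg) for arg in args.resolution.split('x')])
    camera.config.rate_hz = args.framerate
    camera_out_channel = "frame"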
parser.add_argument('--fps',
                    dest='fps',
                    type=float,
                    default=3.0,
                    help='The framerate at which to record')
parser.add_argument('--rows',
                    dest='rows',
                    type=int,
                    default=720,
                    help='The vertical resolution of the camera images')
parser.add_argument('--cols',
                    dest='cols',
                    type=int,
                    default=1280,
                    help='The horizontal resolution of the camera images')
args, _ = parser.parse_known_args()

app = Application(name="record_dummy", modules=["message_generators"])

# Create recorder node
recorder = app.add("recorder").add(app.registry.isaac.alice.Recorder)
recorder.config.base_directory = args.base_directory

# Create dummy camera codelet
camera = app.add("cam").add(
    app.registry.isaac.message_generators.CameraGenerator)
camera.config.rows = args.rows
camera.config.cols = args.cols
camera.config.tick_period = str(args.fps) + "Hz"

app.connect(camera, "color_left", recorder, "color")
app.connect(camera, "depth", recorder, "depth")
app.run()
args = parser.parse_args()

# Get kinematic file and joints
kinematic_file = "apps/assets/kinematic_trees/ur10.kinematic.json"
joints = []
with open(kinematic_file, 'r') as fd:
    kt = json.load(fd)
    for link in kt['links']:
        if 'motor' in link and link['motor']['type'] != 'constant':
            joints.append(link['name'])

# Create composite atlas
create_composite_atlas(args.cask, joints)

# Create and start the app
app = Application(name="Shuffle Box Hardware")

# Load behavior subgraph. This contains the sequence behavior to move the arm
# between waypoints and control suction on/off.
app.load("apps/samples/manipulation/shuffle_box_behavior.subgraph.json",
         prefix="behavior")
behavior_interface = app.nodes["behavior.interface"]["subgraph"]
app.nodes["behavior.atlas"]["CompositeAtlas"].config.cask = args.cask

# Load multi-joint LQR control subgraph
app.load("packages/planner/apps/multi_joint_lqr_control.subgraph.json",
         prefix="lqr")
lqr_interface = app.nodes["lqr.subgraph"]["interface"]
kinematic_tree = app.nodes["lqr.kinematic_tree"]["KinematicTree"]
lqr_planner = app.nodes["lqr.local_plan"]["MultiJointLqrPlanner"]
app.connect(behavior_interface, "joint_target", lqr_interface, "joint_target")

kinematic_tree.config.kinematic_file = kinematic_file
lqr_planner.config.speed_min = [-1.0] * len(joints)
lqr_planner.config.speed_max = [1.0] * len(joints)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Demonstrates Stereo Visual Odometry tracking'
        ' using the live stereo image feed obtained from the ZED (Mini) camera.')
    parser.add_argument('--imu',
                        dest='imu',
                        action='store_true',
                        help='Enables support for the on-board camera IMU.')
    parser.add_argument('--no-imu',
                        dest='imu',
                        action='store_false',
                        help='Disables support for the on-board camera IMU.')
    parser.set_defaults(imu=False)
    args = parser.parse_args()

    app = Application(name="svo_zed",
                      modules=["perception:stereo_visual_odometry", "utils",
                               "viewers", "zed"])

    camera = app.add('camera').add(app.registry.isaac.ZedCamera)
    camera.config.enable_factory_rectification = True
    camera.config.enable_imu = args.imu
    camera.config.resolution = "672x376"
    camera.config.gray_scale = True
    camera.config.rgb = False
    camera.config.tick_period = "60Hz"

    splitter_left = app.add('camera_splitter_left').add(
        app.registry.isaac.utils.ColorCameraProtoSplitter)
    splitter_left.config.only_pinhole = False
    splitter_right = app.add('camera_splitter_right').add(
        app.registry.isaac.utils.ColorCameraProtoSplitter)
'''
Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.

NVIDIA CORPORATION and its licensors retain all intellectual property and
proprietary rights in and to this software, related documentation and any
modifications thereto. Any use, reproduction, disclosure or distribution of
this software and related documentation without an express license agreement
from NVIDIA CORPORATION is strictly prohibited.
'''
from engine.pyalice import Application, Node
import xsense_imu

if __name__ == '__main__':
    app = Application(
        app_filename="packages/xsense_imu/svo_realsense_xsens_imu.config.json")

    camera = app.add('camera').add(app.registry.isaac.RealsenseCamera)
    camera.config.align_to_color = False
    camera.config.auto_exposure_priority = False
    camera.config.enable_color = False
    camera.config.cols = 640
    camera.config.rows = 360
    camera.config.enable_depth = False
    camera.config.enable_depth_laser = False
    camera.config.enable_ir_stereo = True
    camera.config.ir_framerate = 30

    splitter_left = app.add('camera_splitter_left').add(
        app.registry.isaac.utils.ColorCameraProtoSplitter)
    splitter_left.config.only_pinhole = False