def main(args):
    app = Application(name="detection_pose_estimation_inference")

    # Load subgraph and get interface node
    app.load("packages/object_pose_estimation/apps/pose_cnn_decoder/"\
        "detection_pose_estimation_cnn_inference.subgraph.json",
        prefix="detection_pose_estimation")
    detection_pose_estimation_interface = app.nodes["detection_pose_estimation.interface"]\
        .components["Subgraph"]

    # Load configuration
    app.load(args.config)

    # Configure detection model
    detection_model = app.nodes["detection_pose_estimation.object_detection.tensor_r_t_inference"]\
        .components["isaac.ml.TensorRTInference"]
    if args.detection_model_file_path is not None:
        detection_model.config.model_file_path = args.detection_model_file_path
    if args.etlt_password is not None:
        detection_model.config.etlt_password = args.etlt_password

    # Configure pose estimation model
    pose_estimation_model = app.nodes\
        ["detection_pose_estimation.object_pose_estimation.pose_encoder"]\
        .components["TensorRTInference"]
    if args.pose_estimation_model_file_path is not None:
        pose_estimation_model.config.model_file_path = args.pose_estimation_model_file_path

    # Configure detection decoder
    decoder = app.nodes["detection_pose_estimation.object_detection.detection_decoder"]\
        .components["isaac.detect_net.DetectNetDecoder"]
    decoder.config.output_scale = [args.rows, args.cols]
    if args.confidence_threshold is not None:
        decoder.config.confidence_threshold = args.confidence_threshold
    if args.nms_threshold is not None:
        decoder.config.non_maximum_suppression_threshold = args.nms_threshold

    # Configure bounding box encoder
    bbox_encoder = app.nodes\
        ["detection_pose_estimation.object_pose_estimation.detection_convertor"]\
        .components["BoundingBoxEncoder"]
    bbox_encoder.config.image_dimensions = [args.rows, args.cols]

    # Take ROS image and camera intrinsics input
    app.load("packages/ros_bridge/apps/ros_to_perception.subgraph.json", prefix="ros")
    RosToImage = app.nodes["ros.ros_converters"].components["RosToImage"]
    RosToCameraIntrinsics = app.nodes["ros.ros_converters"].components["RosToCameraIntrinsics"]

    # Connect the output of RosToImage and RosToCameraIntrinsics to the detection subgraph
    app.connect(RosToImage, "proto", detection_pose_estimation_interface, "color")
    app.connect(RosToCameraIntrinsics, "proto", detection_pose_estimation_interface, "intrinsics")
    app.run()
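The snippet above reads everything from an `args` namespace built outside the snippet. A minimal argparse sketch that would satisfy it might look like the following; the flag names are assumed to mirror the attribute accesses and the defaults are illustrative, not taken from the original script.

import argparse

parser = argparse.ArgumentParser(description='Detection and pose estimation inference on ROS input')
parser.add_argument('--config', required=True, help='Configuration file to load')
parser.add_argument('--detection_model_file_path', help='Path to the detection model')
parser.add_argument('--etlt_password', help='Password for an encrypted .etlt model')
parser.add_argument('--pose_estimation_model_file_path', help='Path to the pose estimation model')
parser.add_argument('--rows', type=int, default=720, help='Image height in pixels')
parser.add_argument('--cols', type=int, default=1280, help='Image width in pixels')
parser.add_argument('--confidence_threshold', type=float, help='Detection confidence cutoff')
parser.add_argument('--nms_threshold', type=float, help='Non-maximum suppression threshold')

if __name__ == '__main__':
    main(parser.parse_args())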
Example 2
def main(args):
    '''
    Creates and runs the block pose estimation app on an RGBD image. The input image may come
    from sim (default), a cask recording, or a realsense camera.

    To run with sim (sim needs to be running first):
      bazel run apps/samples/pick_and_place:block_pose_estimation
    To see all options:
      bazel run apps/samples/pick_and_place:block_pose_estimation -- --help
    '''
    app = Application(name="block_pose_estimation")
    perception_interface = create_block_pose_estimation(app, use_refinement=args.refinement)

    if args.mode == 'cask':
        assert args.cask is not None, "cask path not specified. use --cask to set."
        # Load replay subgraph and configure interface node
        app.load("packages/cask/apps/replay.subgraph.json", prefix="replay")
        source = app.nodes["replay.interface"].components["output"]
        source.config.cask_directory = args.cask
        source.config.loop = True
    elif args.mode == "realsense":
        # Create realsense camera codelet
        app.load_module("realsense")
        source = app.add("camera").add(app.registry.isaac.RealsenseCamera)
        source.config.rows = 720
        source.config.cols = 1280
        source.config.color_framerate = 15
        source.config.depth_framerate = 15
    elif args.mode == "sim":
        app.load(filename='packages/navsim/apps/navsim_tcp.subgraph.json', prefix='simulation')
        source = app.nodes["simulation.interface"]["output"]

    app.connect(source, "color", perception_interface, "color")
    app.connect(source, "depth", perception_interface, "depth")
    app.connect(source, "color_intrinsics", perception_interface, "intrinsics")

    app.load_module("utils")
    pose_node = app.add("pose")
    pose_injector = pose_node.add(app.registry.isaac.utils.DetectionsToPoseTree)
    pose_injector.config.detection_frame = "camera"
    app.connect(perception_interface, "output_poses", pose_injector, "detections")

    app.run()
Example 3

    # Load components
    ur_interface = app.nodes["ur.subgraph"]["interface"]
    ur_controller = app.nodes["ur.controller"]["ScaledMultiJointController"]
    ur_driver = app.nodes["ur.universal_robots"]["UniversalRobots"]

    # Configs
    ur_controller.config.control_mode = "joint position"
    ur_driver.config.control_mode = "joint position"
    ur_driver.config.robot_ip = args.robot_ip
    ur_driver.config.headless_mode = args.headless_mode
    ur_driver.config.tool_digital_out_names = ["valve", "pump"]
    ur_driver.config.tool_digital_in_names = ["unknown", "gripper"]

    # Edges
    app.connect(ur_interface, "arm_state", behavior_interface, "joint_state")
    app.connect(ur_interface, "io_state", behavior_interface, "io_state")
    app.connect(behavior_interface, "io_command", ur_interface, "io_command")
    app.connect(behavior_interface, "joint_target", ur_interface, "joint_target")

    # Sequence nodes
    sequence_behavior = app.nodes["behavior.sequence_behavior"]
    repeat_behavior = app.nodes["behavior.repeat_behavior"]
    nodes_stopped = True

    # Enable sight
    widget = app.add("sight").add(app.registry.isaac.sight.SightWidget, "IO")
    widget.config.type = "plot"
    widget.config.channels = [
      {
        "name": "ur.universal_robots/UniversalRobots/gripper"
      }
    ]
Example 4
def main(args):
    app = Application(name="detection_pose_estimation_inference")

    # Load subgraph and get interface node
    app.load("packages/object_pose_estimation/apps/pose_cnn_decoder/"\
        "detection_pose_estimation_cnn_inference.subgraph.json",
        prefix="detection_pose_estimation")
    detection_pose_estimation_interface = app.nodes["detection_pose_estimation.interface"]\
        .components["Subgraph"]

    # Load configuration
    app.load(args.config)

    # Configure detection model
    detection_model = app.nodes["detection_pose_estimation.object_detection.tensor_r_t_inference"]\
        .components["isaac.ml.TensorRTInference"]
    if args.detection_model_file_path is not None:
        detection_model.config.model_file_path = args.detection_model_file_path
    if args.etlt_password is not None:
        detection_model.config.etlt_password = args.etlt_password

    # Configure pose estimation model
    pose_estimation_model = app.nodes\
        ["detection_pose_estimation.object_pose_estimation.pose_encoder"]\
        .components["TensorRTInference"]
    if args.pose_estimation_model_file_path is not None:
        pose_estimation_model.config.model_file_path = args.pose_estimation_model_file_path

    # Configure detection decoder
    decoder = app.nodes["detection_pose_estimation.object_detection.detection_decoder"]\
        .components["isaac.detect_net.DetectNetDecoder"]
    decoder.config.output_scale = [args.rows, args.cols]
    if args.confidence_threshold is not None:
        decoder.config.confidence_threshold = args.confidence_threshold
    if args.nms_threshold is not None:
        decoder.config.non_maximum_suppression_threshold = args.nms_threshold

    # Configure bounding box encoder
    bbox_encoder = app.nodes\
        ["detection_pose_estimation.object_pose_estimation.detection_convertor"]\
        .components["BoundingBoxEncoder"]
    bbox_encoder.config.image_dimensions = [args.rows, args.cols]

    # Configure detections filter
    detections_filter = app.nodes\
        ["detection_pose_estimation.object_pose_estimation.detection_filter"]\
        .components["FilterDetections"]
    detections_filter.config.image_cols = args.cols

    if args.mode == 'cask':
        # Load replay subgraph and configure interface node
        app.load("packages/cask/apps/replay.subgraph.json", prefix="replay")
        replay_interface = app.nodes["replay.interface"].components["output"]
        replay_interface.config.cask_directory = args.cask_directory

        # Connect the output of the replay subgraph to the detection subgraph
        app.connect(replay_interface, "color",
                    detection_pose_estimation_interface, "color")
        app.connect(replay_interface, "color_intrinsics",
                    detection_pose_estimation_interface, "intrinsics")
    elif args.mode == 'sim':
        # Load simulation subgraph and get interface node
        app.load("packages/object_pose_estimation/apps/pose_cnn_decoder"\
            "/pose_estimation_sim.subgraph.json",\
             prefix="simulation")
        simulation_interface = app.nodes["simulation.interface"].components[
            "TcpSubscriber"]

        # Connect the output of the simulation with user-specified channel to the detection subgraph
        app.connect(simulation_interface, args.image_channel,
                    detection_pose_estimation_interface, "color")
        app.connect(simulation_interface, args.intrinsics_channel,
                    detection_pose_estimation_interface, "intrinsics")
        # Set the scenario manager config options if scene name is provided
        if (args.scenario_scene is not None):
            scenario_manager = app.nodes[
                "simulation.scenario_manager"].components["ScenarioManager"]
            scenario_manager.config.scene = args.scenario_scene
            if (args.scenario_robot_prefab is not None):
                scenario_manager.config.robot_prefab = args.scenario_robot_prefab
    elif args.mode == 'realsense':
        app.load_module('realsense')
        # Create and configure realsense camera codelet
        camera = app.add("camera").add(app.registry.isaac.RealsenseCamera)
        camera.config.rows = args.rows
        camera.config.cols = args.cols
        camera.config.color_framerate = args.fps
        camera.config.depth_framerate = args.fps
        camera.config.enable_ir_stereo = False

        # Connect the output of the camera node to the detection subgraph
        app.connect(camera, "color", detection_pose_estimation_interface,
                    "color")
        app.connect(camera, "color_intrinsics",
                    detection_pose_estimation_interface, "intrinsics")
    elif args.mode == 'v4l':
        app.load_module('sensors:v4l2_camera')
        # Create and configure V4L camera codelet
        camera = app.add("camera").add(app.registry.isaac.V4L2Camera)
        camera.config.device_id = 0
        camera.config.rows = args.rows
        camera.config.cols = args.cols
        camera.config.rate_hz = args.fps

        # Connect the output of the camera node to the detection subgraph
        app.connect(camera, "frame", detection_pose_estimation_interface,
                    "color")
        app.connect(camera, "intrinsics", detection_pose_estimation_interface,
                    "intrinsics")
    elif args.mode == 'image':
        app.load_module('message_generators')
        # Create feeder node
        feeder = app.add("feeder").add(
            app.registry.isaac.message_generators.ImageLoader)
        feeder.config.color_glob_pattern = args.image_directory
        feeder.config.tick_period = "1Hz"

        # Create intrinsic node
        intrinsic = app.add("intrinsic").add(
            app.registry.isaac.message_generators.CameraIntrinsicsGenerator)
        intrinsic.config.focal_length = [args.focal_length, args.focal_length]
        intrinsic.config.optical_center = [
            args.optical_center_rows, args.optical_center_cols
        ]
        intrinsic.config.distortion_coefficients = [
            0.01, 0.01, 0.01, 0.01, 0.01
        ]

        # Connect the output of the image feeder node to the detection subgraph
        app.connect(feeder, "color", detection_pose_estimation_interface,
                    "color")
        # Connect the output of the intrinsic node to the detection subgraph
        app.connect(feeder, "color", intrinsic, "image")
        app.connect(intrinsic, "intrinsics",
                    detection_pose_estimation_interface, "intrinsics")
    else:
        raise ValueError('Unsupported mode {}'.format(args.mode))
    app.run()
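All five input modes above feed the same two interface channels ("color" and "intrinsics"), so selecting a source reduces to a single --mode flag whose invalid values are caught by the trailing ValueError. A sketch of the corresponding parser entry, assumed to mirror the attribute access rather than taken from the original script:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--mode',
                    choices=['cask', 'sim', 'realsense', 'v4l', 'image'],
                    default='cask',
                    help='Input source for color images and intrinsics')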
Example 5

        raise ValueError("pose must be a 7-element list, got {0}".format(
            len(args.pose)))

    app = Application(name="End Effector Control", modules=['sight'])
    # add controller subgraph/codelets
    lqr_interface, cartesian_planner = add_cartesian_control(
        app, args.arm, args.end_effector, args.pose, args.speed,
        args.acceleration)

    # load sim subgraph or driver codelet
    if args.mode == 'sim':
        app.load("packages/navsim/apps/navsim_tcp.subgraph.json",
                 prefix="simulation")
        sim_in = app.nodes["simulation.interface"]["input"]
        sim_out = app.nodes["simulation.interface"]["output"]
        app.connect(sim_out, "joint_state", lqr_interface, "joint_state")
        app.connect(sim_out, "joint_state", cartesian_planner, "joint_state")
        app.connect(lqr_interface, "joint_command", sim_in, "joint_position")
    elif args.mode == 'hardware':
        if args.arm == 'kinova':
            app.load_module("kinova_jaco")
            arm = app.add("arm").add(app.registry.isaac.kinova_jaco.KinovaJaco)
            arm.config.kinematic_tree = "lqr.kinematic_tree"
            arm.config.kinova_jaco_sdk_path = args.kinova_api
            arm.config.tick_period = "10ms"
            # for kinova hardware we use speed control, since it's much smoother. angle control
            # seems to only able to move one joint at a time and thus very jerky
            arm.config.control_mode = "joint velocity"
            controller = app.nodes["lqr.controller"]["MultiJointController"]
            controller.config.control_mode = "speed"
            controller.config.command_delay = 0.1
Example 6
    app.load("packages/flatsim/apps/flatsim.subgraph.json", prefix="flatsim")
    app.load("packages/flatsim/apps/{}.json".format(demo))
    if args.build_graph:
        app.load(
            "packages/path_planner/apps/pose2_graph_builder.subgraph.json")
    if args.use_graph_planner:
        app.load("packages/flatsim/apps/pose2_graph_planner.config.json")
    if (args.robot):
        app.load("packages/flatsim/apps/{}.config.json".format(args.robot))

    if args.mission_robot_name:
        # Load the mission subgraph and set the config based on the input parameters
        app.load("packages/behavior_tree/apps/missions.graph.json")
        app.nodes["tcp_client"].components["JsonTcpClient"].config[
            "host"] = args.mission_host
        app.nodes["tcp_client"].components["JsonTcpClient"].config[
            "port"] = args.mission_port
        app.nodes["mission_control"].components["NodeGroup"].config["node_names"] = \
            ["flatsim.goals.goal_behavior"]
        app.nodes["robot_name"].components["JsonMockup"].config["json_mock"] = \
            {"text":args.mission_robot_name}
        app.nodes["flatsim.goals.goal_behavior"].config[
            "disable_automatic_start"] = True
        # Send the navigation output back through the json tcp client
        app.connect(
            app.nodes["flatsim.navigation.subgraph"].components["interface"],
            "feedback", app.nodes["tcp_client"].components["JsonTcpClient"],
            "feedback")

    app.run()
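Note the two equivalent ways this example manipulates configuration: attribute access (config.port = ...) and item access (config["host"] = ...). The item form is handy when the parameter name is held in a variable or is not a valid Python identifier. A minimal illustration, with hypothetical values:

client = app.nodes["tcp_client"].components["JsonTcpClient"]
client.config.host = "localhost"         # attribute style
client.config["port"] = 9000             # item style, same effect
param = "host"
client.config[param] = "192.168.0.10"    # item style with a dynamic key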
Example 7
                                       ] * len(arm_joint_names)

    # Load driver/sim subgraph, use driver_in / driver_out as the component for arm and camera data
    if args.hardware:
        # Placeholder, should add hardware subgraph for each arm later
        raise ValueError("Hardware not supported for arm {0}".format(args.arm))
    else:
        # Omniverse needs to be running and configured properly. Load the subgraphs required for
        # connecting to Omniverse.
        app.load('packages/navsim/apps/navsim_tcp.subgraph.json', 'simulation')
        driver_in = app.nodes['simulation.interface']['input']
        driver_out = app.nodes['simulation.interface']['output']

    # Connect the LQR planner arm input/output
    controller_interface = app.nodes['controller.subgraph']['interface']
    app.connect(driver_out, 'joint_state', controller_interface, 'joint_state')
    app.connect(controller_interface, 'joint_command', driver_in,
                'joint_position')

    # The mission tasks used in this example: picking and placing objects.
    task_graphs = [
        app.nodes['pick_task.interface']['Subgraph'],
        app.nodes['place_task.interface']['Subgraph']
    ]

    # Go through all task graphs and connect their channels to the appropriate infrastructure nodes.
    for task_graph in task_graphs:
        # Connect the edges between the task, the controller, and the state observation node.
        app.connect(driver_out, 'joint_state', task_graph, 'joint_state')
        app.connect(task_graph, 'joint_command', controller_interface,
                    'joint_target')
Example 8
    # Create and start the app
    app = Application(name="Shuffle Box")
    # load behavior subgraph. This contains the sequence behavior to move the arm between
    # waypoints and control suction on/off
    app.load("apps/samples/manipulation/shuffle_box_behavior.subgraph.json",
             prefix="behavior")
    behavior_interface = app.nodes["behavior.interface"]["subgraph"]
    app.nodes["behavior.atlas"]["CompositeAtlas"].config.cask = args.cask
    app.load("packages/planner/apps/multi_joint_lqr_control.subgraph.json",
             prefix="lqr")

    # load multi joint lqr control subgraph
    lqr_interface = app.nodes["lqr.subgraph"]["interface"]
    kinematic_tree = app.nodes["lqr.kinematic_tree"]["KinematicTree"]
    lqr_planner = app.nodes["lqr.local_plan"]["MultiJointLqrPlanner"]
    app.connect(behavior_interface, "joint_target", lqr_interface,
                "joint_target")
    kinematic_tree.config.kinematic_file = kinematic_file
    lqr_planner.config.speed_min = [-1.5] + [-1.0] * (len(joints) - 1)
    lqr_planner.config.speed_max = [1.5] + [1.0] * (len(joints) - 1)
    lqr_planner.config.acceleration_min = [-1.5] + [-1.0] * (len(joints) - 1)
    lqr_planner.config.acceleration_max = [1.5] + [1.0] * (len(joints) - 1)

    # load perception subgraph for KLTBox detection in ur10 usecase
    use_perception = False
    if args.arm == "ur10":
        use_perception = True
        app.load(
            "packages/object_pose_estimation/apps/pose_cnn_decoder" \
            "/detection_pose_estimation_cnn_inference.subgraph.json",
            prefix="detection_pose_estimation")
        perception_interface = app.nodes[
Example 9
def main(args):
    app = Application(name="detect_net_inference")

    # Load subgraph and get interface node
    app.load("packages/detect_net/apps/detect_net_inference.subgraph.json",
             prefix="detect_net_inference")
    detect_net_interface = app.nodes["detect_net_inference.subgraph"]\
        .components["interface"]

    # Load configuration
    app.load(args.config)

    # Configure detection model
    detection_model = app.nodes["detect_net_inference.tensor_r_t_inference"]\
        .components["isaac.ml.TensorRTInference"]
    if args.detection_model_file_path is not None:
        detection_model.config.model_file_path = args.detection_model_file_path
    if args.etlt_password is not None:
        detection_model.config.etlt_password = args.etlt_password

    # Configure detection decoder
    decoder = app.nodes["detect_net_inference.detection_decoder"]\
        .components["isaac.detect_net.DetectNetDecoder"]
    decoder.config.output_scale = [args.rows, args.cols]
    if args.confidence_threshold is not None:
        decoder.config.confidence_threshold = args.confidence_threshold
    if args.nms_threshold is not None:
        decoder.config.non_maximum_suppression_threshold = args.nms_threshold

    if args.mode == 'cask':
        # Load replay subgraph and configure interface node
        app.load("packages/cask/apps/replay.subgraph.json", prefix="replay")
        replay_interface = app.nodes["replay.interface"].components["output"]
        replay_interface.config.cask_directory = args.cask_directory

        # Connect the output of the replay subgraph to the detection subgraph
        app.connect(replay_interface, "color", detect_net_interface, "image")
    elif args.mode == 'sim':
        # Load simulation subgraph and get interface node
        app.load("packages/navsim/apps/navsim_training.subgraph.json",\
             prefix="simulation")
        simulation_interface = app.nodes["simulation.interface"].components[
            "output"]

        # Connect the output of the simulation with user-specified channel to the detection subgraph
        app.connect(simulation_interface, args.image_channel,
                    detect_net_interface, "image")
    elif args.mode == 'realsense':
        app.load_module('realsense')
        # Create and configure realsense camera codelet
        camera = app.add("camera").add(app.registry.isaac.RealsenseCamera)
        camera.config.rows = args.rows
        camera.config.cols = args.cols
        camera.config.color_framerate = args.fps
        camera.config.depth_framerate = args.fps
        camera.config.enable_ir_stereo = False

        # Connect the output of the camera node to the detection subgraph
        app.connect(camera, "color", detect_net_interface, "image")
    elif args.mode == 'v4l':
        app.load_module('sensors:v4l2_camera')
        # Create and configure V4L camera codelet
        camera = app.add("camera").add(app.registry.isaac.V4L2Camera)
        camera.config.device_id = 0
        camera.config.rows = args.rows
        camera.config.cols = args.cols
        camera.config.rate_hz = args.fps

        # Connect the output of the camera node to the detection subgraph
        app.connect(camera, "frame", detect_net_interface, "image")
    elif args.mode == 'image':
        app.load_module('message_generators')
        # Create feeder node
        feeder = app.add("feeder").add(
            app.registry.isaac.message_generators.ImageLoader)
        feeder.config.color_glob_pattern = args.image_directory
        feeder.config.tick_period = "1Hz"

        # Connect the output of the image feeder node to the detection subgraph
        app.connect(feeder, "color", detect_net_interface, "image")
    else:
        raise ValueError('Unsupported mode {}'.format(args.mode))
    app.run()
Example 10
    camera = app.add('camera').add(app.registry.isaac.ZedCamera)
    camera.config.enable_factory_rectification = True
    camera.config.enable_imu = args.imu
    camera.config.camera_fps = 60
    camera.config.resolution = "672x376"
    camera.config.gray_scale = True
    camera.config.rgb = False

    tracker = app.nodes['svo.tracker'].components['StereoVisualOdometry']
    tracker.config.horizontal_stereo_camera = True
    tracker.config.process_imu_readings = args.imu
    tracker.config.lhs_camera_frame = "zed_left_camera"
    tracker.config.rhs_camera_frame = "zed_right_camera"
    tracker.config.imu_frame = "zed_imu"

    if (args.imu):
        camera_imu_reader = app.nodes['camera'].add(
            app.registry.isaac.zed.ZedImuReader)
        camera_imu_reader.config.tick_period = "300Hz"
        camera_imu_reader.config.vendor_imu_calibration = False
        app.connect(camera_imu_reader, "imu_raw", svo_interface, "imu")

    app.connect(camera, "left_camera_gray", svo_interface, "left_image")
    app.connect(camera, "left_intrinsics", svo_interface, "left_intrinsics")

    app.connect(camera, "right_camera_gray", svo_interface, "right_image")
    app.connect(camera, "right_intrinsics", svo_interface, "right_intrinsics")

    app.run()
Example 11
             "simulation")
    app.load(
        "packages/navigation/apps/differential_base_navigation.subgraph.json",
        "navigation")
    app.load("packages/navigation/apps/goal_generators.subgraph.json", "goals")
    app.load(
        "packages/visual_slam/apps/stereo_visual_odometry_rgb.subgraph.json",
        "svo")

    simulation_interface = app.nodes['simulation.interface']
    simulation_output = simulation_interface["output"]

    # connect the simulation with navigation stack
    navigation_interface = app.nodes['navigation.subgraph'].components[
        'interface']
    app.connect(simulation_output, "base_state", navigation_interface, "state")
    app.connect(simulation_output, "imu_raw", navigation_interface, "imu_raw")
    noisy = simulation_interface["noisy"]
    app.connect(noisy, "flatscan", navigation_interface,
                "flatscan_for_localization")
    app.connect(noisy, "flatscan", navigation_interface,
                "flatscan_for_obstacles")
    app.connect(navigation_interface, "command", simulation_interface["input"],
                "base_command")

    # connect the navigation with goals
    goals_interface = app.nodes['goals.subgraph'].components['interface']
    app.connect(goals_interface, "goal", navigation_interface, "goal")
    app.connect(navigation_interface, "feedback", goals_interface, "feedback")

    # connect the simulation mini Sight with navigation
Example 12
def main(args):
    app = Application(name="dope_inference")

    # Load dope inference subgraph
    app.load(
        "packages/object_pose_estimation/apps/dope/dope_inference.subgraph.json",
        "detection_pose_estimation")
    detection_pose_estimation_interface = app.nodes[
        "detection_pose_estimation.interface"]["subgraph"]

    if args.mode == 'cask':
        # Load replay subgraph and configure interface node
        app.load("packages/cask/apps/replay.subgraph.json", prefix="replay")
        replay = app.nodes["replay.interface"].components["output"]
        replay.config.cask_directory = args.file
        app.connect(replay, "color", detection_pose_estimation_interface,
                    "color")
        app.connect(replay, "color_intrinsics",
                    detection_pose_estimation_interface, "intrinsics")
    elif args.mode == "realsense":
        app.load_module('realsense')
        # Create and configure realsense camera codelet
        camera = app.add("camera").add(app.registry.isaac.RealsenseCamera)
        camera.config.rows = 360
        camera.config.cols = 640
        camera.config.color_framerate = 15
        camera.config.depth_framerate = 15
        camera.config.enable_ir_stereo = False
        app.connect(camera, "color", detection_pose_estimation_interface,
                    "color")
        app.connect(camera, "color_intrinsics",
                    detection_pose_estimation_interface, "intrinsics")
    elif args.mode == "sim":
        app.load(filename='packages/navsim/apps/navsim_tcp.subgraph.json',
                 prefix='simulation')
        sim = app.nodes["simulation.interface"]["output"]
        app.connect(sim, "color", detection_pose_estimation_interface, "color")
        app.connect(sim, "color_intrinsics",
                    detection_pose_estimation_interface, "intrinsics")
    elif args.mode == "image":
        app.load_module("message_generators")
        image_loader = app.add("image").add(
            app.registry.isaac.message_generators.ImageLoader, "ImageLoader")
        image_loader.config.color_filename = args.file
        image_loader.config.tick_period = "1Hz"
        app.connect(image_loader, "color", detection_pose_estimation_interface,
                    "color")

        # Create intrinsic node
        intrinsic = app.add("intrinsic").add(
            app.registry.isaac.message_generators.CameraIntrinsicsGenerator)
        intrinsic.config.focal_length = [args.focal, args.focal]
        intrinsic.config.optical_center = args.optical_center
        intrinsic.config.distortion_coefficients = [
            0.01, 0.01, 0.01, 0.01, 0.01
        ]
        app.connect(image_loader, "color", intrinsic, "image")
        app.connect(intrinsic, "intrinsics",
                    detection_pose_estimation_interface, "intrinsics")

    # TensorRT inference
    trt = app.nodes["detection_pose_estimation.inference"]["TensorRTInference"]
    trt.config.model_file_path = args.model
    model_dir, filename = os.path.split(args.model)
    trt.config.engine_file_path = "{0}/{1}.plan".format(
        model_dir,
        os.path.splitext(filename)[0])

    # Dope decoder
    decoder = app.nodes["detection_pose_estimation.decoder"]["DopeDecoder"]
    decoder.config.box_dimensions = args.box
    decoder.config.label = args.label

    # Detections3Viewer
    detection3_viewer = app.nodes["detection_pose_estimation.viewers"][
        "Detections3Viewer"]
    detection3_viewer.config.box_dimensions = args.box

    if args.camera_pose is not None:
        app.load_module("utils")
        pose_node = app.add("pose")
        detection_injector = pose_node.add(
            app.registry.isaac.utils.DetectionsToPoseTree)
        detection_injector.config.detection_frame = "camera"
        app.connect(decoder, "output_poses", detection_injector, "detections")
        camera_initializer = pose_node.add(
            app.registry.isaac.alice.PoseInitializer)
        camera_initializer.config.lhs_frame = "world"
        camera_initializer.config.rhs_frame = "camera"
        camera_initializer.config.pose = args.camera_pose

    app.run()
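The TensorRT engine path above is derived from the model path by swapping the extension for .plan in the same directory. A small worked example (the path is illustrative, not from the original script):

import os

model = "/tmp/models/dope_object.onnx"
model_dir, filename = os.path.split(model)    # "/tmp/models", "dope_object.onnx"
engine = "{0}/{1}.plan".format(model_dir, os.path.splitext(filename)[0])
assert engine == "/tmp/models/dope_object.plan"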
Example 13

    parser.add_argument('--rows',
                        dest='rows',
                        type=int,
                        default=720,
                        help='The vertical resolution of the camera images')
    parser.add_argument('--cols',
                        dest='cols',
                        type=int,
                        default=1280,
                        help='The horizontal resolution of the camera images')
    args, _ = parser.parse_known_args()

    app = Application(name="record_dummy",
                      modules=["message_generators", "cask"])
    app.load_module('sight')

    # Create recorder node
    recorder = app.add("recorder").add(app.registry.isaac.cask.Recorder)
    recorder.config.base_directory = args.base_directory

    # Create dummy camera codelet
    camera = app.add("cam").add(
        app.registry.isaac.message_generators.CameraGenerator)
    camera.config.rows = args.rows
    camera.config.cols = args.cols
    camera.config.tick_period = str(args.fps) + "Hz"
    app.connect(camera, "color_left", recorder, "color")
    app.connect(camera, "depth", recorder, "depth")

    app.run()
Example 14
    args, _ = parser.parse_known_args()

    app = Application(name="record_realsense",
                      modules=["cask", "realsense", "sight"])

    if args.mode == '720p':
        rows = 720
        cols = 1280
    elif args.mode == '640x480':
        rows = 480
        cols = 640
    else:
        raise ValueError('Unsupported camera resolution')

    # Create recorder node
    recorder = app.add("recorder").add(app.registry.isaac.cask.Recorder)
    recorder.config.base_directory = args.base_directory

    # Create realsense camera codelet
    camera = app.add("cam").add(app.registry.isaac.RealsenseCamera)
    camera.config.rows = rows
    camera.config.cols = cols
    camera.config.color_framerate = args.fps
    camera.config.depth_framerate = args.fps
    app.connect(camera, "color", recorder, "color")
    app.connect(camera, "color_intrinsics", recorder, "color_intrinsics")
    app.connect(camera, "depth", recorder, "depth")
    app.connect(camera, "depth_intrinsics", recorder, "depth_intrinsics")

    app.run()
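A recording produced this way can be played back with the same replay subgraph used by several of the inference examples above. A sketch, with an assumed cask path:

from isaac import Application

app = Application(name="replay_realsense")
app.load("packages/cask/apps/replay.subgraph.json", prefix="replay")
replay = app.nodes["replay.interface"].components["output"]
replay.config.cask_directory = "/tmp/recordings/my_cask"    # assumed path
# replay now publishes the recorded "color", "depth", and intrinsics channels
app.run()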
Example 15
class TestApplication(unittest.TestCase):
    '''
    Test loading subgraph via the application API
    '''
    @classmethod
    def setUpClass(cls):
        # method will be run once before any test is run
        pass

    @classmethod
    def tearDownClass(cls):
        # method will be run once after all tests have run
        pass

    def setUp(self):
        # run before each test
        return super().setUp()

    def tearDown(self):
        # run after each test
        return super().tearDown()

    def test_get_component(self):
        self._app = Application()
        self._app.add('node')

        node = self._app.nodes['node']
        self.assertIsNotNone(node)

        # Returns None if no component with that name exists
        component = node['ledger']
        self.assertIsNone(component)

        component = node['MessageLedger']
        self.assertIsNotNone(component)

        self._app.start()
        self._app.stop()

    def test_load_subgraph(self):
        self._app = Application()
        self._app.add('node')

        # loads subgraph and checks if the node/component are created, config is good
        self._app.load('packages/pyalice/tests/application_test.subgraph.json',
                       'foo')

        node = self._app.nodes['foo.camera_viewer']
        component = node['ImageViewer']
        self.assertIsNotNone(component)

        fps_readback = self._app.nodes['foo.camera_viewer'].components[
            'ImageViewer'].config['target_fps']
        self.assertEqual(fps_readback, 11)

        camera_name_readback = self._app.nodes['foo.camera_viewer'].components[
            'ImageViewer'].config['camera_name']
        self.assertEqual(camera_name_readback, 'bar')

        # loads the subgraph again with different prefix name
        self._app.load('packages/pyalice/tests/application_test.subgraph.json',
                       'foo1')
        node = self._app.nodes['foo1.camera_viewer']
        component = node['ImageViewer']
        self.assertIsNotNone(component)

        fps_readback = self._app.nodes['foo1.camera_viewer'].components[
            'ImageViewer'].config['target_fps']
        self.assertEqual(fps_readback, 11)

        camera_name_readback = self._app.nodes[
            'foo1.camera_viewer'].components['ImageViewer'].config[
                'camera_name']
        self.assertEqual(camera_name_readback, 'bar')

        self._app.start()
        self._app.stop()

    def test_load_bogus_subgraph(self):
        bogus_json_filename = ''
        with tempfile.NamedTemporaryFile('w') as f:
            f.write('this is bogus json')
            bogus_json_filename = f.name

        self._app = Application()
        with self.assertRaises(ValueError):
            self._app.load('/no/such/file')

        with self.assertRaises(ValueError):
            self._app.load(bogus_json_filename)

    def test_load_subgraph_in_asset_path(self):
        self._app = Application()
        self._app.add('node')

        # override the asset path to point to our test location
        self._app._asset_path = 'packages/pyalice/tests/mock_asset_path'

        # loads subgraph and checks if the node/component are created, config is good
        self._app.load('mock_installed_application_test.subgraph.json', 'foo')

        pynode = self._app.nodes['foo.camera_viewer']
        self.assertIsNotNone(pynode)
        component = pynode['ImageViewer']
        self.assertIsNotNone(component)

        fps_readback = self._app.nodes['foo.camera_viewer'].components[
            'ImageViewer'].config['target_fps']
        self.assertEqual(fps_readback, 11)

        camera_name_readback = self._app.nodes['foo.camera_viewer'].components[
            'ImageViewer'].config['camera_name']
        self.assertEqual(camera_name_readback, 'bar')

        self._app.start()
        self._app.stop()

    def test_module_explorer(self):
        self._app = Application()
        self._app.load_module('json')
        self._app.registry.isaac.json.JsonToProto
        with self.assertRaises(RuntimeError):
            self._app.registry.isaac.foo

    def test_node_accesor(self):
        self._app = Application()
        self._app.load_module('json')

        node = self._app.add('foo1')
        self.assertIsNotNone(node)
        component = node.add(self._app.registry.isaac.json.JsonToProto, 'bar1')
        self.assertIsNotNone(component)

        node = self._app.add('foo2')
        self.assertIsNotNone(node)
        component = node.add(self._app.registry.isaac.json.ProtoToJson, 'bar2')
        self.assertIsNotNone(component)

        self.assertIsNotNone(self._app.nodes['foo1'])
        self.assertIsNotNone(self._app.nodes['foo2'])
        self.assertIsNone(self._app.nodes['foo3'])

        self._app.start()
        self._app.stop()

    def test_load_module(self):
        self._app = Application()

        result = self._app.load_module('message_generators')
        self.assertTrue(result)

        component = self._app.registry.isaac.message_generators.ImageLoader
        self.assertIsNotNone(component)
        self.assertEqual(str(component),
                         "['isaac::message_generators::ImageLoader']")

    def test_clock(self):
        self._app = Application()

        self._app.start()
        clock = self._app.clock
        self.assertIsNotNone(clock)
        self.assertIsNotNone(clock.time)
        cur_time = clock.time
        self.assertGreater(cur_time, 0.0)
        self.assertGreater(clock.time, cur_time)
        self._app.stop()

    def test_pose(self):
        self._app = Application()

        self._app.start()

        self.assertTrue(
            self._app.atlas.set_pose(
                'foo', 'bar', 1.0,
                [np.quaternion(1.0, 0.0, 0.0, 0.0),
                 np.array([0.0, 0.0, 0.0])]))
        self.assertTrue(
            self._app.atlas.set_pose(
                'foo', 'bar', 2.0,
                [np.quaternion(1.0, 0.0, 0.0, 0.0),
                 np.array([1.0, 2.0, 3.0])]))

        read_pose = self._app.atlas.pose('foo', 'bar', 1.5)
        self.assertIsNotNone(read_pose)
        self.assertEqual(len(read_pose), 2)

        q = read_pose[0]
        t = read_pose[1]
        self.assertLess(abs(q.w - 1.0), 1e-6)
        self.assertLess(abs(q.x), 1e-6)
        self.assertLess(abs(q.y), 1e-6)
        self.assertLess(abs(q.z), 1e-6)
        self.assertLess(abs(t[0] - 0.5), 1e-6)
        self.assertLess(abs(t[1] - 1.0), 1e-6)
        self.assertLess(abs(t[2] - 1.5), 1e-6)

        self._app.stop()

    def test_node_start_stop(self):
        self._app = Application()

        result = self._app.load_module('message_generators')
        self.assertTrue(result)

        node = self._app.add('src')
        self.assertIsNotNone(node)
        component = node.add(
            self._app.registry.isaac.message_generators.PanTiltStateGenerator,
            'pantilt')
        component.config['tick_period'] = '20 Hz'
        self.assertIsNotNone(component)

        node_sink = self._app.add('sink')
        self.assertIsNotNone(node_sink)

        self._app.connect(component, 'target',
                          node_sink.components['MessageLedger'], 'rcv')

        self._app.start()

        time.sleep(0.1)
        msg = self._app.receive('sink', 'MessageLedger', 'rcv')
        self.assertIsNotNone(msg)

        node.stop()
        # Drain the message that may have arrived before the node stopped
        msg = self._app.receive('sink', 'MessageLedger', 'rcv')
        time.sleep(0.1)
        msg = self._app.receive('sink', 'MessageLedger', 'rcv')
        self.assertIsNone(msg)
        time.sleep(0.1)
        msg = self._app.receive('sink', 'MessageLedger', 'rcv')
        self.assertIsNone(msg)

        node.start()

        time.sleep(0.1)
        msg = self._app.receive('sink', 'MessageLedger', 'rcv')
        self.assertIsNotNone(msg)

        self._app.stop()

        with self.assertRaises(RuntimeError):
            self._app.stop()

    def test_wait_for_node(self):
        self._app = Application()
        self._app.load_module('behavior_tree')
        self._app.add('node').add(
            self._app.registry.isaac.behavior_tree.TimerBehavior)
        self._app.start()
        status = self._app.wait_for_node('node')
        self._app.stop()
        self.assertEqual(str(status), 'Status.Success',
                         'Should reach Status.Success in 1 second.')

    def test_wait_for_node_timeout(self):
        self._app = Application()
        self._app.load_module('behavior_tree')
        self._app.add('node').add(
            self._app.registry.isaac.behavior_tree.TimerBehavior)
        self._app.start()
        status = self._app.wait_for_node('node', duration=0.1)
        self._app.stop()
        self.assertEqual(str(status), 'Status.Running',
                         'Should still be in Status.Running')

    def test_run_until_succeed(self):
        now_secs = time.time()

        self._app = Application()
        self._app.add('success')

        node_success = self._app.nodes['success']
        self.assertIsNotNone(node_success)

        self._app.nodes['success'].add(SucceedLaterCodelet)

        self._app.run('success')

        delta_secs = time.time() - now_secs
        self.assertGreater(delta_secs, 0.4)

    def test_run_until_failure(self):
        now_secs = time.time()

        self._app = Application()
        self._app.add('failure')

        node_failure = self._app.nodes['failure']
        self.assertIsNotNone(node_failure)

        self._app.nodes['failure'].add(FailureLaterCodelet)

        self._app.run('failure')

        delta_secs = time.time() - now_secs
        self.assertGreater(delta_secs, 0.4)
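
    # SucceedLaterCodelet and FailureLaterCodelet are defined elsewhere in this
    # test module. A plausible minimal sketch of the first one, assuming the
    # pyalice Codelet API with report_success/report_failure:
    #
    #   class SucceedLaterCodelet(Codelet):
    #       def start(self):
    #           self._start_time = time.time()
    #           self.tick_periodically(0.1)
    #
    #       def tick(self):
    #           # Report success roughly half a second after startup, which is
    #           # why the tests above expect run() to block for > 0.4 seconds.
    #           if time.time() - self._start_time > 0.5:
    #               self.report_success()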

    def test_perf_report(self):
        PERF_REPORT_PATH = '/tmp/perf_report'
        if os.path.exists(PERF_REPORT_PATH):
            os.remove(PERF_REPORT_PATH)

        self._app = Application(
            argv=['--performance_report_out', PERF_REPORT_PATH])
        self._app.load_module('behavior_tree')
        self._app.add('node').add(
            self._app.registry.isaac.behavior_tree.TimerBehavior)
        self._app.run(0.2)
        self.assertTrue(os.path.exists(PERF_REPORT_PATH))

    def test_max_duration(self):
        time_now = time.time()
        self._app = Application(argv=['--max_duration', '0.5s'])
        self._app.load_module('behavior_tree')
        self._app.add('node').add(
            self._app.registry.isaac.behavior_tree.TimerBehavior)
        self._app.run(2.0)
        time_dt = time.time() - time_now

        self.assertLess(time_dt, 1.0)

        time_now = time.time()
        self._app = Application(argv=['--max_duration', '0.5s'])
        self._app.load_module('behavior_tree')
        self._app.add('node').add(
            self._app.registry.isaac.behavior_tree.TimerBehavior)
        self._app.run(2)
        time_dt = time.time() - time_now

        self.assertLess(time_dt, 1.0)

    def test_print_node(self):
        self._app = Application()
        self._app.add('foo')

        node_names = self._app.nodes._names
        self.assertTrue(isinstance(node_names, list) and 'foo' in node_names)

        self._app.add('bar')
        node_names = self._app.nodes._names
        self.assertTrue(
            isinstance(node_names, list) and 'foo' in node_names
            and 'bar' in node_names)

        foo_node = None
        bar_node = None
        for n in self._app.nodes:
            if n.name == 'foo':
                foo_node = n
            if n.name == 'bar':
                bar_node = n

        self.assertIsNotNone(foo_node)
        self.assertIsNotNone(bar_node)

    def test_expand_asset(self):
        self._app = Application()
        ws, path = self._app._app.expand_asset_path('@workspace_//foo/bar')
        self.assertEqual(path, 'external/workspace_/foo/bar')
        self.assertEqual(ws, 'workspace_')

        ws1 = self._app.home_workspace_name
        self.assertEqual(len(ws1), 0)
        self._app.home_workspace_name = 'workspace_'
        ws2 = self._app.home_workspace_name
        self.assertEqual(ws2, 'workspace_')

        ws3, path3 = self._app._app.expand_asset_path('@workspace_//foo/bar')
        self.assertEqual(ws3, 'workspace_')
        self.assertEqual(path3, 'foo/bar')
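This suite exercises the pyalice Application API end to end: component lookup, subgraph loading, module registry, clock, pose tree, and node lifecycle. It is typically run inside the SDK's bazel workspace, e.g. with a target along the lines of `bazel test //packages/pyalice/tests:application_test` (target name assumed from the subgraph paths used above).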
Example 16
    app.load(
        "packages/navigation/apps/differential_base_imu_odometry.subgraph.json",
        "odometry")
    app.load(
        "packages/navigation/apps/differential_base_commander.subgraph.json",
        "commander")
    app.load(
        "packages/visual_slam/apps/stereo_visual_odometry_rgb.subgraph.json",
        "svo")

    simulation_interface = app.nodes['simulation.interface']
    simulation_output = simulation_interface["output"]

    # connect the simulation with the odometry and commander
    odometry_interface = app.nodes['odometry.subgraph'].components['interface']
    app.connect(simulation_output, "base_state", odometry_interface,
                "base_state")
    app.connect(simulation_output, "imu_raw", odometry_interface, "imu_raw")
    commander_interface = app.nodes['commander.subgraph'].components[
        'interface']
    app.connect(commander_interface, "command", simulation_interface["input"],
                "base_command")

    # connect the simulation with SVIO
    svo_interface = app.nodes['svo.subgraph'].components['interface']
    app.connect(simulation_output, "color_left", svo_interface, "left_image")
    app.connect(simulation_output, "color_left_intrinsics", svo_interface,
                "left_intrinsics")
    app.connect(simulation_output, "color_right", svo_interface, "right_image")
    app.connect(simulation_output, "color_right_intrinsics", svo_interface,
                "right_intrinsics")
Example 17

    app = Application(name='station_mission', modules=['engine_tcp_udp', 'sight'])
    app.nodes["websight"]["WebsightServer"].config.port = args.sight_port

    # Add TCP publisher for task
    simulation_node = app.add("simulation")
    simulation_node.add(app.registry.isaac.alice.TimeSynchronizer)
    tcp_publisher = simulation_node.add(app.registry.isaac.alice.TcpPublisher)
    tcp_publisher.config.port = args.sim_port

    task_manager_node = app.add("task_manager")
    cube_manager = task_manager_node.add(TeleportManager, 'CubeManager')
    cube_manager.config.num_assets = args.num_cubes
    cube_manager.config.type = 'cube'
    cube_manager.config.name = "/environments/cubes/green_block_{0:02d}/Cube"
    app.connect(cube_manager, "teleport", tcp_publisher, "teleport")

    mission_server = MissionServer(port=args.mission_port)
    station_manager = task_manager_node.add(RobotManager, 'StationManager')
    station_manager.config.idle_threshold = 3
    station_manager.config.robot_name = "station"

    task_manager_node.add(MissionCoordinator, 'MissionCoordinator')

    scenario = Scenario(["station", "cube"])
    # Set scenario and mission server (if applicable) for all PyCodelets
    for _, frontend in app._pycodelet_frontends.items():
        frontend.scenario = scenario
        if isinstance(frontend, RobotManager):
            frontend.mission_server = mission_server
Example 18
from isaac import Application
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Measures recording performance')
    args = parser.parse_args()

    app = Application(name="write_speed_test", modules=[
        "message_generators",
        "cask",
        "sight",
    ])

    img_gen_node = app.add("img_gen")
    img_gen = img_gen_node.add(app.registry.isaac.message_generators.CameraGenerator)
    img_gen.config.rows = 720
    img_gen.config.cols = 1080
    img_gen.config.tick_period = "15 Hz"

    recorder_node = app.add("recorder")
    recorder = recorder_node.add(app.registry.isaac.cask.Recorder)
    recorder.config.use_compression = True

    app.connect(img_gen, "color_left", recorder, "img1")
    app.connect(img_gen, "color_right", recorder, "img2")

    app.start_wait_stop(duration=10.0)
Example 19
                        default=10,
                        help='Number of virtual lidar nodes to run')
    parser.add_argument('--points_per_message',
                        dest='points_per_message',
                        default=10,
                        help='Number of virtual lidar points to generate at each tick per node')
    parser.add_argument('--node_tick_rate',
                        dest='node_tick_rate',
                        default='50Hz',
                        help='Rate at which each node should generate sets of points')
    args, _ = parser.parse_known_args()

    app = Application(name="record_small_point_clouds_test",
                      modules=["message_generators", "cask", "sight"])
    recorder = app.add("recorder").add(app.registry.isaac.cask.Recorder)
    recorder.config.base_directory = args.base_directory

    generators = list()
    for i in range(int(args.num_nodes)):
        pcd = app.add("cam" + str(i)).add(app.registry.isaac.message_generators.PointCloudGenerator)
        pcd.config.point_count = 100000000
        pcd.config.point_per_message = int(args.points_per_message)
        pcd.config.tick_period = args.node_tick_rate

        generators.append(pcd)
        app.connect(pcd, "point_cloud", recorder, "point_cloud" + str(i))

    app.run()
Example 20

        'realsense',
    ])

    app.load("packages/visual_slam/apps/stereo_visual_odometry_grayscale.subgraph.json", "svo")
    svo_interface = app.nodes["svo.subgraph"].components["interface"]

    camera = app.add('camera').add(app.registry.isaac.RealsenseCamera)
    camera.config.align_to_color = False
    camera.config.auto_exposure_priority = False
    camera.config.enable_color = False
    camera.config.cols = 640
    camera.config.rows = 360
    camera.config.enable_depth = False
    camera.config.enable_depth_laser = False
    camera.config.enable_ir_stereo = True
    camera.config.ir_framerate = 30

    tracker = app.nodes['svo.tracker'].components['StereoVisualOdometry']
    tracker.config.horizontal_stereo_camera = True
    tracker.config.process_imu_readings = False
    tracker.config.lhs_camera_frame = "left_ir_camera"
    tracker.config.rhs_camera_frame = "right_ir_camera"

    app.connect(camera, "left_ir", svo_interface, "left_image")
    app.connect(camera, "left_ir_intrinsics", svo_interface, "left_intrinsics")

    app.connect(camera, "right_ir", svo_interface, "right_image")
    app.connect(camera, "right_ir_intrinsics", svo_interface, "right_intrinsics")

    app.run()
Example 21

        camera_out_channel = "color"
    elif args.camera == "v4l2":
        camera = app.add('input_images').add(app.registry.isaac.V4L2Camera)
        if args.device_id is None:
            raise ValueError('No device id provided. Please provide a device id')
        camera.config.device_id = args.device_id
        camera.config.cols, camera.config.rows = tuple(
            [int(arg) for arg in args.resolution.split('x')])
        camera.config.rate_hz = args.framerate
        camera_out_channel = "frame"
    else:
        raise ValueError('Unsupported camera type {}'.format(args.camera))
    # Setup april tag node
    tags_detection = app.add('april_tags_detection').add(
        app.registry.isaac.fiducials.AprilTagsDetection)
    tags_detection.config.max_tags = 50
    fiducials_viewer = app.nodes['april_tags_detection'].add(
        app.registry.isaac.viewers.FiducialsViewer)
    # Setup viewer node
    viewer = app.add('image_viewers').add(
        app.registry.isaac.viewers.ImageViewer)
    # Connect message channels
    app.connect(camera, camera_out_channel, tags_detection, "image")
    app.connect(camera, "intrinsics", tags_detection, "intrinsics")
    app.connect(tags_detection, "april_tags", fiducials_viewer, "fiducials")
    app.connect(camera, camera_out_channel, viewer, "image")

    app.load("apps/samples/april_tags/april_tags_python.config.json")

    app.run()
Example 22

def main(args):

    # Check that the provided directories are different; otherwise abort
    if (args.base_directory_images == args.base_directory_gt):
        print("Base directory for image and ground truth logs must not be the same.")
        return

    app = Application(name="record_sim_ground_truth", modules=["viewers"])
    app_uuid = app.uuid

    # Load simulation subgraph and get interface node
    app.load("packages/navsim/apps/navsim_training.subgraph.json",\
            prefix="simulation")
    simulation_interface = app.nodes["simulation.interface"].components["output"]

    # Load record subgraph for images
    app.load("packages/cask/apps/record.subgraph.json", prefix="image_record")
    image_record_interface = app.nodes["image_record.interface"].components["input"]
    image_record_interface.config.base_directory = args.base_directory_images

    # Load record subgraph for ground truth
    app.load("packages/cask/apps/record.subgraph.json", prefix="ground_truth_record")
    ground_truth_record_interface = app.nodes["ground_truth_record.interface"].components["input"]
    ground_truth_record_interface.config.base_directory = args.base_directory_gt

    # Create viewer codelets
    image_viewer = app.add("image_viewer").add(app.registry.isaac.viewers.ImageViewer)
    image_viewer.config.camera_name = "camera"

    # Connect the image and bounding boxes channels to the record interface
    app.connect(simulation_interface, args.image_channel, image_record_interface, "color")
    app.connect(simulation_interface, args.image_channel, image_viewer, "image")
    app.connect(simulation_interface, args.intrinsics_channel, image_record_interface,\
                "color_intrinsics")
    app.connect(simulation_interface, args.intrinsics_channel, image_viewer, "intrinsics")

    # Set the scenario manager config options if scene name is provided
    if (args.scenario_scene is not None):
        scenario_manager = app.nodes["simulation.scenario_manager"].components["scenario_manager"]
        scenario_manager.config.scene = args.scenario_scene
        if (args.scenario_robot_prefab is not None):
            scenario_manager.config.robot_prefab = args.scenario_robot_prefab

    # Connect ground truth data channels according to user input
    mode = args.mode
    if (mode == 'bounding_box' or mode == 'all'):
        detections_viewer = app.add("detections_viewer").add(
            app.registry.isaac.viewers.DetectionsViewer)
        bbox_conversion = app.nodes["simulation.bounding_boxes"].components["conversion"]
        app.connect(simulation_interface, args.segmentation_channel + "_class", bbox_conversion,
                    "class_segmentation")
        app.connect(simulation_interface, args.segmentation_channel + "_instance",
                    bbox_conversion, "instance_segmentation")
        app.connect(simulation_interface, args.segmentation_channel + "_labels", bbox_conversion,
                    "class_labels")
        app.connect(simulation_interface, "bounding_boxes", ground_truth_record_interface,
                    "bounding_boxes")
        app.connect(simulation_interface, "bounding_boxes", detections_viewer, "detections")

    if (mode == 'pose' or mode == 'all'):
        app.load_module('ml')
        detections3_viewer = app.add("detections3_viewer").add(
            app.registry.isaac.viewers.Detections3Viewer)
        pose_bodies = app.add("rigid_body_to_detections3").add(
            app.registry.isaac.ml.RigidbodyToDetections3)
        detections3_viewer.config.frame = "camera"
        if (args.detections3viewer_box_dimensions is not None):
            # Enable 3D detections in the viewer
            detections3_viewer.config.box_dimensions = eval(args.detections3viewer_box_dimensions)
            detections3_viewer.config.object_T_box_center = eval(
                args.detections3viewer_object_T_box_center)
        app.connect(simulation_interface, "bodies", pose_bodies, "rigid_bodies")
        app.connect(pose_bodies, "detections", ground_truth_record_interface, "gt_poses")
        app.connect(pose_bodies, "detections", detections3_viewer, "detections")
        app.connect(simulation_interface, args.intrinsics_channel, ground_truth_record_interface,
                    "image_intrinsics")

    # Run application
    app.run(args.runtime)

    # Write metadata to JSON data per output cask. The metadata serves to associate
    # corresponding image and ground truth casks. As per the RACI evaluation workflow
    # and data management, image casks and ground truth casks are stored in separate
    # directories.
    if args.raci_metadata:
        # Populate image cask metadata
        image_metadata_json = {}
        image_metadata_json["Robot_Name"] = ""
        image_metadata_json["Location/Scene"] = ""
        image_metadata_json["Domain"] = "simulation"
        image_metadata_json["Recording_Time"] = str(datetime.datetime.now().strftime("%Y-%m-%d"))

        # Write image cask metadata
        image_metadata_path = os.path.join(args.base_directory_images, app_uuid + "_md.json")
        with open(image_metadata_path, 'w') as f:
            json.dump(image_metadata_json, f, indent=2)

        # Populate ground truth cask metadata
        ground_truth_metadata_json = {}
        ground_truth_metadata_json["Image_Cask_File"] = app_uuid
        ground_truth_metadata_json["Data_Source"] = "ground_truth"

        # Write ground truth cask metadata
        ground_truth_metadata_path = os.path.join(args.base_directory_gt, app_uuid + "_md.json")
        with open(ground_truth_metadata_path, 'w') as f:
            json.dump(ground_truth_metadata_json, f, indent=2)
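Because both metadata files are keyed by the application UUID, a downstream tool can pair an image cask with its ground truth cask by reading them back. A small sketch, with an assumed directory and UUID:

import json
import os

base_directory_gt = "/tmp/gt"    # assumed; matches --base_directory_gt above
uuid = "0123-abcd"               # the app.uuid of the recording run
with open(os.path.join(base_directory_gt, uuid + "_md.json")) as f:
    ground_truth_metadata = json.load(f)
# "Image_Cask_File" holds the UUID of the matching image cask
print(ground_truth_metadata["Image_Cask_File"])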