Example #1
def initialize_app(self):
    app = Application(name="rm_isaac_bridge")
    app.load(filename="packages/navsim/apps/navsim_tcp.subgraph.json", prefix="simulation")

    # Optional sub-components each wire their own channels into the app.
    if hasattr(self, 'arm'):
        app = self.arm.connect_app(app)
    if hasattr(self, 'camera'):
        app = self.camera.connect_app(app)
    if hasattr(self, 'effector'):
        app = self.effector.connect_app(app)

    self._app = app
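
# A minimal sketch of the connect_app contract relied on above: each optional
# sub-component receives the Application, wires its own channels into the
# simulation subgraph, and returns the app so the result can be reassigned.
# All names here (SimpleArm, the edge endpoints) are hypothetical.
class SimpleArm:
    def connect_app(self, app):
        # Subscribe a hypothetical controller to the simulation output channel.
        app.connect('simulation.interface/output', 'joint_state',
                    'controller/JointController', 'state')
        return app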
Example #2
from engine.pyalice import Application


def main(args):
    app = Application(name="detect_net_inference")

    # Load subgraph and get interface node
    app.load("packages/detect_net/apps/detect_net_inference.subgraph.json",
             prefix="detect_net_inference")
    detect_net_interface = app.nodes["detect_net_inference.subgraph"]\
        .components["interface"]

    # Load configuration
    app.load(args.config)

    # Configure detection model
    detection_model = app.nodes["detect_net_inference.tensor_r_t_inference"]\
        .components["isaac.ml.TensorRTInference"]
    if args.detection_model_file_path is not None:
        detection_model.config.model_file_path = args.detection_model_file_path
    if args.etlt_password is not None:
        detection_model.config.etlt_password = args.etlt_password

    # Configure detection decoder
    decoder = app.nodes["detect_net_inference.detection_decoder"]\
        .components["isaac.detect_net.DetectNetDecoder"]
    decoder.config.output_scale = [args.rows, args.cols]
    if args.confidence_threshold is not None:
        decoder.config.confidence_threshold = args.confidence_threshold
    if args.nms_threshold is not None:
        decoder.config.non_maximum_suppression_threshold = args.nms_threshold

    if args.mode == 'cask':
        # Load replay subgraph and configure interface node
        app.load("packages/record_replay/apps/replay.subgraph.json",
                 prefix="replay")
        replay_interface = app.nodes["replay.interface"].components["output"]
        replay_interface.config.cask_directory = args.cask_directory

        # Connect the output of the replay subgraph to the detection subgraph
        app.connect(replay_interface, "color", detect_net_interface, "image")
    elif args.mode == 'sim':
        # Load simulation subgraph and get interface node
        app.load("packages/navsim/apps/navsim_training.subgraph.json",\
             prefix="simulation")
        simulation_interface = app.nodes["simulation.interface"].components[
            "output"]

        # Connect the output of the simulation with user-specified channel to the detection subgraph
        app.connect(simulation_interface, args.image_channel,
                    detect_net_interface, "image")
    elif args.mode == 'realsense':
        app.load_module('realsense')
        # Create and configure realsense camera codelet
        camera = app.add("camera").add(app.registry.isaac.RealsenseCamera)
        camera.config.rows = args.rows
        camera.config.cols = args.cols
        camera.config.color_framerate = args.fps
        camera.config.depth_framerate = args.fps
        camera.config.enable_ir_stereo = False

        # Connect the output of the camera node to the detection subgraph
        app.connect(camera, "color", detect_net_inferface, "image")
    elif args.mode == 'v4l':
        app.load_module('sensors:v4l2_camera')
        # Create and configure V4L camera codelet
        camera = app.add("camera").add(app.registry.isaac.V4L2Camera)
        camera.config.device_id = 0
        camera.config.rows = args.rows
        camera.config.cols = args.cols
        camera.config.rate_hz = args.fps

        # Connect the output of the camera node to the detection subgraph
        app.connect(camera, "frame", detect_net_inferface, "image")
    elif args.mode == 'image':
        app.load_module('message_generators')
        # Create feeder node
        feeder = app.add("feeder").add(
            app.registry.isaac.message_generators.ImageLoader)
        feeder.config.color_glob_pattern = args.image_directory
        feeder.config.tick_period = "1Hz"
        feeder.config.focal_length = [args.focal_length, args.focal_length]
        feeder.config.optical_center = [
            args.optical_center_rows, args.optical_center_cols
        ]
        feeder.config.distortion_coefficients = [0.01, 0.01, 0.01, 0.01, 0.01]

        # Connect the output of the image feeder node to the detection subgraph
        app.connect(feeder, "color", detect_net_interface, "image")
    else:
        raise ValueError('Unsupported mode: {}'.format(args.mode))
    app.run()
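
if __name__ == '__main__':
    import argparse

    # A minimal sketch of the parser main() expects: every attribute below is
    # read somewhere in main() above. Defaults are assumptions, not values from
    # the original app.
    parser = argparse.ArgumentParser(description='DetectNet inference')
    parser.add_argument('--mode', default='sim',
                        choices=['cask', 'sim', 'realsense', 'v4l', 'image'])
    parser.add_argument('--config', required=True,
                        help='Configuration JSON loaded via app.load()')
    parser.add_argument('--detection_model_file_path', default=None)
    parser.add_argument('--etlt_password', default=None)
    parser.add_argument('--rows', type=int, default=720)
    parser.add_argument('--cols', type=int, default=1280)
    parser.add_argument('--confidence_threshold', type=float, default=None)
    parser.add_argument('--nms_threshold', type=float, default=None)
    parser.add_argument('--cask_directory', default=None)
    parser.add_argument('--image_channel', default='color')
    parser.add_argument('--fps', type=int, default=30)
    parser.add_argument('--image_directory', default=None)
    parser.add_argument('--focal_length', type=float, default=700.0)
    parser.add_argument('--optical_center_rows', type=float, default=360.0)
    parser.add_argument('--optical_center_cols', type=float, default=640.0)
    main(parser.parse_args())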
Example #3
'''
Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.

NVIDIA CORPORATION and its licensors retain all intellectual property
and proprietary rights in and to this software, related documentation
and any modifications thereto. Any use, reproduction, disclosure or
distribution of this software and related documentation without an express
license agreement from NVIDIA CORPORATION is strictly prohibited.
'''
from engine.pyalice import Application
import argparse
import random
import sys

DEMOS = ["demo_1", "demo_2", "demo_3", "demo_4"]

if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='flatsim is a flat-world simulator for navigation')
    parser.add_argument('--demo',
                        dest='demo',
                        help='The scenario which will be used for flatsim')
    args, _ = parser.parse_known_args()

    demo = args.demo if args.demo is not None else random.choice(DEMOS)

    app = Application(name="flatsim")
    app.load("packages/flatsim/apps/flatsim.subgraph.json", prefix="flatsim")
    app.load("packages/flatsim/apps/{}.json".format(demo))

    app.run()
Example #4
    # (Truncated at the top in the source. The RealSense camera and the left
    # splitter are reconstructed from the symmetric code and connections below;
    # the exact camera configuration is an assumption.)
    camera = app.add('camera').add(app.registry.isaac.RealsenseCamera)
    camera.config.enable_ir_stereo = True

    splitter_left = app.add('camera_splitter_left').add(
        app.registry.isaac.utils.ColorCameraProtoSplitter)
    splitter_left.config.only_pinhole = False

    splitter_right = app.add('camera_splitter_right').add(
        app.registry.isaac.utils.ColorCameraProtoSplitter)
    splitter_right.config.only_pinhole = False

    tracker = app.add('tracker').add(app.registry.isaac.StereoVisualOdometry)
    tracker.config.horizontal_stereo_camera = True
    tracker.config.process_imu_readings = False
    tracker.config.lhs_camera_frame = "left_ir_camera"
    tracker.config.rhs_camera_frame = "right_ir_camera"

    viewer = app.add('viewers').add(
        app.registry.isaac.viewers.ColorCameraViewer)
    viewer.config.reduce_scale = 2

    app.connect(camera, "left_ir", splitter_left, "color_camera")
    app.connect(splitter_left, "image", tracker, "left_image")
    app.connect(splitter_left, "intrinsics", tracker, "left_intrinsics")

    app.connect(camera, "right_ir", splitter_right, "color_camera")
    app.connect(splitter_right, "image", tracker, "right_image")
    app.connect(splitter_right, "intrinsics", tracker, "right_intrinsics")

    app.connect(camera, "left_ir", viewer, "color_listener")

    app.load("apps/samples/stereo_vo/svo_realsense.config.json")

    app.run()
Example #5
    # (Truncated at the top in the source; the argument name is implied by
    # args.mission_host below.)
    parser.add_argument(
        "--mission_host",
        help=
        "The ip address or hostname of the host to connect to and receive missions from",
        default="localhost")
    parser.add_argument("--mission_port",
                        help="The TCP port to connect to the mission server",
                        type=int,
                        default=9998)
    args = parser.parse_args()

    # Create and start the app
    more_jsons = args.map_json
    more_jsons += ",packages/navsim/robots/str4.json"
    app = Application(
        app_filename="packages/cart_delivery/apps/cart_delivery.app.json",
        more_jsons=more_jsons)
    app.load("packages/cart_delivery/apps/navigation.config.json",
             prefix=args.demo_subgraph_name)
    app.load(
        "packages/cart_delivery/apps/detection_pose_estimation.config.json",
        prefix=args.demo_subgraph_name)
    if args.pose2_planner:
        app.load("packages/cart_delivery/apps/pose2_planner.config.json",
                 prefix=args.demo_subgraph_name)
    if args.mission_robot_name:
        # Load the mission subgraph and set the config based on the input parameters
        app.load("packages/behavior_tree/apps/missions.graph.json")
        app.nodes["tcp_client"].components["JsonTcpClient"].config[
            "host"] = args.mission_host
        app.nodes["tcp_client"].components["JsonTcpClient"].config[
            "port"] = args.mission_port
        app.nodes["mission_control"].components["NodeGroup"].config["node_names"] = \
            [args.demo_subgraph_name + ".sequence_behavior"]
Example #6
    parser.add_argument("--mission_port",
                        help="The TCP port to connect to the mission server",
                        type=int,
                        default=9998)
    args = parser.parse_args()

    # Create and start the app
    more_jsons = args.map_json + "," + args.robot_json
    if args.more:
        more_jsons += "," + args.more
    app_path = "apps/navsim/navsim_navigate.app.json"
    app = Application(app_filename=app_path, more_jsons=more_jsons)

    if args.mission_robot_name:
        # Load the mission subgraph and set the config based on the input parameters
        app.load("packages/behavior_tree/apps/missions.graph.json")
        app.nodes["tcp_client"].components["JsonTcpClient"].config[
            "host"] = args.mission_host
        app.nodes["tcp_client"].components["JsonTcpClient"].config[
            "port"] = args.mission_port
        app.nodes["mission_control"].components["NodeGroup"].config["node_names"] = \
            ["goals.goal_behavior"]
        app.nodes["robot_name"].components["JsonMockup"].config["json_mock"] = \
            {"text":args.mission_robot_name}
        run_on_start = app.nodes["goals.run_on_start"]
        # Change the start behavior to the mission behavior
        nodes = run_on_start.components["NodeGroup"].config["node_names"]
        run_on_start.components["NodeGroup"].config["node_names"] = nodes + [
            "mission_control"
        ]
        run_on_start.components["SwitchBehavior"].config[
Example #7
    # (Truncated at the top in the source. The ZED camera and left splitter are
    # reconstructed from the symmetric code and connections below.)
    camera = app.add('camera').add(app.registry.isaac.ZedCamera)

    splitter_left = app.add('camera_splitter_left').add(
        app.registry.isaac.utils.ColorCameraProtoSplitter)
    splitter_left.config.only_pinhole = False

    splitter_right = app.add('camera_splitter_right').add(
        app.registry.isaac.utils.ColorCameraProtoSplitter)
    splitter_right.config.only_pinhole = False

    tracker = app.add('tracker').add(app.registry.isaac.StereoVisualOdometry)
    tracker.config.horizontal_stereo_camera = True
    tracker.config.process_imu_readings = args.imu
    tracker.config.lhs_camera_frame = "zed_left_camera"
    tracker.config.rhs_camera_frame = "zed_right_camera"
    tracker.config.imu_frame = "zed_imu"

    if args.imu:
        camera_imu_reader = app.nodes['camera'].add(app.registry.isaac.zed.ZedImuReader)
        camera_imu_reader.config.tick_period = "300Hz"
        app.connect(camera_imu_reader, "imu_raw", tracker, "imu")

    viewer = app.add('viewers').add(app.registry.isaac.viewers.ColorCameraViewer)
    viewer.config.reduce_scale = 2

    app.connect(camera, "left_camera_gray", splitter_left, "color_camera")
    app.connect(splitter_left, "image", tracker, "left_image")
    app.connect(splitter_left, "intrinsics", tracker, "left_intrinsics")

    app.connect(camera, "right_camera_gray", splitter_right, "color_camera")
    app.connect(splitter_right, "image", tracker, "right_image")
    app.connect(splitter_right, "intrinsics", tracker, "right_intrinsics")

    app.connect(camera, "left_camera_gray", viewer, "color_listener")

    app.load("apps/samples/stereo_vo/svo_zed.config.json")

    app.run()
Example #8
    kinematic_file = "apps/assets/kinematic_trees/ur10.kinematic.json"
    joints = []
    with open(kinematic_file, 'r') as fd:
        kt = json.load(fd)
        for link in kt['links']:
            if 'motor' in link and link['motor']['type'] != 'constant':
                joints.append(link['name'])

    # create composite atlas
    create_composite_atlas(args.cask, joints)
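
    # For reference, a hypothetical excerpt of the shape the loop above parses
    # (link names and motor types are illustrative, not taken from
    # ur10.kinematic.json):
    #
    #     {"links": [
    #         {"name": "base", "motor": {"type": "constant"}},      <- skipped
    #         {"name": "shoulder", "motor": {"type": "rotation"}},  <- kept
    #         {"name": "tool"}                                      <- no motor: skipped
    #     ]}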

    # Create and start the app
    app = Application(name="Shuffle Box Hardware")
    # Load behavior subgraph. This contains the sequence behavior to move the arm
    # between waypoints and to switch suction on/off.
    app.load("apps/samples/manipulation/shuffle_box_behavior.subgraph.json", prefix="behavior")
    behavior_interface = app.nodes["behavior.interface"]["subgraph"]
    app.nodes["behavior.atlas"]["CompositeAtlas"].config.cask = args.cask
    app.load("packages/planner/apps/multi_joint_lqr_control.subgraph.json", prefix="lqr")

    # load multi joint lqr control subgraph
    lqr_interface = app.nodes["lqr.subgraph"]["interface"]
    kinematic_tree = app.nodes["lqr.kinematic_tree"]["KinematicTree"]
    lqr_planner = app.nodes["lqr.local_plan"]["MultiJointLqrPlanner"]
    app.connect(behavior_interface, "joint_target", lqr_interface, "joint_target")
    kinematic_tree.config.kinematic_file = kinematic_file
    lqr_planner.config.speed_min = [-1.0] * len(joints)
    lqr_planner.config.speed_max = [1.0] * len(joints)
    lqr_planner.config.acceleration_min = [-1.0] * len(joints)
    lqr_planner.config.acceleration_max = [1.0] * len(joints)
Example #9
    # (Truncated at the top in the source; this line is reconstructed from the
    # args.arm fragment that remained.)
    kinematic_file = "apps/assets/kinematic_trees/{}.kinematic.json".format(
        args.arm)
    joints = []
    with open(kinematic_file, 'r') as fd:
        kt = json.load(fd)
        for link in kt['links']:
            if 'motor' in link and link['motor']['type'] != 'constant':
                joints.append(link['name'])

    # create composite atlas
    create_composite_atlas(args.cask, args.arm, joints)

    # Create and start the app
    app = Application(name="Shuffle Box")
    # Load behavior subgraph. This contains the sequence behavior to move the arm
    # between waypoints and to switch suction on/off.
    app.load("apps/samples/manipulation/shuffle_box_behavior.subgraph.json",
             prefix="behavior")
    behavior_interface = app.nodes["behavior.interface"]["subgraph"]
    app.nodes["behavior.atlas"]["CompositeAtlas"].config.cask = args.cask
    app.load("packages/planner/apps/multi_joint_lqr_control.subgraph.json",
             prefix="lqr")

    # load multi joint lqr control subgraph
    lqr_interface = app.nodes["lqr.subgraph"]["interface"]
    kinematic_tree = app.nodes["lqr.kinematic_tree"]["KinematicTree"]
    lqr_planner = app.nodes["lqr.local_plan"]["MultiJointLqrPlanner"]
    app.connect(behavior_interface, "joint_target", lqr_interface,
                "joint_target")
    kinematic_tree.config.kinematic_file = kinematic_file
    lqr_planner.config.speed_min = [-1.5] + [-1.0] * (len(joints) - 1)
    lqr_planner.config.speed_max = [1.5] + [1.0] * (len(joints) - 1)
    lqr_planner.config.acceleration_min = [-1.5] + [-1.0] * (len(joints) - 1)
    # (Completed by symmetry with the speed limits above; truncated here in the
    # source.)
    lqr_planner.config.acceleration_max = [1.5] + [1.0] * (len(joints) - 1)
Example #10
    # (Truncated at the top in the source; the RealSense branch is reconstructed
    # to mirror the V4L2 branch below.)
    if args.camera == "realsense":
        camera = app.add('input_images').add(app.registry.isaac.RealsenseCamera)
        camera.config.cols, camera.config.rows = tuple(
            [int(arg) for arg in args.resolution.split('x')])
        camera.config.color_framerate = args.framerate
        camera_out_channel = "color"
    elif args.camera == "v4l2":
        camera = app.add('input_images').add(app.registry.isaac.V4L2Camera)
        if args.device_id is None:
            raise ValueError('No device id set. Please provide a device id.')
        camera.config.device_id = args.device_id
        camera.config.cols, camera.config.rows = tuple(
            [int(arg) for arg in args.resolution.split('x')])
        camera.config.rate_hz = args.framerate
        camera_out_channel = "frame"
    else:
        raise ValueError('Unsupported camera type: {}'.format(args.camera))
    # Setup april tag node
    tags_detection = app.add('april_tags_detection').add(
        app.registry.isaac.perception.AprilTagsDetection)
    tags_detection.config.max_tags = 50
    fiducials_viewer = app.nodes['april_tags_detection'].add(
        app.registry.isaac.viewers.FiducialsViewer)
    # Setup viewer node
    viewer = app.add('image_viewers').add(app.registry.isaac.viewers.ColorCameraViewer)
    # Connect message channels
    app.connect(camera, camera_out_channel, tags_detection, "image")
    app.connect(tags_detection, "april_tags", fiducials_viewer, "fiducials")
    app.connect(camera, camera_out_channel, viewer, "color_listener")

    app.load("apps/samples/april_tags/april_tags_python.config.json")

    app.run()
Example #11
from engine.pyalice import Application

app = Application(name = "mybot")
app.load_module('message_generators')
app.load_module('viewers')

app.load('packages/navsim/apps/navsim_tcp.subgraph.json', 'simulation')

node_view = app.add("viewer")
component_view = node_view.add(app.registry.isaac.viewers.ColorCameraViewer, 'ColorCameraViewer')

node_view = app.add("depth_viewer")
node_view.add(app.registry.isaac.viewers.DepthCameraViewer, 'DepthCameraViewer')

app.connect('simulation.interface/output', 'color', 'viewer/ColorCameraViewer', 'color_listener')
app.connect('simulation.interface/output', 'depth', 'depth_viewer/DepthCameraViewer', 'depth_listener')
app.run()
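
# For comparison with the earlier examples, the same two edges can be wired with
# node/component objects instead of path strings. A sketch (it would replace the
# two string-form app.connect calls above, before app.run()):
#
#     sim_out = app.nodes['simulation.interface'].components['output']
#     color_viewer = app.nodes['viewer'].components['ColorCameraViewer']
#     depth_viewer = app.nodes['depth_viewer'].components['DepthCameraViewer']
#     app.connect(sim_out, 'color', color_viewer, 'color_listener')
#     app.connect(sim_out, 'depth', depth_viewer, 'depth_listener')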