def setup_app():
    app = Application(name="composite_atlas", modules=["composite"])
    atlas = app.add("atlas").add(app.registry.isaac.composite.CompositeAtlas)
    atlas.config["cask"] = "packages/composite/tests/waypoints"

    pub_node = app.add("pub")
    pub = pub_node.add(app.registry.isaac.composite.CompositePublisher,
                       name="CompositePublisher")
    pub.config.tick_period = "20Hz"
    pub.config.atlas = "atlas/CompositeAtlas"
    pub.config.path = ["cart", "dolly"]
    return app, pub
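
A minimal way to run this setup function (hypothetical harness; the original excerpt ends at the return):

if __name__ == '__main__':
    app, pub = setup_app()
    app.run()    # blocks until the application is stopped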
Example #2
def setup_app():
    app = Application(name="follow_path", modules=["composite"])
    atlas = app.add("atlas").add(app.registry.isaac.composite.CompositeAtlas)
    atlas.config["cask"] = "packages/composite/tests/waypoints"

    pub_node = app.add("pub")
    pub = pub_node.add(app.registry.isaac.composite.CompositePublisher, name="CompositePublisher")
    pub.config.tick_period = "20Hz"
    pub.config.atlas = "atlas/CompositeAtlas"
    pub.config.path = ["cart", "dolly"]

    follow_node = app.add("follow")
    follow = follow_node.add(app.registry.isaac.composite.FollowPath)
    follow.config.tick_period = "10Hz"
    follow.config.wait_time = 0.05
    follow_node.add(app.registry.isaac.composite.CompositeMetric)

    app.connect(pub, "path", follow, "path")
    # Loop the follower's goal back as its state so the path advances without
    # a real robot in the loop (test-style wiring).
    app.connect(follow, "goal", follow, "state")
    return app, follow_node
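
For a quick smoke test, pyalice's Application.run() also accepts an optional duration in seconds, after which the application shuts down on its own; a sketch:

if __name__ == '__main__':
    app, follow_node = setup_app()
    app.run(10.0)    # run for roughly ten seconds, then stop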
def main(args):
    app = Application(name="detect_net_inference")

    # Load subgraph and get interface node
    app.load("packages/detect_net/apps/detect_net_inference.subgraph.json",
             prefix="detect_net_inference")
    detect_net_interface = app.nodes["detect_net_inference.subgraph"]\
        .components["interface"]

    # Load configuration
    app.load(args.config)

    # Configure detection model
    detection_model = app.nodes["detect_net_inference.tensor_r_t_inference"]\
        .components["isaac.ml.TensorRTInference"]
    if args.detection_model_file_path is not None:
        detection_model.config.model_file_path = args.detection_model_file_path
    if args.etlt_password is not None:
        detection_model.config.etlt_password = args.etlt_password

    # Configure detection decoder
    decoder = app.nodes["detect_net_inference.detection_decoder"]\
        .components["isaac.detect_net.DetectNetDecoder"]
    decoder.config.output_scale = [args.rows, args.cols]
    if args.confidence_threshold is not None:
        decoder.config.confidence_threshold = args.confidence_threshold
    if args.nms_threshold is not None:
        decoder.config.non_maximum_suppression_threshold = args.nms_threshold

    if args.mode == 'cask':
        # Load replay subgraph and configure interface node
        app.load("packages/record_replay/apps/replay.subgraph.json",
                 prefix="replay")
        replay_interface = app.nodes["replay.interface"].components["output"]
        replay_interface.config.cask_directory = args.cask_directory

        # Connect the output of the replay subgraph to the detection subgraph
        app.connect(replay_interface, "color", detect_net_interface, "image")
    elif args.mode == 'sim':
        # Load simulation subgraph and get interface node
        app.load("packages/navsim/apps/navsim_training.subgraph.json",\
             prefix="simulation")
        simulation_interface = app.nodes["simulation.interface"].components[
            "output"]

        # Connect the user-specified simulation output channel to the detection subgraph
        app.connect(simulation_interface, args.image_channel,
                    detect_net_interface, "image")
    elif args.mode == 'realsense':
        app.load_module('realsense')
        # Create and configure realsense camera codelet
        camera = app.add("camera").add(app.registry.isaac.RealsenseCamera)
        camera.config.rows = args.rows
        camera.config.cols = args.cols
        camera.config.color_framerate = args.fps
        camera.config.depth_framerate = args.fps
        camera.config.enable_ir_stereo = False

        # Connect the output of the camera node to the detection subgraph
        app.connect(camera, "color", detect_net_inferface, "image")
    elif args.mode == 'v4l':
        app.load_module('sensors:v4l2_camera')
        # Create and configure V4L camera codelet
        camera = app.add("camera").add(app.registry.isaac.V4L2Camera)
        camera.config.device_id = 0
        camera.config.rows = args.rows
        camera.config.cols = args.cols
        camera.config.rate_hz = args.fps

        # Connect the output of the camera node to the detection subgraph
        app.connect(camera, "frame", detect_net_inferface, "image")
    elif args.mode == 'image':
        app.load_module('message_generators')
        # Create feeder node
        feeder = app.add("feeder").add(
            app.registry.isaac.message_generators.ImageLoader)
        feeder.config.color_glob_pattern = args.image_directory
        feeder.config.tick_period = "1Hz"
        feeder.config.focal_length = [args.focal_length, args.focal_length]
        feeder.config.optical_center = [
            args.optical_center_rows, args.optical_center_cols
        ]
        feeder.config.distortion_coefficients = [0.01, 0.01, 0.01, 0.01, 0.01]

        # Connect the output of the image feeder node to the detection subgraph
        app.connect(feeder, "color", detect_net_inferface, "image")
    else:
        raise ValueError('Unsupported mode: {}'.format(args.mode))
    app.run()
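
The excerpt omits the argument parsing that produces args; a harness reconstructed from the args.* accesses in main() above (flag names and defaults are assumptions, not the original sample's values):

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='DetectNet inference')
    parser.add_argument('--mode', choices=['cask', 'sim', 'realsense', 'v4l', 'image'], default='sim')
    parser.add_argument('--config', help='Config file applied via app.load()')
    parser.add_argument('--detection_model_file_path', help='Path to the detection model')
    parser.add_argument('--etlt_password', help='Password for an .etlt model')
    parser.add_argument('--confidence_threshold', type=float)
    parser.add_argument('--nms_threshold', type=float)
    parser.add_argument('--rows', type=int, default=720)
    parser.add_argument('--cols', type=int, default=1280)
    parser.add_argument('--fps', type=int, default=30)
    parser.add_argument('--cask_directory', help="Cask to replay in 'cask' mode")
    parser.add_argument('--image_channel', default='color', help="Sim output channel in 'sim' mode")
    parser.add_argument('--image_directory', help="Image glob pattern for 'image' mode")
    parser.add_argument('--focal_length', type=float, default=700.0)
    parser.add_argument('--optical_center_rows', type=float, default=360.0)
    parser.add_argument('--optical_center_cols', type=float, default=640.0)
    main(parser.parse_args())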
from engine.pyalice import Application, Node

if __name__ == '__main__':
    app = Application(name="svo_realsense",
                      modules=[
                          'perception:stereo_visual_odometry',
                          'realsense',
                          'utils',
                          "viewers",
                      ])

    camera = app.add('camera').add(app.registry.isaac.RealsenseCamera)
    camera.config.align_to_color = False
    camera.config.auto_exposure_priority = False
    camera.config.enable_color = False
    camera.config.cols = 640
    camera.config.rows = 360
    camera.config.enable_depth = False
    camera.config.enable_depth_laser = False
    camera.config.enable_ir_stereo = True
    camera.config.ir_framerate = 30

    splitter_left = app.add('camera_splitter_left').add(
        app.registry.isaac.utils.ColorCameraProtoSplitter)
    splitter_left.config.only_pinhole = False

    splitter_right = app.add('camera_splitter_right').add(
        app.registry.isaac.utils.ColorCameraProtoSplitter)
    splitter_right.config.only_pinhole = False    # mirrors the left splitter

    # '--fps' flag name reconstructed from the args.fps use below; the excerpt
    # begins mid-argument-list
    parser.add_argument('--fps',
                        dest='fps',
                        type=float,
                        default=3.0,
                        help='The framerate at which to record')
    parser.add_argument('--rows',
                        dest='rows',
                        type=int,
                        default=720,
                        help='The vertical resolution of the camera images')
    parser.add_argument('--cols',
                        dest='cols',
                        type=int,
                        default=1280,
                        help='The horizontal resolution of the camera images')
    args, _ = parser.parse_known_args()

    app = Application(name="record_dummy", modules=["message_generators"])

    # Create recorder node
    recorder = app.add("recorder").add(app.registry.isaac.alice.Recorder)
    recorder.config.base_directory = args.base_directory

    # Create dummy camera codelet
    camera = app.add("cam").add(
        app.registry.isaac.message_generators.CameraGenerator)
    camera.config.rows = args.rows
    camera.config.cols = args.cols
    camera.config.tick_period = str(args.fps) + "Hz"
    app.connect(camera, "color_left", recorder, "color")
    app.connect(camera, "depth", recorder, "depth")

    app.run()
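
To inspect the recording, the replay subgraph used by the detect_net example above can be pointed at the recorded cask; a sketch (the exact cask path under base_directory is recorder-defined):

replay_app = Application(name="replay_dummy", modules=["viewers"])
replay_app.load("packages/record_replay/apps/replay.subgraph.json", prefix="replay")
replay_interface = replay_app.nodes["replay.interface"].components["output"]
replay_interface.config.cask_directory = args.base_directory    # point at the recorded cask

viewer = replay_app.add("viewer").add(replay_app.registry.isaac.viewers.ColorCameraViewer)
replay_app.connect(replay_interface, "color", viewer, "color_listener")
replay_app.run()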
Example #6
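This excerpt references behavior_interface, kinematic_file, and joints, which the original application defines before this point (along with the app object itself); a hypothetical scaffold with illustrative values only:

from engine.pyalice import Application

app = Application(name="shuffle_box", modules=["sight"])
kinematic_file = "apps/assets/kinematic_trees/ur10.kinematic.json"    # illustrative path
joints = ["shoulder_pan_joint", "shoulder_lift_joint", "elbow_joint",
          "wrist_1_joint", "wrist_2_joint", "wrist_3_joint"]
# behavior_interface would come from a behavior subgraph loaded earlier, e.g.:
# behavior_interface = app.nodes["behavior.subgraph"]["interface"]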
    app.load("packages/planner/apps/multi_joint_lqr_control.subgraph.json", prefix="lqr")

    # load multi joint lqr control subgraph
    lqr_interface = app.nodes["lqr.subgraph"]["interface"]
    kinematic_tree = app.nodes["lqr.kinematic_tree"]["KinematicTree"]
    lqr_planner = app.nodes["lqr.local_plan"]["MultiJointLqrPlanner"]
    app.connect(behavior_interface, "joint_target", lqr_interface, "joint_target")
    kinematic_tree.config.kinematic_file = kinematic_file
    lqr_planner.config.speed_min = [-1.0] * len(joints)
    lqr_planner.config.speed_max = [1.0] * len(joints)
    lqr_planner.config.acceleration_min = [-1.0] * len(joints)
    lqr_planner.config.acceleration_max = [1.0] * len(joints)

    # Load the hardware module and create the arm driver codelet
    app.load_module("universal_robots")
    arm = app.add("hardware").add(app.registry.isaac.universal_robots.UniversalRobots)
    arm.config.robot_ip = "10.32.221.190"
    arm.config.control_mode = "joint position"
    arm.config.tick_period = '125Hz'
    arm.config.kinematic_tree = "lqr.kinematic_tree"
    arm.config.tool_digital_out_names = ["valve", "pump"]
    arm.config.tool_digital_in_names = ["unknown", "gripper"]

    app.connect(arm, "arm_state", lqr_interface, "joint_state")
    app.connect(arm, "arm_state", behavior_interface, "joint_state")
    app.connect(arm, "io_state", behavior_interface, "io_state")
    app.connect(lqr_interface, "joint_command", arm, "arm_command")
    app.connect(behavior_interface, "io_command", arm, "io_command")

    # Visualize IO state in Sight
    widget_io = app.add("sight_widgets").add(app.registry.isaac.sight.SightWidget, "IO")
    # Set up perception (detection + pose estimation) for the sim variant
    use_perception = True
    app.load(
        "packages/object_pose_estimation/apps/pose_cnn_decoder"
        "/detection_pose_estimation_cnn_inference.subgraph.json",
        prefix="detection_pose_estimation")
    perception_interface = app.nodes[
        "detection_pose_estimation.interface"]["Subgraph"]
    app.load(
        "apps/samples/manipulation/shuffle_box_detection_pose_estimation.config.json"
    )

    # Load sim subgraph for the TCP connection
    app.load("packages/navsim/apps/navsim_tcp.subgraph.json", "simulation")
    sim_in = app.nodes["simulation.interface"]["input"]
    sim_out = app.nodes["simulation.interface"]["output"]
    app.connect(sim_out, "joint_state", lqr_interface, "joint_state")
    app.connect(sim_out, "joint_state", behavior_interface, "joint_state")
    app.connect(sim_out, "io_state", behavior_interface, "io_state")
    app.connect(lqr_interface, "joint_command", sim_in, "joint_position")
    app.connect(behavior_interface, "io_command", sim_in, "io_command")
    if use_perception:
        app.connect(sim_out, "color", perception_interface, "color")
    viewers = app.add("viewers")
    depth_viewer = viewers.add(app.registry.isaac.viewers.DepthCameraViewer,
                               "DepthViewer")
    app.connect(sim_out, "depth", depth_viewer, "depth_listener")
    depth_viewer.config.max_visualization_depth = 3

    # run app
    app.run()
    # '--mode' flag name reconstructed from the args.mode use below; the
    # excerpt begins mid-argument-list
    parser.add_argument('--mode',
                        dest='mode',
                        choices=['720p', '640x480'],
                        default='720p',
                        help='The resolution of the camera images')
    args, _ = parser.parse_known_args()

    app = Application(name="record_realsense", modules=["realsense"])

    if args.mode == '720p':
        rows = 720
        cols = 1280
    elif args.mode == '640x480':
        rows = 480
        cols = 640
    else:
        raise ValueError('Unsupported camera resolution: {}'.format(args.mode))

    # Create recorder node
    recorder = app.add("recorder").add(app.registry.isaac.alice.Recorder)
    recorder.config.base_directory = args.base_directory

    # Create realsense camera codelet
    camera = app.add("cam").add(app.registry.isaac.RealsenseCamera)
    camera.config.rows = rows
    camera.config.cols = cols
    camera.config.color_framerate = args.fps
    camera.config.depth_framerate = args.fps
    app.connect(camera, "color", recorder, "color")
    app.connect(camera, "depth", recorder, "depth")

    app.run()
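
Instead of the fixed mode table above, the resolution string could be parsed directly, as the next example does in its realsense and v4l2 branches; a sketch:

def parse_resolution(mode):
    # '720p' is a named alias; anything else is treated as 'WIDTHxHEIGHT'
    if mode == '720p':
        return 720, 1280
    cols, rows = (int(v) for v in mode.split('x'))
    return rows, cols

rows, cols = parse_resolution(args.mode)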
    # '--framerate' flag name reconstructed from the args.framerate uses below;
    # the excerpt begins mid-argument-list
    parser.add_argument('--framerate',
                        dest='framerate',
                        type=int,
                        default=60,
                        help='Camera framerate')
    parser.add_argument(
        '--device_id', dest='device_id', action='store', type=int, help='Camera device id')
    args = parser.parse_args()

    # Create april_tags_python application
    app = Application(
        name="april_tags_python",
        modules=[
            "//packages/perception:april_tags", "realsense", "sensors:v4l2_camera", "viewers", "zed"
        ])

    # Set up camera node
    camera = None
    if args.camera == "zed":
        camera = app.add('input_images').add(app.registry.isaac.ZedCamera)
        camera.config.resolution = args.resolution
        camera.config.tick_period = '{tick_period}Hz'.format(tick_period=args.framerate)
        camera_out_channel = "left_camera_rgb"
    elif args.camera == "realsense":
        camera = app.add('input_images').add(app.registry.isaac.RealsenseCamera)
        camera.config.cols, camera.config.rows = tuple(
            [int(arg) for arg in args.resolution.split('x')])
        camera.config.color_framerate = args.framerate
        camera_out_channel = "color"
    elif args.camera == "v4l2":
        camera = app.add('input_images').add(app.registry.isaac.V4L2Camera)
        if args.device_id is None:
            raise ValueError('No camera device id provided; pass --device_id')
        camera.config.device_id = args.device_id
        camera.config.cols, camera.config.rows = tuple(
            [int(arg) for arg in args.resolution.split('x')])
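
The excerpt ends here; the original sample presumably goes on to add the AprilTags detection codelet and a viewer. A sketch, with component and channel names assumed from the module list above rather than confirmed:

# ASSUMPTION: codelet and channel names below are inferred, not confirmed
tags = app.add('april_tags_detection').add(
    app.registry.isaac.perception.AprilTagsDetection)

viewer = app.add('viewer').add(app.registry.isaac.viewers.ColorCameraViewer)

app.connect(camera, camera_out_channel, tags, 'image')
app.connect(camera, camera_out_channel, viewer, 'color_listener')
app.run()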
Example #10
from engine.pyalice import Application

app = Application(name = "mybot")
app.load_module('message_generators')
app.load_module('viewers')

app.load('packages/navsim/apps/navsim_tcp.subgraph.json', 'simulation')

node_view = app.add("viewer")
component_view = node_view.add(app.registry.isaac.viewers.ColorCameraViewer, 'ColorCameraViewer')

node_view = app.add("depth_viewer")
node_view.add(app.registry.isaac.viewers.DepthCameraViewer, 'DepthCameraViewer')

app.connect('simulation.interface/output', 'color', 'viewer/ColorCameraViewer', 'color_listener')
app.connect('simulation.interface/output', 'depth', 'depth_viewer/DepthCameraViewer', 'depth_listener')
app.run()
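
Note the string form of app.connect used here: 'node_name/component_name' plus a channel name, which avoids keeping component handles around. It is equivalent to the object-based form in the earlier examples; the same first connection written with a component handle, for comparison:

sim_output = app.nodes['simulation.interface']['output']
app.connect(sim_output, 'color', color_viewer, 'color_listener')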