Example #1
from engine.pyalice import Application
from pprint import pprint

# Odometry (below) is an application-defined codelet; its import is not
# part of the original snippet.


def main():
    app = Application(app_filename="apps/rm/realsense_gmap/realsense_gmap.app.json")

    odometry = app.nodes["odometry"].add(Odometry, "Odometry")
    app.connect(app.nodes["visual_odometry_tracker"]["StereoVisualOdometry"], "left_camera_pose", odometry, "pose")
    app.connect(odometry, "odometry", app.nodes["gmapping"]["GMapping"], "odometry")

    gmap = app.nodes['gmapping']["GMapping"]
    pprint(gmap.config.get_config())

    app.run()
Example #2

from engine.pyalice import Application
from pprint import pprint

# PoseToEdge (below) is an application-defined codelet; its import is not
# part of the original snippet.


def main():
    app = Application(
        app_filename="apps/rm/realsense_cartographer/realsense_cartographer.app.json")

    pose_to_edge = app.nodes["pose_tree_injector"].add(PoseToEdge,
                                                       "PoseToEdge")
    app.connect(app.nodes["visual_odometry_tracker"]["StereoVisualOdometry"],
                "left_camera_pose", pose_to_edge, "pose")
    app.connect(pose_to_edge, "edge",
                app.nodes["pose_tree_injector"]["PoseMessageInjector"], "pose")

    comp = app.nodes['visual_odometry_tracker']["StereoVisualOdometry"]
    pprint(comp.config.get_config())

    app.run()
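
Both of the examples above end by dumping a component's configuration with get_config(). The same config handle also supports attribute-style reads and writes before app.run(), as the later examples do with camera parameters. A minimal sketch, reusing the gmap handle from Example #1 (the map_size key is hypothetical, purely for illustration):

    # Read the full configuration dict, then override a single key.
    cfg = gmap.config.get_config()
    pprint(cfg)
    # 'map_size' is a hypothetical key for illustration only; pick a key
    # that actually appears in the dict printed above.
    gmap.config.map_size = [100.0, 100.0]
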
Example #3
from engine.pyalice import Application


def setup_app():
  app = Application(name="follow_path", modules=["composite"])
  atlas = app.add("atlas").add(app.registry.isaac.composite.CompositeAtlas)
  atlas.config["cask"] = "packages/composite/tests/waypoints"

  pub_node = app.add("pub")
  pub = pub_node.add(app.registry.isaac.composite.CompositePublisher, name="CompositePublisher")
  pub.config.tick_period = "20Hz"
  pub.config.atlas = "atlas/CompositeAtlas"
  pub.config.path = ["cart", "dolly"]

  follow_node = app.add("follow")
  follow = follow_node.add(app.registry.isaac.composite.FollowPath)
  follow.config.tick_period = "10Hz"
  follow.config.wait_time = 0.05
  follow_node.add(app.registry.isaac.composite.CompositeMetric)

  app.connect(pub, "path", follow, "path")
  app.connect(follow, "goal", follow, "state")
  return app, follow_node
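
setup_app() returns both the application and the follow node, which suggests a test harness drives it: the goal output is looped straight back into the state input, apparently so that FollowPath always sees its own command as the measured state. A usage sketch, assuming the start()/stop() API implied by start_wait_stop() in Example #7:

    import time
    from pprint import pprint

    app, follow_node = setup_app()
    app.start()                    # run the graph in the background
    time.sleep(2.0)                # let CompositePublisher and FollowPath tick
    metric = follow_node["CompositeMetric"]   # component added in setup_app()
    pprint(metric.config.get_config())        # inspect whatever the metric exposes
    app.stop()
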
Example #4

from engine.pyalice import Application


def main(args):
    app = Application(name="detect_net_inference")

    # Load subgraph and get interface node
    app.load("packages/detect_net/apps/detect_net_inference.subgraph.json",
             prefix="detect_net_inference")
    detect_net_interface = app.nodes["detect_net_inference.subgraph"]\
        .components["interface"]

    # Load configuration
    app.load(args.config)

    # Configure detection model
    detection_model = app.nodes["detect_net_inference.tensor_r_t_inference"]\
        .components["isaac.ml.TensorRTInference"]
    if args.detection_model_file_path is not None:
        detection_model.config.model_file_path = args.detection_model_file_path
    if args.etlt_password is not None:
        detection_model.config.etlt_password = args.etlt_password

    # Configure detection decoder
    decoder = app.nodes["detect_net_inference.detection_decoder"]\
        .components["isaac.detect_net.DetectNetDecoder"]
    decoder.config.output_scale = [args.rows, args.cols]
    if args.confidence_threshold is not None:
        decoder.config.confidence_threshold = args.confidence_threshold
    if args.nms_threshold is not None:
        decoder.config.non_maximum_suppression_threshold = args.nms_threshold

    if args.mode == 'cask':
        # Load replay subgraph and configure interface node
        app.load("packages/record_replay/apps/replay.subgraph.json",
                 prefix="replay")
        replay_interface = app.nodes["replay.interface"].components["output"]
        replay_interface.config.cask_directory = args.cask_directory

        # Connect the output of the replay subgraph to the detection subgraph
        app.connect(replay_interface, "color", detect_net_interface, "image")
    elif args.mode == 'sim':
        # Load simulation subgraph and get interface node
        app.load("packages/navsim/apps/navsim_training.subgraph.json",\
             prefix="simulation")
        simulation_interface = app.nodes["simulation.interface"].components[
            "output"]

        # Connect the output of the simulation with user-specified channel to the detection subgraph
        app.connect(simulation_interface, args.image_channel,
                    detect_net_interface, "image")
    elif args.mode == 'realsense':
        app.load_module('realsense')
        # Create and configure realsense camera codelet
        camera = app.add("camera").add(app.registry.isaac.RealsenseCamera)
        camera.config.rows = args.rows
        camera.config.cols = args.cols
        camera.config.color_framerate = args.fps
        camera.config.depth_framerate = args.fps
        camera.config.enable_ir_stereo = False

        # Connect the output of the camera node to the detection subgraph
        app.connect(camera, "color", detect_net_inferface, "image")
    elif args.mode == 'v4l':
        app.load_module('sensors:v4l2_camera')
        # Create and configure V4L camera codelet
        camera = app.add("camera").add(app.registry.isaac.V4L2Camera)
        camera.config.device_id = 0
        camera.config.rows = args.rows
        camera.config.cols = args.cols
        camera.config.rate_hz = args.fps

        # Connect the output of the camera node to the detection subgraph
        app.connect(camera, "frame", detect_net_inferface, "image")
    elif args.mode == 'image':
        app.load_module('message_generators')
        # Create feeder node
        feeder = app.add("feeder").add(
            app.registry.isaac.message_generators.ImageLoader)
        feeder.config.color_glob_pattern = args.image_directory
        feeder.config.tick_period = "1Hz"
        feeder.config.focal_length = [args.focal_length, args.focal_length]
        feeder.config.optical_center = [
            args.optical_center_rows, args.optical_center_cols
        ]
        feeder.config.distortion_coefficients = [0.01, 0.01, 0.01, 0.01, 0.01]

        # Connect the output of the image feeder node to the detection subgraph
        app.connect(feeder, "color", detect_net_interface, "image")
    else:
        raise ValueError('Unsupported mode {}'.format(args.mode))
    app.run()
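
main(args) reads a large number of fields from args, but the argument parser itself is not part of the snippet. A plausible skeleton, with flag names inferred from the attribute accesses above and all defaults purely illustrative:

    import argparse

    parser = argparse.ArgumentParser(description='DetectNet inference')
    parser.add_argument('--mode', choices=['cask', 'sim', 'realsense', 'v4l', 'image'], default='cask')
    parser.add_argument('--config', help='Configuration file to load')
    parser.add_argument('--detection_model_file_path', help='Path to the detection model')
    parser.add_argument('--etlt_password', help='Password for an .etlt model file')
    parser.add_argument('--rows', type=int, default=720)
    parser.add_argument('--cols', type=int, default=1280)
    parser.add_argument('--fps', type=int, default=30)
    parser.add_argument('--confidence_threshold', type=float)
    parser.add_argument('--nms_threshold', type=float)
    # ...plus cask_directory, image_channel, image_directory, focal_length,
    # optical_center_rows and optical_center_cols, following the same pattern.
    args, _ = parser.parse_known_args()
    main(args)
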
Example #5

    splitter_left = app.add('camera_splitter_left').add(
        app.registry.isaac.utils.ColorCameraProtoSplitter)
    splitter_left.config.only_pinhole = False

    splitter_right = app.add('camera_splitter_right').add(
        app.registry.isaac.utils.ColorCameraProtoSplitter)
    splitter_right.config.only_pinhole = False

    tracker = app.add('tracker').add(app.registry.isaac.StereoVisualOdometry)
    tracker.config.horizontal_stereo_camera = True
    tracker.config.process_imu_readings = False
    tracker.config.lhs_camera_frame = "left_ir_camera"
    tracker.config.rhs_camera_frame = "right_ir_camera"

    viewer = app.add('viewers').add(
        app.registry.isaac.viewers.ColorCameraViewer)
    viewer.config.reduce_scale = 2

    app.connect(camera, "left_ir", splitter_left, "color_camera")
    app.connect(splitter_left, "image", tracker, "left_image")
    app.connect(splitter_left, "intrinsics", tracker, "left_intrinsics")

    app.connect(camera, "right_ir", splitter_right, "color_camera")
    app.connect(splitter_right, "image", tracker, "right_image")
    app.connect(splitter_right, "intrinsics", tracker, "right_intrinsics")

    app.connect(camera, "left_ir", viewer, "color_listener")

    app.load("apps/samples/stereo_vo/svo_realsense.config.json")

    app.run()
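
The beginning of this snippet was lost. Judging from the left_ir/right_ir channels it consumes and the Realsense config file it loads, the missing setup plausibly looked something like this (a sketch, not the original code; the module list and node name are guesses):

    app = Application(name="svo_realsense",
                      modules=['realsense', 'utils', 'viewers'])

    # IR stereo pair from the Realsense camera; the enable_ir_stereo key
    # also appears in the detect_net example above.
    camera = app.add('camera').add(app.registry.isaac.RealsenseCamera)
    camera.config.enable_ir_stereo = True
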
Example #6

    parser.add_argument('--fps',
                        dest='fps',
                        type=float,
                        default=3.0,
                        help='The framerate at which to record')
    parser.add_argument('--rows',
                        dest='rows',
                        type=int,
                        default=720,
                        help='The vertical resolution of the camera images')
    parser.add_argument('--cols',
                        dest='cols',
                        type=int,
                        default=1280,
                        help='The horizontal resolution of the camera images')
    args, _ = parser.parse_known_args()

    app = Application(name="record_dummy", modules=["message_generators"])

    # Create recorder node
    recorder = app.add("recorder").add(app.registry.isaac.alice.Recorder)
    recorder.config.base_directory = args.base_directory

    # Create dummy camera codelet
    camera = app.add("cam").add(
        app.registry.isaac.message_generators.CameraGenerator)
    camera.config.rows = args.rows
    camera.config.cols = args.cols
    camera.config.tick_period = str(args.fps) + "Hz"
    app.connect(camera, "color_left", recorder, "color")
    app.connect(camera, "depth", recorder, "depth")

    app.run()
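
A cask recorded this way can be played back with the replay subgraph that Example #4 already uses. A minimal sketch, assuming the same interface and channel names:

    app = Application(name="replay_dummy", modules=["viewers"])
    app.load("packages/record_replay/apps/replay.subgraph.json", prefix="replay")
    replay_interface = app.nodes["replay.interface"].components["output"]
    replay_interface.config.cask_directory = "/tmp/data"  # wherever the recording went

    viewer = app.add("viewer").add(app.registry.isaac.viewers.ColorCameraViewer)
    app.connect(replay_interface, "color", viewer, "color_listener")
    app.run()
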
Example #7
        more_jsons += "," + args.more
    app_path = "apps/navsim/navsim_navigate.app.json"
    app = Application(app_filename=app_path, more_jsons=more_jsons)

    if args.mission_robot_name:
        # Load the mission subgraph and set the config based on the input parameters
        app.load("packages/behavior_tree/apps/missions.graph.json")
        app.nodes["tcp_client"].components["JsonTcpClient"].config[
            "host"] = args.mission_host
        app.nodes["tcp_client"].components["JsonTcpClient"].config[
            "port"] = args.mission_port
        app.nodes["mission_control"].components["NodeGroup"].config["node_names"] = \
            ["goals.goal_behavior"]
        app.nodes["robot_name"].components["JsonMockup"].config["json_mock"] = \
            {"text":args.mission_robot_name}
        run_on_start = app.nodes["goals.run_on_start"]
        # Change the start behavior to the mission behavior
        nodes = run_on_start.components["NodeGroup"].config["node_names"]
        run_on_start.components["NodeGroup"].config["node_names"] = nodes + [
            "mission_control"
        ]
        run_on_start.components["SwitchBehavior"].config[
            "desired_behavior"] = "mission_control"
        # Send the navigation output back through the json tcp client
        app.connect(app.nodes["navigation.subgraph"].components["interface"],
                    "feedback",
                    app.nodes["tcp_client"].components["JsonTcpClient"],
                    "feedback")

    app.start_wait_stop()
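
This snippet also starts mid-function; the mission-related fields it reads suggest a parser along these lines (flag names inferred from the args accesses above, defaults purely illustrative):

    import argparse

    parser = argparse.ArgumentParser(description='Navigate in navsim, optionally under mission control')
    parser.add_argument('--more', help='Comma-separated list of additional json files to load')
    parser.add_argument('--mission_robot_name', help='Robot name to register with the mission server')
    parser.add_argument('--mission_host', default='localhost', help='Mission server hostname')
    parser.add_argument('--mission_port', type=int, default=9998, help='Mission server TCP port')
    args, _ = parser.parse_known_args()
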
Example #8
    create_composite_atlas(args.cask, joints)

    # Create and start the app
    app = Application(name="Shuffle Box Hardware")
    # Load behavior subgraph. This contains the sequence behavior to move the arm
    # between waypoints and control suction on/off.
    app.load("apps/samples/manipulation/shuffle_box_behavior.subgraph.json", prefix="behavior")
    behavior_interface = app.nodes["behavior.interface"]["subgraph"]
    app.nodes["behavior.atlas"]["CompositeAtlas"].config.cask = args.cask
    app.load("packages/planner/apps/multi_joint_lqr_control.subgraph.json", prefix="lqr")

    # load multi joint lqr control subgraph
    lqr_interface = app.nodes["lqr.subgraph"]["interface"]
    kinematic_tree = app.nodes["lqr.kinematic_tree"]["KinematicTree"]
    lqr_planner = app.nodes["lqr.local_plan"]["MultiJointLqrPlanner"]
    app.connect(behavior_interface, "joint_target", lqr_interface, "joint_target")
    kinematic_tree.config.kinematic_file = kinematic_file
    lqr_planner.config.speed_min = [-1.0] * len(joints)
    lqr_planner.config.speed_max = [1.0] * len(joints)
    lqr_planner.config.acceleration_min = [-1.0] * len(joints)
    lqr_planner.config.acceleration_max = [1.0] * len(joints)

    # load hardware subgraph
    app.load_module("universal_robots")
    arm = app.add("hardware").add(app.registry.isaac.universal_robots.UniversalRobots)
    arm.config.robot_ip = "10.32.221.190"
    arm.config.control_mode = "joint position"
    arm.config.tick_period = '125Hz'
    arm.config.kinematic_tree = "lqr.kinematic_tree"
    arm.config.tool_digital_out_names = ["valve", "pump"]
    arm.config.tool_digital_in_names = ["unknown", "gripper"]
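
The snippet breaks off before the arm driver is wired to the controller. The missing edges presumably close the loop between the LQR planner and the hardware, roughly as follows (the channel names are guesses; the real subgraph interface may differ):

    # Guessed channel names, for illustration only.
    app.connect(lqr_interface, "joint_command", arm, "arm_command")
    app.connect(arm, "arm_state", lqr_interface, "joint_state")
    app.run()
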
Example #9

    # Create and start the app
    app = Application(name="Shuffle Box")
    # Load behavior subgraph. This contains the sequence behavior to move the arm
    # between waypoints and control suction on/off.
    app.load("apps/samples/manipulation/shuffle_box_behavior.subgraph.json",
             prefix="behavior")
    behavior_interface = app.nodes["behavior.interface"]["subgraph"]
    app.nodes["behavior.atlas"]["CompositeAtlas"].config.cask = args.cask
    app.load("packages/planner/apps/multi_joint_lqr_control.subgraph.json",
             prefix="lqr")

    # load multi joint lqr control subgraph
    lqr_interface = app.nodes["lqr.subgraph"]["interface"]
    kinematic_tree = app.nodes["lqr.kinematic_tree"]["KinematicTree"]
    lqr_planner = app.nodes["lqr.local_plan"]["MultiJointLqrPlanner"]
    app.connect(behavior_interface, "joint_target", lqr_interface,
                "joint_target")
    kinematic_tree.config.kinematic_file = kinematic_file
    lqr_planner.config.speed_min = [-1.5] + [-1.0] * (len(joints) - 1)
    lqr_planner.config.speed_max = [1.5] + [1.0] * (len(joints) - 1)
    lqr_planner.config.acceleration_min = [-1.5] + [-1.0] * (len(joints) - 1)
    lqr_planner.config.acceleration_max = [1.5] + [1.0] * (len(joints) - 1)

    # Load perception subgraph for KLTBox detection in the ur10 use case
    use_perception = False
    if args.arm == "ur10":
        use_perception = True
        app.load(
            "packages/object_pose_estimation/apps/pose_cnn_decoder" \
            "/detection_pose_estimation_cnn_inference.subgraph.json",
            prefix="detection_pose_estimation")
        perception_interface = app.nodes[
Example #10

    parser.add_argument('--mode',
                        dest='mode',
                        choices=['720p', '640x480'],
                        default='720p',
                        help='The resolution of the camera images')
    args, _ = parser.parse_known_args()

    app = Application(name="record_realsense", modules=["realsense"])

    if args.mode == '720p':
        rows = 720
        cols = 1280
    elif args.mode == '640x480':
        # 640x480 is width x height, i.e. 640 columns by 480 rows
        rows = 480
        cols = 640
    else:
        raise ValueError('Unsupported camera resolution {}'.format(args.mode))

    # Create recorder node
    recorder = app.add("recorder").add(app.registry.isaac.alice.Recorder)
    recorder.config.base_directory = args.base_directory

    # Create realsense camera codelet
    camera = app.add("cam").add(app.registry.isaac.RealsenseCamera)
    camera.config.rows = rows
    camera.config.cols = cols
    camera.config.color_framerate = args.fps
    camera.config.depth_framerate = args.fps
    app.connect(camera, "color", recorder, "color")
    app.connect(camera, "depth", recorder, "depth")

    app.run()
Example #11

    if args.camera == "realsense":
        camera = app.add('input_images').add(app.registry.isaac.RealsenseCamera)
        camera.config.cols, camera.config.rows = tuple(
            [int(arg) for arg in args.resolution.split('x')])
        camera.config.color_framerate = args.framerate
        camera_out_channel = "color"
    elif args.camera == "v4l2":
        camera = app.add('input_images').add(app.registry.isaac.V4L2Camera)
        if args.device_id is None:
            raise ValueError('No device id provided. Please pass a device id for the v4l2 camera')
        camera.config.device_id = args.device_id
        camera.config.cols, camera.config.rows = tuple(
            [int(arg) for arg in args.resolution.split('x')])
        camera.config.rate_hz = args.framerate
        camera_out_channel = "frame"
    else:
        raise ValueError('Unsupported camera type {}'.format(args.camera))
    # Setup april tag node
    tags_detection = app.add('april_tags_detection').add(
        app.registry.isaac.perception.AprilTagsDetection)
    tags_detection.config.max_tags = 50
    fiducials_viewer = app.nodes['april_tags_detection'].add(
        app.registry.isaac.viewers.FiducialsViewer)
    # Setup viewer node
    viewer = app.add('image_viewers').add(app.registry.isaac.viewers.ColorCameraViewer)
    # Connect message channels
    app.connect(camera, camera_out_channel, tags_detection, "image")
    app.connect(tags_detection, "april_tags", fiducials_viewer, "fiducials")
    app.connect(camera, camera_out_channel, viewer, "color_listener")

    app.load("apps/samples/april_tags/april_tags_python.config.json")

    app.run()
Example #12
from engine.pyalice import Application

app = Application(name = "mybot")
app.load_module('message_generators')
app.load_module('viewers')

app.load('packages/navsim/apps/navsim_tcp.subgraph.json', 'simulation')

node_view = app.add("viewer")
component_view = node_view.add(app.registry.isaac.viewers.ColorCameraViewer, 'ColorCameraViewer')

node_view = app.add("depth_viewer")
node_view.add(app.registry.isaac.viewers.DepthCameraViewer, 'DepthCameraViewer')

app.connect('simulation.interface/output', 'color', 'viewer/ColorCameraViewer', 'color_listener')
app.connect('simulation.interface/output', 'depth', 'depth_viewer/DepthCameraViewer', 'depth_listener')
app.run()
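
Example #12 uses the string form of connect, addressing endpoints as 'node/component' paths, while the earlier examples pass component handles. The two forms should be interchangeable, so the same wiring could also be written as:

    sim_output = app.nodes['simulation.interface']['output']
    viewer = app.nodes['viewer']['ColorCameraViewer']
    depth_viewer = app.nodes['depth_viewer']['DepthCameraViewer']

    app.connect(sim_output, 'color', viewer, 'color_listener')
    app.connect(sim_output, 'depth', depth_viewer, 'depth_listener')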