Example #1
    app.nodes["behavior.atlas"]["CompositeAtlas"].config.cask = args.cask
    app.load("packages/planner/apps/multi_joint_lqr_control.subgraph.json", prefix="lqr")

    # load multi joint lqr control subgraph
    lqr_interface = app.nodes["lqr.subgraph"]["interface"]
    kinematic_tree = app.nodes["lqr.kinematic_tree"]["KinematicTree"]
    lqr_planner = app.nodes["lqr.local_plan"]["MultiJointLqrPlanner"]
    app.connect(behavior_interface, "joint_target", lqr_interface, "joint_target")

    # Configure the kinematic tree and per-joint speed/acceleration limits
    kinematic_tree.config.kinematic_file = kinematic_file
    lqr_planner.config.speed_min = [-1.0] * len(joints)
    lqr_planner.config.speed_max = [1.0] * len(joints)
    lqr_planner.config.acceleration_min = [-1.0] * len(joints)
    lqr_planner.config.acceleration_max = [1.0] * len(joints)

    # Create and configure the UR arm hardware node
    app.load_module("universal_robots")
    arm = app.add("hardware").add(app.registry.isaac.universal_robots.UniversalRobots)
    arm.config.robot_ip = "10.32.221.190"
    arm.config.control_mode = "joint position"
    arm.config.tick_period = "125Hz"
    arm.config.kinematic_tree = "lqr.kinematic_tree"
    arm.config.tool_digital_out_names = ["valve", "pump"]
    arm.config.tool_digital_in_names = ["unknown", "gripper"]

    # Wire arm state and IO between the driver, the LQR planner, and the behavior layer
    app.connect(arm, "arm_state", lqr_interface, "joint_state")
    app.connect(arm, "arm_state", behavior_interface, "joint_state")
    app.connect(arm, "io_state", behavior_interface, "io_state")
    app.connect(lqr_interface, "joint_command", arm, "arm_command")
    app.connect(behavior_interface, "io_command", arm, "io_command")

    # visualize IO in Sight
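The fragment above is an excerpt: it breaks off at the comment and runs inside a main() that is not shown, relying on app, args, behavior_interface, kinematic_file, and joints being defined earlier. A minimal sketch of that missing preamble, where the subgraph path, flag names, and joint list are assumptions rather than the original values:

from engine.pyalice import Application

def main(args):
    app = Application(name="multi_joint_lqr_example")

    # Hypothetical behavior subgraph exposing the "interface" node used as
    # behavior_interface above; the path and prefix are assumptions.
    app.load(args.behavior_subgraph, prefix="behavior")
    behavior_interface = app.nodes["behavior.subgraph"]["interface"]

    # Illustrative stand-ins; use the real arm's kinematic file and joint names.
    kinematic_file = args.kinematic_file
    joints = ["shoulder_pan", "shoulder_lift", "elbow",
              "wrist_1", "wrist_2", "wrist_3"]
    # ... the fragment above continues from here ...
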
Example #2
def main(args):
    app = Application(name="detect_net_inference")

    # Load subgraph and get interface node
    app.load("packages/detect_net/apps/detect_net_inference.subgraph.json",
             prefix="detect_net_inference")
    detect_net_interface = app.nodes["detect_net_inference.subgraph"]\
        .components["interface"]

    # Load configuration
    app.load(args.config)

    # Configure detection model
    detection_model = app.nodes["detect_net_inference.tensor_r_t_inference"]\
        .components["isaac.ml.TensorRTInference"]
    if args.detection_model_file_path is not None:
        detection_model.config.model_file_path = args.detection_model_file_path
    if args.etlt_password is not None:
        detection_model.config.etlt_password = args.etlt_password

    # Configure detection decoder
    decoder = app.nodes["detect_net_inference.detection_decoder"]\
        .components["isaac.detect_net.DetectNetDecoder"]
    decoder.config.output_scale = [args.rows, args.cols]
    if args.confidence_threshold is not None:
        decoder.config.confidence_threshold = args.confidence_threshold
    if args.nms_threshold is not None:
        decoder.config.non_maximum_suppression_threshold = args.nms_threshold

    if args.mode == 'cask':
        # Load replay subgraph and configure interface node
        app.load("packages/record_replay/apps/replay.subgraph.json",
                 prefix="replay")
        replay_interface = app.nodes["replay.interface"].components["output"]
        replay_interface.config.cask_directory = args.cask_directory

        # Connect the output of the replay subgraph to the detection subgraph
        app.connect(replay_interface, "color", detect_net_interface, "image")
    elif args.mode == 'sim':
        # Load simulation subgraph and get interface node
        app.load("packages/navsim/apps/navsim_training.subgraph.json",\
             prefix="simulation")
        simulation_interface = app.nodes["simulation.interface"].components[
            "output"]

        # Connect the output of the simulation with user-specified channel to the detection subgraph
        app.connect(simulation_interface, args.image_channel,
                    detect_net_interface, "image")
    elif args.mode == 'realsense':
        app.load_module('realsense')
        # Create and configure realsense camera codelet
        camera = app.add("camera").add(app.registry.isaac.RealsenseCamera)
        camera.config.rows = args.rows
        camera.config.cols = args.cols
        camera.config.color_framerate = args.fps
        camera.config.depth_framerate = args.fps
        camera.config.enable_ir_stereo = False

        # Connect the output of the camera node to the detection subgraph
        app.connect(camera, "color", detect_net_interface, "image")
    elif args.mode == 'v4l':
        app.load_module('sensors:v4l2_camera')
        # Create and configure V4L camera codelet
        camera = app.add("camera").add(app.registry.isaac.V4L2Camera)
        camera.config.device_id = 0
        camera.config.rows = args.rows
        camera.config.cols = args.cols
        camera.config.rate_hz = args.fps

        # Connect the output of the camera node to the detection subgraph
        app.connect(camera, "frame", detect_net_interface, "image")
    elif args.mode == 'image':
        app.load_module('message_generators')
        # Create feeder node
        feeder = app.add("feeder").add(
            app.registry.isaac.message_generators.ImageLoader)
        feeder.config.color_glob_pattern = args.image_directory
        feeder.config.tick_period = "1Hz"
        feeder.config.focal_length = [args.focal_length, args.focal_length]
        feeder.config.optical_center = [
            args.optical_center_rows, args.optical_center_cols
        ]
        feeder.config.distortion_coefficients = [0.01, 0.01, 0.01, 0.01, 0.01]

        # Connect the output of the image feeder node to the detection subgraph
        app.connect(feeder, "color", detect_net_interface, "image")
    else:
        raise ValueError('Unsupported mode: {}'.format(args.mode))
    app.run()
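
The example reads a number of attributes from args but does not show the entry point. A minimal sketch of a matching argparse entry point follows; the flag names mirror the attributes read by main(), while the defaults are illustrative placeholders, not the original app's values:

if __name__ == '__main__':
    import argparse

    # Defaults below are illustrative placeholders.
    parser = argparse.ArgumentParser(description='DetectNet inference example')
    parser.add_argument('--mode', default='image',
                        choices=['cask', 'sim', 'realsense', 'v4l', 'image'])
    parser.add_argument('--config', required=True,
                        help='Configuration file to load')
    parser.add_argument('--detection_model_file_path', default=None)
    parser.add_argument('--etlt_password', default=None)
    parser.add_argument('--confidence_threshold', type=float, default=None)
    parser.add_argument('--nms_threshold', type=float, default=None)
    parser.add_argument('--rows', type=int, default=720)
    parser.add_argument('--cols', type=int, default=1280)
    parser.add_argument('--fps', type=int, default=30)
    parser.add_argument('--cask_directory', default=None)
    parser.add_argument('--image_channel', default='color')
    parser.add_argument('--image_directory', default=None)
    parser.add_argument('--focal_length', type=float, default=700.0)
    parser.add_argument('--optical_center_rows', type=float, default=360.0)
    parser.add_argument('--optical_center_cols', type=float, default=640.0)
    main(parser.parse_args())
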
Example #3
from engine.pyalice import Application

app = Application(name="mybot")
app.load_module('message_generators')
app.load_module('viewers')

app.load('packages/navsim/apps/navsim_tcp.subgraph.json', prefix='simulation')

viewer_node = app.add("viewer")
color_viewer = viewer_node.add(app.registry.isaac.viewers.ColorCameraViewer, 'ColorCameraViewer')

depth_viewer_node = app.add("depth_viewer")
depth_viewer = depth_viewer_node.add(app.registry.isaac.viewers.DepthCameraViewer, 'DepthCameraViewer')

app.connect('simulation.interface/output', 'color', 'viewer/ColorCameraViewer', 'color_listener')
app.connect('simulation.interface/output', 'depth', 'depth_viewer/DepthCameraViewer', 'depth_listener')
app.run()
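
The connections here use the 'node/component' string form. As the earlier examples show, Application.connect also accepts the component handles returned by add(), so an equivalent wiring for this app would be:

# Equivalent connections using component handles instead of string paths.
sim_output = app.nodes['simulation.interface']['output']
app.connect(sim_output, 'color', color_viewer, 'color_listener')
app.connect(sim_output, 'depth', depth_viewer, 'depth_listener')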