def main(args):
    """Builds and runs the detection + pose estimation inference app fed by ROS topics."""
    app = Application(name="detection_pose_estimation_inference")

    # Load the inference subgraph and grab its interface component.
    app.load(
        "packages/object_pose_estimation/apps/pose_cnn_decoder/"
        "detection_pose_estimation_cnn_inference.subgraph.json",
        prefix="detection_pose_estimation")
    inference_interface = (
        app.nodes["detection_pose_estimation.interface"].components["Subgraph"])

    # Apply the user-provided configuration file.
    app.load(args.config)

    # Detection model: CLI flags override config only when explicitly given.
    detection_trt = (
        app.nodes["detection_pose_estimation.object_detection.tensor_r_t_inference"]
        .components["isaac.ml.TensorRTInference"])
    if args.detection_model_file_path is not None:
        detection_trt.config.model_file_path = args.detection_model_file_path
    if args.etlt_password is not None:
        detection_trt.config.etlt_password = args.etlt_password

    # Pose estimation model.
    pose_trt = (
        app.nodes["detection_pose_estimation.object_pose_estimation.pose_encoder"]
        .components["TensorRTInference"])
    if args.pose_estimation_model_file_path is not None:
        pose_trt.config.model_file_path = args.pose_estimation_model_file_path

    # Detection decoder: output scale is set to the configured [rows, cols].
    detect_decoder = (
        app.nodes["detection_pose_estimation.object_detection.detection_decoder"]
        .components["isaac.detect_net.DetectNetDecoder"])
    detect_decoder.config.output_scale = [args.rows, args.cols]
    if args.confidence_threshold is not None:
        detect_decoder.config.confidence_threshold = args.confidence_threshold
    if args.nms_threshold is not None:
        detect_decoder.config.non_maximum_suppression_threshold = args.nms_threshold

    # Bounding box encoder.
    box_encoder = (
        app.nodes["detection_pose_estimation.object_pose_estimation.detection_convertor"]
        .components["BoundingBoxEncoder"])
    box_encoder.config.image_dimensions = [args.rows, args.cols]

    # Bring in the ROS bridge so image + intrinsics come from ROS.
    app.load("packages/ros_bridge/apps/ros_to_perception.subgraph.json", prefix="ros")
    ros_converters = app.nodes["ros.ros_converters"]
    ros_to_image = ros_converters.components["RosToImage"]
    ros_to_intrinsics = ros_converters.components["RosToCameraIntrinsics"]

    # Feed the converted ROS messages into the inference subgraph.
    app.connect(ros_to_image, "proto", inference_interface, "color")
    app.connect(ros_to_intrinsics, "proto", inference_interface, "intrinsics")
    app.run()
# Example #2 (score: 0)
def main():
    """Runs the hello-world graph with a Python HelloWorld codelet attached."""
    # Required when using the isaac Python API: fix up capnp schema paths.
    patch_capnp_paths()

    application = Application(app_filename="app/graphs/graph.app.json")

    # Attach the Python HelloWorld codelet as the "print" component of hello_node.
    application.nodes["hello_node"].add(HelloWorld, "print")

    # Required when using the isaac Python API: point Websight at webroot/assetroot.
    configure_websight_root(application)

    application.run()
# Example #3 (score: 0)
def main():
    """Wires Python Sender/Receiver codelets into the hello_world_py graph and runs it."""
    # Required when using the isaac Python API: fix up capnp schema paths.
    patch_capnp_paths()

    application = Application(
        app_filename="apps/hello_world_py/graphs/graph.app.json")

    # Attach the Python codelets to their nodes in the graph.
    application.nodes["sender"].add(Sender, "sender_component")
    application.nodes["receiver"].add(Receiver, "receiver_component")

    # Required when using the isaac Python API: point Websight at webroot/assetroot.
    configure_websight_root(application)

    application.run()
def main():
    """Runs the object detection app against sim or a live v4l2 camera.

    Adds the Python EdgeDetector codelet to the graph's edge_detector node.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--simulate",
        action="store_true",
        # Fixed typo in the help text ("issac" -> "isaac").
        help="uses isaac sim unity3d instead of a v4l2 camera.",
    )
    args = parser.parse_args()

    # Pick the graph matching the requested input source.
    app_file = ("apps/isaac_object_detection/graphs/detection_unity3d.app.json"
                if args.simulate else
                "apps/isaac_object_detection/graphs/detection.app.json")

    app = Application(app_filename=app_file)
    app.nodes["edge_detector.subgraph.edge_detector"].add(EdgeDetector, "detector")
    app.run()
# Example #5 (score: 0)
def main(args):
    '''
    Creates and runs block pose estimation app on RGBD image. Input image may come from sim (
    default), cask recording or realsense camera.

    To run with sim (sim needs to be running first):
      bazel run apps/samples/pick_and_place:block_pose_estimation
    To see all options:
      bazel run apps/samples/pick_and_place:block_pose_estimation -- --help

    Raises:
      ValueError: if args.mode is not one of 'cask', 'realsense', 'sim'.
    '''
    app = Application(name="block_pose_estimation")
    perception_interface = create_block_pose_estimation(app, use_refinement=args.refinement)

    if args.mode == 'cask':
        assert args.cask is not None, "cask path not specified. use --cask to set."
        # Load replay subgraph and configure interface node
        app.load("packages/cask/apps/replay.subgraph.json", prefix="replay")
        source = app.nodes["replay.interface"].components["output"]
        source.config.cask_directory = args.cask
        source.config.loop = True
    elif args.mode == "realsense":
        # Create realsense camera codelet
        app.load_module("realsense")
        source = app.add("camera").add(app.registry.isaac.RealsenseCamera)
        source.config.rows = 720
        source.config.cols = 1280
        source.config.color_framerate = 15
        source.config.depth_framerate = 15
    elif args.mode == "sim":
        app.load(filename='packages/navsim/apps/navsim_tcp.subgraph.json', prefix='simulation')
        source = app.nodes["simulation.interface"]["output"]
    else:
        # Fail fast on an unknown mode; previously this fell through and hit a
        # NameError on `source` below. Matches the other sample apps' behavior.
        raise ValueError('Not supported mode {}'.format(args.mode))

    # Route the RGBD stream from the chosen source into the perception subgraph.
    app.connect(source, "color", perception_interface, "color")
    app.connect(source, "depth", perception_interface, "depth")
    app.connect(source, "color_intrinsics", perception_interface, "intrinsics")

    # Inject estimated poses into the pose tree, anchored at the camera frame.
    app.load_module("utils")
    pose_node = app.add("pose")
    pose_injector = pose_node.add(app.registry.isaac.utils.DetectionsToPoseTree)
    pose_injector.config.detection_frame = "camera"
    app.connect(perception_interface, "output_poses", pose_injector, "detections")

    app.run()
# Example #6 (score: 0)
def main():
    """Runs the edge detection app against sim or a live v4l2 camera.

    Adds the Python EdgeDetector codelet to the edge_detector node.
    """
    # Required when using the isaac Python API: fix up capnp schema paths.
    patch_capnp_paths()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--simulate",
        action="store_true",
        # Fixed typo in the help text ("issac" -> "isaac").
        help="uses isaac sim unity3d instead of a v4l2 camera.",
    )
    args = parser.parse_args()

    # Pick the graph matching the requested input source.
    if args.simulate:
        app_file = "app/graphs/edge_detection_unity3d.app.json"
    else:
        app_file = "app/graphs/edge_detection.app.json"

    # creating app
    app = Application(app_filename=app_file)

    # adding EdgeDetector codelet to the detector component of the edge_detector node
    app.nodes["edge_detector"].add(EdgeDetector, "detector")
    configure_websight_root(app)

    # running the application
    app.run()
        # NOTE(review): this fragment is truncated — its enclosing function header
        # and the preceding `if args.camera == ...` branch are missing from this
        # file (likely a copy/paste or scraping artifact). Code left byte-identical.
        camera_out_channel = "color"
    elif args.camera == "v4l2":
        camera = app.add('input_images').add(app.registry.isaac.V4L2Camera)
        if args.device_id == None:
            raise ValueError('Could not set None. Please provide device id')
        camera.config.device_id = args.device_id
        # Parse a "COLSxROWS"-style resolution string (split on 'x'; first value
        # goes to cols, second to rows).
        camera.config.cols, camera.config.rows = tuple(
            [int(arg) for arg in args.resolution.split('x')])
        camera.config.rate_hz = args.framerate
        camera_out_channel = "frame"
    else:
        raise ValueError('Not supported Camera type {}'.format(args.camera))
    # Setup april tag node
    tags_detection = app.add('april_tags_detection').add(
        app.registry.isaac.fiducials.AprilTagsDetection)
    tags_detection.config.max_tags = 50
    fiducials_viewer = app.nodes['april_tags_detection'].add(
        app.registry.isaac.viewers.FiducialsViewer)
    # Setup viewer node
    viewer = app.add('image_viewers').add(
        app.registry.isaac.viewers.ImageViewer)
    # Connect message channels: camera image feeds both the tag detector and the
    # viewer; detected tags feed the fiducials viewer.
    app.connect(camera, camera_out_channel, tags_detection, "image")
    app.connect(camera, "intrinsics", tags_detection, "intrinsics")
    app.connect(tags_detection, "april_tags", fiducials_viewer, "fiducials")
    app.connect(camera, camera_out_channel, viewer, "image")

    app.load("apps/samples/april_tags/april_tags_python.config.json")

    app.run()
# Example #8 (score: 0)
def main(args):
    """Builds and runs detection + pose-estimation CNN inference.

    The input source is selected by ``args.mode``: 'cask' (recorded log
    replay), 'sim' (TCP link to simulation), 'realsense', 'v4l' (V4L2 camera)
    or 'image' (static files); any other value raises ValueError.
    """
    app = Application(name="detection_pose_estimation_inference")

    # Load subgraph and get interface node
    app.load("packages/object_pose_estimation/apps/pose_cnn_decoder/"\
        "detection_pose_estimation_cnn_inference.subgraph.json",
        prefix="detection_pose_estimation")
    detection_pose_estimation_interface = app.nodes["detection_pose_estimation.interface"]\
        .components["Subgraph"]

    # Load configuration
    app.load(args.config)

    # Configure detection model. Optional CLI flags override the loaded config
    # only when explicitly provided.
    detection_model = app.nodes["detection_pose_estimation.object_detection.tensor_r_t_inference"]\
        .components["isaac.ml.TensorRTInference"]
    if args.detection_model_file_path is not None:
        detection_model.config.model_file_path = args.detection_model_file_path
    if args.etlt_password is not None:
        detection_model.config.etlt_password = args.etlt_password

    # Configure pose estimation model
    pose_estimation_model = app.nodes\
        ["detection_pose_estimation.object_pose_estimation.pose_encoder"]\
        .components["TensorRTInference"]
    if args.pose_estimation_model_file_path is not None:
        pose_estimation_model.config.model_file_path = args.pose_estimation_model_file_path

    # Configure detection decoder; its output scale is set to the configured
    # [rows, cols] image size.
    decoder = app.nodes["detection_pose_estimation.object_detection.detection_decoder"]\
        .components["isaac.detect_net.DetectNetDecoder"]
    decoder.config.output_scale = [args.rows, args.cols]
    if args.confidence_threshold is not None:
        decoder.config.confidence_threshold = args.confidence_threshold
    if args.nms_threshold is not None:
        decoder.config.non_maximum_suppression_threshold = args.nms_threshold

    # Configure bounding box encoder
    bbox_encoder = app.nodes\
        ["detection_pose_estimation.object_pose_estimation.detection_convertor"]\
        .components["BoundingBoxEncoder"]
    bbox_encoder.config.image_dimensions = [args.rows, args.cols]

    # Configure detections filter
    detections_filter = app.nodes\
        ["detection_pose_estimation.object_pose_estimation.detection_filter"]\
        .components["FilterDetections"]
    detections_filter.config.image_cols = args.cols

    if args.mode == 'cask':
        # Load replay subgraph and configure interface node
        app.load("packages/cask/apps/replay.subgraph.json", prefix="replay")
        replay_interface = app.nodes["replay.interface"].components["output"]
        replay_interface.config.cask_directory = args.cask_directory

        # Connect the output of the replay subgraph to the detection subgraph
        app.connect(replay_interface, "color",
                    detection_pose_estimation_interface, "color")
        app.connect(replay_interface, "color_intrinsics",
                    detection_pose_estimation_interface, "intrinsics")
    elif args.mode == 'sim':
        # Load simulation subgraph and get interface node
        app.load("packages/object_pose_estimation/apps/pose_cnn_decoder"\
            "/pose_estimation_sim.subgraph.json",\
             prefix="simulation")
        simulation_interface = app.nodes["simulation.interface"].components[
            "TcpSubscriber"]

        # Connect the output of the simulation with user-specified channel to the detection subgraph
        app.connect(simulation_interface, args.image_channel,
                    detection_pose_estimation_interface, "color")
        app.connect(simulation_interface, args.intrinsics_channel,
                    detection_pose_estimation_interface, "intrinsics")
        # Set the scenario manager config options if scene name is provided
        if (args.scenario_scene is not None):
            scenario_manager = app.nodes[
                "simulation.scenario_manager"].components["ScenarioManager"]
            scenario_manager.config.scene = args.scenario_scene
            if (args.scenario_robot_prefab is not None):
                scenario_manager.config.robot_prefab = args.scenario_robot_prefab
    elif args.mode == 'realsense':
        app.load_module('realsense')
        # Create and configure realsense camera codelet
        camera = app.add("camera").add(app.registry.isaac.RealsenseCamera)
        camera.config.rows = args.rows
        camera.config.cols = args.cols
        camera.config.color_framerate = args.fps
        camera.config.depth_framerate = args.fps
        camera.config.enable_ir_stereo = False

        # Connect the output of the camera node to the detection subgraph
        app.connect(camera, "color", detection_pose_estimation_interface,
                    "color")
        app.connect(camera, "color_intrinsics",
                    detection_pose_estimation_interface, "intrinsics")
    elif args.mode == 'v4l':
        app.load_module('sensors:v4l2_camera')
        # Create and configure V4L camera codelet
        camera = app.add("camera").add(app.registry.isaac.V4L2Camera)
        camera.config.device_id = 0
        camera.config.rows = args.rows
        camera.config.cols = args.cols
        camera.config.rate_hz = args.fps

        # Connect the output of the camera node to the detection subgraph
        app.connect(camera, "frame", detection_pose_estimation_interface,
                    "color")
        app.connect(camera, "intrinsics", detection_pose_estimation_interface,
                    "intrinsics")
    elif args.mode == 'image':
        app.load_module('message_generators')
        # Create feeder node
        feeder = app.add("feeder").add(
            app.registry.isaac.message_generators.ImageLoader)
        feeder.config.color_glob_pattern = args.image_directory
        feeder.config.tick_period = "1Hz"

        # Create intrinsic node (presumably because static image files carry no
        # intrinsics, they are synthesized here from CLI parameters)
        intrinsic = app.add("intrinsic").add(
            app.registry.isaac.message_generators.CameraIntrinsicsGenerator)
        intrinsic.config.focal_length = [args.focal_length, args.focal_length]
        intrinsic.config.optical_center = [
            args.optical_center_rows, args.optical_center_cols
        ]
        intrinsic.config.distortion_coefficients = [
            0.01, 0.01, 0.01, 0.01, 0.01
        ]

        # Connect the output of the image feeder node to the detection subgraph
        app.connect(feeder, "color", detection_pose_estimation_interface,
                    "color")
        # Connect the output of the intrinsic node to the detection subgraph
        app.connect(feeder, "color", intrinsic, "image")
        app.connect(intrinsic, "intrinsics",
                    detection_pose_estimation_interface, "intrinsics")
    else:
        raise ValueError('Not supported mode {}'.format(args.mode))
    app.run()
# Example #9 (score: 0)
def main(args):
    """Builds and runs DetectNet inference with a selectable input source.

    Supported ``args.mode`` values: 'cask', 'sim', 'realsense', 'v4l',
    'image'; any other value raises ValueError.
    """
    app = Application(name="detect_net_inference")

    # Load subgraph and get interface node
    app.load("packages/detect_net/apps/detect_net_inference.subgraph.json",
             prefix="detect_net_inference")
    # Fixed misspelled local name (was `detect_net_inferface`).
    detect_net_interface = app.nodes["detect_net_inference.subgraph"]\
        .components["interface"]

    # Load configuration
    app.load(args.config)

    # Configure detection model; CLI flags override the loaded config only
    # when explicitly provided.
    detection_model = app.nodes["detect_net_inference.tensor_r_t_inference"]\
        .components["isaac.ml.TensorRTInference"]
    if args.detection_model_file_path is not None:
        detection_model.config.model_file_path = args.detection_model_file_path
    if args.etlt_password is not None:
        detection_model.config.etlt_password = args.etlt_password

    # Configure detection decoder; output scale matches the configured
    # [rows, cols] image size.
    decoder = app.nodes["detect_net_inference.detection_decoder"]\
        .components["isaac.detect_net.DetectNetDecoder"]
    decoder.config.output_scale = [args.rows, args.cols]
    if args.confidence_threshold is not None:
        decoder.config.confidence_threshold = args.confidence_threshold
    if args.nms_threshold is not None:
        decoder.config.non_maximum_suppression_threshold = args.nms_threshold

    if args.mode == 'cask':
        # Load replay subgraph and configure interface node
        app.load("packages/cask/apps/replay.subgraph.json", prefix="replay")
        replay_interface = app.nodes["replay.interface"].components["output"]
        replay_interface.config.cask_directory = args.cask_directory

        # Connect the output of the replay subgraph to the detection subgraph
        app.connect(replay_interface, "color", detect_net_interface, "image")
    elif args.mode == 'sim':
        # Load simulation subgraph and get interface node
        app.load("packages/navsim/apps/navsim_training.subgraph.json",\
             prefix="simulation")
        simulation_interface = app.nodes["simulation.interface"].components[
            "output"]

        # Connect the output of the simulation with user-specified channel to the detection subgraph
        app.connect(simulation_interface, args.image_channel,
                    detect_net_interface, "image")
    elif args.mode == 'realsense':
        app.load_module('realsense')
        # Create and configure realsense camera codelet
        camera = app.add("camera").add(app.registry.isaac.RealsenseCamera)
        camera.config.rows = args.rows
        camera.config.cols = args.cols
        camera.config.color_framerate = args.fps
        camera.config.depth_framerate = args.fps
        camera.config.enable_ir_stereo = False

        # Connect the output of the camera node to the detection subgraph
        app.connect(camera, "color", detect_net_interface, "image")
    elif args.mode == 'v4l':
        app.load_module('sensors:v4l2_camera')
        # Create and configure V4L camera codelet
        camera = app.add("camera").add(app.registry.isaac.V4L2Camera)
        camera.config.device_id = 0
        camera.config.rows = args.rows
        camera.config.cols = args.cols
        camera.config.rate_hz = args.fps

        # Connect the output of the camera node to the detection subgraph
        app.connect(camera, "frame", detect_net_interface, "image")
    elif args.mode == 'image':
        app.load_module('message_generators')
        # Create feeder node
        feeder = app.add("feeder").add(
            app.registry.isaac.message_generators.ImageLoader)
        feeder.config.color_glob_pattern = args.image_directory
        feeder.config.tick_period = "1Hz"

        # Connect the output of the image feeder node to the detection subgraph
        app.connect(feeder, "color", detect_net_interface, "image")
    else:
        raise ValueError('Not supported mode {}'.format(args.mode))
    app.run()
def main(args):
    """Records simulation images and ground truth (bounding boxes and/or
    poses) into two separate casks, optionally writing RACI metadata JSON
    files that associate the two casks by the application uuid.
    """
    # Image and ground-truth casks must go to different directories; the
    # metadata files written below distinguish the casks by directory.
    if (args.base_directory_images == args.base_directory_gt):
        print("Base directory for image and ground truth logs must not be the same.")
        return

    app = Application(name="record_sim_ground_truth", modules=["viewers"])
    # Capture the uuid once; it names the metadata files for both casks below.
    app_uuid = app.uuid

    # Load simulation subgraph and get interface node
    app.load("packages/navsim/apps/navsim_training.subgraph.json",\
            prefix="simulation")
    simulation_interface = app.nodes["simulation.interface"].components["output"]

    # Load record subgraph for images
    app.load("packages/cask/apps/record.subgraph.json", prefix="image_record")
    image_record_interface = app.nodes["image_record.interface"].components["input"]
    image_record_interface.config.base_directory = args.base_directory_images

    # Load record subgraph for ground truth
    app.load("packages/cask/apps/record.subgraph.json", prefix="ground_truth_record")
    ground_truth_record_interface = app.nodes["ground_truth_record.interface"].components["input"]
    ground_truth_record_interface.config.base_directory = args.base_directory_gt

    # Create viewer codelets
    image_viewer = app.add("image_viewer").add(app.registry.isaac.viewers.ImageViewer)
    image_viewer.config.camera_name = "camera"

    # Connect the image and intrinsics channels to the record interface and viewer
    app.connect(simulation_interface, args.image_channel, image_record_interface, "color")
    app.connect(simulation_interface, args.image_channel, image_viewer, "image")
    app.connect(simulation_interface, args.intrinsics_channel, image_record_interface,\
                "color_intrinsics")
    app.connect(simulation_interface, args.intrinsics_channel, image_viewer, "intrinsics")

    # Set the scenario manager config options if scene name is provided.
    # (Removed a stray debug print of detections3viewer_box_dimensions here.)
    if args.scenario_scene is not None:
        scenario_manager = app.nodes["simulation.scenario_manager"].components["scenario_manager"]
        scenario_manager.config.scene = args.scenario_scene
        if args.scenario_robot_prefab is not None:
            scenario_manager.config.robot_prefab = args.scenario_robot_prefab

    # Connect ground truth data channels according to user input
    mode = args.mode
    if mode == 'bounding_box' or mode == 'all':
        detections_viewer = app.add("detections_viewer").add(
            app.registry.isaac.viewers.DetectionsViewer)
        bbox_conversion = app.nodes["simulation.bounding_boxes"].components["conversion"]
        app.connect(simulation_interface, args.segmentation_channel + "_class", bbox_conversion,
                    "class_segmentation")
        app.connect(simulation_interface, args.segmentation_channel + "_instance",
                    bbox_conversion, "instance_segmentation")
        app.connect(simulation_interface, args.segmentation_channel + "_labels", bbox_conversion,
                    "class_labels")
        app.connect(simulation_interface, "bounding_boxes", ground_truth_record_interface,
                    "bounding_boxes")
        app.connect(simulation_interface, "bounding_boxes", detections_viewer, "detections")

    if mode == 'pose' or mode == 'all':
        app.load_module('ml')
        detections3_viewer = app.add("detections3_viewer").add(
            app.registry.isaac.viewers.Detections3Viewer)
        pose_bodies = app.add("rigid_body_to_detections3").add(
            app.registry.isaac.ml.RigidbodyToDetections3)
        detections3_viewer.config.frame = "camera"
        if args.detections3viewer_box_dimensions is not None:
            # Enable 3D detections in the viewer. SECURITY: these come from the
            # command line, so parse them with ast.literal_eval (literals only)
            # instead of eval(), which would execute arbitrary code.
            import ast
            detections3_viewer.config.box_dimensions = ast.literal_eval(
                args.detections3viewer_box_dimensions)
            detections3_viewer.config.object_T_box_center = ast.literal_eval(
                args.detections3viewer_object_T_box_center)
        app.connect(simulation_interface, "bodies", pose_bodies, "rigid_bodies")
        app.connect(pose_bodies, "detections", ground_truth_record_interface, "gt_poses")
        app.connect(pose_bodies, "detections", detections3_viewer, "detections")
        app.connect(simulation_interface, args.intrinsics_channel, ground_truth_record_interface,
                    "image_intrinsics")

    # Run application
    app.run(args.runtime)

    # Write metadata to JSON data per output cask. The metadata serves to associate
    # corresponding image and ground truth casks. As per RACI evaluation workflow
    # and data management, image casks and ground truth casks are stored in separate
    # directories.
    if args.raci_metadata:
        # Populate image cask metadata
        image_metadata_json = {}
        image_metadata_json["Robot_Name"] = ""
        image_metadata_json["Location/Scene"] = ""
        image_metadata_json["Domain"] = "simulation"
        image_metadata_json["Recording_Time"] = str(datetime.datetime.now().strftime("%Y-%m-%d"))

        # Write image cask metadata
        image_metadata_path = os.path.join(args.base_directory_images, app_uuid + "_md.json")
        with open(image_metadata_path, 'w') as f:
            json.dump(image_metadata_json, f, indent=2)

        # Populate ground truth cask metadata
        ground_truth_metadata_json = {}
        # Use the uuid captured before app.run() (was app.uuid) for consistency.
        ground_truth_metadata_json["Image_Cask_File"] = app_uuid
        ground_truth_metadata_json["Data_Source"] = "ground_truth"

        # Write ground truth cask metadata
        ground_truth_metadata_path = os.path.join(args.base_directory_gt, app_uuid + "_md.json")
        with open(ground_truth_metadata_path, 'w') as f:
            json.dump(ground_truth_metadata_json, f, indent=2)
# Example #11 (score: 0)
def main(args):
    """Builds and runs DOPE object pose estimation inference.

    The input source is selected by ``args.mode``: 'cask', 'realsense',
    'sim' or 'image'; any other value raises ValueError.
    """
    app = Application(name="dope_inference")

    # Load dope inference subgraph
    app.load(
        "packages/object_pose_estimation/apps/dope/dope_inference.subgraph.json",
        "detection_pose_estimation")
    detection_pose_estimation_interface = app.nodes[
        "detection_pose_estimation.interface"]["subgraph"]

    if args.mode == 'cask':
        # Load replay subgraph and configure interface node
        app.load("packages/cask/apps/replay.subgraph.json", prefix="replay")
        replay = app.nodes["replay.interface"].components["output"]
        replay.config.cask_directory = args.file
        app.connect(replay, "color", detection_pose_estimation_interface,
                    "color")
        app.connect(replay, "color_intrinsics",
                    detection_pose_estimation_interface, "intrinsics")
    elif args.mode == "realsense":
        app.load_module('realsense')
        # Create and configure realsense camera codelet
        camera = app.add("camera").add(app.registry.isaac.RealsenseCamera)
        camera.config.rows = 360
        camera.config.cols = 640
        camera.config.color_framerate = 15
        camera.config.depth_framerate = 15
        camera.config.enable_ir_stereo = False
        app.connect(camera, "color", detection_pose_estimation_interface,
                    "color")
        app.connect(camera, "color_intrinsics",
                    detection_pose_estimation_interface, "intrinsics")
    elif args.mode == "sim":
        app.load(filename='packages/navsim/apps/navsim_tcp.subgraph.json',
                 prefix='simulation')
        sim = app.nodes["simulation.interface"]["output"]
        app.connect(sim, "color", detection_pose_estimation_interface, "color")
        app.connect(sim, "color_intrinsics",
                    detection_pose_estimation_interface, "intrinsics")
    elif args.mode == "image":
        app.load_module("message_generators")
        image_loader = app.add("image").add(
            app.registry.isaac.message_generators.ImageLoader, "ImageLoader")
        image_loader.config.color_filename = args.file
        image_loader.config.tick_period = "1Hz"
        app.connect(image_loader, "color", detection_pose_estimation_interface,
                    "color")

        # Create intrinsic node (static image files provide no intrinsics, so
        # they are synthesized here from CLI parameters)
        intrinsic = app.add("intrinsic").add(
            app.registry.isaac.message_generators.CameraIntrinsicsGenerator)
        intrinsic.config.focal_length = [args.focal, args.focal]
        intrinsic.config.optical_center = args.optical_center
        intrinsic.config.distortion_coefficients = [
            0.01, 0.01, 0.01, 0.01, 0.01
        ]
        app.connect(image_loader, "color", intrinsic, "image")
        app.connect(intrinsic, "intrinsics",
                    detection_pose_estimation_interface, "intrinsics")
    else:
        # Fail fast on an unknown mode (consistent with the sibling sample
        # apps) instead of running the graph with no input source connected.
        raise ValueError('Not supported mode {}'.format(args.mode))

    # TensorRT inference
    trt = app.nodes["detection_pose_estimation.inference"]["TensorRTInference"]
    trt.config.model_file_path = args.model
    # Engine file lives next to the model: <model_dir>/<model_stem>.plan.
    # Renamed local from `dir` to avoid shadowing the builtin.
    model_dir, filename = os.path.split(args.model)
    trt.config.engine_file_path = "{0}/{1}.plan".format(
        model_dir,
        os.path.splitext(filename)[0])

    # Dope decoder
    decoder = app.nodes["detection_pose_estimation.decoder"]["DopeDecoder"]
    decoder.config.box_dimensions = args.box
    decoder.config.label = args.label

    # Detections3Viewer
    detection3_viewer = app.nodes["detection_pose_estimation.viewers"][
        "Detections3Viewer"]
    detection3_viewer.config.box_dimensions = args.box

    if args.camera_pose is not None:
        # Inject decoded poses into the pose tree and anchor the camera frame
        # in the world frame at the user-provided pose.
        app.load_module("utils")
        pose_node = app.add("pose")
        detection_injector = pose_node.add(
            app.registry.isaac.utils.DetectionsToPoseTree)
        detection_injector.config.detection_frame = "camera"
        app.connect(decoder, "output_poses", detection_injector, "detections")
        camera_initializer = pose_node.add(
            app.registry.isaac.alice.PoseInitializer)
        camera_initializer.config.lhs_frame = "world"
        camera_initializer.config.rhs_frame = "camera"
        camera_initializer.config.pose = args.camera_pose

    app.run()
# Example #12 (score: 0)
class TestApplication(unittest.TestCase):
    '''
    Tests for the pyalice Application API: loading subgraphs and modules,
    node/component access, the pose tree, the clock, and run control.
    '''
    @classmethod
    def setUpClass(cls):
        # method will be ran once before any test is ran
        pass

    @classmethod
    def tearDownClass(cls):
        # method will be ran once after all tests have run
        pass

    def setUp(self):
        # ran before each test
        return super().setUp()

    def tearDown(self):
        # ran after each test
        return super().tearDown()

    def test_get_component(self):
        """Indexing a node returns a component by exact name, or None otherwise."""
        app = Application()
        self._app = app
        app.add('node')

        added_node = app.nodes['node']
        self.assertIsNotNone(added_node)

        # Looking up a name no component carries yields None rather than raising.
        self.assertIsNone(added_node['ledger'])

        # A freshly added node already carries a MessageLedger component.
        self.assertIsNotNone(added_node['MessageLedger'])

        app.start()
        app.stop()

    def test_load_subgraph(self):
        """Loading the same subgraph under two prefixes creates independent,
        correctly configured node/component copies."""
        self._app = Application()
        self._app.add('node')

        # loads subgraph and checks if the node/component are created, config is good
        self._app.load('packages/pyalice/tests/application_test.subgraph.json',
                       'foo')

        node = self._app.nodes['foo.camera_viewer']
        component = node['ImageViewer']
        self.assertIsNotNone(component)

        fps_readback = self._app.nodes['foo.camera_viewer'].components[
            'ImageViewer'].config['target_fps']
        self.assertEqual(fps_readback, 11)

        camera_name_readback = self._app.nodes['foo.camera_viewer'].components[
            'ImageViewer'].config['camera_name']
        self.assertEqual(camera_name_readback, 'bar')

        # loads the subgraph again with different prefix name
        self._app.load('packages/pyalice/tests/application_test.subgraph.json',
                       'foo1')
        node = self._app.nodes['foo1.camera_viewer']
        component = node['ImageViewer']
        self.assertIsNotNone(component)

        fps_readback = self._app.nodes['foo1.camera_viewer'].components[
            'ImageViewer'].config['target_fps']
        self.assertEqual(fps_readback, 11)

        camera_name_readback = self._app.nodes[
            'foo1.camera_viewer'].components['ImageViewer'].config[
                'camera_name']
        # BUG FIX: the readback above was fetched but never verified; it must
        # match the subgraph's configured value like the 'foo' copy does.
        self.assertEqual(camera_name_readback, 'bar')

        self._app.start()
        self._app.stop()

    def test_load_bogus_subgraph(self):
        """load() raises ValueError both for a missing file and for a file
        whose content is not valid JSON."""
        # BUG FIX: NamedTemporaryFile deletes its file on close by default, so
        # the previous version handed load() a path that no longer existed —
        # the test passed for the wrong reason (missing file, not bogus JSON).
        # Keep the file alive past the `with` block and clean it up ourselves.
        with tempfile.NamedTemporaryFile('w', delete=False) as f:
            f.write('this is bogus json')
            bogus_json_filename = f.name

        try:
            self._app = Application()
            with self.assertRaises(ValueError):
                self._app.load('/no/such/file')

            with self.assertRaises(ValueError):
                self._app.load(bogus_json_filename)
        finally:
            os.remove(bogus_json_filename)

    def test_load_subgraph_in_asset_path(self):
        """Subgraph filenames resolve relative to the application's asset path."""
        app = Application()
        self._app = app
        app.add('node')

        # Redirect asset resolution to the test location so the bare filename
        # below is found there.
        app._asset_path = 'packages/pyalice/tests/mock_asset_path'

        # loads subgraph and checks if the node/component are created, config is good
        app.load('mock_installed_application_test.subgraph.json', 'foo')

        viewer_node = app.nodes['foo.camera_viewer']
        self.assertIsNotNone(viewer_node)
        self.assertIsNotNone(viewer_node['ImageViewer'])

        viewer_config = viewer_node.components['ImageViewer'].config
        self.assertEqual(viewer_config['target_fps'], 11)
        self.assertEqual(viewer_config['camera_name'], 'bar')

        app.start()
        app.stop()

    def test_module_explorer(self):
        """The registry exposes codelets of loaded modules; unknown names raise."""
        self._app = Application()
        self._app.load_module('json')
        # A registered codelet is reachable without raising.
        self._app.registry.isaac.json.JsonToProto
        # An unregistered name raises RuntimeError.
        with self.assertRaises(RuntimeError):
            self._app.registry.isaac.foo

    def test_node_accesor(self):
        """nodes[...] returns added nodes by name and None for unknown names."""
        # (The method name keeps its historical spelling; renaming would
        # change the test's public identifier.)
        app = Application()
        self._app = app
        app.load_module('json')

        first = app.add('foo1')
        self.assertIsNotNone(first)
        self.assertIsNotNone(
            first.add(app.registry.isaac.json.JsonToProto, 'bar1'))

        second = app.add('foo2')
        self.assertIsNotNone(second)
        self.assertIsNotNone(
            second.add(app.registry.isaac.json.ProtoToJson, 'bar2'))

        self.assertIsNotNone(app.nodes['foo1'])
        self.assertIsNotNone(app.nodes['foo2'])
        self.assertIsNone(app.nodes['foo3'])

        app.start()
        app.stop()

    def test_load_module(self):
        """load_module() succeeds and registers the module's codelets."""
        self._app = Application()

        self.assertTrue(self._app.load_module('message_generators'))

        # The codelet is then visible in the registry under its fully
        # qualified C++ name.
        loader = self._app.registry.isaac.message_generators.ImageLoader
        self.assertIsNotNone(loader)
        self.assertEqual(str(loader),
                         "['isaac::message_generators::ImageLoader']")

    def test_clock(self):
        """The application clock exists after start and advances monotonically."""
        self._app = Application()
        self._app.start()

        clock = self._app.clock
        self.assertIsNotNone(clock)
        self.assertIsNotNone(clock.time)
        first_reading = clock.time
        self.assertGreater(first_reading, 0.0)
        # A second read must be strictly later than the first.
        self.assertGreater(clock.time, first_reading)

        self._app.stop()

    def test_pose(self):
        """Poses set at two timestamps interpolate at an intermediate query time."""
        self._app = Application()

        self._app.start()

        # Identity rotation with zero translation at t=1.0 ...
        self.assertTrue(
            self._app.atlas.set_pose(
                'foo', 'bar', 1.0,
                [np.quaternion(1.0, 0.0, 0.0, 0.0),
                 np.array([0.0, 0.0, 0.0])]))
        # ... and the same rotation with translation (1, 2, 3) at t=2.0.
        self.assertTrue(
            self._app.atlas.set_pose(
                'foo', 'bar', 2.0,
                [np.quaternion(1.0, 0.0, 0.0, 0.0),
                 np.array([1.0, 2.0, 3.0])]))

        # Querying halfway between should give the midpoint translation.
        read_pose = self._app.atlas.pose('foo', 'bar', 1.5)
        self.assertIsNotNone(read_pose)
        self.assertEqual(len(read_pose), 2)

        q = read_pose[0]
        t = read_pose[1]
        # BUG FIX: compare the *absolute* deviation. The previous raw
        # differences (e.g. q.w - 1.0) would pass for any value below the
        # expected one, so the assertions could never catch an undershoot.
        self.assertLess(abs(q.w - 1.0), 1e-6)
        self.assertLess(abs(q.x - 0.0), 1e-6)
        self.assertLess(abs(q.y - 0.0), 1e-6)
        self.assertLess(abs(q.z - 0.0), 1e-6)
        self.assertLess(abs(t[0] - 0.5), 1e-6)
        self.assertLess(abs(t[1] - 1.0), 1e-6)
        self.assertLess(abs(t[2] - 1.5), 1e-6)

        self._app.stop()

    def test_node_start_stop(self):
        """Stopping a node stops its message flow; restarting it resumes flow,
        and stopping an already-stopped application raises."""
        self._app = Application()

        result = self._app.load_module('message_generators')
        self.assertTrue(result)

        # Source node: a pan/tilt state generator ticking at 20 Hz.
        node = self._app.add('src')
        self.assertIsNotNone(node)
        component = node.add(
            self._app.registry.isaac.message_generators.PanTiltStateGenerator,
            'pantilt')
        component.config['tick_period'] = '20 Hz'
        self.assertIsNotNone(component)

        # Sink node: its MessageLedger receives the generator's output.
        node_sink = self._app.add('sink')
        self.assertIsNotNone(node_sink)

        self._app.connect(component, 'target',
                          node_sink.components['MessageLedger'], 'rcv')

        self._app.start()

        # While the source runs, messages arrive at the sink.
        time.sleep(0.1)
        msg = self._app.receive('sink', 'MessageLedger', 'rcv')
        self.assertIsNotNone(msg)

        node.stop()
        # Drain any message already in flight before asserting silence; this
        # first receive's result is deliberately not checked.
        msg = self._app.receive('sink', 'MessageLedger', 'rcv')
        time.sleep(0.1)
        msg = self._app.receive('sink', 'MessageLedger', 'rcv')
        self.assertIsNone(msg)
        time.sleep(0.1)
        msg = self._app.receive('sink', 'MessageLedger', 'rcv')
        self.assertIsNone(msg)

        node.start()

        # Restarting the source resumes delivery.
        time.sleep(0.1)
        msg = self._app.receive('sink', 'MessageLedger', 'rcv')
        self.assertIsNotNone(msg)

        self._app.stop()

        # Stopping twice is an error.
        with self.assertRaises(RuntimeError):
            self._app.stop()

    def test_wait_for_node(self):
        """wait_for_node blocks until the timer behavior reports Success."""
        app = Application()
        self._app = app
        app.load_module('behavior_tree')
        app.add('node').add(app.registry.isaac.behavior_tree.TimerBehavior)
        app.start()
        status = app.wait_for_node('node')
        app.stop()
        self.assertEqual(str(status), 'Status.Success',
                         'Should reach Status.Success in 1 second.')

    def test_wait_for_node_timeout(self):
        """wait_for_node with a short duration returns while the node still runs."""
        app = Application()
        self._app = app
        app.load_module('behavior_tree')
        app.add('node').add(app.registry.isaac.behavior_tree.TimerBehavior)
        app.start()
        # 0.1s is well below the timer's duration, so the node cannot finish.
        status = app.wait_for_node('node', duration=0.1)
        app.stop()
        self.assertEqual(str(status), 'Status.Running',
                         'Should still be in Status.Running')

    def test_run_until_succeed(self):
        """run('success') blocks until the codelet eventually reports success."""
        started = time.time()

        self._app = Application()
        self._app.add('success')
        self.assertIsNotNone(self._app.nodes['success'])

        # Codelet that succeeds only after some delay (defined elsewhere in
        # this module).
        self._app.nodes['success'].add(SucceedLaterCodelet)

        self._app.run('success')

        # run() must not have returned immediately.
        self.assertGreater(time.time() - started, 0.4)

    def test_run_until_failure(self):
        """run('failure') blocks until the codelet eventually reports failure."""
        started = time.time()

        self._app = Application()
        self._app.add('failure')
        self.assertIsNotNone(self._app.nodes['failure'])

        # Codelet that fails only after some delay (defined elsewhere in this
        # module).
        self._app.nodes['failure'].add(FailureLaterCodelet)

        self._app.run('failure')

        # run() must not have returned immediately.
        self.assertGreater(time.time() - started, 0.4)

    def test_perf_report(self):
        """--performance_report_out makes the app write a report on shutdown."""
        report_path = '/tmp/perf_report'
        # Start from a clean slate so the existence check below is meaningful.
        if os.path.exists(report_path):
            os.remove(report_path)

        self._app = Application(
            argv=['--performance_report_out', report_path])
        self._app.load_module('behavior_tree')
        self._app.add('node').add(
            self._app.registry.isaac.behavior_tree.TimerBehavior)
        self._app.run(0.2)
        self.assertTrue(os.path.exists(report_path))

    def test_max_duration(self):
        """--max_duration 0.5s stops the app before run()'s own timeout,
        whether that timeout is a float or an int."""

        def run_with_cap(timeout):
            # Build a fresh 0.5s-capped app and return the wall time run() took.
            started = time.time()
            self._app = Application(argv=['--max_duration', '0.5s'])
            self._app.load_module('behavior_tree')
            self._app.add('node').add(
                self._app.registry.isaac.behavior_tree.TimerBehavior)
            self._app.run(timeout)
            return time.time() - started

        # float timeout
        self.assertLess(run_with_cap(2.0), 1.0)
        # int timeout
        self.assertLess(run_with_cap(2), 1.0)

    def test_print_node(self):
        """nodes._names tracks added nodes and the node collection is iterable."""
        self._app = Application()
        self._app.add('foo')

        names = self._app.nodes._names
        self.assertTrue(isinstance(names, list) and 'foo' in names)

        self._app.add('bar')
        names = self._app.nodes._names
        self.assertTrue(
            isinstance(names, list) and 'foo' in names and 'bar' in names)

        # Both nodes must also be reachable by iterating the collection.
        found = {}
        for candidate in self._app.nodes:
            if candidate.name in ('foo', 'bar'):
                found[candidate.name] = candidate

        self.assertIsNotNone(found.get('foo'))
        self.assertIsNotNone(found.get('bar'))

    def test_expand_asset(self):
        """expand_asset_path resolves @workspace references; once the workspace
        is marked as home, the external/ prefix is dropped."""
        self._app = Application()
        # With no home workspace set, the reference resolves into external/.
        ws, path = self._app._app.expand_asset_path('@workspace_//foo/bar')
        self.assertEqual(path, 'external/workspace_/foo/bar')
        self.assertEqual(ws, 'workspace_')

        # The home workspace name defaults to empty and is settable.
        ws1 = self._app.home_workspace_name
        self.assertEqual(len(ws1), 0)
        self._app.home_workspace_name = 'workspace_'
        ws2 = self._app.home_workspace_name
        self.assertEqual(ws2, 'workspace_')

        # The same reference now resolves to a workspace-relative path.
        ws3, path3 = self._app._app.expand_asset_path('@workspace_//foo/bar')
        self.assertEqual(ws3, 'workspace_')
        self.assertEqual(path3, 'foo/bar')