Code example #1
0
 def initialize_app(self):
     """Build the Isaac bridge application and wire in optional components."""
     app = Application(name="rm_isaac_bridge")
     # NOTE: a navsim TCP subgraph ("packages/navsim/apps/navsim_tcp.subgraph.json",
     # prefix "simulation") used to be loaded here and is kept disabled.
     app.load(filename="packages/ros_bridge/apps/ros_to_perception.subgraph.json", prefix="ros_perception")

     # Each component is optional; every one that exists on this instance
     # gets the chance to attach itself to (and replace) the app object.
     for component in ('arm', 'camera', 'effector'):
         if hasattr(self, component):
             app = getattr(self, component).connect_app(app)

     self._app = app
Code example #2
0
    def setUpClass(cls):
        """Start the tool-IO test application against the UR robot under test."""
        ip = args.robotip
        robot = args.robot

        cls.app = Application(name="tool_io_test")
        # Pick the driver subgraph for the robot generation; any value other
        # than an explicit "cb3" (including the "e-series" default) loads the
        # e-series variant.
        if robot == "cb3":
            cls.app.load(
                "packages/universal_robots/ur_robot_driver/apps/ur_cb3_robot.subgraph.json",
                prefix="ur")
        else:
            cls.app.load(
                "packages/universal_robots/ur_robot_driver/apps/ur_eseries_robot.subgraph.json",
                prefix="ur")

        driver = cls.app.nodes["ur.universal_robots"]["UniversalRobots"]
        driver.config.robot_ip = ip
        driver.config.headless_mode = True

        cls.app.start()

        # Parser spec: [channel_name, entity, dimension] per tool output pin.
        cls.io_parser = [
            [name, "none", 1]
            for name in ["tool_digital_out_0", "tool_digital_out_1"]
        ]
Code example #3
0
    def setUpClass(cls):
        """Bring up the trajectory test app with widened LQR planner limits."""
        ip = args.robotip
        robot = args.robot

        cls.app = Application(name="trajectory_test")
        # Select the driver subgraph; any value other than "cb3" (including
        # the "e-series" default) loads the e-series variant.
        if robot == "cb3":
            cls.app.load("packages/universal_robots/ur_robot_driver/apps/ur_cb3_robot.subgraph.json", prefix="ur")
        else:
            cls.app.load("packages/universal_robots/ur_robot_driver/apps/ur_eseries_robot.subgraph.json", prefix="ur")

        cls.ur_controller = cls.app.nodes["ur.controller"]["ScaledMultiJointController"]
        cls.ur_controller.config.control_mode = "joint position"

        cls.ur_driver = cls.app.nodes["ur.universal_robots"]["UniversalRobots"]
        cls.ur_driver.config.control_mode = "joint position"
        cls.ur_driver.config.robot_ip = ip
        cls.ur_driver.config.headless_mode = True

        # Widen the planner's speed/acceleration envelope for all six joints.
        planner = cls.app.nodes["ur.local_plan"]["MultiJointLqrPlanner"]
        planner.config.speed_max = [2] * 6
        planner.config.speed_min = [-2] * 6
        planner.config.acceleration_max = [2] * 6
        planner.config.acceleration_min = [-2] * 6

        cls.app.start()

        cls.joint_names = [
            'shoulder_pan_joint', 'shoulder_lift_joint', 'elbow_joint',
            'wrist_1_joint', 'wrist_2_joint', 'wrist_3_joint'
        ]
        # Parser spec: [joint_name, entity, dimension] per joint.
        cls.position_parser = [[joint, "position", 1] for joint in cls.joint_names]

        cls.init_robot(cls)

        # Two waypoints: all joints at 0.0 rad, then all joints at -1.0 rad.
        cls.joint_values = [[0.0] * 6, [-1.0] * 6]
Code example #4
0
    def setUpClass(cls):
        """Start the external-control test app driving the UR arm in joint space."""
        ip = args.robotip
        robot = args.robot

        cls.app = Application(name="external_control_test")
        # Choose the driver subgraph for the robot generation; any value other
        # than "cb3" (including the "e-series" default) loads the e-series one.
        if robot == "cb3":
            cls.app.load(
                "packages/universal_robots/ur_robot_driver/apps/ur_cb3_robot.subgraph.json",
                prefix="ur")
        else:
            cls.app.load(
                "packages/universal_robots/ur_robot_driver/apps/ur_eseries_robot.subgraph.json",
                prefix="ur")

        controller = cls.app.nodes["ur.controller"]["ScaledMultiJointController"]
        controller.config.control_mode = "joint position"

        driver = cls.app.nodes["ur.universal_robots"]["UniversalRobots"]
        driver.config.control_mode = "joint position"
        driver.config.robot_ip = ip

        cls.app.start()

        cls.joint_names = [
            'shoulder_pan_joint', 'shoulder_lift_joint', 'elbow_joint',
            'wrist_1_joint', 'wrist_2_joint', 'wrist_3_joint'
        ]
        # Parser spec: [joint_name, entity, dimension] per joint.
        cls.position_parser = [[joint, "position", 1] for joint in cls.joint_names]

        cls.init_robot(cls)
Code example #5
0
    def execute_impl(self):
        """Run the sensor-certification app and report evaluator success.

        Builds an Isaac application that wires the device under test ("dut")
        to the configured evaluator codelet, runs it until the evaluator node
        reaches a terminal status, and stores the evaluator's detailed report
        on ``self.detailed_report``.

        Returns:
            bool: True when the evaluator node finished with Status.Success.
        """
        if not self.verbose:
            set_severity(severity.ERROR)

        app = Application(name="sensor_cert")

        if not self.verbose:
            app.logger.setLevel(logging.ERROR)

        # need to explicitly load websight node
        app.load_module("packages/sight")

        app.load_module("packages/viewers")

        for k, v in self.get_websight_config().items():
            app.nodes["websight"].components["WebsightServer"].config[k] = v

        app.load_module("packages/sensor_certification/evaluators")

        # Load design under test (dut)
        app.load_module(self.module_spec["module_name"])
        app.add("dut", [self.module_spec["codelet"]])

        # Load dut specific config; the component name is the last segment of
        # the fully-qualified codelet name (e.g. "ns::Comp" -> "Comp").
        component_name = self.module_spec["codelet"].split("::")[-1]
        for key, value in self.sensor_drv_config.items():
            app.nodes["dut"].components[component_name].config[key] = value

        # Load evaluator
        app.add("evaluator", [self.get_evaluator()])
        evaluator_name = self.get_evaluator().split("::")[-1]

        # Connect every dut output channel to its evaluator input channel.
        for src, dest in self.get_links():
            app.connect(app.nodes["dut"].components[component_name], src,
                        app.nodes["evaluator"].components[evaluator_name],
                        dest)

        # Configure evaluator config to match parameters
        eval_config = app.nodes["evaluator"].components[evaluator_name].config
        for key, value in self.get_evaluator_config().items():
            eval_config[key] = value

        # Add any extra setup to the app
        self.extra_app_setup(app)

        # Create report directory (including any missing parents); an already
        # existing directory is fine.
        os.makedirs(self.report_dir, exist_ok=True)
        eval_config["report_directory"] = self.report_dir

        # Load additional evaluator config from the tolerances
        for tolerance in self.tolerances:
            config_name = self.get_tolerance_map()[tolerance.name]
            eval_config[config_name] = tolerance.tolerance

        try:
            app.start()
            self.do_setup(app)
            # Signal the evaluator that setup finished, then block until the
            # evaluator node reports a terminal status.
            app.nodes["evaluator"].components[evaluator_name].config[
                "setup_done"] = True
            status = app.wait_for_node("evaluator")
        finally:
            # Always shut the app down, even if setup or the wait raised.
            app.stop()

        self.detailed_report = app.nodes["evaluator"].components[
            evaluator_name].config["report"]

        return status == Status.Success
Code example #6
0
def main(args):
    """Convert CVAT ground-truth labels into a recorded Isaac cask.

    Parses the CVAT XML export named after an image cask UUID, rebuilds each
    image's sliced bounding boxes as Detections2Proto messages, and publishes
    them through a dummy node into a record subgraph so they are written out
    as a ground-truth cask. Optionally writes RACI metadata JSON next to it.

    Args:
        args: Parsed CLI namespace with ``cvat_xml``, ``base_directory_gt``,
            ``slice_mode`` and ``raci_metadata`` attributes.
    """
    # Read CVAT XML file
    cvat_xml_path = args.cvat_xml
    if os.path.exists(cvat_xml_path):
        tree = et.parse(cvat_xml_path)
    else:
        print("Please provide a valid XML file from CVAT.")
        return

    # The XML filename (without extension) is the UUID of the image cask the
    # labels are associated with.
    image_cask_uuid = cvat_xml_path.split('/')[-1].split('.')[0]

    # Start application to record
    app = Application()
    # Add a dummy node to publish the constructed Detections2 messages from
    app.add("node")
    message_ledger = app.nodes["node"].components["MessageLedger"]
    # Load record subgraph and configure
    app.load("packages/cask/apps/record.subgraph.json", prefix="record")
    record_interface = app.nodes["record.interface"].components["input"]
    record_interface.config.base_directory = args.base_directory_gt
    # Connect output of dummy node to recorder
    app.connect(message_ledger, 'in', record_interface, 'bounding_boxes')
    app.start()

    # Loop through each image element in the XML tree
    count = 0
    for image in tree.findall("./image"):
        # "Name" attribute corresponds to the image filepath that was input to
        # the CVAT labeling tool. Convention is:
        # <image_cask_uuid>/<channel>/<acqtime>.png
        image_uuid, channel, png = image.attrib['name'].split('/')

        # Skip entries belonging to a different image cask than the one named
        # by the XML file.
        if image_uuid != image_cask_uuid:
            continue

        # Extract the acquisition time from the zero-padded PNG filename.
        acqtime = int(png.lstrip('0').split('.')[0])

        # Slice the detections of interest for this image
        all_ground_truth_bboxes = image.findall("./box")
        sliced_ground_truth_boxes = slice_detections(all_ground_truth_bboxes,
                                                     args.slice_mode)
        num_sliced_ground_truth_boxes = len(sliced_ground_truth_boxes)

        # Build Detections2Proto message
        detections2 = Message.create_message_builder('Detections2Proto')
        detections2.acqtime = acqtime
        detections2.uuid = str(uuid.uuid1())
        predictions = detections2.proto.init('predictions',
                                             num_sliced_ground_truth_boxes)
        bounding_boxes = detections2.proto.init('boundingBoxes',
                                                num_sliced_ground_truth_boxes)

        # Populate the Detections2Proto and PredictionProto messages per
        # sliced bounding box.
        for i in range(num_sliced_ground_truth_boxes):
            box = sliced_ground_truth_boxes[i]
            row = {
                a.attrib['name']: a.text
                for a in box.findall("./attribute")
            }
            row.update(box.attrib)

            prediction = predictions[i]
            prediction.label = row['label']
            prediction.confidence = 1.0

            # CVAT gives (xtl, ytl, xbr, ybr) in pixel coordinates; the proto
            # is filled with axes swapped — presumably x = row, y = column per
            # the Isaac image convention. TODO(review): confirm.
            bbox = bounding_boxes[i]
            bbox.min.y = float(row['xtl'])
            bbox.min.x = float(row['ytl'])
            bbox.max.y = float(row['xbr'])
            bbox.max.x = float(row['ybr'])

        # Publish the message to the node being recorded.
        app.publish('node', 'MessageLedger', 'in', detections2)
        # NOTE(review): this receive() appears to drain/ack the channel; its
        # return value was never used, so it is not bound.
        app.receive('node', 'MessageLedger', 'in')
        count += 1
        time.sleep(0.1)  # sleep to make sure we don't lose any messages

    app.stop()
    print("Wrote " + str(count) + " messages")

    # Write metadata to JSON data per output cask. The metadata serves to
    # associate corresponding image and ground truth casks. As per the RACI
    # evaluation workflow and data management, image casks and ground truth
    # casks are stored in separate directories.
    if args.raci_metadata:
        # Populate ground truth cask metadata
        ground_truth_metadata_json = {}
        ground_truth_metadata_json["Image_Cask_File"] = image_cask_uuid
        ground_truth_metadata_json["Data_Source"] = "ground_truth"

        # Write ground truth cask metadata
        ground_truth_metadata_path = os.path.join(args.base_directory_gt,
                                                  app.uuid + "_md.json")
        with open(ground_truth_metadata_path, 'w') as f:
            json.dump(ground_truth_metadata_json, f, indent=2)