Example #1
0
 def initialize_app(self):
     """Construct the Isaac Application graph and wire in any configured components.

     Loads the ROS-to-perception bridge subgraph, then lets each optional
     hardware component (arm, camera, effector) attach itself to the app,
     and finally stores the finished app on the instance as ``self._app``.
     """
     app = Application(name="rm_isaac_bridge")
     # app.load(filename="packages/navsim/apps/navsim_tcp.subgraph.json", prefix="simulation")
     app.load(filename="packages/ros_bridge/apps/ros_to_perception.subgraph.json", prefix="ros_perception")

     # Each optional component, when present on this instance, connects itself
     # into the app graph; order (arm, camera, effector) matches the original.
     for component_name in ('arm', 'camera', 'effector'):
         if hasattr(self, component_name):
             app = getattr(self, component_name).connect_app(app)

     self._app = app
Example #2
0
def main(args):
    """Convert CVAT ground-truth labels (XML) into a recorded Isaac cask.

    Parses the CVAT XML file given by ``args.cvat_xml``, builds one
    Detections2Proto message per labeled image, publishes each message
    through a dummy node wired to the record subgraph (so the recorder
    writes them into a cask under ``args.base_directory_gt``), and
    optionally writes RACI metadata JSON next to the cask.

    Args:
        args: Parsed CLI namespace. Uses ``cvat_xml``, ``base_directory_gt``,
            ``slice_mode``, and ``raci_metadata``.
    """
    # Read CVAT XML file
    cvat_xml_path = args.cvat_xml
    if os.path.exists(cvat_xml_path):
        tree = et.parse(cvat_xml_path)
    else:
        print("Please provide a valid XML file from CVAT.")
        return

    # Get image cask UUID that these labels are associated with.
    # Convention: the XML file is named <image_cask_uuid>.xml.
    image_cask_uuid = cvat_xml_path.split('/')[-1].split('.')[0]

    # Start application to record
    app = Application()
    # Add a dummy node to publish the constructed Detections2 messages from
    app.add("node")
    message_ledger = app.nodes["node"].components["MessageLedger"]
    # Load record subgraph and configure
    app.load("packages/cask/apps/record.subgraph.json", prefix="record")
    record_interface = app.nodes["record.interface"].components["input"]
    record_interface.config.base_directory = args.base_directory_gt
    # Connect output of dummy node to recorder
    app.connect(message_ledger, 'in', record_interface, 'bounding_boxes')
    app.start()

    # Loop through each image element in the XML tree
    count = 0
    for image in tree.findall("./image"):
        # "Name" attribute corresponds to the image filepath that was input to the CVAT labeling
        # tool. Convention is: <image_cask_uuid>/<channel>/<acqtime>.png
        image_uuid, channel, png = image.attrib['name'].split('/')

        # Check that the image_uuid corresponds to the one specified by the XML filename
        if image_uuid != image_cask_uuid:
            continue

        # Extract the acquisition time. int() tolerates leading zeros, so no
        # stripping is needed; the previous lstrip('0') raised ValueError on an
        # all-zeros timestamp (e.g. "0.png" -> int("")).
        acqtime = int(png.split('.')[0])

        # Slice the detections of interest for this image
        all_ground_truth_bboxes = image.findall("./box")
        sliced_ground_truth_boxes = slice_detections(all_ground_truth_bboxes,
                                                     args.slice_mode)
        num_sliced_ground_truth_boxes = len(sliced_ground_truth_boxes)

        # Build Detections2Proto message
        detections2 = Message.create_message_builder('Detections2Proto')
        detections2.acqtime = acqtime
        detections2.uuid = str(uuid.uuid1())
        predictions = detections2.proto.init('predictions',
                                             num_sliced_ground_truth_boxes)
        bounding_boxes = detections2.proto.init('boundingBoxes',
                                                num_sliced_ground_truth_boxes)

        # Populate the Detections2Proto and PredictionProto messages per sliced bounding box
        for i in range(num_sliced_ground_truth_boxes):
            box = sliced_ground_truth_boxes[i]
            # Merge CVAT <attribute> children and the <box> XML attributes
            # (xtl/ytl/xbr/ybr, label, ...) into one flat dict.
            row = {
                a.attrib['name']: a.text
                for a in box.findall("./attribute")
            }
            row.update(box.attrib)

            prediction = predictions[i]
            prediction.label = row['label']
            prediction.confidence = 1.0

            # NOTE(review): CVAT's x-coordinates are written to .y and vice
            # versa — presumably mapping CVAT's (x, y) to Isaac's (row, col)
            # bounding-box convention. Verify before changing.
            bbox = bounding_boxes[i]
            bbox.min.y = float(row['xtl'])
            bbox.min.x = float(row['ytl'])
            bbox.max.y = float(row['xbr'])
            bbox.max.x = float(row['ybr'])

        # Publish the message to the node being recorded; the receive() call
        # drains the channel so the next publish is not dropped.
        app.publish('node', 'MessageLedger', 'in', detections2)
        app.receive('node', 'MessageLedger', 'in')
        count += 1
        time.sleep(0.1)  # sleep to make sure we don't lose any messages

    app.stop()
    print("Wrote " + str(count) + " messages")

    # Write metadata to JSON data per output cask. The metadata serves to associate
    # corresponding image and ground truth casks. As per RACI evaluation workflow
    # and data management, image casks and ground truth casks are stored in separate
    # directories.
    if args.raci_metadata:
        # Populate ground truth cask metadata
        ground_truth_metadata_json = {}
        ground_truth_metadata_json["Image_Cask_File"] = image_cask_uuid
        ground_truth_metadata_json["Data_Source"] = "ground_truth"

        # Write ground truth cask metadata
        ground_truth_metadata_path = os.path.join(args.base_directory_gt,
                                                  app.uuid + "_md.json")
        with open(ground_truth_metadata_path, 'w') as f:
            json.dump(ground_truth_metadata_json, f, indent=2)