Example #1
    def execute_impl(self):
        if not self.verbose:
            set_severity(severity.ERROR)

        app = Application(name="sensor_cert")

        if not self.verbose:
            app.logger.setLevel(logging.ERROR)

        # Need to explicitly load the websight node
        app.load_module("packages/sight")

        app.load_module("packages/viewers")

        for k, v in self.get_websight_config().items():
            app.nodes["websight"].components["WebsightServer"].config[k] = v

        app.load_module("packages/sensor_certification/evaluators")

        # Load design under test (dut)
        app.load_module(self.module_spec["module_name"])
        app.add("dut", [self.module_spec["codelet"]])

        # Load dut specific config
        component_name = self.module_spec["codelet"].split("::")[-1]
        for key, value in self.sensor_drv_config.items():
            app.nodes["dut"].components[component_name].config[key] = value

        # Load evaluator
        app.add("evaluator", [self.get_evaluator()])
        evaluator_name = self.get_evaluator().split("::")[-1]

        # Connect dut to evaluator
        for src, dest in self.get_links():
            app.connect(app.nodes["dut"].components[component_name], src,
                        app.nodes["evaluator"].components[evaluator_name],
                        dest)

        # Configure evaluator config to match parameters
        eval_config = app.nodes["evaluator"].components[evaluator_name].config
        for key, value in self.get_evaluator_config().items():
            eval_config[key] = value

        # Add any extra setup to the app
        self.extra_app_setup(app)

        # Create report directory
        try:
            os.mkdir(self.report_dir)
        except FileExistsError:
            pass
        eval_config["report_directory"] = self.report_dir

        # Load additional evaluator config from the tolerances
        for tolerance in self.tolerances:
            config_name = self.get_tolerance_map()[tolerance.name]
            eval_config[config_name] = tolerance.tolerance

        try:
            app.start()
            self.do_setup(app)
            app.nodes["evaluator"].components[evaluator_name].config[
                "setup_done"] = True
            status = app.wait_for_node("evaluator")
        finally:
            app.stop()

        self.detailed_report = app.nodes["evaluator"].components[
            evaluator_name].config["report"]

        return status == Status.Success
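
Example #1 above is a method of a certification-test class and relies on several helper members it does not define itself (module_spec, sensor_drv_config, report_dir, tolerances, get_websight_config, get_evaluator, get_links, get_evaluator_config, get_tolerance_map, do_setup, extra_app_setup). A minimal sketch of the shapes those helpers could provide is shown below; the class name, module paths, codelet names, and every value in it are assumptions made only to illustrate what execute_impl() consumes.

# Hypothetical sketch only -- class name, module paths, codelet names and all
# values are assumptions illustrating the data shapes execute_impl() expects.
class ExampleCameraCertification:
    module_spec = {
        "module_name": "packages/my_vendor/my_camera",   # hypothetical module
        "codelet": "isaac::my_vendor::MyCameraDriver",   # hypothetical codelet
    }
    sensor_drv_config = {"framerate": 30}

    def get_websight_config(self):
        return {"port": 3000}

    def get_evaluator(self):
        return "isaac::sensor_certification::FramerateEvaluator"  # hypothetical

    def get_links(self):
        # (source channel on the dut codelet, destination channel on the evaluator)
        return [("color", "image")]

    def get_evaluator_config(self):
        return {"expected_framerate": 30}

    def get_tolerance_map(self):
        # Maps each tolerance name to the evaluator config parameter it sets
        return {"framerate": "framerate_tolerance"}
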
Example #2
def main(args):
    # Read CVAT XML file
    cvat_xml_path = args.cvat_xml
    if os.path.exists(cvat_xml_path):
        tree = et.parse(cvat_xml_path)
    else:
        print("Please provide a valid XML file from CVAT.")
        return

    # Get image cask UUID that these labels are associated with
    image_cask_uuid = cvat_xml_path.split('/')[-1].split('.')[0]

    # Start application to record
    app = Application()
    # Add a dummy node from which to publish the constructed Detections2 messages
    app.add("node")
    message_ledger = app.nodes["node"].components["MessageLedger"]
    # Load record subgraph and configure
    app.load("packages/cask/apps/record.subgraph.json", prefix="record")
    record_interface = app.nodes["record.interface"].components["input"]
    record_interface.config.base_directory = args.base_directory_gt
    # Connect output of dummy node to recorder
    app.connect(message_ledger, 'in', record_interface, 'bounding_boxes')
    app.start()

    # Loop through each image element in the XML tree
    count = 0
    for image in tree.findall("./image"):
        # "Name" attribute corresponds to the image filepath that was input to the CVAT labeling
        # tool. Convention is: <image_cask_uuid>/<channel>/<acqtime>.png
        image_uuid, channel, png = image.attrib['name'].split('/')

        # Check that the image_uuid corresponds to the one specified by the XML filename
        if image_uuid != image_cask_uuid:
            continue

        # Extract the acquisition time from the filename (int() handles leading zeros)
        acqtime = int(png.split('.')[0])

        # Slice the detections of interest for this image
        all_ground_truth_bboxes = image.findall("./box")
        sliced_ground_truth_boxes = slice_detections(all_ground_truth_bboxes,
                                                     args.slice_mode)
        num_sliced_ground_truth_boxes = len(sliced_ground_truth_boxes)

        # Build Detections2Proto message
        detections2 = Message.create_message_builder('Detections2Proto')
        detections2.acqtime = acqtime
        detections2.uuid = str(uuid.uuid1())
        predictions = detections2.proto.init('predictions',
                                             num_sliced_ground_truth_boxes)
        bounding_boxes = detections2.proto.init('boundingBoxes',
                                                num_sliced_ground_truth_boxes)

        # Populate the Detections2Proto and PredictionProto messages per sliced bounding box
        for i in range(num_sliced_ground_truth_boxes):
            box = sliced_ground_truth_boxes[i]
            row = {
                a.attrib['name']: a.text
                for a in box.findall("./attribute")
            }
            row.update(box.attrib)

            prediction = predictions[i]
            prediction.label = row['label']
            prediction.confidence = 1.0

            bbox = bounding_boxes[i]
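            # Note: CVAT's xtl/ytl/xbr/ybr pixel values are swapped into the bbox's (y, x) fields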
            bbox.min.y = float(row['xtl'])
            bbox.min.x = float(row['ytl'])
            bbox.max.y = float(row['xbr'])
            bbox.max.x = float(row['ybr'])

        # Publish the message to the node being recorded
        app.publish('node', 'MessageLedger', 'in', detections2)
        # Receive the message back from the node's MessageLedger (return value is not used further)
        recv_msg = app.receive('node', 'MessageLedger', 'in')
        count += 1
        time.sleep(0.1)  # sleep to make sure we don't lose any messages

    app.stop()
    print("Wrote " + str(count) + " messages")

    # Write metadata to a JSON file per output cask. The metadata serves to associate
    # corresponding image and ground truth casks. As per the RACI evaluation workflow
    # and data management, image casks and ground truth casks are stored in separate
    # directories.
    if args.raci_metadata:
        # Populate ground truth cask metadata
        ground_truth_metadata_json = {}
        ground_truth_metadata_json["Image_Cask_File"] = image_cask_uuid
        ground_truth_metadata_json["Data_Source"] = "ground_truth"

        # Write ground truth cask metadata
        ground_truth_metadata_path = os.path.join(args.base_directory_gt,
                                                  app.uuid + "_md.json")
        with open(ground_truth_metadata_path, 'w') as f:
            json.dump(ground_truth_metadata_json, f, indent=2)
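
main(args) in Example #2 reads four attributes from its argument object: cvat_xml, base_directory_gt, slice_mode, and raci_metadata. A minimal argparse entry point that would supply them could look like the sketch below; only those attribute names come from the code above, while the flag spellings, defaults, and help strings are assumptions.

if __name__ == '__main__':
    import argparse

    # Minimal CLI sketch -- only the destination names (cvat_xml,
    # base_directory_gt, slice_mode, raci_metadata) come from main() above;
    # flag spellings, defaults and help texts are assumptions.
    parser = argparse.ArgumentParser(
        description="Record CVAT XML labels into a ground truth cask.")
    parser.add_argument('--cvat_xml', required=True,
                        help="Path to the CVAT XML annotation file.")
    parser.add_argument('--base_directory_gt', required=True,
                        help="Output directory for the recorded ground truth cask.")
    parser.add_argument('--slice_mode', default=None,
                        help="Forwarded to slice_detections().")
    parser.add_argument('--raci_metadata', action='store_true',
                        help="Also write the per-cask metadata JSON.")
    main(parser.parse_args())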