Example #1
0
class Capsule(BaseCapsule):
    """OpenVINO-backed classifier that adds a "mask" attribute to face
    detections produced by an upstream detector."""
    name = "classifier_mask_openvino"
    description = "OpenVINO face mask classifier."
    version = 1
    device_mapper = DeviceMapper.map_to_openvino_devices()
    # Consumes one "face" detection at a time
    input_type = NodeDescription(
        size=NodeDescription.Size.SINGLE,
        detections=["face"])
    # Emits the same detection, labeled with a mask attribute and its
    # confidence score
    output_type = NodeDescription(
        size=NodeDescription.Size.SINGLE,
        detections=["face"],
        attributes={"mask": Backend.LABELS},
        extra_data=["mask_confidence"])
    # The backend runs in a separate RPC process
    backend_loader = lambda capsule_files, device: BackendRpcProcess(
        Backend,
        model_xml=capsule_files["face_mask.xml"],
        weights_bin=capsule_files["face_mask.bin"],
        device_name=device
    )
    options = {
        "threshold": FloatOption(
            # Fixed the double negative in the user-facing description
            # ("deemed to not be not wearing" -> "deemed to not be wearing").
            description="Scores under this value are deemed to not be "
                        "wearing a mask.",
            default=0.3,
            min_val=None,
            max_val=None
        )
    }
Example #2
0
class Capsule(BaseCapsule):
    """Example person detector capsule backed by an SSD MobileNet model."""
    name = "detector_person_example"
    version = 1
    device_mapper = DeviceMapper.map_to_all_gpus()
    # A detector: consumes no output from other capsules
    input_type = NodeDescription(size=NodeDescription.Size.NONE)
    # Emits DetectionNodes describing people
    output_type = NodeDescription(
        size=NodeDescription.Size.ALL,
        detections=["person"])
    options = {
        "detection_threshold": FloatOption(
            description="The confidence threshold for the model. A higher "
                        "value means fewer detections",
            default=0.5,
            min_val=0.1,
            max_val=1.0),
        "scale_frame": BoolOption(
            description="If true, the frame width and height will be clamped "
                        "to the value of scale_frame_max_side_length, "
                        "preserving aspect ratio",
            default=False),
        "scale_frame_max_side_length": IntOption(
            description="The width or height to scale frames down to "
                        "if scale_frames is True",
            default=2000,
            min_val=200,
            max_val=4000),
    }

    @staticmethod
    def backend_loader(capsule_files: Dict[str, bytes], device: str) \
            -> BaseBackend:
        """Build the backend from the packaged model files.

        The existence check below is only needed because this example's
        model is downloaded when tests run instead of living in the repo;
        real capsules can skip it.
        """
        model_filename = "ssd_mobilenet_v1_coco.pb"
        try:
            model_bytes = capsule_files[model_filename]
        except KeyError as exc:
            raise FileNotFoundError(
                f"Model [{model_filename}] not found. Did you make "
                f"sure to run tests? Example models files are not "
                f"stored directly in the repo, but are downloaded "
                f"when tests are run.") from exc

        return Backend(model_bytes=model_bytes,
                       metadata_bytes=capsule_files["dataset_metadata.json"],
                       device=device)
Example #3
0
class Capsule(BaseCapsule):
    """Face recognition capsule that attaches an encoding to face
    detections."""
    name = "recognizer_face"
    description = "✨ Recognize faces. Works best close-up."
    version = 2
    # Consumes a single "face" detection at a time
    input_type = NodeDescription(
        size=NodeDescription.Size.SINGLE,
        detections=["face"])
    # Emits the same detection, now marked as encoded
    output_type = NodeDescription(
        size=NodeDescription.Size.SINGLE,
        detections=["face"],
        encoded=True)
    backend_loader = lambda capsule_files, device: Backend(
        model_bytes=capsule_files["encoder_face_center_loss.pb"],
        model_name="vggface2_center_loss",
        device=device)
    options = {
        "recognition_threshold": FloatOption(
            default=0.8, min_val=0.0, max_val=None),
    }
Example #4
0
class Capsule(BaseCapsule):
    """Example capsule wrapping a TensorFlow Object Detection API model."""

    # Capsule metadata
    name = "face_detector"
    description = "This is an example of how to wrap a TensorFlow Object " \
                  "Detection API model"
    version = 1

    # An object detector takes no input from other capsules, so the input
    # type is a NodeDescription with size=NONE.
    input_type = NodeDescription(size=NodeDescription.Size.NONE)

    # The output is a list of bounding boxes (size=ALL). Each detection is
    # a "face", with its confidence stored in extra_data.
    output_type = NodeDescription(
        size=NodeDescription.Size.ALL,
        detections=["face"],
        extra_data=["detection_confidence"])

    # Construct the backend from the packaged model and metadata files.
    backend_loader = lambda capsule_files, device: Backend(
        model_bytes=capsule_files["detector.pb"],
        metadata_bytes=capsule_files["dataset_metadata.json"],
        device=device)

    # User-tunable options: a minimum detection confidence, adjustable via
    # the BrainFrame client or the REST API.
    options = {
        "threshold": FloatOption(
            description="Filter out bad detections",
            default=0.5,
            min_val=0.0,
            max_val=1.0,
        )
    }
Example #5
0
class Capsule(BaseCapsule):
    """OpenVINO safety-gear detector (safety vests and safety hats)."""
    name = "detector_safety_gear_openvino"
    description = "OpenVino's safety gear detector (safety vest and safety " \
                  "hat)"
    version = 1
    device_mapper = DeviceMapper.map_to_openvino_devices()
    # A detector: takes no input from other capsules
    input_type = NodeDescription(size=NodeDescription.Size.NONE)
    # Returns every safety-gear detection found in the frame
    output_type = NodeDescription(
        size=NodeDescription.Size.ALL,
        detections=["safety vest", "safety hat"])
    # The OpenVINO backend runs in its own RPC process
    backend_loader = lambda capsule_files, device: BackendRpcProcess(
        Backend,
        model_xml=capsule_files["worker_safety_mobilenet_FP16.xml"],
        weights_bin=capsule_files["worker_safety_mobilenet_FP16.bin"],
        device_name=device)
    options = {
        "threshold": FloatOption(
            default=0.5, min_val=0.0, max_val=None),
    }
Example #6
0
# Action labels to ignore (presumably filtered out of model predictions
# downstream — confirm against the consumer of this list).
ignore = [
    "answer phone",
    "swim",
    "brush teeth",
    "martial art",
    "carry/hold (an object)",
    "catch (an object)",
    "clink glass",
    "close (e.g., a door, a box)",
    "cook",
    "cut",
    "dig",
    "dance",
    "chop",
    "climb (e.g., a mountain)",
    "dress/put on clothing",
    "drink",
    "drive (e.g., a car, a truck)",
    "eat",
    "enter",
    "exit",
    "extract",
    "fishing",
    "hit (an object)",
    "kick (an object)",
    "lift/pick up",
    "listen (e.g., to music)",
    "open (e.g., a window, a car door)",
    "paint",
    "play board game",
    "play musical instrument",
    "play with pets",
    "point to (an object)",
    "press",
    "pull (an object)",
    "push (an object)",
    "put down",
    "read",
    "ride (e.g., a bike, a car, a horse)",
    "row boat",
    "sail boat",
    "shoot",
    "shovel",
    "smoke",
    "stir",
    "take a photo",
    "text on/look at a cellphone",
    "throw",
    "touch (an object)",
    "turn (e.g., a screwdriver)",
    "watch (e.g., TV)",
    "work on a computer",
    "write",
    "fight/hit (a person)",
    "give/serve (an object) to (a person)",
    "grab (a person)",
    "hand clap",
    "hand shake",
    "hand wave",
    "hug (a person)",
    "kick (a person)",
    "kiss (a person)",
    "lift (a person)",
    "listen to (a person)",
    "play with kids",
    "push (another person)",
    "sing to (e.g., self, a person, a group)",
    "take (an object) from (a person)",
    "talk to (e.g., self, a person, a group)",
    "watch (a person)",
]

# Option-name constants, shared with the code that reads capsule_options.
confidence_threshold = "confidence_threshold"
iou_threshold = "iou_threshold"
# Tunable thresholds exposed by this capsule; both range over [0.0, 1.0].
capsule_options = {
    confidence_threshold: FloatOption(default=0.5, min_val=0.0, max_val=1.0),
    iou_threshold: FloatOption(default=0.5, min_val=0.0, max_val=1.0)
}
Example #7
0
from vcap import FloatOption

# String keys for the two gear classes this capsule reports.
safety_hat = "safety_hat"
safety_vest = "safety_vest"

gear_types = [safety_hat, safety_vest]

# Label lists per gear type — presumably ordered to match model output
# indices (0 = "without", 1 = "with"); confirm against the backend.
with_safety_hat = ["without_safety_hat", "with_safety_hat"]
with_safety_vest = ["without_safety_vest", "with_safety_vest"]

# Option-name constants, shared with the code that reads capsule_options.
confidence_threshold = "confidence_threshold"
safety_hat_iou_threshold = "safety_hat_iou_threshold"
safety_vest_iou_threshold = "safety_vest_iou_threshold"

# Tunable thresholds; note the hat IOU threshold defaults lower (0.1)
# than the vest IOU threshold (0.5).
capsule_options = {
    confidence_threshold: FloatOption(default=0.5, min_val=0.0, max_val=1.0),
    safety_hat_iou_threshold: FloatOption(default=0.1,
                                          min_val=0.0,
                                          max_val=1.0),
    safety_vest_iou_threshold: FloatOption(default=0.5,
                                           min_val=0.0,
                                           max_val=1.0)
}
Example #8
0
    'gender': ['masculine', 'feminine', 'unknown'],
    'bag': ['has_bag', 'no_bag', 'unknown'],
    'backpack': ['has_backpack', 'no_backpack', 'unknown'],
    'hat': ['has_hat', 'no_hat', 'unknown'],
    'sleeves': ['has_long_sleeves', 'has_short_sleeves', 'unknown'],
    'pants': ['has_long_pants', 'has_short_pants', 'unknown'],
    'hair': ['has_long_hair', 'has_short_hair', 'unknown'],
    'coat_jacket': ['has_coat_jacket', 'no_coat_jacket', 'unknown']
}

# Generate one confidence-threshold option per attribute value, skipping
# the "unknown" placeholder.
options = {
    f"{attribute}_confidence": FloatOption(
        default=0.3,
        min_val=0.0,
        max_val=1.0,
        description=f"If a person is detected as possibly having the "
                    f"attribute {attribute}, this threshold determines how "
                    f"confident it must be to return a detection.")
    # The category keys are not needed here, only the attribute values,
    # so iterate .values() instead of .items() (the original bound an
    # unused `category` variable).
    for attributes in ATTRIBUTES.values()
    for attribute in attributes
    if attribute != "unknown"
}


class Backend(BaseOpenVINOBackend):
    # NOTE(review): this method appears truncated at the chunk boundary —
    # the lines after send_to_batch(...).get() are not visible here.
    def process_frame(self, frame: np.ndarray,
                      detection_node: DETECTION_NODE_TYPE,
                      options: Dict[str, OPTION_TYPE],
                      state: BaseStreamState) -> DETECTION_NODE_TYPE:
        """Run inference on the crop of the frame given by the detection's
        bounding box, via the backend's batching queue."""
        crop = Resize(frame).crop_bbox(detection_node.bbox).frame
        input_dict, _ = self.prepare_inputs(crop)
        # Blocks until the batched inference result is available.
        prediction = self.send_to_batch(input_dict).get()
Example #9
0
# Option-name constants, shared with the code that reads capsule_options.
min_iou_for_iou_match = "min_iou_for_iou_match"
# NOTE(review): high_detection_threshold has no entry in capsule_options
# below — confirm it is consumed elsewhere before removing.
high_detection_threshold = "high_detection_threshold"
min_track_length = "min_track_length"
max_misses = "max_misses"
# Detection classes handled by this tracker.
tracks_classes = [
    "car",
    "motorcycle",
    "bus",
    "train",
    "truck",
    "boat",
    "vehicle",
    "license_plate"
]

# Tunable tracker options; every entry uses the same default/min/max
# keyword order for readability.
capsule_options = {
    min_iou_for_iou_match: FloatOption(
        default=0.5,
        min_val=0.0,
        max_val=1.0),
    min_track_length: IntOption(
        default=2,
        min_val=0,
        max_val=None),
    max_misses: IntOption(
        default=10,
        min_val=0,
        max_val=None),
}