class Capsule(BaseCapsule):
    """Classifies whether a detected face is wearing a mask (OpenVINO)."""
    name = "classifier_mask_openvino"
    description = "OpenVINO face mask classifier."
    version = 1
    device_mapper = DeviceMapper.map_to_openvino_devices()
    # Operates on one face detection at a time.
    input_type = NodeDescription(
        size=NodeDescription.Size.SINGLE,
        detections=["face"])
    # Adds a "mask" attribute and the raw score as extra data.
    output_type = NodeDescription(
        size=NodeDescription.Size.SINGLE,
        detections=["face"],
        attributes={"mask": Backend.LABELS},
        extra_data=["mask_confidence"])
    backend_loader = lambda capsule_files, device: BackendRpcProcess(
        Backend,
        model_xml=capsule_files["face_mask.xml"],
        weights_bin=capsule_files["face_mask.bin"],
        device_name=device
    )
    options = {
        "threshold": FloatOption(
            # Fixed: the original text read "deemed to not be not wearing a
            # mask" — a double negative that inverted the stated meaning.
            description="Scores under this value are deemed to not be "
                        "wearing a mask.",
            default=0.3,
            min_val=None,
            max_val=None
        )
    }
class Capsule(BaseCapsule):
    """OpenVINO text-spotting capsule: detects text regions and reads them."""
    name = "detector_text_openvino"
    description = "✨ OpenVINO text detector and reader."
    version = 1
    device_mapper = DeviceMapper.map_to_openvino_devices()
    # Takes no input from other capsules; emits text detections for the frame.
    input_type = NodeDescription(size=NodeDescription.Size.NONE)
    output_type = NodeDescription(
        size=NodeDescription.Size.ALL,
        detections=["text"],
        extra_data=["detection_confidence", "text"])
    options = common_detector_options

    def backend_loader(capsule_files, device):
        """Build the three-model text-spotting backend on the given device."""
        def load(stem):
            # Each sub-model ships as an IR pair (.xml/.bin) under models/.
            return OpenVINOModel(
                model_xml=capsule_files[f"models/{stem}.xml"],
                weights_bin=capsule_files[f"models/{stem}.bin"],
                device_name=device)

        return Backend(
            detector=load("text-spotting-0001-detector"),
            recognizer_encoder=load("text-spotting-0001-recognizer-encoder"),
            recognizer_decoder=load("text-spotting-0001-recognizer-decoder"))
class Capsule(BaseCapsule):
    """Example capsule that classifies the gait of detected people."""
    name = "classifier_gait_example"
    version = 1
    device_mapper = DeviceMapper.map_to_all_gpus()
    # One person detection in; the same detection out with a gait attribute.
    input_type = NodeDescription(
        size=NodeDescription.Size.SINGLE,
        detections=["person"])
    output_type = NodeDescription(
        size=NodeDescription.Size.SINGLE,
        detections=["person"],
        attributes={config.category: config.values},
        extra_data=[config.extra_data])
    options = {}

    @staticmethod
    def backend_loader(capsule_files: Dict[str, bytes], device: str) \
            -> BaseBackend:
        """Load the example gait model, with a friendly error if absent.

        Real capsules do not need this existence check. It is only here
        because the example model is downloaded by the test suite instead
        of being stored directly in the repo.
        """
        model_filename = "classification_gait_model.pb"
        try:
            model_file = capsule_files[model_filename]
        except KeyError as err:
            raise FileNotFoundError(
                f"Model [{model_filename}] not found. Did you make "
                f"sure to run tests? Example models files are not "
                f"stored directly in the repo, but are downloaded "
                f"when tests are run.") from err
        return Backend(
            model_bytes=model_file,
            metadata_bytes=capsule_files["dataset_metadata.json"],
            model_name="inception_resnet_v2",
            device=device)
class Capsule(BaseCapsule):
    """Example person detector backed by an SSD MobileNet model."""
    name = "detector_person_example"
    version = 1
    device_mapper = DeviceMapper.map_to_all_gpus()
    # This capsule takes no input from other capsules
    input_type = NodeDescription(size=NodeDescription.Size.NONE)
    # This capsule produces DetectionNodes of people
    output_type = NodeDescription(size=NodeDescription.Size.ALL,
                                  detections=["person"])
    options = {
        "detection_threshold": FloatOption(
            description="The confidence threshold for the model. A higher "
                        "value means fewer detections",
            default=0.5,
            min_val=0.1,
            max_val=1.0),
        "scale_frame": BoolOption(
            description="If true, the frame width and height will be clamped "
                        "to the value of scale_frame_max_side_length, "
                        "preserving aspect ratio",
            default=False),
        "scale_frame_max_side_length": IntOption(
            # Fixed: the description referred to a nonexistent "scale_frames"
            # option; the actual option above is named "scale_frame".
            description="The width or height to scale frames down to "
                        "if scale_frame is True",
            default=2000,
            min_val=200,
            max_val=4000)
    }

    @staticmethod
    def backend_loader(capsule_files: Dict[str, bytes], device: str) \
            -> BaseBackend:
        """Load the example detection model, warning clearly if missing.

        Real capsules do not need to do this check. This is only to provide
        a warning for this example because the model is not included in the
        repo; it is downloaded when tests are run.
        """
        model_filename = "ssd_mobilenet_v1_coco.pb"
        try:
            model_file = capsule_files[model_filename]
        except KeyError as exc:
            message = f"Model [{model_filename}] not found. Did you make " \
                      f"sure to run tests? Example models files are not " \
                      f"stored directly in the repo, but are downloaded " \
                      f"when tests are run."
            raise FileNotFoundError(message) from exc
        return Backend(model_bytes=model_file,
                       metadata_bytes=capsule_files["dataset_metadata.json"],
                       device=device)
class Capsule(BaseCapsule):
    """Fast OpenVINO face detector capsule."""
    name = "detector_face_openvino"
    description = "✨ OpenVINO fast face detector."
    version = 1
    device_mapper = DeviceMapper.map_to_openvino_devices()
    # No upstream input; emits every face detection found in the frame.
    input_type = NodeDescription(size=NodeDescription.Size.NONE)
    output_type = NodeDescription(
        size=NodeDescription.Size.ALL,
        detections=["face"])
    options = common_detector_options

    def backend_loader(capsule_files, device):
        """Start the face-detection backend in an RPC worker process."""
        return BackendRpcProcess(
            Backend,
            model_xml=capsule_files["face-detection-adas-0001.xml"],
            weights_bin=capsule_files["face-detection-adas-0001.bin"],
            device_name=device)
class Capsule(BaseCapsule):
    """Fast OpenVINO person detector tuned for overhead surveillance."""
    name = "detector_person_overhead_openvino"
    description = "OpenVINO fast person detector. Works best in " \
                  "surveillance perspectives from a downwards facing point " \
                  "of view."
    version = 1
    device_mapper = DeviceMapper.map_to_openvino_devices()
    # No upstream input; emits every person detection found in the frame.
    input_type = NodeDescription(size=NodeDescription.Size.NONE)
    output_type = NodeDescription(
        size=NodeDescription.Size.ALL,
        detections=["person"])
    options = common_detector_options

    def backend_loader(capsule_files, device):
        """Start the person-detection backend in an RPC worker process."""
        return BackendRpcProcess(
            Backend,
            model_xml=capsule_files["person-detection-retail-0013.xml"],
            weights_bin=capsule_files["person-detection-retail-0013.bin"],
            device_name=device)
class Capsule(BaseCapsule):
    """OpenVINO age and gender classifier for face detections."""
    name = "classifier_face_age_gender_openvino"
    description = "OpenVINO face age/gender classifier."
    version = 1
    device_mapper = DeviceMapper.map_to_openvino_devices()
    # Classifies one face detection at a time.
    input_type = NodeDescription(
        size=NodeDescription.Size.SINGLE,
        detections=["face"])
    # Adds gender and binned-age attributes; raw values go in extra data.
    output_type = NodeDescription(
        size=NodeDescription.Size.SINGLE,
        detections=["face"],
        attributes={
            "gender": config.genders,
            "age": list(config.age_bins.values()),
        },
        extra_data=["age", "gender_confidence"])

    def backend_loader(capsule_files, device):
        """Start the age/gender backend in an RPC worker process."""
        return BackendRpcProcess(
            Backend,
            model_xml=capsule_files["age-gender-recognition-retail-0013.xml"],
            weights_bin=capsule_files[
                "age-gender-recognition-retail-0013.bin"],
            device_name=device)
class Capsule(BaseCapsule):
    """OpenVINO color classifier for vehicle detections."""
    name = "classifier_vehicle_color_openvino"
    description = "OpenVINO vehicle color classifier."
    version = 1
    device_mapper = DeviceMapper.map_to_openvino_devices()
    # Classifies one vehicle detection at a time; vehicle types come from
    # this capsule's config module.
    input_type = NodeDescription(
        size=NodeDescription.Size.SINGLE,
        detections=config.vehicle_types)
    output_type = NodeDescription(
        size=NodeDescription.Size.SINGLE,
        detections=config.vehicle_types,
        attributes={"color": config.colors})

    def backend_loader(capsule_files, device):
        """Start the vehicle-attributes backend in an RPC worker process."""
        stem = "vehicle-attributes-recognition-barrier-0039"
        return BackendRpcProcess(
            Backend,
            model_xml=capsule_files[stem + ".xml"],
            weights_bin=capsule_files[stem + ".bin"],
            device_name=device)
class Capsule(BaseCapsule):
    """OpenVINO person/vehicle/bike detector for surveillance scenes."""
    name = "detector_person_vehicle_bike_openvino"
    description = ("OpenVINO person, vehicle, and bike detector. Optimized "
                   "for surveillance camera scenarios.")
    version = 1
    device_mapper = DeviceMapper.map_to_openvino_devices()
    # No upstream input; emits all detections of the three classes below.
    input_type = NodeDescription(size=NodeDescription.Size.NONE)
    output_type = NodeDescription(
        size=NodeDescription.Size.ALL,
        detections=["vehicle", "person", "bike"])
    options = {
        **common_detector_options,
        "only_person_detections": BoolOption(
            default=False,
            description="Filter out anything that's not a person detection"),
    }

    def backend_loader(capsule_files, device):
        """Start the crossroad-detection backend in an RPC worker process."""
        stem = "person-vehicle-bike-detection-crossroad-1016-fp32"
        return BackendRpcProcess(
            Backend,
            model_xml=capsule_files[stem + ".xml"],
            weights_bin=capsule_files[stem + ".bin"],
            device_name=device)
class Capsule(BaseCapsule):
    """OpenVINO emotion classifier for face detections."""
    name = "classifier_face_emotion_openvino"
    description = "OpenVINO face emotion classifier."
    version = 1
    device_mapper = DeviceMapper.map_to_openvino_devices()
    # Classifies one face detection at a time.
    input_type = NodeDescription(
        size=NodeDescription.Size.SINGLE,
        detections=["face"])
    output_type = NodeDescription(
        size=NodeDescription.Size.SINGLE,
        detections=["face"],
        attributes={"emotion": EMOTION_TYPES},
        extra_data=["emotion_confidence"])

    def backend_loader(capsule_files, device):
        """Start the emotion-recognition backend in an RPC worker process."""
        stem = "emotions-recognition-retail-0003"
        return BackendRpcProcess(
            Backend,
            model_xml=capsule_files[stem + ".xml"],
            weights_bin=capsule_files[stem + ".bin"],
            device_name=device)
class Capsule(BaseCapsule):
    """OpenVINO detector for worker safety gear (vests and hats)."""
    name = "detector_safety_gear_openvino"
    description = "OpenVino's safety gear detector (safety vest and safety " \
                  "hat)"
    version = 1
    device_mapper = DeviceMapper.map_to_openvino_devices()
    # No upstream input; emits all safety-gear detections in the frame.
    input_type = NodeDescription(size=NodeDescription.Size.NONE)
    output_type = NodeDescription(
        size=NodeDescription.Size.ALL,
        detections=["safety vest", "safety hat"]
    )
    backend_loader = lambda capsule_files, device: BackendRpcProcess(
        Backend,
        model_xml=capsule_files["worker_safety_mobilenet_FP16.xml"],
        weights_bin=capsule_files["worker_safety_mobilenet_FP16.bin"],
        device_name=device
    )
    options = {
        "threshold": FloatOption(
            # Added: this option previously shipped with no description,
            # unlike the options of every other capsule in the project.
            description="Detections with a confidence score below this "
                        "value are discarded.",
            default=0.5,
            min_val=0.0,
            max_val=None
        )
    }
class Capsule(BaseCapsule):
    """Rough classifier for safety hat / vest on person detections."""
    name = "classifier_safety_gear_openvino"
    description = "Roughly identify if person is wearing safety hat " \
                  "and safety vest."
    version = 1
    device_mapper = DeviceMapper.map_to_openvino_devices()
    input_type = NodeDescription(
        size=NodeDescription.Size.ALL,
        detections=["person"])
    # Attribute names/values come from this capsule's module-level constants.
    output_type = NodeDescription(
        size=NodeDescription.Size.ALL,
        detections=["person"],
        attributes={safety_hat: with_safety_hat,
                    safety_vest: with_safety_vest},
        extra_data=["safety_hat_iou", "safety_hat_confidence",
                    "safety_vest_iou", "safety_vest_confidence"],
    )
    options = capsule_options

    def backend_loader(capsule_files, device):
        """Start the shared safety-gear model in an RPC worker process."""
        return BackendRpcProcess(
            Backend,
            model_xml=capsule_files["worker_safety_mobilenet_FP16.xml"],
            weights_bin=capsule_files["worker_safety_mobilenet_FP16.bin"],
            device_name=device)