_LOGGER = logging.getLogger(__name__)

ATTR_BOUNDING_BOX = "bounding_box"
ATTR_CLASSIFIER = "classifier"
ATTR_IMAGE_ID = "image_id"
ATTR_MATCHED = "matched"
FACEBOX_NAME = "name"
CLASSIFIER = "facebox"
DATA_FACEBOX = "facebox_classifiers"
FILE_PATH = "file_path"

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_IP_ADDRESS): cv.string,
    vol.Required(CONF_PORT): cv.port,
    vol.Optional(CONF_USERNAME): cv.string,
    vol.Optional(CONF_PASSWORD): cv.string,
})

SERVICE_TEACH_SCHEMA = vol.Schema({
    vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
    vol.Required(ATTR_NAME): cv.string,
    vol.Required(FILE_PATH): cv.string,
})


def check_box_health(url, username, password):
    """Check the health of the classifier and return its id if healthy."""
    kwargs = {}
CATEGORY_SCHEMA = vol.Schema({
    vol.Required(CONF_CATEGORY): cv.string,
    vol.Optional(CONF_AREA): AREA_SCHEMA,
})

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_FILE_OUT, default=[]):
        vol.All(cv.ensure_list, [cv.template]),
    vol.Required(CONF_MODEL): vol.Schema({
        vol.Required(CONF_GRAPH): cv.isdir,
        vol.Optional(CONF_AREA): AREA_SCHEMA,
        vol.Optional(CONF_CATEGORIES, default=[]):
            vol.All(cv.ensure_list, [vol.Any(cv.string, CATEGORY_SCHEMA)]),
        vol.Optional(CONF_LABELS): cv.isfile,
        vol.Optional(CONF_LABEL_OFFSET, default=1): int,
        vol.Optional(CONF_MODEL_DIR): cv.isdir,
    }),
})


def get_model_detection_function(model):
    """Get a tf.function for detection."""

    @tf.function
    def detect_fn(image):
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_IP_ADDRESS): cv.string,
    vol.Required(CONF_PORT): cv.port,
    vol.Optional(CONF_API_KEY, default=DEFAULT_API_KEY): cv.string,
    vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
    vol.Optional(CONF_CUSTOM_MODEL, default=""): cv.string,
    vol.Optional(CONF_TARGETS, default=DEFAULT_TARGETS):
        vol.All(cv.ensure_list, [vol.Schema(TARGETS_SCHEMA)]),
    vol.Optional(CONF_ROI_Y_MIN, default=DEFAULT_ROI_Y_MIN): cv.small_float,
    vol.Optional(CONF_ROI_X_MIN, default=DEFAULT_ROI_X_MIN): cv.small_float,
    vol.Optional(CONF_ROI_Y_MAX, default=DEFAULT_ROI_Y_MAX): cv.small_float,
    vol.Optional(CONF_ROI_X_MAX, default=DEFAULT_ROI_X_MAX): cv.small_float,
    # Apply the range check after coercion; nesting vol.Range inside vol.Coerce
    # would silently skip the bounds check.
    vol.Optional(CONF_SCALE, default=DEAULT_SCALE):
        vol.All(vol.Coerce(float), vol.Range(min=0.1, max=1)),
    vol.Optional(CONF_SAVE_FILE_FOLDER): cv.isdir,
    vol.Optional(CONF_SAVE_TIMESTAMPTED_FILE, default=False): cv.boolean,
    vol.Optional(CONF_ALWAYS_SAVE_LATEST_JPG, default=False): cv.boolean,
    vol.Optional(CONF_SHOW_BOXES, default=True): cv.boolean,
})
import homeassistant.helpers.config_validation as cv
from homeassistant.components.image_processing import (
    PLATFORM_SCHEMA, ImageProcessingEntity, CONF_SOURCE, CONF_ENTITY_ID,
    CONF_NAME)

_LOGGER = logging.getLogger(__name__)

CONF_ENDPOINT = 'endpoint'
CONF_TAGS = 'tags'

ROUNDING_DECIMALS = 2

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_ENDPOINT): cv.string,
    vol.Optional(CONF_TAGS, default=[]):
        vol.All(cv.ensure_list, [cv.string]),
})


def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the classifier."""
    entities = []
    for camera in config[CONF_SOURCE]:
        entities.append(Tagbox(
            camera.get(CONF_NAME),
            config[CONF_ENDPOINT],
            camera[CONF_ENTITY_ID],
            config[CONF_TAGS],
        ))
]

CONF_ATTRIBUTES = 'attributes'
DEFAULT_ATTRIBUTES = [ATTR_AGE, ATTR_GENDER]


def validate_attributes(list_attributes):
    """Validate face attributes."""
    for attr in list_attributes:
        if attr not in SUPPORTED_ATTRIBUTES:
            raise vol.Invalid("Invalid attribute {0}".format(attr))
    return list_attributes


PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_ATTRIBUTES, default=DEFAULT_ATTRIBUTES):
        vol.All(cv.ensure_list, validate_attributes),
})


@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
    """Set up the Microsoft Face detection platform."""
    api = hass.data[DATA_MICROSOFT_FACE]
    attributes = config[CONF_ATTRIBUTES]

    entities = []
    for camera in config[CONF_SOURCE]:
        entities.append(MicrosoftFaceDetectEntity(
            camera[CONF_ENTITY_ID], api, attributes, camera.get(CONF_NAME)
        ))
    'gb',
    'kr',
    'mx',
    'sg',
]

CONF_REGION = 'region'
CONF_ALPR_BIN = 'alp_bin'

DEFAULT_BINARY = 'alpr'
DEFAULT_CONFIDENCE = 80

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_REGION):
        vol.All(vol.Lower, vol.In(OPENALPR_REGIONS)),
    vol.Optional(CONF_ALPR_BIN, default=DEFAULT_BINARY): cv.string,
    vol.Optional(CONF_CONFIDENCE, default=DEFAULT_CONFIDENCE):
        vol.All(vol.Coerce(float), vol.Range(min=0, max=100))
})


@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
    """Set up the openalpr local platform."""
    command = [config[CONF_ALPR_BIN], '-c', config[CONF_REGION], '-']
    confidence = config[CONF_CONFIDENCE]

    entities = []
    for camera in config[CONF_SOURCE]:
        entities.append(OpenAlprLocalEntity(
            camera[CONF_ENTITY_ID], command, confidence, camera.get(CONF_NAME)
    CONF_SOURCE,
    PLATFORM_SCHEMA,
    ImageProcessingFaceEntity,
)
from homeassistant.core import split_entity_id
import homeassistant.helpers.config_validation as cv

_LOGGER = logging.getLogger(__name__)

ATTR_NAME = "name"
CONF_FACES = "faces"

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_FACES): {cv.string: cv.isfile},
    vol.Optional(CONF_CONFIDENCE, default=0.6): vol.Coerce(float),
})


def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Dlib Face detection platform."""
    entities = []
    for camera in config[CONF_SOURCE]:
        entities.append(DlibFaceIdentifyEntity(
            camera[CONF_ENTITY_ID],
            config[CONF_FACES],
            camera.get(CONF_NAME),
            config[CONF_CONFIDENCE],
    CLASSIFIER_GROUP_CONFIG,
    CONF_CLASSIFIER,
    CONF_ENTITY_ID,
    CONF_NAME,
    process_image,
)

DEPENDENCIES = ['opencv']

_LOGGER = logging.getLogger(__name__)

DEFAULT_TIMEOUT = 10

SCAN_INTERVAL = timedelta(seconds=2)

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(CLASSIFIER_GROUP_CONFIG)


def _create_processor_from_config(hass, camera_entity, config):
    """Create an OpenCV processor from configuration."""
    classifier_config = config[CONF_CLASSIFIER]
    name = '{} {}'.format(
        config[CONF_NAME],
        split_entity_id(camera_entity)[1].replace('_', ' '))

    processor = OpenCVImageProcessor(
        hass,
        camera_entity,
        name,
        classifier_config,
    )
    ImageProcessingEntity, CONF_SOURCE, CONF_ENTITY_ID, CONF_NAME)

_LOGGER = logging.getLogger(__name__)

CONF_URL = 'url'
CONF_CONCEPTS = 'concepts'
DEFAULT_CONCEPTS = 'NO_CONCEPT'

SCAN_INTERVAL = timedelta(seconds=5)

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_URL): cv.string,
    vol.Optional(CONF_CONCEPTS, default=[DEFAULT_CONCEPTS]):
        vol.All(cv.ensure_list, [cv.string]),
})


def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the classifier."""
    entities = []
    for camera in config[CONF_SOURCE]:
        entities.append(Classifier(
            hass,
            camera.get(CONF_NAME),
            config[CONF_URL],
            config[CONF_CONCEPTS],
            camera[CONF_ENTITY_ID],
CONF_ROTATE = "rotate" CONF_SSOCR_BIN = "ssocr_bin" CONF_THRESHOLD = "threshold" CONF_WIDTH = "width" CONF_X_POS = "x_position" CONF_Y_POS = "y_position" DEFAULT_BINARY = "ssocr" PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Optional(CONF_EXTRA_ARGUMENTS, default=""): cv.string, vol.Optional(CONF_DIGITS): cv.positive_int, vol.Optional(CONF_HEIGHT, default=0): cv.positive_int, vol.Optional(CONF_SSOCR_BIN, default=DEFAULT_BINARY): cv.string, vol.Optional(CONF_THRESHOLD, default=0): cv.positive_int, vol.Optional(CONF_ROTATE, default=0): cv.positive_int, vol.Optional(CONF_WIDTH, default=0): cv.positive_int, vol.Optional(CONF_X_POS, default=0): cv.string, vol.Optional(CONF_Y_POS, default=0): cv.positive_int, } ) async def async_setup_platform( hass: HomeAssistant, config: ConfigType, async_add_entities: AddEntitiesCallback, discovery_info: DiscoveryInfoType | None = None, ) -> None: """Set up the Seven segments OCR platform."""
DEFAULT_API_KEY = "" DEFAULT_TIMEOUT = 10 CONF_DETECT_ONLY = "detect_only" CLASSIFIER = "deepstack_face" DATA_DEEPSTACK = "deepstack_classifiers" FILE_PATH = "file_path" SERVICE_TEACH_FACE = "deepstack_teach_face" PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_IP_ADDRESS): cv.string, vol.Required(CONF_PORT): cv.port, vol.Optional(CONF_API_KEY, default=DEFAULT_API_KEY): cv.string, vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int, vol.Optional(CONF_DETECT_ONLY, default=False): cv.boolean, }) SERVICE_TEACH_SCHEMA = vol.Schema({ vol.Optional(ATTR_ENTITY_ID): cv.entity_ids, vol.Required(ATTR_NAME): cv.string, vol.Required(FILE_PATH): cv.string, }) def setup_platform(hass, config, add_devices, discovery_info=None): """Set up the classifier."""
DEFAULT_NEIGHBORS = 4
DEFAULT_SCALE = 1.1
DEFAULT_TIMEOUT = 10

SCAN_INTERVAL = timedelta(seconds=2)

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_CLASSIFIER): {
        cv.string: vol.Any(
            cv.isfile,
            vol.Schema({
                vol.Required(CONF_FILE): cv.isfile,
                # Pass defaults as keyword arguments; the second positional
                # argument of vol.Optional is the error message, not the default.
                vol.Optional(CONF_SCALE, default=DEFAULT_SCALE): float,
                vol.Optional(CONF_NEIGHBORS, default=DEFAULT_NEIGHBORS): cv.positive_int,
                vol.Optional(CONF_MIN_SIZE, default=DEFAULT_MIN_SIZE): vol.Schema(
                    vol.All(vol.Coerce(tuple), vol.ExactSequence([int, int]))
                ),
            }),
        )
    }
})


def _create_processor_from_config(hass, camera_entity, config):
    """Create an OpenCV processor from configuration."""
    classifier_config = config.get(CONF_CLASSIFIER)
    name = f"{config[CONF_NAME]} {split_entity_id(camera_entity)[1].replace('_', ' ')}"
from homeassistant.core import split_entity_id
import homeassistant.helpers.config_validation as cv
from homeassistant.components.image_processing import (
    PLATFORM_SCHEMA, CONF_SOURCE, CONF_ENTITY_ID, CONF_NAME, CONF_CONFIDENCE,
    DEFAULT_CONFIDENCE)
from homeassistant.components.image_processing.microsoft_face_identify import (
    ImageProcessingFaceEntity)

_LOGGER = logging.getLogger(__name__)

CONF_ENDPOINT = 'endpoint'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_ENDPOINT): cv.string,
    vol.Optional(CONF_CONFIDENCE, default=DEFAULT_CONFIDENCE):
        vol.All(vol.Coerce(float), vol.Range(min=0, max=100))
})


def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the classifier."""
    entities = []
    for camera in config[CONF_SOURCE]:
        entities.append(Facebox(
            camera.get(CONF_NAME),
            config[CONF_ENDPOINT],
            camera[CONF_ENTITY_ID],
            config[CONF_CONFIDENCE]))
    add_devices(entities)


class Facebox(ImageProcessingFaceEntity):
ATTR_PLATES = 'plates'
ATTR_VEHICLES = 'vehicles'

OPENALPR_REGIONS = [
    'au',
    'auwide',
    'br',
    'eu',
    'fr',
    'gb',
    'kr',
    'kr2',
    'mx',
    'sg',
    'us',
    'vn2'
]

CONF_REGION = 'region'
CONF_ALPR_BIN = 'alp_bin'

DEFAULT_BINARY = 'alpr'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_REGION):
        vol.All(vol.Lower, vol.In(OPENALPR_REGIONS)),
    vol.Optional(CONF_ALPR_BIN, default=DEFAULT_BINARY): cv.string,
})


@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
    """Set up the openalpr local platform."""
    command = [config[CONF_ALPR_BIN], '-c', config[CONF_REGION], '-']
    confidence = config[CONF_CONFIDENCE]

    entities = []
    for camera in config[CONF_SOURCE]:
        entities.append(
            OpenAlprLocalEntity(camera[CONF_ENTITY_ID], command, confidence,
                                camera.get(CONF_NAME)))
CONF_SAVE_FILE_FOLDER = "save_file_folder" CONF_TARGET = "target" DEFAULT_TARGET = "Person" CONF_SAVE_TIMESTAMPTED_FILE = "save_timestamped_file" DATETIME_FORMAT = "%Y-%m-%d_%H:%M:%S" REQUIREMENTS = ["boto3"] PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Optional(CONF_REGION, default=DEFAULT_REGION): vol.In(SUPPORTED_REGIONS), vol.Required(CONF_ACCESS_KEY_ID): cv.string, vol.Required(CONF_SECRET_ACCESS_KEY): cv.string, vol.Optional(CONF_TARGET, default=DEFAULT_TARGET): cv.string, vol.Optional(CONF_SAVE_FILE_FOLDER): cv.isdir, vol.Optional(CONF_SAVE_TIMESTAMPTED_FILE, default=False): cv.boolean, }) def get_label_instances(response, target): """Get the number of instances of a target label.""" for label in response["Labels"]: if (label["Name"].lower() == target.lower() ): # Lowercase both to prevent any comparing issues return len(label["Instances"]) return 0
    'eu',
    'fr',
    'gb',
    'kr',
    'kr2',
    'mx',
    'sg',
    'us',
    'vn2'
]

CONF_REGION = 'region'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_API_KEY): cv.string,
    vol.Required(CONF_REGION):
        vol.All(vol.Lower, vol.In(OPENALPR_REGIONS)),
})


@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
    """Set up the openalpr cloud api platform."""
    confidence = config[CONF_CONFIDENCE]
    params = {
        'secret_key': config[CONF_API_KEY],
        'tasks': "plate",
        'return_image': 0,
        'country': config[CONF_REGION],
    }
from homeassistant.core import split_entity_id
import homeassistant.helpers.config_validation as cv
from homeassistant.components.image_processing import (
    PLATFORM_SCHEMA, CONF_SOURCE, CONF_ENTITY_ID, CONF_NAME,
    ImageProcessingEntity)

_LOGGER = logging.getLogger(__name__)

CONF_ENDPOINT = 'endpoint'

ROUNDING_DECIMALS = 2

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_ENDPOINT): cv.string,
})


def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the classifier."""
    entities = []
    for camera in config[CONF_SOURCE]:
        entities.append(Facebox(
            camera.get(CONF_NAME),
            config[CONF_ENDPOINT],
            camera[CONF_ENTITY_ID],
        ))
    add_devices(entities)
    vol.Optional(CONF_LEFT, default=0): cv.small_float,
    vol.Optional(CONF_RIGHT, default=1): cv.small_float,
    vol.Optional(CONF_TOP, default=0): cv.small_float,
})

CATEGORY_SCHEMA = vol.Schema({
    vol.Required(CONF_CATEGORY): cv.string,
    vol.Optional(CONF_AREA): AREA_SCHEMA,
})

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_FILE_OUT, default=[]):
        vol.All(cv.ensure_list, [cv.template]),
    vol.Required(CONF_MODEL): vol.Schema({
        vol.Required(CONF_GRAPH): cv.isfile,
        vol.Optional(CONF_AREA): AREA_SCHEMA,
        vol.Optional(CONF_CATEGORIES, default=[]):
            vol.All(cv.ensure_list, [vol.Any(cv.string, CATEGORY_SCHEMA)]),
        vol.Optional(CONF_LABELS): cv.isfile,
        vol.Optional(CONF_MODEL_DIR): cv.isdir,
    })
})


def draw_box(draw, box, img_width, img_height, text='', color=(255, 255, 0)):
    """Draw bounding box on image."""
    ymin, xmin, ymax, xmax = box
    (left, right, top, bottom) = (xmin * img_width, xmax * img_width,
                                  ymin * img_height, ymax * img_height)
    draw.line([(left, top), (left, bottom), (right, bottom),
               (right, top), (left, top)], width=5, fill=color)
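
# Hedged usage sketch (assumes Pillow is available): draw_box() expects a box as
# (ymin, xmin, ymax, xmax) relative to the image size. The image size, box
# values, and label below are arbitrary examples, not from the integration.
def _example_draw_box():
    from PIL import Image, ImageDraw

    img = Image.new("RGB", (640, 480))
    draw = ImageDraw.Draw(img)
    # Draw a box covering the centre half of the frame.
    draw_box(draw, (0.25, 0.25, 0.75, 0.75), img.width, img.height, text='person')
    return img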
ATTR_BOUNDING_BOX = 'bounding_box'
ATTR_GENDER_CONFIDENCE = 'gender_confidence'
ATTR_PERSONS = 'persons'
ATTR_TOTAL_PERSONS = 'total_persons'

DEV = 'dev'
PROD = 'prod'
STATE_DISPLAY = 'state_display'

ACCOUNT_TYPE_SCHEMA = vol.In([DEV, PROD])
STATE_DISPLAY_SCHEMA = vol.In([ATTR_FACES, ATTR_PERSONS])

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_API_KEY): cv.string,
    vol.Optional(CONF_MODE, default=DEV): ACCOUNT_TYPE_SCHEMA,
    vol.Optional(STATE_DISPLAY, default=ATTR_FACES): STATE_DISPLAY_SCHEMA,
})


def encode_image(image):
    """base64 encode an image stream."""
    base64_img = base64.b64encode(image).decode('ascii')
    return base64_img


def parse_api_response(response):
    """Parse the response from the API."""
    faces = []
    persons = []
    'fr',
    'gb',
    'kr',
    'kr2',
    'mx',
    'sg',
    'us',
    'vn2'
]

CONF_ALPR_BIN = 'alp_bin'

DEFAULT_BINARY = 'alpr'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_REGION):
        vol.All(vol.Lower, vol.In(OPENALPR_REGIONS)),
    vol.Optional(CONF_ALPR_BIN, default=DEFAULT_BINARY): cv.string,
})


async def async_setup_platform(hass, config, async_add_entities,
                               discovery_info=None):
    """Set up the OpenALPR local platform."""
    command = [config[CONF_ALPR_BIN], '-c', config[CONF_REGION], '-']
    confidence = config[CONF_CONFIDENCE]

    entities = []
    for camera in config[CONF_SOURCE]:
        entities.append(OpenAlprLocalEntity(
            camera[CONF_ENTITY_ID], command, confidence, camera.get(CONF_NAME)
        ))
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_REGION, default=DEFAULT_REGION): vol.In(SUPPORTED_REGIONS),
    vol.Required(CONF_ACCESS_KEY_ID): cv.string,
    vol.Required(CONF_SECRET_ACCESS_KEY): cv.string,
    vol.Optional(CONF_ROI_Y_MIN, default=DEFAULT_ROI_Y_MIN): cv.small_float,
    vol.Optional(CONF_ROI_X_MIN, default=DEFAULT_ROI_X_MIN): cv.small_float,
    vol.Optional(CONF_ROI_Y_MAX, default=DEFAULT_ROI_Y_MAX): cv.small_float,
    vol.Optional(CONF_ROI_X_MAX, default=DEFAULT_ROI_X_MAX): cv.small_float,
    vol.Optional(CONF_NUMBERS_ONLY, default=False): cv.boolean,
    vol.Optional(CONF_MAKE_BW, default=False): cv.boolean,
    vol.Optional(CONF_ERODE, default=None): vol.In([None, "low", "medium", "high"]),
    vol.Optional(CONF_SAVE_FILE_FOLDER): cv.isdir,
    vol.Optional(CONF_SAVE_TIMESTAMPTED_FILE, default=False): cv.boolean,
    vol.Optional(CONF_UNIT_OF_MEASUREMENT, default=""): cv.string,
    vol.Optional(CONF_BOTO_RETRIES, default=DEFAULT_BOTO_RETRIES):
        vol.All(vol.Coerce(int), vol.Range(min=0)),
})
ATTR_BOUNDING_BOX = 'bounding_box'
ATTR_CLASSIFIER = 'classifier'
ATTR_IMAGE_ID = 'image_id'
ATTR_ID = 'id'
ATTR_MATCHED = 'matched'
FACEBOX_NAME = 'name'
CLASSIFIER = 'facebox'
DATA_FACEBOX = 'facebox_classifiers'
FILE_PATH = 'file_path'
SERVICE_TEACH_FACE = 'facebox_teach_face'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_IP_ADDRESS): cv.string,
    vol.Required(CONF_PORT): cv.port,
    vol.Optional(CONF_USERNAME): cv.string,
    vol.Optional(CONF_PASSWORD): cv.string,
})

SERVICE_TEACH_SCHEMA = vol.Schema({
    vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
    vol.Required(ATTR_NAME): cv.string,
    vol.Required(FILE_PATH): cv.string,
})


def check_box_health(url, username, password):
    """Check the health of the classifier and return its id if healthy."""
    kwargs = {}
    if username:
        kwargs['auth'] = requests.auth.HTTPBasicAuth(username, password)
        cv.string,
    vol.Optional(CONF_AREA): AREA_SCHEMA,
    vol.Optional(CONF_CONFIDENCE): vol.Range(min=0, max=100),
})

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_URL): cv.string,
    vol.Required(CONF_DETECTOR): cv.string,
    vol.Required(CONF_TIMEOUT, default=90): cv.positive_int,
    vol.Optional(CONF_AUTH_KEY, default=""): cv.string,
    vol.Optional(CONF_FILE_OUT, default=[]):
        vol.All(cv.ensure_list, [cv.template]),
    vol.Optional(CONF_CONFIDENCE, default=0.0): vol.Range(min=0, max=100),
    vol.Optional(CONF_LABELS, default=[]):
        vol.All(cv.ensure_list, [vol.Any(cv.string, LABEL_SCHEMA)]),
    vol.Optional(CONF_AREA): AREA_SCHEMA,
})


def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Doods client."""
    url = config[CONF_URL]
    auth_key = config[CONF_AUTH_KEY]
    detector_name = config[CONF_DETECTOR]
CLASSIFIER = "deepstack_face" DATA_DEEPSTACK = "deepstack_classifiers" FILE_PATH = "file_path" SERVICE_TEACH_FACE = "teach_face" PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_IP_ADDRESS): cv.string, vol.Required(CONF_PORT): cv.port, vol.Optional(CONF_API_KEY, default=DEFAULT_API_KEY): cv.string, vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int, vol.Optional(CONF_DETECT_ONLY, default=False): cv.boolean, vol.Optional(CONF_SAVE_FILE_FOLDER): cv.isdir, vol.Optional(CONF_SAVE_TIMESTAMPTED_FILE, default=False): cv.boolean, vol.Optional(CONF_SAVE_FACES_FOLDER): cv.isdir, vol.Optional(CONF_SAVE_FACES, default=False): cv.boolean, vol.Optional(CONF_SHOW_BOXES, default=True): cv.boolean, }) SERVICE_TEACH_SCHEMA = vol.Schema({ vol.Optional(ATTR_ENTITY_ID): cv.entity_ids, vol.Required(ATTR_NAME): cv.string, vol.Required(FILE_PATH): cv.string,
from homeassistant.core import split_entity_id
from homeassistant.components.image_processing import (
    ImageProcessingFaceEntity, PLATFORM_SCHEMA, CONF_SOURCE, CONF_ENTITY_ID,
    CONF_NAME)
import homeassistant.helpers.config_validation as cv

REQUIREMENTS = ['face_recognition==1.2.3']

_LOGGER = logging.getLogger(__name__)

ATTR_NAME = 'name'
CONF_FACES = 'faces'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_FACES): {cv.string: cv.isfile},
})


def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Dlib Face detection platform."""
    entities = []
    for camera in config[CONF_SOURCE]:
        entities.append(DlibFaceIdentifyEntity(
            camera[CONF_ENTITY_ID], config[CONF_FACES], camera.get(CONF_NAME)))
    add_entities(entities)
DEFAULT_MIN_SIZE = (30, 30)
DEFAULT_NEIGHBORS = 4
DEFAULT_SCALE = 1.1
DEFAULT_TIMEOUT = 10

SCAN_INTERVAL = timedelta(seconds=2)

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_CLASSIFIER): {
        cv.string: vol.Any(
            cv.isfile,
            vol.Schema({
                vol.Required(CONF_FILE): cv.isfile,
                # Defaults must be passed as keyword arguments; the second
                # positional argument of vol.Optional is the error message.
                vol.Optional(CONF_SCALE, default=DEFAULT_SCALE): float,
                vol.Optional(CONF_NEIGHBORS, default=DEFAULT_NEIGHBORS): cv.positive_int,
                vol.Optional(CONF_MIN_SIZE, default=DEFAULT_MIN_SIZE): vol.Schema((int, int)),
            }),
        )
    }
})


def _create_processor_from_config(hass, camera_entity, config):
    """Create an OpenCV processor from configuration."""
    classifier_config = config.get(CONF_CLASSIFIER)
    name = "{} {}".format(config[CONF_NAME],
                          split_entity_id(camera_entity)[1].replace("_", " "))
REQUIREMENTS = ['face_recognition==0.2.0']

_LOGGER = logging.getLogger(__name__)

EVENT_DETECT_FACE = 'image_processing.detect_face'

CONF_WITH_FACES = 'keep_faces'
CONF_WITHOUT_FACES = 'keep_no_faces'

DEFAULT_FACES_DIR = 'dlib_faces'
DEFAULT_WITHOUT_FACES_DIR = 'dlib_nofaces'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_WITH_FACES, default=False): cv.boolean,
    vol.Optional(CONF_WITHOUT_FACES, default=False): cv.boolean,
})


def keep_image(image, filename):
    """Save image for troubleshooting."""
    directory = os.path.dirname(filename)
    if not os.path.isdir(directory):
        os.mkdir(directory)

    with open(filename, 'wb') as fdb:
        fdb.write(image.getvalue())
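
# Hedged usage sketch (not part of the platform): keep_image() expects a
# file-like object exposing getvalue(), e.g. an io.BytesIO holding the camera
# frame. The bytes and path below are made-up examples.
def _example_keep_image():
    import io

    fake_frame = io.BytesIO(b"\xff\xd8\xff\xe0 fake jpeg bytes")
    keep_image(fake_frame, '/tmp/dlib_faces/example.jpg')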
import homeassistant.helpers.config_validation as cv
from homeassistant.components.image_processing import (
    PLATFORM_SCHEMA, ImageProcessingFaceEntity, ATTR_CONFIDENCE, CONF_SOURCE,
    CONF_ENTITY_ID, CONF_NAME, DOMAIN)
from homeassistant.const import (
    CONF_IP_ADDRESS, CONF_PORT, HTTP_BAD_REQUEST, HTTP_OK, HTTP_UNAUTHORIZED)

_LOGGER = logging.getLogger(__name__)

CLASSIFIER = 'deepstack_face'
DATA_DEEPSTACK = 'deepstack_classifiers'
FILE_PATH = 'file_path'
SERVICE_TEACH_FACE = 'deepstack_teach_face'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_IP_ADDRESS): cv.string,
    vol.Required(CONF_PORT): cv.port,
})

SERVICE_TEACH_SCHEMA = vol.Schema({
    vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
    vol.Required(ATTR_NAME): cv.string,
    vol.Required(FILE_PATH): cv.string,
})


def get_matched_faces(predictions):
    """Get the predicted faces and their confidence."""
    try:
        matched_faces = {
CONF_API_KEY_FILE = "api_key_file" CONF_SAVE_FILE_FOLDER = "save_file_folder" CONF_TARGET = "target" DEFAULT_TARGET = "face" EVENT_OBJECT_DETECTED = "image_processing.object_detected" EVENT_FACE_DETECTED = "image_processing.face_detected" EVENT_FILE_SAVED = "image_processing.file_saved" FILE = "file" OBJECT = "object" FACE = "face" PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_API_KEY_FILE): cv.string, vol.Optional(CONF_TARGET, default=DEFAULT_TARGET): cv.string, vol.Optional(CONF_SAVE_FILE_FOLDER): cv.isdir, }) def setup_platform(hass, config, add_devices, discovery_info=None): """Set up platform.""" save_file_folder = config.get(CONF_SAVE_FILE_FOLDER) if save_file_folder: save_file_folder = os.path.join(save_file_folder, "") # If no trailing / add it entities = [] for camera in config[CONF_SOURCE]:
_LOGGER = logging.getLogger(__name__)

EVENT_DETECT_FACE = 'image_processing.detect_face'

ATTR_NAME = 'name'
ATTR_TOTAL_FACES = 'total_faces'
ATTR_AGE = 'age'
ATTR_GENDER = 'gender'
ATTR_MOTION = 'motion'
ATTR_GLASSES = 'glasses'
ATTR_FACES = 'faces'

CONF_GROUP = 'group'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_GROUP): cv.slugify,
})


@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
    """Set up the microsoft face identify platform."""
    api = hass.data[DATA_MICROSOFT_FACE]
    face_group = config[CONF_GROUP]
    confidence = config[CONF_CONFIDENCE]

    entities = []
    for camera in config[CONF_SOURCE]:
        entities.append(
            MicrosoftFaceIdentifyEntity(camera[CONF_ENTITY_ID], api,
                                        face_group, confidence,
"auwide", "br", "eu", "fr", "gb", "kr", "kr2", "mx", "sg", "us", "vn2", ] PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_API_KEY): cv.string, vol.Required(CONF_REGION): vol.All(vol.Lower, vol.In(OPENALPR_REGIONS)), } ) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Set up the OpenALPR cloud API platform.""" confidence = config[CONF_CONFIDENCE] params = { "secret_key": config[CONF_API_KEY], "tasks": "plate", "return_image": 0, "country": config[CONF_REGION], } entities = []
from homeassistant.core import split_entity_id
import homeassistant.helpers.config_validation as cv
from homeassistant.components.image_processing import (
    PLATFORM_SCHEMA, CONF_SOURCE, CONF_ENTITY_ID, CONF_NAME)
from homeassistant.components.image_processing.microsoft_face_identify import (
    ImageProcessingFaceEntity)

_LOGGER = logging.getLogger(__name__)

CONF_ENDPOINT = 'endpoint'

ROUNDING_DECIMALS = 2

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_ENDPOINT): cv.string,
})


def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the classifier."""
    entities = []
    for camera in config[CONF_SOURCE]:
        entities.append(Facebox(
            camera.get(CONF_NAME),
            config[CONF_ENDPOINT],
            camera[CONF_ENTITY_ID]
        ))
    add_devices(entities)
DEFAULT_TARGET = "person" DEFAULT_TIMEOUT = 10 EVENT_OBJECT_DETECTED = "image_processing.object_detected" EVENT_FILE_SAVED = "image_processing.file_saved" BOX = "box" CENTROID = "centroid" FILE = "file" OBJECT = "object" PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_IP_ADDRESS): cv.string, vol.Required(CONF_PORT): cv.port, vol.Optional(CONF_API_KEY, default=DEFAULT_API_KEY): cv.string, vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int, vol.Optional(CONF_TARGET, default=DEFAULT_TARGET): cv.string, vol.Optional(CONF_SAVE_FILE_FOLDER): cv.isdir, }) def get_box(prediction: dict, img_width: int, img_height: int): """ Return the relative bounxing box coordinates defined by the tuple (y_min, x_min, y_max, x_max) where the coordinates are floats in the range [0.0, 1.0] and relative to the width and height of the image. """
CONF_HEIGHT = 'height'
CONF_ROTATE = 'rotate'
CONF_SSOCR_BIN = 'ssocr_bin'
CONF_THRESHOLD = 'threshold'
CONF_WIDTH = 'width'
CONF_X_POS = 'x_position'
CONF_Y_POS = 'y_position'

DEFAULT_BINARY = 'ssocr'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_EXTRA_ARGUMENTS, default=''): cv.string,
    vol.Optional(CONF_DIGITS): cv.positive_int,
    vol.Optional(CONF_HEIGHT, default=0): cv.positive_int,
    vol.Optional(CONF_SSOCR_BIN, default=DEFAULT_BINARY): cv.string,
    vol.Optional(CONF_THRESHOLD, default=0): cv.positive_int,
    vol.Optional(CONF_ROTATE, default=0): cv.positive_int,
    vol.Optional(CONF_WIDTH, default=0): cv.positive_int,
    vol.Optional(CONF_X_POS, default=0): cv.string,
    vol.Optional(CONF_Y_POS, default=0): cv.positive_int,
})


async def async_setup_platform(hass, config, async_add_entities,
                               discovery_info=None):
    """Set up the Seven segments OCR platform."""
    entities = []
    for camera in config[CONF_SOURCE]:
        entities.append(ImageProcessingSsocr(
            hass, camera[CONF_ENTITY_ID], config, camera.get(CONF_NAME)
        ))
    'gb',
    'kr',
    'mx',
    'sg',
]

CONF_REGION = 'region'
CONF_ALPR_BIN = 'alp_bin'

DEFAULT_BINARY = 'alpr'
DEFAULT_CONFIDENCE = 80

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_REGION):
        vol.All(vol.Lower, vol.In(OPENALPR_REGIONS)),
    vol.Optional(CONF_ALPR_BIN, default=DEFAULT_BINARY): cv.string,
    vol.Optional(CONF_CONFIDENCE, default=DEFAULT_CONFIDENCE):
        vol.All(vol.Coerce(float), vol.Range(min=0, max=100))
})


@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
    """Set up the openalpr local platform."""
    command = [config[CONF_ALPR_BIN], '-c', config[CONF_REGION], '-']
    confidence = config[CONF_CONFIDENCE]

    entities = []
    for camera in config[CONF_SOURCE]:
        entities.append(
            OpenAlprLocalEntity(camera[CONF_ENTITY_ID], command, confidence,
from homeassistant.core import split_entity_id
from homeassistant.components.image_processing import (
    ImageProcessingFaceEntity, PLATFORM_SCHEMA, CONF_SOURCE, CONF_ENTITY_ID,
    CONF_NAME)
import homeassistant.helpers.config_validation as cv

REQUIREMENTS = ['face_recognition==1.2.3']

_LOGGER = logging.getLogger(__name__)

ATTR_NAME = 'name'
CONF_FACES = 'faces'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_FACES): {cv.string: cv.isfile},
})


def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Dlib Face detection platform."""
    entities = []
    for camera in config[CONF_SOURCE]:
        entities.append(DlibFaceIdentifyEntity(
            camera[CONF_ENTITY_ID], config[CONF_FACES], camera.get(CONF_NAME)
        ))
    add_entities(entities)


class DlibFaceIdentifyEntity(ImageProcessingFaceEntity):
from homeassistant.components.image_processing import (
    ImageProcessingEntity, PLATFORM_SCHEMA)
from homeassistant.components.opencv import (
    ATTR_MATCHES, CLASSIFIER_GROUP_CONFIG, CONF_CLASSIFIER, CONF_ENTITY_ID,
    CONF_NAME, process_image)

_LOGGER = logging.getLogger(__name__)

DEPENDENCIES = ['opencv']

DEFAULT_TIMEOUT = 10

SCAN_INTERVAL = timedelta(seconds=2)

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(CLASSIFIER_GROUP_CONFIG)


def _create_processor_from_config(hass, camera_entity, config):
    """Create an OpenCV processor from configuration."""
    classifier_config = config[CONF_CLASSIFIER]
    name = '{} {}'.format(
        config[CONF_NAME],
        split_entity_id(camera_entity)[1].replace('_', ' '))

    processor = OpenCVImageProcessor(
        hass, camera_entity, name, classifier_config)

    return processor


def setup_platform(hass, config, add_devices, discovery_info=None):
DEFAULT_CLASSIFIER_PATH = 'lbp_frontalface.xml'
DEFAULT_MIN_SIZE = (30, 30)
DEFAULT_NEIGHBORS = 4
DEFAULT_SCALE = 1.1
DEFAULT_TIMEOUT = 10

SCAN_INTERVAL = timedelta(seconds=2)

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_CLASSIFIER): {
        cv.string: vol.Any(
            cv.isfile,
            vol.Schema({
                vol.Required(CONF_FILE): cv.isfile,
                # Defaults must be keyword arguments; the second positional
                # argument of vol.Optional is the error message.
                vol.Optional(CONF_SCALE, default=DEFAULT_SCALE): float,
                vol.Optional(CONF_NEIGHBORS, default=DEFAULT_NEIGHBORS): cv.positive_int,
                vol.Optional(CONF_MIN_SIZE, default=DEFAULT_MIN_SIZE): vol.Schema((int, int))
            })
        )
    }
})


def _create_processor_from_config(hass, camera_entity, config):
    """Create an OpenCV processor from configuration."""
    classifier_config = config.get(CONF_CLASSIFIER)
    name = '{} {}'.format(
        config[CONF_NAME],
        split_entity_id(camera_entity)[1].replace('_', ' '))
ATTR_MATCHES = "matches" ATTR_TOTAL_MATCHES = "total_matches" CONF_CLASSIFIER = "classifier" CONFIDENCE_THRESHOLD = 0.5 NMS_THRESHOLD = 0.4 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") imgsz = int(672) sys.path.insert(0, str(Path.home())+'/.local/lib/python3.7/site-packages/homeassistant/components/opencv/') model = torch.load(home+'yolov4x-mish.pt', device)['model'].fuse().eval().half() with open(home+'cococlasses.txt', "r") as f: class_names = [cname.strip() for cname in f.readlines()] PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Optional(CONF_CLASSIFIER, default="person"): cv.string, vol.Optional(CONF_CONFIDENCE, default=0.6): vol.Coerce(float), } ) def xywh2xyxy(x): # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right y = torch.zeros_like(x) y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y return y def non_max_suppression(prediction, conf_thres=0.1, iou_thres=0.6, classes=None, agnostic=False): """Performs Non-Maximum Suppression (NMS) on inference results Returns:
from homeassistant.components.image_processing import (
    ATTR_CONFIDENCE, CONF_CONFIDENCE, CONF_ENTITY_ID, CONF_NAME, CONF_SOURCE,
    PLATFORM_SCHEMA, ImageProcessingFaceEntity)
from homeassistant.components.microsoft_face import DATA_MICROSOFT_FACE
from homeassistant.const import ATTR_NAME
from homeassistant.core import split_entity_id
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv

_LOGGER = logging.getLogger(__name__)

CONF_GROUP = 'group'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_GROUP): cv.slugify,
})


async def async_setup_platform(hass, config, async_add_entities,
                               discovery_info=None):
    """Set up the Microsoft Face identify platform."""
    api = hass.data[DATA_MICROSOFT_FACE]
    face_group = config[CONF_GROUP]
    confidence = config[CONF_CONFIDENCE]

    entities = []
    for camera in config[CONF_SOURCE]:
        entities.append(MicrosoftFaceIdentifyEntity(
            camera[CONF_ENTITY_ID], api, face_group, confidence,
            camera.get(CONF_NAME)
EVENT_PERSON_DETECTED = "sighthound.person_detected" ATTR_BOUNDING_BOX = "bounding_box" ATTR_PEOPLE = "people" CONF_ACCOUNT_TYPE = "account_type" CONF_SAVE_FILE_FOLDER = "save_file_folder" CONF_SAVE_TIMESTAMPTED_FILE = "save_timestamped_file" DATETIME_FORMAT = "%Y-%m-%d_%H:%M:%S" DEV = "dev" PROD = "prod" PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_API_KEY): cv.string, vol.Optional(CONF_ACCOUNT_TYPE, default=DEV): vol.In([DEV, PROD]), vol.Optional(CONF_SAVE_FILE_FOLDER): cv.isdir, vol.Optional(CONF_SAVE_TIMESTAMPTED_FILE, default=False): cv.boolean, }) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the platform.""" # Validate credentials by processing image. api_key = config[CONF_API_KEY] account_type = config[CONF_ACCOUNT_TYPE] api = hound.cloud(api_key, account_type) try: api.detect(b"Test") except hound.SimplehoundException as exc:
import requests
import voluptuous as vol

from homeassistant.core import split_entity_id
import homeassistant.helpers.config_validation as cv
from homeassistant.components.image_processing import (
    PLATFORM_SCHEMA, ImageProcessingFaceEntity, CONF_SOURCE, CONF_ENTITY_ID,
    CONF_NAME)
from homeassistant.const import (CONF_IP_ADDRESS, CONF_PORT)

_LOGGER = logging.getLogger(__name__)

CLASSIFIER = 'facebox'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_IP_ADDRESS): cv.string,
    vol.Required(CONF_PORT): cv.port,
})


def encode_image(image):
    """base64 encode an image stream."""
    base64_img = base64.b64encode(image).decode('ascii')
    return {"base64": base64_img}


def get_matched_faces(faces):
    """Return the name and rounded confidence of matched faces."""
    return {face['name']: round(face['confidence'], 2)
            for face in faces if face['matched']}
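
# Hedged usage sketch (not part of the integration): the list below mimics the
# face entries that get_matched_faces() filters; the names and confidence
# values are made-up sample data.
def _example_get_matched_faces():
    sample_faces = [
        {'name': 'alice', 'confidence': 0.91234, 'matched': True},
        {'name': 'unknown', 'confidence': 0.40021, 'matched': False},
    ]
    # Only matched faces are kept, with confidence rounded to 2 decimals:
    # {'alice': 0.91}
    return get_matched_faces(sample_faces)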
SUPPORTED_ATTRIBUTES = [ATTR_AGE, ATTR_GENDER, ATTR_GLASSES]

CONF_ATTRIBUTES = 'attributes'
DEFAULT_ATTRIBUTES = [ATTR_AGE, ATTR_GENDER]


def validate_attributes(list_attributes):
    """Validate face attributes."""
    for attr in list_attributes:
        if attr not in SUPPORTED_ATTRIBUTES:
            raise vol.Invalid("Invalid attribute {0}".format(attr))
    return list_attributes


PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_ATTRIBUTES, default=DEFAULT_ATTRIBUTES):
        vol.All(cv.ensure_list, validate_attributes),
})


@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
    """Set up the Microsoft Face detection platform."""
    api = hass.data[DATA_MICROSOFT_FACE]
    attributes = config[CONF_ATTRIBUTES]

    entities = []
    for camera in config[CONF_SOURCE]:
        entities.append(
            MicrosoftFaceDetectEntity(camera[CONF_ENTITY_ID], api, attributes,
                                      camera.get(CONF_NAME)))