Example #1

]

CONF_ATTRIBUTES = 'attributes'
DEFAULT_ATTRIBUTES = [ATTR_AGE, ATTR_GENDER]


def validate_attributes(list_attributes):
    """Validate face attributes."""
    for attr in list_attributes:
        if attr not in SUPPORTED_ATTRIBUTES:
            raise vol.Invalid("Invalid attribute {0}".format(attr))
    return list_attributes


PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_ATTRIBUTES, default=DEFAULT_ATTRIBUTES):
        vol.All(cv.ensure_list, validate_attributes),
})
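

# A short sketch of how the schema above behaves, using an illustrative
# config dict. The full PLATFORM_SCHEMA also expects the base
# image_processing keys (such as the camera source), so only the attribute
# handling is exercised here with a stripped-down schema built from the same
# validators:
_ATTRIBUTES_ONLY_SCHEMA = vol.Schema({
    vol.Optional(CONF_ATTRIBUTES, default=DEFAULT_ATTRIBUTES):
        vol.All(cv.ensure_list, validate_attributes),
})

print(_ATTRIBUTES_ONLY_SCHEMA({}))
# -> {'attributes': [ATTR_AGE, ATTR_GENDER]} (the defaults)
print(_ATTRIBUTES_ONLY_SCHEMA({CONF_ATTRIBUTES: ATTR_AGE}))
# -> {'attributes': [ATTR_AGE]} after cv.ensure_list wraps the single value
# _ATTRIBUTES_ONLY_SCHEMA({CONF_ATTRIBUTES: ['not_supported']})
# -> raises vol.Invalid from validate_attributes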


@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
    """Set up the Microsoft Face detection platform."""
    api = hass.data[DATA_MICROSOFT_FACE]
    attributes = config[CONF_ATTRIBUTES]

    entities = []
    for camera in config[CONF_SOURCE]:
        entities.append(MicrosoftFaceDetectEntity(
            camera[CONF_ENTITY_ID], api, attributes, camera.get(CONF_NAME)
        ))
Example #2

_LOGGER = logging.getLogger(__name__)

CONF_SAVE_FILE_FOLDER = "save_file_folder"
CONF_TARGET = "target"
DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
DEFAULT_PORT = 5000
DEFAULT_TARGET = "person"
RED = (255, 0, 0)
SCAN_INTERVAL = timedelta(days=365)  # NEVER SCAN.

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_IP_ADDRESS):
    cv.string,
    vol.Optional(CONF_PORT, default=DEFAULT_PORT):
    cv.port,
    vol.Optional(CONF_TARGET, default=DEFAULT_TARGET):
    cv.string,
    vol.Optional(CONF_SAVE_FILE_FOLDER):
    cv.isdir,
})


def get_target(predictions: List, target: str):
    """
    Return only the info for the targets.
    """
    targets = []
    for result in predictions:
        if result["name"] == target:
            targets.append(result)
    return targets
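

# Usage sketch for get_target() with a hypothetical predictions payload; the
# "name" key matches the code above, while the "confidence" key is an
# assumption about the detection API's response format.
_sample_predictions = [
    {"name": "person", "confidence": 97.3},
    {"name": "dog", "confidence": 84.1},
    {"name": "person", "confidence": 62.5},
]
print(get_target(_sample_predictions, "person"))  # the two "person" entries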
Example #3
CONF_WIDTH = "width"
CONF_X_POS = "x_position"
CONF_Y_POS = "y_position"

DEFAULT_BINARY = "ssocr"

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_EXTRA_ARGUMENTS, default=""):
    cv.string,
    vol.Optional(CONF_DIGITS):
    cv.positive_int,
    vol.Optional(CONF_HEIGHT, default=0):
    cv.positive_int,
    vol.Optional(CONF_SSOCR_BIN, default=DEFAULT_BINARY):
    cv.string,
    vol.Optional(CONF_THRESHOLD, default=0):
    cv.positive_int,
    vol.Optional(CONF_ROTATE, default=0):
    cv.positive_int,
    vol.Optional(CONF_WIDTH, default=0):
    cv.positive_int,
    vol.Optional(CONF_X_POS, default=0):
    cv.string,
    vol.Optional(CONF_Y_POS, default=0):
    cv.positive_int,
})


async def async_setup_platform(hass,
                               config,
                               async_add_entities,
                               discovery_info=None):
Example #4
DEFAULT_MIN_SIZE = (30, 30)
DEFAULT_NEIGHBORS = 4
DEFAULT_SCALE = 1.1
DEFAULT_TIMEOUT = 10

SCAN_INTERVAL = timedelta(seconds=2)

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_CLASSIFIER, default=None): {
        cv.string:
        vol.Any(
            cv.isfile,
            vol.Schema({
                vol.Required(CONF_FILE):
                cv.isfile,
                vol.Optional(CONF_SCALE, default=DEFAULT_SCALE):
                float,
                vol.Optional(CONF_NEIGHBORS, default=DEFAULT_NEIGHBORS):
                cv.positive_int,
                vol.Optional(CONF_MIN_SIZE, default=DEFAULT_MIN_SIZE):
                vol.Schema((int, int))
            }))
    }
})


def _create_processor_from_config(hass, camera_entity, config):
    """Create an OpenCV processor from configuration."""
    classifier_config = config[CONF_CLASSIFIER]
    name = '{} {}'.format(config[CONF_NAME],
                          split_entity_id(camera_entity)[1].replace('_', ' '))
Example #5
                                 CONF_PORT, CONF_PASSWORD, CONF_USERNAME,
                                 HTTP_OK, HTTP_UNAUTHORIZED)

_LOGGER = logging.getLogger(__name__)

ATTR_MODEL_ID = 'model_id'
ATTR_MODEL_NAME = 'model_name'
CLASSIFIER = 'classificationbox'
EVENT_IMAGE_CLASSIFICATION = 'image_processing.image_classification'
TIMEOUT = 9

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_IP_ADDRESS):
    cv.string,
    vol.Required(CONF_PORT):
    cv.port,
    vol.Optional(CONF_USERNAME):
    cv.string,
    vol.Optional(CONF_PASSWORD):
    cv.string,
})


def check_box_health(url, username, password):
    """Check the health of the classifier and return its id if healthy."""
    kwargs = {}
    if username:
        kwargs['auth'] = requests.auth.HTTPBasicAuth(username, password)
    try:
        response = requests.get(url, timeout=TIMEOUT, **kwargs)
        if response.status_code == HTTP_UNAUTHORIZED:
            _LOGGER.error("AuthenticationError on %s", CLASSIFIER)
Example #6
from homeassistant.core import split_entity_id
import homeassistant.helpers.config_validation as cv

_LOGGER = logging.getLogger(__name__)

EVENT_PERSON_DETECTED = "sighthound.person_detected"

ATTR_BOUNDING_BOX = "bounding_box"
ATTR_PEOPLE = "people"
CONF_ACCOUNT_TYPE = "account_type"
DEV = "dev"
PROD = "prod"

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_API_KEY): cv.string,
        vol.Optional(CONF_ACCOUNT_TYPE, default=DEV): vol.In([DEV, PROD]),
    }
)


def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the platform."""
    # Validate credentials by processing image.
    api_key = config[CONF_API_KEY]
    account_type = config[CONF_ACCOUNT_TYPE]
    api = hound.cloud(api_key, account_type)
    try:
        api.detect(b"Test")
    except hound.SimplehoundException as exc:
        _LOGGER.error("Sighthound error %s setup aborted", exc)
        return
Example #7
import requests
import voluptuous as vol

from homeassistant.core import split_entity_id
import homeassistant.helpers.config_validation as cv
from homeassistant.components.image_processing import (
    PLATFORM_SCHEMA, ImageProcessingFaceEntity, CONF_SOURCE, CONF_ENTITY_ID,
    CONF_NAME)
from homeassistant.const import (CONF_IP_ADDRESS, CONF_PORT)

_LOGGER = logging.getLogger(__name__)

CLASSIFIER = 'facebox'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_IP_ADDRESS): cv.string,
    vol.Required(CONF_PORT): cv.port,
})


def encode_image(image):
    """base64 encode an image stream."""
    base64_img = base64.b64encode(image).decode('ascii')
    return {"base64": base64_img}


def get_matched_faces(faces):
    """Return the name and rounded confidence of matched faces."""
    return {face['name']: round(face['confidence'], 2)
            for face in faces if face['matched']}
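

# Usage sketch for the two helpers above with dummy data; the
# name/confidence/matched keys mirror the fields referenced in
# get_matched_faces().
_sample_faces = [
    {'name': 'alice', 'confidence': 91.234, 'matched': True},
    {'name': 'unknown', 'confidence': 40.2, 'matched': False},
]
print(get_matched_faces(_sample_faces))   # {'alice': 91.23}
print(encode_image(b'raw image bytes'))   # {'base64': '<ascii string>'}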

Example #8
    vol.Optional(CONF_CONFIDENCE): vol.All(
        vol.Coerce(float), vol.Range(min=10, max=100)
    ),
}


PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_IP_ADDRESS): cv.string,
        vol.Required(CONF_PORT): cv.port,
        vol.Optional(CONF_API_KEY, default=DEFAULT_API_KEY): cv.string,
        vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
        vol.Optional(CONF_CUSTOM_MODEL, default=""): cv.string,
        vol.Optional(CONF_TARGETS, default=DEFAULT_TARGETS): vol.All(
            cv.ensure_list, [vol.Schema(TARGETS_SCHEMA)]
        ),
        vol.Optional(CONF_ROI_Y_MIN, default=DEFAULT_ROI_Y_MIN): cv.small_float,
        vol.Optional(CONF_ROI_X_MIN, default=DEFAULT_ROI_X_MIN): cv.small_float,
        vol.Optional(CONF_ROI_Y_MAX, default=DEFAULT_ROI_Y_MAX): cv.small_float,
        vol.Optional(CONF_ROI_X_MAX, default=DEFAULT_ROI_X_MAX): cv.small_float,
        vol.Optional(CONF_SAVE_FILE_FOLDER): cv.isdir,
        vol.Optional(CONF_SAVE_TIMESTAMPTED_FILE, default=False): cv.boolean,
        vol.Optional(CONF_SHOW_BOXES, default=True): cv.boolean,
        vol.Optional(CONF_DATETIME_FORMAT, default=DATETIME_FORMAT): cv.string,
    }
)

Box = namedtuple("Box", "y_min x_min y_max x_max")
Point = namedtuple("Point", "y x")


def point_in_box(box: Box, point: Point) -> bool:
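    # The body is truncated in this excerpt; a minimal sketch of the
    # containment check it presumably performs on the normalised coordinates:
    return (box.x_min <= point.x <= box.x_max
            and box.y_min <= point.y <= box.y_max)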
Example #9
CONF_HEIGHT = 'height'
CONF_ROTATE = 'rotate'
CONF_SSOCR_BIN = 'ssocr_bin'
CONF_THRESHOLD = 'threshold'
CONF_WIDTH = 'width'
CONF_X_POS = 'x_position'
CONF_Y_POS = 'y_position'

DEFAULT_BINARY = 'ssocr'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_EXTRA_ARGUMENTS, default=''): cv.string,
    vol.Optional(CONF_DIGITS): cv.positive_int,
    vol.Optional(CONF_HEIGHT, default=0): cv.positive_int,
    vol.Optional(CONF_SSOCR_BIN, default=DEFAULT_BINARY): cv.string,
    vol.Optional(CONF_THRESHOLD, default=0): cv.positive_int,
    vol.Optional(CONF_ROTATE, default=0): cv.positive_int,
    vol.Optional(CONF_WIDTH, default=0): cv.positive_int,
    vol.Optional(CONF_X_POS, default=0): cv.string,
    vol.Optional(CONF_Y_POS, default=0): cv.positive_int,
})


async def async_setup_platform(hass, config, async_add_entities,
                               discovery_info=None):
    """Set up the Seven segments OCR platform."""
    entities = []
    for camera in config[CONF_SOURCE]:
        entities.append(ImageProcessingSsocr(
            hass, camera[CONF_ENTITY_ID], config, camera.get(CONF_NAME)
        ))
Example #10

CONF_API_KEY = 'api_key'
CONF_SECRET_KEY = 'secret_key'
CONF_SNAPSHOT_FILEPATH = 'snapshot_filepath'
CONF_RESIZE = 'resize'
CONF_HA_URL = 'ha_url'
CONF_HA_PASSWORD = '******'
CONF_DETECT_TOP_NUM = 'detect_top_num'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_APP_ID):
    cv.string,
    vol.Required(CONF_API_KEY):
    cv.string,
    vol.Required(CONF_SECRET_KEY):
    cv.string,
    vol.Required(CONF_RESIZE, default='0'):
    cv.string,
    vol.Required(CONF_HA_URL):
    cv.url,
    vol.Required(CONF_HA_PASSWORD):
    cv.string,
    vol.Required(CONF_SNAPSHOT_FILEPATH):
    cv.string,
    vol.Optional(CONF_DETECT_TOP_NUM, default=1):
    cv.positive_int,
})

ATTR_USERINFO = 'user_info'
ATTR_IMAGE = 'image'
ATTR_UID = 'uid'
ATTR_GROUPID = 'group_id'
DOMAIN = 'image_processing'
Example #11
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_REGION, default=DEFAULT_REGION):
    vol.In(SUPPORTED_REGIONS),
    vol.Required(CONF_ACCESS_KEY_ID):
    cv.string,
    vol.Required(CONF_SECRET_ACCESS_KEY):
    cv.string,
    vol.Optional(CONF_TARGETS, default=DEFAULT_TARGETS):
    vol.All(cv.ensure_list, [vol.Schema(TARGETS_SCHEMA)]),
    vol.Optional(CONF_ROI_Y_MIN, default=DEFAULT_ROI_Y_MIN):
    cv.small_float,
    vol.Optional(CONF_ROI_X_MIN, default=DEFAULT_ROI_X_MIN):
    cv.small_float,
    vol.Optional(CONF_ROI_Y_MAX, default=DEFAULT_ROI_Y_MAX):
    cv.small_float,
    vol.Optional(CONF_ROI_X_MAX, default=DEFAULT_ROI_X_MAX):
    cv.small_float,
    vol.Optional(CONF_SCALE, default=DEAULT_SCALE):
    vol.All(vol.Coerce(float), vol.Range(min=0.1, max=1)),
    vol.Optional(CONF_SAVE_FILE_FOLDER):
    cv.isdir,
    vol.Optional(CONF_SAVE_FILE_FORMAT, default=JPG):
    vol.In([JPG, PNG]),
    vol.Optional(CONF_SAVE_TIMESTAMPTED_FILE, default=False):
    cv.boolean,
    vol.Optional(CONF_ALWAYS_SAVE_LATEST_FILE, default=False):
    cv.boolean,
    vol.Optional(CONF_S3_BUCKET):
    cv.string,
    vol.Optional(CONF_SHOW_BOXES, default=True):
    cv.boolean,
    vol.Optional(CONF_BOTO_RETRIES, default=DEFAULT_BOTO_RETRIES):
    vol.All(vol.Coerce(int), vol.Range(min=0)),
})
Example #12
import homeassistant.helpers.config_validation as cv
from homeassistant.components.image_processing import (PLATFORM_SCHEMA,
                                                       ImageProcessingEntity,
                                                       CONF_SOURCE,
                                                       CONF_ENTITY_ID,
                                                       CONF_NAME, DOMAIN)

_LOGGER = logging.getLogger(__name__)

CONF_ENDPOINT = 'endpoint'
CONF_TAGS = 'tags'
ROUNDING_DECIMALS = 2

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_ENDPOINT):
    cv.string,
    vol.Optional(CONF_TAGS, default=[]):
    vol.All(cv.ensure_list, [cv.string]),
})


def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the classifier."""
    entities = []
    for camera in config[CONF_SOURCE]:
        entities.append(
            Tagbox(
                camera.get(CONF_NAME),
                config[CONF_ENDPOINT],
                camera[CONF_ENTITY_ID],
                config[CONF_TAGS],
            ))
Example #13
    'eu',
    'fr',
    'gb',
    'kr',
    'kr2',
    'mx',
    'sg',
    'us',
    'vn2'
]

CONF_REGION = 'region'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_API_KEY): cv.string,
    vol.Required(CONF_REGION):
        vol.All(vol.Lower, vol.In(OPENALPR_REGIONS)),
})


@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
    """Set up the openalpr cloud api platform."""
    confidence = config[CONF_CONFIDENCE]
    params = {
        'secret_key': config[CONF_API_KEY],
        'tasks': "plate",
        'return_image': 0,
        'country': config[CONF_REGION],
    }
Example #14
from homeassistant.core import split_entity_id
import homeassistant.helpers.config_validation as cv
from homeassistant.components.image_processing import (
    PLATFORM_SCHEMA, CONF_SOURCE, CONF_ENTITY_ID,
    CONF_NAME)
from homeassistant.components.image_processing.microsoft_face_identify import (
    ImageProcessingFaceEntity)

_LOGGER = logging.getLogger(__name__)

CONF_ENDPOINT = 'endpoint'
ROUNDING_DECIMALS = 2

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_ENDPOINT): cv.string,
})


def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the classifier."""
    entities = []
    for camera in config[CONF_SOURCE]:
        entities.append(Facebox(
            camera.get(CONF_NAME),
            config[CONF_ENDPOINT],
            camera[CONF_ENTITY_ID]
        ))
    add_devices(entities)

Example #15
    ATTR_CONFIDENCE, CONF_CONFIDENCE, CONF_ENTITY_ID, CONF_NAME, CONF_SOURCE,
    PLATFORM_SCHEMA, ImageProcessingFaceEntity)
from homeassistant.components.microsoft_face import DATA_MICROSOFT_FACE
from homeassistant.const import ATTR_NAME
from homeassistant.core import split_entity_id
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv

DEPENDENCIES = ['microsoft_face']

_LOGGER = logging.getLogger(__name__)

CONF_GROUP = 'group'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_GROUP): cv.slugify,
})


async def async_setup_platform(hass,
                               config,
                               async_add_entities,
                               discovery_info=None):
    """Set up the Microsoft Face identify platform."""
    api = hass.data[DATA_MICROSOFT_FACE]
    face_group = config[CONF_GROUP]
    confidence = config[CONF_CONFIDENCE]

    entities = []
    for camera in config[CONF_SOURCE]:
        entities.append(
Example #16
DEFAULT_TIMEOUT = 10
DEFAULT_CONFIDENCE = 0.55
DEFAULT_CROP = {'x1': None, 'x2': None, 'y1': None, 'y2': None}

SCAN_INTERVAL = timedelta(seconds=30)

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_OPTIONS): {
        vol.Required(CONF_MODEL):
        cv.isfile,
        vol.Required(CONF_WEIGHTS):
        cv.isfile,
        vol.Required(CONF_LABELS):
        cv.isfile,
        vol.Optional(CONF_CROP, default=DEFAULT_CROP): {
            vol.Required(CONF_X1): vol.All(vol.Coerce(int), vol.Range(min=0)),
            vol.Required(CONF_Y1): vol.All(vol.Coerce(int), vol.Range(min=0)),
            vol.Required(CONF_X2): vol.All(vol.Coerce(int), vol.Range(min=0)),
            vol.Required(CONF_Y2): vol.All(vol.Coerce(int), vol.Range(min=0))
        },
        vol.Optional(CONF_CONFIDENCE, default=DEFAULT_CONFIDENCE):
        vol.All(vol.Coerce(float), vol.Range(min=0, max=1, max_included=True))
    }
})


def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the Darkflow image processing platform."""
    try:
        # Verify that the Darkflow python package is pre-installed
        # pylint: disable=unused-import,unused-variable
Example #17

from homeassistant.components.image_processing import (
    ATTR_CONFIDENCE, CONF_CONFIDENCE, CONF_ENTITY_ID, CONF_NAME, CONF_SOURCE,
    PLATFORM_SCHEMA, ImageProcessingFaceEntity)
from homeassistant.components.microsoft_face import DATA_MICROSOFT_FACE
from homeassistant.const import ATTR_NAME
from homeassistant.core import split_entity_id
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv

_LOGGER = logging.getLogger(__name__)

CONF_GROUP = 'group'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_GROUP): cv.slugify,
})


async def async_setup_platform(hass, config, async_add_entities,
                               discovery_info=None):
    """Set up the Microsoft Face identify platform."""
    api = hass.data[DATA_MICROSOFT_FACE]
    face_group = config[CONF_GROUP]
    confidence = config[CONF_CONFIDENCE]

    entities = []
    for camera in config[CONF_SOURCE]:
        entities.append(MicrosoftFaceIdentifyEntity(
            camera[CONF_ENTITY_ID], api, face_group, confidence,
            camera.get(CONF_NAME)
Example #18

CONF_ALWAYS_SAVE_LATEST_FILE = "always_save_latest_file"
CONF_WATCHED_PLATES = "watched_plates"
CONF_MMC = "mmc"

DATETIME_FORMAT = "%Y-%m-%d_%H-%M-%S"
RED = (255, 0, 0)  # For objects within the ROI
DEFAULT_REGIONS = ['None']

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_API_TOKEN):
    cv.string,
    vol.Optional(CONF_REGIONS, default=DEFAULT_REGIONS):
    vol.All(cv.ensure_list, [cv.string]),
    vol.Optional(CONF_MMC, default=False):
    cv.boolean,
    vol.Optional(CONF_SAVE_FILE_FOLDER):
    cv.isdir,
    vol.Optional(CONF_SAVE_TIMESTAMPTED_FILE, default=False):
    cv.boolean,
    vol.Optional(CONF_ALWAYS_SAVE_LATEST_FILE, default=False):
    cv.boolean,
    vol.Optional(CONF_WATCHED_PLATES):
    vol.All(cv.ensure_list, [cv.string]),
})


def get_plates(results: List[Dict]) -> List[str]:
    """
    Return the list of candidate plates. 
    If no plates empty list returned.
    """
    plates = []
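    # The rest of the function is truncated in this excerpt. A hedged sketch
    # of the loop it presumably runs over the API results; the
    # "candidates"/"plate" keys are assumptions about the response format.
    for result in results:
        for candidate in result.get("candidates", []):
            plates.append(candidate["plate"])
    return plates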
Example #19
EVENT_PERSON_DETECTED = "sighthound.person_detected"

ATTR_BOUNDING_BOX = "bounding_box"
ATTR_PEOPLE = "people"
CONF_ACCOUNT_TYPE = "account_type"
CONF_SAVE_FILE_FOLDER = "save_file_folder"
CONF_SAVE_TIMESTAMPTED_FILE = "save_timestamped_file"
DATETIME_FORMAT = "%Y-%m-%d_%H:%M:%S"
DEV = "dev"
PROD = "prod"

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_API_KEY): cv.string,
        vol.Optional(CONF_ACCOUNT_TYPE, default=DEV): vol.In([DEV, PROD]),
        vol.Optional(CONF_SAVE_FILE_FOLDER): cv.isdir,
        vol.Optional(CONF_SAVE_TIMESTAMPTED_FILE, default=False): cv.boolean,
    }
)


def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the platform."""
    # Validate credentials by processing image.
    api_key = config[CONF_API_KEY]
    account_type = config[CONF_ACCOUNT_TYPE]
    api = hound.cloud(api_key, account_type)
    try:
        api.detect(b"Test")
    except hound.SimplehoundException as exc:
        _LOGGER.error("Sighthound error %s setup aborted", exc)
Example #20

)
from homeassistant.core import split_entity_id

_LOGGER = logging.getLogger(__name__)

CONF_API_KEY = "api_key"
CONF_TIMEOUT = "timeout"

DEFAULT_API_KEY = ""
DEFAULT_TIMEOUT = 10

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_IP_ADDRESS):
    cv.string,
    vol.Required(CONF_PORT):
    cv.port,
    vol.Optional(CONF_API_KEY, default=DEFAULT_API_KEY):
    cv.string,
    vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT):
    cv.positive_int,
})


def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the integration."""

    entities = []
    for camera in config[CONF_SOURCE]:
        object_entity = SceneEntity(
            config.get(CONF_IP_ADDRESS),
            config.get(CONF_PORT),
            config.get(CONF_API_KEY),
Example #21

ATTR_BOUNDING_BOX = 'bounding_box'
ATTR_CLASSIFIER = 'classifier'
ATTR_IMAGE_ID = 'image_id'
ATTR_ID = 'id'
ATTR_MATCHED = 'matched'
FACEBOX_NAME = 'name'
CLASSIFIER = 'facebox'
DATA_FACEBOX = 'facebox_classifiers'
FILE_PATH = 'file_path'
SERVICE_TEACH_FACE = 'facebox_teach_face'


PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_IP_ADDRESS): cv.string,
    vol.Required(CONF_PORT): cv.port,
    vol.Optional(CONF_USERNAME): cv.string,
    vol.Optional(CONF_PASSWORD): cv.string,
})

SERVICE_TEACH_SCHEMA = vol.Schema({
    vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
    vol.Required(ATTR_NAME): cv.string,
    vol.Required(FILE_PATH): cv.string,
})


def check_box_health(url, username, password):
    """Check the health of the classifier and return its id if healthy."""
    kwargs = {}
    if username:
        kwargs['auth'] = requests.auth.HTTPBasicAuth(username, password)
Example #22
    CLASSIFIER_GROUP_CONFIG,
    CONF_CLASSIFIER,
    CONF_ENTITY_ID,
    CONF_NAME,
    process_image,
)

DEPENDENCIES = ['opencv']

_LOGGER = logging.getLogger(__name__)

DEFAULT_TIMEOUT = 10

SCAN_INTERVAL = timedelta(seconds=2)

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(CLASSIFIER_GROUP_CONFIG)


def _create_processor_from_config(hass, camera_entity, config):
    """Create an OpenCV processor from configurtaion."""
    classifier_config = config[CONF_CLASSIFIER]
    name = '{} {}'.format(
        config[CONF_NAME],
        split_entity_id(camera_entity)[1].replace('_', ' '))

    processor = OpenCVImageProcessor(
        hass,
        camera_entity,
        name,
        classifier_config,
    )
Example #23
DEFAULT_CLASSIFIER_PATH = 'lbp_frontalface.xml'
DEFAULT_MIN_SIZE = (30, 30)
DEFAULT_NEIGHBORS = 4
DEFAULT_SCALE = 1.1
DEFAULT_TIMEOUT = 10

SCAN_INTERVAL = timedelta(seconds=2)

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_CLASSIFIER): {
        cv.string: vol.Any(
            cv.isfile,
            vol.Schema({
                vol.Required(CONF_FILE): cv.isfile,
                vol.Optional(CONF_SCALE, default=DEFAULT_SCALE): float,
                vol.Optional(CONF_NEIGHBORS, default=DEFAULT_NEIGHBORS):
                    cv.positive_int,
                vol.Optional(CONF_MIN_SIZE, default=DEFAULT_MIN_SIZE):
                    vol.Schema((int, int))
            })
        )
    }
})


def _create_processor_from_config(hass, camera_entity, config):
    """Create an OpenCV processor from configuration."""
    classifier_config = config.get(CONF_CLASSIFIER)
    name = '{} {}'.format(
        config[CONF_NAME], split_entity_id(camera_entity)[1].replace('_', ' '))
Example #24
from homeassistant.core import split_entity_id
from homeassistant.components.image_processing import (
    ImageProcessingFaceEntity, PLATFORM_SCHEMA, CONF_SOURCE, CONF_ENTITY_ID,
    CONF_NAME)
import homeassistant.helpers.config_validation as cv

REQUIREMENTS = ['face_recognition==1.2.3']

_LOGGER = logging.getLogger(__name__)

ATTR_NAME = 'name'
CONF_FACES = 'faces'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_FACES): {cv.string: cv.isfile},
})


def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Dlib Face detection platform."""
    entities = []
    for camera in config[CONF_SOURCE]:
        entities.append(DlibFaceIdentifyEntity(
            camera[CONF_ENTITY_ID], config[CONF_FACES], camera.get(CONF_NAME)
        ))

    add_entities(entities)


class DlibFaceIdentifyEntity(ImageProcessingFaceEntity):
Example #25
DEFAULT_NEIGHBORS = 4
DEFAULT_SCALE = 1.1
DEFAULT_TIMEOUT = 10

SCAN_INTERVAL = timedelta(seconds=2)

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_CLASSIFIER): {
        cv.string:
        vol.Any(
            cv.isfile,
            vol.Schema({
                vol.Required(CONF_FILE):
                cv.isfile,
                vol.Optional(CONF_SCALE, default=DEFAULT_SCALE):
                float,
                vol.Optional(CONF_NEIGHBORS, default=DEFAULT_NEIGHBORS):
                cv.positive_int,
                vol.Optional(CONF_MIN_SIZE, default=DEFAULT_MIN_SIZE):
                vol.Schema(
                    vol.All(vol.ExactSequence([int, int]), vol.Coerce(tuple))),
            }),
        )
    }
})
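

# A quick standalone check of the CONF_MIN_SIZE coercion above: an exact
# two-integer sequence is accepted and converted to a tuple, anything else
# raises vol.Invalid.
import voluptuous as vol  # already imported by the full module

_min_size_validator = vol.All(vol.ExactSequence([int, int]), vol.Coerce(tuple))
print(_min_size_validator([30, 30]))  # (30, 30)
# _min_size_validator([30, 30, 30])   # raises vol.Invalid (wrong length)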


def _create_processor_from_config(hass, camera_entity, config):
    """Create an OpenCV processor from configuration."""
    classifier_config = config.get(CONF_CLASSIFIER)
    name = f"{config[CONF_NAME]} {split_entity_id(camera_entity)[1].replace('_', ' ')}"
Example #26
from homeassistant.core import split_entity_id
import homeassistant.helpers.config_validation as cv
from homeassistant.components.image_processing import (PLATFORM_SCHEMA,
                                                       CONF_SOURCE,
                                                       CONF_ENTITY_ID,
                                                       CONF_NAME,
                                                       ImageProcessingEntity)

_LOGGER = logging.getLogger(__name__)

CONF_ENDPOINT = 'endpoint'
ROUNDING_DECIMALS = 2

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_ENDPOINT):
    cv.string,
})


def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the classifier."""
    entities = []
    for camera in config[CONF_SOURCE]:
        entities.append(
            Facebox(
                camera.get(CONF_NAME),
                config[CONF_ENDPOINT],
                camera[CONF_ENTITY_ID],
            ))
    add_devices(entities)
Example #27
    cv.string,
    vol.Optional(CONF_AREA):
    AREA_SCHEMA,
    vol.Optional(CONF_CONFIDENCE):
    vol.Range(min=0, max=100),
})

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_URL):
    cv.string,
    vol.Required(CONF_DETECTOR):
    cv.string,
    vol.Required(CONF_TIMEOUT, default=90):
    cv.positive_int,
    vol.Optional(CONF_AUTH_KEY, default=""):
    cv.string,
    vol.Optional(CONF_FILE_OUT, default=[]):
    vol.All(cv.ensure_list, [cv.template]),
    vol.Optional(CONF_CONFIDENCE, default=0.0):
    vol.Range(min=0, max=100),
    vol.Optional(CONF_LABELS, default=[]):
    vol.All(cv.ensure_list, [vol.Any(cv.string, LABEL_SCHEMA)]),
    vol.Optional(CONF_AREA):
    AREA_SCHEMA,
})


def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Doods client."""
    url = config[CONF_URL]
    auth_key = config[CONF_AUTH_KEY]
    detector_name = config[CONF_DETECTOR]
Example #28
SCAN_INTERVAL = timedelta(days=365)  # Effectively disable scan.

CONF_API_KEY_FILE = "api_key_file"
CONF_SAVE_FILE_FOLDER = "save_file_folder"
CONF_TARGET = "target"
DEFAULT_TARGET = "person"
EVENT_OBJECT_DETECTED = "image_processing.object_detected"
EVENT_FILE_SAVED = "image_processing.file_saved"
FILE = "file"
OBJECT = "object"

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_API_KEY_FILE):
    cv.string,
    vol.Optional(CONF_TARGET, default=DEFAULT_TARGET):
    cv.string,
    vol.Optional(CONF_SAVE_FILE_FOLDER):
    cv.isdir,
})


def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up platform."""

    save_file_folder = config.get(CONF_SAVE_FILE_FOLDER)
    if save_file_folder:
        save_file_folder = os.path.join(save_file_folder,
                                        "")  # If no trailing / add it

    entities = []
    for camera in config[CONF_SOURCE]:
Example #29
    CONF_NAME,
    CONF_SOURCE,
    PLATFORM_SCHEMA,
    ImageProcessingFaceEntity,
)
from homeassistant.const import ATTR_NAME
from homeassistant.core import split_entity_id
import homeassistant.helpers.config_validation as cv

_LOGGER = logging.getLogger(__name__)

CONF_FACES = "faces"

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_FACES): {cv.string: cv.isfile},
        vol.Optional(CONF_CONFIDENCE, default=0.6): vol.Coerce(float),
    }
)


def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Dlib Face detection platform."""
    entities = []
    for camera in config[CONF_SOURCE]:
        entities.append(
            DlibFaceIdentifyEntity(
                camera[CONF_ENTITY_ID],
                config[CONF_FACES],
                camera.get(CONF_NAME),
                config[CONF_CONFIDENCE],
            )
Example #30

DEFAULT_TARGET = 'Person'

DEFAULT_REGION = 'us-east-1'
SUPPORTED_REGIONS = [
    'us-east-1', 'us-east-2', 'us-west-1', 'us-west-2', 'ca-central-1',
    'eu-west-1', 'eu-central-1', 'eu-west-2', 'eu-west-3', 'ap-southeast-1',
    'ap-southeast-2', 'ap-northeast-2', 'ap-northeast-1', 'ap-south-1',
    'sa-east-1'
]

REQUIREMENTS = ['boto3 == 1.9.16']

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_REGION, default=DEFAULT_REGION):
    vol.In(SUPPORTED_REGIONS),
    vol.Required(CONF_ACCESS_KEY_ID):
    cv.string,
    vol.Required(CONF_SECRET_ACCESS_KEY):
    cv.string,
})


def get_label_data(response, label_string='Person'):
    """Get label data."""
    for label in response['Labels']:
        if label['Name'] == label_string:
            data = {}
            data['Confidence'] = round(label['Confidence'], 2)
            data['Instances'] = len(label['Instances'])

            bounding_boxes = []
            for instance in label['Instances']:
Example #31

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_IP_ADDRESS):
    cv.string,
    vol.Required(CONF_PORT):
    cv.port,
    vol.Optional(CONF_API_KEY, default=DEFAULT_API_KEY):
    cv.string,
    vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT):
    cv.positive_int,
    vol.Optional(CONF_CUSTOM_MODEL, default=""):
    cv.string,
    vol.Optional(CONF_TARGETS, default=DEFAULT_TARGETS):
    vol.All(cv.ensure_list, [vol.Schema(TARGETS_SCHEMA)]),
    vol.Optional(CONF_ROI_Y_MIN, default=DEFAULT_ROI_Y_MIN):
    cv.small_float,
    vol.Optional(CONF_ROI_X_MIN, default=DEFAULT_ROI_X_MIN):
    cv.small_float,
    vol.Optional(CONF_ROI_Y_MAX, default=DEFAULT_ROI_Y_MAX):
    cv.small_float,
    vol.Optional(CONF_ROI_X_MAX, default=DEFAULT_ROI_X_MAX):
    cv.small_float,
    vol.Optional(CONF_SCALE, default=DEAULT_SCALE):
    vol.All(vol.Coerce(float), vol.Range(min=0.1, max=1)),
    vol.Optional(CONF_SAVE_FILE_FOLDER):
    cv.isdir,
    vol.Optional(CONF_SAVE_FILE_FORMAT, default=JPG):
    vol.In([JPG, PNG]),
    vol.Optional(CONF_SAVE_TIMESTAMPTED_FILE, default=False):
    cv.boolean,
    vol.Optional(CONF_ALWAYS_SAVE_LATEST_FILE, default=False):
    cv.boolean,
    vol.Optional(CONF_SHOW_BOXES, default=True):
    cv.boolean,
})
Example #32
REQUIREMENTS = ['face_recognition==0.2.0']

_LOGGER = logging.getLogger(__name__)

ATTR_NAME = 'name'
CONF_FACES = 'faces'
CONF_KNOWN_FACES = 'keep_known_faces'
CONF_UNKNOWN_FACES = 'keep_unknown_faces'

DEFAULT_KNOWN_FACES_DIR = 'dlib_known_faces'
DEFAULT_UNKNOWN_FACES_DIR = 'dlib_unknown_faces'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_FACES): {cv.string: cv.isfile},
    vol.Optional(CONF_KNOWN_FACES, default=False): cv.boolean,
    vol.Optional(CONF_UNKNOWN_FACES, default=False): cv.boolean,
})


def keep_image(image, filename):
    """Save image for troubleshooting."""
    directory = os.path.dirname(filename)

    if not os.path.isdir(directory):
        os.mkdir(directory)

    with open(filename, 'wb') as fdb:
        fdb.write(image.getvalue())
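

# Usage sketch: keep_image() expects a BytesIO-like object (it calls
# .getvalue()), so raw bytes are wrapped first; the path below is purely
# illustrative.
import io
import os
import tempfile

_raw = b'fake jpeg bytes'
_target = os.path.join(tempfile.gettempdir(), 'dlib_unknown_faces', 'frame.jpg')
keep_image(io.BytesIO(_raw), _target)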

Example #33

]

CONF_ATTRIBUTES = 'attributes'
DEFAULT_ATTRIBUTES = [ATTR_AGE, ATTR_GENDER]


def validate_attributes(list_attributes):
    """Validate face attributes."""
    for attr in list_attributes:
        if attr not in SUPPORTED_ATTRIBUTES:
            raise vol.Invalid("Invalid attribute {0}".format(attr))
    return list_attributes


PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_ATTRIBUTES, default=DEFAULT_ATTRIBUTES):
        vol.All(cv.ensure_list, validate_attributes),
})


@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
    """Set up the Microsoft Face detection platform."""
    api = hass.data[DATA_MICROSOFT_FACE]
    attributes = config[CONF_ATTRIBUTES]

    entities = []
    for camera in config[CONF_SOURCE]:
        entities.append(MicrosoftFaceDetectEntity(
            camera[CONF_ENTITY_ID], api, attributes, camera.get(CONF_NAME)
        ))
Example #34
from homeassistant.core import split_entity_id
import homeassistant.helpers.config_validation as cv
from homeassistant.components.image_processing import (
    PLATFORM_SCHEMA, ImageProcessingEntity, CONF_SOURCE, CONF_ENTITY_ID,
    CONF_NAME)
from homeassistant.const import CONF_API_KEY

_LOGGER = logging.getLogger(__name__)

BASE_URL = "https://dev.sighthoundapi.com/v1/detections"
CLASSIFIER = 'sighthound'
TIMEOUT = 9

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_API_KEY): cv.string,
})


def encode_image(image):
    """base64 encode an image stream."""
    base64_img = base64.b64encode(image).decode('ascii')
    return base64_img


def parse_api_response(response):
    """Parse the response from the API. """
    faces = []
    persons = []
    for obj in response.json()['objects']:
        if obj['type'] == 'face':
Example #35
})

CATEGORY_SCHEMA = vol.Schema({
    vol.Required(CONF_CATEGORY): cv.string,
    vol.Optional(CONF_AREA): AREA_SCHEMA
})

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_FILE_OUT, default=[]):
    vol.All(cv.ensure_list, [cv.template]),
    vol.Required(CONF_MODEL):
    vol.Schema({
        vol.Required(CONF_GRAPH):
        cv.isfile,
        vol.Optional(CONF_AREA):
        AREA_SCHEMA,
        vol.Optional(CONF_CATEGORIES, default=[]):
        vol.All(cv.ensure_list, [vol.Any(cv.string, CATEGORY_SCHEMA)]),
        vol.Optional(CONF_LABELS):
        cv.isfile,
        vol.Optional(CONF_MODEL_DIR):
        cv.isdir,
    }),
})


def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the TensorFlow image processing platform."""
    model_config = config.get(CONF_MODEL)
    model_dir = model_config.get(CONF_MODEL_DIR) or hass.config.path(
        "tensorflow")
Example #36
    vol.Optional(CONF_LEFT, default=0): cv.small_float,
    vol.Optional(CONF_RIGHT, default=1): cv.small_float,
    vol.Optional(CONF_TOP, default=0): cv.small_float,
})

CATEGORY_SCHEMA = vol.Schema({
    vol.Required(CONF_CATEGORY): cv.string,
    vol.Optional(CONF_AREA): AREA_SCHEMA,
})

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_FILE_OUT, default=[]):
        vol.All(cv.ensure_list, [cv.template]),
    vol.Required(CONF_MODEL): vol.Schema({
        vol.Required(CONF_GRAPH): cv.isfile,
        vol.Optional(CONF_AREA): AREA_SCHEMA,
        vol.Optional(CONF_CATEGORIES, default=[]):
            vol.All(cv.ensure_list, [vol.Any(cv.string, CATEGORY_SCHEMA)]),
        vol.Optional(CONF_LABELS): cv.isfile,
        vol.Optional(CONF_MODEL_DIR): cv.isdir,
    })
})


def draw_box(draw, box, img_width,
             img_height, text='', color=(255, 255, 0)):
    """Draw bounding box on image."""
    ymin, xmin, ymax, xmax = box
    (left, right, top, bottom) = (xmin * img_width, xmax * img_width,
                                  ymin * img_height, ymax * img_height)
    draw.line([(left, top), (left, bottom), (right, bottom),
               (right, top), (left, top)], width=5, fill=color)
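

# Usage sketch for draw_box() with Pillow; the box is the normalised
# (ymin, xmin, ymax, xmax) tuple unpacked above. The remainder of draw_box
# (the label drawing that uses `text`) is truncated in this excerpt, and the
# output path is illustrative.
from PIL import Image, ImageDraw

_img = Image.new('RGB', (640, 480))
_draw = ImageDraw.Draw(_img)
draw_box(_draw, (0.25, 0.25, 0.75, 0.75), 640, 480, text='person: 0.92')
_img.save('/tmp/draw_box_example.jpg')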
Example #37
    "gb",
    "kr",
    "kr2",
    "mx",
    "sg",
    "us",
    "vn2",
]

CONF_ALPR_BIN = "alpr_bin"

DEFAULT_BINARY = "alpr"

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_REGION):
    vol.All(vol.Lower, vol.In(OPENALPR_REGIONS)),
    vol.Optional(CONF_ALPR_BIN, default=DEFAULT_BINARY):
    cv.string,
})


async def async_setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    async_add_entities: AddEntitiesCallback,
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Set up the OpenALPR local platform."""
    create_issue(
        hass,
        "openalpr_local",
        "pending_removal",
Example #38
from homeassistant.core import split_entity_id
from homeassistant.components.image_processing import (
    PLATFORM_SCHEMA, CONF_SOURCE, CONF_ENTITY_ID, CONF_NAME)
from homeassistant.components.image_processing.microsoft_face_identify import (
    ImageProcessingFaceEntity)
import homeassistant.helpers.config_validation as cv

REQUIREMENTS = ['face_recognition==0.2.2']

_LOGGER = logging.getLogger(__name__)

ATTR_NAME = 'name'
CONF_FACES = 'faces'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_FACES): {cv.string: cv.isfile},
})


def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the Dlib Face detection platform."""
    entities = []
    for camera in config[CONF_SOURCE]:
        entities.append(DlibFaceIdentifyEntity(
            camera[CONF_ENTITY_ID], config[CONF_FACES], camera.get(CONF_NAME)
        ))

    add_devices(entities)


class DlibFaceIdentifyEntity(ImageProcessingFaceEntity):
Example #39
    'fr',
    'gb',
    'kr',
    'kr2',
    'mx',
    'sg',
    'us',
    'vn2'
]

CONF_ALPR_BIN = 'alp_bin'

DEFAULT_BINARY = 'alpr'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_REGION): vol.All(vol.Lower, vol.In(OPENALPR_REGIONS)),
    vol.Optional(CONF_ALPR_BIN, default=DEFAULT_BINARY): cv.string,
})


async def async_setup_platform(hass, config, async_add_entities,
                               discovery_info=None):
    """Set up the OpenALPR local platform."""
    command = [config[CONF_ALPR_BIN], '-c', config[CONF_REGION], '-']
    confidence = config[CONF_CONFIDENCE]

    entities = []
    for camera in config[CONF_SOURCE]:
        entities.append(OpenAlprLocalEntity(
            camera[CONF_ENTITY_ID], command, confidence, camera.get(CONF_NAME)
        ))
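

# A hedged sketch of how the command list above might be used: the real
# entity feeds the camera frame to the alpr binary on stdin (the trailing
# '-' argument) via asyncio and parses its stdout; a blocking subprocess
# variant is shown here for illustration only.
import subprocess


def _run_alpr_sketch(command, image_bytes):
    """Run alpr on raw image bytes and return its text output."""
    result = subprocess.run(
        command, input=image_bytes, stdout=subprocess.PIPE, check=False
    )
    return result.stdout.decode('utf-8', errors='ignore')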
Example #40

DATETIME_FORMAT = "%Y-%m-%d_%H-%M-%S"
DEFAULT_API_KEY = ""
DEFAULT_TIMEOUT = 10

CLASSIFIER = "deepstack_face"
DATA_DEEPSTACK = "deepstack_classifiers"
FILE_PATH = "file_path"
SERVICE_TEACH_FACE = "deepstack_teach_face"


PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_IP_ADDRESS): cv.string,
        vol.Required(CONF_PORT): cv.port,
        vol.Optional(CONF_API_KEY, default=DEFAULT_API_KEY): cv.string,
        vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
        vol.Optional(CONF_DETECT_ONLY, default=False): cv.boolean,
        vol.Optional(CONF_SAVE_FILE_FOLDER): cv.isdir,
        vol.Optional(CONF_SAVE_TIMESTAMPTED_FILE, default=False): cv.boolean,
    }
)

SERVICE_TEACH_SCHEMA = vol.Schema(
    {
        vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
        vol.Required(ATTR_NAME): cv.string,
        vol.Required(FILE_PATH): cv.string,
    }
)

Example #41
    "eu",
    "fr",
    "gb",
    "kr",
    "kr2",
    "mx",
    "sg",
    "us",
    "vn2",
]

CONF_REGION = "region"

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_API_KEY):
    cv.string,
    vol.Required(CONF_REGION):
    vol.All(vol.Lower, vol.In(OPENALPR_REGIONS)),
})


async def async_setup_platform(hass,
                               config,
                               async_add_entities,
                               discovery_info=None):
    """Set up the OpenALPR cloud API platform."""
    confidence = config[CONF_CONFIDENCE]
    params = {
        "secret_key": config[CONF_API_KEY],
        "tasks": "plate",
        "return_image": 0,
        "country": config[CONF_REGION],
Example #42
    'gb',
    'kr',
    'mx',
    'sg',
]

CONF_REGION = 'region'
CONF_ALPR_BIN = 'alp_bin'

DEFAULT_BINARY = 'alpr'
DEFAULT_CONFIDENCE = 80

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_REGION):
        vol.All(vol.Lower, vol.In(OPENALPR_REGIONS)),
    vol.Optional(CONF_ALPR_BIN, default=DEFAULT_BINARY): cv.string,
    vol.Optional(CONF_CONFIDENCE, default=DEFAULT_CONFIDENCE):
        vol.All(vol.Coerce(float), vol.Range(min=0, max=100))
})


@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
    """Set up the openalpr local platform."""
    command = [config[CONF_ALPR_BIN], '-c', config[CONF_REGION], '-']
    confidence = config[CONF_CONFIDENCE]

    entities = []
    for camera in config[CONF_SOURCE]:
        entities.append(OpenAlprLocalEntity(
            camera[CONF_ENTITY_ID], command, confidence, camera.get(CONF_NAME)