Example #1
def _exclude_events(events, config):
    """Get lists of excluded entities and platforms."""
    excluded_entities = []
    excluded_domains = []
    exclude = config[DOMAIN].get(CONF_EXCLUDE)
    if exclude:
        excluded_entities = exclude[CONF_ENTITIES]
        excluded_domains = exclude[CONF_DOMAINS]

    filtered_events = []
    for event in events:
        if event.event_type == EVENT_STATE_CHANGED:
            to_state = State.from_dict(event.data.get('new_state'))
            # Do not report on new entities
            if not to_state:
                continue

            # exclude entities which are customized hidden
            hidden = to_state.attributes.get(ATTR_HIDDEN, False)
            if hidden:
                continue

            domain = to_state.domain
            # check if logbook entry is excluded for this domain
            if domain in excluded_domains:
                continue
            # check if logbook entry is excluded for this entity
            if to_state.entity_id in excluded_entities:
                continue
        filtered_events.append(event)
    return filtered_events
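For context, a minimal sketch of the configuration the function above reads. The constant values are assumptions based on the usual logbook component definitions (DOMAIN = "logbook", CONF_EXCLUDE = "exclude", CONF_DOMAINS = "domains", CONF_ENTITIES = "entities"); the entity and domain names are made up.

# Hypothetical parsed configuration, equivalent to the YAML:
#   logbook:
#     exclude:
#       domains: [sun]
#       entities: [sensor.last_boot]
config = {
    "logbook": {
        "exclude": {
            "domains": ["sun"],
            "entities": ["sensor.last_boot"],
        }
    }
}
# With this config, _exclude_events() drops state_changed events for any
# sun.* entity and for sensor.last_boot, and keeps everything else.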
Example #2
def mock_restore_cache(hass, states):
    """Mock the DATA_RESTORE_CACHE."""
    key = restore_state.DATA_RESTORE_STATE_TASK
    data = restore_state.RestoreStateData(hass)
    now = date_util.utcnow()

    last_states = {}
    for state in states:
        restored_state = state.as_dict()
        restored_state["attributes"] = json.loads(
            json.dumps(restored_state["attributes"], cls=JSONEncoder)
        )
        last_states[state.entity_id] = restore_state.StoredState(
            State.from_dict(restored_state), now
        )
    data.last_states = last_states
    _LOGGER.debug("Restore cache: %s", data.last_states)
    assert len(data.last_states) == len(states), "Duplicate entity_id? {}".format(
        states
    )

    async def get_restore_state_data() -> restore_state.RestoreStateData:
        return data

    # Patch the singleton task in hass.data to return our new RestoreStateData
    hass.data[key] = hass.async_create_task(get_restore_state_data())
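A hedged usage sketch for the helper above: a test seeds the restore cache before setting up the integration under test, so entities created afterwards can read their previous state via RestoreEntity.async_get_last_state(). The hass fixture and the light.kitchen entity are assumptions for illustration.

from homeassistant.core import State


async def test_light_restores_brightness(hass):
    """Seed the restore cache, then set up the component under test."""
    mock_restore_cache(hass, [State("light.kitchen", "on", {"brightness": 180})])
    # ... async_setup_component(hass, "light", {...}) would go here; the
    # restored entity would then report state "on" with brightness 180.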
Example #3
    def _event_receiver(topic, payload, qos):
        """
        Receive events published by the other HA instance and fire
        them on this hass instance.
        """
        event = json.loads(payload)
        event_type = event.get('event_type')
        event_data = event.get('event_data')

        # Special case handling for event STATE_CHANGED
        # We will try to convert state dicts back to State objects
        # Copied over from the _handle_api_post_events_event method
        # of the api component.
        if event_type == EVENT_STATE_CHANGED and event_data:
            for key in ('old_state', 'new_state'):
                state = State.from_dict(event_data.get(key))

                if state:
                    event_data[key] = state

        hass.bus.fire(
            event_type,
            event_data=event_data,
            origin=EventOrigin.remote
        )
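The payload handled above is plain JSON; the following self-contained sketch (entity id, attributes, and values are made up) shows how State.from_dict behaves on such dicts, including the None case the receiver guards against.

import json

from homeassistant.core import State

# Hypothetical event payload as it might arrive over the eventstream topic.
payload = json.dumps({
    "event_type": "state_changed",
    "event_data": {
        "entity_id": "light.kitchen",
        "new_state": {
            "entity_id": "light.kitchen",
            "state": "on",
            "attributes": {"brightness": 180},
        },
        "old_state": None,
    },
})

event = json.loads(payload)
new_state = State.from_dict(event["event_data"]["new_state"])
assert new_state is not None and new_state.state == "on"

# Missing or malformed dicts yield None instead of raising, which is why the
# receiver checks `if state:` before replacing the dict in event_data.
assert State.from_dict(event["event_data"]["old_state"]) is None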
Example #4
def _exclude_events(events, config):
    """Get lists of excluded entities and platforms."""
    # pylint: disable=too-many-branches
    excluded_entities = []
    excluded_domains = []
    included_entities = []
    included_domains = []
    exclude = config[DOMAIN].get(CONF_EXCLUDE)
    if exclude:
        excluded_entities = exclude[CONF_ENTITIES]
        excluded_domains = exclude[CONF_DOMAINS]
    include = config[DOMAIN].get(CONF_INCLUDE)
    if include:
        included_entities = include[CONF_ENTITIES]
        included_domains = include[CONF_DOMAINS]

    filtered_events = []
    for event in events:
        if event.event_type == EVENT_STATE_CHANGED:
            to_state = State.from_dict(event.data.get('new_state'))
            # Do not report on new entities
            if not to_state:
                continue

            # exclude entities which are customized hidden
            hidden = to_state.attributes.get(ATTR_HIDDEN, False)
            if hidden:
                continue

            domain = to_state.domain
            entity_id = to_state.entity_id
            # filter if only excluded is configured for this domain
            if excluded_domains and domain in excluded_domains and \
                    not included_domains:
                if (included_entities and entity_id not in included_entities) \
                        or not included_entities:
                    continue
            # filter if only included is configured for this domain
            elif not excluded_domains and included_domains and \
                    domain not in included_domains:
                if (included_entities and entity_id not in included_entities) \
                        or not included_entities:
                    continue
            # filter if included and excluded is configured for this domain
            elif excluded_domains and included_domains and \
                    (domain not in included_domains or
                     domain in excluded_domains):
                if (included_entities and entity_id not in included_entities) \
                        or not included_entities or domain in excluded_domains:
                    continue
            # filter if only included is configured for this entity
            elif not excluded_domains and not included_domains and \
                    included_entities and entity_id not in included_entities:
                continue
            # check if logbook entry is excluded for this entity
            if entity_id in excluded_entities:
                continue
        filtered_events.append(event)
    return filtered_events
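The include/exclude branching above is easier to follow as a single predicate. The helper below is not part of the component; it is just an equivalent, stand-alone restatement of the same rules for illustration.

def _keep(domain, entity_id, excl_domains, incl_domains, excl_entities, incl_entities):
    """Return True if an event for (domain, entity_id) survives the filter."""
    if entity_id in excl_entities:
        return False
    if excl_domains and not incl_domains and domain in excl_domains:
        return entity_id in incl_entities
    if incl_domains and not excl_domains and domain not in incl_domains:
        return entity_id in incl_entities
    if excl_domains and incl_domains and (
            domain not in incl_domains or domain in excl_domains):
        return entity_id in incl_entities and domain not in excl_domains
    if not excl_domains and not incl_domains and \
            incl_entities and entity_id not in incl_entities:
        return False
    return True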
Example #5
    def _event_data_to_state(event_data):
        for key in ("old_state", "new_state"):
            state = State.from_dict(event_data.get(key))

            if state:
                event_data[key] = state

        return event_data
Example #6
    def from_dict(cls: type[_StoredStateT], json_dict: dict) -> _StoredStateT:
        """Initialize a stored state from a dict."""
        last_seen = json_dict["last_seen"]

        if isinstance(last_seen, str):
            last_seen = dt_util.parse_datetime(last_seen)

        return cls(cast(State, State.from_dict(json_dict["state"])), last_seen)
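A minimal sketch (timestamp, entity, and attribute values are made up) of the dict this classmethod consumes and of the datetime parsing it performs, roughly matching an entry in .storage/core.restore_state:

import homeassistant.util.dt as dt_util
from homeassistant.core import State

json_dict = {
    "state": {
        "entity_id": "sensor.outside_temperature",
        "state": "21.5",
        "attributes": {"unit_of_measurement": "°C"},
    },
    "last_seen": "2021-06-01T12:00:00+00:00",
}

# Mirrors the body of from_dict(): the state dict becomes a State object and
# the ISO timestamp string becomes an aware datetime.
state = State.from_dict(json_dict["state"])
last_seen = dt_util.parse_datetime(json_dict["last_seen"])
print(state.entity_id, state.state, last_seen)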
Example #7
    def from_dict(cls, json_dict: Dict) -> "StoredState":
        """Initialize a stored state from a dict."""
        last_seen = json_dict["last_seen"]

        if isinstance(last_seen, str):
            last_seen = dt_util.parse_datetime(last_seen)

        return cls(State.from_dict(json_dict["state"]), last_seen)
Example #8
    def from_dict(cls: type[_StoredStateT], json_dict: dict) -> _StoredStateT:
        """Initialize a stored state from a dict."""
        extra_data_dict = json_dict.get("extra_data")
        extra_data = RestoredExtraData(
            extra_data_dict) if extra_data_dict else None
        last_seen = json_dict["last_seen"]

        if isinstance(last_seen, str):
            last_seen = dt_util.parse_datetime(last_seen)

        return cls(cast(State, State.from_dict(json_dict["state"])),
                   extra_data, last_seen)
Example #9
    def async_restore_entity_removed(self, entity_id: str) -> None:
        """Unregister this entity from saving state."""
        # When an entity is being removed from hass, store its last state. This
        # allows us to support state restoration if the entity is removed, then
        # re-added while hass is still running.
        state = self.hass.states.get(entity_id)
        # To fully mimic all the attribute data types when loaded from storage,
        # we're going to serialize it to JSON and then re-load it.
        if state is not None:
            state = State.from_dict(_encode_complex(state.as_dict()))
        if state is not None:
            self.last_states[entity_id] = StoredState(state, dt_util.utcnow())

        self.entity_ids.remove(entity_id)
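The JSON round trip in the method above normalizes attribute types to what a state loaded from disk would contain. A hedged illustration using the public JSONEncoder helper (the snippet's _encode_complex is internal to restore_state, so it is not used here):

import json
from datetime import datetime, timezone

from homeassistant.core import State
from homeassistant.helpers.json import JSONEncoder

original = State(
    "sensor.sunrise",
    "2021-06-01T05:12:00+00:00",
    {"next_rising": datetime(2021, 6, 2, 5, 11, tzinfo=timezone.utc)},
)

# Serialize and re-load: the datetime attribute comes back as an ISO string,
# matching what a restored-from-storage state contains.
round_tripped = State.from_dict(
    json.loads(json.dumps(original.as_dict(), cls=JSONEncoder))
)
assert isinstance(round_tripped.attributes["next_rising"], str)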
Example #10
def mock_restore_cache(hass, states):
    """Mock the DATA_RESTORE_CACHE."""
    key = restore_state.DATA_RESTORE_STATE_TASK
    data = restore_state.RestoreStateData(hass)
    now = date_util.utcnow()

    last_states = {}
    for state in states:
        restored_state = state.as_dict()
        restored_state["attributes"] = json.loads(
            json.dumps(restored_state["attributes"], cls=JSONEncoder))
        last_states[state.entity_id] = restore_state.StoredState(
            State.from_dict(restored_state), now)
    data.last_states = last_states
    _LOGGER.debug("Restore cache: %s", data.last_states)
    assert len(
        data.last_states) == len(states), f"Duplicate entity_id? {states}"

    hass.data[key] = data
Example #11
    def _event_receiver(msg):
        """Receive events published by and fire them on this hass instance."""
        event = json.loads(msg.payload)
        event_type = event.get("event_type")
        event_data = event.get("event_data")

        # Don't fire HOMEASSISTANT_* events on this instance
        if event_type in BLOCKED_EVENTS:
            return

        # Special case handling for event STATE_CHANGED
        # We will try to convert state dicts back to State objects
        # Copied over from the _handle_api_post_events_event method
        # of the api component.
        if event_type == EVENT_STATE_CHANGED and event_data:
            for key in ("old_state", "new_state"):
                state = State.from_dict(event_data.get(key))

                if state:
                    event_data[key] = state

        hass.bus.async_fire(event_type,
                            event_data=event_data,
                            origin=EventOrigin.remote)
Example #12
def _exclude_events(events, config):
    """Get lists of excluded entities and platforms."""
    excluded_entities = []
    excluded_domains = []
    included_entities = []
    included_domains = []
    exclude = config.get(CONF_EXCLUDE)
    if exclude:
        excluded_entities = exclude[CONF_ENTITIES]
        excluded_domains = exclude[CONF_DOMAINS]
    include = config.get(CONF_INCLUDE)
    if include:
        included_entities = include[CONF_ENTITIES]
        included_domains = include[CONF_DOMAINS]

    filtered_events = []
    for event in events:
        domain, entity_id = None, None

        if event.event_type == EVENT_STATE_CHANGED:
            to_state = State.from_dict(event.data.get('new_state'))
            # Do not report on new entities
            if event.data.get('old_state') is None:
                continue

            # Do not report on entity removal
            if not to_state:
                continue

            # exclude entities which are customized hidden
            hidden = to_state.attributes.get(ATTR_HIDDEN, False)
            if hidden:
                continue

            domain = to_state.domain
            entity_id = to_state.entity_id

        elif event.event_type == EVENT_LOGBOOK_ENTRY:
            domain = event.data.get(ATTR_DOMAIN)
            entity_id = event.data.get(ATTR_ENTITY_ID)

        if domain or entity_id:
            # filter if only excluded is configured for this domain
            if excluded_domains and domain in excluded_domains and \
                    not included_domains:
                if (included_entities and entity_id not in included_entities) \
                        or not included_entities:
                    continue
            # filter if only included is configured for this domain
            elif not excluded_domains and included_domains and \
                    domain not in included_domains:
                if (included_entities and entity_id not in included_entities) \
                        or not included_entities:
                    continue
            # filter if included and excluded is configured for this domain
            elif excluded_domains and included_domains and \
                    (domain not in included_domains or
                     domain in excluded_domains):
                if (included_entities and entity_id not in included_entities) \
                        or not included_entities or domain in excluded_domains:
                    continue
            # filter if only included is configured for this entity
            elif not excluded_domains and not included_domains and \
                    included_entities and entity_id not in included_entities:
                continue
            # check if logbook entry is excluded for this entity
            if entity_id in excluded_entities:
                continue
        filtered_events.append(event)
    return filtered_events
Example #13
def humanify(events):
    """Generate a converted list of events into Entry objects.

    Will try to group events if possible:
    - if 2+ sensor updates in GROUP_BY_MINUTES, show last
    - if home assistant stop and start happen in same minute call it restarted
    """
    # Group events in batches of GROUP_BY_MINUTES
    for _, g_events in groupby(
            events,
            lambda event: event.time_fired.minute // GROUP_BY_MINUTES):

        events_batch = list(g_events)

        # Keep track of last sensor states
        last_sensor_event = {}

        # Group HA start/stop events
        # Maps minute of event to 1: stop, 2: stop + start
        start_stop_events = {}

        # Process events
        for event in events_batch:
            if event.event_type == EVENT_STATE_CHANGED:
                entity_id = event.data.get('entity_id')

                if entity_id is None:
                    continue

                if entity_id.startswith(tuple('{}.'.format(
                        domain) for domain in CONTINUOUS_DOMAINS)):
                    last_sensor_event[entity_id] = event

            elif event.event_type == EVENT_HOMEASSISTANT_STOP:
                if event.time_fired.minute in start_stop_events:
                    continue

                start_stop_events[event.time_fired.minute] = 1

            elif event.event_type == EVENT_HOMEASSISTANT_START:
                if event.time_fired.minute not in start_stop_events:
                    continue

                start_stop_events[event.time_fired.minute] = 2

        # Yield entries
        for event in events_batch:
            if event.event_type == EVENT_STATE_CHANGED:

                to_state = State.from_dict(event.data.get('new_state'))

                # If last_changed != last_updated only attributes have changed
                # we do not report on that yet. Also filter auto groups.
                if not to_state or \
                   to_state.last_changed != to_state.last_updated or \
                   to_state.domain == 'group' and \
                   to_state.attributes.get('auto', False):
                    continue

                domain = to_state.domain

                # Skip all but the last sensor state
                if domain in CONTINUOUS_DOMAINS and \
                   event != last_sensor_event[to_state.entity_id]:
                    continue

                # Don't show continuous sensor value changes in the logbook
                if domain in CONTINUOUS_DOMAINS and \
                   to_state.attributes.get('unit_of_measurement'):
                    continue

                yield Entry(
                    event.time_fired,
                    name=to_state.name,
                    message=_entry_message_from_state(domain, to_state),
                    domain=domain,
                    entity_id=to_state.entity_id)

            elif event.event_type == EVENT_HOMEASSISTANT_START:
                if start_stop_events.get(event.time_fired.minute) == 2:
                    continue

                yield Entry(
                    event.time_fired, "Home Assistant", "started",
                    domain=HA_DOMAIN)

            elif event.event_type == EVENT_HOMEASSISTANT_STOP:
                if start_stop_events.get(event.time_fired.minute) == 2:
                    action = "restarted"
                else:
                    action = "stopped"

                yield Entry(
                    event.time_fired, "Home Assistant", action,
                    domain=HA_DOMAIN)

            elif event.event_type == EVENT_LOGBOOK_ENTRY:
                domain = event.data.get(ATTR_DOMAIN)
                entity_id = event.data.get(ATTR_ENTITY_ID)
                if domain is None and entity_id is not None:
                    try:
                        domain = split_entity_id(str(entity_id))[0]
                    except IndexError:
                        pass

                yield Entry(
                    event.time_fired, event.data.get(ATTR_NAME),
                    event.data.get(ATTR_MESSAGE), domain,
                    entity_id)
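The grouping key above buckets events by wall-clock minute; a small illustration of that bucketing, assuming GROUP_BY_MINUTES is 15 as in the logbook component:

from datetime import datetime
from itertools import groupby

GROUP_BY_MINUTES = 15  # assumed value of the component constant

times = [datetime(2021, 1, 1, 8, minute) for minute in (0, 5, 14, 16, 31, 44, 46)]
for bucket, batch in groupby(times, lambda t: t.minute // GROUP_BY_MINUTES):
    print(bucket, [t.minute for t in batch])
# 0 [0, 5, 14]    minutes 00-14
# 1 [16]          minutes 15-29
# 2 [31, 44]      minutes 30-44
# 3 [46]          minutes 45-59
# Note groupby() only merges consecutive items, which is fine here because
# the recorder returns events ordered by time_fired.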
Example #14
def humanify(hass, events):
    """Generate a converted list of events into Entry objects.

    Will try to group events if possible:
    - if 2+ sensor updates in GROUP_BY_MINUTES, show last
    - if home assistant stop and start happen in same minute call it restarted
    """
    domain_prefixes = tuple('{}.'.format(dom) for dom in CONTINUOUS_DOMAINS)

    # Group events in batches of GROUP_BY_MINUTES
    for _, g_events in groupby(
            events,
            lambda event: event.time_fired.minute // GROUP_BY_MINUTES):

        events_batch = list(g_events)

        # Keep track of last sensor states
        last_sensor_event = {}

        # Group HA start/stop events
        # Maps minute of event to 1: stop, 2: stop + start
        start_stop_events = {}

        # Process events
        for event in events_batch:
            if event.event_type == EVENT_STATE_CHANGED:
                entity_id = event.data.get('entity_id')

                if entity_id.startswith(domain_prefixes):
                    last_sensor_event[entity_id] = event

            elif event.event_type == EVENT_HOMEASSISTANT_STOP:
                if event.time_fired.minute in start_stop_events:
                    continue

                start_stop_events[event.time_fired.minute] = 1

            elif event.event_type == EVENT_HOMEASSISTANT_START:
                if event.time_fired.minute not in start_stop_events:
                    continue

                start_stop_events[event.time_fired.minute] = 2

        # Yield entries
        for event in events_batch:
            if event.event_type == EVENT_STATE_CHANGED:

                to_state = State.from_dict(event.data.get('new_state'))

                domain = to_state.domain

                # Skip all but the last sensor state
                if domain in CONTINUOUS_DOMAINS and \
                   event != last_sensor_event[to_state.entity_id]:
                    continue

                # Don't show continuous sensor value changes in the logbook
                if domain in CONTINUOUS_DOMAINS and \
                   to_state.attributes.get('unit_of_measurement'):
                    continue

                yield {
                    'when': event.time_fired,
                    'name': to_state.name,
                    'message': _entry_message_from_state(domain, to_state),
                    'domain': domain,
                    'entity_id': to_state.entity_id,
                    'context_id': event.context.id,
                    'context_user_id': event.context.user_id
                }

            elif event.event_type == EVENT_HOMEASSISTANT_START:
                if start_stop_events.get(event.time_fired.minute) == 2:
                    continue

                yield {
                    'when': event.time_fired,
                    'name': "Home Assistant",
                    'message': "started",
                    'domain': HA_DOMAIN,
                    'context_id': event.context.id,
                    'context_user_id': event.context.user_id
                }

            elif event.event_type == EVENT_HOMEASSISTANT_STOP:
                if start_stop_events.get(event.time_fired.minute) == 2:
                    action = "restarted"
                else:
                    action = "stopped"

                yield {
                    'when': event.time_fired,
                    'name': "Home Assistant",
                    'message': action,
                    'domain': HA_DOMAIN,
                    'context_id': event.context.id,
                    'context_user_id': event.context.user_id
                }

            elif event.event_type == EVENT_LOGBOOK_ENTRY:
                domain = event.data.get(ATTR_DOMAIN)
                entity_id = event.data.get(ATTR_ENTITY_ID)
                if domain is None and entity_id is not None:
                    try:
                        domain = split_entity_id(str(entity_id))[0]
                    except IndexError:
                        pass

                yield {
                    'when': event.time_fired,
                    'name': event.data.get(ATTR_NAME),
                    'message': event.data.get(ATTR_MESSAGE),
                    'domain': domain,
                    'entity_id': entity_id,
                    'context_id': event.context.id,
                    'context_user_id': event.context.user_id
                }

            elif event.event_type == EVENT_ALEXA_SMART_HOME:
                data = event.data
                entity_id = data['request'].get('entity_id')

                if entity_id:
                    state = hass.states.get(entity_id)
                    name = state.name if state else entity_id
                    message = "send command {}/{} for {}".format(
                        data['request']['namespace'],
                        data['request']['name'], name)
                else:
                    message = "send command {}/{}".format(
                        data['request']['namespace'], data['request']['name'])

                yield {
                    'when': event.time_fired,
                    'name': 'Amazon Alexa',
                    'message': message,
                    'domain': 'alexa',
                    'entity_id': entity_id,
                    'context_id': event.context.id,
                    'context_user_id': event.context.user_id
                }

            elif event.event_type == EVENT_HOMEKIT_CHANGED:
                data = event.data
                entity_id = data.get(ATTR_ENTITY_ID)
                value = data.get(ATTR_VALUE)

                value_msg = " to {}".format(value) if value else ''
                message = "send command {}{} for {}".format(
                    data[ATTR_SERVICE], value_msg, data[ATTR_DISPLAY_NAME])

                yield {
                    'when': event.time_fired,
                    'name': 'HomeKit',
                    'message': message,
                    'domain': DOMAIN_HOMEKIT,
                    'entity_id': entity_id,
                    'context_id': event.context.id,
                    'context_user_id': event.context.user_id
                }

            elif event.event_type == EVENT_AUTOMATION_TRIGGERED:
                yield {
                    'when': event.time_fired,
                    'name': event.data.get(ATTR_NAME),
                    'message': "has been triggered",
                    'domain': 'automation',
                    'entity_id': event.data.get(ATTR_ENTITY_ID),
                    'context_id': event.context.id,
                    'context_user_id': event.context.user_id
                }

            elif event.event_type == EVENT_SCRIPT_STARTED:
                yield {
                    'when': event.time_fired,
                    'name': event.data.get(ATTR_NAME),
                    'message': 'started',
                    'domain': 'script',
                    'entity_id': event.data.get(ATTR_ENTITY_ID),
                    'context_id': event.context.id,
                    'context_user_id': event.context.user_id
                }
Example #15
def humanify(hass, events):
    """Generate a converted list of events into Entry objects.

    Will try to group events if possible:
    - if 2+ sensor updates in GROUP_BY_MINUTES, show last
    - if Home Assistant stop and start happen in same minute call it restarted
    """
    domain_prefixes = tuple(f"{dom}." for dom in CONTINUOUS_DOMAINS)

    # Group events in batches of GROUP_BY_MINUTES
    for _, g_events in groupby(
            events, lambda event: event.time_fired.minute // GROUP_BY_MINUTES):

        events_batch = list(g_events)

        # Keep track of last sensor states
        last_sensor_event = {}

        # Group HA start/stop events
        # Maps minute of event to 1: stop, 2: stop + start
        start_stop_events = {}

        # Process events
        for event in events_batch:
            if event.event_type == EVENT_STATE_CHANGED:
                entity_id = event.data.get("entity_id")

                if entity_id.startswith(domain_prefixes):
                    last_sensor_event[entity_id] = event

            elif event.event_type == EVENT_HOMEASSISTANT_STOP:
                if event.time_fired.minute in start_stop_events:
                    continue

                start_stop_events[event.time_fired.minute] = 1

            elif event.event_type == EVENT_HOMEASSISTANT_START:
                if event.time_fired.minute not in start_stop_events:
                    continue

                start_stop_events[event.time_fired.minute] = 2

        # Yield entries
        external_events = hass.data.get(DOMAIN, {})
        for event in events_batch:
            if event.event_type in external_events:
                domain, describe_event = external_events[event.event_type]
                data = describe_event(event)
                data["when"] = event.time_fired
                data["domain"] = domain
                data["context_id"] = event.context.id
                data["context_user_id"] = event.context.user_id
                yield data

            if event.event_type == EVENT_STATE_CHANGED:
                to_state = State.from_dict(event.data.get("new_state"))

                domain = to_state.domain

                # Skip all but the last sensor state
                if (domain in CONTINUOUS_DOMAINS
                        and event != last_sensor_event[to_state.entity_id]):
                    continue

                # Don't show continuous sensor value changes in the logbook
                if domain in CONTINUOUS_DOMAINS and to_state.attributes.get(
                        "unit_of_measurement"):
                    continue

                yield {
                    "when": event.time_fired,
                    "name": to_state.name,
                    "message": _entry_message_from_state(domain, to_state),
                    "domain": domain,
                    "entity_id": to_state.entity_id,
                    "context_id": event.context.id,
                    "context_user_id": event.context.user_id,
                }

            elif event.event_type == EVENT_HOMEASSISTANT_START:
                if start_stop_events.get(event.time_fired.minute) == 2:
                    continue

                yield {
                    "when": event.time_fired,
                    "name": "Home Assistant",
                    "message": "started",
                    "domain": HA_DOMAIN,
                    "context_id": event.context.id,
                    "context_user_id": event.context.user_id,
                }

            elif event.event_type == EVENT_HOMEASSISTANT_STOP:
                if start_stop_events.get(event.time_fired.minute) == 2:
                    action = "restarted"
                else:
                    action = "stopped"

                yield {
                    "when": event.time_fired,
                    "name": "Home Assistant",
                    "message": action,
                    "domain": HA_DOMAIN,
                    "context_id": event.context.id,
                    "context_user_id": event.context.user_id,
                }

            elif event.event_type == EVENT_LOGBOOK_ENTRY:
                domain = event.data.get(ATTR_DOMAIN)
                entity_id = event.data.get(ATTR_ENTITY_ID)
                if domain is None and entity_id is not None:
                    try:
                        domain = split_entity_id(str(entity_id))[0]
                    except IndexError:
                        pass

                yield {
                    "when": event.time_fired,
                    "name": event.data.get(ATTR_NAME),
                    "message": event.data.get(ATTR_MESSAGE),
                    "domain": domain,
                    "entity_id": entity_id,
                    "context_id": event.context.id,
                    "context_user_id": event.context.user_id,
                }

            elif event.event_type == EVENT_SCRIPT_STARTED:
                yield {
                    "when": event.time_fired,
                    "name": event.data.get(ATTR_NAME),
                    "message": "started",
                    "domain": "script",
                    "entity_id": event.data.get(ATTR_ENTITY_ID),
                    "context_id": event.context.id,
                    "context_user_id": event.context.user_id,
                }
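This variant consults a registry of external event describers in hass.data[DOMAIN]. A hedged sketch of what one registry entry might look like (the event type, domain name, and callback are hypothetical, and the actual registration API differs between versions):

# Hypothetical describe callback: it supplies the name/message/entity_id part
# of a logbook row, and humanify() above fills in when/domain/context fields.
def describe_doorbell_event(event):
    return {
        "name": "Front door",
        "message": "rang the doorbell",
        "entity_id": event.data.get("entity_id"),
    }


external_events = {"doorbell_pressed": ("doorbell", describe_doorbell_event)}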
Example #16
def run(script_args: List) -> int:
    """Run the actual script."""
    from sqlalchemy import create_engine
    from sqlalchemy import func
    from sqlalchemy.orm import sessionmaker
    from influxdb import InfluxDBClient
    from homeassistant.components.recorder import models
    from homeassistant.helpers import state as state_helper
    from homeassistant.core import State
    from homeassistant.core import HomeAssistantError

    parser = argparse.ArgumentParser(
        description="import data to influxDB.")
    parser.add_argument(
        '-c', '--config',
        metavar='path_to_config_dir',
        default=config_util.get_default_config_dir(),
        help="Directory that contains the Home Assistant configuration")
    parser.add_argument(
        '--uri',
        type=str,
        help="Connect to URI and import (if other than default sqlite) "
             "eg: mysql://localhost/homeassistant")
    parser.add_argument(
        '-d', '--dbname',
        metavar='dbname',
        required=True,
        help="InfluxDB database name")
    parser.add_argument(
        '-H', '--host',
        metavar='host',
        default='127.0.0.1',
        help="InfluxDB host address")
    parser.add_argument(
        '-P', '--port',
        metavar='port',
        default=8086,
        help="InfluxDB host port")
    parser.add_argument(
        '-u', '--username',
        metavar='username',
        default='root',
        help="InfluxDB username")
    parser.add_argument(
        '-p', '--password',
        metavar='password',
        default='root',
        help="InfluxDB password")
    parser.add_argument(
        '-s', '--step',
        metavar='step',
        default=1000,
        help="How many points to import at the same time")
    parser.add_argument(
        '-t', '--tags',
        metavar='tags',
        default="",
        help="Comma separated list of tags (key:value) for all points")
    parser.add_argument(
        '-D', '--default-measurement',
        metavar='default_measurement',
        default="",
        help="Store all your points in the same measurement")
    parser.add_argument(
        '-o', '--override-measurement',
        metavar='override_measurement',
        default="",
        help="Store all your points in the same measurement")
    parser.add_argument(
        '-e', '--exclude_entities',
        metavar='exclude_entities',
        default="",
        help="Comma separated list of excluded entities")
    parser.add_argument(
        '-E', '--exclude_domains',
        metavar='exclude_domains',
        default="",
        help="Comma separated list of excluded domains")
    parser.add_argument(
        "-S", "--simulate",
        default=False,
        action="store_true",
        help=("Do not write points but simulate preprocessing and print "
              "statistics"))
    parser.add_argument(
        '--script',
        choices=['influxdb_import'])

    args = parser.parse_args()
    simulate = args.simulate

    client = None
    if not simulate:
        client = InfluxDBClient(
            args.host, args.port, args.username, args.password)
        client.switch_database(args.dbname)

    config_dir = os.path.join(os.getcwd(), args.config)  # type: str

    # Test if configuration directory exists
    if not os.path.isdir(config_dir):
        if config_dir != config_util.get_default_config_dir():
            print(('Fatal Error: Specified configuration directory does '
                   'not exist {} ').format(config_dir))
            return 1

    src_db = '{}/home-assistant_v2.db'.format(config_dir)

    if not os.path.exists(src_db) and not args.uri:
        print("Fatal Error: Database '{}' does not exist "
              "and no URI given".format(src_db))
        return 1

    uri = args.uri or 'sqlite:///{}'.format(src_db)
    engine = create_engine(uri, echo=False)
    session_factory = sessionmaker(bind=engine)
    session = session_factory()
    step = int(args.step)
    step_start = 0

    tags = {}
    if args.tags:
        tags.update(dict(elem.split(':') for elem in args.tags.split(',')))
    excl_entities = args.exclude_entities.split(',')
    excl_domains = args.exclude_domains.split(',')
    override_measurement = args.override_measurement
    default_measurement = args.default_measurement

    query = session.query(func.count(models.Events.event_type)).filter(
        models.Events.event_type == 'state_changed')

    total_events = query.scalar()
    prefix_format = '{} of {}'

    points = []
    invalid_points = []
    count = 0
    from collections import defaultdict
    entities = defaultdict(int)
    print_progress(0, total_events, prefix_format.format(0, total_events))

    while True:

        step_stop = step_start + step
        if step_start > total_events:
            print_progress(total_events, total_events, prefix_format.format(
                total_events, total_events))
            break
        query = session.query(models.Events).filter(
            models.Events.event_type == 'state_changed').order_by(
                models.Events.time_fired).slice(step_start, step_stop)

        for event in query:
            event_data = json.loads(event.event_data)

            if ('entity_id' not in event_data
                    or (excl_entities
                        and event_data['entity_id'] in excl_entities)
                    or (excl_domains
                        and event_data['entity_id'].split('.')[0]
                        in excl_domains)):
                session.expunge(event)
                continue

            try:
                state = State.from_dict(event_data.get('new_state'))
            except HomeAssistantError:
                # Without this continue, `state` would be undefined (or stale
                # from a previous event) in the checks below.
                invalid_points.append(event_data)
                continue

            if not state:
                invalid_points.append(event_data)
                continue

            try:
                _state = float(state_helper.state_as_number(state))
                _state_key = 'value'
            except ValueError:
                _state = state.state
                _state_key = 'state'

            if override_measurement:
                measurement = override_measurement
            else:
                measurement = state.attributes.get('unit_of_measurement')
                if measurement in (None, ''):
                    if default_measurement:
                        measurement = default_measurement
                    else:
                        measurement = state.entity_id

            point = {
                'measurement': measurement,
                'tags': {
                    'domain': state.domain,
                    'entity_id': state.object_id,
                },
                'time': event.time_fired,
                'fields': {
                    _state_key: _state,
                }
            }

            for key, value in state.attributes.items():
                if key != 'unit_of_measurement':
                    # If the key is already in fields
                    if key in point['fields']:
                        key = key + '_'
                    # Prevent column data errors in influxDB.
                    # For each value we try to cast it as float
                    # But if we can not do it we store the value
                    # as string add "_str" postfix to the field key
                    try:
                        point['fields'][key] = float(value)
                    except (ValueError, TypeError):
                        new_key = '{}_str'.format(key)
                        point['fields'][new_key] = str(value)

            entities[state.entity_id] += 1
            point['tags'].update(tags)
            points.append(point)
            session.expunge(event)

        if points:
            if not simulate:
                client.write_points(points)
            count += len(points)
            # This prevents the progress bar from going over 100% when
            # the last step happens
            print_progress((step_start + len(
                points)), total_events, prefix_format.format(
                    step_start, total_events))
        else:
            print_progress(
                (step_start + step), total_events, prefix_format.format(
                    step_start, total_events))

        points = []
        step_start += step

    print("\nStatistics:")
    print("\n".join(["{:6}: {}".format(v, k) for k, v
                     in sorted(entities.items(), key=lambda x: x[1])]))
    print("\nInvalid Points: {}".format(len(invalid_points)))
    print("\nImport finished: {} points written".format(count))
    return 0
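A condensed, stand-alone illustration of the attribute handling in the import loop above: each attribute is cast to float when possible, otherwise stored as a string under a "<key>_str" field (the attribute values are made up):

attributes = {"friendly_name": "Outside", "battery": "87", "voltage": 3.1}

fields = {"value": 21.5}
for key, value in attributes.items():
    if key in fields:
        key = key + "_"
    try:
        fields[key] = float(value)
    except (ValueError, TypeError):
        fields["{}_str".format(key)] = str(value)

# fields == {"value": 21.5, "friendly_name_str": "Outside",
#            "battery": 87.0, "voltage": 3.1}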
Example #17
def humanify(hass, events):
    """Generate a converted list of events into Entry objects.

    Will try to group events if possible:
    - if 2+ sensor updates in GROUP_BY_MINUTES, show last
    - if Home Assistant stop and start happen in same minute call it restarted
    """
    domain_prefixes = tuple(f"{dom}." for dom in CONTINUOUS_DOMAINS)

    # Track last states to filter out duplicates
    last_state = {}

    # Group events in batches of GROUP_BY_MINUTES
    for _, g_events in groupby(
            events, lambda event: event.time_fired.minute // GROUP_BY_MINUTES):

        events_batch = list(g_events)

        # Keep track of last sensor states
        last_sensor_event = {}

        # Group HA start/stop events
        # Maps minute of event to 1: stop, 2: stop + start
        start_stop_events = {}

        # Process events
        for event in events_batch:
            if event.event_type == EVENT_STATE_CHANGED:
                entity_id = event.data.get("entity_id")

                if entity_id.startswith(domain_prefixes):
                    last_sensor_event[entity_id] = event

            elif event.event_type == EVENT_HOMEASSISTANT_STOP:
                if event.time_fired.minute in start_stop_events:
                    continue

                start_stop_events[event.time_fired.minute] = 1

            elif event.event_type == EVENT_HOMEASSISTANT_START:
                if event.time_fired.minute not in start_stop_events:
                    continue

                start_stop_events[event.time_fired.minute] = 2

        # Yield entries
        for event in events_batch:
            if event.event_type == EVENT_STATE_CHANGED:
                to_state = State.from_dict(event.data.get("new_state"))

                # Filter out states that become same state again (force_update=True)
                # or light becoming different color
                if last_state.get(to_state.entity_id) == to_state.state:
                    continue

                last_state[to_state.entity_id] = to_state.state

                domain = to_state.domain

                # Skip all but the last sensor state
                if (domain in CONTINUOUS_DOMAINS
                        and event != last_sensor_event[to_state.entity_id]):
                    continue

                # Don't show continuous sensor value changes in the logbook
                if domain in CONTINUOUS_DOMAINS and to_state.attributes.get(
                        "unit_of_measurement"):
                    continue

                yield {
                    "when": event.time_fired,
                    "name": to_state.name,
                    "message": _entry_message_from_state(domain, to_state),
                    "domain": domain,
                    "entity_id": to_state.entity_id,
                    "context_id": event.context.id,
                    "context_user_id": event.context.user_id,
                }

            elif event.event_type == EVENT_HOMEASSISTANT_START:
                if start_stop_events.get(event.time_fired.minute) == 2:
                    continue

                yield {
                    "when": event.time_fired,
                    "name": "Home Assistant",
                    "message": "started",
                    "domain": HA_DOMAIN,
                    "context_id": event.context.id,
                    "context_user_id": event.context.user_id,
                }

            elif event.event_type == EVENT_HOMEASSISTANT_STOP:
                if start_stop_events.get(event.time_fired.minute) == 2:
                    action = "restarted"
                else:
                    action = "stopped"

                yield {
                    "when": event.time_fired,
                    "name": "Home Assistant",
                    "message": action,
                    "domain": HA_DOMAIN,
                    "context_id": event.context.id,
                    "context_user_id": event.context.user_id,
                }

            elif event.event_type == EVENT_LOGBOOK_ENTRY:
                domain = event.data.get(ATTR_DOMAIN)
                entity_id = event.data.get(ATTR_ENTITY_ID)
                if domain is None and entity_id is not None:
                    try:
                        domain = split_entity_id(str(entity_id))[0]
                    except IndexError:
                        pass

                yield {
                    "when": event.time_fired,
                    "name": event.data.get(ATTR_NAME),
                    "message": event.data.get(ATTR_MESSAGE),
                    "domain": domain,
                    "entity_id": entity_id,
                    "context_id": event.context.id,
                    "context_user_id": event.context.user_id,
                }

            elif event.event_type == EVENT_ALEXA_SMART_HOME:
                data = event.data
                entity_id = data["request"].get("entity_id")

                if entity_id:
                    state = hass.states.get(entity_id)
                    name = state.name if state else entity_id
                    message = "send command {}/{} for {}".format(
                        data["request"]["namespace"], data["request"]["name"],
                        name)
                else:
                    message = "send command {}/{}".format(
                        data["request"]["namespace"], data["request"]["name"])

                yield {
                    "when": event.time_fired,
                    "name": "Amazon Alexa",
                    "message": message,
                    "domain": "alexa",
                    "entity_id": entity_id,
                    "context_id": event.context.id,
                    "context_user_id": event.context.user_id,
                }

            elif event.event_type == EVENT_HOMEKIT_CHANGED:
                data = event.data
                entity_id = data.get(ATTR_ENTITY_ID)
                value = data.get(ATTR_VALUE)

                value_msg = f" to {value}" if value else ""
                message = "send command {}{} for {}".format(
                    data[ATTR_SERVICE], value_msg, data[ATTR_DISPLAY_NAME])

                yield {
                    "when": event.time_fired,
                    "name": "HomeKit",
                    "message": message,
                    "domain": DOMAIN_HOMEKIT,
                    "entity_id": entity_id,
                    "context_id": event.context.id,
                    "context_user_id": event.context.user_id,
                }

            elif event.event_type == EVENT_AUTOMATION_TRIGGERED:
                yield {
                    "when": event.time_fired,
                    "name": event.data.get(ATTR_NAME),
                    "message": "has been triggered",
                    "domain": "automation",
                    "entity_id": event.data.get(ATTR_ENTITY_ID),
                    "context_id": event.context.id,
                    "context_user_id": event.context.user_id,
                }

            elif event.event_type == EVENT_SCRIPT_STARTED:
                yield {
                    "when": event.time_fired,
                    "name": event.data.get(ATTR_NAME),
                    "message": "started",
                    "domain": "script",
                    "entity_id": event.data.get(ATTR_ENTITY_ID),
                    "context_id": event.context.id,
                    "context_user_id": event.context.user_id,
                }
Example #18
def humanify(events):
    """
    Generator that converts a list of events into Entry objects.

    Will try to group events if possible:
     - if 2+ sensor updates in GROUP_BY_MINUTES, show last
     - if home assistant stop and start happen in same minute call it restarted
    """
    # pylint: disable=too-many-branches

    # Group events in batches of GROUP_BY_MINUTES
    for _, g_events in groupby(events, lambda event: event.time_fired.minute // GROUP_BY_MINUTES):

        events_batch = list(g_events)

        # Keep track of last sensor states
        last_sensor_event = {}

        # group HA start/stop events
        # Maps minute of event to 1: stop, 2: stop + start
        start_stop_events = {}

        # Process events
        for event in events_batch:
            if event.event_type == EVENT_STATE_CHANGED:
                entity_id = event.data.get("entity_id")

                if entity_id is None:
                    continue

                if entity_id.startswith("sensor."):
                    last_sensor_event[entity_id] = event

            elif event.event_type == EVENT_HOMEASSISTANT_STOP:
                if event.time_fired.minute in start_stop_events:
                    continue

                start_stop_events[event.time_fired.minute] = 1

            elif event.event_type == EVENT_HOMEASSISTANT_START:
                if event.time_fired.minute not in start_stop_events:
                    continue

                start_stop_events[event.time_fired.minute] = 2

        # Yield entries
        for event in events_batch:
            if event.event_type == EVENT_STATE_CHANGED:

                # Do not report on new entities
                if "old_state" not in event.data:
                    continue

                to_state = State.from_dict(event.data.get("new_state"))

                # if last_changed != last_updated only attributes have changed
                # we do not report on that yet. Also filter auto groups.
                if (
                    not to_state
                    or to_state.last_changed != to_state.last_updated
                    or to_state.domain == "group"
                    and to_state.attributes.get("auto", False)
                ):
                    continue

                domain = to_state.domain

                # Skip all but the last sensor state
                if domain == "sensor" and event != last_sensor_event[to_state.entity_id]:
                    continue

                yield Entry(
                    event.time_fired,
                    name=to_state.name,
                    message=_entry_message_from_state(domain, to_state),
                    domain=domain,
                    entity_id=to_state.entity_id,
                )

            elif event.event_type == EVENT_HOMEASSISTANT_START:
                if start_stop_events.get(event.time_fired.minute) == 2:
                    continue

                yield Entry(event.time_fired, "Home Assistant", "started", domain=HA_DOMAIN)

            elif event.event_type == EVENT_HOMEASSISTANT_STOP:
                if start_stop_events.get(event.time_fired.minute) == 2:
                    action = "restarted"
                else:
                    action = "stopped"

                yield Entry(event.time_fired, "Home Assistant", action, domain=HA_DOMAIN)

            elif event.event_type.lower() == EVENT_LOGBOOK_ENTRY:
                domain = event.data.get(ATTR_DOMAIN)
                entity_id = event.data.get(ATTR_ENTITY_ID)
                if domain is None and entity_id is not None:
                    try:
                        domain = util.split_entity_id(str(entity_id))[0]
                    except IndexError:
                        pass

                yield Entry(
                    event.time_fired, event.data.get(ATTR_NAME), event.data.get(ATTR_MESSAGE), domain, entity_id
                )
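
How the GROUP_BY_MINUTES batching works is worth seeing in isolation: humanify keys itertools.groupby on event.time_fired.minute // GROUP_BY_MINUTES, and because groupby only merges consecutive items, the events must already be sorted by time. A minimal sketch, assuming a GROUP_BY_MINUTES of 15 and a namedtuple stand-in for the event object:

from collections import namedtuple
from datetime import datetime
from itertools import groupby

GROUP_BY_MINUTES = 15  # assumed value for this sketch

Event = namedtuple("Event", ["event_type", "time_fired", "data"])

events = [
    Event("state_changed", datetime(2021, 1, 1, 12, 1), {"entity_id": "sensor.temp"}),
    Event("state_changed", datetime(2021, 1, 1, 12, 14), {"entity_id": "sensor.temp"}),
    Event("state_changed", datetime(2021, 1, 1, 12, 16), {"entity_id": "sensor.temp"}),
]

# Events must be sorted by time_fired; groupby only merges *consecutive* keys.
for bucket, batch in groupby(events, lambda event: event.time_fired.minute // GROUP_BY_MINUTES):
    print(bucket, [e.time_fired.strftime("%H:%M") for e in batch])

# 0 ['12:01', '12:14']
# 1 ['12:16']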
Example #22
def humanify(events):
    """
    Generator that converts a list of events into Entry objects.

    Will try to group events if possible:
     - if 2+ sensor updates in GROUP_BY_MINUTES, show last
     - if home assistant stop and start happen in same minute call it restarted
    """
    # pylint: disable=too-many-branches

    # Group events in batches of GROUP_BY_MINUTES
    for _, g_events in groupby(
            events,
            lambda event: event.time_fired.minute // GROUP_BY_MINUTES):

        events_batch = list(g_events)

        # Keep track of last sensor states
        last_sensor_event = {}

        # group HA start/stop events
        # Maps minute of event to 1: stop, 2: stop + start
        start_stop_events = {}

        # Process events
        for event in events_batch:
            if event.event_type == EVENT_STATE_CHANGED:
                entity_id = event.data['entity_id']

                if entity_id.startswith('sensor.'):
                    last_sensor_event[entity_id] = event

            elif event.event_type == EVENT_HOMEASSISTANT_STOP:
                if event.time_fired.minute in start_stop_events:
                    continue

                start_stop_events[event.time_fired.minute] = 1

            elif event.event_type == EVENT_HOMEASSISTANT_START:
                if event.time_fired.minute not in start_stop_events:
                    continue

                start_stop_events[event.time_fired.minute] = 2

        # Yield entries
        for event in events_batch:
            if event.event_type == EVENT_STATE_CHANGED:

                # Do not report on new entities
                if 'old_state' not in event.data:
                    continue

                to_state = State.from_dict(event.data.get('new_state'))

                # if last_changed != last_updated only attributes have changed
                # we do not report on that yet.
                if not to_state or \
                   to_state.last_changed != to_state.last_updated:
                    continue

                domain = to_state.domain

                # Skip all but the last sensor state
                if domain == 'sensor' and \
                   event != last_sensor_event[to_state.entity_id]:
                    continue

                yield Entry(
                    event.time_fired,
                    name=to_state.name,
                    message=_entry_message_from_state(domain, to_state),
                    domain=domain,
                    entity_id=to_state.entity_id)

            elif event.event_type == EVENT_HOMEASSISTANT_START:
                if start_stop_events.get(event.time_fired.minute) == 2:
                    continue

                yield Entry(
                    event.time_fired, "Home Assistant", "started",
                    domain=HA_DOMAIN)

            elif event.event_type == EVENT_HOMEASSISTANT_STOP:
                if start_stop_events.get(event.time_fired.minute) == 2:
                    action = "restarted"
                else:
                    action = "stopped"

                yield Entry(
                    event.time_fired, "Home Assistant", action,
                    domain=HA_DOMAIN)
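
The start_stop_events bookkeeping is what turns a stop/start pair in the same minute into a single "restarted" entry: the first pass records 1 for a stop and upgrades it to 2 if a start follows in that minute, and the second pass then relabels the stop and suppresses the separate "started" line. A standalone sketch of just that logic, with event types reduced to plain strings:

from collections import namedtuple
from datetime import datetime

Event = namedtuple("Event", ["event_type", "time_fired"])

events = [
    Event("homeassistant_stop", datetime(2021, 1, 1, 9, 30, 5)),
    Event("homeassistant_start", datetime(2021, 1, 1, 9, 30, 40)),
]

# First pass: 1 = stop seen, 2 = stop followed by start in the same minute.
start_stop_events = {}
for event in events:
    minute = event.time_fired.minute
    if event.event_type == "homeassistant_stop" and minute not in start_stop_events:
        start_stop_events[minute] = 1
    elif event.event_type == "homeassistant_start" and minute in start_stop_events:
        start_stop_events[minute] = 2

# Second pass: label the stop, drop the redundant start.
for event in events:
    minute = event.time_fired.minute
    if event.event_type == "homeassistant_stop":
        print("restarted" if start_stop_events.get(minute) == 2 else "stopped")
    elif event.event_type == "homeassistant_start":
        if start_stop_events.get(minute) != 2:
            print("started")

# Output: restarted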
Example #23
def humanify(events):
    """
    Generator that converts a list of events into Entry objects.

    Will try to group events if possible:
     - if 2+ sensor updates in GROUP_BY_MINUTES, show last
     - if home assistant stop and start happen in same minute call it restarted
    """
    # pylint: disable=too-many-branches

    # Group events in batches of GROUP_BY_MINUTES
    for _, g_events in groupby(
            events, lambda event: event.time_fired.minute // GROUP_BY_MINUTES):

        events_batch = list(g_events)

        # Keep track of last sensor states
        last_sensor_event = {}

        # group HA start/stop events
        # Maps minute of event to 1: stop, 2: stop + start
        start_stop_events = {}

        # Process events
        for event in events_batch:
            if event.event_type == EVENT_STATE_CHANGED:
                entity_id = event.data['entity_id']

                if entity_id.startswith('sensor.'):
                    last_sensor_event[entity_id] = event

            elif event.event_type == EVENT_HOMEASSISTANT_STOP:
                if event.time_fired.minute in start_stop_events:
                    continue

                start_stop_events[event.time_fired.minute] = 1

            elif event.event_type == EVENT_HOMEASSISTANT_START:
                if event.time_fired.minute not in start_stop_events:
                    continue

                start_stop_events[event.time_fired.minute] = 2

        # Yield entries
        for event in events_batch:
            if event.event_type == EVENT_STATE_CHANGED:

                # Do not report on new entities
                if 'old_state' not in event.data:
                    continue

                to_state = State.from_dict(event.data.get('new_state'))

                # if last_changed != last_updated only attributes have changed
                # we do not report on that yet.
                if not to_state or \
                   to_state.last_changed != to_state.last_updated:
                    continue

                domain = to_state.domain

                # Skip all but the last sensor state
                if domain == 'sensor' and \
                   event != last_sensor_event[to_state.entity_id]:
                    continue

                yield Entry(event.time_fired,
                            name=to_state.name,
                            message=_entry_message_from_state(
                                domain, to_state),
                            domain=domain,
                            entity_id=to_state.entity_id)

            elif event.event_type == EVENT_HOMEASSISTANT_START:
                if start_stop_events.get(event.time_fired.minute) == 2:
                    continue

                yield Entry(event.time_fired,
                            "Home Assistant",
                            "started",
                            domain=HA_DOMAIN)

            elif event.event_type == EVENT_HOMEASSISTANT_STOP:
                if start_stop_events.get(event.time_fired.minute) == 2:
                    action = "restarted"
                else:
                    action = "stopped"

                yield Entry(event.time_fired,
                            "Home Assistant",
                            action,
                            domain=HA_DOMAIN)
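
The last_sensor_event dict implements the "if 2+ sensor updates in GROUP_BY_MINUTES, show last" rule: the first pass overwrites the stored event per entity_id, so the second pass can drop every sensor event except the survivor. A self-contained sketch of that pattern (entities and values are invented):

from collections import namedtuple
from datetime import datetime

Event = namedtuple("Event", ["entity_id", "time_fired", "state"])

batch = [
    Event("sensor.temp", datetime(2021, 1, 1, 12, 1), "20.1"),
    Event("sensor.temp", datetime(2021, 1, 1, 12, 7), "20.4"),
    Event("sensor.humidity", datetime(2021, 1, 1, 12, 3), "41"),
]

# First pass: later events overwrite earlier ones per entity.
last_sensor_event = {}
for event in batch:
    if event.entity_id.startswith("sensor."):
        last_sensor_event[event.entity_id] = event

# Second pass: keep only the last event for each sensor entity.
for event in batch:
    if event.entity_id.startswith("sensor.") and event != last_sensor_event[event.entity_id]:
        continue
    print(event.entity_id, event.state)

# sensor.temp 20.4
# sensor.humidity 41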
Example #24
def humanify(events):
    """Generate a converted list of events into Entry objects.

    Will try to group events if possible:
    - if 2+ sensor updates in GROUP_BY_MINUTES, show last
    - if home assistant stop and start happen in same minute call it restarted
    """
    domain_prefixes = tuple('{}.'.format(dom) for dom in CONTINUOUS_DOMAINS)

    # Group events in batches of GROUP_BY_MINUTES
    for _, g_events in groupby(
            events, lambda event: event.time_fired.minute // GROUP_BY_MINUTES):

        events_batch = list(g_events)

        # Keep track of last sensor states
        last_sensor_event = {}

        # Group HA start/stop events
        # Maps minute of event to 1: stop, 2: stop + start
        start_stop_events = {}

        # Process events
        for event in events_batch:
            if event.event_type == EVENT_STATE_CHANGED:
                entity_id = event.data.get('entity_id')

                if entity_id is None:
                    continue

                if entity_id.startswith(domain_prefixes):
                    last_sensor_event[entity_id] = event

            elif event.event_type == EVENT_HOMEASSISTANT_STOP:
                if event.time_fired.minute in start_stop_events:
                    continue

                start_stop_events[event.time_fired.minute] = 1

            elif event.event_type == EVENT_HOMEASSISTANT_START:
                if event.time_fired.minute not in start_stop_events:
                    continue

                start_stop_events[event.time_fired.minute] = 2

        # Yield entries
        for event in events_batch:
            if event.event_type == EVENT_STATE_CHANGED:

                to_state = State.from_dict(event.data.get('new_state'))

                # Entity was removed; there is no new state to report on
                if not to_state:
                    continue

                domain = to_state.domain

                # Skip all but the last sensor state
                if domain in CONTINUOUS_DOMAINS and \
                   event != last_sensor_event[to_state.entity_id]:
                    continue

                # Don't show continuous sensor value changes in the logbook
                if domain in CONTINUOUS_DOMAINS and \
                   to_state.attributes.get('unit_of_measurement'):
                    continue

                yield Entry(event.time_fired,
                            name=to_state.name,
                            message=_entry_message_from_state(
                                domain, to_state),
                            domain=domain,
                            entity_id=to_state.entity_id)

            elif event.event_type == EVENT_HOMEASSISTANT_START:
                if start_stop_events.get(event.time_fired.minute) == 2:
                    continue

                yield Entry(event.time_fired,
                            "Home Assistant",
                            "started",
                            domain=HA_DOMAIN)

            elif event.event_type == EVENT_HOMEASSISTANT_STOP:
                if start_stop_events.get(event.time_fired.minute) == 2:
                    action = "restarted"
                else:
                    action = "stopped"

                yield Entry(event.time_fired,
                            "Home Assistant",
                            action,
                            domain=HA_DOMAIN)

            elif event.event_type == EVENT_LOGBOOK_ENTRY:
                domain = event.data.get(ATTR_DOMAIN)
                entity_id = event.data.get(ATTR_ENTITY_ID)
                if domain is None and entity_id is not None:
                    try:
                        domain = split_entity_id(str(entity_id))[0]
                    except IndexError:
                        pass

                yield Entry(event.time_fired, event.data.get(ATTR_NAME),
                            event.data.get(ATTR_MESSAGE), domain, entity_id)
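
This variant generalises the sensor special case to a set of CONTINUOUS_DOMAINS by pre-building a tuple of 'domain.' prefixes; str.startswith accepts such a tuple, so one call covers every continuous domain. A minimal sketch, assuming CONTINUOUS_DOMAINS contains 'proximity' and 'sensor':

CONTINUOUS_DOMAINS = ['proximity', 'sensor']  # assumed contents for this sketch
domain_prefixes = tuple('{}.'.format(dom) for dom in CONTINUOUS_DOMAINS)

entity_ids = ['sensor.power_usage', 'proximity.home', 'light.kitchen']

# str.startswith accepts a tuple, so one call checks every continuous domain.
for entity_id in entity_ids:
    print(entity_id, entity_id.startswith(domain_prefixes))

# sensor.power_usage True
# proximity.home True
# light.kitchen False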
Example #25
def run(script_args: List) -> int:
    """Run the actual script."""
    from sqlalchemy import create_engine
    from sqlalchemy import func
    from sqlalchemy.orm import sessionmaker
    from influxdb import InfluxDBClient
    from homeassistant.components.recorder import models
    from homeassistant.helpers import state as state_helper
    from homeassistant.core import State
    from homeassistant.core import HomeAssistantError

    parser = argparse.ArgumentParser(description="Import data to InfluxDB.")
    parser.add_argument(
        '-c',
        '--config',
        metavar='path_to_config_dir',
        default=config_util.get_default_config_dir(),
        help="Directory that contains the Home Assistant configuration")
    parser.add_argument(
        '--uri',
        type=str,
        help="Connect to URI and import (if other than default sqlite) "
        "eg: mysql://localhost/homeassistant")
    parser.add_argument('-d',
                        '--dbname',
                        metavar='dbname',
                        required=True,
                        help="InfluxDB database name")
    parser.add_argument('-H',
                        '--host',
                        metavar='host',
                        default='127.0.0.1',
                        help="InfluxDB host address")
    parser.add_argument('-P',
                        '--port',
                        metavar='port',
                        default=8086,
                        help="InfluxDB host port")
    parser.add_argument('-u',
                        '--username',
                        metavar='username',
                        default='root',
                        help="InfluxDB username")
    parser.add_argument('-p',
                        '--password',
                        metavar='password',
                        default='root',
                        help="InfluxDB password")
    parser.add_argument('-s',
                        '--step',
                        metavar='step',
                        default=1000,
                        help="How many points to import at the same time")
    parser.add_argument(
        '-t',
        '--tags',
        metavar='tags',
        default="",
        help="Comma separated list of tags (key:value) for all points")
    parser.add_argument('-D',
                        '--default-measurement',
                        metavar='default_measurement',
                        default="",
                        help="Measurement to use when an entity has no "
                             "unit_of_measurement")
    parser.add_argument('-o',
                        '--override-measurement',
                        metavar='override_measurement',
                        default="",
                        help="Store all points in this single measurement, "
                             "ignoring unit_of_measurement")
    parser.add_argument('-e',
                        '--exclude_entities',
                        metavar='exclude_entities',
                        default="",
                        help="Comma separated list of excluded entities")
    parser.add_argument('-E',
                        '--exclude_domains',
                        metavar='exclude_domains',
                        default="",
                        help="Comma separated list of excluded domains")
    parser.add_argument(
        "-S",
        "--simulate",
        default=False,
        action="store_true",
        help=("Do not write points but simulate preprocessing and print "
              "statistics"))
    parser.add_argument('--script', choices=['influxdb_import'])

    args = parser.parse_args()
    simulate = args.simulate

    client = None
    if not simulate:
        client = InfluxDBClient(args.host, args.port, args.username,
                                args.password)
        client.switch_database(args.dbname)

    config_dir = os.path.join(os.getcwd(), args.config)  # type: str

    # Test if configuration directory exists
    if not os.path.isdir(config_dir):
        if config_dir != config_util.get_default_config_dir():
            print(('Fatal Error: Specified configuration directory does '
                   'not exist {} ').format(config_dir))
            return 1

    src_db = '{}/home-assistant_v2.db'.format(config_dir)

    if not os.path.exists(src_db) and not args.uri:
        print("Fatal Error: Database '{}' does not exist "
              "and no URI given".format(src_db))
        return 1

    uri = args.uri or 'sqlite:///{}'.format(src_db)
    engine = create_engine(uri, echo=False)
    session_factory = sessionmaker(bind=engine)
    session = session_factory()
    step = int(args.step)
    step_start = 0

    tags = {}
    if args.tags:
        tags.update(dict(elem.split(':') for elem in args.tags.split(',')))
    excl_entities = args.exclude_entities.split(',')
    excl_domains = args.exclude_domains.split(',')
    override_measurement = args.override_measurement
    default_measurement = args.default_measurement

    # pylint: disable=assignment-from-no-return
    query = session.query(func.count(models.Events.event_type)).filter(
        models.Events.event_type == 'state_changed')

    total_events = query.scalar()
    prefix_format = '{} of {}'

    points = []
    invalid_points = []
    count = 0
    from collections import defaultdict
    entities = defaultdict(int)
    print_progress(0, total_events, prefix_format.format(0, total_events))

    while True:

        step_stop = step_start + step
        if step_start > total_events:
            print_progress(total_events, total_events,
                           prefix_format.format(total_events, total_events))
            break
        query = session.query(models.Events).filter(
            models.Events.event_type == 'state_changed').order_by(
                models.Events.time_fired).slice(step_start, step_stop)

        for event in query:
            event_data = json.loads(event.event_data)

            if not ('entity_id' in event_data) or (
                    excl_entities and event_data['entity_id'] in excl_entities
            ) or (excl_domains
                  and event_data['entity_id'].split('.')[0] in excl_domains):
                session.expunge(event)
                continue

            try:
                state = State.from_dict(event_data.get('new_state'))
            except HomeAssistantError:
                invalid_points.append(event_data)
                continue

            if not state:
                invalid_points.append(event_data)
                continue

            try:
                _state = float(state_helper.state_as_number(state))
                _state_key = 'value'
            except ValueError:
                _state = state.state
                _state_key = 'state'

            if override_measurement:
                measurement = override_measurement
            else:
                measurement = state.attributes.get('unit_of_measurement')
                if measurement in (None, ''):
                    if default_measurement:
                        measurement = default_measurement
                    else:
                        measurement = state.entity_id

            point = {
                'measurement': measurement,
                'tags': {
                    'domain': state.domain,
                    'entity_id': state.object_id,
                },
                'time': event.time_fired,
                'fields': {
                    _state_key: _state,
                }
            }

            for key, value in state.attributes.items():
                if key != 'unit_of_measurement':
                    # If the key is already in fields
                    if key in point['fields']:
                        key = key + '_'
                    # Prevent column data errors in influxDB.
                    # For each value we try to cast it as float
                    # But if we can not do it we store the value
                    # as string add "_str" postfix to the field key
                    try:
                        point['fields'][key] = float(value)
                    except (ValueError, TypeError):
                        new_key = '{}_str'.format(key)
                        point['fields'][new_key] = str(value)

            entities[state.entity_id] += 1
            point['tags'].update(tags)
            points.append(point)
            session.expunge(event)

        if points:
            if not simulate:
                client.write_points(points)
            count += len(points)
            # This prevents the progress bar from going over 100% when
            # the last step happens
            print_progress((step_start + len(points)), total_events,
                           prefix_format.format(step_start, total_events))
        else:
            print_progress((step_start + step), total_events,
                           prefix_format.format(step_start, total_events))

        points = []
        step_start += step

    print("\nStatistics:")
    print("\n".join([
        "{:6}: {}".format(v, k)
        for k, v in sorted(entities.items(), key=lambda x: x[1])
    ]))
    print("\nInvalid Points: {}".format(len(invalid_points)))
    print("\nImport finished: {} points written".format(count))
    return 0
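
The attribute loop near the end of the importer applies a simple casting rule before writing to InfluxDB: values that coerce to float are stored as floats, anything else is stored as a string under a key with an '_str' suffix, and a key that collides with the state field gets a trailing underscore. A standalone sketch of that rule (the build_fields helper is invented for illustration):

def build_fields(state_key, state_value, attributes):
    """Apply the importer's casting rules to one state's attributes."""
    fields = {state_key: state_value}
    for key, value in attributes.items():
        if key == 'unit_of_measurement':
            continue
        # Avoid clobbering the state field itself.
        if key in fields:
            key = key + '_'
        # Floats go in directly; anything else becomes '<key>_str'.
        try:
            fields[key] = float(value)
        except (ValueError, TypeError):
            fields['{}_str'.format(key)] = str(value)
    return fields

print(build_fields('value', 21.5, {
    'unit_of_measurement': '°C',
    'battery_level': '87',
    'friendly_name': 'Living Room',
}))
# {'value': 21.5, 'battery_level': 87.0, 'friendly_name_str': 'Living Room'}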