Example #1
def _exclude_events(events, config):
    """Get lists of excluded entities and platforms."""
    excluded_entities = []
    excluded_domains = []
    exclude = config[DOMAIN].get(CONF_EXCLUDE)
    if exclude:
        excluded_entities = exclude[CONF_ENTITIES]
        excluded_domains = exclude[CONF_DOMAINS]

    filtered_events = []
    for event in events:
        if event.event_type == EVENT_STATE_CHANGED:
            to_state = State.from_dict(event.data.get('new_state'))
            # Do not report on new entities
            if not to_state:
                continue

            # exclude entities which are customized hidden
            hidden = to_state.attributes.get(ATTR_HIDDEN, False)
            if hidden:
                continue

            domain = to_state.domain
            # check if logbook entry is excluded for this domain
            if domain in excluded_domains:
                continue
            # check if logbook entry is excluded for this entity
            if to_state.entity_id in excluded_entities:
                continue
        filtered_events.append(event)
    return filtered_events
Example #2
    def _event_receiver(topic, payload, qos):
        """
        Receive events published by the other HA instance and fire
        them on this hass instance.
        """
        event = json.loads(payload)
        event_type = event.get('event_type')
        event_data = event.get('event_data')

        # Special case handling for event STATE_CHANGED
        # We will try to convert state dicts back to State objects
        # Copied over from the _handle_api_post_events_event method
        # of the api component.
        if event_type == EVENT_STATE_CHANGED and event_data:
            for key in ('old_state', 'new_state'):
                state = State.from_dict(event_data.get(key))

                if state:
                    event_data[key] = state

        hass.bus.fire(
            event_type,
            event_data=event_data,
            origin=EventOrigin.remote
        )
Example #3
async def test_startstop_vacuum(hass):
    """Test startStop trait support for vacuum domain."""
    assert trait.StartStopTrait.supported(vacuum.DOMAIN, 0, None)

    trt = trait.StartStopTrait(
        hass,
        State('vacuum.bla', vacuum.STATE_PAUSED, {
            ATTR_SUPPORTED_FEATURES: vacuum.SUPPORT_PAUSE,
        }), BASIC_CONFIG)

    assert trt.sync_attributes() == {'pausable': True}

    assert trt.query_attributes() == {'isRunning': False, 'isPaused': True}

    start_calls = async_mock_service(hass, vacuum.DOMAIN, vacuum.SERVICE_START)
    await trt.execute(trait.COMMAND_STARTSTOP, BASIC_DATA, {'start': True})
    assert len(start_calls) == 1
    assert start_calls[0].data == {
        ATTR_ENTITY_ID: 'vacuum.bla',
    }

    stop_calls = async_mock_service(hass, vacuum.DOMAIN, vacuum.SERVICE_STOP)
    await trt.execute(trait.COMMAND_STARTSTOP, BASIC_DATA, {'start': False})
    assert len(stop_calls) == 1
    assert stop_calls[0].data == {
        ATTR_ENTITY_ID: 'vacuum.bla',
    }

    pause_calls = async_mock_service(hass, vacuum.DOMAIN, vacuum.SERVICE_PAUSE)
    await trt.execute(trait.COMMAND_PAUSEUNPAUSE, BASIC_DATA, {'pause': True})
    assert len(pause_calls) == 1
    assert pause_calls[0].data == {
        ATTR_ENTITY_ID: 'vacuum.bla',
    }

    unpause_calls = async_mock_service(hass, vacuum.DOMAIN,
                                       vacuum.SERVICE_START)
    await trt.execute(trait.COMMAND_PAUSEUNPAUSE, BASIC_DATA, {'pause': False})
    assert len(unpause_calls) == 1
    assert unpause_calls[0].data == {
        ATTR_ENTITY_ID: 'vacuum.bla',
    }
Example #4
async def test_scene_script(hass):
    """Test Scene trait support for script domain."""
    assert helpers.get_google_type(script.DOMAIN, None) is not None
    assert trait.SceneTrait.supported(script.DOMAIN, 0, None)

    trt = trait.SceneTrait(hass, State('script.bla', STATE_OFF), BASIC_CONFIG)
    assert trt.sync_attributes() == {}
    assert trt.query_attributes() == {}
    assert trt.can_execute(trait.COMMAND_ACTIVATE_SCENE, {})

    calls = async_mock_service(hass, script.DOMAIN, SERVICE_TURN_ON)
    await trt.execute(trait.COMMAND_ACTIVATE_SCENE, BASIC_DATA, {}, {})

    # We don't wait till script execution is done.
    await hass.async_block_till_done()

    assert len(calls) == 1
    assert calls[0].data == {
        ATTR_ENTITY_ID: 'script.bla',
    }
Example #5
async def test_brightness_light(hass):
    """Test brightness trait support for light domain."""
    assert trait.BrightnessTrait.supported(light.DOMAIN,
                                           light.SUPPORT_BRIGHTNESS)

    trt = trait.BrightnessTrait(
        hass, State('light.bla', light.STATE_ON, {light.ATTR_BRIGHTNESS: 243}),
        BASIC_CONFIG)

    assert trt.sync_attributes() == {}

    assert trt.query_attributes() == {'brightness': 95}

    calls = async_mock_service(hass, light.DOMAIN, light.SERVICE_TURN_ON)
    await trt.execute(trait.COMMAND_BRIGHTNESS_ABSOLUTE, {'brightness': 50})
    assert len(calls) == 1
    assert calls[0].data == {
        ATTR_ENTITY_ID: 'light.bla',
        light.ATTR_BRIGHTNESS_PCT: 50
    }
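A minimal stdlib sketch of the 0-255 to percent conversion that the 243 -> 95 assertion above relies on, assuming simple rounding; this is an illustration, not the trait's actual implementation:

def brightness_to_percent(value):
    """Map a Home Assistant 0-255 brightness to a 0-100 percentage."""
    return round(100 * value / 255)

assert brightness_to_percent(243) == 95
assert brightness_to_percent(255) == 100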
Example #6
def mock_restore_cache(hass, states):
    """Mock the DATA_RESTORE_CACHE."""
    key = restore_state.DATA_RESTORE_STATE_TASK
    data = restore_state.RestoreStateData(hass)
    now = date_util.utcnow()

    last_states = {}
    for state in states:
        restored_state = state.as_dict()
        restored_state["attributes"] = json.loads(
            json.dumps(restored_state["attributes"], cls=JSONEncoder)
        )
        last_states[state.entity_id] = restore_state.StoredState(
            State.from_dict(restored_state), now
        )
    data.last_states = last_states
    _LOGGER.debug("Restore cache: %s", data.last_states)
    assert len(data.last_states) == len(states), f"Duplicate entity_id? {states}"

    hass.data[key] = data
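The json.dumps/json.loads round-trip above is what forces restored attributes into plain JSON types, mimicking a reload from real storage; a stdlib-only sketch of the effect (the custom JSONEncoder is omitted here):

import json

# Tuples and other non-JSON-native values come back as plain JSON types,
# which is what a real restore-state reload would hand the entity.
attributes = {"options": ("low", "high"), "level": 3}
round_tripped = json.loads(json.dumps(attributes))
assert round_tripped == {"options": ["low", "high"], "level": 3}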
Example #7
    def test_state_changed_event_sends_message(self, mock_utcnow, mock_pub):
        """Test the sending of a new message if event changed."""
        e_id = "fake.entity"
        base_topic = "pub"

        # Add the statestream component for publishing state updates
        assert self.add_statestream(base_topic=base_topic)
        self.hass.block_till_done()

        # Reset the mock because it will have already gotten calls for the
        # mqtt_statestream state change on initialization, etc.
        mock_pub.reset_mock()

        # Set a state of an entity
        mock_state_change_event(self.hass, State(e_id, "on"))
        self.hass.block_till_done()

        # Make sure 'on' was published to pub/fake/entity/state
        mock_pub.assert_called_with(self.hass, "pub/fake/entity/state", "on", 1, True)
        assert mock_pub.called
Example #8
async def test_restore_state_last_off(hass):
    """Test restoring state when the last state is off."""
    mock_restore_cache(hass, [State("switch.flux", "off")])

    assert await async_setup_component(
        hass,
        "switch",
        {
            "switch": {
                "platform": "flux",
                "name": "flux",
                "lights": ["light.desk", "light.lamp"],
            }
        },
    )
    await hass.async_block_till_done()

    state = hass.states.get("switch.flux")
    assert state
    assert state.state == "off"
Example #9
    def _event_receiver(topic, payload, qos):
        """Receive events published by and fire them on this hass instance."""
        event = json.loads(payload)
        event_type = event.get('event_type')
        event_data = event.get('event_data')

        # Special case handling for event STATE_CHANGED
        # We will try to convert state dicts back to State objects
        # Copied over from the _handle_api_post_events_event method
        # of the api component.
        if event_type == EVENT_STATE_CHANGED and event_data:
            for key in ('old_state', 'new_state'):
                state = State.from_dict(event_data.get(key))

                if state:
                    event_data[key] = state

        hass.bus.async_fire(event_type,
                            event_data=event_data,
                            origin=EventOrigin.remote)
Example #10
async def test_lock_unlock_lock(hass):
    """Test LockUnlock trait locking support for lock domain."""
    assert trait.LockUnlockTrait.supported(lock.DOMAIN, lock.SUPPORT_OPEN,
                                           None)

    trt = trait.LockUnlockTrait(hass,
                                State('lock.front_door', lock.STATE_UNLOCKED),
                                BASIC_CONFIG)

    assert trt.sync_attributes() == {}

    assert trt.query_attributes() == {'isLocked': False}

    assert trt.can_execute(trait.COMMAND_LOCKUNLOCK, {'lock': True})

    calls = async_mock_service(hass, lock.DOMAIN, lock.SERVICE_LOCK)
    await trt.execute(trait.COMMAND_LOCKUNLOCK, BASIC_DATA, {'lock': True})

    assert len(calls) == 1
    assert calls[0].data == {ATTR_ENTITY_ID: 'lock.front_door'}
Example #11
async def test_attribute_no_state(hass):
    """Test that no state service call is made with none state."""
    calls_1 = async_mock_service(hass, DOMAIN, SERVICE_TURN_ON)
    calls_2 = async_mock_service(hass, DOMAIN, SERVICE_TURN_OFF)
    calls_3 = async_mock_service(hass, DOMAIN, SERVICE_SET_OPERATION_MODE)

    value = "dummy"

    await async_reproduce_states(
        hass, [State(ENTITY_1, None, {ATTR_OPERATION_MODE: value})])

    await hass.async_block_till_done()

    assert len(calls_1) == 0
    assert len(calls_2) == 0
    assert len(calls_3) == 1
    assert calls_3[0].data == {
        'entity_id': ENTITY_1,
        ATTR_OPERATION_MODE: value
    }
Example #12
async def test_reproducing_states(hass, caplog):
    """Test reproducing NEW_NAME states."""
    hass.states.async_set("NEW_DOMAIN.entity_off", "off", {})
    hass.states.async_set("NEW_DOMAIN.entity_on", "on", {"color": "red"})

    turn_on_calls = async_mock_service(hass, "NEW_DOMAIN", "turn_on")
    turn_off_calls = async_mock_service(hass, "NEW_DOMAIN", "turn_off")

    # These calls should do nothing as entities already in desired state
    await hass.helpers.state.async_reproduce_state(
        [
            State("NEW_DOMAIN.entity_off", "off"),
            State("NEW_DOMAIN.entity_on", "on", {"color": "red"}),
        ],
        blocking=True,
    )

    assert len(turn_on_calls) == 0
    assert len(turn_off_calls) == 0

    # Test invalid state is handled
    await hass.helpers.state.async_reproduce_state(
        [State("NEW_DOMAIN.entity_off", "not_supported")], blocking=True)

    assert "not_supported" in caplog.text
    assert len(turn_on_calls) == 0
    assert len(turn_off_calls) == 0

    # Make sure correct services are called
    await hass.helpers.state.async_reproduce_state(
        [
            State("NEW_DOMAIN.entity_on", "off"),
            State("NEW_DOMAIN.entity_off", "on", {"color": "red"}),
            # Should not raise
            State("NEW_DOMAIN.non_existing", "on"),
        ],
        blocking=True,
    )

    assert len(turn_on_calls) == 1
    assert turn_on_calls[0].domain == "NEW_DOMAIN"
    assert turn_on_calls[0].data == {
        "entity_id": "NEW_DOMAIN.entity_off",
        "color": "red",
    }

    assert len(turn_off_calls) == 1
    assert turn_off_calls[0].domain == "NEW_DOMAIN"
    assert turn_off_calls[0].data == {"entity_id": "NEW_DOMAIN.entity_on"}
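A hedged sketch of the decision the assertions above rely on, namely that a service is only called when the desired state or attributes differ from what the state machine already holds; the function name and return values are illustrative, not the helper's real API:

def service_for(current_state, current_attrs, target_state, target_attrs):
    """Pick the service a reproduce-state call would need, if any."""
    if current_state == target_state and current_attrs == target_attrs:
        return None  # already in the desired state, nothing to call
    return "turn_on" if target_state == "on" else "turn_off"

assert service_for("off", {}, "off", {}) is None
assert service_for("off", {}, "on", {"color": "red"}) == "turn_on"
assert service_for("on", {"color": "red"}, "off", {}) == "turn_off"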
Example #13
def test_restore_state(hass):
    """Ensure states are restored on startup."""
    mock_restore_cache(
        hass,
        (State('climate.test_thermostat', '0', {ATTR_TEMPERATURE: "20"}), ))

    hass.state = CoreState.starting

    yield from async_setup_component(
        hass, climate.DOMAIN, {
            'climate': {
                'platform': 'generic_thermostat',
                'name': 'test_thermostat',
                'heater': ENT_SWITCH,
                'target_sensor': ENT_SENSOR,
            }
        })

    state = hass.states.get('climate.test_thermostat')
    assert (state.attributes[ATTR_TEMPERATURE] == 20)
Example #14
async def test_dock_vacuum(hass):
    """Test dock trait support for vacuum domain."""
    assert trait.DockTrait.supported(vacuum.DOMAIN, 0)

    trt = trait.DockTrait(hass, State('vacuum.bla', vacuum.STATE_IDLE),
                          BASIC_CONFIG)

    assert trt.sync_attributes() == {}

    assert trt.query_attributes() == {
        'isDocked': False
    }

    calls = async_mock_service(hass, vacuum.DOMAIN,
                               vacuum.SERVICE_RETURN_TO_BASE)
    await trt.execute(trait.COMMAND_DOCK, {})
    assert len(calls) == 1
    assert calls[0].data == {
        ATTR_ENTITY_ID: 'vacuum.bla',
    }
Example #15
async def test_list_google_entities(hass, hass_ws_client, setup_api, mock_cloud_login):
    """Test that we can list Google entities."""
    client = await hass_ws_client(hass)
    entity = GoogleEntity(
        hass, MockConfig(should_expose=lambda *_: False), State("light.kitchen", "on")
    )
    with patch(
        "homeassistant.components.google_assistant.helpers" ".async_get_entities",
        return_value=[entity],
    ):
        await client.send_json({"id": 5, "type": "cloud/google_assistant/entities"})
        response = await client.receive_json()

    assert response["success"]
    assert len(response["result"]) == 1
    assert response["result"][0] == {
        "entity_id": "light.kitchen",
        "might_2fa": False,
        "traits": ["action.devices.traits.OnOff"],
    }
Example #16
async def test_list_alexa_entities(hass, hass_ws_client, setup_api, mock_cloud_login):
    """Test that we can list Alexa entities."""
    client = await hass_ws_client(hass)
    entity = LightCapabilities(
        hass, MagicMock(entity_config={}), State("light.kitchen", "on")
    )
    with patch(
        "homeassistant.components.alexa.entities" ".async_get_entities",
        return_value=[entity],
    ):
        await client.send_json({"id": 5, "type": "cloud/alexa/entities"})
        response = await client.receive_json()

    assert response["success"]
    assert len(response["result"]) == 1
    assert response["result"][0] == {
        "entity_id": "light.kitchen",
        "display_categories": ["LIGHT"],
        "interfaces": ["Alexa.PowerController", "Alexa.EndpointHealth"],
    }
Example #17
async def test_state_with_context(hass):
    """Test that context is forwarded."""
    hass.states.async_set(
        ENTITY_1,
        "something",
        {ATTR_SUPPORTED_FEATURES: MediaPlayerEntityFeature.TURN_ON},
    )

    calls = async_mock_service(hass, DOMAIN, SERVICE_TURN_ON)

    context = Context()

    await async_reproduce_states(hass, [State(ENTITY_1, "on")],
                                 context=context)

    await hass.async_block_till_done()

    assert len(calls) == 1
    assert calls[0].data == {"entity_id": ENTITY_1}
    assert calls[0].context == context
Example #18
async def test_capability_custom_range_random_access(hass):
    state = State('switch.test', '30', {})
    hass.states.async_set(state.entity_id, state.state)
    cap = CustomRangeCapability(
        hass, BASIC_CONFIG, state, 'test_range', {
            const.CONF_ENTITY_CUSTOM_CAPABILITY_STATE_ENTITY_ID:
            state.entity_id,
            const.CONF_ENTITY_RANGE: {
                const.CONF_ENTITY_RANGE_MIN: 10,
                const.CONF_ENTITY_RANGE_MAX: 50,
                const.CONF_ENTITY_RANGE_PRECISION: 3,
            },
            const.CONF_ENTITY_CUSTOM_RANGE_SET_VALUE: {
                CONF_SERVICE: 'test.set_value',
                ATTR_ENTITY_ID: 'input_number.test',
                CONF_SERVICE_DATA: {
                    'value': dynamic_template('value: {{ value|int }}')
                }
            },
        })
    assert cap.supported()
    assert cap.retrievable
    assert cap.support_random_access
    assert cap.get_value() == 30

    calls = async_mock_service(hass, 'test', 'set_value')
    await cap.set_state(BASIC_DATA, {'value': 40})
    await cap.set_state(BASIC_DATA, {'value': 100})
    await cap.set_state(BASIC_DATA, {'value': 10, 'relative': True})
    await cap.set_state(BASIC_DATA, {'value': -3, 'relative': True})
    await cap.set_state(BASIC_DATA, {'value': -50, 'relative': True})

    assert len(calls) == 5
    for i in range(0, len(calls)):
        assert calls[i].data[ATTR_ENTITY_ID] == 'input_number.test'

    assert calls[0].data['value'] == 'value: 40'
    assert calls[1].data['value'] == 'value: 100'
    assert calls[2].data['value'] == 'value: 40'
    assert calls[3].data['value'] == 'value: 27'
    assert calls[4].data['value'] == 'value: 10'
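One reading of the expected numbers above: relative deltas are applied to the current value (30) and clamped to the configured 10-50 range, while absolute values pass through unchanged. A small sketch of that arithmetic, an assumption drawn from the assertions rather than from the component's code:

def resolve_value(current, value, relative, range_min=10, range_max=50):
    """Resolve an absolute or relative range command to a target value."""
    if not relative:
        return value
    return min(max(current + value, range_min), range_max)

assert resolve_value(30, 40, relative=False) == 40
assert resolve_value(30, 100, relative=False) == 100
assert resolve_value(30, 10, relative=True) == 40
assert resolve_value(30, -3, relative=True) == 27
assert resolve_value(30, -50, relative=True) == 10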
Example #19
async def test_temperature_setting_climate_onoff(hass):
    """Test TemperatureSetting trait support for climate domain - range."""
    assert helpers.get_google_type(climate.DOMAIN, None) is not None
    assert trait.TemperatureSettingTrait.supported(climate.DOMAIN, 0, None)

    hass.config.units.temperature_unit = TEMP_FAHRENHEIT

    trt = trait.TemperatureSettingTrait(
        hass,
        State(
            'climate.bla', climate.HVAC_MODE_AUTO, {
                ATTR_SUPPORTED_FEATURES:
                climate.SUPPORT_TARGET_TEMPERATURE_RANGE,
                climate.ATTR_HVAC_MODES: [
                    climate.HVAC_MODE_OFF,
                    climate.HVAC_MODE_COOL,
                    climate.HVAC_MODE_HEAT,
                    climate.HVAC_MODE_HEAT_COOL,
                ],
                climate.ATTR_MIN_TEMP: None,
                climate.ATTR_MAX_TEMP: None,
            }), BASIC_CONFIG)
    assert trt.sync_attributes() == {
        'availableThermostatModes': 'off,cool,heat,heatcool,on',
        'thermostatTemperatureUnit': 'F',
    }
    assert trt.can_execute(trait.COMMAND_THERMOSTAT_SET_MODE, {})

    calls = async_mock_service(hass, climate.DOMAIN, SERVICE_TURN_ON)
    await trt.execute(trait.COMMAND_THERMOSTAT_SET_MODE, BASIC_DATA, {
        'thermostatMode': 'on',
    }, {})
    assert len(calls) == 1

    calls = async_mock_service(hass, climate.DOMAIN, SERVICE_TURN_OFF)
    await trt.execute(trait.COMMAND_THERMOSTAT_SET_MODE, BASIC_DATA, {
        'thermostatMode': 'off',
    }, {})
    assert len(calls) == 1
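The expected 'off,cool,heat,heatcool,on' string suggests the HVAC modes are joined into a comma-separated Google mode list, with heat_cool renamed to heatcool and an extra on mode offered alongside off. A sketch of that mapping, inferred from the assertion rather than from the trait's source:

def google_thermostat_modes(hvac_modes):
    """Build the availableThermostatModes string from HVAC modes."""
    modes = ['heatcool' if mode == 'heat_cool' else mode
             for mode in hvac_modes]
    if 'off' in modes:
        modes.append('on')
    return ','.join(modes)

assert google_thermostat_modes(
    ['off', 'cool', 'heat', 'heat_cool']) == 'off,cool,heat,heatcool,on'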
Example #20
async def test_trigger_entity_restore_state_auto_off(hass, count, domain,
                                                     config, restored_state,
                                                     freezer):
    """Test restoring trigger template binary sensor."""

    freezer.move_to("2022-02-02 12:02:00+00:00")
    fake_state = State(
        "binary_sensor.test",
        restored_state,
        {},
    )
    fake_extra_data = {
        "auto_off_time": {
            "__type":
            "<class 'datetime.datetime'>",
            "isoformat":
            datetime(2022, 2, 2, 12, 2, 2, tzinfo=timezone.utc).isoformat(),
        },
    }
    mock_restore_cache_with_extra_data(hass, ((fake_state, fake_extra_data), ))
    with assert_setup_component(count, domain):
        assert await async_setup_component(
            hass,
            domain,
            config,
        )

        await hass.async_block_till_done()
        await hass.async_start()
        await hass.async_block_till_done()

    state = hass.states.get("binary_sensor.test")
    assert state.state == restored_state

    # Now wait for the auto-off
    freezer.move_to("2022-02-02 12:02:03+00:00")
    await hass.async_block_till_done()
    await hass.async_block_till_done()

    state = hass.states.get("binary_sensor.test")
    assert state.state == OFF
Example #21
    def test_wrong_ignored_event_sends_over_stream(self, mock_pub):
        """Test the ignoring of sending events if defined."""
        assert self.add_eventstream(pub_topic="bar",
                                    ignore_event=["statee_changed"])
        self.hass.block_till_done()

        e_id = "entity.test_id"
        event = {}
        event["event_type"] = EVENT_STATE_CHANGED
        new_state = {"state": "on", "entity_id": e_id, "attributes": {}}
        event["event_data"] = {"new_state": new_state, "entity_id": e_id}

        # Reset the mock because it will have already gotten calls for the
        # mqtt_eventstream state change on initialization, etc.
        mock_pub.reset_mock()

        # Set a state of an entity
        mock_state_change_event(self.hass, State(e_id, "on"))
        self.hass.block_till_done()

        assert mock_pub.called
Example #22
async def test_restore_disarmed_state(hass):
    """Ensure disarmed state is restored on startup."""
    mock_restore_cache(hass, (
        State('alarm_control_panel.test', STATE_ALARM_DISARMED),
        ))

    hass.state = CoreState.starting
    mock_component(hass, 'recorder')

    assert await async_setup_component(hass, alarm_control_panel.DOMAIN, {
        'alarm_control_panel': {
            'platform': 'manual',
            'name': 'test',
            'pending_time': 0,
            'trigger_time': 0,
            'disarm_after_trigger': False
        }})

    state = hass.states.get('alarm_control_panel.test')
    assert state
    assert state.state == STATE_ALARM_DISARMED
Example #23
async def test_supported_features_ignore_cache(hass, client):
    """Test ignore cached supported features if device is on at startup."""
    mock_restore_cache(
        hass,
        [
            State(
                ENTITY_ID,
                STATE_OFF,
                attributes={
                    ATTR_SUPPORTED_FEATURES: SUPPORT_WEBOSTV | SUPPORT_WEBOSTV_VOLUME,
                },
            )
        ],
    )
    await setup_webostv(hass)

    supported = SUPPORT_WEBOSTV | SUPPORT_WEBOSTV_VOLUME | SUPPORT_VOLUME_SET
    attrs = hass.states.get(ENTITY_ID).attributes

    assert attrs[ATTR_SUPPORTED_FEATURES] == supported
Example #24
async def test_state_changed_event_sends_message(hass, mqtt_mock):
    """Test the sending of a new message if event changed."""
    e_id = "fake.entity"
    base_topic = "pub"

    # Add the statestream component for publishing state updates
    assert await add_statestream(hass, base_topic=base_topic)
    await hass.async_block_till_done()

    # Reset the mock because it will have already gotten calls for the
    # mqtt_statestream state change on initialization, etc.
    mqtt_mock.async_publish.reset_mock()

    # Set a state of an entity
    mock_state_change_event(hass, State(e_id, "on"))
    await hass.async_block_till_done()
    await hass.async_block_till_done()

    # Make sure 'on' was published to pub/fake/entity/state
    mqtt_mock.async_publish.assert_called_with("pub/fake/entity/state", "on", 1, True)
    assert mqtt_mock.async_publish.called
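A minimal sketch of the topic layout the assertion above checks, base_topic/domain/object_id/state; the helper name here is illustrative:

def state_topic(base_topic, entity_id):
    """Build the statestream state topic for an entity."""
    domain, object_id = entity_id.split(".", 1)
    return f"{base_topic}/{domain}/{object_id}/state"

assert state_topic("pub", "fake.entity") == "pub/fake/entity/state"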
Example #25
async def test_no_initial_value_and_restore_off(hass):
    """Test initial value off and restored state is turned on."""
    calls = async_mock_service(hass, "test", "automation")
    mock_restore_cache(hass, (State("automation.hello", STATE_OFF),))

    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "alias": "hello",
                "trigger": {"platform": "event", "event_type": "test_event"},
                "action": {"service": "test.automation", "entity_id": "hello.world"},
            }
        },
    )
    assert not automation.is_on(hass, "automation.hello")

    hass.bus.async_fire("test_event")
    await hass.async_block_till_done()
    assert len(calls) == 0
Example #26
async def test_restore_state_fan(hass):
    """Run test for fan restore state."""

    fan_name = "test_fan"
    entity_id = f"{FAN_DOMAIN}.{fan_name}"
    test_value = STATE_ON
    config_fan = {CONF_NAME: fan_name, CONF_ADDRESS: 17}
    mock_restore_cache(
        hass,
        (State(f"{entity_id}", test_value), ),
    )
    await base_config_test(
        hass,
        config_fan,
        fan_name,
        FAN_DOMAIN,
        CONF_FANS,
        None,
        method_discovery=True,
    )
    assert hass.states.get(entity_id).state == test_value
Example #27
async def test_restore_state_binary_sensor(hass):
    """Run test for binary sensor restore state."""

    sensor_name = "test_binary_sensor"
    test_value = STATE_ON
    config_sensor = {CONF_NAME: sensor_name, CONF_ADDRESS: 17}
    mock_restore_cache(
        hass,
        (State(f"{SENSOR_DOMAIN}.{sensor_name}", test_value),),
    )
    await base_config_test(
        hass,
        config_sensor,
        sensor_name,
        SENSOR_DOMAIN,
        CONF_BINARY_SENSORS,
        None,
        method_discovery=True,
    )
    entity_id = f"{SENSOR_DOMAIN}.{sensor_name}"
    assert hass.states.get(entity_id).state == test_value
Example #28
async def test_google_config_expose_entity_prefs(mock_conf, cloud_prefs):
    """Test Google config should expose using prefs."""
    entity_conf = {"should_expose": False}
    await cloud_prefs.async_update(
        google_entity_configs={"light.kitchen": entity_conf},
        google_default_expose=["light"],
    )

    state = State("light.kitchen", "on")

    assert not mock_conf.should_expose(state)
    entity_conf["should_expose"] = True
    assert mock_conf.should_expose(state)

    entity_conf["should_expose"] = None
    assert mock_conf.should_expose(state)

    await cloud_prefs.async_update(
        google_default_expose=["sensor"],
    )
    assert not mock_conf.should_expose(state)
Example #29
async def test_state_restore(hass, rfxtrx, state):
    """State restoration."""

    entity_id = "cover.lightwaverf_siemens_0213c7_242"

    mock_restore_cache(hass, [State(entity_id, state)])

    entry_data = create_rfx_test_cfg(
        devices={"0b1400cd0213c7f20d010f51": {
            "signal_repetitions": 1
        }})
    mock_entry = MockConfigEntry(domain="rfxtrx",
                                 unique_id=DOMAIN,
                                 data=entry_data)

    mock_entry.add_to_hass(hass)

    await hass.config_entries.async_setup(mock_entry.entry_id)
    await hass.async_block_till_done()

    assert hass.states.get(entity_id).state == state
Example #30
async def test_restore_state(hass, zha_device_restored, zigpy_shade_device):
    """Ensure states are restored on startup."""

    mock_restore_cache(
        hass,
        (State(
            "cover.fakemanufacturer_fakemodel_e769900a_level_on_off_shade",
            STATE_OPEN,
            {ATTR_CURRENT_POSITION: 50},
        ), ),
    )

    hass.state = CoreState.starting

    zha_device = await zha_device_restored(zigpy_shade_device)
    entity_id = await find_entity_id(DOMAIN, zha_device, hass)
    assert entity_id is not None

    # test that the cover was created and that it is unavailable
    assert hass.states.get(entity_id).state == STATE_OPEN
    assert hass.states.get(entity_id).attributes[ATTR_CURRENT_POSITION] == 50
Example #31
async def test_state_restore(hass, rfxtrx, state, brightness):
    """State restoration."""

    entity_id = "light.ac_213c7f2_16"

    mock_restore_cache(
        hass, [State(entity_id, state, attributes={ATTR_BRIGHTNESS: brightness})]
    )

    entry_data = create_rfx_test_cfg(
        devices={"0b1100cd0213c7f210020f51": {"signal_repetitions": 1}}
    )
    mock_entry = MockConfigEntry(domain="rfxtrx", unique_id=DOMAIN, data=entry_data)

    mock_entry.add_to_hass(hass)

    await hass.config_entries.async_setup(mock_entry.entry_id)
    await hass.async_block_till_done()

    assert hass.states.get(entity_id).state == state
    assert hass.states.get(entity_id).attributes.get(ATTR_BRIGHTNESS) == brightness
Example #32
async def test_restore_state_switch(hass):
    """Run test for sensor restore state."""

    switch_name = "test_switch"
    entity_id = f"{SWITCH_DOMAIN}.{switch_name}"
    test_value = STATE_ON
    config_switch = {CONF_NAME: switch_name, CONF_ADDRESS: 17}
    mock_restore_cache(
        hass,
        (State(f"{entity_id}", test_value), ),
    )
    await base_config_test(
        hass,
        config_switch,
        switch_name,
        SWITCH_DOMAIN,
        CONF_SWITCHES,
        None,
        method_discovery=True,
    )
    assert hass.states.get(entity_id).state == test_value
Example #33
def humanify(events):
    """Generate a converted list of events into Entry objects.

    Will try to group events if possible:
    - if 2+ sensor updates in GROUP_BY_MINUTES, show last
    - if home assistant stop and start happen in same minute call it restarted
    """
    # Group events in batches of GROUP_BY_MINUTES
    for _, g_events in groupby(
            events,
            lambda event: event.time_fired.minute // GROUP_BY_MINUTES):

        events_batch = list(g_events)

        # Keep track of last sensor states
        last_sensor_event = {}

        # Group HA start/stop events
        # Maps minute of event to 1: stop, 2: stop + start
        start_stop_events = {}

        # Process events
        for event in events_batch:
            if event.event_type == EVENT_STATE_CHANGED:
                entity_id = event.data.get('entity_id')

                if entity_id is None:
                    continue

                if entity_id.startswith(tuple('{}.'.format(
                        domain) for domain in CONTINUOUS_DOMAINS)):
                    last_sensor_event[entity_id] = event

            elif event.event_type == EVENT_HOMEASSISTANT_STOP:
                if event.time_fired.minute in start_stop_events:
                    continue

                start_stop_events[event.time_fired.minute] = 1

            elif event.event_type == EVENT_HOMEASSISTANT_START:
                if event.time_fired.minute not in start_stop_events:
                    continue

                start_stop_events[event.time_fired.minute] = 2

        # Yield entries
        for event in events_batch:
            if event.event_type == EVENT_STATE_CHANGED:

                to_state = State.from_dict(event.data.get('new_state'))

                # If last_changed != last_updated only attributes have changed
                # we do not report on that yet. Also filter auto groups.
                if not to_state or \
                   to_state.last_changed != to_state.last_updated or \
                   to_state.domain == 'group' and \
                   to_state.attributes.get('auto', False):
                    continue

                domain = to_state.domain

                # Skip all but the last sensor state
                if domain in CONTINUOUS_DOMAINS and \
                   event != last_sensor_event[to_state.entity_id]:
                    continue

                # Don't show continuous sensor value changes in the logbook
                if domain in CONTINUOUS_DOMAINS and \
                   to_state.attributes.get('unit_of_measurement'):
                    continue

                yield Entry(
                    event.time_fired,
                    name=to_state.name,
                    message=_entry_message_from_state(domain, to_state),
                    domain=domain,
                    entity_id=to_state.entity_id)

            elif event.event_type == EVENT_HOMEASSISTANT_START:
                if start_stop_events.get(event.time_fired.minute) == 2:
                    continue

                yield Entry(
                    event.time_fired, "Home Assistant", "started",
                    domain=HA_DOMAIN)

            elif event.event_type == EVENT_HOMEASSISTANT_STOP:
                if start_stop_events.get(event.time_fired.minute) == 2:
                    action = "restarted"
                else:
                    action = "stopped"

                yield Entry(
                    event.time_fired, "Home Assistant", action,
                    domain=HA_DOMAIN)

            elif event.event_type == EVENT_LOGBOOK_ENTRY:
                domain = event.data.get(ATTR_DOMAIN)
                entity_id = event.data.get(ATTR_ENTITY_ID)
                if domain is None and entity_id is not None:
                    try:
                        domain = split_entity_id(str(entity_id))[0]
                    except IndexError:
                        pass

                yield Entry(
                    event.time_fired, event.data.get(ATTR_NAME),
                    event.data.get(ATTR_MESSAGE), domain,
                    entity_id)
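A stdlib sketch of the minute bucketing used by the groupby() call above; itertools.groupby only merges consecutive items, so events are assumed to arrive sorted by time_fired, and the GROUP_BY_MINUTES value here is illustrative:

from datetime import datetime
from itertools import groupby

GROUP_BY_MINUTES = 15

times = [datetime(2023, 1, 1, 8, minute) for minute in (1, 5, 14, 16, 29, 31)]
buckets = [
    (key, [t.minute for t in grouped])
    for key, grouped in groupby(times, lambda t: t.minute // GROUP_BY_MINUTES)
]
assert buckets == [(0, [1, 5, 14]), (1, [16, 29]), (2, [31])]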
Example #34
def run(script_args: List) -> int:
    """Run the actual script."""
    from sqlalchemy import create_engine
    from sqlalchemy import func
    from sqlalchemy.orm import sessionmaker
    from influxdb import InfluxDBClient
    from homeassistant.components.recorder import models
    from homeassistant.helpers import state as state_helper
    from homeassistant.core import State
    from homeassistant.core import HomeAssistantError

    parser = argparse.ArgumentParser(
        description="import data to influxDB.")
    parser.add_argument(
        '-c', '--config',
        metavar='path_to_config_dir',
        default=config_util.get_default_config_dir(),
        help="Directory that contains the Home Assistant configuration")
    parser.add_argument(
        '--uri',
        type=str,
        help="Connect to URI and import (if other than default sqlite) "
             "eg: mysql://localhost/homeassistant")
    parser.add_argument(
        '-d', '--dbname',
        metavar='dbname',
        required=True,
        help="InfluxDB database name")
    parser.add_argument(
        '-H', '--host',
        metavar='host',
        default='127.0.0.1',
        help="InfluxDB host address")
    parser.add_argument(
        '-P', '--port',
        metavar='port',
        default=8086,
        help="InfluxDB host port")
    parser.add_argument(
        '-u', '--username',
        metavar='username',
        default='root',
        help="InfluxDB username")
    parser.add_argument(
        '-p', '--password',
        metavar='password',
        default='root',
        help="InfluxDB password")
    parser.add_argument(
        '-s', '--step',
        metavar='step',
        default=1000,
        help="How many points to import at the same time")
    parser.add_argument(
        '-t', '--tags',
        metavar='tags',
        default="",
        help="Comma separated list of tags (key:value) for all points")
    parser.add_argument(
        '-D', '--default-measurement',
        metavar='default_measurement',
        default="",
        help="Measurement to use when an entity has no unit of measurement")
    parser.add_argument(
        '-o', '--override-measurement',
        metavar='override_measurement',
        default="",
        help="Store all your points in the same measurement")
    parser.add_argument(
        '-e', '--exclude_entities',
        metavar='exclude_entities',
        default="",
        help="Comma separated list of excluded entities")
    parser.add_argument(
        '-E', '--exclude_domains',
        metavar='exclude_domains',
        default="",
        help="Comma separated list of excluded domains")
    parser.add_argument(
        "-S", "--simulate",
        default=False,
        action="store_true",
        help=("Do not write points but simulate preprocessing and print "
              "statistics"))
    parser.add_argument(
        '--script',
        choices=['influxdb_import'])

    args = parser.parse_args()
    simulate = args.simulate

    client = None
    if not simulate:
        client = InfluxDBClient(
            args.host, args.port, args.username, args.password)
        client.switch_database(args.dbname)

    config_dir = os.path.join(os.getcwd(), args.config)  # type: str

    # Test if configuration directory exists
    if not os.path.isdir(config_dir):
        if config_dir != config_util.get_default_config_dir():
            print(('Fatal Error: Specified configuration directory does '
                   'not exist {} ').format(config_dir))
            return 1

    src_db = '{}/home-assistant_v2.db'.format(config_dir)

    if not os.path.exists(src_db) and not args.uri:
        print("Fatal Error: Database '{}' does not exist "
              "and no URI given".format(src_db))
        return 1

    uri = args.uri or 'sqlite:///{}'.format(src_db)
    engine = create_engine(uri, echo=False)
    session_factory = sessionmaker(bind=engine)
    session = session_factory()
    step = int(args.step)
    step_start = 0

    tags = {}
    if args.tags:
        tags.update(dict(elem.split(':') for elem in args.tags.split(',')))
    excl_entities = args.exclude_entities.split(',')
    excl_domains = args.exclude_domains.split(',')
    override_measurement = args.override_measurement
    default_measurement = args.default_measurement

    query = session.query(func.count(models.Events.event_type)).filter(
        models.Events.event_type == 'state_changed')

    total_events = query.scalar()
    prefix_format = '{} of {}'

    points = []
    invalid_points = []
    count = 0
    from collections import defaultdict
    entities = defaultdict(int)
    print_progress(0, total_events, prefix_format.format(0, total_events))

    while True:

        step_stop = step_start + step
        if step_start > total_events:
            print_progress(total_events, total_events, prefix_format.format(
                total_events, total_events))
            break
        query = session.query(models.Events).filter(
            models.Events.event_type == 'state_changed').order_by(
                models.Events.time_fired).slice(step_start, step_stop)

        for event in query:
            event_data = json.loads(event.event_data)

            if not ('entity_id' in event_data) or (
                    excl_entities and event_data[
                        'entity_id'] in excl_entities) or (
                            excl_domains and event_data[
                                'entity_id'].split('.')[0] in excl_domains):
                session.expunge(event)
                continue

            try:
                state = State.from_dict(event_data.get('new_state'))
            except HomeAssistantError:
                invalid_points.append(event_data)
                continue

            if not state:
                invalid_points.append(event_data)
                continue

            try:
                _state = float(state_helper.state_as_number(state))
                _state_key = 'value'
            except ValueError:
                _state = state.state
                _state_key = 'state'

            if override_measurement:
                measurement = override_measurement
            else:
                measurement = state.attributes.get('unit_of_measurement')
                if measurement in (None, ''):
                    if default_measurement:
                        measurement = default_measurement
                    else:
                        measurement = state.entity_id

            point = {
                'measurement': measurement,
                'tags': {
                    'domain': state.domain,
                    'entity_id': state.object_id,
                },
                'time': event.time_fired,
                'fields': {
                    _state_key: _state,
                }
            }

            for key, value in state.attributes.items():
                if key != 'unit_of_measurement':
                    # If the key is already in fields
                    if key in point['fields']:
                        key = key + '_'
                    # Prevent column data errors in influxDB.
                    # For each value we try to cast it as float
                    # But if we can not do it we store the value
                    # as string add "_str" postfix to the field key
                    try:
                        point['fields'][key] = float(value)
                    except (ValueError, TypeError):
                        new_key = '{}_str'.format(key)
                        point['fields'][new_key] = str(value)

            entities[state.entity_id] += 1
            point['tags'].update(tags)
            points.append(point)
            session.expunge(event)

        if points:
            if not simulate:
                client.write_points(points)
            count += len(points)
            # This prevents the progress bar from going over 100% when
            # the last step happens
            print_progress((step_start + len(
                points)), total_events, prefix_format.format(
                    step_start, total_events))
        else:
            print_progress(
                (step_start + step), total_events, prefix_format.format(
                    step_start, total_events))

        points = []
        step_start += step

    print("\nStatistics:")
    print("\n".join(["{:6}: {}".format(v, k) for k, v
                     in sorted(entities.items(), key=lambda x: x[1])]))
    print("\nInvalid Points: {}".format(len(invalid_points)))
    print("\nImport finished: {} points written".format(count))
    return 0
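A standalone sketch of the attribute-to-field conversion in the import loop above: values that cast to float become numeric fields, everything else is stored as a string under a '_str'-suffixed key to keep InfluxDB column types stable (the collision handling for duplicate keys is omitted here):

def attributes_to_fields(attributes):
    """Convert state attributes into InfluxDB point fields."""
    fields = {}
    for key, value in attributes.items():
        if key == 'unit_of_measurement':
            continue
        try:
            fields[key] = float(value)
        except (ValueError, TypeError):
            fields['{}_str'.format(key)] = str(value)
    return fields

assert attributes_to_fields(
    {'battery': '87', 'friendly_name': 'Door', 'unit_of_measurement': '%'}
) == {'battery': 87.0, 'friendly_name_str': 'Door'}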
Example #35
def humanify(events):
    """
    Generator that converts a list of events into Entry objects.

    Will try to group events if possible:
     - if 2+ sensor updates in GROUP_BY_MINUTES, show last
     - if home assistant stop and start happen in same minute call it restarted
    """
    # pylint: disable=too-many-branches

    # Group events in batches of GROUP_BY_MINUTES
    for _, g_events in groupby(events, lambda event: event.time_fired.minute // GROUP_BY_MINUTES):

        events_batch = list(g_events)

        # Keep track of last sensor states
        last_sensor_event = {}

        # group HA start/stop events
        # Maps minute of event to 1: stop, 2: stop + start
        start_stop_events = {}

        # Process events
        for event in events_batch:
            if event.event_type == EVENT_STATE_CHANGED:
                entity_id = event.data.get("entity_id")

                if entity_id is None:
                    continue

                if entity_id.startswith("sensor."):
                    last_sensor_event[entity_id] = event

            elif event.event_type == EVENT_HOMEASSISTANT_STOP:
                if event.time_fired.minute in start_stop_events:
                    continue

                start_stop_events[event.time_fired.minute] = 1

            elif event.event_type == EVENT_HOMEASSISTANT_START:
                if event.time_fired.minute not in start_stop_events:
                    continue

                start_stop_events[event.time_fired.minute] = 2

        # Yield entries
        for event in events_batch:
            if event.event_type == EVENT_STATE_CHANGED:

                # Do not report on new entities
                if "old_state" not in event.data:
                    continue

                to_state = State.from_dict(event.data.get("new_state"))

                # if last_changed != last_updated only attributes have changed
                # we do not report on that yet. Also filter auto groups.
                if (
                    not to_state
                    or to_state.last_changed != to_state.last_updated
                    or to_state.domain == "group"
                    and to_state.attributes.get("auto", False)
                ):
                    continue

                domain = to_state.domain

                # Skip all but the last sensor state
                if domain == "sensor" and event != last_sensor_event[to_state.entity_id]:
                    continue

                yield Entry(
                    event.time_fired,
                    name=to_state.name,
                    message=_entry_message_from_state(domain, to_state),
                    domain=domain,
                    entity_id=to_state.entity_id,
                )

            elif event.event_type == EVENT_HOMEASSISTANT_START:
                if start_stop_events.get(event.time_fired.minute) == 2:
                    continue

                yield Entry(event.time_fired, "Home Assistant", "started", domain=HA_DOMAIN)

            elif event.event_type == EVENT_HOMEASSISTANT_STOP:
                if start_stop_events.get(event.time_fired.minute) == 2:
                    action = "restarted"
                else:
                    action = "stopped"

                yield Entry(event.time_fired, "Home Assistant", action, domain=HA_DOMAIN)

            elif event.event_type.lower() == EVENT_LOGBOOK_ENTRY:
                domain = event.data.get(ATTR_DOMAIN)
                entity_id = event.data.get(ATTR_ENTITY_ID)
                if domain is None and entity_id is not None:
                    try:
                        domain = util.split_entity_id(str(entity_id))[0]
                    except IndexError:
                        pass

                yield Entry(
                    event.time_fired, event.data.get(ATTR_NAME), event.data.get(ATTR_MESSAGE), domain, entity_id
                )
Example #36
def _exclude_events(events, config):
    """Get lists of excluded entities and platforms."""
    excluded_entities = []
    excluded_domains = []
    included_entities = []
    included_domains = []
    exclude = config.get(CONF_EXCLUDE)
    if exclude:
        excluded_entities = exclude[CONF_ENTITIES]
        excluded_domains = exclude[CONF_DOMAINS]
    include = config.get(CONF_INCLUDE)
    if include:
        included_entities = include[CONF_ENTITIES]
        included_domains = include[CONF_DOMAINS]

    filtered_events = []
    for event in events:
        domain, entity_id = None, None

        if event.event_type == EVENT_STATE_CHANGED:
            to_state = State.from_dict(event.data.get('new_state'))
            # Do not report on new entities
            if event.data.get('old_state') is None:
                continue

            # Do not report on entity removal
            if not to_state:
                continue

            # exclude entities which are customized hidden
            hidden = to_state.attributes.get(ATTR_HIDDEN, False)
            if hidden:
                continue

            domain = to_state.domain
            entity_id = to_state.entity_id

        elif event.event_type == EVENT_LOGBOOK_ENTRY:
            domain = event.data.get(ATTR_DOMAIN)
            entity_id = event.data.get(ATTR_ENTITY_ID)

        if domain or entity_id:
            # filter if only excluded is configured for this domain
            if excluded_domains and domain in excluded_domains and \
                    not included_domains:
                if (included_entities and entity_id not in included_entities) \
                        or not included_entities:
                    continue
            # filter if only included is configured for this domain
            elif not excluded_domains and included_domains and \
                    domain not in included_domains:
                if (included_entities and entity_id not in included_entities) \
                        or not included_entities:
                    continue
            # filter if included and excluded is configured for this domain
            elif excluded_domains and included_domains and \
                    (domain not in included_domains or
                     domain in excluded_domains):
                if (included_entities and entity_id not in included_entities) \
                        or not included_entities or domain in excluded_domains:
                    continue
            # filter if only included is configured for this entity
            elif not excluded_domains and not included_domains and \
                    included_entities and entity_id not in included_entities:
                continue
            # check if logbook entry is excluded for this entity
            if entity_id in excluded_entities:
                continue
        filtered_events.append(event)
    return filtered_events
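The branching above is dense; below is a hedged sketch of just the two simplest cases it covers (exclude-only and include-only), leaving out the combined include-plus-exclude precedence rules:

def keep(domain, entity_id, excluded_domains=(), excluded_entities=(),
         included_domains=(), included_entities=()):
    """Decide whether a logbook event survives the simplest filters."""
    if entity_id in excluded_entities:
        return False
    if excluded_domains and not included_domains:
        return domain not in excluded_domains or entity_id in included_entities
    if included_domains and not excluded_domains:
        return domain in included_domains or entity_id in included_entities
    return True

assert not keep('light', 'light.desk', excluded_domains=['light'])
assert keep('switch', 'switch.tv', excluded_domains=['light'])
assert not keep('switch', 'switch.tv', included_domains=['light'])
assert keep('light', 'light.desk', included_domains=['light'])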
Example #37
def humanify(events):
    """
    Generator that converts a list of events into Entry objects.

    Will try to group events if possible:
     - if 2+ sensor updates in GROUP_BY_MINUTES, show last
     - if home assistant stop and start happen in same minute call it restarted
    """
    # pylint: disable=too-many-branches

    # Group events in batches of GROUP_BY_MINUTES
    for _, g_events in groupby(
            events,
            lambda event: event.time_fired.minute // GROUP_BY_MINUTES):

        events_batch = list(g_events)

        # Keep track of last sensor states
        last_sensor_event = {}

        # group HA start/stop events
        # Maps minute of event to 1: stop, 2: stop + start
        start_stop_events = {}

        # Process events
        for event in events_batch:
            if event.event_type == EVENT_STATE_CHANGED:
                entity_id = event.data['entity_id']

                if entity_id.startswith('sensor.'):
                    last_sensor_event[entity_id] = event

            elif event.event_type == EVENT_HOMEASSISTANT_STOP:
                if event.time_fired.minute in start_stop_events:
                    continue

                start_stop_events[event.time_fired.minute] = 1

            elif event.event_type == EVENT_HOMEASSISTANT_START:
                if event.time_fired.minute not in start_stop_events:
                    continue

                start_stop_events[event.time_fired.minute] = 2

        # Yield entries
        for event in events_batch:
            if event.event_type == EVENT_STATE_CHANGED:

                # Do not report on new entities
                if 'old_state' not in event.data:
                    continue

                to_state = State.from_dict(event.data.get('new_state'))

                # if last_changed == last_updated only attributes have changed
                # we do not report on that yet.
                if not to_state or \
                   to_state.last_changed != to_state.last_updated:
                    continue

                domain = to_state.domain

                # Skip all but the last sensor state
                if domain == 'sensor' and \
                   event != last_sensor_event[to_state.entity_id]:
                    continue

                yield Entry(
                    event.time_fired,
                    name=to_state.name,
                    message=_entry_message_from_state(domain, to_state),
                    domain=domain,
                    entity_id=to_state.entity_id)

            elif event.event_type == EVENT_HOMEASSISTANT_START:
                if start_stop_events.get(event.time_fired.minute) == 2:
                    continue

                yield Entry(
                    event.time_fired, "Home Assistant", "started",
                    domain=HA_DOMAIN)

            elif event.event_type == EVENT_HOMEASSISTANT_STOP:
                if start_stop_events.get(event.time_fired.minute) == 2:
                    action = "restarted"
                else:
                    action = "stopped"

                yield Entry(
                    event.time_fired, "Home Assistant", action,
                    domain=HA_DOMAIN)
Example #38
def humanify(hass, events):
    """Generate a converted list of events into Entry objects.

    Will try to group events if possible:
    - if 2+ sensor updates in GROUP_BY_MINUTES, show last
    - if home assistant stop and start happen in same minute call it restarted
    """
    domain_prefixes = tuple('{}.'.format(dom) for dom in CONTINUOUS_DOMAINS)

    # Group events in batches of GROUP_BY_MINUTES
    for _, g_events in groupby(
            events,
            lambda event: event.time_fired.minute // GROUP_BY_MINUTES):

        events_batch = list(g_events)

        # Keep track of last sensor states
        last_sensor_event = {}

        # Group HA start/stop events
        # Maps minute of event to 1: stop, 2: stop + start
        start_stop_events = {}

        # Process events
        for event in events_batch:
            if event.event_type == EVENT_STATE_CHANGED:
                entity_id = event.data.get('entity_id')

                if entity_id.startswith(domain_prefixes):
                    last_sensor_event[entity_id] = event

            elif event.event_type == EVENT_HOMEASSISTANT_STOP:
                if event.time_fired.minute in start_stop_events:
                    continue

                start_stop_events[event.time_fired.minute] = 1

            elif event.event_type == EVENT_HOMEASSISTANT_START:
                if event.time_fired.minute not in start_stop_events:
                    continue

                start_stop_events[event.time_fired.minute] = 2

        # Yield entries
        for event in events_batch:
            if event.event_type == EVENT_STATE_CHANGED:

                to_state = State.from_dict(event.data.get('new_state'))

                domain = to_state.domain

                # Skip all but the last sensor state
                if domain in CONTINUOUS_DOMAINS and \
                   event != last_sensor_event[to_state.entity_id]:
                    continue

                # Don't show continuous sensor value changes in the logbook
                if domain in CONTINUOUS_DOMAINS and \
                   to_state.attributes.get('unit_of_measurement'):
                    continue

                yield {
                    'when': event.time_fired,
                    'name': to_state.name,
                    'message': _entry_message_from_state(domain, to_state),
                    'domain': domain,
                    'entity_id': to_state.entity_id,
                    'context_id': event.context.id,
                    'context_user_id': event.context.user_id
                }

            elif event.event_type == EVENT_HOMEASSISTANT_START:
                if start_stop_events.get(event.time_fired.minute) == 2:
                    continue

                yield {
                    'when': event.time_fired,
                    'name': "Home Assistant",
                    'message': "started",
                    'domain': HA_DOMAIN,
                    'context_id': event.context.id,
                    'context_user_id': event.context.user_id
                }

            elif event.event_type == EVENT_HOMEASSISTANT_STOP:
                if start_stop_events.get(event.time_fired.minute) == 2:
                    action = "restarted"
                else:
                    action = "stopped"

                yield {
                    'when': event.time_fired,
                    'name': "Home Assistant",
                    'message': action,
                    'domain': HA_DOMAIN,
                    'context_id': event.context.id,
                    'context_user_id': event.context.user_id
                }

            elif event.event_type == EVENT_LOGBOOK_ENTRY:
                domain = event.data.get(ATTR_DOMAIN)
                entity_id = event.data.get(ATTR_ENTITY_ID)
                if domain is None and entity_id is not None:
                    try:
                        domain = split_entity_id(str(entity_id))[0]
                    except IndexError:
                        pass

                yield {
                    'when': event.time_fired,
                    'name': event.data.get(ATTR_NAME),
                    'message': event.data.get(ATTR_MESSAGE),
                    'domain': domain,
                    'entity_id': entity_id,
                    'context_id': event.context.id,
                    'context_user_id': event.context.user_id
                }

            elif event.event_type == EVENT_ALEXA_SMART_HOME:
                data = event.data
                entity_id = data['request'].get('entity_id')

                if entity_id:
                    state = hass.states.get(entity_id)
                    name = state.name if state else entity_id
                    message = "send command {}/{} for {}".format(
                        data['request']['namespace'],
                        data['request']['name'], name)
                else:
                    message = "send command {}/{}".format(
                        data['request']['namespace'], data['request']['name'])

                yield {
                    'when': event.time_fired,
                    'name': 'Amazon Alexa',
                    'message': message,
                    'domain': 'alexa',
                    'entity_id': entity_id,
                    'context_id': event.context.id,
                    'context_user_id': event.context.user_id
                }

            elif event.event_type == EVENT_HOMEKIT_CHANGED:
                data = event.data
                entity_id = data.get(ATTR_ENTITY_ID)
                value = data.get(ATTR_VALUE)

                value_msg = " to {}".format(value) if value else ''
                message = "send command {}{} for {}".format(
                    data[ATTR_SERVICE], value_msg, data[ATTR_DISPLAY_NAME])

                yield {
                    'when': event.time_fired,
                    'name': 'HomeKit',
                    'message': message,
                    'domain': DOMAIN_HOMEKIT,
                    'entity_id': entity_id,
                    'context_id': event.context.id,
                    'context_user_id': event.context.user_id
                }

            elif event.event_type == EVENT_AUTOMATION_TRIGGERED:
                yield {
                    'when': event.time_fired,
                    'name': event.data.get(ATTR_NAME),
                    'message': "has been triggered",
                    'domain': 'automation',
                    'entity_id': event.data.get(ATTR_ENTITY_ID),
                    'context_id': event.context.id,
                    'context_user_id': event.context.user_id
                }

            elif event.event_type == EVENT_SCRIPT_STARTED:
                yield {
                    'when': event.time_fired,
                    'name': event.data.get(ATTR_NAME),
                    'message': 'started',
                    'domain': 'script',
                    'entity_id': event.data.get(ATTR_ENTITY_ID),
                    'context_id': event.context.id,
                    'context_user_id': event.context.user_id
                }