# Example no. 1
def test_job_missed(event_fixture: fixture, caplog: fixture) -> None:
    """Missed jobs are logged both to TaskLog and to the scheduler logger.

    Schedules one-off jobs whose run time is already in the past so the
    scheduler fires a "job missed" event, then checks the database row and
    the captured log output for three cases: a real task id, a job id that
    does not follow the "<project>-<task>-ooff" pattern, and a task id that
    does not exist in the database.
    """

    def add_past_job(job_id: str, task_id: int) -> None:
        # A run_date one minute in the past exceeds the misfire grace time,
        # so the job is reported as missed instead of executed.
        atlas_scheduler.add_job(
            func=demo_task,
            trigger="date",
            id=job_id,
            name="test job 2",
            args=[str(task_id)],
            run_date=datetime.now() - timedelta(minutes=1),
            replace_existing=True,
        )
        # give the background listener time to write its logs
        time.sleep(1)

    # a real task id (the task itself is never executed here)
    p_id, t_id = create_demo_task()
    add_past_job(f"{p_id}-{t_id}-ooff", t_id)

    # the event listener should have written an error row for the miss
    log = TaskLog.query.filter_by(task_id=t_id, status_id=6, error=1).first()
    assert "Job missed. Scheduled for:" in log.message

    # the scheduler itself logs the miss, but never at error level
    assert "was missed by 0:01" in caplog.text
    for record in caplog.records:
        assert record.levelname not in ["CRITICAL", "ERROR"]

    caplog.clear()

    # a job id that does not follow the "<project>-<task>-ooff" pattern:
    # the miss is still logged by the scheduler
    add_past_job("asdf", t_id)
    assert "was missed by 0:01" in caplog.text
    caplog.clear()

    # a task id with no matching task in the database: the listener still
    # records the miss under that id
    p_id, t_id = (9, 9)
    add_past_job(f"{p_id}-{t_id}-ooff", t_id)
    log = TaskLog.query.filter_by(task_id=t_id, status_id=6, error=1).first()
    assert "Job missed. Scheduled for:" in log.message
# Example no. 2
def test_job_error(event_fixture: fixture, caplog: fixture) -> None:
    """A failing job is recorded in TaskLog only when its id maps to a task.

    Runs a job that raises, for a real task id, a nonexistent task id, and
    a job id without the "<project>-<task>-ooff" pattern.
    """

    def find_error_log(task_id: int):
        # Error row written by the job-error event listener, if any.
        query = TaskLog.query.filter_by(task_id=task_id, status_id=6, error=1)
        query = query.filter(
            TaskLog.message.like("%Job error. Scheduled for:%")
        )  # type: ignore[attr-defined,union-attr]
        return query.first()

    # a real task id: the failure must be captured in the database
    p_id, t_id = create_demo_task()
    atlas_scheduler.add_job(
        func=bad_demo_task,
        trigger="date",
        id=f"{p_id}-{t_id}-ooff",
        name="test job 2",
        replace_existing=True,
    )
    # give the background listener time to write its logs
    time.sleep(1)

    assert find_error_log(t_id) is not None
    # the scheduler logs the exception as well
    assert "raised an exception" in caplog.text
    caplog.clear()

    # a task id with no matching task: no database row is written
    p_id, t_id = (9, 9)
    atlas_scheduler.add_job(
        func=bad_demo_task,
        trigger="date",
        id=f"{p_id}-{t_id}-ooff",
        name="test job 2",
        replace_existing=True,
    )
    time.sleep(1)
    assert find_error_log(t_id) is None

    # a job id without the task pattern: must not blow up the listener
    atlas_scheduler.add_job(
        func=bad_demo_task,
        trigger="date",
        id="ooff",
        name="test job 2",
        replace_existing=True,
    )
    time.sleep(1)
# Example no. 3
async def _test_update_availability_switch(
    hass: HomeAssistantType,
    initial_power_state: Optional[bool],
    final_power_state: Optional[bool],
    caplog: pytest.fixture,
) -> None:
    """Drive the vizio entity between power states and check log volume.

    Sets the speaker up with ``initial_power_state``, advances time twice so
    the entity polls ``final_power_state``, asserts availability matches the
    polled state, and verifies the media_player module logged the
    availability change exactly once (not once per poll).
    """
    now = dt_util.utcnow()
    future_interval = timedelta(minutes=1)

    # Setup device as if the current time were "now", so the polls below can
    # be advanced deterministically relative to it
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        await _test_setup_speaker(hass, initial_power_state)

    # Clear captured logs so that only availability state changes are captured for
    # future assertion
    caplog.clear()

    # Fast forward time to future twice to trigger update and assert vizio log
    # message; both polls return the same state, so only the first transition
    # should produce a connection-status log record
    for i in range(1, 3):
        future = now + (future_interval * i)
        with patch(
            "homeassistant.components.vizio.media_player.VizioAsync.get_power_state",
            return_value=final_power_state,
        ), patch("homeassistant.util.dt.utcnow", return_value=future), patch(
            "homeassistant.util.utcnow", return_value=future
        ):
            async_fire_time_changed(hass, future)
            await hass.async_block_till_done()
            # a power state of None is treated as the device being unreachable
            if final_power_state is None:
                assert hass.states.get(ENTITY_ID).state == STATE_UNAVAILABLE
            else:
                assert hass.states.get(ENTITY_ID).state != STATE_UNAVAILABLE

    # Ensure connection status messages from vizio.media_player appear exactly once
    # (on availability state change), not on every poll
    vizio_log_list = [
        log
        for log in caplog.records
        if log.name == "homeassistant.components.vizio.media_player"
    ]
    assert len(vizio_log_list) == 1
# Example no. 4
def test_job_removed(event_fixture: fixture, caplog: fixture) -> None:
    """Job removal is always logged; TaskLog rows appear only for real tasks.

    Exercises a real task id, a nonexistent task id, and a job id without
    the "<project>-<task>-ooff" pattern.
    """

    def removal_log(task_id: int):
        # Row written by the job-removed event listener, if any.
        return TaskLog.query.filter_by(
            task_id=task_id, status_id=6, message="Job removed."
        ).first()

    def assert_clean_removal() -> None:
        # The scheduler logs the removal without any error-level records.
        assert "Removed job" in caplog.text
        for record in caplog.records:
            assert record.levelname not in ["CRITICAL", "ERROR"]

    # a real task id: the removal is also recorded in the database
    p_id, t_id = create_demo_task()
    atlas_scheduler.add_job(
        func=demo_task,
        trigger="date",
        id=f"{p_id}-{t_id}-ooff",
        name="test job 2",
        replace_existing=True,
    )
    # give the background listener time to process the removal
    time.sleep(1)
    assert_clean_removal()
    assert removal_log(t_id) is not None
    caplog.clear()

    # a task id with no matching task: logged by the scheduler, no DB row
    p_id, t_id = (9, 9)
    atlas_scheduler.add_job(
        func=demo_task,
        trigger="date",
        id=f"{p_id}-{t_id}-ooff",
        name="test job 2",
        replace_existing=True,
    )
    time.sleep(1)
    assert_clean_removal()
    assert removal_log(t_id) is None
    caplog.clear()

    # a job id without the task pattern: logged, still no DB row
    atlas_scheduler.add_job(
        func=demo_task,
        trigger="date",
        id="ooff",
        name="test job 2",
        replace_existing=True,
    )
    time.sleep(1)
    assert_clean_removal()
    assert removal_log(t_id) is None
# Example no. 5
def test_job_executed(event_fixture: fixture, caplog: fixture) -> None:
    """Successful runs are recorded in TaskLog only for real task ids.

    Exercises a real task id, a nonexistent task id, and a job id without
    the "<project>-<task>-ooff" pattern.
    """

    def execution_log(task_id: int):
        # Row written by the job-executed event listener, if any.
        # NOTE(review): "excecuted" is misspelled, but it must match the
        # message string the listener writes — do not "fix" it here alone.
        query = TaskLog.query.filter_by(task_id=task_id, status_id=6)
        query = query.filter(
            TaskLog.message.like("%Job excecuted in%")
        )  # type: ignore[attr-defined,union-attr]
        return query.first()

    def assert_no_errors_logged() -> None:
        for record in caplog.records:
            assert record.levelname not in ["CRITICAL", "ERROR"]

    # a real task id: the run is recorded in the database
    # (the 2nd to last log is the execution; the last is the removal)
    p_id, t_id = create_demo_task()
    atlas_scheduler.add_job(
        func=demo_task,
        trigger="date",
        id=f"{p_id}-{t_id}-ooff",
        name="test job 2",
        replace_existing=True,
    )
    # give the job time to execute
    time.sleep(1)

    assert execution_log(t_id) is not None
    assert "Running job" in caplog.text
    assert "executed successfully" in caplog.text
    assert_no_errors_logged()
    caplog.clear()

    # a task id with no matching task: no database row is written
    p_id, t_id = (9, 9)
    atlas_scheduler.add_job(
        func=demo_task,
        trigger="date",
        id=f"{p_id}-{t_id}-ooff",
        name="test job 2",
        replace_existing=True,
    )
    time.sleep(1)

    assert execution_log(t_id) is None
    assert_no_errors_logged()
    caplog.clear()

    # a job id without the task pattern: must run without error-level logs
    atlas_scheduler.add_job(
        func=demo_task,
        trigger="date",
        id="ooff",
        name="test job 2",
        replace_existing=True,
    )
    time.sleep(1)
    assert_no_errors_logged()