async def test_turn_off_encrypted_websocket(
        hass: HomeAssistant, remoteencws: Mock,
        caplog: pytest.LogCaptureFixture) -> None:
    """Test for turn_off."""
    entry_data = deepcopy(MOCK_ENTRYDATA_ENCRYPTED_WS)
    entry_data[CONF_MODEL] = "UE48UNKNOWN"
    await setup_samsungtv_entry(hass, entry_data)

    remoteencws.send_commands.reset_mock()

    caplog.clear()
    assert await hass.services.async_call(DOMAIN, SERVICE_TURN_OFF,
                                          {ATTR_ENTITY_ID: ENTITY_ID}, True)
    # Keys sent in a single send_commands call
    assert remoteencws.send_commands.call_count == 1
    commands = remoteencws.send_commands.call_args_list[0].args[0]
    assert len(commands) == 2
    assert isinstance(command := commands[0], SamsungTVEncryptedCommand)
    assert command.body["param3"] == "KEY_POWEROFF"
    assert isinstance(command := commands[1], SamsungTVEncryptedCommand)
    assert command.body["param3"] == "KEY_POWER"
    assert "Unknown power_off command for UE48UNKNOWN (fake_host)" in caplog.text

    # Commands not sent: power off is in progress
    remoteencws.send_commands.reset_mock()
    assert await hass.services.async_call(DOMAIN, SERVICE_VOLUME_UP,
                                          {ATTR_ENTITY_ID: ENTITY_ID}, True)
    assert "TV is powering off, not sending keys: ['KEY_VOLUP']" in caplog.text
    remoteencws.send_commands.assert_not_called()
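The test above clears previously captured records before asserting on caplog.text. A minimal, generic sketch of that clear-then-assert pattern, assuming only the built-in caplog fixture (the "demo" logger name is illustrative, not from the example):

import logging

import pytest


def test_clear_then_assert(caplog: pytest.LogCaptureFixture) -> None:
    logger = logging.getLogger("demo")
    logger.warning("first message")
    # Drop everything captured so far, so later assertions only see
    # records emitted after this point.
    caplog.clear()
    logger.warning("second message")
    assert "first message" not in caplog.text
    assert "second message" in caplog.text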
Example #2
    def test_select(self, patch_curses: Any, caplog: pytest.LogCaptureFixture,
                    selection: Set[str]) -> None:
        """Test `cobib.tui.tui.TUI.select`.

        Args:
            patch_curses: the `tests.tui.tui_test.TUITest.patch_curses` fixture.
            caplog: the built-in pytest fixture.
            selection: the set of selected labels.
        """
        stdscr = MockCursesPad()
        stdscr.size = (24, 80)
        tui = TUI(stdscr, debug=True)
        tui.selection = copy.deepcopy(selection)
        caplog.clear()

        tui.select()
        assert tui.selection == (set() if selection else {"knuthwebsite"})
        expected_log = [
            ("cobib.tui.tui", 10, "Select command triggered."),
            ("cobib.tui.frame", 10, 'Obtaining current label "under" cursor.'),
            ("cobib.tui.frame", 10, 'Current label at "0" is "knuthwebsite".'),
        ]
        if selection:
            expected_log.append(
                ("cobib.tui.tui", 20,
                 "Removing 'knuthwebsite' from the selection."))
        else:
            expected_log.append(("cobib.tui.tui", 20,
                                 "Adding 'knuthwebsite' to the selection."))
        assert [
            record for record in caplog.record_tuples
            if record[0] in ("cobib.tui.frame", "cobib.tui.tui")
        ] == expected_log
Example #3
def test_internal_server_error(setup: None, use_traceback: None,
                               mock_glab_request: Callable,
                               caplog: pytest.LogCaptureFixture):
    @aurweb.asgi.app.get("/internal_server_error")
    async def internal_server_error(request: fastapi.Request):
        raise ValueError("test exception")

    with mock.patch("aurweb.config.get", side_effect=mock_glab_config()):
        with TestClient(app=aurweb.asgi.app) as request:
            mock_glab_request(FakeResponse())
            # Test with a ?query=string to cover the request.url.query path.
            resp = request.get("/internal_server_error?query=string")
    assert resp.status_code == int(http.HTTPStatus.INTERNAL_SERVER_ERROR)

    # Assert that the exception got logged with its traceback ID.
    expr = r"FATAL\[.{7}\]"
    assert re.search(expr, caplog.text)

    # Let's do it again to exercise the cached path.
    caplog.clear()
    with mock.patch("aurweb.config.get", side_effect=mock_glab_config()):
        with TestClient(app=aurweb.asgi.app) as request:
            mock_glab_request(FakeResponse())
            resp = request.get("/internal_server_error")
    assert resp.status_code == int(http.HTTPStatus.INTERNAL_SERVER_ERROR)
    assert "FATAL" not in caplog.text
Example #4
def test_run_failure_plan_execute(tmp_path: pathlib.Path,
                                  sample_catalog_minimal: Catalog,
                                  monkeypatch: MonkeyPatch,
                                  caplog: pytest.LogCaptureFixture):
    """Test failure plan execute() in _run on RemoveCmd."""
    # Plant this specific logged error for failing execution in mock_execute:
    logged_error = 'fail_execute'

    def mock_execute(*args, **kwargs):
        raise err.TrestleError(logged_error)

    # Create a temporary file as a valid arg for trestle remove:
    content_type = FileContentType.JSON
    catalog_def_dir, catalog_def_file = test_utils.prepare_trestle_project_dir(
        tmp_path, content_type, sample_catalog_minimal,
        test_utils.CATALOGS_DIR)
    monkeypatch.chdir(tmp_path)
    # Add remarks here so there is a valid removal target ...
    testargs = [
        'trestle', 'create', '-f',
        str(catalog_def_file), '-e', 'catalog.metadata.remarks'
    ]
    monkeypatch.setattr(sys, 'argv', testargs)
    Trestle().run()
    # ... then attempt to remove it here, while mocking a failed execute:
    testargs = [
        'trestle', 'remove', '-f',
        str(catalog_def_file), '-e', 'catalog.metadata.remarks'
    ]
    monkeypatch.setattr(Plan, 'execute', mock_execute)
    monkeypatch.setattr(sys, 'argv', testargs)
    caplog.clear()
    rc = Trestle().run()
    assert rc > 0
Example #5
def test_warning_redirects(
    caplog: pytest.LogCaptureFixture,
    monkeypatch: pytest.MonkeyPatch,
    warnings_filter: pcdsutils.log.LogWarningLevelFilter,
):
    caplog.set_level(logging.DEBUG)
    normal_warning_count = 0
    original_impl = warnings._showwarnmsg_impl

    def showwarnmsg_and_count(msg):
        nonlocal normal_warning_count
        normal_warning_count += 1
        original_impl(msg)

    monkeypatch.setattr(warnings, "_showwarnmsg_impl", showwarnmsg_and_count)

    message = "test_warning_redirects"
    for cnt in range(10):
        caplog.clear()
        warnings.warn(message)
        assert normal_warning_count == 0, (f"Saw a normal warning! cnt={cnt}")
        assert caplog.records, (f"Did not find log records! cnt={cnt}")
        assert len(caplog.records) == 1, (f"Expected only 1 record! cnt={cnt}")
        assert message in caplog.records[0].message, (
            f"Wrong record! cnt={cnt}")

    pcdsutils.log.uninstall_log_warning_handler()

    for cnt in range(10):
        caplog.clear()
        warnings.warn(message)
        assert not caplog.records, (
            f"Has log records after uninstall! cnt={cnt}")
        assert normal_warning_count == cnt + 1, (
            f"No normal warning! cnt={cnt}")
Example #6
    def test_prompt_print(self, patch_curses: Any,
                          caplog: pytest.LogCaptureFixture,
                          text: List[str]) -> None:
        """Test `cobib.tui.tui.TUI.prompt_print`.

        Args:
            patch_curses: the `tests.tui.tui_test.TUITest.patch_curses` fixture.
            caplog: the built-in pytest fixture.
            text: the text to print to the prompt.
        """
        stdscr = MockCursesPad()
        stdscr.size = (24, 80)
        tui = TUI(stdscr, debug=True)
        caplog.clear()

        tui.prompt_print("\n".join(text))
        assert tui.prompt.lines == [text[0]]  # type: ignore
        if len(text) > 1:
            # assert popup on multi-line text messages
            assert (
                "cobib.tui.buffer",
                10,
                "Appending string to text buffer: " + "\n".join(text),
            ) in caplog.record_tuples
            assert ("cobib.tui.buffer", 10,
                    "Create popup window.") in caplog.record_tuples
Example #7
    def test_help(self, patch_curses: Any,
                  caplog: pytest.LogCaptureFixture) -> None:
        # pylint: disable=consider-using-f-string
        """Test `cobib.tui.tui.TUI.help`.

        Args:
            patch_curses: the `tests.tui.tui_test.TUITest.patch_curses` fixture.
            caplog: the built-in pytest fixture.
        """
        stdscr = MockCursesPad()
        stdscr.size = (24, 80)
        tui = TUI(stdscr, debug=True)
        caplog.clear()

        tui.help()
        expected_log = [
            ("cobib.tui.tui", 10, "Help command triggered."),
            ("cobib.tui.tui", 10, "Generating help text."),
            ("MockCursesPad", 10, "erase"),
            ("MockCursesPad", 10, "refresh: 0 0 0 0 22 80"),
            ("MockCursesPad", 10, "resize: 22 80"),
            ("MockCursesPad", 10,
             "addstr: 1 1                              coBib TUI Help"),
            ("MockCursesPad", 10, "addstr: 2 1   Key    Command  Description"),
            ("MockCursesPad", 10, "bkgd:   (6,)"),
            ("MockCursesPad", 10, "box"),
            ("MockCursesPad", 10, "refresh: 0 0 0 0 22 80"),
            ("MockCursesPad", 10, "getch"),
            ("MockCursesPad", 10, "clear"),
            ("cobib.tui.tui", 10, "Handling resize event."),
        ]
        inv_keys = {}
        for key, cmd in TUI.KEYDICT.items():
            if cmd in TUI.HELP_DICT:
                inv_keys[cmd] = "ENTER" if key in (10, 13) else chr(key)
        for idx, (cmd, desc) in enumerate(TUI.HELP_DICT.items()):
            expected_log.insert(
                -6,
                (
                    "MockCursesPad",
                    10,
                    f"addstr: {3+idx} 1 " + "{:^8} {:<8} {}".format(
                        "[" + config.tui.key_bindings[cmd.lower()] + "]",
                        cmd + ":", desc),
                ),
            )
        for log, truth in zip(
                expected_log,
            [
                record for record in caplog.record_tuples
                if record[0] in ("MockCursesPad", "cobib.tui.tui")
            ],
        ):
            assert log == truth
Example #8
    def test_quit(
        self,
        patch_curses: Any,
        caplog: pytest.LogCaptureFixture,
        prompt_quit: bool,
        returned_char: int,
        mode: str,
    ) -> None:
        """Test `cobib.tui.tui.TUI.quit`.

        Args:
            patch_curses: the `tests.tui.tui_test.TUITest.patch_curses` fixture.
            caplog: the built-in pytest fixture.
            prompt_quit: whether to prompt before actually quitting.
            returned_char: the value for `tests.tui.mock_curses.MockCursesPad.returned_chars`.
            mode: the `cobib.tui.state.Mode` value.
        """
        stdscr = MockCursesPad()
        stdscr.size = (24, 80)
        config.tui.prompt_before_quit = prompt_quit
        tui = TUI(stdscr, debug=True)
        STATE.mode = mode
        caplog.clear()

        tui.prompt.returned_chars = [returned_char]  # type: ignore

        expected_log = []
        if mode == Mode.LIST.value:
            expected_log.append(
                ("cobib.tui.tui", 10, "Quitting from lowest level."))
        else:
            expected_log.append(
                ("cobib.tui.tui", 10,
                 "Quitting higher menu level. Falling back to list view."))
        if prompt_quit:
            expected_log.append(("TUITest", 10, "curs_set: (1,)"))

        if returned_char == ord("n"):
            expected_log.append(
                ("cobib.tui.tui", 20, "User aborted quitting."))
            expected_log.append(("TUITest", 10, "curs_set: (0,)"))

        if mode == Mode.LIST.value and returned_char != ord("n"):
            with pytest.raises(StopIteration):
                tui.quit()
        else:
            tui.quit()
        assert [
            record for record in caplog.record_tuples
            if record[0] in ("cobib.tui.tui", "TUITest")
        ] == expected_log
Example #9
def test_projects(caplog: LogCaptureFixture) -> None:
    with event_loop():
        runner = CliRunner()
        result = runner.invoke(cli.main, ["--projects=STDIN,asdf"])
        assert result.exit_code == 0
        assert "1 / 1 succeeded" in result.output
        assert "Projects not found: {'asdf'}" in caplog.text

    caplog.clear()

    with event_loop():
        runner = CliRunner()
        result = runner.invoke(cli.main, ["--projects=fdsa,STDIN"])
        assert result.exit_code == 0
        assert "1 / 1 succeeded" in result.output
        assert "Projects not found: {'fdsa'}" in caplog.text
Example #10
def test_matrix_unused(caplog: pytest.LogCaptureFixture) -> None:
    """test that unused CI matrix dimensions trigger a warning"""
    obj = yaml_load((FIXTURES / "matrix06" / "matrix.yaml").read_text())
    mtx = CIMatrix(obj, "master", False)
    assert not mtx.secrets
    assert not mtx.jobs
    assert any(rec.levelname == "WARNING"
               for rec in caplog.get_records("call"))

    caplog.clear()
    del obj["language"]
    mtx = CIMatrix(obj, "master", False)
    assert not mtx.secrets
    assert not mtx.jobs
    assert any(rec.levelname == "WARNING"
               for rec in caplog.get_records("call"))
Example #11
    def test_skipping_undone_commits(self, setup: Any,
                                     caplog: pytest.LogCaptureFixture) -> None:
        """Test skipping already undone commits.

        Args:
            setup: the `tests.commands.command_test.CommandTest.setup` fixture.
            caplog: the built-in pytest fixture.
        """
        AddCommand().execute(["-b", EXAMPLE_MULTI_FILE_ENTRY_BIB])
        AddCommand().execute(["-b", get_resource("example_entry.bib")])
        UndoCommand().execute([])
        caplog.clear()

        UndoCommand().execute([])
        self._assert()
        assert "Storing undone commit" in caplog.record_tuples[4][2]
        assert "Skipping" in caplog.record_tuples[6][2]
Example #12
def test_check_existence(caplog: pytest.LogCaptureFixture) -> None:
    """Test the JournalAbbreviations.check_existence method.

    Args:
        caplog: the built-in pytest fixture.
    """
    assert JournalAbbreviations.check_existence("Test Journal") is False
    for scope, level, message in caplog.record_tuples:
        if (scope == "cobib.utils.journal_abbreviations" and level == 30
                and "'Test Journal' was not found" in message):
            break
    else:
        assert False, "Warning not raised upon missing journal!"
    caplog.clear()
    config.utils.journal_abbreviations = [("Test Journal", "Test J.")]
    assert JournalAbbreviations.check_existence("Test Journal")
    assert JournalAbbreviations.check_existence("Test J.")
    assert JournalAbbreviations.check_existence("Test J")
Example #13
def test_demotion_on_leveled_root_handler(
    callback_demoter_on_caplog_handler: SimpleDemoter,
    unique_log: logging.Logger,
    caplog: pytest.LogCaptureFixture,
):
    # Applying the demoter to all messages makes it easier to test
    callback_demoter_on_caplog_handler.only_duplicates = False

    # We need to set caplog's logger and handler levels
    caplog.set_level(logging.DEBUG)
    caplog.handler.setLevel(logging.INFO)

    # Now we expect these to pass through to the handler, then get filtered
    caplog.clear()

    for _ in range(10):
        unique_log.info('should be filtered')

    assert not caplog.records
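Example #13 hinges on the difference between the logger level and the level of caplog's own handler. A minimal sketch of that interplay, assuming only the built-in fixture:

import logging

import pytest


def test_handler_level_filtering(caplog: pytest.LogCaptureFixture) -> None:
    # Let every record propagate toward the capturing handler ...
    caplog.set_level(logging.DEBUG)
    # ... but have the handler itself drop anything below WARNING.
    caplog.handler.setLevel(logging.WARNING)
    logging.getLogger("demo").info("dropped by the handler")
    logging.getLogger("demo").warning("kept")
    assert [rec.getMessage() for rec in caplog.records] == ["kept"]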
Example #14
    def test_extend_serverless_yml(
        self,
        caplog: LogCaptureFixture,
        mocker: MockerFixture,
        runway_context: MockRunwayContext,
        tmp_path: Path,
    ) -> None:
        """Test extend_serverless_yml."""
        # pylint: disable=no-member
        mock_merge = mocker.patch("runway.module.serverless.merge_dicts")
        caplog.set_level(logging.DEBUG, logger="runway")
        mock_func = MagicMock()
        mock_merge.return_value = {"key": "val"}
        mocker.patch.object(Serverless, "npm_install", MagicMock())
        mocker.patch.object(Serverless, "sls_print",
                            MagicMock(return_value="original"))
        mocker.patch.object(ServerlessOptions, "update_args", MagicMock())

        options = {"extend_serverless_yml": {"new-key": "val"}}
        obj = Serverless(runway_context, module_root=tmp_path, options=options)

        assert not obj.extend_serverless_yml(mock_func)
        obj.npm_install.assert_called_once()
        obj.sls_print.assert_called_once()
        mock_merge.assert_called_once_with("original",
                                           options["extend_serverless_yml"])
        mock_func.assert_called_once_with(skip_install=True)
        obj.options.update_args.assert_called_once_with("config", ANY)

        tmp_file = obj.options.update_args.call_args[0][1]
        # No way to check the prefix since it will be a UUID.
        assert tmp_file.endswith(".tmp.serverless.yml")
        assert not (tmp_path / tmp_file
                    ).exists(), 'should always be deleted after calling "func"'

        caplog.clear()
        mocker.patch("pathlib.Path.unlink",
                     MagicMock(side_effect=OSError("test OSError")))
        assert not obj.extend_serverless_yml(mock_func)
        assert ("{}:encountered an error when trying to delete the "
                "temporary Serverless config".format(tmp_path.name)
                in caplog.messages)
Example #15
def test_demotion_on_leveled_logger(
    callback_demoter_on_unique_log: SimpleDemoter,
    unique_log: logging.Logger,
    caplog: pytest.LogCaptureFixture,
):
    # Applying the demoter to all messages makes it easier to test
    callback_demoter_on_unique_log.only_duplicates = False
    # Log messages that get demoted to DEBUG should not go through!
    unique_log.setLevel(logging.INFO)
    # If they do go through, caplog would see them!
    caplog.set_level(logging.DEBUG)

    # Trigger a bunch of filtered callback exceptions
    # caplog must see NOTHING
    caplog.clear()

    for _ in range(10):
        unique_log.info('should be filtered')

    assert not caplog.records
Example #16
    def test_validate_account_credentials(
        self,
        caplog: LogCaptureFixture,
        mocker: MockerFixture,
        fx_deployments: YamlLoaderDeployment,
        runway_context: MockRunwayContext,
    ) -> None:
        """Test validate_account_credentials."""
        caplog.set_level(logging.INFO, logger="runway")
        mock_aws = mocker.patch(f"{MODULE}.aws")
        obj = Deployment(context=runway_context,
                         definition=fx_deployments.load("validate_account"))

        account = MagicMock()
        account.aliases = ["no-match"]
        account.id = "111111111111"
        mock_aws.AccountDetails.return_value = account
        with pytest.raises(SystemExit) as excinfo:
            assert obj.validate_account_credentials()
        assert excinfo.value.code == 1
        assert 'does not match required account "123456789012"' in "\n".join(
            caplog.messages)
        caplog.clear()
        del excinfo

        account.id = "123456789012"
        with pytest.raises(SystemExit) as excinfo:
            assert obj.validate_account_credentials()
        assert excinfo.value.code == 1
        logs = "\n".join(caplog.messages)
        assert "verified current AWS account matches required account id" in logs
        assert 'do not match required account alias "test"' in logs
        caplog.clear()
        del logs
        del excinfo

        account.aliases = ["test"]
        assert not obj.validate_account_credentials()
        logs = "\n".join(caplog.messages)
        assert "verified current AWS account matches required account id" in logs
        assert "verified current AWS account alias matches required alias" in logs
Example #17
    async def test_ban_and_unban_exceptions(
        self,
        mocker: MockerFixture,
        caplog: pytest.LogCaptureFixture,
    ) -> None:
        target_user = MagicMock()
        target_user.id = 1002
        cog = BanCog(self.bot)

        mocker.patch("spellbot.cogs.ban_cog.set_banned",
                     AsyncMock(side_effect=RuntimeError()))

        with pytest.raises(RuntimeError):
            await self.run(cog, cog.ban, self.context, str(target_user.id))
        assert "rolling back database session due to unhandled exception" in caplog.text

        caplog.clear()

        with pytest.raises(RuntimeError):
            await self.run(cog, cog.unban, self.context, str(target_user.id))
        assert "rolling back database session due to unhandled exception" in caplog.text
Example #18
    def test_skip(
        self,
        caplog: LogCaptureFixture,
        mocker: MockerFixture,
        runway_context: MockRunwayContext,
        tmp_path: Path,
    ) -> None:
        """Test skip."""
        caplog.set_level(logging.INFO, logger="runway")
        obj = Serverless(runway_context, module_root=tmp_path)
        mocker.patch.object(obj, "package_json_missing", lambda: True)
        mocker.patch.object(obj, "env_file", False)

        assert obj.skip
        assert [
            '{}:skipped; package.json with "serverless" in devDependencies'
            " is required for this module type".format(tmp_path.name)
        ] == caplog.messages
        caplog.clear()

        mocker.patch.object(obj, "package_json_missing", lambda: False)
        assert obj.skip
        assert [
            "{}:skipped; config file for this stage/region not found"
            " -- looking for one of: {}".format(
                tmp_path.name,
                ", ".join(gen_sls_config_files(obj.stage, obj.region)))
        ] == caplog.messages
        caplog.clear()

        obj.explicitly_enabled = True
        assert not obj.skip
        obj.explicitly_enabled = False

        obj.parameters = True  # type: ignore
        assert not obj.skip
        obj.parameters = False  # type: ignore

        obj.env_file = True  # type: ignore
        assert not obj.skip
Example #19
def test_app_factory(caplog: pytest.LogCaptureFixture) -> None:
    def create_app() -> ASGIApplication:
        return asgi_app

    config = Config(app=create_app, factory=True, proxy_headers=False)
    config.load()
    assert config.loaded_app is asgi_app

    # Flag not passed. In this case, successfully load the app, but issue a warning
    # to indicate that an explicit flag is preferred.
    caplog.clear()
    config = Config(app=create_app, proxy_headers=False)
    with caplog.at_level(logging.WARNING):
        config.load()
    assert config.loaded_app is asgi_app
    assert len(caplog.records) == 1
    assert "--factory" in caplog.records[0].message

    # App not a no-arguments callable.
    config = Config(app=asgi_app, factory=True)
    with pytest.raises(SystemExit):
        config.load()
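Example #19 uses caplog.at_level as a context manager instead of set_level. A minimal sketch of the difference, with an illustrative logger: at_level restores the previous logger and handler levels when the block exits:

import logging

import pytest


def test_at_level_is_scoped(caplog: pytest.LogCaptureFixture) -> None:
    quiet = logging.getLogger("demo.quiet")
    quiet.setLevel(logging.ERROR)
    # Inside the block the logger is temporarily lowered to DEBUG; the
    # original ERROR level comes back when the block exits.
    with caplog.at_level(logging.DEBUG, logger="demo.quiet"):
        quiet.debug("captured inside the block")
    assert "captured inside the block" in caplog.text
    assert quiet.level == logging.ERROR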
Example #20
async def test_turn_off_encrypted_websocket_key_type(
    hass: HomeAssistant,
    remoteencws: Mock,
    caplog: pytest.LogCaptureFixture,
    model: str,
    expected_key_type: str,
) -> None:
    """Test for turn_off."""
    entry_data = deepcopy(MOCK_ENTRYDATA_ENCRYPTED_WS)
    entry_data[CONF_MODEL] = model
    await setup_samsungtv_entry(hass, entry_data)

    remoteencws.send_commands.reset_mock()

    caplog.clear()
    assert await hass.services.async_call(DOMAIN, SERVICE_TURN_OFF,
                                          {ATTR_ENTITY_ID: ENTITY_ID}, True)
    # Key sent in a single send_commands call
    assert remoteencws.send_commands.call_count == 1
    commands = remoteencws.send_commands.call_args_list[0].args[0]
    assert len(commands) == 1
    assert isinstance(command := commands[0], SamsungTVEncryptedCommand)
    assert command.body["param3"] == expected_key_type
    assert "Unknown power_off command for" not in caplog.text
Example #21
    def test_execute(
        self,
        action: str,
        caplog: LogCaptureFixture,
        mocker: MockerFixture,
        runway_context: MockRunwayContext,
        tmp_path: Path,
    ) -> None:
        """Test executing a Runway action."""
        caplog.set_level(LogLevels.DEBUG, logger=MODULE)
        mocker.patch.object(Terraform, "handle_backend", MagicMock())
        mocker.patch.object(Terraform, "skip", True)
        mocker.patch.object(Terraform, "cleanup_dot_terraform", MagicMock())
        mocker.patch.object(Terraform, "handle_parameters", MagicMock())
        mocker.patch.object(Terraform, "terraform_init", MagicMock())
        mocker.patch.object(Terraform, "current_workspace", "test")
        mocker.patch.object(Terraform, "terraform_workspace_list",
                            MagicMock(return_value="* test"))
        mocker.patch.object(Terraform, "terraform_workspace_select",
                            MagicMock())
        mocker.patch.object(Terraform, "terraform_workspace_new", MagicMock())
        mocker.patch.object(Terraform, "terraform_get", MagicMock())
        mocker.patch.object(Terraform, "terraform_apply", MagicMock())
        mocker.patch.object(Terraform, "terraform_destroy", MagicMock())
        mocker.patch.object(Terraform, "terraform_plan", MagicMock())
        mocker.patch.object(
            Terraform,
            "auto_tfvars",
            MagicMock(exists=MagicMock(return_value=True), unlink=MagicMock()),
        )
        command = "apply" if action == "deploy" else action

        # pylint: disable=no-member
        # module is skipped
        obj = Terraform(runway_context, module_root=tmp_path)
        assert not obj[action]()
        obj.handle_backend.assert_called_once_with()
        obj.cleanup_dot_terraform.assert_not_called()
        obj.handle_parameters.assert_not_called()
        obj.auto_tfvars.exists.assert_called_once_with()
        obj.auto_tfvars.unlink.assert_called_once_with()
        caplog.clear()

        # module is run; workspace matches
        obj.auto_tfvars.exists.return_value = False
        mocker.patch.object(obj, "skip", False)
        assert not obj[action]()
        obj.cleanup_dot_terraform.assert_called_once_with()
        obj.handle_parameters.assert_called_once_with()
        obj.terraform_init.assert_called_once_with()
        obj.terraform_workspace_list.assert_not_called()
        obj.terraform_workspace_select.assert_not_called()
        obj.terraform_workspace_new.assert_not_called()
        obj.terraform_get.assert_called_once_with()
        obj["terraform_" + command].assert_called_once_with()
        assert obj.auto_tfvars.exists.call_count == 2
        assert obj.auto_tfvars.unlink.call_count == 1
        logs = "\n".join(caplog.messages)
        assert "init (in progress)" in logs
        assert "init (complete)" in logs
        assert "re-running init after workspace change..." not in logs
        assert "{} (in progress)".format(command) in logs
        assert "{} (complete)".format(command) in logs
        caplog.clear()

        # module is run; switch to workspace
        mocker.patch.object(Terraform, "current_workspace", "default")
        assert not obj[action]()
        obj.terraform_workspace_list.assert_called_once_with()
        obj.terraform_workspace_select.assert_called_once_with("test")
        obj.terraform_workspace_new.assert_not_called()
        logs = "\n".join(caplog.messages)
        assert "re-running init after workspace change..." in logs

        # module is run; create workspace
        mocker.patch.object(Terraform, "terraform_workspace_list",
                            MagicMock(return_value=""))
        assert not obj[action]()
        obj.terraform_workspace_new.assert_called_once_with("test")
Example #22
async def test_rgb_light_custom_effects(
    hass: HomeAssistant, caplog: pytest.LogCaptureFixture
) -> None:
    """Test an rgb light with a custom effect."""
    config_entry = MockConfigEntry(
        domain=DOMAIN,
        data={CONF_HOST: IP_ADDRESS, CONF_NAME: DEFAULT_ENTRY_TITLE},
        unique_id=MAC_ADDRESS,
        options={
            CONF_MODE: MODE_AUTO,
            CONF_CUSTOM_EFFECT_COLORS: "[0,0,255], [255,0,0]",
            CONF_CUSTOM_EFFECT_SPEED_PCT: 88,
            CONF_CUSTOM_EFFECT_TRANSITION: TRANSITION_JUMP,
        },
    )
    config_entry.add_to_hass(hass)
    bulb = _mocked_bulb()
    with _patch_discovery(device=bulb), _patch_wifibulb(device=bulb):
        await async_setup_component(hass, flux_led.DOMAIN, {flux_led.DOMAIN: {}})
        await hass.async_block_till_done()

    entity_id = "light.az120444_aabbccddeeff"

    state = hass.states.get(entity_id)
    assert state.state == STATE_ON
    attributes = state.attributes
    assert attributes[ATTR_BRIGHTNESS] == 128
    assert attributes[ATTR_COLOR_MODE] == "rgbw"
    assert attributes[ATTR_EFFECT_LIST] == [*FLUX_EFFECT_LIST, "custom"]
    assert attributes[ATTR_SUPPORTED_COLOR_MODES] == ["color_temp", "hs", "rgbw"]
    assert attributes[ATTR_HS_COLOR] == (0, 100)

    await hass.services.async_call(
        LIGHT_DOMAIN, "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True
    )
    bulb.turnOff.assert_called_once()

    bulb.is_on = False
    async_fire_time_changed(hass, utcnow() + timedelta(seconds=10))
    await hass.async_block_till_done()
    assert hass.states.get(entity_id).state == STATE_OFF

    await hass.services.async_call(
        LIGHT_DOMAIN,
        "turn_on",
        {ATTR_ENTITY_ID: entity_id, ATTR_EFFECT: "custom"},
        blocking=True,
    )
    bulb.setCustomPattern.assert_called_with([[0, 0, 255], [255, 0, 0]], 88, "jump")
    bulb.setCustomPattern.reset_mock()
    bulb.raw_state = [0, 0, 0, EFFECT_CUSTOM_CODE, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    bulb.is_on = True
    async_fire_time_changed(hass, utcnow() + timedelta(seconds=20))
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.state == STATE_ON
    attributes = state.attributes
    assert attributes[ATTR_EFFECT] == "custom"

    caplog.clear()
    await hass.services.async_call(
        LIGHT_DOMAIN,
        "turn_on",
        {ATTR_ENTITY_ID: entity_id, ATTR_BRIGHTNESS: 55, ATTR_EFFECT: "custom"},
        blocking=True,
    )
    bulb.setCustomPattern.assert_called_with([[0, 0, 255], [255, 0, 0]], 88, "jump")
    bulb.setCustomPattern.reset_mock()
    bulb.raw_state = [0, 0, 0, EFFECT_CUSTOM_CODE, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    bulb.is_on = True
    async_fire_time_changed(hass, utcnow() + timedelta(seconds=20))
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.state == STATE_ON
    attributes = state.attributes
    assert attributes[ATTR_EFFECT] == "custom"
    assert "RGB, brightness and white level are ignored when" in caplog.text
Example #23
    def test_test(
        self,
        caplog: LogCaptureFixture,
        monkeypatch: MonkeyPatch,
        runway_config: MockRunwayConfig,
        runway_context: MockRunwayContext,
    ) -> None:
        """Test test."""
        caplog.set_level(logging.ERROR, logger="runway")
        test_handlers = {
            "exception":
            MagicMock(handle=MagicMock(side_effect=Exception())),
            "fail_system_exit_0":
            MagicMock(handle=MagicMock(side_effect=SystemExit(0))),
            "fail_system_exit_1":
            MagicMock(handle=MagicMock(side_effect=SystemExit(1))),
            "success":
            MagicMock(),
        }
        monkeypatch.setattr(MODULE + "._TEST_HANDLERS", test_handlers)
        obj = Runway(runway_config, runway_context)  # type: ignore

        obj.tests = [  # type: ignore
            MagicMock(type="success"),
            MagicMock(type="fail_system_exit_0"),
        ]
        assert not obj.test()
        assert "the following tests failed" not in "\n".join(caplog.messages)
        test_handlers["success"].handle.assert_called_with(
            obj.tests[0].name, obj.tests[0].args)
        test_handlers["fail_system_exit_0"].handle.assert_called_with(
            obj.tests[1].name, obj.tests[1].args)
        obj.tests[0].resolve.assert_called_once_with(
            runway_context, variables=runway_config.variables)
        obj.tests[1].resolve.assert_called_once_with(
            runway_context, variables=runway_config.variables)

        obj.tests = [  # type: ignore
            MagicMock(type="fail_system_exit_1", required=False),
            MagicMock(type="fail_system_exit_0"),
        ]
        obj.tests[0].name = "fail_system_exit_1"
        with pytest.raises(SystemExit) as excinfo:
            assert not obj.test()
        assert excinfo.value.code == 1
        assert "the following tests failed: fail_system_exit_1" in caplog.messages
        test_handlers["fail_system_exit_1"].handle.assert_called_with(
            obj.tests[0].name, obj.tests[0].args)
        test_handlers["fail_system_exit_0"].handle.assert_called_with(
            obj.tests[1].name, obj.tests[1].args)
        caplog.clear()

        obj.tests = [  # type: ignore
            MagicMock(type="exception", required=True),
            MagicMock(type="success"),
        ]
        obj.tests[0].name = "exception"
        with pytest.raises(SystemExit) as excinfo:
            assert not obj.test()
        assert excinfo.value.code == 1
        assert "exception:running test (fail)" in caplog.messages
        assert (
            "exception:test required; the remaining tests have been skipped"
            in caplog.messages)
        test_handlers["exception"].handle.assert_called_with(
            obj.tests[0].name, obj.tests[0].args)
        assert test_handlers["success"].handle.call_count == 1
Example #24
async def test_turn_on_off(mock_aio_protocol,
                           caplog: pytest.LogCaptureFixture):
    """Test we can turn on and off."""
    light = AIOWifiLedBulb("192.168.1.166")

    def _updated_callback(*args, **kwargs):
        pass

    task = asyncio.create_task(light.async_setup(_updated_callback))
    await mock_aio_protocol()
    light._aio_protocol.data_received(
        b"\x81\x25\x23\x61\x05\x10\xb6\x00\x98\x19\x04\x25\x0f\xde")
    await task

    task = asyncio.create_task(light.async_turn_off())
    # Wait for the future to get added
    await asyncio.sleep(0)
    light._aio_protocol.data_received(
        b"\x81\x25\x24\x61\x05\x10\xb6\x00\x98\x19\x04\x25\x0f\xdf")
    await asyncio.sleep(0)
    assert light.is_on is False
    await task

    task = asyncio.create_task(light.async_turn_on())
    await asyncio.sleep(0)
    light._aio_protocol.data_received(
        b"\x81\x25\x23\x61\x05\x10\xb6\x00\x98\x19\x04\x25\x0f\xde")
    await asyncio.sleep(0)
    assert light.is_on is True
    await task

    await asyncio.sleep(0)
    caplog.clear()
    caplog.set_level(logging.DEBUG)
    # Handle the failure case
    with patch.object(aiodevice, "POWER_STATE_TIMEOUT", 0.05):
        await asyncio.create_task(light.async_turn_off())
        assert light.is_on is True
        assert "Failed to turn off (1/3)" in caplog.text
        assert "Failed to turn off (2/3)" in caplog.text
        assert "Failed to turn off (3/3)" in caplog.text

    with patch.object(aiodevice, "POWER_STATE_TIMEOUT", 0.05):
        task = asyncio.create_task(light.async_turn_off())
        # Do NOT wait for the future to get added; we know the retry logic works
        light._aio_protocol.data_received(
            b"\x81\x25\x24\x61\x05\x10\xb6\x00\x98\x19\x04\x25\x0f\xdf")
        await asyncio.sleep(0)
        assert light.is_on is False
        await task

    await asyncio.sleep(0)
    caplog.clear()
    caplog.set_level(logging.DEBUG)
    # Handle the failure case
    with patch.object(aiodevice, "POWER_STATE_TIMEOUT", 0.05):
        await asyncio.create_task(light.async_turn_on())
        assert light.is_on is False
        assert "Failed to turn on (1/3)" in caplog.text
        assert "Failed to turn on (2/3)" in caplog.text
        assert "Failed to turn on (3/3)" in caplog.text
Example #25
    def test_resize(self, patch_curses: Any,
                    caplog: pytest.LogCaptureFixture) -> None:
        """Test `cobib.tui.tui.TUI.resize_handler`.

        Args:
            patch_curses: the `tests.tui.tui_test.TUITest.patch_curses` fixture.
            caplog: the built-in pytest fixture.
        """
        stdscr = MockCursesPad()
        stdscr.size = (24, 80)
        tui = TUI(stdscr, debug=True)
        caplog.clear()

        tui.height, tui.width = (12, 70)
        tui.resize_handler(None, None)
        assert tui.width == 70
        assert tui.height == 12
        assert tui.topbar.size[1] == 70  # type: ignore
        assert tui.botbar.size[1] == 70  # type: ignore
        assert tui.prompt.size[1] == 70  # type: ignore
        expected_log = [
            ("TUITest", 10, "resize_term"),
            ("MockCursesPad", 10, "keypad: True"),
            ("MockCursesPad", 10, "clear"),
            ("MockCursesPad", 10, "refresh: None None None None None None"),
            ("MockCursesPad", 10, "resize: 1 70"),
            ("MockCursesPad", 10, "erase"),
            ("MockCursesPad", 10, "getmaxyx"),
            ("MockCursesPad", 10,
             "addnstr: 0 0 coBib VERSION - 3 Entries 69 0"),  # will be skipped
            ("MockCursesPad", 10, "refresh: None None None None None None"),
            ("MockCursesPad", 10, "refresh: None None None None None None"),
            ("MockCursesPad", 10, "resize: 1 70"),
            ("MockCursesPad", 10, "mvwin: 10 0"),
            ("MockCursesPad", 10, "erase"),
            ("MockCursesPad", 10, "getmaxyx"),
            (
                "MockCursesPad",
                10,
                "addnstr: 0 0 a:Add d:Delete e:Edit x:Export f:Filter ?:Help i:Import m:Modify "
                "o:Open ::Prompt q:Quit r:Redo /:Search v:Select ENTER:Show s:Sort u:Undo w:Wrap "
                "69 0",
            ),
            ("MockCursesPad", 10, "refresh: None None None None None None"),
            ("MockCursesPad", 10, "refresh: None None None None None None"),
            ("MockCursesPad", 10, "resize: 1 70"),
            ("MockCursesPad", 10, "refresh: 0 0 11 0 12 69"),
            ("MockCursesPad", 10, "refresh: 0 0 1 0 9 69"),
        ]
        for log, truth in zip(
                expected_log,
            [
                record for record in caplog.record_tuples
                if record[0] in ("MockCursesPad", "TUITest")
            ],
        ):
            assert log[0] == truth[0]
            assert log[1] == truth[1]
            if truth[2].startswith("addnstr: 0 0 coBib v"):
                # skip version-containing log
                continue
            assert log[2] == truth[2]