Example #1
def test_eth_ticker(
        empty_proxy: PaywalledProxy,
        session: Session,
        sender_privkey: str,
        receiver_privkey: str,
        monkeypatch: MonkeyPatch
):
    def get_patched(*args, **kwargs):
        body = {
            'mid': '682.435', 'bid': '682.18', 'ask': '682.69', 'last_price': '683.16',
            'low': '532.97', 'high': '684.0', 'volume': '724266.25906224',
            'timestamp': '1513167820.721733'
        }
        return jsonify(body)

    monkeypatch.setattr(PaywalledProxyUrl, 'get', get_patched)

    ETHTickerProxy(receiver_privkey, proxy=empty_proxy)
    ticker = ETHTickerClient(sender_privkey, session=session, poll_interval=0.5)

    def post():
        ticker.close()

        assert ticker.pricevar.get() == '683.16 USD'
        assert len(session.client.get_open_channels()) == 0
        ticker.success = True

    session.close_channel_on_exit = True
    ticker.success = False
    ticker.root.after(1500, post)
    ticker.run()
    assert ticker.success
Example #2
    def _prepare(self):
        self.counters = {}
        basename = None
        cli_tempdir_basename = self.config.getvalue('tempdir_basename')
        if cli_tempdir_basename is not None:
            basename = cli_tempdir_basename
        else:
            # Check for a pytest_tempdir_basename hook implementation
            basename = self.config.hook.pytest_tempdir_basename()
        if basename is None:
            # If basename is still None by now, use the current directory name
            basename = os.path.basename(py.path.local().strpath)  # pylint: disable=no-member
        mpatch = MonkeyPatch()
        temproot = py.path.local.get_temproot()  # pylint: disable=no-member
        # Resolve the full real path to the tempdir
        tempdir = temproot.join(basename).realpath()
        if tempdir.exists():
            # If it exists, it's a stale tempdir. Remove it.
            log.warning('Removing stale tempdir: %s', tempdir.strpath)
            tempdir.remove(rec=True, ignore_errors=True)
        # Make sure the tempdir is created
        tempdir.ensure(dir=True)
        # Store a reference to the tempdir for cleanup purposes when ending
        # the test session
        mpatch.setattr(self.config, '_tempdir', self, raising=False)
        # Register the cleanup actions
        self.config._cleanup.extend([
            mpatch.undo,
            self._clean_up_tempdir
        ])
        self.tempdir = tempdir
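
A minimal sketch of the hook implementation this method looks for, assuming the pytest-tempdir plugin's pytest_tempdir_basename hookspec; the returned name is hypothetical:

# conftest.py
def pytest_tempdir_basename():
    # Directory name to create under the system temp root.
    return 'my-project-tests'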
Example #3
def test_issue156_undo_staticmethod(Sample):
    monkeypatch = MonkeyPatch()

    monkeypatch.setattr(Sample, "hello", None)
    assert Sample.hello is None

    monkeypatch.undo()
    assert Sample.hello()
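
The Sample argument is a fixture; a plausible minimal definition, assuming the test only needs a hello staticmethod (the fixture body is a sketch):

import pytest

@pytest.fixture
def Sample():
    class Sample:
        @staticmethod
        def hello():
            return True

    return Sample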
Example #4
def test_client_private_key_path(
        patched_contract,
        monkeypatch: MonkeyPatch,
        sender_privkey: str,
        tmpdir: LocalPath,
        web3: Web3,
        channel_manager_address: str
):
    def check_permission_safety_patched(path: str):
        return True

    monkeypatch.setattr(
        microraiden.utils.private_key,
        'check_permission_safety',
        check_permission_safety_patched
    )

    privkey_file = tmpdir.join('private_key.txt')
    privkey_file.write(sender_privkey)

    with pytest.raises(AssertionError):
        Client(
            private_key='0xthis_is_not_a_private_key',
            channel_manager_address=channel_manager_address,
            web3=web3
        )

    with pytest.raises(AssertionError):
        Client(
            private_key='0xcorrect_length_but_still_not_a_private_key_12345678901234567',
            channel_manager_address=channel_manager_address,
            web3=web3
        )

    with pytest.raises(AssertionError):
        Client(
            private_key='/nonexisting/path',
            channel_manager_address=channel_manager_address,
            web3=web3
        )

    Client(
        private_key=sender_privkey,
        channel_manager_address=channel_manager_address,
        web3=web3
    )

    Client(
        private_key=sender_privkey[2:],
        channel_manager_address=channel_manager_address,
        web3=web3
    )

    Client(
        private_key=str(tmpdir.join('private_key.txt')),
        channel_manager_address=channel_manager_address,
        web3=web3
    )
Example #5
def pytest_configure(config):
    """Create a TempdirFactory and attach it to the config object.

    This is to comply with existing plugins which expect the handler to be
    available at pytest_configure time, but ideally should be moved entirely
    to the tmpdir_factory session fixture.
    """
    mp = MonkeyPatch()
    t = TempdirFactory(config)
    config._cleanup.extend([mp.undo, t.finish])
    mp.setattr(config, '_tmpdirhandler', t, raising=False)
    mp.setattr(pytest, 'ensuretemp', t.ensuretemp, raising=False)
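
A sketch of the legacy access pattern the docstring refers to; the plugin module and temp-directory basename are hypothetical:

# some_plugin.py
def pytest_configure(config):
    # Available from configure time onward, as attached above.
    factory = config._tmpdirhandler
    plugin_tmp = factory.mktemp('some-plugin', numbered=True)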
Example #6
    def pytest_configure(self, config):
        if not config.getoption('ast_as_python'):
            return

        mp = MonkeyPatch()
        mp.setattr(
            '_pytest.assertion.rewrite.rewrite_asserts',
            make_replacement_rewrite_asserts(self.store))

        # written pyc files will bypass our patch, so disable reading them
        mp.setattr(
            '_pytest.assertion.rewrite._read_pyc',
            lambda source, pyc, trace=None: None)

        config._cleanup.append(mp.undo)
Example #7
def block_unmocked_requests():
    """
    Prevents requests from being made unless they are mocked.

    Helps avoid inadvertent dependencies on external resources during the test run.
    """
    def mocked_send(*args, **kwargs):
        raise RuntimeError('Tests must mock all HTTP requests!')

    # The standard monkeypatch fixture cannot be used with session scope:
    # https://github.com/pytest-dev/pytest/issues/363
    monkeypatch = MonkeyPatch()
    # Monkeypatching here since any higher level would break responses:
    # https://github.com/getsentry/responses/blob/0.5.1/responses.py#L295
    monkeypatch.setattr('requests.adapters.HTTPAdapter.send', mocked_send)
    yield monkeypatch
    monkeypatch.undo()
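
Given the session-scope limitation noted above, the generator is presumably registered manually as a fixture; a sketch (the fixture name is an assumption):

import pytest

block_all_requests = pytest.fixture(scope='session', autouse=True)(
    block_unmocked_requests)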
Example #8
def test_delattr():
    class A:
        x = 1

    monkeypatch = MonkeyPatch()
    monkeypatch.delattr(A, 'x')
    assert not hasattr(A, 'x')
    monkeypatch.undo()
    assert A.x == 1

    monkeypatch = MonkeyPatch()
    monkeypatch.delattr(A, 'x')
    with pytest.raises(AttributeError):
        monkeypatch.delattr(A, 'y')
    monkeypatch.delattr(A, 'y', raising=False)
    monkeypatch.setattr(A, 'x', 5, raising=False)
    assert A.x == 5
    monkeypatch.undo()
    assert A.x == 1
Example #9
def test_delattr():
    class A(object):
        x = 1

    monkeypatch = MonkeyPatch()
    monkeypatch.delattr(A, "x")
    assert not hasattr(A, "x")
    monkeypatch.undo()
    assert A.x == 1

    monkeypatch = MonkeyPatch()
    monkeypatch.delattr(A, "x")
    with pytest.raises(AttributeError):
        monkeypatch.delattr(A, "y")
    monkeypatch.delattr(A, "y", raising=False)
    monkeypatch.setattr(A, "x", 5, raising=False)
    assert A.x == 5
    monkeypatch.undo()
    assert A.x == 1
Example #10
def test_model_finetuning_nlu(
    tmp_path: Path,
    monkeypatch: MonkeyPatch,
    trained_nlu_moodbot_path: Text,
    use_latest_model: bool,
):
    mocked_nlu_training = AsyncMock(return_value="")
    monkeypatch.setattr(rasa.nlu, rasa.nlu.train.__name__, mocked_nlu_training)

    mock_interpreter_create = Mock(wraps=Interpreter.create)
    monkeypatch.setattr(Interpreter, "create", mock_interpreter_create)

    mock_DIET_load = Mock(wraps=DIETClassifier.load)
    monkeypatch.setattr(DIETClassifier, "load", mock_DIET_load)

    (tmp_path / "models").mkdir()
    output = str(tmp_path / "models")

    if use_latest_model:
        trained_nlu_moodbot_path = str(Path(trained_nlu_moodbot_path).parent)

    # Typically models are fine-tuned with fewer epochs than when training
    # from scratch. Fine-tuning uses the number of epochs from the new config,
    # scaled by `finetuning_epoch_fraction` (here 10 * 0.2 = 2, as asserted below).
    old_config = rasa.shared.utils.io.read_yaml_file(
        "examples/moodbot/config.yml")
    old_config["pipeline"][-1][EPOCHS] = 10
    new_config_path = tmp_path / "new_config.yml"
    rasa.shared.utils.io.write_yaml(old_config, new_config_path)

    old_nlu = rasa.shared.utils.io.read_yaml_file(
        "examples/moodbot/data/nlu.yml")
    old_nlu["nlu"][-1]["examples"] = "-something else"
    new_nlu_path = tmp_path / "new_nlu.yml"
    rasa.shared.utils.io.write_yaml(old_nlu, new_nlu_path)

    train_nlu(
        str(new_config_path),
        str(new_nlu_path),
        domain="examples/moodbot/domain.yml",
        output=output,
        model_to_finetune=trained_nlu_moodbot_path,
        finetuning_epoch_fraction=0.2,
    )

    assert mock_interpreter_create.call_args[1]["should_finetune"]

    mocked_nlu_training.assert_called_once()
    _, nlu_train_kwargs = mocked_nlu_training.call_args
    model_to_finetune = nlu_train_kwargs["model_to_finetune"]
    assert isinstance(model_to_finetune, Interpreter)

    _, diet_kwargs = mock_DIET_load.call_args
    assert diet_kwargs["should_finetune"] is True

    new_diet_metadata = model_to_finetune.model_metadata.metadata["pipeline"][
        -1]
    assert new_diet_metadata["name"] == "DIETClassifier"
    assert new_diet_metadata[EPOCHS] == 2
Example #11
async def test_fail_on_prediction_errors(
    monkeypatch: MonkeyPatch,
    tmp_path: Path,
    _train_rule_based_agent: Callable[[Path, bool], Coroutine],
):
    monkeypatch.setattr(
        DefaultPolicyPredictionEnsemble,
        "combine_predictions_from_kwargs",
        _probabilities_with_action_unlikely_intent_for(["mood_unhappy"]),
    )

    file_name = tmp_path / "test_action_unlikely_intent_2.yml"
    file_name.write_text(f"""
        version: "{LATEST_TRAINING_DATA_FORMAT_VERSION}"
        stories:
          - story: unlikely path (with action_unlikely_intent)
            steps:
              - user: |
                  very terrible
                intent: mood_unhappy
              - action: utter_cheer_up
              - action: action_unlikely_intent
              - action: utter_did_that_help
              - intent: affirm
              - action: utter_happy
        """)

    # We train on the above story so that RulePolicy can memorize
    # it and we don't have to worry about other actions being
    # predicted correctly.
    agent = await _train_rule_based_agent(file_name, False)

    with pytest.raises(rasa.core.test.WrongPredictionException):
        await rasa.core.test.test(
            str(file_name),
            agent,
            out_directory=str(tmp_path),
            fail_on_prediction_errors=True,
        )
Example #12
def test_forecast_model_raises_configuration_exception(
        monkeypatch: MonkeyPatch) -> None:
    mock_model_config = Mock(
        spec=BaseModelConfig,
        DEFAULT_HYPER_PARAMS=BaseModelConfig.DEFAULT_HYPER_PARAMS,
        HYPER_SPACE=BaseModelConfig.HYPER_SPACE,
        OVERRIDE_HYPER_PARAMS={},
    )

    mock_time_series = Mock(spec=TimeSeries)
    monkeypatch.setattr(H2OGradientBoostingModel, "train",
                        Mock(side_effect=ValueError("Test Value Error")))

    with pytest.raises(
            ConfigurationException,
            match=
            ".*Please check configuration of --prediction-start-month or --prediction-end-month.",
    ):
        forecast.forecast_model(mock_model_config,
                                mock_time_series,
                                Mock(spec=DataOutput),
                                optimize_hyperparameters=False)
Example #13
def readline_param(request):
    m = MonkeyPatch()

    if request.param == "pyrepl":
        old_stdin = sys.stdin

        class fake_stdin:
            """Missing fileno() to skip pyrepl.readline._setup.

            This is required to make tests not hang without capturing (`-s`)."""

        sys.stdin = fake_stdin()
        try:
            import pyrepl.readline  # noqa: F401
        except ImportError as exc:
            pytest.skip(msg="pyrepl not available: {}".format(exc))
        finally:
            sys.stdin = old_stdin
        m.setattr("fancycompleter.DefaultConfig.prefer_pyrepl", True)
    else:
        m.setattr("fancycompleter.DefaultConfig.prefer_pyrepl", False)
    return request.param
Example #14
def test_ensure_schema_exists(
    monkeypatch: MonkeyPatch,
    is_postgres: bool,
    schema_env: Optional[Text],
    schema_exists: bool,
    raises_context: ContextManager,
):
    set_or_delete_postgresql_schema_env_var(monkeypatch, schema_env)
    monkeypatch.setattr(
        rasa.core.tracker_store, "is_postgresql_url", lambda _: is_postgres
    )
    monkeypatch.setattr(sqlalchemy, "exists", Mock())

    # mock the `session.query().scalar()` query which returns whether the schema
    # exists in the db
    scalar = Mock(return_value=schema_exists)
    query = Mock(scalar=scalar)
    session = Mock()
    session.query = Mock(return_value=query)

    with raises_context:
        rasa.core.tracker_store.ensure_schema_exists(session)
Example #15
def test_no_interactive_without_core_data(default_stack_config: Text,
                                          monkeypatch: MonkeyPatch) -> None:
    parser = argparse.ArgumentParser()
    sub_parser = parser.add_subparsers()
    interactive.add_subparser(sub_parser, [])

    args = parser.parse_args([
        "interactive", "--config", default_stack_config, "--data",
        DEFAULT_NLU_DATA
    ])
    interactive._set_not_required_args(args)

    mock = Mock()
    monkeypatch.setattr(train, "train", mock.train_model)
    monkeypatch.setattr(interactive, "perform_interactive_learning",
                        mock.perform_interactive_learning)

    with pytest.raises(SystemExit):
        interactive.interactive(args)

    mock.train_model.assert_not_called()
    mock.perform_interactive_learning.assert_not_called()
Example #16
def test_compare_structure_database(prepare_database: Iterator[None],
                                    cli_runner: CliRunner,
                                    monkeypatch: MonkeyPatch,
                                    caplog: LogCaptureFixture) -> None:
    def mock_get_expected_structure(
        test_parameters: forecast_structure.ExpectedForecastStructureParameters,
    ) -> DataFrameStructure:
        if test_parameters.account_name == "model_run_with_one_forecast_data_row":
            number_of_rows = 1
        elif test_parameters.account_name == "model_run_with_zero_forecast_data_rows":
            number_of_rows = 0
        else:
            assert False, "Inconsistent test setup, check test model names in fixture"
        return DataFrameStructure(
            columns=pd.Index(["0"], dtype="object"),
            dtypes=pd.Series([np.dtype("object")]),
            shape=(number_of_rows, 1),
        )

    monkeypatch.setattr(compare_structure_database_helpers,
                        "get_expected_forecast_structure",
                        mock_get_expected_structure)
    monkeypatch.setattr(master_config, "model_configs",
                        ["ModelConfigAccount1", "ModelConfigAccount2"])

    # Because we always query all values in the dsx_write database, it is hard
    # to achieve a reproducible, consistent state in an integration test. As a
    # workaround, the dsx outputs are not asserted here.
    monkeypatch.setattr(compare_structure_database, "assert_dsx_output_count",
                        Mock())
    monkeypatch.setattr(compare_structure_database,
                        "assert_dsx_output_total_sum", Mock())

    result = cli_runner.invoke(
        compare_structure_database.compare_structure_database_command)

    assert result.exit_code == 0
    assert "Asserted number of completed model_runs (2)" in caplog.messages
    assert any(
        re.search(
            r"Asserted forecast data count \(1\) for model_run_with_one_forecast_data_row.*",
            message) for message in caplog.messages)
    assert any(
        re.search(
            r"Asserted forecast data count \(0\) for model_run_with_zero_forecast_data_rows.*",
            message) for message in caplog.messages)
    assert re.search(
        r"All database entries for last production run have valid structure",
        result.output, re.MULTILINE)
Example #17
async def test_correct_question_for_action_name_was_asked(
    monkeypatch: MonkeyPatch,
    mock_endpoint: EndpointConfig,
    action_name: Text,
    question: Text,
    is_marked_as_correct: bool,
    sent_action_name: Text,
):
    conversation_id = "conversation_id"
    policy = "policy"
    tracker = DialogueStateTracker.from_events("some_sender", [])

    monkeypatch.setattr(
        interactive,
        "retrieve_tracker",
        AsyncMock(return_value=tracker.current_state()),
    )
    monkeypatch.setattr(
        interactive, "_ask_questions", AsyncMock(return_value=is_marked_as_correct)
    )
    monkeypatch.setattr(
        interactive,
        "_request_action_from_user",
        AsyncMock(return_value=("action_another_one", False,)),
    )

    mocked_send_action = AsyncMock()
    monkeypatch.setattr(interactive, "send_action", mocked_send_action)

    mocked_confirm = Mock(return_value=None)
    monkeypatch.setattr(interactive.questionary, "confirm", mocked_confirm)

    # validate the action and make sure that the correct question was asked
    await interactive._validate_action(
        action_name, policy, 1.0, [], mock_endpoint, conversation_id
    )
    mocked_confirm.assert_called_once_with(question)
    args, kwargs = mocked_send_action.call_args_list[-1]
    assert args[2] == sent_action_name
Example #18
def test_dockerfile_tmpdir_prefix(tmp_path: Path,
                                  monkeypatch: MonkeyPatch) -> None:
    """Test that DockerCommandLineJob.get_image respects temp directory directives."""
    monkeypatch.setattr(target=subprocess,
                        name="check_call",
                        value=lambda *args, **kwargs: True)
    tmp_outdir_prefix = tmp_path / "1"
    assert DockerCommandLineJob.get_image(
        {
            "class": "DockerRequirement",
            "dockerFile": "FROM debian:stable",
            "dockerImageId": sys._getframe().f_code.co_name,
        },
        pull_image=True,
        force_pull=True,
        tmp_outdir_prefix=str(tmp_outdir_prefix),
    )
    children = sorted(tmp_outdir_prefix.parent.glob("*"))
    assert len(children) == 1
    subdir = tmp_path / children[0]
    assert len(sorted(subdir.glob("*"))) == 1
    assert (subdir / "Dockerfile").exists()
Example #19
def test_custom_intent_symbol(text: Text, expected_tokens: List[Text],
                              monkeypatch: MonkeyPatch):

    monkeypatch.setattr(ConveRTTokenizer, "_get_validated_model_url",
                        lambda x: RESTRICTED_ACCESS_URL)

    component_config = {
        "name": "ConveRTTokenizer",
        "model_url": RESTRICTED_ACCESS_URL,
        "intent_tokenization": True,
        "intent_split_symbol": "+",
    }

    tokenizer = ConveRTTokenizer(component_config)

    message = Message(data={TEXT: text})
    message.set(INTENT, text)

    tokenizer.train(TrainingData([message]))

    assert [t.text
            for t in message.get(TOKENS_NAMES[INTENT])] == expected_tokens
Example #20
def test_routing_disjoint_case2(token_networks: List[TokenNetwork],
                                populate_token_networks_case_2: None,
                                addresses: List[Address],
                                monkeypatch: MonkeyPatch):
    token_network = token_networks[0]

    # test default diversity penalty
    paths = token_network.get_paths(addresses[0], addresses[4], value=10, k=3)
    assert len(paths) == 3
    assert paths[0]['path'] == [
        addresses[0], addresses[2], addresses[5], addresses[4]
    ]
    assert paths[0]['estimated_fee'] == 3000

    assert paths[1]['path'] == [
        addresses[0], addresses[2], addresses[3], addresses[4]
    ]
    assert paths[1]['estimated_fee'] == 3500

    assert paths[2]['path'] == [addresses[0], addresses[1], addresses[4]]
    assert paths[2]['estimated_fee'] == 5000

    # set diversity penalty higher
    monkeypatch.setattr(pathfinder.model.token_network,
                        'DIVERSITY_PEN_DEFAULT', 10000)
    paths = token_network.get_paths(addresses[0], addresses[4], value=10, k=3)
    assert len(paths) == 3
    assert paths[0]['path'] == [
        addresses[0], addresses[2], addresses[5], addresses[4]
    ]
    assert paths[0]['estimated_fee'] == 3000

    assert paths[1]['path'] == [addresses[0], addresses[1], addresses[4]]
    assert paths[1]['estimated_fee'] == 5000

    assert paths[2]['path'] == [
        addresses[0], addresses[2], addresses[3], addresses[4]
    ]
    assert paths[2]['estimated_fee'] == 3500
Example #21
def mock_environment(
    event_loop: AbstractEventLoop,
    monkeypatch_module: MonkeyPatch,
    mock_dy_volumes: Path,
    compose_namespace: str,
    inputs_dir: Path,
    outputs_dir: Path,
    state_paths_dirs: List[Path],
    state_exclude_dirs: List[Path],
    user_id: UserID,
    project_id: ProjectID,
    node_id: NodeID,
    rabbit_service: RabbitSettings,
) -> None:
    monkeypatch_module.setenv("SC_BOOT_MODE", "production")
    monkeypatch_module.setenv("DYNAMIC_SIDECAR_COMPOSE_NAMESPACE",
                              compose_namespace)
    monkeypatch_module.setenv("REGISTRY_AUTH", "false")
    monkeypatch_module.setenv("REGISTRY_USER", "test")
    monkeypatch_module.setenv("REGISTRY_PW", "test")
    monkeypatch_module.setenv("REGISTRY_SSL", "false")
    monkeypatch_module.setenv("DY_SIDECAR_USER_ID", f"{user_id}")
    monkeypatch_module.setenv("DY_SIDECAR_PROJECT_ID", f"{project_id}")
    monkeypatch_module.setenv("DY_SIDECAR_NODE_ID", f"{node_id}")
    monkeypatch_module.setenv("DY_SIDECAR_PATH_INPUTS", str(inputs_dir))
    monkeypatch_module.setenv("DY_SIDECAR_PATH_OUTPUTS", str(outputs_dir))
    monkeypatch_module.setenv("DY_SIDECAR_STATE_PATHS",
                              json.dumps([str(x) for x in state_paths_dirs]))
    monkeypatch_module.setenv("DY_SIDECAR_STATE_EXCLUDE",
                              json.dumps([str(x) for x in state_exclude_dirs]))
    # TODO: PC->ANE: this is already guaranteed in the pytest_simcore.rabbit_service fixture
    monkeypatch_module.setenv("RABBIT_HOST", rabbit_service.RABBIT_HOST)
    monkeypatch_module.setenv("RABBIT_PORT", f"{rabbit_service.RABBIT_PORT}")
    monkeypatch_module.setenv("RABBIT_USER", rabbit_service.RABBIT_USER)
    monkeypatch_module.setenv(
        "RABBIT_PASSWORD", rabbit_service.RABBIT_PASSWORD.get_secret_value())
    # ---

    monkeypatch_module.setattr(mounted_fs, "DY_VOLUMES", mock_dy_volumes)
Example #22
def mock_no_user__sync_config():
    """
    Mock user configuration path
    """
    def exists(self):
        if self in DEFAULT_CONFIGURATION_PATHS:
            return False
        return os.path.exists(str(self))

    def is_file(self):
        if self in DEFAULT_CONFIGURATION_PATHS:
            return False
        return os.path.isfile(str(self))

    # pylint: disable=import-outside-toplevel
    from _pytest.monkeypatch import MonkeyPatch
    monkeypatch = MonkeyPatch()
    monkeypatch.setattr(Path, 'exists', exists)
    monkeypatch.setattr(Path, 'is_file', is_file)

    yield monkeypatch
    monkeypatch.undo()
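
A sketch of how a test might consume this fixture, assuming it is registered in conftest.py under the same name (the test body is hypothetical):

def test_defaults_ignored_without_user_config(mock_no_user__sync_config):
    # With the fixture active, default configuration paths report as absent.
    assert not any(path.exists() for path in DEFAULT_CONFIGURATION_PATHS)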
Example #23
def test_model_finetuning(
    tmp_path: Path,
    monkeypatch: MonkeyPatch,
    default_domain_path: Text,
    default_stories_file: Text,
    default_stack_config: Text,
    default_nlu_data: Text,
    trained_rasa_model: Text,
    use_latest_model: bool,
):
    mocked_nlu_training = Mock(wraps=rasa.nlu.train)
    monkeypatch.setattr(rasa.nlu, rasa.nlu.train.__name__, mocked_nlu_training)

    mocked_core_training = Mock(wraps=rasa.core.train)
    monkeypatch.setattr(rasa.core, rasa.core.train.__name__, mocked_core_training)

    (tmp_path / "models").mkdir()
    output = str(tmp_path / "models")

    if use_latest_model:
        trained_rasa_model = str(Path(trained_rasa_model).parent)

    train(
        default_domain_path,
        default_stack_config,
        [default_stories_file, default_nlu_data],
        output=output,
        force_training=True,
        model_to_finetune=trained_rasa_model,
        finetuning_epoch_fraction=0.1,
    )

    mocked_core_training.assert_called_once()
    _, kwargs = mocked_core_training.call_args
    assert isinstance(kwargs["model_to_finetune"], Agent)

    mocked_nlu_training.assert_called_once()
    _, kwargs = mocked_nlu_training.call_args
    assert isinstance(kwargs["model_to_finetune"], Interpreter)
Example #24
def test_attention_mask(
    actual_sequence_length: int,
    max_input_sequence_length: int,
    zero_start_index: int,
    create_language_model_featurizer: Callable[
        [Dict[Text, Any]], LanguageModelFeaturizerGraphComponent
    ],
    monkeypatch: MonkeyPatch,
):
    monkeypatch.setattr(
        LanguageModelFeaturizerGraphComponent, "_load_model_instance", lambda _: None,
    )
    component = create_language_model_featurizer({"model_name": "bert"})

    attention_mask = component._compute_attention_mask(
        [actual_sequence_length], max_input_sequence_length
    )
    mask_ones = attention_mask[0][:zero_start_index]
    mask_zeros = attention_mask[0][zero_start_index:]

    assert np.all(mask_ones == 1)
    assert np.all(mask_zeros == 0)
Example #25
def test_role_refs_validator(name, mode, parent, test_id, code,
                             tmp_trestle_dir: pathlib.Path,
                             monkeypatch: MonkeyPatch) -> None:
    """Test validation of roles and references to them in responsible-parties."""
    (tmp_trestle_dir / 'assessment-plans/my_ap').mkdir(exist_ok=True,
                                                       parents=True)
    roles = [
        Role(id='id1', title='title1'),
        Role(id='id2', title='title2'),
        Role(id='id3', title='title3')
    ]
    party1 = ResponsibleParty(role_id=test_id,
                              party_uuids=[PartyUuid(__root__=str(uuid4()))])
    party2 = ResponsibleParty(role_id='id2',
                              party_uuids=[PartyUuid(__root__=str(uuid4()))])
    responsible_parties = [party1, party2]
    ap_obj = generate_sample_model(ap.AssessmentPlan)
    ap_obj.metadata.roles = roles
    ap_obj.metadata.responsible_parties = responsible_parties
    ap_path = tmp_trestle_dir / 'assessment-plans/my_ap/assessment-plan.json'
    ap_obj.oscal_write(ap_path)

    if mode == '-f':
        if not parent:
            testcmd = f'trestle validate {mode} {ap_path}'
        else:
            testcmd = f'trestle validate {mode} {ap_path.parent}'
    elif mode == '-n':
        testcmd = f'trestle validate -t assessment-plan -n {name}'
    elif mode == '-t':
        testcmd = 'trestle validate -t assessment-plan'
    else:
        testcmd = 'trestle validate -a'

    monkeypatch.setattr(sys, 'argv', testcmd.split())
    with pytest.raises(SystemExit) as pytest_wrapped_e:
        cli.run()
    assert pytest_wrapped_e.type == SystemExit
    assert pytest_wrapped_e.value.code == code
Example #26
def test_compare_structure_database_if_different_number_of_model_runs(
        prepare_database: Iterator[None], cli_runner: CliRunner,
        monkeypatch: MonkeyPatch, caplog: LogCaptureFixture) -> None:

    monkeypatch.setattr(master_config, "model_configs",
                        ["ModelConfigAccount1", "ModelConfigAccount2"])
    monkeypatch.setattr(compare_structure_database,
                        "get_model_run_ids_for_forecast_run",
                        Mock(return_value=["ModelConfigAccount1"]))

    result = cli_runner.invoke(
        compare_structure_database.compare_structure_database_command)

    assert result.exit_code == 1
    assert (
        "Number of completed model runs (1) does not match the number of model_configs in master_config.py (2)"
        in caplog.messages)
    assert re.search(
        r"Number of completed model runs \(1\) does not match the number of model_configs in master_config.py \(2\)",
        result.output,
        re.MULTILINE,
    )
Example #27
def test_session_scope_error(monkeypatch: MonkeyPatch, capsys: CaptureFixture,
                             domain: Domain):
    tracker_store = SQLTrackerStore(domain)
    tracker_store.sessionmaker = Mock()

    requested_schema = uuid.uuid4().hex

    # `ensure_schema_exists()` raises `ValueError`
    mocked_ensure_schema_exists = Mock(
        side_effect=ValueError(requested_schema))
    monkeypatch.setattr(rasa.core.tracker_store, "ensure_schema_exists",
                        mocked_ensure_schema_exists)

    # `SystemExit` is triggered by failing `ensure_schema_exists()`
    with pytest.raises(SystemExit):
        with tracker_store.session_scope() as _:
            pass

    # error message is printed
    assert (
        f"Requested PostgreSQL schema '{requested_schema}' was not found in the "
        f"database." in capsys.readouterr()[0])
Example #28
    def test_restore(self, monkeypatch: MonkeyPatch, path_type) -> None:
        other_path_type = self.other_path[path_type]
        for i in range(10):
            assert self.path(i) not in getattr(sys, path_type)
        sys_path = [self.path(i) for i in range(6)]
        monkeypatch.setattr(sys, path_type, sys_path)
        original = list(sys_path)
        original_other = list(getattr(sys, other_path_type))
        snapshot = SysPathsSnapshot()
        transformation = {"source": (0, 1, 2, 3, 4, 5), "target": (6, 2, 9, 7, 5, 8)}
        assert sys_path == [self.path(x) for x in transformation["source"]]
        sys_path[1] = self.path(6)
        sys_path[3] = self.path(7)
        sys_path.append(self.path(8))
        del sys_path[4]
        sys_path[3:3] = [self.path(9)]
        del sys_path[0]
        assert sys_path == [self.path(x) for x in transformation["target"]]
        snapshot.restore()
        assert getattr(sys, path_type) is sys_path
        assert getattr(sys, path_type) == original
        assert getattr(sys, other_path_type) == original_other
Example #29
def test_render_w_vega_cli_options(
    monkeypatch: MonkeyPatch,
    capsys: SysCapture,
    chart: alt.TopLevelMixin,
    vega_cli_options: Optional[List[str]],
) -> None:
    """Tests that `vega_cli_options` works with both NodeSaver and other Savers"""
    monkeypatch.setattr(NodeSaver, "enabled", lambda: True)
    monkeypatch.setattr(SeleniumSaver, "enabled", lambda: False)
    bundle = render(chart, fmts=["html", "svg"], vega_cli_options=vega_cli_options)
    assert len(bundle) == 2
    for mimetype, content in bundle.items():
        assert content is not None
        fmt = mimetype_to_fmt(mimetype)
        if isinstance(content, dict):
            check_output(json.dumps(content), fmt)
        else:
            check_output(content, fmt)

    stderr = capsys.readouterr().err
    if vega_cli_options:
        assert "DEBUG" in stderr
Example #30
def test_pass_conversation_id_to_interactive_learning(
        monkeypatch: MonkeyPatch):
    parser = argparse.ArgumentParser()
    sub_parser = parser.add_subparsers()
    interactive.add_subparser(sub_parser, [])

    expected_conversation_id = "🎁"
    args = parser.parse_args([
        "interactive",
        "--conversation-id",
        expected_conversation_id,
        "--skip-visualization",
    ])

    _serve_application = Mock()
    monkeypatch.setattr(interactive_learning, "_serve_application",
                        _serve_application)

    do_interactive_learning(args, Mock())

    _serve_application.assert_called_once_with(ANY, ANY, True,
                                               expected_conversation_id, 5005)
Example #31
    def test_normalization(
        self,
        trained_policy: Policy,
        tracker: DialogueStateTracker,
        default_domain: Domain,
        monkeypatch: MonkeyPatch,
    ):
        # first check the output is what we expect
        predicted_probabilities = trained_policy.predict_action_probabilities(
            tracker, default_domain, RegexInterpreter()
        ).probabilities
        # there should be no normalization
        assert all([confidence > 0 for confidence in predicted_probabilities])

        # also check our function is not called
        mock = Mock()
        monkeypatch.setattr(train_utils, "normalize", mock.normalize)
        trained_policy.predict_action_probabilities(
            tracker, default_domain, RegexInterpreter()
        )

        mock.normalize.assert_not_called()
Example #32
def test_tmp_path_factory_handles_invalid_dir_characters(
        tmp_path_factory: TempPathFactory, monkeypatch: MonkeyPatch) -> None:
    monkeypatch.setattr("getpass.getuser", lambda: "os/<:*?;>agnostic")
    # _basetemp / _given_basetemp are cached / set in parallel runs, patch them
    monkeypatch.setattr(tmp_path_factory, "_basetemp", None)
    monkeypatch.setattr(tmp_path_factory, "_given_basetemp", None)
    p = tmp_path_factory.getbasetemp()
    assert "pytest-of-unknown" in str(p)
Example #33
async def test_metadata_verify(monkeypatch: MonkeyPatch) -> None:
    fa = FakeArgs()
    fc = FakeConfig()
    monkeypatch.setattr(bandersnatch.verify, "verify_producer", do_nothing)
    monkeypatch.setattr(bandersnatch.verify, "delete_unowned_files",
                        do_nothing)
    monkeypatch.setattr(bandersnatch.verify.os, "listdir", some_dirs)
    await metadata_verify(fc, fa)  # type: ignore
Example #34
async def test_inner_linear_normalization(
    component_builder: ComponentBuilder,
    tmp_path: Path,
    classifier_params: Dict[Text, Any],
    data_path: Text,
    monkeypatch: MonkeyPatch,
):
    pipeline = as_pipeline("WhitespaceTokenizer", "CountVectorsFeaturizer",
                           "DIETClassifier")
    assert pipeline[2]["name"] == "DIETClassifier"
    pipeline[2].update(classifier_params)

    _config = RasaNLUModelConfig({"pipeline": pipeline})
    (trained_model, _, persisted_path) = await rasa.nlu.train.train(
        _config,
        path=str(tmp_path),
        data=data_path,
        component_builder=component_builder,
    )
    loaded = Interpreter.load(persisted_path, component_builder)

    mock = Mock()
    monkeypatch.setattr(train_utils, "normalize", mock.normalize)

    parse_data = loaded.parse("hello")
    intent_ranking = parse_data.get("intent_ranking")

    # check whether normalization had the expected effect
    output_sums_to_1 = sum([
        intent.get("confidence") for intent in intent_ranking
    ]) == pytest.approx(1)
    assert output_sums_to_1

    # check whether the normalization of rankings is reflected in intent prediction
    assert parse_data.get("intent") == intent_ranking[0]

    # normalize shouldn't have been called
    mock.normalize.assert_not_called()
Example #35
    def test_lastfailed_usecase(self, pytester: Pytester,
                                monkeypatch: MonkeyPatch) -> None:
        monkeypatch.setattr("sys.dont_write_bytecode", True)
        p = pytester.makepyfile("""
            def test_1(): assert 0
            def test_2(): assert 0
            def test_3(): assert 1
            """)
        result = pytester.runpytest(str(p))
        result.stdout.fnmatch_lines(["*2 failed*"])
        p = pytester.makepyfile("""
            def test_1(): assert 1
            def test_2(): assert 1
            def test_3(): assert 0
            """)
        result = pytester.runpytest(str(p), "--lf")
        result.stdout.fnmatch_lines([
            "collected 3 items / 1 deselected / 2 selected",
            "run-last-failure: rerun previous 2 failures",
            "*= 2 passed, 1 deselected in *",
        ])
        result = pytester.runpytest(str(p), "--lf")
        result.stdout.fnmatch_lines([
            "collected 3 items",
            "run-last-failure: no previously failed tests, not deselecting items.",
            "*1 failed*2 passed*",
        ])
        pytester.path.joinpath(".pytest_cache", ".git").mkdir(parents=True)
        result = pytester.runpytest(str(p), "--lf", "--cache-clear")
        result.stdout.fnmatch_lines(["*1 failed*2 passed*"])
        assert pytester.path.joinpath(".pytest_cache", "README.md").is_file()
        assert pytester.path.joinpath(".pytest_cache", ".git").is_dir()

        # Run this again to make sure clear-cache is robust
        if os.path.isdir(".pytest_cache"):
            shutil.rmtree(".pytest_cache")
        result = pytester.runpytest("--lf", "--cache-clear")
        result.stdout.fnmatch_lines(["*1 failed*2 passed*"])
Example #36
def test_train_called_when_no_model_passed(default_stack_config: Text,
                                           monkeypatch: MonkeyPatch) -> None:
    parser = argparse.ArgumentParser()
    sub_parser = parser.add_subparsers()
    interactive.add_subparser(sub_parser, [])

    args = parser.parse_args([
        "interactive",
        "--config",
        default_stack_config,
        "--data",
        "examples/moodbot/data",
    ])
    interactive._set_not_required_args(args)

    # Mock actual training and interactive learning methods
    mock = Mock()
    monkeypatch.setattr(train, "train", mock.train_model)
    monkeypatch.setattr(interactive, "perform_interactive_learning",
                        mock.perform_interactive_learning)

    interactive.interactive(args)
    mock.train_model.assert_called_once()
Example #37
def test_convert_featurizer_token_edge_cases(
    text: Text,
    expected_tokens: List[Text],
    expected_indices: List[Tuple[int]],
    monkeypatch: MonkeyPatch,
):
    tokenizer = WhitespaceTokenizer()

    monkeypatch.setattr(ConveRTFeaturizer, "_get_validated_model_url",
                        lambda x: RESTRICTED_ACCESS_URL)
    component_config = {
        "name": "ConveRTFeaturizer",
        "model_url": RESTRICTED_ACCESS_URL
    }
    featurizer = ConveRTFeaturizer(component_config)
    message = Message.build(text=text)
    td = TrainingData([message])
    tokenizer.train(td)
    tokens = featurizer.tokenize(message, attribute=TEXT)

    assert [t.text for t in tokens] == expected_tokens
    assert [t.start for t in tokens] == [i[0] for i in expected_indices]
    assert [t.end for t in tokens] == [i[1] for i in expected_indices]
Example #38
def test_sequence_length_overflow_train(
    input_sequence_length: int,
    model_name: Text,
    should_overflow: bool,
    create_language_model_featurizer: Callable[[Dict[Text, Any]],
                                               LanguageModelFeaturizer],
    monkeypatch: MonkeyPatch,
):
    monkeypatch.setattr(LanguageModelFeaturizer, "_load_model_instance",
                        lambda _: None)
    component = create_language_model_featurizer({"model_name": model_name})
    message = Message.build(text=" ".join(["hi"] * input_sequence_length))
    if should_overflow:
        with pytest.raises(RuntimeError):
            component._validate_sequence_lengths([input_sequence_length],
                                                 [message],
                                                 "text",
                                                 inference_mode=False)
    else:
        component._validate_sequence_lengths([input_sequence_length],
                                             [message],
                                             "text",
                                             inference_mode=False)
Example #39
def test_ignore_flag(testdata_dir: pathlib.Path, tmp_trestle_dir: pathlib.Path,
                     monkeypatch: MonkeyPatch) -> None:
    """Test that ignored files are not validated. Validation will fail if attempted."""
    task_template_folder = tmp_trestle_dir / '.trestle/author/test_task/'
    test_template_folder = testdata_dir / 'author/governed_folders/template_folder_with_drawio'
    test_instances_folder = testdata_dir / 'author/governed_folders/ignored_files'
    task_instance_folder = tmp_trestle_dir / 'test_task/folder_1'

    hidden_file = testdata_dir / pathlib.Path(
        'author/governed_folders/template_folder_with_drawio/.hidden_does_not_affect'
    )
    test_utils.make_file_hidden(hidden_file)

    test_utils.copy_tree_or_file_with_hidden(test_template_folder,
                                             task_template_folder)

    # copy all files
    shutil.copytree(test_instances_folder, task_instance_folder)

    command_string_validate_content = 'trestle author folders validate -tn test_task -ig ^_.*'
    monkeypatch.setattr(sys, 'argv', command_string_validate_content.split())
    rc = trestle.cli.Trestle().run()
    assert rc == 0
Example #40
def pytest_configure(config):
    """Create a TempdirFactory and attach it to the config object.

    This is to comply with existing plugins which expect the handler to be
    available at pytest_configure time, but ideally should be moved entirely
    to the tmpdir_factory session fixture.
    """
    mp = MonkeyPatch()
    tmppath_handler = TempPathFactory.from_config(config)
    t = TempdirFactory(tmppath_handler)
    config._cleanup.append(mp.undo)
    mp.setattr(config, "_tmp_path_factory", tmppath_handler, raising=False)
    mp.setattr(config, "_tmpdirhandler", t, raising=False)
    mp.setattr(pytest, "ensuretemp", t.ensuretemp, raising=False)
Example #41
class TestRelatedFieldHTMLCutoff(APISimpleTestCase):
    def setUp(self):
        self.queryset = MockQueryset([
            MockObject(pk=i, name=str(i)) for i in range(0, 1100)
        ])
        self.monkeypatch = MonkeyPatch()

    def test_no_settings(self):
        # The default cutoff is 1,000, so without settings we get 1,000 items
        # plus the "More than 1000 items..." entry.
        for many in (False, True):
            field = serializers.PrimaryKeyRelatedField(queryset=self.queryset,
                                                       many=many)
            options = list(field.iter_options())
            assert len(options) == 1001
            assert options[-1].display_text == "More than 1000 items..."

    def test_settings_cutoff(self):
        self.monkeypatch.setattr(relations, "api_settings",
                                 MockApiSettings(2, "Cut Off"))
        for many in (False, True):
            field = serializers.PrimaryKeyRelatedField(queryset=self.queryset,
                                                       many=many)
            options = list(field.iter_options())
            assert len(options) == 3  # 2 real items plus the 'Cut Off' item.
            assert options[-1].display_text == "Cut Off"

    def test_settings_cutoff_none(self):
        # Setting it to None should mean no limit; the default limit is 1,000.
        self.monkeypatch.setattr(relations, "api_settings",
                                 MockApiSettings(None, "Cut Off"))
        for many in (False, True):
            field = serializers.PrimaryKeyRelatedField(queryset=self.queryset,
                                                       many=many)
            options = list(field.iter_options())
            assert len(options) == 1100

    def test_settings_kwargs_cutoff(self):
        # The explicit argument should override the settings.
        self.monkeypatch.setattr(relations, "api_settings",
                                 MockApiSettings(2, "Cut Off"))
        for many in (False, True):
            field = serializers.PrimaryKeyRelatedField(queryset=self.queryset,
                                                       many=many,
                                                       html_cutoff=100)
            options = list(field.iter_options())
            assert len(options) == 101
            assert options[-1].display_text == "Cut Off"
Example #42
def test_setattr():
    class A(object):
        x = 1

    monkeypatch = MonkeyPatch()
    with pytest.raises(AttributeError):
        monkeypatch.setattr(A, "notexists", 2)
    monkeypatch.setattr(A, "y", 2, raising=False)
    assert A.y == 2
    monkeypatch.undo()
    assert not hasattr(A, "y")

    monkeypatch = MonkeyPatch()
    monkeypatch.setattr(A, "x", 2)
    assert A.x == 2
    monkeypatch.setattr(A, "x", 3)
    assert A.x == 3
    monkeypatch.undo()
    assert A.x == 1

    A.x = 5
    monkeypatch.undo()  # double-undo makes no modification
    assert A.x == 5
Example #43
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.

"""Monkeypatching fixtures."""

from _pytest.monkeypatch import MonkeyPatch


mp = MonkeyPatch()


class FakeJob(object):
    id = 'FAKE_JOB_ID'


mp.setattr('rq.get_current_job', lambda: FakeJob())
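
Nothing reverts this module-level patch automatically; a hedged sketch of explicit cleanup at interpreter exit (the placement is an assumption):

import atexit

atexit.register(mp.undo)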