def test_that_retrain_calls_eval_method_correctly(mocker):
    """Retraining with enough labels must call ``eval_method`` exactly once,
    passing the classifier, the labelled feature rows, and the labels."""
    mock_classifier = mocker.Mock()
    # Bug fix: the original used ``==`` (a no-op comparison) instead of ``=``,
    # so these attributes were never actually assigned.
    mock_classifier.fit = mocker.Mock()
    mock_classifier.predict_proba = mocker.Mock()

    mock_eval_method = mocker.Mock(
        return_value={"test_score": np.array([0.8])}
    )

    test_array = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]])
    widget = SemiSupervisor(
        features=test_array,
        classifier=mock_classifier,
        eval_method=mock_eval_method,
    )

    # Label the first two rows, then trigger a retrain.
    widget._annotation_loop.send({"source": "", "value": "dummy label 1"})
    widget._annotation_loop.send({"source": "", "value": "dummy label 2"})
    widget.retrain()

    assert mock_eval_method.call_count == 1

    # Positional args: (classifier, labelled_features, labels).
    call_arguments = mock_eval_method.call_args[0]
    assert call_arguments[0] is mock_classifier
    assert (call_arguments[1] == test_array[:2, :]).all()
    assert pytest.helpers.same_elements(
        call_arguments[2], ["dummy label 1", "dummy label 2"]
    )
    assert widget.model_performance.value == "Score: 0.80"
def test_that_retrain_calls_reorder_correctly(mocker):
    """After retraining, the ``reorder`` callback should be invoked once with
    the predicted probabilities and the configured ``shuffle_prop``."""
    features = np.array([[1, 2, 3]] * 4)
    probabilities = np.array([[0.2, 0.3], [0.1, 0.4]])

    eval_mock = mocker.Mock(return_value={"test_score": np.array([0.8])})
    reorder_mock = mocker.Mock(return_value=[0, 1])

    widget = SemiSupervisor(
        features=features,
        classifier=LogisticRegression(),
        eval_method=eval_mock,
        reorder=reorder_mock,
        shuffle_prop=0.2,
    )

    # Stub out the real estimator so retrain() produces known probabilities.
    mocker.patch.object(
        widget.classifier, "fit", return_value=LogisticRegression()
    )
    mocker.patch.object(
        widget.classifier, "predict_proba", return_value=probabilities
    )

    # Supply two labels, then retrain.
    for label in ("dummy label 1", "dummy label 2"):
        widget._annotation_loop.send({"source": "", "value": label})
    widget.retrain()

    assert reorder_mock.call_count == 1

    args, kwargs = reorder_mock.call_args_list[0]
    assert (args[0] == probabilities).all()
    assert kwargs["shuffle_prop"] == 0.2
def test_that_retrain_with_no_labels_sets_warnings(mocker):
    """With fewer than two labels, ``retrain`` must warn instead of fitting;
    once two labels exist, it should evaluate and report the score."""
    mock_classifier = mocker.Mock()
    # Bug fix: the original used ``==`` (a no-op comparison) instead of ``=``,
    # so these attributes were never actually assigned.
    mock_classifier.fit = mocker.Mock()
    mock_classifier.predict_proba = mocker.Mock()

    mock_eval_method = mocker.Mock(
        return_value={"test_score": np.array([0.8])}
    )

    test_array = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]])
    widget = SemiSupervisor(
        features=test_array,
        classifier=mock_classifier,
        eval_method=mock_eval_method,
    )

    # No labels yet: retraining should only set the warning message.
    widget.retrain()
    assert (
        widget.model_performance.value
        == "Score: Not enough labels to retrain."
    )

    # One label is still not enough.
    widget._annotation_loop.send({"source": "", "value": "dummy label 1"})

    widget.retrain()
    assert (
        widget.model_performance.value
        == "Score: Not enough labels to retrain."
    )

    # Two labels: retraining should finally evaluate the classifier.
    widget._annotation_loop.send({"source": "", "value": "dummy label 2"})

    widget.retrain()

    assert mock_eval_method.call_count == 1
    assert widget.model_performance.value == "Score: 0.80"
def test_that_calling_retrain_without_classifier_breaks():
    """``retrain`` must raise ``ValueError`` when no classifier was given."""
    test_array = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]])
    widget = SemiSupervisor(features=test_array)
    # Keep only the call under test inside ``pytest.raises`` so the test
    # cannot pass spuriously if widget construction itself raised ValueError.
    with pytest.raises(ValueError):
        widget.retrain()