Example 1
def test_X_None():
    X = None
    y = None
    from tests.mock_model import MockModel
    model = MockModel()
    models = [model]
    with pytest.raises(TypeError):
        eval_models(X, y, models)
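These snippets assume module-level imports (os, math, pytest, read_data, eval_models, THIS_DIR) that the source pages do not show. The MockModel helper is also not shown; as a hypothetical sketch only (the real tests.mock_model.MockModel may differ), a minimal version consistent with the usage above would be a constant-output classifier whose repr matches result keys such as 'MockModel(prediction=0)':

import numpy as np

class MockModel:
    """Hypothetical stand-in: always predicts a fixed label.

    Sketch only; the real tests.mock_model.MockModel may differ.
    """

    def __init__(self, prediction=1):
        self.prediction = prediction

    def fit(self, X, y=None):
        # A constant predictor needs no training.
        return self

    def predict(self, X):
        # Return the fixed label for every row of X.
        return np.full(len(X), self.prediction)

    def __repr__(self):
        # Matches the result keys used in the examples,
        # e.g. 'MockModel(prediction=0)'.
        return 'MockModel(prediction=%d)' % self.prediction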
Example 2
def test_y_None():
    path = os.path.join(THIS_DIR, os.pardir, 'tests/dummy.txt')
    df = read_data(path)
    X = df[['value']]
    y = None
    from tests.mock_model import MockModel
    model = MockModel()
    models = [model]
    with pytest.raises(TypeError):
        eval_models(X, y, models)
Example 3
def test_y_None():
    path = os.path.join(THIS_DIR, os.pardir, 'tests/data/dummy.txt')
    df = read_data(path)
    X = df[['value']]
    y = None

    model = MockModel()
    models = [model]
    with pytest.raises(TypeError):
        eval_models(X, y, models, label_col_name='is_anomaly')
Example 4
def test_X_None():
    X = None
    y = None
    model = MockModel()
    models = [model]
    with pytest.raises(TypeError):
        eval_models(X, y, models, label_col_name='is_anomaly')
Example 5
def test_eval_models_all_false():
    path = os.path.join(THIS_DIR, os.pardir, 'tests/dummy.txt')
    df = read_data(path)
    df['is_anomaly'] = 0
    from tests.mock_model import MockModel
    model = MockModel(prediction=0)
    models = [model]
    X = df[['value']]
    y = df[['is_anomaly']]
    res = eval_models(X, y, models)
    assert math.isnan(res['MockModel(prediction=0)']['f1'])
    assert math.isnan(res['MockModel(prediction=0)']['precision'])
    assert math.isnan(res['MockModel(prediction=0)']['recall'])
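The NaN assertions follow from the definitions: with all labels 0 and a model that always predicts 0, there are no predicted positives and no actual positives, so precision and recall are both 0/0 and f1 is undefined too. (Example 7 below shows the complementary case: a model that always predicts 1 against all-zero labels gets precision 0, since all its positives are false, while recall is still 0/0.) A quick worked check in plain Python, independent of eval_models:

# All-zero case: labels 0, model predicts 0.
y_true = [0, 0, 0, 0]
y_pred = [0, 0, 0, 0]
tp = sum(t == 1 and p == 1 for t, p in zip(y_true, y_pred))  # 0
fp = sum(t == 0 and p == 1 for t, p in zip(y_true, y_pred))  # 0
fn = sum(t == 1 and p == 0 for t, p in zip(y_true, y_pred))  # 0
precision = tp / (tp + fp) if tp + fp else float('nan')  # 0/0 -> NaN
recall = tp / (tp + fn) if tp + fn else float('nan')     # 0/0 -> NaN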
Example 6
def test_n_splits_big():
    path = os.path.join(THIS_DIR, os.pardir, 'tests/dummy2.txt')
    df = read_data(path)

    from tests.mock_model import MockModel
    model = MockModel()
    models = [model]
    X = df[['value']]
    y = df[['is_anomaly']]
    res = eval_models(X, y, models, n_splits=40000, verbose=True)
    assert res['MockModel(prediction=1)']['f1'] == 1.0
    assert res['MockModel(prediction=1)']['precision'] == 1.0
    assert res['MockModel(prediction=1)']['recall'] == 1.0
Example 7
def test_eval_models_all_false():
    path = os.path.join(THIS_DIR, os.pardir, 'tests/data/dummy.txt')
    df = read_data(path)
    df['is_anomaly'] = 0

    model = MockModel()
    models = [model]
    X = df[['value']]
    y = df[['is_anomaly']]
    res = eval_models(X, y, models, label_col_name='is_anomaly')
    assert math.isnan(res['MockModel']['f1'])
    assert res['MockModel']['precision'] == 0
    assert math.isnan(res['MockModel']['recall'])
Example 8
def test_eval_models_half_false():
    path = os.path.join(THIS_DIR, os.pardir, 'tests/dummy2.txt')
    df = read_data(path)
    df['is_anomaly'] = 0
    # Positional assignment; chained df.iloc[-1]['is_anomaly'] = 1 would
    # write to a copy and be silently lost.
    df.iloc[-1, df.columns.get_loc('is_anomaly')] = 1
    df.iloc[-2, df.columns.get_loc('is_anomaly')] = 1
    from tests.mock_model import MockModel
    model = MockModel()
    models = [model]
    X = df[['value']]
    y = df[['is_anomaly']]
    res = eval_models(X, y, models)
    assert res['MockModel(prediction=1)']['precision'] == 0.5
    assert res['MockModel(prediction=1)']['recall'] == 1.0
Example 9
def test_eval_models_all_true():
    path = os.path.join(THIS_DIR, os.pardir, 'tests/dummy2.txt')
    df = read_data(path)

    from tests.mock_model import MockModel
    model = MockModel()
    models = [model]
    X = df[['value']]
    y = df[['is_anomaly']]
    res = eval_models(X, y, models)
    print(res)
    assert res['MockModel(prediction=1)']['f1'] == 1.0
    assert res['MockModel(prediction=1)']['precision'] == 1.0
    assert res['MockModel(prediction=1)']['recall'] == 1.0
Example 10
def test_n_splits_big():
    path = os.path.join(THIS_DIR, os.pardir, 'tests/data/dummy2.txt')
    df = read_data(path)

    model = MockModel()
    models = [model]
    X = df[['value']]
    y = df[['is_anomaly']]
    res = eval_models_CV(X,
                         y,
                         models,
                         n_splits=40000,
                         verbose=True,
                         label_col_name='is_anomaly')
    assert res['MockModel']['f1'] == 1.0
    assert res['MockModel']['precision'] == 1.0
    assert res['MockModel']['recall'] == 1.0
Example 11
    def test_articles_not_modified_by_predictor(self):
        """
        Test if articles fields 'themes' and 'verified_themes' are not modified
        by the predictor.
        :return:
        """

        tokenizer_init_article = Article(
            id="0",
            title="",
            summary="theme1 theme2 theme3",
            themes=["theme1", "theme2", "theme3"],
            verified_themes=["theme1", "theme2", "theme3"],
            predicted_themes=[])

        articleOne = Article(id="1",
                             title="",
                             summary="theme1 theme2",
                             themes=["one", "two"],
                             verified_themes=["one", "two", "three"],
                             predicted_themes=["three"])

        article_tokenizer = ArticleTextTokenizer(
            Articles([tokenizer_init_article]), 3)
        theme_tokenizer = ArticleThemeTokenizer(
            Articles([tokenizer_init_article]))

        predictor = ArticlePredictor(
            classifier_model=MockModel.get_model(),
            supported_themes=["theme1", "theme2", "theme3"],
            preprocessor=MockPreprocessor(),
            article_tokenizer=article_tokenizer,
            theme_tokenizer=theme_tokenizer)

        prediction = predictor.predict_preprocessed(
            Articles(article=articleOne))

        article_with_predictions = prediction.get_articles_with_predictions()[0]

        self.assertEqual(["one", "two"], article_with_predictions.themes)
        self.assertEqual(["one", "two", "three"],
                         article_with_predictions.verified_themes)
        self.assertEqual(["theme1", "theme2"],
                         article_with_predictions.predicted_themes)
Example 12
def test_eval_models_half_false():
    path = os.path.join(THIS_DIR, os.pardir, 'tests/data/dummy2.txt')
    df = read_data(path)
    df['is_anomaly'] = 0
    # Positional assignment; chained df.iloc[-1]['is_anomaly'] = 1 would
    # write to a copy and be silently lost.
    df.iloc[-1, df.columns.get_loc('is_anomaly')] = 1
    df.iloc[-2, df.columns.get_loc('is_anomaly')] = 1

    model = MockModel()
    models = [model]
    X = df[['value']]
    y = df[['is_anomaly']]
    res = eval_models(X,
                      y,
                      models,
                      label_col_name='is_anomaly',
                      window_size_for_metrics=0)
    assert res['MockModel']['precision'] == 0.5
    assert res['MockModel']['recall'] == 1.0
Example 13
def build_response_data(req):
    """build_response_data

    :param req: request dict
    """
    model = MockModel(req=req)
    predictions = req.get("test_predictions", [])
    sample_predictions = req.get("test_predictions", [])
    rounded = req.get("test_predictions", [])
    accuracy = req.get("test_accuracy", {"accuracy": 52.5})
    error = req.get("test_error", None)
    image_file = req.get("image_file", None)
    history = req.get("history", None)
    histories = req.get("histories", None)
    indexes = req.get("test_indexes", None)
    scores = req.get("test_scores", None)
    cm = req.get("test_cm", None)
    predicts_merged = req.get("test_predicts_merged", False)
    merge_df = req.get("test_merge_df", None)
    data = {
        "predictions": predictions,
        "rounded_predictions": rounded,
        "sample_predictions": sample_predictions,
        "acc": accuracy,
        "scores": scores,
        "history": history,
        "histories": histories,
        "image_file": image_file,
        "model": model,
        "indexes": indexes,
        "confusion_matrix": cm,
        "are_predicts_merged": predicts_merged,
        "merge_df": merge_df,
        "err": error
    }
    return data
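A minimal usage sketch (hypothetical values; it assumes a MockModel that accepts a req keyword, as the function body requires, and only sets the keys build_response_data reads):

# Hypothetical request; omitted test_* keys fall back to their defaults.
req = {
    "test_predictions": [0, 1, 1],
    "test_accuracy": {"accuracy": 90.0},
}
data = build_response_data(req)
# predictions, rounded_predictions, and sample_predictions all default to
# the same "test_predictions" value:
assert data["predictions"] == data["rounded_predictions"] == data["sample_predictions"]
assert data["acc"] == {"accuracy": 90.0}
assert data["err"] is None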