def test_calibrationbelt_algorithm_federated(test_input, expected):
    """Federated CalibrationBelt run must reproduce the reference output."""
    computed = get_algorithm_result(CalibrationBelt, test_input, num_workers=5)

    # The expected fixture for this test is a one-element list; unwrap it.
    reference = expected[0]
    assert int(computed["n_obs"]) == int(reference["n_obs"])
    assert int(computed["Model degree"]) == int(reference["Model degree"])
    assert np.isclose(computed["p value"], reference["p value"], atol=1e-3)
# Example #2
def test_pca_algorithm_federated(test_input, expected):
    """Federated PCA: n_obs, eigenvalues and eigenvector directions match."""
    result = get_algorithm_result(PCA, test_input, num_workers=10)

    assert int(result["n_obs"]) == int(expected["n_obs"])
    eigval_close = np.isclose(result["eigenvalues"], expected["eigen_vals"],
                              atol=1e-3)
    assert eigval_close.all()
    # Eigenvectors are only determined up to sign/scale, so compare
    # directions rather than exact components.
    for got_vec, want_vec in zip(result["eigenvectors"], expected["eigen_vecs"]):
        assert are_collinear(got_vec, want_vec)
# Example #3
def test_pearson_algorithm_federated(test_input, expected):
    """Federated Pearson: coefficients, p-values and n_obs must all match."""
    result = get_algorithm_result(Pearson, test_input, num_workers=10)

    coeff_key = "Pearson correlation coefficient"
    coeff_close = np.isclose(result[coeff_key], expected[coeff_key], atol=1e-3)
    assert coeff_close.all()
    pval_close = np.isclose(result["p-value"], expected["p-value"], atol=1e-3)
    assert pval_close.all()
    assert int(result["n_obs"]) == int(expected["n_obs"])
def test_logistic_regression_algorithm_local(test_input, expected):
    """Local (single-worker) logistic regression agrees with sklearn's fit."""
    result = get_algorithm_result(LogisticRegression, test_input, num_workers=1)

    # sklearn's LogisticRegression chooses the positive/negative level on its
    # own; when its choice differs from ours the fitted coefficients point in
    # the opposite direction along the same axis. Collinearity is therefore
    # the strongest check that holds in both cases.
    assert are_collinear(result["Coefficients"], expected["coeff"])
# Example #5
def test_anova_algorithm_federated(test_input, expected):
    """Federated one-way ANOVA: ANOVA table and Tukey table must match."""
    result = get_algorithm_result(Anova, test_input, num_workers=10)
    aov_table = result["anova_table"]
    tukey_table = result["tukey_table"]

    # Everything in `expected` apart from the Tukey rows belongs to the
    # ANOVA table.
    expected_aov = {key: val for key, val in expected.items()
                    if key != "tukey_test"}
    expected_tukey = expected["tukey_test"]

    assert set(expected_aov) == set(aov_table.keys())
    for key, want in expected_aov.items():
        assert np.isclose(want, aov_table[key])

    # Tukey rows mix string fields (group labels) with numeric ones, so
    # accept either exact equality or numeric closeness per entry.
    for want_row, got_row in zip(expected_tukey, tukey_table):
        for key, want in want_row.items():
            got = got_row[key]
            assert want == got or np.isclose(want, got)
def test_logistic_regression_algorithm_federated(test_input, expected):
    """Federated logistic regression agrees with the expected coefficients."""
    # Pass num_workers as a keyword for consistency with every other test in
    # this module (the positional `10` relied on argument order).
    result = get_algorithm_result(LogisticRegression, test_input, num_workers=10)

    # The positive/negative level choice can flip the coefficients' sign, so
    # only collinearity with the expected coefficients is checked (see the
    # local-run test above for details).
    assert are_collinear(result["Coefficients"], expected["coeff"])