# Example #1
def test_perturber_high():
    """Probe a no-op perturbation on a high-accuracy model.

    With a strong accuracy-boosting second feature (bonus = 3) and a
    perturber (`perturb_dont`, presumably a no-op -- verify against its
    definition) that leaves inputs unchanged, the probe should report no
    sensitivity: |beta| near zero and significance "low".
    """
    N = 1000
    bonus = 3  # Accuracy parameter = high (bonus = 0.5 would be low).

    model = FakeModel(lambda x: fake_dist_w_bias(x))
    X_test = np.zeros((N, 2))
    # np.int was removed in NumPy 1.24; the builtin int dtype is equivalent.
    X_test[:, 0] = (10 * np.arange(N) / N).astype(int)
    # Second column is the accuracy-controlling "bonus" feature.
    X_test[:, 1] = bonus * np.arange(N) / N
    y_test = X_test.astype(int)[:, 0]

    probe = sensie.Probe(model)
    result = probe.predict_and_measure_perturbed(X_test,
                                                 y_test,
                                                 perturb_dont,
                                                 p_min=1,
                                                 p_max=10,
                                                 steps=10,
                                                 plot=False,
                                                 label="fake")
    result.set_credible_intervals()
    assert np.abs(result.tests['fake'].beta) < 0.002 and (
        result.tests['fake'].get_significance() == "low")
# Example #2
def test_no_correlation():
    """Probe a property that is independent of the inputs.

    The probed property `fake_p` is drawn at random, so it cannot be
    correlated with model accuracy: the fitted |beta| should be near zero
    and the reported significance "low".
    """
    N = 1000
    classes = 10
    b = 3  # Accuracy parameter for the fake distribution.

    model = FakeModel(lambda x: fake_dist(x, b))
    X_test = np.random.random(size=(N, 1)) * classes
    y_test = X_test.astype(int)[:, 0]
    # Random property values, uncorrelated with the model by construction.
    # (Sized by N rather than the hard-coded 1000 used originally.)
    fake_p = np.random.randint(10, size=N)

    probe = sensie.Probe(model)
    result = probe.predict_and_measure(X_test,
                                       y_test,
                                       fake_p,
                                       plot=False,
                                       propnames=["fake"])
    result.set_credible_intervals()
    assert np.abs(result.tests['fake'].beta) < 0.002 and (
        result.tests['fake'].get_significance() == "low")
# Example #3
def test_class_disparity():
    """Per-class sensitivity should expose a deliberately corrupted class.

    Rows 800:900 (true class ~8) get their accuracy feature replaced with
    values from rows 0:100 (true class ~0), so the per-class means from
    `test_class_sensitivity` should differ by more than a factor of 3
    between the last and first class.
    """
    N = 1000
    classes = 10

    model = FakeModel(lambda x: fake_dist_w_bias(x))
    X_test = np.random.random(size=(N, 2)) * classes
    X_test[:, 1] = X_test[:, 0] / 3
    y_test = X_test.astype(int)[:, 0]

    # Corrupt the accuracy feature for one block of rows.
    X_test[800:900, 1] = X_test[0:100, 1]

    fake_p = (10 * np.arange(N) / N).astype(int)

    probe = sensie.Probe(model)
    # NOTE(review): the original assigned this call's result and immediately
    # overwrote it (dead store). The call is kept in case predict_and_measure
    # has side effects on `probe` that test_class_sensitivity relies on --
    # confirm and drop the call entirely if it is pure.
    probe.predict_and_measure(X_test,
                              y_test,
                              fake_p,
                              plot=False,
                              propnames=["fake"])

    result = probe.test_class_sensitivity(X_test, y_test, plot=False)
    assert result.tests['class'].means[-1] / result.tests['class'].means[0] > 3
# Example #4
def test_high_correlation():
    """Probe a property that tracks the accuracy-boosting feature.

    `fake_p` increases monotonically along the same ordering as the
    "bonus" feature that drives model accuracy, so the probe should report
    a strong sensitivity: |beta| > 0.05 and significance "high".
    """
    N = 1000
    bonus = 3  # Accuracy parameter = high (bonus = 0.5 would be low).

    model = FakeModel(lambda x: fake_dist_w_bias(x))
    X_test = np.zeros((N, 2))
    # np.int was removed in NumPy 1.24; the builtin int dtype is equivalent.
    X_test[:, 0] = (10 * np.arange(N) / N).astype(int)
    # Second column is the accuracy-controlling "bonus" feature.
    X_test[:, 1] = bonus * np.arange(N) / N
    y_test = X_test.astype(int)[:, 0]

    # The probed property rises with the sample index, like the bonus feature.
    fake_p = (10 * np.arange(N) / N).astype(int)

    probe = sensie.Probe(model)
    result = probe.predict_and_measure(X_test,
                                       y_test,
                                       fake_p,
                                       plot=False,
                                       propnames=["fake"])
    result.set_credible_intervals()
    assert np.abs(result.tests['fake'].beta) > 0.05 and (
        result.tests['fake'].get_significance() == "high")