Beispiel #1
0
def test_saved_bitstring_distribution_set_can_be_loaded(mock_open):
    """Round-trip check: a saved distribution set loads back unchanged."""
    buffer = StringIO()
    mock_open().__enter__.return_value = buffer
    originals = [
        BitstringDistribution({"000": 0.1, "111": 0.9}),
        BitstringDistribution({"01000": 0.5, "10110": 0.5}),
    ]

    save_bitstring_distribution_set(originals, "distributions.json")
    buffer.seek(0)

    reloaded = load_bitstring_distribution_set(buffer)

    # Probabilities survive the save/load round trip (up to float noise).
    for original, loaded in zip(originals, reloaded):
        for key in original.distribution_dict:
            assert math.isclose(
                original.distribution_dict[key],
                loaded.distribution_dict[key],
            )

    # The supports (sets of bitstrings) are preserved exactly.
    for original, loaded in zip(originals, reloaded):
        assert (
            original.distribution_dict.keys()
            == loaded.distribution_dict.keys()
        )
Beispiel #2
0
def test_saving_bitstring_distributions_writes_correct_json_data_to_file(
        mock_open):
    """The JSON payload written to disk lists every distribution plus schema."""
    distribution_set = [
        BitstringDistribution({"000": 0.1, "111": 0.9}),
        BitstringDistribution({"01000": 0.5, "10110": 0.5}),
    ]

    expected_payload = {
        "bitstring_distribution": [
            dist.distribution_dict for dist in distribution_set
        ],
        "schema": SCHEMA_VERSION + "-bitstring-probability-distribution-set",
    }

    save_bitstring_distributions(
        distribution_set, "/some/path/to/distribution/set.json"
    )

    # The mocked file captures the serialized text on its write() call.
    written = mock_open().__enter__().write.call_args[0][0]
    assert json.loads(written) == expected_payload
Beispiel #3
0
def test_distribution_distance_cant_be_computed_if_only_one_distribution_is_normalized(
        normalize_target, normalize_measured, distance_measure):
    """Distance evaluation raises when the normalization flags disagree."""
    target_distribution = BitstringDistribution({"0": 10, "1": 5}, normalize_target)
    measured_distribution = BitstringDistribution({"0": 10, "1": 5}, normalize_measured)

    with pytest.raises(RuntimeError):
        evaluate_distribution_distance(
            target_distribution, measured_distribution, distance_measure
        )
Beispiel #4
0
def test_distribution_distance_cannot_be_evaluated_if_supports_are_incompatible(
        distance_measure):
    """Distributions over different-length bitstrings cannot be compared."""
    target_distribution = BitstringDistribution({"0": 10, "1": 5})
    measured_distribution = BitstringDistribution({"00": 10, "10": 5})

    with pytest.raises(RuntimeError):
        evaluate_distribution_distance(
            target_distribution, measured_distribution, distance_measure
        )
Beispiel #5
0
def test_distribution_distance_cannot_be_computed_if_distributions_differ_in_normalization(
        normalize_target, normalize_measured, distance_measure):
    """A normalized/unnormalized pair must be rejected with RuntimeError."""
    target_distribution = BitstringDistribution({"0": 10, "1": 5}, normalize_target)
    measured_distribution = BitstringDistribution({"0": 10, "1": 5}, normalize_measured)

    with pytest.raises(RuntimeError):
        evaluate_distribution_distance(
            target_distribution, measured_distribution, distance_measure
        )
Beispiel #6
0
def test_clipped_negative_log_likelihood_is_computed_correctly():
    """Clipped negative log-likelihood matches the hand-computed value.

    For target {000: 0.5, 111: 0.5} and measured {000: 0.1, 111: 0.9}
    (no zero entries, so the epsilon clip never triggers) the expected
    value is -(0.5*log(0.1) + 0.5*log(0.9)) ~= 1.203972804325936.
    """
    target_distr = BitstringDistribution({"000": 0.5, "111": 0.5})
    measured_dist = BitstringDistribution({"000": 0.1, "111": 0.9})
    distance_measure_params = {"epsilon": 0.1}

    clipped_log_likelihood = compute_clipped_negative_log_likelihood(
        target_distr, measured_dist, distance_measure_params)

    # Fix: compare with a tolerance instead of exact ==. Exact float
    # equality on log-based results is brittle across platforms/libm
    # versions even when the implementation is correct.
    assert math.isclose(clipped_log_likelihood, 1.203972804325936)
Beispiel #7
0
def test_jensen_shannon_divergence_is_computed_correctly():
    """Jensen-Shannon divergence between fixed distributions is correct.

    The reference value 0.9485599924429406 is the regression baseline for
    target {000: 0.5, 111: 0.5} vs measured {000: 0.1, 111: 0.9} with
    epsilon 0.1.
    """
    target_distr = BitstringDistribution({"000": 0.5, "111": 0.5})
    measured_dist = BitstringDistribution({"000": 0.1, "111": 0.9})
    distance_measure_params = {"epsilon": 0.1}

    jensen_shannon_divergence = compute_jensen_shannon_divergence(
        target_distr, measured_dist, distance_measure_params)

    # Fix: compare with a tolerance instead of exact ==. Exact float
    # equality on log-based results is brittle across platforms/libm
    # versions even when the implementation is correct.
    assert math.isclose(jensen_shannon_divergence, 0.9485599924429406)
Beispiel #8
0
def test_distance_measure_default_parameters_are_set_correctly(
        distance_measure_function, expected_default_values):
    """An empty params dict must behave like passing the documented defaults."""
    target = BitstringDistribution({"000": 0.5, "111": 0.5})
    measured = BitstringDistribution({"000": 0.1, "111": 0.9})

    with_empty_params = distance_measure_function(target, measured, {})
    with_explicit_defaults = distance_measure_function(
        target, measured, expected_default_values)

    # Exact equality is fine here: both sides run the identical computation.
    assert with_empty_params == with_explicit_defaults
Beispiel #9
0
def test_passed_measure_is_used_for_evaluating_distribution_distance():
    """The measure callable passed in is invoked once and its result returned."""
    target = BitstringDistribution({"0": 10, "1": 5})
    measured = BitstringDistribution({"0": 10, "1": 5})
    measure_mock = mock.Mock()

    result = evaluate_distribution_distance(target, measured, measure_mock)

    measure_mock.assert_called_once_with(target, measured)
    assert result == measure_mock.return_value
Beispiel #10
0
def test_saving_bitstring_distribution_set_opens_file_for_writing_using_context_manager(
    mock_open,
):
    """Saving a distribution set opens the target path for writing via `with`."""
    distribution_set = [
        BitstringDistribution({"000": 0.1, "111": 0.9}),
        BitstringDistribution({"01000": 0.5, "10110": 0.5}),
    ]

    save_bitstring_distribution_set(
        distribution_set, "/some/path/to/distribution/set.json"
    )

    # One open in "w" mode, entered and exited exactly once each.
    mock_open.assert_called_once_with("/some/path/to/distribution/set.json", "w")
    mock_open().__enter__.assert_called_once()
    mock_open().__exit__.assert_called_once()
Beispiel #11
0
def test_gaussian_mmd_is_computed_correctly(measured_dist,
                                            distance_measure_params,
                                            expected_mmd):
    """Gaussian-kernel MMD against the fixed target matches the fixture value."""
    target = BitstringDistribution({"000": 0.5, "111": 0.5})

    result = compute_mmd(target, measured_dist, distance_measure_params)

    assert result == expected_mmd
Beispiel #12
0
def test_saving_bitstring_distribution_opens_file_for_writing_using_context_manager(
        mock_open):
    """Saving a single distribution opens its path for writing via `with`."""
    dist = BitstringDistribution({"000": 0.1, "111": 0.9})

    save_bitstring_distribution(dist, "/some/path/to/distribution.json")

    # One open in "w" mode, entered and exited exactly once each.
    mock_open.assert_called_once_with("/some/path/to/distribution.json", "w")
    mock_open().__enter__.assert_called_once()
    mock_open().__exit__.assert_called_once()
Beispiel #13
0
def test_bitstring_distribution_keeps_original_dict_if_normalization_isnt_requested(
):
    """With normalize=False the constructor stores the input dict verbatim."""
    expected = {"000": 0.1, "111": 9}
    dist = BitstringDistribution({"000": 0.1, "111": 9}, normalize=False)
    assert dist.distribution_dict == expected
Beispiel #14
0
def test_uses_epsilon_instead_of_zero_in_target_distribution():
    """Clipped NLL substitutes epsilon for zero probabilities inside log."""
    log_spy = mock.Mock(wraps=math.log)
    with mock.patch("zquantum.core.bitstring_distribution.math.log", log_spy):
        target = BitstringDistribution(
            {"000": 0.5, "111": 0.4, "010": 0.0}
        )
        measured = BitstringDistribution(
            {"000": 0.1, "111": 0.9, "010": 0.0}
        )

        compute_clipped_negative_log_likelihood(
            target, measured, {"epsilon": 0.01}
        )

        # log must see the nonzero measured values unchanged and the
        # epsilon (0.01) in place of the zero entry.
        expected_calls = [mock.call(0.1), mock.call(0.9), mock.call(0.01)]
        log_spy.assert_has_calls(expected_calls, any_order=True)
Beispiel #15
0
def test_uses_epsilon_instead_of_zero_in_target_distribution():
    """Jensen-Shannon divergence substitutes epsilon for zeros inside log."""
    log_spy = mock.Mock(wraps=math.log)
    with mock.patch("zquantum.core.bitstring_distribution.math.log", log_spy):
        target = BitstringDistribution(
            {"000": 0.5, "111": 0.4, "010": 0.0}
        )
        measured = BitstringDistribution(
            {"000": 0.1, "111": 0.9, "010": 0.0}
        )

        compute_jensen_shannon_divergence(
            target, measured, {"epsilon": 0.01}
        )

        # log must see the nonzero measured values unchanged and the
        # epsilon (0.01) in place of the zero entry.
        expected_calls = [mock.call(0.1), mock.call(0.9), mock.call(0.01)]
        log_spy.assert_has_calls(expected_calls, any_order=True)
Beispiel #16
0
def test_saving_bitstring_distribution_writes_correct_json_data_to_file(mock_open):
    """The JSON written for a single distribution carries its dict and schema."""
    dist = BitstringDistribution({"000": 0.1, "111": 0.9})

    expected_payload = {
        "bitstring_distribution": dist.distribution_dict,
        "schema": SCHEMA_VERSION + "-bitstring-probability-distribution",
    }

    save_bitstring_distribution(dist, "/some/path/to/distribution.json")

    # The mocked file captures the serialized text on its write() call.
    written = mock_open().__enter__().write.call_args[0][0]
    assert json.loads(written) == expected_payload
Beispiel #17
0
def test_saved_bitstring_distribution_can_be_loaded(mock_open):
    """Round-trip check: a saved distribution loads back unchanged."""
    buffer = StringIO()
    mock_open().__enter__.return_value = buffer
    original = BitstringDistribution({"000": 0.1, "111": 0.9})

    save_bitstring_distribution(original, "distribution.json")
    buffer.seek(0)

    loaded = load_bitstring_distribution(buffer)

    # Probabilities survive the round trip (up to float noise) ...
    for key in original.distribution_dict:
        assert math.isclose(
            original.distribution_dict[key], loaded.distribution_dict[key]
        )

    # ... and the support is preserved exactly.
    assert original.distribution_dict.keys() == loaded.distribution_dict.keys()
Beispiel #18
0
def test_bitstring_distribution_keeps_original_dict_if_normalization_should_not_be_performed():
    """normalize=False leaves the stored distribution dict exactly as supplied."""
    expected = {"000": 0.1, "111": 9}
    dist = BitstringDistribution({"000": 0.1, "111": 9}, normalize=False)
    assert dist.distribution_dict == expected
Beispiel #19
0
def test_bitstring_distribution_gets_normalized_by_default(distribution):
    """Omitting the normalize flag yields a normalized distribution."""
    constructed = BitstringDistribution(distribution)
    assert is_normalized(constructed.distribution_dict)
Beispiel #20
0
        })
        distance_measure_params = {"epsilon": 0.01}
        compute_clipped_negative_log_likelihood(target_distr, measured_dist,
                                                distance_measure_params)

        log_spy.assert_has_calls(
            [mock.call(0.1), mock.call(0.9),
             mock.call(0.01)], any_order=True)


@pytest.mark.parametrize(
    "measured_dist,distance_measure_params,expected_mmd",
    [
        (
            BitstringDistribution({
                "000": 0.1,
                "111": 0.9
            }),
            {
                "sigma": 0.5
            },
            0.32000000000000006,
        ),
        (
            BitstringDistribution({
                "000": 0.5,
                "111": 0.5
            }),
            {
                "sigma": 1
            },
            0.00,
Beispiel #21
0
        {"000": 1e-3, "111": 0, "100": 100},
    ],
)
def test_normalizing_distribution_gives_normalized_distribution(distribution):
    """normalize_bitstring_distribution turns an unnormalized dict into a normalized one in place."""
    # Precondition: the fixture dict starts out unnormalized.
    assert not is_normalized(distribution)
    normalize_bitstring_distribution(distribution)
    assert is_normalized(distribution)


@pytest.mark.parametrize(
    "prob_dist,expected_bitstring_dist",
    [
        (
            np.asarray([0.25, 0, 0.5, 0.25]),
            BitstringDistribution({"00": 0.25, "01": 0.5, "10": 0.0, "11": 0.25}),
        ),
        (
            np.ones(2 ** 5) / 2 ** 5,
            BitstringDistribution(
                {"".join(string): 1 / 2 ** 5 for string in product("01", repeat=5)}
            ),
        ),
    ],
)
def test_constructs_correct_bitstring_distribution_from_probability_distribution(
    prob_dist, expected_bitstring_dist
):
    """Probability distributions is converted to matching bitstring distributions.

    The bitstring distributions constructed from prabability distribution should have:
Beispiel #22
0
def test_bitstring_distribution_gets_normalized_by_default(distribution):
    """Constructing without an explicit normalize flag normalizes the dict."""
    constructed = BitstringDistribution(distribution)
    assert is_normalized(constructed.distribution_dict)