Example #1
0
 def test_invalid_n_mean(self):
     """Check that a negative mean photon number triggers a ``ValueError``."""
     graph = nx.complete_graph(5)
     expected_msg = "Mean photon number must be non-negative"
     with pytest.raises(ValueError, match=expected_msg):
         similarity.prob_event_mc(graph, 2, 2, n_mean=-1)
Example #2
0
 def test_invalid_samples(self):
     """Check that requesting fewer than one sample triggers a ``ValueError``."""
     graph = nx.complete_graph(5)
     expected_msg = "Number of samples must be at least one"
     with pytest.raises(ValueError, match=expected_msg):
         similarity.prob_event_mc(graph, 2, 2, samples=0)
Example #3
0
 def test_low_count(self):
     """Check that a negative ``max_count_per_mode`` triggers a ``ValueError``."""
     graph = nx.complete_graph(5)
     expected_msg = "Maximum number of photons per mode must be non-negative"
     with pytest.raises(ValueError, match=expected_msg):
         similarity.prob_event_mc(graph, 2, max_count_per_mode=-1)
Example #4
0
 def test_invalid_loss(self):
     """Check that a loss parameter outside [0, 1] triggers a ``ValueError``."""
     graph = nx.complete_graph(5)
     expected_msg = "Loss parameter must take a value between zero and one"
     with pytest.raises(ValueError, match=expected_msg):
         similarity.prob_event_mc(graph, 2, 2, loss=2)
Example #5
0
    def test_known_result(self):
        """Check that known event probabilities are correctly reproduced."""
        small = nx.complete_graph(4)
        p_small = similarity.prob_event_mc(small, 2, 1, 4)
        assert np.allclose(p_small, 0.2108778117852639)

        large = nx.complete_graph(20)
        p_large = similarity.prob_event_mc(large, 20, 1, 1, samples=10)
        assert np.allclose(p_large, 0)
    def test_no_loss(self, monkeypatch):
        """Check that the SF program built for lossless GBS contains no loss channels."""
        graph = nx.complete_graph(5)
        run_spy = mock.MagicMock()

        # Intercept the engine run so the program can be inspected without executing it.
        with monkeypatch.context() as ctx:
            ctx.setattr(sf.LocalEngine, "run", run_spy)
            similarity.prob_event_mc(graph, 6, 3, samples=1)
            program = run_spy.call_args[0][0]

        loss_flags = [isinstance(op, sf.ops.LossChannel) for op in program.circuit]
        assert not all(loss_flags)
    def test_all_loss(self, monkeypatch):
        """Check that applying maximum loss (loss=1) leaves the device in the vacuum state."""
        dim = 5
        graph = nx.complete_graph(dim)
        run_spy = mock.MagicMock()

        # Intercept the engine run so the constructed program can be re-executed below.
        with monkeypatch.context() as ctx:
            ctx.setattr(sf.LocalEngine, "run", run_spy)
            similarity.prob_event_mc(graph, 6, 3, samples=1, loss=1)
            program = run_spy.call_args[0][0]

        engine = sf.LocalEngine(backend="gaussian")
        state = engine.run(program).state

        # Vacuum signature: covariance (hbar/2)*identity and zero displacement.
        assert np.allclose(state.cov(), 0.5 * state.hbar * np.eye(2 * dim))
        assert np.allclose(state.displacement(), np.zeros(dim))
Example #8
0
 def test_mean_computation_event(self, monkeypatch):
     """Tests if the calculation of the sample mean is performed correctly. The test
     monkeypatches the fock_prob function so that the probability is the same for each sample
     and is equal to 1/336, i.e., one over the number of samples in the event with 6 modes,
     6 photons, and max 3 photons per mode, so the Monte Carlo estimate must equal 1."""
     graph = nx.complete_graph(6)
     with monkeypatch.context() as m:
         m.setattr(
             "strawberryfields.backends.BaseGaussianState.fock_prob",
             lambda *args, **kwargs: 1.0 / 336,
         )
         assert np.allclose(similarity.prob_event_mc(graph, 6, 3), 1.0)
    def test_prob_vacuum_event(self):
        """Check that the zero-photon event has unit probability when the GBS
        device is configured with zero mean photon number."""
        graph = nx.complete_graph(10)
        probability = similarity.prob_event_mc(graph, 0, 0, 0)
        assert probability == 1.0
 def test_invalid_photon_number(self):
     """Check that a photon number below zero triggers a ``ValueError``."""
     graph = nx.complete_graph(10)
     expected_msg = "Photon number must not be below zero"
     with pytest.raises(ValueError, match=expected_msg):
         similarity.prob_event_mc(graph, -1, 2)
Example #11
0
 def test_max_loss(self):
     """Check that maximum loss (loss=1) makes the event probability vanish."""
     graph = nx.complete_graph(6)
     probability = similarity.prob_event_mc(graph, 6, 3, samples=1, loss=1)
     assert probability == 0.0
Example #12
0
 def test_odd_photon_numbers(self, k, nmax):
     """Check that events with an odd total photon number have zero probability."""
     graph = nx.complete_graph(10)
     probability = similarity.prob_event_mc(graph, k, nmax)
     assert probability == 0.0
Example #13
0
##############################################################################
# To avoid calculating a large number of sample probabilities, an alternative is to perform a
# Monte Carlo approximation. Here, samples within an event are selected uniformly at random and
# their resultant probabilities are calculated. If :math:`N` samples :math:`\{S_{1}, S_{2},
# \ldots , S_{N}\}` are generated, then the event probability can be approximated as
#
# .. math::
#     p(E_{k, n_{\max}}) \approx \frac{1}{N}\sum_{i=1}^N p(S_i) |E_{k, n_{\max}}|,
#
# with :math:`|E_{k, n_{\max}}|` denoting the cardinality of the event.
#
# This method can be accessed using the :func:`~.prob_event_mc` function. The 4-photon event is
# approximated as:

# Monte Carlo estimate of the 4-photon event probability for the graph m0_a.
event_probability = similarity.prob_event_mc(
    nx.Graph(m0_a), 4, max_count_per_mode=2, n_mean=6
)
print(event_probability)

##############################################################################
# The feature vector can then be calculated through Monte Carlo sampling using
# :func:`~.feature_vector_mc`.
#
# .. note::
#     The results of :func:`~.prob_event_mc` and :func:`~.feature_vector_mc` are probabilistic and
#     may vary between runs. Increasing the optional ``samples`` parameter will increase accuracy
#     but slow down calculation.
#
# The second method of Monte Carlo approximation is intended for use in scenarios where it is
# computationally intensive to pre-calculate a statistically significant dataset of samples from
# GBS.
#
# Machine learning with GBS graph kernels