def test_bad_event_photon_numbers(self):
    """Check that a ``ValueError`` is raised when one of the requested
    event photon numbers is negative."""
    samples = [[1, 1, 0], [1, 0, 1]]
    expected_message = "Cannot request events with photon number below zero"
    with pytest.raises(ValueError, match=expected_message):
        similarity.feature_vector_sampling(samples, [-1, 4], 1)
def test_bad_max_count(self):
    """Check that a ``ValueError`` is raised when the maximum photon count
    per mode is zero (it must be positive)."""
    samples = [[1, 1, 0], [1, 0, 1]]
    expected_message = "Maximum number of photons per mode must be at least"
    with pytest.raises(ValueError, match=expected_message):
        similarity.feature_vector_sampling(samples, [2, 4], 0)
def test_correct_distribution(self, monkeypatch):
    """Verify the feature vector produced from hard-coded samples.

    A fixed set of samples and their corresponding events is used to check
    the output of ``feature_vector_sampling``. The ``sample_to_event``
    helper invoked internally is monkeypatched to return the hard-coded
    event for each sample, so only the counting and normalisation logic of
    ``feature_vector_sampling`` itself is exercised.
    """
    # max_count_per_mode = 1
    event_for_sample = {
        (1, 1, 0, 0, 0): 2,
        (1, 1, 1, 0, 0): 3,
        (1, 1, 1, 1, 0): 4,
        (1, 1, 1, 1, 1): 5,
        (2, 0, 0, 0, 0): None,
        (3, 0, 0, 0, 0): None,
        (4, 0, 0, 0, 0): None,
        (5, 0, 0, 0, 0): None,
        (0, 1, 1, 0, 0): 2,
    }
    # Repeat one sample so an event with multiplicity two is exercised.
    samples = list(event_for_sample) + [(1, 1, 1, 1, 1)]
    photon_numbers = [2, 1, 3, 5]  # deliberately not in sorted order
    expected = [0.2, 0, 0.1, 0.2]

    with monkeypatch.context() as patcher:
        patcher.setattr(
            similarity, "sample_to_event", lambda s, _: event_for_sample[s]
        )
        result = similarity.feature_vector_sampling(samples, photon_numbers, 1)

    assert result == expected
# Calculating a feature vector # ---------------------------- # # We provide two methods for calculating a feature vector of GBS event probabilities in # Strawberry Fields: # # 1. Through sampling. # 2. Using a Monte Carlo estimate of the probability. # # In the first method, all one needs to do is generate some GBS samples from the graph of # interest and fix the composition of the feature vector. For example, for a feature vector # :math:`f_{\mathbf{k} = (2, 4, 6), n_{\max}=2}` we use: print( similarity.feature_vector_sampling(m0, event_photon_numbers=[2, 4, 6], max_count_per_mode=2)) ############################################################################## # For the second method, suppose we want to calculate the event probabilities exactly rather than # through sampling. To do this, we consider the event probability :math:`p_{k, n_{\max}}` as the # sum over all sample probabilities in the event. In GBS, each sample probability is determined by # the hafnian of a relevant sub-adjacency matrix. While this is tough to calculate, what makes # calculating :math:`p_{k, n_{\max}}` really challenging is the number of samples the corresponding # event contains! For example, the 6-photon event over 17 modes :math:`E_{k=6, n_{\max}=2}` # contains the following number of samples : print(similarity.event_cardinality(6, 2, 17)) ############################################################################## # To avoid calculating a large number of sample probabilities, an alternative is to perform a
def test_low_count(self):
    """Check that a ``ValueError`` is raised when ``max_count_per_mode``
    is negative."""
    samples = [[1, 1, 0], [1, 0, 1]]
    with pytest.raises(ValueError, match="Maximum number of photons"):
        similarity.feature_vector_sampling(samples, [2, 4], -1)