Example 1
    def test_intermediate_cost(self, dim, n_mean, simple_embedding):
        """Test that the cost function evaluates as expected on non-initial parameters. This is
        done by comparing the cost function calculated using train.Stochastic with a manual
        calculation. The manual calculation involves sampling from VGBS with the non-initial
        params and averaging the cost function over the result."""
        n_samples = 10000
        objectives = np.linspace(0.5, 1.5, dim)
        h = self.h_setup(objectives)
        A = np.eye(dim)
        vgbs = train.VGBS(A, n_mean, simple_embedding, threshold=False)

        n_mean_by_mode = [n_mean / dim] * dim
        samples = self.identity_sampler(n_mean_by_mode, n_samples=n_samples)
        vgbs.add_A_init_samples(samples)

        params = np.linspace(0, 1 / dim, dim)
        cost_fn = train.Stochastic(h, vgbs)
        cost = cost_fn(params, n_samples=n_samples)

        # We need to generate new samples and then calculate the cost with respect to them
        new_n_mean_by_mode = vgbs.mean_photons_by_mode(params)
        new_samples = self.identity_sampler(new_n_mean_by_mode, n_samples=n_samples)
        expected_cost = np.mean(np.sum((new_samples - objectives) ** 2, axis=1))

        assert np.allclose(cost, expected_cost, rtol=0.1)
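Two helpers used above, h_setup and identity_sampler, are fixtures defined elsewhere in the suite. A minimal sketch of what they could look like, inferred from how the test uses them and from the twice-negative-binomial distribution described in Example 4 below (both definitions are assumptions, not the suite's actual fixtures):

import numpy as np

def h_setup(objectives):
    # Assumed: returns h(sample) = sum((sample - objectives) ** 2), matching
    # the expected_cost computation in the test
    def h(sample):
        return np.sum((sample - objectives) ** 2)
    return h

def identity_sampler(n_mean_by_mode, n_samples):
    # Assumed: with A = identity the modes are independent, and photon counts
    # per mode follow twice a negative binomial with r = 0.5 and
    # q = 1 / (1 + n_mean) (see the docstring in Example 4)
    rng = np.random.default_rng()
    q = 1 / (1 + np.asarray(n_mean_by_mode))
    return 2 * rng.negative_binomial(0.5, q, size=(n_samples, len(q)))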
Example 2
    def test_h_reparametrized_example(self, dim, vgbs, params, embedding):
        """Test that the h_reparametrized method returns the correct value when compared to
        working the result out by hand."""
        cost_fn = train.Stochastic(h, vgbs)
        sample = np.ones(dim)
        h_reparam = cost_fn.h_reparametrized(sample, params)
        h_reparam_expected = -1.088925964188385
        assert np.allclose(h_reparam, h_reparam_expected)
Example 3
    def test_gradient_one_sample(self, vgbs, dim, params, threshold):
        """Test that the _gradient_one_sample method returns the correct values when compared to
        calculations done by hand"""
        cost_fn = train.Stochastic(h, vgbs)
        sample = np.ones(dim)
        gradient = cost_fn._gradient_one_sample(sample, params)

        if threshold:
            grad_target = np.array([18.46861628, 22.53873502, 26.60885377])
        else:
            grad_target = np.array([17.17157601, 20.94382262, 24.71606922])

        assert np.allclose(gradient, grad_target)
Example 4
    def test_gradient(self, dim, n_mean, simple_embedding):
        """Test that the gradient evaluates as expected when compared to a value calculated by
        hand.

        Consider the problem with respect to a single mode. We want to calculate
        E((s - x) ** 2) with s the number of photons in the mode, x the element of the fixed
        vector, and with the expectation value calculated with respect to the (twice) negative
        binomial distribution. We know that E(s) = 2 * r * (1 - q) / q and
        Var(s) = 4 * (1 - q) * r / q ** 2 in terms of the r and q parameters of the negative
        binomial distribution. Using q = 1 / (1 + n_mean) with n_mean the mean number of
        photons in that mode and r = 0.5, we can calculate
        E((s - x) ** 2) = 3 * n_mean ** 2 + 2 * (1 - x) * n_mean + x ** 2.
        Differentiating with respect to n_mean gives:
        d/dn_mean E((s - x) ** 2) = 6 * n_mean + 2 * (1 - x).
        """
        n_samples = 20000  # We need a lot of shots due to the high variance in the distribution
        objectives = np.linspace(0.5, 1.5, dim)
        h = self.h_setup(objectives)
        A = np.eye(dim)
        vgbs = train.VGBS(A, n_mean, simple_embedding, threshold=False)

        n_mean_by_mode = [n_mean / dim] * dim
        samples = self.identity_sampler(n_mean_by_mode, n_samples=n_samples)
        vgbs.add_A_init_samples(samples)

        params = np.linspace(0, 1 / dim, dim)
        cost_fn = train.Stochastic(h, vgbs)

        # We want to calculate dcost_by_dn as this is available analytically, where n is the mean
        # photon number. The following calculates this using the chain rule:
        dcost_by_dtheta = cost_fn.grad(params, n_samples=n_samples)
        dtheta_by_dw = 1 / np.diag(simple_embedding.jacobian(params))
        A_diag = np.diag(vgbs.A(params))
        A_init_diag = np.diag(vgbs.A_init)
        # Differentiate Eq. (8) of https://arxiv.org/abs/2004.04770 and invert to obtain dw/dn
        dw_by_dn = (1 - A_diag**2) ** 2 / (2 * A_diag * A_init_diag)
        # Now use the chain rule
        dcost_by_dn = dcost_by_dtheta * dtheta_by_dw * dw_by_dn

        n_mean_by_mode = vgbs.mean_photons_by_mode(params)

        dcost_by_dn_expected = 6 * n_mean_by_mode + 2 * (1 - objectives)

        assert np.allclose(dcost_by_dn, dcost_by_dn_expected, rtol=0.5)
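The algebra in the docstring can be checked symbolically. A quick sketch with sympy (not part of the test suite) that reproduces both the expectation value and its derivative from the stated moments:

import sympy as sp

n, x = sp.symbols("n x", positive=True)
# E(s) = n and Var(s) = 2 * n * (1 + n) follow from E(s) = 2 * r * (1 - q) / q
# and Var(s) = 4 * (1 - q) * r / q ** 2 with r = 0.5 and q = 1 / (1 + n)
expectation = 2 * n * (1 + n) + (n - x) ** 2  # Var(s) + (E(s) - x) ** 2
assert sp.expand(expectation) == sp.expand(3 * n**2 + 2 * (1 - x) * n + x**2)
assert sp.expand(sp.diff(expectation, n)) == sp.expand(6 * n + 2 * (1 - x))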
Example 5
    def test_gradient(self, vgbs, dim, params):
        """Test that the gradient method returns the expected value when the VGBS class is
        preloaded with a dataset where half of the datapoints are zeros and half of the
        datapoints are ones. The expected result of the gradient method is then simply the
        average of _gradient_one_sample applied to a ones vector and a zeros vector."""
        n_samples = 10
        zeros = np.zeros((n_samples, dim))
        ones = np.ones((n_samples, dim))
        samples = np.vstack([zeros, ones])
        vgbs.add_A_init_samples(samples)

        cost_fn = train.Stochastic(h, vgbs)
        g0 = cost_fn._gradient_one_sample(zeros[0], params)
        g1 = cost_fn._gradient_one_sample(ones[0], params)
        grad_expected = (g0 + g1) * 0.5

        grad = cost_fn.grad(params, 2 * n_samples)
        assert np.allclose(grad_expected, grad)
        assert grad.shape == (dim - 1,)
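The behaviour being asserted can be summarized in a couple of lines. A sketch (not the library's implementation) of a stochastic gradient that averages the per-sample gradient over the stored dataset:

import numpy as np

def stochastic_grad(gradient_one_sample, samples, params):
    # With half zeros and half ones this reduces to (g0 + g1) / 2,
    # as asserted in the test above
    return np.mean([gradient_one_sample(s, params) for s in samples], axis=0)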
Example 6
    def test_initial_cost(self, dim, n_mean, simple_embedding):
        """Test that the cost function evaluates as expected on initial parameters of all zeros"""
        n_samples = 1000
        objectives = np.linspace(0.5, 1.5, dim)
        h = self.h_setup(objectives)
        A = np.eye(dim)
        vgbs = train.VGBS(A, n_mean, simple_embedding, threshold=False)

        n_mean_by_mode = [n_mean / dim] * dim
        samples = self.identity_sampler(n_mean_by_mode, n_samples=n_samples)
        vgbs.add_A_init_samples(samples)

        params = np.zeros(dim)
        cost_fn = train.Stochastic(h, vgbs)
        cost = cost_fn(params, n_samples=n_samples)

        # We can directly calculate the cost with respect to the samples
        expected_cost = np.mean(np.sum((samples - objectives) ** 2, axis=1))

        assert np.allclose(cost, expected_cost)
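A note on why no tolerance is needed above, in contrast to the rtol=0.1 of Example 1: at the initial point the reweighting in h_reparametrized is trivial, so the cost is exactly the sample mean of h over the preloaded samples. A sketch of that check, assuming the embedding maps zero parameters to unit weights:

# Assumed: embedding(0) gives unit weights, so the trained matrix coincides
# with the initial one and the likelihood ratio is identically 1
params0 = np.zeros(dim)
assert np.allclose(vgbs.A(params0), vgbs.A_init)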
Example 7
    def test_evaluate(self, vgbs, dim, params):
        """Test that the evaluate method returns the expected value when the VGBS class is
        preloaded with a dataset where half of the datapoints are zeros and half of the
        datapoints are ones. The expected result of the evaluate method is then simply the
        average of h_reparametrized applied to a ones vector and a zeros vector."""
        n_samples = 10
        zeros = np.zeros((n_samples, dim))
        ones = np.ones((n_samples, dim))
        samples = np.vstack([zeros, ones])
        vgbs.add_A_init_samples(samples)

        cost_fn = train.Stochastic(h, vgbs)
        h0 = cost_fn.h_reparametrized(zeros[0], params)
        h1 = cost_fn.h_reparametrized(ones[0], params)
        eval_expected = (h0 + h1) * 0.5

        # Calling an instance of Stochastic invokes its evaluate method
        eval_cost = cost_fn(params, 2 * n_samples)

        assert np.allclose(eval_cost, eval_expected)
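As with the gradient in Example 5, evaluation is a sample average. A sketch of the relationship this test checks, under the same assumptions:

import numpy as np

def stochastic_evaluate(h_reparametrized, samples, params):
    # With half zeros and half ones this reduces to (h0 + h1) / 2,
    # as asserted in the test above
    return np.mean([h_reparametrized(s, params) for s in samples])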
Example 8
    def test_h_reparametrized(self, vgbs, dim, params):
        """Test that the h_reparametrized method behaves as expected by calculating the cost
        function over a fixed set of PNR samples using both the reparametrized and
        non-reparametrized methods"""
        cost_fn = train.Stochastic(h, vgbs)
        possible_samples = list(itertools.product([0, 1, 2], repeat=dim))

        probs = [vgbs.prob_sample(params, s) for s in possible_samples]
        probs_init = [vgbs.prob_sample(np.zeros(dim - 1), s) for s in possible_samples]

        cost = sum(probs[i] * h(s) for i, s in enumerate(possible_samples))

        cost_reparam = sum(
            probs_init[i] * cost_fn.h_reparametrized(s, params)
            for i, s in enumerate(possible_samples)
        )

        assert np.allclose(cost, cost_reparam)
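The identity verified here is importance sampling: summing P_theta(S) * h(S) over all samples equals summing P_init(S) * h_reparametrized(S, theta). Pointwise, this suggests the reparametrized function is h weighted by the likelihood ratio. A sketch consistent with the test (an assumption about the form; the library may compute the ratio analytically rather than through prob_sample):

import numpy as np

def h_reparam_sketch(h, vgbs, sample, params, dim):
    # Reweight h by P_theta(S) / P_init(S) so that averaging over samples
    # drawn at the initial parameters estimates the cost at params
    p_theta = vgbs.prob_sample(params, sample)
    p_init = vgbs.prob_sample(np.zeros(dim - 1), sample)
    return h(sample) * p_theta / p_init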