# Note: the import paths below assume an early qiskit-machine-learning
# release in which TwoLayerQNN still accepts ``var_form``; adjust them to
# match the installed version.
import numpy as np
from torch import Tensor

from qiskit import Aer
from qiskit.circuit.library import RealAmplitudes, ZZFeatureMap
from qiskit.utils import QuantumInstance

from qiskit_machine_learning.connectors import TorchConnector
from qiskit_machine_learning.neural_networks import TwoLayerQNN

# QiskitMachineLearningTestCase is provided by the repository's test package
from test import QiskitMachineLearningTestCase


class TestTwoLayerQNN(QiskitMachineLearningTestCase):
    """Two Layer QNN Tests."""

    def setUp(self):
        super().setUp()

        # specify "run configuration": a statevector simulator, stored on the
        # instance because test_batch_gradients uses it directly
        backend = Aer.get_backend('statevector_simulator')
        self.sv_quantum_instance = QuantumInstance(backend)

        # define QNN
        feature_map = ZZFeatureMap(2)
        var_form = RealAmplitudes(2, reps=1)
        self.qnn = TwoLayerQNN(2,
                               feature_map=feature_map,
                               var_form=var_form,
                               quantum_instance=self.sv_quantum_instance)

    def test_two_layer_qnn1(self):
        """ Opflow QNN Test """

        input_data = np.zeros(self.qnn.num_inputs)
        weights = np.zeros(self.qnn.num_weights)

        # test forward pass
        result = self.qnn.forward(input_data, weights)
        self.assertEqual(result.shape, (1, *self.qnn.output_shape))

        # test backward pass
        result = self.qnn.backward(input_data, weights)
        self.assertEqual(result[0].shape,
                         (1, *self.qnn.output_shape, self.qnn.num_inputs))
        self.assertEqual(result[1].shape,
                         (1, *self.qnn.output_shape, self.qnn.num_weights))

    def test_batch_gradients(self):
        """Test backward pass for batch input."""

        # construct random data set
        num_inputs = 2
        num_samples = 10
        x = np.random.rand(num_samples, num_inputs)

        # set up QNN
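        # no feature map or var_form is given, so TwoLayerQNN falls back to
        # its defaults (a ZZ feature map and a RealAmplitudes ansatz)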
        qnn = TwoLayerQNN(num_qubits=num_inputs,
                          quantum_instance=self.sv_quantum_instance)

        # set up PyTorch module
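        # TorchConnector exposes the QNN weights as a torch Parameter so that
        # autograd can drive the QNN's backward pass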
        initial_weights = np.random.rand(qnn.num_weights)
        model = TorchConnector(qnn, initial_weights=initial_weights)

        # test single gradient
        w = model.weights.detach().numpy()
        res_qnn = qnn.forward(x[0, :], w)

        # construct finite difference gradient for weights
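        # central differences: grad[k] ~ (f(w + eps*e_k) - f(w - eps*e_k)) / (2*eps)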
        eps = 1e-4
        grad = np.zeros(w.shape)
        for k in range(len(w)):
            delta = np.zeros(w.shape)
            delta[k] += eps

            f_1 = qnn.forward(x[0, :], w + delta)
            f_2 = qnn.forward(x[0, :], w - delta)

            grad[k] = (f_1 - f_2) / (2 * eps)

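        # backward() returns (input_gradients, weight_gradients); pick the
        # weight gradients of the single sample and single output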
        grad_qnn = qnn.backward(x[0, :], w)[1][0, 0, :]
        self.assertAlmostEqual(np.linalg.norm(grad - grad_qnn), 0.0, places=4)

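        # the TorchConnector forward pass must reproduce the raw QNN result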
        model.zero_grad()
        res_model = model(Tensor(x[0, :]))
        self.assertAlmostEqual(np.linalg.norm(res_model.detach().numpy() -
                                              res_qnn[0]),
                               0.0,
                               places=4)
        res_model.backward()
        grad_model = model.weights.grad
        self.assertAlmostEqual(np.linalg.norm(grad_model.detach().numpy() -
                                              grad_qnn),
                               0.0,
                               places=4)

        # test batch input
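        # batch_grad[k, i, 0] approximates d f(x_i) / d w_k by central differences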
        batch_grad = np.zeros((*w.shape, num_samples, 1))
        for k in range(len(w)):
            delta = np.zeros(w.shape)
            delta[k] += eps

            f_1 = qnn.forward(x, w + delta)
            f_2 = qnn.forward(x, w - delta)

            batch_grad[k] = (f_1 - f_2) / (2 * eps)

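        # sum the per-sample gradients; the transpose below aligns the axes of
        # the finite-difference estimate with the QNN's (outputs, weights) layout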
        batch_grad = np.sum(batch_grad, axis=1)
        batch_grad_qnn = np.sum(qnn.backward(x, w)[1], axis=0)
        self.assertAlmostEqual(np.linalg.norm(batch_grad -
                                              batch_grad_qnn.transpose()),
                               0.0,
                               places=4)

        model.zero_grad()
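        # summing the batch outputs lets a single backward() call accumulate
        # the per-sample weight gradients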
        batch_res_model = sum(model(Tensor(x)))
        batch_res_model.backward()
        self.assertAlmostEqual(np.linalg.norm(model.weights.grad.numpy() -
                                              batch_grad.transpose()[0]),
                               0.0,
                               places=4)