Example #1
    def test_cartesian_product(self):
        """Ensure cartesian_product works. inputs are any iterable you want."""
        result1 = list(util.kwargs_cartesian_product(a=[1, 2], b='hi'))
        self.assertEqual(result1, [{
            'a': 1,
            'b': 'h'
        }, {
            'a': 1,
            'b': 'i'
        }, {
            'a': 2,
            'b': 'h'
        }, {
            'a': 2,
            'b': 'i'
        }])

        result2 = list(
            util.kwargs_cartesian_product(**{
                'one': [1, 2, 3],
                'two': [4, 5]
            }))
        self.assertEqual(result2, [{
            'one': 1,
            'two': 4
        }, {
            'one': 1,
            'two': 5
        }, {
            'one': 2,
            'two': 4
        }, {
            'one': 2,
            'two': 5
        }, {
            'one': 3,
            'two': 4
        }, {
            'one': 3,
            'two': 5
        }])

        with self.assertRaisesRegex(ValueError, expected_regex='not iterable'):
            list(util.kwargs_cartesian_product(a=[1, 2], b=-1))
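
The implementation of util.kwargs_cartesian_product is not shown in these examples; a minimal sketch of such a helper, assuming it simply wraps itertools.product over the keyword arguments, could look like this:

import itertools


def kwargs_cartesian_product(**kwargs):
    """Yield one dict per element of the cartesian product of the values."""
    # Reject non-iterable values up front, matching the ValueError above.
    for name, values in kwargs.items():
        if not hasattr(values, '__iter__'):
            raise ValueError('Value for {} is not iterable.'.format(name))
    keys = list(kwargs.keys())
    for combination in itertools.product(*(kwargs[key] for key in keys)):
        yield dict(zip(keys, combination))
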
class GradientBenchmarksTest(tf.test.TestCase, parameterized.TestCase):
    """Test the Gradient benchmarking class."""
    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(
                **{
                    'diff': [
                        linear_combination.ForwardDifference(),
                        linear_combination.CentralDifference(),
                        parameter_shift.ParameterShift(),
                        stochastic_differentiator.SGDifferentiator(),
                    ],
                    'params': [TEST_PARAMS_1, TEST_PARAMS_2]
                })))
    def testBenchmarkGradient(self, diff, params):
        """Test that op constructs and runs correctly."""

        bench_name = "GradientBenchmarks.{}_{}_{}_{}_{}".format(
            diff.__class__.__name__, params.n_qubits, params.n_moments,
            params.batch_size, params.n_symbols)
        proto_file_path = os.path.join(SRC, "reports/",
                                       "{}".format(bench_name))
        self.addCleanup(os.remove, proto_file_path)

        bench = GradientBenchmarks(params=params)
        bench.setup()
        bench._benchmark_tfq_differentiator(diff, params)

        res = benchmark_util.read_benchmark_entry(proto_file_path)
        self.assertEqual(res.name, bench_name)
        self.assertEqual(
            res.extras.get("n_qubits").double_value, params.n_qubits)
        self.assertEqual(
            res.extras.get("n_moments").double_value, params.n_moments)
        self.assertEqual(
            res.extras.get("op_density").double_value, params.op_density)
        assert hasattr(res, 'iters')
        assert hasattr(res, 'wall_time')

class ControlledPQCTest(tf.test.TestCase, parameterized.TestCase):
    """Tests for the ControlledPQC layer."""
    def test_controlled_pqc_instantiate(self):
        """Basic creation test."""
        symbol = sympy.Symbol('alpha')
        bit = cirq.GridQubit(0, 0)
        learnable_flip = cirq.Circuit(cirq.X(bit)**symbol)
        controlled_pqc.ControlledPQC(learnable_flip, cirq.Z(bit))
        controlled_pqc.ControlledPQC(learnable_flip,
                                     cirq.Z(bit),
                                     repetitions=500)

    def test_controlled_pqc_noisy_error(self):
        """Ensure error refers to alternate layer."""
        symbol = sympy.Symbol('alpha')
        qubit = cirq.GridQubit(0, 0)
        learnable_flip = cirq.Circuit(cirq.X(qubit)**symbol)
        with self.assertRaisesRegex(
                ValueError, expected_regex='tfq.layers.NoisyControlledPQC'):
            controlled_pqc.ControlledPQC(learnable_flip,
                                         cirq.Z(qubit),
                                         backend='noisy')

    def test_controlled_pqc_backend_error(self):
        """Test that invalid backends error properly."""
        symbol = sympy.Symbol('alpha')
        bit = cirq.GridQubit(0, 0)
        learnable_flip = cirq.Circuit(cirq.X(bit)**symbol)

        class MyExpectation(cirq.sim.simulator.SimulatesExpectationValues):
            """My expectation values simulator."""
            def simulate_expectation_values_sweep(self):
                """do nothing."""
                return

        class MySample(cirq.Sampler):
            """My state simulator."""
            def run_sweep(self):
                """do nothing."""
                return

        with self.assertRaisesRegex(
                TypeError,
                expected_regex="cirq.sim.simulator.SimulatesExpectation"):
            controlled_pqc.ControlledPQC(learnable_flip,
                                         cirq.Z(bit),
                                         backend='junk')

        with self.assertRaisesRegex(
                TypeError,
                expected_regex="cirq.sim.simulator.SimulatesExpectation"):
            controlled_pqc.ControlledPQC(learnable_flip,
                                         cirq.Z(bit),
                                         repetitions=None,
                                         backend=MySample)

        with self.assertRaisesRegex(TypeError, expected_regex="cirq.Sampler"):
            controlled_pqc.ControlledPQC(learnable_flip,
                                         cirq.Z(bit),
                                         repetitions=500,
                                         backend=MyExpectation)

    def test_controlled_pqc_model_circuit_error(self):
        """Test that invalid circuits error properly."""
        bit = cirq.GridQubit(0, 0)
        no_symbols = cirq.Circuit(cirq.X(bit))

        with self.assertRaisesRegex(TypeError, expected_regex="cirq.Circuit"):
            controlled_pqc.ControlledPQC('junk', cirq.Z(bit))

        with self.assertRaisesRegex(ValueError,
                                    expected_regex="no sympy.Symbols"):
            controlled_pqc.ControlledPQC(no_symbols, cirq.Z(bit))

    def test_controlled_pqc_operators_error(self):
        """Test that invalid operators error properly."""
        symbol = sympy.Symbol('alpha')
        bit = cirq.GridQubit(0, 0)
        learnable_flip = cirq.Circuit(cirq.X(bit)**symbol)

        with self.assertRaisesRegex(
                TypeError, expected_regex="cirq.PauliSum or cirq.PauliString"):
            controlled_pqc.ControlledPQC(learnable_flip, 'junk')

        with self.assertRaisesRegex(TypeError, expected_regex="Each element"):
            controlled_pqc.ControlledPQC(learnable_flip, [[cirq.Z(bit)]])

        with self.assertRaisesRegex(TypeError, expected_regex="Each element"):
            controlled_pqc.ControlledPQC(learnable_flip, [cirq.Z(bit), 'bad'])

    def test_controlled_pqc_repetitions_error(self):
        """Test that invalid repetitions error properly."""
        symbol = sympy.Symbol('alpha')
        bit = cirq.GridQubit(0, 0)
        learnable_flip = cirq.Circuit(cirq.X(bit)**symbol)

        with self.assertRaisesRegex(ValueError,
                                    expected_regex="greater than zero."):
            controlled_pqc.ControlledPQC(learnable_flip,
                                         cirq.Z(bit),
                                         repetitions=-100)

        with self.assertRaisesRegex(TypeError,
                                    expected_regex="positive integer value"):
            controlled_pqc.ControlledPQC(learnable_flip,
                                         cirq.Z(bit),
                                         repetitions='junk')

    def test_controlled_pqc_symbols_property(self):
        """Test that the `symbols` property returns the symbols."""
        c, b, a, d = sympy.symbols('c b a d')
        bit = cirq.GridQubit(0, 0)
        test_circuit = cirq.Circuit(
            cirq.H(bit)**a,
            cirq.Z(bit)**b,
            cirq.X(bit)**d,
            cirq.Y(bit)**c)
        layer = controlled_pqc.ControlledPQC(test_circuit, cirq.Z(bit))
        self.assertEqual(layer.symbols, [a, b, c, d])

    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(repetitions=[None, 5000],
                                          backend=[None,
                                                   cirq.Simulator()])))
    def test_controlled_pqc_simple_learn(self, backend, repetitions):
        """Test a simple learning scenario using analytic and sample expectation
        on many backends."""
        bit = cirq.GridQubit(0, 0)
        circuit = cirq.Circuit(cirq.rx(sympy.Symbol('theta'))(bit))

        inputs = tf.keras.Input(shape=(1, ), dtype=tf.dtypes.float32)
        quantum_datum = tf.keras.Input(shape=(), dtype=tf.dtypes.string)
        l1 = tf.keras.layers.Dense(10)(inputs)
        l2 = tf.keras.layers.Dense(1)(l1)
        outputs = controlled_pqc.ControlledPQC(circuit,
                                               cirq.Z(bit),
                                               repetitions=repetitions,
                                               backend=backend)(
                                                   [quantum_datum, l2])
        model = tf.keras.Model(inputs=[quantum_datum, inputs], outputs=outputs)

        data_in = np.array([[1], [0]], dtype=np.float32)
        data_out = np.array([[1], [-1]], dtype=np.float32)

        model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.05),
                      loss=tf.keras.losses.mean_squared_error)

        data_circuits = util.convert_to_tensor(
            [cirq.Circuit(cirq.X(bit)),
             cirq.Circuit()])

        history = model.fit(x=[data_circuits, data_in], y=data_out, epochs=30)
        self.assertAllClose(history.history['loss'][-1], 0, atol=1e-1)
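
For reference, here is a minimal eager-mode sketch of the same layer used outside a Keras model (reusing the imports and helpers above); the second input supplies one value per symbol in the circuit, exactly as the Dense output does in the test:

bit = cirq.GridQubit(0, 0)
layer = controlled_pqc.ControlledPQC(
    cirq.Circuit(cirq.rx(sympy.Symbol('theta'))(bit)), cirq.Z(bit))
circuits = util.convert_to_tensor([cirq.Circuit(), cirq.Circuit()])
thetas = tf.constant([[0.0], [np.pi]], dtype=tf.float32)
# Expectation of Z after rx(theta): roughly [[1.0], [-1.0]].
print(layer([circuits, thetas]))
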
Example #4
class PQCTest(tf.test.TestCase, parameterized.TestCase):
    """Tests for the PQC layer."""
    def test_pqc_instantiate(self):
        """Basic creation test."""
        symbol = sympy.Symbol('alpha')
        qubit = cirq.GridQubit(0, 0)
        learnable_flip = cirq.Circuit(cirq.X(qubit)**symbol)
        pqc.PQC(learnable_flip, cirq.Z(qubit))
        pqc.PQC(learnable_flip, cirq.Z(qubit), repetitions=500)

    def test_pqc_model_circuit_error(self):
        """Test that invalid circuits error properly."""
        qubit = cirq.GridQubit(0, 0)
        no_symbols = cirq.Circuit(cirq.X(qubit))

        with self.assertRaisesRegex(
                TypeError,
                expected_regex="model_circuit must be a cirq.Circuit"):
            pqc.PQC('junk', cirq.Z(qubit))

        with self.assertRaisesRegex(
                ValueError,
                expected_regex="model_circuit has no sympy.Symbols"):
            pqc.PQC(no_symbols, cirq.Z(qubit))

    def test_pqc_operators_error(self):
        """Test that invalid operators error properly."""
        symbol = sympy.Symbol('alpha')
        qubit = cirq.GridQubit(0, 0)
        learnable_flip = cirq.Circuit(cirq.X(qubit)**symbol)

        with self.assertRaisesRegex(
                TypeError, expected_regex="cirq.PauliSum or cirq.PauliString"):
            pqc.PQC(learnable_flip, 'junk')

        with self.assertRaisesRegex(TypeError, expected_regex="Each element"):
            pqc.PQC(learnable_flip, [[cirq.Z(qubit)]])

        with self.assertRaisesRegex(TypeError, expected_regex="Each element"):
            pqc.PQC(learnable_flip, [cirq.Z(qubit), 'bad'])

    def test_pqc_repetitions_error(self):
        """Test that invalid repetitions error properly."""
        symbol = sympy.Symbol('alpha')
        qubit = cirq.GridQubit(0, 0)
        learnable_flip = cirq.Circuit(cirq.X(qubit)**symbol)

        with self.assertRaisesRegex(TypeError,
                                    expected_regex="positive integer value"):
            pqc.PQC(learnable_flip, cirq.Z(qubit), repetitions='junk')

        with self.assertRaisesRegex(ValueError,
                                    expected_regex="greater than zero."):
            pqc.PQC(learnable_flip, cirq.Z(qubit), repetitions=-100)

        with self.assertRaisesRegex(ValueError,
                                    expected_regex="greater than zero."):
            pqc.PQC(learnable_flip, cirq.Z(qubit), repetitions=0)

    def test_pqc_backend_error(self):
        """Test that invalid backends error properly."""
        symbol = sympy.Symbol('alpha')
        qubit = cirq.GridQubit(0, 0)
        learnable_flip = cirq.Circuit(cirq.X(qubit)**symbol)

        class MyState(cirq.SimulatesFinalState):
            """My state simulator."""
            def simulate_sweep(self):
                """do nothing."""
                return

        class MySample(cirq.Sampler):
            """My state simulator."""
            def run_sweep(self):
                """do nothing."""
                return

        with self.assertRaisesRegex(TypeError, expected_regex="cirq.Sampler"):
            pqc.PQC(learnable_flip,
                    cirq.Z(qubit),
                    backend=MyState,
                    repetitions=500)

        with self.assertRaisesRegex(TypeError,
                                    expected_regex="cirq.SimulatesFinalState"):
            pqc.PQC(learnable_flip,
                    cirq.Z(qubit),
                    backend=MySample,
                    repetitions=None)

    def test_pqc_initializer(self):
        """Test action of initializer."""
        (a, b, c) = sympy.symbols("a b c")
        qubit = cirq.GridQubit(0, 0)
        three_parameters = cirq.Circuit(
            [cirq.X(qubit)**a,
             cirq.Y(qubit)**b,
             cirq.Z(qubit)**c])
        mpqc_zeros = pqc.PQC(three_parameters,
                             cirq.Z(qubit),
                             initializer='zeros')
        mpqc_ones = pqc.PQC(three_parameters,
                            cirq.Z(qubit),
                            initializer='ones')
        self.assertAllEqual([[0, 0, 0]], mpqc_zeros.get_weights())
        self.assertAllEqual([[1, 1, 1]], mpqc_ones.get_weights())

    def test_pqc_regularizer(self):
        """Test attachment of regularizer to layer."""
        (a, b, c) = sympy.symbols("a b c")
        qubit = cirq.GridQubit(0, 0)
        three_parameters = cirq.Circuit(
            [cirq.X(qubit)**a,
             cirq.Y(qubit)**b,
             cirq.Z(qubit)**c])
        mpqc = pqc.PQC(three_parameters, cirq.Z(qubit))
        mpqc_r = pqc.PQC(three_parameters, cirq.Z(qubit), regularizer='l2')
        self.assertEqual(0, len(mpqc.losses))
        self.assertEqual(1, len(mpqc_r.losses))

    def test_pqc_constraint(self):
        """Test attachment of constraint to layer."""
        my_constraint = tf.keras.constraints.NonNeg()
        (a, b, c) = sympy.symbols("a b c")
        qubit = cirq.GridQubit(0, 0)
        three_parameters = cirq.Circuit(
            [cirq.X(qubit)**a,
             cirq.Y(qubit)**b,
             cirq.Z(qubit)**c])
        mpqc = pqc.PQC(three_parameters,
                       cirq.Z(qubit),
                       constraint=my_constraint)
        self.assertEqual(my_constraint, mpqc.parameters.constraint)

    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(backend=[None,
                                                   cirq.Simulator()],
                                          repetitions=[None, 5000])))
    def test_pqc_simple_learn(self, backend, repetitions):
        """Test a simple learning scenario using analytic and sample expectation
        on many backends."""
        qubit = cirq.GridQubit(0, 0)
        circuit = cirq.Circuit(cirq.X(qubit)**sympy.Symbol('bit'))

        quantum_datum = tf.keras.Input(shape=(), dtype=tf.dtypes.string)
        mpqc = pqc.PQC(circuit,
                       cirq.Z(qubit),
                       backend=backend,
                       repetitions=repetitions,
                       initializer=tf.keras.initializers.Constant(value=0.5))
        outputs = mpqc(quantum_datum)
        model = tf.keras.Model(inputs=quantum_datum, outputs=outputs)

        model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.03),
                      loss=tf.keras.losses.mean_squared_error)

        data_circuits = util.convert_to_tensor(
            [cirq.Circuit(cirq.X(qubit)),
             cirq.Circuit()])
        print(data_circuits)
        data_out = np.array([[1], [-1]], dtype=np.float32)

        # Model should learn to flip the qubit
        self.assertNear(mpqc.get_weights()[0][0], 0.5, 1e-1)
        history = model.fit(x=data_circuits, y=data_out, epochs=40)
        self.assertAllClose(history.history['loss'][-1], 0, atol=1e-1)
        self.assertNear(mpqc.get_weights()[0][0], 1, 1e-1)
Example #5
class ExecutionOpsConsistencyTest(tf.test.TestCase, parameterized.TestCase):
    """Test all ops produce equivalent output to one another."""
    @parameterized.parameters([{
        'op_and_sim': (op, sim)
    } for (op, sim) in zip(STATE_OPS, SIMS)])
    def test_supported_gates_consistent(self, op_and_sim):
        """Ensure that supported gates are consistent across backends."""
        op = op_and_sim[0]
        sim = op_and_sim[1]
        qubits = cirq.GridQubit.rect(1, 5)
        circuit_batch = []

        gate_ref = util.get_supported_gates()
        for gate in gate_ref:
            # Create a circuit whose state has non-zero entries in both the
            # real and imaginary parts.
            c = cirq.Circuit()
            for qubit in qubits:
                c += cirq.Circuit(cirq.Y(qubit)**0.125)

            if gate_ref[gate] == 2:
                op_qubits = np.random.choice(qubits, size=2, replace=False)
                c += cirq.Circuit(gate(*op_qubits))
            elif gate_ref[gate] == 1:
                op_qubits = np.random.choice(qubits, size=1, replace=False)
                c += cirq.Circuit(gate(*op_qubits))
            else:
                raise ValueError(
                    "Unable to test supported gates across all ops."
                    "please update circuit_execution_ops_test.py")

            circuit_batch.append(c)

        op_states = op(util.convert_to_tensor(circuit_batch), [],
                       [[]] * len(circuit_batch)).to_list()
        cirq_states = batch_util.batch_calculate_state(
            circuit_batch, [cirq.ParamResolver({}) for _ in circuit_batch],
            sim)

        self.assertAllClose(cirq_states, op_states, atol=1e-5, rtol=1e-5)

    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(
                **{
                    'op_and_sim': [(op, sim)
                                   for (op, sim) in zip(STATE_OPS, SIMS)],
                    'n_qubits': [3, 7]
                })))
    def test_simulate_state_no_symbols(self, op_and_sim, n_qubits):
        """Compute states using cirq and tfq without symbols."""
        op = op_and_sim[0]
        sim = op_and_sim[1]

        circuit_batch, resolver_batch = util.random_circuit_resolver_batch(
            cirq.GridQubit.rect(1, n_qubits), BATCH_SIZE)

        op_states = op(util.convert_to_tensor(circuit_batch), [],
                       [[]] * BATCH_SIZE).to_list()
        cirq_states = batch_util.batch_calculate_state(circuit_batch,
                                                       resolver_batch, sim)

        self.assertAllClose(cirq_states, op_states, atol=1e-5, rtol=1e-5)

    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(
                **{
                    'op_and_sim': [(op, sim)
                                   for (op, sim) in zip(STATE_OPS, SIMS)],
                    'n_qubits': [3, 7],
                    'symbol_names': [['a'], ['a', 'b'],
                                     ['a', 'b', 'c', 'd', 'e']]
                })))
    def test_simulate_state_with_symbols(self, op_and_sim, n_qubits,
                                         symbol_names):
        """Compute states using cirq and tfq with symbols."""
        op = op_and_sim[0]
        sim = op_and_sim[1]

        circuit_batch, resolver_batch = \
            util.random_symbol_circuit_resolver_batch(
                cirq.GridQubit.rect(1, n_qubits), symbol_names, BATCH_SIZE)

        symbol_values_array = np.array(
            [[resolver[symbol] for symbol in symbol_names]
             for resolver in resolver_batch])

        op_states = op(util.convert_to_tensor(circuit_batch), symbol_names,
                       symbol_values_array).to_list()

        cirq_states = batch_util.batch_calculate_state(circuit_batch,
                                                       resolver_batch, sim)

        self.assertAllClose(cirq_states, op_states, atol=1e-5, rtol=1e-5)

    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(**{
                'op_and_sim': [(op, sim)
                               for (op, sim) in zip(STATE_OPS, SIMS)],
            })))
    def test_simulate_state_empty(self, op_and_sim):
        """Test empty circuits for states using cirq and tfq."""
        op = op_and_sim[0]
        sim = op_and_sim[1]

        circuit_batch = [cirq.Circuit() for _ in range(BATCH_SIZE)]
        resolver_batch = [cirq.ParamResolver({}) for _ in range(BATCH_SIZE)]

        op_states = op(util.convert_to_tensor(circuit_batch), [],
                       [[]] * BATCH_SIZE).to_list()
        cirq_states = batch_util.batch_calculate_state(circuit_batch,
                                                       resolver_batch, sim)

        self.assertAllClose(cirq_states, op_states, atol=1e-5, rtol=1e-5)

    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(
                **{
                    'op_and_sim': [(op, sim)
                                   for (op,
                                        sim) in zip(EXPECTATION_OPS, SIMS)],
                    'n_qubits': [3, 7],
                    'symbol_names': [['a', 'b', 'c', 'd', 'e']],
                    'max_paulisum_length': [6]
                })))
    def test_analytical_expectation(self, op_and_sim, n_qubits, symbol_names,
                                    max_paulisum_length):
        """Compute expectations using cirq and tfq."""
        op = op_and_sim[0]
        sim = op_and_sim[1]

        qubits = cirq.GridQubit.rect(1, n_qubits)
        circuit_batch, resolver_batch = \
            util.random_symbol_circuit_resolver_batch(
                qubits, symbol_names, BATCH_SIZE)

        symbol_values_array = np.array(
            [[resolver[symbol] for symbol in symbol_names]
             for resolver in resolver_batch])

        pauli_sums = util.random_pauli_sums(qubits, max_paulisum_length,
                                            BATCH_SIZE)

        op_expectations = op(
            util.convert_to_tensor(circuit_batch), symbol_names,
            symbol_values_array,
            util.convert_to_tensor([[psum] for psum in pauli_sums]))

        cirq_expectations = batch_util.batch_calculate_expectation(
            circuit_batch, resolver_batch, [[x] for x in pauli_sums], sim)

        self.assertAllClose(op_expectations.numpy().flatten(),
                            cirq_expectations.flatten(),
                            rtol=1e-5,
                            atol=1e-5)

    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(
                **{
                    'op_and_sim': [(op, sim)
                                   for (op,
                                        sim) in zip(EXPECTATION_OPS, SIMS)],
                    'n_qubits': [3],
                    'symbol_names': [['a', 'b', 'c', 'd', 'e']],
                    'max_paulisum_length': [6]
                })))
    def test_analytical_expectation_empty(self, op_and_sim, n_qubits,
                                          symbol_names, max_paulisum_length):
        """Test empty circuits for analytical expectation using cirq and tfq."""
        op = op_and_sim[0]
        sim = op_and_sim[1]

        qubits = cirq.GridQubit.rect(1, n_qubits)
        circuit_batch = [cirq.Circuit() for _ in range(BATCH_SIZE)]
        resolver_batch = [cirq.ParamResolver({}) for _ in range(BATCH_SIZE)]

        symbol_values_array = np.array([[0.0 for _ in symbol_names]
                                        for _ in resolver_batch])

        pauli_sums = util.random_pauli_sums(qubits, max_paulisum_length,
                                            BATCH_SIZE)

        op_expectations = op(
            util.convert_to_tensor(circuit_batch), symbol_names,
            symbol_values_array,
            util.convert_to_tensor([[psum] for psum in pauli_sums]))

        cirq_expectations = batch_util.batch_calculate_expectation(
            circuit_batch, resolver_batch, [[x] for x in pauli_sums], sim)

        self.assertAllClose(op_expectations.numpy().flatten(),
                            cirq_expectations.flatten(),
                            rtol=1e-5,
                            atol=1e-5)

    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(
                **{
                    'op_and_sim': [(op, sim) for (
                        op, sim) in zip(SAMPLED_EXPECTATION_OPS, SIMS)],
                    'n_qubits': [3, 7],
                    'symbol_names': [['a', 'b', 'c', 'd', 'e']],
                    'max_paulisum_length': [6]
                })))
    def test_sampled_expectation(self, op_and_sim, n_qubits, symbol_names,
                                 max_paulisum_length):
        """Compute sampled expectations using cirq and tfq."""
        op = op_and_sim[0]
        sim = op_and_sim[1]

        qubits = cirq.GridQubit.rect(1, n_qubits)
        circuit_batch, resolver_batch = \
            util.random_symbol_circuit_resolver_batch(
                qubits, symbol_names, BATCH_SIZE)

        symbol_values_array = np.array(
            [[resolver[symbol] for symbol in symbol_names]
             for resolver in resolver_batch])

        pauli_sums = util.random_pauli_sums(qubits, max_paulisum_length,
                                            BATCH_SIZE)
        num_samples = [[2000]] * BATCH_SIZE

        op_expectations = op(
            util.convert_to_tensor(circuit_batch), symbol_names,
            symbol_values_array,
            util.convert_to_tensor([[psum] for psum in pauli_sums]),
            num_samples)

        cirq_expectations = batch_util.batch_calculate_sampled_expectation(
            circuit_batch, resolver_batch, [[x] for x in pauli_sums],
            num_samples, sim)

        self.assertAllClose(op_expectations.numpy().flatten(),
                            cirq_expectations.flatten(),
                            rtol=1e-1,
                            atol=1e-1)

    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(
                **{
                    'op_and_sim': [(op, sim) for (
                        op, sim) in zip(SAMPLED_EXPECTATION_OPS, SIMS)],
                    'n_qubits': [3],
                    'symbol_names': [['a', 'b', 'c', 'd', 'e']],
                    'max_paulisum_length': [6]
                })))
    def test_sampled_expectation_empty(self, op_and_sim, n_qubits,
                                       symbol_names, max_paulisum_length):
        """Test empty circuits for sampled expectation using cirq and tfq."""
        op = op_and_sim[0]
        sim = op_and_sim[1]

        qubits = cirq.GridQubit.rect(1, n_qubits)
        circuit_batch = [cirq.Circuit() for _ in range(BATCH_SIZE)]
        resolver_batch = [cirq.ParamResolver({}) for _ in range(BATCH_SIZE)]

        symbol_values_array = np.array([[0.0 for _ in symbol_names]
                                        for _ in resolver_batch])

        pauli_sums = util.random_pauli_sums(qubits, max_paulisum_length,
                                            BATCH_SIZE)
        num_samples = [[1000]] * BATCH_SIZE

        op_expectations = op(
            util.convert_to_tensor(circuit_batch), symbol_names,
            symbol_values_array,
            util.convert_to_tensor([[psum] for psum in pauli_sums]),
            num_samples)

        cirq_expectations = batch_util.batch_calculate_sampled_expectation(
            circuit_batch, resolver_batch, [[x] for x in pauli_sums],
            num_samples, sim)

        self.assertAllClose(op_expectations.numpy().flatten(),
                            cirq_expectations.flatten(),
                            rtol=1e-1,
                            atol=1e-1)

    # Keep the qubit count low here; all computations scale exponentially.
    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(
                **{
                    'op_and_sim': [(op, sim)
                                   for (op, sim) in zip(SAMPLING_OPS, SIMS)],
                    'n_qubits': [6],
                    'symbol_names': [['a', 'b', 'c', 'd', 'e']]
                })))
    def test_sampling(self, op_and_sim, n_qubits, symbol_names):
        """Compare sampling with tfq ops and Cirq."""
        op = op_and_sim[0]
        sim = op_and_sim[1]
        qubits = cirq.GridQubit.rect(1, n_qubits)
        n_samples = int((2**n_qubits) * 1000)

        circuit_batch, resolver_batch = \
            util.random_symbol_circuit_resolver_batch(
                qubits, symbol_names, BATCH_SIZE, 30)
        for i in range(BATCH_SIZE):
            circuit_batch[i] += cirq.Circuit(
                *[cirq.H(qubit) for qubit in qubits])

        symbol_values_array = np.array(
            [[resolver[symbol] for symbol in symbol_names]
             for resolver in resolver_batch])

        op_samples = np.array(
            op(util.convert_to_tensor(circuit_batch), symbol_names,
               symbol_values_array, [n_samples]).to_list())

        op_histograms = [
            np.histogram(
                sample.dot(1 << np.arange(sample.shape[-1] - 1, -1, -1)),
                range=(0, 2**len(qubits)),
                bins=2**len(qubits))[0] for sample in op_samples
        ]

        cirq_samples = batch_util.batch_sample(circuit_batch, resolver_batch,
                                               n_samples, sim)

        cirq_histograms = [
            np.histogram(
                sample.dot(1 << np.arange(sample.shape[-1] - 1, -1, -1)),
                range=(0, 2**len(qubits)),
                bins=2**len(qubits))[0] for sample in cirq_samples
        ]

        for a, b in zip(op_histograms, cirq_histograms):
            self.assertLess(stats.entropy(a + 1e-8, b + 1e-8), 0.005)

    # Keep the qubit count low here; all computations scale exponentially.
    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(
                **{
                    'op_and_sim': [(op, sim)
                                   for (op, sim) in zip(SAMPLING_OPS, SIMS)],
                    'n_qubits': [3],
                    'symbol_names': [['a', 'b', 'c', 'd', 'e']]
                })))
    def test_sampling_empty(self, op_and_sim, n_qubits, symbol_names):
        """Test empty circuits for sampling using cirq and tfq."""
        op = op_and_sim[0]
        sim = op_and_sim[1]
        qubits = cirq.GridQubit.rect(1, n_qubits)
        n_samples = int((2**n_qubits) * 1000)

        circuit_batch = [cirq.Circuit() for _ in range(BATCH_SIZE)]
        resolver_batch = [cirq.ParamResolver({}) for _ in range(BATCH_SIZE)]

        symbol_values_array = np.array([[0.0 for _ in symbol_names]
                                        for _ in resolver_batch])

        op_samples = np.array(
            op(util.convert_to_tensor(circuit_batch), symbol_names,
               symbol_values_array, [n_samples]).to_list())

        op_histograms = [
            np.histogram(
                sample.dot(1 << np.arange(sample.shape[-1] - 1, -1, -1)),
                range=(0, 2**len(qubits)),
                bins=2**len(qubits))[0] for sample in op_samples
        ]

        cirq_samples = batch_util.batch_sample(circuit_batch, resolver_batch,
                                               n_samples, sim)

        cirq_histograms = [
            np.histogram(
                sample.dot(1 << np.arange(sample.shape[-1] - 1, -1, -1)),
                range=(0, 2**len(qubits)),
                bins=2**len(qubits))[0] for sample in cirq_samples
        ]

        for a, b in zip(op_histograms, cirq_histograms):
            self.assertLess(stats.entropy(a + 1e-8, b + 1e-8), 0.005)
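
In both sampling tests above, sample.dot(1 << np.arange(sample.shape[-1] - 1, -1, -1)) packs each sampled bitstring into its integer value so that np.histogram can bin the outcomes. A small standalone NumPy check of that trick:

import numpy as np

samples = np.array([[0, 0, 0],
                    [1, 0, 1],
                    [1, 1, 1]])  # three 3-qubit bitstrings
weights = 1 << np.arange(samples.shape[-1] - 1, -1, -1)  # [4, 2, 1]
print(samples.dot(weights))  # [0, 5, 7]
counts, _ = np.histogram(samples.dot(weights), range=(0, 8), bins=8)
print(counts)  # a single count in each of bins 0, 5 and 7
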
class LinearCombinationTest(tf.test.TestCase, parameterized.TestCase):
    """Test the LinearCombination based Differentiators."""
    def test_linear_combination_instantiate(self):
        """Test LinearCombinationDifferentiator type checking."""
        linear_combination.LinearCombination([1, 1], [1, 0])
        with self.assertRaisesRegex(TypeError,
                                    expected_regex="weights must be"):
            linear_combination.LinearCombination("junk", [1, 0])
        with self.assertRaisesRegex(TypeError,
                                    expected_regex="perturbations must be"):
            linear_combination.LinearCombination([1, 1], "junk")
        with self.assertRaisesRegex(TypeError,
                                    expected_regex="weight in weights"):
            linear_combination.LinearCombination([1, "junk"], [1, 0])
        with self.assertRaisesRegex(
                TypeError, expected_regex="perturbation in perturbations"):
            linear_combination.LinearCombination([1, 1], [1, "junk"])
        with self.assertRaisesRegex(ValueError, expected_regex="length"):
            linear_combination.LinearCombination([1, 1, 1], [1, 0])
        with self.assertRaisesRegex(ValueError, expected_regex="at least two"):
            linear_combination.LinearCombination([1], [1])
        with self.assertRaisesRegex(ValueError, expected_regex="unique"):
            linear_combination.LinearCombination([1, 1], [1, 1])

    def test_forward_instantiate(self):
        """Test ForwardDifference type checking."""
        linear_combination.ForwardDifference()
        linear_combination.ForwardDifference(1, 0.1)
        with self.assertRaisesRegex(ValueError,
                                    expected_regex="positive integer"):
            linear_combination.ForwardDifference(0.1, 0.1)
        with self.assertRaisesRegex(ValueError,
                                    expected_regex="positive integer"):
            linear_combination.ForwardDifference(-1, 0.1)
        with self.assertRaisesRegex(ValueError,
                                    expected_regex="positive integer"):
            linear_combination.ForwardDifference(0, 0.1)
        with self.assertRaisesRegex(ValueError, expected_regex="grid_spacing"):
            linear_combination.ForwardDifference(1, -0.1)
        with self.assertRaisesRegex(ValueError, expected_regex="grid_spacing"):
            linear_combination.ForwardDifference(1, 1j)

    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(
                **{
                    'order_coef_perturbs': [(1, (-1, 1), (
                        0, 1)), (2, (-3 / 2, 2, -1 / 2), (0, 1, 2))],
                    'grid_spacing': [0.1, 0.01, 0.5, 1, 0.05]
                })))
    def test_forward_coefficients(self, order_coef_perturbs, grid_spacing):
        """Test that ForwardDifference produces the right coefficients for
        common first and second order cases."""
        order = order_coef_perturbs[0]
        expected_std_coeffs = order_coef_perturbs[1]
        expected_perturbations = order_coef_perturbs[2]
        forward = linear_combination.ForwardDifference(order, grid_spacing)
        self.assertAllClose(
            np.array(expected_std_coeffs) / grid_spacing, forward.weights)
        self.assertAllClose(
            np.array(expected_perturbations) * grid_spacing,
            forward.perturbations)

    def test_central_instantiate(self):
        """Test CentralDifference type checking."""
        linear_combination.CentralDifference()
        linear_combination.CentralDifference(2, 0.1)
        with self.assertRaisesRegex(ValueError,
                                    expected_regex="positive, even"):
            linear_combination.CentralDifference(0.1, 0.1)
        with self.assertRaisesRegex(ValueError,
                                    expected_regex="positive, even"):
            linear_combination.CentralDifference(-1, 0.1)
        with self.assertRaisesRegex(ValueError,
                                    expected_regex="positive, even"):
            linear_combination.CentralDifference(0, 0.1)
        with self.assertRaisesRegex(ValueError,
                                    expected_regex="positive, even"):
            linear_combination.CentralDifference(1, 0.1)
        with self.assertRaisesRegex(ValueError, expected_regex="grid_spacing"):
            linear_combination.CentralDifference(2, -0.1)
        with self.assertRaisesRegex(ValueError, expected_regex="grid_spacing"):
            linear_combination.CentralDifference(2, 1j)

    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(
                **{
                    'order_coef_perturbs': [(2, (-1 / 2, 1 / 2), (-1, 1)),
                                            (4, (1 / 12, -8 / 12, 8 / 12,
                                                 -1 / 12), (-2, -1, 1, 2))],
                    'grid_spacing': [0.1, 0.01, 0.5, 1, 0.05]
                })))
    def test_central_coefficients(self, order_coef_perturbs, grid_spacing):
        """Test that CentralDifference produces the right coefficients for
        common first and second order cases."""
        order = order_coef_perturbs[0]
        expected_std_coeffs = order_coef_perturbs[1]
        expected_perturbations = order_coef_perturbs[2]
        central = linear_combination.CentralDifference(order, grid_spacing)
        self.assertAllClose(
            np.array(expected_std_coeffs) / grid_spacing, central.weights)
        self.assertAllClose(
            np.array(expected_perturbations) * grid_spacing,
            central.perturbations)

    @parameterized.parameters([{
        'diff': linear_combination.ForwardDifference()
    }, {
        'diff': linear_combination.CentralDifference()
    }])
    def test_analytic_functional(self, diff):
        """Test that the differentiate_analytic function WORKS."""
        differentiable_op = diff.generate_differentiable_op(
            analytic_op=circuit_execution_ops.get_expectation_op())
        circuit, names, values, ops, _, true_f, true_g = _simple_op_inputs()
        with tf.GradientTape() as g:
            g.watch(values)
            res = differentiable_op(circuit, names, values, ops)

        # Just check that it computes without failing.
        self.assertAllClose(true_f, res, atol=1e-2, rtol=1e-2)
        self.assertAllClose(true_g,
                            g.gradient(res, values),
                            atol=1e-2,
                            rtol=1e-2)

    @parameterized.parameters([{
        'diff':
        linear_combination.ForwardDifference(grid_spacing=0.01)
    }, {
        'diff':
        linear_combination.CentralDifference(grid_spacing=0.01)
    }])
    def test_sampled_functional(self, diff):
        """Test that the differentiate_sampled function WORKS."""
        differentiable_op = diff.generate_differentiable_op(
            sampled_op=circuit_execution_ops.get_sampled_expectation_op())
        circuit, names, values, ops, n_samples, true_f, true_g = \
            _simple_op_inputs()
        with tf.GradientTape() as g:
            g.watch(values)
            res = differentiable_op(circuit, names, values, ops, n_samples)

        # Just check that it computes without failing.
        self.assertAllClose(true_f, res, atol=1e-1, rtol=1e-1)
        self.assertAllClose(true_g,
                            g.gradient(res, values),
                            atol=1e-1,
                            rtol=1e-1)

    def test_get_gradient_circuits(self):
        """Test that the correct objects are returned."""

        # Minimal linear combination.
        input_weights = [1.0, -0.5]
        input_perturbations = [1.0, -1.5]
        diff = linear_combination.LinearCombination(input_weights,
                                                    input_perturbations)

        # Circuits to differentiate.
        symbols = [sympy.Symbol("s0"), sympy.Symbol("s1")]
        q0 = cirq.GridQubit(0, 0)
        q1 = cirq.GridQubit(1, 2)
        input_programs = util.convert_to_tensor([
            cirq.Circuit(cirq.X(q0)**symbols[0],
                         cirq.ry(symbols[1])(q1)),
            cirq.Circuit(cirq.rx(symbols[0])(q0),
                         cirq.Y(q1)**symbols[1]),
        ])
        input_symbol_names = tf.constant([str(s) for s in symbols])
        input_symbol_values = tf.constant([[1.5, -2.7], [-0.3, 0.9]])

        # For each program in the input batch: LinearCombination creates a copy
        # of that program for each symbol in the batch; then for each symbol,
        # the program is copied for each non-zero perturbation; finally, a
        # single copy is added for the zero perturbation (no zero pert here).
        expected_batch_programs = tf.stack([[input_programs[0]] * 4,
                                            [input_programs[1]] * 4])
        expected_new_symbol_names = input_symbol_names

        # For each program in the input batch: first, the input symbol_values
        # for the program are tiled to the number of copies in the output.
        tiled_symbol_values = tf.stack([[input_symbol_values[0]] * 4,
                                        [input_symbol_values[1]] * 4])
        # Then we create the tensor of perturbations to apply to these symbol
        # values: for each symbol we tile out the non-zero perturbations at that
        # symbol's index, keeping all the other symbol perturbations at zero.
        # Perturbations are the same for each program.
        single_program_perturbations = tf.stack([[input_perturbations[0], 0.0],
                                                 [input_perturbations[1], 0.0],
                                                 [0.0, input_perturbations[0]],
                                                 [0.0,
                                                  input_perturbations[1]]])
        tiled_perturbations = tf.stack(
            [single_program_perturbations, single_program_perturbations])
        # Finally we add the perturbations to the original symbol values.
        expected_batch_symbol_values = tiled_symbol_values + tiled_perturbations

        # The weights for LinearCombination are the same for every program.
        individual_batch_weights = tf.stack(
            [[input_weights[0], input_weights[1]],
             [input_weights[0], input_weights[1]]])
        expected_batch_weights = tf.stack(
            [individual_batch_weights, individual_batch_weights])

        # The mapper selects the expectations.
        single_program_mapper = tf.constant([[0, 1], [2, 3]])
        expected_batch_mapper = tf.tile(
            tf.expand_dims(single_program_mapper, 0), [2, 1, 1])

        (test_batch_programs, test_new_symbol_names, test_batch_symbol_values,
         test_batch_weights, test_batch_mapper) = diff.get_gradient_circuits(
             input_programs, input_symbol_names, input_symbol_values)
        self.assertAllEqual(expected_batch_programs, test_batch_programs)
        self.assertAllEqual(expected_new_symbol_names, test_new_symbol_names)
        self.assertAllClose(expected_batch_symbol_values,
                            test_batch_symbol_values,
                            atol=1e-6)
        self.assertAllClose(expected_batch_weights,
                            test_batch_weights,
                            atol=1e-6)
        self.assertAllEqual(expected_batch_mapper, test_batch_mapper)

    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(
                **{
                    'differentiator': [
                        linear_combination.ForwardDifference(),
                        linear_combination.CentralDifference()
                    ],
                    'n_qubits': [5],
                    'n_programs': [3],
                    'n_ops': [3],
                    'symbol_names': [['a', 'b']]
                })))
    def test_gradient_circuits_grad_comparison(self, differentiator, n_qubits,
                                               n_programs, n_ops,
                                               symbol_names):
        """Test that analytic gradient agrees with the one from grad circuits"""
        # Get random circuits to check.
        qubits = cirq.GridQubit.rect(1, n_qubits)
        circuit_batch, resolver_batch = \
            util.random_symbol_circuit_resolver_batch(
                cirq.GridQubit.rect(1, n_qubits), symbol_names, n_programs)
        psums = [
            util.random_pauli_sums(qubits, 1, n_ops) for _ in circuit_batch
        ]

        # Convert to tensors.
        symbol_names_array = np.array(symbol_names)
        symbol_values_array = np.array(
            [[resolver[symbol] for symbol in symbol_names]
             for resolver in resolver_batch],
            dtype=np.float32)
        symbol_names_tensor = tf.convert_to_tensor(symbol_names_array)
        symbol_values_tensor = tf.convert_to_tensor(symbol_values_array)
        programs = util.convert_to_tensor(circuit_batch)
        ops_tensor = util.convert_to_tensor(psums)

        # Get gradients using expectations of gradient circuits.
        (batch_programs, new_symbol_names, batch_symbol_values, batch_weights,
         batch_mapper) = differentiator.get_gradient_circuits(
             programs, symbol_names_tensor, symbol_values_tensor)
        analytic_op = circuit_execution_ops.get_expectation_op()
        batch_pauli_sums = tf.tile(tf.expand_dims(ops_tensor, 1),
                                   [1, tf.shape(batch_programs)[1], 1])
        n_batch_programs = tf.reduce_prod(tf.shape(batch_programs))
        n_symbols = len(symbol_names)
        batch_expectations = analytic_op(
            tf.reshape(batch_programs, [n_batch_programs]), new_symbol_names,
            tf.reshape(batch_symbol_values, [n_batch_programs, n_symbols]),
            tf.reshape(batch_pauli_sums, [n_batch_programs, n_ops]))
        batch_expectations = tf.reshape(batch_expectations,
                                        tf.shape(batch_pauli_sums))

        batch_jacobian = tf.map_fn(
            lambda x: tf.einsum('km,kmp->kp', x[0], tf.gather(x[1], x[2])),
            (batch_weights, batch_expectations, batch_mapper),
            fn_output_signature=tf.float32)
        grad_manual = tf.reduce_sum(batch_jacobian, -1)

        # Get gradients using autodiff.
        differentiator.refresh()
        differentiable_op = differentiator.generate_differentiable_op(
            analytic_op=analytic_op)
        with tf.GradientTape() as g:
            g.watch(symbol_values_tensor)
            exact_outputs = differentiable_op(programs, symbol_names_tensor,
                                              symbol_values_tensor, ops_tensor)
        grad_auto = g.gradient(exact_outputs, symbol_values_tensor)
        self.assertAllClose(grad_manual, grad_auto)
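
The tf.map_fn/einsum step above is dense; the following NumPy sketch (with illustrative shapes only) shows the same contraction for a single program, where each gradient entry is a weighted sum of the expectations selected by the mapper:

import numpy as np

n_symbols, n_terms, n_ops = 2, 2, 3
weights = np.random.rand(n_symbols, n_terms)               # "km"
expectations = np.random.rand(n_symbols * n_terms, n_ops)  # one row per circuit copy
mapper = np.array([[0, 1], [2, 3]])                        # rows used for each symbol

gathered = expectations[mapper]                            # shape (k, m, p)
grad = np.einsum('km,kmp->kp', weights, gathered)          # shape (k, p)
assert np.allclose(
    grad[0],
    weights[0, 0] * expectations[0] + weights[0, 1] * expectations[1])
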
Example #7
class ResolveParametersOpTest(tf.test.TestCase, parameterized.TestCase):
    """Test the in-graph parameter resolving op."""

    def _compare_gate_parameters(self, tg_value, eg_value):
        """TODO(zaqqwerty): Remove this function and the gate-specific tests
        below once https://github.com/quantumlib/Cirq/issues/3192 is resolved"""
        rounding_digits = 3
        if isinstance(tg_value, int):
            self.assertAlmostEqual(tg_value, eg_value)
        elif isinstance(tg_value, float):
            self.assertAlmostEqual(tg_value, eg_value, places=rounding_digits)
        else:
            test_value = 1
            exp_value = 1
            for v in tg_value.args:
                if not isinstance(v, sympy.Symbol):
                    test_value *= sympy.N(v)
            for v in eg_value.args:
                if not isinstance(v, sympy.Symbol):
                    exp_value *= sympy.N(v)
            self.assertAlmostEqual(test_value,
                                   exp_value,
                                   delta=0.1**rounding_digits)

    def test_resolve_parameters_input_checking(self):
        """Check that the resolve parameters op has correct input checking."""
        n_qubits = 5
        batch_size = 5
        symbol_names = ['alpha']
        qubits = cirq.GridQubit.rect(1, n_qubits)
        circuit_batch, resolver_batch = \
            util.random_symbol_circuit_resolver_batch(
                qubits, symbol_names, batch_size)

        symbol_values_array = np.array(
            [[resolver[symbol]
              for symbol in symbol_names]
             for resolver in resolver_batch])

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'must be rank 1'):
            # programs tensor has the wrong shape (too many dims).
            tfq_utility_ops.resolve_parameters(
                util.convert_to_tensor([circuit_batch]), symbol_names,
                symbol_values_array)

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'must be rank 1'):
            # programs tensor has the wrong shape (too few dims).
            tfq_utility_ops.resolve_parameters(
                util.convert_to_tensor(circuit_batch)[0], symbol_names,
                symbol_values_array)

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'must be rank 1'):
            # symbol_names tensor has the wrong shape (too many dims).
            tfq_utility_ops.resolve_parameters(
                util.convert_to_tensor(circuit_batch), np.array([symbol_names]),
                symbol_values_array)

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'must be rank 1'):
            # symbol_names tensor has the wrong shape (too few dims).
            tfq_utility_ops.resolve_parameters(
                util.convert_to_tensor(circuit_batch), symbol_names[0],
                symbol_values_array)

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'symbol_values must be rank 2'):
            # symbol_values tensor has the wrong shape (too many dims).
            tfq_utility_ops.resolve_parameters(
                util.convert_to_tensor(circuit_batch), symbol_names,
                np.array([symbol_values_array]))

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'symbol_values must be rank 2'):
            # symbol_values tensor has the wrong shape (too few dims).
            tfq_utility_ops.resolve_parameters(
                util.convert_to_tensor(circuit_batch), symbol_names,
                symbol_values_array[0])

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'Unparseable proto'):
            # programs tensor has the right type, but invalid value.
            tfq_utility_ops.resolve_parameters(['junk'] * batch_size,
                                               symbol_names,
                                               symbol_values_array)

        with self.assertRaisesRegex(TypeError, 'Cannot convert'):
            # programs tensor has the wrong type.
            tfq_utility_ops.resolve_parameters([1] * batch_size, symbol_names,
                                               symbol_values_array)

        with self.assertRaisesRegex(TypeError, 'Cannot convert'):
            # symbol_names tensor has the wrong type.
            tfq_utility_ops.resolve_parameters(
                util.convert_to_tensor(circuit_batch), [1], symbol_values_array)

        with self.assertRaisesRegex(tf.errors.UnimplementedError,
                                    'Cast string to float is not supported'):
            # symbol_values tensor has the wrong type.
            tfq_utility_ops.resolve_parameters(
                util.convert_to_tensor(circuit_batch), symbol_names,
                [['junk']] * batch_size)

        with self.assertRaisesRegex(TypeError, 'missing'):
            # too few tensors.
            # pylint: disable=no-value-for-parameter
            tfq_utility_ops.resolve_parameters(
                util.convert_to_tensor(circuit_batch), symbol_names)
            # pylint: enable=no-value-for-parameter

    def test_resolve_parameters_consistency_basic(self):
        """Compare tfq op to cirq resolving."""
        qubits = cirq.GridQubit.rect(1, 4)
        circuit = cirq.Circuit()
        symbols = []
        for n, q in enumerate(qubits):
            new_bit = sympy.Symbol("bit_{}".format(n))
            circuit += cirq.X(q)**new_bit
            symbols.append(new_bit)
        symbol_names = [str(s) for s in symbols]

        bitstring_list = [[0, 0, 0, 0], [0, 1, 0, 1], [1, 0, 1, 1]]
        circuit_list = []
        resolver_list = []
        for bitstring in bitstring_list:
            resolve_dict = {}
            for s, b in zip(symbols, bitstring):
                resolve_dict[s] = b
            resolver_list.append(cirq.ParamResolver(resolve_dict))
            circuit_list.append(circuit)

        test_resolved_circuits = util.from_tensor(
            tfq_utility_ops.resolve_parameters(
                util.convert_to_tensor(circuit_list), symbol_names,
                np.asarray(bitstring_list)))

        expected_resolved_circuits = []
        for circuit, resolver in zip(circuit_list, resolver_list):
            expected_resolved_circuits.append(
                cirq.resolve_parameters(circuit, resolver))

        for exp_c, test_c in zip(expected_resolved_circuits,
                                 test_resolved_circuits):
            self.assertAllEqual(exp_c, test_c)

    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(
                **{
                    'n_qubits': [3, 7],
                    'symbol_names': [['a'], ['a', 'b'],
                                     ['a', 'b', 'c', 'd', 'e']]
                })))
    def test_resolve_parameters_consistency(self, n_qubits, symbol_names):
        """Compare tfq op to cirq resolving for randomized circuits."""

        # Get random circuit batches
        qubits = cirq.GridQubit.rect(1, n_qubits)
        batch_size = 15
        n_moments = 15
        circuit_batch, resolver_batch = \
            util.random_symbol_circuit_resolver_batch(
                qubits, symbol_names, batch_size, n_moments)

        # Remove one of the symbols from the resolvers
        symbol_names_partial = symbol_names[1:]
        symbol_values_array_partial = np.array(
            [[resolver[symbol]
              for symbol in symbol_names_partial]
             for resolver in resolver_batch])
        resolver_batch_partial = [
            cirq.ParamResolver(
                {symbol: resolver[symbol]
                 for symbol in symbol_names_partial})
            for resolver in resolver_batch
        ]

        # Resolve in two ways and compare results
        test_resolved_circuits = util.from_tensor(
            tfq_utility_ops.resolve_parameters(
                util.convert_to_tensor(circuit_batch), symbol_names_partial,
                symbol_values_array_partial))
        expected_resolved_circuits = []
        for circuit, resolver in zip(circuit_batch, resolver_batch_partial):
            expected_resolved_circuits.append(
                cirq.resolve_parameters(circuit, resolver))
        # TODO(zaqqwerty): Find a way to eliminate parsing.
        for test_c, exp_c in zip(test_resolved_circuits,
                                 expected_resolved_circuits):
            for test_m, exp_m in zip(test_c, exp_c):
                for test_o, exp_o in zip(test_m, exp_m):
                    tg = test_o.gate
                    eg = exp_o.gate
                    self.assertEqual(type(tg), type(eg))
                    # TODO(zaqqwerty): simplify parsing once cirq builds a
                    # parser; see core/serialize/serializer.py
                    if isinstance(tg, cirq.IdentityGate):
                        # all identity gates are the same
                        continue
                    elif isinstance(tg, cirq.EigenGate):
                        self._compare_gate_parameters(tg._global_shift,
                                                      eg._global_shift)
                        self._compare_gate_parameters(tg._exponent,
                                                      eg._exponent)
                    elif isinstance(tg, cirq.FSimGate):
                        self._compare_gate_parameters(tg.theta, eg.theta)
                        self._compare_gate_parameters(tg.phi, eg.phi)
                    elif isinstance(
                            tg, (cirq.PhasedXPowGate, cirq.PhasedISwapPowGate)):
                        self._compare_gate_parameters(tg._global_shift,
                                                      eg._global_shift)
                        self._compare_gate_parameters(tg._exponent,
                                                      eg._exponent)
                        self._compare_gate_parameters(tg._phase_exponent,
                                                      eg._phase_exponent)
                    else:
                        self.assertTrue(False,
                                        msg="Some gate in the randomizer "
                                        "is not being checked: "
                                        "{}".format(type(tg)))
class StochasticGradientConvergenceTest(tf.test.TestCase,
                                        parameterized.TestCase):
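    """Convergence tests for the stochastic gradient differentiators."""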
    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(
                **{
                    'differentiator_num_runs': DIFFS_NUM_RUNS,
                    'n_qubits': [3],
                    'n_programs': [3],
                    'n_ops': [3],
                    'symbol_names': [['a', 'b']],
                    'eps': [0.1]
                })))
    def test_gradients_vs_cirq_finite_difference(self, differentiator_num_runs,
                                                 n_qubits, n_programs, n_ops,
                                                 symbol_names, eps):
        """Convergence tests on SGDifferentiator variants."""

        # TODO(trevormccrt): remove this once I build the user-facing op
        #  interface
        differentiator, num_runs = differentiator_num_runs
        differentiator.refresh()
        op = differentiator.generate_differentiable_op(
            analytic_op=tfq_simulate_ops.tfq_simulate_expectation)

        qubits = cirq.GridQubit.rect(1, n_qubits)
        circuit_batch, resolver_batch = \
            util.random_symbol_circuit_resolver_batch(
                cirq.GridQubit.rect(1, n_qubits), symbol_names, n_programs)

        psums = [
            util.random_pauli_sums(qubits, 1, n_ops) for _ in circuit_batch
        ]

        symbol_values_array = np.array(
            [[resolver[symbol] for symbol in symbol_names]
             for resolver in resolver_batch],
            dtype=np.float32)

        # calculate tfq gradient
        symbol_values_tensor = tf.convert_to_tensor(symbol_values_array)
        programs = util.convert_to_tensor(circuit_batch)
        ops = util.convert_to_tensor(psums)

        def _get_gradient():
            with tf.GradientTape() as g:
                g.watch(symbol_values_tensor)
                expectations = op(programs, symbol_names, symbol_values_tensor,
                                  ops)
            return tf.cast(g.gradient(expectations, symbol_values_tensor),
                           dtype=tf.float64)

        # warm-up & initialize tfq_grads.
        grads_sum = _get_gradient()
        tfq_grads = grads_sum

        # calculate gradients in cirq using a very simple forward differencing
        # scheme
        cirq_grads = _cirq_simple_finite_difference(circuit_batch,
                                                    resolver_batch,
                                                    symbol_names, psums)
        cnt = 1
        # Use a slightly smaller atol for np.allclose() inside the loop so that
        # the final self.assertAllClose(atol=eps) check below is sure to pass.
        total_time = 0
        while cnt < num_runs and (not np.allclose(
                tfq_grads, cirq_grads, atol=eps * 0.9)):
            cnt = cnt + 1
            s = time.time()
            grads_sum = grads_sum + _get_gradient()
            total_time += time.time() - s
            tfq_grads = grads_sum / cnt

        self.assertAllClose(cirq_grads, tfq_grads, atol=eps)
        print('Passed: count {}, total_time {} ({}sec/shot)'.format(
            cnt, total_time, total_time / cnt))
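The _cirq_simple_finite_difference reference used above is defined elsewhere in this file. A minimal sketch of the kind of forward-differencing reference it provides (an assumption about its behavior, using only standard cirq APIs, and assuming each observable acts only on the circuit's qubits) might look like:

import cirq
import numpy as np

def _forward_difference_sketch(circuit_batch, resolver_batch, symbol_names,
                               psums, dx=1e-4):
    """Hypothetical stand-in: per-program, per-symbol forward differences of
    the summed PauliSum expectations, computed with cirq's simulator."""
    simulator = cirq.Simulator()

    def _total_expectation(circuit, resolver, observables):
        qubit_map = {q: i for i, q in enumerate(sorted(circuit.all_qubits()))}
        state = simulator.simulate(circuit, resolver).final_state_vector
        return sum(
            op.expectation_from_state_vector(state, qubit_map).real
            for op in observables)

    grads = np.zeros((len(circuit_batch), len(symbol_names)), dtype=np.float32)
    for i, (circuit, resolver, observables) in enumerate(
            zip(circuit_batch, resolver_batch, psums)):
        base = _total_expectation(circuit, resolver, observables)
        for j, name in enumerate(symbol_names):
            # Shift one symbol at a time and take a forward difference.
            shifted = {s: resolver.value_of(s) for s in symbol_names}
            shifted[name] += dx
            forward = _total_expectation(circuit, cirq.ParamResolver(shifted),
                                         observables)
            grads[i, j] = (forward - base) / dx
    return grads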
class ResolveParametersOpTest(tf.test.TestCase, parameterized.TestCase):
    """Test the in-graph parameter resolving op."""
    def test_resolve_parameters_input_checking(self):
        """Check that the resolve parameters op has correct input checking."""
        n_qubits = 5
        batch_size = 5
        symbol_names = ['alpha']
        qubits = cirq.GridQubit.rect(1, n_qubits)
        circuit_batch, resolver_batch = \
            util.random_symbol_circuit_resolver_batch(
                qubits, symbol_names, batch_size)

        symbol_values_array = np.array(
            [[resolver[symbol] for symbol in symbol_names]
             for resolver in resolver_batch])

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'must be rank 1'):
            # programs tensor has the wrong shape (too many dims).
            tfq_utility_ops.resolve_parameters(
                util.convert_to_tensor([circuit_batch]), symbol_names,
                symbol_values_array)

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'must be rank 1'):
            # programs tensor has the wrong shape (too few dims).
            tfq_utility_ops.resolve_parameters(
                util.convert_to_tensor(circuit_batch)[0], symbol_names,
                symbol_values_array)

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'must be rank 1'):
            # symbol_names tensor has the wrong shape (too many dims).
            tfq_utility_ops.resolve_parameters(
                util.convert_to_tensor(circuit_batch),
                np.array([symbol_names]), symbol_values_array)

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'must be rank 1'):
            # symbol_names tensor has the wrong shape (too few dims).
            tfq_utility_ops.resolve_parameters(
                util.convert_to_tensor(circuit_batch), symbol_names[0],
                symbol_values_array)

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'symbol_values must be rank 2'):
            # symbol_values tensor has the wrong shape (too many dims).
            tfq_utility_ops.resolve_parameters(
                util.convert_to_tensor(circuit_batch), symbol_names,
                np.array([symbol_values_array]))

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'symbol_values must be rank 2'):
            # symbol_values tensor has the wrong shape (too few dims).
            tfq_utility_ops.resolve_parameters(
                util.convert_to_tensor(circuit_batch), symbol_names,
                symbol_values_array[0])

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'Unparseable proto'):
            # programs tensor has the right type, but invalid value.
            tfq_utility_ops.resolve_parameters(['junk'] * batch_size,
                                               symbol_names,
                                               symbol_values_array)

        with self.assertRaisesRegex(TypeError, 'Cannot convert'):
            # programs tensor has the wrong type.
            tfq_utility_ops.resolve_parameters([1] * batch_size, symbol_names,
                                               symbol_values_array)

        with self.assertRaisesRegex(TypeError, 'Cannot convert'):
            # symbol_names tensor has the wrong type.
            tfq_utility_ops.resolve_parameters(
                util.convert_to_tensor(circuit_batch), [1],
                symbol_values_array)

        with self.assertRaisesRegex(tf.errors.UnimplementedError,
                                    'Cast string to float is not supported'):
            # symbol_values tensor has the wrong type.
            tfq_utility_ops.resolve_parameters(
                util.convert_to_tensor(circuit_batch), symbol_names,
                [['junk']] * batch_size)

        with self.assertRaisesRegex(TypeError, 'missing'):
            # too few tensors.
            # pylint: disable=no-value-for-parameter
            tfq_utility_ops.resolve_parameters(
                util.convert_to_tensor(circuit_batch), symbol_names)
            # pylint: enable=no-value-for-parameter

    def test_resolve_parameters_consistency_basic(self):
        """Compare tfq op to cirq resolving."""
        qubits = cirq.GridQubit.rect(1, 4)
        circuit = cirq.Circuit()
        symbols = []
        for n, q in enumerate(qubits):
            new_bit = sympy.Symbol("bit_{}".format(n))
            circuit += cirq.X(q)**new_bit
            symbols.append(new_bit)
        symbol_names = [str(s) for s in symbols]

        bitstring_list = [[0, 0, 0, 0], [0, 1, 0, 1], [1, 0, 1, 1]]
        circuit_list = []
        resolver_list = []
        for bitstring in bitstring_list:
            resolve_dict = {}
            for s, b in zip(symbols, bitstring):
                resolve_dict[s] = b
            resolver_list.append(cirq.ParamResolver(resolve_dict))
            circuit_list.append(circuit)

        test_resolved_circuits = util.from_tensor(
            tfq_utility_ops.resolve_parameters(
                util.convert_to_tensor(circuit_list), symbol_names,
                np.asarray(bitstring_list)))

        expected_resolved_circuits = []
        for circuit, resolver in zip(circuit_list, resolver_list):
            expected_resolved_circuits.append(
                cirq.resolve_parameters(circuit, resolver))

        for exp_c, test_c in zip(expected_resolved_circuits,
                                 test_resolved_circuits):
            self.assertAllEqual(exp_c, test_c)

    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(
                **{
                    'n_qubits': [3, 7],
                    'symbol_names': [['a'], ['a', 'b'],
                                     ['a', 'b', 'c', 'd', 'e']]
                })))
    def test_resolve_parameters_consistency(self, n_qubits, symbol_names):
        """Compare tfq op to cirq resolving for randomized circuits."""

        # Get random circuit batches
        qubits = cirq.GridQubit.rect(1, n_qubits)
        batch_size = 15
        n_moments = 15
        circuit_batch, resolver_batch = \
            util.random_symbol_circuit_resolver_batch(
                qubits, symbol_names, batch_size, n_moments)

        # Remove one of the symbols from the resolvers
        symbol_names_partial = symbol_names[1:]
        symbol_values_array_partial = np.array(
            [[resolver[symbol] for symbol in symbol_names_partial]
             for resolver in resolver_batch])
        resolver_batch_partial = [
            cirq.ParamResolver(
                {symbol: resolver[symbol]
                 for symbol in symbol_names_partial})
            for resolver in resolver_batch
        ]

        # Resolve in two ways and compare results
        test_resolved_circuits = util.from_tensor(
            tfq_utility_ops.resolve_parameters(
                util.convert_to_tensor(circuit_batch), symbol_names_partial,
                symbol_values_array_partial))
        expected_resolved_circuits = []
        for circuit, resolver in zip(circuit_batch, resolver_batch_partial):
            expected_resolved_circuits.append(
                cirq.resolve_parameters(circuit, resolver))
        # TODO(zaqqwerty): Find a way to eliminate parsing.
        for test_c, exp_c in zip(test_resolved_circuits,
                                 expected_resolved_circuits):
            for test_m, exp_m in zip(test_c, exp_c):
                for test_o, exp_o in zip(test_m, exp_m):
                    self.assertTrue(
                        util.gate_approx_eq(test_o.gate, exp_o.gate))
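For reference, a minimal standalone usage of the in-graph resolver exercised above (assuming the same tfq_utility_ops and util modules imported by this file):

import cirq
import numpy as np
import sympy

q = cirq.GridQubit(0, 0)
circuit = cirq.Circuit(cirq.X(q)**sympy.Symbol('alpha'))
resolved = tfq_utility_ops.resolve_parameters(
    util.convert_to_tensor([circuit]),     # batch containing one program
    ['alpha'],                             # symbol names to resolve
    np.array([[0.5]], dtype=np.float32))   # one value per program per symbol
print(util.from_tensor(resolved)[0])       # a cirq.Circuit with X**0.5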
Example #10
0
class StochasticDifferentiatorCorrectnessTest(tf.test.TestCase,
                                              parameterized.TestCase):
    """Test correctness of the stochastic differentiators to reference cirq
    algorithm.
    DISCLAIMER: this test allows for a larger margin of error and as long
    as convergence is happening then it passes"""

    # TODO(zaqqwerty): only this test was failing after adding cirq.I
    #    support, so it is disabled pending diagnosis
    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(
                **{
                    'differentiator': STOCHASTIC_DIFFS,
                    'op': OPS,
                    'n_qubits': [5],
                    'n_programs': [3],
                    'n_ops': [3],
                    'symbol_names': [['a', 'b']],
                    'stochastic_cost_eps': [(False, 5e-1), (True, 7e-1)],
                })))
    def gradients_vs_cirq_finite_difference(self, differentiator, op, n_qubits,
                                            n_programs, n_ops, symbol_names,
                                            stochastic_cost_eps):
        """Compare TFQ differentiators to fine-grained noiseless cirq finite
        differencing with a larger margin of error."""

        # TODO (jaeyoo): clean up this hacky workaround so variable
        #   assignment doesn't need to take place like this.
        differentiator.stochastic_cost, eps = stochastic_cost_eps
        differentiator.refresh()
        op = differentiator.generate_differentiable_op(analytic_op=op)

        qubits = cirq.GridQubit.rect(1, n_qubits)
        circuit_batch, resolver_batch = \
            util.random_symbol_circuit_resolver_batch(
                cirq.GridQubit.rect(1, n_qubits), symbol_names, n_programs)

        psums = [
            util.random_pauli_sums(qubits, 1, n_ops) for _ in circuit_batch
        ]

        symbol_values_array = np.array(
            [[resolver[symbol] for symbol in symbol_names]
             for resolver in resolver_batch],
            dtype=np.float32)

        # calculate tfq gradient
        symbol_values_tensor = tf.convert_to_tensor(symbol_values_array)
        programs = util.convert_to_tensor(circuit_batch)
        ops = util.convert_to_tensor(psums)

        # calculate gradients in cirq using a very simple forward differencing
        # scheme
        cirq_grads = _cirq_simple_finite_difference(circuit_batch,
                                                    resolver_batch,
                                                    symbol_names, psums)

        def _get_gradient():
            with tf.GradientTape() as g:
                g.watch(symbol_values_tensor)
                expectations = op(programs, tf.convert_to_tensor(symbol_names),
                                  symbol_values_tensor, ops)
            return g.gradient(expectations, symbol_values_tensor)

        def _abs_diff(grad, mask):
            return np.sum(np.abs(grad - cirq_grads * mask))

        def _get_nonzero_mask(grad):
            return (grad.numpy() != 0.0).astype(np.float32)

        # Get the non-zero mask because a few entries of the initial gradient
        # may not have been sampled yet and are therefore still zero.
        tfq_grads_1 = _get_gradient()
        mask_1 = _get_nonzero_mask(tfq_grads_1)

        if not np.allclose(tfq_grads_1, cirq_grads * mask_1, atol=eps):
            tfq_grads_2 = 0.5 * (tfq_grads_1 + _get_gradient())
            mask_2 = _get_nonzero_mask(tfq_grads_2)
            # Check if the 2nd error becomes smaller than the 1st one.
            if not _abs_diff(tfq_grads_1, mask_1) > _abs_diff(
                    tfq_grads_2, mask_2):
                cnt = 2
                tfq_grads = (cnt * tfq_grads_2 + _get_gradient()) / (cnt + 1)
                while (cnt < 10
                       and not np.allclose(cirq_grads, tfq_grads, atol=eps)):
                    cnt += 1
                    tfq_grads = (cnt * tfq_grads + _get_gradient()) / (cnt + 1)
                self.assertAllClose(cirq_grads, tfq_grads, atol=eps)
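The averaging above is the standard incremental-mean update; a quick standalone check of that update (illustration only):

import numpy as np

samples = np.random.randn(10)
mean = samples[0]
for k, x in enumerate(samples[1:], start=1):
    mean = (k * mean + x) / (k + 1)  # same update as the loop above
assert np.isclose(mean, samples.mean())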
Example #11
0
class GradientCorrectnessTest(tf.test.TestCase, parameterized.TestCase):
    """Test correctness of the differentiators to reference cirq algorithm."""
    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(
                **{
                    'differentiator': DIFFS + STOCHASTIC_DIFFS,
                    'op': OPS,
                    'stochastic_cost': [False, True]
                })) + [{
                    'differentiator': adjoint.Adjoint(),
                    'op': circuit_execution_ops.get_expectation_op(),
                    'stochastic_cost': False
                }])
    def test_backprop(self, differentiator, op, stochastic_cost):
        """Test that gradients are correctly backpropagated through a quantum
        circuit via comparison to analytical results.
        """
        # hack to add stochastic cost. TODO (jaeyoo): remove this hack.
        differentiator.stochastic_cost = stochastic_cost
        differentiator.refresh()
        op = differentiator.generate_differentiable_op(analytic_op=op)

        def exact_grad(theta):
            new_theta = 2 * np.pi * theta
            return -2 * np.pi * np.sin(new_theta) * np.exp(np.cos(new_theta))
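        # A quick check of the formula: for cirq.X(bit)**s acting on |0>,
        # <Z> = cos(pi * s). Below, s = 2 * theta, so the tape computes
        # exp(cos(2 * pi * theta)), whose derivative with respect to theta is
        # -2 * pi * sin(2 * pi * theta) * exp(cos(2 * pi * theta)), i.e.
        # exactly exact_grad(theta).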

        bit = cirq.GridQubit(0, 0)
        circuits = util.convert_to_tensor(
            [cirq.Circuit(cirq.X(bit)**sympy.Symbol('rx')) for _ in range(2)])
        pstring = util.convert_to_tensor([[
            cirq.PauliSum.from_pauli_strings([cirq.PauliString({bit: cirq.Z})])
        ] for _ in circuits])
        base_rot_angles = tf.constant([[0.25], [0.125]])
        with tf.GradientTape() as g:
            g.watch(base_rot_angles)
            input_angles = 2 * base_rot_angles
            exp_res = tf.exp(
                op(circuits, tf.convert_to_tensor(['rx']), input_angles,
                   pstring))

        grad = g.gradient(exp_res, base_rot_angles)
        exact = [[exact_grad(0.25)], [exact_grad(0.125)]]

        # will this be too tight? time will tell.
        self.assertAllClose(exact, grad.numpy(), rtol=0.01, atol=0.01)

    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(
                **{
                    'differentiator': DIFFS,
                    'op': OPS,
                    'n_qubits': [5],
                    'n_programs': [3],
                    'n_ops': [3],
                    'symbol_names': [['a', 'b']]
                })) + [{
                    'differentiator': adjoint.Adjoint(),
                    'op': circuit_execution_ops.get_expectation_op(),
                    'n_qubits': 5,
                    'n_programs': 5,
                    'n_ops': 3,
                    'symbol_names': ['a', 'b']
                }])
    def test_gradients_vs_cirq_finite_difference(self, differentiator, op,
                                                 n_qubits, n_programs, n_ops,
                                                 symbol_names):
        """Compare TFQ differentiators to fine-grained noiseless cirq finite
        differencing.
        DISCLAIMER: the consistency of STOCHASTIC_DIFFS is hard to check
        directly. Its expectation value should be checked, but that takes a
        long time because SGDifferentiator is not optimized. Until it is, the
        consistency check is performed in
        benchmarks/scripts/differentiators:convergence_test.
        TODO(jaeyoo): move convergence_test here once SGDifferentiator is
        optimized.
        """
        differentiator.refresh()
        op = differentiator.generate_differentiable_op(analytic_op=op)

        qubits = cirq.GridQubit.rect(1, n_qubits)
        circuit_batch, resolver_batch = \
            util.random_symbol_circuit_resolver_batch(
                cirq.GridQubit.rect(1, n_qubits), symbol_names, n_programs)

        psums = [
            util.random_pauli_sums(qubits, 1, n_ops) for _ in circuit_batch
        ]

        symbol_values_array = np.array(
            [[resolver[symbol] for symbol in symbol_names]
             for resolver in resolver_batch],
            dtype=np.float32)

        # calculate tfq gradient
        symbol_values_tensor = tf.convert_to_tensor(symbol_values_array)
        programs = util.convert_to_tensor(circuit_batch)
        ops = util.convert_to_tensor(psums)
        with tf.GradientTape() as g:
            g.watch(symbol_values_tensor)
            expectations = op(programs, tf.convert_to_tensor(symbol_names),
                              symbol_values_tensor, ops)
        tfq_grads = g.gradient(expectations, symbol_values_tensor)

        # calculate gradients in cirq using a very simple forward differencing
        # scheme
        cirq_grads = _cirq_simple_finite_difference(circuit_batch,
                                                    resolver_batch,
                                                    symbol_names, psums)

        # will this be too tight? time will tell.
        self.assertAllClose(cirq_grads, tfq_grads, rtol=1e-2, atol=1e-2)

    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(
                **{
                    'differentiator': DIFFS + STOCHASTIC_DIFFS,
                    'op': OPS,
                    'stochastic_cost': [False, True]
                })) + [{
                    'differentiator': adjoint.Adjoint(),
                    'op': circuit_execution_ops.get_expectation_op(),
                    'stochastic_cost': False
                }])
    def test_analytic_value_with_simple_circuit(self, differentiator, op,
                                                stochastic_cost):
        """Test the value of differentiator with simple circuit.
        Since there is only one symbol, one gate and one op, there is only one
        sampling result, so STOCHASTIC_DIFFS gives the same result as the
        deterministic differentiators."""
        # Get an expectation op, with this differentiator attached.
        differentiator.refresh()
        differentiator.stochastic_cost = stochastic_cost
        op = differentiator.generate_differentiable_op(analytic_op=op)
        qubit = cirq.GridQubit(0, 0)
        circuit = util.convert_to_tensor(
            [cirq.Circuit(cirq.X(qubit)**sympy.Symbol('alpha'))])
        psums = util.convert_to_tensor([[cirq.Z(qubit)]])
        symbol_values_array = np.array([[0.123]], dtype=np.float32)
        # Calculate tfq gradient.
        symbol_values_tensor = tf.convert_to_tensor(symbol_values_array)
        with tf.GradientTape() as g:
            g.watch(symbol_values_tensor)
            expectations = op(circuit, tf.convert_to_tensor(['alpha']),
                              symbol_values_tensor, psums)
        grads = g.gradient(expectations, symbol_values_tensor)
        ground_truth_grads = np.array([[-1.1839752]])
        self.assertAllClose(ground_truth_grads, grads, rtol=1e-2, atol=1e-2)
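As a quick sanity check of the ground-truth value above: for cirq.X(qubit)**alpha acting on |0>, the analytic expectation is <Z> = cos(pi * alpha), so the exact gradient at alpha = 0.123 is:

import numpy as np

print(-np.pi * np.sin(np.pi * 0.123))  # ~ -1.1840, matching ground_truth_grads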
class SGDifferentiatorUtilTest(tf.test.TestCase, parameterized.TestCase):
    """Test the stochastic_differentiator_util module."""
    @parameterized.parameters([{'eps': 1e-7}])
    def test_get_parse_pauli_sums(self, eps):
        """Input & output check for _get_parse_pauli_sums()."""
        n_programs = 3
        n_ops = 2
        ops, psums, coeffs = _example_ops_helper(n_programs, n_ops)

        parser = sd_util._get_parse_pauli_sums()

        # input should be tensorflow tensor.
        with self.assertRaises(ValueError):
            # psums is used instead of ops.
            parser(psums, n_programs, n_ops)

        observable_coeff = parser(ops, n_programs, n_ops)
        # shape check
        tf.assert_equal([n_programs, n_ops], tf.shape(observable_coeff))
        # value check
        true_coeff = np.array(
            [np.sum(np.abs(coeff_list)) for coeff_list in coeffs])
        self.assertAllClose(np.ones([n_programs, n_ops]) * true_coeff,
                            observable_coeff,
                            atol=eps,
                            rtol=eps)

    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(**{
                'uniform_sampling': [True, False],
                'eps': [1e-6]
            })))
    def test_get_pdist_cost(self, uniform_sampling, eps):
        """Input & output check for _get_pdist_cost()."""
        n_programs = 3
        n_ops = 2
        ops, psums, _ = _example_ops_helper(n_programs, n_ops)

        parser = sd_util._get_parse_pauli_sums()

        # input should be tensorflow tensor.
        with self.assertRaises(ValueError):
            # psums is used instead of ops.
            parser(psums, n_programs, n_ops)

        observable_coeff = parser(ops, n_programs, n_ops)

        correction_factor_ops, pdist = \
            sd_util._get_pdist_cost(observable_coeff, uniform_sampling)
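        # In both branches below the correction factor equals 1 / pdist, which
        # keeps the sampled-cost estimator unbiased:
        # sum_i pdist_i * (1 / pdist_i) * term_i = sum_i term_i.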
        if uniform_sampling:
            ground_truth_correction_factor = np.array([[2.0, 2.0]])
            ground_truth_pdist = np.array([[0.5, 0.5]])
        else:
            ground_truth_correction_factor = np.array([[2.5, 5.0 / 3.0]])
            # pdist is weighted by each coefficient.
            ground_truth_pdist = np.array([[0.4, 0.6]])

        self.assertAllClose(ground_truth_correction_factor,
                            correction_factor_ops,
                            atol=eps,
                            rtol=eps)
        self.assertAllClose(ground_truth_pdist, pdist, atol=eps, rtol=eps)

    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(**{
                'uniform_sampling': [True, False],
                'eps': [0.1]
            })))
    def test_stochastic_cost_preprocessor(self, uniform_sampling, eps):
        """Input & output check for stochastic_cost_preprocessor().
        The consistency of the estimated average gradient is checked by:
        //benchmarks/scripts/differentiators:convergence_test"""
        n_programs = 3
        n_ops = 2
        ops, psums, _ = _example_ops_helper(n_programs, n_ops)

        # all inputs should be tensorflow tensors.
        with self.assertRaises(ValueError):
            # psums is used instead of ops.
            new_pauli_sums, cost_relocator, n_ops = \
                sd_util.stochastic_cost_preprocessor(
                    psums, n_programs, n_ops, uniform_sampling)

        new_pauli_sums, cost_relocator, new_n_ops = \
            sd_util.stochastic_cost_preprocessor(
                ops, n_programs, n_ops, uniform_sampling)
        # n_ops should be 1 because only one op is sampled.
        self.assertEqual(new_n_ops, 1, "n_ops should be 1")
        ground_truth_shape = np.array([n_programs, new_n_ops], dtype=np.int32)
        tf.assert_equal(ground_truth_shape, tf.shape(new_pauli_sums))
        ground_truth_shape = np.array([n_programs, n_ops], dtype=np.int32)
        tf.assert_equal(ground_truth_shape, tf.shape(cost_relocator))

        if uniform_sampling:
            ground_truth_pdist = [[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]]
            ground_truth_cost_relocator = [[2.0, 0.0], [0.0, 2.0]]

        else:
            ground_truth_pdist = [[0.4, 0.6], [0.4, 0.6], [0.4, 0.6]]
            ground_truth_cost_relocator = [[2.5, 0.0], [0.0, 5 / 3.0]]

        # Sample ops and estimate their probability distribution.
        cost_relocator_hist = np.zeros((n_programs, n_ops))
        n_samples = 700
        for _ in range(n_samples):
            _, cost_relocator, _ = sd_util.stochastic_cost_preprocessor(
                ops, n_programs, n_ops, uniform_sampling)
            for i, cost_per_program in enumerate(cost_relocator):
                loc = np.where(
                    np.isclose(ground_truth_cost_relocator,
                               cost_per_program))[0][0]
                cost_relocator_hist[i][loc] += 1.0

        pdist = cost_relocator_hist / n_samples
        self.assertAllClose(ground_truth_pdist, pdist, atol=eps, rtol=eps)

    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(**{
                'uniform_sampling': [True, False],
                'eps': [1e-6]
            })))
    def test_get_pdist_shifts(self, uniform_sampling, eps):
        """value check of _get_pdist_shifts()"""
        weights = np.array([[[[0.5, -0.5], [1.5, -1.5], [2.5, -2.5]],
                             [[0.5, -0.5], [1.5, -1.5], [2.5, -2.5]],
                             [[0.5, -0.5], [1.5, -1.5], [2.5, -2.5]]],
                            [[[-1., 1.], [-2., 2.], [0., -0.]],
                             [[-1., 1.], [-2., 2.], [0., -0.]],
                             [[-1., 1.], [-2., 2.], [0., -0.]]]])
        # Transpose to [n_symbols, n_programs, n_shifts, n_param_gates]
        weights = np.transpose(weights, [0, 1, 3, 2])
        # Reshape to [sub_total_programs, n_param_gates]
        sub_total_programs = np.prod(weights.shape[:-1])
        n_param_gates = weights.shape[-1]
        weights = np.reshape(weights, [sub_total_programs, n_param_gates])

        corrected_weights, pdist = \
            sd_util._get_pdist_shifts(weights, uniform_sampling)
        if uniform_sampling:
            ground_truth_corrected_weights = np.array([[1.5, 4.5, 7.5],
                                                       [-1.5, -4.5, -7.5],
                                                       [1.5, 4.5, 7.5],
                                                       [-1.5, -4.5, -7.5],
                                                       [1.5, 4.5, 7.5],
                                                       [-1.5, -4.5, -7.5],
                                                       [-2.0, -4.0, 0.0],
                                                       [2.0, 4.0, -0.0],
                                                       [-2.0, -4.0, 0.0],
                                                       [2.0, 4.0, -0.0],
                                                       [-2.0, -4.0, 0.0],
                                                       [2.0, 4.0, -0.0]])
            ground_truth_pdist = np.array([[1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0],
                                           [1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0],
                                           [1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0],
                                           [1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0],
                                           [1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0],
                                           [1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0],
                                           [0.5, 0.5, 0.0], [0.5, 0.5, 0.0],
                                           [0.5, 0.5, 0.0], [0.5, 0.5, 0.0],
                                           [0.5, 0.5, 0.0], [0.5, 0.5, 0.0]])
        else:
            ground_truth_corrected_weights = np.array([[4.5, 4.5, 4.5],
                                                       [-4.5, -4.5, -4.5],
                                                       [4.5, 4.5, 4.5],
                                                       [-4.5, -4.5, -4.5],
                                                       [4.5, 4.5, 4.5],
                                                       [-4.5, -4.5, -4.5],
                                                       [-3.0, -3.0, 0.0],
                                                       [3.0, 3.0, -0.0],
                                                       [-3.0, -3.0, 0.0],
                                                       [3.0, 3.0, -0.0],
                                                       [-3.0, -3.0, 0.0],
                                                       [3.0, 3.0, -0.0]])
            # pdist is weighted by each coefficient.
            ground_truth_pdist = np.array(
                [[1.0 / 9.0, 1.0 / 3.0, 5.0 / 9.0],
                 [1.0 / 9.0, 1.0 / 3.0, 5.0 / 9.0],
                 [1.0 / 9.0, 1.0 / 3.0, 5.0 / 9.0],
                 [1.0 / 9.0, 1.0 / 3.0, 5.0 / 9.0],
                 [1.0 / 9.0, 1.0 / 3.0, 5.0 / 9.0],
                 [1.0 / 9.0, 1.0 / 3.0, 5.0 / 9.0],
                 [1.0 / 3.0, 2.0 / 3.0, 0.0], [1.0 / 3.0, 2.0 / 3.0, 0.0],
                 [1.0 / 3.0, 2.0 / 3.0, 0.0], [1.0 / 3.0, 2.0 / 3.0, 0.0],
                 [1.0 / 3.0, 2.0 / 3.0, 0.0], [1.0 / 3.0, 2.0 / 3.0, 0.0]],
                dtype=np.float32)

        self.assertAllClose(ground_truth_corrected_weights,
                            corrected_weights,
                            atol=eps,
                            rtol=eps)
        self.assertAllClose(ground_truth_pdist, pdist, atol=eps, rtol=eps)

    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(**{
                'uniform_sampling': [True, False],
                'eps': [0.1]
            })))
    def test_stochastic_generator_preprocessor(self, uniform_sampling, eps):
        """Input & output check for stochastic_generator_preprocessor().
        The consistency of the estimated average gradient is checked by:
        //benchmarks/scripts/differentiators:convergence_test"""
        n_qubits = 5
        n_programs = 3
        symbol_names = ['a', 'b']

        programs, symbol_values_tensor, n_symbols, n_shifts = \
            _example_circuit_helper(n_qubits, n_programs)

        new_programs_before, weights_before, shifts_before, \
        n_param_gates_before = parameter_shift_util.parse_programs(
            programs, symbol_names, symbol_values_tensor, n_symbols)

        new_programs, weights, shifts, n_param_gates = \
            sd_util.stochastic_generator_preprocessor(
                new_programs_before, weights_before, shifts_before, n_programs,
                n_symbols, n_param_gates_before, n_shifts, uniform_sampling)

        # n_param_gates should be 1 because only one generator is sampled.
        self.assertEqual(n_param_gates, 1, "n_param_gates should be 1")
        ground_truth_shape = np.array(
            [n_symbols, n_programs, n_param_gates, n_shifts], dtype=np.int32)
        tf.assert_equal(ground_truth_shape, tf.shape(new_programs))
        tf.assert_equal(ground_truth_shape, tf.shape(weights))
        tf.assert_equal(ground_truth_shape, tf.shape(shifts))

        # Estimate the probability of sampling each shift
        ground_truth_shifts = [[[1.5707964, -1.5707964],
                                [0.5235988, -0.5235988],
                                [0.31415927, -0.31415927]],
                               [[0.21460181, 1.7853982],
                                [0.6073009, 1.3926991], [1.0, 1.0]]]
        if uniform_sampling:
            ground_truth_pdist = [[0.333333, 0.333333, 0.333333],
                                  [0.5, 0.5, 0.0]]
        else:
            ground_truth_pdist = [[0.111111, 0.333333, 0.555555],
                                  [0.333333, 0.666666, 0.0]]

        shifts_hist = np.zeros((n_symbols, n_programs))
        n_samples = 700
        for _ in range(n_samples):
            _, _, shifts, _ = \
                sd_util.stochastic_generator_preprocessor(
                    new_programs_before, weights_before, shifts_before,
                    n_programs, n_symbols, n_param_gates_before, n_shifts,
                    uniform_sampling)
            for i, shifts_per_symbol in enumerate(shifts):
                for s in shifts_per_symbol:  # per program
                    loc = np.where(np.isclose(ground_truth_shifts, s))[1][0]
                    shifts_hist[i][loc] += 1.0

        shifts_pdist = shifts_hist / n_samples / n_programs
        self.assertAllClose(ground_truth_pdist,
                            shifts_pdist,
                            atol=eps,
                            rtol=eps)

    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(**{
                'uniform_sampling': [True, False],
                'eps': [1e-6]
            })))
    def test_get_pdist_symbols(self, uniform_sampling, eps):
        """value check of _get_pdist_symbols()"""
        weights = np.array([[[[0.5, -0.5], [1.5, -1.5], [2.5, -2.5]],
                             [[0.5, -0.5], [1.5, -1.5], [2.5, -2.5]],
                             [[0.5, -0.5], [1.5, -1.5], [2.5, -2.5]]],
                            [[[-1., 1.], [-2., 2.], [0., -0.]],
                             [[-1., 1.], [-2., 2.], [0., -0.]],
                             [[-1., 1.], [-2., 2.], [0., -0.]]]])
        # Transpose to [n_param_gates, n_shifts, n_programs, n_symbols]
        weights = np.transpose(weights, [1, 2, 3, 0])
        # Reshape to [sub_total_programs, n_symbols]
        sub_total_programs = np.prod(weights.shape[:-1])
        n_symbols = weights.shape[-1]
        weights = np.reshape(weights, [sub_total_programs, n_symbols])

        corrected_weights, pdist = sd_util._get_pdist_symbols(
            weights, uniform_sampling)
        # In this case, both pdist's of uniform_sampling=True & False are equal.
        ground_truth_corrected_weights = np.array([[0.8333333, -2.5],
                                                   [-0.8333333, 2.5],
                                                   [2.5, -5.0], [-2.5, 5.0],
                                                   [4.1666665, 0.0],
                                                   [-4.1666665, -0.0],
                                                   [0.8333333, -2.5],
                                                   [-0.8333333, 2.5],
                                                   [2.5, -5.0], [-2.5, 5.0],
                                                   [4.1666665, 0.0],
                                                   [-4.1666665, -0.0],
                                                   [0.8333333, -2.5],
                                                   [-0.8333333, 2.5],
                                                   [2.5, -5.0], [-2.5, 5.0],
                                                   [4.1666665, 0.0],
                                                   [-4.1666665, -0.0]])
        ground_truth_pdist = np.array([[0.6, 0.4]])

        self.assertAllClose(ground_truth_corrected_weights,
                            corrected_weights,
                            atol=eps,
                            rtol=eps)
        self.assertAllClose(ground_truth_pdist, pdist, atol=eps, rtol=eps)

    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(**{
                'uniform_sampling': [True, False],
                'eps': [0.1]
            })))
    def test_stochastic_coordinate_preprocessor(self, uniform_sampling, eps):
        """Input & output check for stochastic_coordinate_preprocessor().
        The consistency of the estimated average gradient is checked by:
        //benchmarks/scripts/differentiators:convergence_test"""
        n_qubits = 5
        n_programs = 3
        symbol_names = ['a', 'b']

        programs, symbol_values_tensor, n_symbols, n_shifts = \
            _example_circuit_helper(n_qubits, n_programs)

        n_ops = 2
        ops, psums, _ = _example_ops_helper(n_programs, n_ops)

        new_programs, weights_before, shifts, n_param_gates = \
            parameter_shift_util.parse_programs(
                programs, symbol_names, symbol_values_tensor, n_symbols)

        # all inputs should be tensorflow tensors.
        with self.assertRaises(ValueError):
            # symbol_values_array is used instead of symbol_values_tensor.
            sd_util.stochastic_coordinate_preprocessor(
                new_programs, symbol_values_tensor.numpy(), ops,
                weights_before, shifts, n_programs, n_symbols, n_param_gates,
                n_shifts, n_ops, uniform_sampling)
            # psums is used instead of ops.
            sd_util.stochastic_coordinate_preprocessor(
                new_programs, symbol_values_tensor, psums, weights_before,
                shifts, n_programs, n_symbols, n_param_gates, n_shifts, n_ops,
                uniform_sampling)

        flat_programs, flat_perturbations, flat_ops, _, weights, \
        coordinate_relocator = \
            sd_util.stochastic_coordinate_preprocessor(
                new_programs, symbol_values_tensor, ops, weights_before,
                shifts, n_programs, n_symbols, n_param_gates, n_shifts,
                n_ops, uniform_sampling)

        # n_symbols is not reduced to 1, because that would not fit the input
        # format of expectation_op or sampling_op.
        total_programs = n_programs * n_param_gates * n_shifts
        # flat_programs should have n_programs * n_param_gates * n_shifts * 1
        # entries because only one symbol is sampled now.
        self.assertAllClose([total_programs],
                            tf.shape(flat_programs),
                            atol=eps,
                            rtol=eps)
        # A perturbation symbol is added, so the number of symbols should be
        # n_symbols + 1.
        self.assertAllClose([total_programs, n_symbols + 1],
                            tf.shape(flat_perturbations),
                            atol=eps,
                            rtol=eps)
        # shape check on flat_ops.
        self.assertAllClose([total_programs, n_ops],
                            tf.shape(flat_ops),
                            atol=eps,
                            rtol=eps)
        # resampled weights are in
        # [n_symbols, n_param_gates, n_shifts, n_programs]
        self.assertAllClose([n_symbols, n_param_gates, n_shifts, n_programs],
                            tf.shape(weights),
                            atol=eps,
                            rtol=eps)
        # resampled coordinate_relocator is in [total_programs, n_symbols]
        self.assertAllClose([total_programs, n_symbols],
                            tf.shape(coordinate_relocator),
                            atol=eps,
                            rtol=eps)

        # Estimate the probability of sampling each shift
        ground_truth_shifts = [[
            1.5707964, -1.5707964, 0.5235988, -0.5235988, 0.31415927,
            -0.31415927
        ], [0.21460181, 1.7853982, 0.6073009, 1.3926991, 1.0, 1.0]]

        ground_truth_pdist = [0.6, 0.4]

        shifts_hist = np.zeros((n_symbols, ))
        n_samples = 700
        cnt = 0.0
        for _ in range(n_samples):
            _, flat_perturbations, _, _, _, _ = \
                sd_util.stochastic_coordinate_preprocessor(
                    new_programs, symbol_values_tensor, ops, weights_before,
                    shifts, n_programs, n_symbols, n_param_gates, n_shifts,
                    n_ops, uniform_sampling)

            for s in flat_perturbations[:, -1]:  # See only shift symbols.
                sym = np.where(np.isclose(ground_truth_shifts, s))[0][0]
                shifts_hist[sym] += 1.0
                cnt += 1.0

        shifts_pdist = shifts_hist / cnt
        self.assertAllClose(ground_truth_pdist,
                            shifts_pdist,
                            atol=eps,
                            rtol=eps)
class SGDifferentiatorTest(tf.test.TestCase, parameterized.TestCase):
    """Test the SGDifferentiator will run end to end."""

    def test_stochastic_differentiator_instantiate(self):
        """Test SGDifferentiator type checking."""
        stochastic_differentiator.SGDifferentiator()
        with self.assertRaisesRegex(
                TypeError, expected_regex="stochastic_coordinate must be"):
            stochastic_differentiator.SGDifferentiator(stochastic_coordinate=1)
            stochastic_differentiator.SGDifferentiator(
                stochastic_coordinate=0.1)
            stochastic_differentiator.SGDifferentiator(
                stochastic_coordinate=[1])
            stochastic_differentiator.SGDifferentiator(
                stochastic_coordinate="junk")
        with self.assertRaisesRegex(
                TypeError, expected_regex="stochastic_generator must be"):
            stochastic_differentiator.SGDifferentiator(stochastic_generator=1)
            stochastic_differentiator.SGDifferentiator(stochastic_generator=0.1)
            stochastic_differentiator.SGDifferentiator(stochastic_generator=[1])
            stochastic_differentiator.SGDifferentiator(
                stochastic_generator="junk")
        with self.assertRaisesRegex(TypeError,
                                    expected_regex="stochastic_cost must be"):
            stochastic_differentiator.SGDifferentiator(stochastic_cost=1)
            stochastic_differentiator.SGDifferentiator(stochastic_cost=0.1)
            stochastic_differentiator.SGDifferentiator(stochastic_cost=[1])
            stochastic_differentiator.SGDifferentiator(stochastic_cost="junk")

    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(
                **{
                    'coordinate': [True, False],
                    'generator': [True, False],
                    'cost': [True, False],
                    'uniform': [True, False]
                })))
    def test_stochastic_differentiator_call_analytic(self, coordinate,
                                                     generator, cost, uniform):
        """Test if SGDifferentiator.differentiate_analytical doesn't crash
            before running."""
        programs, names, values, ops, _, true_f, true_g = \
        _simple_op_inputs()
        diff = stochastic_differentiator.SGDifferentiator(
            coordinate, generator, cost, uniform)
        op = diff.generate_differentiable_op(
            analytic_op=circuit_execution_ops.get_expectation_op())

        with tf.GradientTape() as g:
            g.watch(values)
            expectations = op(programs, names, values, ops)
        grads = g.gradient(expectations, values)
        self.assertAllClose(expectations, true_f, atol=1e-2, rtol=1e-2)
        self.assertAllClose(grads, true_g, atol=1e-2, rtol=1e-2)

    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(
                **{
                    'coordinate': [True, False],
                    'generator': [True, False],
                    'cost': [True, False],
                    'uniform': [True, False]
                })))
    def test_stochastic_differentiator_call_sampled(self, coordinate, generator,
                                                    cost, uniform):
        """Test if SGDifferentiator.differentiate_sampled doesn't crash before
            running."""
        programs, names, values, ops, n_samples, true_f, true_g = \
        _simple_op_inputs()
        diff = stochastic_differentiator.SGDifferentiator(
            coordinate, generator, cost, uniform)
        op = diff.generate_differentiable_op(
            sampled_op=circuit_execution_ops.get_sampled_expectation_op())

        with tf.GradientTape() as g:
            g.watch(values)
            expectations = op(programs, names, values, ops, n_samples)
        grads = g.gradient(expectations, values)
        self.assertAllClose(expectations, true_f, atol=1e-1, rtol=1e-1)
        self.assertAllClose(grads, true_g, atol=1e-1, rtol=1e-1)
Example #14
0
class ParameterShiftTest(tf.test.TestCase, parameterized.TestCase):
    """Test the ParameterShift Differentiator will run end to end."""
    def test_parameter_shift_analytic(self):
        """Test if ParameterShift.differentiate_analytical doesn't crash before
        running."""
        programs, names, values, ops, _, true_f, true_g = \
        _simple_op_inputs()

        ps = parameter_shift.ParameterShift()
        op = ps.generate_differentiable_op(
            analytic_op=circuit_execution_ops.get_expectation_op())

        with tf.GradientTape() as g:
            g.watch(values)
            expectations = op(programs, names, values, ops)
        grads = g.gradient(expectations, values)
        self.assertAllClose(expectations, true_f, atol=1e-2, rtol=1e-2)
        self.assertAllClose(grads, true_g, atol=1e-2, rtol=1e-2)

    def test_parameter_shift_sampled(self):
        """Test if ParameterShift.differentiate_sampled doesn't crash before
        running."""
        programs, names, values, ops, n_samples, true_f, true_g = \
        _simple_op_inputs()
        ps = parameter_shift.ParameterShift()
        op = ps.generate_differentiable_op(
            sampled_op=circuit_execution_ops.get_sampled_expectation_op())

        with tf.GradientTape() as g:
            g.watch(values)
            expectations = op(programs, names, values, ops, n_samples)
        grads = g.gradient(expectations, values)
        self.assertAllClose(expectations, true_f, atol=1e-1, rtol=1e-1)
        self.assertAllClose(grads, true_g, atol=1e-1, rtol=1e-1)

    def test_get_gradient_circuits(self):
        """Test that the correct objects are returned."""

        diff = parameter_shift.ParameterShift()

        # Circuits to differentiate.
        symbols = [sympy.Symbol("s0"), sympy.Symbol("s1")]
        q0 = cirq.GridQubit(0, 0)
        q1 = cirq.GridQubit(1, 2)
        input_programs = util.convert_to_tensor([
            cirq.Circuit(
                cirq.X(q0)**symbols[0],
                cirq.Y(q0)**symbols[0],
                cirq.ry(symbols[1])(q1)),
            cirq.Circuit(cirq.Y(q1)**symbols[1]),
        ])
        input_symbol_names = tf.constant([str(s) for s in symbols])
        input_symbol_values = tf.constant([[1.5, -2.7], [-0.3, 0.9]])

        # First, for each symbol `s`, check how many times `s` appears in each
        # program `p`, `n_ps`. Let `n_param_gates` be the maximum of `n_ps` over
        # all symbols and programs. Then, the shape of `batch_programs` will be
        # [n_programs, n_symbols * n_param_gates * n_shifts], where `n_shifts`
        # is 2 because we decompose into gates with 2 eigenvalues. For row index
        # `p` we have for column indices between `i * n_param_gates * n_shifts`
        # and `(i + 1) * n_param_gates * n_shifts`, the first `n_pi * 2`
        # programs are parameter shifted versions of `input_programs[p]` and the
        # remaining programs are empty.
        # Here, `n_param_gates` is 2.
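        # Concretely for this test: n_programs = 2, n_symbols = 2,
        # n_param_gates = 2, and n_shifts = 2, so each expected_batch_programs_*
        # tensor below lists 2 * 2 * 2 = 8 programs.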
        impurity_symbol_name = "_impurity_for_param_shift"
        impurity_symbol = sympy.Symbol(impurity_symbol_name)
        expected_batch_programs_0 = util.convert_to_tensor([
            cirq.Circuit(
                cirq.X(q0)**impurity_symbol,
                cirq.Y(q0)**symbols[0],
                cirq.ry(symbols[1])(q1)),
            cirq.Circuit(
                cirq.X(q0)**impurity_symbol,
                cirq.Y(q0)**symbols[0],
                cirq.ry(symbols[1])(q1)),
            cirq.Circuit(
                cirq.X(q0)**symbols[0],
                cirq.Y(q0)**impurity_symbol,
                cirq.ry(symbols[1])(q1)),
            cirq.Circuit(
                cirq.X(q0)**symbols[0],
                cirq.Y(q0)**impurity_symbol,
                cirq.ry(symbols[1])(q1)),
            cirq.Circuit(
                cirq.X(q0)**symbols[0],
                cirq.Y(q0)**symbols[0],
                cirq.ry(impurity_symbol)(q1)),
            cirq.Circuit(
                cirq.X(q0)**symbols[0],
                cirq.Y(q0)**symbols[0],
                cirq.ry(impurity_symbol)(q1)),
            cirq.Circuit(),
            cirq.Circuit()
        ])
        expected_batch_programs_1 = util.convert_to_tensor([
            cirq.Circuit(),
            cirq.Circuit(),
            cirq.Circuit(),
            cirq.Circuit(),
            cirq.Circuit(cirq.Y(q1)**impurity_symbol),
            cirq.Circuit(cirq.Y(q1)**impurity_symbol),
            cirq.Circuit(),
            cirq.Circuit()
        ])
        expected_batch_programs = tf.stack(
            [expected_batch_programs_0, expected_batch_programs_1])

        # The new symbols are the old ones, with an extra used for shifting.
        expected_new_symbol_names = tf.concat(
            [input_symbol_names,
             tf.constant([impurity_symbol_name])], 0)

        # The batch symbol values are the input symbol values, tiled and with
        # shifted values appended. Locations that have empty programs should
        # also have zero for the shift.
        # The shifted values are the original value plus or minus 1/2 divided
        # by the `exponent_scalar` of the gate.
        expected_batch_symbol_values = tf.constant(
            [[[1.5, -2.7, 1.5 + 0.5], [1.5, -2.7, 1.5 - 0.5],
              [1.5, -2.7, 1.5 + 0.5], [1.5, -2.7, 1.5 - 0.5],
              [1.5, -2.7, -2.7 + np.pi / 2], [1.5, -2.7, -2.7 - np.pi / 2],
              [1.5, -2.7, -2.7], [1.5, -2.7, -2.7]],
             [[-0.3, 0.9, -0.3], [-0.3, 0.9, -0.3], [-0.3, 0.9, -0.3],
              [-0.3, 0.9, -0.3], [-0.3, 0.9, 0.9 + 0.5],
              [-0.3, 0.9, 0.9 - 0.5], [-0.3, 0.9, 0.9], [-0.3, 0.9, 0.9]]])
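        # Concretely: X**s0 and Y**s0 have exponent_scalar 1, so s0 = 1.5 is
        # shifted by +/- 0.5, while cirq.ry(s1) is Y**(s1 / pi) up to global
        # phase (exponent_scalar 1 / pi), so s1 = -2.7 is shifted by
        # +/- (0.5 / (1 / pi)) = +/- pi / 2.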

        # Empty program locations are given zero weight.
        expected_batch_weights = tf.constant(
            [[[np.pi / 2, -np.pi / 2, np.pi / 2, -np.pi / 2],
              [0.5, -0.5, 0.0, 0.0]],
             [[0.0, 0.0, 0.0, 0.0], [np.pi / 2, -np.pi / 2, 0.0, 0.0]]])

        expected_batch_mapper = tf.constant([[[0, 1, 2, 3], [4, 5, 6, 7]],
                                             [[0, 1, 2, 3], [4, 5, 6, 7]]])

        (test_batch_programs, test_new_symbol_names, test_batch_symbol_values,
         test_batch_weights, test_batch_mapper) = diff.get_gradient_circuits(
             input_programs, input_symbol_names, input_symbol_values)
        for i in range(tf.shape(input_programs)[0]):
            self.assertAllEqual(util.from_tensor(expected_batch_programs[i]),
                                util.from_tensor(test_batch_programs[i]))
        self.assertAllEqual(expected_new_symbol_names, test_new_symbol_names)
        self.assertAllClose(expected_batch_symbol_values,
                            test_batch_symbol_values,
                            atol=1e-5)
        self.assertAllClose(expected_batch_weights,
                            test_batch_weights,
                            atol=1e-5)
        self.assertAllEqual(expected_batch_mapper, test_batch_mapper)

    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(
                **{
                    'differentiator': [
                        parameter_shift.ParameterShift(),
                    ],
                    'n_qubits': [5],
                    'n_programs': [3],
                    'n_ops': [3],
                    'symbol_names': [['a', 'b']]
                })))
    def test_gradient_circuits_grad_comparison(self, differentiator, n_qubits,
                                               n_programs, n_ops,
                                               symbol_names):
        """Test that analytic gradient agrees with the one from grad circuits"""
        # Get random circuits to check.
        qubits = cirq.GridQubit.rect(1, n_qubits)
        circuit_batch, resolver_batch = \
            util.random_symbol_circuit_resolver_batch(
                cirq.GridQubit.rect(1, n_qubits), symbol_names, n_programs)
        psums = [
            util.random_pauli_sums(qubits, 1, n_ops) for _ in circuit_batch
        ]

        # Convert to tensors.
        symbol_names_array = np.array(symbol_names)
        symbol_values_array = np.array(
            [[resolver[symbol] for symbol in symbol_names]
             for resolver in resolver_batch],
            dtype=np.float32)
        symbol_names_tensor = tf.convert_to_tensor(symbol_names_array)
        symbol_values_tensor = tf.convert_to_tensor(symbol_values_array)
        programs = util.convert_to_tensor(circuit_batch)
        ops_tensor = util.convert_to_tensor(psums)

        # Get gradients using expectations of gradient circuits.
        (batch_programs, new_symbol_names, batch_symbol_values, batch_weights,
         batch_mapper) = differentiator.get_gradient_circuits(
             programs, symbol_names_tensor, symbol_values_tensor)
        analytic_op = circuit_execution_ops.get_expectation_op()
        batch_pauli_sums = tf.tile(tf.expand_dims(ops_tensor, 1),
                                   [1, tf.shape(batch_programs)[1], 1])
        n_batch_programs = tf.reduce_prod(tf.shape(batch_programs))
        n_symbols = tf.shape(new_symbol_names)[0]
        batch_expectations = analytic_op(
            tf.reshape(batch_programs, [n_batch_programs]), new_symbol_names,
            tf.reshape(batch_symbol_values, [n_batch_programs, n_symbols]),
            tf.reshape(batch_pauli_sums, [n_batch_programs, n_ops]))
        batch_expectations = tf.reshape(batch_expectations,
                                        tf.shape(batch_pauli_sums))
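        # Recover per-symbol gradients program by program: for program i,
        # jacobian[k, p] = sum_m weights[i, k, m]
        #                        * expectations[i, mapper[i, k, m], p],
        # where k indexes symbols, m the shifted circuits for that symbol,
        # and p the measured ops. Summing over p below gives the same shape
        # as the autodiff gradient, [n_programs, n_symbols].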
        batch_jacobian = tf.map_fn(
            lambda x: tf.einsum('km,kmp->kp', x[0], tf.gather(x[1], x[2])),
            (batch_weights, batch_expectations, batch_mapper),
            fn_output_signature=tf.float32)
        grad_manual = tf.reduce_sum(batch_jacobian, -1)

        # Get gradients using autodiff.
        differentiator.refresh()
        differentiable_op = differentiator.generate_differentiable_op(
            analytic_op=analytic_op)
        with tf.GradientTape() as g:
            g.watch(symbol_values_tensor)
            exact_outputs = differentiable_op(programs, symbol_names_tensor,
                                              symbol_values_tensor, ops_tensor)
        grad_auto = g.gradient(exact_outputs, symbol_values_tensor)
        self.assertAllClose(grad_manual, grad_auto, atol=1e-5)
Example #15
class AnalyticGradientCorrectnessTest(tf.test.TestCase,
                                      parameterized.TestCase):
    """Test correctness of the differentiators to reference cirq algorithm."""
    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(**{
                'differentiator': ANALYTIC_DIFFS,
                'op': ANALYTIC_OPS
            })) + [{
                'differentiator': adjoint.Adjoint(),
                'op': circuit_execution_ops.get_expectation_op()
            }])
    def test_backprop(self, differentiator, op):
        """Test that gradients are correctly backpropagated through a quantum
        circuit via comparison to analytical results.
        """
        differentiator.refresh()
        op = differentiator.generate_differentiable_op(analytic_op=op)

        def exact_grad(theta):
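            # For X**rx on |0>, <Z> = cos(pi * rx); here rx = 2 * theta, so the
            # traced quantity is exp(cos(2 * pi * theta)) and its derivative is
            # -2 * pi * sin(2 * pi * theta) * exp(cos(2 * pi * theta)).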
            new_theta = 2 * np.pi * theta
            return -2 * np.pi * np.sin(new_theta) * np.exp(np.cos(new_theta))

        bit = cirq.GridQubit(0, 0)
        circuits = util.convert_to_tensor(
            [cirq.Circuit(cirq.X(bit)**sympy.Symbol('rx')) for _ in range(2)])
        pstring = util.convert_to_tensor([[
            cirq.PauliSum.from_pauli_strings([cirq.PauliString({bit: cirq.Z})])
        ] for _ in circuits])
        base_rot_angles = tf.constant([[0.25], [0.125]])
        with tf.GradientTape() as g:
            g.watch(base_rot_angles)
            input_angles = 2 * base_rot_angles
            exp_res = tf.exp(
                op(circuits, tf.convert_to_tensor(['rx']), input_angles,
                   pstring))

        grad = g.gradient(exp_res, base_rot_angles)
        exact = [[exact_grad(0.25)], [exact_grad(0.125)]]

        # will this be too tight? time will tell.
        self.assertAllClose(exact, grad.numpy(), rtol=0.01, atol=0.01)

    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(
                **{
                    'differentiator': ANALYTIC_DIFFS,
                    'op': ANALYTIC_OPS,
                    'n_qubits': [5],
                    'n_programs': [3],
                    'n_ops': [3],
                    'symbol_names': [['a', 'b']]
                })) + [{
                    'differentiator': adjoint.Adjoint(),
                    'op': circuit_execution_ops.get_expectation_op(),
                    'n_qubits': 10,
                    'n_programs': 5,
                    'n_ops': 3,
                    'symbol_names': ['a', 'b']
                }])
    def test_gradients_vs_cirq_finite_difference(self, differentiator, op,
                                                 n_qubits, n_programs, n_ops,
                                                 symbol_names):
        """Compare TFQ differentiators to fine-grained noiseless cirq finite
        differencing.
        """
        differentiator.refresh()
        op = differentiator.generate_differentiable_op(analytic_op=op)

        qubits = cirq.GridQubit.rect(1, n_qubits)
        circuit_batch, resolver_batch = \
            util.random_symbol_circuit_resolver_batch(
                cirq.GridQubit.rect(1, n_qubits), symbol_names, n_programs)

        psums = [
            util.random_pauli_sums(qubits, 1, n_ops) for _ in circuit_batch
        ]

        symbol_values_array = np.array(
            [[resolver[symbol] for symbol in symbol_names]
             for resolver in resolver_batch],
            dtype=np.float32)

        # calculate tfq gradient
        symbol_values_tensor = tf.convert_to_tensor(symbol_values_array)
        programs = util.convert_to_tensor(circuit_batch)
        ops = util.convert_to_tensor(psums)
        with tf.GradientTape() as g:
            g.watch(symbol_values_tensor)
            expectations = op(programs, tf.convert_to_tensor(symbol_names),
                              symbol_values_tensor, ops)
        tfq_grads = g.gradient(expectations, symbol_values_tensor)

        # Calculate gradients in cirq using a very simple forward differencing
        # scheme (see the illustrative sketch after this example).
        cirq_grads = _cirq_simple_finite_difference(circuit_batch,
                                                    resolver_batch,
                                                    symbol_names, psums)

        # will this be too tight? time will tell.
        self.assertAllClose(cirq_grads, tfq_grads, rtol=2e-2, atol=2e-2)

    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(**{
                'differentiator': ANALYTIC_DIFFS,
                'op': ANALYTIC_OPS,
            })) + [{
                'differentiator': adjoint.Adjoint(),
                'op': circuit_execution_ops.get_expectation_op(),
            }])
    def test_analytic_value_with_simple_circuit(self, differentiator, op):
        """Test the value of differentiator with simple circuit."""
        # Get an expectation op, with this differentiator attached.
        differentiator.refresh()
        op = differentiator.generate_differentiable_op(analytic_op=op)
        qubit = cirq.GridQubit(0, 0)
        circuit = util.convert_to_tensor(
            [cirq.Circuit(cirq.X(qubit)**sympy.Symbol('alpha'))])
        psums = util.convert_to_tensor([[cirq.Z(qubit)]])
        symbol_values_array = np.array([[0.123]], dtype=np.float32)
        # Calculate tfq gradient.
        symbol_values_tensor = tf.convert_to_tensor(symbol_values_array)
        with tf.GradientTape() as g:
            g.watch(symbol_values_tensor)
            expectations = op(circuit, tf.convert_to_tensor(['alpha']),
                              symbol_values_tensor, psums)
        grads = g.gradient(expectations, symbol_values_tensor)
        ground_truth_grads = np.array([[-1.1839752]])
        self.assertAllClose(ground_truth_grads, grads, rtol=1e-2, atol=1e-2)

    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(**{
                'differentiator': ANALYTIC_DIFFS,
                'op': ANALYTIC_OPS,
            })) + [{
                'differentiator': adjoint.Adjoint(),
                'op': circuit_execution_ops.get_expectation_op(),
            }])
    def test_empty_circuit_grad(self, differentiator, op):
        """Test that providing no circuits will fail gracefully."""
        differentiator.refresh()
        op = differentiator.generate_differentiable_op(analytic_op=op)
        circuit = tf.convert_to_tensor([], dtype=tf.string)
        psums = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.string)

        # Calculate tfq gradient.
        symbol_values_tensor = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.float32)
        symbol_names_tensor = tf.convert_to_tensor([], dtype=tf.string)
        with tf.GradientTape() as g:
            g.watch(symbol_values_tensor)
            expectations = op(circuit, symbol_names_tensor,
                              symbol_values_tensor, psums)
        grads = g.gradient(expectations, symbol_values_tensor)
        self.assertShapeEqual(grads.numpy(),
                              tf.raw_ops.Empty(shape=(0, 0), dtype=tf.float32))
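The reference helper _cirq_simple_finite_difference used above is defined earlier in the test module and is not shown in this excerpt. The sketch below is only an illustration of what such a forward-differencing reference might look like; the function name, signature, default step size, and the choice to sum over ops are assumptions, not the actual helper.

def _illustrative_finite_difference(circuit_batch, resolver_batch,
                                    symbol_names, pauli_sums,
                                    grid_spacing=1e-4):
    """Illustrative forward-difference gradients; not the actual helper."""
    simulator = cirq.Simulator()

    def _expectations(circuit, resolver, psums):
        # Simulate once, then take <psum> for every pauli sum of this circuit.
        result = simulator.simulate(circuit, resolver)
        order = {q: i for i, q in enumerate(sorted(circuit.all_qubits()))}
        return np.array([
            psum.expectation_from_state_vector(result.final_state_vector,
                                               qubit_map=order).real
            for psum in psums
        ])

    grads = []
    for circuit, resolver, psums in zip(circuit_batch, resolver_batch,
                                        pauli_sums):
        base = _expectations(circuit, resolver, psums)
        per_symbol = []
        for name in symbol_names:
            # Shift only the matching symbol by a small step.
            shifted = cirq.ParamResolver({
                k: v + (grid_spacing if str(k) == str(name) else 0.0)
                for k, v in resolver.param_dict.items()
            })
            # Sum over ops so the result lines up with the autodiff gradient,
            # which has one entry per (program, symbol).
            per_symbol.append(
                np.sum((_expectations(circuit, shifted, psums) - base) /
                       grid_spacing))
        grads.append(per_symbol)
    return np.array(grads, dtype=np.float32)

The real helper may differ in how it batches and normalizes; the point is only that each symbol is perturbed by a small step and the resulting change in expectation is divided by that step.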
Example #16
class SampleTest(tf.test.TestCase, parameterized.TestCase):
    """Tests for the Sample layer."""
    def test_sample_create(self):
        """Test that sample instantiates correctly."""
        sample.Sample(backend=cirq.Simulator())
        sample.Sample()
        with self.assertRaisesRegex(TypeError,
                                    expected_regex="junk is invalid"):
            sample.Sample(backend='junk')

    def test_sample_invalid_type_inputs(self):
        """Test that sample rejects bad inputs."""
        sampler = sample.Sample()
        with self.assertRaisesRegex(
                TypeError, expected_regex="circuits cannot be parsed"):
            sampler('junk_circuit', repetitions=10)

        with self.assertRaisesRegex(
                TypeError, expected_regex="symbol_values cannot be parsed"):
            sampler(cirq.Circuit(), symbol_values='junk', repetitions=10)

        with self.assertRaisesRegex(
                TypeError, expected_regex="symbol_names cannot be parsed"):
            sampler(cirq.Circuit(),
                    symbol_values=[],
                    symbol_names='junk',
                    repetitions=10)

        with self.assertRaisesRegex(TypeError,
                                    expected_regex="Cannot convert"):
            sampler(cirq.Circuit(),
                    symbol_values=[['bad']],
                    symbol_names=['name'],
                    repetitions=10)

        with self.assertRaisesRegex(TypeError,
                                    expected_regex="must be a string."):
            sampler(cirq.Circuit(),
                    symbol_values=[[0.5]],
                    symbol_names=[0.33333],
                    repetitions=10)

        with self.assertRaisesRegex(ValueError,
                                    expected_regex="must be unique."):
            sampler(cirq.Circuit(),
                    symbol_values=[[0.5]],
                    symbol_names=['duplicate', 'duplicate'],
                    repetitions=10)

        with self.assertRaisesRegex(
                ValueError, expected_regex="repetitions not specified"):
            sampler(cirq.Circuit())

        with self.assertRaisesRegex(ValueError,
                                    expected_regex="greater than zero"):
            sampler(cirq.Circuit(), repetitions=-1)

        with self.assertRaisesRegex(
                TypeError, expected_regex="cannot be parsed to int32"):
            sampler(cirq.Circuit(), repetitions='junk')

    def test_sample_invalid_shape_inputs(self):
        """Test that sample rejects bad input shapes."""
        sampler = sample.Sample()
        with self.assertRaisesRegex(TypeError,
                                    expected_regex="string or sympy.Symbol"):
            sampler(cirq.Circuit(),
                    symbol_values=[[0.5]],
                    symbol_names=[[]],
                    repetitions=10)

        with self.assertRaisesRegex(ValueError,
                                    expected_regex="rank 2 but is rank 1"):
            sampler(cirq.Circuit(),
                    symbol_values=[0.5],
                    symbol_names=['name'],
                    repetitions=10)

        with self.assertRaisesRegex(ValueError,
                                    expected_regex="rank 1 but is rank 2"):
            sampler([[cirq.Circuit()]],
                    symbol_values=[[0.5]],
                    symbol_names=['name'],
                    repetitions=10)

        with self.assertRaisesRegex(
                TypeError, expected_regex="cannot be parsed to int32 tensor"):
            sampler([cirq.Circuit()], repetitions=[10])

    @parameterized.parameters([{
        'backend': None
    }, {
        'backend': cirq.Simulator()
    }, {
        'backend': cirq.DensityMatrixSimulator()
    }])
    def test_sample_invalid_combinations(self, backend):
        """Test with valid type inputs and valid value, but incorrect combo."""
        sampler = sample.Sample(backend)
        symbol = sympy.Symbol('alpha')
        circuit = cirq.Circuit(cirq.H(cirq.GridQubit(0, 0))**symbol)
        with self.assertRaisesRegex(Exception, expected_regex=""):
            # no value provided.
            sampler([circuit, circuit], symbol_names=[symbol], repetitions=5)

        with self.assertRaisesRegex(Exception, expected_regex=""):
            # no name provided.
            sampler([circuit, circuit],
                    symbol_names=[],
                    symbol_values=[[2.0], [3.0]],
                    repetitions=5)

        with self.assertRaisesRegex(Exception, expected_regex=""):
            # Deceptive, but the circuit shouldn't be in a list; otherwise fine.
            sampler([circuit],
                    symbol_names=['alpha'],
                    symbol_values=[[2.0], [3.0]],
                    repetitions=5)

        with self.assertRaisesRegex(Exception, expected_regex=""):
            # wrong symbol name.
            sampler([circuit],
                    symbol_names=['alphaaaa'],
                    symbol_values=[[2.0], [3.0]],
                    repetitions=5)

        with self.assertRaisesRegex(Exception, expected_regex=""):
            # too many symbol values provided.
            sampler(circuit,
                    symbol_names=['alpha'],
                    symbol_values=[[2.0, 4.0], [3.0, 5.0]],
                    repetitions=5)

    def test_sample_basic_inputs(self):
        """Test that sample ingests inputs correctly in simple settings."""
        sampler = sample.Sample()
        sampler(cirq.Circuit(), repetitions=10)
        sampler([cirq.Circuit()], repetitions=10)
        sampler(cirq.Circuit(),
                symbol_names=['name'],
                symbol_values=[[0.5]],
                repetitions=10)
        sampler(cirq.Circuit(),
                symbol_names=[sympy.Symbol('name')],
                symbol_values=[[0.5]],
                repetitions=10)

    def test_sample_outputs_simple(self):
        """Test the simplest call where nothing but circuits are provided."""
        sampler = sample.Sample()
        circuit = cirq.Circuit(cirq.H(cirq.GridQubit(0, 0)))
        output = sampler([circuit, circuit], repetitions=5)
        self.assertShapeEqual(np.empty((2, 5, 1)), output.to_tensor())

    # TODO(trevormccrt): add QuantumEngineSampler to this once it is available
    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(backend=[
                None, cirq.Simulator(),
                cirq.DensityMatrixSimulator()
            ],
                                          all_n_qubits=[[3], [8], [3, 4],
                                                        [3, 4, 10]],
                                          n_samples=[1, 10, 100],
                                          symbol_names=[[], ['a', 'b']])))
    def test_sample_output(self, backend, all_n_qubits, n_samples,
                           symbol_names):
        """Test that expected output format is preserved.

        Check that any pre- or post-processing done inside the layer does not
        cause its output to deviate structurally from what is expected.
        """
        sampler = sample.Sample(backend=backend)
        bits = cirq.GridQubit.rect(1, max(all_n_qubits))
        programs = []
        expected_outputs = []
        for n_qubits in all_n_qubits:
            programs.append(cirq.Circuit(*cirq.X.on_each(*bits[0:n_qubits])))
            expected_outputs.append([[1] * n_qubits for _ in range(n_samples)])
        symbol_values = np.random.random(
            (len(all_n_qubits), len(symbol_names)))
        layer_output = sampler(programs,
                               symbol_names=symbol_names,
                               symbol_values=symbol_values,
                               repetitions=n_samples).to_list()
        self.assertEqual(expected_outputs, layer_output)
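The shape assertions above imply that the Sample layer returns a tf.RaggedTensor indexed as [circuit, repetition, qubit]. Below is a minimal usage sketch consistent with test_sample_outputs_simple and test_sample_output; the printed values assume the deterministic X circuit shown, while other circuits give random bitstrings.

bit = cirq.GridQubit(0, 0)
flip = cirq.Circuit(cirq.X(bit))
sampler = sample.Sample()

# Two circuits, five repetitions, one measured qubit: the ragged output
# converts to a dense tensor of shape (2, 5, 1), and every sample is 1
# because X deterministically flips |0> to |1>.
samples = sampler([flip, flip], repetitions=5)
print(samples.to_tensor().shape)  # (2, 5, 1)
print(samples.to_list())  # [[[1], [1], [1], [1], [1]], [[1], [1], [1], [1], [1]]]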
Example #17
class SampledGradientCorrectnessTest(tf.test.TestCase, parameterized.TestCase):
    """Test approximate correctness to analytical methods."""
    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(
                **{
                    'differentiator': SAMPLED_DIFFS,
                    'op': SAMPLED_OPS,
                    'num_samples': [10000]
                })))
    def test_sampled_value_with_simple_circuit(self, differentiator, op,
                                               num_samples):
        """Test the value of sampled differentiator with simple circuit."""
        # Get an expectation op, with this differentiator attached.
        differentiator.refresh()
        op = differentiator.generate_differentiable_op(sampled_op=op)
        qubit = cirq.GridQubit(0, 0)
        circuit = util.convert_to_tensor(
            [cirq.Circuit(cirq.X(qubit)**sympy.Symbol('alpha'))])
        psums = util.convert_to_tensor([[cirq.Z(qubit)]])
        symbol_values_array = np.array([[0.123]], dtype=np.float32)
        # Calculate tfq gradient.
        symbol_values_tensor = tf.convert_to_tensor(symbol_values_array)
        with tf.GradientTape() as g:
            g.watch(symbol_values_tensor)
            expectations = op(circuit, tf.convert_to_tensor(['alpha']),
                              symbol_values_tensor, psums,
                              tf.convert_to_tensor([[num_samples]]))
        grads = g.gradient(expectations, symbol_values_tensor)
        ground_truth_grads = np.array([[-1.1839752]])
        self.assertAllClose(ground_truth_grads, grads, rtol=0.2, atol=0.2)

    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(
                **{
                    'diff_and_tol': zip(SAMPLED_DIFFS, SAMPLED_DIFFS_TOLS),
                    'op': SAMPLED_OPS,
                    'n_qubits': [3],
                    'n_programs': [5],
                    'n_ops': [2],
                    'symbol_names': [['a', 'b']],
                    'num_samples': [30000]
                })))
    def test_approx_equality_shallow(self, diff_and_tol, op, n_qubits,
                                     symbol_names, n_ops, n_programs,
                                     num_samples):
        """Test small circuits with limited depth."""
        differentiator, tol = diff_and_tol
        differentiator.refresh()
        op = differentiator.generate_differentiable_op(sampled_op=op)

        qubits = cirq.GridQubit.rect(1, n_qubits)
        circuit_batch, resolver_batch = \
            util.random_symbol_circuit_resolver_batch(
                cirq.GridQubit.rect(1, n_qubits), symbol_names, n_programs)

        # Prepare random pauli sums and add initial superposition gates.
        psums = []
        for i in range(len(circuit_batch)):
            psums.append(util.random_pauli_sums(qubits, 1, n_ops))
            circuit_batch[i] = cirq.Circuit(
                cirq.H.on_each(qubits)) + circuit_batch[i]

        symbol_values_array = np.array(
            [[resolver[symbol] for symbol in symbol_names]
             for resolver in resolver_batch],
            dtype=np.float32)

        # calculate tfq gradient
        symbol_values_tensor = tf.convert_to_tensor(symbol_values_array)
        programs = util.convert_to_tensor(circuit_batch)
        ops = util.convert_to_tensor(psums)
        with tf.GradientTape() as g:
            g.watch(symbol_values_tensor)
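            # Sampled expectation ops take a per-(program, op) repetitions
            # tensor, so num_samples is tiled out to shape [n_programs, n_ops].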
            expectations = op(
                programs, tf.convert_to_tensor(symbol_names),
                symbol_values_tensor, ops,
                tf.convert_to_tensor([[num_samples] * n_ops] * n_programs))
        tfq_grads = g.gradient(expectations, symbol_values_tensor)

        # calculate gradients in cirq using a very simple forward differencing
        # scheme
        cirq_grads = _cirq_simple_finite_difference(circuit_batch,
                                                    resolver_batch,
                                                    symbol_names, psums)

        self.assertAllClose(cirq_grads, tfq_grads, rtol=tol, atol=tol)

    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(**{
                'differentiator': SAMPLED_DIFFS,
                'op': SAMPLED_OPS,
            })))
    def test_empty_circuit_sampled_grad(self, differentiator, op):
        """Test that providing no circuits will fail gracefully."""
        differentiator.refresh()
        op = differentiator.generate_differentiable_op(sampled_op=op)
        circuit = tf.convert_to_tensor([], dtype=tf.string)
        psums = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.string)

        # Calculate tfq gradient.
        symbol_values_tensor = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.float32)
        symbol_names_tensor = tf.convert_to_tensor([], dtype=tf.string)
        n_samples_tensor = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.int32)
        with tf.GradientTape() as g:
            g.watch(symbol_values_tensor)
            expectations = op(circuit, symbol_names_tensor,
                              symbol_values_tensor, psums, n_samples_tensor)
        grads = g.gradient(expectations, symbol_values_tensor)
        self.assertShapeEqual(grads.numpy(),
                              tf.raw_ops.Empty(shape=(0, 0), dtype=tf.float32))
Example #18
class LinearCombinationTest(tf.test.TestCase, parameterized.TestCase):
    """Test the LinearCombination based Differentiators."""
    def test_linear_combination_instantiate(self):
        """Test LinearCombinationDifferentiator type checking."""
        linear_combination.LinearCombination([1, 1], [1, 0])
        with self.assertRaisesRegex(TypeError,
                                    expected_regex="weights must be"):
            linear_combination.LinearCombination("junk", [1, 0])
        with self.assertRaisesRegex(TypeError,
                                    expected_regex="perturbations must be"):
            linear_combination.LinearCombination([1, 1], "junk")
        with self.assertRaisesRegex(TypeError,
                                    expected_regex="weight in weights"):
            linear_combination.LinearCombination([1, "junk"], [1, 0])
        with self.assertRaisesRegex(
                TypeError, expected_regex="perturbation in perturbations"):
            linear_combination.LinearCombination([1, 1], [1, "junk"])
        with self.assertRaisesRegex(ValueError, expected_regex="length"):
            linear_combination.LinearCombination([1, 1, 1], [1, 0])
        with self.assertRaisesRegex(ValueError, expected_regex="unique"):
            linear_combination.LinearCombination([1, 1], [1, 1])

    def test_no_gradient_circuits(self):
        """Confirm LinearCombination differentiator has no gradient circuits."""
        dif = linear_combination.LinearCombination([1, 1], [1, 0])
        with self.assertRaisesRegex(NotImplementedError,
                                    expected_regex="not currently available"):
            _ = dif.get_gradient_circuits(None, None, None)

    def test_forward_instantiate(self):
        """Test ForwardDifference type checking."""
        linear_combination.ForwardDifference()
        linear_combination.ForwardDifference(1, 0.1)
        with self.assertRaisesRegex(ValueError,
                                    expected_regex="positive integer"):
            linear_combination.ForwardDifference(0.1, 0.1)
        with self.assertRaisesRegex(ValueError,
                                    expected_regex="positive integer"):
            linear_combination.ForwardDifference(-1, 0.1)
        with self.assertRaisesRegex(ValueError,
                                    expected_regex="positive integer"):
            linear_combination.ForwardDifference(0, 0.1)
        with self.assertRaisesRegex(ValueError, expected_regex="grid_spacing"):
            linear_combination.ForwardDifference(1, -0.1)
        with self.assertRaisesRegex(ValueError, expected_regex="grid_spacing"):
            linear_combination.ForwardDifference(1, 1j)

    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(
                **{
                    'order_coef_perturbs': [(1, (-1, 1), (
                        0, 1)), (2, (-3 / 2, 2, -1 / 2), (0, 1, 2))],
                    'grid_spacing': [0.1, 0.01, 0.5, 1, 0.05]
                })))
    def test_forward_coefficients(self, order_coef_perturbs, grid_spacing):
        """Test that ForwardDifference produces the right coefficients for
        common first and second order cases."""
        order = order_coef_perturbs[0]
        expected_std_coeffs = order_coef_perturbs[1]
        expected_perturbations = order_coef_perturbs[2]
        forward = linear_combination.ForwardDifference(order, grid_spacing)
        self.assertAllClose(
            np.array(expected_std_coeffs) / grid_spacing, forward.weights)
        self.assertAllClose(
            np.array(expected_perturbations) * grid_spacing,
            forward.perturbations)

    def test_central_instantiate(self):
        """Test CentralDifference type checking."""
        linear_combination.CentralDifference()
        linear_combination.CentralDifference(2, 0.1)
        with self.assertRaisesRegex(ValueError,
                                    expected_regex="positive, even"):
            linear_combination.CentralDifference(0.1, 0.1)
        with self.assertRaisesRegex(ValueError,
                                    expected_regex="positive, even"):
            linear_combination.CentralDifference(-1, 0.1)
        with self.assertRaisesRegex(ValueError,
                                    expected_regex="positive, even"):
            linear_combination.CentralDifference(0, 0.1)
        with self.assertRaisesRegex(ValueError,
                                    expected_regex="positive, even"):
            linear_combination.CentralDifference(1, 0.1)
        with self.assertRaisesRegex(ValueError, expected_regex="grid_spacing"):
            linear_combination.CentralDifference(2, -0.1)
        with self.assertRaisesRegex(ValueError, expected_regex="grid_spacing"):
            linear_combination.CentralDifference(2, 1j)

    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(
                **{
                    'order_coef_perturbs': [(2, (-1 / 2, 1 / 2), (-1, 1)),
                                            (4, (1 / 12, -8 / 12, 8 / 12,
                                                 -1 / 12), (-2, -1, 1, 2))],
                    'grid_spacing': [0.1, 0.01, 0.5, 1, 0.05]
                })))
    def test_central_coefficients(self, order_coef_perturbs, grid_spacing):
        """Test that CentralDifference produces the right coefficients for
        common first and second order cases."""
        order = order_coef_perturbs[0]
        expected_std_coeffs = order_coef_perturbs[1]
        expected_perturbations = order_coef_perturbs[2]
        central = linear_combination.CentralDifference(order, grid_spacing)
        self.assertAllClose(
            np.array(expected_std_coeffs) / grid_spacing, central.weights)
        self.assertAllClose(
            np.array(expected_perturbations) * grid_spacing,
            central.perturbations)

    @parameterized.parameters([{
        'diff': linear_combination.ForwardDifference()
    }, {
        'diff': linear_combination.CentralDifference()
    }])
    def test_analytic_functional(self, diff):
        """Test that the differentiate_analytic function WORKS."""
        differentiable_op = diff.generate_differentiable_op(
            analytic_op=circuit_execution_ops.get_expectation_op())
        circuit, names, values, ops, _, true_f, true_g = _simple_op_inputs()
        with tf.GradientTape() as g:
            g.watch(values)
            res = differentiable_op(circuit, names, values, ops)

        # Check the value and gradient against the reference results
        # within loose tolerances.
        self.assertAllClose(true_f, res, atol=1e-2, rtol=1e-2)
        self.assertAllClose(true_g,
                            g.gradient(res, values),
                            atol=1e-2,
                            rtol=1e-2)

    @parameterized.parameters([{
        'diff': linear_combination.ForwardDifference()
    }, {
        'diff': linear_combination.CentralDifference()
    }])
    def test_sampled_functional(self, diff):
        """Test that the differentiate_sampled function WORKS."""
        differentiable_op = diff.generate_differentiable_op(
            sampled_op=circuit_execution_ops.get_sampled_expectation_op())
        circuit, names, values, ops, n_samples, true_f, true_g = \
            _simple_op_inputs()
        with tf.GradientTape() as g:
            g.watch(values)
            res = differentiable_op(circuit, names, values, ops, n_samples)

        # Check the value and gradient against the reference results
        # within loose tolerances.
        self.assertAllClose(true_f, res, atol=1e-1, rtol=1e-1)
        self.assertAllClose(true_g,
                            g.gradient(res, values),
                            atol=1e-1,
                            rtol=1e-1)
class LinearCombinationTest(tf.test.TestCase, parameterized.TestCase):
    """Test the LinearCombination based Differentiators."""
    def test_linear_combination_instantiate(self):
        """Test LinearCombinationDifferentiator type checking."""
        linear_combination.LinearCombination([1, 1], [1, 0])
        with self.assertRaisesRegex(TypeError,
                                    expected_regex="weights must be"):
            linear_combination.LinearCombination("junk", [1, 0])
        with self.assertRaisesRegex(TypeError,
                                    expected_regex="perturbations must be"):
            linear_combination.LinearCombination([1, 1], "junk")
        with self.assertRaisesRegex(TypeError,
                                    expected_regex="weight in weights"):
            linear_combination.LinearCombination([1, "junk"], [1, 0])
        with self.assertRaisesRegex(
                TypeError, expected_regex="perturbation in perturbations"):
            linear_combination.LinearCombination([1, 1], [1, "junk"])
        with self.assertRaisesRegex(ValueError, expected_regex="length"):
            linear_combination.LinearCombination([1, 1, 1], [1, 0])
        with self.assertRaisesRegex(ValueError, expected_regex="at least two"):
            linear_combination.LinearCombination([1], [1])
        with self.assertRaisesRegex(ValueError, expected_regex="unique"):
            linear_combination.LinearCombination([1, 1], [1, 1])

    def test_forward_instantiate(self):
        """Test ForwardDifference type checking."""
        linear_combination.ForwardDifference()
        linear_combination.ForwardDifference(1, 0.1)
        with self.assertRaisesRegex(ValueError,
                                    expected_regex="positive integer"):
            linear_combination.ForwardDifference(0.1, 0.1)
        with self.assertRaisesRegex(ValueError,
                                    expected_regex="positive integer"):
            linear_combination.ForwardDifference(-1, 0.1)
        with self.assertRaisesRegex(ValueError,
                                    expected_regex="positive integer"):
            linear_combination.ForwardDifference(0, 0.1)
        with self.assertRaisesRegex(ValueError, expected_regex="grid_spacing"):
            linear_combination.ForwardDifference(1, -0.1)
        with self.assertRaisesRegex(ValueError, expected_regex="grid_spacing"):
            linear_combination.ForwardDifference(1, 1j)

    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(
                **{
                    'order_coef_perturbs': [(1, (-1, 1), (
                        0, 1)), (2, (-3 / 2, 2, -1 / 2), (0, 1, 2))],
                    'grid_spacing': [0.1, 0.01, 0.5, 1, 0.05]
                })))
    def test_forward_coefficients(self, order_coef_perturbs, grid_spacing):
        """Test that ForwardDifference produces the right coefficients for
        common first and second order cases."""
        order = order_coef_perturbs[0]
        expected_std_coeffs = order_coef_perturbs[1]
        expected_perturbations = order_coef_perturbs[2]
        forward = linear_combination.ForwardDifference(order, grid_spacing)
        self.assertAllClose(
            np.array(expected_std_coeffs) / grid_spacing, forward.weights)
        self.assertAllClose(
            np.array(expected_perturbations) * grid_spacing,
            forward.perturbations)

    def test_central_instantiate(self):
        """Test CentralDifference type checking."""
        linear_combination.CentralDifference()
        linear_combination.CentralDifference(2, 0.1)
        with self.assertRaisesRegex(ValueError,
                                    expected_regex="positive, even"):
            linear_combination.CentralDifference(0.1, 0.1)
        with self.assertRaisesRegex(ValueError,
                                    expected_regex="positive, even"):
            linear_combination.CentralDifference(-1, 0.1)
        with self.assertRaisesRegex(ValueError,
                                    expected_regex="positive, even"):
            linear_combination.CentralDifference(0, 0.1)
        with self.assertRaisesRegex(ValueError,
                                    expected_regex="positive, even"):
            linear_combination.CentralDifference(1, 0.1)
        with self.assertRaisesRegex(ValueError, expected_regex="grid_spacing"):
            linear_combination.CentralDifference(2, -0.1)
        with self.assertRaisesRegex(ValueError, expected_regex="grid_spacing"):
            linear_combination.CentralDifference(2, 1j)

    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(
                **{
                    'order_coef_perturbs': [(2, (-1 / 2, 1 / 2), (-1, 1)),
                                            (4, (1 / 12, -8 / 12, 8 / 12,
                                                 -1 / 12), (-2, -1, 1, 2))],
                    'grid_spacing': [0.1, 0.01, 0.5, 1, 0.05]
                })))
    def test_central_coefficients(self, order_coef_perturbs, grid_spacing):
        """Test that CentralDifference produces the right coefficients for
        common first and second order cases."""
        order = order_coef_perturbs[0]
        expected_std_coeffs = order_coef_perturbs[1]
        expected_perturbations = order_coef_perturbs[2]
        central = linear_combination.CentralDifference(order, grid_spacing)
        self.assertAllClose(
            np.array(expected_std_coeffs) / grid_spacing, central.weights)
        self.assertAllClose(
            np.array(expected_perturbations) * grid_spacing,
            central.perturbations)

    @parameterized.parameters([{
        'diff': linear_combination.ForwardDifference()
    }, {
        'diff': linear_combination.CentralDifference()
    }])
    def test_analytic_functional(self, diff):
        """Test that the differentiate_analytic function WORKS."""
        differentiable_op = diff.generate_differentiable_op(
            analytic_op=circuit_execution_ops.get_expectation_op())
        circuit, names, values, ops, _, true_f, true_g = _simple_op_inputs()
        with tf.GradientTape() as g:
            g.watch(values)
            res = differentiable_op(circuit, names, values, ops)

        # Check the value and gradient against the reference results
        # within loose tolerances.
        self.assertAllClose(true_f, res, atol=1e-2, rtol=1e-2)
        self.assertAllClose(true_g,
                            g.gradient(res, values),
                            atol=1e-2,
                            rtol=1e-2)

    @parameterized.parameters([{
        'diff':
        linear_combination.ForwardDifference(grid_spacing=0.01)
    }, {
        'diff':
        linear_combination.CentralDifference(grid_spacing=0.01)
    }])
    def test_sampled_functional(self, diff):
        """Test that the differentiate_sampled function WORKS."""
        differentiable_op = diff.generate_differentiable_op(
            sampled_op=circuit_execution_ops.get_sampled_expectation_op())
        circuit, names, values, ops, n_samples, true_f, true_g = \
            _simple_op_inputs()
        with tf.GradientTape() as g:
            g.watch(values)
            res = differentiable_op(circuit, names, values, ops, n_samples)

        # Check the value and gradient against the reference results
        # within loose tolerances.
        self.assertAllClose(true_f, res, atol=1e-1, rtol=1e-1)
        self.assertAllClose(true_g,
                            g.gradient(res, values),
                            atol=1e-1,
                            rtol=1e-1)

    def test_get_gradient_circuits(self):
        """Test that the correct objects are returned."""

        # Minimal linear combination.
        input_weights = [1.0, -0.5]
        input_perturbations = [1.0, -1.5]
        diff = linear_combination.LinearCombination(input_weights,
                                                    input_perturbations)

        # Circuits to differentiate.
        symbols = [sympy.Symbol("s0"), sympy.Symbol("s1")]
        q0 = cirq.GridQubit(0, 0)
        q1 = cirq.GridQubit(1, 2)
        input_programs = util.convert_to_tensor([
            cirq.Circuit(cirq.X(q0)**symbols[0],
                         cirq.ry(symbols[1])(q1)),
            cirq.Circuit(cirq.rx(symbols[0])(q0),
                         cirq.Y(q1)**symbols[1]),
        ])
        input_symbol_names = tf.constant([str(s) for s in symbols])
        input_symbol_values = tf.constant([[1.5, -2.7], [-0.3, 0.9]])

        # For each program in the input batch: LinearCombination creates a copy
        # of that program for each symbol; then, for each symbol, the program
        # is copied once per non-zero perturbation; finally, a single extra
        # copy is added if there is a zero perturbation. There is none here,
        # so each program yields 2 symbols * 2 perturbations = 4 copies.
        expected_batch_programs = tf.stack([[input_programs[0]] * 4,
                                            [input_programs[1]] * 4])
        expected_new_symbol_names = input_symbol_names

        # For each program in the input batch: first, the input symbol_values
        # for the program are tiled to the number of copies in the output.
        tiled_symbol_values = tf.stack([[input_symbol_values[0]] * 4,
                                        [input_symbol_values[1]] * 4])
        # Then we create the tensor of perturbations to apply to these symbol
        # values: for each symbol we tile out the non-zero perturbations at that
        # symbol's index, keeping all the other symbol perturbations at zero.
        # Perturbations are the same for each program.
        single_program_perturbations = tf.stack([[input_perturbations[0], 0.0],
                                                 [input_perturbations[1], 0.0],
                                                 [0.0, input_perturbations[0]],
                                                 [0.0,
                                                  input_perturbations[1]]])
        tiled_perturbations = tf.stack(
            [single_program_perturbations, single_program_perturbations])
        # Finally we add the perturbations to the original symbol values.
        expected_batch_symbol_values = tiled_symbol_values + tiled_perturbations

        # The weights for LinearCombination are the same for every program.
        individual_batch_weights = tf.stack(
            [[input_weights[0], input_weights[1]],
             [input_weights[0], input_weights[1]]])
        expected_batch_weights = tf.stack(
            [individual_batch_weights, individual_batch_weights])

        # The mapper selects the expectations.
        single_program_mapper = tf.constant([[0, 1], [2, 3]])
        expected_batch_mapper = tf.tile(
            tf.expand_dims(single_program_mapper, 0), [2, 1, 1])

        (test_batch_programs, test_new_symbol_names, test_batch_symbol_values,
         test_batch_weights, test_batch_mapper) = diff.get_gradient_circuits(
             input_programs, input_symbol_names, input_symbol_values)
        self.assertAllEqual(expected_batch_programs, test_batch_programs)
        self.assertAllEqual(expected_new_symbol_names, test_new_symbol_names)
        self.assertAllClose(expected_batch_symbol_values,
                            test_batch_symbol_values,
                            atol=1e-5)
        self.assertAllClose(expected_batch_weights,
                            test_batch_weights,
                            atol=1e-5)
        self.assertAllEqual(expected_batch_mapper, test_batch_mapper)
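To make the coefficient scaling checked in the ForwardDifference and CentralDifference tests concrete, here is a small standalone sketch. It assumes the same imports as the tests above (numpy as np and the linear_combination module) and follows directly from the first-order forward and second-order central difference stencils.

h = 0.1

# First-order forward difference: f'(x) ~ (f(x + h) - f(x)) / h, so the
# standard coefficients (-1, 1) are divided by h and the perturbations
# (0, 1) are multiplied by h.
forward = linear_combination.ForwardDifference(1, h)
np.testing.assert_allclose(forward.weights, np.array([-1.0, 1.0]) / h)
np.testing.assert_allclose(forward.perturbations, np.array([0.0, 1.0]) * h)

# Second-order central difference: f'(x) ~ (f(x + h) - f(x - h)) / (2 * h).
central = linear_combination.CentralDifference(2, h)
np.testing.assert_allclose(central.weights, np.array([-0.5, 0.5]) / h)
np.testing.assert_allclose(central.perturbations, np.array([-1.0, 1.0]) * h)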