def test_get_repetitions_and_sweeps(get_job):
    """Repetitions and sweeps are recovered from the stored run context proto."""
    engine_job = cg.EngineJob('a', 'b', 'steve', EngineContext())
    run_context = v2.run_context_pb2.RunContext(
        parameter_sweeps=[v2.run_context_pb2.ParameterSweep(repetitions=10)]
    )
    get_job.return_value = qtypes.QuantumJob(run_context=util.pack_any(run_context))
    assert engine_job.get_repetitions_and_sweeps() == (10, [cirq.UnitSweep])
    get_job.assert_called_once_with('a', 'b', 'steve', True)
def _serialize_run_context(
    self,
    sweeps: 'cirq.Sweepable',
    repetitions: int,
) -> any_pb2.Any:
    """Pack sweeps and repetitions into an Any-wrapped v2 RunContext proto.

    Raises:
        ValueError: If this context's proto version is not V2.
    """
    if self.proto_version == ProtoVersion.V2:
        return util.pack_any(v2.run_context_to_proto(sweeps, repetitions))
    raise ValueError(f'invalid run context proto version: {self.proto_version}')
async def create_calibration_program_async(
    self,
    layers: List['cirq_google.CalibrationLayer'],
    program_id: Optional[str] = None,
    description: Optional[str] = None,
    labels: Optional[Dict[str, str]] = None,
) -> engine_program.EngineProgram:
    """Wraps a list of calibration layers into an Any for Quantum Engine.

    Args:
        layers: The calibration routines to execute.  All layers will be
            executed within the same API call in the order specified,
            though some layers may be interleaved together using
            hardware-specific batching.
        program_id: A user-provided identifier for the program. This must
            be unique within the Google Cloud project being used.  If this
            parameter is not provided, a random id of the format
            'calibration-################YYMMDD' will be generated, where
            # is alphanumeric and YYMMDD is the current year, month, and
            day.
        description: An optional description to set on the program.
        labels: Optional set of labels to set on the program.

    Returns:
        A EngineProgram for the newly created program.
    """
    # Fall back to a random, date-stamped id when the caller gave none.
    if not program_id:
        program_id = _make_random_id('calibration-')
    calibration = v2.calibration_pb2.FocusedCalibration()
    for layer in layers:
        new_layer = calibration.layers.add()
        new_layer.calibration_type = layer.calibration_type
        # Convert every layer argument to its proto form in place.
        for arg in layer.args:
            arg_to_proto(layer.args[arg], out=new_layer.args[arg])
        # Serialize the layer's circuit directly into the proto message.
        self.context.serializer.serialize(layer.program, msg=new_layer.layer)
    new_program_id, new_program = await self.context.client.create_program_async(
        self.project_id,
        program_id,
        code=util.pack_any(calibration),
        description=description,
        labels=labels,
    )
    # Tag the program as a calibration so its results are parsed accordingly.
    return engine_program.EngineProgram(
        self.project_id,
        new_program_id,
        self.context,
        new_program,
        result_type=ResultType.Calibration,
    )
def test_get_circuit_v2_unknown_gateset(get_program):
    """get_circuit raises when the stored program names an unknown gate set."""
    program = cg.EngineProgram('a', 'b', EngineContext())
    bad_program = v2.program_pb2.Program(
        language=v2.program_pb2.Language(gate_set="BAD_GATESET")
    )
    get_program.return_value = quantum.QuantumProgram(code=util.pack_any(bad_program))
    with pytest.raises(ValueError, match='BAD_GATESET'):
        program.get_circuit()
def _serialize_program(
    self, program: cirq.AbstractCircuit, serializer: Optional[Serializer] = None
) -> any_pb2.Any:
    """Serialize a circuit into an Any-wrapped proto using the V2 format.

    Raises:
        TypeError: If `program` is not a `cirq.AbstractCircuit`.
        ValueError: If this context's proto version is not V2.
    """
    if not isinstance(program, cirq.AbstractCircuit):
        raise TypeError(f'Unrecognized program type: {type(program)}')
    # Default to the context's own serializer when none was supplied.
    active_serializer = self.serializer if serializer is None else serializer
    if self.proto_version != ProtoVersion.V2:
        raise ValueError(f'invalid program proto version: {self.proto_version}')
    return util.pack_any(active_serializer.serialize(program))
def test_run_delegation(create_job, get_results):
    # EngineProgram.run should delegate job creation to the client and
    # convert the returned proto into an EngineResult that carries the job
    # id and finish time.
    dt = datetime.datetime.now(tz=datetime.timezone.utc)
    create_job.return_value = (
        'steve',
        quantum.QuantumJob(
            name='projects/a/programs/b/jobs/steve',
            execution_status=quantum.ExecutionStatus(
                state=quantum.ExecutionStatus.State.SUCCESS
            ),
            update_time=dt,
        ),
    )
    # Result proto: 4 repetitions, params {a: 1}, measurement key 'q' on
    # qubit (1, 1); result byte 0o6 unpacks to [False, True, True, False].
    get_results.return_value = quantum.QuantumResult(
        result=util.pack_any(
            Merge(
                """sweep_results: [{ repetitions: 4, parameterized_results: [{ params: { assignments: { key: 'a' value: 1 } }, measurement_results: { key: 'q' qubit_measurement_results: [{ qubit: { id: '1_1' } results: '\006' }] } }] }] """,
                v2.result_pb2.Result(),
            )
        )
    )
    program = cg.EngineProgram('a', 'b', EngineContext())
    param_resolver = cirq.ParamResolver({})
    results = program.run(
        job_id='steve',
        repetitions=10,
        param_resolver=param_resolver,
        processor_ids=['mine'],
    )
    assert results == cg.EngineResult(
        params=cirq.ParamResolver({'a': 1.0}),
        measurements={'q': np.array([[False], [True], [True], [False]], dtype=bool)},
        job_id='steve',
        job_finished_time=dt,
    )
def test_get_engine_device(get_processor):
    # A device built from a processor spec plus an explicit gate set should
    # only accept qubits and gates named in that spec / gate set.
    # Spec: one gate set ('test_set') with a single-qubit 'x' gate, qubits
    # 0_0 and 1_1, and a symmetric 1-qubit target set.
    device_spec = util.pack_any(
        Merge(
            """ valid_gate_sets: [{ name: 'test_set', valid_gates: [{ id: 'x', number_of_qubits: 1, gate_duration_picos: 1000, valid_targets: ['1q_targets'] }] }], valid_qubits: ['0_0', '1_1'], valid_targets: [{ name: '1q_targets', target_ordering: SYMMETRIC, targets: [{ ids: ['0_0'] }] }] """,
            v2.device_pb2.DeviceSpecification(),
        ))
    # Gate set that only knows how to (de)serialize XPowGate under id 'x'.
    gate_set = cg.SerializableGateSet(
        gate_set_name='x_gate_set',
        serializers=[
            cg.GateOpSerializer(gate_type=cirq.XPowGate, serialized_gate_id='x', args=[])
        ],
        deserializers=[
            cg.GateOpDeserializer(serialized_gate_id='x', gate_constructor=cirq.XPowGate, args=[])
        ],
    )
    get_processor.return_value = quantum.QuantumProcessor(device_spec=device_spec)
    device = cirq_google.get_engine_device('rainbow', 'project', gatesets=[gate_set])
    assert set(device.qubits) == {cirq.GridQubit(0, 0), cirq.GridQubit(1, 1)}
    device.validate_operation(cirq.X(cirq.GridQubit(0, 0)))
    with pytest.raises(ValueError):
        # (1, 2) is not listed in valid_qubits.
        device.validate_operation(cirq.X(cirq.GridQubit(1, 2)))
    with pytest.raises(ValueError):
        # Y is not part of the serializable gate set.
        device.validate_operation(cirq.Y(cirq.GridQubit(0, 0)))
def create_batch_program(
    self,
    programs: Sequence[cirq.AbstractCircuit],
    program_id: Optional[str] = None,
    gate_set: Optional[Serializer] = None,
    description: Optional[str] = None,
    labels: Optional[Dict[str, str]] = None,
) -> engine_program.EngineProgram:
    """Wraps a list of Circuits into a BatchProgram for the Quantum Engine.

    Args:
        programs: The Circuits to execute within a batch.
        program_id: A user-provided identifier for the program. This must
            be unique within the Google Cloud project being used.  If this
            parameter is not provided, a random id of the format
            'prog-################YYMMDD' will be generated, where # is
            alphanumeric and YYMMDD is the current year, month, and day.
        gate_set: The gate set used to serialize the circuit.  The gate set
            must be supported by the selected processor.  If not provided,
            the context's default serializer is used.
        description: An optional description to set on the program.
        labels: Optional set of labels to set on the program.

    Returns:
        A EngineProgram for the newly created program.
    """
    if not gate_set:
        gate_set = self.context.serializer
    if not program_id:
        program_id = _make_random_id('prog-')
    # Serialize every circuit into one BatchProgram proto.
    batch = v2.batch_pb2.BatchProgram()
    for program in programs:
        gate_set.serialize(program, msg=batch.programs.add())
    new_program_id, new_program = self.context.client.create_program(
        self.project_id,
        program_id,
        code=util.pack_any(batch),
        description=description,
        labels=labels,
    )
    # Mark the program as a batch so results are grouped per circuit.
    return engine_program.EngineProgram(self.project_id, new_program_id,
                                        self.context, new_program,
                                        result_type=ResultType.Batch)
def test_calibration_defaults(get_job_results):
    """An empty CalibrationResult entry maps to default/None fields."""
    qjob = qtypes.QuantumJob(
        execution_status=qtypes.ExecutionStatus(
            state=qtypes.ExecutionStatus.State.SUCCESS
        )
    )
    calibration = v2.calibration_pb2.FocusedCalibrationResult()
    calibration.results.add()
    get_job_results.return_value = qtypes.QuantumResult(
        result=util.pack_any(calibration)
    )

    engine_job = cg.EngineJob('a', 'b', 'steve', EngineContext(), _job=qjob)
    data = engine_job.calibration_results()

    get_job_results.assert_called_once_with('a', 'b', 'steve')
    assert len(data) == 1
    first = data[0]
    assert first.code == v2.calibration_pb2.CALIBRATION_RESULT_UNSPECIFIED
    assert first.error_message is None
    assert first.token is None
    assert first.valid_until is None
    assert len(first.metrics) == 0
def test_run_calibration(client):
    """Processor.run_calibration forwards layers and applies the calibration label."""
    client().create_program.return_value = (
        'prog',
        qtypes.QuantumProgram(name='projects/proj/programs/prog'),
    )
    client().create_job.return_value = (
        'job-id',
        qtypes.QuantumJob(
            name='projects/proj/programs/prog/jobs/job-id',
            execution_status={'state': 'READY'},
        ),
    )
    client().get_job.return_value = qtypes.QuantumJob(
        execution_status={'state': 'SUCCESS'}
    )
    client().get_job_results.return_value = qtypes.QuantumResult(
        result=_CALIBRATION_RESULTS_V2
    )

    qubit_a = cirq.GridQubit(2, 3)
    qubit_b = cirq.GridQubit(2, 4)
    xeb_layer = cg.CalibrationLayer(
        'xeb', cirq.Circuit(cirq.CZ(qubit_a, qubit_b)), {'num_layers': 42}
    )
    readout_layer = cg.CalibrationLayer(
        'readout', cirq.Circuit(cirq.measure(qubit_a, qubit_b)), {'num_samples': 4242}
    )
    processor = cg.EngineProcessor('proj', 'mysim', EngineContext())
    job = processor.run_calibration(
        gate_set=cg.FSIM_GATESET, layers=[xeb_layer, readout_layer], job_id='job-id'
    )
    results = job.calibration_results()

    assert len(results) == 2
    first, second = results
    assert first.code == v2.calibration_pb2.SUCCESS
    assert first.error_message == 'First success'
    assert first.token == 'abc123'
    assert len(first.metrics) == 1
    assert len(first.metrics['fidelity']) == 1
    assert first.metrics['fidelity'][(qubit_a, qubit_b)] == [0.75]
    assert second.code == v2.calibration_pb2.SUCCESS
    assert second.error_message == 'Second success'
    # The created job must carry the special 'calibration' label.
    client().create_job.assert_called_once_with(
        project_id='proj',
        program_id='prog',
        job_id='job-id',
        processor_ids=['mysim'],
        run_context=util.pack_any(v2.run_context_pb2.RunContext()),
        description=None,
        labels={'calibration': ''},
    )
def test_get_engine_device(get_processor):
    # Device built from a grid-device spec (no explicit gate sets): qubits
    # 0_0/1_1/2_2, CZ allowed only on the (0_0, 1_1) pair, and PhasedXZ as
    # the single-qubit gate.
    device_spec = util.pack_any(
        Merge(
            """ valid_qubits: "0_0" valid_qubits: "1_1" valid_qubits: "2_2" valid_targets { name: "2_qubit_targets" target_ordering: SYMMETRIC targets { ids: "0_0" ids: "1_1" } } valid_gates { gate_duration_picos: 1000 cz { } } valid_gates { phased_xz { } } """,
            v2.device_pb2.DeviceSpecification(),
        ))
    get_processor.return_value = quantum.QuantumProcessor(device_spec=device_spec)
    device = cirq_google.get_engine_device('rainbow', 'project')
    assert device.metadata.qubit_set == frozenset(
        [cirq.GridQubit(0, 0), cirq.GridQubit(1, 1), cirq.GridQubit(2, 2)])
    # X decomposes into the allowed PhasedXZ family.
    device.validate_operation(cirq.X(cirq.GridQubit(2, 2)))
    device.validate_operation(
        cirq.CZ(cirq.GridQubit(0, 0), cirq.GridQubit(1, 1)))
    with pytest.raises(ValueError):
        # (1, 2) is not a valid qubit in the spec.
        device.validate_operation(cirq.X(cirq.GridQubit(1, 2)))
    with pytest.raises(ValueError):
        # H is not in the device's gate families.
        device.validate_operation(cirq.H(cirq.GridQubit(0, 0)))
    with pytest.raises(ValueError):
        # (1_1, 2_2) is not a listed two-qubit target pair.
        device.validate_operation(
            cirq.CZ(cirq.GridQubit(1, 1), cirq.GridQubit(2, 2)))
def test_run_sweep_params(client):
    """run_sweep with a resolver list produces one parameter sweep per resolver."""
    client().create_program.return_value = (
        'prog',
        quantum.QuantumProgram(name='projects/proj/programs/prog'),
    )
    client().create_job.return_value = (
        'job-id',
        quantum.QuantumJob(
            name='projects/proj/programs/prog/jobs/job-id',
            execution_status={'state': 'READY'},
        ),
    )
    client().get_job.return_value = quantum.QuantumJob(
        execution_status={'state': 'SUCCESS'},
        update_time=_to_timestamp('2019-07-09T23:39:59Z'),
    )
    client().get_job_results.return_value = quantum.QuantumResult(
        result=util.pack_any(_RESULTS_V2)
    )

    processor = cg.EngineProcessor('a', 'p', EngineContext())
    resolvers = [cirq.ParamResolver({'a': 1}), cirq.ParamResolver({'a': 2})]
    job = processor.run_sweep(program=_CIRCUIT, params=resolvers)
    results = job.results()

    assert len(results) == 2
    for index, expected in enumerate([1, 2]):
        assert results[index].repetitions == 1
        assert results[index].params.param_dict == {'a': expected}
        assert results[index].measurements == {'q': np.array([[0]], dtype='uint8')}
    for result in results:
        assert result.job_id == job.id()
        assert result.job_finished_time is not None
    # Results must round-trip through JSON serialization.
    assert results == cirq.read_json(json_text=cirq.to_json(results))

    client().create_program.assert_called_once()
    client().create_job.assert_called_once()
    # Unpack the run context actually sent and verify one sweep per resolver.
    run_context = v2.run_context_pb2.RunContext()
    client().create_job.call_args[1]['run_context'].Unpack(run_context)
    sweeps = run_context.parameter_sweeps
    assert len(sweeps) == 2
    for index, point in enumerate([1.0, 2.0]):
        assert sweeps[index].repetitions == 1
        assert sweeps[index].sweep.sweep_function.sweeps[
            0].single_sweep.points.points == [point]
    client().get_job.assert_called_once()
    client().get_job_results.assert_called_once()
def test_default_gate_sets():
    """Known devices get standard gate sets; unknown specs still build a device."""
    # Sycamore should come with valid gate sets by default.
    sycamore_processor = cg.EngineProcessor(
        'a',
        'p',
        EngineContext(),
        _processor=quantum.QuantumProcessor(
            device_spec=util.pack_any(known_devices.SYCAMORE_PROTO)
        ),
    )
    sycamore_processor.get_device().validate_operation(cirq.X(cirq.GridQubit(5, 4)))
    # A device with no standard gatesets should not blow up.
    plain_processor = cg.EngineProcessor(
        'a',
        'p',
        EngineContext(),
        _processor=quantum.QuantumProcessor(device_spec=_DEVICE_SPEC),
    )
    plain_device = plain_processor.get_device()
    assert plain_device.qubits == [cirq.GridQubit(0, 0), cirq.GridQubit(1, 1)]
def test_run_circuit(client):
    # Engine.run should return the converted result and forward service_args
    # to the client constructor.
    setup_run_circuit_with_result_(client, _A_RESULT)
    engine = cg.Engine(project_id='proj', service_args={'client_info': 1})
    result = engine.run(
        program=_CIRCUIT,
        program_id='prog',
        job_id='job-id',
        processor_ids=['mysim'],
    )
    assert result.repetitions == 1
    assert result.params.param_dict == {'a': 1}
    assert result.measurements == {'q': np.array([[0]], dtype='uint8')}
    client.assert_called_with(service_args={'client_info': 1}, verbose=None)
    # NOTE(review): `called_once_with` is not a Mock assertion method — the
    # four calls below merely auto-create mock attributes and always "pass",
    # so none of the argument checks are actually enforced.  They also hang
    # off `client.<method>` rather than `client().<method>` as used by the
    # real assertions elsewhere in this file, and the expected arguments
    # ('projects/project-id/programs/test', etc.) do not match this test's
    # ids.  Rewriting them as real `assert_called_once_with` checks should
    # be confirmed against the engine client API before changing them.
    client.create_program.called_once_with()
    client.create_job.called_once_with(
        'projects/project-id/programs/test',
        qtypes.QuantumJob(
            name='projects/project-id/programs/test/jobs/job-id',
            scheduling_config={
                'priority': 50,
                'processor_selector': {
                    'processor_names': ['projects/project-id/processors/mysim']
                },
            },
            run_context=util.pack_any(
                v2.run_context_pb2.RunContext(parameter_sweeps=[
                    v2.run_context_pb2.ParameterSweep(repetitions=1)
                ])),
        ),
        False,
    )
    client.get_job.called_once_with('proj', 'prog')
    client.get_job_result.called_once_with()
def test_run_calibration(client):
    """Engine.run_calibration submits layers and tags the job as a calibration."""
    setup_run_circuit_with_result_(client, _CALIBRATION_RESULTS_V2)
    engine = cg.Engine(
        project_id='proj',
        proto_version=cg.engine.engine.ProtoVersion.V2,
    )
    qubit_a = cirq.GridQubit(2, 3)
    qubit_b = cirq.GridQubit(2, 4)
    xeb_layer = cg.CalibrationLayer(
        'xeb', cirq.Circuit(cirq.CZ(qubit_a, qubit_b)), {'num_layers': 42}
    )
    readout_layer = cg.CalibrationLayer(
        'readout', cirq.Circuit(cirq.measure(qubit_a, qubit_b)), {'num_samples': 4242}
    )
    job = engine.run_calibration(
        layers=[xeb_layer, readout_layer], job_id='job-id', processor_id='mysim'
    )
    results = job.calibration_results()

    assert len(results) == 2
    first, second = results
    assert first.code == v2.calibration_pb2.SUCCESS
    assert first.error_message == 'First success'
    assert first.token == 'abc123'
    assert len(first.metrics) == 1
    assert len(first.metrics['fidelity']) == 1
    assert first.metrics['fidelity'][(qubit_a, qubit_b)] == [0.75]
    assert second.code == v2.calibration_pb2.SUCCESS
    assert second.error_message == 'Second success'
    # The created job must carry the special 'calibration' label.
    client().create_job.assert_called_once_with(
        project_id='proj',
        program_id='prog',
        job_id='job-id',
        processor_ids=['mysim'],
        run_context=util.pack_any(v2.run_context_pb2.RunContext()),
        description=None,
        labels={'calibration': ''},
    )
def _set_get_processor_return(get_processor):  # from engine_test.py
    """Point the mocked get_processor at a processor with a canned device spec."""
    # Local imports keep this helper self-contained when shared between
    # test modules.
    from google.protobuf.text_format import Merge
    from cirq_google.api import v2
    from cirq_google.engine import util
    from cirq_google.engine.client.quantum_v1alpha1 import types as qtypes

    # Spec: one gate set ('test_set') with a single-qubit 'x' gate, qubits
    # 0_0 and 1_1, and a symmetric 1-qubit target set.
    device_spec = util.pack_any(
        Merge(
            """ valid_gate_sets: [{ name: 'test_set', valid_gates: [{ id: 'x', number_of_qubits: 1, gate_duration_picos: 1000, valid_targets: ['1q_targets'] }] }], valid_qubits: ['0_0', '1_1'], valid_targets: [{ name: '1q_targets', target_ordering: SYMMETRIC, targets: [{ ids: ['0_0'] }] }] """,
            v2.device_pb2.DeviceSpecification(),
        ))
    get_processor.return_value = qtypes.QuantumProcessor(device_spec=device_spec)
    # Returned for call-chaining convenience in tests.
    return get_processor
async def run_calibration_async(
    self,
    job_id: Optional[str] = None,
    processor_ids: Sequence[str] = (),
    description: Optional[str] = None,
    labels: Optional[Dict[str, str]] = None,
) -> engine_job.EngineJob:
    """Runs layers of calibration routines on the Quantum Engine.

    This method should only be used if the Program object was created
    with a `FocusedCalibration`.  It does not block until a result is
    returned; no results will be available until all calibration
    routines complete.

    Args:
        job_id: Optional job id to use.  If this is not provided, a
            random id of the format 'calibration-################YYMMDD'
            will be generated, where # is alphanumeric and YYMMDD is the
            current year, month, and day.
        processor_ids: The engine processors that should be candidates to
            run the program.  Only one of these will be scheduled for
            execution.
        description: An optional description to set on the job.
        labels: Optional set of labels to set on the job.

    Returns:
        An EngineJob.  Results can be accessed with calibration_results().

    Raises:
        ValueError: If no processors are specified.
    """
    import cirq_google.engine.engine as engine_base

    if not job_id:
        job_id = engine_base._make_random_id('calibration-')
    if not processor_ids:
        raise ValueError('No processors specified')

    # Quantum Engine currently requires a valid type url on a run context
    # in order to succeed validation, so pack an empty default RunContext.
    empty_context = util.pack_any(v2.run_context_pb2.RunContext())
    created_job_id, job = await self.context.client.create_job_async(
        project_id=self.project_id,
        program_id=self.program_id,
        job_id=job_id,
        processor_ids=processor_ids,
        run_context=empty_context,
        description=description,
        labels=labels,
    )
    return engine_job.EngineJob(
        self.project_id,
        self.program_id,
        created_job_id,
        self.context,
        job,
        result_type=ResultType.Batch,
    )
async def run_batch_async(
    self,
    job_id: Optional[str] = None,
    params_list: Optional[List[cirq.Sweepable]] = None,
    repetitions: int = 1,
    processor_ids: Sequence[str] = (),
    description: Optional[str] = None,
    labels: Optional[Dict[str, str]] = None,
) -> engine_job.EngineJob:
    """Runs a batch of circuits on the QuantumEngine.

    This method should only be used if the Program object was created
    with a BatchProgram.  The number of parameter sweeps should match
    the number of circuits within that BatchProgram.

    This method does not block until a result is returned.  However,
    no results will be available until the entire batch is complete.

    Args:
        job_id: Optional job id to use.  If this is not provided, a
            random id of the format 'job-################YYMMDD' will be
            generated, where # is alphanumeric and YYMMDD is the current
            year, month, and day.
        params_list: Parameter sweeps to run with the program.  There
            must be one Sweepable object for each circuit in the batch.
            If this is None, it is assumed that the circuits are not
            parameterized and do not require sweeps.
        repetitions: The number of circuit repetitions to run.
        processor_ids: The engine processors that should be candidates to
            run the program.  Only one of these will be scheduled for
            execution.
        description: An optional description to set on the job.
        labels: Optional set of labels to set on the job.

    Returns:
        An EngineJob.  If this is iterated over it returns a list of
        TrialResults.  All TrialResults for the first circuit are listed
        first, then the TrialResults for the second, etc.  The
        TrialResults for a circuit are listed in the order imposed by the
        associated parameter sweep.

    Raises:
        ValueError: If the program was not a batch program or no
            processors were supplied.
    """
    import cirq_google.engine.engine as engine_base

    if self.result_type != ResultType.Batch:
        raise ValueError('Can only use run_batch() in batch mode.')
    # One implicit "no sweep" entry per circuit when none were given.
    if params_list is None:
        params_list = [None] * self.batch_size()
    if not job_id:
        job_id = engine_base._make_random_id('job-')
    if not processor_ids:
        raise ValueError('No processors specified')

    # Pack the run contexts into batches: every circuit shares the same
    # repetition count but gets its own sweep.
    batch_context = v2.batch_run_context_to_proto(
        (params, repetitions) for params in params_list
    )
    created_job_id, job = await self.context.client.create_job_async(
        project_id=self.project_id,
        program_id=self.program_id,
        job_id=job_id,
        processor_ids=processor_ids,
        run_context=util.pack_any(batch_context),
        description=description,
        labels=labels,
    )
    return engine_job.EngineJob(
        self.project_id,
        self.program_id,
        created_job_id,
        self.context,
        job,
        result_type=ResultType.Batch,
    )
# Metrics snapshot packed as an Any: two 'xeb' two-qubit fidelity metrics,
# per-qubit 't1' values (note the duplicate '0_1' target with two values),
# and one global metric with no targets.
data=util.pack_any(
    v2.metrics_pb2.MetricsSnapshot(
        timestamp_ms=1562544000021,
        metrics=[
            v2.metrics_pb2.Metric(
                name='xeb',
                targets=['0_0', '0_1'],
                values=[v2.metrics_pb2.Value(double_val=0.9999)],
            ),
            v2.metrics_pb2.Metric(
                name='xeb',
                targets=['0_0', '1_0'],
                values=[v2.metrics_pb2.Value(double_val=0.9998)],
            ),
            v2.metrics_pb2.Metric(
                name='t1',
                targets=['0_0'],
                values=[v2.metrics_pb2.Value(double_val=321)]),
            v2.metrics_pb2.Metric(
                name='t1',
                targets=['0_1'],
                values=[v2.metrics_pb2.Value(double_val=911)]),
            v2.metrics_pb2.Metric(
                name='t1',
                targets=['0_1'],
                values=[v2.metrics_pb2.Value(double_val=505)]),
            v2.metrics_pb2.Metric(
                name='globalMetric',
                values=[v2.metrics_pb2.Value(int32_val=12300)]),
        ],
    )),
# A v2 BatchProgram (packed as an Any) containing one xmon circuit: a
# half-turn 'xy' gate on qubit 5_2 followed by a measurement keyed
# 'result' on the same qubit.
_BATCH_PROGRAM_V2 = util.pack_any(
    Merge(
        """programs { language { gate_set: "xmon" } circuit { scheduling_strategy: MOMENT_BY_MOMENT moments { operations { gate { id: "xy" } args { key: "axis_half_turns" value { arg_value { float_value: 0.0 } } } args { key: "half_turns" value { arg_value { float_value: 0.5 } } } qubits { id: "5_2" } } } moments { operations { gate { id: "meas" } args { key: "invert_mask" value { arg_value { bool_values { } } } } args { key: "key" value { arg_value { string_value: "result" } } } qubits { id: "5_2" } } } } } """,
        v2.batch_pb2.BatchProgram(),
    ))
return timestamp_proto _A_RESULT = util.pack_any( Merge( """ sweep_results: [{ repetitions: 1, measurement_keys: [{ key: 'q', qubits: [{ row: 1, col: 1 }] }], parameterized_results: [{ params: { assignments: { key: 'a' value: 1 } }, measurement_results: '\000\001' }] }] """, v1.program_pb2.Result(), )) _RESULTS = util.pack_any( Merge(
def test_get_circuit_v1(get_program):
    """Fetching a circuit stored as a v1 Program is rejected."""
    program = cg.EngineProgram('a', 'b', EngineContext())
    v1_code = util.pack_any(v1.program_pb2.Program())
    get_program.return_value = quantum.QuantumProgram(code=v1_code)
    with pytest.raises(ValueError, match='v1 Program is not supported'):
        program.get_circuit()
# Canned v2 Result packed as an Any: one sweep of 4 repetitions with two
# parameterized results (a=1 and a=2), each measuring key 'q' on qubit
# 1_1 (result bytes 0o6 and 0o5 respectively).
RESULTS = quantum.QuantumResult(result=util.pack_any(
    Merge(
        """ sweep_results: [{ repetitions: 4, parameterized_results: [{ params: { assignments: { key: 'a' value: 1 } }, measurement_results: { key: 'q' qubit_measurement_results: [{ qubit: { id: '1_1' } results: '\006' }] } },{ params: { assignments: { key: 'a' value: 2 } }, measurement_results: { key: 'q' qubit_measurement_results: [{ qubit: { id: '1_1' } results: '\005' }] } }] }] """,
        v2.result_pb2.Result(),
    )))