def test_set_job_labels(client_constructor):
    """set_job_labels replaces all labels and echoes the stored fingerprint."""
    grpc_stub = setup_mock_(client_constructor)
    # The existing job carries labels and a fingerprint; the update request is
    # expected to include that same fingerprint.
    grpc_stub.get_quantum_job.return_value = qtypes.QuantumJob(
        labels={'color': 'red', 'weather': 'sun', 'run': '1'},
        label_fingerprint='hash',
    )
    updated_job = qtypes.QuantumJob(
        name='projects/proj/programs/prog/jobs/job0')
    grpc_stub.update_quantum_job.return_value = updated_job

    engine_client = EngineClient()
    new_labels = {'hello': 'world', 'color': 'blue', 'run': '1'}
    assert engine_client.set_job_labels('proj', 'prog', 'job0',
                                        new_labels) == updated_job
    assert grpc_stub.update_quantum_job.call_args[0] == (
        'projects/proj/programs/prog/jobs/job0',
        qtypes.QuantumJob(
            name='projects/proj/programs/prog/jobs/job0',
            labels=new_labels,
            label_fingerprint='hash',
        ),
        qtypes.field_mask_pb2.FieldMask(paths=['labels']),
    )

    # An empty mapping clears the labels; the fingerprint is still sent.
    assert engine_client.set_job_labels('proj', 'prog', 'job0',
                                        {}) == updated_job
    assert grpc_stub.update_quantum_job.call_args[0] == (
        'projects/proj/programs/prog/jobs/job0',
        qtypes.QuantumJob(
            name='projects/proj/programs/prog/jobs/job0',
            label_fingerprint='hash',
        ),
        qtypes.field_mask_pb2.FieldMask(paths=['labels']),
    )
def test_run_circuit_failed_missing_processor_name(client):
    """A FAILURE status with no processor name reports 'UNKNOWN' processor."""
    client().create_program.return_value = (
        'prog',
        qtypes.QuantumProgram(name='projects/proj/programs/prog'),
    )
    client().create_job.return_value = (
        'job-id',
        qtypes.QuantumJob(
            name='projects/proj/programs/prog/jobs/job-id',
            execution_status={'state': 'READY'},
        ),
    )
    # Polling reports a system-level failure without a processor name.
    client().get_job.return_value = qtypes.QuantumJob(
        name='projects/proj/programs/prog/jobs/job-id',
        execution_status={
            'state': 'FAILURE',
            'failure': {
                'error_code': 'SYSTEM_ERROR',
                'error_message': 'Not good',
            },
        },
    )

    engine = cg.Engine(project_id='proj')
    expected_message = ('Job projects/proj/programs/prog/jobs/job-id on'
                        ' processor UNKNOWN failed. SYSTEM_ERROR: Not good')
    with pytest.raises(RuntimeError, match=expected_message):
        engine.run(program=_CIRCUIT, gate_set=cg.XMON)
def test_add_labels(add_job_labels):
    """add_labels merges new labels and delegates to the client helper."""
    engine_job = cg.EngineJob('a', 'b', 'steve',
                              EngineContext(),
                              _job=qtypes.QuantumJob(labels={}))
    assert engine_job.labels() == {}

    # First addition: a single label.
    add_job_labels.return_value = qtypes.QuantumJob(labels={'a': '1'})
    assert engine_job.add_labels({'a': '1'}).labels() == {'a': '1'}
    add_job_labels.assert_called_with('a', 'b', 'steve', {'a': '1'})

    # Second addition: an updated value plus a brand-new key.
    add_job_labels.return_value = qtypes.QuantumJob(labels={
        'a': '2',
        'b': '1',
    })
    merged = engine_job.add_labels({'a': '2', 'b': '1'}).labels()
    assert merged == {'a': '2', 'b': '1'}
    add_job_labels.assert_called_with('a', 'b', 'steve', {'a': '2', 'b': '1'})
def test_sampler(client):
    """A processor-backed sampler runs sweeps and returns per-resolver results."""
    client().create_program.return_value = (
        'prog',
        qtypes.QuantumProgram(name='projects/proj/programs/prog'),
    )
    client().create_job.return_value = (
        'job-id',
        qtypes.QuantumJob(
            name='projects/proj/programs/prog/jobs/job-id',
            execution_status={'state': 'READY'},
        ),
    )
    client().get_job.return_value = qtypes.QuantumJob(
        execution_status={'state': 'SUCCESS'})
    client().get_job_results.return_value = qtypes.QuantumResult(
        result=_to_any(_RESULTS_V2))

    processor = cg.EngineProcessor('proj', 'mysim', EngineContext())
    sampler = processor.get_sampler(gate_set=cg.XMON)
    sweep_results = sampler.run_sweep(
        program=_CIRCUIT,
        params=[cirq.ParamResolver({'a': 1}),
                cirq.ParamResolver({'a': 2})])

    # One result per resolver, each tagged with its own parameter value.
    assert len(sweep_results) == 2
    for trial, expected_a in zip(sweep_results, [1, 2]):
        assert trial.repetitions == 1
        assert trial.params.param_dict == {'a': expected_a}
        assert trial.measurements == {'q': np.array([[0]], dtype='uint8')}
    assert client().create_program.call_args[0][0] == 'proj'
def test_run_calibration_delegation(create_job):
    """run_calibration delegates job creation and wraps the returned job."""
    create_job.return_value = ('dogs', qtypes.QuantumJob())
    calibration_program = cg.EngineProgram(
        'woof', 'woof', EngineContext(), result_type=ResultType.Calibration)
    calibration_job = calibration_program.run_calibration(
        processor_ids=['lazydog'])
    assert calibration_job._job == qtypes.QuantumJob()
def test_set_description(set_job_description):
    """set_description forwards to the client and reflects the new value."""
    engine_job = cg.EngineJob('a', 'b', 'steve', EngineContext())

    set_job_description.return_value = qtypes.QuantumJob(description='world')
    assert engine_job.set_description('world').description() == 'world'
    set_job_description.assert_called_with('a', 'b', 'steve', 'world')

    # An empty description is also forwarded and round-tripped.
    set_job_description.return_value = qtypes.QuantumJob(description='')
    assert engine_job.set_description('').description() == ''
    set_job_description.assert_called_with('a', 'b', 'steve', '')
def test_run_sweeps_delegation(create_job):
    """run_sweep delegates job creation and wraps the returned proto."""
    create_job.return_value = ('steve', qtypes.QuantumJob())
    engine_program = cg.EngineProgram('my-proj', 'my-prog', EngineContext())
    empty_resolver = cirq.ParamResolver({})
    sweep_job = engine_program.run_sweep(
        job_id='steve',
        repetitions=10,
        params=empty_resolver,
        processor_ids=['mine'],
    )
    assert sweep_job._job == qtypes.QuantumJob()
def test_description(get_job):
    """description() uses the cached proto, else fetches the job once."""
    cached_job = cg.EngineJob(
        'a', 'b', 'steve',
        EngineContext(),
        _job=qtypes.QuantumJob(description='hello'))
    assert cached_job.description() == 'hello'

    # Without a cached proto the job is fetched from the service.
    get_job.return_value = qtypes.QuantumJob(description='hello')
    fetched = cg.EngineJob('a', 'b', 'steve', EngineContext()).description()
    assert fetched == 'hello'
    get_job.assert_called_once_with('a', 'b', 'steve', False)
def setup_run_circuit_with_result_(client, result):
    """Wire the mocked client for a full run: create, poll SUCCESS, results.

    Args:
        client: the mocked engine-client factory.
        result: the packed result proto returned by get_job_results.
    """
    client().create_program.return_value = (
        'prog',
        qtypes.QuantumProgram(name='projects/proj/programs/prog'),
    )
    client().create_job.return_value = (
        'job-id',
        qtypes.QuantumJob(
            name='projects/proj/programs/prog/jobs/job-id',
            execution_status={'state': 'READY'},
        ),
    )
    client().get_job.return_value = qtypes.QuantumJob(
        execution_status={'state': 'SUCCESS'})
    client().get_job_results.return_value = qtypes.QuantumResult(result=result)
def test_run_batch_delegation(create_job):
    """run_batch on a Batch-typed program delegates to job creation."""
    create_job.return_value = ('kittens', qtypes.QuantumJob())
    batch_program = cg.EngineProgram(
        'my-meow', 'my-meow', EngineContext(), result_type=ResultType.Batch)
    sweep_list = [
        cirq.Points('cats', [1.0, 2.0, 3.0]),
        cirq.Points('cats', [4.0, 5.0, 6.0]),
    ]
    batch_job = batch_program.run_batch(
        job_id='steve',
        repetitions=10,
        params_list=sweep_list,
        processor_ids=['lazykitty'],
    )
    assert batch_job._job == qtypes.QuantumJob()
def test_get_repetitions_and_sweeps_v1(get_job):
    """A v1 RunContext in the job proto is rejected with a ValueError."""
    engine_job = cg.EngineJob('a', 'b', 'steve', EngineContext())
    v1_context = v1.program_pb2.RunContext(
        parameter_sweeps=[v1.params_pb2.ParameterSweep(repetitions=10)])
    get_job.return_value = qtypes.QuantumJob(run_context=_to_any(v1_context))
    with pytest.raises(ValueError, match='v1 RunContext is not supported'):
        engine_job.get_repetitions_and_sweeps()
def test_labels():
    """labels() returns the label map from the cached job proto."""
    engine_job = cg.EngineJob(
        'a', 'b', 'steve',
        EngineContext(),
        _job=qtypes.QuantumJob(labels={'t': '1'}))
    assert engine_job.labels() == {'t': '1'}
def test_get_repetitions_and_sweeps_unsupported(get_job):
    """An unrecognized run_context type_url raises a ValueError."""
    engine_job = cg.EngineJob('a', 'b', 'steve', EngineContext())
    unknown_any = qtypes.any_pb2.Any(
        type_url='type.googleapis.com/unknown.proto')
    get_job.return_value = qtypes.QuantumJob(run_context=unknown_any)
    with pytest.raises(ValueError,
                       match='unsupported run_context type: unknown.proto'):
        engine_job.get_repetitions_and_sweeps()
def test_get_calibration(get_calibration):
    """get_calibration parses processor/id from the status calibration name."""
    qjob = qtypes.QuantumJob(execution_status=qtypes.ExecutionStatus(
        calibration_name='projects/a/processors/p/calibrations/123'))
    # Snapshot with three metrics; only the metric names are asserted, so the
    # exact text-format layout is irrelevant to the parser.
    snapshot_text = """
        timestamp_ms: 123000,
        metrics: [{
            name: 'xeb',
            targets: ['0_0', '0_1'],
            values: [{
                double_val: .9999
            }]
        }, {
            name: 't1',
            targets: ['0_0'],
            values: [{
                double_val: 321
            }]
        }, {
            name: 'globalMetric',
            values: [{
                int32_val: 12300
            }]
        }]
    """
    calibration_proto = qtypes.QuantumCalibration(data=_to_any(
        Merge(snapshot_text, v2.metrics_pb2.MetricsSnapshot())))
    get_calibration.return_value = calibration_proto

    engine_job = cg.EngineJob('a', 'b', 'steve', EngineContext(), _job=qjob)
    assert list(engine_job.get_calibration()) == ['xeb', 't1', 'globalMetric']
    # Processor id and calibration timestamp come from the calibration_name.
    get_calibration.assert_called_once_with('a', 'p', 123)
def test_results_len(get_job_results):
    """len(job) counts the trial results of a successful job."""
    finished_job = qtypes.QuantumJob(execution_status=qtypes.ExecutionStatus(
        state=qtypes.ExecutionStatus.State.SUCCESS))
    get_job_results.return_value = RESULTS
    engine_job = cg.EngineJob(
        'a', 'b', 'steve', EngineContext(), _job=finished_job)
    assert len(engine_job) == 2
def test_timeout(patched_time_sleep, get_job):
    """results() raises when the job never leaves RUNNING within the timeout.

    `patched_time_sleep` is the patch fixture that stubs out sleeping so the
    poll loop exhausts the timeout instantly.
    """
    running_job = qtypes.QuantumJob(execution_status=qtypes.ExecutionStatus(
        state=qtypes.ExecutionStatus.State.RUNNING))
    get_job.return_value = running_job
    engine_job = cg.EngineJob('a', 'b', 'steve', EngineContext(timeout=500))
    with pytest.raises(RuntimeError, match='Timed out'):
        engine_job.results()
def test_calibration_results_not_a_calibration(get_job_results):
    """calibration_results() rejects jobs whose results are not calibrations."""
    finished_job = qtypes.QuantumJob(execution_status=qtypes.ExecutionStatus(
        state=qtypes.ExecutionStatus.State.SUCCESS))
    get_job_results.return_value = RESULTS
    engine_job = cg.EngineJob(
        'a', 'b', 'steve', EngineContext(), _job=finished_job)
    with pytest.raises(ValueError, match='calibration results'):
        engine_job.calibration_results()
def test_run_calibration(client):
    """run_calibration submits layers, tags the job, and parses the results."""
    client().create_program.return_value = (
        'prog',
        qtypes.QuantumProgram(name='projects/proj/programs/prog'),
    )
    client().create_job.return_value = (
        'job-id',
        qtypes.QuantumJob(
            name='projects/proj/programs/prog/jobs/job-id',
            execution_status={'state': 'READY'},
        ),
    )
    client().get_job.return_value = qtypes.QuantumJob(
        execution_status={'state': 'SUCCESS'})
    client().get_job_results.return_value = qtypes.QuantumResult(
        result=_CALIBRATION_RESULTS_V2)

    qubit_a = cirq.GridQubit(2, 3)
    qubit_b = cirq.GridQubit(2, 4)
    xeb_layer = cg.CalibrationLayer(
        'xeb', cirq.Circuit(cirq.CZ(qubit_a, qubit_b)), {'num_layers': 42})
    readout_layer = cg.CalibrationLayer(
        'readout', cirq.Circuit(cirq.measure(qubit_a, qubit_b)),
        {'num_samples': 4242})

    processor = cg.EngineProcessor('proj', 'mysim', EngineContext())
    calibration_job = processor.run_calibration(
        gate_set=cg.FSIM_GATESET,
        layers=[xeb_layer, readout_layer],
        job_id='job-id')
    layer_results = calibration_job.calibration_results()

    assert len(layer_results) == 2
    first, second = layer_results
    assert first.code == v2.calibration_pb2.SUCCESS
    assert first.error_message == 'First success'
    assert first.token == 'abc123'
    assert len(first.metrics) == 1
    assert len(first.metrics['fidelity']) == 1
    assert first.metrics['fidelity'][(qubit_a, qubit_b)] == [0.75]
    assert second.code == v2.calibration_pb2.SUCCESS
    assert second.error_message == 'Second success'

    # The created job must carry the calibration label.
    client().create_job.assert_called_once_with(
        project_id='proj',
        program_id='prog',
        job_id='job-id',
        processor_ids=['mysim'],
        run_context=_to_any(v2.run_context_pb2.RunContext()),
        description=None,
        labels={'calibration': ''},
    )
def test_status(get_job):
    """status() fetches the job once and returns the state name."""
    running_job = qtypes.QuantumJob(execution_status=qtypes.ExecutionStatus(
        state=qtypes.ExecutionStatus.State.RUNNING))
    get_job.return_value = running_job
    engine_job = cg.EngineJob('a', 'b', 'steve', EngineContext())
    assert engine_job.status() == 'RUNNING'
    get_job.assert_called_once()
def test_get_repetitions_and_sweeps(get_job):
    """A v2 RunContext yields the repetition count and decoded sweeps."""
    engine_job = cg.EngineJob('a', 'b', 'steve', EngineContext())
    v2_context = v2.run_context_pb2.RunContext(parameter_sweeps=[
        v2.run_context_pb2.ParameterSweep(repetitions=10)
    ])
    get_job.return_value = qtypes.QuantumJob(run_context=_to_any(v2_context))
    assert engine_job.get_repetitions_and_sweeps() == (10, [cirq.UnitSweep])
    # The job must be fetched with the run context included (flag True).
    get_job.assert_called_once_with('a', 'b', 'steve', True)
def test_run_calibration_no_processors(create_job):
    """run_calibration refuses to start without any processor ids."""
    create_job.return_value = ('dogs', qtypes.QuantumJob())
    calibration_program = cg.EngineProgram(
        'woof', 'woof', EngineContext(), result_type=ResultType.Calibration)
    with pytest.raises(ValueError, match='No processors specified'):
        _ = calibration_program.run_calibration(job_id='spot')
def test_run_batch_no_sweeps(create_job):
    """run_batch without sweeps builds one empty sweep per batched program.

    Running with no sweeps is fine; the program's batch size determines how
    many empty run contexts are created.
    """
    create_job.return_value = ('kittens', qtypes.QuantumJob())
    batch_program = cg.EngineProgram(
        'my-meow',
        'my-meow',
        _program=qtypes.QuantumProgram(code=_BATCH_PROGRAM_V2),
        context=EngineContext(),
        result_type=ResultType.Batch,
    )
    batch_job = batch_program.run_batch(
        job_id='steve', repetitions=10, processor_ids=['lazykitty'])
    assert batch_job._job == qtypes.QuantumJob()

    # Unpack the submitted run context: one entry for the single program.
    submitted_context = v2.batch_pb2.BatchRunContext()
    create_job.call_args[1]['run_context'].Unpack(submitted_context)
    assert len(submitted_context.run_contexts) == 1
def test_set_labels(set_job_labels):
    """set_labels replaces the label map and delegates to the client helper."""
    engine_job = cg.EngineJob('a', 'b', 'steve', EngineContext())

    set_job_labels.return_value = qtypes.QuantumJob(labels={
        'a': '1',
        'b': '1',
    })
    replaced = engine_job.set_labels({'a': '1', 'b': '1'}).labels()
    assert replaced == {'a': '1', 'b': '1'}
    set_job_labels.assert_called_with('a', 'b', 'steve', {'a': '1', 'b': '1'})

    # Replacing with an empty mapping clears all labels.
    set_job_labels.return_value = qtypes.QuantumJob()
    assert engine_job.set_labels({}).labels() == {}
    set_job_labels.assert_called_with('a', 'b', 'steve', {})
def test_run_batch(client):
    """run_batch packs one run context per program and flattens all results."""
    client().create_program.return_value = (
        'prog',
        qtypes.QuantumProgram(name='projects/proj/programs/prog'),
    )
    client().create_job.return_value = (
        'job-id',
        qtypes.QuantumJob(
            name='projects/proj/programs/prog/jobs/job-id',
            execution_status={'state': 'READY'},
        ),
    )
    client().get_job.return_value = qtypes.QuantumJob(
        execution_status={'state': 'SUCCESS'})
    client().get_job_results.return_value = qtypes.QuantumResult(
        result=_BATCH_RESULTS_V2)

    processor = cg.EngineProcessor('a', 'p', EngineContext())
    batch_job = processor.run_batch(
        gate_set=cg.XMON,
        programs=[_CIRCUIT, _CIRCUIT],
        job_id='job-id',
        params_list=[cirq.Points('a', [1, 2]),
                     cirq.Points('a', [3, 4])],
    )
    trial_results = batch_job.results()

    # Two programs x two sweep points each = four flattened results.
    assert len(trial_results) == 4
    for trial, expected_a in zip(trial_results, [1, 2, 3, 4]):
        assert trial.repetitions == 1
        assert trial.params.param_dict == {'a': expected_a}
        assert trial.measurements == {'q': np.array([[0]], dtype='uint8')}

    client().create_program.assert_called_once()
    client().create_job.assert_called_once()
    submitted_context = v2.batch_pb2.BatchRunContext()
    client().create_job.call_args[1]['run_context'].Unpack(submitted_context)
    assert len(submitted_context.run_contexts) == 2
    expected_points = {0: [1.0, 2.0], 1: [3.0, 4.0]}
    for idx, run_context in enumerate(submitted_context.run_contexts):
        sweeps = run_context.parameter_sweeps
        assert len(sweeps) == 1
        assert sweeps[0].repetitions == 1
        assert sweeps[0].sweep.single_sweep.points.points == (
            expected_points[idx])
    client().get_job.assert_called_once()
    client().get_job_results.assert_called_once()
def test_id():
    """id() returns the job id the EngineJob was constructed with."""
    engine_job = cg.EngineJob(
        'a',
        'b',
        'steve',
        EngineContext(),
        _job=qtypes.QuantumJob(create_time=qtypes.timestamp_pb2.Timestamp(
            seconds=1581515101)),
    )
    assert engine_job.id() == 'steve'
def test_failure_with_no_error():
    """failure() is falsy for a successful job with no failure info."""
    successful_job = qtypes.QuantumJob(execution_status=qtypes.ExecutionStatus(
        state=qtypes.ExecutionStatus.State.SUCCESS,
    ))
    engine_job = cg.EngineJob(
        'a', 'b', 'steve', EngineContext(), _job=successful_job)
    assert not engine_job.failure()
def test_results_getitem(get_job_results):
    """Indexing an EngineJob returns individual trial results, bounds-checked."""
    finished_job = qtypes.QuantumJob(execution_status=qtypes.ExecutionStatus(
        state=qtypes.ExecutionStatus.State.SUCCESS))
    get_job_results.return_value = RESULTS
    engine_job = cg.EngineJob(
        'a', 'b', 'steve', EngineContext(), _job=finished_job)
    assert str(engine_job[0]) == 'q=0110'
    assert str(engine_job[1]) == 'q=1010'
    # Only two results exist, so index 2 is out of range.
    with pytest.raises(IndexError):
        _ = engine_job[2]
def test_create_time():
    """create_time() converts the proto timestamp to a datetime."""
    engine_job = cg.EngineJob(
        'a',
        'b',
        'steve',
        EngineContext(),
        _job=qtypes.QuantumJob(create_time=qtypes.timestamp_pb2.Timestamp(
            seconds=1581515101)),
    )
    # 1581515101 seconds after the epoch == 2020-02-12 13:45:01.
    assert engine_job.create_time() == datetime.datetime(2020, 2, 12, 13, 45, 1)
def test_results_iter(get_job_results):
    """Iterating an EngineJob yields its trial results in order."""
    finished_job = qtypes.QuantumJob(execution_status=qtypes.ExecutionStatus(
        state=qtypes.ExecutionStatus.State.SUCCESS))
    get_job_results.return_value = RESULTS
    engine_job = cg.EngineJob(
        'a', 'b', 'steve', EngineContext(), _job=finished_job)
    rendered = [str(trial) for trial in engine_job]
    assert rendered == ['q=0110', 'q=1010']
def test_set_job_description(client_constructor):
    """set_job_description issues an update masked to the description field."""
    grpc_stub = setup_mock_(client_constructor)
    updated_job = qtypes.QuantumJob(
        name='projects/proj/programs/prog/jobs/job0')
    grpc_stub.update_quantum_job.return_value = updated_job

    engine_client = EngineClient()
    assert engine_client.set_job_description('proj', 'prog', 'job0',
                                             'A job') == updated_job
    assert grpc_stub.update_quantum_job.call_args[0] == (
        'projects/proj/programs/prog/jobs/job0',
        qtypes.QuantumJob(
            name='projects/proj/programs/prog/jobs/job0',
            description='A job',
        ),
        qtypes.field_mask_pb2.FieldMask(paths=['description']),
    )

    # Setting an empty description sends a job proto with no description set.
    assert engine_client.set_job_description('proj', 'prog', 'job0',
                                             '') == updated_job
    assert grpc_stub.update_quantum_job.call_args[0] == (
        'projects/proj/programs/prog/jobs/job0',
        qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0'),
        qtypes.field_mask_pb2.FieldMask(paths=['description']),
    )