def test_mix_local_remote_jobs(self, QE_TOKEN, QE_URL):
    """Test mixing local and remote jobs.

    Internally, local jobs execute in separate processes, since they are
    CPU bound, and remote jobs execute in separate threads, since they
    are I/O bound. The module gets results from potentially both kinds
    in one list. Test that this works.
    """
    provider = IBMQProvider(QE_TOKEN, QE_URL)
    remote_backend = provider.available_backends({'simulator': True})[0]
    local_backend = get_backend('local_qasm_simulator')
    njobs = 6
    job_list = []
    backend_type = [local_backend, remote_backend]
    # Alternate between the local and remote backend for each circuit;
    # enumerate replaces the original hand-maintained counter.
    circuits = self.rqg.get_circuits(format_='QuantumCircuit')[:njobs]
    for i, circuit in enumerate(circuits):
        compiled_circuit = compile_circuit(circuit)
        backend = backend_type[i % len(backend_type)]
        self.log.info(backend)
        quantum_job = QuantumJob(compiled_circuit, backend=backend)
        job_list.append(quantum_job)
    jp = jobprocessor.JobProcessor(job_list, max_workers=None,
                                   callback=None)
    jp.submit()
def test_get_jobs_filter_counts(self, qe_token, qe_url):
    """Check that jobs can be filtered on shots and measured counts."""
    provider = IBMQProvider(qe_token, qe_url)
    # TODO: consider generalizing backend name
    # TODO: this tests depends on the previous executions of the user
    backend = provider.get_backend('ibmq_qasm_simulator')
    counts_filter = {
        'backend.name': 'ibmq_qasm_simulator',
        'shots': 1024,
        'qasms.result.data.counts.00': {'lt': 500},
    }
    self.log.info('searching for at most 5 jobs with 1024 shots, a count '
                  'for "00" of < 500, on the ibmq_qasm_simulator backend')
    matching_jobs = backend.jobs(limit=5, skip=0, db_filter=counts_filter)
    self.log.info('found %s matching jobs', len(matching_jobs))
    for index, job in enumerate(matching_jobs):
        self.log.info('match #%d', index)
        result = job.result()
        below_threshold = any(
            cresult['data']['counts']['00'] < 500
            for cresult in result._result['result'])
        self.assertTrue(below_threshold)
        for circuit_name in result.get_names():
            self.log.info('\tcircuit_name: %s', circuit_name)
            if circuit_name:
                self.log.info('\t%s',
                              str(result.get_counts(circuit_name)))
def test_compile_two_remote(self, QE_TOKEN, QE_URL, hub=None, group=None,
                            project=None):
    """Test Compiler remote on two circuits.

    If all correct some should exists.
    """
    provider = IBMQProvider(QE_TOKEN, QE_URL, hub, group, project)
    devices = provider.available_backends(
        {'local': False, 'simulator': False})
    backend = lowest_pending_jobs(devices)
    quantum_reg = qiskit.QuantumRegister(2, name='q')
    classical_reg = qiskit.ClassicalRegister(2, name='c')
    bell = qiskit.QuantumCircuit(quantum_reg, classical_reg, name="bell")
    bell.h(quantum_reg[0])
    bell.cx(quantum_reg[0], quantum_reg[1])
    bell.measure(quantum_reg, classical_reg)
    extra = qiskit.QuantumCircuit(quantum_reg, classical_reg, name="extra")
    extra.measure(quantum_reg, classical_reg)
    qobj = transpiler.compile([bell, extra], backend)
    # FIXME should test against the qobj when defined
    self.assertEqual(len(qobj), 3)
def test_run_remote_simulator_compile(self, QE_TOKEN, QE_URL):
    """Run a job on the remote simulator, compiling at submission time."""
    provider = IBMQProvider(QE_TOKEN, QE_URL)
    simulator = provider.get_backend('ibmqx_qasm_simulator')
    job = QuantumJob(self.qc, do_compile=True, backend=simulator)
    jobprocessor.run_backend(job)
def test_compile_two_run_remote(self, QE_TOKEN, QE_URL, hub=None,
                                group=None, project=None):
    """Test Compiler and run two circuits.

    If all correct some should exists.
    """
    provider = IBMQProvider(QE_TOKEN, QE_URL, hub, group, project)
    backend = provider.available_backends({'simulator': True})[0]
    quantum_reg = qiskit.QuantumRegister(2, name='q')
    classical_reg = qiskit.ClassicalRegister(2, name='c')
    bell = qiskit.QuantumCircuit(quantum_reg, classical_reg, name="bell")
    bell.h(quantum_reg[0])
    bell.cx(quantum_reg[0], quantum_reg[1])
    bell.measure(quantum_reg, classical_reg)
    extra = qiskit.QuantumCircuit(quantum_reg, classical_reg, name="extra")
    extra.measure(quantum_reg, classical_reg)
    qobj = qiskit._compiler.compile([bell, extra], backend)
    quantum_job = qiskit.QuantumJob(qobj, backend=backend,
                                    preformatted=True)
    result = backend.run(quantum_job).result()
    self.assertIsInstance(result, Result)
def test_online_qasm_simulator_two_registers(self, qe_token, qe_url):
    """Test online_qasm_simulator_two_registers.

    If all correct should return correct counts.
    """
    provider = IBMQProvider(qe_token, qe_url)
    backend = provider.get_backend('ibmq_qasm_simulator')
    qreg_a = QuantumRegister(2)
    creg_a = ClassicalRegister(2)
    qreg_b = QuantumRegister(2)
    creg_b = ClassicalRegister(2)
    circuit1 = QuantumCircuit(qreg_a, qreg_b, creg_a, creg_b)
    circuit2 = QuantumCircuit(qreg_a, qreg_b, creg_a, creg_b)
    circuit1.x(qreg_a[0])
    circuit2.x(qreg_b[1])
    # Both circuits measure every qubit, register A first then register B.
    for circ in (circuit1, circuit2):
        circ.measure(qreg_a[0], creg_a[0])
        circ.measure(qreg_a[1], creg_a[1])
        circ.measure(qreg_b[0], creg_b[0])
        circ.measure(qreg_b[1], creg_b[1])
    shots = 1024
    qobj = transpiler.compile([circuit1, circuit2], backend, seed=8458,
                              shots=shots)
    result = backend.run(qobj).result()
    self.assertEqual(result.get_counts(circuit1), {'00 01': 1024})
    self.assertEqual(result.get_counts(circuit2), {'10 00': 1024})
def test_job_id(self, qe_token, qe_url):
    """Test that a submitted job is assigned a non-None job id."""
    provider = IBMQProvider(qe_token, qe_url)
    backend = provider.get_backend('ibmq_qasm_simulator')
    qobj = transpiler.compile(self._qc, backend)
    job = backend.run(qobj)
    self.log.info('job_id: %s', job.id())
    # assertIsNotNone gives a clearer failure message than
    # assertTrue(... is not None).
    self.assertIsNotNone(job.id())
def test_get_backend_name(self, qe_token, qe_url):
    """Test that the job reports the name of the backend it ran on."""
    provider = IBMQProvider(qe_token, qe_url)
    backend_name = 'ibmq_qasm_simulator'
    backend = provider.get_backend(backend_name)
    qobj = transpiler.compile(self._qc, backend)
    job = backend.run(qobj)
    # assertEqual reports both values on failure, unlike
    # assertTrue(a == b).
    self.assertEqual(job.backend_name(), backend_name)
def test_run_device(self, qe_token, qe_url):
    """Run a circuit on a real device and check the result statistics.

    Polls the job until it is done, then compares the measured counts
    against the ideal Bell-state distribution with a chi-squared
    contingency check and an approximate dict comparison.
    """
    provider = IBMQProvider(qe_token, qe_url)
    backends = [backend for backend in provider.available_backends()
                if not backend.configuration['simulator']]
    self.log.info('devices: %s', [b.name for b in backends])
    backend = _least_busy(backends)
    self.log.info('using backend: %s', backend.name)
    qobj = transpiler.compile(self._qc, backend)
    shots = qobj.config.shots
    job = backend.run(qobj)
    # `is not` instead of the ambiguous-looking `not ... is ...`.
    while job.status() is not JobStatus.DONE:
        self.log.info(job.status())
        time.sleep(4)
    # BUG FIX: the original logged the bound method `job.status`
    # (its repr) instead of calling it to get the final status.
    self.log.info(job.status())
    result = job.result()
    counts_qx = result.get_counts(result.get_names()[0])
    counts_ex = {'00': shots / 2, '11': shots / 2}
    states = counts_qx.keys() | counts_ex.keys()
    # contingency table
    ctable = numpy.array([[counts_qx.get(key, 0) for key in states],
                          [counts_ex.get(key, 0) for key in states]])
    self.log.info('states: %s', str(states))
    self.log.info('ctable: %s', str(ctable))
    contingency = chi2_contingency(ctable)
    self.log.info('chi2_contingency: %s', str(contingency))
    self.assertDictAlmostEqual(counts_qx, counts_ex, shots * 0.1)
def test_run_remote_simulator(self, QE_TOKEN, QE_URL):
    """Run a pre-compiled circuit on the remote simulator."""
    provider = IBMQProvider(QE_TOKEN, QE_URL)
    simulator = provider.get_backend('ibmqx_qasm_simulator')
    compiled = compile_circuit(self.qc)
    job = QuantumJob(compiled, do_compile=False, backend=simulator)
    jobprocessor.run_backend(job)
def test_double_submit_fails(self, qe_token, qe_url):
    """Submitting an already-submitted job must raise JobError."""
    provider = IBMQProvider(qe_token, qe_url)
    backend = provider.get_backend('ibmq_qasm_simulator')
    compiled = transpiler.compile(self._qc, backend)
    # backend.run() will automatically call job.submit()
    job = backend.run(compiled)
    with self.assertRaises(JobError):
        job.submit()
def test_retrieve_job_error(self, qe_token, qe_url):
    """Retrieving a nonexistent job id must raise IBMQBackendError."""
    provider = IBMQProvider(qe_token, qe_url)
    devices = [backend for backend in provider.available_backends()
               if not backend.configuration['simulator']]
    device = _least_busy(devices)
    self.assertRaises(IBMQBackendError, device.retrieve_job, 'BAD_JOB_ID')
def test_run_remote_simulator(self, QE_TOKEN, QE_URL):
    """Run a pre-compiled circuit on the first remote simulator found."""
    provider = IBMQProvider(QE_TOKEN, QE_URL)
    simulator = provider.available_backends({'simulator': True})[0]
    compiled = compile_circuit(self.qc)
    job = QuantumJob(compiled, do_compile=False, backend=simulator)
    jobprocessor.run_backend(job)
def test_get_jobs_filter_job_status(self, qe_token, qe_url):
    """Test filtering the job list by job status."""
    provider = IBMQProvider(qe_token, qe_url)
    backends = provider.available_backends()
    backend = _least_busy(backends)
    job_list = backend.jobs(limit=5, skip=0, status=JobStatus.DONE)
    self.log.info('found %s matching jobs', len(job_list))
    for i, job in enumerate(job_list):
        self.log.info('match #%d: %s', i, job.result()._result['status'])
        # assertIs gives a clearer failure message than
        # assertTrue(... is ...).
        self.assertIs(job.status(), JobStatus.DONE)
def test_remote_backends_exist(self, QE_TOKEN, QE_URL):
    """Test if there are remote backends.

    If all correct some should exists.
    """
    ibmq_provider = IBMQProvider(QE_TOKEN, QE_URL)
    non_local = ibmq_provider.available_backends({'local': False})
    self.log.info(non_local)
    self.assertTrue(len(non_local) > 0)
def test_remote_backends_exist_real_device(self, QE_TOKEN, QE_URL):
    """Test if there are remote backends that are devices.

    If all correct some should exists.
    """
    ibmq_provider = IBMQProvider(QE_TOKEN, QE_URL)
    devices = ibmq_provider.available_backends(
        {'local': False, 'simulator': False})
    self.log.info(devices)
    self.assertTrue(devices)
def test_retrieve_job(self, qe_token, qe_url):
    """Test retrieving a submitted job by its id."""
    provider = IBMQProvider(qe_token, qe_url)
    backend = provider.get_backend('ibmq_qasm_simulator')
    qobj = transpiler.compile(self._qc, backend)
    job = backend.run(qobj)
    rjob = backend.retrieve_job(job.id())
    # assertEqual reports both values on failure, unlike
    # assertTrue(a == b).
    self.assertEqual(job.id(), rjob.id())
    self.assertEqual(job.result().get_counts(),
                     rjob.result().get_counts())
def test_remote_backends_exist_real_device(self, qe_token, qe_url):
    """Test if there are remote backends that are devices.

    If all correct some should exists.
    """
    ibmq_provider = IBMQProvider(qe_token, qe_url)
    all_remote = ibmq_provider.available_backends()
    devices = [backend for backend in all_remote
               if not backend.configuration()['simulator']]
    self.log.info(devices)
    self.assertTrue(devices)
def test_remote_backends_exist_simulator(self, QE_TOKEN, QE_URL, hub=None,
                                         group=None, project=None):
    """Test if there are remote backends that are simulators.

    If all correct some should exists.
    """
    ibmq_provider = IBMQProvider(QE_TOKEN, QE_URL, hub, group, project)
    simulators = ibmq_provider.available_backends(
        {'local': False, 'simulator': True})
    self.log.info(simulators)
    self.assertTrue(simulators)
def test_get_jobs_filter_date(self, qe_token, qe_url):
    """Test filtering jobs by creation date."""
    provider = IBMQProvider(qe_token, qe_url)
    backends = provider.available_backends()
    backend = _least_busy(backends)
    my_filter = {'creationDate': {'lt': '2017-01-01T00:00:00.00'}}
    job_list = backend.jobs(limit=5, db_filter=my_filter)
    self.log.info('found %s matching jobs', len(job_list))
    for i, job in enumerate(job_list):
        self.log.info('match #%d: %s', i, job.creation_date)
        # assertLess reports both values on failure, unlike
        # assertTrue(a < b).
        self.assertLess(job.creation_date, '2017-01-01T00:00:00.00')
def test_remote_backends_exist_simulator(self, QE_TOKEN, QE_URL):
    """Test if there are remote backends that are simulators.

    If all correct some should exists.
    """
    ibmq_provider = IBMQProvider(QE_TOKEN, QE_URL)
    all_remote = ibmq_provider.available_backends()
    simulators = [backend for backend in all_remote
                  if backend.configuration['simulator']]
    self.log.info(simulators)
    self.assertTrue(simulators)
def test_remote_backends_exist(self, QE_TOKEN, QE_URL):
    """Test if there are remote backends.

    If all correct some should exists.
    """
    ibmq_provider = IBMQProvider(QE_TOKEN, QE_URL)
    filtered = remove_backends_from_list(
        ibmq_provider.available_backends())
    self.log.info(filtered)
    self.assertTrue(len(filtered) > 0)
def test_remote_backends_exist_simulator(self, QE_TOKEN, QE_URL, hub=None,
                                         group=None, project=None):
    """Test if there are remote backends that are simulators.

    If all correct some should exists.
    """
    ibmq_provider = IBMQProvider(QE_TOKEN, QE_URL, hub, group, project)
    remote_simulators = ibmq_provider.available_backends(
        {'local': False, 'simulator': True})
    self.log.info(remote_simulators)
    self.assertTrue(remote_simulators)
def test_remote_backends_exist(self, QE_TOKEN, QE_URL, hub=None,
                               group=None, project=None):
    """Test if there are remote backends.

    If all correct some should exists.
    """
    ibmq_provider = IBMQProvider(QE_TOKEN, QE_URL, hub, group, project)
    filtered = remove_backends_from_list(
        ibmq_provider.available_backends({'local': False}))
    self.log.info(filtered)
    self.assertTrue(len(filtered) > 0)
def test_cancel(self, qe_token, qe_url):
    """Test cancelling a job submitted to a real device."""
    provider = IBMQProvider(qe_token, qe_url)
    if self.using_ibmq_credentials:
        backend_name = 'ibmq_20_tokyo'
    else:
        backend_name = 'ibmqx4'
    backend = provider.get_backend(backend_name)
    qobj = transpiler.compile(self._qc, backend)
    job = backend.run(qobj)
    self.wait_for_initialization(job, timeout=5)
    can_cancel = job.cancel()
    self.assertTrue(can_cancel)
    self.assertStatus(job, JobStatus.CANCELLED)
def test_remote_backends_exist(self, QE_TOKEN, QE_URL, hub=None,
                               group=None, project=None):
    """Test if there are remote backends.

    If all correct some should exists.
    """
    ibmq_provider = IBMQProvider(QE_TOKEN, QE_URL, hub, group, project)
    non_local = ibmq_provider.available_backends({'local': False})
    usable = remove_backends_from_list(non_local)
    self.log.info(usable)
    self.assertTrue(len(usable) > 0)
def test_remote_backends_exist_real_device(self, QE_TOKEN, QE_URL,
                                           hub=None, group=None,
                                           project=None):
    """Test if there are remote backends that are devices.

    If all correct some should exists.
    """
    ibmq_provider = IBMQProvider(QE_TOKEN, QE_URL, hub, group, project)
    all_remote = ibmq_provider.available_backends()
    devices = [backend for backend in all_remote
               if not backend.configuration['simulator']]
    self.log.info(devices)
    self.assertTrue(devices)
def setUp(self, qe_token, qe_url):
    """Create a Bell-state circuit and an IBMQ provider for the tests."""
    # pylint: disable=arguments-differ
    super().setUp()
    # create QuantumCircuit
    quantum_reg = QuantumRegister(2, 'q')
    classical_reg = ClassicalRegister(2, 'c')
    bell = QuantumCircuit(quantum_reg, classical_reg)
    bell.h(quantum_reg[0])
    bell.cx(quantum_reg[0], quantum_reg[1])
    bell.measure(quantum_reg, classical_reg)
    self._qc = bell
    self._provider = IBMQProvider(qe_token, qe_url)
def test_run_job_processor_online(self, QE_TOKEN, QE_URL):
    """Run a batch of jobs through the JobProcessor on a remote simulator."""
    provider = IBMQProvider(QE_TOKEN, QE_URL)
    backend = provider.available_backends({'simulator': True})[0]
    njobs = 1
    # One compiled job per slot; compilation happens per iteration,
    # just as in a plain loop.
    job_list = [QuantumJob(compile_circuit(self.qc), backend=backend)
                for _ in range(njobs)]
    processor = jobprocessor.JobProcessor(job_list, callback=None)
    processor.submit()
def setUp(self, QE_TOKEN, QE_URL, hub=None, group=None, project=None):
    """Create a Bell-state circuit and an IBMQ provider for the tests."""
    # pylint: disable=arguments-differ
    super().setUp()
    # create QuantumCircuit
    quantum_reg = QuantumRegister(2, 'q')
    classical_reg = ClassicalRegister(2, 'c')
    bell = QuantumCircuit(quantum_reg, classical_reg)
    bell.h(quantum_reg[0])
    bell.cx(quantum_reg[0], quantum_reg[1])
    bell.measure(quantum_reg, classical_reg)
    self._qc = bell
    self._provider = IBMQProvider(QE_TOKEN, QE_URL, hub, group, project)
def test_run_async_simulator(self, qe_token, qe_url):
    """Test that multiple simulator jobs run concurrently.

    Submits several jobs and polls until at least two are RUNNING at
    the same time (or all finish too quickly to observe overlap), then
    checks every job completed and job ids are unique.
    """
    provider = IBMQProvider(qe_token, qe_url)
    IBMQJob._executor = futures.ThreadPoolExecutor(max_workers=2)
    backend = provider.get_backend('ibmq_qasm_simulator')
    self.log.info('submitting to backend %s', backend.name)
    num_qubits = 16
    qr = QuantumRegister(num_qubits, 'qr')
    cr = ClassicalRegister(num_qubits, 'cr')
    qc = QuantumCircuit(qr, cr)
    for i in range(num_qubits - 1):
        qc.cx(qr[i], qr[i + 1])
    qc.measure(qr, cr)
    qobj = transpiler.compile([qc] * 10, backend)
    num_jobs = 5
    job_array = [backend.run(qobj) for _ in range(num_jobs)]
    found_async_jobs = False
    timeout = 30
    start_time = time.time()
    while not found_async_jobs:
        # Generator expressions avoid building throwaway lists
        # inside sum()/all().
        check = sum(job.status() is JobStatus.RUNNING
                    for job in job_array)
        if check >= 2:
            self.log.info('found %d simultaneous jobs', check)
            break
        if all(job.status() is JobStatus.DONE for job in job_array):
            # done too soon? don't generate error
            self.log.warning('all jobs completed before simultaneous jobs '
                             'could be detected')
            break
        for job in job_array:
            self.log.info('%s %s %s %s', job.status(),
                          job.status() is JobStatus.RUNNING,
                          check, job.id())
        self.log.info('-' * 20 + ' ' + str(time.time() - start_time))
        if time.time() - start_time > timeout:
            raise TimeoutError('failed to see multiple running jobs after '
                               '{0} s'.format(timeout))
        time.sleep(0.2)
    result_array = [job.result() for job in job_array]
    self.log.info('got back all job results')
    # Ensure all jobs have finished.
    self.assertTrue(
        all(job.status() is JobStatus.DONE for job in job_array))
    self.assertTrue(all(result.get_status() == 'COMPLETED'
                        for result in result_array))
    # Ensure job ids are unique.
    job_ids = [job.id() for job in job_array]
    self.assertEqual(sorted(job_ids), sorted(set(job_ids)))
def test_run_async_device(self, qe_token, qe_url):
    """Test submitting several jobs asynchronously to a real device.

    Snapshots the status distribution shortly after submission, then
    waits for all results and checks every job completed with a unique
    job id.
    """
    provider = IBMQProvider(qe_token, qe_url)
    backends = [backend for backend in provider.available_backends()
                if not backend.configuration['simulator']]
    backend = _least_busy(backends)
    self.log.info('submitting to backend %s', backend.name)
    num_qubits = 5
    qr = QuantumRegister(num_qubits, 'qr')
    cr = ClassicalRegister(num_qubits, 'cr')
    qc = QuantumCircuit(qr, cr)
    for i in range(num_qubits - 1):
        qc.cx(qr[i], qr[i + 1])
    qc.measure(qr, cr)
    qobj = transpiler.compile(qc, backend)
    num_jobs = 3
    job_array = [backend.run(qobj) for _ in range(num_jobs)]
    time.sleep(3)  # give time for jobs to start (better way?)
    job_status = [job.status() for job in job_array]
    # Generator expressions avoid building throwaway lists inside sum().
    num_init = sum(status is JobStatus.INITIALIZING
                   for status in job_status)
    num_queued = sum(status is JobStatus.QUEUED for status in job_status)
    num_running = sum(status is JobStatus.RUNNING
                      for status in job_status)
    num_done = sum(status is JobStatus.DONE for status in job_status)
    num_error = sum(status is JobStatus.ERROR for status in job_status)
    self.log.info('number of currently initializing jobs: %d/%d',
                  num_init, num_jobs)
    self.log.info('number of currently queued jobs: %d/%d',
                  num_queued, num_jobs)
    self.log.info('number of currently running jobs: %d/%d',
                  num_running, num_jobs)
    self.log.info('number of currently done jobs: %d/%d',
                  num_done, num_jobs)
    self.log.info('number of errored jobs: %d/%d', num_error, num_jobs)
    self.assertTrue(num_jobs - num_error - num_done > 0)
    # Wait for all the results.
    result_array = [job.result() for job in job_array]
    # Ensure all jobs have finished.
    self.assertTrue(
        all(job.status() is JobStatus.DONE for job in job_array))
    self.assertTrue(all(result.get_status() == 'COMPLETED'
                        for result in result_array))
    # Ensure job ids are unique.
    job_ids = [job.id() for job in job_array]
    self.assertEqual(sorted(job_ids), sorted(set(job_ids)))
def test_remote_backend_configuration(self, QE_TOKEN, QE_URL, hub=None,
                                      group=None, project=None):
    """Test backend configuration.

    If all correct should pass the validation.
    """
    ibmq_provider = IBMQProvider(QE_TOKEN, QE_URL, hub, group, project)
    remotes = remove_backends_from_list(
        ibmq_provider.available_backends({'local': False}))
    # Load the schema once: it is loop-invariant, and the original
    # re-opened and re-parsed the file for every backend.
    schema_path = self._get_resource_path(
        'deprecated/backends/backend_configuration_schema_old_py.json',
        path=Path.SCHEMAS)
    with open(schema_path, 'r') as schema_file:
        schema = json.load(schema_file)
    for backend in remotes:
        jsonschema.validate(backend.configuration, schema)
def test_remote_backend_calibration(self, QE_TOKEN, QE_URL, hub=None,
                                    group=None, project=None):
    """Test backend calibration.

    If all correct should pass the validation.
    """
    ibmq_provider = IBMQProvider(QE_TOKEN, QE_URL, hub, group, project)
    remotes = remove_backends_from_list(
        ibmq_provider.available_backends({'local': False}))
    for backend in remotes:
        calibration = backend.calibration
        # FIXME test against schema and decide what calibration
        # is for a simulator
        expected_len = 0 if backend.configuration['simulator'] else 4
        self.assertEqual(len(calibration), expected_len)
def test_remote_backend_parameters(self, QE_TOKEN, QE_URL, hub=None,
                                   group=None, project=None):
    """Test backend parameters.

    If all correct should pass the validation.
    """
    ibmq_provider = IBMQProvider(QE_TOKEN, QE_URL, hub, group, project)
    remotes = remove_backends_from_list(
        ibmq_provider.available_backends({'local': False}))
    for backend in remotes:
        self.log.info(backend.name)
        parameters = backend.parameters
        # FIXME test against schema and decide what parameters
        # is for a simulator
        if backend.configuration['simulator']:
            self.assertEqual(len(parameters), 0)
        else:
            required_keys = ('last_update_date', 'qubits', 'backend')
            self.assertTrue(all(key in parameters
                                for key in required_keys))