def test_composite_tags(self):
    """Test the tags setter, add_tags_recursive, remove_tags_recursive."""
    child_a = FakeExperiment([0, 2])
    child_b = FakeExperiment([1, 3])
    batch = BatchExperiment([child_a, child_b])
    expdata = batch.run(FakeBackend())
    self.assertExperimentDone(expdata)
    sub0 = expdata.child_data(0)
    sub1 = expdata.child_data(1)

    # The setter de-duplicates tags on the node it is applied to and
    # does not touch the other nodes of the tree.
    expdata.tags = ["a", "c", "a"]
    sub0.tags = ["b"]
    self.assertEqual(sorted(expdata.tags), ["a", "c"])
    self.assertEqual(sorted(sub0.tags), ["b"])
    self.assertEqual(sorted(sub1.tags), [])

    # Recursive add applies to the root and to every child.
    expdata.add_tags_recursive(["d", "c"])
    self.assertEqual(sorted(expdata.tags), ["a", "c", "d"])
    self.assertEqual(sorted(sub0.tags), ["b", "c", "d"])
    self.assertEqual(sorted(sub1.tags), ["c", "d"])

    # Recursive removal likewise applies to the whole tree.
    expdata.remove_tags_recursive(["a", "b"])
    self.assertEqual(sorted(expdata.tags), ["c", "d"])
    self.assertEqual(sorted(sub0.tags), ["c", "d"])
    self.assertEqual(sorted(sub1.tags), ["c", "d"])
def test_job_splitting(self, max_experiments):
    """Test job splitting"""
    num_circuits = 10
    backend = FakeBackend(max_experiments=max_experiments)

    class Experiment(FakeExperiment):
        """Fake Experiment to test job splitting"""

        def circuits(self):
            """Generate fake circuits"""
            qc = QuantumCircuit(1)
            qc.measure_all()
            return num_circuits * [qc]

    expdata = Experiment([0]).run(backend)
    self.assertExperimentDone(expdata)

    # Compute the expected number of jobs: a single job when the backend
    # imposes no limit, otherwise ceil(num_circuits / max_experiments).
    if max_experiments is None:
        expected_jobs = 1
    else:
        expected_jobs = -(-num_circuits // max_experiments)
    self.assertEqual(len(expdata.job_ids), expected_jobs)
def test_parallel_options(self):
    """Test parallel experiments overriding sub-experiment run and transpile options."""
    # Every option set on the sub-experiments below is expected to be
    # overridden by the composite experiment's own options.
    sub0 = FakeExperiment([0])
    sub0.set_transpile_options(optimization_level=1)

    sub2 = FakeExperiment([2])
    sub2.set_experiment_options(dummyoption="test")
    sub2.set_run_options(shots=2000)
    sub2.set_transpile_options(optimization_level=1)
    sub2.analysis.set_options(dummyoption="test")

    par_exp = ParallelExperiment([sub0, sub2])
    with self.assertWarnsRegex(
        Warning,
        "Sub-experiment run and transpile options"
        " are overridden by composite experiment options.",
    ):
        # The composite experiment exposes its own (default) options, not
        # the ones set on the sub-experiments above.
        self.assertEqual(par_exp.experiment_options, Options())
        self.assertEqual(par_exp.run_options, Options(meas_level=2))
        self.assertEqual(par_exp.transpile_options, Options(optimization_level=0))
        self.assertEqual(par_exp.analysis.options, Options())

        expdata = par_exp.run(FakeBackend())
        self.assertExperimentDone(expdata)
def test_nested_composite(self):
    """Test nested parallel experiments."""
    # Build a batch of a parallel of (batch of parallel) experiments,
    # deliberately reusing the same sub-experiment objects at several levels.
    inner_a = FakeExperiment([0, 2])
    inner_b = FakeExperiment([1, 3])
    par_inner = ParallelExperiment([inner_a, inner_b])
    batch_inner = BatchExperiment([par_inner, inner_a])
    par_outer = ParallelExperiment([batch_inner, FakeExperiment([4])])
    nested_exp = BatchExperiment([par_outer, par_inner])

    expdata = nested_exp.run(FakeBackend())
    self.assertExperimentDone(expdata)
def test_analysis_replace_results_false(self):
    """Test replace_results of composite experiment data"""
    batch = BatchExperiment([FakeExperiment([0, 2]), FakeExperiment([1, 3])])
    data1 = batch.run(FakeBackend())
    self.assertExperimentDone(data1)

    # Attach extra child data that is not part of the composite experiment.
    extra_data = FakeExperiment([0, 1]).run(FakeBackend())
    self.assertExperimentDone(extra_data)
    data1.add_child_data(extra_data)

    # Re-running analysis with replace_results=False must produce a copy:
    # new ids for the root and for each child, but the same tree shape.
    data2 = batch.analysis.run(data1, replace_results=False)
    self.assertExperimentDone(data2)
    self.assertNotEqual(data1.experiment_id, data2.experiment_id)
    self.assertEqual(len(data1.child_data()), len(data2.child_data()))
    for sub1, sub2 in zip(data1.child_data(), data2.child_data()):
        self.assertNotEqual(sub1.experiment_id, sub2.experiment_id)
def setUp(self):
    super().setUp()
    self.backend = FakeBackend()
    self.share_level = "hey"

    # Root experiment: a batch of (a parallel pair) and a plain experiment.
    par_exp = ParallelExperiment([FakeExperiment([0, 2]), FakeExperiment([1, 3])])
    batch_exp = BatchExperiment([par_exp, FakeExperiment([0, 1, 2, 3])])

    self.rootdata = batch_exp.run(backend=self.backend)
    self.assertExperimentDone(self.rootdata)
    self.assertEqual(len(self.rootdata.child_data()), 2)
    self.rootdata.share_level = self.share_level
def test_flatten_results_nested(self):
    """Test combining results."""
    exps = [FakeExperiment([qubit]) for qubit in range(4)]
    comp_exp = ParallelExperiment(
        [
            BatchExperiment(2 * [ParallelExperiment(exps[0:2])]),
            BatchExperiment(3 * [ParallelExperiment(exps[2:4])]),
        ],
        flatten_results=True,
    )
    expdata = comp_exp.run(FakeBackend())
    self.assertExperimentDone(expdata)
    # Flattening discards all child data ...
    self.assertEqual(len(expdata.child_data()), 0)
    # ... and collects every sub-experiment's analysis results at the root.
    self.assertEqual(len(expdata.analysis_results()), 30)
def test_flatten_results_partial(self):
    """Test flattening results."""
    exp0 = FakeExperiment([0])
    exp1 = FakeExperiment([1])
    exp2 = FakeExperiment([2])
    exp3 = FakeExperiment([3])
    comp_exp = BatchExperiment(
        [
            ParallelExperiment([exp0, exp1, exp2], flatten_results=True),
            ParallelExperiment([exp2, exp3], flatten_results=True),
        ],
    )
    expdata = comp_exp.run(FakeBackend())
    self.assertExperimentDone(expdata)

    # The outer batch experiment was not flattened: it keeps its two
    # children and carries no analysis results of its own.
    self.assertEqual(len(expdata.child_data()), 2)
    self.assertEqual(len(expdata.analysis_results()), 0)

    # The inner parallel experiments were flattened: no grandchildren
    # remain and each child holds the combined analysis results.
    for idx, expected in enumerate((9, 6)):
        child = expdata.child_data(idx)
        self.assertEqual(len(child.child_data()), 0)
        self.assertEqual(len(child.analysis_results()), expected)
def create_experiment(
    self,
    experiment_type: str,
    backend_name: str,
    metadata: Optional[Dict] = None,
    experiment_id: Optional[str] = None,
    parent_id: Optional[str] = None,
    job_ids: Optional[List[str]] = None,
    tags: Optional[List[str]] = None,
    notes: Optional[str] = None,
    json_encoder: Type[json.JSONEncoder] = json.JSONEncoder,
    **kwargs: Any,
) -> str:
    """Add a new experiment row to the fake service's dataframe.

    Args:
        experiment_type: Experiment type string.
        backend_name: Name of the backend the experiment ran on.
        metadata: Experiment metadata.
        experiment_id: Experiment id; a fresh UUID string is generated
            when not given.
        parent_id: Id of the parent experiment, if any.
        job_ids: Ids of the jobs that ran the experiment.
        tags: Tags attached to the experiment.
        notes: Free-form notes.
        json_encoder: Accepted for interface compatibility; not used by
            this fake service.
        kwargs: Extra service-specific fields; only ``share_level`` is
            stored (see the column notes below).

    Returns:
        The id of the newly created experiment.

    Raises:
        DbExperimentEntryExists: If an experiment with the same id
            already exists.
    """
    if experiment_id is None:
        # Generate the id as a string so it matches the declared return
        # type and compares equal to ids stored from string input (a raw
        # uuid.UUID object would not).
        experiment_id = str(uuid.uuid4())
    if experiment_id in self.exps.experiment_id.values:
        raise DbExperimentEntryExists("Cannot add experiment with existing id")

    # Clarifications about some of the columns:
    # share_level - not a parameter of `DatabaseService.create_experiment` but a parameter of
    #    `IBMExperimentService.create_experiment`. It must be supported because it is used
    #    in `DbExperimentData`.
    # device_components - the user specifies the device components when adding a result
    #    (this is not a local decision of the fake service but the interface of DatabaseService
    #    and IBMExperimentService). The components of the different results of the same
    #    experiment are aggregated here in the device_components column.
    # start_datetime - not a parameter of `DatabaseService.create_experiment` but a parameter of
    #    `IBMExperimentService.create_experiment`. Since `DbExperimentData` does not set it
    #    via kwargs (as it does with share_level), the user cannot control the time and the
    #    service alone decides about it. Here we've chosen to set a unique time for each
    #    experiment, with the first experiment dated to midnight of January 1st, 2022, the
    #    second experiment an hour later, etc.
    # figure_names - the fake service currently does not support figures. The column
    #    (degenerated to []) is required to prevent a flaw in the work with DbExperimentData.
    # backend - the query methods `experiment` and `experiments` are supposed to return an
    #    instantiated backend object, and not only the backend name. We assume that the fake
    #    service works with the fake backend (class FakeBackend).
    self.exps = pd.concat(
        [
            self.exps,
            pd.DataFrame(
                [
                    {
                        "experiment_type": experiment_type,
                        "experiment_id": experiment_id,
                        "parent_id": parent_id,
                        "backend_name": backend_name,
                        "metadata": metadata,
                        "job_ids": job_ids,
                        "tags": tags,
                        "notes": notes,
                        "share_level": kwargs.get("share_level", None),
                        "device_components": [],
                        "start_datetime": datetime(2022, 1, 1) + timedelta(hours=len(self.exps)),
                        "figure_names": [],
                        "backend": FakeBackend(backend_name),
                    }
                ],
                columns=self.exps.columns,
            ),
        ],
        ignore_index=True,
    )
    return experiment_id