def test_nest_3d(indep_params, dep_params):
    px, x, tablex = indep_params["x"]
    py, y, tabley = indep_params["y"]
    pz, z, tablez = indep_params["z"]

    def f(vx, vy, vz):
        return vx**2 + vy**2 + vz**2

    pi, i, tablei = dep_params["i"]
    pi.get = lambda: f(px(), py(), pz())

    sweep_values_x = [0, 1, 2]
    sweep_values_y = [5, 6, 7]
    sweep_values_z = [8, 9, 10]

    nest = Nest(
        Sweep(x, tablex, lambda: sweep_values_x),
        Sweep(y, tabley, lambda: sweep_values_y),
        Sweep(z, tablez, lambda: sweep_values_z),
        Measure(i, tablei)
    )

    meas = SweepMeasurement()
    meas.register_sweep(nest)
    interdeps = meas._interdeps

    assert interdeps.dependencies == {
        ParamSpecBase('i', 'numeric', '', ''):
            (ParamSpecBase('x', 'numeric', '', ''),
             ParamSpecBase('y', 'numeric', '', ''),
             ParamSpecBase('z', 'numeric', '', ''))}
    assert interdeps.inferences == {}
    assert interdeps.standalones == set()

def test_add_data_array():
    exps = experiments()
    assert len(exps) == 1
    exp = exps[0]
    assert exp.name == "test-experiment"
    assert exp.sample_name == "test-sample"
    assert exp.last_counter == 0

    idps = InterDependencies_(
        standalones=(ParamSpecBase("x", "numeric"),
                     ParamSpecBase("y", "array")))

    mydataset = new_data_set("test")
    mydataset.set_interdependencies(idps)
    mydataset.mark_started()

    expected_x = []
    expected_y = []
    for x in range(100):
        expected_x.append([x])
        y = np.random.random_sample(10)
        expected_y.append([y])
        mydataset.add_result({"x": x, "y": y})

    shadow_ds = make_shadow_dataset(mydataset)

    assert mydataset.get_data('x') == expected_x
    assert shadow_ds.get_data('x') == expected_x

    y_data = mydataset.get_data('y')
    np.testing.assert_allclose(y_data, expected_y)
    y_data = shadow_ds.get_data('y')
    np.testing.assert_allclose(y_data, expected_y)

def test_set_interdependencies(dataset):
    exps = experiments()
    assert len(exps) == 1
    exp = exps[0]
    assert exp.name == "test-experiment"
    assert exp.sample_name == "test-sample"
    assert exp.last_counter == 1

    parameter_a = ParamSpecBase("a_param", "NUMERIC")
    parameter_b = ParamSpecBase("b_param", "NUMERIC")
    parameter_c = ParamSpecBase("c_param", "array")

    idps = InterDependencies_(
        inferences={parameter_c: (parameter_a, parameter_b)})

    dataset.set_interdependencies(idps)

    # write the parameters to disk
    dataset.mark_started()

    # now retrieve the paramspecs
    shadow_ds = make_shadow_dataset(dataset)
    paramspecs = shadow_ds.paramspecs

    expected_keys = ['a_param', 'b_param', 'c_param']
    keys = sorted(list(paramspecs.keys()))
    assert keys == expected_keys
    for expected_param_name in expected_keys:
        ps = paramspecs[expected_param_name]
        assert ps.name == expected_param_name

    assert paramspecs == dataset.paramspecs

def standalone_parameters_dataset(dataset):
    n_params = 3
    n_rows = 10**3
    params_indep = [
        ParamSpecBase(f'param_{i}', 'numeric', label=f'param_{i}', unit='V')
        for i in range(n_params)
    ]
    param_dep = ParamSpecBase(f'param_{n_params}', 'numeric',
                              label=f'param_{n_params}', unit='Ohm')

    params_all = params_indep + [param_dep]

    idps = InterDependencies_(
        dependencies={param_dep: tuple(params_indep[0:1])},
        standalones=tuple(params_indep[1:]))

    dataset.set_interdependencies(idps)
    dataset.mark_started()
    dataset.add_results([{
        p.name: np.int(n_rows * 10 * pn + i)
        for pn, p in enumerate(params_all)
    } for i in range(n_rows)])
    dataset.mark_completed()
    yield dataset

def dataset_with_outliers_generator(ds, data_offset=5, low_outlier=-3,
                                    high_outlier=1, background_noise=True):
    x = ParamSpecBase('x', 'numeric', label='Flux', unit='e^2/hbar')
    t = ParamSpecBase('t', 'numeric', label='Time', unit='s')
    z = ParamSpecBase('z', 'numeric', label='Majorana number', unit='Anyon')
    idps = InterDependencies_(dependencies={z: (x, t)})
    ds.set_interdependencies(idps)

    ds.mark_started()

    npoints = 50
    xvals = np.linspace(0, 1, npoints)
    tvals = np.linspace(0, 1, npoints)
    for counter, xv in enumerate(xvals):
        if background_noise and (counter < round(npoints / 2.3) or
                                 counter > round(npoints / 1.8)):
            data = np.random.rand(npoints) - data_offset
        else:
            data = xv * np.linspace(0, 1, npoints)
        if counter == round(npoints / 1.9):
            data[round(npoints / 1.9)] = high_outlier
        if counter == round(npoints / 2.1):
            data[round(npoints / 2.5)] = low_outlier
        ds.add_results([{'x': xv, 't': tv, 'z': z}
                        for z, tv in zip(data, tvals)])
    ds.mark_completed()
    return ds

def test_adding_too_many_results():
    """
    Test the "chunking" functionality of the insert_many_values
    function of the sqlite_base module
    """
    dataset = new_data_set("test_adding_too_many_results")
    xparam = ParamSpecBase("x", "numeric", label="x parameter", unit='V')
    yparam = ParamSpecBase("y", 'numeric', label='y parameter', unit='Hz')
    idps = InterDependencies_(dependencies={yparam: (xparam,)})
    dataset.set_interdependencies(idps)
    dataset.mark_started()

    n_max = qc.SQLiteSettings.limits['MAX_VARIABLE_NUMBER']

    vals = np.linspace(0, 1, int(n_max/2)+2)
    results = [{'x': val} for val in vals]
    dataset.add_results(results)

    vals = np.linspace(0, 1, int(n_max/2)+1)
    results = [{'x': val, 'y': val} for val in vals]
    dataset.add_results(results)

    vals = np.linspace(0, 1, n_max*3)
    results = [{'x': val} for val in vals]
    dataset.add_results(results)

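# The test above relies on add_results transparently splitting a large batch
# into several INSERT statements so that no single statement exceeds SQLite's
# MAX_VARIABLE_NUMBER limit. Below is a minimal, self-contained sketch of that
# chunking idea; it is illustrative only, not the qcodes implementation, and
# the helper name _chunk_results is hypothetical.


def _chunk_results(results, values_per_row, max_variables):
    """Yield slices of ``results`` such that each slice binds at most
    ``max_variables`` SQL variables, given ``values_per_row`` variables
    per row."""
    rows_per_chunk = max(1, max_variables // values_per_row)
    for start in range(0, len(results), rows_per_chunk):
        yield results[start:start + rows_per_chunk]


# Ten single-parameter rows with a limit of 4 variables per statement
# end up in chunks of 4, 4 and 2 rows.
assert [len(c) for c in _chunk_results(list(range(10)), 1, 4)] == [4, 4, 2]
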
def test_get_data_by_id_order(dataset):
    """
    Test that the added values of setpoints end up associated with the
    correct setpoint parameter, irrespective of the ordering of those
    setpoint parameters
    """
    indepA = ParamSpecBase('indep1', "numeric")
    indepB = ParamSpecBase('indep2', "numeric")
    depAB = ParamSpecBase('depAB', "numeric")
    depBA = ParamSpecBase('depBA', "numeric")

    idps = InterDependencies_(
        dependencies={depAB: (indepA, indepB), depBA: (indepB, indepA)})
    dataset.set_interdependencies(idps)

    dataset.mark_started()

    dataset.add_result({'depAB': 12, 'indep2': 2, 'indep1': 1})
    dataset.add_result({'depBA': 21, 'indep2': 2, 'indep1': 1})
    dataset.mark_completed()

    data = get_data_by_id(dataset.run_id)
    data_dict = {el['name']: el['data'] for el in data[0]}
    assert data_dict['indep1'] == 1
    assert data_dict['indep2'] == 2

    data_dict = {el['name']: el['data'] for el in data[1]}
    assert data_dict['indep1'] == 1
    assert data_dict['indep2'] == 2

def some_paramspecbases():
    psb1 = ParamSpecBase('psb1', paramtype='text', label='blah', unit='')
    psb2 = ParamSpecBase('psb2', paramtype='array', label='', unit='V')
    psb3 = ParamSpecBase('psb3', paramtype='array', label='', unit='V')
    psb4 = ParamSpecBase('psb4', paramtype='numeric', label='number', unit='')

    return (psb1, psb2, psb3, psb4)

def test_saving_numeric_values_as_text(numeric_type):
    """
    Test that saving numeric values into a 'text' parameter raises an
    exception
    """
    p = ParamSpecBase("p", "text")

    test_set = qc.new_data_set("test-dataset")
    test_set.set_interdependencies(InterDependencies_(standalones=(p,)))
    test_set.mark_started()

    idps = InterDependencies_(standalones=(p,))

    data_saver = DataSaver(dataset=test_set, write_period=0, interdeps=idps)

    try:
        value = numeric_type(2)
        gottype = np.array(value).dtype

        msg = re.escape(f'Parameter {p.name} is of type '
                        f'"{p.type}", but got a result of '
                        f'type {gottype} ({value}).')
        with pytest.raises(ValueError, match=msg):
            data_saver.add_result((p.name, value))
    finally:
        data_saver.dataset.conn.close()

def test_numpy_types():
    """
    Test that we can save numpy types in the data set
    """
    p = ParamSpecBase(name="p", paramtype="numeric")

    test_set = qc.new_data_set("test-dataset")
    test_set.set_interdependencies(InterDependencies_(standalones=(p,)))
    test_set.mark_started()

    idps = InterDependencies_(standalones=(p,))

    data_saver = DataSaver(dataset=test_set, write_period=0, interdeps=idps)

    dtypes = [np.int8, np.int16, np.int32, np.int64,
              np.float16, np.float32, np.float64]

    for dtype in dtypes:
        data_saver.add_result(("p", dtype(2)))
    data_saver.flush_data_to_database()

    data = test_set.get_data("p")
    assert data == [[2] for _ in range(len(dtypes))]

def _register_parameter(self: T, name: str,
                        label: Optional[str],
                        unit: Optional[str],
                        setpoints: Optional[setpoints_type],
                        basis: Optional[setpoints_type],
                        paramtype: str) -> T:
    """
    Update the interdependencies object with a new group
    """
    parameter: Optional[ParamSpecBase]

    try:
        parameter = self._interdeps[name]
    except KeyError:
        parameter = None

    paramspec = ParamSpecBase(name=name,
                              paramtype=paramtype,
                              label=label,
                              unit=unit)

    # We want to allow the registration of the exact same parameter twice,
    # the reason being that e.g. two ArrayParameters could share the same
    # setpoint parameter, which would then be registered along with each
    # dependent (array)parameter
    if parameter is not None and parameter != paramspec:
        raise ValueError("Parameter already registered "
                         "in this Measurement.")

    if setpoints is not None:
        sp_strings = [str(sp) for sp in setpoints]
    else:
        sp_strings = []

    if basis is not None:
        bs_strings = [str(bs) for bs in basis]
    else:
        bs_strings = []

    # get the ParamSpecBases
    depends_on, inf_from = self._paramspecbase_from_strings(name,
                                                            sp_strings,
                                                            bs_strings)

    if depends_on:
        self._interdeps = self._interdeps.extend(
            dependencies={paramspec: depends_on})
    if inf_from:
        self._interdeps = self._interdeps.extend(
            inferences={paramspec: inf_from})
    if not (depends_on or inf_from):
        self._interdeps = self._interdeps.extend(
            standalones=(paramspec,))

    log.info(f'Registered {name} in the Measurement.')

    return self

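# Hedged usage sketch of the extend-based bookkeeping performed by
# _register_parameter above: every registration grows the same
# InterDependencies_ object by one group (a dependency, an inference, or a
# standalone). The parameter names below are illustrative only; ParamSpecBase
# and InterDependencies_ are the same classes used throughout this file.

_x = ParamSpecBase('x', paramtype='numeric', label='x', unit='V')
_y = ParamSpecBase('y', paramtype='numeric', label='y', unit='A')

_interdeps = InterDependencies_()
_interdeps = _interdeps.extend(dependencies={_y: (_x,)})    # y depends on x
_interdeps = _interdeps.extend(
    standalones=(ParamSpecBase('m', 'numeric'),))           # m stands alone

assert _interdeps.dependencies[_y] == (_x,)
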
class TestGetData:
    x = ParamSpecBase("x", paramtype='numeric')
    n_vals = 5
    xvals = list(range(n_vals))
    # this is the format in which data is returned by DataSet.get_data,
    # i.e. "a list of table rows"
    xdata = [[x] for x in xvals]

    @pytest.fixture(autouse=True)
    def ds_with_vals(self, dataset):
        """
        This fixture creates a DataSet with values that are used by all
        the tests in this class
        """
        idps = InterDependencies_(standalones=(self.x,))
        dataset.set_interdependencies(idps)
        dataset.mark_started()
        for xv in self.xvals:
            dataset.add_result({self.x.name: xv})

        return dataset

    @pytest.mark.parametrize(
        ("start", "end", "expected"),
        [
            # test without start and end
            (None, None, xdata),

            # test for start only
            (0, None, xdata),
            (2, None, xdata[(2-1):]),
            (-2, None, xdata),
            (n_vals, None, xdata[(n_vals-1):]),
            (n_vals + 1, None, []),
            (n_vals + 2, None, []),

            # test for end only
            (None, 0, []),
            (None, 2, xdata[:2]),
            (None, -2, []),
            (None, n_vals, xdata),
            (None, n_vals + 1, xdata),
            (None, n_vals + 2, xdata),

            # test for start and end
            (0, 0, []),
            (1, 1, [xdata[1-1]]),
            (2, 1, []),
            (2, 0, []),
            (1, 0, []),
            (n_vals, n_vals, [xdata[n_vals-1]]),
            (n_vals, n_vals - 1, []),
            (2, 4, xdata[(2-1):4]),
        ],
    )
    def test_get_data_with_start_and_end_args(self, ds_with_vals,
                                              start, end, expected):
        assert expected == ds_with_vals.get_data(self.x, start=start, end=end)

def base_version(self) -> ParamSpecBase:
    """
    Return a ParamSpecBase object with the same name, paramtype, label
    and unit as this ParamSpec
    """
    return ParamSpecBase(name=self.name,
                         paramtype=self.type,
                         label=self.label,
                         unit=self.unit)

def test_missing_keys(dataset):
    """
    Test that we can have partial results with keys missing. This is,
    for example, handy for an interleaved 1D and 2D sweep.
    """
    x = ParamSpecBase("x", paramtype='numeric')
    y = ParamSpecBase("y", paramtype='numeric')
    a = ParamSpecBase("a", paramtype='numeric')
    b = ParamSpecBase("b", paramtype='numeric')

    idps = InterDependencies_(dependencies={a: (x,), b: (x, y)})
    dataset.set_interdependencies(idps)

    dataset.mark_started()

    def fa(xv):
        return xv + 1

    def fb(xv, yv):
        return xv + 2 - yv * 3

    results = []
    xvals = [1, 2, 3]
    yvals = [2, 3, 4]

    for xv in xvals:
        results.append({"x": xv, "a": fa(xv)})
        for yv in yvals:
            results.append({"x": xv, "y": yv, "b": fb(xv, yv)})

    dataset.add_results(results)

    assert dataset.get_values("x") == [[r["x"]] for r in results]
    assert dataset.get_values("y") == [[r["y"]] for r in results
                                       if "y" in r]
    assert dataset.get_values("a") == [[r["a"]] for r in results
                                       if "a" in r]
    assert dataset.get_values("b") == [[r["b"]] for r in results
                                       if "b" in r]

    assert dataset.get_setpoints("a")['x'] == [[xv] for xv in xvals]

    tmp = [list(t) for t in zip(*(itertools.product(xvals, yvals)))]
    expected_setpoints = [[[v] for v in vals] for vals in tmp]

    assert dataset.get_setpoints("b")['x'] == expected_setpoints[0]
    assert dataset.get_setpoints("b")['y'] == expected_setpoints[1]

def test_add_data_1d():
    exps = experiments()
    assert len(exps) == 1
    exp = exps[0]
    assert exp.name == "test-experiment"
    assert exp.sample_name == "test-sample"
    assert exp.last_counter == 0

    psx = ParamSpecBase("x", "numeric")
    psy = ParamSpecBase("y", "numeric")

    idps = InterDependencies_(dependencies={psy: (psx,)})

    mydataset = new_data_set("test-dataset")
    mydataset.set_interdependencies(idps)
    mydataset.mark_started()

    expected_x = []
    expected_y = []
    for x in range(100):
        expected_x.append([x])
        y = 3 * x + 10
        expected_y.append([y])
        mydataset.add_result({"x": x, "y": y})

    shadow_ds = make_shadow_dataset(mydataset)

    assert mydataset.get_data('x') == expected_x
    assert mydataset.get_data('y') == expected_y
    assert shadow_ds.get_data('x') == expected_x
    assert shadow_ds.get_data('y') == expected_y

    with pytest.raises(ValueError):
        mydataset.add_result({'y': 500})

    assert mydataset.completed is False
    mydataset.mark_completed()
    assert mydataset.completed is True

    with pytest.raises(CompletedError):
        mydataset.add_result({'y': 500})
    with pytest.raises(CompletedError):
        mydataset.add_result({'x': 5})

def test_numpy_nan(dataset):
    parameter_m = ParamSpecBase("m", "numeric")
    idps = InterDependencies_(standalones=(parameter_m,))
    dataset.set_interdependencies(idps)
    dataset.mark_started()

    data_dict = [{"m": value} for value in [0.0, np.nan, 1.0]]
    dataset.add_results(data_dict)
    retrieved = dataset.get_data("m")

    assert np.isnan(retrieved[1])

def test_basic_subscription(dataset, basic_subscriber):
    xparam = ParamSpecBase(name='x',
                           paramtype='numeric',
                           label='x parameter',
                           unit='V')
    yparam = ParamSpecBase(name='y',
                           paramtype='numeric',
                           label='y parameter',
                           unit='Hz')
    idps = InterDependencies_(dependencies={yparam: (xparam,)})
    dataset.set_interdependencies(idps)
    dataset.mark_started()

    sub_id = dataset.subscribe(basic_subscriber, min_wait=0, min_count=1,
                               state={})

    assert len(dataset.subscribers) == 1
    assert list(dataset.subscribers.keys()) == [sub_id]

    expected_state = {}

    for x in range(10):
        y = -x**2
        dataset.add_result({'x': x, 'y': y})
        expected_state[x+1] = [(x, y)]

        @retry_until_does_not_throw(
            exception_class_to_expect=AssertionError, delay=0, tries=10)
        def assert_expected_state():
            assert dataset.subscribers[sub_id].state == expected_state

        assert_expected_state()

    dataset.unsubscribe(sub_id)

    assert len(dataset.subscribers) == 0
    assert list(dataset.subscribers.keys()) == []

    # Ensure the trigger for the subscriber has been removed from the database
    get_triggers_sql = "SELECT * FROM sqlite_master WHERE TYPE = 'trigger';"
    triggers = atomic_transaction(dataset.conn, get_triggers_sql).fetchall()
    assert len(triggers) == 0

def test_base_version(paramspecs):
    kwargs = paramspecs[0]
    ps = ParamSpec(**kwargs)
    ps_base = ParamSpecBase(name=kwargs['name'],
                            paramtype=kwargs['paramtype'],
                            label=kwargs['label'],
                            unit=kwargs['unit'])

    assert ps.base_version() == ps_base

def test_chain_simple(indep_params):
    px, x, tablex = indep_params["x"]
    py, y, tabley = indep_params["y"]

    sweep_values_x = [0, 1, 2]
    sweep_values_y = [4, 5, 6]

    parameter_sweep = Chain(
        Sweep(x, tablex, lambda: sweep_values_x),
        Sweep(y, tabley, lambda: sweep_values_y)
    )

    meas = SweepMeasurement()
    meas.register_sweep(parameter_sweep)
    interdeps = meas._interdeps

    assert interdeps.dependencies == {}
    assert interdeps.inferences == {}
    assert interdeps.standalones == {
        ParamSpecBase('y', 'numeric', '', ''),
        ParamSpecBase('x', 'numeric', '', '')}

def test_interleave_1d_2d(indep_params, dep_params):
    px, x, tablex = indep_params["x"]
    py, y, tabley = indep_params["y"]
    pi, i, tablei = dep_params["i"]
    pj, j, tablej = dep_params["j"]

    def f(vx):
        return vx ** 2

    pi.get = lambda: f(px())

    def g(vx, vy):
        return vx ** 2 + vy ** 2

    pj.get = lambda: g(px(), py())

    sweep_values_x = [0, 1, 2]
    sweep_values_y = [4, 5, 6]

    sweep_object = Nest(
        Sweep(x, tablex, lambda: sweep_values_x),
        Chain(
            Measure(i, tablei),
            Nest(
                Sweep(y, tabley, lambda: sweep_values_y),
                Measure(j, tablej)
            )
        )
    )

    meas = SweepMeasurement()
    meas.register_sweep(sweep_object)
    interdeps = meas._interdeps

    assert interdeps.dependencies == {
        ParamSpecBase('i', 'numeric', '', ''):
            (ParamSpecBase('x', 'numeric', '', ''),),
        ParamSpecBase('j', 'numeric', '', ''):
            (ParamSpecBase('x', 'numeric', '', ''),
             ParamSpecBase('y', 'numeric', '', ''))}
    assert interdeps.inferences == {}
    assert interdeps.standalones == set()

def test_numpy_inf(dataset):
    """
    Test that we can insert and retrieve numpy inf in the data set
    """
    parameter_m = ParamSpecBase("m", "numeric")
    idps = InterDependencies_(standalones=(parameter_m,))
    dataset.set_interdependencies(idps)
    dataset.mark_started()

    data_dict = [{"m": value} for value in [-np.inf, np.inf]]
    dataset.add_results(data_dict)
    retrieved = dataset.get_data("m")

    assert np.isinf(retrieved).all()

def test_nest_in_chain_2_whatever(indep_params, dep_params):
    px, x, tablex = indep_params["x"]
    pi, i, tablei = dep_params["i"]
    pj, j, tablej = dep_params["j"]

    sweep_values = [0, 1, 2]

    sweep_object = Nest(
        Sweep(x, tablex, lambda: sweep_values),
        Chain(
            Measure(i, tablei)
        )
    )

    meas = SweepMeasurement()
    meas.register_sweep(sweep_object)
    interdeps = meas._interdeps

    assert interdeps.dependencies == {
        ParamSpecBase('i', 'numeric', '', ''):
            (ParamSpecBase('x', 'numeric', '', ''),)}
    assert interdeps.inferences == {}
    assert interdeps.standalones == set()

def test_nest(indep_params, dep_params):
    px, x, tablex = indep_params["x"]
    pi, i, tablei = dep_params["i"]

    def f(value):
        return value**2

    pi.get = lambda: f(px())

    sweep_values = [0, 1, 2]

    nest = Nest(
        Sweep(x, tablex, lambda: sweep_values),
        Measure(i, tablei)
    )

    meas = SweepMeasurement()
    meas.register_sweep(nest)
    interdeps = meas._interdeps

    assert interdeps.dependencies == {
        ParamSpecBase('i', 'numeric', '', ''):
            (ParamSpecBase('x', 'numeric', '', ''),)}
    assert interdeps.inferences == {}
    assert interdeps.standalones == set()

def test_sweep_parameter(indep_params):
    px, x, table = indep_params["x"]
    sweep_values = [0, 1, 2]

    parameter_sweep = Sweep(x, table, lambda: sweep_values)

    meas = SweepMeasurement()
    meas.register_sweep(parameter_sweep)
    interdeps = meas._interdeps

    assert interdeps.dependencies == {}
    assert interdeps.inferences == {}
    assert interdeps.standalones == {ParamSpecBase('x', 'numeric', '', '')}

def test_numpy_floats(dataset):
    """
    Test that we can insert numpy floats in the data set
    """
    float_param = ParamSpecBase('y', 'numeric')
    idps = InterDependencies_(standalones=(float_param,))
    dataset.set_interdependencies(idps)
    dataset.mark_started()

    numpy_floats = [np.float, np.float16, np.float32, np.float64]
    results = [{"y": tp(1.2)} for tp in numpy_floats]
    dataset.add_results(results)

    expected_result = [[tp(1.2)] for tp in numpy_floats]
    assert np.allclose(dataset.get_data("y"), expected_result, atol=1E-8)

def scalar_dataset_with_nulls(dataset):
    """
    A very simple dataset. A scalar is varied, and two parameters are
    measured one by one
    """
    sp = ParamSpecBase('setpoint', 'numeric')
    val1 = ParamSpecBase('first_value', 'numeric')
    val2 = ParamSpecBase('second_value', 'numeric')

    idps = InterDependencies_(dependencies={val1: (sp,), val2: (sp,)})
    dataset.set_interdependencies(idps)

    dataset.mark_started()

    dataset.add_results([{sp.name: 0, val1.name: 1},
                         {sp.name: 0, val2.name: 2}])
    dataset.mark_completed()
    yield dataset

def test_string_via_dataset(experiment):
    """
    Test that we can save text into the database via the DataSet API
    """
    p = ParamSpecBase("p", "text")

    test_set = qc.new_data_set("test-dataset")
    idps = InterDependencies_(standalones=(p,))
    test_set.set_interdependencies(idps)
    test_set.mark_started()

    test_set.add_result({"p": "some text"})
    test_set.mark_completed()

    assert test_set.get_data("p") == [["some text"]]

def test_numpy_ints(dataset):
    """
    Test that we can insert numpy integers in the data set
    """
    xparam = ParamSpecBase('x', 'numeric')
    idps = InterDependencies_(standalones=(xparam,))
    dataset.set_interdependencies(idps)
    dataset.mark_started()

    numpy_ints = [
        np.int, np.int8, np.int16, np.int32, np.int64,
        np.uint, np.uint8, np.uint16, np.uint32, np.uint64
    ]

    results = [{"x": tp(1)} for tp in numpy_ints]
    dataset.add_results(results)
    expected_result = len(numpy_ints) * [[1]]
    assert dataset.get_data("x") == expected_result

def test_string_via_datasaver(experiment):
    """
    Test that we can save text into the database via the DataSaver API
    """
    p = ParamSpecBase(name="p", paramtype="text")

    test_set = qc.new_data_set("test-dataset")
    idps = InterDependencies_(standalones=(p,))
    test_set.set_interdependencies(idps)
    test_set.mark_started()

    idps = InterDependencies_(standalones=(p,))

    data_saver = DataSaver(dataset=test_set, write_period=0, interdeps=idps)

    data_saver.add_result(("p", "some text"))
    data_saver.flush_data_to_database()

    assert test_set.get_data("p") == [["some text"]]

def test_validate_subset(some_paramspecbases):

    ps1, ps2, ps3, ps4 = some_paramspecbases

    idps = InterDependencies_(dependencies={ps1: (ps2, ps3)},
                              inferences={ps2: (ps4,), ps3: (ps4,)})

    idps.validate_subset((ps4,))
    idps.validate_subset((ps2, ps4))
    idps.validate_subset((ps2, ps3, ps4))
    idps.validate_subset(())
    idps.validate_subset([])

    with pytest.raises(DependencyError) as exc_info:
        idps.validate_subset((ps1,))
    assert exc_info.value._param_name == 'psb1'
    assert exc_info.value._missing_params == {'psb2', 'psb3'}

    with pytest.raises(DependencyError) as exc_info:
        idps.validate_subset((ps1, ps2, ps4))
    assert exc_info.value._param_name == 'psb1'
    assert exc_info.value._missing_params == {'psb3'}

    with pytest.raises(InferenceError) as exc_info:
        idps.validate_subset((ps3,))
    assert exc_info.value._param_name == 'psb3'
    assert exc_info.value._missing_params == {'psb4'}

    with pytest.raises(InferenceError) as exc_info:
        idps2 = InterDependencies_(dependencies={ps1: (ps2, ps3)},
                                   inferences={ps3: (ps4,)})
        idps2.validate_subset((ps1, ps2, ps3))
    assert exc_info.value._param_name == 'psb3'
    assert exc_info.value._missing_params == {'psb4'}

    with pytest.raises(ValueError, match='ps42'):
        ps42 = ParamSpecBase('ps42', paramtype='text', label='', unit='it')
        idps.validate_subset((ps2, ps42, ps4))