Example #1
def test_set_interdependencies(dataset):
    exps = experiments()
    assert len(exps) == 1
    exp = exps[0]
    assert exp.name == "test-experiment"
    assert exp.sample_name == "test-sample"
    assert exp.last_counter == 1

    parameter_a = ParamSpecBase("a_param", "NUMERIC")
    parameter_b = ParamSpecBase("b_param", "NUMERIC")
    parameter_c = ParamSpecBase("c_param", "array")

    idps = InterDependencies_(
        inferences={parameter_c: (parameter_a, parameter_b)})

    dataset.set_interdependencies(idps)

    # write the parameters to disk
    dataset.mark_started()

    # Now retrieve the paramspecs

    shadow_ds = make_shadow_dataset(dataset)

    paramspecs = shadow_ds.paramspecs

    expected_keys = ['a_param', 'b_param', 'c_param']
    keys = sorted(list(paramspecs.keys()))
    assert keys == expected_keys
    for expected_param_name in expected_keys:
        ps = paramspecs[expected_param_name]
        assert ps.name == expected_param_name

    assert paramspecs == dataset.paramspecs
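
The snippets in this listing are shown without their import block. A sketch of the names they rely on is given below; the module paths are an assumption and have shifted between QCoDeS versions, and helpers such as make_shadow_dataset and retry_until_does_not_throw come from the QCoDeS test suite itself (pytest and the stdlib modules tempfile, os and itertools are also used further down).

import numpy as np
import qcodes as qc

# NOTE: assumed import paths; they differ between QCoDeS versions.
from qcodes.dataset.data_set import new_data_set
from qcodes.dataset.experiment_container import experiments
from qcodes.dataset.descriptions.param_spec import ParamSpec, ParamSpecBase
from qcodes.dataset.descriptions.dependencies import InterDependencies_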
Example #2
def standalone_parameters_dataset(dataset):
    n_params = 3
    n_rows = 10**3
    params_indep = [
        ParamSpecBase(f'param_{i}', 'numeric', label=f'param_{i}', unit='V')
        for i in range(n_params)
    ]

    param_dep = ParamSpecBase(f'param_{n_params}',
                              'numeric',
                              label=f'param_{n_params}',
                              unit='Ohm')

    params_all = params_indep + [param_dep]

    idps = InterDependencies_(
        dependencies={param_dep: tuple(params_indep[0:1])},
        standalones=tuple(params_indep[1:]))

    dataset.set_interdependencies(idps)

    dataset.mark_started()
    dataset.add_results([{
        p.name: np.int(n_rows * 10 * pn + i)
        for pn, p in enumerate(params_all)
    } for i in range(n_rows)])
    dataset.mark_completed()
    yield dataset
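
The generator functions in this listing (standalone_parameters_dataset above, and scalar_dataset, scalar_dataset_with_nulls and ds_with_vals further down) are pytest fixtures shown without their decorators. A sketch of how such a snippet is presumably declared and consumed; the decorator and the consuming test are assumptions, not part of the listing:

import pytest

@pytest.fixture
def standalone_parameters_dataset(dataset):
    ...  # body as in the example above
    yield dataset

def test_uses_prepared_dataset(standalone_parameters_dataset):
    # pytest injects the prepared DataSet under the fixture's name
    assert standalone_parameters_dataset.run_id > 0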
Example #3
def test_get_data_by_id_order(dataset):
    """
    Test that the added values of setpoints end up associated with the correct
    setpoint parameter, irrespective of the ordering of those setpoint
    parameters
    """
    indepA = ParamSpecBase('indep1', "numeric")
    indepB = ParamSpecBase('indep2', "numeric")
    depAB = ParamSpecBase('depAB', "numeric")
    depBA = ParamSpecBase('depBA', "numeric")

    idps = InterDependencies_(
        dependencies={depAB: (indepA, indepB), depBA: (indepB, indepA)})

    dataset.set_interdependencies(idps)

    dataset.mark_started()

    dataset.add_result({'depAB': 12,
                        'indep2': 2,
                        'indep1': 1})

    dataset.add_result({'depBA': 21,
                        'indep2': 2,
                        'indep1': 1})
    dataset.mark_completed()

    data = get_data_by_id(dataset.run_id)
    data_dict = {el['name']: el['data'] for el in data[0]}
    assert data_dict['indep1'] == 1
    assert data_dict['indep2'] == 2

    data_dict = {el['name']: el['data'] for el in data[1]}
    assert data_dict['indep1'] == 1
    assert data_dict['indep2'] == 2
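
For reference, a sketch of the shape those assertions rely on: get_data_by_id returns one group per dependent parameter, each group being a list of dicts that carry at least a 'name' and a 'data' entry (further keys are omitted here and not asserted on):

data = get_data_by_id(dataset.run_id)
# data[0] -> group for the first dependent parameter (here depAB):
#   [{'name': 'indep1', 'data': ...},
#    {'name': 'indep2', 'data': ...},
#    {'name': 'depAB', 'data': ...}]
# data[1] -> group for depBA, analogously.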
Example #4
def test_write_data_to_text_file_length_exception():
    dataset = new_data_set("dataset")
    xparam = ParamSpecBase("x", 'numeric')
    yparam = ParamSpecBase("y", 'numeric')
    zparam = ParamSpecBase("z", 'numeric')
    idps = InterDependencies_(dependencies={
        yparam: (xparam, ),
        zparam: (xparam, )
    })
    dataset.set_interdependencies(idps)

    dataset.mark_started()
    results1 = [{'x': 0, 'y': 1}]
    results2 = [{'x': 0, 'z': 2}]
    results3 = [{'x': 1, 'z': 3}]
    dataset.add_results(results1)
    dataset.add_results(results2)
    dataset.add_results(results3)
    dataset.mark_completed()

    with tempfile.TemporaryDirectory() as temp_dir, pytest.raises(
            Exception, match='different length'):
        dataset.write_data_to_text_file(path=temp_dir,
                                        single_file=True,
                                        single_file_name='yz')
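
The 'different length' match is expected because the two dependent parameters end up with columns of unequal length; a sketch of what the writer sees, derived from results1-results3 above:

# y depends on x -> one row;  z depends on x -> two rows.
y_rows = [(0, 1)]
z_rows = [(0, 2), (1, 3)]
# single_file=True requires equal-length columns for 'yz', hence the exception.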
Example #5
def test_add_paramspec(dataset):
    exps = experiments()
    assert len(exps) == 1
    exp = exps[0]
    assert exp.name == "test-experiment"
    assert exp.sample_name == "test-sample"
    assert exp.last_counter == 1

    parameter_a = ParamSpec("a_param", "NUMERIC")
    parameter_b = ParamSpec("b_param", "NUMERIC", key="value", number=1)
    parameter_c = ParamSpec("c_param", "array", inferred_from=[parameter_a,
                                                               parameter_b])
    dataset.add_parameter(parameter_a)
    dataset.add_parameter(parameter_b)
    dataset.add_parameter(parameter_c)

    # write the parameters to disk
    dataset.mark_started()

    # Now retrieve the paramspecs

    shadow_ds = make_shadow_dataset(dataset)

    paramspecs = shadow_ds.paramspecs

    expected_keys = ['a_param', 'b_param', 'c_param']
    keys = sorted(list(paramspecs.keys()))
    assert keys == expected_keys
    for expected_param_name in expected_keys:
        ps = paramspecs[expected_param_name]
        assert ps.name == expected_param_name

    assert paramspecs['c_param'].inferred_from == 'a_param, b_param'

    assert paramspecs == dataset.paramspecs
Example #6
def test_adding_too_many_results():
    """
    This test really tests the "chunking" functionality of the
    insert_many_values function of the sqlite_base module
    """
    dataset = new_data_set("test_adding_too_many_results")
    xparam = ParamSpec("x", "numeric", label="x parameter",
                       unit='V')
    yparam = ParamSpec("y", 'numeric', label='y parameter',
                       unit='Hz', depends_on=[xparam])
    dataset.add_parameter(xparam)
    dataset.add_parameter(yparam)
    dataset.mark_started()
    n_max = qc.SQLiteSettings.limits['MAX_VARIABLE_NUMBER']

    vals = np.linspace(0, 1, int(n_max/2)+2)
    results = [{'x': val} for val in vals]
    dataset.add_results(results)

    vals = np.linspace(0, 1, int(n_max/2)+1)
    results = [{'x': val, 'y': val} for val in vals]
    dataset.add_results(results)

    vals = np.linspace(0, 1, n_max*3)
    results = [{'x': val} for val in vals]
    dataset.add_results(results)
Example #7
def test_get_data_by_id_order(dataset):
    """
    Test whether the values of the setpoint/dependent parameters depend on
    the order of the `depends_on` value. This sounds far-fetched but was
    actually the case before #1250.
    """
    indepA = ParamSpec('indep1', "numeric")
    indepB = ParamSpec('indep2', "numeric")
    depAB = ParamSpec('depAB', "numeric", depends_on=[indepA, indepB])
    depBA = ParamSpec('depBA', "numeric", depends_on=[indepB, indepA])
    dataset.add_parameter(indepA)
    dataset.add_parameter(indepB)
    dataset.add_parameter(depAB)
    dataset.add_parameter(depBA)

    dataset.mark_started()

    dataset.add_result({'depAB': 12, 'indep2': 2, 'indep1': 1})

    dataset.add_result({'depBA': 21, 'indep2': 2, 'indep1': 1})
    dataset.mark_completed()

    data = get_data_by_id(dataset.run_id)
    data_dict = {el['name']: el['data'] for el in data[0]}
    assert data_dict['indep1'] == 1
    assert data_dict['indep2'] == 2

    data_dict = {el['name']: el['data'] for el in data[1]}
    assert data_dict['indep1'] == 1
    assert data_dict['indep2'] == 2
Example #8
def test_adding_too_many_results():
    """
    This test really tests the "chunking" functionality of the
    insert_many_values function of the sqlite.query_helpers module
    """
    dataset = new_data_set("test_adding_too_many_results")
    xparam = ParamSpecBase("x", "numeric", label="x parameter",
                           unit='V')
    yparam = ParamSpecBase("y", 'numeric', label='y parameter',
                           unit='Hz')
    idps = InterDependencies_(dependencies={yparam: (xparam,)})
    dataset.set_interdependencies(idps)
    dataset.mark_started()
    n_max = qc.SQLiteSettings.limits['MAX_VARIABLE_NUMBER']

    vals = np.linspace(0, 1, int(n_max/2)+2)
    results = [{'x': val} for val in vals]
    dataset.add_results(results)

    vals = np.linspace(0, 1, int(n_max/2)+1)
    results = [{'x': val, 'y': val} for val in vals]
    dataset.add_results(results)

    vals = np.linspace(0, 1, n_max*3)
    results = [{'x': val} for val in vals]
    dataset.add_results(results)
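
Neither version of this test shows the chunking itself. The sketch below illustrates the idea under the assumption that a single INSERT may bind at most MAX_VARIABLE_NUMBER SQL variables, so the rows have to be split into batches; it is not the actual insert_many_values implementation.

def batched_rows(rows, n_params_per_row, max_variable_number):
    """Yield slices of `rows` small enough that one INSERT stays below the
    SQLite bound-variable limit. Illustrative sketch only."""
    rows_per_batch = max(1, max_variable_number // n_params_per_row)
    for start in range(0, len(rows), rows_per_batch):
        yield rows[start:start + rows_per_batch]

# With one value per row, rows_per_batch equals the limit itself, so the
# 3 * n_max rows in the last add_results call above fit into exactly 3 batches.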
Example #9
def test_numpy_nan(dataset):
    parameter_m = ParamSpec("m", "numeric")
    dataset.add_parameter(parameter_m)
    dataset.mark_started()

    data_dict = [{"m": value} for value in [0.0, np.nan, 1.0]]
    dataset.add_results(data_dict)
    retrieved = dataset.get_data("m")
    assert np.isnan(retrieved[1])
Example #10
def test_numpy_nan(dataset):
    parameter_m = ParamSpecBase("m", "numeric")
    idps = InterDependencies_(standalones=(parameter_m, ))
    dataset.set_interdependencies(idps)
    dataset.mark_started()

    data_dict = [{"m": value} for value in [0.0, np.nan, 1.0]]
    dataset.add_results(data_dict)
    retrieved = dataset.get_data("m")
    assert np.isnan(retrieved[1])
Example #11
    def ds_with_vals(self, dataset):
        """
        This fixture creates a DataSet with values; it is used by all the
        tests in this class
        """
        dataset.add_parameter(self.x)
        dataset.mark_started()
        for xv in self.xvals:
            dataset.add_result({self.x.name: xv})

        return dataset
Example #12
def test_numpy_inf(dataset):
    """
    Test that we can insert and retrieve numpy inf in the data set
    """
    parameter_m = ParamSpec("m", "numeric")
    dataset.add_parameter(parameter_m)
    dataset.mark_started()

    data_dict = [{"m": value} for value in [-np.inf, np.inf]]
    dataset.add_results(data_dict)
    retrieved = dataset.get_data("m")
    assert np.isinf(retrieved).all()
Example #13
    def ds_with_vals(self, dataset):
        """
        This fixture creates a DataSet with values; it is used by all the
        tests in this class
        """
        idps = InterDependencies_(standalones=(self.x, ))
        dataset.set_interdependencies(idps)
        dataset.mark_started()
        for xv in self.xvals:
            dataset.add_result({self.x.name: xv})

        return dataset
Example #14
def test_numpy_inf(dataset):
    """
    Test that we can insert and retrieve numpy inf in the data set
    """
    parameter_m = ParamSpecBase("m", "numeric")
    idps = InterDependencies_(standalones=(parameter_m, ))
    dataset.set_interdependencies(idps)
    dataset.mark_started()

    data_dict = [{"m": value} for value in [-np.inf, np.inf]]
    dataset.add_results(data_dict)
    retrieved = dataset.get_data("m")
    assert np.isinf(retrieved).all()
Example #15
def test_numpy_floats(dataset):
    """
    Test that we can insert numpy floats in the data set
    """
    float_param = ParamSpec('y', 'numeric')
    dataset.add_parameter(float_param)
    dataset.mark_started()

    numpy_floats = [np.float, np.float16, np.float32, np.float64]
    results = [{"y": tp(1.2)} for tp in numpy_floats]
    dataset.add_results(results)
    expected_result = [[tp(1.2)] for tp in numpy_floats]
    assert np.allclose(dataset.get_data("y"), expected_result, atol=1E-8)
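
A caveat when running this on a current environment: np.float (like np.int, used in several other snippets here) was merely an alias for the Python builtin; it was deprecated in NumPy 1.20 and removed in 1.24. A sketch of the same list on a modern NumPy:

import numpy as np

# On NumPy >= 1.24 the alias np.float no longer exists; use the builtin instead.
numpy_floats = [float, np.float16, np.float32, np.float64]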
Example #16
def test_numpy_floats(dataset):
    """
    Test that we can insert numpy floats in the data set
    """
    float_param = ParamSpecBase('y', 'numeric')
    idps = InterDependencies_(standalones=(float_param, ))
    dataset.set_interdependencies(idps)
    dataset.mark_started()

    numpy_floats = [np.float, np.float16, np.float32, np.float64]
    results = [{"y": tp(1.2)} for tp in numpy_floats]
    dataset.add_results(results)
    expected_result = [[tp(1.2)] for tp in numpy_floats]
    assert np.allclose(dataset.get_data("y"), expected_result, atol=1E-8)
Example #17
def test_write_data_to_text_file_save(tmp_path_factory):
    dataset = new_data_set("dataset")
    xparam = ParamSpecBase("x", 'numeric')
    yparam = ParamSpecBase("y", 'numeric')
    idps = InterDependencies_(dependencies={yparam: (xparam, )})
    dataset.set_interdependencies(idps)

    dataset.mark_started()
    results = [{'x': 0, 'y': 1}]
    dataset.add_results(results)
    dataset.mark_completed()

    path = str(tmp_path_factory.mktemp("write_data_to_text_file_save"))
    dataset.write_data_to_text_file(path=path)
    assert os.listdir(path) == ['y.dat']
    with open(os.path.join(path, "y.dat")) as f:
        assert f.readlines() == ['0\t1\n']
Example #18
def test_missing_keys(dataset):
    """
    Test that we can now have partial results with keys missing. This is
    handy, for example, when interleaving a 1D and a 2D sweep.
    """

    x = ParamSpec("x", paramtype='numeric')
    y = ParamSpec("y", paramtype='numeric')
    a = ParamSpec("a", paramtype='numeric', depends_on=[x])
    b = ParamSpec("b", paramtype='numeric', depends_on=[x, y])

    dataset.add_parameter(x)
    dataset.add_parameter(y)
    dataset.add_parameter(a)
    dataset.add_parameter(b)
    dataset.mark_started()

    def fa(xv):
        return xv + 1

    def fb(xv, yv):
        return xv + 2 - yv * 3

    results = []
    xvals = [1, 2, 3]
    yvals = [2, 3, 4]

    for xv in xvals:
        results.append({"x": xv, "a": fa(xv)})
        for yv in yvals:
            results.append({"x": xv, "y": yv, "b": fb(xv, yv)})

    dataset.add_results(results)

    assert dataset.get_values("x") == [[r["x"]] for r in results]
    assert dataset.get_values("y") == [[r["y"]] for r in results if "y" in r]
    assert dataset.get_values("a") == [[r["a"]] for r in results if "a" in r]
    assert dataset.get_values("b") == [[r["b"]] for r in results if "b" in r]

    assert dataset.get_setpoints("a")['x'] == [[xv] for xv in xvals]

    tmp = [list(t) for t in zip(*(itertools.product(xvals, yvals)))]
    expected_setpoints = [[[v] for v in vals] for vals in tmp]

    assert dataset.get_setpoints("b")['x'] == expected_setpoints[0]
    assert dataset.get_setpoints("b")['y'] == expected_setpoints[1]
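
For concreteness, with xvals = [1, 2, 3] and yvals = [2, 3, 4] the itertools.product/zip construction above yields the following setpoint columns for "b":

expected_setpoints = [
    [[1], [1], [1], [2], [2], [2], [3], [3], [3]],  # x setpoints of "b"
    [[2], [3], [4], [2], [3], [4], [2], [3], [4]],  # y setpoints of "b"
]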
Example #19
def test_write_data_to_text_file_save():
    dataset = new_data_set("dataset")
    xparam = ParamSpecBase("x", 'numeric')
    yparam = ParamSpecBase("y", 'numeric')
    idps = InterDependencies_(dependencies={yparam: (xparam, )})
    dataset.set_interdependencies(idps)

    dataset.mark_started()
    results = [{'x': 0, 'y': 1}]
    dataset.add_results(results)
    dataset.mark_completed()

    with tempfile.TemporaryDirectory() as temp_dir:
        dataset.write_data_to_text_file(path=temp_dir)
        assert os.listdir(temp_dir) == ['y.dat']
        with open(os.path.join(temp_dir, "y.dat")) as f:
            assert f.readlines() == ['0\t1\n']
Example #20
def test_numpy_ints(dataset):
    """
    Test that we can insert numpy integers in the data set
    """
    xparam = ParamSpec('x', 'numeric')
    dataset.add_parameter(xparam)
    dataset.mark_started()

    numpy_ints = [
        np.int, np.int8, np.int16, np.int32, np.int64,
        np.uint, np.uint8, np.uint16, np.uint32, np.uint64
    ]

    results = [{"x": tp(1)} for tp in numpy_ints]
    dataset.add_results(results)
    expected_result = len(numpy_ints) * [[1]]
    assert dataset.get_data("x") == expected_result
Example #21
def test_basic_subscription(dataset, basic_subscriber):
    xparam = ParamSpecBase(name='x',
                           paramtype='numeric',
                           label='x parameter',
                           unit='V')
    yparam = ParamSpecBase(name='y',
                           paramtype='numeric',
                           label='y parameter',
                           unit='Hz')
    idps = InterDependencies_(dependencies={yparam: (xparam, )})
    dataset.set_interdependencies(idps)
    dataset.mark_started()

    sub_id = dataset.subscribe(basic_subscriber,
                               min_wait=0,
                               min_count=1,
                               state={})

    assert len(dataset.subscribers) == 1
    assert list(dataset.subscribers.keys()) == [sub_id]

    expected_state = {}

    for x in range(10):
        y = -x**2
        dataset.add_results([{'x': x, 'y': y}])
        expected_state[x + 1] = [(x, y)]

        @retry_until_does_not_throw(exception_class_to_expect=AssertionError,
                                    delay=0,
                                    tries=10)
        def assert_expected_state():
            assert dataset.subscribers[sub_id].state == expected_state

        assert_expected_state()

    dataset.unsubscribe(sub_id)

    assert len(dataset.subscribers) == 0
    assert list(dataset.subscribers.keys()) == []

    # Ensure the trigger for the subscriber has been removed from the database
    get_triggers_sql = "SELECT * FROM sqlite_master WHERE TYPE = 'trigger';"
    triggers = atomic_transaction(dataset.conn, get_triggers_sql).fetchall()
    assert len(triggers) == 0
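
The basic_subscriber fixture is not part of this listing. A minimal sketch of a callback that would produce exactly the expected_state checked above, assuming the (results, length, state) signature QCoDeS passes to subscriber callbacks:

def basic_callback(results, length, state):
    # `results`: the newly added (x, y) rows, `length`: the current number of
    # rows in the dataset, `state`: the dict handed to dataset.subscribe().
    state[length] = list(results)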
Example #22
def test_add_paramspec_one_by_one(dataset):
    exps = experiments()
    assert len(exps) == 1
    exp = exps[0]
    assert exp.name == "test-experiment"
    assert exp.sample_name == "test-sample"
    assert exp.last_counter == 1

    parameters = [
        ParamSpec("a", "NUMERIC"),
        ParamSpec("b", "NUMERIC", key="value", number=1),
        ParamSpec("c", "array")
    ]
    for parameter in parameters:
        dataset.add_parameter(parameter)

    # test that we cannot re-add a parameter that has already been added
    for param in parameters:
        with pytest.raises(ValueError,
                           match=f'Duplicate parameter name: '
                           f'{param.name}'):
            dataset.add_parameter(param)

    dataset.mark_started()
    shadow_ds = make_shadow_dataset(dataset)

    paramspecs = shadow_ds.paramspecs

    expected_keys = ['a', 'b', 'c']
    keys = sorted(list(paramspecs.keys()))
    assert keys == expected_keys
    for expected_param_name in expected_keys:
        ps = paramspecs[expected_param_name]
        assert ps.name == expected_param_name

    assert paramspecs == dataset.paramspecs

    # Test that it is not possible to add any parameter to the dataset
    with pytest.raises(RuntimeError,
                       match='Can not add parameters to a '
                       'DataSet that has been started.'):
        dataset.add_parameter(parameters[0])

    assert len(dataset.paramspecs.keys()) == 3
    assert len(shadow_ds.paramspecs.keys()) == 3
Example #23
def scalar_dataset_with_nulls(dataset):
    """
    A very simple dataset. A scalar is varied, and two parameters are measured
    one by one
    """
    sp = ParamSpec('setpoint', 'numeric')
    val1 = ParamSpec('first_value', 'numeric', depends_on=(sp,))
    val2 = ParamSpec('second_value', 'numeric', depends_on=(sp,))

    for p in [sp, val1, val2]:
        dataset.add_parameter(p)

    dataset.mark_started()

    dataset.add_results([{sp.name: 0, val1.name: 1},
                         {sp.name: 0, val2.name: 2}])
    dataset.mark_completed()
    yield dataset
Example #24
def test_numpy_ints(dataset):
    """
    Test that we can insert numpy integers in the data set
    """
    xparam = ParamSpecBase('x', 'numeric')
    idps = InterDependencies_(standalones=(xparam, ))
    dataset.set_interdependencies(idps)
    dataset.mark_started()

    numpy_ints = [
        np.int, np.int8, np.int16, np.int32, np.int64, np.uint, np.uint8,
        np.uint16, np.uint32, np.uint64
    ]

    results = [{"x": tp(1)} for tp in numpy_ints]
    dataset.add_results(results)
    expected_result = len(numpy_ints) * [[1]]
    assert dataset.get_data("x") == expected_result
Example #25
def test_write_data_to_text_file_name_exception(tmp_path):
    dataset = new_data_set("dataset")
    xparam = ParamSpecBase("x", 'numeric')
    yparam = ParamSpecBase("y", 'numeric')
    zparam = ParamSpecBase("z", 'numeric')
    idps = InterDependencies_(dependencies={
        yparam: (xparam, ),
        zparam: (xparam, )
    })
    dataset.set_interdependencies(idps)

    dataset.mark_started()
    results = [{'x': 0, 'y': 1, 'z': 2}]
    dataset.add_results(results)
    dataset.mark_completed()

    temp_dir = str(tmp_path)
    with pytest.raises(Exception, match='desired file name'):
        dataset.write_data_to_text_file(path=temp_dir,
                                        single_file=True,
                                        single_file_name=None)
Example #26
def scalar_dataset(dataset):
    n_params = 3
    n_rows = 10**3
    params_indep = [ParamSpec(f'param_{i}',
                              'numeric',
                              label=f'param_{i}',
                              unit='V')
                    for i in range(n_params)]
    params = params_indep + [ParamSpec(f'param_{n_params}',
                                       'numeric',
                                       label=f'param_{n_params}',
                                       unit='Ohm',
                                       depends_on=params_indep)]
    for p in params:
        dataset.add_parameter(p)
    dataset.mark_started()
    dataset.add_results([{p.name: np.int(n_rows*10*pn+i)
                          for pn, p in enumerate(params)}
                         for i in range(n_rows)])
    dataset.mark_completed()
    yield dataset
Example #27
def scalar_dataset_with_nulls(dataset):
    """
    A very simple dataset. A scalar is varied, and two parameters are measured
    one by one
    """
    sp = ParamSpecBase('setpoint', 'numeric')
    val1 = ParamSpecBase('first_value', 'numeric')
    val2 = ParamSpecBase('second_value', 'numeric')

    idps = InterDependencies_(dependencies={val1: (sp, ), val2: (sp, )})
    dataset.set_interdependencies(idps)

    dataset.mark_started()

    dataset.add_results([{
        sp.name: 0,
        val1.name: 1
    }, {
        sp.name: 0,
        val2.name: 2
    }])
    dataset.mark_completed()
    yield dataset
Example #28
def test_subscription_from_config(dataset, basic_subscriber):
    """
    This test is similar to `test_basic_subscription`, the only difference
    being that an additional subscriber defined in a config file is added.
    """
    # This string represents the config file in the home directory:
    config = """
    {
        "subscription":{
            "subscribers":{
                "test_subscriber":{
                    "factory": "qcodes.tests.dataset.test_subscribing.MockSubscriber",
                    "factory_kwargs":{
                        "lg": false
                    },
                    "subscription_kwargs":{
                        "min_wait": 0,
                        "min_count": 1,
                        "callback_kwargs": {}
                    }
                }
            }
        }
    }
    """
    # This little dance around the db_location is needed because the dataset
    # fixture creates its database in a temporary directory. Therefore we need
    # to back up the path to that db before switching to the default
    # configuration.
    db_location = qcodes.config.core.db_location
    with default_config(user_config=config):
        qcodes.config.core.db_location = db_location

        assert 'test_subscriber' in qcodes.config.subscription.subscribers

        xparam = ParamSpecBase(name='x',
                               paramtype='numeric',
                               label='x parameter',
                               unit='V')
        yparam = ParamSpecBase(name='y',
                               paramtype='numeric',
                               label='y parameter',
                               unit='Hz')
        idps = InterDependencies_(dependencies={yparam: (xparam,)})
        dataset.set_interdependencies(idps)

        dataset.mark_started()

        sub_id = dataset.subscribe(basic_subscriber, min_wait=0, min_count=1,
                                   state={})
        sub_id_c = dataset.subscribe_from_config('test_subscriber')
        assert len(dataset.subscribers) == 2
        assert list(dataset.subscribers.keys()) == [sub_id, sub_id_c]

        expected_state = {}

        # Here we are only testing 2 to reduce the CI time
        for x in range(2):
            y = -x**2
            dataset.add_result({'x': x, 'y': y})
            expected_state[x+1] = [(x, y)]

            @retry_until_does_not_throw(
                exception_class_to_expect=AssertionError, delay=0, tries=10)
            def assert_expected_state():
                assert dataset.subscribers[sub_id].state == expected_state
                assert dataset.subscribers[sub_id_c].state == expected_state

            assert_expected_state()