Example No. 1
def standalone_parameters_dataset(dataset):
    n_params = 3
    n_rows = 10**3
    params_indep = [
        ParamSpecBase(f'param_{i}', 'numeric', label=f'param_{i}', unit='V')
        for i in range(n_params)
    ]

    param_dep = ParamSpecBase(f'param_{n_params}',
                              'numeric',
                              label=f'param_{n_params}',
                              unit='Ohm')

    params_all = params_indep + [param_dep]

    idps = InterDependencies_(
        dependencies={param_dep: tuple(params_indep[0:1])},
        standalones=tuple(params_indep[1:]))

    dataset.set_interdependencies(idps)

    dataset.mark_started()
    dataset.add_results([{
        # np.int was a deprecated alias for the builtin int and has been
        # removed in NumPy 1.24+, so the builtin is used here instead
        p.name: int(n_rows * 10 * pn + i)
        for pn, p in enumerate(params_all)
    } for i in range(n_rows)])
    dataset.mark_completed()
    yield dataset
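
The value formula used in add_results above gives every parameter its own easily recognizable range of values, which makes mix-ups easy to spot when the data is read back. A quick plain-Python check of those ranges (no qcodes needed):

n_params = 3
n_rows = 10**3
# n_rows * 10 * pn + i with i in range(n_rows) puts each parameter in its own band
for pn in range(n_params + 1):
    print(f'param_{pn}: {n_rows * 10 * pn} .. {n_rows * 10 * pn + n_rows - 1}')
# param_0: 0 .. 999
# param_1: 10000 .. 10999
# param_2: 20000 .. 20999
# param_3: 30000 .. 30999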
Example No. 2
def test_get_data_by_id_order(dataset):
    """
    Test that the added values of setpoints end up associated with the correct
    setpoint parameter, irrespective of the ordering of those setpoint
    parameters
    """
    indepA = ParamSpecBase('indep1', "numeric")
    indepB = ParamSpecBase('indep2', "numeric")
    depAB = ParamSpecBase('depAB', "numeric")
    depBA = ParamSpecBase('depBA', "numeric")

    idps = InterDependencies_(dependencies={
        depAB: (indepA, indepB),
        depBA: (indepB, indepA)
    })

    dataset.set_interdependencies(idps)

    dataset.mark_started()

    dataset.add_results([{'depAB': 12, 'indep2': 2, 'indep1': 1}])

    dataset.add_results([{'depBA': 21, 'indep2': 2, 'indep1': 1}])
    dataset.mark_completed()

    data = get_data_by_id(dataset.run_id)
    data_dict = {el['name']: el['data'] for el in data[0]}
    assert data_dict['indep1'] == 1
    assert data_dict['indep2'] == 2

    data_dict = {el['name']: el['data'] for el in data[1]}
    assert data_dict['indep1'] == 1
    assert data_dict['indep2'] == 2
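
For reference, a minimal sketch of the nested structure the dict comprehensions above assume get_data_by_id returns: one outer entry per dependent parameter, each containing one dict per involved parameter. The values below are placeholders matching this test's data; in practice the 'data' entries are array-like, extra keys such as 'unit' or 'label' may be present, and neither the ordering of the inner dicts nor which dependent lands in data[0] is asserted by the test.

data_like = [
    [  # e.g. the depAB tree: its setpoints plus depAB itself
        {'name': 'indep1', 'data': 1},
        {'name': 'indep2', 'data': 2},
        {'name': 'depAB', 'data': 12},
    ],
    [  # e.g. the depBA tree
        {'name': 'indep1', 'data': 1},
        {'name': 'indep2', 'data': 2},
        {'name': 'depBA', 'data': 21},
    ],
]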
Example No. 3
def test_numpy_nan(dataset):
    parameter_m = ParamSpec("m", "numeric")
    dataset.add_parameters([parameter_m])

    data_dict = [{"m": value} for value in [0.0, np.nan, 1.0]]
    dataset.add_results(data_dict)
    retrieved = dataset.get_data("m")
    assert np.isnan(retrieved[1])
Example No. 4
def test_numpy_nan(dataset):
    parameter_m = ParamSpecBase("m", "numeric")
    idps = InterDependencies_(standalones=(parameter_m, ))
    dataset.set_interdependencies(idps)
    dataset.mark_started()

    data_dict = [{"m": value} for value in [0.0, np.nan, 1.0]]
    dataset.add_results(data_dict)
    retrieved = dataset.get_data("m")
    assert np.isnan(retrieved[1])
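
The assertion uses np.isnan rather than == because NaN never compares equal to anything, including itself; a one-line reminder:

import numpy as np

assert np.nan != np.nan   # NaN is unequal even to itself
assert np.isnan(np.nan)   # so isnan is the reliable check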
Example No. 5
    def ds_with_vals(self, dataset):
        """
        This fixture creates a DataSet populated with values; it is used by
        all the tests in this class
        """
        idps = InterDependencies_(standalones=(self.x, ))
        dataset.set_interdependencies(idps)
        dataset.mark_started()
        for xv in self.xvals:
            dataset.add_results([{self.x.name: xv}])

        return dataset
Example No. 6
def test_numpy_floats(dataset):
    """
    Test that we can insert numpy floats in the data set
    """
    float_param = ParamSpec('y', 'numeric')
    dataset.add_parameters([float_param])

    # np.float was an alias for the builtin float and has been removed in
    # NumPy 1.24+, so the builtin is used in its place
    numpy_floats = [float, np.float16, np.float32, np.float64]
    results = [{"y": tp(1.2)} for tp in numpy_floats]
    dataset.add_results(results)
    expected_result = [[tp(1.2)] for tp in numpy_floats]
    assert np.allclose(dataset.get_data("y"), expected_result, atol=1E-8)
Example No. 7
def test_numpy_inf(dataset):
    """
    Test that we can insert and retrieve numpy inf in the data set
    """
    parameter_m = ParamSpec("m", "numeric")
    dataset.add_parameter(parameter_m)
    dataset.mark_started()

    data_dict = [{"m": value} for value in [-np.inf, np.inf]]
    dataset.add_results(data_dict)
    retrieved = dataset.get_data("m")
    assert np.isinf(retrieved).all()
Example No. 8
def test_numpy_inf(dataset):
    """
    Test that we can insert and retrieve numpy inf in the data set
    """
    parameter_m = ParamSpecBase("m", "numeric")
    idps = InterDependencies_(standalones=(parameter_m, ))
    dataset.set_interdependencies(idps)
    dataset.mark_started()

    data_dict = [{"m": value} for value in [-np.inf, np.inf]]
    dataset.add_results(data_dict)
    retrieved = dataset.get_data("m")
    assert np.isinf(retrieved).all()
Example No. 9
def test_numpy_floats(dataset):
    """
    Test that we can insert numpy floats in the data set
    """
    float_param = ParamSpecBase('y', 'numeric')
    idps = InterDependencies_(standalones=(float_param, ))
    dataset.set_interdependencies(idps)
    dataset.mark_started()

    # np.float was an alias for the builtin float and has been removed in
    # NumPy 1.24+, so the builtin is used in its place
    numpy_floats = [float, np.float16, np.float32, np.float64]
    results = [{"y": tp(1.2)} for tp in numpy_floats]
    dataset.add_results(results)
    expected_result = [[tp(1.2)] for tp in numpy_floats]
    assert np.allclose(dataset.get_data("y"), expected_result, atol=1E-8)
Example No. 10
def test_numpy_ints(dataset):
    """
     Test that we can insert numpy integers in the data set
    """
    xparam = ParamSpec('x', 'numeric')
    dataset.add_parameters([xparam])

    # np.int was an alias for the builtin int and has been removed in
    # NumPy 1.24+, so the builtin is used in its place
    numpy_ints = [
        int, np.int8, np.int16, np.int32, np.int64,
        np.uint, np.uint8, np.uint16, np.uint32, np.uint64
    ]

    results = [{"x": tp(1)} for tp in numpy_ints]
    dataset.add_results(results)
    expected_result = len(numpy_ints) * [[1]]
    assert dataset.get_data("x") == expected_result
Example No. 11
def test_write_data_to_text_file_save(tmp_path_factory):
    dataset = new_data_set("dataset")
    xparam = ParamSpecBase("x", 'numeric')
    yparam = ParamSpecBase("y", 'numeric')
    idps = InterDependencies_(dependencies={yparam: (xparam, )})
    dataset.set_interdependencies(idps)

    dataset.mark_started()
    results = [{'x': 0, 'y': 1}]
    dataset.add_results(results)
    dataset.mark_completed()

    path = str(tmp_path_factory.mktemp("write_data_to_text_file_save"))
    dataset.write_data_to_text_file(path=path)
    assert os.listdir(path) == ['y.dat']
    with open(os.path.join(path, "y.dat")) as f:
        assert f.readlines() == ['0\t1\n']
Example No. 12
def test_write_data_to_text_file_save():
    dataset = new_data_set("dataset")
    xparam = ParamSpecBase("x", 'numeric')
    yparam = ParamSpecBase("y", 'numeric')
    idps = InterDependencies_(dependencies={yparam: (xparam, )})
    dataset.set_interdependencies(idps)

    dataset.mark_started()
    results = [{'x': 0, 'y': 1}]
    dataset.add_results(results)
    dataset.mark_completed()

    with tempfile.TemporaryDirectory() as temp_dir:
        dataset.write_data_to_text_file(path=temp_dir)
        assert os.listdir(temp_dir) == ['y.dat']
        with open(os.path.join(temp_dir, "y.dat")) as f:
            assert f.readlines() == ['0\t1\n']
Example No. 13
def test_missing_keys(dataset):
    """
    Test that we can now have partial results with keys missing. This is
    handy, for example, when interleaving a 1D and a 2D sweep.
    """

    x = ParamSpec("x", paramtype='numeric')
    y = ParamSpec("y", paramtype='numeric')
    a = ParamSpec("a", paramtype='numeric', depends_on=[x])
    b = ParamSpec("b", paramtype='numeric', depends_on=[x, y])

    dataset.add_parameter(x)
    dataset.add_parameter(y)
    dataset.add_parameter(a)
    dataset.add_parameter(b)
    dataset.mark_started()

    def fa(xv):
        return xv + 1

    def fb(xv, yv):
        return xv + 2 - yv * 3

    results = []
    xvals = [1, 2, 3]
    yvals = [2, 3, 4]

    for xv in xvals:
        results.append({"x": xv, "a": fa(xv)})
        for yv in yvals:
            results.append({"x": xv, "y": yv, "b": fb(xv, yv)})

    dataset.add_results(results)

    assert dataset.get_values("x") == [[r["x"]] for r in results]
    assert dataset.get_values("y") == [[r["y"]] for r in results if "y" in r]
    assert dataset.get_values("a") == [[r["a"]] for r in results if "a" in r]
    assert dataset.get_values("b") == [[r["b"]] for r in results if "b" in r]

    assert dataset.get_setpoints("a")['x'] == [[xv] for xv in xvals]

    tmp = [list(t) for t in zip(*(itertools.product(xvals, yvals)))]
    expected_setpoints = [[[v] for v in vals] for vals in tmp]

    assert dataset.get_setpoints("b")['x'] == expected_setpoints[0]
    assert dataset.get_setpoints("b")['y'] == expected_setpoints[1]
Example No. 14
def test_basic_subscription(dataset, basic_subscriber):
    xparam = ParamSpecBase(name='x',
                           paramtype='numeric',
                           label='x parameter',
                           unit='V')
    yparam = ParamSpecBase(name='y',
                           paramtype='numeric',
                           label='y parameter',
                           unit='Hz')
    idps = InterDependencies_(dependencies={yparam: (xparam, )})
    dataset.set_interdependencies(idps)
    dataset.mark_started()

    sub_id = dataset.subscribe(basic_subscriber,
                               min_wait=0,
                               min_count=1,
                               state={})

    assert len(dataset.subscribers) == 1
    assert list(dataset.subscribers.keys()) == [sub_id]

    expected_state = {}

    for x in range(10):
        y = -x**2
        dataset.add_results([{'x': x, 'y': y}])
        expected_state[x + 1] = [(x, y)]

        @retry_until_does_not_throw(exception_class_to_expect=AssertionError,
                                    delay=0,
                                    tries=10)
        def assert_expected_state():
            assert dataset.subscribers[sub_id].state == expected_state

        assert_expected_state()

    dataset.unsubscribe(sub_id)

    assert len(dataset.subscribers) == 0
    assert list(dataset.subscribers.keys()) == []

    # Ensure the trigger for the subscriber has been removed from the database
    get_triggers_sql = "SELECT * FROM sqlite_master WHERE TYPE = 'trigger';"
    triggers = atomic_transaction(dataset.conn, get_triggers_sql).fetchall()
    assert len(triggers) == 0
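
retry_until_does_not_throw comes from the qcodes test utilities; because the subscriber delivers results asynchronously, the assertion is simply retried until it stops raising. Below is a hedged sketch of the retry pattern such a helper implements (the real utility's signature and behaviour may differ):

import time
from functools import wraps


def retry_until_does_not_throw_sketch(exception_class_to_expect=AssertionError,
                                      delay=0.0, tries=10):
    """Call the wrapped function repeatedly, swallowing the expected exception,
    until it succeeds or the attempts run out (then the exception propagates)."""
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            for attempt in range(tries):
                try:
                    return func(*args, **kwargs)
                except exception_class_to_expect:
                    if attempt == tries - 1:
                        raise
                    time.sleep(delay)
        return wrapper
    return decorator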
Example No. 15
def test_numpy_ints(dataset):
    """
     Test that we can insert numpy integers in the data set
    """
    xparam = ParamSpecBase('x', 'numeric')
    idps = InterDependencies_(standalones=(xparam, ))
    dataset.set_interdependencies(idps)
    dataset.mark_started()

    # np.int was an alias for the builtin int and has been removed in
    # NumPy 1.24+, so the builtin is used in its place
    numpy_ints = [
        int, np.int8, np.int16, np.int32, np.int64, np.uint, np.uint8,
        np.uint16, np.uint32, np.uint64
    ]

    results = [{"x": tp(1)} for tp in numpy_ints]
    dataset.add_results(results)
    expected_result = len(numpy_ints) * [[1]]
    assert dataset.get_data("x") == expected_result
Example No. 16
def scalar_dataset_with_nulls(dataset):
    """
    A very simple dataset. A scalar is varied, and two parameters are measured
    one by one
    """
    sp = ParamSpec('setpoint', 'numeric')
    val1 = ParamSpec('first_value', 'numeric', depends_on=(sp,))
    val2 = ParamSpec('second_value', 'numeric', depends_on=(sp,))

    for p in [sp, val1, val2]:
        dataset.add_parameter(p)

    dataset.mark_started()

    dataset.add_results([{sp.name: 0, val1.name: 1},
                         {sp.name: 0, val2.name: 2}])
    dataset.mark_completed()
    yield dataset
Example No. 17
def scalar_dataset(dataset):
    n_params = 3
    n_rows = 10**3
    params_indep = [ParamSpec(f'param_{i}',
                              'numeric',
                              label=f'param_{i}',
                              unit='V')
                    for i in range(n_params)]
    params = params_indep + [ParamSpec(f'param_{n_params}',
                                       'numeric',
                                       label=f'param_{n_params}',
                                       unit='Ohm',
                                       depends_on=params_indep)]
    for p in params:
        dataset.add_parameter(p)
    dataset.mark_started()
    # np.int was a deprecated alias for the builtin int and has been removed
    # in NumPy 1.24+, so the builtin is used here instead
    dataset.add_results([{p.name: int(n_rows * 10 * pn + i)
                          for pn, p in enumerate(params)}
                         for i in range(n_rows)])
    dataset.mark_completed()
    yield dataset
Example No. 18
def test_write_data_to_text_file_name_exception():
    dataset = new_data_set("dataset")
    xparam = ParamSpecBase("x", 'numeric')
    yparam = ParamSpecBase("y", 'numeric')
    zparam = ParamSpecBase("z", 'numeric')
    idps = InterDependencies_(dependencies={
        yparam: (xparam, ),
        zparam: (xparam, )
    })
    dataset.set_interdependencies(idps)

    dataset.mark_started()
    results = [{'x': 0, 'y': 1, 'z': 2}]
    dataset.add_results(results)
    dataset.mark_completed()

    with tempfile.TemporaryDirectory() as temp_dir, pytest.raises(
            Exception, match='desired file name'):
        dataset.write_data_to_text_file(path=temp_dir,
                                        single_file=True,
                                        single_file_name=None)
Example No. 19
def test_adding_too_many_results():
    """
    This test really tests the "chunking" functionality of the
    insert_many_values function of the sqlite.query_helpers module
    """
    dataset = new_data_set("test_adding_too_many_results")
    xparam = ParamSpecBase("x", "numeric", label="x parameter",
                           unit='V')
    yparam = ParamSpecBase("y", 'numeric', label='y parameter',
                           unit='Hz')
    idps = InterDependencies_(dependencies={yparam: (xparam,)})
    dataset.set_interdependencies(idps)
    dataset.mark_started()
    n_max = qc.SQLiteSettings.limits['MAX_VARIABLE_NUMBER']

    vals = np.linspace(0, 1, int(n_max/2)+2)
    results = [{'x': val} for val in vals]
    dataset.add_results(results)

    vals = np.linspace(0, 1, int(n_max/2)+1)
    results = [{'x': val, 'y': val} for val in vals]
    dataset.add_results(results)

    vals = np.linspace(0, 1, n_max*3)
    results = [{'x': val} for val in vals]
    dataset.add_results(results)
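
The docstring refers to the "chunking" that insert_many_values does around SQLite's MAX_VARIABLE_NUMBER limit. As a rough sketch of the arithmetic being exercised (the actual helper may split the work differently), each row binds one variable per parameter, so the number of INSERT statements is roughly:

import math


def n_insert_chunks(n_rows, n_params, max_variables):
    # Rough estimate only: each row binds n_params variables, and a single
    # INSERT may bind at most max_variables of them.
    rows_per_chunk = max_variables // n_params
    return math.ceil(n_rows / rows_per_chunk)


# e.g. with the common SQLite limit of 999 variables, the final add_results
# above (n_max * 3 single-parameter rows) needs several separate INSERTs:
print(n_insert_chunks(999 * 3, 1, 999))  # -> 3
print(n_insert_chunks(999 * 3, 2, 999))  # -> 7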
Example No. 20
def test_write_data_to_text_file_length_exception(tmp_path):
    dataset = new_data_set("dataset")
    xparam = ParamSpecBase("x", 'numeric')
    yparam = ParamSpecBase("y", 'numeric')
    zparam = ParamSpecBase("z", 'numeric')
    idps = InterDependencies_(dependencies={
        yparam: (xparam, ),
        zparam: (xparam, )
    })
    dataset.set_interdependencies(idps)

    dataset.mark_started()
    results1 = [{'x': 0, 'y': 1}]
    results2 = [{'x': 0, 'z': 2}]
    results3 = [{'x': 1, 'z': 3}]
    dataset.add_results(results1)
    dataset.add_results(results2)
    dataset.add_results(results3)
    dataset.mark_completed()

    temp_dir = str(tmp_path)
    with pytest.raises(Exception, match='different length'):
        dataset.write_data_to_text_file(path=temp_dir,
                                        single_file=True,
                                        single_file_name='yz')
Example No. 21
def test_adding_too_many_results():
    """
    This test really tests the "chunking" functionality of the
    insert_many_values function of the sqlite_base module
    """
    dataset = new_data_set("test_adding_too_many_results")
    xparam = ParamSpec("x", "numeric", label="x parameter",
                       unit='V')
    yparam = ParamSpec("y", 'numeric', label='y parameter',
                       unit='Hz', depends_on=[xparam])
    dataset.add_parameter(xparam)
    dataset.add_parameter(yparam)
    dataset.mark_started()
    n_max = qc.SQLiteSettings.limits['MAX_VARIABLE_NUMBER']

    vals = np.linspace(0, 1, int(n_max/2)+2)
    results = [{'x': val} for val in vals]
    dataset.add_results(results)

    vals = np.linspace(0, 1, int(n_max/2)+1)
    results = [{'x': val, 'y': val} for val in vals]
    dataset.add_results(results)

    vals = np.linspace(0, 1, n_max*3)
    results = [{'x': val} for val in vals]
    dataset.add_results(results)
Example No. 22
def scalar_dataset_with_nulls(dataset):
    """
    A very simple dataset. A scalar is varied, and two parameters are measured
    one by one
    """
    sp = ParamSpecBase('setpoint', 'numeric')
    val1 = ParamSpecBase('first_value', 'numeric')
    val2 = ParamSpecBase('second_value', 'numeric')

    idps = InterDependencies_(dependencies={val1: (sp, ), val2: (sp, )})
    dataset.set_interdependencies(idps)

    dataset.mark_started()

    dataset.add_results([{
        sp.name: 0,
        val1.name: 1
    }, {
        sp.name: 0,
        val2.name: 2
    }])
    dataset.mark_completed()
    yield dataset
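
Since each result row only supplies some of the parameters, the columns it omits end up as NULLs in the table, which is what gives this fixture its name. A plain-Python illustration of how the two partial rows above map onto table rows:

rows = [
    {'setpoint': 0, 'first_value': 1},
    {'setpoint': 0, 'second_value': 2},
]
columns = ['setpoint', 'first_value', 'second_value']
table = [[row.get(col) for col in columns] for row in rows]
print(table)  # [[0, 1, None], [0, None, 2]]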
Example No. 23
def test_add_parameter_values(dataset):
    n = 2
    m = n + 1

    xparam = ParamSpec('x', 'numeric')
    dataset.add_parameter(xparam)

    x_results = [{'x': x} for x in range(n)]
    dataset.add_results(x_results)

    yparam = ParamSpec("y", "numeric")

    match_str = f'Need to have {n} values but got {m}.'
    match_str = re.escape(match_str)
    with pytest.raises(ValueError, match=match_str):
        pytest.deprecated_call(dataset.add_parameter_values, yparam,
                               [y for y in range(m)])

    yvals = [y for y in range(n)]

    # Unlike what the docstring of the method suggests,
    # `add_parameter_values` does NOT add a new parameter and values for it
    # "NEXT TO the columns of values of existing parameters".
    #
    # In other words, if the initial state of the table is:
    #
    # |   x  |
    # --------
    # |   1  |
    # |   2  |
    #
    # then the state of the table after calling `add_parameter_values` is
    # going to be:
    #
    # |   x  |   y  |
    # ---------------
    # |   1  | NULL |
    # |   2  | NULL |
    # | NULL |  25  |
    # | NULL |  42  |
    #
    # while the docstring suggests the following state:
    #
    # |   x  |   y  |
    # ---------------
    # |   1  |  25  |
    # |   2  |  42  |
    #

    y_expected = [[None]] * n + [[y] for y in yvals]
    pytest.deprecated_call(dataset.add_parameter_values, yparam, yvals)

    shadow_ds = make_shadow_dataset(dataset)

    try:
        assert y_expected == dataset.get_data(yparam)
        assert y_expected == shadow_ds.get_data(yparam)

        dataset.mark_complete()

        # results added after completion should still be written and be
        # visible through the shadow dataset's separate connection
        dataset.add_results([{yparam.name: -2}])
        y_expected_2 = y_expected + [[-2]]

        assert y_expected_2 == dataset.get_data(yparam)
        assert y_expected_2 == shadow_ds.get_data(yparam)

    finally:
        shadow_ds.conn.close()
Example No. 24
def test_subscription_from_config(dataset, basic_subscriber):
    """
    This test is similar to `test_basic_subscription`; the only difference
    is that an additional subscriber is added from a config file.
    """
    # This string represents the config file in the home directory:
    config = """
    {
        "subscription":{
            "subscribers":{
                "test_subscriber":{
                    "factory": "qcodes.tests.dataset.test_subscribing.MockSubscriber",
                    "factory_kwargs":{
                        "lg": false
                    },
                    "subscription_kwargs":{
                        "min_wait": 0,
                        "min_count": 1,
                        "callback_kwargs": {}
                    }
                }
            }
        }
    }
    """
    # This little dance around db_location is needed because the dataset
    # fixture creates its dataset in a db inside a temporary directory, so we
    # have to back up the path to that db before switching to the default
    # configuration.
    db_location = qcodes.config.core.db_location
    with default_config(user_config=config):
        qcodes.config.core.db_location = db_location

        assert 'test_subscriber' in qcodes.config.subscription.subscribers

        xparam = ParamSpecBase(name='x',
                               paramtype='numeric',
                               label='x parameter',
                               unit='V')
        yparam = ParamSpecBase(name='y',
                               paramtype='numeric',
                               label='y parameter',
                               unit='Hz')
        idps = InterDependencies_(dependencies={yparam: (xparam, )})
        dataset.set_interdependencies(idps)

        dataset.mark_started()

        sub_id = dataset.subscribe(basic_subscriber,
                                   min_wait=0,
                                   min_count=1,
                                   state={})
        sub_id_c = dataset.subscribe_from_config('test_subscriber')
        assert len(dataset.subscribers) == 2
        assert list(dataset.subscribers.keys()) == [sub_id, sub_id_c]

        expected_state = {}

        # Here we are only testing 2 to reduce the CI time
        for x in range(2):
            y = -x**2
            dataset.add_results([{'x': x, 'y': y}])
            expected_state[x + 1] = [(x, y)]

            @retry_until_does_not_throw(
                exception_class_to_expect=AssertionError, tries=10)
            def assert_expected_state():
                assert dataset.subscribers[sub_id].state == expected_state
                assert dataset.subscribers[sub_id_c].state == expected_state

            assert_expected_state()