Example #1
def test_load_by_X_functions(two_empty_temp_db_connections, some_interdeps):
    """
    Test some different loading functions
    """
    source_conn, target_conn = two_empty_temp_db_connections

    source_path = path_to_dbfile(source_conn)
    target_path = path_to_dbfile(target_conn)

    source_exp1 = Experiment(conn=source_conn)
    source_ds_1_1 = DataSet(conn=source_conn, exp_id=source_exp1.exp_id)

    source_exp2 = Experiment(conn=source_conn)
    source_ds_2_1 = DataSet(conn=source_conn, exp_id=source_exp2.exp_id)

    source_ds_2_2 = DataSet(conn=source_conn,
                            exp_id=source_exp2.exp_id,
                            name="customname")

    for ds in (source_ds_1_1, source_ds_2_1, source_ds_2_2):
        ds.set_interdependencies(some_interdeps[1])
        ds.mark_started()
        ds.add_result({name: 0.0 for name in some_interdeps[1].names})
        ds.mark_completed()

    extract_runs_into_db(source_path, target_path, source_ds_2_2.run_id)

    test_ds = load_by_guid(source_ds_2_2.guid, target_conn)
    assert source_ds_2_2.the_same_dataset_as(test_ds)

    test_ds = load_by_id(1, target_conn)
    assert source_ds_2_2.the_same_dataset_as(test_ds)

    test_ds = load_by_counter(1, 1, target_conn)
    assert source_ds_2_2.the_same_dataset_as(test_ds)
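
The tests in these examples lean on pytest fixtures that are not shown. A minimal sketch of what `two_empty_temp_db_connections` and `some_interdeps` might look like is given below; the parameter names 'ps1'/'ps2' match the ones used throughout, but the actual fixture bodies (and import paths, which vary across QCoDeS versions) live in the QCoDeS test suite and may differ.

import pytest
from qcodes.dataset.descriptions.param_spec import ParamSpecBase
from qcodes.dataset.descriptions.dependencies import InterDependencies_
from qcodes.dataset.sqlite.database import connect

@pytest.fixture
def two_empty_temp_db_connections(tmp_path):
    # Sketch: two independent, freshly initialised database files,
    # each with its own connection; closed again on teardown.
    source_conn = connect(str(tmp_path / 'source.db'))
    target_conn = connect(str(tmp_path / 'target.db'))
    try:
        yield source_conn, target_conn
    finally:
        source_conn.close()
        target_conn.close()

@pytest.fixture
def some_interdeps():
    # Sketch: index [1] is the object the tests use, with 'ps1'
    # depending on 'ps2'.
    ps1 = ParamSpecBase('ps1', paramtype='numeric')
    ps2 = ParamSpecBase('ps2', paramtype='numeric')
    return [InterDependencies_(standalones=(ps1, ps2)),
            InterDependencies_(dependencies={ps1: (ps2,)})]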
Example #2
def test_parent_dataset_links(some_interdeps):
    """
    Test that we can set links and retrieve them when loading the dataset
    """
    links = generate_some_links(3)

    ds = DataSet()

    for link in links:
        link.head = ds.guid

    ds.set_interdependencies(some_interdeps[1])

    ds.parent_dataset_links = links[:2]
    # setting it again/overwriting it should be okay
    ds.parent_dataset_links = links

    ds.mark_started()

    match = re.escape('Can not set parent dataset links on a dataset '
                      'that has been started.')
    with pytest.raises(RuntimeError, match=match):
        ds.parent_dataset_links = links

    ds.add_results([{'ps1': 1, 'ps2': 2}])
    run_id = ds.run_id

    ds_loaded = DataSet(run_id=run_id)

    assert ds_loaded.parent_dataset_links == links
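
`generate_some_links` is another helper not shown here. A hypothetical sketch, assuming the `Link` class from qcodes.dataset.linked_datasets.links and GUID generation from qcodes.dataset.guids:

from qcodes.dataset.guids import generate_guid
from qcodes.dataset.linked_datasets.links import Link

def generate_some_links(n):
    # Sketch: n links with placeholder GUIDs; the test above overwrites
    # each link's head with the GUID of the dataset under test.
    return [Link(head=generate_guid(), tail=generate_guid(),
                 edge_type='test_link')
            for _ in range(n)]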
Example #3
def test_has_attributes_after_init():
    """
    Ensure that all attributes are populated after __init__ in BOTH cases
    (run_id is None / run_id is not None)
    """

    attrs = ['path_to_db', 'conn', '_run_id', 'run_id',
             '_debug', 'subscribers', '_completed', 'name', 'table_name',
             'guid', 'number_of_results', 'counter', 'parameters',
             'paramspecs', 'exp_id', 'exp_name', 'sample_name',
             'run_timestamp_raw', 'completed_timestamp_raw', 'completed',
             'snapshot', 'snapshot_raw']

    path_to_db = get_DB_location()
    ds = DataSet(path_to_db, run_id=None)

    for attr in attrs:
        assert hasattr(ds, attr)
        getattr(ds, attr)

    ds = DataSet(path_to_db, run_id=1)

    for attr in attrs:
        assert hasattr(ds, attr)
        getattr(ds, attr)
Example #4
def test_foreground_twice(empty_temp_db_connection):
    new_experiment("test", "test1", conn=empty_temp_db_connection)
    ds1 = DataSet(conn=empty_temp_db_connection)
    ds1.mark_started(start_bg_writer=False)

    ds2 = DataSet(conn=empty_temp_db_connection)
    ds2.mark_started(start_bg_writer=False)
Example #5
def test_get_description(experiment, some_interdeps):


    ds = DataSet()

    assert ds.run_id == 1

    desc = ds.description
    assert desc == RunDescriber(InterDependencies_())

    ds.set_interdependencies(some_interdeps[1])

    assert ds.description.interdeps == some_interdeps[1]

    # the run description gets written as the dataset is marked as started,
    # so now no description should be stored in the database
    prematurely_loaded_ds = DataSet(run_id=1)
    assert prematurely_loaded_ds.description == RunDescriber(
                                                    InterDependencies_())

    ds.mark_started()

    loaded_ds = DataSet(run_id=1)

    expected_desc = RunDescriber(some_interdeps[1])

    assert loaded_ds.description == expected_desc
Example #6
def test_fix_wrong_run_descriptions():
    v3fixpath = os.path.join(fixturepath, 'db_files', 'version3')

    dbname_old = os.path.join(v3fixpath,
                              'some_runs_without_run_description.db')

    if not os.path.exists(dbname_old):
        pytest.skip("No db-file fixtures found. You can generate test db-files"
                    " using the scripts in the legacy_DB_generation folder")

    with temporarily_copied_DB(dbname_old, debug=False, version=3) as conn:

        assert get_user_version(conn) == 3

        ds1 = DataSet(conn=conn, run_id=1)
        expected_description = ds1.description

        empty_description = RunDescriber(InterDependencies_())

        _fix_wrong_run_descriptions(conn, [1, 2, 3, 4])

        ds2 = DataSet(conn=conn, run_id=2)
        assert expected_description == ds2.description

        ds3 = DataSet(conn=conn, run_id=3)
        assert expected_description == ds3.description

        ds4 = DataSet(conn=conn, run_id=4)
        assert empty_description == ds4.description
Example #7
def test_dependent_parameters():

    pss: List[ParamSpecBase] = []

    for n in range(5):
        pss.append(ParamSpecBase(f'ps{n}', paramtype='numeric'))

    idps = InterDependencies_(dependencies={pss[0]: (pss[1], pss[2])})
    ds = DataSet(specs=idps)
    assert ds.dependent_parameters == (pss[0],)

    idps = InterDependencies_(dependencies={pss[0]: (pss[1], pss[2])},
                              standalones=(pss[3], pss[4]))
    ds = DataSet(specs=idps)
    assert ds.dependent_parameters == (pss[0],)

    idps = InterDependencies_(dependencies={pss[0]: (pss[1], pss[2]),
                                            pss[3]: (pss[4],)})

    ds = DataSet(specs=idps)
    assert ds.dependent_parameters == (pss[0], pss[3])

    idps = InterDependencies_(dependencies={pss[3]: (pss[1], pss[2]),
                                            pss[0]: (pss[4],)})

    ds = DataSet(specs=idps)
    assert ds.dependent_parameters == (pss[3], pss[0])
Example #8
def test_metadata(experiment, request):

    metadata1 = {'number': 1, "string": "Once upon a time..."}
    metadata2 = {'more': 'meta'}

    ds1 = DataSet(metadata=metadata1)
    request.addfinalizer(ds1.conn.close)
    ds2 = DataSet(metadata=metadata2)
    request.addfinalizer(ds2.conn.close)

    assert ds1.run_id == 1
    assert ds1.metadata == metadata1
    assert ds2.run_id == 2
    assert ds2.metadata == metadata2

    loaded_ds1 = DataSet(run_id=1)
    request.addfinalizer(loaded_ds1.conn.close)
    assert loaded_ds1.metadata == metadata1
    loaded_ds2 = DataSet(run_id=2)
    request.addfinalizer(loaded_ds2.conn.close)
    assert loaded_ds2.metadata == metadata2

    badtag = 'lex luthor'
    sorry_metadata = {'superman': 1, badtag: None, 'spiderman': 'two'}

    bad_tag_msg = (f'Tag {badtag} has value None. '
                   ' That is not a valid metadata value!')

    with pytest.raises(RuntimeError,
                       match='Rolling back due to unhandled exception') as e:
        for tag, value in sorry_metadata.items():
            ds1.add_metadata(tag, value)

    assert error_caused_by(e, bad_tag_msg)
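
`error_caused_by` is a small test utility that checks whether a given message appears in the chain of causes of the caught exception. A minimal sketch of the idea (the real helper is in the QCoDeS test utilities and may differ in detail):

def error_caused_by(excinfo, cause):
    # Walk the __cause__ chain of the exception captured by pytest.raises,
    # looking for an exception whose message contains the given string.
    exc = excinfo.value.__cause__
    while exc is not None:
        if cause in str(exc):
            return True
        exc = exc.__cause__
    return False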
Example #9
def test_get_description(some_paramspecs):

    paramspecs = some_paramspecs[2]

    ds = DataSet()

    assert ds.run_id == 1

    desc = ds.description
    assert desc == RunDescriber(InterDependencies())

    ds.add_parameter(paramspecs['ps1'])
    desc = ds.description
    assert desc == RunDescriber(InterDependencies(paramspecs['ps1']))

    ds.add_parameter(paramspecs['ps2'])
    desc = ds.description
    assert desc == RunDescriber(
        InterDependencies(paramspecs['ps1'], paramspecs['ps2']))

    # the run description gets written as the first data point is added,
    # so now no description should be stored in the database
    prematurely_loaded_ds = DataSet(run_id=1)
    assert prematurely_loaded_ds.description == RunDescriber(
        InterDependencies())

    ds.add_result({'ps1': 1, 'ps2': 2})

    loaded_ds = DataSet(run_id=1)

    assert loaded_ds.description == desc
Example #10
def test_get_description(experiment, some_paramspecs):

    paramspecs = some_paramspecs[2]

    ds = DataSet()

    assert ds.run_id == 1

    desc = ds.description
    assert desc == RunDescriber(InterDependencies())

    ds.add_parameter(paramspecs['ps1'])
    desc = ds.description
    assert desc == RunDescriber(InterDependencies(paramspecs['ps1']))

    ds.add_parameter(paramspecs['ps2'])
    desc = ds.description
    assert desc == RunDescriber(InterDependencies(paramspecs['ps1'],
                                                  paramspecs['ps2']))

    # the run description gets written as the dataset is marked as started,
    # so now no description should be stored in the database
    prematurely_loaded_ds = DataSet(run_id=1)
    assert prematurely_loaded_ds.description == RunDescriber(InterDependencies())

    ds.mark_started()

    loaded_ds = DataSet(run_id=1)

    assert loaded_ds.description == desc
Example #11
def test_atomicity(two_empty_temp_db_connections, some_interdeps):
    """
    Test the atomicity of the transaction by extracting and inserting two
    runs where the second one is not completed. The 'not completed' error
    must roll back any changes to the target.
    """
    source_conn, target_conn = two_empty_temp_db_connections

    source_path = path_to_dbfile(source_conn)
    target_path = path_to_dbfile(target_conn)

    # The target file must exist for us to be able to see whether it has
    # changed
    Path(target_path).touch()

    source_exp = Experiment(conn=source_conn)
    source_ds_1 = DataSet(conn=source_conn, exp_id=source_exp.exp_id)
    source_ds_2 = DataSet(conn=source_conn, exp_id=source_exp.exp_id)

    for ds in (source_ds_1, source_ds_2):
        ds.set_interdependencies(some_interdeps[1])
        ds.mark_started()
        ds.add_result({name: 2.1 for name in some_interdeps[1].names})

    # importantly, source_ds_2 is NOT marked as completed
    source_ds_1.mark_completed()

    # now check that the target file is untouched
    with raise_if_file_changed(target_path):
        # although the not completed error is a ValueError, we get the
        # RuntimeError from SQLite
        with pytest.raises(RuntimeError):
            extract_runs_into_db(source_path, target_path, 1, 2)
Example #12
def test_runs_from_different_experiments_raises(two_empty_temp_db_connections,
                                                some_paramspecs):
    """
    Test that inserting runs from multiple experiments raises
    """
    source_conn, target_conn = two_empty_temp_db_connections

    source_path = path_to_dbfile(source_conn)
    target_path = path_to_dbfile(target_conn)

    source_exp_1 = Experiment(conn=source_conn)
    source_exp_2 = Experiment(conn=source_conn)

    # make 5 runs in first experiment

    exp_1_run_ids = []
    for _ in range(5):

        source_dataset = DataSet(conn=source_conn, exp_id=source_exp_1.exp_id)
        exp_1_run_ids.append(source_dataset.run_id)

        for ps in some_paramspecs[2].values():
            source_dataset.add_parameter(ps)

        source_dataset.mark_started()

        for val in range(10):
            source_dataset.add_result(
                {ps.name: val
                 for ps in some_paramspecs[2].values()})
        source_dataset.mark_completed()

    # make 5 runs in second experiment

    exp_2_run_ids = []
    for _ in range(5):

        source_dataset = DataSet(conn=source_conn, exp_id=source_exp_2.exp_id)
        exp_2_run_ids.append(source_dataset.run_id)

        for ps in some_paramspecs[2].values():
            source_dataset.add_parameter(ps)

        source_dataset.mark_started()

        for val in range(10):
            source_dataset.add_result(
                {ps.name: val
                 for ps in some_paramspecs[2].values()})
        source_dataset.mark_completed()

    run_ids = exp_1_run_ids + exp_2_run_ids
    source_exp_ids = np.unique([1, 2])
    matchstring = ('Did not receive runs from a single experiment\\. '
                   f'Got runs from experiments {source_exp_ids}')
    # make the matchstring safe to use as a regexp
    matchstring = matchstring.replace('[', '\\[').replace(']', '\\]')
    with pytest.raises(ValueError, match=matchstring):
        extract_runs_into_db(source_path, target_path, *run_ids)
Example #13
def test_foreground_after_background_raises(empty_temp_db_connection):
    new_experiment("test", "test1", conn=empty_temp_db_connection)
    ds1 = DataSet(conn=empty_temp_db_connection)
    ds1.mark_started(start_bg_writer=True)

    ds2 = DataSet(conn=empty_temp_db_connection)
    with pytest.raises(RuntimeError, match="All datasets written"):
        ds2.mark_started(start_bg_writer=False)
Example #14
def test_the_same_dataset_as(some_paramspecs, experiment):
    paramspecs = some_paramspecs[2]
    ds = DataSet()
    ds.add_parameter(paramspecs['ps1'])
    ds.add_parameter(paramspecs['ps2'])
    ds.add_result({'ps1': 1, 'ps2': 2})

    same_ds_from_load = DataSet(run_id=ds.run_id)
    assert ds.the_same_dataset_as(same_ds_from_load)

    new_ds = DataSet()
    assert not ds.the_same_dataset_as(new_ds)
Example #15
def test_the_same_dataset_as(some_interdeps, experiment):

    ds = DataSet()
    ds.set_interdependencies(some_interdeps[1])
    ds.mark_started()
    ds.add_results([{'ps1': 1, 'ps2': 2}])

    same_ds_from_load = DataSet(run_id=ds.run_id)
    assert ds.the_same_dataset_as(same_ds_from_load)

    new_ds = DataSet()
    assert not ds.the_same_dataset_as(new_ds)
Example #16
def test_foreground_after_background_non_concurrent(empty_temp_db_connection):
    new_experiment("test", "test1", conn=empty_temp_db_connection)
    ds1 = DataSet(conn=empty_temp_db_connection)
    ds1.mark_started(start_bg_writer=True)
    ds1.mark_completed()

    ds2 = DataSet(conn=empty_temp_db_connection)
    ds2.mark_started(start_bg_writer=False)
    ds2.mark_completed()

    ds3 = DataSet(conn=empty_temp_db_connection)
    ds3.mark_started(start_bg_writer=True)
    ds3.mark_completed()
Example #17
def test_load_by_X_functions(two_empty_temp_db_connections, some_paramspecs):
    """
    Test some different loading functions
    """
    source_conn, target_conn = two_empty_temp_db_connections

    source_path = path_to_dbfile(source_conn)
    target_path = path_to_dbfile(target_conn)

    source_exp1 = Experiment(conn=source_conn)
    source_ds_1_1 = DataSet(conn=source_conn, exp_id=source_exp1.exp_id)
    for ps in some_paramspecs[2].values():
        source_ds_1_1.add_parameter(ps)
    source_ds_1_1.mark_started()
    source_ds_1_1.add_result(
        {ps.name: 0.0
         for ps in some_paramspecs[2].values()})
    source_ds_1_1.mark_completed()

    source_exp2 = Experiment(conn=source_conn)
    source_ds_2_1 = DataSet(conn=source_conn, exp_id=source_exp2.exp_id)
    for ps in some_paramspecs[2].values():
        source_ds_2_1.add_parameter(ps)
    source_ds_2_1.mark_started()
    source_ds_2_1.add_result(
        {ps.name: 0.0
         for ps in some_paramspecs[2].values()})
    source_ds_2_1.mark_completed()
    source_ds_2_2 = DataSet(conn=source_conn,
                            exp_id=source_exp2.exp_id,
                            name="customname")
    for ps in some_paramspecs[2].values():
        source_ds_2_2.add_parameter(ps)
    source_ds_2_2.mark_started()
    source_ds_2_2.add_result(
        {ps.name: 0.0
         for ps in some_paramspecs[2].values()})
    source_ds_2_2.mark_completed()

    extract_runs_into_db(source_path, target_path, source_ds_2_2.run_id)

    test_ds = load_by_guid(source_ds_2_2.guid, target_conn)
    assert source_ds_2_2.the_same_dataset_as(test_ds)

    test_ds = load_by_id(1, target_conn)
    assert source_ds_2_2.the_same_dataset_as(test_ds)

    test_ds = load_by_counter(1, 1, target_conn)
    assert source_ds_2_2.the_same_dataset_as(test_ds)
Example #18
def test_load_by_X_functions(two_empty_temp_db_connections,
                             some_interdeps):
    """
    Test some different loading functions
    """
    source_conn, target_conn = two_empty_temp_db_connections

    source_path = path_to_dbfile(source_conn)
    target_path = path_to_dbfile(target_conn)

    source_exp1 = Experiment(conn=source_conn)
    source_ds_1_1 = DataSet(conn=source_conn, exp_id=source_exp1.exp_id)

    source_exp2 = Experiment(conn=source_conn)
    source_ds_2_1 = DataSet(conn=source_conn, exp_id=source_exp2.exp_id)

    source_ds_2_2 = DataSet(conn=source_conn,
                            exp_id=source_exp2.exp_id,
                            name="customname")

    for ds in (source_ds_1_1, source_ds_2_1, source_ds_2_2):
        ds.set_interdependencies(some_interdeps[1])
        ds.mark_started()
        ds.add_results([{name: 0.0 for name in some_interdeps[1].names}])
        ds.mark_completed()

    extract_runs_into_db(source_path, target_path, source_ds_2_2.run_id)
    extract_runs_into_db(source_path, target_path, source_ds_2_1.run_id)
    extract_runs_into_db(source_path, target_path, source_ds_1_1.run_id)

    test_ds = load_by_guid(source_ds_2_2.guid, target_conn)
    assert source_ds_2_2.the_same_dataset_as(test_ds)

    test_ds = load_by_id(1, target_conn)
    assert source_ds_2_2.the_same_dataset_as(test_ds)

    test_ds = load_by_run_spec(captured_run_id=source_ds_2_2.captured_run_id,
                               conn=target_conn)
    assert source_ds_2_2.the_same_dataset_as(test_ds)

    assert source_exp2.exp_id == 2

    # this is now the first run in the db so run_id is 1
    target_run_id = 1
    # and the experiment ids will be interchanged.
    target_exp_id = 1

    test_ds = load_by_counter(target_run_id, target_exp_id, target_conn)
    assert source_ds_2_2.the_same_dataset_as(test_ds)
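
The final assertions hinge on id remapping during extraction: in the target DB the extracted run becomes run 1 of experiment 1, while `captured_run_id` keeps the id the run had in its database of origin. A short illustration reusing the objects from the test above (a sketch, not part of the original test):

# run_id is local to the database the run currently lives in;
# captured_run_id remembers the id from the database of origin.
assert test_ds.run_id == 1
assert test_ds.captured_run_id == source_ds_2_2.run_id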
Example #19
def test_result_table_naming_and_run_id(two_empty_temp_db_connections,
                                        some_paramspecs):
    """
    Check that a correct result table name is given and that a correct run_id
    is assigned
    """
    source_conn, target_conn = two_empty_temp_db_connections

    source_path = path_to_dbfile(source_conn)
    target_path = path_to_dbfile(target_conn)

    source_exp1 = Experiment(conn=source_conn)
    source_ds_1_1 = DataSet(conn=source_conn, exp_id=source_exp1.exp_id)
    for ps in some_paramspecs[2].values():
        source_ds_1_1.add_parameter(ps)
    source_ds_1_1.mark_started()
    source_ds_1_1.add_result(
        {ps.name: 0.0
         for ps in some_paramspecs[2].values()})
    source_ds_1_1.mark_completed()

    source_exp2 = Experiment(conn=source_conn)
    source_ds_2_1 = DataSet(conn=source_conn, exp_id=source_exp2.exp_id)
    for ps in some_paramspecs[2].values():
        source_ds_2_1.add_parameter(ps)
    source_ds_2_1.mark_started()
    source_ds_2_1.add_result(
        {ps.name: 0.0
         for ps in some_paramspecs[2].values()})
    source_ds_2_1.mark_completed()
    source_ds_2_2 = DataSet(conn=source_conn,
                            exp_id=source_exp2.exp_id,
                            name="customname")
    for ps in some_paramspecs[2].values():
        source_ds_2_2.add_parameter(ps)
    source_ds_2_2.mark_started()
    source_ds_2_2.add_result(
        {ps.name: 0.0
         for ps in some_paramspecs[2].values()})
    source_ds_2_2.mark_completed()

    extract_runs_into_db(source_path, target_path, source_ds_2_2.run_id)

    # The target ds ought to have a runs table "customname-1-1"
    # and ought to be the same dataset as its "ancestor"
    target_ds = DataSet(conn=target_conn, run_id=1)

    assert target_ds.table_name == "customname-1-1"
    assert target_ds.the_same_dataset_as(source_ds_2_2)
Example #20
def test_mark_complete_is_deprecated_and_marks_as_completed(experiment):
    """Test that the deprecated `mark_complete` calls `mark_completed`"""
    ds = DataSet()

    with patch.object(ds, 'mark_completed', autospec=True) as mark_completed:
        pytest.deprecated_call(ds.mark_complete)
        mark_completed.assert_called_once()
Example #21
def test_create_dataset_pass_both_connection_and_path_to_db(experiment):
    with pytest.raises(ValueError,
                       match="Received BOTH conn and path_to_db. "
                       "Please provide only one or "
                       "the other."):
        some_valid_connection = experiment.conn
        _ = DataSet(path_to_db="some valid path", conn=some_valid_connection)
Example #22
def get_ds_info(conn: Connection, run_id: int,
                get_structure: bool = True) -> Dict[str, Any]:
    """
    Get some info on a run, in dict form, from a db connection and a run_id.

    If get_structure is True, also return the data structure of that
    dataset (under the key 'structure').
    """
    ds = DataSet(conn=conn, run_id=run_id)

    ret = {}
    ret['experiment'] = ds.exp_name
    ret['sample'] = ds.sample_name

    _complete_ts = ds.completed_timestamp()
    if _complete_ts is not None:
        ret['completed date'] = _complete_ts[:10]
        ret['completed time'] = _complete_ts[11:]
    else:
        ret['completed date'] = ''
        ret['completed time'] = ''

    _start_ts = ds.run_timestamp()
    ret['started date'] = _start_ts[:10]
    ret['started time'] = _start_ts[11:]

    if get_structure:
        ret['structure'] = get_ds_structure(ds)

    ret['records'] = ds.number_of_results

    return ret
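
Hypothetical usage, assuming `conn` is an open connection to a QCoDeS database that contains at least one run:

info = get_ds_info(conn, run_id=1, get_structure=False)
print(info['experiment'], info['sample'], info['records'])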
Example #23
def test_create_dataset_pass_both_connection_and_path_to_db(experiment):
    with pytest.raises(ValueError, match="Both `path_to_db` and `conn` "
                                         "arguments have been passed together "
                                         "with non-None values. This is not "
                                         "allowed."):
        some_valid_connection = experiment.conn
        _ = DataSet(path_to_db="some valid path", conn=some_valid_connection)
Example #24
def test_integration_station_and_measurement(two_empty_temp_db_connections,
                                             inst):
    """
    An integration test where the runs in the source DB file are produced
    with the Measurement object and there is a Station as well
    """
    source_conn, target_conn = two_empty_temp_db_connections
    source_path = path_to_dbfile(source_conn)
    target_path = path_to_dbfile(target_conn)

    source_exp = Experiment(conn=source_conn)

    # Set up measurement scenario
    station = Station(inst)

    meas = Measurement(exp=source_exp, station=station)
    meas.register_parameter(inst.back)
    meas.register_parameter(inst.plunger)
    meas.register_parameter(inst.cutter, setpoints=(inst.back, inst.plunger))

    with meas.run() as datasaver:
        for back_v in [1, 2, 3]:
            for plung_v in [-3, -2.5, 0]:
                datasaver.add_result((inst.back, back_v),
                                     (inst.plunger, plung_v),
                                     (inst.cutter, back_v+plung_v))

    extract_runs_into_db(source_path, target_path, 1)

    target_ds = DataSet(conn=target_conn, run_id=1)

    assert datasaver.dataset.the_same_dataset_as(target_ds)
Example #25
def do_experiment(experiment_name,
                  sweep_object,
                  setup=None,
                  cleanup=None,
                  station=None,
                  live_plot=False):

    if "/" in experiment_name:
        experiment_name, sample_name = experiment_name.split("/")
    else:
        sample_name = None

    try:
        experiment = load_experiment_by_name(experiment_name, sample_name)
    except ValueError:  # experiment does not exist yet
        db_location = qcodes.config["core"]["db_location"]
        DataSet(db_location)
        experiment = new_experiment(experiment_name, sample_name)

    def add_actions(action, callables):
        if callables is None:
            return

        for cabble in np.atleast_1d(callables):
            if not isinstance(cabble, tuple):
                cabble = (cabble, ())

            action(*cabble)

    if live_plot:
        try:
            from plottr.qcodes_dataset import QcodesDatasetSubscriber
            from plottr.tools import start_listener

            start_listener()

        except ImportError:
            warn("Cannot perform live plots, plottr not installed")
            live_plot = False

    meas = SweepMeasurement(exp=experiment, station=station)
    meas.register_sweep(sweep_object)

    add_actions(meas.add_before_run, setup)
    add_actions(meas.add_after_run, cleanup)

    with meas.run() as datasaver:

        if live_plot:
            datasaver.dataset.subscribe(QcodesDatasetSubscriber(
                datasaver.dataset),
                                        state=[],
                                        min_wait=0,
                                        min_count=1)

        for data in sweep_object:
            datasaver.add_result(*data.items())

    return _DataExtractor(datasaver)
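
A hypothetical call, assuming `sweep_object` comes from the sweep tooling this function targets and `my_instrument` is a QCoDeS instrument:

result = do_experiment(
    "transport/sample_A",               # "experiment/sample" is split on "/"
    sweep_object,
    setup=[(my_instrument.reset, ())],  # callables to run before the sweep
    live_plot=False,
)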
Example #26
def test_mark_complete_is_deprecated_and_marks_as_completed(experiment):
    """Test that the deprecated `mark_complete` calls `mark_completed`"""
    ds = DataSet()

    with patch.object(ds, 'mark_completed', autospec=True) as mark_completed:
        with pytest.warns(QCoDeSDeprecationWarning):
            ds.mark_complete()
        mark_completed.assert_called_once()
Example #27
def get_ds_info_from_path(path: str, run_id: int, get_structure: bool = True):
    """
    Convenience function that determines the dataset from `path` and
    `run_id`, then calls `get_ds_info`.
    """

    ds = DataSet(path_to_db=path, run_id=run_id)
    return get_ds_info(ds.conn, run_id, get_structure=get_structure)
Example #28
def process(self, **kw):
    # Reload the dataset and emit new data only if more results have
    # appeared since the last poll.
    if None not in self._pathAndId:
        path, runId = self._pathAndId
        ds = DataSet(path_to_db=path, run_id=runId)
        if ds.number_of_results > self.nLoadedRecords:
            data = ds_to_datadict(ds)
            self.nLoadedRecords = ds.number_of_results
            return dict(dataOut=data)
Example #29
def test_load_by_guid(some_interdeps):
    ds = DataSet()
    ds.set_interdependencies(some_interdeps[1])
    ds.mark_started()
    ds.add_results([{'ps1': 1, 'ps2': 2}])

    loaded_ds = load_by_guid(ds.guid)

    assert loaded_ds.the_same_dataset_as(ds)
Example #30
def test_mark_completed_twice(start_bg_writer):
    """
    Ensure that it's not an error to call mark_completed
    on an already completed dataset.
    """
    ds = DataSet()
    ds.mark_started(start_bg_writer=start_bg_writer)
    ds.mark_completed()
    ds.mark_completed()
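
`start_bg_writer` arrives as a fixture rather than as a plain argument; a plausible sketch is a parametrized fixture so that the test runs once per writer mode (the real definition may differ):

import pytest

@pytest.fixture(params=[True, False])
def start_bg_writer(request):
    # Run the test both with and without the background writer thread.
    return request.param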