Code example #1
def test_perform_actual_upgrade_6_to_7():

    fixpath = os.path.join(fixturepath, 'db_files', 'version6')

    db_file = 'some_runs.db'
    dbname_old = os.path.join(fixpath, db_file)

    if not os.path.exists(dbname_old):
        pytest.skip("No db-file fixtures found. You can generate test db-files"
                    " using the scripts in the "
                    "https://github.com/QCoDeS/qcodes_generate_test_db/ repo")

    with temporarily_copied_DB(dbname_old, debug=False, version=6) as conn:
        assert isinstance(conn, ConnectionPlus)
        perform_db_upgrade_6_to_7(conn)
        assert get_user_version(conn) == 7

        no_of_runs_query = "SELECT max(run_id) FROM runs"
        no_of_runs = one(atomic_transaction(conn, no_of_runs_query),
                         'max(run_id)')
        assert no_of_runs == 10

        columns = atomic_transaction(conn,
                                     "PRAGMA table_info(runs)").fetchall()
        col_names = [col['name'] for col in columns]

        assert 'captured_run_id' in col_names
        assert 'captured_counter' in col_names

        for run_id in range(1, no_of_runs + 1):
            ds1 = load_by_id(run_id, conn)
            ds2 = load_by_run_spec(captured_run_id=run_id, conn=conn)

            assert ds1.the_same_dataset_as(ds2)

            assert ds1.run_id == run_id
            assert ds1.run_id == ds1.captured_run_id
            assert ds2.run_id == run_id
            assert ds2.run_id == ds2.captured_run_id

        exp_id = 1
        for counter in range(1, no_of_runs + 1):
            ds1 = load_by_counter(counter, exp_id, conn)
            ds2 = load_by_run_spec(captured_counter=counter, conn=conn)

            assert ds1.the_same_dataset_as(ds2)
            assert ds1.counter == counter
            assert ds1.counter == ds1.captured_counter
            assert ds2.counter == counter
            assert ds2.counter == ds2.captured_counter
Code example #2
File: plotting.py Project: spauka/Qcodes
def plot_by_id(
    run_id: int,
    axes: Optional[Union[matplotlib.axes.Axes,
                         Sequence[matplotlib.axes.Axes]]] = None,
    colorbars: Optional[Union[matplotlib.colorbar.Colorbar,
                              Sequence[matplotlib.colorbar.Colorbar]]] = None,
    rescale_axes: bool = True,
    auto_color_scale: Optional[bool] = None,
    cutoff_percentile: Optional[Union[Tuple[float, float], float]] = None,
    complex_plot_type: str = "real_and_imag",
    complex_plot_phase: str = "radians",
    **kwargs: Any,
) -> AxesTupleList:
    """
    Construct all plots for a given `run_id`. Here `run_id` is an
    alias for `captured_run_id` for historical reasons. See the docs
    of :func:`.load_by_run_spec` for details of loading runs.
    All other arguments are forwarded
    to :func:`.plot_dataset`, see this for more details.
    """

    dataset = load_by_run_spec(captured_run_id=run_id)
    return plot_dataset(dataset, axes, colorbars, rescale_axes,
                        auto_color_scale, cutoff_percentile, complex_plot_type,
                        complex_plot_phase, **kwargs)
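
Usage note: a minimal sketch of calling the function above, assuming a standard QCoDeS setup where plot_by_id is importable from qcodes.dataset.plotting and a run with captured_run_id 42 (a placeholder id) exists in the configured database.

from qcodes.dataset.plotting import plot_by_id

# plot_by_id resolves the id via load_by_run_spec(captured_run_id=42) and
# forwards all other arguments to plot_dataset
axes, colorbars = plot_by_id(42)

# keyword arguments are passed through to plot_dataset, e.g. to keep raw units
axes, colorbars = plot_by_id(42, rescale_axes=False)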
Code example #3
File: GUI_Measureit.py Project: nanophys/MeasureIt
    def export_all_datasets(self):
        directory = QFileDialog.getExistingDirectory(
            self, "Save Data to .csv",
            f'{os.environ["MeasureItHome"]}\\Origin Files\\')
        if len(directory) == 0:
            return

        unsaved_sets = []
        for ds_info in self.datasets:
            try:
                ds = load_by_run_spec(experiment_name=ds_info['exp name'],
                                      sample_name=ds_info['sample name'],
                                      captured_run_id=ds_info['run id'])

                filename = f"{directory}\\{ds.run_id}_{ds.exp_name}_{ds.sample_name}.csv"
                save_to_csv(ds, filename)
            except Exception:
                # use ds_info here: ds may never have been assigned if
                # load_by_run_spec itself raised
                unsaved_sets.append(
                    f"{ds_info['run id']}_{ds_info['exp name']}"
                    f"_{ds_info['sample name']}")

        if len(unsaved_sets) > 0:
            error_text = 'Failed to export the following datasets:\n\n'
            for i, ds in enumerate(unsaved_sets):
                error_text += ds
                if i + 1 != len(unsaved_sets):
                    error_text += ', '
            error_text += '.\n\nThis is possibly due to a file name conflict or due to no data being stored in that run.'
            self.show_error('Error', error_text)
Code example #4
def get_dataset_by_identifier(identifier: Union[int, str]):
    """
        returns a dataset for a given identifier

        identifier (str or int): run_id or guid
    """
    if isinstance(identifier, int):
        dataset = load_by_run_spec(captured_run_id=identifier)
    elif isinstance(identifier, str):
        validate_guid_format(identifier)
        dataset = load_by_guid(identifier)

    return dataset
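
Usage note: a short sketch of the dispatch in the helper above; the run id and guid values are placeholders.

ds = get_dataset_by_identifier(17)    # int -> load_by_run_spec(captured_run_id=17)

guid = 'aaaaaaaa-0000-0000-0000-016b44429432'  # placeholder guid string
ds = get_dataset_by_identifier(guid)  # str -> validate_guid_format, then load_by_guid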
Code example #5
def test_load_by_X_functions(two_empty_temp_db_connections,
                             some_interdeps):
    """
    Test some different loading functions
    """
    source_conn, target_conn = two_empty_temp_db_connections

    source_path = path_to_dbfile(source_conn)
    target_path = path_to_dbfile(target_conn)

    source_exp1 = Experiment(conn=source_conn)
    source_ds_1_1 = DataSet(conn=source_conn, exp_id=source_exp1.exp_id)

    source_exp2 = Experiment(conn=source_conn)
    source_ds_2_1 = DataSet(conn=source_conn, exp_id=source_exp2.exp_id)

    source_ds_2_2 = DataSet(conn=source_conn,
                            exp_id=source_exp2.exp_id,
                            name="customname")

    for ds in (source_ds_1_1, source_ds_2_1, source_ds_2_2):
        ds.set_interdependencies(some_interdeps[1])
        ds.mark_started()
        ds.add_results([{name: 0.0 for name in some_interdeps[1].names}])
        ds.mark_completed()

    extract_runs_into_db(source_path, target_path, source_ds_2_2.run_id)
    extract_runs_into_db(source_path, target_path, source_ds_2_1.run_id)
    extract_runs_into_db(source_path, target_path, source_ds_1_1.run_id)

    test_ds = load_by_guid(source_ds_2_2.guid, target_conn)
    assert source_ds_2_2.the_same_dataset_as(test_ds)

    test_ds = load_by_id(1, target_conn)
    assert source_ds_2_2.the_same_dataset_as(test_ds)

    test_ds = load_by_run_spec(captured_run_id=source_ds_2_2.captured_run_id,
                               conn=target_conn)
    assert source_ds_2_2.the_same_dataset_as(test_ds)

    assert source_exp2.exp_id == 2

    # source_ds_2_2 was extracted first, so it is now the first run in the
    # target db and its run_id there is 1
    target_run_id = 1
    # for the same reason the experiment ids are interchanged in the target
    target_exp_id = 1

    test_ds = load_by_counter(target_run_id, target_exp_id, target_conn)
    assert source_ds_2_2.the_same_dataset_as(test_ds)
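
Usage note: a condensed sketch of the equivalence exercised above, assuming the loaders are importable from qcodes.dataset and that conn is a connection to a database that contains run 1 and in which captured_run_id values are unique.

from qcodes.dataset import load_by_guid, load_by_id, load_by_run_spec

ds_a = load_by_id(1, conn)
ds_b = load_by_run_spec(captured_run_id=ds_a.captured_run_id, conn=conn)
ds_c = load_by_guid(ds_a.guid, conn)

# all three handles refer to the same run
assert ds_a.the_same_dataset_as(ds_b)
assert ds_a.the_same_dataset_as(ds_c)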
Code example #6
def test_perform_actual_upgrade_6_to_newest_add_new_data():
    """
    Insert new runs on top of existing runs upgraded and verify that they
    get the correct captured_run_id and captured_counter
    """
    from qcodes.dataset.measurements import Measurement
    from qcodes.instrument.parameter import Parameter
    import numpy as np

    fixpath = os.path.join(fixturepath, 'db_files', 'version6')

    db_file = 'some_runs.db'
    dbname_old = os.path.join(fixpath, db_file)

    if not os.path.exists(dbname_old):
        pytest.skip("No db-file fixtures found. You can generate test db-files"
                    " using the scripts in the "
                    "https://github.com/QCoDeS/qcodes_generate_test_db/ repo")

    with temporarily_copied_DB(dbname_old, debug=False, version=6) as conn:
        assert isinstance(conn, ConnectionPlus)
        perform_db_upgrade(conn)
        assert get_user_version(conn) >= 7
        no_of_runs_query = "SELECT max(run_id) FROM runs"
        no_of_runs = one(
            atomic_transaction(conn, no_of_runs_query), 'max(run_id)')

        # Now let's insert new runs and ensure that they also get
        # captured_run_id assigned.
        params = []
        for n in range(5):
            params.append(Parameter(f'p{n}', label=f'Parameter {n}',
                                    unit=f'unit {n}', set_cmd=None,
                                    get_cmd=None))

        # Set up an experiment
        exp = new_experiment('some-exp', 'some-sample', conn=conn)
        meas = Measurement(exp=exp)
        meas.register_parameter(params[0])
        meas.register_parameter(params[1])
        meas.register_parameter(params[2], basis=(params[0],))
        meas.register_parameter(params[3], basis=(params[1],))
        meas.register_parameter(params[4], setpoints=(params[2], params[3]))

        # Make a number of identical runs
        for _ in range(10):
            with meas.run() as datasaver:
                for x in np.random.rand(10):
                    for y in np.random.rand(10):
                        z = np.random.rand()
                        datasaver.add_result((params[0], 0),
                                             (params[1], 1),
                                             (params[2], x),
                                             (params[3], y),
                                             (params[4], z))

        no_of_runs_new = one(
            atomic_transaction(conn, no_of_runs_query), 'max(run_id)')
        assert no_of_runs_new == 20

        # check that run_id is equivalent to captured_run_id for new
        # runs
        for run_id in range(no_of_runs, no_of_runs_new + 1):
            ds1 = load_by_id(run_id, conn)
            ds2 = load_by_run_spec(captured_run_id=run_id, conn=conn)

            assert ds1.the_same_dataset_as(ds2)

            assert ds1.run_id == run_id
            assert ds1.run_id == ds1.captured_run_id
            assert ds2.run_id == run_id
            assert ds2.run_id == ds2.captured_run_id

        # we created a new experiment in a db that already contained one, so:
        exp_id = 2

        # check that counter is equivalent to captured_counter for new
        # runs
        for counter in range(1, no_of_runs_new - no_of_runs + 1):
            ds1 = load_by_counter(counter, exp_id, conn)
            # the counter alone is not unique since there are 2 experiments
            with pytest.raises(NameError, match="More than one"
                                                " matching dataset"):
                load_by_run_spec(captured_counter=counter, conn=conn)
            # however, we can supply both the counter and the experiment name
            ds2 = load_by_run_spec(captured_counter=counter,
                                   experiment_name='some-exp',
                                   conn=conn)

            assert ds1.the_same_dataset_as(ds2)
            assert ds1.counter == counter
            assert ds1.counter == ds1.captured_counter
            assert ds2.counter == counter
            assert ds2.counter == ds2.captured_counter
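
Usage note: the disambiguation pattern at the end of the test, stated on its own; conn is the upgraded connection from the test and the counter value is illustrative.

# with two experiments in the database, the counter alone is ambiguous and
# load_by_run_spec raises NameError ("More than one matching dataset");
# supplying experiment_name as well pins the lookup to a single run
ds = load_by_run_spec(captured_counter=3,
                      experiment_name='some-exp',
                      conn=conn)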
Code example #7
def test_load_by_run_spec(empty_temp_db, some_interdeps):
    def create_ds_with_exp_id(exp_id):
        ds = DataSet(exp_id=exp_id)
        ds.set_interdependencies(some_interdeps[1])
        ds.mark_started()
        ds.add_results([{'ps1': 1, 'ps2': 2}])
        return ds

    # create 3 experiments that mix two experiment names and two sample names
    exp_names = ["te1", "te2", "te1"]
    sample_names = ["ts1", "ts2", "ts2"]

    exps = [
        new_experiment(exp_name, sample_name=sample_name)
        for exp_name, sample_name in zip(exp_names, sample_names)
    ]

    created_ds = [create_ds_with_exp_id(exp.exp_id) for exp in exps]

    conn = created_ds[0].conn

    guids = get_guids_from_run_spec(conn=conn)
    assert len(guids) == 3

    # since we are not combining runs from multiple dbs, we can always load by
    # captured_run_id and this is equivalent to load_by_id
    for i in range(1, 4):
        loaded_ds = load_by_run_spec(captured_run_id=i, conn=conn)
        assert loaded_ds.guid == guids[i - 1]
        assert loaded_ds.the_same_dataset_as(created_ds[i - 1])

    # All the datasets have the same captured_counter,
    # so we cannot load by that alone
    guids_cc1 = get_guids_from_run_spec(captured_counter=1, conn=conn)
    assert len(guids_cc1) == 3
    with pytest.raises(NameError, match="More than one matching"):
        load_by_run_spec(captured_counter=1)

    # there are two different experiments with exp name "te1"
    # and thus 2 different datasets with counter=1 and that exp name
    guids_cc1_te1 = get_guids_from_run_spec(captured_counter=1,
                                            experiment_name='te1',
                                            conn=conn)
    assert len(guids_cc1_te1) == 2
    with pytest.raises(NameError, match="More than one matching"):
        load_by_run_spec(captured_counter=1, experiment_name="te1", conn=conn)

    # but for "te2" there is only one
    guids_cc1_te2 = get_guids_from_run_spec(captured_counter=1,
                                            experiment_name='te2',
                                            conn=conn)
    assert len(guids_cc1_te2) == 1
    loaded_ds = load_by_run_spec(captured_counter=1,
                                 experiment_name="te2",
                                 conn=conn)
    assert loaded_ds.guid == guids_cc1_te2[0]
    assert loaded_ds.the_same_dataset_as(created_ds[1])

    # there are two different experiments with sample name "ts2" but
    # different exp names, so the counter is not unique
    guids_cc1_ts2 = get_guids_from_run_spec(captured_counter=1,
                                            sample_name='ts2',
                                            conn=conn)
    assert len(guids_cc1_ts2) == 2
    with pytest.raises(NameError, match="More than one matching"):
        load_by_run_spec(captured_counter=1, sample_name="ts2", conn=conn)

    # but for "ts1" there is only one
    guids_cc1_ts1 = get_guids_from_run_spec(captured_counter=1,
                                            sample_name='ts1',
                                            conn=conn)
    assert len(guids_cc1_ts1) == 1
    loaded_ds = load_by_run_spec(captured_counter=1,
                                 sample_name="ts1",
                                 conn=conn)
    assert loaded_ds.the_same_dataset_as(created_ds[0])
    assert loaded_ds.guid == guids_cc1_ts1[0]

    # we can load all 3 if we are specific.
    for i in range(3):
        loaded_ds = load_by_run_spec(captured_counter=1,
                                     experiment_name=exp_names[i],
                                     sample_name=sample_names[i],
                                     conn=conn)
        assert loaded_ds.the_same_dataset_as(created_ds[i])
        assert loaded_ds.guid == guids[i]

    # load a non-existing run
    with pytest.raises(NameError, match="No run matching"):
        load_by_run_spec(captured_counter=10000, sample_name="ts2", conn=conn)

    empty_guid_list = get_guids_from_run_spec(conn=conn,
                                              experiment_name='nosuchexp')
    assert empty_guid_list == []
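
Usage note: a compact restatement of the rule the test walks through, using the same fixtures ("te1"/"ts1") and connection; captured_counter alone may match several runs, and adding experiment_name and/or sample_name narrows the match until exactly one run remains (otherwise NameError is raised).

# counter 1 exists in all three experiments -> ambiguous
with pytest.raises(NameError, match="More than one matching"):
    load_by_run_spec(captured_counter=1, conn=conn)

# experiment name and sample name together identify a single run here
ds = load_by_run_spec(captured_counter=1,
                      experiment_name="te1",
                      sample_name="ts1",
                      conn=conn)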
Code example #8
def test_copy_datasets_and_add_new(two_empty_temp_db_connections,
                                   some_interdeps):
    """
    Test that new runs get the correct captured_run_id and captured_counter
    when adding on top of a dataset with partial exports
    """
    source_conn, target_conn = two_empty_temp_db_connections

    source_exp_1 = Experiment(conn=source_conn,
                              name='exp1',
                              sample_name='no_sample')
    source_exp_2 = Experiment(conn=source_conn,
                              name='exp2',
                              sample_name='no_sample')
    source_datasets_1 = [
        DataSet(conn=source_conn, exp_id=source_exp_1.exp_id) for i in range(5)
    ]
    source_datasets_2 = [
        DataSet(conn=source_conn, exp_id=source_exp_2.exp_id) for i in range(5)
    ]
    source_datasets = source_datasets_1 + source_datasets_2

    for ds in source_datasets:
        ds.set_interdependencies(some_interdeps[1])
        ds.mark_started()
        ds.add_results([{name: 0.0 for name in some_interdeps[1].names}])
        ds.mark_completed()

    # now let's insert only some of the datasets
    # and verify that the ids and counters are set correctly
    for ds in source_datasets[-3:]:
        extract_runs_into_db(ds.conn.path_to_dbfile,
                             target_conn.path_to_dbfile, ds.run_id)

    loaded_datasets = [
        load_by_run_spec(captured_run_id=i, conn=target_conn)
        for i in range(8, 11)
    ]
    expected_run_ids = [1, 2, 3]
    expected_captured_run_ids = [8, 9, 10]
    expected_counter = [1, 2, 3]
    expected_captured_counter = [3, 4, 5]

    for ds, eri, ecri, ec, ecc in zip(loaded_datasets, expected_run_ids,
                                      expected_captured_run_ids,
                                      expected_counter,
                                      expected_captured_counter):
        assert ds.run_id == eri
        assert ds.captured_run_id == ecri
        assert ds.counter == ec
        assert ds.captured_counter == ecc

    exp = load_experiment_by_name('exp2', conn=target_conn)

    # add additional runs and verify that the ids and counters increase as
    # expected
    new_datasets = [
        DataSet(conn=target_conn, exp_id=exp.exp_id) for i in range(3)
    ]

    for ds in new_datasets:
        ds.set_interdependencies(some_interdeps[1])
        ds.mark_started()
        ds.add_results([{name: 0.0 for name in some_interdeps[1].names}])
        ds.mark_completed()

    expected_run_ids = [4, 5, 6]
    expected_captured_run_ids = [11, 12, 13]
    expected_counter = [4, 5, 6]
    expected_captured_counter = [6, 7, 8]

    for ds, eri, ecri, ec, ecc in zip(new_datasets, expected_run_ids,
                                      expected_captured_run_ids,
                                      expected_counter,
                                      expected_captured_counter):
        assert ds.run_id == eri
        assert ds.captured_run_id == ecri
        assert ds.counter == ec
        assert ds.captured_counter == ecc
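
Usage note: the export step used above in isolation, assuming extract_runs_into_db is importable from qcodes.dataset.database_extract_runs; the file paths are placeholders and target_conn is a hypothetical connection to the target file. The copied run keeps the captured_run_id and captured_counter it had in the source, while receiving fresh run_id and counter values in the target.

from qcodes.dataset.database_extract_runs import extract_runs_into_db

# copy the run whose run_id is 10 in source.db into target.db
extract_runs_into_db('source.db', 'target.db', 10)

# in the target the run is found by its original captured_run_id, even though
# its local run_id may now be, e.g., 1
ds = load_by_run_spec(captured_run_id=10, conn=target_conn)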
Code example #9
def test_combine_runs(two_empty_temp_db_connections, empty_temp_db_connection,
                      some_interdeps):
    """
    Test that datasets that are exported in random order from 2 datasets
    can be reloaded by the original captured_run_id and the experiment
    name.
    """
    source_conn_1, source_conn_2 = two_empty_temp_db_connections
    target_conn = empty_temp_db_connection

    source_1_exp = Experiment(conn=source_conn_1,
                              name='exp1',
                              sample_name='no_sample')
    source_1_datasets = [
        DataSet(conn=source_conn_1, exp_id=source_1_exp.exp_id)
        for i in range(10)
    ]

    source_2_exp = Experiment(conn=source_conn_2,
                              name='exp2',
                              sample_name='no_sample')

    source_2_datasets = [
        DataSet(conn=source_conn_2, exp_id=source_2_exp.exp_id)
        for i in range(10)
    ]

    source_all_datasets = source_1_datasets + source_2_datasets

    shuffled_datasets = source_all_datasets.copy()
    random.shuffle(shuffled_datasets)

    for ds in source_all_datasets:
        ds.set_interdependencies(some_interdeps[1])
        ds.mark_started()
        ds.add_results([{name: 0.0 for name in some_interdeps[1].names}])
        ds.mark_completed()

    # now let's insert all datasets in random order
    for ds in shuffled_datasets:
        extract_runs_into_db(ds.conn.path_to_dbfile,
                             target_conn.path_to_dbfile, ds.run_id)

    for ds in source_all_datasets:
        loaded_ds = load_by_run_spec(captured_run_id=ds.captured_run_id,
                                     experiment_name=ds.exp_name,
                                     conn=target_conn)
        assert ds.the_same_dataset_as(loaded_ds)

    for ds in source_all_datasets:
        loaded_ds = load_by_run_spec(captured_counter=ds.captured_counter,
                                     experiment_name=ds.exp_name,
                                     conn=target_conn)
        assert ds.the_same_dataset_as(loaded_ds)

    # Now test that we generate the correct table for the guids above
    # this could be split out into its own test
    # but the test above has the useful side effect of
    # setting up datasets for this test.
    guids = [ds.guid for ds in source_all_datasets]

    table = generate_dataset_table(guids, conn=target_conn)
    lines = table.split('\n')
    headers = re.split(r'\s+', lines[0].strip())

    cfg = qc.config
    guid_comp = cfg['GUID_components']

    # borrowed fallback logic from generate_guid
    sampleint = guid_comp['sample']
    if sampleint == 0:
        sampleint = int('a' * 8, base=16)

    for i in range(2, len(lines)):
        split_line = re.split(r'\s+', lines[i].strip())
        mydict = {headers[j]: split_line[j] for j in range(len(split_line))}
        ds = load_by_guid(guids[i - 2], conn=target_conn)
        assert ds.captured_run_id == int(mydict['captured_run_id'])
        assert ds.captured_counter == int(mydict['captured_counter'])
        assert ds.exp_name == mydict['experiment_name']
        assert ds.sample_name == mydict['sample_name']
        assert int(mydict['sample_id']) == sampleint
        assert guid_comp['location'] == int(mydict['location'])
        assert guid_comp['work_station'] == int(mydict['work_station'])
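
Usage note: a sketch of the table inspection at the end of the test; as used above, generate_dataset_table returns a plain-text, whitespace-separated table with one row per guid, whose columns include captured_run_id, captured_counter, experiment_name, sample_name and the GUID components (sample_id, location, work_station).

# guids and target_conn as in the test above
table = generate_dataset_table(guids, conn=target_conn)
print(table)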