def test_old_versions_not_touched(two_empty_temp_db_connections,
                                  some_interdeps):

    source_conn, target_conn = two_empty_temp_db_connections

    target_path = path_to_dbfile(target_conn)
    source_path = path_to_dbfile(source_conn)

    _, new_v = get_db_version_and_newest_available_version(source_path)

    fixturepath = os.path.dirname(qcodes.tests.dataset.__file__)
    fixturepath = os.path.join(fixturepath,
                               'fixtures', 'db_files', 'version2',
                               'some_runs.db')
    if not os.path.exists(fixturepath):
        pytest.skip("No db-file fixtures found. You can generate test db-files"
                    " using the scripts in the legacy_DB_generation folder")

    # First test that we cannot use an old version as source

    with raise_if_file_changed(fixturepath):
        with pytest.warns(UserWarning) as warning:
            extract_runs_into_db(fixturepath, target_path, 1)
            expected_mssg = ('Source DB version is 2, but this '
                             f'function needs it to be in version {new_v}. '
                             'Run this function again with '
                             'upgrade_source_db=True to auto-upgrade '
                             'the source DB file.')
            assert warning[0].message.args[0] == expected_mssg

    # Then test that we cannot use an old version as target

    # first create a run in the new version source
    source_exp = Experiment(conn=source_conn)
    source_ds = DataSet(conn=source_conn, exp_id=source_exp.exp_id)

    source_ds.set_interdependencies(some_interdeps[1])

    source_ds.mark_started()
    source_ds.add_results([{name: 0.0
                            for name in some_interdeps[1].names}])
    source_ds.mark_completed()

    with raise_if_file_changed(fixturepath):
        with pytest.warns(UserWarning) as warning:
            extract_runs_into_db(source_path, fixturepath, 1)
            expected_mssg = ('Target DB version is 2, but this '
                             f'function needs it to be in version {new_v}. '
                             'Run this function again with '
                             'upgrade_target_db=True to auto-upgrade '
                             'the target DB file.')
            assert warning[0].message.args[0] == expected_mssg


def test_getting_db_version(version):

    fixpath = os.path.join(fixturepath, 'db_files', f'version{version}')

    dbname = os.path.join(fixpath, 'empty.db')

    if not os.path.exists(dbname):
        pytest.skip("No db-file fixtures found. You can generate test db-files"
                    " using the scripts in the "
                    "https://github.com/QCoDeS/qcodes_generate_test_db/ repo")

    (db_v, new_v) = get_db_version_and_newest_available_version(dbname)

    assert db_v == version
    assert new_v == LATEST_VERSION
Example #3
def extract_runs_into_db(source_db_path: str,
                         target_db_path: str, *run_ids: int,
                         upgrade_source_db: bool = False,
                         upgrade_target_db: bool = False) -> None:
    """
    Extract a selection of runs into another DB file. All runs must come from
    the same experiment. They will be added to an experiment with the same
    name and ``sample_name`` in the target DB. If such an experiment does not
    exist, it will be created.

    Args:
        source_db_path: Path to the source DB file
        target_db_path: Path to the target DB file. The target DB file will be
          created if it does not exist.
        run_ids: The ``run_id``s of the runs to copy into the target DB file
        upgrade_source_db: If the source DB is found to be in a version that is
          not the newest, should it be upgraded?
        upgrade_target_db: If the target DB is found to be in a version that is
          not the newest, should it be upgraded?
    """
    # Check for versions
    (s_v, new_v) = get_db_version_and_newest_available_version(source_db_path)
    if s_v < new_v and not upgrade_source_db:
        warn(f'Source DB version is {s_v}, but this function needs it to be'
             f' in version {new_v}. Run this function again with '
             'upgrade_source_db=True to auto-upgrade the source DB file.')
        return

    if os.path.exists(target_db_path):
        (t_v, new_v) = get_db_version_and_newest_available_version(target_db_path)
        if t_v < new_v and not upgrade_target_db:
            warn(f'Target DB version is {t_v}, but this function needs it to '
                 f'be in version {new_v}. Run this function again with '
                 'upgrade_target_db=True to auto-upgrade the target DB file.')
            return

    source_conn = connect(source_db_path)

    # Validate that all runs are in the source database
    do_runs_exist = is_run_id_in_database(source_conn, *run_ids)
    if False in do_runs_exist.values():
        source_conn.close()
        non_existing_ids = [rid for rid in run_ids if not do_runs_exist[rid]]
        err_mssg = ("Error: not all run_ids exist in the source database. "
                    "The following run(s) is/are not present: "
                    f"{non_existing_ids}")
        raise ValueError(err_mssg)

    # Validate that all runs are from the same experiment

    source_exp_ids = np.unique(get_exp_ids_from_run_ids(source_conn, run_ids))
    if len(source_exp_ids) != 1:
        source_conn.close()
        raise ValueError('Did not receive runs from a single experiment. '
                         f'Got runs from experiments {source_exp_ids}')

    # Fetch the attributes of the runs' experiment
    # hopefully, this is enough to uniquely identify the experiment
    exp_attrs = get_experiment_attributes_by_exp_id(source_conn, source_exp_ids[0])

    # Massage the target DB file to accommodate the runs
    # (create new experiment if needed)

    target_conn = connect(target_db_path)

    # this function raises if the target DB file has several experiments
    # matching both the name and sample_name

    try:
        with atomic(target_conn) as target_conn:

            target_exp_id = _create_exp_if_needed(target_conn,
                                                  exp_attrs['name'],
                                                  exp_attrs['sample_name'],
                                                  exp_attrs['format_string'],
                                                  exp_attrs['start_time'],
                                                  exp_attrs['end_time'])

            # Finally insert the runs
            for run_id in run_ids:
                _extract_single_dataset_into_db(DataSet(run_id=run_id,
                                                        conn=source_conn),
                                                target_conn,
                                                target_exp_id)
    finally:
        source_conn.close()
        target_conn.close()
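
A minimal usage sketch of the function above; the file names 'experiments.db' and 'subset.db' and the run ids 3, 4 and 7 are hypothetical illustrations, not values taken from the source.

# Hypothetical example: copy runs 3, 4 and 7 (which must all belong to one
# experiment) from 'experiments.db' into 'subset.db', letting the function
# auto-upgrade the source file first if it is in an older schema version.
# The paths and run ids are made up for illustration only.
extract_runs_into_db('experiments.db', 'subset.db', 3, 4, 7,
                     upgrade_source_db=True)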