def test_load_by_counter():
    """
    A dataset loaded by (counter, exp_id) must reflect the completion
    state of the underlying run, before and after mark_complete().
    """
    exp = new_experiment(name="for_loading", sample_name="no_sample")
    ds = new_data_set("my_first_ds")

    # NOTE(review): load_by_counter takes the run counter first, then the
    # experiment id — every other call site in this file uses that order.
    # The original call passed exp.exp_id first; it only worked because
    # both the counter and the exp_id are 1 in a fresh database.
    loaded_ds = load_by_counter(1, exp.exp_id)
    assert loaded_ds.completed is False

    ds.mark_complete()

    loaded_ds = load_by_counter(1, exp.exp_id)
    assert loaded_ds.completed is True
def test_load_by_X_functions(two_empty_temp_db_connections, some_interdeps):
    """
    Test some different loading functions
    """
    source_conn, target_conn = two_empty_temp_db_connections
    source_path = path_to_dbfile(source_conn)
    target_path = path_to_dbfile(target_conn)

    # Two experiments in the source db; the second one holds two runs,
    # the last of which has a custom name.
    source_exp1 = Experiment(conn=source_conn)
    source_ds_1_1 = DataSet(conn=source_conn, exp_id=source_exp1.exp_id)
    source_exp2 = Experiment(conn=source_conn)
    source_ds_2_1 = DataSet(conn=source_conn, exp_id=source_exp2.exp_id)
    source_ds_2_2 = DataSet(conn=source_conn,
                            exp_id=source_exp2.exp_id,
                            name="customname")

    # Give every source run a single all-zero result row and complete it.
    for dataset in (source_ds_1_1, source_ds_2_1, source_ds_2_2):
        dataset.set_interdependencies(some_interdeps[1])
        dataset.mark_started()
        dataset.add_result({name: 0.0 for name in some_interdeps[1].names})
        dataset.mark_completed()

    extract_runs_into_db(source_path, target_path, source_ds_2_2.run_id)

    # The extracted run must be retrievable by guid, by id and by counter.
    loaded = load_by_guid(source_ds_2_2.guid, target_conn)
    assert source_ds_2_2.the_same_dataset_as(loaded)

    loaded = load_by_id(1, target_conn)
    assert source_ds_2_2.the_same_dataset_as(loaded)

    loaded = load_by_counter(1, 1, target_conn)
    assert source_ds_2_2.the_same_dataset_as(loaded)
def data_set(self, counter: int) -> DataSet:
    """
    Get dataset with the specified counter from this experiment

    Args:
        counter: the counter (per-experiment run number) of the
            dataset we want to load

    Returns:
        the dataset
    """
    # load_by_counter expects the counter first, then the experiment id.
    return load_by_counter(counter, self.exp_id)
def test_perform_actual_upgrade_6_to_7():
    """
    Upgrade a version-6 fixture db to version 7 and verify that the
    captured_run_id / captured_counter columns appear and agree with
    run_id / counter for all pre-existing runs.
    """
    fixpath = os.path.join(fixturepath, 'db_files', 'version6')
    db_file = 'some_runs.db'
    dbname_old = os.path.join(fixpath, db_file)

    if not os.path.exists(dbname_old):
        pytest.skip("No db-file fixtures found. You can generate test db-files"
                    " using the scripts in the "
                    "https://github.com/QCoDeS/qcodes_generate_test_db/ repo")

    with temporarily_copied_DB(dbname_old, debug=False, version=6) as conn:
        assert isinstance(conn, ConnectionPlus)
        perform_db_upgrade_6_to_7(conn)
        assert get_user_version(conn) == 7

        no_of_runs_query = "SELECT max(run_id) FROM runs"
        no_of_runs = one(atomic_transaction(conn, no_of_runs_query),
                         'max(run_id)')
        assert no_of_runs == 10

        # The upgrade must have added the two new columns.
        columns = atomic_transaction(conn,
                                     "PRAGMA table_info(runs)").fetchall()
        col_names = [col['name'] for col in columns]
        assert 'captured_run_id' in col_names
        assert 'captured_counter' in col_names

        # For already-existing runs, run_id == captured_run_id.
        for run_id in range(1, no_of_runs + 1):
            ds1 = load_by_id(run_id, conn)
            ds2 = load_by_run_spec(captured_run_id=run_id, conn=conn)

            assert ds1.the_same_dataset_as(ds2)

            assert ds1.run_id == run_id
            assert ds1.run_id == ds1.captured_run_id
            assert ds2.run_id == run_id
            assert ds2.run_id == ds2.captured_run_id

        # Likewise counter == captured_counter (single experiment in db).
        exp_id = 1
        for counter in range(1, no_of_runs + 1):
            ds1 = load_by_counter(counter, exp_id, conn)
            ds2 = load_by_run_spec(captured_counter=counter, conn=conn)

            assert ds1.the_same_dataset_as(ds2)

            assert ds1.counter == counter
            assert ds1.counter == ds1.captured_counter
            assert ds2.counter == counter
            assert ds2.counter == ds2.captured_counter
def test_load_by_X_functions(two_empty_temp_db_connections, some_interdeps):
    """
    Test some different loading functions
    """
    source_conn, target_conn = two_empty_temp_db_connections
    source_path = path_to_dbfile(source_conn)
    target_path = path_to_dbfile(target_conn)

    source_exp1 = Experiment(conn=source_conn)
    source_ds_1_1 = DataSet(conn=source_conn, exp_id=source_exp1.exp_id)
    source_exp2 = Experiment(conn=source_conn)
    source_ds_2_1 = DataSet(conn=source_conn, exp_id=source_exp2.exp_id)
    source_ds_2_2 = DataSet(conn=source_conn,
                            exp_id=source_exp2.exp_id,
                            name="customname")

    # Fill and complete each source run with one all-zero result row.
    for dataset in (source_ds_1_1, source_ds_2_1, source_ds_2_2):
        dataset.set_interdependencies(some_interdeps[1])
        dataset.mark_started()
        dataset.add_results([{name: 0.0 for name in some_interdeps[1].names}])
        dataset.mark_completed()

    # Extract in reverse creation order, so ids in the target db end up
    # interchanged relative to the source db.
    extract_runs_into_db(source_path, target_path, source_ds_2_2.run_id)
    extract_runs_into_db(source_path, target_path, source_ds_2_1.run_id)
    extract_runs_into_db(source_path, target_path, source_ds_1_1.run_id)

    loaded = load_by_guid(source_ds_2_2.guid, target_conn)
    assert source_ds_2_2.the_same_dataset_as(loaded)

    loaded = load_by_id(1, target_conn)
    assert source_ds_2_2.the_same_dataset_as(loaded)

    loaded = load_by_run_spec(captured_run_id=source_ds_2_2.captured_run_id,
                              conn=target_conn)
    assert source_ds_2_2.the_same_dataset_as(loaded)

    assert source_exp2.exp_id == 2

    # this is now the first run in the db so run_id is 1
    target_run_id = 1
    # and the experiment ids will be interchanged.
    target_exp_id = 1

    loaded = load_by_counter(target_run_id, target_exp_id, target_conn)
    assert source_ds_2_2.the_same_dataset_as(loaded)
def test_load_by_X_functions(two_empty_temp_db_connections, some_paramspecs):
    """
    Test some different loading functions
    """
    source_conn, target_conn = two_empty_temp_db_connections
    source_path = path_to_dbfile(source_conn)
    target_path = path_to_dbfile(target_conn)

    source_exp1 = Experiment(conn=source_conn)
    source_ds_1_1 = DataSet(conn=source_conn, exp_id=source_exp1.exp_id)
    source_exp2 = Experiment(conn=source_conn)
    source_ds_2_1 = DataSet(conn=source_conn, exp_id=source_exp2.exp_id)
    source_ds_2_2 = DataSet(conn=source_conn,
                            exp_id=source_exp2.exp_id,
                            name="customname")

    # The register/start/fill/complete sequence was copy-pasted three
    # times in the original; it is identical for all three runs, so do
    # it once in a loop (same pattern the sibling tests use).
    for ds in (source_ds_1_1, source_ds_2_1, source_ds_2_2):
        for ps in some_paramspecs[2].values():
            ds.add_parameter(ps)
        ds.mark_started()
        ds.add_result({ps.name: 0.0 for ps in some_paramspecs[2].values()})
        ds.mark_completed()

    extract_runs_into_db(source_path, target_path, source_ds_2_2.run_id)

    # The extracted run must be retrievable by guid, by id and by counter.
    test_ds = load_by_guid(source_ds_2_2.guid, target_conn)
    assert source_ds_2_2.the_same_dataset_as(test_ds)

    test_ds = load_by_id(1, target_conn)
    assert source_ds_2_2.the_same_dataset_as(test_ds)

    test_ds = load_by_counter(1, 1, target_conn)
    assert source_ds_2_2.the_same_dataset_as(test_ds)
def test_perform_actual_upgrade_6_to_newest_add_new_data():
    """
    Insert new runs on top of existing runs upgraded and verify that they
    get the correct captured_run_id and captured_counter
    """
    from qcodes.dataset.measurements import Measurement
    from qcodes.instrument.parameter import Parameter
    import numpy as np

    fixpath = os.path.join(fixturepath, 'db_files', 'version6')
    db_file = 'some_runs.db'
    dbname_old = os.path.join(fixpath, db_file)

    if not os.path.exists(dbname_old):
        pytest.skip("No db-file fixtures found. You can generate test db-files"
                    " using the scripts in the "
                    "https://github.com/QCoDeS/qcodes_generate_test_db/ repo")

    with temporarily_copied_DB(dbname_old, debug=False, version=6) as conn:
        assert isinstance(conn, ConnectionPlus)
        perform_db_upgrade(conn)
        assert get_user_version(conn) >= 7

        no_of_runs_query = "SELECT max(run_id) FROM runs"
        no_of_runs = one(
            atomic_transaction(conn, no_of_runs_query), 'max(run_id)')

        # Now let's insert new runs and ensure that they also get
        # captured_run_id assigned.
        params = [Parameter(f'p{n}', label=f'Parameter {n}',
                            unit=f'unit {n}', set_cmd=None, get_cmd=None)
                  for n in range(5)]

        # Set up an experiment
        exp = new_experiment('some-exp', 'some-sample', conn=conn)
        meas = Measurement(exp=exp)
        meas.register_parameter(params[0])
        meas.register_parameter(params[1])
        meas.register_parameter(params[2], basis=(params[0],))
        meas.register_parameter(params[3], basis=(params[1],))
        meas.register_parameter(params[4], setpoints=(params[2], params[3]))

        # Make a number of identical runs
        for _ in range(10):
            with meas.run() as datasaver:
                for x in np.random.rand(10):
                    for y in np.random.rand(10):
                        z = np.random.rand()
                        datasaver.add_result((params[0], 0),
                                             (params[1], 1),
                                             (params[2], x),
                                             (params[3], y),
                                             (params[4], z))

        no_of_runs_new = one(
            atomic_transaction(conn, no_of_runs_query), 'max(run_id)')
        assert no_of_runs_new == 20

        # check that run_id is equivalent to captured_run_id for new
        # runs
        for run_id in range(no_of_runs, no_of_runs_new + 1):
            ds1 = load_by_id(run_id, conn)
            ds2 = load_by_run_spec(captured_run_id=run_id, conn=conn)

            assert ds1.the_same_dataset_as(ds2)

            assert ds1.run_id == run_id
            assert ds1.run_id == ds1.captured_run_id
            assert ds2.run_id == run_id
            assert ds2.run_id == ds2.captured_run_id

        # we are creating a new experiment into a db with one exp so:
        exp_id = 2

        # check that counter is equivalent to captured_counter for new
        # runs
        for counter in range(1, no_of_runs_new - no_of_runs + 1):
            ds1 = load_by_counter(counter, exp_id, conn)

            # giving only the counter is not unique since we have 2 experiments
            with pytest.raises(NameError, match="More than one"
                               " matching dataset"):
                load_by_run_spec(captured_counter=counter, conn=conn)

            # however we can supply counter and experiment
            ds2 = load_by_run_spec(captured_counter=counter,
                                   experiment_name='some-exp',
                                   conn=conn)

            assert ds1.the_same_dataset_as(ds2)

            assert ds1.counter == counter
            assert ds1.counter == ds1.captured_counter
            assert ds2.counter == counter
            assert ds2.counter == ds2.captured_counter