def generate_DB_file_with_some_runs(version=VERSION):
    """
    Generate a .db-file with a handful of runs with some interdependent
    parameters
    """
    # This function will run often on CI and re-generate the .db-files.
    # That should ideally be a deterministic action, hence the fixed seed
    # (although this hopefully plays no role)
    np.random.seed(0)

    fixture_dir = os.path.join(utils.fixturepath, f'version{version}')
    os.makedirs(fixture_dir, exist_ok=True)
    db_path = os.path.join(fixture_dir, 'some_runs.db')
    if os.path.exists(db_path):
        os.remove(db_path)

    from qcodes.dataset.sqlite_base import connect
    from qcodes.dataset.measurements import Measurement
    from qcodes.dataset.experiment_container import Experiment
    from qcodes import Parameter

    connect(db_path)
    exp = Experiment(path_to_db=db_path,
                     name='experiment_1',
                     sample_name='no_sample_1')

    # Parameters to use in the measurements below
    params = [Parameter(f'p{n}',
                        label=f'Parameter {n}',
                        unit=f'unit {n}',
                        set_cmd=None,
                        get_cmd=None)
              for n in range(5)]

    # Set up an experiment: p2/p3 have basis parameters, p4 depends on both
    meas = Measurement(exp)
    meas.register_parameter(params[0])
    meas.register_parameter(params[1])
    meas.register_parameter(params[2], basis=(params[0],))
    meas.register_parameter(params[3], basis=(params[1],))
    meas.register_parameter(params[4], setpoints=(params[2], params[3]))

    # Make a number of identical runs
    for _ in range(10):
        with meas.run() as datasaver:
            for x in np.random.rand(10):
                for y in np.random.rand(10):
                    z = np.random.rand()
                    datasaver.add_result((params[0], 0),
                                         (params[1], 1),
                                         (params[2], x),
                                         (params[3], y),
                                         (params[4], z))
def test_cannot_connect_to_newer_db():
    """Connecting must fail when the DB claims a newer schema version."""
    conn = connect(qc.config["core"]["db_location"],
                   qc.config["core"]["db_debug"])
    current_version = get_user_version(conn)
    # Pretend the file was written by a future QCoDeS version
    set_user_version(conn, current_version + 1)
    conn.close()
    err_msg = (f'is version {current_version + 1} but this version of '
               f'QCoDeS supports up to version {current_version}')
    with pytest.raises(RuntimeError, match=err_msg):
        conn = connect(qc.config["core"]["db_location"],
                       qc.config["core"]["db_debug"])
def generate_empty_DB_file():
    """
    Generate the bare minimal DB file with no runs
    """
    import qcodes.dataset.sqlite_base as sqlite_base

    # The local name used to be `v0fixturepath`, which was misleading:
    # the fixture actually lives in the 'version1' directory
    v1fixturepath = os.path.join(utils.fixturepath, 'version1')
    os.makedirs(v1fixturepath, exist_ok=True)
    path = os.path.join(v1fixturepath, 'empty.db')
    if os.path.exists(path):
        os.remove(path)
    # connecting creates (and initializes) the empty DB file
    sqlite_base.connect(path)
def generate_upgraded_v2_runs():
    """
    Generate some runs by upgrading from a v2 db. This is needed since the
    bug we want to test against is in the v2 to v3 upgrade and not in v3
    itself. This requires the v2 generation to be run before this one.
    """
    import qcodes.dataset.sqlite_base as sqlite_base
    v2fixture_path = os.path.join(utils.fixturepath, 'version2',
                                  'some_runs.db')
    v3fixture_dir = os.path.join(utils.fixturepath, 'version3')
    # Be robust against running this generator on its own: shutil.copy2
    # fails if the target directory does not exist yet
    os.makedirs(v3fixture_dir, exist_ok=True)
    v3fixturepath = os.path.join(v3fixture_dir, 'some_runs_upgraded_2.db')
    shutil.copy2(v2fixture_path, v3fixturepath)
    # connecting presumably triggers the v2 -> v3 upgrade of the copied file
    sqlite_base.connect(v3fixturepath)
def generate_empty_DB_file(version=VERSION):
    """
    Generate an empty DB file with no runs
    """
    import qcodes.dataset.sqlite_base as sqlite_base

    fixture_dir = os.path.join(utils.fixturepath, f'version{version}')
    os.makedirs(fixture_dir, exist_ok=True)
    db_file = os.path.join(fixture_dir, 'empty.db')
    if os.path.exists(db_file):
        os.remove(db_file)
    # connecting creates the fresh, empty DB file
    sqlite_base.connect(db_file)
def two_empty_temp_db_connections():
    """
    Yield connections to two empty temporary database files. Meant for use
    with the test_database_copy_paste
    """
    with tempfile.TemporaryDirectory() as tmpdirname:
        source_conn = connect(os.path.join(tmpdirname, 'source.db'))
        target_conn = connect(os.path.join(tmpdirname, 'target.db'))
        try:
            yield (source_conn, target_conn)
        finally:
            # close both even if the consuming test raises
            source_conn.close()
            target_conn.close()
def load_by_counter(counter: int, exp_id: int,
                    conn: Optional[ConnectionPlus] = None) -> DataSet:
    """
    Load a dataset given its counter in a given experiment

    Lookup is performed in the database file that is specified in the config.

    Args:
        counter: counter of the dataset within the given experiment
        exp_id: id of the experiment where to look for the dataset
        conn: connection to the database to load from. If not provided, a
          connection to the DB file specified in the config is made

    Returns:
        dataset of the given counter in the given experiment
    """
    if conn is None:
        conn = connect(get_DB_location())
    query = """
            SELECT run_id
            FROM runs
            WHERE result_counter= ?
            AND exp_id = ?
            """
    cursor = transaction(conn, query, counter, exp_id)
    run_id = one(cursor, 'run_id')
    return DataSet(conn=conn, run_id=run_id)
def load_by_guid(guid: str, conn: Optional[ConnectionPlus] = None) -> DataSet:
    """
    Load a dataset by its GUID

    If no connection is provided, lookup is performed in the database file
    that is specified in the config.

    Args:
        guid: guid of the dataset
        conn: connection to the database to load from

    Returns:
        dataset with the given guid

    Raises:
        NameError: if no run with the given GUID exists in the database
        RuntimeError: if several runs with the given GUID are found
    """
    if conn is None:
        conn = connect(get_DB_location())

    # get_runid_from_guid raises a RuntimeError if more than one run
    # matches the GUID
    run_id = get_runid_from_guid(conn, guid)

    if run_id == -1:
        raise NameError(f'No run with GUID: {guid} found in database.')

    return DataSet(run_id=run_id, conn=conn)
def load_or_create_experiment(
        experiment_name: str,
        sample_name: Optional[str] = None,
        conn: Optional[ConnectionPlus] = None) -> Experiment:
    """
    Find and return an experiment with the given name and sample name,
    or create one if not found.

    Args:
        experiment_name: Name of the experiment to find or create
        sample_name: Name of the sample
        conn: Connection to the database. If not supplied, a new connection
          to the DB file specified in the config is made

    Returns:
        The found or created experiment
    """
    conn = conn or connect(get_DB_location())
    try:
        experiment = load_experiment_by_name(experiment_name, sample_name,
                                             conn=conn)
    except ValueError as exception:
        if "Experiment not found" in str(exception):
            experiment = new_experiment(experiment_name, sample_name,
                                        conn=conn)
        else:
            # bare `raise` re-raises the active exception with its original
            # traceback (the previous `raise exception` was redundant)
            raise
    return experiment
def load_experiment_by_name(
        name: str,
        sample: Optional[str] = None,
        conn: Optional[ConnectionPlus] = None) -> Experiment:
    """
    Try to load experiment with the specified name.

    Nothing stops you from having many experiments with the same name and
    sample_name. In that case this won't work. And warn you.

    Args:
        name: the name of the experiment
        sample: the name of the sample
        conn: connection to the database. If not supplied, a new connection
          to the DB file specified in the config is made

    Returns:
        the requested experiment

    Raises:
        ValueError if the name is not unique and sample name is None.
    """
    conn = conn or connect(get_DB_location())

    # filter on sample_name only when one was given
    if sample:
        query = """
                SELECT * FROM experiments
                WHERE sample_name = ? AND name = ?
                """
        cursor = transaction(conn, query, sample, name)
    else:
        query = """
                SELECT * FROM experiments
                WHERE name = ?
                """
        cursor = transaction(conn, query, name)

    rows = cursor.fetchall()
    if not rows:
        raise ValueError("Experiment not found")
    if len(rows) > 1:
        # several matches: list them all in the error message
        descriptions = [
            (f"exp_id:{row['exp_id']} ({row['name']}-{row['sample_name']})"
             f" started at ({row['start_time']})")
            for row in rows
        ]
        _repr_str = "\n".join(descriptions)
        raise ValueError(f"Many experiments matching your request"
                         f" found:\n{_repr_str}")
    return Experiment(exp_id=rows[0]['exp_id'], conn=conn)
def __init__(self, path_to_db: str,
             run_id: Optional[int] = None,
             conn=None) -> None:
    """
    Create a new DataSet object. The object can either be intended
    to hold a new run or an old run.

    Args:
        path_to_db: path to the sqlite file on disk
        run_id: provide this when loading an existing run, leave it
          as None when creating a new run
        conn: connection to the DB
    """
    # TODO: handle fail here by defaulting to
    # a standard db
    self.path_to_db = path_to_db
    if conn is None:
        self.conn = connect(self.path_to_db)
    else:
        self.conn = conn

    self.run_id = run_id
    self._debug = False
    self.subscribers: Dict[str, _Subscriber] = {}
    # check `is not None` instead of truthiness so that a run_id of 0
    # would still be treated as an existing run
    if run_id is not None:
        self._completed = completed(self.conn, self.run_id)
def get_runs_from_db(path: str, start: int = 0,
                     stop: Union[None, int] = None,
                     get_structure: bool = False):
    """
    Get a db 'overview' dictionary from the db located in `path`. `start`
    and `stop` refer to indices of the runs in the db that we want to have
    details on; if `stop` is None, we'll use runs until the end. if
    `get_structure` is True, include info on the run data structure in the
    return dict.
    """
    conn = connect(path)
    try:
        runs = get_runs(conn)

        if stop is None:
            stop = len(runs)

        runs = runs[start:stop]
        overview = {}

        for run in runs:
            run_id = run['run_id']
            overview[run_id] = get_ds_info(conn, run_id,
                                           get_structure=get_structure)
    finally:
        # the original leaked this connection; close it deterministically
        conn.close()

    return overview
def load_by_counter(counter, exp_id):
    """
    Load a dataset given its counter in one experiment

    Args:
        counter: Counter of the dataset
        exp_id: Experiment the dataset belongs to

    Returns:
        the dataset
    """
    conn = connect(get_DB_location())
    query = """
            SELECT run_id
            FROM runs
            WHERE result_counter= ?
            AND exp_id = ?
            """
    cursor = transaction(conn, query, counter, exp_id)
    run_id = one(cursor, 'run_id')
    # this helper opens its own short-lived connection; the DataSet
    # re-connects via the path
    conn.close()
    return DataSet(get_DB_location(), run_id=run_id)
def test_tabels_exists(empty_temp_db):
    print(qc.config["core"]["db_location"])
    conn = connect(qc.config["core"]["db_location"],
                   qc.config["core"]["db_debug"])
    cursor = conn.execute("select sql from sqlite_master where type = 'table'")
    expected_tables = ['experiments', 'runs', 'layouts', 'dependencies']
    rows = [row for row in cursor]
    # `zip` silently truncates at the shorter sequence, so a missing table
    # could previously go undetected; assert the counts match first
    # (mirrors test_tables_exist)
    assert len(rows) == len(expected_tables)
    for row, expected_table in zip(rows, expected_tables):
        assert expected_table in row['sql']
    conn.close()
def test_path_to_dbfile():
    """path_to_dbfile should recover the file path a connection was made to."""
    with tempfile.TemporaryDirectory() as tempdir:
        expected = os.path.join(tempdir, 'database.db')
        conn = mut.connect(expected)
        try:
            assert path_to_dbfile(conn) == expected
        finally:
            conn.close()
def toggle_debug(self):
    """
    Toggle debug mode, if debug mode is on all the queries made are
    echoed back.
    """
    # flip the flag first, then re-open the connection so the new debug
    # setting is picked up by connect()
    self._debug = not self._debug
    self.conn.close()
    self.conn = connect(self.path_to_db, self._debug)
def shadow_conn(path_to_db: str):
    """
    Simple context manager to create a connection for testing and
    close it on exit
    """
    conn = mut.connect(path_to_db)
    try:
        yield conn
    finally:
        # close even when the body using this context manager raises;
        # previously an exception would leak the connection
        conn.close()
def test_connect():
    """A fresh connection is a ConnectionPlus with the expected defaults."""
    conn = connect(':memory:')

    assert isinstance(conn, sqlite3.Connection)
    assert isinstance(conn, ConnectionPlus)
    assert conn.atomic_in_progress is False
    assert conn.row_factory is sqlite3.Row
def load_last_experiment() -> Experiment:
    """
    Load last experiment

    Returns:
        last experiment

    Raises:
        ValueError: if there are no experiments in the database file
    """
    conn = connect(get_DB_location())
    last_exp_id = get_last_experiment(conn)
    # Guard against an empty database: Experiment(exp_id=None) would
    # otherwise silently create a brand-new experiment instead of loading
    # one (same guard as the newer load_last_experiment)
    if last_exp_id is None:
        raise ValueError('There are no experiments in the database file')
    return Experiment(exp_id=last_exp_id)
def __init__(self, path_to_db: Optional[str] = None,
             exp_id: Optional[int] = None,
             name: Optional[str] = None,
             sample_name: Optional[str] = None,
             format_string: str = "{}-{}-{}",
             conn: Optional[ConnectionPlus] = None) -> None:
    """
    Create or load an experiment. If exp_id is None, a new experiment is
    created. If exp_id is not None, an experiment is loaded.

    Args:
        path_to_db: The path of the database file to create in/load from.
          If a conn is passed together with path_to_db, an exception is
          raised
        exp_id: The id of the experiment to load
        name: The name of the experiment to create. Ignored if exp_id is
          not None
        sample_name: The sample name for this experiment. Ignored if exp_id
          is not None
        format_string: The format string used to name result-tables.
          Ignored if exp_id is not None.
        conn: connection to the database. If not supplied, the constructor
          first tries to use path_to_db to figure out where to connect to.
          If path_to_db is not supplied either, a new connection to the
          DB file specified in the config is made
    """
    # conn and path_to_db are mutually exclusive ways of locating the DB
    if path_to_db is not None and conn is not None:
        raise ValueError('Received BOTH conn and path_to_db. Please '
                         'provide only one or the other.')
    self._path_to_db = path_to_db or get_DB_location()
    # reuse the supplied connection or open one to the resolved DB file
    self.conn = conn or connect(self.path_to_db, get_DB_debug())
    # number of experiments currently in the DB; valid exp_ids are assumed
    # to be 1..max_id
    max_id = len(get_experiments(self.conn))
    if exp_id is not None:
        if exp_id not in range(1, max_id + 1):
            raise ValueError('No such experiment in the database')
        self._exp_id = exp_id
    else:
        # it is better to catch an invalid format string earlier than later
        try:
            # the sqlite_base will try to format
            # (name, exp_id, run_counter)
            format_string.format("name", 1, 1)
        except Exception as e:
            raise ValueError("Invalid format string. Can not format "
                             "(name, exp_id, run_counter)") from e
        log.info("creating new experiment in {}".format(self.path_to_db))
        # fall back to generated defaults when name/sample are omitted
        name = name or f"experiment_{max_id+1}"
        sample_name = sample_name or "some_sample"
        self._exp_id = ne(self.conn, name, sample_name, format_string)
def __init__(self, path_to_db: str, conn=None) -> None:
    # TODO: handle fail here by defaulting to
    # a standard db
    self.path_to_db = path_to_db
    # reuse an existing connection when one is supplied
    self.conn = connect(self.path_to_db) if conn is None else conn
    self._debug = False
def load_last_experiment() -> Experiment:
    """
    Load last experiment (from database file from config)

    Returns:
        last experiment

    Raises:
        ValueError: if there are no experiments in the database file
    """
    conn = connect(get_DB_location())
    try:
        last_exp_id = get_last_experiment(conn)
    finally:
        # the original leaked this short-lived connection; the Experiment
        # constructor opens its own connection from the config
        conn.close()
    if last_exp_id is None:
        raise ValueError('There are no experiments in the database file')
    return Experiment(exp_id=last_exp_id)
def test_database_upgrade(empty_temp_db):
    """Apply the version-0 -> version-1 schema change by hand."""
    conn = connect(qc.config["core"]["db_location"],
                   qc.config["core"]["db_debug"])
    db_version = get_user_version(conn)
    # only makes sense starting from a pristine version-0 database
    if db_version != 0:
        raise RuntimeError("trying to upgrade from version 0"
                           " but your database is version"
                           " {}".format(db_version))
    # add the new column and bump the user_version stamp
    atomic_transaction(conn, 'ALTER TABLE "runs" ADD COLUMN "quality"')
    set_user_version(conn, 1)
def test_tables_exist(empty_temp_db, version):
    """The four core tables must be created for the given schema version."""
    conn = connect(qc.config["core"]["db_location"],
                   qc.config["core"]["db_debug"], version=version)
    cursor = conn.execute("select sql from sqlite_master"
                          " where type = 'table'")
    rows = list(cursor)
    expected_tables = ['experiments', 'runs', 'layouts', 'dependencies']
    # counts must match so that a missing table cannot hide behind zip
    assert len(rows) == len(expected_tables)
    for row, table_name in zip(rows, expected_tables):
        assert table_name in row['sql']
    conn.close()
def test_runs_table_columns(empty_temp_db):
    """
    Ensure that the column names of a pristine runs table are what we expect
    """
    remaining = mut.RUNS_TABLE_COLUMNS.copy()
    conn = mut.connect(get_DB_location())
    cursor = conn.cursor()
    # remove each actual column from the expected set; anything left over
    # (or an unexpected column -> ValueError from remove) fails the test
    for row in cursor.execute("PRAGMA table_info(runs)"):
        remaining.remove(row['name'])

    assert remaining == []
def experiments() -> List[Experiment]:
    """
    List all the experiments in the container (database file from config)

    Returns:
        All the experiments in the container
    """
    log.info("loading experiments from {}".format(get_DB_location()))
    rows = get_experiments(connect(get_DB_location(), get_DB_debug()))
    return [load_experiment(row['exp_id']) for row in rows]
def experiments() -> List[Experiment]:
    """
    List all the experiments in the container

    Returns:
        All the experiments in the container
    """
    log.info("loading experiments from {}".format(DB))
    rows = get_experiments(connect(DB, debug_db))
    return [load_experiment(row['exp_id']) for row in rows]
def generate_DB_file_with_empty_runs():
    """
    Generate a DB file that holds empty runs and runs with no
    interdependencies
    """
    fixture_dir = os.path.join(fixturepath, 'version2')
    os.makedirs(fixture_dir, exist_ok=True)
    path = os.path.join(fixture_dir, 'empty_runs.db')
    if os.path.exists(path):
        os.remove(path)

    from qcodes.dataset.sqlite_base import connect
    from qcodes.dataset.measurements import Measurement
    from qcodes.dataset.experiment_container import Experiment
    from qcodes import Parameter
    from qcodes.dataset.data_set import DataSet

    conn = connect(path)
    exp = Experiment(path)
    exp._new(name='experiment_1', sample_name='no_sample_1')

    # Parameters used in the non-empty run at the end
    params = [Parameter(f'p{n}',
                        label=f'Parameter {n}',
                        unit=f'unit {n}',
                        set_cmd=None,
                        get_cmd=None)
              for n in range(5)]

    # truly empty run, no layouts table, no nothing
    dataset = DataSet(path, conn=conn)
    dataset._new('empty_dataset', exp_id=exp.exp_id)

    # empty run
    meas = Measurement(exp)
    with meas.run() as datasaver:
        pass

    # run with no interdeps
    meas = Measurement(exp)
    for param in params:
        meas.register_parameter(param)

    with meas.run() as datasaver:
        pass

    with meas.run() as datasaver:
        for _ in range(10):
            res = tuple((p, 0.0) for p in params)
            datasaver.add_result(*res)
def empty_temp_db():
    # create a temp database for testing
    with tempfile.TemporaryDirectory() as tmpdirname:
        db_path = os.path.join(tmpdirname, 'temp.db')
        qc.config["core"]["db_location"] = db_path
        qc.config["core"]["db_debug"] = False
        # this is somewhat annoying but these module scope variables
        # are initialized at import time so they need to be overwritten
        qc.dataset.experiment_container.DB = db_path
        qc.dataset.data_set.DB = db_path
        qc.dataset.experiment_container.debug_db = False
        conn = mut.connect(db_path, False)
        mut.init_db(conn)
        conn.close()
        yield
def test_perform_actual_upgrade_0_to_1():
    # we cannot use the empty_temp_db, since that has already called connect
    # and is therefore latest version already
    conn = connect(':memory:', debug=False, version=0)

    assert get_user_version(conn) == 0

    guid_table_query = "SELECT guid FROM runs"

    # the guid column does not exist before the upgrade
    with pytest.raises(RuntimeError):
        atomic_transaction(conn, guid_table_query)

    perform_db_upgrade_0_to_1(conn)
    assert get_user_version(conn) == 1

    cursor = atomic_transaction(conn, guid_table_query)
    assert len(cursor.fetchall()) == 0