def _add_to_dyn_column_if_in_db(self, tag: str, data: Any) -> None:
    """Store ``{tag: data}`` in this run's dynamic columns, but only if
    the run is already registered in the runs table of the database.
    """
    if not self._dataset_is_in_runs_table():
        return
    connection = conn_from_dbpath_or_conn(
        conn=None, path_to_db=self._path_to_db)
    with contextlib.closing(connection):
        # Wrap the write in a transaction so a partial update cannot occur.
        with atomic(connection) as atomic_conn:
            add_data_to_dynamic_columns(atomic_conn, self.run_id, {tag: data})
def _complete(self, value: bool) -> None:
    """Mark this run as completed in the database.

    Args:
        value: If True, record the completion timestamp and mark the run
            complete in the database. A falsy value writes nothing.
    """
    if not value:
        # Nothing to persist; avoid opening a DB connection needlessly.
        # (The previous implementation opened and closed a connection
        # even on this no-op path.)
        return
    self._completed_timestamp_raw = time.time()
    with contextlib.closing(
            conn_from_dbpath_or_conn(
                conn=None, path_to_db=self._path_to_db)) as conn:
        mark_run_complete(conn, self.run_id, self._completed_timestamp_raw)
def __init__(self, path_to_db: Optional[str] = None,
             exp_id: Optional[int] = None,
             name: Optional[str] = None,
             sample_name: Optional[str] = None,
             format_string: str = "{}-{}-{}",
             conn: Optional[ConnectionPlus] = None) -> None:
    """
    Create or load an experiment. If exp_id is None, a new experiment is
    created. If exp_id is not None, an experiment is loaded.

    Args:
        path_to_db: The path of the database file to create in/load from.
            If a conn is passed together with path_to_db, an exception is
            raised
        exp_id: The id of the experiment to load
        name: The name of the experiment to create. Ignored if exp_id is
            not None
        sample_name: The sample name for this experiment. Ignored if exp_id
            is not None
        format_string: The format string used to name result-tables.
            Ignored if exp_id is not None.
        conn: connection to the database. If not supplied, the constructor
            first tries to use path_to_db to figure out where to connect to.
            If path_to_db is not supplied either, a new connection to the
            DB file specified in the config is made

    Raises:
        ValueError: if exp_id does not refer to an existing experiment, or
            if format_string cannot format (name, exp_id, run_counter)
    """
    self.conn = conn_from_dbpath_or_conn(conn, path_to_db)
    max_id = len(get_experiments(self.conn))
    if exp_id is not None:
        # Experiment ids are assigned consecutively starting at 1, so a
        # valid id must lie in [1, max_id].
        if exp_id not in range(1, max_id + 1):
            raise ValueError('No such experiment in the database')
        self._exp_id = exp_id
    else:
        # it is better to catch an invalid format string earlier than later
        try:
            # the corresponding function from sqlite module will try to
            # format as `(name, exp_id, run_counter)`, hence we prepare
            # for that here
            format_string.format("name", 1, 1)
        except Exception as e:
            raise ValueError("Invalid format string. Can not format "
                             "(name, exp_id, run_counter)") from e
        # f-string for consistency with the rest of the module
        # (same rendered message as the previous str.format call)
        log.info(f"creating new experiment in {self.path_to_db}")
        name = name or f"experiment_{max_id+1}"
        sample_name = sample_name or "some_sample"
        self._exp_id = ne(self.conn, name, sample_name, format_string)
def _dataset_is_in_runs_table(self,
                              path_to_db: Optional[Union[str, Path]] = None
                              ) -> bool:
    """Return True if a run with this dataset's guid exists in the runs
    table of the given database (the default database if None is given).
    """
    db_location = str(path_to_db) if isinstance(path_to_db, Path) else path_to_db
    connection = conn_from_dbpath_or_conn(conn=None, path_to_db=db_location)
    with contextlib.closing(connection):
        matching_run_id = get_runid_from_guid(connection, self.guid)
        return matching_run_id is not None
def experiments(conn: Optional[ConnectionPlus] = None) -> List[Experiment]:
    """
    List all the experiments in the container (database file from config)

    Args:
        conn: connection to the database. If not supplied, a new connection
            to the DB file specified in the config is made

    Returns:
        All the experiments in the container
    """
    connection = conn_from_dbpath_or_conn(conn=conn, path_to_db=None)
    log.info(f"loading experiments from {connection.path_to_dbfile}")
    return [
        load_experiment(record['exp_id'], connection)
        for record in get_experiments(connection)
    ]
def _create_new_run(
    cls,
    name: str,
    path_to_db: Optional[Union[Path, str]] = None,
    exp_id: Optional[int] = None,
) -> DataSetInMem:
    """Register a brand-new run in the database (without a results table)
    and return an in-memory dataset describing it.

    Args:
        name: Name of the dataset; falls back to "dataset" if empty.
        path_to_db: Database to create the run in; the default database
            from the config is used when None.
        exp_id: Experiment to attach the run to; the default experiment
            is used when None.

    Returns:
        The newly created in-memory dataset.
    """
    db_path = None if path_to_db is None else str(path_to_db)
    with contextlib.closing(
            conn_from_dbpath_or_conn(conn=None, path_to_db=db_path)) as conn:
        if exp_id is None:
            exp_id = get_default_experiment_id(conn)
        name = name or "dataset"
        sample_name = get_sample_name_from_experiment_id(conn, exp_id)
        exp_name = get_experiment_name_from_experiment_id(conn, exp_id)
        new_guid = generate_guid()
        # create_run_table=False: results live in memory, not in the DB
        run_counter, run_id, _ = create_run(
            conn,
            exp_id,
            name,
            guid=new_guid,
            parameters=None,
            create_run_table=False,
        )
        return cls(
            run_id=run_id,
            captured_run_id=run_id,
            counter=run_counter,
            captured_counter=run_counter,
            name=name,
            exp_id=exp_id,
            exp_name=exp_name,
            sample_name=sample_name,
            guid=new_guid,
            path_to_db=conn.path_to_dbfile,
            run_timestamp_raw=None,
            completed_timestamp_raw=None,
            metadata=None,
        )
def load_experiment(exp_id: int,
                    conn: Optional[ConnectionPlus] = None) -> Experiment:
    """
    Load experiment with the specified id (from database file from config)

    Args:
        exp_id: experiment id
        conn: connection to the database. If not supplied, a new connection
            to the DB file specified in the config is made

    Returns:
        experiment with the specified id

    Raises:
        ValueError: if exp_id is not an integer
    """
    # Validate the argument before touching the database so that bad input
    # fails fast and no connection is created as a side effect.
    # (Previously the connection was made before this check.)
    if not isinstance(exp_id, int):
        raise ValueError('Experiment ID must be an integer')
    conn = conn_from_dbpath_or_conn(conn=conn, path_to_db=None)
    experiment = Experiment(exp_id=exp_id, conn=conn)
    _set_default_experiment_id(path_to_dbfile(conn), experiment.exp_id)
    return experiment
def test_active_experiment(empty_temp_db):
    """Check that the default ("active") experiment id tracks experiment
    creation, loading, and resets.
    """
    conn = conn_from_dbpath_or_conn(conn=None, path_to_db=empty_temp_db)
    # An empty database has no default experiment yet.
    with pytest.raises(ValueError):
        get_default_experiment_id(conn)

    exp_1 = load_or_create_experiment("test_exp", sample_name="no_sample")
    assert get_default_experiment_id(conn) == exp_1.exp_id

    exp_2 = new_experiment("test_exp_2", sample_name="no_sample")
    assert get_default_experiment_id(conn) == exp_2.exp_id

    exp_3 = load_experiment(1)
    assert get_default_experiment_id(conn) == exp_1.exp_id
    assert get_default_experiment_id(conn) == exp_3.exp_id

    exp_4 = new_experiment("test_exp_3", sample_name="no_sample")
    exp_5 = load_experiment_by_name("test_exp_2", sample="no_sample")
    assert get_default_experiment_id(conn) == exp_2.exp_id
    assert get_default_experiment_id(conn) == exp_5.exp_id

    exp_6 = load_last_experiment()
    assert get_default_experiment_id(conn) == exp_4.exp_id
    assert get_default_experiment_id(conn) == exp_6.exp_id

    last_exp = new_experiment("last_exp", sample_name="no_sample")
    load_experiment(3)
    reset_default_experiment_id(conn)
    # Use `==`, not `is`: `is` on ints only happens to pass thanks to
    # CPython's small-integer caching and would break for larger ids.
    assert get_default_experiment_id(conn) == last_exp.exp_id

    load_experiment(exp_1.exp_id)
    assert get_default_experiment_id(conn) == exp_1.exp_id
    reset_default_experiment_id()
    assert get_default_experiment_id(conn) == last_exp.exp_id
def _perform_start_actions(self) -> None:
    """Perform the actions that must take place once the run has been started"""
    connection = conn_from_dbpath_or_conn(
        conn=None, path_to_db=self._path_to_db)
    with contextlib.closing(connection):
        # Register every parameter of this run in the database.
        for paramspec in new_to_old(self.description.interdeps).paramspecs:
            add_parameter(paramspec,
                          conn=connection,
                          run_id=self.run_id,
                          insert_into_results_table=False)

        # Persist the serialized run description.
        update_run_description(
            connection, self.run_id,
            serial.to_json_for_storage(self.description))

        # Record the start time both on the object and in the database.
        self._run_timestamp_raw = time.time()
        set_run_timestamp(connection, self.run_id, self._run_timestamp_raw)

        # Persist links to any parent datasets.
        update_parent_datasets(
            connection, self.run_id,
            links_to_str(self._parent_dataset_links))
def _load_from_netcdf(
        cls, path: Union[Path, str],
        path_to_db: Optional[Union[Path, str]] = None) -> DataSetInMem:
    """
    Create a in memory dataset from a netcdf file.
    The netcdf file is expected to contain a QCoDeS dataset that
    has been exported using the QCoDeS netcdf export functions.

    Args:
        path: Path to the netcdf file to import.
        path_to_db: Optional path to a database where this dataset may be
            exported to. If not supplied the path can be given at export
            time or the dataset exported to the default db as set in the
            QCoDeS config.

    Returns:
        The loaded dataset.
    """
    # in the code below floats and ints loaded from attributes are explicitly casted
    # this is due to some older versions of qcodes writing them with a different backend
    # reading them back results in a numpy array of one element
    import xarray as xr

    loaded_data = xr.load_dataset(path, engine="h5netcdf")
    parent_dataset_links = str_to_links(
        loaded_data.attrs.get("parent_dataset_links", "[]"))
    if path_to_db is not None:
        path_to_db = str(path_to_db)
    # Look up this guid in the database to see whether the run is already
    # registered there; also resolve the concrete db file path.
    with contextlib.closing(
            conn_from_dbpath_or_conn(
                conn=None, path_to_db=path_to_db)) as conn:
        run_data = get_raw_run_attributes(conn, guid=loaded_data.guid)
        path_to_db = conn.path_to_dbfile

    # Prefer the ids recorded in the database for this guid; fall back to
    # the "captured" ids stored in the netcdf attributes otherwise.
    if run_data is not None:
        run_id = run_data["run_id"]
        counter = run_data["counter"]
    else:
        run_id = int(loaded_data.captured_run_id)
        counter = int(loaded_data.captured_counter)

    # Remember where this dataset was imported from as its "nc" export path.
    path = str(path)
    path = os.path.abspath(path)
    export_info = ExportInfo.from_str(
        loaded_data.attrs.get("export_info", ""))
    export_info.export_paths["nc"] = path

    # Everything in attrs that is neither a runs-table column nor one of
    # these specially-handled attributes is treated as user metadata.
    non_metadata = {
        "run_timestamp_raw",
        "completed_timestamp_raw",
        "ds_name",
        "exp_name",
        "sample_name",
        "export_info",
        "parent_dataset_links",
    }
    metadata_keys = (set(loaded_data.attrs.keys())
                     - set(RUNS_TABLE_COLUMNS)
                     - non_metadata)
    metadata = {}
    for key in metadata_keys:
        data = loaded_data.attrs[key]
        # Unwrap single-element arrays written by older backends (see the
        # note at the top of this function).
        if isinstance(data, np.ndarray) and data.size == 1:
            data = data[0]
        metadata[key] = data

    ds = cls(
        run_id=run_id,
        captured_run_id=int(loaded_data.captured_run_id),
        counter=counter,
        captured_counter=int(loaded_data.captured_counter),
        name=loaded_data.ds_name,
        exp_id=0,
        exp_name=loaded_data.exp_name,
        sample_name=loaded_data.sample_name,
        guid=loaded_data.guid,
        path_to_db=path_to_db,
        run_timestamp_raw=float(loaded_data.run_timestamp_raw),
        completed_timestamp_raw=float(loaded_data.completed_timestamp_raw),
        metadata=metadata,
        rundescriber=serial.from_json_to_current(
            loaded_data.run_description),
        parent_dataset_links=parent_dataset_links,
        export_info=export_info,
        snapshot=loaded_data.snapshot,
    )
    # Populate the in-memory cache with the raw data from the xarray dataset.
    ds._cache = DataSetCacheInMem(ds)
    ds._cache._data = cls._from_xarray_dataset_to_qcodes_raw_data(
        loaded_data)
    return ds
def __init__(self, path_to_db: Optional[str] = None,
             run_id: Optional[int] = None,
             conn: Optional[ConnectionPlus] = None,
             exp_id: Optional[int] = None,
             name: Optional[str] = None,
             specs: Optional[SpecsOrInterDeps] = None,
             values=None,
             metadata=None) -> None:
    """
    Create a new DataSet object. The object can either hold a new run or
    an already existing run. If a run_id is provided, then an old run is
    looked up, else a new run is created.

    Args:
        path_to_db: path to the sqlite file on disk. If not provided, the
            path will be read from the config.
        run_id: provide this when loading an existing run, leave it
            as None when creating a new run
        conn: connection to the DB; if provided and `path_to_db` is
            provided as well, then a ValueError is raised (this is to
            prevent the possibility of providing a connection to a DB
            file that is different from `path_to_db`)
        exp_id: the id of the experiment in which to create a new run.
            Ignored if run_id is provided.
        name: the name of the dataset. Ignored if run_id is provided.
        specs: paramspecs belonging to the dataset. Ignored if run_id
            is provided.
        values: values to insert into the dataset. Ignored if run_id is
            provided.
        metadata: metadata to insert into the dataset. Ignored if run_id
            is provided.

    Raises:
        ValueError: if run_id is given but no such run exists, or if a
            new run is requested while the database holds no experiments
            and no exp_id was given.
    """
    self.conn = conn_from_dbpath_or_conn(conn, path_to_db)
    self._run_id = run_id
    self._debug = False
    self.subscribers: Dict[str, _Subscriber] = {}
    self._interdeps: InterDependencies_

    if run_id is not None:
        # Load an existing run: restore completion state, the
        # interdependencies from the stored run description, and metadata.
        if not run_exists(self.conn, run_id):
            raise ValueError(f"Run with run_id {run_id} does not exist in "
                             f"the database")
        self._completed = completed(self.conn, self.run_id)
        run_desc = self._get_run_description_from_db()
        self._interdeps = run_desc.interdeps
        self._metadata = get_metadata_from_run_id(self.conn, run_id)
        # A run counts as started once it has a run timestamp.
        self._started = self.run_timestamp_raw is not None
    else:
        # Actually perform all the side effects needed for the creation
        # of a new dataset. Note that a dataset is created (in the DB)
        # with no parameters; they are written to disk when the dataset
        # is marked as started
        if exp_id is None:
            if len(get_experiments(self.conn)) > 0:
                # Default to the most recently created experiment.
                exp_id = get_last_experiment(self.conn)
            else:
                raise ValueError("No experiments found."
                                 "You can start a new one with:"
                                 " new_experiment(name, sample_name)")
        name = name or "dataset"
        _, run_id, __ = create_run(self.conn, exp_id, name,
                                   generate_guid(),
                                   parameters=None,
                                   values=values,
                                   metadata=metadata)
        # this is really the UUID (an ever increasing count in the db)
        self._run_id = run_id
        self._completed = False
        self._started = False
        # specs may already be the new-style InterDependencies_, a
        # sequence of old-style paramspecs to convert, or absent entirely.
        if isinstance(specs, InterDependencies_):
            self._interdeps = specs
        elif specs is not None:
            self._interdeps = old_to_new(InterDependencies(*specs))
        else:
            self._interdeps = InterDependencies_()
        self._metadata = get_metadata_from_run_id(self.conn, self.run_id)