def test_run_loaded_experiment(empty_temp_db):
    """Test that a measurement can be resumed after loading the experiment by name."""
    new_experiment("test", "test1")
    loaded = load_experiment_by_name("test", "test1")

    meas = Measurement(exp=loaded)

    # Run twice to confirm the loaded experiment supports repeated runs.
    for _ in range(2):
        with meas.run():
            pass
def test_run_loaded_experiment():
    """Test that a measurement can be resumed after loading the experiment by name."""
    new_experiment("test", "test1")
    loaded = load_experiment_by_name("test", "test1")

    meas = Measurement(exp=loaded)
    meas.register_custom_parameter(name='dummy', paramtype='text')

    # Run twice to confirm the loaded experiment supports repeated runs.
    for _ in range(2):
        with meas.run():
            pass
def test_setting_write_period(empty_temp_db, wp):
    """Setting the write period validates the value and propagates to the datasaver."""
    new_experiment('firstexp', sample_name='no sample')
    meas = Measurement()

    # Strings and periods shorter than 1 ms are rejected with ValueError.
    # (isinstance check first so the < comparison never sees a str.)
    if isinstance(wp, str) or wp < 1e-3:
        with pytest.raises(ValueError):
            meas.write_period = wp
    else:
        meas.write_period = wp
        assert meas._write_period == wp
        with meas.run() as datasaver:
            assert datasaver.write_period == wp
def test_experiment_info_in_dataset():
    """A new dataset inherits id, name and sample name from its experiment."""
    exp = new_experiment(name="for_loading", sample_name="no_sample")
    dataset = new_data_set("my_first_ds")

    assert dataset.exp_id == exp.exp_id
    assert dataset.exp_name == exp.name
    assert dataset.sample_name == exp.sample_name
def do_experiment(experiment_name, sweep_object, setup=None, cleanup=None,
                  station=None, live_plot=False):
    """Run *sweep_object* as a measurement inside the named experiment.

    ``experiment_name`` may be ``"<experiment>/<sample>"``; without a slash
    the sample name is left unset.  ``setup`` and ``cleanup`` are callables
    (or ``(callable, args)`` tuples) registered to run before/after the
    sweep.  Returns a ``_DataExtractor`` wrapping the datasaver.
    """
    if "/" in experiment_name:
        experiment_name, sample_name = experiment_name.split("/")
    else:
        sample_name = None

    try:
        experiment = load_experiment_by_name(experiment_name, sample_name)
    except ValueError:
        # The experiment does not exist yet: touch the database file first,
        # then create the experiment entry.
        DataSet(qcodes.config["core"]["db_location"])
        experiment = new_experiment(experiment_name, sample_name)

    def register(register_with, hooks):
        # Normalise each hook to a (callable, args) tuple and register it.
        if hooks is None:
            return
        for hook in np.atleast_1d(hooks):
            if not isinstance(hook, tuple):
                hook = (hook, ())
            register_with(*hook)

    if live_plot:
        try:
            from plottr.qcodes_dataset import QcodesDatasetSubscriber
            from plottr.tools import start_listener
            start_listener()
        except ImportError:
            warn("Cannot perform live plots, plottr not installed")
            live_plot = False

    meas = SweepMeasurement(exp=experiment, station=station)
    meas.register_sweep(sweep_object)
    register(meas.add_before_run, setup)
    register(meas.add_after_run, cleanup)

    with meas.run() as datasaver:
        if live_plot:
            datasaver.dataset.subscribe(
                QcodesDatasetSubscriber(datasaver.dataset),
                state=[], min_wait=0, min_count=1,
            )
        for data in sweep_object:
            datasaver.add_result(*data.items())

    return _DataExtractor(datasaver)
def test_datasaver_arrays(empty_temp_db, N):
    """Array-valued results are stored point-by-point and length-checked."""
    new_experiment('firstexp', sample_name='no sample')

    meas = Measurement()
    meas.register_custom_parameter(name='freqax', label='Frequency axis',
                                   unit='Hz')
    meas.register_custom_parameter(name='signal', label='qubit signal',
                                   unit='Majorana number',
                                   setpoints=('freqax',))

    # Matching-length arrays are accepted and produce N points.
    with meas.run() as datasaver:
        datasaver.add_result(('freqax', np.linspace(1e6, 2e6, N)),
                             ('signal', np.random.randn(N)))
        assert datasaver.points_written == N

    # Mismatched lengths must be rejected.
    with meas.run() as datasaver:
        freqax = np.linspace(1e6, 2e6, N)
        signal = np.random.randn(N - 1)
        with pytest.raises(ValueError):
            datasaver.add_result(('freqax', freqax), ('signal', signal))

    meas.register_custom_parameter(name='gate_voltage',
                                   label='Gate tuning potential', unit='V')
    meas.register_custom_parameter(name='signal', label='qubit signal',
                                   unit='Majorana flux',
                                   setpoints=('freqax', 'gate_voltage'))

    # A scalar setpoint is broadcast against the array-valued ones.
    with meas.run() as datasaver:
        datasaver.add_result(('freqax', np.linspace(1e6, 2e6, N)),
                             ('signal', np.random.randn(N)),
                             ('gate_voltage', 0))
        assert datasaver.points_written == N
def test_completed_timestamp_for_not_completed_dataset():
    """A freshly created dataset is not completed and has no completion time."""
    new_experiment(name="for_loading", sample_name="no_sample")
    ds = new_data_set("my_first_ds")

    assert ds.completed is False
    assert ds.completed_timestamp_raw is None
    assert ds.completed_timestamp() is None
def test_run_timestamp():
    """The raw run timestamp falls inside the dataset-creation window."""
    new_experiment(name="for_loading", sample_name="no_sample")

    before = time.time()
    ds = new_data_set("my_first_ds")
    after = time.time()

    assert before <= ds.run_timestamp_raw <= after
def generate_local_run(dbpath: Path) -> str:
    """Create one 250-point Voltage/Current run in the database at *dbpath*.

    Returns:
        The GUID of the created dataset.
    """
    with initialised_database_at(str(dbpath)):
        new_experiment(sample_name="fivehundredtest_sample",
                       name="fivehundredtest_name")

        voltage = Parameter('Voltage', set_cmd=None)
        current = Parameter('Current', get_cmd=np.random.randn)

        meas = Measurement()
        meas.register_parameter(voltage).register_parameter(
            current, setpoints=[voltage])

        with meas.run() as datasaver:
            for setpoint in np.linspace(0, 2, 250):
                voltage(setpoint)
                datasaver.add_result(
                    (voltage, cast(float, voltage())),
                    (current, cast(float, current())),
                )
            guid = datasaver.dataset.guid
            # Make sure everything is on disk before handing back the GUID.
            datasaver.flush_data_to_database(block=True)

    return guid
def create_database(self, filename, experiment_name, sample_name):
    """Open (or create) the database at *filename* and make sure the
    experiment/sample combination exists in it."""
    initialise_or_create_database_at(filename)
    try:
        experiment = exc.load_experiment_by_name(name=experiment_name,
                                                 sample=sample_name)
    except ValueError:
        # Not found in this database: register a fresh experiment.
        experiment = exc.new_experiment(name=experiment_name,
                                        sample_name=sample_name)
        print('new_experiment')
def test_completed_timestamp_for_not_completed_dataset():
    """A freshly created dataset is pristine and has no completion time."""
    new_experiment(name="for_loading", sample_name="no_sample")
    ds = new_data_set("my_first_ds")

    # Fresh datasets are pristine: not started, not running, not completed.
    assert ds.pristine is True
    assert ds.started is False
    assert ds.running is False
    assert ds.completed is False

    # ...and therefore carry no completion timestamp in any form.
    assert ds.completed_timestamp_raw is None
    assert ds.completed_timestamp() is None
def test_load_by_counter():
    """Datasets reloaded by counter reflect the current completion state."""
    exp = new_experiment(name="for_loading", sample_name="no_sample")
    ds = new_data_set("my_first_ds")

    # Before completion, a fresh load reports completed=False.
    assert load_by_counter(exp.exp_id, 1).completed is False

    ds.mark_complete()

    # After completion the flag is visible through a fresh load as well.
    assert load_by_counter(exp.exp_id, 1).completed is True
def test_new_experiment_duplicate_name_and_sample_name(empty_temp_db, caplog):
    """
    Test that new_experiment logs a warning when asked to create an
    experiment whose name and sample name already exist in the database.
    """
    first = new_experiment("exp", "sample")
    expected = (
        f"There is (are) already experiment(s) with the name of {first.name} "
        f"and sample name of {first.sample_name} in the database.")

    caplog.clear()
    with caplog.at_level(logging.WARNING):
        new_experiment("exp", "sample")
    assert all(rec.levelname == "WARNING" for rec in caplog.records)
    assert expected in caplog.text

    # A None sample name is substituted by a default, which still collides.
    second = new_experiment("exp_2", None)
    expected = (
        f"There is (are) already experiment(s) with the name of {second.name} "
        f"and sample name of {second.sample_name} in the database.")
    assert second.sample_name == "some_sample"

    caplog.clear()
    with caplog.at_level(logging.WARNING):
        new_experiment("exp_2", None)
    assert all(rec.levelname == "WARNING" for rec in caplog.records)
    assert expected in caplog.text
    caplog.clear()
def test_completed_timestamp():
    """The raw completion timestamp falls inside the mark_complete window."""
    new_experiment(name="for_loading", sample_name="no_sample")
    ds = new_data_set("my_first_ds")

    before = time.time()
    ds.mark_complete()
    after = time.time()

    assert before <= ds.completed_timestamp_raw <= after
def test_load_or_create_experiment_different_sample_name():
    """
    Test that a second experiment is created when the experiment name
    matches but the sample name differs.
    """
    created = new_experiment("experiment_name", "sample_name_1")
    loaded_or_created = load_or_create_experiment("experiment_name",
                                                 "sample_name_2")

    # Two distinct experiments must now exist: same name, different sample.
    assert len(experiments()) == 2
    assert created.name == loaded_or_created.name
    assert created.sample_name != loaded_or_created.sample_name
def test_active_experiment(empty_temp_db):
    """Check that the default ("active") experiment tracks whichever
    experiment was most recently created or loaded."""
    conn = conn_from_dbpath_or_conn(conn=None, path_to_db=empty_temp_db)

    # An empty database has no default experiment.
    with pytest.raises(ValueError):
        get_default_experiment_id(conn)

    # Creating via load_or_create makes the new experiment the default.
    exp_1 = load_or_create_experiment("test_exp", sample_name="no_sample")
    assert get_default_experiment_id(conn) == exp_1.exp_id

    # So does creating one directly.
    exp_2 = new_experiment("test_exp_2", sample_name="no_sample")
    assert get_default_experiment_id(conn) == exp_2.exp_id

    # Loading an existing experiment by id switches the default back.
    exp_3 = load_experiment(1)
    assert get_default_experiment_id(conn) == exp_1.exp_id
    assert get_default_experiment_id(conn) == exp_3.exp_id

    exp_4 = new_experiment("test_exp_3", sample_name="no_sample")

    # Loading by name also updates the default.
    exp_5 = load_experiment_by_name("test_exp_2", sample="no_sample")
    assert get_default_experiment_id(conn) == exp_2.exp_id
    assert get_default_experiment_id(conn) == exp_5.exp_id

    # load_last_experiment selects the most recently created one (exp_4).
    exp_6 = load_last_experiment()
    assert get_default_experiment_id(conn) == exp_4.exp_id
    assert get_default_experiment_id(conn) == exp_6.exp_id

    last_exp = new_experiment("last_exp", sample_name="no_sample")
    load_experiment(3)

    # Resetting the default falls back to the latest created experiment.
    reset_default_experiment_id(conn)
    assert get_default_experiment_id(conn) is last_exp.exp_id

    load_experiment(exp_1.exp_id)
    assert get_default_experiment_id(conn) == exp_1.exp_id

    # Reset without an explicit connection behaves the same way.
    reset_default_experiment_id()
    assert get_default_experiment_id(conn) is last_exp.exp_id
def test_datasaver_unsized_arrays(empty_temp_db, N):
    """0-d numpy arrays (np.array(scalar)) are saved correctly."""
    new_experiment('firstexp', sample_name='no sample')

    meas = Measurement()
    meas.register_custom_parameter(name='freqax', label='Frequency axis',
                                   unit='Hz')
    meas.register_custom_parameter(name='signal', label='qubit signal',
                                   unit='Majorana number',
                                   setpoints=('freqax',))

    # np.array(some_number) is not the same as the number, nor is it an
    # array with a (non-empty) shape.  Check that we handle it correctly.
    with meas.run() as datasaver:
        freqax = np.linspace(1e6, 2e6, N)
        signal = np.random.randn(N)
        for freq, sig in zip(freqax, signal):
            datasaver.add_result(('freqax', np.array(freq)),
                                 ('signal', np.array(sig)))
        assert datasaver.points_written == N
def select_experiment(exp_name, sample_name):
    """Return the experiment for this experiment/sample combination.

    Loads the existing experiment if the combination is already present
    in the current database; otherwise creates and returns a new one.

    Potential issue: if multiple experiments with the same
    experiment/sample combination exist, our detection method will fail,
    and another copy of this combination will be created.
    """
    try:
        return load_experiment_by_name(exp_name, sample_name)
    except ValueError:
        return new_experiment(exp_name, sample_name)
def create_database_experiment_and_folders(self):
    """Configure the database location, load or create the experiment,
    and create the raw-data folder hierarchy (pngs, txts) for today."""
    # -- set the path where the raw data should be saved to (pngs, txts)
    self.raw_path = ('C:\\Users\\nanospin\\Nextcloud\\Lab-Shared\\measurements\\chris\\keysight_tests_data'
                     + '\\' + self.cooldown_date + '_' + self.sample_name
                     + '\\' 'raw')

    # set the .db path
    qc.config["core"]["db_location"] = (
        os.path.join('C:\\Users\\nanospin\\Nextcloud\\Lab-Shared\\measurements\\chris\\keysight_tests_data',
                     'keysight_tests.db'))

    # store a qcodesrc file with the loaded .db path to the measurements folder
    qc.config.save_config(
        os.path.join("C:\\Users\\nanospin\\Nextcloud\\Lab-Shared\\measurements",
                     ".qcodesrc"))

    # self.raw_path = 'C:\\Users\\nanospin\\Nextcloud\\Lab-Shared\\measurements\\Data\\'+self.cooldown_date+'_'+self.sample_name+"\\raw\\"
    # qc.config["core"]["db_location"] = (
    #     os.path.join('C:\\Users\\nanospin\\Nextcloud\\Lab-Shared\\measurements\\Data', 'experiments.db'))

    # -- check if in the standard folder -see qcodes config file- an
    # experiment with exp_name already exists; if not, create a new one,
    # if so, just print the last exp. ID and go on
    try:
        # qcodes interface of loading an experiment:
        # -- tries to connect to a database (specified in config data
        # structure) and searches for the exp_name
        self.exp = load_experiment_by_name(
            self.exp_name, sample=self.sample_name)
        # keep track of the experiment number
        print('Experiment loaded. Last ID no: ', self.exp.last_counter)
    except ValueError:
        print("Experiment name `", self.exp_name, "` with sample name `",
              self.sample_name, "` not found in ",
              qc.config["core"]["db_location"])
        print('Starting new experiment.')
        self.exp = new_experiment(self.exp_name, self.sample_name)
        os.makedirs(self.raw_path, exist_ok=True)

    # ---- always create a new folder for each day of taking measurements
    self.raw_path_with_date = os.path.join(
        self.raw_path, date.today().strftime("%y-%m-%d"))
    if not os.path.isdir(self.raw_path_with_date):
        # force-create the directory
        os.makedirs(self.raw_path_with_date, exist_ok=True)
def test_run_timestamp_with_default_format():
    """The formatted run timestamp parses back into the creation window."""
    new_experiment(name="for_loading", sample_name="no_sample")

    before = time.time()
    ds = new_data_set("my_first_ds")
    after = time.time()

    # Parsing exercises the default `run_timestamp` format as well.
    parsed = time.mktime(
        time.strptime(ds.run_timestamp(), "%Y-%m-%d %H:%M:%S"))

    # The default format has 1 s precision, so widen the upper bound by 1 s.
    assert floor(before) <= parsed <= floor(after) + 1
def setup(self, bench_param):
    """Create a temporary database and experiment, register parameters,
    enter the measurement runner and pre-generate values to save."""
    # Init DB: point qcodes at a throw-away database file.
    self.tmpdir = tempfile.mkdtemp()
    qcodes.config["core"]["db_location"] = os.path.join(
        self.tmpdir, 'temp.db')
    qcodes.config["core"]["db_debug"] = False
    initialise_database()

    # Create experiment
    self.experiment = new_experiment("test-experiment",
                                     sample_name="test-sample")

    # Create measurement: three setpoints, two dependent parameters.
    meas = Measurement(self.experiment)
    paramtype = bench_param['paramtype']
    setpoints = [ManualParameter(name) for name in ('x1', 'x2', 'x3')]
    dependents = [ManualParameter(name) for name in ('y1', 'y2')]
    for param in setpoints:
        meas.register_parameter(param, paramtype=paramtype)
    for param in dependents:
        meas.register_parameter(param, setpoints=setpoints,
                                paramtype=paramtype)
    self.parameters = setpoints + dependents

    # Enter the Runner context manager up front and keep the DataSaver.
    self.runner = meas.run()
    self.datasaver = self.runner.__enter__()

    # One random value array per registered parameter.
    for _ in self.parameters:
        self.values.append(np.random.rand(bench_param['n_values']))
def name_exp(self, exp_type='', **kwargs):
    """Create a new experiment whose name encodes *exp_type* and the
    supplied parameter values.

    Args:
        exp_type: free-form label for the experiment.
        **kwargs: parameter name/value pairs (e.g. ``Bfield=50e-6``)
            appended to the experiment name.

    Returns:
        The newly created experiment object.
    """
    self.db_connect()
    suffix = ''.join('__{}= {}'.format(key, eng(value))
                     for key, value in kwargs.items())
    name = '{:s}_'.format(exp_type) + suffix
    return new_experiment(name=name,
                          sample_name="{}".format(self.sample))
def __init__(
    self,
    name: str,
    data_settings: Dict[str, Any],
    classifiers: Dict[str, Classifier],
    setpoint_settings: Dict[str, Any],
    fit_options: Optional[Dict[str, Dict[str, Any]]] = None,
) -> None:
    """Initialise the instrument: select the database, resolve (or create)
    the qcodes experiment, and register the data/fit/setpoint settings
    as qcodes parameters.

    Args:
        name: name passed to the parent constructor.
        data_settings: must contain 'db_name'; may contain 'db_folder'
            and 'qc_experiment_id'.
        classifiers: classifier objects, stored as-is on the instance.
        setpoint_settings: options for setpoint determination.
        fit_options: per-fit option dicts; if None/empty, defaults to an
            empty dict per fit listed in nt.config['core']['implemented_fits'].
    """
    super().__init__(name)
    self.classifiers = classifiers

    # The database name is mandatory; the folder is optional.
    assert 'db_name' in data_settings.keys()
    if 'db_folder' in data_settings.keys():
        nt.set_database(data_settings['db_name'],
                        db_folder=data_settings['db_folder'])
    else:
        nt.set_database(data_settings['db_name'])

    # Resolve the experiment id if the caller did not provide one:
    # reuse the last experiment, or fall back to a fresh default one.
    if data_settings.get('qc_experiment_id') is None:
        try:
            self.qcodes_experiment = load_last_experiment()
        except ValueError:
            logger.warning(
                'No qcodes experiment found. Starting a new '
                'one called "automated_tuning", with an unknown sample.')
            self.qcodes_experiment = new_experiment("automated_tuning",
                                                    sample_name="unknown")
        exp_id = self.qcodes_experiment.exp_id
        data_settings['qc_experiment_id'] = exp_id

    self._data_settings = data_settings
    super().add_parameter(
        name="data_settings",
        label="data_settings",
        docstring="",
        set_cmd=self.update_data_settings,
        get_cmd=self.get_data_settings,
        initial_value=data_settings,
        vals=vals.Dict(),
    )

    # Empty/None fit options default to one empty dict per implemented fit.
    if fit_options is None or not fit_options:
        fit_options = {
            key: {} for key in nt.config['core']['implemented_fits']
        }
    self._fit_options = fit_options
    super().add_parameter(
        name="fit_options",
        label="fit_options",
        docstring="",
        set_cmd=self.set_fit_options,
        get_cmd=self.get_fit_options,
        initial_value=fit_options,
        vals=vals.Dict(),
    )

    super().add_parameter(
        name="setpoint_settings",
        label="setpoint_settings",
        docstring="options for setpoint determination",
        set_cmd=None,
        get_cmd=None,
        initial_value=setpoint_settings,
        vals=vals.Dict(),
    )
def do_experiment(base_path, setup, sweep_object, cleanup, live_plot_axes=None,
                  return_format=None):
    """
    Perform a sweep experiment and put the result in a QCoDeS data set

    Args:
        base_path (str): Experiment database base path in the format
            <experiment_name>/<sample_name>. The eventual path of the data
            set will be given by <experiment_name>/<sample_name>/<run number>.
            Note: This is *not* a path on the file system. Use the
            "get_results_from_db_path" function to retrieve your data.
        setup (list): A list of tuples, e.g.
            [(function1, args1), (function2, args2), etc...]
        sweep_object: Defining the experiment
        cleanup (list): A list of tuples, e.g.
            [(function1, args1), (function2, args2), etc...]
        live_plot_axes (dict): The keys are the axis labels and the values
            are the columns to be plotted. No plots will be shown if None
            is given
        return_format (list): Defines in which way(s) we want to return the
            results of the experiment. Possible options are: data_set_path,
            dataid, dataset, experiment, measurement. Default value is
            "data_set_path".
    """
    # By default we only return the data set path.
    if return_format is None:
        return_format = ["data_set_path"]

    # Split "<experiment>/<sample>"; everything after the first slash is
    # treated as the sample name (the string "None" if absent).
    name_parts = base_path.split("/")
    experiment_name = name_parts[0]
    if len(name_parts) == 1:
        sample_name = "None"
    else:
        sample_name = "/".join(name_parts[1:])

    try:
        experiment = load_experiment_by_name(experiment_name, sample_name)
        experiment.id = experiment.exp_id  # This is needed because of a bug
        # in the "load_experiment_by_name" method.
        # A PR for a fix has been submitted (PR 997)
    except ValueError:  # experiment does not exist yet
        db_location = qcodes.config["core"]["db_location"]
        DataSet(db_location)
        experiment = new_experiment(experiment_name, sample_name)

    counter = experiment.last_counter

    measurement = SweepMeasurement(exp=experiment)

    if live_plot_axes is not None:
        for live_plot_axis in live_plot_axes:
            measurement.add_subscriber(Plot1DSubscriber(live_plot_axis), {})

    # init
    for func, args in setup:
        measurement.add_before_run(func, args)

    # meas
    measurement.register_sweep(sweep_object)
    measurement.write_period = 1.0

    # end
    for func, args in cleanup:
        measurement.add_after_run(func, args)

    # perform exp
    with measurement.run() as datasaver:
        for data in sweep_object:
            datasaver.add_result(*data.items())

    dataid = datasaver.run_id
    data_set_path = f"{base_path}/{counter}"
    dataset = datasaver.dataset

    print(f"Completed measurement. Database path: {data_set_path}")

    # Return only the requested result fields, in the requested order.
    results = {
        "data_set_path": data_set_path,
        "dataid": dataid,
        "dataset": dataset,
        "experiment": experiment,
        "measurement": measurement
    }
    return [results[k] for k in return_format]
def experiment(empty_temp_db):
    """Fixture: yield a fresh experiment, closing its DB connection on teardown."""
    exp = new_experiment("test-experiment", sample_name="test-sample")
    yield exp
    exp.conn.close()
# Close any instruments that may already be open instruments = list(qc.Instrument._all_instruments.keys()) for instrument in instruments: instr = qc.Instrument._all_instruments.pop(instrument) instr = instr() instr.close() # Set up experiment exp_name = 'qcodes_controls_mdac' sample_name = 'mdac' try: exp = load_experiment_by_name(exp_name, sample=sample_name) print('Experiment loaded. Last ID no:', exp.last_counter) except ValueError: exp = new_experiment(exp_name, sample_name) print('Started new experiment') scfg = StationConfigurator() mdac = scfg.load_instrument('mdac') #lockin = scfg.load_instrument('sr860') #ithaco = scfg.load_instrument('ithaco') multimeter = scfg.load_instrument('Keysight') dummy_time = DummyInstrument(name="dummy_time") time_zero = time.time() def getTime(): return time.time() - time_zero
def test_load_or_create_experiment_loading():
    """Test that load_or_create_experiment loads an existing experiment."""
    created = new_experiment("experiment_name", "sample_name")
    loaded = load_or_create_experiment("experiment_name", "sample_name")
    assert_experiments_equal(created, loaded)
def __init__(self):
    """
    1. setup the VNA as an instrument (if it's not already setup)
    2. specify experimental parameters
    3. specify paths of the target files: database file and a new folder
       with raw (txt, png) files
    """
    self.vna_name = 'VNA'
    # this is a qcodes VisaInstrument (interface between visa and qcodes)
    self.vna_class = Anritsu_MS46522B
    # Anritsu_MS46522B("VNA2", "TCPIP0::169.254.235.118::5001::SOCKET",
    #                  50e6, 20e9, -30, 30, 2)

    # -- check if instrument 'VNA' already exists. If not, create it
    if Instrument.exist(self.vna_name, self.vna_class):
        # an instrument is created by qcodes in a global context,
        # from which it can be retrieved manually using find_instrument
        self.vna = Instrument.find_instrument(self.vna_name, self.vna_class)
    else:
        self.vna = self.vna_class(self.vna_name,
                                  'TCPIP0::169.254.235.118::5001::SOCKET',
                                  50e6, 20e9, -30, 30, 2)

    # -- name the experiment -> automatic file names
    self.exp_name = 'Warm_VNA_Noise'  # name used by qcodes
    self.cooldown_date = '20-09-18'
    self.sample_name = 'no_sample'

    # -- set experiment parameters (global constants, used in different
    # measurement functions)
    self.numberofpoints = 20  # 2001 # number of measurement points
    self.vnapower = -10  # applied power
    self.start_frequency = 3.7e9  # 3.387015e9 # 6.608e9-3.5e6 # start frequency of sweep
    self.stop_frequency = 5.7e9  # 3.387065e9 # 6.611e9+3.5e6 # stop frequency of sweep
    self.frequency_span = self.stop_frequency - self.start_frequency
    # NOTE(review): the original source wrapped this expression across
    # lines; reconstructed as span/2 above start — verify intent.
    self.center_frequency = ((self.stop_frequency - self.start_frequency)/2.
                             + self.start_frequency)  # just used for power sweep
    self.measuredtrace = 'S21'  # spectral density measured between port 1 and 2
    self.ifbandwidth = 10  # IF Bandwidth, must be in (10,30,50,70,100,300,500,700,1000,3000,5000,7000,10000)Hz
    self.powersweepstart = -30  # start for power sweep
    self.powersweepstop = 20  # stop for powersweep
    self.powersweepnum = 6  # number of power sweeps (perhaps +/-1) MUST BE AN EVEN NUMBER AT LEAST 6
    # groupdelayref=0.0000000225
    # vna.groupdelay.set(groupdelayref) # resets to 0 instead of working -> rounding to 0
    # print(vna.groupdelay.get())

    # -- set the path where the raw data should be saved to (pngs, txts)
    self.raw_path = ('C:\\Users\\Desktop\\tests'
                     + '\\' + self.cooldown_date + '_' + self.sample_name
                     + '\\' 'raw')

    # set the .db path
    qc.config["core"]["db_location"] = (
        os.path.join('C:\\Users\\Desktop\\tests', 'test.db'))

    # store a qcodesrc file with the loaded .db path to the measurements folder
    qc.config.save_config(
        os.path.join("C:\\Users\\Desktop\\tests", ".qcodesrc"))

    # -- check if in the standard folder -see qcodes config file- an
    # experiment with exp_name already exists; if not, create a new one,
    # if so, just print the last exp. ID and go on
    try:
        # qcodes interface of loading an experiment:
        # -- tries to connect to a database (specified in config data
        # structure) and searches for the exp_name
        self.exp = load_experiment_by_name(self.exp_name,
                                           sample=self.sample_name)
        # keep track of the experiment number
        print('Experiment loaded. Last ID no: ', self.exp.last_counter)
    except ValueError:
        print("Experiment name ", self.exp_name, " with sample name ",
              self.sample_name, " not found in ",
              qc.config["core"]["db_location"])
        print('Starting new experiment.')
        self.exp = new_experiment(self.exp_name, self.sample_name)
        os.makedirs(self.raw_path, exist_ok=True)

    # ---- always create a new folder for each day of taking measurements
    self.raw_path_with_date = os.path.join(
        self.raw_path, date.today().strftime("%y-%m-%d"))
    if not os.path.isdir(self.raw_path_with_date):
        os.makedirs(self.raw_path_with_date,
                    exist_ok=True)  # force-create the directory
def _make_experiment(empty_temp_db):
    """Yield a fresh experiment; always close its DB connection on teardown."""
    exp = new_experiment("test-experiment", sample_name="test-sample")
    try:
        yield exp
    finally:
        exp.conn.close()
def test_load_by_run_spec(empty_temp_db, some_interdeps):
    """Exercise load_by_run_spec/get_guids_from_run_spec across runs that
    share captured counters, experiment names and sample names."""
    def create_ds_with_exp_id(exp_id):
        # Minimal started dataset with one result row in the given experiment.
        ds = DataSet(exp_id=exp_id)
        ds.set_interdependencies(some_interdeps[1])
        ds.mark_started()
        ds.add_results([{'ps1': 1, 'ps2': 2}])
        return ds

    # create 3 experiments that mix two experiment names and two sample names
    exp_names = ["te1", "te2", "te1"]
    sample_names = ["ts1", "ts2", "ts2"]
    exps = [
        new_experiment(exp_name, sample_name=sample_name)
        for exp_name, sample_name in zip(exp_names, sample_names)
    ]
    created_ds = [create_ds_with_exp_id(exp.exp_id) for exp in exps]
    conn = created_ds[0].conn

    guids = get_guids_from_run_spec(conn=conn)
    assert len(guids) == 3

    # since we are not copying runs from multiple dbs we can always load by
    # captured_run_id and this is equivalent to load_by_id
    for i in range(1, 4):
        loaded_ds = load_by_run_spec(captured_run_id=i, conn=conn)
        assert loaded_ds.guid == guids[i - 1]
        assert loaded_ds.the_same_dataset_as(created_ds[i - 1])

    # All the datasets have the same captured counter
    # so we cannot load by that alone
    guids_cc1 = get_guids_from_run_spec(captured_counter=1, conn=conn)
    assert len(guids_cc1) == 3
    with pytest.raises(NameError, match="More than one matching"):
        load_by_run_spec(captured_counter=1)

    # there are two different experiments with exp name "te1"
    # and thus 2 different datasets with counter=1 and that exp name
    guids_cc1_te1 = get_guids_from_run_spec(captured_counter=1,
                                            experiment_name='te1',
                                            conn=conn)
    assert len(guids_cc1_te1) == 2
    with pytest.raises(NameError, match="More than one matching"):
        load_by_run_spec(captured_counter=1, experiment_name="te1",
                         conn=conn)

    # but for "te2" there is only one
    guids_cc1_te2 = get_guids_from_run_spec(captured_counter=1,
                                            experiment_name='te2',
                                            conn=conn)
    assert len(guids_cc1_te2) == 1
    loaded_ds = load_by_run_spec(captured_counter=1, experiment_name="te2",
                                 conn=conn)
    assert loaded_ds.guid == guids_cc1_te2[0]
    assert loaded_ds.the_same_dataset_as(created_ds[1])

    # there are two different experiments with sample name "ts2" but
    # different exp names so the counter is not unique
    guids_cc1_ts2 = get_guids_from_run_spec(captured_counter=1,
                                            sample_name='ts2',
                                            conn=conn)
    assert len(guids_cc1_ts2) == 2
    with pytest.raises(NameError, match="More than one matching"):
        load_by_run_spec(captured_counter=1, sample_name="ts2", conn=conn)

    # but for "ts1" there is only one
    guids_cc1_ts1 = get_guids_from_run_spec(captured_counter=1,
                                            sample_name='ts1',
                                            conn=conn)
    assert len(guids_cc1_ts1) == 1
    loaded_ds = load_by_run_spec(captured_counter=1, sample_name="ts1",
                                 conn=conn)
    assert loaded_ds.the_same_dataset_as(created_ds[0])
    assert loaded_ds.guid == guids_cc1_ts1[0]

    # we can load all 3 if we are specific.
    for i in range(3):
        loaded_ds = load_by_run_spec(captured_counter=1,
                                     experiment_name=exp_names[i],
                                     sample_name=sample_names[i],
                                     conn=conn)
        assert loaded_ds.the_same_dataset_as(created_ds[i])
        assert loaded_ds.guid == guids[i]

    # load a non-existing run
    with pytest.raises(NameError, match="No run matching"):
        load_by_run_spec(captured_counter=10000, sample_name="ts2",
                         conn=conn)

    # an unknown experiment name yields no guids at all
    empty_guid_list = get_guids_from_run_spec(conn=conn,
                                              experiment_name='nosuchexp')
    assert empty_guid_list == []