Code Example #1: load a dataset from netCDF and write its metadata to the database
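The examples on this page are taken from tests of QCoDeS's in-memory dataset, DataSetInMem (Example #12 is the library method those tests exercise). They rely on pytest fixtures from the QCoDeS test suite (experiment, empty_temp_db, non_created_db, DAC, DMM, tmp_path, and friends) and on a test-local compare_datasets helper, so none of them is self-contained. Below is a minimal sketch of the imports they assume; exact module paths can vary between QCoDeS versions, and the compare_datasets body is a hypothetical stand-in, not the original helper.

from pathlib import Path
from typing import List

import numpy as np
import pytest

import qcodes as qc
from qcodes.dataset import (
    load_by_guid,
    load_by_run_spec,
    load_or_create_experiment,
)
from qcodes.dataset.data_set_in_memory import DataSetInMem
from qcodes.dataset.descriptions.dependencies import InterDependencies_
from qcodes.dataset.descriptions.param_spec import ParamSpecBase
from qcodes.dataset.measurements import DataSetType
from qcodes.dataset.sqlite.database import connect


def compare_datasets(ds, loaded_ds):
    # Hypothetical stand-in for the suite's helper; the original likely
    # also compares the cached data of the two datasets.
    assert ds.the_same_dataset_as(loaded_ds)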
def test_load_from_netcdf_and_write_metadata_to_db(empty_temp_db):
    netcdf_file_path = (Path(__file__).parent / "fixtures" / "db_files" /
                        "netcdf" / "qcodes_2.nc")

    if not netcdf_file_path.exists():
        pytest.skip("No netcdf fixtures found.")

    ds = DataSetInMem._load_from_netcdf(netcdf_file_path)
    ds.write_metadata_to_db()

    loaded_ds = load_by_run_spec(captured_run_id=ds.captured_run_id)
    assert isinstance(loaded_ds, DataSetInMem)
    assert loaded_ds.captured_run_id == ds.captured_run_id
    assert loaded_ds.captured_counter == ds.captured_counter
    assert loaded_ds.run_timestamp_raw == ds.run_timestamp_raw
    assert loaded_ds.completed_timestamp_raw == ds.completed_timestamp_raw

    compare_datasets(ds, loaded_ds)

    # Now attempt to write again. This should be a no-op, so everything
    # should stay the same.
    ds.write_metadata_to_db()
    loaded_ds = load_by_run_spec(captured_run_id=ds.captured_run_id)
    assert isinstance(loaded_ds, DataSetInMem)
    assert loaded_ds.captured_run_id == ds.captured_run_id
    assert loaded_ds.captured_counter == ds.captured_counter
    assert loaded_ds.run_timestamp_raw == ds.run_timestamp_raw
    assert loaded_ds.completed_timestamp_raw == ds.completed_timestamp_raw

    compare_datasets(ds, loaded_ds)
Code Example #2: run and completion timestamps over the dataset lifecycle
def test_timestamps(experiment):
    ds = DataSetInMem._create_new_run(name="foo")

    assert ds.run_timestamp() is None
    assert ds.run_timestamp_raw is None

    assert ds.completed_timestamp() is None
    assert ds.completed_timestamp_raw is None

    pss: List[ParamSpecBase] = [
        ParamSpecBase(f"ps{n}", paramtype="numeric") for n in range(3)
    ]

    idps = InterDependencies_(dependencies={pss[0]: (pss[1], pss[2])})

    ds.prepare(interdeps=idps, snapshot={})

    assert ds.run_timestamp() is not None
    assert ds.run_timestamp_raw is not None

    assert ds.completed_timestamp() is None
    assert ds.completed_timestamp_raw is None

    ds.mark_completed()

    assert ds.run_timestamp() is not None
    assert ds.run_timestamp_raw is not None

    assert ds.completed_timestamp() is not None
    assert ds.completed_timestamp_raw is not None

    # Marking the dataset completed a second time should not raise.
    ds.mark_completed()
Code Example #3: export to netCDF and reload alongside the database
def test_dataset_load_from_netcdf_and_db(meas_with_registered_param, DMM, DAC,
                                         tmp_path):
    with meas_with_registered_param.run(
            dataset_class=DataSetType.DataSetInMem) as datasaver:
        for set_v in np.linspace(0, 25, 10):
            DAC.ch1.set(set_v)
            get_v = DMM.v1()
            datasaver.add_result((DAC.ch1, set_v), (DMM.v1, get_v))

    # Run the same sweep a second time so that this second dataset
    # gets run_id == 2.
    with meas_with_registered_param.run(
            dataset_class=DataSetType.DataSetInMem) as datasaver:
        for set_v in np.linspace(0, 25, 10):
            DAC.ch1.set(set_v)
            get_v = DMM.v1()
            datasaver.add_result((DAC.ch1, set_v), (DMM.v1, get_v))

    path_to_db = datasaver.dataset._path_to_db
    ds = datasaver.dataset
    ds.add_metadata("mymetadatatag", 42)

    assert ds.run_id == 2
    assert isinstance(ds, DataSetInMem)

    ds.export(export_type="netcdf", path=str(tmp_path))
    loaded_ds = DataSetInMem._load_from_netcdf(
        tmp_path / f"qcodes_{ds.captured_run_id}_{ds.guid}.nc",
        path_to_db=path_to_db)
    assert isinstance(loaded_ds, DataSetInMem)
    assert loaded_ds.run_id == ds.run_id
    compare_datasets(ds, loaded_ds)
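A note on the two ids asserted above: run_id is the row id in the database the dataset is currently registered in, while captured_run_id is fixed when the run is first captured and travels with the exported netCDF file, which is why Example #1 reloads via load_by_run_spec(captured_run_id=...).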
Code Example #4: marking a pristine dataset as completed raises
def test_mark_pristine_completed_raises(experiment):
    ds = DataSetInMem._create_new_run(name="foo")

    with pytest.raises(
        RuntimeError, match="Can not mark a dataset as complete before it"
    ):
        ds.mark_completed()
Code Example #5: load from netCDF when no database file exists yet
def test_load_from_netcdf_no_db_file(non_created_db):
    netcdf_file_path = (Path(__file__).parent / "fixtures" / "db_files" /
                        "netcdf" / "qcodes_2.nc")

    if not netcdf_file_path.exists():
        pytest.skip("No netcdf fixtures found.")

    ds = DataSetInMem._load_from_netcdf(netcdf_file_path)
    ds.write_metadata_to_db()
    loaded_ds = load_by_run_spec(captured_run_id=ds.captured_run_id)
    assert isinstance(loaded_ds, DataSetInMem)
    compare_datasets(ds, loaded_ds)
Code Example #6: an empty dataset has no parameters at any stage
def test_empty_ds_parameters(experiment):
    ds = DataSetInMem._create_new_run(name="foo")

    assert ds._parameters is None

    ds._perform_start_actions()
    assert ds._parameters is None

    ds.mark_completed()

    assert ds._parameters is None
Code Example #7: create an in-memory dataset against an explicit database
def test_create_dataset_in_memory_explicit_db(empty_temp_db):
    default_db_location = qc.config["core"]["db_location"]

    extra_db_location = str(Path(default_db_location).parent / "extra.db")

    load_or_create_experiment(
        conn=connect(extra_db_location), experiment_name="myexp", sample_name="mysample"
    )

    ds = DataSetInMem._create_new_run(name="foo", path_to_db=extra_db_location)

    assert ds.path_to_db == extra_db_location
    assert default_db_location != extra_db_location
Code Example #8: calling prepare twice raises
def test_prepare_twice_raises(experiment):
    ds = DataSetInMem._create_new_run(name="foo")

    pss: List[ParamSpecBase] = [
        ParamSpecBase(f"ps{n}", paramtype="numeric") for n in range(3)
    ]

    idps = InterDependencies_(dependencies={pss[0]: (pss[1], pss[2])})

    ds.prepare(interdeps=idps, snapshot={})
    with pytest.raises(
        RuntimeError, match="Cannot prepare a dataset that is not pristine."
    ):
        ds.prepare(interdeps=idps, snapshot={})
Code Example #9: write metadata to an explicit database
def test_write_metadata_to_explicit_db(empty_temp_db):
    default_db_location = qc.config["core"]["db_location"]
    extra_db_location = str(Path(default_db_location).parent / "extra.db")
    load_or_create_experiment(experiment_name="myexp", sample_name="mysample")
    load_or_create_experiment(
        conn=connect(extra_db_location), experiment_name="myexp", sample_name="mysample"
    )
    ds = DataSetInMem._create_new_run(name="foo")
    assert ds._parameters is None
    assert ds.path_to_db == default_db_location
    ds.export("netcdf")
    ds.write_metadata_to_db(path_to_db=extra_db_location)
    loaded_ds = load_by_guid(ds.guid, conn=connect(extra_db_location))

    assert ds.the_same_dataset_as(loaded_ds)
Code Example #10: load a legacy (QCoDeS 0.26) netCDF file
def test_load_from_netcdf_legacy_version(non_created_db):
    # netCDF files exported by QCoDeS 0.26 did not contain the parent
    # dataset links and were written with a different engine; check that
    # such a file still loads correctly.

    netcdf_file_path = (Path(__file__).parent / "fixtures" / "db_files" /
                        "netcdf" / "qcodes_v26.nc")

    if not netcdf_file_path.exists():
        pytest.skip("No netcdf fixtures found.")

    ds = DataSetInMem._load_from_netcdf(netcdf_file_path)
    ds.write_metadata_to_db()
    loaded_ds = load_by_run_spec(captured_run_id=ds.captured_run_id)
    assert isinstance(loaded_ds, DataSetInMem)
    compare_datasets(ds, loaded_ds)
Code Example #11: round-trip complex-valued data through netCDF
def test_dataset_in_memory_reload_from_netcdf_complex(
        meas_with_registered_param_complex, DAC, complex_num_instrument,
        tmp_path):
    with meas_with_registered_param_complex.run(
            dataset_class=DataSetType.DataSetInMem) as datasaver:
        for set_v in np.linspace(0, 25, 10):
            DAC.ch1.set(set_v)
            get_v = complex_num_instrument.complex_num()
            datasaver.add_result((DAC.ch1, set_v),
                                 (complex_num_instrument.complex_num, get_v))

    ds = datasaver.dataset
    ds.add_metadata("mymetadatatag", 42)
    ds.add_metadata("someothermetadatatag", 42)
    ds.export(export_type="netcdf", path=str(tmp_path))

    assert isinstance(ds, DataSetInMem)
    loaded_ds = DataSetInMem._load_from_netcdf(
        tmp_path / f"qcodes_{ds.captured_run_id}_{ds.guid}.nc")
    assert isinstance(loaded_ds, DataSetInMem)
    compare_datasets(ds, loaded_ds)
Code Example #12: the measurement context manager's __enter__, which creates the dataset and returns the DataSaver
    def __enter__(self) -> DataSaver:
        # TODO: should user actions really precede the dataset?
        # first do whatever bootstrapping the user specified

        for func, args in self.enteractions:
            func(*args)

        dataset_class: Type[DataSetProtocol]

        # next set up the "datasaver"
        if self.experiment is not None:
            exp_id: Optional[int] = self.experiment.exp_id
            path_to_db: Optional[str] = self.experiment.path_to_db
            conn: Optional["ConnectionPlus"] = self.experiment.conn
        else:
            exp_id = None
            path_to_db = None
            conn = None

        if self._dataset_class is DataSetType.DataSet:
            self.ds = DataSet(
                name=self.name,
                exp_id=exp_id,
                conn=conn,
                in_memory_cache=self._in_memory_cache,
            )
        elif self._dataset_class is DataSetType.DataSetInMem:
            if self._in_memory_cache is False:
                raise RuntimeError("Cannot disable the in memory cache for a "
                                   "dataset that is only in memory.")
            self.ds = DataSetInMem._create_new_run(
                name=self.name,
                exp_id=exp_id,
                path_to_db=path_to_db,
            )
        else:
            raise RuntimeError("Does not support any other dataset classes")

        # .. and give the dataset a snapshot as metadata
        if self.station is None:
            station = Station.default
        else:
            station = self.station

        if station is not None:
            snapshot = station.snapshot()
        else:
            snapshot = {}

        self.ds.prepare(
            snapshot=snapshot,
            interdeps=self._interdependencies,
            write_in_background=self._write_in_background,
            shapes=self._shapes,
            parent_datasets=self._parent_datasets,
        )

        # register all subscribers
        if isinstance(self.ds, DataSet):
            for callback, state in self.subscribers:
                # We register with minimal waiting time.
                # That should make all subscribers be called when data is
                # flushed to the database.
                log.debug(f"Subscribing callable {callback} with state {state}")
                self.ds.subscribe(callback,
                                  min_wait=0,
                                  min_count=1,
                                  state=state)

        print(f"Starting experimental run with id: {self.ds.captured_run_id}."
              f" {self._extra_log_info}")
        log.info(f"Starting measurement with guid: {self.ds.guid}, "
                 f'sample_name: "{self.ds.sample_name}", '
                 f'exp_name: "{self.ds.exp_name}", '
                 f'ds_name: "{self.ds.name}". '
                 f"{self._extra_log_info}")
        log.info(f"Using background writing: {self._write_in_background}")

        self.datasaver = DataSaver(dataset=self.ds,
                                   write_period=self.write_period,
                                   interdeps=self._interdependencies)

        return self.datasaver
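For context, the __enter__ above is what runs when a measurement is used as a context manager, as in the test examples on this page. A minimal sketch of that entry point, assuming a Measurement with parameters already registered in the style of Example #3 (experiment, DAC, and DMM stand in for the fixtures used there):

from qcodes.dataset.measurements import Measurement

meas = Measurement(exp=experiment, name="sweep")
meas.register_parameter(DAC.ch1)
meas.register_parameter(DMM.v1, setpoints=(DAC.ch1,))

# meas.run(...) returns the runner whose __enter__ is shown above;
# entering it creates the DataSetInMem run and yields a DataSaver.
with meas.run(dataset_class=DataSetType.DataSetInMem) as datasaver:
    datasaver.add_result((DAC.ch1, 0.0), (DMM.v1, DMM.v1()))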
Code Example #13: prepare with no parameters raises
def test_no_interdeps_raises_in_prepare(experiment):
    ds = DataSetInMem._create_new_run(name="foo")
    with pytest.raises(RuntimeError, match="No parameters supplied"):
        ds.prepare(interdeps=InterDependencies_(), snapshot={})
Code Example #14: loading a nonexistent GUID raises
def test_load_from_non_existing_guid(experiment):
    guid = "This is not a guid"
    with pytest.raises(
        RuntimeError, match="Could not find the requested run with GUID"
    ):
        _ = DataSetInMem._load_from_db(conn=experiment.conn, guid=guid)