Exemple #1
0
def do_2d_sweep():
    """
    Run an automatic 2D sweep on the NI DAQ.

    Creates the DAQ, points QCoDeS at the test database, registers a new
    experiment, then sweeps ao0 (inner) against ao1 (outer) while
    recording ai3.
    """
    # Create the DAQ object
    daq = Daq("Dev1", "testdaq")

    # Initialize the database you want to save data to
    try:
        experimentName = "testexp-2d_5"
        sampleName = "sampletest-2d"
        initialise_or_create_database_at(
            'C:\\Users\\erunb\\MeasureIt\\Databases\\testdatabase.db')
        qc.new_experiment(name=experimentName, sample_name=sampleName)
    except Exception as e:
        # Narrowed from a bare `except:` (which also swallowed
        # SystemExit/KeyboardInterrupt) and report the actual cause.
        print(f"Error opening database: {e}")
        daq.device.reset_device()
        daq.__del__()
        quit()

    # Create the sweep argument, tell it which channel to listen to
    # Sweep parameter arguments are:
    # [ <QCoDeS Parameter>, start value, stop value, step value ]
    in_sweep_params = [daq.submodules["ao0"].voltage, 0, 1, 0.1]
    out_sweep_params = [daq.submodules["ao1"].voltage, 0, 5, 1]
    freq = 1000
    # "measured" parameter
    param = daq.submodules["ai3"].voltage
    s = Sweep2D(in_sweep_params, out_sweep_params, freq, param)

    # Run the sweep automatically
    s.autorun()

    # Clean up the DAQ (project convention calls __del__ directly)
    daq.__del__()
Exemple #2
0
def init_database(db, exp, samp, sweep=None):
    """
    Initializes a new database with experiment and sample names and creates
    a new measurement if a sweep is set.

    Parameters
    ----------
    db:
        The desired path of the new database. A '.db' suffix is appended
        when missing.
    exp:
        The experiment name.
    samp:
        The sample name.
    sweep:
        Optional sweep object for creating new runs for existing sweeps.
    """
    # Append the extension only when the name does not already end with it;
    # the previous `'.db' not in db` check wrongly skipped any path that
    # merely *contained* '.db' somewhere (e.g. a folder named 'my.db_files').
    if not db.endswith('.db'):
        db = f'{db}.db'

    databases_dir = f'{os.environ["MeasureItHome"]}\\Databases\\'
    if databases_dir in db:
        # Already an absolute path inside the MeasureIt Databases folder.
        initialise_or_create_database_at(db)
    else:
        initialise_or_create_database_at(databases_dir + db)
    qc.new_experiment(exp, samp)

    if sweep is not None:
        sweep._create_measurement()
Exemple #3
0
    def set_database(self):
        """
        Changes the database for the next run. Pops out the next item in a list, if that
        is what was loaded, or keeps the same string.
        """
        def next_value(source):
            # A deque supplies one entry per run; a plain string is reused
            # for every run. Anything else falls back to "".
            if isinstance(source, str):
                return source
            if isinstance(source, deque):
                return source.popleft()
            return ""

        db = next_value(self.database)
        sample = next_value(self.sample_name)
        exp = next_value(self.exp_name)

        # Initialize the database
        try:
            initialise_or_create_database_at(
                'C:\\Users\\Nanouser\\Documents\\MeasureIt\\Databases\\' + db +
                '.db')
            qc.new_experiment(name=exp, sample_name=sample)
        except Exception:
            # Narrowed from a bare `except:` so SystemExit and
            # KeyboardInterrupt still propagate.
            print("Database info loaded incorrectly!")
Exemple #4
0
def test_foreground_twice(empty_temp_db_connection):
    """Two foreground (non-background-writer) runs may be started in a row."""
    new_experiment("test", "test1", conn=empty_temp_db_connection)

    first = DataSet(conn=empty_temp_db_connection)
    first.mark_started(start_bg_writer=False)

    second = DataSet(conn=empty_temp_db_connection)
    second.mark_started(start_bg_writer=False)
Exemple #5
0
def test_foreground_after_background_raises(empty_temp_db_connection):
    """Starting a foreground run while a background-writer run is active raises."""
    new_experiment("test", "test1", conn=empty_temp_db_connection)

    background_ds = DataSet(conn=empty_temp_db_connection)
    background_ds.mark_started(start_bg_writer=True)

    foreground_ds = DataSet(conn=empty_temp_db_connection)
    with pytest.raises(RuntimeError, match="All datasets written"):
        foreground_ds.mark_started(start_bg_writer=False)
Exemple #6
0
    def start(self):
        """
        Sets the database to the values given at initialization, then calls the callback function
        """
        initialise_or_create_database_at(self.db)
        qc.new_experiment(name=self.exp, sample_name=self.samp)

        callback = self.callback
        # Invoke the callback only when one was supplied and is callable.
        if callback is not None and callable(callback):
            callback()
Exemple #7
0
def test_foreground_after_background_non_concurrent(empty_temp_db_connection):
    """A bg-writer run, a foreground run, and another bg-writer run may
    follow one another as long as each completes before the next starts."""
    new_experiment("test", "test1", conn=empty_temp_db_connection)

    for use_bg_writer in (True, False, True):
        ds = DataSet(conn=empty_temp_db_connection)
        ds.mark_started(start_bg_writer=use_bg_writer)
        ds.mark_completed()
def load_experiment(sample_batch, sample_name):
    """Initialise folders/settings/logging via my_init and register a new
    QCoDeS experiment for the given sample batch + name."""
    station = qc.Station.default
    experiment_id = sample_batch + sample_name

    # Folder, settings and logging setup for the experiment.
    my_init(experiment_id,
            station,
            pdf_folder=False,
            png_folder=False,
            analysis_folder=True,
            waveforms_folder=False,
            calib_config=False,
            annotate_image=False,
            mainfolder="D:/data/",
            display_pdf=False,
            display_individual_pdf=False,
            qubit_count=2)

    new_experiment(sample_batch, sample_name)
def load_or_create_experiment(experiment_name: str,
                              sample_name: str) -> Experiment:
    """
    Find and return an experiment with the given name and sample name,
    or create one if not found.

    Note: this function is similar to pytopo.select_experiment().

    Args:
        experiment_name
            Name of the experiment to find or create
        sample_name
            Name of the sample

    Returns:
        The found or created experiment

    Raises:
        ValueError: re-raised when loading fails for any reason other
            than the experiment not existing.
    """
    try:
        experiment = qcodes.load_experiment_by_name(experiment_name,
                                                    sample_name)
    except ValueError as exception:
        if "Experiment not found" in str(exception):
            experiment = qcodes.new_experiment(experiment_name, sample_name)
        else:
            # Bare `raise` preserves the original traceback; the previous
            # `raise exception` moved the raise site into this function.
            raise
    return experiment
Exemple #10
0
def test_add_experiments(empty_temp_db, experiment_name,
                         sample_name, dataset_name):
    """Adding an experiment grows the experiment list, and new datasets in
    it get sequential counters and the expected table names."""
    global n_experiments
    n_experiments += 1

    _ = new_experiment(experiment_name, sample_name=sample_name)
    all_experiments = experiments()
    assert len(all_experiments) == n_experiments

    exp = all_experiments[-1]
    assert exp.name == experiment_name
    assert exp.sample_name == sample_name
    assert exp.last_counter == 0

    # Two datasets in a row: counters must advance 1, 2 and the table
    # name must embed name, experiment id and counter.
    for expected_counter in (1, 2):
        created = new_data_set(dataset_name)
        loaded = load_by_id(created.run_id)
        assert loaded.name == dataset_name
        assert loaded.counter == expected_counter
        assert loaded.table_name == f"{dataset_name}-{exp.exp_id}-{loaded.counter}"
Exemple #11
0
def experiment_doubledots(empty_temp_db):
    """Yield an experiment pre-populated with double-dot data; the DB
    connection is closed on teardown."""
    exp = new_experiment("test_experiment", sample_name="test_sample")
    populate_db_doubledots()
    try:
        yield exp
    finally:
        exp.conn.close()
Exemple #12
0
def do_1d_sweep(_min_v, _max_v, _step, _freq, _expName, _sampleName):
    """
    Run a bidirectional 1D sweep of ao0 on the NI DAQ while recording ai3,
    then print the recorded data.

    Parameters
    ----------
    _min_v / _max_v:
        Sweep range for the output voltage.
    _step:
        Voltage step size.
    _freq:
        Step rate of the sweep.
    _expName / _sampleName:
        Experiment and sample names for the QCoDeS database.
    """
    # Create the DAQ object
    daq = Daq("Dev1", "testdaq")

    # Initialize the database you want to save data to
    try:
        experimentName = _expName
        sampleName = _sampleName
        initialise_or_create_database_at(
            'C:\\Users\\erunb\\MeasureIt\\Databases\\testdatabase.db')
        qc.new_experiment(name=experimentName, sample_name=sampleName)
    except Exception as e:
        # Narrowed from a bare `except:` (which also swallowed
        # SystemExit/KeyboardInterrupt) and report the actual cause.
        print(f"Error opening database: {e}")
        daq.device.reset_device()
        daq.__del__()
        quit()

    # Create the sweep argument, tell it which channel to listen to.
    # (The redundant local aliases min_v/max_v/step/freq were removed.)
    s = Sweep1D(daq.submodules["ao0"].voltage,
                _min_v,
                _max_v,
                _step,
                _freq,
                bidirectional=True,
                meas=None,
                plot=True,
                auto_figs=True)
    s.follow_param(daq.submodules["ai3"].voltage)
    s._create_measurement(s.set_param)

    # Run the sweep automatically
    s.autorun()

    # Clean up the DAQ
    daq.__del__()

    # Show the experiment data
    ex = qc.dataset.experiment_container.load_experiment_by_name(
        experimentName, sampleName)
    fii = get_data_by_id(ex.data_sets()[0].run_id)
    print(fii)
Exemple #13
0
    def init_sweep(self):
        """
        Validate the GUI inputs, build a Sweep1D over the selected DAQ
        output channel, prepare plotting and the database, and start the
        sweep.
        """
        # Cannot sweep without a connected DAQ: show a dialog and bail out.
        if self.daq is None:
            msg = QMessageBox()
            msg.setText("DAQ is not connected!")
            msg.setWindowTitle("Sweep Error")
            msg.setStandardButtons(QMessageBox.Close)
            msg.exec_()
            return

        # Parse the sweep voltage bounds from the text boxes.
        self.v_start = _value_parser(self.min_v_val.text())
        self.v_end = _value_parser(self.max_v_val.text())

        try:
            self.v_step = float(self.step_v_val.text())
            # Flip the sign so the step always moves from start toward end.
            if self.v_step > 0 and self.v_end < self.v_start:
                self.v_step = -1 * self.v_step
        except ValueError:
            # Unparseable step: default to 1000 equal steps across the range.
            self.v_step = (self.v_end - self.v_start) / 1000
        try:
            self.freq = float(self.steprate_val.text())
        except ValueError:
            # Unparseable step rate: fall back to 10.
            self.freq = 10

        # Channel names like "ai3"/"ao0" built from the combo-box selections.
        ichannel = "ai" + str(int(self.in_chan_box.currentText()))
        self.ochannel = "ao" + str(int(self.out_chan_box.currentText()))

        self.sweep_task = nidaqmx.Task()
        self.daq.submodules[self.ochannel].add_self_to_task(self.sweep_task)
        self.s = Sweep1D(self.daq.submodules[self.ochannel].voltage,
                         self.v_start, self.v_end, self.v_step, self.freq)
        self.s.follow_param(self.daq.submodules[ichannel].voltage)
        self.meas = self.s.get_measurement()

        self.counter = 0
        self.init_plot(self.daq.submodules[self.ochannel].voltage,
                       self.daq.submodules[ichannel].voltage)

        # NOTE(review): hard-coded, user-specific database path and
        # experiment/sample names — consider making these configurable.
        initialise_or_create_database_at(
            'C:\\Users\\erunb\\MeasureIt\\Databases\\testdb.db')
        qc.new_experiment(name='demotest3', sample_name='my best sample3')

        # Flag the sweep as active and kick off the run loop.
        self.sweeping = True
        self.running = True
        self.curr_val = self.v_start
        self.run()
Exemple #14
0
    def setup_save(self):
        """Open the save-data dialog and, on accept, initialize the chosen
        database and experiment. Returns True on success, False otherwise."""
        save_data_ui = SaveDataGUI(self, self.db, self.exp_name,
                                   self.sample_name)
        if not save_data_ui.exec_():
            # Dialog was cancelled: keep the previous settings.
            return False

        (self.db, self.exp_name,
         self.sample_name) = save_data_ui.get_save_info()

        try:
            initialise_or_create_database_at(self.db)
            qc.new_experiment(self.exp_name, self.sample_name)
        except Exception as e:
            self.show_error('Error',
                            "Error opening up database. Try again.", e)
            return False

        self.db_set = True
        return True
Exemple #15
0
def test_dataset_location(empty_temp_db_connection):
    """Dataset and experiment both point at the db file of the supplied
    connection."""
    exp = new_experiment("test", "test1", conn=empty_temp_db_connection)
    ds = DataSet(conn=empty_temp_db_connection)

    expected_path = empty_temp_db_connection.path_to_dbfile
    assert path_to_dbfile(empty_temp_db_connection) == expected_path
    assert exp.path_to_db == expected_path
    assert ds.path_to_db == expected_path
def benchmark_add_results_vs_MAX_VARIABLE_NUMBER():
    """
    Benchmark DataSet.add_results as a function of the SQLite
    MAX_VARIABLE_NUMBER limit and save a timing plot.

    Returns:
        The filename of the saved plot.
    """
    filename = 'benchmark_add_results_vs_MAX_VARIABLE_NUMBER.png'
    if no_plots:
        return filename
    plt.figure()
    xr, yr = [], []

    mvn = qc.SQLiteSettings.limits['MAX_VARIABLE_NUMBER']
    try:
        for i in range(2, mvn, mvn // 50):
            ts = []
            # Three repetitions per limit value; the mean is plotted.
            for j in range(3):
                qc.SQLiteSettings.limits['MAX_VARIABLE_NUMBER'] = i
                new_experiment("profile", "profile")
                data_set = new_data_set("stress_test_simple")

                t1 = ParamSpec('t', 'numeric', label='time', unit='s')
                x = ParamSpec('x',
                              'numeric',
                              label='voltage',
                              unit='v',
                              depends_on=[t1])

                data_set.add_parameter(t1)
                data_set.add_parameter(x)
                insertion_size = 400 * 600
                t_values = np.linspace(-1, 1, insertion_size)
                results = [{"t": t, "x": 2 * t**2 + 1} for t in t_values]

                t1r = time.time()
                data_set.add_results(results)
                t = time.time() - t1r
                ts.append(t)
            xr.append(i)
            yr.append(mean(ts))
    finally:
        # Bug fix: the benchmark used to leave the *last tested* limit in
        # place, corrupting global SQLite settings for later code.
        qc.SQLiteSettings.limits['MAX_VARIABLE_NUMBER'] = mvn

    plt.plot(xr, yr)
    plt.ylabel('execution time of data_set.add_results(result)')
    plt.xlabel('MAX_VARIABLE_NUMBER')
    plt.savefig(filename)
    return filename
Exemple #17
0
def second_third_experiment_labelled_data(second_empty_temp_db, tmp_path):
    """
    Create two experiments and populate the second one with labelled
    datasets (one per entry in test_data_labels2). Yields the second
    experiment; both DB connections are closed on teardown.
    """
    e1 = new_experiment("test_experiment2", sample_name="test_sample")
    e2 = new_experiment("test_experiment3", sample_name="test_sample")

    for did in range(len(test_data_labels2)):
        # NOTE(review): datasets are only attached to e2; e1 stays empty —
        # presumably intentional for these second/third-experiment tests.
        ds = DataSet(os.path.join(tmp_path, "temp2.db"), exp_id=e2._exp_id)

        nt_metadata, current_label = generate_default_metadata()
        stage = test_data_labels2[did]
        # A None stage means the dataset carries no positive label.
        if stage is not None:
            current_label[stage] = 1

        ds.add_metadata(nt.meta_tag, json.dumps(nt_metadata))
        ds.add_metadata("snapshot", json.dumps({}))
        for label, value in current_label.items():
            ds.add_metadata(label, value)
    try:
        yield e2
    finally:
        e1.conn.close()
        e2.conn.close()
Exemple #18
0
    def set_database(self):
        """
        Sets the loaded database information for each sweep before running.

        Database information must be previously loaded using the 'load_database_info'
        method. Creates path for database and begins a new QCoDeS experiment.
        """
        if self.database is None:
            # Nothing was loaded; leave the current database untouched.
            return

        def next_value(source):
            # A deque supplies one entry per run; a plain string is reused
            # for every run. Anything else falls back to "".
            if isinstance(source, str):
                return source
            if isinstance(source, deque):
                return source.popleft()
            return ""

        db = next_value(self.database)
        sample = next_value(self.sample_name)
        exp = next_value(self.exp_name)

        # Initialize the database
        try:
            initialise_or_create_database_at(os.environ['MeasureItHome'] +
                                             '\\Databases\\' + db + '.db')
            qc.new_experiment(name=exp, sample_name=sample)
        except Exception:
            # Narrowed from a bare `except:` so SystemExit and
            # KeyboardInterrupt still propagate.
            print("Database info loaded incorrectly!")
Exemple #19
0
def experiment_partially_labelled(empty_temp_db, tmp_path):
    """
    Yield an experiment whose datasets are only partially labelled:
    label metadata is written only for datasets whose stage is not None.
    The DB connection is closed on teardown.
    """
    e = new_experiment("test_experiment", sample_name="test_sample")
    for did in range(len(test_data_labels)):
        ds = DataSet(os.path.join(tmp_path, "temp.db"))

        nt_metadata, current_label = generate_default_metadata()
        stage = test_data_labels[did]
        if stage is not None:
            current_label[stage] = 1
            # NOTE(review): unlike the fully-labelled fixtures elsewhere,
            # the label-writing loop sits inside this branch, so None
            # stages get no label entries at all — apparently the point
            # of "partially labelled"; confirm against the tests using it.
            for label, value in current_label.items():
                ds.add_metadata(label, value)

        ds.add_metadata(nt.meta_tag, json.dumps(nt_metadata))
        ds.add_metadata("snapshot", json.dumps({}))
    try:
        yield e
    finally:
        e.conn.close()
def test_nest():
    """
    Nested sweep: x is the outer axis, y the inner; m is measured at the
    outer level, n inside the y sweep. Verifies the registered parameter
    dependencies and the interleaved layout of the saved setpoint columns.
    """
    n_sample_points = 100
    x = ManualParameter("x")
    sweep_values_x = np.linspace(-1, 1, n_sample_points)

    y = ManualParameter("y")
    sweep_values_y = np.linspace(-1, 1, n_sample_points)

    m = ManualParameter("m")
    m.get = lambda: np.sin(x())

    n = ManualParameter("n")
    n.get = lambda: np.cos(x()) + 2 * np.sin(y())

    # m is sampled once per outer x value; n for every (x, y) pair.
    sweep_object = sweep(x, sweep_values_x)(m, sweep(y, sweep_values_y)(n))

    experiment = new_experiment("sweep_measure", sample_name="sine")
    station = Station()
    meas = SweepMeasurement(exp=experiment, station=station)
    meas.register_sweep(sweep_object)

    with meas.run() as datasaver:
        for data in sweep_object:
            datasaver.add_result(*data.items())

    data_set = datasaver._dataset
    assert data_set.paramspecs["x"].depends_on == ""
    assert data_set.paramspecs["y"].depends_on == ""
    assert data_set.paramspecs["m"].depends_on == "x"
    assert data_set.paramspecs["n"].depends_on == "x, y"

    data_x = data_set.get_data('x')
    data_y = data_set.get_data('y')

    # Every (n_sample_points + 1)-th row is the outer (m) measurement:
    # x is set there while y is still None.
    assert data_x[::n_sample_points + 1] == [[xi] for xi in sweep_values_x]
    assert data_y[::n_sample_points + 1] == [[None] for _ in sweep_values_x]

    # The remaining rows carry the full (x, y) product grid, in order.
    coordinate_layout = itertools.product(sweep_values_x, sweep_values_y)
    expected_x, expected_y = zip(*coordinate_layout)
    assert [ix for c, ix in enumerate(data_x)
            if c % (n_sample_points + 1)] == [[xi] for xi in expected_x]

    assert [iy for c, iy in enumerate(data_y)
            if c % (n_sample_points + 1)] == [[yi] for yi in expected_y]
Exemple #21
0
def experiment_ideal_run(empty_temp_db):
    """Yield an experiment filled with one dataset per ideal-run label;
    close the DB connection on teardown."""
    exp = new_experiment("test_tuning_run", sample_name="test_sample")

    # Dispatch table mapping each known label to its data generator.
    savers = {
        'pinchoff': lambda: save_1Ddata_with_qcodes(
            generate_pinchoff_data,
            generate_pinchoff_metadata,
        ),
        'doubledot': lambda: save_2Ddata_with_qcodes(
            generate_doubledot_data,
            generate_doubledot_metadata,
        ),
    }

    for label in ideal_run_labels:
        if label not in savers:
            raise NotImplementedError
        _ = savers[label]()

    try:
        yield exp
    finally:
        exp.conn.close()
def test_inferred():
    """
    A setter sweeps in mV while the underlying parameter x lives in V:
    x must be registered as inferred from xmv, and the stored values must
    reflect the mV -> V conversion.
    """

    x = ManualParameter("x", unit="V")

    @setter([("xmv", "mV")], inferred_parameters=[("x", "V")])
    def xsetter(milivolt_value):
        volt_value = milivolt_value / 1000.0  # From mV to V
        x.set(volt_value)
        return volt_value

    m = ManualParameter("m")
    m.get = lambda: np.sin(x())

    sweep_values = np.linspace(-1000, 1000, 100)  # We sweep in mV

    sweep_object = nest(sweep(xsetter, sweep_values), m)

    experiment = new_experiment("sweep_measure", sample_name="sine")
    station = Station()
    meas = SweepMeasurement(exp=experiment, station=station)
    meas.register_sweep(sweep_object)

    with meas.run() as datasaver:
        for data in sweep_object:
            datasaver.add_result(*data.items())

    data_set = datasaver._dataset
    # x depends on nothing but is inferred from the swept mV parameter.
    assert data_set.paramspecs["x"].depends_on == ""
    assert data_set.paramspecs["x"].inferred_from == "xmv"
    assert data_set.paramspecs["xmv"].depends_on == ""
    assert data_set.paramspecs["m"].depends_on == "x"

    # Stored x values are the swept mV values divided by 1000.
    expected_xmv = [[xi] for xi in sweep_values]
    expected_x = [[xi / 1000] for xi in sweep_values]

    assert data_set.get_data('xmv') == expected_xmv
    assert data_set.get_data('x') == expected_x
def test_simple():
    """A single nested sweep records x as the setpoint of m and stores the
    swept values verbatim."""
    x = ManualParameter("x")
    axis_values = np.linspace(-1, 1, 100)

    m = ManualParameter("m")
    m.get = lambda: np.sin(x())

    sweep_object = nest(sweep(x, axis_values), m)

    experiment = new_experiment("sweep_measure", sample_name="sine")
    station = Station()
    measurement = SweepMeasurement(exp=experiment, station=station)
    measurement.register_sweep(sweep_object)

    with measurement.run() as datasaver:
        for point in sweep_object:
            datasaver.add_result(*point.items())

    data_set = datasaver._dataset
    assert data_set.paramspecs["x"].depends_on == ""
    assert data_set.paramspecs["m"].depends_on == "x"

    assert data_set.get_data('x') == [[value] for value in axis_values]
Exemple #24
0
def experiment(empty_temp_db):
    """Yield a fresh test experiment; always close its DB connection on
    teardown."""
    e = qc.new_experiment("test-experiment", sample_name="test-sample")
    try:
        yield e
    finally:
        # Close even when the consuming test raises (the original skipped
        # the close in that case); matches the sibling fixture that wraps
        # the yield in try/finally.
        e.conn.close()
Exemple #25
0
def test_update_existing_guids(caplog):
    """
    update_GUIDs must rewrite only GUIDs whose location AND work-station
    codes are both 0, leave runs that already carry a nonzero code
    untouched, and emit the expected INFO/WARNING log sequence.
    """
    old_loc = 101
    old_ws = 1200

    new_loc = 232
    new_ws = 52123

    # prepare five runs with different location and work station codes

    with location_and_station_set_to(0, 0):
        new_experiment('test', sample_name='test_sample')

        ds1 = new_data_set('ds_one')
        xparam = ParamSpec('x', 'numeric')
        ds1.add_parameter(xparam)
        ds1.add_result({'x': 1})

        ds2 = new_data_set('ds_two')
        ds2.add_parameter(xparam)
        ds2.add_result({'x': 2})

        # Both runs start with the fully-zero (upgradable) GUID codes.
        guid_comps_1 = parse_guid(ds1.guid)
        assert guid_comps_1['location'] == 0
        assert guid_comps_1['work_station'] == 0

        guid_comps_2 = parse_guid(ds2.guid)
        assert guid_comps_2['location'] == 0
        assert guid_comps_2['work_station'] == 0

    # Runs 3-5 carry at least one nonzero code and must NOT be updated.
    with location_and_station_set_to(0, old_ws):
        ds3 = new_data_set('ds_three')
        xparam = ParamSpec('x', 'numeric')
        ds3.add_parameter(xparam)
        ds3.add_result({'x': 3})

    with location_and_station_set_to(old_loc, 0):
        ds4 = new_data_set('ds_four')
        xparam = ParamSpec('x', 'numeric')
        ds4.add_parameter(xparam)
        ds4.add_result({'x': 4})

    with location_and_station_set_to(old_loc, old_ws):
        ds5 = new_data_set('ds_five')
        xparam = ParamSpec('x', 'numeric')
        ds5.add_parameter(xparam)
        ds5.add_result({'x': 5})

    with location_and_station_set_to(new_loc, new_ws):

        caplog.clear()
        # Expected log levels in order: the WARNINGs correspond to the
        # runs that cannot be upgraded (partial codes already set).
        expected_levels = [
            'INFO', 'INFO', 'INFO', 'INFO', 'INFO', 'INFO', 'WARNING', 'INFO',
            'WARNING', 'INFO', 'INFO'
        ]

        with caplog.at_level(logging.INFO):
            update_GUIDs(ds1.conn)

            for record, lvl in zip(caplog.records, expected_levels):
                assert record.levelname == lvl

        # Zero-coded runs were rewritten with the new codes...
        guid_comps_1 = parse_guid(ds1.guid)
        assert guid_comps_1['location'] == new_loc
        assert guid_comps_1['work_station'] == new_ws

        guid_comps_2 = parse_guid(ds2.guid)
        assert guid_comps_2['location'] == new_loc
        assert guid_comps_2['work_station'] == new_ws

        # ...while runs with any nonzero code kept their original values.
        guid_comps_3 = parse_guid(ds3.guid)
        assert guid_comps_3['location'] == 0
        assert guid_comps_3['work_station'] == old_ws

        guid_comps_4 = parse_guid(ds4.guid)
        assert guid_comps_4['location'] == old_loc
        assert guid_comps_4['work_station'] == 0

        guid_comps_5 = parse_guid(ds5.guid)
        assert guid_comps_5['location'] == old_loc
        assert guid_comps_5['work_station'] == old_ws
def test_perform_actual_upgrade_6_to_newest_add_new_data():
    """
    Insert new runs on top of existing runs upgraded and verify that they
    get the correct captured_run_id and captured_counter
    """
    from qcodes.dataset.measurements import Measurement
    from qcodes.instrument.parameter import Parameter
    import numpy as np

    fixpath = os.path.join(fixturepath, 'db_files', 'version6')

    db_file = 'some_runs.db'
    dbname_old = os.path.join(fixpath, db_file)

    if not os.path.exists(dbname_old):
        pytest.skip("No db-file fixtures found. You can generate test db-files"
                    " using the scripts in the "
                    "https://github.com/QCoDeS/qcodes_generate_test_db/ repo")

    with temporarily_copied_DB(dbname_old, debug=False, version=6) as conn:
        assert isinstance(conn, ConnectionPlus)
        perform_db_upgrade(conn)
        assert get_user_version(conn) >= 7
        no_of_runs_query = "SELECT max(run_id) FROM runs"
        no_of_runs = one(
            atomic_transaction(conn, no_of_runs_query), 'max(run_id)')

        # Now let's insert new runs and ensure that they also get
        # captured_run_id assigned.
        params = []
        for n in range(5):
            params.append(Parameter(f'p{n}', label=f'Parameter {n}',
                                    unit=f'unit {n}', set_cmd=None,
                                    get_cmd=None))

        # Set up an experiment
        exp = new_experiment('some-exp', 'some-sample', conn=conn)
        meas = Measurement(exp=exp)
        meas.register_parameter(params[0])
        meas.register_parameter(params[1])
        meas.register_parameter(params[2], basis=(params[0],))
        meas.register_parameter(params[3], basis=(params[1],))
        meas.register_parameter(params[4], setpoints=(params[2], params[3]))

        # Make a number of identical runs
        for _ in range(10):
            with meas.run() as datasaver:
                for x in np.random.rand(10):
                    for y in np.random.rand(10):
                        z = np.random.rand()
                        datasaver.add_result((params[0], 0),
                                             (params[1], 1),
                                             (params[2], x),
                                             (params[3], y),
                                             (params[4], z))

        no_of_runs_new = one(
            atomic_transaction(conn, no_of_runs_query), 'max(run_id)')
        assert no_of_runs_new == 20

        # check that run_id is equivalent to captured_run_id for new
        # runs
        for run_id in range(no_of_runs, no_of_runs_new + 1):
            ds1 = load_by_id(run_id, conn)
            ds2 = load_by_run_spec(captured_run_id=run_id, conn=conn)

            assert ds1.the_same_dataset_as(ds2)

            assert ds1.run_id == run_id
            assert ds1.run_id == ds1.captured_run_id
            assert ds2.run_id == run_id
            assert ds2.run_id == ds2.captured_run_id

        # we are creating a new experiment into a db with one exp so:
        exp_id = 2

        # check that counter is equivalent to captured_counter for new
        # runs
        for counter in range(1, no_of_runs_new - no_of_runs + 1):
            ds1 = load_by_counter(counter, exp_id, conn)
            # giving only the counter is not unique since we have 2 experiments
            with pytest.raises(NameError, match="More than one"
                                                " matching dataset"):
                load_by_run_spec(captured_counter=counter, conn=conn)
            # however we can supply counter and experiment
            ds2 = load_by_run_spec(captured_counter=counter,
                                   experiment_name='some-exp',
                                   conn=conn)

            assert ds1.the_same_dataset_as(ds2)
            assert ds1.counter == counter
            assert ds1.counter == ds1.captured_counter
            assert ds2.counter == counter
            assert ds2.counter == ds2.captured_counter
from qcodes import Monitor
from qcodes import ManualParameter
from qdev_wrappers.file_setup import CURRENT_EXPERIMENT, my_init, close_station
from qdev_wrappers.station_configurator import StationConfigurator
from qcodes.dataset.plotting import plot_by_id
from qcodes.dataset.data_set import load_by_id
from qcodes.dataset.data_export import get_data_by_id
from qcodes import new_experiment, Parameter
from qcodes.dataset.measurements import Measurement
from qdev_wrappers.sweep_functions import do0d, do1d, do2d
from qcodes import ParamSpec, new_data_set
from qdev_wrappers.export import export_by_id
from local_instruments.qdev_fitter_QT1 import qdev_fitter
# NOTE(review): the following lines were separated by U+200B zero-width
# space characters (a paste artifact) that are a SyntaxError in Python;
# they have been replaced with ordinary blank lines.

if FIRST_INITIALISATION:
    exp = new_experiment(EXPERIMENT_NAME, sample_name=SAMPLE_NAME)

scriptfolder = qc.config.user.scriptfolder
sys.path.append(scriptfolder)

mpl.rcParams['figure.subplot.bottom'] = 0.15
mpl.rcParams['font.size'] = 10
mpl.rcParams['image.cmap'] = 'hot'

if __name__ == '__main__':

    if qc.Station.default:
        close_station(qc.Station.default)

    STATION = qc.Station()
def experiment(empty_temp_db):
    """Yield a disposable experiment; guarantee its DB connection is
    closed on teardown."""
    created = new_experiment("test-experiment", sample_name="test-sample")
    try:
        yield created
    finally:
        created.conn.close()
Exemple #29
0
    def save_segmented_data_return_info(
        self,
        segment_db_name: str,
        segment_db_folder: Optional[str] = None,
    ) -> Dict[int, Dict[str, Dict[str, Tuple[float, float]]]]:
        """
        Save each mesh in a new dataset in given databases

        returns:
        segment_info = {
            data_id: {
                readout_method: {'range_x': (),
                                 'range_y': ()
                        }
                    }
        }
        """
        if segment_db_folder is None:
            segment_db_folder = nt.config["db_folder"]

        # Lazily compute the segmented meshes from the raw data if needed.
        if not self.segmented_data:
            self.prepare_segmented_data(use_raw_data=True)
        # First use of this segment DB: create it and mirror the original
        # run's experiment/sample names.
        if not os.path.isfile(os.path.join(segment_db_folder, segment_db_name)):
            ds = load_by_id(self.qc_run_id)
            nt.new_database(segment_db_name, db_folder=segment_db_folder)
            qc.new_experiment(f'segmented_{ds.exp_name}',
                              sample_name=ds.sample_name)


        original_params = self.qc_parameters
        segment_info: Dict[int, Dict[str, Dict[str, Tuple[float, float]]]] = {}

        with nt.switch_database(segment_db_name, segment_db_folder):
            # One QCoDeS measurement (and hence one dataset) per segment.
            for segment in self.segmented_data:
                meas = Measurement()
                # Re-register the two setpoint parameters of the original run.
                meas.register_custom_parameter(
                    original_params[0].name,
                    label=original_params[0].label,
                    unit=original_params[0].unit,
                    paramtype="array",
                )

                meas.register_custom_parameter(
                    original_params[1].name,
                    label=original_params[1].label,
                    unit=original_params[1].unit,
                    paramtype="array",
                )
                result: List[List[Tuple[str, np.ndarray]]] = []
                ranges: Dict[str, Dict[str, Tuple[float, float]]] = {}
                m_params = [str(it) for it in list(segment.data_vars)]
                for ip, param_name in enumerate(m_params):
                    coord_names = list(segment.coords)
                    x_crd_name = coord_names[0]
                    y_crd_name = coord_names[1]

                    voltage_x = segment[param_name][x_crd_name].values
                    voltage_y = segment[param_name][y_crd_name].values
                    signal = segment[param_name].values

                    # Record the voltage extent of this segment per readout.
                    range_x = (np.min(voltage_x), np.max(voltage_x))
                    range_y = (np.min(voltage_y), np.max(voltage_y))
                    ranges[param_name] = {}
                    ranges[param_name]["range_x"] = range_x
                    ranges[param_name]["range_y"] = range_y

                    setpoints = self.raw_data[param_name].depends_on
                    # NOTE(review): `unit=original_params[1].unit` below looks
                    # like a copy-paste slip — the name/label use ip+2 but the
                    # unit comes from param index 1; confirm intended unit.
                    meas.register_custom_parameter(
                        original_params[ip+2].name,
                        label=original_params[ip+2].label,
                        unit=original_params[1].unit,
                        paramtype="array",
                        setpoints=setpoints,
                    )
                    v_x_grid, v_y_grid = np.meshgrid(voltage_x, voltage_y)

                    result.append([(setpoints[0], v_x_grid),
                                    (setpoints[1], v_y_grid),
                                    (param_name, signal.T)])

                with meas.run() as datasaver:
                    for r_i in range(len(self.readout_methods)):
                        datasaver.add_result(*result[r_i])

                    # Carry over snapshot, nanotune metadata and the GUID of
                    # the originating run so segments remain traceable.
                    datasaver.dataset.add_metadata(
                        "snapshot", json.dumps(self.snapshot)
                        )
                    datasaver.dataset.add_metadata(
                        nt.meta_tag, json.dumps(self.nt_metadata)
                    )
                    datasaver.dataset.add_metadata(
                        "original_guid", json.dumps(self.guid)
                        )
                    logger.debug(
                        "New dataset created and populated.\n"
                        + "database: "
                        + str(segment_db_name)
                        + "ID: "
                        + str(datasaver.run_id)
                    )
                    segment_info[datasaver.run_id] = ranges

        return segment_info
Exemple #30
0
def set_experiment_sample_names(sweep, exp, samp):
    """ Creates a new measurement with desired experiment and sample names. """

    wants_saving = sweep.save_data is True
    if wants_saving:
        # Only register the experiment/sample pair when data will be saved.
        qc.new_experiment(exp, samp)
    sweep._create_measurement()