Code example #1
def update_graph(clicks, selected_runs, selected_type, extra_runs, normalised,
                 data, selected_experiment):
    if clicks is None:
        raise PreventUpdate
    if selected_experiment is None and selected_type is None:
        return None, None, None, None
    if not selected_runs:
        return dash.no_update, 'No Runs Selected!', dash.no_update, dash.no_update
    if selected_type is None:
        return dash.no_update, 'No Data Type Selected!', dash.no_update, dash.no_update
    # Every selected run must contain all parameters the data type requires.
    for run_id in selected_runs:
        params = str(load_by_id(run_id).parameters).split(',')
        if any(needed not in params for needed in dataNeeded[selected_type]):
            return dash.no_update, 'Wrong Data Type!', dash.no_update, dash.no_update
    total_runs = selected_runs
    if extra_runs is not None:
        # Extra runs must provide the same parameters as the main selection.
        for run_id in extra_runs:
            params = str(load_by_id(run_id).parameters).split(',')
            if any(needed not in params for needed in dataNeeded[selected_type]):
                return dash.no_update, dash.no_update, 'Wrong Run!', dash.no_update
        total_runs = total_runs + extra_runs
    totaldata = plot_data(selected_type, total_runs, normalised)
    # Persist the current selection and figure in the shared store.
    data['run'] = selected_runs
    data['type'] = selected_type
    data['extra-run'] = extra_runs
    data['normalise'] = normalised
    data['total-run'] = total_runs
    data['graph'] = totaldata
    return totaldata, None, None, data
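For orientation, a callback with this signature would be registered roughly as below; the component ids are hypothetical, only the shape (four outputs, one input, six states) follows from the function above, and app is assumed to be an existing dash.Dash instance.

# Hypothetical wiring sketch; component ids are illustrative only.
from dash.dependencies import Input, Output, State

app.callback(
    [Output('main-graph', 'figure'), Output('run-error', 'children'),
     Output('extra-error', 'children'), Output('selection-store', 'data')],
    [Input('plot-button', 'n_clicks')],
    [State('run-dropdown', 'value'), State('type-dropdown', 'value'),
     State('extra-run-dropdown', 'value'), State('normalise-toggle', 'value'),
     State('selection-store', 'data'), State('experiment-dropdown', 'value')],
)(update_graph)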
Code example #2
def test_load_by_id():
    ds = new_data_set("test-dataset")
    run_id = ds.run_id
    ds.mark_started()
    ds.mark_completed()

    loaded_ds = load_by_id(run_id)
    assert ds.started is True
    assert ds.pristine is False
    assert ds.running is False
    assert loaded_ds.completed is True
    assert loaded_ds.exp_id == 1

    ds = new_data_set("test-dataset-unfinished")
    run_id = ds.run_id

    loaded_ds = load_by_id(run_id)
    assert ds.pristine is True
    assert ds.running is False
    assert ds.started is False
    assert loaded_ds.completed is False
    assert loaded_ds.exp_id == 1

    # let's take a run number that is not in the temporary test database file
    non_existing_run_id = run_id + 1
    with pytest.raises(ValueError,
                       match=f"Run with run_id "
                       f"{non_existing_run_id} does not "
                       f"exist in the database"):
        _ = load_by_id(non_existing_run_id)
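The flags asserted above trace the DataSet life cycle; a condensed sketch of the same progression, assuming a configured database:

ds = new_data_set("lifecycle-demo")
assert ds.pristine                  # created, no results yet
ds.mark_started()
assert ds.started and ds.running    # accepting results
ds.mark_completed()
assert ds.completed                 # read-only from here on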
Code example #3
def update_graph_live(n, tab, data, a_type, normalise, l_data):
    if tab != 'tab-2':
        raise PreventUpdate
    if a_type is None:
        raise PreventUpdate
    if l_data is None:
        raise PreventUpdate
    # Always include the most recent run of the last experiment.
    live_run = exc.load_last_experiment().last_data_set().run_id
    if data is not None:
        runs = data['runs']
        runs.append(live_run)
    else:
        runs = [live_run]
    # l_data cannot be None past the guard above, so its stored plot type
    # and normalisation settings take precedence over a_type and normalise.
    plot_type = l_data['type']
    for run_id in runs:
        params = str(load_by_id(run_id).parameters).split(',')
        if any(needed not in params for needed in dataNeeded[plot_type]):
            return dash.no_update, 'Wrong Data Type!'
    graph = plot_data(plot_type, runs, l_data['normalise'])
    return graph, None
Code example #4
File: metadata.py, project: lpetitTUD/Qcodes
def diff_param_values_by_id(left_id: RunId, right_id: RunId):
    """
    Given the IDs of two datasets, returns the differences between
    parameter values in each of their snapshots.
    """
    # Local import to reduce load time and
    # avoid circular references.
    from qcodes.dataset.data_set import load_by_id
    return diff_param_values(
        load_by_id(left_id).snapshot,
        load_by_id(right_id).snapshot
    )
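A minimal usage sketch, assuming the module above is on the import path and runs 1 and 2 both carry snapshots:

from metadata import diff_param_values_by_id  # import path is illustrative

diff = diff_param_values_by_id(1, 2)  # differences between the two runs' snapshots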
Code example #5
def test_load_by_id():
    ds = new_data_set("test-dataset")
    run_id = ds.run_id
    ds.mark_complete()

    loaded_ds = load_by_id(run_id)
    assert loaded_ds.completed is True
    assert loaded_ds.exp_id == 1

    ds = new_data_set("test-dataset-unfinished")
    run_id = ds.run_id

    loaded_ds = load_by_id(run_id)
    assert loaded_ds.completed is False
    assert loaded_ds.exp_id == 1
Code example #6
File: data_export.py, project: qnems/Qcodes
def get_data_by_id(run_id: int) -> List:
    """
    Load data from the database and reshape it into 1D arrays with minimal
    name, unit and label metadata.
    """

    data = load_by_id(run_id)
    conn = data.conn
    deps = get_dependents(conn, run_id)
    output = []
    for dep in deps:

        dependencies = get_dependencies(conn, dep)
        data_axis = get_layout(conn, dep)
        rawdata = data.get_values(data_axis['name'])
        data_axis['data'] = flatten_1D_data_for_plot(rawdata)
        raw_setpoint_data = data.get_setpoints(data_axis['name'])
        my_output = []

        for i, dependency in enumerate(dependencies):
            axis = get_layout(conn, dependency[0])
            axis['data'] = flatten_1D_data_for_plot(raw_setpoint_data[i])
            my_output.append(axis)

        my_output.append(data_axis)
        output.append(my_output)
    return output
Code example #7
def calculate_tunneling_rate(runids, station=None, exp=None, plot=False):
    meas = Measurement(exp=exp, station=station)
    meas.register_parameter(station.n6705b.VTUN, )
    meas.register_custom_parameter("Tunneling_Gradient",
                                   setpoints=(station.n6705b.VTUN, ),
                                   unit="V/S")
    meas.register_custom_parameter("runid")

    grads = []
    tun_vs = []
    for run_id in runids:
        data = load_by_id(run_id)
        sweep_data = data.get_parameter_data()
        x = sweep_data['dmm_VOUT']['time']
        y = sweep_data['dmm_VOUT']['dmm_VOUT']
        tun_v = sweep_data['n6705b_VTUN']['n6705b_VTUN'][0]
        # y = mx + b
        m, b = polyfit(x, y, 1)
        grads.append(m)
        tun_vs.append(tun_v)
        if plot:
            plt.figure()
            plt.plot(x, y)
            y2 = x * m + b
            plt.plot(x, y2)
            plt.ylabel("vout [V]")
            plt.xlabel("time [s]")
            plt.show()

    with meas.run() as datasaver:
        for m, tv, run in zip(grads, tun_vs, runids):
            datasaver.add_result((station.n6705b.VTUN, tv),
                                 ("Tunneling_Gradient", m), ("runid", run))
Code example #8
File: plotting.py, project: ectof/Qcodes
def plot_by_id(run_id: int,
               axes: Optional[Union[matplotlib.axes.Axes,
                              Sequence[matplotlib.axes.Axes]]] = None,
               colorbars: Optional[Union[matplotlib.colorbar.Colorbar,
                                   Sequence[
                                       matplotlib.colorbar.Colorbar]]] = None,
               rescale_axes: bool = True,
               auto_color_scale: Optional[bool] = None,
               cutoff_percentile: Optional[Union[Tuple[Number, Number],
                                                 Number]] = None,
               complex_plot_type: str = 'real_and_imag',
               complex_plot_phase: str = 'radians',
               **kwargs) -> AxesTupleList:
    """
    Construct all plots for a given `run_id`. All other arguments are forwarded
    to :func:`.plot_dataset`, see this for more details.
    """

    dataset = load_by_id(run_id)
    return plot_dataset(dataset,
                        axes,
                        colorbars,
                        rescale_axes,
                        auto_color_scale,
                        cutoff_percentile,
                        complex_plot_type,
                        complex_plot_phase,
                        **kwargs)
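Typical usage needs only the run id; a sketch, assuming run 1 exists in the currently configured database:

axes, colorbars = plot_by_id(1)      # one axes (and possibly colorbar) per plot
axes[0].figure.savefig('run_1.png')  # output filename is hypothetical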
Code example #9
File: quick-sweeps.py, project: QNLSydney/FiveDot
def leakage_sweep(voltage_out,
                  dmm,
                  to=1e-3,
                  nump=101,
                  wt=0.1,
                  ithaco_impedance=20):
    run_id, plot = qcm.linear1d(voltage_out.voltage,
                                0,
                                to,
                                nump,
                                wt,
                                dmm.ithaco_current,
                                setback=True)
    data = load_by_id(run_id)
    plot.close()

    yoko_param = voltage_out.voltage.full_name
    dmm_param = dmm.ithaco_current.full_name

    setpoints = np.array(data.get_data(yoko_param)).T[0]
    values = np.array(data.get_data(dmm_param)).T[0]
    fit, res, _, _, _ = np.polyfit(values, setpoints, 1, full=True)
    fit[0] = abs(fit[0])
    print("Resistance was: {}Ohms".format(
        si_prefix.si_format(fit[0] - ithaco_impedance, precision=3)))
    print("Residuals were: {}".format(res))

    return fit, res
Code example #10
def test_load_by_X_functions(two_empty_temp_db_connections, some_interdeps):
    """
    Test some different loading functions
    """
    source_conn, target_conn = two_empty_temp_db_connections

    source_path = path_to_dbfile(source_conn)
    target_path = path_to_dbfile(target_conn)

    source_exp1 = Experiment(conn=source_conn)
    source_ds_1_1 = DataSet(conn=source_conn, exp_id=source_exp1.exp_id)

    source_exp2 = Experiment(conn=source_conn)
    source_ds_2_1 = DataSet(conn=source_conn, exp_id=source_exp2.exp_id)

    source_ds_2_2 = DataSet(conn=source_conn,
                            exp_id=source_exp2.exp_id,
                            name="customname")

    for ds in (source_ds_1_1, source_ds_2_1, source_ds_2_2):
        ds.set_interdependencies(some_interdeps[1])
        ds.mark_started()
        ds.add_result({name: 0.0 for name in some_interdeps[1].names})
        ds.mark_completed()

    extract_runs_into_db(source_path, target_path, source_ds_2_2.run_id)

    test_ds = load_by_guid(source_ds_2_2.guid, target_conn)
    assert source_ds_2_2.the_same_dataset_as(test_ds)

    test_ds = load_by_id(1, target_conn)
    assert source_ds_2_2.the_same_dataset_as(test_ds)

    test_ds = load_by_counter(1, 1, target_conn)
    assert source_ds_2_2.the_same_dataset_as(test_ds)
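The GUID is the identifier that survives extraction unchanged, while run_id is reassigned in the target database; a condensed sketch of that invariant (ids hypothetical):

extract_runs_into_db(source_path, target_path, 7)  # copy run 7 into a fresh db
ds = load_by_guid(guid_of_run_7, target_conn)      # guid_of_run_7 is hypothetical
assert ds.run_id == 1                              # first run in the target db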
Code example #11
def plot_landau_fan():
    run_id = 1035

    dataset = load_by_id(run_id)
    param_data = dataset.get_parameter_data("R_xx", "R_xy")
    voltage_data = param_data["R_xx"]["yoko_voltage"]
    field_data = param_data["R_xx"]["mag_GRPZ_field"]
    R_xx_data = param_data["R_xx"]["R_xx"]
    R_xy_data = param_data["R_xy"]["R_xy"]

    # Reshape data
    lvoltage_data, voltage_data = np.array(detect_cycle(voltage_data))
    field_data = np.array(field_data[::lvoltage_data])
    y, x = np.mgrid[slice(voltage_data[0], voltage_data[-1], complex(0, lvoltage_data+1)),
                    slice(field_data[0], field_data[-1], complex(0, field_data.size))]
    R_xx_data = R_xx_data.reshape((field_data.size, lvoltage_data)).T
    R_xy_data = R_xy_data.reshape((field_data.size, lvoltage_data)).T
    vk_const = const.value("von Klitzing constant")

    fig, ax1 = plt.subplots()
    im = ax1.pcolormesh(y, x, np.log10(R_xx_data), vmin=1, vmax=5, cmap="inferno", rasterized=True)
    fig.colorbar(im)
    ax1.set_xlabel(r"Gate Voltage $\left(\si{\volt}\right)$")
    ax1.set_ylabel(r"$\textrm{B}_{\perp}$ $\left(\si{\tesla}\right)$")
    fig.tight_layout()
    fig.show()

    fig, ax1 = plt.subplots()
    ax1.pcolormesh(y, x, vk_const/R_xy_data, vmin=2, vmax=32, cmap="cividis_r")

    ax1.set_xlabel(r"Gate Voltage $\left(\si{\volt}\right)$")
    ax1.set_ylabel(r"$\textrm{B}_{\perp}$ $\left(\si{\tesla}\right)$")
    fig.tight_layout()
    fig.show()
Code example #12
    def data_sets(self) -> List[DataSet]:
        """Get all the datasets of this experiment"""
        runs = get_runs(self.conn, self.exp_id)
        data_sets = []
        for run in runs:
            data_sets.append(load_by_id(run['run_id'], conn=self.conn))
        return data_sets
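A sketch of how this method might be used, assuming an experiment object is already at hand:

exp = load_last_experiment()  # assumes this helper is in scope
for ds in exp.data_sets():
    print(ds.run_id, ds.name, ds.completed)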
Code example #13
def get_snapshot(data_id):
    """
    Get a snapshot from the dataset indexed by data_id
    """
    data = load_by_id(data_id)
    snap_str = data.get_metadata('snapshot')
    snap = json.loads(snap_str)
    return snap
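A usage sketch, assuming run 1 recorded a station snapshot:

snap = get_snapshot(1)
print(sorted(snap.keys()))  # typically includes 'station' when one was recorded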
Code example #14
def load_dataset_from(path: str, run_id: int) -> 'DataSet':
    """
    Loads ``DataSet`` with the given ``run_id`` from a database file that
    is located in the given ``path``.

    Note that after the call to this function, the database location in the
    qcodes config of the current python process is changed to ``path``.
    """
    initialise_or_create_database_at(path)
    return load_by_id(run_id=run_id)
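A usage sketch; note the documented side effect that the database location in the qcodes config switches to the given path (the path and run id here are hypothetical):

ds = load_dataset_from('/data/experiments.db', run_id=5)
print(ds.run_id, ds.completed)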
Code example #15
File: metadata.py, project: tinix84/Qcodes
def diff_param_values_by_id(left_id: RunId, right_id: RunId) -> ParameterDiff:
    """
    Given the IDs of two datasets, returns the differences between
    parameter values in each of their snapshots.
    """
    # Local import to reduce load time and
    # avoid circular references.
    from qcodes.dataset.data_set import load_by_id

    left_snapshot = load_by_id(left_id).snapshot
    right_snapshot = load_by_id(right_id).snapshot

    if left_snapshot is None or right_snapshot is None:
        if left_snapshot is None:
            empty = left_id
        else:
            empty = right_id
        raise RuntimeError(f"Tried to compare two snapshots"
                           f"but the snapshot of {empty} "
                           f"is empty.")

    return diff_param_values(left_snapshot, right_snapshot)
Code example #16
def test_perform_actual_upgrade_6_to_7():

    fixpath = os.path.join(fixturepath, 'db_files', 'version6')

    db_file = 'some_runs.db'
    dbname_old = os.path.join(fixpath, db_file)

    if not os.path.exists(dbname_old):
        pytest.skip("No db-file fixtures found. You can generate test db-files"
                    " using the scripts in the "
                    "https://github.com/QCoDeS/qcodes_generate_test_db/ repo")

    with temporarily_copied_DB(dbname_old, debug=False, version=6) as conn:
        assert isinstance(conn, ConnectionPlus)
        perform_db_upgrade_6_to_7(conn)
        assert get_user_version(conn) == 7

        no_of_runs_query = "SELECT max(run_id) FROM runs"
        no_of_runs = one(atomic_transaction(conn, no_of_runs_query),
                         'max(run_id)')
        assert no_of_runs == 10

        columns = atomic_transaction(conn,
                                     "PRAGMA table_info(runs)").fetchall()
        col_names = [col['name'] for col in columns]

        assert 'captured_run_id' in col_names
        assert 'captured_counter' in col_names

        for run_id in range(1, no_of_runs + 1):
            ds1 = load_by_id(run_id, conn)
            ds2 = load_by_run_spec(captured_run_id=run_id, conn=conn)

            assert ds1.the_same_dataset_as(ds2)

            assert ds1.run_id == run_id
            assert ds1.run_id == ds1.captured_run_id
            assert ds2.run_id == run_id
            assert ds2.run_id == ds2.captured_run_id

        exp_id = 1
        for counter in range(1, no_of_runs + 1):
            ds1 = load_by_counter(counter, exp_id, conn)
            ds2 = load_by_run_spec(captured_counter=counter, conn=conn)

            assert ds1.the_same_dataset_as(ds2)
            assert ds1.counter == counter
            assert ds1.counter == ds1.captured_counter
            assert ds2.counter == counter
            assert ds2.counter == ds2.captured_counter
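The captured_run_id checked above is what load_by_run_spec resolves, so a run keeps its original identity across upgrades and extractions; a one-line sketch (id hypothetical):

ds = load_by_run_spec(captured_run_id=3)  # uses the default database connection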
Code example #17
def export_by_id(run_id):
    '''Export CSV files with raw data of a measurement run.'''
    dataset = load_by_id(run_id)
    data = []
    headers = []
    for parameter_name, specs in dataset.paramspecs.items():
        parameter_data = np.ravel(dataset.get_data(parameter_name))
        data.append(parameter_data)
        parameter_header = '{} ({})'.format(specs.label, specs.unit)
        headers.append(parameter_header)
    data = np.vstack(data).T
    header = ' '.join(headers)
    filename = get_export_path(run_id, dataset.exp_id)
    np.savetxt(filename, data, header=header)
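A one-line usage sketch (run id hypothetical); the destination comes from get_export_path:

export_by_id(7)  # writes one whitespace-separated text file via np.savetxt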
Code example #18
def plot_hall_sweep_hmob():
    datasets = (1054, 1055, 1056, 1057, 1058)

    field_data = np.zeros(0)
    R_xx_data = np.zeros(0)
    R_xy_data = np.zeros(0)

    for dataset in datasets:
        dataset = load_by_id(dataset)
        param_data = dataset.get_parameter_data("R_xx", "R_xy")
        field_data = np.append(field_data, param_data["R_xx"]["mag_GRPZ_field"])
        R_xx_data = np.append(R_xx_data, param_data["R_xx"]["R_xx"])
        R_xy_data = np.append(R_xy_data, param_data["R_xy"]["R_xy"])

    max_x, min_x = np.where(field_data > 7.2)[0][0], np.where(field_data > 6.6)[0][0]
    print(min_x, max_x)
    print(f"Rho_XY (nu = 6) = {np.average(R_xy_data[min_x:max_x])}")
    smallfield = np.where(field_data > 0.05)[0][0]
    print(smallfield)

    fig, ax1 = plt.subplots()
    ax2 = ax1.twinx()
    ax1.plot(field_data, R_xy_data, 'b-', label="Rxy")
    ax2.plot(field_data, R_xx_data, 'r-', label="Rxx")

    # Plot plateaus
    vk_const = const.value("von Klitzing constant")
    for i in range(6, 12+1, 2):
        ax1.axhline(vk_const/i)
        print(f"At (nu = {i}): {vk_const/i:.2f}")

    # Plot theoretical locations
    density = np.polyfit(field_data[:500], R_xy_data[:500], 1)
    print(f"Fitting to field: {field_data[500]}.")
    density = 1/(const.e * density[0])
    print(f"Extracted Density: {density*1e-4:e} cm^2/Vs")
    filling_factors = (2*const.pi*density*const.hbar)/(const.e)
    for i in range(6, 12+1, 2):
        ax1.axvline(filling_factors/i)
    print(R_xx_data[11])
    sq = 1 / 5 # Width / length
    mu = 1/(np.average(R_xx_data[11-2:11+3]) * sq * const.e * density * 1e-4)
    print(f"Extracted Mobility: {mu}")

    fig.legend()
    ax1.set_xlabel(r"Field $\left(\si{\tesla}\right)$")
    ax1.set_ylabel(r"$\textrm{R}_{xy}$ $\left(\si{\ohm}\right)$")
    ax2.set_ylabel(r"$\textrm{R}_{xx}$ $\left(\si{\ohm}\right)$")
    fig.tight_layout()
    fig.show()
Code example #19
    def __init__(self, *args, id, refresh=1):

        super().__init__(*args)

        self.id = id

        alldata = get_data_by_id(self.id)
        self.nplots = len(alldata)

        # initiate plots
        for i, data in enumerate(alldata):

            x = data[0]
            y = data[1]

            if len(data) == 2:  # 1D PLOTTING

                po = self._add_1d(i, x, y)

            elif len(data) == 3:  # 2D PLOTTING

                z = data[2]

                po = self._add_2d(i, x, y, z)

        # add subscriber
        #dataset = load_by_id(self.id)
        #dataset.subscribe(self.update_from_subscriber, min_wait=0, min_count=1, state=[])

        # Poll the dataset until the run completes, refreshing the plots.
        dataset = load_by_id(self.id)
        self.completed = dataset.completed

        while not self.completed:
            dataset = load_by_id(self.id)
            self.completed = dataset.completed
            self.update_plots()
            time.sleep(refresh)
Code example #20
def test_load_by_X_functions(two_empty_temp_db_connections,
                             some_interdeps):
    """
    Test some different loading functions
    """
    source_conn, target_conn = two_empty_temp_db_connections

    source_path = path_to_dbfile(source_conn)
    target_path = path_to_dbfile(target_conn)

    source_exp1 = Experiment(conn=source_conn)
    source_ds_1_1 = DataSet(conn=source_conn, exp_id=source_exp1.exp_id)

    source_exp2 = Experiment(conn=source_conn)
    source_ds_2_1 = DataSet(conn=source_conn, exp_id=source_exp2.exp_id)

    source_ds_2_2 = DataSet(conn=source_conn,
                            exp_id=source_exp2.exp_id,
                            name="customname")

    for ds in (source_ds_1_1, source_ds_2_1, source_ds_2_2):
        ds.set_interdependencies(some_interdeps[1])
        ds.mark_started()
        ds.add_results([{name: 0.0 for name in some_interdeps[1].names}])
        ds.mark_completed()

    extract_runs_into_db(source_path, target_path, source_ds_2_2.run_id)
    extract_runs_into_db(source_path, target_path, source_ds_2_1.run_id)
    extract_runs_into_db(source_path, target_path, source_ds_1_1.run_id)

    test_ds = load_by_guid(source_ds_2_2.guid, target_conn)
    assert source_ds_2_2.the_same_dataset_as(test_ds)

    test_ds = load_by_id(1, target_conn)
    assert source_ds_2_2.the_same_dataset_as(test_ds)

    test_ds = load_by_run_spec(captured_run_id=source_ds_2_2.captured_run_id,
                               conn=target_conn)
    assert source_ds_2_2.the_same_dataset_as(test_ds)

    assert source_exp2.exp_id == 2

    # this is now the first run in the db so run_id is 1
    target_run_id = 1
    # and the experiment ids will be interchanged.
    target_exp_id = 1

    test_ds = load_by_counter(target_run_id, target_exp_id, target_conn)
    assert source_ds_2_2.the_same_dataset_as(test_ds)
Code example #21
def comparator_sweep(exp, station, fn_switch, voltages, values=None):
    """
    open loop sweep of vref
    """
    # Set the switch state to 'open'
    if not callable(fn_switch):
        raise ValueError("Expecting Switch Function")
    fn_switch("open")

    deps = [station.dmm.VOUT]
    indeps = [
        station.n6705b.VBUS, station.n6705b.VFEEDBACK, station.n6705b.VREF,
        station.n6705b.VTUN, station.b2962.VDD, station.b2962.VDRAIN,
        station.yoko.VBIAS, station.b2962.ch1.current,
        station.b2962.ch2.current
    ]

    if values is None:
        values = np.linspace(0, voltages['vdd'], 101)
    # Sweep VFEEDBACK
    # runid = linear_trace(indeps, deps, station.n6705b.VFEEDBACK,
    #                       np.linspace(0, voltages['vdd'], 101), delay=0.5,
    #                       exp=exp, station=station,
    #                       fn_before=lambda: setup(station, voltages),
    #                       fn_after=lambda: all_disable(station))
    # # determine trip voltage
    # # pull "dmm_volt" and "n6705b_VFEEDBACK" out of dataset
    # data = load_by_id(runid)
    # sweep_data = data.get_parameter_data()
    # trip_point = find_trip_voltage(sweep_data['dmm_VOUT']['n6705b_VFEEDBACK'],
    #                                sweep_data['dmm_VOUT']['dmm_VOUT'])

    # Determine a good vref

    # voltages['vfeedback'] = trip_point
    runid = linear_trace(indeps,
                         deps,
                         station.n6705b.VREF,
                         values,
                         delay=0.5,
                         exp=exp,
                         station=station,
                         fn_before=lambda: setup(station, voltages),
                         fn_after=lambda: all_disable(station))
    data = load_by_id(runid)
    sweep_data = data.get_parameter_data()
    vref_point = find_vref(sweep_data['dmm_VOUT']['n6705b_VREF'],
                           sweep_data['dmm_VOUT']['dmm_VOUT'])
    return (vref_point, runid)
Code example #22
def test_load_by_X_functions(two_empty_temp_db_connections, some_paramspecs):
    """
    Test some different loading functions
    """
    source_conn, target_conn = two_empty_temp_db_connections

    source_path = path_to_dbfile(source_conn)
    target_path = path_to_dbfile(target_conn)

    source_exp1 = Experiment(conn=source_conn)
    source_ds_1_1 = DataSet(conn=source_conn, exp_id=source_exp1.exp_id)
    for ps in some_paramspecs[2].values():
        source_ds_1_1.add_parameter(ps)
    source_ds_1_1.mark_started()
    source_ds_1_1.add_result(
        {ps.name: 0.0
         for ps in some_paramspecs[2].values()})
    source_ds_1_1.mark_completed()

    source_exp2 = Experiment(conn=source_conn)
    source_ds_2_1 = DataSet(conn=source_conn, exp_id=source_exp2.exp_id)
    for ps in some_paramspecs[2].values():
        source_ds_2_1.add_parameter(ps)
    source_ds_2_1.mark_started()
    source_ds_2_1.add_result(
        {ps.name: 0.0
         for ps in some_paramspecs[2].values()})
    source_ds_2_1.mark_completed()
    source_ds_2_2 = DataSet(conn=source_conn,
                            exp_id=source_exp2.exp_id,
                            name="customname")
    for ps in some_paramspecs[2].values():
        source_ds_2_2.add_parameter(ps)
    source_ds_2_2.mark_started()
    source_ds_2_2.add_result(
        {ps.name: 0.0
         for ps in some_paramspecs[2].values()})
    source_ds_2_2.mark_completed()

    extract_runs_into_db(source_path, target_path, source_ds_2_2.run_id)

    test_ds = load_by_guid(source_ds_2_2.guid, target_conn)
    assert source_ds_2_2.the_same_dataset_as(test_ds)

    test_ds = load_by_id(1, target_conn)
    assert source_ds_2_2.the_same_dataset_as(test_ds)

    test_ds = load_by_counter(1, 1, target_conn)
    assert source_ds_2_2.the_same_dataset_as(test_ds)
Code example #23
def plot_hall_sweep_lmob():
    datasets = (1045,)

    field_data = np.zeros(0)
    R_xx_data = np.zeros(0)
    R_xy_data = np.zeros(0)

    for dataset in datasets:
        dataset = load_by_id(dataset)
        param_data = dataset.get_parameter_data("R_xx", "R_xy")
        field_data = np.append(field_data, param_data["R_xx"]["mag_GRPZ_field"])
        R_xx_data = np.append(R_xx_data, param_data["R_xx"]["R_xx"])
        R_xy_data = np.append(R_xy_data, param_data["R_xy"]["R_xy"])
    field_data = field_data[:-100]
    R_xx_data = R_xx_data[:-100]
    R_xy_data = R_xy_data[:-100]

    field_data = 1/field_data

    fig, ax1 = plt.subplots()
    ax2 = ax1.twinx()
    ax1.plot(field_data, R_xy_data, 'b-', label="Rxy")
    ax2.plot(field_data, R_xx_data, 'r-', label="Rxx")

    res, _ = curve_fit(dec_sin, field_data, R_xx_data, (2000, 5000, 150, -1, 1))
    print(res)
    ax2.plot(field_data, dec_sin(field_data, *res), 'k--')

#    # Plot plateaus
#    vk_const = const.value("von Klitzing constant")
#    for i in range(12, 24+1, 2):
#        ax1.axhline(vk_const/i)
#
#    # Plot theoretical locations
#    density = np.polyfit(field_data[-100:], R_xy_data[-100:], 1)
#    print(f"Fitting to field: {field_data[-100]}.")
#    density = 1/(const.e * density[0])
#    print(f"Extracted Density: {density*1e-4:e} cm^2/Vs")
#    filling_factors = (2*const.pi*density*const.hbar)/(const.e)
#    for i in range(12, 24+1, 2):
#        ax1.axvline(filling_factors/i)

    fig.legend()
    ax1.set_xlabel(r"1/Field $\left(\si{\per\tesla}\right)$")
    ax1.set_ylabel(r"$\textrm{R}_{xy}$ $\left(\si{\ohm}\right)$")
    ax2.set_ylabel(r"$\textrm{R}_{xx}$ $\left(\si{\ohm}\right)$")
    fig.tight_layout()
    fig.show()
Code example #24
def get_data_by_id(run_id: int) -> \
        List[List[Dict[str, Union[str, np.ndarray]]]]:
    """
    Load data from the database and reshape it into 1D arrays with minimal
    name, unit and label metadata.
    Only returns data from parameters that depend on other parameters or
    parameters that other parameters depend on, i.e. data for standalone
    parameters are not returned.

    Args:
        run_id: run ID from the database

    Returns:
        a list of lists of dictionaries like this:

    ::

        [
          # each element in this list refers
          # to one dependent (aka measured) parameter
            [
              # each element in this list refers
              # to one independent (aka setpoint) parameter
              # that the dependent parameter depends on;
              # a dictionary with the data and metadata of the dependent
              # parameter is in the *last* element in this list
                ...
                {
                    'data': <1D numpy array of points>,
                    'name': <name of the parameter>,
                    'label': <label of the parameter or ''>,
                    'unit': <unit of the parameter or ''>
                },
                ...
            ],
            ...
        ]

    """
    ds = load_by_id(run_id)
    output = _get_data_from_ds(ds)
    return output
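A sketch of consuming the nested structure described in the docstring (run id hypothetical):

for dependent_group in get_data_by_id(1):
    *setpoint_dicts, measured = dependent_group   # measured parameter comes last
    for sp in setpoint_dicts:
        print('setpoint:', sp['name'], sp['unit'], sp['data'].shape)
    print('measured:', measured['name'], measured['unit'], measured['data'].shape)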
Code example #25
def redraw(run_id, axes, cbars):
    '''Call plot_by_id to plot the available data on axes.'''
    pause_time = 0.001
    dataset = load_by_id(run_id)
    if not dataset:  # there is no data available yet
        axes, cbars = [], []
    elif not axes:  # there is data available but no plot yet
        axes, cbars = plot_by_id(run_id)
    else:  # there is a plot already
        for axis in axes:
            axis.clear()
        for cbar in cbars:
            if cbar is not None:
                cbar.remove()
        axes, cbars = plot_by_id(run_id, axes)
        title = make_title(dataset)
        for axis in axes:
            axis.set_title(title)
        plt.pause(pause_time)
    return axes, cbars
Code example #26
    def fit(self, dataid, fitclass, save_plots=True, p0=None, **kwargs):

        ax_list, _ = plot_by_id(dataid)
        popt_list = []
        pcov_list = []
        for i, ax in enumerate(ax_list):
            if not ax.lines:
                print(f'No line found in plot {i}.')
            else:
                xdata = ax.lines[0].get_xdata()
                ydata = ax.lines[0].get_ydata()
                # Get an initial parameter guess if a guess function is defined
                if p0 is None and hasattr(fitclass, 'guess'):
                    p0 = fitclass.guess(xdata, ydata)
                popt, pcov = curve_fit(fitclass.fun,
                                       xdata,
                                       ydata,
                                       p0=p0,
                                       **kwargs)
                popt_list.append(popt)
                pcov_list.append(pcov)

                if save_plots:
                    self.plot_1D(ax, xdata, ydata, fitclass, popt)

                    dataset = load_by_id(dataid)
                    mainfolder = config.user.mainfolder
                    experiment_name = dataset.exp_name
                    sample_name = dataset.sample_name

                    storage_dir = os.path.join(mainfolder, experiment_name,
                                               sample_name)
                    analysis_dir = os.path.join(storage_dir, 'Analysis')
                    os.makedirs(analysis_dir, exist_ok=True)

                    full_path = os.path.join(analysis_dir, f'{dataid}_{i}.png')
                    ax.figure.savefig(full_path, dpi=500)
        return popt_list, pcov_list
Code example #27
def test_load_legacy_files_1D(experiment):
    location = 'fixtures/2018-01-17/#001_testsweep_15-42-57'
    dir = os.path.dirname(__file__)
    full_location = os.path.join(dir, location)
    run_ids = import_dat_file(full_location)
    run_id = run_ids[0]
    data = load_by_id(run_id)
    assert data.parameters == 'ch1,voltage'
    assert data.number_of_results == 201
    expected_names = ['ch1', 'voltage']
    expected_labels = ['Gate ch1', 'Gate voltage']
    expected_units = ['V', 'V']
    expected_depends_on = ['', 'ch1']
    for i, parameter in enumerate(data.get_parameters()):
        assert parameter.name == expected_names[i]
        assert parameter.label == expected_labels[i]
        assert parameter.unit == expected_units[i]
        assert parameter.depends_on == expected_depends_on[i]
        assert parameter.type == 'numeric'
    snapshot = json.loads(data.get_metadata('snapshot'))
    assert sorted(list(snapshot.keys())) == [
        '__class__', 'arrays', 'formatter', 'io', 'location', 'loop', 'station'
    ]
Code example #28
File: data_export.py, project: geoffroth/Qcodes
def get_data_by_id(run_id: int) -> List:
    """
    Load data from the database and reshape it into 1D arrays with minimal
    name, unit and label metadata (see the `get_layout` function).

    Args:
        run_id: run ID from the database

    Returns:
        a list of lists of dictionaries like this:

        [
          # each element in this list refers
          # to one dependent (aka measured) parameter
            [
              # each element in this list refers
              # to one independent (aka setpoint) parameter
              # that the dependent parameter depends on;
              # a dictionary with the data and metadata of the dependent
              # parameter is in the *last* element in this list
                ...
                {
                    'data': <1D numpy array of points>,
                    'name': <name of the parameter>,
                    'label': <label of the parameter or ''>,
                    'unit': <unit of the parameter or ''>
                },
                ...
            ],
            ...
        ]
    """

    data = load_by_id(run_id)

    conn = data.conn
    deps = get_dependents(conn, run_id)

    output = []
    for dep in deps:

        dependencies = get_dependencies(conn, dep)

        data_axis: Dict[str, Union[str, np.ndarray]] = get_layout(conn, dep)

        rawdata = data.get_values(data_axis['name'])
        data_axis['data'] = flatten_1D_data_for_plot(rawdata)

        raw_setpoint_data = data.get_setpoints(data_axis['name'])

        output_axes = []

        max_size = 0
        for dependency in dependencies:
            axis: Dict[str,
                       Union[str,
                             np.ndarray]] = get_layout(conn, dependency[0])

            mydata = flatten_1D_data_for_plot(raw_setpoint_data[axis['name']])
            axis['data'] = mydata

            size = mydata.size
            if size > max_size:
                max_size = size

            output_axes.append(axis)

        for axis in output_axes:
            size = axis['data'].size  # type: ignore
            if size < max_size:
                if max_size % size != 0:
                    raise RuntimeError("Inconsistent shapes of data. Got "
                                       f"{size} which is not a whole fraction"
                                       f"of {max_size}")
                axis['data'] = np.repeat(axis['data'], max_size // size)

        output_axes.append(data_axis)

        output.append(output_axes)
    return output
Code example #29
File: data_export.py, project: nataliejpg/Qcodes
def get_data_by_id(run_id: int, conn: Optional[ConnectionPlus] = None) -> \
        List[List[Dict[str, Union[str, np.ndarray]]]]:
    """
    Load data from the database and reshape it into 1D arrays with minimal
    name, unit and label metadata (see the `get_layout` function).
    Only returns data from parameters that depend on other parameters or
    parameters that other parameters depend on, i.e. data for standalone
    parameters are not returned.

    Args:
        run_id: run ID from the database
        conn: connection to the database to load from

    Returns:
        a list of lists of dictionaries like this:

    ::

        [
          # each element in this list refers
          # to one dependent (aka measured) parameter
            [
              # each element in this list refers
              # to one independent (aka setpoint) parameter
              # that the dependent parameter depends on;
              # a dictionary with the data and metadata of the dependent
              # parameter is in the *last* element in this list
                ...
                {
                    'data': <1D numpy array of points>,
                    'name': <name of the parameter>,
                    'label': <label of the parameter or ''>,
                    'unit': <unit of the parameter or ''>
                },
                ...
            ],
            ...
        ]

    """
    ds = load_by_id(run_id, conn=conn)

    dependent_parameters: Tuple[ParamSpecBase, ...] = ds.dependent_parameters

    parameter_data = ds.get_parameter_data(
        *[ps.name for ps in dependent_parameters])

    output = []

    for dep_name, data_dict in parameter_data.items():
        data_dicts_list = []

        dep_data_dict_index = None

        for param_name, data in data_dict.items():
            my_data_dict: Dict[str, Union[str, np.ndarray]] = {}

            my_data_dict['name'] = param_name

            my_data_dict['data'] = data.flatten()

            ps = ds.paramspecs[param_name]
            my_data_dict['unit'] = ps.unit
            my_data_dict['label'] = ps.label

            data_dicts_list.append(my_data_dict)

            if param_name == dep_name:
                dep_data_dict_index = len(data_dicts_list) - 1

        # put the data dict of the dependent one at the very end of the list
        if dep_data_dict_index is None:
            raise RuntimeError(f'{dep_name} not found in its own "datadict".')
        else:
            data_dicts_list.append(data_dicts_list.pop(dep_data_dict_index))

        output.append(data_dicts_list)

    return output
Code example #30
def test_perform_actual_upgrade_6_to_newest_add_new_data():
    """
    Insert new runs on top of existing runs upgraded and verify that they
    get the correct captured_run_id and captured_counter
    """
    from qcodes.dataset.measurements import Measurement
    from qcodes.instrument.parameter import Parameter
    import numpy as np

    fixpath = os.path.join(fixturepath, 'db_files', 'version6')

    db_file = 'some_runs.db'
    dbname_old = os.path.join(fixpath, db_file)

    if not os.path.exists(dbname_old):
        pytest.skip("No db-file fixtures found. You can generate test db-files"
                    " using the scripts in the "
                    "https://github.com/QCoDeS/qcodes_generate_test_db/ repo")

    with temporarily_copied_DB(dbname_old, debug=False, version=6) as conn:
        assert isinstance(conn, ConnectionPlus)
        perform_db_upgrade(conn)
        assert get_user_version(conn) >= 7
        no_of_runs_query = "SELECT max(run_id) FROM runs"
        no_of_runs = one(
            atomic_transaction(conn, no_of_runs_query), 'max(run_id)')

        # Now let's insert new runs and ensure that they also get
        # captured_run_id assigned.
        params = []
        for n in range(5):
            params.append(Parameter(f'p{n}', label=f'Parameter {n}',
                                    unit=f'unit {n}', set_cmd=None,
                                    get_cmd=None))

        # Set up an experiment
        exp = new_experiment('some-exp', 'some-sample', conn=conn)
        meas = Measurement(exp=exp)
        meas.register_parameter(params[0])
        meas.register_parameter(params[1])
        meas.register_parameter(params[2], basis=(params[0],))
        meas.register_parameter(params[3], basis=(params[1],))
        meas.register_parameter(params[4], setpoints=(params[2], params[3]))

        # Make a number of identical runs
        for _ in range(10):
            with meas.run() as datasaver:
                for x in np.random.rand(10):
                    for y in np.random.rand(10):
                        z = np.random.rand()
                        datasaver.add_result((params[0], 0),
                                             (params[1], 1),
                                             (params[2], x),
                                             (params[3], y),
                                             (params[4], z))

        no_of_runs_new = one(
            atomic_transaction(conn, no_of_runs_query), 'max(run_id)')
        assert no_of_runs_new == 20

        # check that run_id is equivalent to captured_run_id for new
        # runs
        for run_id in range(no_of_runs, no_of_runs_new + 1):
            ds1 = load_by_id(run_id, conn)
            ds2 = load_by_run_spec(captured_run_id=run_id, conn=conn)

            assert ds1.the_same_dataset_as(ds2)

            assert ds1.run_id == run_id
            assert ds1.run_id == ds1.captured_run_id
            assert ds2.run_id == run_id
            assert ds2.run_id == ds2.captured_run_id

        # we are creating a new experiment into a db with one exp so:
        exp_id = 2

        # check that counter is equivalent to captured_counter for new
        # runs
        for counter in range(1, no_of_runs_new - no_of_runs + 1):
            ds1 = load_by_counter(counter, exp_id, conn)
            # giving only the counter is not unique since we have 2 experiments
            with pytest.raises(NameError, match="More than one"
                                                " matching dataset"):
                load_by_run_spec(captured_counter=counter, conn=conn)
            # however we can supply counter and experiment
            ds2 = load_by_run_spec(captured_counter=counter,
                                   experiment_name='some-exp',
                                   conn=conn)

            assert ds1.the_same_dataset_as(ds2)
            assert ds1.counter == counter
            assert ds1.counter == ds1.captured_counter
            assert ds2.counter == counter
            assert ds2.counter == ds2.captured_counter