Example #1
0
    def record_S21_sweep_power_sweep_frequency(self):
        # -- setting vna parameters
        # vna.sweep_mode.set('CONT')
        self.vna.power.set(self.vnapower)
        self.vna.center.set(self.center_frequency)
        self.vna.span.set(self.frequency_span)
        self.vna.points.set(self.num_freq_points)
        self.vna.if_bandwidth.set(self.ifbandwidth)
        self.vna.trace.set(self.measuredtrace)
        self.vna.auto_sweep.set(False)

        # vna.groupdelay.set(groupdelayref) #does not work yet
        meas = Measurement()
        # register the independent parameter (the swept power)
        meas.register_parameter(self.vna.power)
        # register the dependent parameters; each is measured as a function of
        # the swept power (the frequency axis is carried by the trace
        # parameters' own setpoints)
        meas.register_parameter(self.vna.real, setpoints=(self.vna.power,))
        meas.register_parameter(self.vna.imaginary, setpoints=(self.vna.power,))
        meas.register_parameter(self.vna.phase, setpoints=(self.vna.power,))
        meas.register_parameter(self.vna.magnitude, setpoints=(self.vna.power,))

        # -- taking data
        with meas.run() as datasaver:
            for v1 in np.linspace(self.powersweepstart, self.powersweepstop, self.num_power_points, endpoint=True):
                self.vna.active_trace.set(1)

                self.vna.power.set(v1)

                print(self.vna.power.get())  # check

                # vna.auto_sweep.set(False)
                # vna.auto_sweep.set(True)
                # workaround: a bug sometimes drops the last row, which is why
                # two sweeps used to be run here
                self.vna.traces.tr1.run_sweep()

                # power=vna.power()
                # vna.auto_sweep.set(False)
                imag = self.vna.imaginary()
                real = self.vna.real()
                phase = self.vna.phase()
                mag = self.vna.magnitude()

                # vna.active_trace.set(2)
                # vna.traces.tr2.run_sweep()
                power = self.vna.power()  # should still be the same as a few lines above

                # time.sleep(2)
                datasaver.add_result((self.vna.magnitude, mag),
                                     (self.vna.phase, phase),
                                     (self.vna.real, real),
                                     (self.vna.imaginary, imag),
                                     (self.vna.power, power))

                print(self.vna.power.get())

        plot_by_id(datasaver.run_id)

        pd = datasaver.dataset.get_parameter_data()

        # import pdb; pdb.set_trace()  # noqa BREAKPOINT

        magnitude_table = np.vstack((np.ravel(pd[self.vna_name + "_tr1_magnitude"][self.vna_name + "_power"]),
                                     np.ravel(pd[self.vna_name + "_tr1_magnitude"][self.vna_name + "_tr1_frequency"]),
                                     np.ravel(pd[self.vna_name + "_tr1_magnitude"][self.vna_name + "_tr1_magnitude"])))

        phase_table = np.vstack((np.ravel(pd[self.vna_name + "_tr1_phase"][self.vna_name + "_power"]),
                                 np.ravel(pd[self.vna_name + "_tr1_phase"][self.vna_name + "_tr1_frequency"]),
                                 np.ravel(pd[self.vna_name + "_tr1_phase"][self.vna_name + "_tr1_phase"])))

        real_table = np.vstack((np.ravel(pd[self.vna_name + "_tr1_real"][self.vna_name + "_power"]),
                                np.ravel(pd[self.vna_name + "_tr1_real"][self.vna_name + "_tr1_frequency"]),
                                np.ravel(pd[self.vna_name + "_tr1_real"][self.vna_name + "_tr1_real"])))

        imaginary_table = np.vstack((np.ravel(pd[self.vna_name + "_tr1_imaginary"][self.vna_name + "_power"]),
                                     np.ravel(pd[self.vna_name + "_tr1_imaginary"][self.vna_name + "_tr1_frequency"]),
                                     np.ravel(pd[self.vna_name + "_tr1_imaginary"][self.vna_name + "_tr1_imaginary"])))

        np.savetxt(os.path.join(self.raw_path_with_date,
                                str(datasaver.run_id)+'_powersweep' +
                                '_'+str(self.exp_name)+'_magnitude.txt'),
                   magnitude_table)

        np.savetxt(os.path.join(self.raw_path_with_date,
                                str(datasaver.run_id)+'_powersweep'+'_' +
                                str(self.exp_name)+'_phase.txt'),
                   phase_table)

        np.savetxt(os.path.join(self.raw_path_with_date,
                                str(datasaver.run_id)+'_powersweep' +
                                '_'+str(self.exp_name)+'_real.txt'),
                   real_table)

        np.savetxt(os.path.join(self.raw_path_with_date,
                                str(datasaver.run_id)+'_powersweep' +
                                '_'+str(self.exp_name)+'_imaginary.txt'),
                   imaginary_table)
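
# --- Hedged usage sketch (not from the original source) ---------------------
# The tables saved above consist of three stacked rows (power, frequency,
# value) flattened over the power/frequency grid. A minimal sketch of reading
# one such file back and reshaping it into a 2D grid; the file name is a
# hypothetical example and a regular, complete sweep grid is assumed.
import numpy as np

power, freq, mag = np.loadtxt("42_powersweep_myexp_magnitude.txt")
power_axis = np.unique(power)   # the swept power values
freq_axis = np.unique(freq)     # the frequency axis of the trace
mag_grid = mag.reshape(len(power_axis), len(freq_axis))  # (n_power, n_freq)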
Example #2
0
def dond(
    *params: Union[AbstractSweep, ParamMeasT],
    write_period: Optional[float] = None,
    measurement_name: str = "",
    exp: Optional[Experiment] = None,
    enter_actions: ActionsT = (),
    exit_actions: ActionsT = (),
    do_plot: Optional[bool] = None,
    show_progress: Optional[bool] = None,
    use_threads: Optional[bool] = None,
    additional_setpoints: Sequence[ParamMeasT] = tuple(),
) -> AxesTupleListWithDataSet:
    """
    Perform an n-dimensional scan from the slowest (first) to the fastest
    (last) dimension, measuring m measurement parameters. The dimensions
    should be specified as sweep objects, followed by the parameters to
    measure.

    Args:
        *params: Instances of n sweep classes and m measurement parameters,
            e.g. if linear sweep is considered:

            .. code-block::

                LinSweep(param_set_1, start_1, stop_1, num_points_1, delay_1), ...,
                LinSweep(param_set_n, start_n, stop_n, num_points_n, delay_n),
                param_meas_1, param_meas_2, ..., param_meas_m

        write_period: The time after which the data is actually written to the
            database.
        measurement_name: Name of the measurement. This will be passed down to
            the dataset produced by the measurement. If not given, a default
            value of 'results' is used for the dataset.
        exp: The experiment to use for this measurement.
        enter_actions: A list of functions taking no arguments that will be
            called before the measurements start.
        exit_actions: A list of functions taking no arguments that will be
            called after the measurements ends.
        do_plot: should png and pdf versions of the images be saved and plots
            be shown after the run. If None the setting will be read from
            ``qcodesrc.json``
        show_progress: should a progress bar be displayed during the
            measurement. If None the setting will be read from ``qcodesrc.json``
        use_threads: If True, measurements from each instrument will be done on
            separate threads. If you are measuring from several instruments
            this may give a significant speedup.
        additional_setpoints: A list of setpoint parameters to be registered in
            the measurement but not scanned/swept-over.
    """
    if do_plot is None:
        do_plot = config.dataset.dond_plot
    if show_progress is None:
        show_progress = config.dataset.dond_show_progress

    meas = Measurement(name=measurement_name, exp=exp)

    def _parse_dond_arguments(
        *params: Union[AbstractSweep, ParamMeasT]
    ) -> Tuple[List[AbstractSweep], List[ParamMeasT]]:
        """
        Parse supplied arguments into sweep objects and measurement parameters.
        """
        sweep_instances: List[AbstractSweep] = []
        params_meas: List[ParamMeasT] = []
        for par in params:
            if isinstance(par, AbstractSweep):
                sweep_instances.append(par)
            else:
                params_meas.append(par)
        return sweep_instances, params_meas

    def _make_nested_setpoints(sweeps: List[AbstractSweep]) -> np.ndarray:
        """Create the cartesian product of all the setpoint values."""
        if len(sweeps) == 0:
            return np.array([[]])  # 0d sweep (do0d)
        setpoint_values = [sweep.get_setpoints() for sweep in sweeps]
        setpoint_grids = np.meshgrid(*setpoint_values, indexing="ij")
        flat_setpoint_grids = [
            np.ravel(grid, order="C") for grid in setpoint_grids
        ]
        return np.vstack(flat_setpoint_grids).T

    sweep_instances, params_meas = _parse_dond_arguments(*params)
    nested_setpoints = _make_nested_setpoints(sweep_instances)

    all_setpoint_params = tuple(sweep.param
                                for sweep in sweep_instances) + tuple(
                                    s for s in additional_setpoints)

    measured_parameters = tuple(par for par in params_meas
                                if isinstance(par, _BaseParameter))

    try:
        loop_shape = tuple(1 for _ in additional_setpoints) + tuple(
            sweep.num_points for sweep in sweep_instances)
        shapes: Shapes = detect_shape_of_measurement(measured_parameters,
                                                     loop_shape)
    except TypeError:
        LOG.exception(f"Could not detect shape of {measured_parameters} "
                      f"falling back to unknown shape.")
        shapes = None

    _register_parameters(meas, all_setpoint_params)
    _register_parameters(meas,
                         params_meas,
                         setpoints=all_setpoint_params,
                         shapes=shapes)
    _set_write_period(meas, write_period)
    _register_actions(meas, enter_actions, exit_actions)

    original_delays: Dict[_BaseParameter, float] = {}
    params_set: List[_BaseParameter] = []
    for sweep in sweep_instances:
        original_delays[sweep.param] = sweep.param.post_delay
        sweep.param.post_delay = sweep.delay
        params_set.append(sweep.param)

    try:
        with _catch_keyboard_interrupts() as interrupted, meas.run(
        ) as datasaver:
            dataset = datasaver.dataset
            additional_setpoints_data = process_params_meas(
                additional_setpoints)
            for setpoints in tqdm(nested_setpoints, disable=not show_progress):
                param_set_list = []
                param_value_pairs = zip(params_set[::-1], setpoints[::-1])
                for setpoint_param, setpoint in param_value_pairs:
                    setpoint_param(setpoint)
                    param_set_list.append((setpoint_param, setpoint))
                datasaver.add_result(
                    *param_set_list,
                    *process_params_meas(params_meas, use_threads=use_threads),
                    *additional_setpoints_data,
                )
    finally:
        for parameter, original_delay in original_delays.items():
            parameter.post_delay = original_delay

    return _handle_plotting(dataset, do_plot, interrupted())
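
# --- Hedged usage sketch (not from the original source) ---------------------
# One way dond could be called, using the DummyInstrument that also appears in
# Example #10. Import paths may differ between qcodes versions, and a database
# and experiment are assumed to have been initialised already.
from qcodes.tests.instrument_mocks import DummyInstrument
from qcodes.utils.dataset.doNd import LinSweep  # path may vary by version

dac = DummyInstrument("dac", gates=["ch1", "ch2"])
dmm = DummyInstrument("dmm", gates=["v1"])

dataset, axes, cbs = dond(
    LinSweep(dac.ch1, 0.0, 1.0, 51, 0.01),      # slow (outer) axis
    LinSweep(dac.ch2, -0.5, 0.5, 101, 0.001),   # fast (inner) axis
    dmm.v1,                                     # measured at every grid point
    measurement_name="gate_gate_map",
    show_progress=True,
)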
Example #3
0
def do1d(
    param_set: _BaseParameter,
    start: float,
    stop: float,
    num_points: int,
    delay: float,
    *param_meas: ParamMeasT,
    enter_actions: ActionsT = (),
    exit_actions: ActionsT = (),
    write_period: Optional[float] = None,
    measurement_name: str = "",
    exp: Optional[Experiment] = None,
    do_plot: Optional[bool] = None,
    use_threads: Optional[bool] = None,
    additional_setpoints: Sequence[ParamMeasT] = tuple(),
    show_progress: Optional[bool] = None,
) -> AxesTupleListWithDataSet:
    """
    Perform a 1D scan of ``param_set`` from ``start`` to ``stop`` in
    ``num_points`` steps, measuring ``param_meas`` at each step. If
    ``param_meas`` is an ArrayParameter this is effectively a 2D scan.

    Args:
        param_set: The QCoDeS parameter to sweep over
        start: Starting point of sweep
        stop: End point of sweep
        num_points: Number of points in sweep
        delay: Delay after setting parameter before measurement is performed
        *param_meas: Parameter(s) to measure at each step or functions that
          will be called at each step. The function should take no arguments.
          The parameters and functions are called in the order they are
          supplied.
        enter_actions: A list of functions taking no arguments that will be
            called before the measurements start
        exit_actions: A list of functions taking no arguments that will be
            called after the measurements ends
        write_period: The time after which the data is actually written to the
            database.
        additional_setpoints: A list of setpoint parameters to be registered in
            the measurement but not scanned.
        measurement_name: Name of the measurement. This will be passed down to
            the dataset produced by the measurement. If not given, a default
            value of 'results' is used for the dataset.
        exp: The experiment to use for this measurement.
        do_plot: should png and pdf versions of the images be saved after the
            run. If None the setting will be read from ``qcodesrc.json``
        use_threads: If True, measurements from each instrument will be done on
            separate threads. If you are measuring from several instruments
            this may give a significant speedup.
        show_progress: should a progress bar be displayed during the
            measurement. If None the setting will be read from ``qcodesrc.json``

    Returns:
        The QCoDeS dataset.
    """
    if do_plot is None:
        do_plot = config.dataset.dond_plot
    if show_progress is None:
        show_progress = config.dataset.dond_show_progress

    meas = Measurement(name=measurement_name, exp=exp)

    all_setpoint_params = (param_set, ) + tuple(s
                                                for s in additional_setpoints)

    measured_parameters = tuple(param for param in param_meas
                                if isinstance(param, _BaseParameter))
    try:
        loop_shape = tuple(1 for _ in additional_setpoints) + (num_points, )
        shapes: Shapes = detect_shape_of_measurement(measured_parameters,
                                                     loop_shape)
    except TypeError:
        LOG.exception(f"Could not detect shape of {measured_parameters} "
                      f"falling back to unknown shape.")
        shapes = None

    _register_parameters(meas, all_setpoint_params)
    _register_parameters(meas,
                         param_meas,
                         setpoints=all_setpoint_params,
                         shapes=shapes)
    _set_write_period(meas, write_period)
    _register_actions(meas, enter_actions, exit_actions)

    original_delay = param_set.post_delay
    param_set.post_delay = delay

    # do1D enforces a simple relationship between measured parameters
    # and set parameters. For anything more complicated this should be
    # reimplemented from scratch
    with _catch_keyboard_interrupts() as interrupted, meas.run() as datasaver:
        dataset = datasaver.dataset
        additional_setpoints_data = process_params_meas(additional_setpoints)
        setpoints = np.linspace(start, stop, num_points)

        # flush to prevent unflushed print's to visually interrupt tqdm bar
        # updates
        sys.stdout.flush()
        sys.stderr.flush()

        for set_point in tqdm(setpoints, disable=not show_progress):
            param_set.set(set_point)
            datasaver.add_result((param_set, set_point),
                                 *process_params_meas(param_meas,
                                                      use_threads=use_threads),
                                 *additional_setpoints_data)

    param_set.post_delay = original_delay

    return _handle_plotting(dataset, do_plot, interrupted())
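
# --- Hedged usage sketch (not from the original source) ---------------------
# A minimal call of do1d with hypothetical instruments: sweep dac.ch1 over
# 101 points between 0 V and 1 V, wait 10 ms after each set, and measure
# dmm.v1 at every point.
dataset, axes, cbs = do1d(dac.ch1, 0.0, 1.0, 101, 0.01, dmm.v1,
                          measurement_name="gate_trace",
                          show_progress=True)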
Example #4
0
def linear2d(param_set1,
             start1,
             stop1,
             num_points1,
             delay1,
             param_set2,
             start2,
             stop2,
             num_points2,
             delay2,
             *param_meas,
             win=None,
             append=False,
             plot_params=None,
             atstart=None,
             ateachcol=None,
             ateach=None,
             atend=None,
             setback=False,
             write_period=120):
    """
    Run a 2D sweep over two parameters: ``param_set1`` is stepped on the outer
    (x) axis and ``param_set2`` is swept on the inner (y) axis, with a delay
    after each point is set.

    Args:
        param_set1 (Parameter): The parameter to be stepped on the x-axis

        start1 (Union[int, float]): Starting point of the x-axis parameter

        stop1 (Union[int, float]): End point of the x-axis parameter

        num_points1 (int): Number of points to take between start and stop (inclusive) on the x-axis

        delay1 (Union[int, float]): The delay after setting the parameter on the x-axis

        param_set2 (Parameter): The parameter to be swept on the y-axis

        start2 (Union[int, float]): Starting point of the y-axis parameter

        stop2 (Union[int, float]): End point of the y-axis parameter

        num_points2 (int): Number of points to take between start and stop (inclusive) on the y-axis

        delay2 (Union[int, float]): The delay after setting the parameter on the y-axis

        *param_meas (Iterable[Parameter]): A list of the parameters to be measured at each of the
        set points. These must be single valued for live plotting to work

        win (Optional[PlotWindow]): The plot window to add plots to. If this value is None, the sweep
        will not be live plotted.

        append (bool): If this parameter is true, the trace will be appended to an existing window.

        plot_params (Optional[Iterable[Parameter]]): A list of measured parameters to live plot. If no
        value is given, then all parameters will be live-plotted

        atstart (Optional[Union[Callable,Iterable[Callable]]]): A function or list of functions
        to be run before the measurement is started. The functions will be run BEFORE the parameters
        are inserted into the measurement, hence if some parameters require setup before they are run,
        they can be inserted here.

        ateachcol (Optional[Union[Callable,Iterable[Callable]]]): A function or list of functions
        to be run after each column of data is complete, useful for example for doing more advanced
        wall control. These functions are run AFTER the delay.

        ateach (Optional[Union[Callable,Iterable[Callable]]]): A function or list of functions
        to be run each time the sweep parameter is set. These functions are run AFTER
        the delay, and so are suitable if an instrument requires a call to capture a trace before
        the parameter can be read.

        atend (Optional[Union[Callable,Iterable[Callable]]]): A function or list of functions
        to be run at the end of a trace. This is run AFTER the data is saved into the database,
        and after parameters are set back to their starting points (if setback is True), and
        can therefore be used to read the data that was taken and potentially do some post analysis.

        setback (Optional[bool]): If this is True, the setpoint parameter is returned to its starting
        value at the end of the sweep.

        write_period (Optional[int]): The time between writes of the data to the database.
        Irrespective of this setting, data is saved when the sweep finishes, and an attempt
        is made to save it if the sweep is interrupted.

    Returns:
        The run id of the saved trace, which can be used with plot_by_id.

    """
    _flush_buffers(*param_meas)

    # Register setpoints
    meas = Measurement()
    # Step Axis
    meas.register_parameter(param_set1)
    param_set1.post_delay = delay1
    set_points1 = np.linspace(start1, stop1, num_points1)
    # Sweep Axis
    meas.register_parameter(param_set2)
    param_set2.post_delay = delay2
    set_points2 = np.linspace(start2, stop2, num_points2)

    # Keep track of data and plots
    if plot_params is None:
        plot_params = param_meas
    output = []
    plots = {}

    # Run @start functions
    _run_functions(atstart)

    # Register each parameter
    for p, parameter in enumerate(param_meas):
        meas.register_parameter(parameter, setpoints=(param_set1, param_set2))
        output.append([parameter, None])

        # Add Plot item
        if win is not None and parameter in plot_params:
            if append:
                plotitem = win.items[0]
                plotdata = plotitem.plot(setpoint_x=set_points1,
                                         setpoint_y=set_points2)
            else:
                plotitem = win.addPlot(
                    name=parameter.full_name,
                    title="%s (%s) v.<br>%s (%s)" %
                    (param_set1.full_name, param_set1.label,
                     param_set2.full_name, param_set2.label))
                plotdata = plotitem.plot(setpoint_x=set_points1,
                                         setpoint_y=set_points2)
                plotitem.update_axes(param_set1, param_set2)
                plotdata.update_histogram_axis(parameter)
            plots[parameter] = LivePlotDataItem(
                plotitem, plotdata, np.ndarray((num_points1, num_points2)))

    meas.write_period = write_period
    pbar = None
    try:
        with meas.run() as datasaver:
            # Update plot titles (only if a live-plot window was supplied)
            if win is not None:
                win.run_id = datasaver.run_id
                win.win_title += "{} ".format(datasaver.run_id)
                for plotitem in plots.values():
                    plotitem.plot.plot_title += " (id: %d)" % datasaver.run_id

            pbar = tqdm(total=num_points1, unit="col", position=0, leave=True)
            for i, set_point1 in enumerate(set_points1):
                param_set2.set(start2)
                param_set1.set(set_point1)
                _run_functions(ateachcol,
                               param_vals=(Setpoint(param_set1, i,
                                                    set_point1), ))
                for j, set_point2 in enumerate(set_points2):
                    param_set2.set(set_point2)
                    _run_functions(
                        ateach,
                        param_vals=(Setpoint(param_set1, i, set_point1),
                                    Setpoint(param_set2, j, set_point2)))
                    for p, parameter in enumerate(param_meas):
                        output[p][1] = parameter.get()

                        if win is not None and parameter in plots:
                            fdata = plots[parameter].data
                            fdata[i, j] = output[p][1]
                            if i == 0:
                                # Calculate z-range of data, and remove NaN's from first column
                                # This sets zero point for rest of data
                                z_range = (np.nanmin(fdata[i, :j + 1]),
                                           np.nanmax(fdata[i, :j + 1]))
                                fdata[0,
                                      j + 1:] = (z_range[0] + z_range[1]) / 2
                                fdata[1:, :] = (z_range[0] + z_range[1]) / 2

                            # Update plot items, and update range every 10 points
                            if (num_points1 * num_points2) < 1000 or (j %
                                                                      20) == 0:
                                plots[parameter].plotdata.update(fdata, True)

                    # Save data
                    datasaver.add_result((param_set1, set_point1),
                                         (param_set2, set_point2), *output)
                pbar.update(1)

            # At the end, do one last update to make sure that all data is displayed.
            if win is not None:
                for pd in plots.values():
                    pd.plotdata.update(pd.data, True)
    finally:
        # Set parameters back to start
        if setback:
            param_set1.set(start1)
            param_set2.set(start2)

        # Close progress bar
        if pbar is not None:
            pbar.close()

        _run_functions(atend)

    return datasaver.run_id
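
# --- Hedged usage sketch (not from the original source) ---------------------
# A possible 2D map with hypothetical instruments; ``win`` is an optional
# live-plot window of the kind created in Example #16, and the returned run id
# can be used with plot_by_id.
run_id = linear2d(dac.ch1, 0.0, 1.0, 51, 0.01,      # outer (x) axis
                  dac.ch2, -0.5, 0.5, 101, 0.001,   # inner (y) axis
                  dmm.v1,                           # measured parameter
                  win=None, setback=True)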
Example #5
0
def test_perform_actual_upgrade_6_to_newest_add_new_data():
    """
    Insert new runs on top of existing runs upgraded and verify that they
    get the correct captured_run_id and captured_counter
    """
    from qcodes.dataset.measurements import Measurement
    from qcodes.instrument.parameter import Parameter
    import numpy as np

    fixpath = os.path.join(fixturepath, 'db_files', 'version6')

    db_file = 'some_runs.db'
    dbname_old = os.path.join(fixpath, db_file)

    if not os.path.exists(dbname_old):
        pytest.skip("No db-file fixtures found. You can generate test db-files"
                    " using the scripts in the "
                    "https://github.com/QCoDeS/qcodes_generate_test_db/ repo")

    with temporarily_copied_DB(dbname_old, debug=False, version=6) as conn:
        assert isinstance(conn, ConnectionPlus)
        perform_db_upgrade(conn)
        assert get_user_version(conn) >= 7
        no_of_runs_query = "SELECT max(run_id) FROM runs"
        no_of_runs = one(
            atomic_transaction(conn, no_of_runs_query), 'max(run_id)')

        # Now let's insert new runs and ensure that they also get
        # captured_run_id assigned.
        params = []
        for n in range(5):
            params.append(Parameter(f'p{n}', label=f'Parameter {n}',
                                    unit=f'unit {n}', set_cmd=None,
                                    get_cmd=None))

        # Set up an experiment
        exp = new_experiment('some-exp', 'some-sample', conn=conn)
        meas = Measurement(exp=exp)
        meas.register_parameter(params[0])
        meas.register_parameter(params[1])
        meas.register_parameter(params[2], basis=(params[0],))
        meas.register_parameter(params[3], basis=(params[1],))
        meas.register_parameter(params[4], setpoints=(params[2], params[3]))

        # Make a number of identical runs
        for _ in range(10):
            with meas.run() as datasaver:
                for x in np.random.rand(10):
                    for y in np.random.rand(10):
                        z = np.random.rand()
                        datasaver.add_result((params[0], 0),
                                             (params[1], 1),
                                             (params[2], x),
                                             (params[3], y),
                                             (params[4], z))

        no_of_runs_new = one(
            atomic_transaction(conn, no_of_runs_query), 'max(run_id)')
        assert no_of_runs_new == 20

        # check that run_id is equivalent to captured_run_id for new
        # runs
        for run_id in range(no_of_runs, no_of_runs_new + 1):
            ds1 = load_by_id(run_id, conn)
            ds2 = load_by_run_spec(captured_run_id=run_id, conn=conn)

            assert ds1.the_same_dataset_as(ds2)

            assert ds1.run_id == run_id
            assert ds1.run_id == ds1.captured_run_id
            assert ds2.run_id == run_id
            assert ds2.run_id == ds2.captured_run_id

        # we are creating a new experiment into a db with one exp so:
        exp_id = 2

        # check that counter is equivalent to captured_counter for new
        # runs
        for counter in range(1, no_of_runs_new - no_of_runs + 1):
            ds1 = load_by_counter(counter, exp_id, conn)
            # giving only the counter is not unique since we have 2 experiments
            with pytest.raises(NameError, match="More than one"
                                                " matching dataset"):
                load_by_run_spec(captured_counter=counter, conn=conn)
            # however we can supply counter and experiment
            ds2 = load_by_run_spec(captured_counter=counter,
                                   experiment_name='some-exp',
                                   conn=conn)

            assert ds1.the_same_dataset_as(ds2)
            assert ds1.counter == counter
            assert ds1.counter == ds1.captured_counter
            assert ds2.counter == counter
            assert ds2.counter == ds2.captured_counter
Example #6
0
    def save_segmented_data_return_info(
        self,
        segment_db_name: str,
        segment_db_folder: Optional[str] = None,
    ) -> Dict[int, Dict[str, Dict[str, Tuple[float, float]]]]:
        """
        Save each mesh in a new dataset in given databases

        returns:
        segment_info = {
            data_id: {
                readout_method: {'range_x': (),
                                 'range_y': ()
                        }
                    }
        }
        """
        if segment_db_folder is None:
            segment_db_folder = nt.config["db_folder"]

        if not self.segmented_data:
            self.prepare_segmented_data(use_raw_data=True)
        if not os.path.isfile(os.path.join(segment_db_folder, segment_db_name)):
            ds = load_by_id(self.qc_run_id)
            nt.new_database(segment_db_name, db_folder=segment_db_folder)
            qc.new_experiment(f'segmented_{ds.exp_name}',
                              sample_name=ds.sample_name)


        original_params = self.qc_parameters
        segment_info: Dict[int, Dict[str, Dict[str, Tuple[float, float]]]] = {}

        with nt.switch_database(segment_db_name, segment_db_folder):
            for segment in self.segmented_data:
                meas = Measurement()
                meas.register_custom_parameter(
                    original_params[0].name,
                    label=original_params[0].label,
                    unit=original_params[0].unit,
                    paramtype="array",
                )

                meas.register_custom_parameter(
                    original_params[1].name,
                    label=original_params[1].label,
                    unit=original_params[1].unit,
                    paramtype="array",
                )
                result: List[List[Tuple[str, np.ndarray]]] = []
                ranges: Dict[str, Dict[str, Tuple[float, float]]] = {}
                m_params = [str(it) for it in list(segment.data_vars)]
                for ip, param_name in enumerate(m_params):
                    coord_names = list(segment.coords)
                    x_crd_name = coord_names[0]
                    y_crd_name = coord_names[1]

                    voltage_x = segment[param_name][x_crd_name].values
                    voltage_y = segment[param_name][y_crd_name].values
                    signal = segment[param_name].values

                    range_x = (np.min(voltage_x), np.max(voltage_x))
                    range_y = (np.min(voltage_y), np.max(voltage_y))
                    ranges[param_name] = {}
                    ranges[param_name]["range_x"] = range_x
                    ranges[param_name]["range_y"] = range_y

                    setpoints = self.raw_data[param_name].depends_on
                    meas.register_custom_parameter(
                        original_params[ip+2].name,
                        label=original_params[ip+2].label,
                        unit=original_params[ip+2].unit,  # use the measured parameter's own unit
                        paramtype="array",
                        setpoints=setpoints,
                    )
                    v_x_grid, v_y_grid = np.meshgrid(voltage_x, voltage_y)

                    result.append([(setpoints[0], v_x_grid),
                                    (setpoints[1], v_y_grid),
                                    (param_name, signal.T)])

                with meas.run() as datasaver:
                    for r_i in range(len(self.readout_methods)):
                        datasaver.add_result(*result[r_i])

                    datasaver.dataset.add_metadata(
                        "snapshot", json.dumps(self.snapshot)
                        )
                    datasaver.dataset.add_metadata(
                        nt.meta_tag, json.dumps(self.nt_metadata)
                    )
                    datasaver.dataset.add_metadata(
                        "original_guid", json.dumps(self.guid)
                        )
                    logger.debug(
                        "New dataset created and populated.\n"
                        + "database: "
                        + str(segment_db_name)
                        + ", ID: "
                        + str(datasaver.run_id)
                    )
                    segment_info[datasaver.run_id] = ranges

        return segment_info
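
# --- Hedged usage sketch (not from the original source) ---------------------
# ``tuningresult`` is a hypothetical instance of the class this method belongs
# to; the returned mapping tells, per saved run id and readout method, which
# voltage ranges the segment covers.
segment_info = tuningresult.save_segmented_data_return_info("segments.db")
for data_id, readouts in segment_info.items():
    for readout_method, ranges in readouts.items():
        print(data_id, readout_method, ranges["range_x"], ranges["range_y"])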
Example #7
0
def do0d(*param_meas,
         win=None,
         append=False,
         stack=False,
         legend=False,
         atstart=None,
         ateach=None,
         atend=None):
    """
    Measure each of the given parameters once, without sweeping any setpoint
    parameter (a "zero-dimensional" measurement).

    Args:
        *param_meas (Iterable[Parameter]): A list of the parameters to be measured. If any
        of the parameters given are ArrayParameters then a 1D sweep will be taken on that
        parameter, using the setpoints given in that ArrayParameter.

        win (Optional[PlotWindow]): The plot window to add plots to. If this value is None, the sweep
        will not be live plotted.

        append (bool): If this parameter is true, the trace will be appended to an existing window.

        stack (Optional[bool]): If this parameter is given, all parameters are stacked over
        each other on a single plot, otherwise separate plots are created for each measured parameter.

        legend (Optional[bool]): If true, a legend is added to each plot item.

        atstart (Optional[Union[Callable,Iterable[Callable]]]): A function or list of functions
        to be run before the measurement is started. The functions will be run BEFORE the parameters
        are inserted into the measurement, hence if some parameters require setup before they are run,
        they can be inserted here.

        ateach (Optional[Union[Callable,Iterable[Callable]]]): A function or list of functions
        to be run once before the parameters are read. These are suitable if an instrument
        requires a call to capture a trace before the parameter can be read.

        atend (Optional[Union[Callable,Iterable[Callable]]]): A function or list of functions
        to be run at the end of the measurement. This is run AFTER the data is saved into the
        database, and can therefore be used to read the data that was taken and potentially
        do some post analysis.

    Returns:
        The run id of the saved data; it can be used with plot_by_id.
    """
    _flush_buffers(*param_meas)

    # Register setpoints
    meas = Measurement()

    # Keep track of data and plots
    output = []
    plots = []
    table = None
    table_items = {}

    # Run @start functions
    _run_functions(atstart)

    # Register each of the sweep parameters and set up a plot window for them
    for p, parameter in enumerate(param_meas):
        meas.register_parameter(parameter)
        output.append([parameter, None])

        if win is not None:
            # Figure out if we have 1d or 2d data
            shape = getattr(parameter, 'shape', None)
            if shape is not None and shape != tuple():
                set_points = parameter.setpoints[0]
                data = np.ndarray((parameter.shape[0], ))
            else:
                set_points = None

            # Create plot window
            if set_points is not None:
                if append:
                    plotitem = win.items[0]
                elif stack and win.items:
                    plotitem = win.items[0]
                    plotitem.plot_title += f" {parameter.full_name}"
                else:
                    plotitem = win.addPlot(name=parameter.full_name,
                                           title="%s" % (parameter.full_name))
                    if legend:
                        plotitem.addLegend()

                # Add data into the plot window
                plotdata = plotitem.plot(setpoint_x=set_points,
                                         pen=(255, 0, 0),
                                         name=parameter.full_name)
                plotitem.update_axes(parameter,
                                     parameter,
                                     param_x_setpoint=True)
                plots.append(LivePlotDataItem(plotitem, plotdata, data))
            else:
                if table is None:
                    table = pyplot.TableWidget(sortable=False)
                    t_widget = win.scene().addWidget(table)
                    t_widget.setMinimumSize(300, 0)
                    win.addItem(t_widget)
                table_items[parameter.full_name] = (0, )

    try:
        with meas.run() as datasaver:
            if win is not None:
                # Update plot titles to include the ID
                win.run_id = datasaver.run_id
                win.win_title += "{} ".format(datasaver.run_id)
                for plotitem in plots:
                    plotitem.plot.plot_title += " (id: %d)" % datasaver.run_id

            _run_functions(ateach, param_vals=tuple())
            # Read out each parameter
            plot_number = 0
            for p, parameter in enumerate(param_meas):
                output[p][1] = parameter.get()
                shape = getattr(parameter, 'shape', None)
                if win is not None:
                    if shape is not None and shape != tuple():
                        plots[plot_number].data[:] = output[p][
                            1]  # Update 2D data
                        plots[plot_number].plotdata.update(
                            plots[plot_number].data)
                        plot_number += 1
                    else:
                        table_items[parameter.full_name] = (output[p][1], )

            # If stacked, make traces different
            if stack:
                plots[0].plot.makeTracesDifferent()

            # Save data
            datasaver.add_result(*output)

            # Update table
            if table is not None:
                table.setData(table_items)
    finally:
        _run_functions(atend)  # Run functions at the end

    # Return the dataid
    return datasaver.run_id  # can use plot_by_id(dataid)
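
# --- Hedged usage sketch (not from the original source) ---------------------
# Read a handful of hypothetical parameters once; array-valued parameters get
# a 1D live plot if a window is supplied, scalar ones go into the table widget.
run_id = do0d(dmm.v1, spectrum_analyzer.trace, win=None)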
Example #8
0
def generate_DB_file_with_runs_and_snapshots():
    """
    Generate a .db-file with a handful of runs some of which have snapshots.

    Generated runs:
        #1: run with a snapshot that has some content
        #2: run with a snapshot of an empty station
        #3: run without a snapshot
    """
    v4fixturepath = os.path.join(utils.fixturepath, 'version4')
    os.makedirs(v4fixturepath, exist_ok=True)
    path = os.path.join(v4fixturepath, 'with_runs_and_snapshots.db')

    if os.path.exists(path):
        os.remove(path)

    from qcodes.dataset.sqlite_base import is_column_in_table
    from qcodes.dataset.measurements import Measurement
    from qcodes.dataset.experiment_container import Experiment
    from qcodes import Parameter, Station
    from qcodes.dataset.descriptions import RunDescriber
    from qcodes.dataset.dependencies import InterDependencies

    exp = Experiment(path_to_db=path,
                     name='experiment_1',
                     sample_name='no_sample_1')
    conn = exp.conn

    # Now make some parameters to use in measurements
    params = []
    for n in range(4):
        params.append(Parameter(f'p{n}', label=f'Parameter {n}',
                                unit=f'unit {n}', set_cmd=None, get_cmd=None))

    # We are going to make 3 runs
    run_ids = []

    # Make a run with a snapshot with some content

    full_station = Station(*params[0:1], default=False)
    assert Station.default is None

    meas = Measurement(exp, full_station)
    meas.register_parameter(params[0])
    meas.register_parameter(params[1])
    meas.register_parameter(params[2], basis=(params[1],))
    meas.register_parameter(params[3], setpoints=(params[1], params[2]))

    with meas.run() as datasaver:

        for x in np.random.rand(4):
            for y in np.random.rand(4):
                z = np.random.rand()
                datasaver.add_result((params[1], x),
                                     (params[2], y),
                                     (params[3], z))

    run_ids.append(datasaver.run_id)

    # Make a run with a snapshot of empty station

    empty_station = Station(default=False)
    assert Station.default is None

    meas = Measurement(exp, empty_station)
    meas.register_parameter(params[0])
    meas.register_parameter(params[1])
    meas.register_parameter(params[2])
    meas.register_parameter(params[3], setpoints=(params[1], params[2]))

    with meas.run() as datasaver:

        for x in np.random.rand(4):
            for y in np.random.rand(4):
                z = np.random.rand()
                datasaver.add_result((params[1], x),
                                     (params[2], y),
                                     (params[3], z))

    run_ids.append(datasaver.run_id)

    # Make a run without a snapshot (i.e. station is None)

    assert Station.default is None

    meas = Measurement(exp)
    meas.register_parameter(params[0])
    meas.register_parameter(params[1])
    meas.register_parameter(params[2], basis=(params[1],))
    meas.register_parameter(params[3], setpoints=(params[1], params[2]))

    with meas.run() as datasaver:

        for x in np.random.rand(4):
            for y in np.random.rand(4):
                z = np.random.rand()
                datasaver.add_result((params[1], x),
                                     (params[2], y),
                                     (params[3], z))

    run_ids.append(datasaver.run_id)

    # Check correctness of run_id's

    assert [1, 2, 3] == run_ids, 'Run ids of generated runs are not as ' \
                                 'expected after generating runs #1-3'

    # Ensure snapshot column

    assert is_column_in_table(conn, 'runs', 'snapshot')
Example #9
0
def meas_with_registered_param(experiment, DAC, DMM):
    meas = Measurement()
    meas.register_parameter(DAC.ch1)
    meas.register_parameter(DMM.v1, setpoints=[DAC.ch1])
    yield meas
Example #10
0
def test_plot_dataset_2d_shaped(experiment, request, nan_setpoints, shifted):
    """
    Test plotting of preshaped data on a grid that may or may not be shifted
    with and without nans in the set points.
    """
    inst = DummyInstrument("dummy", gates=["s1", "m1", "s2"])
    request.addfinalizer(inst.close)

    inst.m1.get = np.random.randn

    meas = Measurement()
    meas.register_parameter(inst.s1)
    meas.register_parameter(inst.s2)
    meas.register_parameter(inst.m1, setpoints=(inst.s1, inst.s2))

    outer_shape = 10
    inner_shape = 20

    meas.set_shapes(
        detect_shape_of_measurement((inst.m1, ), (outer_shape, inner_shape)))

    shift = 0

    with meas.run() as datasaver:
        try:
            for outer in np.linspace(0, 9, outer_shape):
                for inner in np.linspace(0 + shift, 10 + shift, inner_shape):
                    datasaver.add_result((inst.s1, outer), (inst.s2, inner),
                                         (inst.m1, inst.m1()))
                    if inner > 7 and outer > 6 and nan_setpoints:
                        raise TerminateLoopException
                if shifted:
                    shift += 1
        except TerminateLoopException:
            pass

    axes, cbs = plot_dataset(datasaver.dataset)
    xlims = axes[0].get_xlim()
    ylims = axes[0].get_ylim()

    # check that this generates a QuadMesh which is the expected output of pcolormesh
    assert any(
        isinstance(mplobj, QuadMesh) for mplobj in axes[0].get_children())

    if nan_setpoints and shifted:
        assert xlims[0] == -0.5
        assert xlims[1] == 7.5
        assert ylims[0] < 0
        assert ylims[0] > -1.0
        assert ylims[1] > 16
        assert ylims[1] < 17
    elif not nan_setpoints and shifted:
        assert xlims[0] == -0.5
        assert xlims[1] == 9.5
        assert ylims[0] < 0
        assert ylims[0] > -1.0
        assert ylims[1] > 19
        assert ylims[1] < 20
    elif nan_setpoints and not shifted:
        assert xlims[0] == -0.5
        assert xlims[1] == 7.5
        assert ylims[0] < 0
        assert ylims[0] > -1.0
        assert ylims[1] > 10
        assert ylims[1] < 11
    else:
        assert xlims[0] == -0.5
        assert xlims[1] == 9.5
        assert ylims[0] < 0
        assert ylims[0] > -1.0
        assert ylims[1] > 10
        assert ylims[1] < 11
Example #11
0
def generate_DB_file_with_runs_but_no_snapshots():
    """
    Generate a .db-file with a handful of runs without snapshots
    """

    # This function will run often on CI and re-generate the .db-files
    # That should ideally be a deterministic action
    # (although this hopefully plays no role)
    np.random.seed(0)

    v4fixturepath = os.path.join(utils.fixturepath, 'version4')
    os.makedirs(v4fixturepath, exist_ok=True)
    path = os.path.join(v4fixturepath, 'with_runs_but_no_snapshots.db')

    if os.path.exists(path):
        os.remove(path)

    from qcodes.dataset.sqlite_base import connect, is_column_in_table
    from qcodes.dataset.measurements import Measurement
    from qcodes.dataset.experiment_container import Experiment
    from qcodes import Parameter, Station

    connect(path)

    exp = Experiment(path_to_db=path,
                     name='experiment_1',
                     sample_name='no_sample_1')
    conn = exp.conn

    assert not is_column_in_table(conn, 'runs', 'snapshot')

    # Now make some parameters to use in measurements
    params = []
    for n in range(4):
        params.append(Parameter(f'p{n}', label=f'Parameter {n}',
                                unit=f'unit {n}', set_cmd=None, get_cmd=None))

    assert Station.default is None

    # Set up an experiment

    meas = Measurement(exp)
    meas.register_parameter(params[0])
    meas.register_parameter(params[1])
    meas.register_parameter(params[2], basis=(params[1],))
    meas.register_parameter(params[3], setpoints=(params[1], params[2]))

    # Make a number of identical runs

    for _ in range(4):

        with meas.run() as datasaver:

            for x in np.random.rand(4):
                for y in np.random.rand(4):
                    z = np.random.rand()
                    datasaver.add_result((params[1], x),
                                         (params[2], y),
                                         (params[3], z))

    assert not is_column_in_table(conn, 'runs', 'snapshot')
Example #12
0
def test_datasaver_arrays_lists_tuples(empty_temp_db, N):
    new_experiment('firstexp', sample_name='no sample')

    meas = Measurement()

    meas.register_custom_parameter(name='freqax',
                                   label='Frequency axis',
                                   unit='Hz')
    meas.register_custom_parameter(name='signal',
                                   label='qubit signal',
                                   unit='Majorana number',
                                   setpoints=('freqax', ))

    with meas.run() as datasaver:
        freqax = np.linspace(1e6, 2e6, N)
        signal = np.random.randn(N)

        datasaver.add_result(('freqax', freqax), ('signal', signal))

    assert datasaver.points_written == N

    with meas.run() as datasaver:
        freqax = np.linspace(1e6, 2e6, N)
        signal = np.random.randn(N - 1)

        with pytest.raises(ValueError):
            datasaver.add_result(('freqax', freqax), ('signal', signal))

    meas.register_custom_parameter(name='gate_voltage',
                                   label='Gate tuning potential',
                                   unit='V')
    meas.register_custom_parameter(name='signal',
                                   label='qubit signal',
                                   unit='Majorana flux',
                                   setpoints=('freqax', 'gate_voltage'))

    # save arrays
    with meas.run() as datasaver:
        freqax = np.linspace(1e6, 2e6, N)
        signal = np.random.randn(N)

        datasaver.add_result(('freqax', freqax), ('signal', signal),
                             ('gate_voltage', 0))

    assert datasaver.points_written == N

    # save lists
    with meas.run() as datasaver:
        freqax = list(np.linspace(1e6, 2e6, N))
        signal = list(np.random.randn(N))

        datasaver.add_result(('freqax', freqax), ('signal', signal),
                             ('gate_voltage', 0))

    assert datasaver.points_written == N

    # save tuples
    with meas.run() as datasaver:
        freqax = tuple(np.linspace(1e6, 2e6, N))
        signal = tuple(np.random.randn(N))

        datasaver.add_result(('freqax', freqax), ('signal', signal),
                             ('gate_voltage', 0))

    assert datasaver.points_written == N
Example #13
0
def test_subscriptions(experiment, DAC, DMM):
    """
    Test that subscribers are called at the moment that data is flushed to the database.

    Note that for the purpose of this test, the flush_data_to_database method is called
    explicitly instead of waiting for the data to be flushed automatically once the
    write_period has passed after an add_result call.

    Args:
        experiment (qcodes.dataset.experiment_container.Experiment) : qcodes experiment object
        DAC (qcodes.instrument.base.Instrument) : dummy instrument object
        DMM (qcodes.instrument.base.Instrument) : another dummy instrument object
    """
    def subscriber1(results, length, state):
        """
        A dict of all results
        """
        state[length] = results

    def subscriber2(results, length, state):
        """
        A list of all parameter values larger than 7
        """
        for res in results:
            state += [pres for pres in res if pres > 7]

    meas = Measurement(exp=experiment)
    meas.register_parameter(DAC.ch1)
    meas.register_parameter(DMM.v1, setpoints=(DAC.ch1, ))

    res_dict = {}
    lt7s = []

    meas.add_subscriber(subscriber1, state=res_dict)
    assert len(meas.subscribers) == 1
    meas.add_subscriber(subscriber2, state=lt7s)
    assert len(meas.subscribers) == 2

    meas.write_period = 0.2

    expected_list = []

    with meas.run() as datasaver:

        assert len(datasaver._dataset.subscribers) == 2
        assert res_dict == {}
        assert lt7s == []

        as_and_bs = list(zip(range(5), range(3, 8)))

        for num in range(5):

            (a, b) = as_and_bs[num]
            expected_list += [c for c in (a, b) if c > 7]

            datasaver.add_result((DAC.ch1, a), (DMM.v1, b))
            datasaver.flush_data_to_database()

            assert lt7s == expected_list
            assert list(res_dict.keys()) == [n for n in range(1, num + 2)]

    assert len(datasaver._dataset.subscribers) == 0
Example #14
0
    def record_S21_sweep_frequency(self, use_default_values=False, override_default_values=False, **kwargs):
        """ takes a frequency sweep, keeping all parameters the same 
            (getting them from the instrument) except for specific ones 
            set in kwargs, which are set in the instrument before performing 
            the sweep. """

        for key, value in kwargs.items():
            # check whether the qcodes driver exposes this parameter and,
            # if so, set it on the instrument before the sweep
            if hasattr(self.vna, key):
                getattr(self.vna, key)(value)
            else:
                raise ValueError(f"VNA driver has no parameter named '{key}'")
        
        self.vna.power(self.vnapower)
        self.vna.start(self.start_frequency)
        self.vna.stop(self.stop_frequency)
        self.vna.points(self.num_freq_points)
        self.vna.trace(self.measuredtrace)
        
        # num_freq_points = self.vna.points.get()  # get current number of points from VNA settings

        meas = Measurement()  # qcodes measurement

        # self.vna.points.set(20)
        self.vna.auto_sweep(False)

        meas.register_parameter(self.vna.real)
        meas.register_parameter(self.vna.imaginary)

        meas.register_parameter(self.vna.magnitude)
        meas.register_parameter(self.vna.phase)

        # actually get the data
        with meas.run() as datasaver:  # run the measurement; results are written via the datasaver
            # self.vna.active_trace.set(1)  # there are Tr1 and Tr2
            self.vna.traces.tr1.run_sweep()

            imag = self.vna.imaginary()
            real = self.vna.real()

            mag = self.vna.magnitude()
            phase = self.vna.phase()

            datasaver.add_result((self.vna.magnitude, mag),
                                 (self.vna.phase, phase),
                                 (self.vna.real, real),
                                 (self.vna.imaginary, imag))

            dataid = datasaver.run_id

        pd = datasaver.dataset.get_parameter_data()

        plot_by_id(dataid)

        export = np.zeros((self.num_freq_points, 5))

        export[:, 0] = pd[self.vna_name +
                         "_tr1_magnitude"][self.vna_name + '_tr1_frequency'][0]
        export[:, 1] = pd[self.vna_name +
                         '_tr1_magnitude'][self.vna_name + '_tr1_magnitude'][0]
        export[:, 2] = pd[self.vna_name +
                         '_tr1_phase'][self.vna_name + '_tr1_phase'][0]
        export[:, 3] = pd[self.vna_name +
                         '_tr1_real'][self.vna_name + '_tr1_real'][0]
        export[:, 4] = pd[self.vna_name +
                         '_tr1_imaginary'][self.vna_name + '_tr1_imaginary'][0]

        np.savetxt(os.path.join(self.raw_path_with_date,
                                str(datasaver.run_id)+'_nosweep' +
                                '_'+str(self.exp_name)+'.txt'),
                   export)

        plt.plot(export[:, 0], export[:, 1])
        plt.xlabel('Frequency (Hz)')
        plt.ylabel('Magnitude (dB)')
        plt.savefig(os.path.join(self.raw_path_with_date,
                                 str(datasaver.run_id)+'_nosweep' +
                                 '_'+str(self.exp_name)+'_magnitude.png'))

        plt.cla()
        plt.plot(export[:, 0], export[:, 2])
        plt.xlabel('Frequency (Hz)')
        plt.ylabel('Phase (deg)')
        plt.savefig(os.path.join(self.raw_path_with_date,
                                 str(datasaver.run_id)+'_nosweep' +
                                 '_'+str(self.exp_name)+'_phase.png'))
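
# --- Hedged usage sketch (not from the original source) ---------------------
# Reading one exported trace back; the file name is a hypothetical example and
# the columns are frequency, magnitude, phase, real, imaginary (see above).
freq, mag, phase, re_part, im_part = np.loadtxt("7_nosweep_myexp.txt").T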
Example #15
0
def test_cache_1d_shape(experiment, DAC, DMM, n_points, bg_writing,
                  channel_array_instrument, setpoints_type,
                  in_memory_cache):

    setpoints_param, setpoints_values = _prepare_setpoints_1d(
        DAC, channel_array_instrument,
        n_points, setpoints_type
    )

    meas = Measurement()

    meas.register_parameter(setpoints_param)

    meas_parameters = (DMM.v1,
                       channel_array_instrument.A.dummy_multi_parameter,
                       channel_array_instrument.A.dummy_scalar_multi_parameter,
                       channel_array_instrument.A.dummy_2d_multi_parameter,
                       channel_array_instrument.A.dummy_2d_multi_parameter_2,
                       channel_array_instrument.A.dummy_array_parameter,
                       channel_array_instrument.A.dummy_complex_array_parameter,
                       channel_array_instrument.A.dummy_complex,
                       channel_array_instrument.A.dummy_parameter_with_setpoints,
                       channel_array_instrument.A.dummy_parameter_with_setpoints_complex,
                       )
    pws_n_points = 10
    channel_array_instrument.A.dummy_start(0)
    channel_array_instrument.A.dummy_stop(10)
    channel_array_instrument.A.dummy_n_points(pws_n_points)

    expected_shapes = {
        'dummy_dmm_v1': (n_points, ),
        'dummy_channel_inst_ChanA_multi_setpoint_param_this': (n_points, 5),
        'dummy_channel_inst_ChanA_multi_setpoint_param_that': (n_points, 5),
        'dummy_channel_inst_ChanA_thisparam': (n_points, ),
        'dummy_channel_inst_ChanA_thatparam': (n_points, ),
        'dummy_channel_inst_ChanA_this': (n_points, 5, 3),
        'dummy_channel_inst_ChanA_that': (n_points, 5, 3),
        'dummy_channel_inst_ChanA_this_5_3': (n_points, 5, 3),
        'dummy_channel_inst_ChanA_this_2_7': (n_points, 2, 7),
        'dummy_channel_inst_ChanA_dummy_array_parameter': (n_points, 5),
        'dummy_channel_inst_ChanA_dummy_complex_array_parameter': (n_points, 5),
        'dummy_channel_inst_ChanA_dummy_complex': (n_points, ),
        'dummy_channel_inst_ChanA_dummy_parameter_with_setpoints': (n_points, pws_n_points),
        'dummy_channel_inst_ChanA_dummy_parameter_with_setpoints_complex': (n_points, pws_n_points)
    }

    for param in meas_parameters:
        meas.register_parameter(param, setpoints=(setpoints_param,))
    meas.set_shapes(detect_shape_of_measurement(
        meas_parameters,
        (n_points,))
    )
    n_points_measured = 0
    with meas.run(write_in_background=bg_writing,
                  in_memory_cache=in_memory_cache) as datasaver:
        dataset = datasaver.dataset
        _assert_parameter_data_is_identical(dataset.get_parameter_data(), dataset.cache.data())
        for i, v in enumerate(setpoints_values):
            n_points_measured += 1
            setpoints_param.set(v)

            meas_vals = [(param, param.get()) for param in meas_parameters[:-2]]
            meas_vals += expand_setpoints_helper(meas_parameters[-2])
            meas_vals += expand_setpoints_helper(meas_parameters[-1])

            datasaver.add_result((setpoints_param, v),
                                 *meas_vals)
            datasaver.flush_data_to_database(block=True)
            cache_data_trees = dataset.cache.data()
            param_data_trees = dataset.get_parameter_data()
            _assert_partial_cache_is_as_expected(
                cache_data_trees,
                expected_shapes,
                n_points_measured,
                param_data_trees,
                cache_correct=True
            )
    cache_data_trees = dataset.cache.data()
    param_data_trees = dataset.get_parameter_data()

    _assert_completed_cache_is_as_expected(cache_data_trees,
                                           param_data_trees,
                                           flatten=False)
Example #16
0
def RT_LT_ithaco(station, voltage, stanford_gain_V, gain_ithaco):

    station.dmm1.NPLC(10)
    station.dmm2.NPLC(10)

    station.yoko.output('off')
    station.yoko.source_mode("VOLT")
    station.yoko.output('on')

    station.yoko.voltage.step = 1e-3
    station.yoko.voltage.inter_delay = 0.0001

    meas = Measurement()

    meas.register_parameter(station.yoko.voltage)
    meas.register_parameter(station.BlueFors_LD.MC_temp)
    meas.register_custom_parameter("Counter")
    meas.register_custom_parameter("Current", unit = "A")
    meas.register_parameter(station.dmm1.volt)
    meas.register_parameter(station.dmm2.volt)
    meas.register_custom_parameter("Resistance", unit = "Ohms", setpoints=(station.BlueFors_LD.MC_temp,))
    
    win = qcm.pyplot.PlotWindow(title="R(T)")
    win.resize(750,500)

    num_points = 0
    array_size = 1
    temp_array = np.full((1,), np.nan)
    r_array = np.full((1,), np.nan)


    plot1 = win.addPlot(title="RT  4K - 8mK")
    plotdata = plot1.plot(setpoint_x=temp_array, color=(0, 0, 255))

    plot1.left_axis.label = "Resistance"
    plot1.left_axis.units = "Ohms"
    plot1.bot_axis.label = "Temperature"
    plot1.bot_axis.units = "K"

    j = 0

    T = station.BlueFors_LD.MC_temp()

    with meas.run() as datasaver:
        
        while T > 0.008:
            T = station.BlueFors_LD.MC_temp() 

            station.yoko.voltage(voltage)
            
            time.sleep(1)

            volt_p = station.dmm1.volt()/stanford_gain_V
            curr_p = -station.dmm2.volt()/gain_ithaco
            
            
            station.yoko.voltage(-voltage)
            
            time.sleep(1)
            
            volt_m = station.dmm1.volt()/stanford_gain_V
            curr_m = -station.dmm2.volt()/gain_ithaco
            
            V_av = (volt_p - volt_m)/2
            I_av = (curr_p - curr_m)/2
            R_av = V_av/I_av

            datasaver.add_result((station.yoko.voltage, voltage),
                                 (station.BlueFors_LD.MC_temp, T),
                                 ("Counter", j),
                                 ("Resistance", R_av),
                                 (station.dmm1.volt, V_av),
                                 ("Current", I_av))

            temp_array[num_points] = T
            r_array[num_points] = R_av
            plotdata.xData = temp_array
            plotdata.update(r_array)
            num_points += 1

            if num_points == array_size:
                array_size *= 2
                temp_array.resize(array_size)
                temp_array[array_size//2:] = np.nan
                r_array.resize(array_size)
                r_array[array_size//2:] = np.nan
            
            #print((T,R_av))
            time.sleep(2)
            j = j+1
            
    station.yoko.voltage(0)
Example #17
0
def test_cache_2d_shape(experiment,
                        DAC,
                        DMM,
                        n_points_outer,
                        n_points_inner,
                        pws_n_points,
                        bg_writing,
                        channel_array_instrument,
                        cache_size):
    meas = Measurement()

    meas.register_parameter(DAC.ch1)
    meas.register_parameter(DAC.ch2)

    meas_parameters = (DMM.v1,
                       channel_array_instrument.A.dummy_multi_parameter,
                       channel_array_instrument.A.dummy_scalar_multi_parameter,
                       channel_array_instrument.A.dummy_2d_multi_parameter,
                       channel_array_instrument.A.dummy_2d_multi_parameter_2,
                       channel_array_instrument.A.dummy_array_parameter,
                       channel_array_instrument.A.dummy_complex_array_parameter,
                       channel_array_instrument.A.dummy_complex,
                       channel_array_instrument.A.dummy_parameter_with_setpoints,
                       channel_array_instrument.A.dummy_parameter_with_setpoints_complex,
                       )

    channel_array_instrument.A.dummy_start(0)
    channel_array_instrument.A.dummy_stop(10)
    channel_array_instrument.A.dummy_n_points(pws_n_points)
    for param in meas_parameters:
        meas.register_parameter(param, setpoints=(DAC.ch1, DAC.ch2))

    if cache_size == "too_small":
        meas.set_shapes(detect_shape_of_measurement(
            meas_parameters,
            (int(ceil(n_points_outer/2)), n_points_inner))
        )
    elif cache_size == "too_large":
        meas.set_shapes(detect_shape_of_measurement(
            meas_parameters,
            (n_points_outer*2, n_points_inner))
        )
    else:
        meas.set_shapes(detect_shape_of_measurement(
            meas_parameters,
            (n_points_outer, n_points_inner))
        )

    expected_shapes = {
        'dummy_dmm_v1': (n_points_outer, n_points_inner),
        'dummy_channel_inst_ChanA_multi_setpoint_param_this': (n_points_outer, n_points_inner, 5),
        'dummy_channel_inst_ChanA_multi_setpoint_param_that': (n_points_outer, n_points_inner, 5),
        'dummy_channel_inst_ChanA_thisparam': (n_points_outer, n_points_inner),
        'dummy_channel_inst_ChanA_thatparam': (n_points_outer, n_points_inner),
        'dummy_channel_inst_ChanA_this': (n_points_outer, n_points_inner, 5, 3),
        'dummy_channel_inst_ChanA_that': (n_points_outer, n_points_inner, 5, 3),
        'dummy_channel_inst_ChanA_this_5_3': (n_points_outer, n_points_inner, 5, 3),
        'dummy_channel_inst_ChanA_this_2_7': (n_points_outer, n_points_inner, 2, 7),
        'dummy_channel_inst_ChanA_dummy_array_parameter': (n_points_outer, n_points_inner, 5),
        'dummy_channel_inst_ChanA_dummy_complex_array_parameter': (n_points_outer, n_points_inner, 5),
        'dummy_channel_inst_ChanA_dummy_complex': (n_points_outer, n_points_inner),
        'dummy_channel_inst_ChanA_dummy_parameter_with_setpoints': (n_points_outer, n_points_inner, pws_n_points),
        'dummy_channel_inst_ChanA_dummy_parameter_with_setpoints_complex': (n_points_outer, n_points_inner, pws_n_points)
    }

    if cache_size == "correct":
        assert meas._shapes == expected_shapes

    with meas.run(write_in_background=bg_writing) as datasaver:
        dataset = datasaver.dataset
        # Check that parameter data and cache data are identical for empty datasets
        _assert_parameter_data_is_identical(dataset.get_parameter_data(), dataset.cache.data())
        n_points_measured = 0
        for v1 in np.linspace(-1, 1, n_points_outer):
            for v2 in np.linspace(-1, 1, n_points_inner):
                n_points_measured += 1
                DAC.ch1.set(v1)
                DAC.ch2.set(v2)
                meas_vals = [(param, param.get()) for param in meas_parameters]

                datasaver.add_result((DAC.ch1, v1),
                                     (DAC.ch2, v2),
                                     *meas_vals)
                datasaver.flush_data_to_database(block=True)
                param_data_trees = dataset.get_parameter_data()
                cache_data_trees = dataset.cache.data()

                _assert_partial_cache_is_as_expected(
                    cache_data_trees,
                    expected_shapes,
                    n_points_measured,
                    param_data_trees,
                    cache_size == "correct"
                )
    cache_data_trees = dataset.cache.data()
    param_data_trees = dataset.get_parameter_data()
    _assert_completed_cache_is_as_expected(cache_data_trees,
                                           param_data_trees,
                                           flatten=cache_size == "too_small",
                                           clip=cache_size == "too_large")
Example #18
0
def do0d(
    *param_meas: ParamMeasT,
    write_period: Optional[float] = None,
    measurement_name: str = "",
    exp: Optional[Experiment] = None,
    do_plot: Optional[bool] = None,
    use_threads: Optional[bool] = None,
    log_info: Optional[str] = None,
) -> AxesTupleListWithDataSet:
    """
    Perform a measurement of a single parameter. This is probably most
    useful for an ArrayParameter that already returns an array of data points

    Args:
        *param_meas: Parameter(s) to measure at each step or functions that
          will be called at each step. The function should take no arguments.
          The parameters and functions are called in the order they are
          supplied.
        write_period: The time after which the data is actually written to the
            database.
        measurement_name: Name of the measurement. This will be passed down to
            the dataset produced by the measurement. If not given, a default
            value of 'results' is used for the dataset.
        exp: The experiment to use for this measurement.
        do_plot: should png and pdf versions of the images be saved after the
            run. If None the setting will be read from ``qcodesrc.json``
        use_threads: If True measurements from each instrument will be done on
            separate threads. If you are measuring from several instruments
            this may give a significant speedup.
        log_info: Message that is logged during the measurement. If None a default
            message is used.

    Returns:
        The QCoDeS dataset.
    """
    if do_plot is None:
        do_plot = config.dataset.dond_plot
    meas = Measurement(name=measurement_name, exp=exp)
    if log_info is not None:
        meas._extra_log_info = log_info
    else:
        meas._extra_log_info = "Using 'qcodes.utils.dataset.doNd.do0d'"

    measured_parameters = tuple(
        param for param in param_meas if isinstance(param, _BaseParameter)
    )

    try:
        shapes: Shapes = detect_shape_of_measurement(
            measured_parameters,
        )
    except TypeError:
        LOG.exception(
            f"Could not detect shape of {measured_parameters} "
            f"falling back to unknown shape."
        )
        shapes = None

    _register_parameters(meas, param_meas, shapes=shapes)
    _set_write_period(meas, write_period)

    with meas.run() as datasaver:
        datasaver.add_result(*process_params_meas(param_meas, use_threads=use_threads))
        dataset = datasaver.dataset

    return _handle_plotting(dataset, do_plot)
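
A minimal usage sketch (not part of the scraped source) for do0d as defined
above: a single acquisition of a software-only Parameter. The parameter is a
stand-in for a real instrument readout, and an initialised database with a
default experiment is assumed to exist.

import numpy as np
from qcodes import Parameter
from qcodes.utils.dataset.doNd import do0d  # module named in the log message above

# Stand-in for an instrument readout; no real hardware needed
noise = Parameter('noise', label='Noise', unit='V', set_cmd=None,
                  get_cmd=lambda: float(np.random.rand()))

# Assumes a database and a default experiment have already been created
dataset, _, _ = do0d(noise, do_plot=False)
print(dataset.run_id)
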
Example #19
0
def test_register_parameter_numbers(DAC, DMM):
    """
    Test the registration of scalar QCoDeS parameters
    """

    parameters = [DAC.ch1, DAC.ch2, DMM.v1, DMM.v2]
    not_parameters = ['', 'Parameter', 0, 1.1, Measurement]

    meas = Measurement()

    for not_a_parameter in not_parameters:
        with pytest.raises(ValueError):
            meas.register_parameter(not_a_parameter)

    my_param = DAC.ch1
    meas.register_parameter(my_param)
    assert len(meas.parameters) == 1
    paramspec = meas.parameters[str(my_param)]
    assert paramspec.name == str(my_param)
    assert paramspec.label == my_param.label
    assert paramspec.unit == my_param.unit
    assert paramspec.type == 'numeric'

    # registering the same parameter twice should lead
    # to a replacement/update, but also change the
    # parameter order behind the scenes
    # (to allow us to re-register a parameter with new
    # setpoints)

    my_param.unit = my_param.unit + '/s'
    meas.register_parameter(my_param)
    assert len(meas.parameters) == 1
    paramspec = meas.parameters[str(my_param)]
    assert paramspec.name == str(my_param)
    assert paramspec.label == my_param.label
    assert paramspec.unit == my_param.unit
    assert paramspec.type == 'numeric'

    for parameter in parameters:
        with pytest.raises(ValueError):
            meas.register_parameter(my_param, setpoints=(parameter, ))
        with pytest.raises(ValueError):
            meas.register_parameter(my_param, basis=(parameter, ))

    meas.register_parameter(DAC.ch2)
    meas.register_parameter(DMM.v1)
    meas.register_parameter(DMM.v2)
    meas.register_parameter(my_param,
                            basis=(DAC.ch2, ),
                            setpoints=(DMM.v1, DMM.v2))

    assert list(meas.parameters.keys()) == [
        str(DAC.ch2), str(DMM.v1),
        str(DMM.v2), str(my_param)
    ]
    paramspec = meas.parameters[str(my_param)]
    assert paramspec.name == str(my_param)
    assert paramspec.inferred_from == ', '.join([str(DAC.ch2)])
    assert paramspec.depends_on == ', '.join([str(DMM.v1), str(DMM.v2)])

    meas = Measurement()

    meas.register_parameter(DAC.ch1)
    meas.register_parameter(DAC.ch2, setpoints=(DAC.ch1, ))
    with pytest.raises(ValueError):
        meas.register_parameter(DMM.v1, setpoints=(DAC.ch2, ))
Example #20
0
def test_cache_standalone(
    experiment,
    DMM,
    n_points,
    bg_writing,
    channel_array_instrument,
    set_shape,
    in_memory_cache,
):

    meas1 = Measurement()
    meas1.register_parameter(DMM.v1)

    meas_parameters1 = (
        DMM.v1,
        channel_array_instrument.A.dummy_multi_parameter,
        channel_array_instrument.A.dummy_scalar_multi_parameter,
        channel_array_instrument.A.dummy_2d_multi_parameter,
        channel_array_instrument.A.dummy_2d_multi_parameter_2,
        channel_array_instrument.A.dummy_array_parameter,
        channel_array_instrument.A.dummy_complex_array_parameter,
        channel_array_instrument.A.dummy_complex,
        channel_array_instrument.A.dummy_parameter_with_setpoints,
        channel_array_instrument.A.dummy_parameter_with_setpoints_complex,
    )
    pws_shape_1 = 10
    pws_shape_2 = 3
    channel_array_instrument.A.dummy_start(0)
    channel_array_instrument.A.dummy_stop(10)
    channel_array_instrument.A.dummy_n_points(pws_shape_1)
    channel_array_instrument.A.dummy_start_2(2)
    channel_array_instrument.A.dummy_stop_2(7)
    channel_array_instrument.A.dummy_n_points_2(pws_shape_2)

    if set_shape:
        meas1.set_shapes(
            {
                DMM.v1.full_name: (n_points,),
                channel_array_instrument.A.dummy_multi_parameter.full_names[0]: (
                    n_points,
                    5,
                ),
                channel_array_instrument.A.dummy_multi_parameter.full_names[1]: (
                    n_points,
                    5,
                ),
                channel_array_instrument.A.dummy_scalar_multi_parameter.full_names[0]: (
                    n_points,
                ),
                channel_array_instrument.A.dummy_scalar_multi_parameter.full_names[1]: (
                    n_points,
                ),
                channel_array_instrument.A.dummy_2d_multi_parameter.full_names[0]: (
                    n_points,
                    5,
                    3,
                ),
                channel_array_instrument.A.dummy_2d_multi_parameter.full_names[1]: (
                    n_points,
                    5,
                    3,
                ),
                channel_array_instrument.A.dummy_2d_multi_parameter_2.full_names[0]: (
                    n_points,
                    5,
                    3,
                ),
                channel_array_instrument.A.dummy_2d_multi_parameter_2.full_names[1]: (
                    n_points,
                    2,
                    7,
                ),
                channel_array_instrument.A.dummy_array_parameter.full_name: (
                    n_points,
                    5,
                ),
                channel_array_instrument.A.dummy_complex_array_parameter.full_name: (
                    n_points,
                    5,
                ),
                channel_array_instrument.A.dummy_complex.full_name: (n_points,),
                channel_array_instrument.A.dummy_parameter_with_setpoints.full_name: (
                    n_points,
                    pws_shape_1,
                ),
                channel_array_instrument.A.dummy_parameter_with_setpoints_complex.full_name: (
                    n_points,
                    pws_shape_1,
                ),
            }
        )

    for param in meas_parameters1:
        meas1.register_parameter(param)

    meas2 = Measurement()

    meas_parameters2 = (channel_array_instrument.A.dummy_parameter_with_setpoints_2d,)

    if set_shape:
        meas2.set_shapes(
            {meas_parameters2[0].full_name: (n_points, pws_shape_1, pws_shape_2)}
        )

    for param in meas_parameters2:
        meas2.register_parameter(param)

    with meas1.run(
        write_in_background=bg_writing, in_memory_cache=in_memory_cache
    ) as datasaver1:
        with meas2.run(
            write_in_background=bg_writing, in_memory_cache=in_memory_cache
        ) as datasaver2:

            dataset1 = datasaver1.dataset
            dataset2 = datasaver2.dataset
            _assert_parameter_data_is_identical(
                dataset1.get_parameter_data(), dataset1.cache.data()
            )
            _assert_parameter_data_is_identical(
                dataset2.get_parameter_data(), dataset2.cache.data()
            )
            for _ in range(n_points):

                meas_vals1 = [(param, param.get()) for param in meas_parameters1]

                datasaver1.add_result(*meas_vals1)
                datasaver1.flush_data_to_database(block=True)

                meas_vals2 = [(param, param.get()) for param in meas_parameters2]

                datasaver2.add_result(*meas_vals2)
                datasaver2.flush_data_to_database(block=True)

                _assert_parameter_data_is_identical(
                    dataset1.get_parameter_data(),
                    dataset1.cache.data(),
                    shaped_partial=set_shape,
                )
                _assert_parameter_data_is_identical(
                    dataset2.get_parameter_data(),
                    dataset2.cache.data(),
                    shaped_partial=set_shape,
                )
    _assert_parameter_data_is_identical(
        dataset1.get_parameter_data(), dataset1.cache.data()
    )
    if in_memory_cache is False:
        assert dataset1.cache._loaded_from_completed_ds is True
    assert dataset1.completed is True
    assert dataset1.cache.live is in_memory_cache
    _assert_parameter_data_is_identical(
        dataset2.get_parameter_data(), dataset2.cache.data()
    )
    if in_memory_cache is False:
        assert dataset2.cache._loaded_from_completed_ds is True
    assert dataset2.completed is True
    assert dataset2.cache.live is in_memory_cache
Example #21
0
def linear1d(param_set,
             start,
             stop,
             num_points,
             delay,
             *param_meas,
             win=None,
             append=False,
             plot_params=None,
             atstart=None,
             ateach=None,
             atend=None,
             setback=False,
             write_period=120):
    """
    Run a sweep of a single parameter, between start and stop, with a delay
    after setting each point, given by ``delay``.

    Args:
        param_set (Parameter): The parameter to be swept

        start (Union[int, float]): Starting point of the parameter

        stop (Union[int, float]): End point of the parameter

        num_points (int): Number of points to take between start and stop (inclusive)

        delay (Union[int, float]): The delay after setting the parameter

        *param_meas (Iterable[Parameter]): A list of the parameters to be measured at each of the
        set points. If any of the parameters given are ArrayParameters then a 2D sweep will be
        taken on that parameter, using the setpoints given in that ArrayParameter.
        Note: At the current time, there is an assumption that the setpoints do NOT change during
        a measurement, and that points are uniformly distributed for the purposes of plotting.
        If the points are not uniformly distributed, the data is still saved correctly; however,
        the live plot will be distorted.

        win (Optional[PlotWindow]): The plot window to add plots to. If this value is None, the sweep
        will not be live plotted.

        append (bool): If this parameter is true, the trace will be appended to an existing window.

        plot_params (Optional[Iterable[Parameter]]): A list of measured parameters to live plot. If no
        value is given, then all parameters will be live-plotted

        atstart (Optional[Union[Callable,Iterable[Callable]]]): A function or list of functions
        to be run before the measurement is started. The functions will be run BEFORE the parameters
        are inserted into the measurement, hence if some parameters require setup before they are run,
        they can be inserted here.

        ateach (Optional[Union[Callable,Iterable[Callable]]]): A function or list of functions
        to be run after each time the sweep parameter is set. These functions will be run AFTER
        the delay, and so are suitable if an instrument requires a call to capture a trace before
        the parameter can be read.

        atend (Optional[Union[Callable,Iterable[Callable]]]): A function or list of functions
        to be run at the end of a trace. This is run AFTER the data is saved into the database,
        and after parameters are set back to their starting points (if setback is True), and
        can therefore be used to read the data that was taken and potentially do some post analysis.

        setback (Optional[bool]): If this is True, the setpoint parameter is returned to its starting
        value at the end of the sweep.

        write_period (Optional[int]): The time between writes of the data to the database.
        Irrespective of what this is set to, data will be saved when the sweep finishes, and an
        attempt will be made to save the data if the sweep is interrupted.

    Returns:
        id: The run id of the saved trace, which can be passed to ``plot_by_id``. If live
        plotting was requested, the plots are added to the window passed in as ``win``.

    """
    _flush_buffers(*param_meas)

    # Register setpoints
    meas = Measurement()
    meas.register_parameter(param_set)
    param_set.post_delay = delay
    set_points = np.linspace(start, stop, num_points)

    # Keep track of data and plots
    if plot_params is None:
        plot_params = param_meas
    output = []
    plots = {}

    # Run @start functions
    _run_functions(atstart)

    # Register each of the sweep parameters and set up a plot window for them
    for p, parameter in enumerate(param_meas):
        meas.register_parameter(parameter, setpoints=(param_set, ))
        output.append([parameter, None])

        if win is not None and parameter in plot_params:
            # Create plot window
            if append:
                plotitem = win.items[0]
            else:
                plotitem = win.addPlot(name=parameter.full_name,
                                       title="%s (%s) v.<br>%s (%s)" %
                                       (param_set.full_name, param_set.label,
                                        parameter.full_name, parameter.label))

            # Figure out if we have 1d or 2d data
            shape = getattr(parameter, 'shape', None)
            if shape is not None and shape != tuple():
                set_points_y = parameter.setpoints[0]
                data = np.ndarray((num_points, parameter.shape[0]))
            else:
                set_points_y = None
                data = np.full(num_points, np.nan)

            # Add data into the plot window
            plotdata = plotitem.plot(setpoint_x=set_points,
                                     setpoint_y=set_points_y,
                                     pen=(255, 0, 0),
                                     name=parameter.name)
            plots[parameter] = LivePlotDataItem(plotitem, plotdata, data)

            # Update axes
            if set_points_y is not None:
                plotitem.update_axes(param_set,
                                     parameter,
                                     param_y_setpoint=True)
                plotdata.update_histogram_axis(parameter)
            else:
                plotitem.update_axes(param_set, parameter)

    # Run the sweep
    meas.write_period = write_period
    pbar = None
    try:
        with meas.run() as datasaver:
            if win is not None:
                # Update plot titles to include the ID
                win.run_id = datasaver.run_id
                win.win_title += "{} ".format(datasaver.run_id)
                for plotitem in plots.values():
                    plotitem.plot.plot_title += " (id: %d)" % datasaver.run_id

            # Then, run the actual sweep
            pbar = tqdm(total=num_points, unit="pt", position=0, leave=True)
            for i, set_point in enumerate(set_points):
                param_set.set(set_point)
                _run_functions(ateach,
                               param_vals=(Setpoint(param_set, i,
                                                    set_point), ))
                # Read out each parameter
                for p, parameter in enumerate(param_meas):
                    output[p][1] = parameter.get()
                    shape = getattr(parameter, 'shape', None)
                    if win is not None and parameter in plots:
                        if shape is not None and shape != tuple():
                            plots[parameter].data[i, :] = output[p][1]  # Update 2D data
                            # For a 2D trace, figure out the value for data not
                            # yet set if this is the first column
                            if i == 0:
                                plots[parameter].data[1:] = (np.min(output[p][1]) +
                                                             np.max(output[p][1])) / 2
                            # Update live plots
                            plots[parameter].plotdata.update(plots[parameter].data)
                        else:
                            plots[parameter].data[i] = output[p][1]  # Update 1D data
                            plots[parameter].plotdata.setData(
                                set_points[:i], plots[parameter].data[:i])

                # Save data
                datasaver.add_result((param_set, set_point), *output)
                pbar.update(1)
    finally:
        # Set back to start at the end of the measurement
        if setback:
            param_set.set(start)

        # Close the progress bar
        if pbar is not None:
            pbar.close()

        _run_functions(atend)  # Run functions at the end

    # Return the dataid
    return datasaver.run_id  # can use plot_by_id(dataid)
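
A minimal usage sketch (not part of the scraped source) of the linear1d helper
above, assumed to be run from the module where it and its private helpers
(_flush_buffers, _run_functions, Setpoint) are defined. With win=None no live
plotting is attempted, so software-only Parameters suffice; an initialised
database and experiment are assumed.

import numpy as np
from qcodes import Parameter

gate = Parameter('gate', label='Gate', unit='V', set_cmd=None, get_cmd=None)
current = Parameter('current', label='Current', unit='A', set_cmd=None,
                    get_cmd=lambda: 1e-9 * gate() + 1e-12 * np.random.randn())

# Sweep gate from 0 V to 0.5 V in 51 points with a 10 ms settling delay
run_id = linear1d(gate, 0.0, 0.5, 51, 0.01, current, win=None, setback=True)
# plot_by_id(run_id) can then be used to inspect the saved trace
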
Example #22
0
    def _save_to_db(
        self,
        parameters: Sequence[Parameter],
        setpoints: Sequence[Sequence[float]],
        data: np.ndarray,
        nt_label: Sequence[str],
        quality: int = 1,
        write_period: int = 10,
    ) -> Union[None, int]:
        """ Save data to database. Returns run id. """

        nt.set_database(self.db_name, self.db_folder)

        if len(parameters) not in [1, 2]:
            logger.error("Only 1D and 2D sweeps supported right now.")
            return None

        meas = Measurement()

        if len(parameters) == 1:
            meas.register_parameter(parameters[0])
            meas.register_parameter(self.dummy_lockin.R,
                                    setpoints=(parameters[0], ))

            with meas.run() as datasaver:
                for x_indx, x_val in enumerate(setpoints[0]):
                    parameters[0](x_val)
                    datasaver.add_result((parameters[0], x_val),
                                         (self.dummy_lockin.R, data[x_indx]))

                dataid = datasaver.run_id

        if len(parameters) == 2:
            meas.register_parameter(parameters[0])
            meas.register_parameter(parameters[1])
            meas.register_parameter(self.dummy_lockin.R,
                                    setpoints=(parameters[0], parameters[1]))

            with meas.run() as datasaver:
                for x_indx, x_val in enumerate(setpoints[0]):
                    parameters[0](x_val)
                    for y_indx, y_val in enumerate(setpoints[1]):
                        parameters[1](y_val)
                        # qdot.voltage_nodes[2].v(x_val)
                        # qdot.voltage_nodes[4].v(y_val)
                        datasaver.add_result(
                            (parameters[0], x_val),
                            (parameters[1], y_val),
                            (self.dummy_lockin.R, data[x_indx, y_indx]),
                        )

                dataid = datasaver.run_id

        ds = load_by_id(dataid)

        meta_add_on = dict.fromkeys(nt.config["core"]["meta_fields"], Any)
        meta_add_on["device_name"] = self.name
        nm = dict.fromkeys(["dc_current", "rf"], (0, 1))
        meta_add_on["normalization_constants"] = nm

        ds.add_metadata(nt.meta_tag, json.dumps(meta_add_on))

        current_label = dict.fromkeys(LABELS, 0)
        for label in nt_label:
            if label is not None:  # and nt_label in LABELS:
                if label not in LABELS:
                    logger.error("CapacitanceModel: Invalid label.")
                    print(label)
                    raise ValueError
                current_label[label] = 1
                current_label["good"] = quality

        # print('data id {} current label: {} '.format(dataid, current_label ))
        for label, value in current_label.items():
            ds.add_metadata(label, value)

        return dataid
Example #23
0
def generate_DB_file_with_some_runs_having_not_run_descriptions():
    """
    Generate a .db-file with a handful of runs some of which lack run
    description or have it as empty object (based on a real case).

    Generated runs:
        #1: run with parameters and correct run description
        #2: run with parameters but run description is NULL
        #3: run with parameters but run description is empty RunDescriber
        #4: run without parameters but run description is NULL
    """
    v3fixturepath = os.path.join(fixturepath, 'version3')
    os.makedirs(v3fixturepath, exist_ok=True)
    path = os.path.join(v3fixturepath, 'some_runs_without_run_description.db')

    if os.path.exists(path):
        os.remove(path)

    from qcodes.dataset.measurements import Measurement
    from qcodes.dataset.experiment_container import Experiment
    from qcodes import Parameter
    from qcodes.dataset.descriptions import RunDescriber
    from qcodes.dataset.dependencies import InterDependencies

    exp = Experiment(path_to_db=path,
                     name='experiment_1',
                     sample_name='no_sample_1')
    conn = exp.conn

    # Now make some parameters to use in measurements
    params = []
    for n in range(5):
        params.append(
            Parameter(f'p{n}',
                      label=f'Parameter {n}',
                      unit=f'unit {n}',
                      set_cmd=None,
                      get_cmd=None))

    # Set up a measurement

    meas = Measurement(exp)
    meas.register_parameter(params[0])
    meas.register_parameter(params[1])
    meas.register_parameter(params[2], basis=(params[0], ))
    meas.register_parameter(params[3], basis=(params[1], ))
    meas.register_parameter(params[4], setpoints=(params[2], params[3]))

    # Initially make 3 correct runs

    run_ids = []

    for _ in range(3):

        with meas.run() as datasaver:

            for x in np.random.rand(10):
                for y in np.random.rand(10):
                    z = np.random.rand()
                    datasaver.add_result((params[2], x), (params[3], y),
                                         (params[4], z))

        run_ids.append(datasaver.run_id)

    assert [1, 2, 3] == run_ids, 'Run ids of generated runs are not as ' \
                                 'expected after generating runs #1-3'

    # Formulate SQL query for adjusting run_description column

    set_run_description_sql = """
               UPDATE runs
               SET run_description = ?
               WHERE run_id == ?
               """

    # Make run_description of run #2 NULL

    conn.execute(set_run_description_sql, (None, run_ids[1]))
    conn.commit()  # just to be sure

    # Make run_description of run #3 equivalent to an empty RunDescriber

    empty_run_description = RunDescriber(InterDependencies()).to_json()
    conn.execute(set_run_description_sql, (empty_run_description, run_ids[2]))
    conn.commit()  # just to be sure

    # Set up a measurement without parameters, and create run #4 out of it

    meas_no_params = Measurement(exp)

    with meas_no_params.run() as datasaver:
        pass

    run_ids.append(datasaver.run_id)

    assert [1, 2, 3, 4] == run_ids, 'Run ids of generated runs are not as ' \
                                    'expected after generating run #4'

    # Make run_description of run #4 NULL

    conn.execute(set_run_description_sql, (None, run_ids[3]))
    conn.commit()  # just to be sure
Example #24
0
def do1d(
    param_set: _BaseParameter,
    start: float,
    stop: float,
    num_points: int,
    delay: float,
    *param_meas: ParamMeasT,
    enter_actions: ActionsT = (),
    exit_actions: ActionsT = (),
    write_period: Optional[float] = None,
    do_plot: bool = True,
    additional_setpoints: Sequence[ParamMeasT] = tuple(),
) -> AxesTupleListWithDataSet:
    """
    Perform a 1D scan of ``param_set`` from ``start`` to ``stop`` in
    ``num_points`` measuring param_meas at each step. In case param_meas is
    an ArrayParameter this is effectively a 2d scan.

    Args:
        param_set: The QCoDeS parameter to sweep over
        start: Starting point of sweep
        stop: End point of sweep
        num_points: Number of points in sweep
        delay: Delay after setting parameter before measurement is performed
        *param_meas: Parameter(s) to measure at each step or functions that
          will be called at each step. The function should take no arguments.
          The parameters and functions are called in the order they are
          supplied.
        enter_actions: A list of functions taking no arguments that will be
            called before the measurements start
        exit_actions: A list of functions taking no arguments that will be
            called after the measurements ends
        write_period: The time after which the data is actually written to the
            database.
        additional_setpoints: A list of setpoint parameters to be registered in
            the measurement but not scanned.
        do_plot: should png and pdf versions of the images be saved after the
            run.

    Returns:
        The QCoDeS dataset.
    """
    meas = Measurement()

    all_setpoint_params = (param_set, ) + tuple(s
                                                for s in additional_setpoints)

    measured_parameters = tuple(param for param in param_meas
                                if isinstance(param, _BaseParameter))
    try:
        loop_shape = tuple(1 for _ in additional_setpoints) + (num_points, )
        shapes: Shapes = detect_shape_of_measurement(measured_parameters,
                                                     loop_shape)
    except TypeError:
        LOG.exception(f"Could not detect shape of {measured_parameters} "
                      f"falling back to unknown shape.")
        shapes = None

    _register_parameters(meas, all_setpoint_params)
    _register_parameters(meas,
                         param_meas,
                         setpoints=all_setpoint_params,
                         shapes=shapes)
    _set_write_period(meas, write_period)
    _register_actions(meas, enter_actions, exit_actions)
    param_set.post_delay = delay

    # do1D enforces a simple relationship between measured parameters
    # and set parameters. For anything more complicated this should be
    # reimplemented from scratch
    with _catch_keyboard_interrupts() as interrupted, meas.run() as datasaver:
        additional_setpoints_data = _process_params_meas(additional_setpoints)
        for set_point in np.linspace(start, stop, num_points):
            param_set.set(set_point)
            datasaver.add_result((param_set, set_point),
                                 *_process_params_meas(param_meas),
                                 *additional_setpoints_data)
        dataset = datasaver.dataset
    return _handle_plotting(dataset, do_plot, interrupted())
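
A minimal usage sketch (not part of the scraped source) for do1d as defined
above, using software-only Parameters as stand-ins for instrument channels.
The import path is assumed to be the same doNd module referenced elsewhere in
these examples, and an initialised database with a default experiment is
assumed.

import numpy as np
from qcodes import Parameter
from qcodes.utils.dataset.doNd import do1d  # import path assumed, as for do0d above

gate = Parameter('gate', label='Gate voltage', unit='V', set_cmd=None, get_cmd=None)
signal = Parameter('signal', label='Signal', unit='V', set_cmd=None,
                   get_cmd=lambda: np.cos(gate()) + 0.01 * np.random.randn())

# Sweep gate over 51 points with a 10 ms settling delay, measuring signal
dataset, _, _ = do1d(gate, 0.0, 1.0, 51, 0.01, signal, do_plot=False)
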
Example #25
0
def sweep_time(*param_meas, delay=10, until=None,
               win=None, append=False, plot_params=None, annotation=None,
               atstart=(), ateach=(), atend=()):
    """
    Run a time sweep, with a delay between each point. This sweep will run for `until` seconds,
    or indefinitely if until is None

    Args:
        *param_meas (Iterable[Parameter]): A list of the parameters to be measured at each of the
        set points. For now, these MUST be simple parameters. Arrays cannot be measured.

        win (Optional[PlotWindow]): The plot window to add plots to. If this value is None, the sweep
        will not be live plotted.

        append (bool): If this parameter is true, the trace will be appended to an existing window.

        plot_params (Optional[Iterable[Parameter]]): A list of parameters to plot. If not passed or None,
        all measured parameters will be automatically plotted.

        atstart (Optional[Union[Callable,Iterable[Callable]]]): A function or list of functions
        to be run before the measurement is started. The functions will be run BEFORE the parameters
        are inserted into the measurement, hence if some parameters require setup before they are run,
        they can be inserted here.

        ateach (Optional[Union[Callable,Iterable[Callable]]]): A function or list of functions
        to be run after each time the sweep parameter is set. These functions will be run AFTER
        the delay, and so are suitable if an instrument requires a call to capture a trace before
        the parameter can be read.

        atend (Optional[Union[Callable,Iterable[Callable]]]): A function or list of functions
        to be run at the end of the trace. This is run AFTER the data is saved into the database,
        and can therefore be used to read the data that was taken and potentially do some post analysis.

    Returns:
        id: The run id of the saved trace, which can be passed to ``plot_by_id``. If live
        plotting was requested, the plots are added to the window passed in as ``win``.
    """
    _flush_buffers(*param_meas)

    # Register setpoints
    m = Measurement()
    m.register_custom_parameter("time", label="Time", unit="s")

    _run_functions(atstart)

    # Keep track of data and plots
    plt_data = {}
    time_data = np.full((1,), np.nan)
    array_size = 1
    curr_point = 0

    # If plot_params is not given, plot all measured parameters
    if plot_params is None:
        plot_params = param_meas

    # Set up parameters
    for param in param_meas:
        m.register_parameter(param, setpoints=("time", ))

        # Create plot window
        if win is not None and param in plot_params:
            plot = win.addPlot(name=param.full_name,
                            title=f"{param.full_name} ({param.label})")
            plot.left_axis.label = param.label
            plot.left_axis.unit = param.unit
            plot.bot_axis.label = "Time"
            plot.bot_axis.unit = "s"
            plotdata = plot.plot(setpoint_x=time_data, name=param.name, pen=(255,0,0))
            plt_data[param] = (plot, plotdata, np.full((1,), np.nan))

    if win is not None and annotation is not None:
        win.items[0].textbox(annotation)

    try:
        with m.run() as datasaver:
            start_time = time.monotonic()
            if win is not None:
                win.win_title += f"{datasaver.run_id}"
                for pd in plt_data.values():
                    pd[0].plot_title += f" (id: {datasaver.run_id})"
            while True:
                # Update each parameter
                data = [("time", time.monotonic()-start_time)]
                time_data[curr_point] = data[-1][1]

                _run_functions(ateach, param_vals=(Setpoint("time", curr_point, data[-1][1]),))

                if until is not None and time_data[curr_point] > until:
                    break

                for param in param_meas:
                    val = param()
                    if val is None:
                        val = np.nan
                    data.append((param, val))
                    if win is not None and param in plot_params:
                        plt_data[param][2][curr_point] = data[-1][1]
                        plt_data[param][1].xData = time_data
                        plt_data[param][1].update(plt_data[param][2])

                curr_point += 1

                # Resize plot arrays if necessary
                if array_size == curr_point:
                    array_size *= 2
                    logger.debug("New plot array size: %d", array_size)
                    time_data.resize(array_size)
                    time_data[array_size//2:] = np.nan
                    for pld in plt_data.values():
                        pld[2].resize(array_size)
                        pld[2][array_size//2:] = np.nan

                datasaver.add_result(*data)

                # Wait until the next point time. Try to keep track of how long it
                # took for equipment to respond
                next_time = start_time + delay*curr_point
                while time.monotonic() < next_time:
                    sleep_time = max(0, min(0.01, next_time - time.monotonic()))
                    time.sleep(sleep_time)
    except KeyboardInterrupt:
        print(f"Trace cancelled with Ctrl-C")
        print(f"Ending plot at time {time.monotonic() - start_time}.")
    finally:
        _run_functions(atend)

    return datasaver.run_id
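
A minimal usage sketch (not part of the scraped source) for sweep_time as
defined above. With win=None no live plotting is attempted, so a software-only
Parameter suffices; an initialised database and experiment are assumed.

import numpy as np
from qcodes import Parameter

temperature = Parameter('temperature', label='MC temperature', unit='K', set_cmd=None,
                        get_cmd=lambda: 0.01 + 0.001 * np.random.randn())

# Log the parameter every 5 s for one minute
run_id = sweep_time(temperature, delay=5, until=60, win=None)
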
Example #26
0
def do2d(
    param_set1: _BaseParameter,
    start1: float,
    stop1: float,
    num_points1: int,
    delay1: float,
    param_set2: _BaseParameter,
    start2: float,
    stop2: float,
    num_points2: int,
    delay2: float,
    *param_meas: ParamMeasT,
    set_before_sweep: Optional[bool] = True,
    enter_actions: ActionsT = (),
    exit_actions: ActionsT = (),
    before_inner_actions: ActionsT = (),
    after_inner_actions: ActionsT = (),
    write_period: Optional[float] = None,
    flush_columns: bool = False,
    do_plot: bool = True,
    additional_setpoints: Sequence[ParamMeasT] = tuple(),
) -> AxesTupleListWithDataSet:
    """
    Perform a 1D scan of ``param_set1`` from ``start1`` to ``stop1`` in
    ``num_points1`` and ``param_set2`` from ``start2`` to ``stop2`` in
    ``num_points2`` measuring param_meas at each step.

    Args:
        param_set1: The QCoDeS parameter to sweep over in the outer loop
        start1: Starting point of sweep in outer loop
        stop1: End point of sweep in the outer loop
        num_points1: Number of points to measure in the outer loop
        delay1: Delay after setting parameter in the outer loop
        param_set2: The QCoDeS parameter to sweep over in the inner loop
        start2: Starting point of sweep in inner loop
        stop2: End point of sweep in the inner loop
        num_points2: Number of points to measure in the inner loop
        delay2: Delay after setting parameter before measurement is performed
        *param_meas: Parameter(s) to measure at each step or functions that
          will be called at each step. The function should take no arguments.
          The parameters and functions are called in the order they are
          supplied.
        set_before_sweep: if True the outer parameter is set to its first value
            before the inner parameter is swept to its next value.
        enter_actions: A list of functions taking no arguments that will be
            called before the measurements start
        exit_actions: A list of functions taking no arguments that will be
            called after the measurements ends
        before_inner_actions: Actions executed before each run of the inner loop
        after_inner_actions: Actions executed after each run of the inner loop
        write_period: The time after which the data is actually written to the
            database.
        flush_columns: The data is written after a column is finished
            independent of the passed time and write period.
        additional_setpoints: A list of setpoint parameters to be registered in
            the measurement but not scanned.
        do_plot: should png and pdf versions of the images be saved after the
            run.

    Returns:
        The QCoDeS dataset.
    """

    meas = Measurement()
    all_setpoint_params = (
        param_set1,
        param_set2,
    ) + tuple(s for s in additional_setpoints)

    measured_parameters = tuple(param for param in param_meas
                                if isinstance(param, _BaseParameter))

    try:
        loop_shape = tuple(
            1 for _ in additional_setpoints) + (num_points1, num_points2)
        shapes: Shapes = detect_shape_of_measurement(measured_parameters,
                                                     loop_shape)
    except TypeError:
        LOG.exception(f"Could not detect shape of {measured_parameters} "
                      f"falling back to unknown shape.")
        shapes = None

    _register_parameters(meas, all_setpoint_params)
    _register_parameters(meas,
                         param_meas,
                         setpoints=all_setpoint_params,
                         shapes=shapes)
    _set_write_period(meas, write_period)
    _register_actions(meas, enter_actions, exit_actions)

    param_set1.post_delay = delay1
    param_set2.post_delay = delay2

    with _catch_keyboard_interrupts() as interrupted, meas.run() as datasaver:
        additional_setpoints_data = _process_params_meas(additional_setpoints)
        for set_point1 in np.linspace(start1, stop1, num_points1):
            if set_before_sweep:
                param_set2.set(start2)

            param_set1.set(set_point1)
            for action in before_inner_actions:
                action()
            for set_point2 in np.linspace(start2, stop2, num_points2):
                # skip first inner set point if `set_before_sweep`
                if set_point2 == start2 and set_before_sweep:
                    pass
                else:
                    param_set2.set(set_point2)

                datasaver.add_result((param_set1, set_point1),
                                     (param_set2, set_point2),
                                     *_process_params_meas(param_meas),
                                     *additional_setpoints_data)
            for action in after_inner_actions:
                action()
            if flush_columns:
                datasaver.flush_data_to_database()
        dataset = datasaver.dataset
    return _handle_plotting(dataset, do_plot, interrupted())
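
A minimal usage sketch (not part of the scraped source) for do2d as defined
above, again with software-only Parameters and the import path assumed to be
the same doNd module as for do0d. An initialised database with a default
experiment is assumed.

import numpy as np
from qcodes import Parameter
from qcodes.utils.dataset.doNd import do2d  # import path assumed, as for do0d above

gate1 = Parameter('gate1', label='Outer gate', unit='V', set_cmd=None, get_cmd=None)
gate2 = Parameter('gate2', label='Inner gate', unit='V', set_cmd=None, get_cmd=None)
signal = Parameter('signal', label='Signal', unit='V', set_cmd=None,
                   get_cmd=lambda: np.exp(-(gate1() ** 2 + gate2() ** 2)))

# 21 x 21 grid with a 10 ms settling delay on each axis
dataset, _, _ = do2d(gate1, -1.0, 1.0, 21, 0.01,
                     gate2, -1.0, 1.0, 21, 0.01,
                     signal, do_plot=False)
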
Example #27
0
def Hall_gate(station, v_gates, V_polar, field_range_Y, stanford_gain_1, stanford_gain_2, stanford_gain_3):

    R_polar = 1e6

    print(f'I_polar = {V_polar/R_polar}')

    R_I = 1e4

    # dmm1 is voltage
    station.dmm1.NPLC(100)

    # dmm2 is current
    station.dmm2.NPLC(10)

    station.dmm3.NPLC(100)
    
    station.yoko.voltage.step = 1e-4
    station.yoko.voltage.inter_delay = 0.0001

    meas = Measurement()

    meas.register_parameter(station.yoko.voltage)
    meas.register_parameter(station.mag.y_target)
    meas.register_parameter(station.mag.y_measured)
    meas.register_parameter(station.mdac_8.ch01.voltage)
    meas.register_parameter(station.dmm1.volt, setpoints = (station.mag.y_target,station.mdac_8.ch01.voltage))
    meas.register_parameter(station.dmm2.volt, setpoints = (station.mag.y_target,station.mdac_8.ch01.voltage))
    meas.register_parameter(station.dmm3.volt, setpoints = (station.mag.y_target,station.mdac_8.ch01.voltage))
    meas.register_custom_parameter("R_h", unit = "Ohms", setpoints = (station.mag.y_target,station.mdac_8.ch01.voltage))
    meas.register_custom_parameter("R_xx", unit = "Ohms", setpoints = (station.mag.y_target,station.mdac_8.ch01.voltage))

    with meas.run() as datasaver:

        for b in field_range_Y:

            station.mag.y_target(b)
            station.mag.ramp('simul')

            while abs(station.mag.y_measured()-b)>0.001:
                time.sleep(2)
            time.sleep(5)

            l_y = station.mag.y_measured()

            print(l_y)

            for v_g in v_gates:

                station.mdac_8.ch01.ramp(v_g, 0.01)
                while abs(station.mdac_8.ch01.voltage()-v_g)>0.001:
                    time.sleep(2)
                time.sleep(2)
                print(f'V_g = {v_g} V')

           
                station.yoko.voltage(-V_polar)

                time.sleep(1)

                volt_h_m = station.dmm1.volt()/stanford_gain_1
                curr_m = station.dmm2.volt()/(R_I*stanford_gain_2)
                volt_m = station.dmm3.volt()/stanford_gain_3
                
                time.sleep(1)
                
                station.yoko.voltage(V_polar)
                
                time.sleep(1)
                
                volt_h_p = station.dmm1.volt()/stanford_gain_1
                curr_p = station.dmm2.volt()/(R_I*stanford_gain_2)
                volt_p = station.dmm3.volt()/stanford_gain_3
                
                time.sleep(1)
                
                V_av = (volt_p - volt_m)/2
                I_av = (curr_p - curr_m)/2
                V_h_av = (volt_h_p - volt_h_m)/2
                
                R_av = V_av/I_av
                R_h_av = V_h_av/I_av

                print(R_av)
                
                datasaver.add_result((station.mdac_8.ch01.voltage, v_g),
                                     (station.yoko.voltage, V_polar),
                                     (station.dmm2.volt, curr_p),
                                     (station.dmm1.volt, V_h_av),
                                     (station.dmm3.volt, V_av),
                                     (station.mag.y_measured, l_y),
                                     (station.mag.y_target, b),
                                     ("R_h", R_h_av),
                                     ("R_xx",R_av))
                station.yoko.voltage(0)
                ID_exp = datasaver.run_id
                
        plot_by_id(ID_exp)
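
A minimal illustration (not from the scraped source) of the plus/minus bias
symmetrisation used in the loop above: averaging the readings taken at
+V_polar and -V_polar cancels constant offsets (thermal EMFs, amplifier
offsets) before R_xx and R_h are formed. The numbers below are made up.

def symmetrise(plus, minus):
    """Offset-free average of a reading at +V_polar and one at -V_polar."""
    return (plus - minus) / 2

# Made-up example readings (already divided by the amplifier gains)
V_xx = symmetrise(1.02e-3, -0.98e-3)   # longitudinal voltage (V)
V_h = symmetrise(2.1e-4, -1.9e-4)      # Hall voltage (V)
I = symmetrise(1.00e-7, -1.00e-7)      # current (A)

print(V_xx / I, V_h / I)               # R_xx and R_h in ohms
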
Example #28
0
def test_cache_1d(experiment, DAC, DMM, n_points, bg_writing,
                  channel_array_instrument, setpoints_type,
                  set_shape, in_memory_cache):

    setpoints_param, setpoints_values = _prepare_setpoints_1d(
        DAC, channel_array_instrument,
        n_points, setpoints_type
    )

    meas1 = Measurement()

    meas1.register_parameter(setpoints_param)

    meas_parameters1 = (
        DMM.v1,
        channel_array_instrument.A.dummy_multi_parameter,
        channel_array_instrument.A.dummy_scalar_multi_parameter,
        channel_array_instrument.A.dummy_2d_multi_parameter,
        channel_array_instrument.A.dummy_2d_multi_parameter_2,
        channel_array_instrument.A.dummy_array_parameter,
        channel_array_instrument.A.dummy_complex_array_parameter,
        channel_array_instrument.A.dummy_complex,
        channel_array_instrument.A.dummy_parameter_with_setpoints,
        channel_array_instrument.A.dummy_parameter_with_setpoints_complex,
    )
    pws_shape_1 = 10
    pws_shape_2 = 3
    channel_array_instrument.A.dummy_start(0)
    channel_array_instrument.A.dummy_stop(10)
    channel_array_instrument.A.dummy_n_points(pws_shape_1)
    channel_array_instrument.A.dummy_start_2(2)
    channel_array_instrument.A.dummy_stop_2(7)
    channel_array_instrument.A.dummy_n_points_2(pws_shape_2)

    if set_shape:
        meas1.set_shapes(
            {
                DMM.v1.full_name: (n_points,),
                channel_array_instrument.A.dummy_multi_parameter.full_names[0]: (n_points, 5),
                channel_array_instrument.A.dummy_multi_parameter.full_names[1]: (n_points, 5),
                channel_array_instrument.A.dummy_scalar_multi_parameter.full_names[0]: (n_points,),
                channel_array_instrument.A.dummy_scalar_multi_parameter.full_names[1]: (n_points,),
                channel_array_instrument.A.dummy_2d_multi_parameter.full_names[0]: (n_points, 5, 3),
                channel_array_instrument.A.dummy_2d_multi_parameter.full_names[1]: (n_points, 5, 3),
                channel_array_instrument.A.dummy_2d_multi_parameter_2.full_names[0]: (n_points, 5, 3),
                channel_array_instrument.A.dummy_2d_multi_parameter_2.full_names[1]: (n_points, 2, 7),
                channel_array_instrument.A.dummy_array_parameter.full_name: (n_points, 5),
                channel_array_instrument.A.dummy_complex_array_parameter.full_name: (n_points, 5),
                channel_array_instrument.A.dummy_complex.full_name: (n_points,),
                channel_array_instrument.A.dummy_parameter_with_setpoints.full_name: (n_points, pws_shape_1),
                channel_array_instrument.A.dummy_parameter_with_setpoints_complex.full_name: (n_points, pws_shape_1)
             }
        )

    for param in meas_parameters1:
        meas1.register_parameter(param, setpoints=(setpoints_param,))

    meas2 = Measurement()

    meas2.register_parameter(setpoints_param)

    meas_parameters2 = (channel_array_instrument.A.dummy_parameter_with_setpoints_2d,)

    if set_shape:
        meas2.set_shapes(
            {meas_parameters2[0].full_name: (n_points, pws_shape_1, pws_shape_2)})

    for param in meas_parameters2:
        meas2.register_parameter(param, setpoints=(setpoints_param,))

    with meas1.run(
            write_in_background=bg_writing,
            in_memory_cache=in_memory_cache
    ) as datasaver1:
        with meas2.run(
                write_in_background=bg_writing,
                in_memory_cache=in_memory_cache
        ) as datasaver2:

            dataset1 = datasaver1.dataset
            dataset2 = datasaver2.dataset
            _assert_parameter_data_is_identical(dataset1.get_parameter_data(), dataset1.cache.data())
            _assert_parameter_data_is_identical(dataset2.get_parameter_data(), dataset2.cache.data())
            for i, v in enumerate(setpoints_values):
                setpoints_param.set(v)

                meas_vals1 = [(param, param.get()) for param in meas_parameters1[:-2]]
                meas_vals1 += expand_setpoints_helper(meas_parameters1[-2])
                meas_vals1 += expand_setpoints_helper(meas_parameters1[-1])

                datasaver1.add_result((setpoints_param, v),
                                      *meas_vals1)
                datasaver1.flush_data_to_database(block=True)

                meas_vals2 = [(param, param.get()) for param in meas_parameters2]

                datasaver2.add_result((setpoints_param, v),
                                      *meas_vals2)
                datasaver2.flush_data_to_database(block=True)

                _assert_parameter_data_is_identical(dataset1.get_parameter_data(),
                                                    dataset1.cache.data(),
                                                    shaped_partial=set_shape)
                _assert_parameter_data_is_identical(dataset2.get_parameter_data(),
                                                    dataset2.cache.data(),
                                                    shaped_partial=set_shape)
    _assert_parameter_data_is_identical(dataset1.get_parameter_data(),
                                        dataset1.cache.data())
    if in_memory_cache is False:
        assert dataset1.cache._loaded_from_completed_ds is True
    assert dataset1.completed is True
    assert dataset1.cache.live is in_memory_cache
    _assert_parameter_data_is_identical(dataset2.get_parameter_data(),
                                        dataset2.cache.data())
    if in_memory_cache is False:
        assert dataset2.cache._loaded_from_completed_ds is True
    assert dataset2.completed is True
    assert dataset2.cache.live is in_memory_cache
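
The test above exercises the QCoDeS in-memory dataset cache: at every step ``dataset.cache.data()`` should mirror ``dataset.get_parameter_data()``. A minimal sketch of that pattern (not taken from the test itself) follows; it assumes a database and experiment have already been initialised (as in Example #30 further down), and the parameter names ``x`` and ``y`` are placeholders.

import numpy as np
from qcodes import Parameter
from qcodes.dataset.measurements import Measurement

# placeholder software parameters; any settable/gettable QCoDeS parameters work
x = Parameter("x", set_cmd=None, get_cmd=None)
y = Parameter("y", set_cmd=None, get_cmd=None)

meas = Measurement()
meas.register_parameter(x)
meas.register_parameter(y, setpoints=(x,))

with meas.run(in_memory_cache=True) as datasaver:
    for xv in np.linspace(0, 1, 11):
        x.set(xv)
        datasaver.add_result((x, xv), (y, xv ** 2))
        # while the run is live, the cache tracks what has been added so far
        partial = datasaver.dataset.cache.data()

# once the run has completed, the cache agrees with the stored data
assert datasaver.dataset.cache.data().keys() == datasaver.dataset.get_parameter_data().keys()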
Example #29
0
def do2d(
    param_set1: _BaseParameter,
    start1: float,
    stop1: float,
    num_points1: int,
    delay1: float,
    param_set2: _BaseParameter,
    start2: float,
    stop2: float,
    num_points2: int,
    delay2: float,
    *param_meas: ParamMeasT,
    set_before_sweep: Optional[bool] = True,
    enter_actions: ActionsT = (),
    exit_actions: ActionsT = (),
    before_inner_actions: ActionsT = (),
    after_inner_actions: ActionsT = (),
    write_period: Optional[float] = None,
    measurement_name: str = "",
    exp: Optional[Experiment] = None,
    flush_columns: bool = False,
    do_plot: Optional[bool] = None,
    use_threads: Optional[bool] = None,
    additional_setpoints: Sequence[ParamMeasT] = tuple(),
    show_progress: Optional[bool] = None,
) -> AxesTupleListWithDataSet:
    """
    Perform a 2D scan of ``param_set1`` from ``start1`` to ``stop1`` in
    ``num_points1`` steps and ``param_set2`` from ``start2`` to ``stop2`` in
    ``num_points2`` steps, measuring each parameter in ``param_meas`` at
    every point.

    Args:
        param_set1: The QCoDeS parameter to sweep over in the outer loop
        start1: Starting point of sweep in outer loop
        stop1: End point of sweep in the outer loop
        num_points1: Number of points to measure in the outer loop
        delay1: Delay after setting parameter in the outer loop
        param_set2: The QCoDeS parameter to sweep over in the inner loop
        start2: Starting point of sweep in inner loop
        stop2: End point of sweep in the inner loop
        num_points2: Number of points to measure in the inner loop
        delay2: Delay after setting parameter before measurement is performed
        *param_meas: Parameter(s) to measure at each step or functions that
          will be called at each step. The function should take no arguments.
          The parameters and functions are called in the order they are
          supplied.
        set_before_sweep: if True the inner parameter is set back to its first
            value before the outer parameter is swept to its next value.
        enter_actions: A list of functions taking no arguments that will be
            called before the measurement starts
        exit_actions: A list of functions taking no arguments that will be
            called after the measurement ends
        before_inner_actions: Actions executed before each run of the inner loop
        after_inner_actions: Actions executed after each run of the inner loop
        write_period: The time after which the data is actually written to the
            database.
        measurement_name: Name of the measurement. This will be passed down to
            the dataset produced by the measurement. If not given, a default
            value of 'results' is used for the dataset.
        exp: The experiment to use for this measurement.
        flush_columns: If True, the data is written to the database after each
            inner loop (column) finishes, independent of the write period.
        additional_setpoints: A list of setpoint parameters to be registered in
            the measurement but not scanned.
        do_plot: should png and pdf versions of the images be saved after the
            run. If None the setting will be read from ``qcodesrc.json``
        use_threads: If True measurements from each instrument will be done on
            separate threads. If you are measuring from several instruments
            this may give a significant speedup.
        show_progress: should a progress bar be displayed during the
            measurement. If None the setting will be read from ``qcodesrc.json``

    Returns:
        The QCoDeS dataset together with the matplotlib axes and colorbar
        handles produced if plotting is enabled.
    """

    if do_plot is None:
        do_plot = config.dataset.dond_plot
    if show_progress is None:
        show_progress = config.dataset.dond_show_progress

    meas = Measurement(name=measurement_name, exp=exp)
    all_setpoint_params = (
        param_set1,
        param_set2,
    ) + tuple(s for s in additional_setpoints)

    measured_parameters = tuple(param for param in param_meas
                                if isinstance(param, _BaseParameter))

    try:
        loop_shape = tuple(
            1 for _ in additional_setpoints) + (num_points1, num_points2)
        shapes: Shapes = detect_shape_of_measurement(measured_parameters,
                                                     loop_shape)
    except TypeError:
        LOG.exception(f"Could not detect shape of {measured_parameters}, "
                      f"falling back to unknown shape.")
        shapes = None

    _register_parameters(meas, all_setpoint_params)
    _register_parameters(meas,
                         param_meas,
                         setpoints=all_setpoint_params,
                         shapes=shapes)
    _set_write_period(meas, write_period)
    _register_actions(meas, enter_actions, exit_actions)

    original_delay1 = param_set1.post_delay
    original_delay2 = param_set2.post_delay

    param_set1.post_delay = delay1
    param_set2.post_delay = delay2

    with _catch_keyboard_interrupts() as interrupted, meas.run() as datasaver:
        dataset = datasaver.dataset
        additional_setpoints_data = process_params_meas(additional_setpoints)
        setpoints1 = np.linspace(start1, stop1, num_points1)
        for set_point1 in tqdm(setpoints1, disable=not show_progress):
            if set_before_sweep:
                param_set2.set(start2)

            param_set1.set(set_point1)

            for action in before_inner_actions:
                action()

            setpoints2 = np.linspace(start2, stop2, num_points2)

            # flush to prevent unflushed print's to visually interrupt tqdm bar
            # updates
            sys.stdout.flush()
            sys.stderr.flush()
            for set_point2 in tqdm(setpoints2,
                                   disable=not show_progress,
                                   leave=False):
                # skip first inner set point if `set_before_sweep`
                if set_point2 == start2 and set_before_sweep:
                    pass
                else:
                    param_set2.set(set_point2)

                datasaver.add_result(
                    (param_set1, set_point1), (param_set2, set_point2),
                    *process_params_meas(param_meas, use_threads=use_threads),
                    *additional_setpoints_data)

            for action in after_inner_actions:
                action()
            if flush_columns:
                datasaver.flush_data_to_database()

    param_set1.post_delay = original_delay1
    param_set2.post_delay = original_delay2

    return _handle_plotting(dataset, do_plot, interrupted())
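
A hedged usage sketch for the ``do2d`` defined above (not part of the original example): the parameters ``gate_x``, ``gate_y`` and ``lockin_amplitude`` are placeholders for whatever QCoDeS parameters your station provides, and a database/experiment is assumed to exist already.

# positional arguments follow the signature above:
# (param, start, stop, num_points, delay) for the outer and inner loops,
# followed by the parameter(s) to measure
dataset, axes, cbars = do2d(
    gate_x, -1.0, 1.0, 51, 0.01,     # outer loop
    gate_y, -0.5, 0.5, 101, 0.01,    # inner loop
    lockin_amplitude,                # measured parameter(s)
    set_before_sweep=True,
    do_plot=False,
)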
Example #30
0
# Create a database
initialise_or_create_database_at("~/teste.db")

exp = load_or_create_experiment(experiment_name='osc realtime intro 2',
                                sample_name="osc realtime 1")


def calculateGain():
    # Estimate the amplitude from the second half of the acquired waveform
    Y = osc.ch3.wavesample()
    n_mean = int(len(Y) / 2)
    value = 2 * np.mean(Y[n_mean:])
    # Convert to dB: np.log10 (not the natural log np.log) gives a decibel value
    value = 10 * np.log10(np.power(value, 2))
    return value


gain = Parameter('gain', label='gain', unit='dB', get_cmd=calculateGain)

# The actual measurement
meas = Measurement(exp=exp, station=station)

meas.register_parameter(PSG1.freq)
meas.register_parameter(gain, setpoints=[PSG1.freq])

with meas.run(write_in_background=True) as datasaver:
    for aFreq in np.linspace(1, 500, 500):
        PSG1.freq(aFreq)
        PSG2.freq(aFreq)
        time.sleep(1)  # give the sources time to settle before measuring
        datasaver.add_result((gain, gain()), (PSG1.freq, PSG1.freq()))
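
A side note on the gain calculation above: with the ``np.log10`` form, ``10 * log10(value**2)`` equals the familiar amplitude formula ``20 * log10(|value|)``. A standalone check (the value 0.5 V is just an illustrative number):

import numpy as np

value = 0.5  # hypothetical mean amplitude in volts
assert np.isclose(10 * np.log10(value ** 2), 20 * np.log10(abs(value)))  # both are about -6.02 dB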