Ejemplo n.º 1
0
def test_cache_2d(experiment, DAC, DMM, n_points_outer, n_points_inner,
                  bg_writing, channel_array_instrument, in_memory_cache):
    """Verify the in-memory cache mirrors the stored data at every step of a
    2D sweep over a wide mix of parameter types."""
    meas = Measurement()

    meas.register_parameter(DAC.ch1)
    meas.register_parameter(DAC.ch2)

    chan = channel_array_instrument.A
    meas_parameters = (
        DMM.v1,
        chan.dummy_multi_parameter,
        chan.dummy_scalar_multi_parameter,
        chan.dummy_2d_multi_parameter,
        chan.dummy_2d_multi_parameter_2,
        chan.dummy_array_parameter,
        chan.dummy_complex_array_parameter,
        chan.dummy_complex,
        chan.dummy_parameter_with_setpoints,
        chan.dummy_parameter_with_setpoints_complex,
    )
    # Configure the internal setpoint axis of the setpoint-based parameters.
    chan.dummy_start(0)
    chan.dummy_stop(10)
    chan.dummy_n_points(10)
    for parameter in meas_parameters:
        meas.register_parameter(parameter, setpoints=(DAC.ch1, DAC.ch2))

    n_rows_written = 0
    with meas.run(write_in_background=bg_writing,
                  in_memory_cache=in_memory_cache) as datasaver:
        dataset = datasaver.dataset
        # Before any result is added the cache must equal the (empty) store.
        _assert_parameter_data_is_identical(dataset.get_parameter_data(),
                                            dataset.cache.data())
        for v1 in np.linspace(-1, 1, n_points_outer):
            for v2 in np.linspace(-1, 1, n_points_inner):
                DAC.ch1.set(v1)
                DAC.ch2.set(v2)
                results = [(p, p.get()) for p in meas_parameters]
                datasaver.add_result((DAC.ch1, v1), (DAC.ch2, v2), *results)
                datasaver.flush_data_to_database(block=True)
                n_rows_written += 1
                _assert_parameter_data_is_identical(
                    dataset.get_parameter_data(), dataset.cache.data())
    _assert_parameter_data_is_identical(dataset.get_parameter_data(),
                                        dataset.cache.data())
Ejemplo n.º 2
0
def linear_trace(independents,
                 dependents,
                 sweep_param,
                 values,
                 delay=None,
                 exp=None,
                 station=None,
                 fn_before=None,
                 fn_after=None):
    """
    Sweep a single variable over a linear range. Allows other params
    to be defined as dependent on this param and measured / saved.
    """
    meas = Measurement(exp=exp, station=station)
    for param in independents:
        meas.register_parameter(param)
    for param in dependents:
        meas.register_parameter(param, setpoints=(sweep_param, ))

    if callable(fn_before):
        meas.add_before_run(fn_before, ())
    if callable(fn_after):
        meas.add_after_run(fn_after, ())

    # Mutable [parameter, value] pairs, refreshed at every setpoint.
    save_list = [[param, None] for param in independents + dependents]

    with meas.run() as datasaver:
        for setpoint in values:
            sweep_param.set(setpoint)
            if delay is not None:
                time.sleep(delay)

            for entry in save_list:
                entry[1] = entry[0].get()

            datasaver.add_result(*save_list)

        run_id = datasaver.run_id
    return run_id
Ejemplo n.º 3
0
def test_column_mismatch(two_empty_temp_db_connections, some_interdeps, inst):
    """
    Test insertion of runs with no metadata and no snapshot into a DB already
    containing a run that has both
    """
    source_conn, target_conn = two_empty_temp_db_connections
    source_path = path_to_dbfile(source_conn)
    target_path = path_to_dbfile(target_conn)

    target_exp = Experiment(conn=target_conn)

    # Populate the target DB with a run that carries a station snapshot
    # and an explicit piece of metadata.
    station = Station(inst)
    meas = Measurement(exp=target_exp, station=station)
    meas.register_parameter(inst.back)
    meas.register_parameter(inst.plunger)
    meas.register_parameter(inst.cutter, setpoints=(inst.back, inst.plunger))

    with meas.run() as datasaver:
        for bv in [1, 2, 3]:
            for pv in [-3, -2.5, 0]:
                datasaver.add_result((inst.back, bv),
                                     (inst.plunger, pv),
                                     (inst.cutter, bv + pv))
    datasaver.dataset.add_metadata('meta_tag', 'meta_value')

    # The source DB holds a bare run: no snapshot, no metadata.
    Experiment(conn=source_conn)
    source_ds = DataSet(conn=source_conn)
    source_ds.set_interdependencies(some_interdeps[1])
    source_ds.mark_started()
    source_ds.add_results([{name: 2.1
                            for name in some_interdeps[1].names}])
    source_ds.mark_completed()

    extract_runs_into_db(source_path, target_path, 1)

    # The extracted run lands as run_id 2 in the target DB.
    target_copied_ds = DataSet(conn=target_conn, run_id=2)
    assert target_copied_ds.the_same_dataset_as(source_ds)
Ejemplo n.º 4
0
    def generate_local_run(dbpath: Path) -> str:
        """Create a database at *dbpath*, run one 250-point sweep in it and
        return the guid of the resulting dataset."""
        with initialised_database_at(str(dbpath)):
            new_experiment(sample_name="fivehundredtest_sample",
                           name="fivehundredtest_name")

            voltage = Parameter('Voltage', set_cmd=None)
            current = Parameter('Current', get_cmd=np.random.randn)

            meas = Measurement()
            meas.register_parameter(voltage).register_parameter(
                current, setpoints=[voltage])

            with meas.run() as datasaver:
                for setpoint in np.linspace(0, 2, 250):
                    voltage(setpoint)
                    datasaver.add_result((voltage, cast(float, voltage())),
                                         (current, cast(float, current())))
            guid = datasaver.dataset.guid
            datasaver.flush_data_to_database(block=True)
        return guid
Ejemplo n.º 5
0
def test_cache_complex_array_param_in_1d(experiment, DAC,
                                         channel_array_instrument, n_points,
                                         bg_writing, storage_type,
                                         outer_param_type, in_memory_cache):
    """Check cache shapes and contents while sweeping a complex array
    parameter against either a numeric or a text outer setpoint."""
    param = channel_array_instrument.A.dummy_complex_array_parameter
    meas = Measurement()
    if outer_param_type == 'numeric':
        outer_param = DAC.ch1
        outer_setpoints = np.linspace(-1, 1, n_points)
        outer_storage_type = storage_type
    else:
        # Text setpoints are always stored with paramtype 'text'.
        outer_param = channel_array_instrument.A.dummy_text
        outer_setpoints = ['A', 'B', 'C', 'D']
        outer_storage_type = 'text'
    meas.register_parameter(outer_param, paramtype=outer_storage_type)
    meas.register_parameter(param,
                            setpoints=(outer_param, ),
                            paramtype=storage_type)
    array_used = _array_param_used_in_tree(meas)

    with meas.run(write_in_background=bg_writing,
                  in_memory_cache=in_memory_cache) as datasaver:
        dataset = datasaver.dataset
        for row, setpoint in enumerate(outer_setpoints, start=1):
            datasaver.add_result((outer_param, setpoint),
                                 (param, param.get()))
            datasaver.flush_data_to_database(block=True)
            cached = dataset.cache.data()

            # Array storage keeps one array per row; otherwise the data is
            # flattened into a single axis.
            if array_used:
                expected_shape = (row, ) + param.shape
            else:
                expected_shape = row * np.prod(param.shape)

            by_param = cached[param.full_name]
            assert by_param[param.full_name].shape == expected_shape
            assert by_param[outer_param.full_name].shape == expected_shape
            for setpoint_name in param.setpoint_full_names:
                assert by_param[setpoint_name].shape == expected_shape
            _assert_parameter_data_is_identical(dataset.get_parameter_data(),
                                                cached)
    _assert_parameter_data_is_identical(dataset.get_parameter_data(),
                                        dataset.cache.data())
Ejemplo n.º 6
0
def varlen_array_in_scalar_dataset(experiment):
    """Yield a dataset whose array parameter changes length (1..9) from one
    row to the next; the connection is closed on teardown."""
    meas = Measurement()
    scalar_param = Parameter('scalarparam', set_cmd=None)
    array_param = ArraySetPointParam()
    meas.register_parameter(scalar_param)
    meas.register_parameter(array_param,
                            setpoints=(scalar_param, ),
                            paramtype='array')
    # Fixed seed so the stored payload is reproducible across test runs.
    np.random.seed(0)
    with meas.run() as datasaver:
        for length in range(1, 10):
            scalar_param.set(length)
            array_param.setpoints = (np.arange(length), )
            datasaver.add_result((scalar_param, scalar_param.get()),
                                 (array_param, np.random.rand(length)))
    try:
        yield datasaver.dataset
    finally:
        datasaver.dataset.conn.close()
Ejemplo n.º 7
0
    def setup(self, bench_param):
        """Benchmark setup: create a throw-away DB, an experiment and a
        measurement with three setpoint and two dependent parameters, enter
        the run context and pre-generate random values to save.

        Args:
            bench_param: dict with at least 'paramtype' (storage type for
                all registered parameters) and 'n_values' (length of each
                pre-generated value array).
        """
        # Init DB
        self.tmpdir = tempfile.mkdtemp()
        qcodes.config["core"]["db_location"] = os.path.join(
            self.tmpdir, 'temp.db')
        qcodes.config["core"]["db_debug"] = False
        initialise_database()

        # Create experiment
        self.experiment = new_experiment("test-experiment",
                                         sample_name="test-sample")

        # Create measurement
        meas = Measurement(self.experiment)

        x1 = ManualParameter('x1')
        x2 = ManualParameter('x2')
        x3 = ManualParameter('x3')
        y1 = ManualParameter('y1')
        y2 = ManualParameter('y2')

        meas.register_parameter(x1, paramtype=bench_param['paramtype'])
        meas.register_parameter(x2, paramtype=bench_param['paramtype'])
        meas.register_parameter(x3, paramtype=bench_param['paramtype'])
        meas.register_parameter(y1,
                                setpoints=[x1, x2, x3],
                                paramtype=bench_param['paramtype'])
        meas.register_parameter(y2,
                                setpoints=[x1, x2, x3],
                                paramtype=bench_param['paramtype'])

        self.parameters = [x1, x2, x3, y1, y2]

        # Create the Runner context manager
        self.runner = meas.run()

        # Enter Runner and create DataSaver
        # NOTE(review): the context is entered manually here; presumably a
        # matching teardown exits it elsewhere — confirm.
        self.datasaver = self.runner.__enter__()

        # Create values for parameters
        # NOTE(review): self.values is appended to but never initialised in
        # this method; if it is not reset elsewhere (e.g. a class attribute
        # or teardown), repeated setup calls will keep growing it — confirm.
        for _ in range(len(self.parameters)):
            self.values.append(np.random.rand(bench_param['n_values']))
def test_cache_1d_every_other_point(experiment, DAC, DMM, n_points, bg_writing,
                                    channel_array_instrument, setpoints_type):
    """The cache must track the stored data even when one dependent parameter
    is only saved on every other setpoint."""
    setpoints_param, setpoints_values = _prepare_setpoints_1d(
        DAC, channel_array_instrument, n_points, setpoints_type)

    meas = Measurement()
    meas.register_parameter(setpoints_param)

    meas_parameters = (
        DMM.v1,
        channel_array_instrument.A.temperature,
        channel_array_instrument.B.temperature,
    )
    for parameter in meas_parameters:
        meas.register_parameter(parameter, setpoints=(setpoints_param,))

    with meas.run(write_in_background=bg_writing) as datasaver:
        dataset = datasaver.dataset
        _assert_parameter_data_is_identical(dataset.get_parameter_data(),
                                            dataset.cache.data())
        for index, setpoint in enumerate(setpoints_values):
            setpoints_param.set(setpoint)

            results = [(param, param.get()) for param in meas_parameters]

            # ChanB's temperature is only saved on even iterations.
            if index % 2 == 0:
                datasaver.add_result((setpoints_param, setpoint), *results)
            else:
                datasaver.add_result((setpoints_param, setpoint),
                                     *results[0:2])
            datasaver.flush_data_to_database(block=True)
            data = dataset.cache.data()
            chan_a = data['dummy_channel_inst_ChanA_temperature']
            chan_b = data['dummy_channel_inst_ChanB_temperature']
            assert len(
                chan_a['dummy_channel_inst_ChanA_temperature']) == index + 1
            assert len(
                chan_b['dummy_channel_inst_ChanB_temperature']) == index // 2 + 1
            _assert_parameter_data_is_identical(dataset.get_parameter_data(),
                                                data)
    _assert_parameter_data_is_identical(dataset.get_parameter_data(),
                                        dataset.cache.data())
    assert dataset.cache._loaded_from_completed_ds is True
    _assert_parameter_data_is_identical(dataset.get_parameter_data(),
                                        dataset.cache.data())
Ejemplo n.º 9
0
    def take_buffer_keysight(self):
        """ takes a frequency sweep, not setting any values on the hardware """

        from qcodes import Station

        # Wrap the VNA in a Station so its snapshot is stored with the run.
        station = Station()
        station.add_component(self.vna)

        # import pdb; pdb.set_trace()
        meas = Measurement(station=station)  # qcodes measurement

        # self.vna.points.set(20)
        # Disable auto-sweep so reading the traces does not retrigger one.
        self.vna.auto_sweep(False)

        meas.register_parameter(self.vna.real)
        meas.register_parameter(self.vna.imaginary)

        meas.register_parameter(self.vna.magnitude)
        meas.register_parameter(self.vna.phase)

        # actually get the data
        with meas.run(
        ) as datasaver:  # try to run the measurement (? but this doesn't yet write to the database)
            self.vna.active_trace.set(1)  # there are Tr1 and Tr2
            # self.vna.traces.tr1.run_sweep()

            imag = self.vna.imaginary()
            real = self.vna.real()

            mag = self.vna.magnitude()
            phase = self.vna.phase()

            datasaver.add_result(
                (self.vna.magnitude, mag), (self.vna.phase, phase),
                (self.vna.real, real), (self.vna.imaginary, imag))

            dataid = datasaver.run_id

        # NOTE(review): pd and snapshot are fetched but never used or
        # returned — presumably left over from debugging; confirm and remove.
        pd = datasaver.dataset.get_parameter_data()
        snapshot = datasaver.dataset.snapshot

        plot_by_id(dataid)
Ejemplo n.º 10
0
def test_datasaver_1d(experiment, DAC, DMM, caplog, n_points):
    """Saving a different number of points than the declared shape should
    still load fine, but emit two warnings when the shape is exceeded."""
    meas = Measurement()
    meas.register_parameter(DAC.ch1)
    meas.register_parameter(DMM.v1, setpoints=(DAC.ch1, ))

    n_points_expected = 5
    meas.set_shapes({DMM.v1.full_name: (n_points_expected, )})

    with meas.run() as datasaver:
        for set_v in np.linspace(0, 1, n_points):
            DAC.ch1()
            datasaver.add_result((DAC.ch1, set_v), (DMM.v1, DMM.v1()))

    ds = datasaver.dataset
    caplog.clear()
    data = ds.get_parameter_data()

    # The loaded data always reflects what was actually written.
    for dataarray in data[DMM.v1.full_name].values():
        assert dataarray.shape == (n_points, )

    if n_points == n_points_expected:
        assert not caplog.record_tuples
    elif n_points > n_points_expected:
        # One warning per parameter in the tree (dependent and setpoint).
        assert len(caplog.record_tuples) == 2
        exp_module = "qcodes.dataset.sqlite.queries"
        exp_level = logging.WARNING
        exp_msg = ("Tried to set data shape for {} in "
                   "dataset {} "
                   "from metadata when loading "
                   "but found inconsistent lengths {} and {}")
        expected = [
            (exp_module, exp_level,
             exp_msg.format(DMM.v1.full_name, DMM.v1.full_name,
                            n_points, n_points_expected)),
            (exp_module, exp_level,
             exp_msg.format(DAC.ch1.full_name, DMM.v1.full_name,
                            n_points, n_points_expected)),
        ]
        assert caplog.record_tuples[0] == expected[0]
        assert caplog.record_tuples[1] == expected[1]
Ejemplo n.º 11
0
def test_string(experiment):
    """
    Test that we can save text into database via Measurement API
    """
    p = qc.Parameter('p',
                     label='String parameter',
                     unit='',
                     get_cmd=None,
                     set_cmd=None,
                     initial_value='some text')

    meas = Measurement(experiment)
    # Text must be registered with paramtype='text' to be storable.
    meas.register_parameter(p, paramtype='text')

    with meas.run() as datasaver:
        datasaver.add_result((p, "some text"))

    stored = load_by_id(datasaver.run_id)
    assert stored.get_parameter_data()["p"]["p"] == np.array(["some text"])
Ejemplo n.º 12
0
def do0d(*param_meas: Union[_BaseParameter, Callable[[], None]],
         do_plot: bool = True) -> AxesTupleListWithRunId:
    """
    Perform a measurement of a single parameter. This is probably most
    useful for an ArrayParamter that already returns an array of data points

    Args:
        *param_meas: Parameter(s) to measure at each step or functions that
          will be called at each step. The function should take no arguments.
          The parameters and functions are called in the order they are
          supplied.
        do_plot: should png and pdf versions of the images be saved after the
            run.

    Returns:
        The run_id of the DataSet created, plus the axes and colorbars of the
        plot (both ``None`` when ``do_plot`` is False).
    """
    meas = Measurement()
    output = []

    # Only genuine parameters can be registered and saved; bare callables
    # are merely invoked at measurement time. (Previously callables were
    # passed to register_parameter and saved with a value of None, which
    # cannot work.)
    for parameter in param_meas:
        if isinstance(parameter, _BaseParameter):
            meas.register_parameter(parameter)
            output.append([parameter, None])

    with meas.run() as datasaver:
        out_index = 0
        for parameter in param_meas:
            if isinstance(parameter, _BaseParameter):
                output[out_index][1] = parameter.get()
                out_index += 1
            elif callable(parameter):
                parameter()
        datasaver.add_result(*output)
    dataid = datasaver.run_id

    if do_plot is True:
        ax, cbs = _save_image(datasaver)
    else:
        # Fixed: a stray trailing comma previously made ax the tuple (None,)
        # instead of None, inconsistent with cbs.
        ax = None
        cbs = None

    return dataid, ax, cbs
Ejemplo n.º 13
0
def time_sweep(dependents,
               event_list,
               fn_before=None,
               fn_after=None,
               exp=None,
               station=None):
    """
    Takes a list of Events which can run some event and then measure
    for a period of time. Eg
    Set vtun to 3v for 5 secs, then bump to 10v for 2 secs, then
    back to 3v for another 5 secs.
    """
    meas = Measurement(exp=exp, station=station)
    meas.register_custom_parameter("time", label="Time", unit="S")

    if callable(fn_before):
        meas.add_before_run(fn_before, ())
    if callable(fn_after):
        meas.add_after_run(fn_after, ())

    for dep in dependents:
        meas.register_parameter(dep, setpoints=("time", ))

    with meas.run() as datasaver:
        start = time.time()
        for event in event_list:
            begin = time.time()
            if callable(event.fn_before):
                event.fn_before()
            # Short settle time before sampling starts.
            time.sleep(0.5)
            while time.time() - begin < event.time:
                results = [["time", time.time() - start]]
                results.extend([dep, dep.get()] for dep in dependents)
                datasaver.add_result(*results)
                time.sleep(event.step)
        run_id = datasaver.run_id
    return run_id
Ejemplo n.º 14
0
    def generate_local_exp(dbpath: Path) -> List[str]:
        """Create a database at *dbpath*, run two measurements in a single
        experiment and return the guids of both datasets."""
        with initialised_database_at(str(dbpath)):
            guids = []
            exp = load_or_create_experiment(experiment_name="test_guid")

            voltage = Parameter('Voltage', set_cmd=None)
            current = Parameter('Current', get_cmd=np.random.randn)

            meas = Measurement(exp=exp)
            meas.register_parameter(voltage).register_parameter(
                current, setpoints=[voltage])

            # Measure twice so that two run ids and two guids are produced.
            for run in range(2):
                with meas.run() as datasaver:
                    for v in np.linspace(0 * run, 2 * run, 50):
                        voltage(v)
                        datasaver.add_result(
                            (voltage, cast(float, voltage())),
                            (current, cast(float, current())))
                guids.append(datasaver.dataset.guid)
        return guids
Ejemplo n.º 15
0
def test_snapshot_creation_for_types_not_supported_by_builtin_json(experiment):
    """
    Test that `Measurement`/`Runner`/`DataSaver` infrastructure
    successfully dumps station snapshots in JSON format in cases when the
    snapshot contains data of types that are not supported by python builtin
    `json` module, for example, numpy scalars.
    """
    p1 = ManualParameter('p_np_int32', initial_value=numpy.int32(5))
    p2 = ManualParameter('p_np_float16', initial_value=numpy.float16(5.0))
    p3 = ManualParameter('p_np_array',
                         initial_value=numpy.meshgrid((1, 2), (3, 4)))
    p4 = ManualParameter('p_np_bool', initial_value=numpy.bool_(False))

    station = Station(p1, p2, p3, p4)
    measurement = Measurement(experiment, station)

    # we need at least 1 parameter to be able to run the measurement
    measurement.register_custom_parameter('dummy')

    # Entering and leaving the run context is enough to snapshot the
    # station into the database.
    with measurement.run() as data_saver:
        pass

    snapshot = data_saver.dataset.snapshot
    station_params = snapshot['station']['parameters']

    # numpy scalars must round-trip to plain python values
    assert 5 == station_params['p_np_int32']['value']
    assert 5 == station_params['p_np_int32']['raw_value']

    assert 5.0 == station_params['p_np_float16']['value']
    assert 5.0 == station_params['p_np_float16']['raw_value']

    # numpy arrays become nested lists
    lst = [[[1, 2], [1, 2]], [[3, 3], [4, 4]]]
    assert lst == station_params['p_np_array']['value']
    assert lst == station_params['p_np_array']['raw_value']

    # numpy bools become python bools
    assert False is station_params['p_np_bool']['value']
    assert False is station_params['p_np_bool']['raw_value']
Ejemplo n.º 16
0
def do0d(*param_meas: ParamMeasT,
         write_period: Optional[float] = None,
         do_plot: bool = True) -> AxesTupleListWithDataSet:
    """
    Perform a measurement of a single parameter. This is probably most
    useful for an ArrayParameter that already returns an array of data points

    Args:
        *param_meas: Parameter(s) to measure at each step or functions that
          will be called at each step. The function should take no arguments.
          The parameters and functions are called in the order they are
          supplied.
        write_period: The time after which the data is actually written to the
            database.
        do_plot: should png and pdf versions of the images be saved after the
            run.

    Returns:
        The QCoDeS dataset.
    """
    meas = Measurement()

    measured_parameters = tuple(
        p for p in param_meas if isinstance(p, _BaseParameter))

    # Shape detection is best-effort: unknown shapes are allowed.
    try:
        shapes: Shapes = detect_shape_of_measurement(measured_parameters)
    except TypeError:
        LOG.exception(f"Could not detect shape of {measured_parameters} "
                      f"falling back to unknown shape.")
        shapes = None

    _register_parameters(meas, param_meas, shapes=shapes)
    _set_write_period(meas, write_period)

    with meas.run() as datasaver:
        datasaver.add_result(*_process_params_meas(param_meas))
        dataset = datasaver.dataset

    return _handle_plotting(dataset, do_plot)
Ejemplo n.º 17
0
def test_string_with_wrong_paramtype(experiment):
    """
    Test that an exception occurs when saving string data if when registering a
    string parameter the paramtype was not set to 'text'
    """
    p = qc.Parameter('p',
                     label='String parameter',
                     unit='',
                     get_cmd=None,
                     set_cmd=None,
                     initial_value='some text')

    meas = Measurement(experiment)
    # intentionally forget `paramtype='text'`, so that the default 'numeric'
    # is used, and an exception is raised later
    meas.register_parameter(p)

    msg = "It is not possible to save a string value for parameter 'p' " \
          "because its type class is 'numeric', not 'text'."
    with meas.run() as datasaver:
        with pytest.raises(ValueError, match=msg):
            datasaver.add_result((p, "some text"))
Ejemplo n.º 18
0
def test_string_with_wrong_paramtype(experiment):
    """
    Test that an exception occurs when saving string data if when registering a
    string parameter the paramtype was not set to 'text'
    """
    p = qc.Parameter('p',
                     label='String parameter',
                     unit='',
                     get_cmd=None,
                     set_cmd=None,
                     initial_value='some text')

    meas = Measurement(experiment)
    # intentionally forget `paramtype='text'`, so that the default 'numeric'
    # is used, and an exception is raised later
    meas.register_parameter(p)

    msg = re.escape('Parameter p is of type "numeric", but got a '
                    "result of type <U9 (some text).")
    with meas.run() as datasaver:
        with pytest.raises(ValueError, match=msg):
            datasaver.add_result((p, "some text"))
def test_subscribers_called_at_exiting_context_if_queue_is_not_empty(
        experiment, DAC):
    """
    Upon quitting the "run()" context, verify that in case the queue is
    not empty, the subscriber's callback is still called on that data.
    This situation is created by setting the minimum length of the queue
    to a number that is larger than the number of value written to the dataset.
    """
    def collect_x_vals(results, length, state):
        """
        Collects first elements of results tuples in *state*
        """
        state += [row[0] for row in results]

    meas = Measurement(exp=experiment)
    meas.register_parameter(DAC.ch1)

    collected_x_vals = []
    meas.add_subscriber(collect_x_vals, state=collected_x_vals)

    given_x_vals = [0, 1, 2, 3]

    with meas.run() as datasaver:
        # Set the minimum queue size of the subscriber to more that
        # the total number of values being added to the dataset;
        # this way the subscriber callback is not called before
        # we exit the "run()" context.
        subscriber = next(iter(datasaver.dataset.subscribers.values()))
        subscriber.min_queue_length = len(given_x_vals) + 1

        for x in given_x_vals:
            datasaver.add_result((DAC.ch1, x))
            # Verify that the subscriber callback is not called yet
            assert collected_x_vals == []

    # Verify that the subscriber callback is finally called
    assert collected_x_vals == given_x_vals
Ejemplo n.º 20
0
def test_datasaver_array_parameters_channel(channel_array_instrument, DAC, N,
                                            storage_type, bg_writing):
    """Saving an array parameter N times should yield N*M flat points for
    every parameter in the tree, where M is the array length."""
    array_param = channel_array_instrument.A.dummy_array_parameter
    meas = Measurement()
    meas.register_parameter(DAC.ch1)
    meas.register_parameter(array_param,
                            setpoints=[DAC.ch1],
                            paramtype=storage_type)

    M = array_param.shape[0]
    with meas.run(write_in_background=bg_writing) as datasaver:
        for set_v in np.linspace(0, 0.01, N):
            datasaver.add_result((DAC.ch1, set_v),
                                 (array_param, array_param.get()))

    datadicts = _get_data_from_ds(datasaver.dataset)
    # one dependent parameter
    assert len(datadicts) == 1
    tree = datadicts[0]
    assert len(tree) == len(meas.parameters)
    for datadict in tree:
        assert datadict['data'].shape == (N * M, )
Ejemplo n.º 21
0
def test_list_of_strings(experiment):
    """
    Test saving list of strings via DataSaver
    """
    p_values = ["X_Y", "X_X", "X_I", "I_I"]
    list_of_strings = list(np.random.choice(p_values, (10,)))

    p = qc.Parameter('p', label='String parameter', unit='', get_cmd=None,
                     set_cmd=None, initial_value='X_Y')

    meas = Measurement(experiment)
    meas.register_parameter(p, paramtype='text')

    # The whole list is saved in a single add_result call.
    with meas.run() as datasaver:
        datasaver.add_result((p, list_of_strings))

    test_set = load_by_id(datasaver.run_id)

    try:
        expected = [[item] for item in list_of_strings]
        assert expected == test_set.get_data("p")
    finally:
        test_set.conn.close()
Ejemplo n.º 22
0
def save_2Ddata_with_qcodes(data_generator_method, metadata_generator_method):
    """Save one 2D current/sensor map through the QCoDeS Measurement API,
    attach optional nanotune metadata, and return the datasaver."""
    meas = Measurement()
    # Voltage axes (independent parameters).
    for name, label in (("v_x", "voltage x"), ("v_y", "voltage y")):
        meas.register_custom_parameter(name,
                                       paramtype="numeric",
                                       label=label,
                                       unit="V")
    # Measured signals, both dependent on the two voltage axes.
    for name, label in (("current", "dc current"), ("sensor", "dc sensor")):
        meas.register_custom_parameter(name,
                                       paramtype="numeric",
                                       label=label,
                                       unit="A",
                                       setpoints=("v_x", "v_y"))

    with meas.run() as datasaver:
        xv, yv, ddot, sensor = data_generator_method()
        datasaver.add_result(("v_x", xv), ("v_y", yv), ("current", ddot))
        datasaver.add_result(("v_x", xv), ("v_y", yv), ("sensor", sensor))

    datasaver.dataset.add_metadata("snapshot", json.dumps({}))
    if metadata_generator_method is not None:
        nt_metadata, current_label = metadata_generator_method()
        datasaver.dataset.add_metadata(nt.meta_tag, json.dumps(nt_metadata))
        for label, value in current_label.items():
            datasaver.dataset.add_metadata(label, value)

    return datasaver
Ejemplo n.º 23
0
def test_cache_2d_num_with_multiple_storage_types(experiment, DAC, DMM,
                                                  n_points_outer,
                                                  n_points_inner, bg_writing,
                                                  storage_type,
                                                  in_memory_cache):
    """Cache shapes must track the number of rows written for a 2D numeric
    sweep, for both array and scalar storage types."""
    meas = Measurement()

    meas.register_parameter(DAC.ch1, paramtype=storage_type)
    meas.register_parameter(DAC.ch2, paramtype=storage_type)
    meas.register_parameter(DMM.v1,
                            setpoints=(DAC.ch1, DAC.ch2),
                            paramtype=storage_type)
    array_used = _array_param_used_in_tree(meas)

    n_rows_written = 0
    with meas.run(write_in_background=bg_writing,
                  in_memory_cache=in_memory_cache) as datasaver:
        dataset = datasaver.dataset
        _assert_parameter_data_is_identical(dataset.get_parameter_data(),
                                            dataset.cache.data())
        for v1 in np.linspace(-1, 1, n_points_outer):
            for v2 in np.linspace(-1, 1, n_points_inner):
                DAC.ch1.set(v1)
                DAC.ch2.set(v2)
                datasaver.add_result((DAC.ch1, v1), (DAC.ch2, v2),
                                     (DMM.v1, DMM.v1.get()))
                datasaver.flush_data_to_database(block=True)
                n_rows_written += 1
                data = dataset.cache.data()
                # Array storage yields one (length-1) array per row.
                expected = ((n_rows_written, 1) if array_used
                            else (n_rows_written, ))
                v1_tree = data[DMM.v1.full_name]
                assert v1_tree[DMM.v1.full_name].shape == expected
                assert v1_tree[DAC.ch1.full_name].shape == expected
                _assert_parameter_data_is_identical(
                    dataset.get_parameter_data(), data)
    _assert_parameter_data_is_identical(dataset.get_parameter_data(),
                                        dataset.cache.data())
Ejemplo n.º 24
0
def test_station_snapshot_during_measurement(experiment, dac, dmm,
                                             pass_station):
    """
    The station snapshot taken at run start must be retrievable from the
    dataset via ``get_metadata('snapshot')``, ``snapshot_raw`` and
    ``snapshot``, whether the station is passed to ``Measurement``
    explicitly or picked up as ``Station.default``.
    """
    station = Station()
    station.add_component(dac)
    station.add_component(dmm, 'renamed_dmm')

    snapshot_of_station = station.snapshot()

    # When no station argument is given, `Measurement` is expected to
    # fall back to `Station.default`, which is the instance created above.
    if pass_station:
        measurement = Measurement(experiment, station)
    else:
        measurement = Measurement(experiment)

    measurement.register_parameter(dac.ch1)
    measurement.register_parameter(dmm.v1, setpoints=[dac.ch1])

    with measurement.run() as data_saver:
        data_saver.add_result((dac.ch1, 7), (dmm.v1, 5))

    expected_snapshot = {'station': snapshot_of_station}

    # 1. `get_metadata('snapshot')` returns the JSON-serialized snapshot.
    json_snapshot_from_dataset = data_saver.dataset.get_metadata('snapshot')
    assert json.loads(json_snapshot_from_dataset) == expected_snapshot

    # 2. `snapshot_raw` exposes the identical JSON string.
    assert data_saver.dataset.snapshot_raw == json_snapshot_from_dataset

    # 3. `snapshot` exposes the deserialized dictionary.
    assert data_saver.dataset.snapshot == expected_snapshot
# Ejemplo n.º 25
# 0
def test_datasaver_multidimarrayparameter_as_array(SpectrumAnalyzer,
                                                   bg_writing):
    """
    Storing a multi-dimensional ArrayParameter with paramtype='array'
    must save the raw data plus one auto-generated setpoint column per
    axis, each laid out in row-major order.
    """
    array_param = SpectrumAnalyzer.multidimspectrum
    meas = Measurement()
    meas.register_parameter(array_param, paramtype='array')
    # The data column itself plus its three setpoint axes.
    assert len(meas.parameters) == 4
    inserted_data = array_param.get()
    with meas.run(write_in_background=bg_writing) as datasaver:
        datasaver.add_result((array_param, inserted_data))

    expected_shape = (1, 100, 50, 20)
    n_elements = np.prod(expected_shape)

    datadicts = _get_data_from_ds(datasaver.dataset)
    assert len(datadicts) == 1
    for datadict_list in datadicts:
        assert len(datadict_list) == 4
        for datadict in datadict_list:
            # Flatten in place so every column compares as 1D.
            datadict['data'].shape = (n_elements, )
            name = datadict['name']
            if name == "dummy_SA_Frequency0":
                # Outermost axis: each value spans all inner elements.
                axis = np.linspace(array_param.start, array_param.stop,
                                   array_param.npts[0])
                expected_data = np.repeat(
                    axis, expected_shape[2] * expected_shape[3])
            elif name == "dummy_SA_Frequency1":
                # Middle axis: repeated over the innermost, tiled over
                # the outermost dimension.
                axis = np.linspace(array_param.start, array_param.stop,
                                   array_param.npts[1])
                expected_data = np.tile(
                    np.repeat(axis, expected_shape[3]), expected_shape[1])
            elif name == "dummy_SA_Frequency2":
                # Innermost axis: tiled over both outer dimensions.
                axis = np.linspace(array_param.start, array_param.stop,
                                   array_param.npts[2])
                expected_data = np.tile(
                    axis, expected_shape[1] * expected_shape[2])
            elif name == "dummy_SA_multidimspectrum":
                expected_data = inserted_data.ravel()
            assert_allclose(datadict['data'], expected_data)
# Ejemplo n.º 26
# 0
def test_datasaver_multidim_numeric(experiment, bg_writing):
    """
    Test that inserting multidim parameters as numeric works as expected
    """
    meas = Measurement(experiment)
    size1 = 10
    size2 = 15
    x1 = qc.ManualParameter('x1')
    x2 = qc.ManualParameter('x2')
    y1 = qc.ManualParameter('y1')
    y2 = qc.ManualParameter('y2')

    # Maps a parameter name to its slice index in `data` below.
    data_mapping = dict(zip(['x1', 'x2', 'y1', 'y2'], range(4)))

    for setpoint in (x1, x2):
        meas.register_parameter(setpoint, paramtype='numeric')
    for dependent in (y1, y2):
        meas.register_parameter(dependent, setpoints=[x1, x2],
                                paramtype='numeric')

    data = np.random.rand(4, size1, size2)
    with meas.run(write_in_background=bg_writing) as datasaver:
        results = [(str(param), data[index, :, :])
                   for index, param in enumerate((x1, x2, y1, y2))]
        datasaver.add_result(*results)

    datadicts = _get_data_from_ds(datasaver.dataset)
    # One dependency tree per dependent parameter.
    assert len(datadicts) == 2
    for datadict_list in datadicts:
        # Each tree carries the dependent plus its two setpoints.
        assert len(datadict_list) == 3
        for datadict in datadict_list:
            dataindex = data_mapping[datadict['name']]
            # 2D input must come back unrolled to scalars, row-major.
            assert_allclose(datadict['data'], data[dataindex, :, :].ravel())
            assert datadict['data'].shape == (size1 * size2, )
# Ejemplo n.º 27
# 0
def test_datasaver_unsized_arrays(empty_temp_db, N):
    """
    ``np.array(some_number)`` is a 0-d array: it is neither a plain
    number nor an array with a (non-empty) shape. Check that the
    datasaver still stores such values as individual points.
    """
    new_experiment('firstexp', sample_name='no sample')

    meas = Measurement()

    meas.register_custom_parameter(name='freqax',
                                   label='Frequency axis',
                                   unit='Hz')
    meas.register_custom_parameter(name='signal',
                                   label='qubit signal',
                                   unit='Majorana number',
                                   setpoints=('freqax', ))

    frequencies = np.linspace(1e6, 2e6, N)
    amplitudes = np.random.randn(N)
    with meas.run() as datasaver:
        for freq, amp in zip(frequencies, amplitudes):
            # Wrap each scalar in a 0-d array on purpose.
            datasaver.add_result(('freqax', np.array(freq)),
                                 ('signal', np.array(amp)))

    assert datasaver.points_written == N
# Ejemplo n.º 28
# 0
def array_in_scalar_dataset_unrolled(experiment):
    """
    This fixture yields a dataset where an array-valued parameter is registered
    as a 'numeric' type and has an additional single-valued setpoint. We
    expect data to be saved as individual scalars, with the scalar setpoint
    repeated.
    """
    meas = Measurement()
    scalar_param = Parameter('scalarparam', set_cmd=None)
    array_param = ArraySetPointParam()
    meas.register_parameter(scalar_param)
    meas.register_parameter(array_param,
                            setpoints=(scalar_param, ),
                            paramtype='numeric')

    with meas.run() as datasaver:
        for value in range(1, 10):
            scalar_param.set(value)
            datasaver.add_result((scalar_param, scalar_param.get()),
                                 (array_param, array_param.get()))
    try:
        yield datasaver.dataset
    finally:
        # Close the DB connection even if the consuming test fails.
        datasaver.dataset.conn.close()
# Ejemplo n.º 29
# 0
def linear1d(param_set,
             start,
             stop,
             num_points,
             delay,
             *param_meas,
             append=None,
             save=True,
             atstart=None,
             ateach=None,
             atend=None,
             wallcontrol=None,
             wallcontrol_slope=None,
             setback=False):
    """
    Sweep ``param_set`` linearly from ``start`` to ``stop`` over
    ``num_points`` points, measuring every parameter in ``param_meas``
    at each setpoint and live-plotting the results.

    Args:
        param_set: settable parameter to sweep.
        start: first setpoint value.
        stop: last setpoint value.
        num_points: number of setpoints.
        delay: post-set delay applied to ``param_set``.
        *param_meas: parameters to measure at every setpoint.
        append: None/False to open a new plot window, True to append to
            the most recently opened window, or a ``PlotWindow`` to draw
            into directly.
        save: if True, save the figure once the sweep completes.
        atstart: function(s) run before the sweep starts.
        ateach: function(s) run after each setpoint is applied.
        atend: function(s) run after the sweep finishes.
        wallcontrol: optional parameter stepped in lockstep with the
            sweep; restored to its initial value afterwards.
        wallcontrol_slope: change of ``wallcontrol`` per unit change of
            the sweep parameter.
        setback: if True, return ``param_set`` to ``start`` at the end.

    Returns:
        Tuple of ``(run_id, window)``; the id can be used with
        ``plot_by_id``.
    """

    _flush_buffers(*param_meas)
    # Set up a plotting window
    if append is None or not append:
        win = pyplot.PlotWindow()
        win.win_title = 'ID: '
        win.resize(1000, 600)
    elif isinstance(append, pyplot.PlotWindow):
        # Append to the given window
        win = append
    elif isinstance(append, bool):
        # Append to the last trace if true
        win = pyplot.PlotWindow.getWindows()[-1]
    else:
        raise ValueError(
            "Unknown argument to append. Either give a plot window"
            " or true to append to the last plot")

    # Register setpoints
    meas = Measurement()
    meas.register_parameter(param_set)
    param_set.post_delay = delay
    set_points = np.linspace(start, stop, num_points)

    # Keep track of data and plots
    output = []
    data = []
    plots = []

    # Run @start functions
    _run_functions(atstart)

    # Register each of the sweep parameters and set up a plot window for them
    for p, parameter in enumerate(param_meas):
        meas.register_parameter(parameter, setpoints=(param_set, ))
        output.append([parameter, None])

        # Create plot window
        if append is not None and append:
            plot = win.items[0]
        else:
            plot = win.addPlot(title="%s (%s) v.<br>%s (%s)" %
                               (param_set.full_name, param_set.label,
                                parameter.full_name, parameter.label))

        # Figure out if we have 1d or 2d data
        shape = getattr(parameter, 'shape', None)
        if shape is not None and shape:
            # If we have 2d data, we need to know its length
            shape = shape[0]
            set_points_y = parameter.setpoints[0]

            # Create data array
            data.append(np.ndarray((num_points, shape)))
        else:
            # Create data arrays; NaN keeps unvisited points off the plot
            data.append(np.full(num_points, np.nan))
            set_points_y = None

        plotdata = plot.plot(setpoint_x=set_points,
                             setpoint_y=set_points_y,
                             pen=(255, 0, 0),
                             name=parameter.name)

        # Update axes
        if set_points_y is not None:
            plot.update_axes(param_set, parameter, param_y_setpoint=True)
            plotdata.update_histogram_axis(parameter)
        else:
            plot.update_axes(param_set, parameter)
        plots.append(plotdata)

    if wallcontrol is not None:
        wallcontrol_start = wallcontrol.get()
        # NOTE(review): dividing by num_points (not num_points - 1) means
        # the wall control does not quite reach the full span — confirm
        # this is intended.
        step = (stop - start) / num_points

    with meas.run() as datasaver:
        # Set write period to much longer...
        datasaver.write_period = 120
        # Update plot titles. Bug fix: this previously indexed with the
        # stale loop variable `p` left over from the registration loop,
        # so only the LAST plot's title was updated (repeatedly). Index
        # with `i`, matching linear2d.
        win.win_title += "{} ".format(datasaver.run_id)
        for i in range(len(param_meas)):
            plots[i]._parent.plot_title += " (id: %d)" % datasaver.run_id

        # Then, run the actual sweep
        for i, set_point in enumerate(set_points):
            if wallcontrol is not None:
                wallcontrol.set(wallcontrol_start +
                                i * step * wallcontrol_slope)
            param_set.set(set_point)
            _run_functions(ateach)
            for p, parameter in enumerate(param_meas):
                output[p][1] = parameter.get()
                shape = getattr(parameter, 'shape', None)
                if shape is not None and shape:
                    data[p][i, :] = output[p][1]  # Update 2D data
                    if i == 0:
                        # Seed the remaining rows with the mid-range value
                        # so the color scale starts out sensible.
                        data[p][1:] = (np.min(output[p][1]) +
                                       np.max(output[p][1])) / 2
                else:
                    data[p][i] = output[p][1]  # Update 1D data

                # Update live plots
                plots[p].update(data[p])
            # Save data
            datasaver.add_result((param_set, set_point), *output)

    if wallcontrol is not None:
        wallcontrol.set(wallcontrol_start)

    if setback:
        param_set.set(start)

    _run_functions(atend)

    if save:
        plot_tools.save_figure(win, datasaver.run_id)
    return (datasaver.run_id, win)  # can use plot_by_id(dataid)
# Ejemplo n.º 30
# 0
def linear2d(param_set1,
             start1,
             stop1,
             num_points1,
             delay1,
             param_set2,
             start2,
             stop2,
             num_points2,
             delay2,
             *param_meas,
             save=True):
    """
    Sweep two parameters on a grid, measuring every parameter in
    ``param_meas`` at each (set_point1, set_point2) pair and
    live-plotting the results as 2D images.

    Args:
        param_set1: slow ("step") axis parameter.
        start1/stop1/num_points1: linear range of the slow axis.
        delay1: post-set delay applied to ``param_set1``.
        param_set2: fast ("sweep") axis parameter.
        start2/stop2/num_points2: linear range of the fast axis.
        delay2: post-set delay applied to ``param_set2``.
        *param_meas: parameters to measure at every grid point.
        save: if True, save the figure when the sweep completes.

    Returns:
        Tuple of ``(run_id, window)``.
    """

    _flush_buffers(*param_meas)
    # Set up a plotting window
    win = pyplot.PlotWindow()
    win.win_title = 'ID: '
    win.resize(800, 800)

    # Register setpoints
    meas = Measurement()
    # Step Axis
    meas.register_parameter(param_set1)
    param_set1.post_delay = delay1
    set_points1 = np.linspace(start1, stop1, num_points1)
    # Sweep Axis
    meas.register_parameter(param_set2)
    param_set2.post_delay = delay2
    set_points2 = np.linspace(start2, stop2, num_points2)

    # Keep track of data and plots
    output = []
    # One (num_points1 x num_points2) image per measured parameter.
    data = np.ndarray((len(param_meas), num_points1, num_points2))
    plots = []

    for p, parameter in enumerate(param_meas):
        meas.register_parameter(parameter, setpoints=(param_set1, param_set2))
        output.append([parameter, None])

        # Add Plot item
        # NOTE(review): the title names the two setpoint axes but not
        # `parameter` itself; linear1d titles with the measured
        # parameter — confirm whether that was intended here too.
        plot = win.addPlot(title="%s (%s) v.<br>%s (%s)" %
                           (param_set1.full_name, param_set1.label,
                            param_set2.full_name, param_set2.label))
        plotdata = plot.plot(setpoint_x=set_points1, setpoint_y=set_points2)
        plot.update_axes(param_set1, param_set2)
        plotdata.update_histogram_axis(parameter)
        plots.append(plotdata)

    with meas.run() as datasaver:
        # Set write period to much longer...
        datasaver.write_period = 120
        # Update plot titles
        win.win_title += "{} ".format(datasaver.run_id)
        for i in range(len(param_meas)):
            plots[i]._parent.plot_title += " (id: %d)" % datasaver.run_id
            # Suspend redraws while the sweep fills the image in.
            plots[i].pause_update()

        for i, set_point1 in enumerate(set_points1):
            # Return the fast axis to its start before each row.
            param_set2.set(start2)
            param_set1.set(set_point1)
            for j, set_point2 in enumerate(set_points2):
                param_set2.set(set_point2)
                for p, parameter in enumerate(param_meas):
                    output[p][1] = parameter.get()
                    fdata = data[p]
                    fdata[i, j] = output[p][1]

                    if i == 0:
                        # Calculate z-range of data, and remove NaN's from first column
                        # This sets zero point for rest of data
                        z_range = (np.nanmin(fdata[i, :j + 1]),
                                   np.nanmax(fdata[i, :j + 1]))
                        fdata[0, j + 1:] = (z_range[0] + z_range[1]) / 2
                        fdata[1:, :] = (z_range[0] + z_range[1]) / 2

                    # Update plot items, and update range every 10 points
                    if (num_points1 * num_points2) < 1000 or (j % 20) == 0:
                        plots[p].update(fdata, update_range=((j % 100) == 0))

                # Save data
                datasaver.add_result((param_set1, set_point1),
                                     (param_set2, set_point2), *output)

        # Final redraw with the complete data, then re-enable updates.
        for i in range(len(param_meas)):
            fdata = data[i]
            plots[i].update(fdata, True)
            plots[i].resume_update()

    if save:
        plot_tools.save_figure(win, datasaver.run_id)
    return (datasaver.run_id, win)