Example #1
def test_string(experiment):
    """
    Test that we can save text into database via Measurement API
    """
    p = qc.Parameter('p',
                     label='String parameter',
                     unit='',
                     get_cmd=None,
                     set_cmd=None,
                     initial_value='some text')

    meas = Measurement(experiment)
    meas.register_parameter(p, paramtype='text')

    with meas.run() as datasaver:
        datasaver.add_result((p, "some text"))

    test_set = load_by_id(datasaver.run_id)

    assert test_set.get_data("p") == [["some text"]]
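
The same text-saving workflow, reduced to a self-contained sketch. The database path, experiment name and top-level imports below are assumptions (import paths vary slightly between qcodes versions):

from qcodes import (Measurement, Parameter, initialise_or_create_database_at,
                    load_by_id, load_or_create_experiment)

# create/open a database file and an experiment to hold the run
initialise_or_create_database_at('./example.db')
load_or_create_experiment('string_saving', sample_name='no_sample')

p = Parameter('p', label='String parameter', unit='',
              get_cmd=None, set_cmd=None, initial_value='some text')

meas = Measurement()
meas.register_parameter(p, paramtype='text')  # strings require the 'text' paramtype

with meas.run() as datasaver:
    datasaver.add_result((p, p()))

print(load_by_id(datasaver.run_id).get_parameter_data('p'))
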
Example #2
def test_datasaver_1d(experiment, DAC, DMM, caplog,
                      n_points):
    meas = Measurement()
    meas.register_parameter(DAC.ch1)
    meas.register_parameter(DMM.v1, setpoints=(DAC.ch1,))

    n_points_expected = 5

    meas.set_shapes({DMM.v1.full_name: (n_points_expected,)})

    with meas.run() as datasaver:

        for set_v in np.linspace(0, 1, n_points):
            DAC.ch1(set_v)  # set the DAC before reading the DMM
            datasaver.add_result((DAC.ch1, set_v),
                                 (DMM.v1, DMM.v1()))

    ds = datasaver.dataset
    caplog.clear()
    data = ds.get_parameter_data()

    for dataarray in data[DMM.v1.full_name].values():
        assert dataarray.shape == (n_points,)

    if n_points == n_points_expected:
        assert len(caplog.record_tuples) == 0
    elif n_points > n_points_expected:
        assert len(caplog.record_tuples) == 2
        exp_module = "qcodes.dataset.sqlite.queries"
        exp_level = logging.WARNING
        exp_msg = ("Tried to set data shape for {} in "
                   "dataset {} "
                   "from metadata when loading "
                   "but found inconsistent lengths {} and {}")
        assert caplog.record_tuples[0] == (exp_module,
                                           exp_level,
                                           exp_msg.format(DMM.v1.full_name,
                                                          DMM.v1.full_name,
                                                          n_points,
                                                          n_points_expected))
        assert caplog.record_tuples[1] == (exp_module,
                                           exp_level,
                                           exp_msg.format(DAC.ch1.full_name,
                                                          DMM.v1.full_name,
                                                          n_points,
                                                          n_points_expected))
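
Distilled to its essentials, the shape-metadata mechanism used above looks roughly like this (a sketch with plain stand-alone parameters; it assumes an already initialised database and experiment, and that Measurement.set_shapes is available, i.e. a reasonably recent qcodes):

import numpy as np
from qcodes import Measurement, Parameter

x = Parameter('x', get_cmd=None, set_cmd=None)
y = Parameter('y', get_cmd=None, set_cmd=None)

meas = Measurement()
meas.register_parameter(x)
meas.register_parameter(y, setpoints=(x,))
# declare up front how many points the dependent parameter will have
meas.set_shapes({y.full_name: (11,)})

with meas.run() as datasaver:
    for v in np.linspace(0, 1, 11):
        datasaver.add_result((x, v), (y, v ** 2))

data = datasaver.dataset.get_parameter_data()
assert data['y']['y'].shape == (11,)
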
Example #3
def do0d(*param_meas: Union[_BaseParameter, Callable[[], None]],
         do_plot: bool = True) -> AxesTupleListWithRunId:
    """
    Perform a measurement of a single parameter. This is probably most
    useful for an ArrayParameter that already returns an array of data points.

    Args:
        *param_meas: Parameter(s) to measure at each step or functions that
          will be called at each step. The function should take no arguments.
          The parameters and functions are called in the order they are
          supplied.
        do_plot: should png and pdf versions of the images be saved after the
            run.

    Returns:
        The run_id of the DataSet created, together with the matplotlib axes
        and colorbars (the latter two are None unless do_plot is True)
    """
    meas = Measurement()
    output = []

    for parameter in param_meas:
        # only actual parameters are registered; bare callables are just run
        if isinstance(parameter, _BaseParameter):
            meas.register_parameter(parameter)

    with meas.run() as datasaver:

        for parameter in param_meas:
            if isinstance(parameter, _BaseParameter):
                output.append((parameter, parameter.get()))
            elif callable(parameter):
                parameter()
        datasaver.add_result(*output)
    dataid = datasaver.run_id

    if do_plot:
        ax, cbs = _save_image(datasaver)
    else:
        ax = None
        cbs = None

    return dataid, ax, cbs
Example #4
    def generate_local_exp(dbpath: Path) -> List[str]:
        with initialised_database_at(str(dbpath)):
            guids = []
            exp = load_or_create_experiment(experiment_name="test_guid")

            p1 = Parameter('Voltage', set_cmd=None)
            p2 = Parameter('Current', get_cmd=np.random.randn)

            meas = Measurement(exp=exp)
            meas.register_parameter(p1).register_parameter(p2, setpoints=[p1])

            # Measure twice to get 2 run ids and 2 guids
            for run in range(2):
                with meas.run() as datasaver:
                    for v in np.linspace(0 * run, 2 * run, 50):
                        p1(v)
                        datasaver.add_result((p1, cast(float, p1())),
                                             (p2, cast(float, p2())))
                guid = datasaver.dataset.guid
                guids.append(guid)
        return guids
Example #5
    def _create_measurement(self):
        """
        Creates a QCoDeS Measurement object. This controls the saving of data by registering
        QCoDeS Parameter objects, which this function does. Registers all 'tracked' parameters, 
        Returns the measurement object.
        This function will register only parameters that are followed BEFORE this function is
        called.
        """

        # First, create time parameter
        self.meas = Measurement()
        self.meas.register_custom_parameter('time', label='Time', unit='s')

        # Check if we are 'setting' a parameter, and register it
        if self.set_param is not None:
            self.meas.register_parameter(self.set_param)
        # Register all parameters we are following
        for p in self._params:
            self.meas.register_parameter(p)

        return self.meas
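
The pattern above in stand-alone form: a custom 'time' column plus one followed parameter, written row by row. The parameter name and getter are hypothetical, and an initialised database/experiment is assumed:

import time
from qcodes import Measurement, Parameter

temperature = Parameter('temperature', unit='K', get_cmd=lambda: 300.0)

meas = Measurement()
meas.register_custom_parameter('time', label='Time', unit='s')
meas.register_parameter(temperature)

t0 = time.monotonic()
with meas.run() as datasaver:
    for _ in range(5):
        # custom parameters are addressed by name in add_result
        datasaver.add_result(('time', time.monotonic() - t0),
                             (temperature, temperature()))
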
Example #6
def store_array_to_database_alt(meas: Measurement, array: DataArray) -> int:
    dims = len(array.shape)
    if dims == 2:
        outer_data = np.empty(array.shape[1])
        with meas.run() as datasaver:
            for index1, i in enumerate(array.set_arrays[0]):
                outer_data[:] = i
                datasaver.add_result(
                    (array.set_arrays[0].array_id, outer_data),
                    (array.set_arrays[1].array_id,
                     array.set_arrays[1][index1, :]),
                    (array.array_id, array[index1, :]))
    elif dims == 1:
        with meas.run() as datasaver:
            for index, i in enumerate(array.set_arrays[0]):
                datasaver.add_result((array.set_arrays[0].array_id, i),
                                     (array.array_id, array[index]))
    else:
        raise NotImplementedError(
            'The exporter currently only handles 1- and 2-dimensional data')
    return datasaver.run_id
Example #7
def _make_meas_with_registered_param_complex(experiment, DAC,
                                             complex_num_instrument):
    meas = Measurement()
    meas.register_parameter(DAC.ch1)
    meas.register_parameter(complex_num_instrument.complex_num,
                            setpoints=[DAC.ch1])
    yield meas
Example #8
def do0d(*param_meas: ParamMeasT,
         write_period: Optional[float] = None,
         do_plot: bool = True) -> AxesTupleListWithDataSet:
    """
    Perform a measurement of a single parameter. This is probably most
    useful for an ArrayParameter that already returns an array of data points

    Args:
        *param_meas: Parameter(s) to measure at each step or functions that
          will be called at each step. The function should take no arguments.
          The parameters and functions are called in the order they are
          supplied.
        write_period: The time after which the data is actually written to the
            database.
        do_plot: should png and pdf versions of the images be saved after the
            run.

    Returns:
        The QCoDeS dataset.
    """
    meas = Measurement()

    measured_parameters = tuple(param for param in param_meas
                                if isinstance(param, _BaseParameter))

    try:
        shapes: Shapes = detect_shape_of_measurement(measured_parameters)
    except TypeError:
        LOG.exception(f"Could not detect shape of {measured_parameters}, "
                      f"falling back to unknown shape.")
        shapes = None

    _register_parameters(meas, param_meas, shapes=shapes)
    _set_write_period(meas, write_period)

    with meas.run() as datasaver:
        datasaver.add_result(*_process_params_meas(param_meas))
        dataset = datasaver.dataset

    return _handle_plotting(dataset, do_plot)
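
A hypothetical usage sketch for a do0d of this kind. qcodes ships an equivalent do0d (in qcodes.utils.dataset.doNd for the versions these snippets appear to target; newer releases expose it from qcodes.dataset), and its return value is (dataset, axes, colorbars):

import numpy as np
from qcodes import (Parameter, initialise_or_create_database_at,
                    load_or_create_experiment)
from qcodes.utils.dataset.doNd import do0d

initialise_or_create_database_at('./example.db')
load_or_create_experiment('do0d_demo', sample_name='no_sample')

# a gettable parameter that returns a full trace in a single call
trace = Parameter('trace', get_cmd=lambda: np.random.rand(100))

dataset, axes, colorbars = do0d(trace, do_plot=False)
print(dataset.run_id)
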
Example #9
def test_snapshot_creation_for_types_not_supported_by_builtin_json(experiment):
    """
    Test that `Measurement`/`Runner`/`DataSaver` infrastructure
    successfully dumps station snapshots in JSON format in cases when the
    snapshot contains data of types that are not supported by python builtin
    `json` module, for example, numpy scalars.
    """
    p1 = ManualParameter('p_np_int32', initial_value=numpy.int32(5))
    p2 = ManualParameter('p_np_float16', initial_value=numpy.float16(5.0))
    p3 = ManualParameter('p_np_array',
                         initial_value=numpy.meshgrid((1, 2), (3, 4)))
    p4 = ManualParameter('p_np_bool', initial_value=numpy.bool_(False))

    station = Station(p1, p2, p3, p4)

    measurement = Measurement(experiment, station)

    # we need at least 1 parameter to be able to run the measurement
    measurement.register_custom_parameter('dummy')

    with measurement.run() as data_saver:
        # we do this in order to create a snapshot of the station and add it
        # to the database
        pass

    snapshot = data_saver.dataset.snapshot

    assert 5 == snapshot['station']['parameters']['p_np_int32']['value']
    assert 5 == snapshot['station']['parameters']['p_np_int32']['raw_value']

    assert 5.0 == snapshot['station']['parameters']['p_np_float16']['value']
    assert 5.0 == snapshot['station']['parameters']['p_np_float16'][
        'raw_value']

    lst = [[[1, 2], [1, 2]], [[3, 3], [4, 4]]]
    assert lst == snapshot['station']['parameters']['p_np_array']['value']
    assert lst == snapshot['station']['parameters']['p_np_array']['raw_value']

    assert False is snapshot['station']['parameters']['p_np_bool']['value']
    assert False is snapshot['station']['parameters']['p_np_bool']['raw_value']
Example #10
def test_list_of_strings(experiment):
    """
    Test saving list of strings via DataSaver
    """
    p_values = ["X_Y", "X_X", "X_I", "I_I"]
    list_of_strings = list(np.random.choice(p_values, (10,)))

    p = qc.Parameter('p', label='String parameter', unit='', get_cmd=None,
                     set_cmd=None, initial_value='X_Y')

    meas = Measurement(experiment)
    meas.register_parameter(p, paramtype='text')

    with meas.run() as datasaver:
        datasaver.add_result((p, list_of_strings))

    test_set = load_by_id(datasaver.run_id)

    try:
        assert [[item] for item in list_of_strings] == test_set.get_data("p")
    finally:
        test_set.conn.close()
Example #11
def test_string_with_wrong_paramtype(experiment):
    """
    Test that an exception occurs when saving string data for a string
    parameter that was registered without paramtype='text'
    """
    p = qc.Parameter('p',
                     label='String parameter',
                     unit='',
                     get_cmd=None,
                     set_cmd=None,
                     initial_value='some text')

    meas = Measurement(experiment)
    # intentionally forget `paramtype='text'`, so that the default 'numeric'
    # is used, and an exception is raised later
    meas.register_parameter(p)

    with meas.run() as datasaver:
        msg = "It is not possible to save a string value for parameter 'p' " \
              "because its type class is 'numeric', not 'text'."
        with pytest.raises(ValueError, match=msg):
            datasaver.add_result((p, "some text"))
Example #12
def test_string_with_wrong_paramtype(experiment):
    """
    Test that an exception occurs when saving string data for a string
    parameter that was registered without paramtype='text'
    """
    p = qc.Parameter('p',
                     label='String parameter',
                     unit='',
                     get_cmd=None,
                     set_cmd=None,
                     initial_value='some text')

    meas = Measurement(experiment)
    # intentionally forget `paramtype='text'`, so that the default 'numeric'
    # is used, and an exception is raised later
    meas.register_parameter(p)

    with meas.run() as datasaver:
        msg = re.escape('Parameter p is of type "numeric", but got a '
                        "result of type <U9 (some text).")
        with pytest.raises(ValueError, match=msg):
            datasaver.add_result((p, "some text"))
Example #13
def test_column_mismatch(two_empty_temp_db_connections, some_interdeps, inst):
    """
    Test insertion of runs with no metadata and no snapshot into a DB already
    containing a run that has both
    """

    source_conn, target_conn = two_empty_temp_db_connections
    source_path = path_to_dbfile(source_conn)
    target_path = path_to_dbfile(target_conn)

    target_exp = Experiment(conn=target_conn)

    # Set up measurement scenario
    station = Station(inst)

    meas = Measurement(exp=target_exp, station=station)
    meas.register_parameter(inst.back)
    meas.register_parameter(inst.plunger)
    meas.register_parameter(inst.cutter, setpoints=(inst.back, inst.plunger))

    with meas.run() as datasaver:
        for back_v in [1, 2, 3]:
            for plung_v in [-3, -2.5, 0]:
                datasaver.add_result((inst.back, back_v),
                                     (inst.plunger, plung_v),
                                     (inst.cutter, back_v+plung_v))
    datasaver.dataset.add_metadata('meta_tag', 'meta_value')

    Experiment(conn=source_conn)
    source_ds = DataSet(conn=source_conn)
    source_ds.set_interdependencies(some_interdeps[1])

    source_ds.mark_started()
    source_ds.add_results([{name: 2.1
                            for name in some_interdeps[1].names}])
    source_ds.mark_completed()

    extract_runs_into_db(source_path, target_path, 1)

    # compare
    target_copied_ds = DataSet(conn=target_conn, run_id=2)

    assert target_copied_ds.the_same_dataset_as(source_ds)
Example #14
def test_datasaver_multidimarrayparameter_as_array(SpectrumAnalyzer,
                                                   bg_writing):
    array_param = SpectrumAnalyzer.multidimspectrum
    meas = Measurement()
    meas.register_parameter(array_param, paramtype='array')
    assert len(meas.parameters) == 4
    inserted_data = array_param.get()
    with meas.run(write_in_background=bg_writing) as datasaver:
        datasaver.add_result((array_param, inserted_data))

    expected_shape = (1, 100, 50, 20)

    datadicts = _get_data_from_ds(datasaver.dataset)
    assert len(datadicts) == 1
    for datadict_list in datadicts:
        assert len(datadict_list) == 4
        for datadict in datadict_list:

            datadict['data'].shape = (np.prod(expected_shape), )
            if datadict['name'] == "dummy_SA_Frequency0":
                temp_data = np.linspace(array_param.start, array_param.stop,
                                        array_param.npts[0])
                expected_data = np.repeat(
                    temp_data, expected_shape[2] * expected_shape[3])
            if datadict['name'] == "dummy_SA_Frequency1":
                temp_data = np.linspace(array_param.start, array_param.stop,
                                        array_param.npts[1])
                expected_data = np.tile(
                    np.repeat(temp_data, expected_shape[3]), expected_shape[1])
            if datadict['name'] == "dummy_SA_Frequency2":
                temp_data = np.linspace(array_param.start, array_param.stop,
                                        array_param.npts[2])
                expected_data = np.tile(temp_data,
                                        expected_shape[1] * expected_shape[2])
            if datadict['name'] == "dummy_SA_multidimspectrum":
                expected_data = inserted_data.ravel()
            assert_allclose(datadict['data'], expected_data)
Example #15
def test_measurement_name(experiment, DAC, DMM):

    fmt = experiment.format_string
    exp_id = experiment.exp_id

    name = 'yolo'

    meas = Measurement()
    meas.name = name

    meas.register_parameter(DAC.ch1)
    meas.register_parameter(DMM.v1, setpoints=[DAC.ch1])

    with meas.run() as datasaver:
        run_id = datasaver.run_id
        expected_name = fmt.format(name, exp_id, run_id)
        assert datasaver.dataset.table_name == expected_name
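
The naming behaviour tested above, in minimal form (a sketch assuming an initialised database and experiment; the printed values are illustrative):

from qcodes import Measurement

meas = Measurement()
meas.name = 'iv_curve'
meas.register_custom_parameter('dummy')

with meas.run() as datasaver:
    pass

ds = datasaver.dataset
print(ds.name)        # 'iv_curve'
print(ds.table_name)  # e.g. 'iv_curve-1-1', built from the experiment's format_string
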
Example #16
def test_cache_1d(experiment, DAC, DMM, n_points, bg_writing,
                  channel_array_instrument, setpoints_type):

    setpoints_param, setpoints_values = _prepare_setpoints_1d(
        DAC, channel_array_instrument, n_points, setpoints_type)

    meas = Measurement()

    meas.register_parameter(setpoints_param)

    meas_parameters = (
        DMM.v1,
        channel_array_instrument.A.dummy_multi_parameter,
        channel_array_instrument.A.dummy_scalar_multi_parameter,
        channel_array_instrument.A.dummy_2d_multi_parameter,
        channel_array_instrument.A.dummy_2d_multi_parameter_2,
        channel_array_instrument.A.dummy_array_parameter,
        channel_array_instrument.A.dummy_complex_array_parameter,
        channel_array_instrument.A.dummy_complex,
        channel_array_instrument.A.dummy_parameter_with_setpoints,
        channel_array_instrument.A.dummy_parameter_with_setpoints_complex,
    )
    channel_array_instrument.A.dummy_start(0)
    channel_array_instrument.A.dummy_stop(10)
    channel_array_instrument.A.dummy_n_points(10)
    for param in meas_parameters:
        meas.register_parameter(param, setpoints=(setpoints_param, ))

    with meas.run(write_in_background=bg_writing) as datasaver:
        dataset = datasaver.dataset
        _assert_parameter_data_is_identical(dataset.get_parameter_data(),
                                            dataset.cache.data())
        for i, v in enumerate(setpoints_values):
            setpoints_param.set(v)

            meas_vals = [(param, param.get())
                         for param in meas_parameters[:-2]]
            meas_vals += expand_setpoints_helper(meas_parameters[-2])
            meas_vals += expand_setpoints_helper(meas_parameters[-1])

            datasaver.add_result((setpoints_param, v), *meas_vals)
            datasaver.flush_data_to_database(block=True)
            data = dataset.cache.data()
            _assert_parameter_data_is_identical(dataset.get_parameter_data(),
                                                data)
    _assert_parameter_data_is_identical(dataset.get_parameter_data(),
                                        dataset.cache.data())
    assert dataset.cache._loaded_from_completed_ds is True
    _assert_parameter_data_is_identical(dataset.get_parameter_data(),
                                        dataset.cache.data())
Example #17
def test_cache_1d_every_other_point(experiment, DAC, DMM, n_points, bg_writing,
                                    channel_array_instrument, setpoints_type,
                                    in_memory_cache):

    setpoints_param, setpoints_values = _prepare_setpoints_1d(
        DAC, channel_array_instrument, n_points, setpoints_type)

    meas = Measurement()

    meas.register_parameter(setpoints_param)

    meas_parameters = (DMM.v1,
                       channel_array_instrument.A.temperature,
                       channel_array_instrument.B.temperature
                       )
    for param in meas_parameters:
        meas.register_parameter(param, setpoints=(setpoints_param,))

    with meas.run(
            write_in_background=bg_writing,
            in_memory_cache=in_memory_cache
    ) as datasaver:
        dataset = datasaver.dataset
        _assert_parameter_data_is_identical(dataset.get_parameter_data(), dataset.cache.data())
        for i, v in enumerate(setpoints_values):
            setpoints_param.set(v)

            meas_vals = [(param, param.get()) for param in meas_parameters]

            if i % 2 == 0:
                datasaver.add_result((setpoints_param, v),
                                     *meas_vals)
            else:
                datasaver.add_result((setpoints_param, v),
                                     *meas_vals[0:2])
            datasaver.flush_data_to_database(block=True)
            data = dataset.cache.data()
            assert len(data['dummy_channel_inst_ChanA_temperature']
                       ['dummy_channel_inst_ChanA_temperature']) == i + 1
            assert len(data['dummy_channel_inst_ChanB_temperature']
                       ['dummy_channel_inst_ChanB_temperature']) == i // 2 + 1
            _assert_parameter_data_is_identical(dataset.get_parameter_data(),
                                                data)
    _assert_parameter_data_is_identical(dataset.get_parameter_data(),
                                        dataset.cache.data())
    if in_memory_cache is False:
        assert dataset.cache._loaded_from_completed_ds is True
    assert dataset.completed is True
    assert dataset.cache.live is in_memory_cache
    _assert_parameter_data_is_identical(dataset.get_parameter_data(),
                                        dataset.cache.data())
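
The cache access exercised by these tests, reduced to a stand-alone sketch (assumes an initialised database and experiment, and a qcodes version where dataset.cache is available):

import numpy as np
from qcodes import Measurement, Parameter

x = Parameter('x', get_cmd=None, set_cmd=None)
y = Parameter('y', get_cmd=None, set_cmd=None)

meas = Measurement()
meas.register_parameter(x)
meas.register_parameter(y, setpoints=(x,))

with meas.run() as datasaver:
    dataset = datasaver.dataset
    for v in np.linspace(0, 1, 11):
        datasaver.add_result((x, v), (y, v ** 2))
        datasaver.flush_data_to_database(block=True)
        # partial results can be read from the cache while the run is open
        partial = dataset.cache.data()
        print(len(partial['y']['y']), 'points so far')
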
Example #18
    def setup(self, bench_param):
        # Init DB
        self.tmpdir = tempfile.mkdtemp()
        qcodes.config["core"]["db_location"] = os.path.join(self.tmpdir,
                                                            'temp.db')
        qcodes.config["core"]["db_debug"] = False
        initialise_database()

        # Create experiment
        self.experiment = new_experiment("test-experiment",
                                         sample_name="test-sample")

        # Create measurement
        meas = Measurement(self.experiment)

        x1 = ManualParameter('x1')
        x2 = ManualParameter('x2')
        x3 = ManualParameter('x3')
        y1 = ManualParameter('y1')
        y2 = ManualParameter('y2')

        meas.register_parameter(x1)
        meas.register_parameter(x2)
        meas.register_parameter(x3)
        meas.register_parameter(y1, setpoints=[x1, x2, x3])
        meas.register_parameter(y2, setpoints=[x1, x2, x3])

        self.parameters = [x1, x2, x3, y1, y2]

        # Create the Runner context manager
        self.runner = meas.run()

        # Enter Runner and create DataSaver
        self.datasaver = self.runner.__enter__()

        # Create random values for each parameter
        self.values = []
        for _ in range(len(self.parameters)):
            self.values.append(np.random.rand(bench_param['n_values']))
Example #19
def test_cache_2d(experiment, DAC, DMM, n_points_outer,
                  n_points_inner, bg_writing, channel_array_instrument,
                  in_memory_cache):
    meas = Measurement()

    meas.register_parameter(DAC.ch1)
    meas.register_parameter(DAC.ch2)

    meas_parameters = (DMM.v1,
                       channel_array_instrument.A.dummy_multi_parameter,
                       channel_array_instrument.A.dummy_scalar_multi_parameter,
                       channel_array_instrument.A.dummy_2d_multi_parameter,
                       channel_array_instrument.A.dummy_2d_multi_parameter_2,
                       channel_array_instrument.A.dummy_array_parameter,
                       channel_array_instrument.A.dummy_complex_array_parameter,
                       channel_array_instrument.A.dummy_complex,
                       channel_array_instrument.A.dummy_parameter_with_setpoints,
                       channel_array_instrument.A.dummy_parameter_with_setpoints_complex,
                       )
    channel_array_instrument.A.dummy_start(0)
    channel_array_instrument.A.dummy_stop(10)
    channel_array_instrument.A.dummy_n_points(10)
    for param in meas_parameters:
        meas.register_parameter(param, setpoints=(DAC.ch1, DAC.ch2))
    n_rows_written = 0
    with meas.run(
            write_in_background=bg_writing,
            in_memory_cache=in_memory_cache) as datasaver:
        dataset = datasaver.dataset
        _assert_parameter_data_is_identical(dataset.get_parameter_data(), dataset.cache.data())
        for v1 in np.linspace(-1, 1, n_points_outer):
            for v2 in np.linspace(-1, 1, n_points_inner):
                DAC.ch1.set(v1)
                DAC.ch2.set(v2)
                meas_vals = [(param, param.get()) for param in meas_parameters[:-2]]
                meas_vals += expand_setpoints_helper(meas_parameters[-2])
                meas_vals += expand_setpoints_helper(meas_parameters[-1])

                datasaver.add_result((DAC.ch1, v1),
                                     (DAC.ch2, v2),
                                     *meas_vals)
                datasaver.flush_data_to_database(block=True)
                n_rows_written += 1
                data = dataset.cache.data()
                _assert_parameter_data_is_identical(dataset.get_parameter_data(),
                                                    data)
    _assert_parameter_data_is_identical(dataset.get_parameter_data(),
                                        dataset.cache.data())
Example #20
def test_station_snapshot_during_measurement(experiment, dac, dmm,
                                             pass_station):
    station = Station()
    station.add_component(dac)
    station.add_component(dmm, 'renamed_dmm')

    snapshot_of_station = station.snapshot()

    if pass_station:
        measurement = Measurement(experiment, station)
    else:
        # in this branch of the `if` we expect the `Measurement` object to be
        # initialized with `Station.default`, which is equal to the station
        # object instantiated above
        measurement = Measurement(experiment)

    measurement.register_parameter(dac.ch1)
    measurement.register_parameter(dmm.v1, setpoints=[dac.ch1])

    with measurement.run() as data_saver:
        data_saver.add_result((dac.ch1, 7), (dmm.v1, 5))

    # 1. Test `get_metadata('snapshot')` method

    json_snapshot_from_dataset = data_saver.dataset.get_metadata('snapshot')
    snapshot_from_dataset = json.loads(json_snapshot_from_dataset)

    expected_snapshot = {'station': snapshot_of_station}
    assert expected_snapshot == snapshot_from_dataset

    # 2. Test `snapshot_raw` property

    assert json_snapshot_from_dataset == data_saver.dataset.snapshot_raw

    # 3. Test `snapshot` property

    assert expected_snapshot == data_saver.dataset.snapshot
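
In short: passing a Station (explicitly or via Station.default) makes its snapshot part of the run's metadata. A minimal sketch, assuming an initialised database and experiment and a hypothetical 'gate' parameter:

from qcodes import Measurement, Parameter, Station

gate = Parameter('gate', get_cmd=None, set_cmd=None, initial_value=0.0)
station = Station(gate)

meas = Measurement(station=station)
meas.register_parameter(gate)

with meas.run() as datasaver:
    datasaver.add_result((gate, 0.0))

snapshot = datasaver.dataset.snapshot
print(snapshot['station']['parameters']['gate']['value'])
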
Example #21
def array_in_str_dataset(experiment, request):
    meas = Measurement()
    scalar_param = Parameter('textparam', set_cmd=None)
    param = ArraySetPointParam()
    meas.register_parameter(scalar_param, paramtype='text')
    meas.register_parameter(param, setpoints=(scalar_param,),
                            paramtype=request.param)

    with meas.run() as datasaver:
        for i in ['A', 'B', 'C']:
            scalar_param.set(i)
            datasaver.add_result((scalar_param, scalar_param.get()),
                                 (param, param.get()))
    try:
        yield datasaver.dataset
    finally:
        datasaver.dataset.conn.close()
Example #22
def test_datasaver_scalars(experiment, DAC, DMM, set_values, get_values,
                           breakpoint, write_period):

    no_of_runs = len(experiment)

    station = qc.Station(DAC, DMM)

    meas = Measurement(station=station)
    meas.write_period = write_period

    assert meas.write_period == write_period

    meas.register_parameter(DAC.ch1)
    meas.register_parameter(DMM.v1, setpoints=(DAC.ch1, ))

    with meas.run() as datasaver:
        for set_v, get_v in zip(set_values[:breakpoint],
                                get_values[:breakpoint]):
            datasaver.add_result((DAC.ch1, set_v), (DMM.v1, get_v))

        assert datasaver._dataset.number_of_results == 0
        sleep(write_period * 1.1)
        datasaver.add_result((DAC.ch1, set_values[breakpoint]),
                             (DMM.v1, get_values[breakpoint]))
        assert datasaver.points_written == breakpoint + 1

    assert datasaver.run_id == no_of_runs + 1

    with meas.run() as datasaver:
        with pytest.raises(ValueError):
            datasaver.add_result((DAC.ch2, 1), (DAC.ch2, 2))
        with pytest.raises(ValueError):
            datasaver.add_result((DMM.v1, 0))

    # important cleanup, else the following tests will fail
    qc.Station.default = None
Example #23
def array_in_scalar_dataset_unrolled(experiment):
    meas = Measurement()
    scalar_param = Parameter('scalarparam', set_cmd=None)
    param = ArraySetPointParam()
    meas.register_parameter(scalar_param)
    meas.register_parameter(param, setpoints=(scalar_param,),
                            paramtype='numeric')

    with meas.run() as datasaver:
        for i in range(1, 10):
            scalar_param.set(i)
            datasaver.add_result((scalar_param, scalar_param.get()),
                                 (param, param.get()))
    try:
        yield datasaver.dataset
    finally:
        datasaver.dataset.conn.close()
Example #24
def test_run_loaded_experiment():
    """
    Test that we can resume a measurement after loading by name
    """
    new_experiment("test", "test1")
    exp_loaded = load_experiment_by_name("test", "test1")

    meas = Measurement(exp=exp_loaded)
    meas.register_custom_parameter(name='dummy', paramtype='text')
    with meas.run():
        pass

    with meas.run():
        pass
Example #25
def test_cache_complex_array_param_in_1d(experiment, DAC,
                                         channel_array_instrument, n_points,
                                         bg_writing, storage_type,
                                         outer_param_type):
    param = channel_array_instrument.A.dummy_complex_array_parameter
    meas = Measurement()
    if outer_param_type == 'numeric':
        outer_param = DAC.ch1
        outer_setpoints = np.linspace(-1, 1, n_points)
        outer_storage_type = storage_type
    else:
        outer_param = channel_array_instrument.A.dummy_text
        outer_setpoints = ['A', 'B', 'C', 'D']
        outer_storage_type = 'text'
    meas.register_parameter(outer_param, paramtype=outer_storage_type)
    meas.register_parameter(param,
                            setpoints=(outer_param, ),
                            paramtype=storage_type)
    array_used = _array_param_used_in_tree(meas)
    with meas.run(write_in_background=bg_writing) as datasaver:
        dataset = datasaver.dataset
        for i, v1 in enumerate(outer_setpoints):
            datasaver.add_result((outer_param, v1), (param, param.get()))
            datasaver.flush_data_to_database(block=True)
            data = dataset.cache.data()
            n_rows_written = i + 1

            if array_used:
                expected_shape = (n_rows_written, ) + param.shape
            else:
                expected_shape = (n_rows_written * np.prod(param.shape), )
            assert data[param.full_name][
                param.full_name].shape == expected_shape
            if storage_type != 'array':
                # with explicit array types the shape is incorrect
                # https://github.com/QCoDeS/Qcodes/issues/2105
                assert data[param.full_name][
                    outer_param.full_name].shape == expected_shape
            for setpoint_name in param.setpoint_full_names:
                assert data[
                    param.full_name][setpoint_name].shape == expected_shape
            _assert_parameter_data_is_identical(dataset.get_parameter_data(),
                                                data)
    _assert_parameter_data_is_identical(dataset.get_parameter_data(),
                                        dataset.cache.data())
Example #26
def varlen_array_in_scalar_dataset(experiment):
    meas = Measurement()
    scalar_param = Parameter('scalarparam', set_cmd=None)
    param = ArraySetPointParam()
    meas.register_parameter(scalar_param)
    meas.register_parameter(param, setpoints=(scalar_param,),
                            paramtype='array')
    np.random.seed(0)
    with meas.run() as datasaver:
        for i in range(1, 10):
            scalar_param.set(i)
            param.setpoints = (np.arange(i),)
            datasaver.add_result((scalar_param, scalar_param.get()),
                                 (param, np.random.rand(i)))
    try:
        yield datasaver.dataset
    finally:
        datasaver.dataset.conn.close()
Example #27
def test_datasaver_array_parameters_array(channel_array_instrument, DAC, N,
                                          bg_writing):
    """
    Test that storing array parameters inside a loop works as expected
    """
    storage_type = "array"
    array_param = channel_array_instrument.A.dummy_array_parameter
    dependency_name = 'dummy_channel_inst_ChanA_array_setpoint_param_this_setpoint'

    # Now for a real measurement

    meas = Measurement()

    meas.register_parameter(DAC.ch1, paramtype='numeric')
    meas.register_parameter(array_param,
                            setpoints=[DAC.ch1],
                            paramtype=storage_type)

    assert len(meas.parameters) == 3

    M = array_param.shape[0]
    dac_datapoints = np.linspace(0, 0.01, N)
    with meas.run(write_in_background=bg_writing) as datasaver:
        for set_v in dac_datapoints:
            datasaver.add_result((DAC.ch1, set_v),
                                 (array_param, array_param.get()))

    datadicts = _get_data_from_ds(datasaver.dataset)
    # one dependent parameter
    assert len(datadicts) == 1
    datadicts = datadicts[0]
    assert len(datadicts) == len(meas.parameters)
    for datadict in datadicts:
        if datadict['name'] == 'dummy_dac_ch1':
            expected_data = np.repeat(dac_datapoints, M)
        if datadict['name'] == dependency_name:
            expected_data = np.tile(np.linspace(5, 9, 5), N)
        if datadict[
                'name'] == 'dummy_channel_inst_ChanA_dummy_array_parameter':
            expected_data = np.empty(N * M)
            expected_data[:] = 2.
        assert_allclose(datadict['data'], expected_data)

        assert datadict['data'].shape == (N * M, )
Example #28
def test_datasaver_multidim_array(experiment, bg_writing):
    """
    Test that inserting multidim parameters as arrays works as expected
    """
    meas = Measurement(experiment)
    size1 = 10
    size2 = 15

    data_mapping = {
        name: i
        for i, name in zip(range(4), ['x1', 'x2', 'y1', 'y2'])
    }

    x1 = qc.ManualParameter('x1')
    x2 = qc.ManualParameter('x2')
    y1 = qc.ManualParameter('y1')
    y2 = qc.ManualParameter('y2')

    meas.register_parameter(x1, paramtype='array')
    meas.register_parameter(x2, paramtype='array')
    meas.register_parameter(y1, setpoints=[x1, x2], paramtype='array')
    meas.register_parameter(y2, setpoints=[x1, x2], paramtype='array')
    data = np.random.rand(4, size1, size2)
    expected = {
        'x1': data[0, :, :],
        'x2': data[1, :, :],
        'y1': data[2, :, :],
        'y2': data[3, :, :]
    }
    with meas.run(write_in_background=bg_writing) as datasaver:
        datasaver.add_result(
            (str(x1), expected['x1']), (str(x2), expected['x2']),
            (str(y1), expected['y1']), (str(y2), expected['y2']))

    datadicts = _get_data_from_ds(datasaver.dataset)
    assert len(datadicts) == 2
    for datadict_list in datadicts:
        assert len(datadict_list) == 3
        for datadict in datadict_list:
            dataindex = data_mapping[datadict['name']]
            expected_data = data[dataindex, :, :].ravel()
            assert_allclose(datadict['data'], expected_data)

            assert datadict['data'].shape == (size1 * size2, )
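
The same idea in its smallest form (a sketch, assuming an initialised database and experiment): with paramtype='array' each add_result call stores whole numpy arrays as a single row:

import numpy as np
from qcodes import ManualParameter, Measurement

x = ManualParameter('x')
y = ManualParameter('y')

meas = Measurement()
meas.register_parameter(x, paramtype='array')
meas.register_parameter(y, setpoints=[x], paramtype='array')

xvals = np.linspace(0, 1, 100)
with meas.run() as datasaver:
    datasaver.add_result((x, xvals), (y, np.sin(xvals)))

data = datasaver.dataset.get_parameter_data()['y']
assert data['x'].shape == data['y'].shape == (1, 100)
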
Example #29
def test_setting_write_period(empty_temp_db, wp):
    new_experiment('firstexp', sample_name='no sample')
    meas = Measurement()

    if isinstance(wp, str):
        with pytest.raises(ValueError):
            meas.write_period = wp
    elif wp < 1e-3:
        with pytest.raises(ValueError):
            meas.write_period = wp
    else:
        meas.write_period = wp
        assert meas._write_period == wp

        with meas.run() as datasaver:
            assert datasaver.write_period == wp
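
For reference, a sketch of write_period in ordinary use (assuming an initialised database and experiment): results are buffered in memory and written at most every write_period seconds, or when explicitly flushed; as the test shows, non-numeric values and values below 1e-3 s are rejected:

from qcodes import Measurement, Parameter

x = Parameter('x', get_cmd=None, set_cmd=None)

meas = Measurement()
meas.register_parameter(x)
meas.write_period = 5.0  # seconds between automatic writes to the database

with meas.run() as datasaver:
    for v in range(10):
        datasaver.add_result((x, v))
    # force whatever is still buffered to be written immediately
    datasaver.flush_data_to_database()
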
Example #30
def test_datasaver_array_parameters_channel(channel_array_instrument, DAC, N,
                                            storage_type, bg_writing):
    array_param = channel_array_instrument.A.dummy_array_parameter
    meas = Measurement()
    meas.register_parameter(DAC.ch1)
    meas.register_parameter(array_param,
                            setpoints=[DAC.ch1],
                            paramtype=storage_type)

    M = array_param.shape[0]
    with meas.run(write_in_background=bg_writing) as datasaver:
        for set_v in np.linspace(0, 0.01, N):
            datasaver.add_result((DAC.ch1, set_v),
                                 (array_param, array_param.get()))

    datadicts = _get_data_from_ds(datasaver.dataset)
    # one dependent parameter
    assert len(datadicts) == 1
    datadicts = datadicts[0]
    assert len(datadicts) == len(meas.parameters)
    for datadict in datadicts:
        assert datadict['data'].shape == (N * M, )