Example #1
    def test_loop_measure_all_channels(self):
        p1 = ManualParameter(name='p1', vals=Numbers(-10, 10))
        loop = Loop(p1.sweep(-10, 10, 1),
                    1e-6).each(self.instrument.channels.temperature)
        data = loop.run()
        self.assertEqual(data.p1_set.ndarray.shape, (21, ))
        self.assertEqual(len(data.arrays), 7)
        for chan in ['A', 'B', 'C', 'D', 'E', 'F']:
            self.assertEqual(
                getattr(data, 'testchanneldummy_Chan{}_temperature'.format(
                    chan)).ndarray.shape, (21, ))
Example #2
    def test_bare_function(self):
        # not a use case we want to promote, but it's there...
        p = ManualParameter('test')

        def doubler(x):
            p.set(x * 2)

        f = Function('f', call_cmd=doubler, args=[Numbers(-10, 10)])

        f(4)
        self.assertEqual(p.get(), 8)
        with self.assertRaises(ValueError):
            f(20)
Example #3
def test_szip_finiteness():
    """
    Test that if only parameters and/or functions are given to szip, we do not end up in infinite loops but instead
    iterate once returning the value of the parameter/function
    """
    x = ManualParameter("x")
    y = ManualParameter("y")

    x(0)
    y(1)

    for count, i in enumerate(szip(x, y)):
        assert i["x"] == x()
        assert i["y"] == y()
        assert count == 0
Example #4
    def testLoopCombinedParameterTwice(self, npoints, x_start_stop,
                                       y_start_stop, z_start_stop):
        x_set = np.linspace(x_start_stop[0], x_start_stop[1], npoints)
        y_set = np.linspace(y_start_stop[0], y_start_stop[1], npoints)
        z_set = np.linspace(z_start_stop[0], z_start_stop[1], npoints)
        setpoints = np.hstack((x_set.reshape(npoints, 1),
                               y_set.reshape(npoints, 1),
                               z_set.reshape(npoints, 1)))
        parameters = [ManualParameter(name) for name in ["X", "Y", "Z"]]
        sweep_values = combine(*parameters, name="combined").sweep(setpoints)

        def wrapper():
            counter = 0

            def inner():
                nonlocal counter
                counter += 1
                return counter

            return inner

        self.dmm.voltage.get = wrapper()
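        # the patched getter returns 1, 2, 3, ... on successive reads, so the
        # two measured voltage columns below receive interleaved odd and even
        # values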
        loop = Loop(sweep_values).each(self.dmm.voltage, self.dmm.voltage)
        data = loop.run(quiet=True)
        np.testing.assert_array_equal(data.arrays['X'].ndarray, x_set)
        np.testing.assert_array_equal(data.arrays['Y'].ndarray, y_set)
        np.testing.assert_array_equal(data.arrays['Z'].ndarray, z_set)
        np.testing.assert_array_equal(data.arrays['dmm_voltage_0'].ndarray,
                                      np.arange(1, npoints * 2, 2))
        np.testing.assert_array_equal(data.arrays['dmm_voltage_1'].ndarray,
                                      np.arange(2, npoints * 2 + 1, 2))
Example #5
    def testLoopCombinedParameterPrintTask(self, npoints, x_start_stop,
                                           y_start_stop, z_start_stop):

        x_set = np.linspace(x_start_stop[0], x_start_stop[1], npoints)
        y_set = np.linspace(y_start_stop[0], y_start_stop[1], npoints)
        z_set = np.linspace(z_start_stop[0], z_start_stop[1], npoints)
        setpoints = np.hstack((x_set.reshape(npoints, 1),
                               y_set.reshape(npoints, 1),
                               z_set.reshape(npoints, 1)))

        parameters = [ManualParameter(name) for name in ["X", "Y", "Z"]]

        sweep_values = combine(*parameters, name="combined").sweep(setpoints)

        def ataskfunc():
            a = 1 + 1

        def btaskfunc():
            b = 1 + 2

        atask = Task(ataskfunc)
        btask = Task(btaskfunc)

        loop = Loop(sweep_values).each(atask, btask)
        data = loop.run(quiet=True)
        np.testing.assert_array_equal(data.arrays['X'].ndarray, x_set)
        np.testing.assert_array_equal(data.arrays['Y'].ndarray, y_set)
        np.testing.assert_array_equal(data.arrays['Z'].ndarray, z_set)
Example #6
    def testLoopCombinedParameterInside(self, npoints, npoints_outer,
                                        x_start_stop, y_start_stop,
                                        z_start_stop):
        x_set = np.linspace(x_start_stop[0], x_start_stop[1], npoints_outer)
        y_set = np.linspace(y_start_stop[0], y_start_stop[1], npoints)
        z_set = np.linspace(z_start_stop[0], z_start_stop[1], npoints)

        setpoints = np.hstack((y_set.reshape(npoints, 1),
                               z_set.reshape(npoints, 1)))

        parameters = [ManualParameter(name) for name in ["X", "Y", "Z"]]
        sweep_values = combine(parameters[1], parameters[2],
                               name="combined").sweep(setpoints)

        def ataskfunc():
            a = 1 + 1

        def btaskfunc():
            b = 1 + 2

        atask = Task(ataskfunc)
        btask = Task(btaskfunc)

        def wrapper():
            counter = 0

            def inner():
                nonlocal counter
                counter += 1
                return counter

            return inner

        self.dmm.voltage.get = wrapper()
        loop = Loop(
            parameters[0].sweep(x_start_stop[0],
                                x_start_stop[1],
                                num=npoints_outer)).loop(sweep_values).each(
                                    self.dmm.voltage, atask,
                                    self.dmm.somethingelse, self.dmm.voltage,
                                    btask)
        data = loop.run(quiet=True)
        np.testing.assert_array_equal(data.arrays['X_set'].ndarray, x_set)
        np.testing.assert_array_equal(
            data.arrays['Y'].ndarray,
            np.repeat(y_set.reshape(1, npoints), npoints_outer, axis=0))
        np.testing.assert_array_equal(
            data.arrays['Z'].ndarray,
            np.repeat(z_set.reshape(1, npoints), npoints_outer, axis=0))

        np.testing.assert_array_equal(
            data.arrays['dmm_voltage_0'].ndarray,
            np.arange(1, npoints * npoints_outer * 2,
                      2).reshape(npoints_outer, npoints))
        np.testing.assert_array_equal(
            data.arrays['dmm_voltage_3'].ndarray,
            np.arange(2, npoints * npoints_outer * 2 + 1,
                      2).reshape(npoints_outer, npoints))
        np.testing.assert_array_equal(data.arrays['dmm_somethingelse'].ndarray,
                                      np.ones((npoints_outer, npoints)))
Example #7
    def test_variable_sized_return_values_hard_sweep_soft_avg(self):
        """
        Tests a detector that acquires data in chunks of varying sizes
        """
        self.MC.soft_avg(10)
        counter_param = ManualParameter('counter', initial_value=0)

        def return_variable_size_values():
            idx = counter_param() % 3
            counter_param(counter_param()+1)

            if idx == 0:
                return np.arange(0, 7)
            elif idx == 1:
                return np.arange(7, 11)
            elif idx == 2:
                return np.arange(11, 30)

        sweep_pts = np.arange(30)

        d = det.Function_Detector(get_function=return_variable_size_values,
                                  value_names=['Variable size counter'],
                                  detector_control='hard')
        self.MC.set_sweep_function(None_Sweep(sweep_control='hard'))
        self.MC.set_sweep_points(sweep_pts)
        self.MC.set_detector_function(d)
        dat = self.MC.run('varying_chunk_size')
        dset = dat["dset"]
        x = dset[:, 0]
        y = dset[:, 1]

        self.assertEqual(np.shape(dset), (len(sweep_pts), 2))
        np.testing.assert_array_almost_equal(x, sweep_pts)
        np.testing.assert_array_almost_equal(y, sweep_pts)
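        # soft_avg(10) repeats the full 30-point hard sweep ten times, so
        # 10 * 30 values are acquired in total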
        self.assertEqual(self.MC.total_nr_acquired_values, 10*30)
Example #8
def measure_asm_files(asm_filenames, config_filename, qubit, MC):
    """
    Takes one or more asm_files as input and runs them on the hardware
    """
    qubit.prepare_for_timedomain()
    CBox = qubit.CBox  # The qubit object contains a reference to the CBox

    counter_param = ManualParameter('name_ctr', initial_value=0)

    if len(asm_filenames) > 1:
        MC.soft_avg(len(asm_filenames))
        nr_hard_averages = 256
    else:
        MC.soft_avg(8)
        nr_hard_averages = qubit.RO_acq_averages() // MC.soft_avg()

    if qubit.cal_pt_zero() is not None:
        cal_pts = (qubit.cal_pt_zero(), qubit.cal_pt_one())
    else:
        cal_pts = None

    prepare_function_kwargs = {
        'counter_param': counter_param,
        'asm_filenames': asm_filenames,
        'CBox': CBox
    }

    detector = CBox_int_avg_func_prep_det_CC(
        CBox,
        prepare_function=load_range_of_asm_files,
        prepare_function_kwargs=prepare_function_kwargs,
        nr_averages=nr_hard_averages,
        cal_pts=cal_pts)

    measurement_points = extract_msmt_pts_from_config(config_filename)

    s = swf.None_Sweep()
    if 'rb' in asm_filenames[0]:
        s.parameter_name = 'Number of Cliffords'
        s.unit = '#'
    MC.set_sweep_function(s)
    MC.set_sweep_points(measurement_points)
    MC.set_detector_function(detector)

    MC.run('Demo {}'.format(os.path.split(asm_filenames[0])[-1]))
    if 'rb' in asm_filenames[0]:
        a = ma.RandomizedBenchmarking_Analysis(label='Demo rb',
                                               rotate_and_normalize=False,
                                               close_fig=True)
        p = a.fit_res.best_values['p']
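        # single-qubit RB: the depolarizing parameter p gives an average
        # Clifford fidelity Fcl = p + (1 - p)/2, and the per-gate fidelity
        # assumes ~1.875 physical gates per Clifford on average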
        Fcl = p + (1 - p) / 2
        Fg = Fcl**(1 / 1.875)
        print('Clifford Fidelity:\t{:.4f}\nGate Fidelity: \t\t{:.4f}'.format(
            Fcl, Fg))
        Ncl = np.arange(a.sweep_points[0], a.sweep_points[-1])
        RB_fit = ma.fit_mods.RandomizedBenchmarkingDecay(
            Ncl, **a.fit_res.best_values)
        MC.main_QtPlot.add(x=Ncl, y=RB_fit, subplot=0)
Example #9
    def setUp(self):
        self.target_name = 'target_parameter'
        self.target_label = 'Target Parameter'
        self.target_unit = 'V'

        self.target = ManualParameter(name=self.target_name, label=self.target_label,
                                      unit=self.target_unit, initial_value=1.0,
                                      instrument=self.parent_instrument)
        self.parent_instrument.add_parameter(self.target)
        self.scaler = ScaledParameter(self.target, division=1)
Example #10
def test_szip_measure_prior_to_set():
    """
    We can use szip to perform a measurement before setting sweep set points.  Test this scenario
    """
    x = ManualParameter("x")
    v = range(1, 10)
    m = ManualParameter("m")
    m.get = lambda: 2 * x()

    x(0)
    count = 0
    previous_x = x()

    for count, i in enumerate(szip(m, sweep(x, v))):
        # note that at this point, x should already have been incremented
        assert i["m"] == 2 * previous_x
        assert count < len(v)
        previous_x = x()

    assert count == len(v) - 1
Example #11
    def test_loop_measure_channels_by_name(self, values):
        p1 = ManualParameter(name='p1', vals=Numbers(-10, 10))
        for i in range(4):
            self.instrument.channels[i].temperature(values[i])
        loop = Loop(p1.sweep(-10, 10, 1),
                    1e-6).each(self.instrument.A.temperature,
                               self.instrument.B.temperature,
                               self.instrument.C.temperature,
                               self.instrument.D.temperature)
        data = loop.run()
        self.assertEqual(data.p1_set.ndarray.shape, (21, ))
        for i, chan in enumerate(['A', 'B', 'C', 'D']):
            self.assertEqual(
                getattr(data, 'testchanneldummy_Chan{}_temperature'.format(
                    chan)).ndarray.shape, (21, ))
            self.assertEqual(
                getattr(data, 'testchanneldummy_Chan{}_temperature'.format(
                    chan)).ndarray.max(), values[i])
            self.assertEqual(
                getattr(data, 'testchanneldummy_Chan{}_temperature'.format(
                    chan)).ndarray.min(), values[i])
Example #12
def test_snapshot_creation_for_types_not_supported_by_builtin_json(experiment):
    """
    Test that `Measurement`/`Runner`/`DataSaver` infrastructure
    successfully dumps station snapshots in JSON format in cases when the
    snapshot contains data of types that are not supported by python builtin
    `json` module, for example, numpy scalars.
    """
    p1 = ManualParameter('p_np_int32', initial_value=numpy.int32(5))
    p2 = ManualParameter('p_np_float16', initial_value=numpy.float16(5.0))
    p3 = ManualParameter('p_np_array',
                         initial_value=numpy.meshgrid((1, 2), (3, 4)))
    p4 = ManualParameter('p_np_bool', initial_value=numpy.bool_(False))

    station = Station(p1, p2, p3, p4)

    measurement = Measurement(experiment, station)

    # we need at least 1 parameter to be able to run the measurement
    measurement.register_custom_parameter('dummy')

    with measurement.run() as data_saver:
        # we do this in order to create a snapshot of the station and add it
        # to the database
        pass

    snapshot = data_saver.dataset.snapshot

    assert 5 == snapshot['station']['parameters']['p_np_int32']['value']
    assert 5 == snapshot['station']['parameters']['p_np_int32']['raw_value']

    assert 5.0 == snapshot['station']['parameters']['p_np_float16']['value']
    assert 5.0 == snapshot['station']['parameters']['p_np_float16'][
        'raw_value']

    lst = [[[1, 2], [1, 2]], [[3, 3], [4, 4]]]
    assert lst == snapshot['station']['parameters']['p_np_array']['value']
    assert lst == snapshot['station']['parameters']['p_np_array']['raw_value']

    assert False is snapshot['station']['parameters']['p_np_bool']['value']
    assert False is snapshot['station']['parameters']['p_np_bool']['raw_value']
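For context, a standalone sketch (not part of the test) of why the builtin json module needs a numpy-aware encoder for such snapshots:

import json
import numpy

# the builtin encoder rejects numpy scalars such as numpy.int32
try:
    json.dumps({'value': numpy.int32(5)})
except TypeError as err:
    print(err)  # e.g. "Object of type int32 is not JSON serializable"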
Example #13
class TestMeasure(TestCase):
    def setUp(self):
        self.p1 = ManualParameter('P1', initial_value=1)

    def test_simple_scalar(self):
        data = Measure(self.p1).run_temp()

        self.assertEqual(data.single_set.tolist(), [0])
        self.assertEqual(data.P1.tolist(), [1])
        self.assertEqual(len(data.arrays), 2, data.arrays)

        self.assertNotIn('loop', data.metadata)

        meta = data.metadata['measurement']
        self.assertEqual(meta['__class__'], 'qcodes.measure.Measure')
        self.assertEqual(len(meta['actions']), 1)
        self.assertFalse(meta['background'])
        self.assertFalse(meta['use_data_manager'])
        self.assertFalse(meta['use_threads'])

        ts_start = datetime.strptime(meta['ts_start'], '%Y-%m-%d %H:%M:%S')
        ts_end = datetime.strptime(meta['ts_end'], '%Y-%m-%d %H:%M:%S')
        self.assertGreaterEqual(ts_end, ts_start)

    def test_simple_array(self):
        data = Measure(MultiGetter(arr=(1.2, 3.4))).run_temp()

        self.assertEqual(data.index0.tolist(), [0, 1])
        self.assertEqual(data.arr.tolist(), [1.2, 3.4])
        self.assertEqual(len(data.arrays), 2, data.arrays)

    def test_array_and_scalar(self):
        self.p1.set(42)
        data = Measure(MultiGetter(arr=(5, 6)), self.p1).run_temp()

        self.assertEqual(data.single_set.tolist(), [0])
        self.assertEqual(data.P1.tolist(), [42])
        self.assertEqual(data.index0.tolist(), [0, 1])
        self.assertEqual(data.arr.tolist(), [5, 6])
        self.assertEqual(len(data.arrays), 4, data.arrays)
Example #14
def test_inferred():

    x = ManualParameter("x", unit="V")

    @setter([("xmv", "mV")], inferred_parameters=[("x", "V")])
    def xsetter(millivolt_value):
        volt_value = millivolt_value / 1000.0  # from mV to V
        x.set(volt_value)
        return volt_value

    m = ManualParameter("m")
    m.get = lambda: np.sin(x())

    sweep_values = np.linspace(-1000, 1000, 100)  # We sweep in mV

    sweep_object = nest(sweep(xsetter, sweep_values), m)

    experiment = new_experiment("sweep_measure", sample_name="sine")
    station = Station()
    meas = SweepMeasurement(exp=experiment, station=station)
    meas.register_sweep(sweep_object)

    with meas.run() as datasaver:
        for data in sweep_object:
            datasaver.add_result(*data.items())

    data_set = datasaver._dataset
    assert data_set.paramspecs["x"].depends_on == ""
    assert data_set.paramspecs["x"].inferred_from == "xmv"
    assert data_set.paramspecs["xmv"].depends_on == ""
    assert data_set.paramspecs["m"].depends_on == "x"

    expected_xmv = [[xi] for xi in sweep_values]
    expected_x = [[xi / 1000] for xi in sweep_values]

    assert data_set.get_data('xmv') == expected_xmv
    assert data_set.get_data('x') == expected_x
Example #15
def test_simple():
    x = ManualParameter("x")
    sweep_values = np.linspace(-1, 1, 100)

    m = ManualParameter("m")
    m.get = lambda: np.sin(x())

    sweep_object = nest(sweep(x, sweep_values), m)

    experiment = new_experiment("sweep_measure", sample_name="sine")
    station = Station()
    meas = SweepMeasurement(exp=experiment, station=station)
    meas.register_sweep(sweep_object)

    with meas.run() as datasaver:
        for data in sweep_object:
            datasaver.add_result(*data.items())

    data_set = datasaver._dataset
    assert data_set.paramspecs["x"].depends_on == ""
    assert data_set.paramspecs["m"].depends_on == "x"

    expected_x = [[xi] for xi in sweep_values]
    assert data_set.get_data('x') == expected_x
Example #16
def test_nest():
    n_sample_points = 100
    x = ManualParameter("x")
    sweep_values_x = np.linspace(-1, 1, n_sample_points)

    y = ManualParameter("y")
    sweep_values_y = np.linspace(-1, 1, n_sample_points)

    m = ManualParameter("m")
    m.get = lambda: np.sin(x())

    n = ManualParameter("n")
    n.get = lambda: np.cos(x()) + 2 * np.sin(y())

    sweep_object = sweep(x, sweep_values_x)(m, sweep(y, sweep_values_y)(n))

    experiment = new_experiment("sweep_measure", sample_name="sine")
    station = Station()
    meas = SweepMeasurement(exp=experiment, station=station)
    meas.register_sweep(sweep_object)

    with meas.run() as datasaver:
        for data in sweep_object:
            datasaver.add_result(*data.items())

    data_set = datasaver._dataset
    assert data_set.paramspecs["x"].depends_on == ""
    assert data_set.paramspecs["y"].depends_on == ""
    assert data_set.paramspecs["m"].depends_on == "x"
    assert data_set.paramspecs["n"].depends_on == "x, y"

    data_x = data_set.get_data('x')
    data_y = data_set.get_data('y')

    assert data_x[::n_sample_points + 1] == [[xi] for xi in sweep_values_x]
    assert data_y[::n_sample_points + 1] == [[None] for _ in sweep_values_x]

    coordinate_layout = itertools.product(sweep_values_x, sweep_values_y)
    expected_x, expected_y = zip(*coordinate_layout)
    assert [ix for c, ix in enumerate(data_x)
            if c % (n_sample_points + 1)] == [[xi] for xi in expected_x]

    assert [iy for c, iy in enumerate(data_y)
            if c % (n_sample_points + 1)] == [[yi] for yi in expected_y]
Example #17
    def test_progress_callback(self):

        progress_param = ManualParameter('progress', initial_value=0)

        def set_progress_param_callable(progress):
            progress_param(progress)

        self.MC.on_progress_callback(set_progress_param_callable)

        self.assertEqual(progress_param(), 0)
        sweep_pts = np.linspace(0, 10, 30)
        self.MC.set_sweep_function(None_Sweep())
        self.MC.set_sweep_points(sweep_pts)
        self.MC.set_detector_function(det.Dummy_Detector_Soft())
        dat = self.MC.run('1D_soft')

        self.assertEqual(progress_param(), 100)
Example #18
    def test_variable_gain(self):
        test_value = 5

        initial_gain = 2
        variable_gain_name = 'gain'
        gain = ManualParameter(name=variable_gain_name, initial_value=initial_gain)
        self.scaler.gain = gain
        self.scaler(test_value)

        assert self.scaler() == test_value
        assert self.target() == test_value / initial_gain
        assert self.scaler.division == 1/initial_gain

        second_gain = 7
        gain(second_gain)
        # the target value only changes when the scaler is set, not when the
        # gain/division changes
        assert self.target() == test_value / initial_gain
        self.scaler(test_value)
        assert self.target() == test_value / second_gain
        assert self.scaler.division == 1 / second_gain

        assert self.scaler.metadata['variable_multiplier'] == variable_gain_name
Example #19
def main():
    x = ManualParameter("x", unit="V")
    y = ManualParameter("y", unit="V")

    m = ManualParameter("m", unit="A")
    m.get = lambda: x()**2

    n = ManualParameter("n", unit="A")
    n.get = lambda: x() - y()**2 + 16

    setup = [(lambda: None, tuple())]
    cleanup = [(lambda: None, tuple())]

    result = do_experiment("cool_experiment/my_sample",
                           setup,
                           sweep(x, [0, 1, 2])(m),
                           cleanup,
                           return_format=["data_set_path"])

    data_set_path = result[0]
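    # with return_format=["data_set_path"], the single returned element is the
    # path of the saved data set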

    data = get_results_from_db_path(data_set_path, return_as_dict=True)

    print(data)
Example #20
    def setUp(self):
        self.c0 = ManualParameter('c0', vals=Numbers(-10, 10))
        self.c1 = ManualParameter('c1')

        self.getter = StandardParameter('c2', get_cmd=lambda: 42)
Example #21
    @classmethod
    def setUpClass(cls):
        cls.p1 = ManualParameter('p1', vals=Numbers(-10, 10))
        cls.p2 = ManualParameter('p2', vals=Numbers(-10, 10))
        cls.p3 = ManualParameter('p3', vals=Numbers(-10, 10))
        Station().set_measurement(cls.p2, cls.p3)
Example #22
class Measure(Metadatable):
    """
    Create a DataSet from a single (non-looped) set of actions.

    Args:
        *actions (any): sequence of actions to perform. Any action that is
            valid in a ``Loop`` can be used here. If an action is a gettable
            ``Parameter``, its output will be included in the DataSet.
            Scalars returned by an action will be saved as length-1 arrays,
            with a dummy setpoint for consistency with other DataSets.
    """
    dummy_parameter = ManualParameter(name='single',
                                      label='Single Measurement')

    def __init__(self, *actions):
        super().__init__()
        self._dummyLoop = Loop(self.dummy_parameter[0]).each(*actions)

    def run_temp(self, **kwargs):
        """
        Wrapper to run this measurement as a temporary data set
        """
        return self.run(quiet=True,
                        data_manager=False,
                        location=False,
                        **kwargs)

    def run(self,
            use_threads=False,
            quiet=False,
            data_manager=USE_MP,
            station=None,
            **kwargs):
        """
        Run the actions in this measurement and return their data as a DataSet

        Args:
            quiet (Optional[bool]): Set True to not print anything except
                errors. Default False.

            station (Optional[Station]): the ``Station`` this measurement
                pertains to. Defaults to ``Station.default`` if one is defined.
                Only used to supply metadata.

            use_threads (Optional[bool]): whether to parallelize ``get``
                operations using threads. Default False.

            Other kwargs are passed along to data_set.new_data. The key ones
            are:

            location (Optional[Union[str, False]]): the location of the
                DataSet, a string whose meaning depends on formatter and io,
                or False to only keep in memory. May be a callable to provide
                automatic locations. If omitted, will use the default
                DataSet.location_provider

            name (Optional[str]): if location is default or another provider
                function, name is a string to add to location to make it more
                readable/meaningful to users

            formatter (Optional[Formatter]): knows how to read and write the
                file format. Default can be set in DataSet.default_formatter

            io (Optional[io_manager]): knows how to connect to the storage
                (disk vs cloud etc)

        Returns:
            a DataSet object containing the results of the measurement
        """

        # background is not configurable, would be weird to run this in the bg
        background = False

        data_set = self._dummyLoop.get_data_set(data_manager=data_manager,
                                                **kwargs)

        # set the DataSet to local for now so we don't save it, since
        # we're going to massage it afterward
        original_location = data_set.location
        data_set.location = False

        # run the measurement as if it were a Loop
        self._dummyLoop.run(background=background,
                            use_threads=use_threads,
                            station=station,
                            quiet=True)

        # look for arrays that are unnecessarily nested, and un-nest them
        all_unnested = True
        for array in data_set.arrays.values():
            if array.ndim == 1:
                if array.is_setpoint:
                    dummy_setpoint = array
                else:
                    # we've found a scalar - so keep the dummy setpoint
                    all_unnested = False
            else:
                # The original return was an array, so take off the extra dim.
                # (This ensures the outer dim length was 1, otherwise this
                # will raise a ValueError.)
                array.ndarray.shape = array.ndarray.shape[1:]

                # TODO: DataArray.shape masks ndarray.shape, and a user *could*
                # change it, thinking they were reshaping the underlying array,
                # but this would a) not actually reach the ndarray right now,
                # and b) if it *did* and the array was reshaped, this array
                # would be out of sync with its setpoint arrays, so bad things
                # would happen. So we probably want some safeguards in place
                # against this
                array.shape = array.ndarray.shape

                array.set_arrays = array.set_arrays[1:]

                array.init_data()

        # Do we still need the dummy setpoint array at all?
        if all_unnested:
            del data_set.arrays[dummy_setpoint.array_id]
            if hasattr(data_set, 'action_id_map'):
                del data_set.action_id_map[dummy_setpoint.action_indices]

        # now put back in the DataSet location and save it
        data_set.location = original_location
        data_set.write()

        # metadata: ActiveLoop already provides station snapshot, but also
        # puts in a 'loop' section that we need to replace with 'measurement'
        # but we use the info from 'loop' to ensure consistency and avoid
        # duplication.
        LOOP_SNAPSHOT_KEYS = [
            'background', 'ts_start', 'ts_end', 'use_data_manager',
            'use_threads'
        ]
        data_set.add_metadata({
            'measurement':
            {k: data_set.metadata['loop'][k]
             for k in LOOP_SNAPSHOT_KEYS}
        })
        del data_set.metadata['loop']

        # actions are included in self.snapshot() rather than in
        # LOOP_SNAPSHOT_KEYS because they are useful if someone just
        # wants a local snapshot of the Measure object
        data_set.add_metadata({'measurement': self.snapshot()})

        data_set.save_metadata()

        if not quiet:
            print(repr(data_set))
            print(datetime.now().strftime('acquired at %Y-%m-%d %H:%M:%S'))

        return data_set

    def snapshot_base(self, update=False):
        return {
            '__class__': full_class(self),
            'actions': _actions_snapshot(self._dummyLoop.actions, update)
        }
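A minimal usage sketch of the class above, assuming a hypothetical gettable parameter named meter:

# sketch only: 'meter' stands in for any gettable parameter
meter = ManualParameter('meter', initial_value=0.5)
data = Measure(meter).run_temp()  # run as a temporary DataSet, like the tests above
print(data.meter.tolist())        # [0.5]; a dummy 'single' setpoint is also stored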
Example #23
    return DS
DS = live_plotting()
#def add_T1exp_metadata(data):
#        
#        data.metadata['Parameters'] = {'Nrep': 10, 't_empty': 2, 't_load': 2.4, 't_read': 2.2}
#        data.write(write_metadata=True)
#
#
#add_T1exp_metadata(data)

#datatata = LP.run(background=False)
#%%

gate = ManualParameter('gate', vals=Numbers(-10, 10))
frequency = ManualParameter('frequency', vals=Numbers(-10, 10))
amplitude = ManualParameter('amplitude', vals=Numbers(-10, 10))
# a manual parameter returns a value that has been set
# so fix it to a value for this example
amplitude.set(-1)

combined = qc.combine(gate, frequency, name="gate_frequency")
combined.__dict__.items()

a = [1,2,3]
b = [c for c in a]
b = {'ele_%d'%i: i for i in a}
b = {'ele_{}'.format(i): i for i in a}
#
#Sweep = Loop(sweep_values = [1,2,3,4,5,6,8,77,32,44,564])
Example #24
    def setUp(self):
        self.p1 = ManualParameter('P1', initial_value=1)
Example #25
    def test_base(self):
        p = ManualParameter('p')
        with self.assertRaises(NotImplementedError):
            iter(SweepValues(p))
Example #26
def mixer_skewness_calibration_5014(SH, source, station,
                                    MC=None,
                                    QI_amp_ratio=None, IQ_phase=None,
                                    frequency=None, f_mod=10e6,
                                    I_ch=1, Q_ch=2,
                                    name='mixer_skewness_calibration_5014'):
    '''
    Loads a cosine and a sine waveform into the specified I and Q channels of
    the Tektronix 5014 AWG (taken from station.pulsar.AWG).
    By monitoring the signal at the frequency of the spurious sideband, the
    phase skewness and amplitude skewness that minimize that signal are found;
    these correspond to the mixer skewness.

    Inputs:
        SH              (instrument)
        source          (instrument)     MW source used for driving
        station         (qcodes station) Contains the AWG and pulsar sequencer
        QI_amp_ratio    (parameter)      qcodes parameter
        IQ_phase        (parameter)
        frequency       (float, Hz)      Spurious sideband freq: f_source - f_mod
        f_mod           (float, Hz)      Modulation frequency
        I_ch/Q_ch       (int or str)     Specifies the AWG channels

    Returns:
        phi, alpha      the coefficients that go in the predistortion matrix.
    For the spurious sideband:
        alpha = 1/QI_amp_optimal
        phi = -IQ_phase_optimal
    For details, see Leo's notes on mixer skewness calibration in the docs.
    '''
    if frequency is None:
        # Corresponds to the frequency where to minimize with the SH
        frequency = source.frequency.get() - f_mod
    if QI_amp_ratio is None:
        QI_amp_ratio = ManualParameter('QI_amp', initial_value=1)
    if IQ_phase is None:
        IQ_phase = ManualParameter('IQ_phase', unit='deg', initial_value=0)
    if MC is None:
        MC = station.MC
    if type(I_ch) is int:
        I_ch = 'ch{}'.format(I_ch)
    if type(Q_ch) is int:
        Q_ch = 'ch{}'.format(Q_ch)

    d = det.SH_mixer_skewness_det(frequency, QI_amp_ratio, IQ_phase, SH,
                                  f_mod=f_mod,
                                  I_ch=I_ch, Q_ch=Q_ch, station=station)
    S1 = pw.wrap_par_to_swf(QI_amp_ratio)
    S2 = pw.wrap_par_to_swf(IQ_phase)

    ad_func_pars = {'adaptive_function': nelder_mead,
                    'x0': [1.0, 0.0],
                    'initial_step': [.15, 10],
                    'no_improv_break': 12,
                    'minimize': True,
                    'maxiter': 500}
    MC.set_sweep_functions([S1, S2])
    MC.set_detector_function(d)
    MC.set_adaptive_function_parameters(ad_func_pars)
    MC.run(name=name, mode='adaptive')
    a = ma.OptimizationAnalysis()
    # phi and alpha are the coefficients that go in the predistortion matrix
    alpha = 1/a.optimization_result[0][0]
    phi = -1*a.optimization_result[0][1]

    return phi, alpha
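A hypothetical call sketch for the routine above; the instrument and measurement-control objects (sig_hound, mw_source, station, MC) are assumptions and must already be set up:

# sketch only: sig_hound, mw_source, station and MC are assumed to exist
phi, alpha = mixer_skewness_calibration_5014(SH=sig_hound, source=mw_source,
                                             station=station, MC=MC,
                                             f_mod=10e6, I_ch=1, Q_ch=2)
# alpha and phi can then be entered into the predistortion matrix to suppress
# the spurious sideband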
Example #27
# import numpy as np
# import time
import logging as log
from typing import List, Union

# from pycqed.measurement import detector_functions as det
# from pycqed.measurement import sweep_functions as swf
from pycqed.measurement import optimization as opt

from qcodes.instrument.parameter import ManualParameter
# from pycqed.analysis.analysis_toolbox import normalize_TD_data
# from pycqed.measurement.openql_experiments import multi_qubit_oql as mqo
# from pycqed.analysis_v2 import measurement_analysis as ma2
# from pycqed.measurement.openql_experiments import clifford_rb_oql as cl_oql

counter_param = ManualParameter('counter', unit='#')
counter_param(0)


def conventional_CZ_cost_func(device,
                              FL_LutMan_QR,
                              MC,
                              prepare_for_timedomain=True,
                              disable_metadata=True,
                              qubits=['X', 'D4'],
                              flux_codeword_park=None,
                              flux_codeword='cz',
                              include_single_qubit_phase_in_cost=False,
                              include_leakage_in_cost=True,
                              measure_two_conditional_oscillations=False,
                              fixed_max_nr_of_repeated_gates=None,