Beispiel #1
0
def plot_wave_raw(wave_raw, samplerate=None, station=None):
    ''' Plot the raw wave

    Arguments:
        wave_raw (array): raw data which represents the waveform
        samplerate (float or None): sample period in seconds; if None it is
            derived from the station's AWG clock
        station (object or None): station holding the AWG; only consulted
            when samplerate is None

    Returns:
        plot (QtPlot): the plot showing the data

    Raises:
        Exception: if neither samplerate nor station is provided
    '''
    if samplerate is None:
        if station is None:
            raise Exception('There is no station')
        # derive the sample period from the AWG clock frequency
        samplerate = 1 / station.awg.getattr('AWG_clock')
    # time axis: one point per sample, spaced by the sample period
    horz_var = np.arange(0, len(wave_raw) * samplerate, samplerate)
    x = DataArray(name='time(s)',
                  label='time (s)',
                  preset_data=horz_var,
                  is_setpoint=True)
    y = DataArray(label='sweep value (mV)',
                  preset_data=wave_raw,
                  set_arrays=(x, ))
    plot = QtPlot(x, y)

    return plot
Beispiel #2
0
    def test_nest_empty(self):
        """Nesting an empty DataArray records metadata lazily and forbids
        further nesting once real data has been initialized."""
        data = DataArray()

        self.assertEqual(data.shape, ())

        fake_setpoints = 'not really an array but we don\'t check'
        fake_setpoints2 = 'another one'

        data.nest(2, action_index=44, set_array=fake_setpoints)
        data.nest(3, action_index=66, set_array=fake_setpoints2)

        # nesting only records metadata; the ndarray is created lazily
        self.assertIsNone(data.ndarray)

        # shape, indices and set_arrays grow outward with each nest call
        self.assertEqual(data.shape, (3, 2))
        self.assertEqual(data.action_indices, (66, 44))
        self.assertEqual(data.set_arrays, (fake_setpoints2, fake_setpoints))

        data.init_data()
        self.assertEqual(data.ndarray.shape, (3, 2))

        # once real (non-preset) data exists, further nesting is forbidden
        with self.assertRaises(RuntimeError):
            data.nest(4)
Beispiel #3
0
    def test_full_write(self):
        """Round-trip a 1D DataSet through GNUPlotFormat: write it, verify
        the file contents, check that injected comment lines and unquoted
        labels are tolerated on read, and exercise error paths on bad reads.
        """
        formatter = GNUPlotFormat()
        location = self.locations[0]
        data = DataSet1D(name="test_full_write", location=location)

        formatter.write(data, data.io, data.location)

        with open(location + '/x_set.dat') as f:
            self.assertEqual(f.read(), file_1d())

        # check that we can add comment lines randomly into the file
        # as long as it's after the first three lines, which are comments
        # with well-defined meaning,
        # and that we can un-quote the labels
        lines = file_1d().split('\n')
        lines[1] = lines[1].replace('"', '')
        lines[3:3] = ['# this data is awesome!']
        lines[6:6] = ['# the next point is my favorite.']
        with open(location + '/x_set.dat', 'w') as f:
            f.write('\n'.join(lines))

        # normally this would be just done by data2 = load_data(location)
        # but we want to work directly with the Formatter interface here
        data2 = DataSet(location=location)
        formatter.read(data2)

        self.checkArraysEqual(data2.x_set, data.x_set)
        self.checkArraysEqual(data2.y, data.y)

        # data has been saved
        self.assertEqual(data.y.last_saved_index, 4)
        # data2 has been read back in, should show the same
        # last_saved_index
        self.assertEqual(data2.y.last_saved_index, 4)

        # while we're here, check some errors on bad reads

        # first: trying to read into a dataset that already has the
        # wrong size
        x = DataArray(name='x_set', label='X', preset_data=(1., 2.))
        y = DataArray(name='y',
                      label='Y',
                      preset_data=(3., 4.),
                      set_arrays=(x, ))
        data3 = new_data(arrays=(x, y), location=location + 'XX')
        # initially give it a different location so we can make it without
        # error, then change back to the location we want.
        data3.location = location
        with LogCapture() as logs:
            formatter.read(data3)

        # the size mismatch is logged as a ValueError rather than raised
        self.assertTrue('ValueError' in logs.value, logs.value)

        # no problem reading again if only data has changed, it gets
        # overwritten with the disk copy
        data2.x_set[2] = 42
        data2.y[2] = 99
        formatter.read(data2)
        self.assertEqual(data2.x_set[2], 3)
        self.assertEqual(data2.y[2], 5)
Beispiel #4
0
def makeDataSet2Dplain(xname, x, yname, y, zname='measured', z=None, xunit=None,
                       yunit=None, zunit=None, location=None, loc_record=None):
    """ Make DataSet with one 2D array and two setpoint arrays

    Arguments:
        xname (string): the name of the setpoint x array.
        x (array or ndarray or list): the x setpoint data.
        yname (string): the name of the setpoint y array.
        y (array or ndarray or list): the y setpoint data.
        zname (str or list of str): the name of the measured array.
        z (array or list or None): optional the measured data.
        xunit (str or None): optional, the unit of the values stored in x.
        yunit (str or None): optional, the unit of the values stored in y.
        zunit (str or None): optional, the unit of the measured data.
        location (str, callable, bool or None): If you provide a string,
            it must be an unused location in the io manager.
            Can also be:
            - a callable `location provider` with one required parameter \
                (the io manager), and one optional (`record` dict),      \
                which returns a location string when called.
            - `False` - denotes an only-in-memory temporary DataSet.
        loc_record (dict or None): If location is a callable, this will be
            passed to it as `record`.

    Raises:
        See _make_data_set for the ValueError and TypeError exceptions that can be raised

    Returns:
        The resulting dataset.

    """
    setpoint_datay = np.array(y)
    setpoint_datax = np.array(x)
    # the x setpoint is repeated for every y value, making it 2D
    setpoint_dataxy = np.tile(setpoint_datax, [setpoint_datay.size, 1])
    # np.nan (np.NaN was removed in NumPy 2.0) marks not-yet-measured points
    preset_data = np.nan * np.ones((setpoint_datay.size, setpoint_datax.size))
    setpointy = DataArray(name=yname, array_id=yname, preset_data=setpoint_datay,
                          unit=yunit, is_setpoint=True)
    setpointx = DataArray(name=xname, array_id=xname, preset_data=setpoint_dataxy,
                          unit=xunit, set_arrays=(setpointy,), is_setpoint=True)

    # a single name means a single measured array; otherwise treat zname/z
    # as parallel lists of names and data
    if isinstance(zname, (str, qcodes.Parameter)):
        if isinstance(z, np.ndarray):
            measured_data_list = [z]
        else:
            measured_data_list = z
        measurement_list = [zname]
    else:
        measured_data_list = z
        measurement_list = zname

    measurement_unit = zunit

    data_set, _ = _make_data_set(measured_data_list, measurement_list, measurement_unit, location, loc_record,
                                 preset_data, [setpointy, setpointx])

    # mark the dataset as never written yet
    data_set.last_write = -1

    return data_set
Beispiel #5
0
    def test_init_data_error(self):
        """init_data raises ValueError when the declared shape disagrees
        with preset data that is already present."""
        arr = DataArray(preset_data=[1, 2])
        # force an inconsistent shape on purpose
        arr.shape = (3, )

        with self.assertRaises(ValueError):
            arr.init_data()
Beispiel #6
0
    def test_attributes(self):
        """DataArray attribute resolution: explicit kwargs beat parameter
        attributes, and anything unspecified falls back to defaults."""
        pname = 'Betty Sue'
        plabel = 'The best apple pie this side of Wenatchee'
        pfullname = 'bert'

        class MockParam:
            name = pname
            label = plabel

            def __init__(self, full_name=None):
                self.full_name = full_name

        name = 'Oscar'
        label = 'The grouch. GRR!'
        fullname = 'ernie'
        array_id = 24601
        set_arrays = ('awesomeness', 'chocolate content')
        shape = 'Ginornous'
        action_indices = (1, 2, 3, 4, 5)

        from_param = DataArray(parameter=MockParam(pfullname), name=name,
                               label=label, full_name=fullname)
        from_param_only = DataArray(parameter=MockParam(pfullname))

        # explicitly given name and label override parameter values
        self.assertEqual(from_param.name, name)
        self.assertEqual(from_param.label, label)
        self.assertEqual(from_param.full_name, fullname)
        # without overrides the parameter supplies them
        self.assertEqual(from_param_only.name, pname)
        self.assertEqual(from_param_only.label, plabel)
        self.assertEqual(from_param_only.full_name, pfullname)
        # defaults for everything not provided
        self.assertIsNone(from_param.array_id)
        self.assertEqual(from_param.shape, ())
        self.assertEqual(from_param.action_indices, ())
        self.assertEqual(from_param.set_arrays, ())
        self.assertIsNone(from_param.ndarray)

        no_param = DataArray(name=name, label=label, array_id=array_id,
                             set_arrays=set_arrays, shape=shape,
                             action_indices=action_indices)
        self.assertEqual(no_param.name, name)
        self.assertEqual(no_param.label, label)
        # no full name or parameter - fall back to name
        self.assertEqual(no_param.full_name, name)
        # plain keyword arguments are stored verbatim
        self.assertEqual(no_param.array_id, array_id)
        self.assertEqual(no_param.set_arrays, set_arrays)
        self.assertEqual(no_param.shape, shape)
        self.assertEqual(no_param.action_indices, action_indices)

        # label defaults to name
        only_name = DataArray(name=name)
        self.assertEqual(only_name.label, name)

        # a fully blank array has no name at all
        nameless = DataArray()
        self.assertIsNone(nameless.name)
Beispiel #7
0
    def test_edit_and_mark(self):
        """Editing entries updates modified_range; mark_saved and
        clear_save move last_saved_index accordingly."""
        arr = DataArray(preset_data=[[1, 2], [3, 4]])
        self.assertEqual(arr[0].tolist(), [1, 2])
        self.assertEqual(arr[0, 1], 2)

        arr.modified_range = None
        self.assertIsNone(arr.last_saved_index)

        self.assertEqual(len(arr), 2)
        arr[0] = np.array([5, 6])
        arr[1, 0] = 7
        self.assertEqual(arr.ndarray.tolist(), [[5, 6], [7, 4]])

        self.assertEqual(arr.modified_range, (0, 2))

        # pretend the first two flat indices were saved; the third stays
        # marked as modified
        arr.mark_saved(1)
        self.assertEqual(arr.last_saved_index, 1)
        self.assertEqual(arr.modified_range, (2, 2))

        # saving the last point clears the modified range entirely
        arr.mark_saved(2)
        self.assertEqual(arr.last_saved_index, 2)
        self.assertEqual(arr.modified_range, None)

        # clear_save forgets the save state and re-marks everything
        arr.clear_save()
        self.assertEqual(arr.last_saved_index, None)
        self.assertEqual(arr.modified_range, (0, 2))
Beispiel #8
0
def DataSetCombined(location=None, name=None):
    """Build a complex DataSet with two 1D and two 2D measured arrays over
    a shared 1D setpoint x plus a nested 2D setpoint y."""
    x_sp = DataArray(name='x', label='X!', preset_data=(16., 17.),
                     is_setpoint=True)

    y1_arr = DataArray(name='y1', label='Y1 value',
                       preset_data=(18., 19.), set_arrays=(x_sp,))
    y2_arr = DataArray(name='y2', label='Y2 value',
                       preset_data=(20., 21.), set_arrays=(x_sp,))

    y_sp = DataArray(name='y', label='Y', preset_data=(22., 23., 24.),
                     is_setpoint=True)
    # nest the inner setpoint under the outer one (2 rows, action index 0)
    y_sp.nest(2, 0, x_sp)

    z1_arr = DataArray(name='z1', label='Z1',
                       preset_data=((25., 26., 27.), (28., 29., 30.)),
                       set_arrays=(x_sp, y_sp))
    z2_arr = DataArray(name='z2', label='Z2',
                       preset_data=((31., 32., 33.), (34., 35., 36.)),
                       set_arrays=(x_sp, y_sp))

    return new_data(arrays=(x_sp, y1_arr, y2_arr, y_sp, z1_arr, z2_arr),
                    location=location, name=name)
Beispiel #9
0
    def test_edit_and_mark_slice(self):
        """A strided slice assignment marks everything from the first to
        the last touched flat index as modified."""
        arr = DataArray(preset_data=[[1] * 5] * 6)

        self.assertEqual(arr.shape, (6, 5))
        arr.modified_range = None

        arr[:4:2, 2:] = 2
        expected = [[1, 1, 2, 2, 2], [1, 1, 1, 1, 1], [1, 1, 2, 2, 2],
                    [1, 1, 1, 1, 1], [1, 1, 1, 1, 1], [1, 1, 1, 1, 1]]
        self.assertEqual(arr.tolist(), expected)
        self.assertEqual(arr.modified_range, (2, 14))
Beispiel #10
0
    def test_repr(self):
        """repr shows the dimensions, optional array_id, then the numpy
        repr of the underlying data."""
        values = [[1, 2], [3, 4]]
        np_repr = repr(np.array(values))
        some_id = (3, 4)
        arr = DataArray(preset_data=values)

        self.assertEqual(repr(arr), 'DataArray[2,2]:\n' + np_repr)

        arr.array_id = some_id
        self.assertEqual(repr(arr),
                         'DataArray[2,2]: ' + str(some_id) + '\n' + np_repr)
Beispiel #11
0
def resampleImage(im):
    """ Resample the image so it has the similar sample rates (samples/mV) in both axis.

    Args:
        im (DataArray): input image.
    Returns:
        imr (numpy array): resampled image.
        setpoints (list of 2 numpy arrays): setpoint arrays from resampled image.
    """
    setpoints = im.set_arrays
    # mV span per axis; setpoints[0] is indexed 1D, setpoints[1] 2D
    mVrange = [abs(setpoints[0][-1] - setpoints[0][0]),
               abs(setpoints[1][0, -1] - setpoints[1][0, 0])]
    # integer samples-per-mV estimate for each axis (floor division)
    samprates = [im.shape[0] // mVrange[0], im.shape[1] // mVrange[1]]
    factor = int(max(samprates) // min(samprates))
    if factor >= 2:
        # pick the axis with the higher sample rate for downsampling
        axis = int(samprates[0] - samprates[1] < 0)
        if axis == 0:
            # trim rows so the row count divides evenly, then block-average
            facrem = im.shape[0] % factor
            if facrem > 0:
                im = im[:-facrem, :]
            facrem = facrem + 1
            im = im.reshape(im.shape[0] // factor, factor, im.shape[1]).mean(1)
            spy = np.linspace(setpoints[0][0], setpoints[
                0][-facrem], im.shape[0])
            spx = np.tile(np.expand_dims(np.linspace(
                setpoints[1][0, 0], setpoints[1][0, -1], im.shape[1]), 0), im.shape[0])
            setpointy = DataArray(name='Resampled_' + setpoints[0].array_id,
                                  array_id='Resampled_' + setpoints[0].array_id, label=setpoints[0].label,
                                  unit=setpoints[0].unit, preset_data=spy, is_setpoint=True)
            setpointx = DataArray(name='Resampled_' + setpoints[1].array_id,
                                  array_id='Resampled_' + setpoints[1].array_id, label=setpoints[1].label,
                                  unit=setpoints[1].unit, preset_data=spx, is_setpoint=True)
            setpoints = [setpointy, setpointx]
        else:
            # trim columns instead, then block-average along the inner axis
            facrem = im.shape[1] % factor
            if facrem > 0:
                im = im[:, :-facrem]
            facrem = facrem + 1
            im = im.reshape(im.shape[0], im.shape[1] //
                            factor, factor).mean(-1)
            spx = np.tile(np.expand_dims(np.linspace(setpoints[1][0, 0], setpoints[
                1][0, -facrem], im.shape[1]), 0), [im.shape[0], 1])
            idx = setpoints[1].array_id
            if idx is None:
                idx = 'x'
            # NOTE(review): idy is read from setpoints[1] just like idx; if
            # the 'y' id was intended this should be setpoints[0] — confirm
            idy = setpoints[1].array_id
            if idy is None:
                idy = 'y'
            setpointx = DataArray(name='Resampled_' + idx, array_id='Resampled_' + idy, label=setpoints[1].label,
                                  unit=setpoints[1].unit, preset_data=spx, is_setpoint=True)
            setpoints = [setpoints[0], setpointx]

    return im, setpoints
Beispiel #12
0
def convert_to_probability(
    data_set,
    location,
    NewIO,
    formatter,
    threshold,
    qubit_num=1,
    repetition=100,
):
    """Average each measured (non-setpoint) array of `data_set` over its
    last axis and collect the results in a new DataSet of probabilities.

    NOTE(review): `data_set_new` is only created inside the first loop when
    a 2D array whose name ends in 'set' exists; otherwise the later
    `data_set_new.add_array(...)` calls raise NameError — confirm intended.
    NOTE(review): `threshold` and `repetition` are accepted but unused here;
    presumably they belong to the commented-out convert_to_01_state step.
    """
    for parameter in data_set.arrays:
        if len(data_set.arrays[parameter].ndarray.shape
               ) == 2 and parameter.endswith('set'):
            data_set_new = DataSet(location=location +
                                   '_average_probability_' + parameter,
                                   io=NewIO,
                                   formatter=formatter)


#    data_set = convert_to_01_state(data_set, threshold, qubit_num, repetition, name, unit, sweep_array)
    qubit_data_array = []
    set_array = []
    for parameter in data_set.arrays:
        data_array = data_set.arrays[parameter].ndarray
        dimension_1 = data_array.shape[0]
        arrayid = data_set.arrays[parameter].array_id
        if parameter.endswith(
                'set'):  ## or data_set.arrays[parameter].is_setpoint
            # setpoint arrays are copied through unchanged
            set_array.append(
                DataArray(preset_data=data_array,
                          name=parameter,
                          array_id=arrayid,
                          is_setpoint=True))

        elif not parameter.endswith('set'):
            dimension_2 = data_array.shape[1]
            probability_data = np.ndarray(shape=(dimension_1, dimension_2))

            # average out the innermost axis of each measured point
            for k in range(dimension_1):
                for l in range(dimension_2):
                    probability_data[k][l] = np.average(data_array[k][l])

            qubit_data_array.append(
                DataArray(preset_data=probability_data,
                          name=parameter,
                          array_id=arrayid,
                          is_setpoint=False))

    for array in set_array:
        data_set_new.add_array(array)
    for q in range(qubit_num):
        data_set_new.add_array(qubit_data_array[q])

    return data_set_new
Beispiel #13
0
 def test_array(self):
     """Build a 3 x 20 2D DataArray with a 1D outer and 2D inner setpoint
     and check its size and shape."""
     # DataSet with one 2D array with 3 x 20 points
     yy, xx = np.meshgrid(np.arange(0, 10, .5), range(3))
     zz = xx ** 2 + yy ** 2
     # outer setpoint should be 1D
     xx = xx[:, 0]
     x = DataArray(name='x', label='X', preset_data=xx, is_setpoint=True)
     y = DataArray(name='y', label='Y', preset_data=yy, set_arrays=(x,),
                   is_setpoint=True)
     z = DataArray(name='z', label='Z', preset_data=zz, set_arrays=(x, y))
     # bug fix: assertTrue(z.size, 60) always passed because 60 was taken
     # as the msg argument; assertEqual actually compares the values
     self.assertEqual(z.size, 60)
     self.assertTupleEqual(z.shape, (3, 20))
     return z
Beispiel #14
0
def DataSet2D(location=None, name=None):
    """Return a DataSet with a single 2D array z = x**2 + y**2 on a 6 x 4
    grid, with a 1D outer setpoint x and a 2D inner setpoint y."""
    yy, xx = numpy.meshgrid(range(4), range(6))
    zz = xx ** 2 + yy ** 2
    # only the outer setpoint is 1D; the inner one stays 2D
    outer = DataArray(name='x', label='X', preset_data=xx[:, 0],
                      is_setpoint=True)
    inner = DataArray(name='y', label='Y', preset_data=yy,
                      set_arrays=(outer, ), is_setpoint=True)
    measured = DataArray(name='z', label='Z', preset_data=zz,
                         set_arrays=(outer, inner))
    return new_data(arrays=(outer, inner, measured),
                    location=location, name=name)
Beispiel #15
0
def fit_addition_line(dataset, trimborder=True):
    """Fit a FermiLinear function to an addition line and locate its middle.

    Args:
        dataset (qcodes dataset): the 1d measured data of the addition line.
        trimborder (bool): when True, clip the edges of the data before
            fitting.

    Returns:
        m_addition_line (float): x value of the middle of the addition line.
        result_dict (dict): holds 'dataset fit' and 'dataset initial guess',
            DataArrays with the fitted and the guessed Fermi Linear curves.

    See also: FermiLinear and fitFermiLinear
    """
    measured = dataset.default_parameter_array()
    setarray = measured.set_arrays[0]
    x_data = np.array(setarray)
    y_data = np.array(measured)

    if trimborder:
        # drop ~2.5% of points on each side, at least 1, at most 100
        ncut = max(min(int(x_data.size / 40), 100), 1)
        x_data = x_data[ncut:-ncut]
        y_data = y_data[ncut:-ncut]
        setarray = setarray[ncut:-ncut]

    m_addition_line, result_dict = fit_addition_line_array(
        x_data, y_data, trimborder=False)

    guess_curve = FermiLinear(
        x_data, *list(result_dict['parameters initial guess']))
    dataset_guess = DataArray(name='fit', label='fit',
                              preset_data=guess_curve,
                              set_arrays=(setarray, ))

    fit_curve = FermiLinear(x_data, *list(result_dict['fit parameters']))
    dataset_fit = DataArray(name='fit', label='fit',
                            preset_data=fit_curve,
                            set_arrays=(setarray, ))

    return m_addition_line, {'dataset fit': dataset_fit,
                             'dataset initial guess': dataset_guess}
Beispiel #16
0
def DataSet1D(location=None, name=None):
    """Return a DataSet with one 1D measured array of 5 points over a
    single setpoint array x."""
    # TODO: since y lists x as a set_array, it should automatically
    # set is_setpoint=True for x, shouldn't it? Any reason we woundn't
    # want that?
    setpoint = DataArray(name='x', label='X',
                         preset_data=(1., 2., 3., 4., 5.),
                         is_setpoint=True)
    measured = DataArray(name='y', label='Y',
                         preset_data=(3., 4., 5., 6., 7.),
                         set_arrays=(setpoint, ))
    return new_data(arrays=(setpoint, measured), location=location,
                    name=name)
Beispiel #17
0
    def _make_setpoint_array(self, shape, i, prev_setpoints, vals, name,
                             label, unit):
        """Normalize `vals` into a fresh setpoint DataArray of `shape`.

        Accepts None (use default setpoints), an existing DataArray (copy
        its data and, when present, its name/label), or any sequence.
        """
        if vals is None:
            vals = self._default_setpoints(shape)
        elif isinstance(vals, DataArray):
            # Don't reuse the DataArray itself: it would get nested here
            # and we must not alter the original. Do borrow its name and
            # label when they are set, since they are already meaningful.
            if vals.name is not None:
                name = vals.name
            if vals.label is not None:
                label = vals.label
            vals = np.array(vals.ndarray)  # detached copy of the data
        else:
            vals = np.array(vals)  # any other sequence -> new numpy array

        if vals.shape != shape:
            raise ValueError('nth setpoint array should have shape matching '
                             'the first n dimensions of shape.')

        if name is None:
            name = 'index{}'.format(i)

        return DataArray(name=name, label=label, set_arrays=prev_setpoints,
                         shape=shape, preset_data=vals, unit=unit,
                         is_setpoint=True)
Beispiel #18
0
    def test_default_parameter(self):
        """default_parameter_name/array: explicit paramname, first
        non-setpoint array, metadata override, and setpoint fallback."""
        m = DataSet2D()

        # must run with default arguments
        name = m.default_parameter_name()

        # an explicit paramname is returned as-is
        self.assertEqual(m.default_parameter_name(paramname='z'), 'z')
        # and the corresponding array can be fetched directly
        self.assertEqual(m.default_parameter_array(paramname='z'), m.z)

        # with no hint, the first non-setpoint array wins
        self.assertEqual(m.default_parameter_array(), m.z)

        # metadata can override the default choice
        m.metadata = dict({'default_parameter_name': 'x_set'})
        self.assertEqual(m.default_parameter_name(), 'x_set')

        # fallback: no name matches and only setpoint arrays exist
        sp = DataArray(name='x',
                       label='X',
                       preset_data=(1., 2., 3., 4., 5.),
                       is_setpoint=True)
        m = new_data(arrays=(sp, ), name='onlysetpoint')
        self.assertEqual(m.default_parameter_name(paramname='dummy'),
                         'x_set')
Beispiel #19
0
    def test_default_parameter(self):
        """default_parameter checks on a dataset created with an explicit
        FormatLocation provider."""
        loc_fmt = 'data/{date}/#{counter}_{name}_{date}_{time}'
        rcd = {'name': 'test_default_parameter'}
        loc_provider = FormatLocation(fmt=loc_fmt, record=rcd)
        m = DataSet2D(location=loc_provider)

        # must run with default arguments
        name = m.default_parameter_name()

        # explicit paramname is honored
        self.assertEqual(m.default_parameter_name(paramname='z'), 'z')
        # the array itself can be fetched as well
        self.assertEqual(m.default_parameter_array(paramname='z'), m.z)

        # first non-setpoint array is the default
        self.assertEqual(m.default_parameter_array(), m.z)

        # metadata overrides the default choice
        m.metadata = dict({'default_parameter_name': 'x_set'})
        self.assertEqual(m.default_parameter_name(), 'x_set')

        # fallback when no name matches and only setpoints exist
        setpoint_only = DataArray(name='x', label='X',
                                  preset_data=(1., 2., 3., 4., 5.),
                                  is_setpoint=True)
        m = new_data(arrays=(setpoint_only, ), name='onlysetpoint')
        self.assertEqual(m.default_parameter_name(paramname='dummy'),
                         'x_set')
Beispiel #20
0
    def test_reading_into_existing_data_array(self):
        """Reading a dataset from disk must reuse a pre-registered DataArray
        with a matching array_id instead of replacing it, and still fill it
        with the on-disk values."""
        data = DataSet1D(location=self.loc_provider, name='test_read_existing')
        # closing before file is written should not raise error
        self.formatter.write(data)

        data2 = DataSet(location=data.location, formatter=self.formatter)
        d_array = DataArray(
            name='dummy',
            array_id='x_set',  # existing array id in data
            label='bla',
            unit='a.u.',
            is_setpoint=False,
            set_arrays=(),
            preset_data=np.zeros(5))
        data2.add_array(d_array)
        # test if d_array refers to same as array x_set in dataset
        self.assertTrue(d_array is data2.arrays['x_set'])
        data2.read()
        # test if reading did not overwrite dataarray
        self.assertTrue(d_array is data2.arrays['x_set'])
        # Testing if data was correctly updated into dataset
        self.checkArraysEqual(data2.arrays['x_set'], data.arrays['x_set'])
        self.checkArraysEqual(data2.arrays['y'], data.arrays['y'])
        self.formatter.close_file(data)
        self.formatter.close_file(data2)
Beispiel #21
0
def process_dataarray(
    dataset: DataSet,
    input_array_name: str,
    output_array_name: Optional[str],
    processing_function: Callable,
    label: Optional[str] = None,
    unit: Optional[str] = None,
) -> DataSet:
    """ Apply a function to a DataArray in a DataSet

    Args:
        dataset: Input dataset containing the data array
        input_array_name: Name of the data array to be processed
        output_array_name: Name of the output array or None to operate in place
        processing_function: Method to apply to the data array
        label: Label for the output array; defaults to the input array's label
        unit: Unit for the output array; defaults to the input array's unit

    Returns:
        The dataset, with the processed data stored in place or added as a
        new array named `output_array_name`.
    """
    array = dataset.default_parameter_array(input_array_name)
    data = processing_function(np.array(array))
    if label is None:
        label = array.label
    if unit is None:
        unit = array.unit
    if output_array_name is None:
        # in-place: overwrite the existing array's data buffer
        array.ndarray[:] = data
    else:
        # out-of-place: register a new array sharing the input's setpoints
        data_array = DataArray(array_id=output_array_name,
                               name=output_array_name,
                               label=label,
                               set_arrays=array.set_arrays,
                               preset_data=data,
                               unit=unit)
        dataset.add_array(data_array)
    return dataset
Beispiel #22
0
    def test_complete(self):
        """DataSet.complete polls sync until done, running background
        functions each cycle; a function failing twice in a row is removed.
        The captured log must contain the expected lines in order."""
        array = DataArray(name='y', shape=(5,))
        array.init_data()
        data = new_data(arrays=(array,), location=False)
        self.syncing_array = array
        self.sync_index = 0
        data.sync = self.mock_sync
        bf = DataSet.background_functions
        bf['fail'] = self.failing_func
        bf['log'] = self.logging_func

        with LogCapture() as logs:
            # grab info and warnings but not debug messages
            logging.getLogger().setLevel(logging.INFO)
            data.complete(delay=0.001)

        logs = logs.value

        expected_logs = [
            'waiting for DataSet <False> to complete',
            'DataSet: 0% complete',
            'RuntimeError: it is called failing_func for a reason!',
            'background at index 1',
            'DataSet: 20% complete',
            'RuntimeError: it is called failing_func for a reason!',
            'background function fail failed twice in a row, removing it',
            'background at index 2',
            'DataSet: 40% complete',
            'background at index 3',
            'DataSet: 60% complete',
            'background at index 4',
            'DataSet: 80% complete',
            'background at index 5',
            'DataSet <False> is complete'
        ]

        # verify each expected line occurs, in order, with nothing extra:
        # log_index walks forward through the captured text
        log_index = 0
        for line in expected_logs:
            self.assertIn(line, logs, logs)
            try:
                log_index_new = logs.index(line, log_index)
            except ValueError:
                raise ValueError('line {} not found after {} in: \n {}'.format(
                    line, log_index, logs))
            self.assertTrue(log_index_new >= log_index, logs)
            log_index = log_index_new + len(line) + 1  # +1 for \n
        # no trailing unexpected output after the last expected line
        self.assertEqual(log_index, len(logs), logs)
Beispiel #23
0
def makeDataSet1Dplain(xname,
                       x,
                       yname,
                       y=None,
                       xunit=None,
                       yunit=None,
                       location=None,
                       loc_record=None):
    """ Make DataSet with one 1D array and one setpoint array

    Arguments:
        xname (string): the name of the setpoint array
        x (array or ndarray or list): the setpoint data
        yname (str or qcodes.Parameter or list): the name of the measured array
        y (array or ndarray or list): the measured data
        xunit (str or None): optional, the unit of the values stored in x array.
        yunit (str or None): optional, the unit of the values stored in y array.
        location (str, callable, bool or None): If you provide a string,
            it must be an unused location in the io manager.
            Can also be:
            - a callable `location provider` with one required parameter \
              (the io manager), and one optional (`record` dict),        \
              which returns a location string when called.
            - `False` - denotes an only-in-memory temporary DataSet.
        loc_record (dict or None): If location is a callable, this will be
            passed to it as `record`.

    Raises:
        See _make_data_set for the ValueError and TypeError exceptions that can be raised

    Returns:
        The resulting dataset.
    """
    setpoint_data = np.array(x)
    # np.nan (np.NaN was removed in NumPy 2.0) marks not-yet-measured points
    preset_data = np.nan * np.ones(setpoint_data.size)
    if y is not None:
        y = np.array(y)

    setpoint = DataArray(name=xname,
                         array_id=xname,
                         preset_data=setpoint_data,
                         unit=xunit,
                         is_setpoint=True)

    # a single name means one measured array; otherwise yname/y are treated
    # as parallel lists of names and data
    if isinstance(yname, (str, qcodes.Parameter)):
        measured_data_list = [y]
        measurement_list = [yname]
    else:
        measured_data_list = y
        measurement_list = yname

    measurement_unit = yunit

    data_set, _ = _make_data_set(measured_data_list, measurement_list,
                                 measurement_unit, location, loc_record,
                                 preset_data, [setpoint])

    return data_set
Beispiel #24
0
def convert_to_01_state(data_set, threshold, qubit_num=1, repetition=100):
    """Convert raw digitizer traces in ``data_set`` into binary 0/1 states.

    For every measured (non-setpoint) array, each repetition segment of
    ``seg_size`` samples is mapped to 1 when its minimum is at or below
    ``threshold`` and to 0 otherwise. Setpoint arrays are copied unchanged.

    Args:
        data_set: source qcodes DataSet holding raw traces.
        threshold: level separating the 0 and 1 states.
        qubit_num (int): number of measured arrays copied into the result.
        repetition (int): number of single-shot segments per trace.

    Returns:
        DataSet: new dataset (at module-level ``new_location`` with suffix
        ``_01_state``) with the setpoints and the thresholded state arrays.
    """
    state_arrays = []
    setpoint_arrays = []

    for arr_name in data_set.arrays:
        source = data_set.arrays[arr_name]
        raw = source.ndarray
        n_rows = raw.shape[0]

        if arr_name.endswith('set'):
            setpoint_arrays.append(
                DataArray(preset_data=raw,
                          name=arr_name,
                          array_id=source.array_id,
                          is_setpoint=True))
            continue

        n_cols = raw.shape[1]
        states = np.ndarray(shape=(n_rows, n_cols, repetition))
        # NOTE(review): ``seg_size`` is a module-level value (samples per
        # single shot) — confirm it matches the digitizer configuration.
        for row in range(n_rows):
            for col in range(n_cols):
                for shot in range(repetition):
                    segment = raw[row][col][shot * seg_size:(shot + 1) * seg_size]
                    states[row][col][shot] = 1 if np.min(segment) <= threshold else 0

        state_arrays.append(
            DataArray(preset_data=states,
                      name=arr_name,
                      array_id=source.array_id,
                      is_setpoint=False))

    result = DataSet(location=new_location + '_01_state',
                     io=NewIO,
                     formatter=formatter)

    for setpoint in setpoint_arrays:
        result.add_array(setpoint)
    for q in range(qubit_num):
        result.add_array(state_arrays[q])

    return result
Beispiel #25
0
    def test_xarray_conversions(self):
        """Round-trip a DataArray through xarray and check metadata survives."""
        da = DataSet1D(name="TestDataArray_test_xarray_conversions").x_set

        # Smoke-test the dictionary conversion path as well.
        data_array_to_xarray_dictionary(da)

        xarray_dataarray = da.to_xarray()
        da_transformed = DataArray.from_xarray(xarray_dataarray)

        # Bug fix: the original loop compared `.name` on every iteration, so
        # `unit` and `label` were never actually checked. Compare the
        # attribute named by `key` instead.
        for key in ["name", "unit", "label"]:
            self.assertEqual(getattr(da, key), getattr(da_transformed, key))
Beispiel #26
0
    def containers(self):
        """
        Finds the data arrays that will be created by the actions in this
        loop, and nests them inside this level of the loop.

        Recursively calls `.containers` on any enclosed actions.

        Returns:
            list of DataArray: the setpoint array for this loop followed by
            one array per measured output, each nested to this loop's size.
        """
        loop_size = len(self.sweep_values)
        # (removed a dead `data_arrays = []` store that was immediately
        # overwritten below)
        loop_array = DataArray(parameter=self.sweep_values.parameter,
                               is_setpoint=True)
        loop_array.nest(size=loop_size)

        data_arrays = [loop_array]
        # hack set_data into actions
        new_actions = self.actions[:]
        if hasattr(self.sweep_values, "parameters"):
            for parameter in self.sweep_values.parameters:
                new_actions.append(parameter)

        for i, action in enumerate(new_actions):
            if hasattr(action, 'containers'):
                action_arrays = action.containers()

            elif hasattr(action, 'get'):
                # this action is a parameter to measure
                # note that this supports lists (separate output arrays)
                # and arrays (nested in one/each output array) of return values
                action_arrays = self._parameter_arrays(action)

            else:
                # this *is* covered but the report misses it because Python
                # optimizes it away. See:
                # https://bitbucket.org/ned/coveragepy/issues/198
                continue  # pragma: no cover

            for array in action_arrays:
                array.nest(size=loop_size,
                           action_index=i,
                           set_array=loop_array)
            data_arrays.extend(action_arrays)

        return data_arrays
Beispiel #27
0
def makeDataSet1D(p, yname='measured', y=None, location=None, loc_record=None, return_names=False):
    """ Make DataSet with one or multiple 1D arrays and one setpoint array.

    Arguments:
        p (qcodes.SweepFixedValues): the setpoint array of data
        yname (str or list of str or Parameter or list of Parameter):
            when type is str or list of str : the name of measured array(s)
            when type is parameter or list of parameter: the measured Parameters
        y (array or list of array or None): optional (measured) data to fill the DataSet
        location (str, callable, bool or None): If you provide a string,
            it must be an unused location in the io manager.
            Can also be:
            - a callable `location provider` with one required parameter \
              (the io manager), and one optional (`record` dict),        \
              which returns a location string when called.
            - `False` - denotes an only-in-memory temporary DataSet.
        loc_record (dict or None): If location is a callable, this will be
            passed to it as `record`.
        return_names (bool): if True return array names in output

    Raises:
        See _make_data_set for the ValueError and TypeError exceptions that can be raised
        See _check_parameter for the TypeError exceptions that can be raised

    Returns:
        Depending on parameter return_names
            True: The resulting dataset and a tuple with the names of the added arrays (setpoint and measurements).
            False: The resulting dataset.
    """
    _check_parameter(p)

    sweep_values = np.array(p)
    # Measurement slots start out as NaN placeholders of the same length.
    placeholder = np.NaN * np.ones(sweep_values.size)

    setpoint = DataArray(name=p.name, array_id=p.name, label=p.parameter.label,
                         unit=p.parameter.unit, preset_data=sweep_values, is_setpoint=True)

    # Normalize a single measurement spec into the list form expected below.
    single = isinstance(yname, (str, qcodes.Parameter))
    measured_data_list = [y] if single else y
    measurement_list = [yname] if single else yname

    data_set, measure_names = _make_data_set(measured_data_list, measurement_list, None,
                                             location, loc_record, placeholder, [setpoint])

    data_set.metadata['default_parameter_name'] = measure_names[0]
    if not return_names:
        return data_set
    return data_set, (setpoint.name, measure_names)
Beispiel #28
0
def seperate_data(data_set,
                  location,
                  NewIO,
                  formatter,
                  qubit_num=1,
                  repetition=100,
                  sweep_arrays=None,
                  sweep_names=None):
    """Split raw data of several appended experiments into separate DataSets.

    Experiments appended to the same AWG sequence share one raw DataSet; this
    slices the second axis of every multi-dimensional array into one segment
    per sweep, which makes plotting and later handling easier.

    Args:
        data_set: combined qcodes DataSet holding all experiments.
        location (str): base storage location; the sweep name is appended for
            each output DataSet.
        NewIO: io manager for the new DataSets.
        formatter: formatter for the new DataSets.
        qubit_num (int): unused here; kept for interface compatibility.
        repetition (int): unused here; kept for interface compatibility.
        sweep_arrays (list): setpoint values of each experiment; their
            lengths determine the slice boundaries.
        sweep_names (list of str): name used for each output DataSet and its
            setpoint array.

    Returns:
        list of DataSet: one DataSet per entry in ``sweep_arrays``.
    """
    start = 0
    seperated_data = []
    for count, array in enumerate(sweep_arrays):
        # Bug fix: the previous code computed `end = start + len(...) - 1`
        # and sliced `[start:end]`, silently dropping the last column of
        # every segment (Python slices are half-open). Use a half-open
        # [start, end) range directly.
        end = start + len(sweep_arrays[count])
        seperated_data.append(
            DataSet(location=location + '_' + sweep_names[count] + '_set',
                    io=NewIO,
                    formatter=formatter))
        for parameter in data_set.arrays:
            if parameter.endswith(
                    'set') and data_set.arrays[parameter].ndarray.ndim > 1:
                name = sweep_names[count] + '_set'
            else:
                name = parameter
            if data_set.arrays[parameter].ndarray.ndim > 1:
                seperated_data[count].add_array(
                    DataArray(
                        preset_data=data_set.arrays[parameter][:, start:end],
                        name=name,
                        array_id=name,
                        # NOTE(review): every copied array is flagged as a
                        # setpoint, including measured data. Preserved as-is;
                        # confirm against downstream consumers.
                        is_setpoint=True))
            else:
                seperated_data[count].add_array(
                    DataArray(preset_data=data_set.arrays[parameter],
                              name=name,
                              array_id=name,
                              is_setpoint=True))
        start = end

    return seperated_data
Beispiel #29
0
 def test_dataset_with_missing_attrs(self):
     """Writing then reloading a bare array preserves its values even when
     optional attributes are absent."""
     written = new_data(formatter=self.formatter,
                        location=self.loc_provider,
                        name='test_missing_attr')
     written.add_array(DataArray(array_id='arr',
                                 preset_data=np.linspace(0, 10, 21)))
     written.write()

     reloaded = load_data(location=written.location,
                          formatter=self.formatter)
     # Attributes may legitimately differ after the round trip, so only the
     # raw values are compared.
     np.testing.assert_array_equal(reloaded.arrays['arr'],
                                   written.arrays['arr'])
def scan_inside_awg(
    name,
    label,
):
    """Acquire one digitizer trace for an AWG-internal sweep and wrap it in a
    new DataSet.

    Args:
        name (str): base name for the sweep setpoint array (``<name>_set``).
        label (str): unused; kept for interface compatibility.

    Returns:
        DataSet: dataset holding the digitizer trace plus its setpoint
        arrays, stored at the module-level ``data_location``.
    """
    # Single acquisition from the digitizer, wrapped as a 1-row array.
    trace = np.array([dig.get()])

    # The sweep length comes from the first loop parameter when present,
    # otherwise it defaults to 1.
    if 'para1' in sweep_loop1:
        sweep_values = np.array([sweep_loop1['para1']])
    else:
        sweep_values = np.array([1])

    measured = DataArray(
        preset_data=trace,
        name='digitizer',
    )

    sweep_setpoint = DataArray(preset_data=sweep_values,
                               name=name + '_set',
                               is_setpoint=True)

    outer_setpoint = DataArray(preset_data=np.array([1]),
                               name='none_set',
                               array_id='pulse_length_set',
                               is_setpoint=True)

    return new_data(
        arrays=[outer_setpoint, sweep_setpoint, measured],
        location=data_location,
        loc_record={
            'name': experiment_name,
            'label': sweep_type
        },
        io=data_IO,
    )