Example #1
    def test_overwrite(self):
        io = MatchIO([1])

        with self.assertRaises(FileExistsError):
            new_data(location='somewhere', io=io)

        data = new_data(location='somewhere', io=io, overwrite=True)
        self.assertEqual(data.location, 'somewhere')
Example #2
def create_data_set(name: str,
                    base_folder: str,
                    subfolder: str = None,
                    formatter: Formatter = None):
    """Create empty ``DataSet`` within main data folder.

    Uses ``new_data``, and handles location formatting.

    Args:
        name: ``DataSet`` name, used as DataSet folder name.
        base_folder: Base folder for DataSet. Should be a pre-existing
            ``DataSet`` folder. If None, a new folder is created in main data
            folder.
        subfolder: Adds subfolder within base_folder for ``DataSet``.
            Should not be used without explicitly setting ``base_folder``.
        formatter: Formatter to use for data storage (e.g. GNUPlotFormat).

    Returns:
        New empty ``DataSet``.
    """
    location_string = '{base_folder}/'
    if subfolder is not None:
        location_string += '{subfolder}/'
    location_string += '#{{counter}}_{{name}}_{{time}}'

    location = qc.data.location.FormatLocation(fmt=location_string.format(
        base_folder=base_folder, name=name, subfolder=subfolder))

    data_set = new_data(location=location, name=name, formatter=formatter)
    return data_set
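
A minimal usage sketch (the folder, name, and formatter choice here are illustrative, not part of the original example; assumes qcodes is imported as in the snippet):

# Hypothetical usage of create_data_set.
from qcodes.data.gnuplot_format import GNUPlotFormat

data = create_data_set(name='rabi_scan',
                       base_folder='data/2017-08-18',
                       subfolder='raw',
                       formatter=GNUPlotFormat())
# The resolved location then looks something like:
#   data/2017-08-18/raw/#001_rabi_scan_12-34-56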
Example #3
    def test_writing_unsupported_types_to_hdf5(self):
        """
        Tests writing of
            - unsupported list type attr
            - nested dataset
        """
        some_dict = {}
        some_dict['list_of_ints'] = list(np.arange(5))
        some_dict['list_of_floats'] = list(np.arange(5.1))
        some_dict['weird_dict'] = {'a': 5}
        data1 = new_data(formatter=self.formatter,
                         location=self.loc_provider,
                         name='test_missing_attr')
        some_dict['nested_dataset'] = data1

        some_dict['list_of_dataset'] = [data1, data1]

        fp = self.loc_provider(io=DataSet.default_io,
                               record={'name': 'test_dict_writing'}) + '.hdf5'
        F = h5py.File(fp, mode='a')
        self.formatter.write_dict_to_hdf5(some_dict, F)
        new_dict = {}
        self.formatter.read_dict_from_hdf5(new_dict, F)
        # objects are not identical but the string representation should be
        self.assertEqual(str(some_dict['nested_dataset']),
                         new_dict['nested_dataset'])
        self.assertEqual(str(some_dict['list_of_dataset']),
                         new_dict['list_of_dataset'])

        F['weird_dict'].attrs['list_type'] = 'unsupported_list_type'
        with self.assertRaises(NotImplementedError):
            self.formatter.read_dict_from_hdf5(new_dict, F)
Example #4
    def test_full_write(self):
        formatter = GNUPlotFormat()
        location = self.locations[0]
        data = DataSet1D(name="test_full_write", location=location)

        formatter.write(data, data.io, data.location)

        with open(location + '/x_set.dat') as f:
            self.assertEqual(f.read(), file_1d())

        # check that we can add comment lines randomly into the file,
        # as long as they come after the first three lines (which are
        # comments with well-defined meaning), and that we can un-quote
        # the labels
        lines = file_1d().split('\n')
        lines[1] = lines[1].replace('"', '')
        lines[3:3] = ['# this data is awesome!']
        lines[6:6] = ['# the next point is my favorite.']
        with open(location + '/x_set.dat', 'w') as f:
            f.write('\n'.join(lines))

        # normally this would be just done by data2 = load_data(location)
        # but we want to work directly with the Formatter interface here
        data2 = DataSet(location=location)
        formatter.read(data2)

        self.checkArraysEqual(data2.x_set, data.x_set)
        self.checkArraysEqual(data2.y, data.y)

        # data has been saved
        self.assertEqual(data.y.last_saved_index, 4)
        # data2 has been read back in, should show the same
        # last_saved_index
        self.assertEqual(data2.y.last_saved_index, 4)

        # while we're here, check some errors on bad reads

        # first: trying to read into a dataset that already has the
        # wrong size
        x = DataArray(name='x_set', label='X', preset_data=(1., 2.))
        y = DataArray(name='y',
                      label='Y',
                      preset_data=(3., 4.),
                      set_arrays=(x, ))
        data3 = new_data(arrays=(x, y), location=location + 'XX')
        # initially give it a different location so we can make it without
        # error, then change back to the location we want.
        data3.location = location
        with LogCapture() as logs:
            formatter.read(data3)

        self.assertTrue('ValueError' in logs.value, logs.value)

        # no problem reading again if only data has changed, it gets
        # overwritten with the disk copy
        data2.x_set[2] = 42
        data2.y[2] = 99
        formatter.read(data2)
        self.assertEqual(data2.x_set[2], 3)
        self.assertEqual(data2.y[2], 5)
Example #5
    def test_snapshot(self):
        data = new_data(location=False)
        expected_snap = {
            '__class__': 'qcodes.data.data_set.DataSet',
            'location': False,
            'arrays': {},
            'formatter': 'qcodes.data.gnuplot_format.GNUPlotFormat',
        }
        snap = strip_qc(data.snapshot())

        # handle io separately so we don't need to figure out our path
        self.assertIn('DiskIO', snap['io'])
        del snap['io']
        self.assertEqual(snap, expected_snap)

        # even though we removed io from the snapshot, it's still in .metadata
        self.assertIn('io', data.metadata)

        # then do the same transformations to metadata to check it too
        del data.metadata['io']
        strip_qc(data.metadata)
        self.assertEqual(data.metadata, expected_snap)

        # location is False so read_metadata should be a noop
        data.metadata = {'food': 'Fried chicken'}
        data.read_metadata()
        self.assertEqual(data.metadata, {'food': 'Fried chicken'})

        # snapshot should never delete things from metadata, only add or update
        data.metadata['location'] = 'Idaho'
        snap = strip_qc(data.snapshot())
        expected_snap['food'] = 'Fried chicken'
        del snap['io']
        self.assertEqual(snap, expected_snap)
Example #6
    def test_default_parameter(self):
        # Test whether the default-parameter functions work
        m = DataSet2D()

        # test we can run with default arguments
        name = m.default_parameter_name()

        # test with paramname
        name = m.default_parameter_name(paramname='z')
        self.assertEqual(name, 'z')
        # test we can get the array instead of the name
        array = m.default_parameter_array(paramname='z')
        self.assertEqual(array, m.z)

        # first non-setpoint array
        array = m.default_parameter_array()
        self.assertEqual(array, m.z)

        # test with metadata
        m.metadata = {'default_parameter_name': 'x_set'}
        name = m.default_parameter_name()
        self.assertEqual(name, 'x_set')

        # test the fallback: no name matches, no non-setpoint array
        x = DataArray(name='x',
                      label='X',
                      preset_data=(1., 2., 3., 4., 5.),
                      is_setpoint=True)
        m = new_data(arrays=(x, ), name='onlysetpoint')
        name = m.default_parameter_name(paramname='dummy')
        self.assertEqual(name, 'x_set')
Example #7
def DataSetCombined(location=None, name=None):
    # Complex DataSet with two 1D and two 2D arrays
    x = DataArray(name='x',
                  label='X!',
                  preset_data=(16., 17.),
                  is_setpoint=True)
    y1 = DataArray(name='y1',
                   label='Y1 value',
                   preset_data=(18., 19.),
                   set_arrays=(x, ))
    y2 = DataArray(name='y2',
                   label='Y2 value',
                   preset_data=(20., 21.),
                   set_arrays=(x, ))

    yset = DataArray(name='y',
                     label='Y',
                     preset_data=(22., 23., 24.),
                     is_setpoint=True)
    yset.nest(2, 0, x)
    z1 = DataArray(name='z1',
                   label='Z1',
                   preset_data=((25., 26., 27.), (28., 29., 30.)),
                   set_arrays=(x, yset))
    z2 = DataArray(name='z2',
                   label='Z2',
                   preset_data=((31., 32., 33.), (34., 35., 36.)),
                   set_arrays=(x, yset))
    return new_data(arrays=(x, y1, y2, yset, z1, z2),
                    location=location,
                    name=name)
Example #8
    def test_default_parameter(self):
        loc_fmt = 'data/{date}/#{counter}_{name}_{date}_{time}'
        rcd = {'name': 'test_default_parameter'}
        loc_provider = FormatLocation(fmt=loc_fmt, record=rcd)
        # Test whether the default-parameter functions work
        m = DataSet2D(location=loc_provider)

        # test we can run with default arguments
        name = m.default_parameter_name()

        # test with paramname
        name = m.default_parameter_name(paramname='z')
        self.assertEqual(name, 'z')
        # test we can get the array instead of the name
        array = m.default_parameter_array(paramname='z')
        self.assertEqual(array, m.z)

        # first non-setpoint array
        array = m.default_parameter_array()
        self.assertEqual(array, m.z)

        # test with metadata
        m.metadata = {'default_parameter_name': 'x_set'}
        name = m.default_parameter_name()
        self.assertEqual(name, 'x_set')

        # test the fallback: no name matches, no non-setpoint array
        x = DataArray(name='x',
                      label='X',
                      preset_data=(1., 2., 3., 4., 5.),
                      is_setpoint=True)
        m = new_data(arrays=(x, ), name='onlysetpoint')
        name = m.default_parameter_name(paramname='dummy')
        self.assertEqual(name, 'x_set')
Example #9
    def test_location_functions(self):
        def my_location(io, record):
            return 'data/{}'.format((record or {}).get('name') or 'LOOP!')

        def my_location2(io, record):
            name = (record or {}).get('name') or 'loop?'
            return 'data/{}/folder'.format(name)

        DataSet.location_provider = my_location

        self.assertEqual(new_data().location, 'data/LOOP!')
        self.assertEqual(new_data(name='cheese').location, 'data/cheese')

        data = new_data(location=my_location2)
        self.assertEqual(data.location, 'data/loop?/folder')
        data = new_data(location=my_location2, name='iceCream')
        self.assertEqual(data.location, 'data/iceCream/folder')
Example #10
    def get_data_set(self, data_manager=USE_MP, *args, **kwargs):
        """
        Return the data set for this loop.

        If no data set has been created yet, a new one will be created and
        returned. Note that all arguments can only be provided when the
        `DataSet` is first created; giving these during `run` when
        `get_data_set` has already been called on its own is an error.

        data_manager: a DataManager instance (omit to use default,
            False to store locally)

        kwargs are passed along to data_set.new_data. The key ones are:
        location: the location of the DataSet, a string whose meaning
            depends on formatter and io, or False to only keep in memory.
            May be a callable to provide automatic locations. If omitted, will
            use the default DataSet.location_provider
        name: if location is default or another provider function, name is
            a string to add to location to make it more readable/meaningful
            to users
        formatter: knows how to read and write the file format
            default can be set in DataSet.default_formatter
        io: knows how to connect to the storage (disk vs cloud etc)
        write_period: how often to save to storage during the loop.
            default 5 sec, use None to write only at the end

        Returns:
            a DataSet object that we can use to plot
        """
        if self.data_set is None:
            if data_manager is False:
                data_mode = DataMode.LOCAL
            else:
                warnings.warn("Multiprocessing is in beta, use at own risk",
                              UserWarning)
                data_mode = DataMode.PUSH_TO_SERVER

            data_set = new_data(arrays=self.containers(),
                                mode=data_mode,
                                data_manager=data_manager,
                                *args,
                                **kwargs)

            self.data_set = data_set

        else:
            has_args = len(kwargs) or len(args)
            uses_data_manager = (self.data_set.mode != DataMode.LOCAL)
            if has_args or (uses_data_manager != data_manager):
                raise RuntimeError(
                    'The DataSet for this loop already exists. '
                    'You can only provide DataSet attributes, such as '
                    'data_manager, location, name, formatter, io, '
                    'write_period, when the DataSet is first created.')

        return self.data_set
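
A hedged usage sketch, assuming `loop` is a configured Loop instance (the name is illustrative):

# Hypothetical call; data_manager=False keeps the data local instead of
# pushing it to a DataManager/server process.
data = loop.get_data_set(data_manager=False, name='iv_curve',
                         write_period=5)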
Example #11
    def test_dataset_with_missing_attrs(self):
        data1 = new_data(formatter=self.formatter,
                         location=self.loc_provider,
                         name='test_missing_attr')
        arr = DataArray(array_id='arr', preset_data=np.linspace(0, 10, 21))
        data1.add_array(arr)
        data1.write()
        # data2 = DataSet(location=data1.location, formatter=self.formatter)
        # data2.read()
        data2 = load_data(location=data1.location, formatter=self.formatter)
        # cannot use the check-arrays-equal helper, since the attributes
        # are expected to differ
        np.testing.assert_array_equal(data2.arrays['arr'], data1.arrays['arr'])
Example #12
def DataSet2D(location=None, name=None):
    # DataSet with one 2D array with 4 x 6 points
    yy, xx = numpy.meshgrid(range(4), range(6))
    zz = xx**2 + yy**2
    # outer setpoint should be 1D
    xx = xx[:, 0]
    x = DataArray(name='x', label='X', preset_data=xx, is_setpoint=True)
    y = DataArray(name='y',
                  label='Y',
                  preset_data=yy,
                  set_arrays=(x, ),
                  is_setpoint=True)
    z = DataArray(name='z', label='Z', preset_data=zz, set_arrays=(x, y))
    return new_data(arrays=(x, y, z), location=location, name=name)
Example #13
def DataSet1D(location=None, name=None):
    # DataSet with one 1D array with 5 points

    # TODO: since y lists x as a set_array, it should automatically
    # set is_setpoint=True for x, shouldn't it? Any reason we wouldn't
    # want that?
    x = DataArray(name='x',
                  label='X',
                  preset_data=(1., 2., 3., 4., 5.),
                  is_setpoint=True)
    y = DataArray(name='y',
                  label='Y',
                  preset_data=(3., 4., 5., 6., 7.),
                  set_arrays=(x, ))
    return new_data(arrays=(x, y), location=location, name=name)
Example #14
    def get_data_set(self, *args, **kwargs):
        """
        Return the data set for this loop.

        If no data set has been created yet, a new one will be created and
        returned. Note that all arguments can only be provided when the
        `DataSet` is first created; giving these during `run` when
        `get_data_set` has already been called on its own is an error.

        Args:
            data_manager: a DataManager instance (omit to use default,
                False to store locally)
            print_data_set (True): print data_set

        kwargs are passed along to data_set.new_data. The key ones are:

        Args:
            location: the location of the DataSet, a string whose meaning
                depends on formatter and io, or False to only keep in memory.
                May be a callable to provide automatic locations. If omitted, will
                use the default DataSet.location_provider
            name: if location is default or another provider function, name is
                a string to add to location to make it more readable/meaningful
                to users
            formatter: knows how to read and write the file format
                default can be set in DataSet.default_formatter
            io: knows how to connect to the storage (disk vs cloud etc)
            write_period: how often to save to storage during the loop.
                default 5 sec, use None to write only at the end

        Returns:
            a DataSet object that we can use to plot
        """
        if self.data_set is None:
            data_set = new_data(arrays=self.containers(), *args, **kwargs)
            data_set.add_metadata({'measurement_type': 'Loop'})
            self.data_set = data_set

        else:
            has_args = len(kwargs) or len(args)
            if has_args:
                raise RuntimeError(
                    'The DataSet for this loop already exists. '
                    'You can only provide DataSet attributes, such as '
                    'data_manager, location, name, formatter, io, '
                    'write_period, when the DataSet is first created.')

        return self.data_set
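
Because of the guard in the `else` branch, attributes can only be passed on the first call; a sketch of both paths (names illustrative):

# Hypothetical usage: the first call fixes the DataSet attributes.
data = loop.get_data_set(name='iv_curve')
# A bare second call returns the same DataSet...
assert loop.get_data_set() is data
# ...while passing attributes again raises RuntimeError.
try:
    loop.get_data_set(name='another_name')
except RuntimeError:
    pass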
Example #15
    def test_complete(self):
        array = DataArray(name='y', shape=(5,))
        array.init_data()
        data = new_data(arrays=(array,), location=False)
        self.syncing_array = array
        self.sync_index = 0
        data.sync = self.mock_sync
        bf = DataSet.background_functions
        bf['fail'] = self.failing_func
        bf['log'] = self.logging_func

        with LogCapture() as logs:
            # grab info and warnings but not debug messages
            logging.getLogger().setLevel(logging.INFO)
            data.complete(delay=0.001)

        logs = logs.value

        expected_logs = [
            'waiting for DataSet <False> to complete',
            'DataSet: 0% complete',
            'RuntimeError: it is called failing_func for a reason!',
            'background at index 1',
            'DataSet: 20% complete',
            'RuntimeError: it is called failing_func for a reason!',
            'background function fail failed twice in a row, removing it',
            'background at index 2',
            'DataSet: 40% complete',
            'background at index 3',
            'DataSet: 60% complete',
            'background at index 4',
            'DataSet: 80% complete',
            'background at index 5',
            'DataSet <False> is complete'
        ]

        log_index = 0
        for line in expected_logs:
            self.assertIn(line, logs, logs)
            try:
                log_index_new = logs.index(line, log_index)
            except ValueError:
                raise ValueError('line {} not found after {} in: \n {}'.format(
                    line, log_index, logs))
            self.assertTrue(log_index_new >= log_index, logs)
            log_index = log_index_new + len(line) + 1  # +1 for \n
        self.assertEqual(log_index, len(logs), logs)
Example #16
def save_single_raw_file(xdata, my_raw_data, rawdatacounter, maincounter):
    xarr = DataArray(preset_data=xdata,
                     is_setpoint=True,
                     name='time',
                     label='Time',
                     unit='s')
    yarr = DataArray(preset_data=my_raw_data,
                     set_arrays=(xarr, ),
                     name='demodulated_signal',
                     label='Demodulated Signal',
                     unit='V')
    name = '{0:06d}'.format(rawdatacounter)
    locstring = '{}{:03}_raw{}'.format(CURRENT_EXPERIMENT['exp_folder'],
                                       maincounter, sep)
    rawdataset = new_data(location=locstring, arrays=[xarr, yarr])
    rawdataset.formatter.number_format = '{:g}'
    rawdataset.formatter.extension = '.raw'
    rawdataset.finalize(filename=name, write_metadata=False)
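
A sketch of a call, assuming the module-level CURRENT_EXPERIMENT dict and sep (path separator) this snippet relies on are defined:

# Hypothetical call; would write 000007.raw inside <exp_folder>003_raw<sep>.
import numpy as np

save_single_raw_file(xdata=np.linspace(0, 1e-3, 100),
                     my_raw_data=np.random.rand(100),
                     rawdatacounter=7,
                     maincounter=3)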
Example #17
    def test_fraction_complete(self):
        empty_data = new_data(arrays=(), location=False)
        self.assertEqual(empty_data.fraction_complete(), 0.0)

        data = DataSetCombined(location=False)
        self.assertEqual(data.fraction_complete(), 1.0)

        # alter only the measured arrays, check that only these are used
        # to calculate fraction_complete
        data.y1.modified_range = (0, 0)  # 1 of 2
        data.y2.modified_range = (0, 0)  # 1 of 2
        data.z1.modified_range = (0, 2)  # 3 of 6
        data.z2.modified_range = (0, 2)  # 3 of 6
        self.assertEqual(data.fraction_complete(), 0.5)

        # mark more things complete using last_saved_index and synced_index
        data.y1.last_saved_index = 1  # 2 of 2
        data.z1.synced_index = 5  # 6 of 6
        self.assertEqual(data.fraction_complete(), 0.75)
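
Consistent with the numbers above, fraction_complete appears to average the per-array fractions of the measured arrays: (1/2 + 1/2 + 3/6 + 3/6) / 4 = 0.5 after the first block, and (2/2 + 1/2 + 6/6 + 3/6) / 4 = 0.75 after the last two updates.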
Example #18
def dictionary_to_dataset(data_dictionary: dict) -> DataSet:
    """ Convert dictionary to DataSet.

    Args:
        data_dictionary: data to convert

    Returns:
        DataSet with converted data.
    """
    dataset = new_data()
    dataset.metadata.update(data_dictionary['metadata'])

    for array_key, array_dict in data_dictionary['arrays'].items():
        data_array = _dictionary_to_data_array(array_dict)
        dataset.add_array(data_array)

    for array_key, array_dict in data_dictionary['arrays'].items():
        set_arrays_names = array_dict['set_arrays']
        set_arrays = tuple([dataset.arrays[name] for name in set_arrays_names])
        dataset.arrays[array_key].set_arrays = set_arrays

    return dataset
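
A minimal sketch of the dictionary shape this function expects; the exact per-array fields depend on `_dictionary_to_data_array`, which is not shown, so treat these keys as assumptions:

# Hypothetical input; only 'metadata', 'arrays', and each array's
# 'set_arrays' entry are used directly by dictionary_to_dataset.
data_dictionary = {
    'metadata': {'sample': 'dev1'},
    'arrays': {
        # each entry also holds whatever _dictionary_to_data_array needs
        'x_set': {'name': 'x_set', 'set_arrays': ()},
        'y': {'name': 'y', 'set_arrays': ('x_set',)},
    },
}
dataset = dictionary_to_dataset(data_dictionary)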
Example #19
def scan_inside_awg(name, label):

    data = dig.get()

    pulse_length = sweep_loop1['para1'] if 'para1' in sweep_loop1 else 1

    data = np.array([data])

    pulse_length = np.array([pulse_length])

    data_array = DataArray(
        preset_data=data,
        name='digitizer',
    )

    pulse_array = DataArray(preset_data=pulse_length,
                            name=name + '_set',
                            is_setpoint=True)

    set_array = DataArray(preset_data=np.array([1]),
                          name='none_set',
                          array_id='pulse_length_set',
                          is_setpoint=True)

    data_set = new_data(
        arrays=[set_array, pulse_array, data_array],
        location=data_location,
        loc_record={
            'name': experiment_name,
            'label': sweep_type
        },
        io=data_IO,
    )

    return data_set
Example #20
def _make_data_set(measured_data_list, measurement_list, measurement_unit,
                   location, loc_record, preset_data, setpoints):
    """ Generic code to make the data set for makeDataSet1D, makeDataSet1DPlain, makeDataSet2D, makeDataSet2DPlain

        Warnings logged:
        1. When the shape of the measured data is not matching the shape of the setpoint array. When the
           measured data is a list, each list item must match the shape of the setpoint array.

    Raises:
        ValueError: When the number of measurements names in the measurement_list does not match the number of
                    measurements.
                    If len(measurement_list) > len(measured_data_list) we would otherwise get an
                    IndexError later on. When len(measurement_list) < len(measured_data_list) now a ValueError is
                    raised because a part of the measure data is not stored silently otherwise.

        TypeError: When a measurement name in the measurement list has an invalid type.

    Returns:
        The resulting data set and the measure names list.
    """
    data_set = new_data(arrays=(), location=location, loc_record=loc_record)

    if len(setpoints) > 1:
        set_arrays = (setpoints[0], setpoints[1])
    else:
        set_arrays = (setpoints[0], )

    if measured_data_list is not None:
        if len(measurement_list) != len(measured_data_list):
            raise ValueError(
                f'The number of measurement names {len(measurement_list)} does not match the number '
                f'of measurements {len(measured_data_list)}')

    measure_names = []
    measure_units = []
    for parameter in measurement_list:
        if isinstance(parameter, str):
            # parameter is a str
            measure_names += [parameter]
            measure_units += [measurement_unit]
        elif isinstance(parameter, qcodes.Parameter):
            # parameter is a Parameter
            measure_names += [parameter.name]
            measure_units += [parameter.unit]
        else:
            raise TypeError(
                'Type of measurement names must be str or qcodes.Parameter')

    for idm, mname in enumerate(measure_names):
        preset_data_array = DataArray(name=mname,
                                      array_id=mname,
                                      label=mname,
                                      unit=measure_units[idm],
                                      preset_data=np.copy(preset_data),
                                      set_arrays=set_arrays)
        data_set.add_array(preset_data_array)
        if (measured_data_list is not None
                and measured_data_list[idm] is not None):
            measured_array = np.array(measured_data_list[idm])
            if measured_array.shape != preset_data.shape:
                logger.warning(
                    f'Shape of measured data {measured_array.shape} does not '
                    f'match setpoint shape {preset_data.shape}')

            getattr(data_set, mname).ndarray = measured_array

    if len(setpoints) > 1:
        data_set.add_array(setpoints[1])
        data_set.add_array(setpoints[0])
    else:
        data_set.add_array(setpoints[0])

    return data_set, measure_names
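
A hedged sketch of a direct call with a single 1D setpoint array (all values illustrative; in practice this private helper is reached through the makeDataSet1D/2D functions):

# Hypothetical call; assumes DataArray is imported as in the snippet.
import numpy as np

x = DataArray(name='x', label='X', preset_data=np.linspace(0., 1., 11),
              is_setpoint=True)
data_set, names = _make_data_set(measured_data_list=[np.zeros(11)],
                                 measurement_list=['signal'],
                                 measurement_unit='V',
                                 location=None,
                                 loc_record={'name': 'sweep'},
                                 preset_data=np.zeros(11),
                                 setpoints=[x])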
Example #21
data3 = DataArray(preset_data=data, name='digitizer3')

data1.ndarray

##
arrays = LP.containers()
arrays2 = []
arrays3 = [
    data1,
]
arrays4 = [data1, data2, data3]
data_set_2 = new_data(
    arrays=arrays3,
    location=None,
    loc_record={
        'name': 'T1',
        'label': 'Vread_sweep'
    },
    io=NewIO,
)

data_set_2.save_metadata()

test_location = '2017-08-18/20-40-19_T1_Vread_sweep'

data_set_3 = DataSet(
    location=test_location,
    io=NewIO,
)
data_set_3.read()
AWGpara_array = data_set_3.arrays['AWGpara_set'].ndarray
Example #22
    def test_mode_error(self):
        with self.assertRaises(ValueError):
            new_data(mode=DataMode.PUSH_TO_SERVER, data_manager=False)
Example #23
print('loop.data_set: %s' % LP.data_set)

NewIO = DiskIO(base_location='C:\\Users\\LocalAdmin\\Documents')
formatter = HDF5FormatMetadata()

OldIO = DiskIO(base_location='D:\\文献\\QuTech\\QTlab\\xiaotest\\testIO')

## get_data_set should be given parameters such as io, location, formatter
data = LP.get_data_set(location=None,
                       loc_record={'name': 'T1', 'label': 'Vread_sweep'},
                       io=NewIO, formatter=formatter)
#data = LP.get_data_set(data_manager=False, location=None, loc_record = {'name':'T1', 'label':'T_load_sweep'})
print('loop.data_set: %s' % LP.data_set)

#%%
DS = new_data(location='aaaaaaaaaa', io=NewIO)
def live_plotting():
    for para in data.arrays:
        DS.arrays[para] = data.arrays[para]
    return DS
DS = live_plotting()
#def add_T1exp_metadata(data):
#        
#        data.metadata['Parameters'] = {'Nrep': 10, 't_empty': 2, 't_load': 2.4, 't_read': 2.2}
#        data.write(write_metadata=True)
#
#
#add_T1exp_metadata(data)

#datatata = LP.run(background=False)
#%%
Example #24
    def __enter__(self):
        """Operation when entering a loop"""
        self.is_context_manager = True

        # Encapsulate everything in a try/except to ensure that the context
        # manager is properly exited.
        try:
            if Measurement.running_measurement is None:
                # Register current measurement as active primary measurement
                Measurement.running_measurement = self
                Measurement.measurement_thread = threading.current_thread()

                # Initialize dataset
                self.dataset = new_data(name=self.name)
                self.dataset.active = True

                self._initialize_metadata(self.dataset)
                with self.timings.record(['dataset', 'save_metadata']):
                    self.dataset.save_metadata()

                    if hasattr(self.dataset, 'save_config'):
                        self.dataset.save_config()

                # Initialize attributes
                self.loop_shape = ()
                self.loop_indices = ()
                self.action_indices = (0,)
                self.data_arrays = {}
                self.set_arrays = {}

                self.log(f'Measurement started {self.dataset.location}')
                print(f'Measurement started {self.dataset.location}')

            else:
                if threading.current_thread() is not Measurement.measurement_thread:
                    raise RuntimeError(
                        "Cannot run a measurement while another measurement "
                        "is already running in a different thread."
                    )

                # Primary measurement is already running. Add this measurement as
                # a data_group of the primary measurement
                msmt = Measurement.running_measurement
                msmt.data_groups[msmt.action_indices] = self
                data_groups = [
                    (key, getattr(val, 'name', 'None')) for key, val in msmt.data_groups.items()
                ]
                msmt.dataset.add_metadata({'data_groups': data_groups})
                msmt.action_indices += (0,)

                # Nested measurement attributes should mimic the primary measurement
                self.loop_shape = msmt.loop_shape
                self.loop_indices = msmt.loop_indices
                self.action_indices = msmt.action_indices
                self.data_arrays = msmt.data_arrays
                self.set_arrays = msmt.set_arrays
                self.timings = msmt.timings

            # Perform measurement thread check, and set user namespace variables
            if self.force_cell_thread and Measurement.running_measurement is self:
                # Raise an error if force_cell_thread is True and the code is run
                # directly from an IPython cell/prompt but not from a separate thread
                is_main_thread = threading.current_thread() == threading.main_thread()
                if is_main_thread and directly_executed_from_cell():
                    raise RuntimeError(
                        "Measurement must be created in a dedicated thread. "
                        "Otherwise specify force_cell_thread=False"
                    )

                # Register the Measurement and dataset as variables in the
                # user namespace, usually as 'msmt' and 'data' respectively
                from IPython import get_ipython

                shell = get_ipython()
                shell.user_ns[self._default_measurement_name] = self
                shell.user_ns[self._default_dataset_name] = self.dataset


            return self
        except:
            # An error has occurred; ensure running_measurement is cleared
            if Measurement.running_measurement is self:
                Measurement.running_measurement = None
            raise
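
A hedged sketch of the usage pattern this context manager supports (the rest of the Measurement API is not shown; names illustrative):

# Hypothetical usage; assumes the measurement runs in a dedicated thread
# when force_cell_thread is enabled.
with Measurement(name='t1_scan') as msmt:
    ...  # sweep and measure; nested Measurements become data_groups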