Exemple #1
0
    def test_read_errors(self):
        """A malformed file must make GNUPlotFormat.read log a ValueError."""
        fmt = GNUPlotFormat()

        def read_and_check_logged_error(dataset):
            # read() swallows per-file errors and logs them instead
            with LogCapture() as logs:
                fmt.read(dataset)
            self.assertTrue('ValueError' in logs.value, logs.value)

        # case 1: a data line appears before the mandatory comment header
        loc = self.locations[0]
        ds = DataSet(location=loc)
        os.makedirs(loc, exist_ok=True)
        with open(loc + '/x_set.dat', 'w') as fh:
            fh.write('1\t2\n' + file_1d())
        read_and_check_logged_error(ds)

        # case 2: two files in the same location claim the same 'y' array
        loc = self.locations[1]
        ds = DataSet(location=loc)
        os.makedirs(loc, exist_ok=True)
        first = ['# x_set\ty', '# "X"\t"Y"', '# 2', '1\t2', '3\t4']
        second = ['# q\ty', '# "Q"\t"Y"', '# 2', '1\t2', '3\t4']
        with open(loc + '/x_set.dat', 'w') as fh:
            fh.write('\n'.join(first))
        with open(loc + '/q.dat', 'w') as fh:
            fh.write('\n'.join(second))
        read_and_check_logged_error(ds)
Exemple #2
0
 def test_constructor_errors(self):
     """DataSet() must reject a missing or wrongly-typed location."""
     bad_constructions = [
         # no location at all - only load_data may omit it
         lambda: DataSet(),
         # location of the wrong type
         lambda: DataSet(location=42),
     ]
     for construct in bad_constructions:
         with self.assertRaises(ValueError):
             construct()
    def test_constructor_errors(self):
        """Invalid constructor arguments must raise ValueError."""
        invalid_kwarg_sets = (
            # no location - only allowed with load_data
            {},
            # wrong type for location
            {'location': 42},
            # OK to have location=False, but wrong mode
            {'location': False, 'mode': 'happy'},
        )
        for kwargs in invalid_kwarg_sets:
            with self.assertRaises(ValueError):
                DataSet(**kwargs)
    def test_from_server(self, gdm_mock):
        """DataSet in PULL_FROM_SERVER mode against a mocked data manager.

        Covers: fallback to LOCAL mode when the requested location does not
        match the server's, write/finalize being forbidden while pulling,
        sync() demoting to LOCAL when the server drops the dataset, and
        location=False making write/read/sync no-ops.

        Args:
            gdm_mock: the patched get_data_manager factory
                (injected by a mock.patch decorator on the class/method).
        """
        mock_dm = MockDataManager()
        gdm_mock.return_value = mock_dm
        mock_dm.location = 'Mars'
        mock_dm.live_data = MockLive()

        # wrong location or False location - converts to local
        data = DataSet(location='Jupiter',
                       data_manager=True,
                       mode=DataMode.PULL_FROM_SERVER)
        self.assertEqual(data.mode, DataMode.LOCAL)

        data = DataSet(location=False,
                       data_manager=True,
                       mode=DataMode.PULL_FROM_SERVER)
        self.assertEqual(data.mode, DataMode.LOCAL)

        # location matching server - stays in server mode
        data = DataSet(location='Mars',
                       data_manager=True,
                       mode=DataMode.PULL_FROM_SERVER,
                       formatter=MockFormatter())
        self.assertEqual(data.mode, DataMode.PULL_FROM_SERVER)
        self.assertEqual(data.arrays, MockLive.arrays)

        # cannot write except in LOCAL mode
        with self.assertRaises(RuntimeError):
            data.write()

        # cannot finalize in PULL_FROM_SERVER mode
        with self.assertRaises(RuntimeError):
            data.finalize()

        # now test when the server says it's not there anymore
        mock_dm.location = 'Saturn'
        data.sync()
        self.assertEqual(data.mode, DataMode.LOCAL)
        self.assertEqual(data.has_read_data, True)

        # now it's LOCAL so we *can* write.
        data.write()
        self.assertEqual(data.has_written_data, True)

        # location=False: write, read and sync are noops.
        data.has_read_data = False
        data.has_written_data = False
        data.location = False
        data.write()
        data.read()
        data.sync()
        self.assertEqual(data.has_read_data, False)
        self.assertEqual(data.has_written_data, False)
    def test_to_server(self, gdm_mock):
        """DataSet in PUSH_TO_SERVER mode against a mocked data manager.

        Covers: restarting and registering with the data manager on
        construction, write() being forbidden outside LOCAL mode,
        init_on_server() requiring at least one array, and duplicate
        array_ids being rejected.

        Args:
            gdm_mock: the patched get_data_manager factory
                (injected by a mock.patch decorator on the class/method).
        """
        mock_dm = MockDataManager()
        mock_dm.needs_restart = True
        gdm_mock.return_value = mock_dm

        data = DataSet(location='Venus',
                       data_manager=True,
                       mode=DataMode.PUSH_TO_SERVER)
        self.assertEqual(mock_dm.needs_restart, False, data)
        self.assertEqual(mock_dm.data_set, data)
        self.assertEqual(data.data_manager, mock_dm)
        self.assertEqual(data.mode, DataMode.PUSH_TO_SERVER)

        # cannot write except in LOCAL mode
        with self.assertRaises(RuntimeError):
            data.write()

        # now do what the DataServer does with this DataSet: init_on_server
        # fails until there is an array
        with self.assertRaises(RuntimeError):
            data.init_on_server()

        data.add_array(MockArray())
        data.init_on_server()
        self.assertEqual(data.noise.ready, True)

        # we can only add a given array_id once
        with self.assertRaises(ValueError):
            data.add_array(MockArray())
Exemple #6
0
    def test_incremental_write(self):
        """Write a dataset one value at a time and verify the file is complete.

        Simulates a sweep where the formatter is invoked after every single
        value (setpoint and measurement separately), then reads the result
        back and compares against an untouched in-memory copy.
        """
        data = DataSet1D(location=self.loc_provider, name='test_incremental')
        location = data.location
        data_copy = DataSet1D(False)

        # empty the data and mark it as unmodified
        data.x_set[:] = float('nan')
        data.y[:] = float('nan')
        data.x_set.modified_range = None
        data.y.modified_range = None

        # simulate writing after every value comes in, even within
        # one row (x comes first, it's the setpoint)
        for i, (x, y) in enumerate(zip(data_copy.x_set, data_copy.y)):
            data.x_set[i] = x
            self.formatter.write(data)
            data.y[i] = y
            self.formatter.write(data)
        data2 = DataSet(location=location, formatter=self.formatter)
        data2.read()
        self.checkArraysEqual(data2.arrays['x_set'], data_copy.arrays['x_set'])
        self.checkArraysEqual(data2.arrays['y'], data_copy.arrays['y'])

        self.formatter.close_file(data)
        self.formatter.close_file(data2)
Exemple #7
0
    def test_full_write(self):
        """Full write/read cycle of a 1D dataset through GNUPlotFormat.

        Also verifies: tolerance to extra comment lines and unquoted labels
        in the file, propagation of last_saved_index through a round trip,
        the logged ValueError when reading into a wrongly-sized dataset,
        and that re-reading overwrites locally modified values.
        """
        formatter = GNUPlotFormat()
        location = self.locations[0]
        data = DataSet1D(name="test_full_write", location=location)

        formatter.write(data, data.io, data.location)

        with open(location + '/x_set.dat') as f:
            self.assertEqual(f.read(), file_1d())

        # check that we can add comment lines randomly into the file
        # as long as it's after the first three lines, which are comments
        # with well-defined meaning,
        # and that we can un-quote the labels
        lines = file_1d().split('\n')
        lines[1] = lines[1].replace('"', '')
        lines[3:3] = ['# this data is awesome!']
        lines[6:6] = ['# the next point is my favorite.']
        with open(location + '/x_set.dat', 'w') as f:
            f.write('\n'.join(lines))

        # normally this would be just done by data2 = load_data(location)
        # but we want to work directly with the Formatter interface here
        data2 = DataSet(location=location)
        formatter.read(data2)

        self.checkArraysEqual(data2.x_set, data.x_set)
        self.checkArraysEqual(data2.y, data.y)

        # data has been saved
        self.assertEqual(data.y.last_saved_index, 4)
        # data2 has been read back in, should show the same
        # last_saved_index
        self.assertEqual(data2.y.last_saved_index, 4)

        # while we're here, check some errors on bad reads

        # first: trying to read into a dataset that already has the
        # wrong size
        x = DataArray(name='x_set', label='X', preset_data=(1., 2.))
        y = DataArray(name='y',
                      label='Y',
                      preset_data=(3., 4.),
                      set_arrays=(x, ))
        data3 = new_data(arrays=(x, y), location=location + 'XX')
        # initially give it a different location so we can make it without
        # error, then change back to the location we want.
        data3.location = location
        with LogCapture() as logs:
            formatter.read(data3)

        self.assertTrue('ValueError' in logs.value, logs.value)

        # no problem reading again if only data has changed, it gets
        # overwritten with the disk copy
        data2.x_set[2] = 42
        data2.y[2] = 99
        formatter.read(data2)
        self.assertEqual(data2.x_set[2], 3)
        self.assertEqual(data2.y[2], 5)
Exemple #8
0
    def test_reading_into_existing_data_array(self):
        """Reading must update, not replace, a pre-registered DataArray.

        A dummy array registered under an array_id that already exists on
        disk should remain the same object after read(), with its contents
        refreshed from the file.
        """
        data = DataSet1D(location=self.loc_provider, name='test_read_existing')
        # closing before file is written should not raise error
        self.formatter.write(data)

        data2 = DataSet(location=data.location, formatter=self.formatter)
        d_array = DataArray(
            name='dummy',
            array_id='x_set',  # existing array id in data
            label='bla',
            unit='a.u.',
            is_setpoint=False,
            set_arrays=(),
            preset_data=np.zeros(5))
        data2.add_array(d_array)
        # test if d_array refers to same as array x_set in dataset
        self.assertTrue(d_array is data2.arrays['x_set'])
        data2.read()
        # test if reading did not overwrite dataarray
        self.assertTrue(d_array is data2.arrays['x_set'])
        # Testing if data was correctly updated into dataset
        self.checkArraysEqual(data2.arrays['x_set'], data.arrays['x_set'])
        self.checkArraysEqual(data2.arrays['y'], data.arrays['y'])
        self.formatter.close_file(data)
        self.formatter.close_file(data2)
Exemple #9
0
def convert_to_probability(
    data_set,
    location,
    NewIO,
    formatter,
    threshold,
    qubit_num=1,
    repetition=100,
):
    """Average repeated single-shot data into per-point probabilities.

    Builds a new DataSet at ``location + '_average_probability_' +
    <2-D setpoint name>`` that holds the original setpoint arrays plus,
    for each measured (non-setpoint) array, the average over its
    innermost (repetition) axis.

    NOTE(review): ``data_set_new`` is only created if some 2-D array whose
    name ends in 'set' exists; otherwise the add_array calls at the end
    raise NameError.  ``threshold`` and ``repetition`` are accepted but
    unused in this function — confirm whether the commented-out
    convert_to_01_state call was meant to consume them.
    """
    for parameter in data_set.arrays:
        # pick a 2-D setpoint array to name the output location
        # (the last matching one wins if there are several)
        if len(data_set.arrays[parameter].ndarray.shape
               ) == 2 and parameter.endswith('set'):
            data_set_new = DataSet(location=location +
                                   '_average_probability_' + parameter,
                                   io=NewIO,
                                   formatter=formatter)


#    data_set = convert_to_01_state(data_set, threshold, qubit_num, repetition, name, unit, sweep_array)
    qubit_data_array = []
    set_array = []
    for parameter in data_set.arrays:
        data_array = data_set.arrays[parameter].ndarray
        dimension_1 = data_array.shape[0]
        arrayid = data_set.arrays[parameter].array_id
        if parameter.endswith(
                'set'):  ## or data_set.arrays[parameter].is_setpoint
            # setpoint arrays are passed through unchanged
            set_array.append(
                DataArray(preset_data=data_array,
                          name=parameter,
                          array_id=arrayid,
                          is_setpoint=True))

        elif not parameter.endswith('set'):
            dimension_2 = data_array.shape[1]
            probability_data = np.ndarray(shape=(dimension_1, dimension_2))

            # average each point's repetitions into one probability
            for k in range(dimension_1):
                for l in range(dimension_2):
                    probability_data[k][l] = np.average(data_array[k][l])

            qubit_data_array.append(
                DataArray(preset_data=probability_data,
                          name=parameter,
                          array_id=arrayid,
                          is_setpoint=False))

    for array in set_array:
        data_set_new.add_array(array)
    for q in range(qubit_num):
        data_set_new.add_array(qubit_data_array[q])

    return data_set_new
Exemple #10
0
    def test_full_write_read_2D(self):
        """Round-trip a 2D dataset through the formatter and compare arrays."""
        original = DataSet2D(location=self.loc_provider, name='test2D')
        self.formatter.write(original)

        # read the file back in through DataSet.read rather than the formatter
        reloaded = DataSet(location=original.location,
                           formatter=self.formatter)
        reloaded.read()

        for attr in ('x_set', 'y_set', 'z'):
            self.checkArraysEqual(getattr(reloaded, attr),
                                  getattr(original, attr))

        for ds in (original, reloaded):
            self.formatter.close_file(ds)
Exemple #11
0
 def test_metadata_write_read(self):
     """
     Round-trip the snapshot metadata of a 1D dataset through the
     formatter.  A more complex snapshot would make this a stronger test.
     """
     original = DataSet1D(location=self.loc_provider, name='test_metadata')
     original.snapshot()  # snapshot is not taken on init, request it now
     self.formatter.write(original)  # write() also writes the metadata
     reloaded = DataSet(location=original.location,
                        formatter=self.formatter)
     reloaded.read()
     for ds in (original, reloaded):
         self.formatter.close_file(ds)
     equal, err_msg = compare_dictionaries(
         original.metadata, reloaded.metadata,
         'original_metadata', 'loaded_metadata')
     self.assertTrue(equal, msg='\n' + err_msg)
Exemple #12
0
    def test_full_write_read_1D(self):
        """Round-trip a 1D dataset through the formatter and compare arrays."""
        original = DataSet1D(name='test1D_full_write',
                             location=self.loc_provider)
        self.formatter.write(original)

        # read the file back in through DataSet.read
        reloaded = DataSet(location=original.location,
                           formatter=self.formatter)
        reloaded.read()

        for attr in ('x_set', 'y'):
            self.checkArraysEqual(getattr(reloaded, attr),
                                  getattr(original, attr))

        for ds in (original, reloaded):
            self.formatter.close_file(ds)
Exemple #13
0
 def load_info(self):
     """Fill the info and comment columns of the dataset tree model.

     Walks every top-level row and its children; for each child, reads
     the dataset metadata from the filename stored in column 3, writes a
     summary into column 1, and copies the 'comment' metadata entry (if
     present) into column 4.
     """
     try:
         for row in range(self._treemodel.rowCount()):
             index = self._treemodel.index(row, 0)
             i = 0
             # children are scanned until the first empty slot
             while (index.child(i, 0).data() is not None):
                 filename = index.child(i, 3).data()
                 # NOTE(review): assumes Windows-style backslash paths —
                 # confirm on other platforms
                 loc = '\\'.join(filename.split('\\')[:-1])
                 tempdata = DataSet(loc)
                 tempdata.read_metadata()
                 infotxt = DataViewer.get_data_info(tempdata.metadata)
                 self._treemodel.setData(index.child(i, 1), infotxt)
                 if 'comment' in tempdata.metadata.keys():
                     self._treemodel.setData(index.child(i, 4),
                                             tempdata.metadata['comment'])
                 i = i + 1
     except Exception as e:
         # broad best-effort catch keeps the viewer alive on any
         # malformed dataset; the error is only printed, not logged
         print(e)
Exemple #14
0
def convert_to_01_state(data_set, threshold, qubit_num=1, repetition=100):
    """Threshold repeated raw traces into 0/1 states per repetition.

    For every non-setpoint array, each repetition window of ``seg_size``
    samples becomes 1 if its minimum is <= ``threshold``, else 0.
    Setpoint arrays are copied through unchanged into a new DataSet at
    ``new_location + '_01_state'``.

    NOTE(review): relies on module globals ``seg_size``, ``new_location``,
    ``NewIO`` and ``formatter`` that are not defined in this function —
    confirm they exist at module level.
    """
    #data_set = convert_to_ordered_data(data_set, qubit_num, repetition, name, unit, sweep_array)

    qubit_data_array = []
    set_array = []
    for parameter in data_set.arrays:
        data_array = data_set.arrays[parameter].ndarray
        dimension_1 = data_array.shape[0]
        array_id = data_set.arrays[parameter].array_id
        if parameter.endswith(
                'set'):  ## or data_set.arrays[parameter].is_setpoint
            set_array.append(
                DataArray(preset_data=data_array,
                          name=parameter,
                          array_id=array_id,
                          is_setpoint=True))

        elif not parameter.endswith('set'):
            dimension_2 = data_array.shape[1]
            data = np.ndarray(shape=(dimension_1, dimension_2, repetition))

            # 1 if the repetition's sample window dips to/below threshold
            for k in range(dimension_1):
                for l in range(dimension_2):
                    for j in range(repetition):
                        data[k][l][j] = 1 if np.min(
                            data_array[k][l][j * seg_size:(j + 1) *
                                             seg_size]) <= threshold else 0

            qubit_data_array.append(
                DataArray(preset_data=data,
                          name=parameter,
                          array_id=array_id,
                          is_setpoint=False))

    data_set_new = DataSet(location=new_location + '_01_state',
                           io=NewIO,
                           formatter=formatter)

    for array in set_array:
        data_set_new.add_array(array)
    for q in range(qubit_num):
        data_set_new.add_array(qubit_data_array[q])

    return data_set_new
Exemple #15
0
def seperate_data(data_set,
                  location,
                  NewIO,
                  formatter,
                  qubit_num=1,
                  repetition=100,
                  sweep_arrays=None,
                  sweep_names=None):
    """Separate raw data of several appended experiments into new DataSets.

    This function will separate the raw data for each experiment (appended
    to the same sequence) into different data files.  This will make
    plotting and data handling easier.

    NOTE(review): the slice ``[:, start:end]`` with
    ``end = start + len(...) - 1`` excludes the last column of each
    experiment — looks like an off-by-one; confirm against the acquisition
    layout.  Also every array is added with ``is_setpoint=True``, including
    measured data — verify that is intended.  ``qubit_num`` and
    ``repetition`` are accepted but unused.
    """
    start = 0
    end = 0
    seperated_data = []
    for count, array in enumerate(sweep_arrays):

        end = start + len(sweep_arrays[count]) - 1
        seperated_data.append(
            DataSet(location=location + '_' + sweep_names[count] + '_set',
                    io=NewIO,
                    formatter=formatter))
        for parameter in data_set.arrays:
            # multi-dimensional setpoint arrays are renamed per experiment
            if parameter.endswith(
                    'set') and data_set.arrays[parameter].ndarray.ndim > 1:
                name = sweep_names[count] + '_set'
            else:
                name = parameter
            if data_set.arrays[parameter].ndarray.ndim > 1:
                # multi-dimensional arrays: take this experiment's columns
                seperated_data[count].add_array(
                    DataArray(
                        preset_data=data_set.arrays[parameter][:, start:end],
                        name=name,
                        array_id=name,
                        is_setpoint=True))
            else:
                # 1-D arrays are shared by all experiments, copy whole
                seperated_data[count].add_array(
                    DataArray(preset_data=data_set.arrays[parameter],
                              name=name,
                              array_id=name,
                              is_setpoint=True))
        start = end + 1

    return seperated_data
Exemple #16
0
    def test_multifile(self):
        """Write a combined dataset to multiple files and read it back."""
        fmt = GNUPlotFormat()
        loc = self.locations[1]
        combined = DataSetCombined(loc)

        fmt.write(combined, combined.io, combined.location)

        expected_x, expected_xy = files_combined()

        # one file per group of arrays sharing the same setpoints
        with open(loc + '/x_set.dat') as fh:
            self.assertEqual(fh.read(), expected_x)
        with open(loc + '/x_set_y_set.dat') as fh:
            self.assertEqual(fh.read(), expected_xy)

        reloaded = DataSet(location=loc)
        fmt.read(reloaded)

        for array_id in ('x_set', 'y1', 'y2', 'y_set', 'z1', 'z2'):
            self.checkArraysEqual(reloaded.arrays[array_id],
                                  combined.arrays[array_id])
Exemple #17
0
    def test_loop_writing_2D(self):
        """Run a 2D loop, reload the result, and compare arrays and
        metadata against the in-memory dataset."""
        station = Station()
        mock_par = MockParabola(name='Loop_writing_test_2D')
        station.add_component(mock_par)
        loop = Loop(mock_par.x[-100:100:20]).loop(mock_par.y[-50:50:10]).each(
            mock_par.skewed_parabola)
        data1 = loop.run(name='MockLoop_hdf5_test', formatter=self.formatter)

        data2 = DataSet(location=data1.location, formatter=self.formatter)
        data2.read()
        for key, arr in data2.arrays.items():
            self.checkArraysEqual(arr, data1.arrays[key])

        metadata_equal, err_msg = compare_dictionaries(
            data1.metadata, data2.metadata,
            'original_metadata', 'loaded_metadata')
        self.assertTrue(metadata_equal, msg='\n' + err_msg)

        for ds in (data1, data2):
            self.formatter.close_file(ds)
Exemple #18
0
def average_probability(data_set, location, NewIO, formatter, qubit_num=1):
    """Average every 2-D array of ``data_set`` over its first axis.

    Creates a new DataSet at ``location + '_average_probability_data_' +
    <2-D setpoint name>`` and fills it with the axis-0 averages of all
    2-D arrays, preserving each array's name, array_id and setpoint flag.

    NOTE(review): if no 2-D array ending in 'set' exists, ``data_set_new``
    is never created and the add_array call raises NameError.
    ``qubit_num`` is accepted but unused.
    """

    for parameter in data_set.arrays:
        # pick a 2-D setpoint array to name the output location
        # (the last matching one wins if there are several)
        if len(data_set.arrays[parameter].ndarray.shape
               ) == 2 and parameter.endswith('set'):
            data_set_new = DataSet(location=location +
                                   '_average_probability_data_' + parameter,
                                   io=NewIO,
                                   formatter=formatter)

    for parameter in data_set.arrays:
        if len(data_set.arrays[parameter].ndarray.shape) == 2:
            # deepcopy so the source array is not modified by averaging
            data = deepcopy(data_set.arrays[parameter].ndarray)
            data = np.average(data, axis=0)
            is_setpoint = data_set.arrays[parameter].is_setpoint
            name = data_set.arrays[parameter].name
            array_id = data_set.arrays[parameter].array_id
            data_set_new.add_array(
                DataArray(preset_data=data,
                          name=name,
                          array_id=array_id,
                          is_setpoint=is_setpoint))
    return data_set_new
Exemple #19
0
def convert_to_ordered_data(data_set,
                            qubit_num=1,
                            repetition=100,
                            name='frequency',
                            unit='GHz',
                            sweep_array=None):
    """Reorder interleaved raw digitizer traces into per-qubit arrays.

    Raw arrays interleave data and marker samples (even indices = data,
    odd indices = marker); the first marker above 0.2 locates the start
    of the real data.  The data is then de-interleaved per qubit and
    collected into a new DataSet at ``new_location + '_ordered_raw_data'``.

    NOTE(review): relies on module globals ``seg_size``, ``new_location``,
    ``NewIO`` and ``formatter`` not defined in this function.  Also the
    ``qubit_data_array.append`` calls sit inside the per-point loops, so
    the same qubit's DataArray is appended many times — only the first
    ``qubit_num`` entries are added to the output; confirm this is intended.
    """

    qubit_data_array = []
    set_array = []

    for parameter in data_set.arrays:
        data_array = data_set.arrays[parameter].ndarray
        dimension_1 = data_array.shape[0]
        array_name = parameter
        array_id = data_set.arrays[parameter].array_id

        if parameter.endswith('set'):
            # synthesize a sweep axis for 2-D 'index*' setpoint arrays
            if data_array.ndim == 2 and parameter.startswith('index'):
                dimension_2 = int(data_array.shape[-1] / 2 / (repetition + 1) /
                                  seg_size / qubit_num)
                sweep_array = sweep_array if sweep_array is not None else np.linspace(
                    0, dimension_2 - 1, dimension_2)
                data_array = np.array(
                    [sweep_array for k in range(dimension_1)])
                array_name = name + '_set'
                array_id = name + '_set'
            if data_array.ndim != 3 or not parameter.startswith('index'):
                set_array.append(
                    DataArray(preset_data=data_array,
                              name=array_name,
                              array_id=array_id,
                              is_setpoint=True))

        elif not parameter.endswith('set') and data_array.ndim == 2:

            # usable data samples after dropping markers and the warm-up
            # repetition (the +1 in repetition + 1)
            data_num = int(data_array.shape[-1] / 2 / (repetition + 1) *
                           repetition)
            qubit_data_num = int(data_num / qubit_num)

            dimension_2 = int(data_array.shape[-1] / 2 / (repetition + 1) /
                              seg_size / qubit_num)
            qubit_data = np.ndarray(shape=(qubit_num, dimension_1, dimension_2,
                                           int(qubit_data_num / dimension_2)))

            for k in range(dimension_1):
                # even samples are data, odd samples are the marker channel
                raw_data = data_array[k][::2]
                raw_marker = data_array[k][1::2]
                # find the first marker pulse = start of real data
                for seg in range(seg_size * qubit_num * dimension_2):
                    if raw_marker[seg] > 0.2:  ##  a better threshold ???
                        break
                data = raw_data[seg:data_num + seg]
                print('seg', seg)
                data_reshape = data.reshape(int(data_num / seg_size), seg_size)
                print('data_shape', data_reshape.shape)
                for l in range(dimension_2):
                    for q in range(qubit_num):

                        # de-interleave segments belonging to qubit q, point l
                        qubit_data[q][k][l] = data_reshape[qubit_num * l +
                                                           q::dimension_2 *
                                                           qubit_num].reshape(
                                                               seg_size *
                                                               repetition, )
                        # qubit naming: q=0 -> qubit_2, q=1 -> qubit_1,
                        # q>=2 -> qubit_{q+1}
                        n = 2 if q == 0 else q
                        if q >= 2:
                            n = q + 1
                        qubit_data_array.append(
                            DataArray(preset_data=qubit_data[q],
                                      name=parameter + 'qubit_%d' % (n),
                                      array_id=array_id + 'qubit_%d' % (n),
                                      is_setpoint=False))

        elif not parameter.endswith('set') and data_array.ndim == 3:
            data_num = int(data_array.shape[-1] / 2 / (repetition + 1) *
                           repetition)
            qubit_data_num = int(data_num / qubit_num)

            dimension_2 = data_array.shape[1]
            print('qubit_num, dimension_1, dimension_2, int(qubit_data_num)',
                  qubit_num, dimension_1, dimension_2, int(qubit_data_num))
            qubit_data = np.ndarray(shape=(qubit_num, dimension_1, dimension_2,
                                           int(qubit_data_num)))

            for k in range(dimension_1):
                for l in range(dimension_2):
                    raw_data = data_array[k][l][::2]
                    raw_marker = data_array[k][l][1::2]
                    for seg in range(seg_size * qubit_num):
                        if raw_marker[seg] > 0.2:  ##  a better threshold ???
                            break
                    data = raw_data[
                        seg:data_num +
                        seg]  ## here data consists both data from qubit1 and qubit2
                    for q in range(qubit_num):
                        data_reshape = data.reshape(int(data_num / seg_size),
                                                    seg_size)
                        qubit_data[q][k][l] = data_reshape[
                            q::qubit_num].reshape(seg_size * repetition, )
                        n = 2 if q == 0 else q
                        qubit_data_array.append(
                            DataArray(preset_data=qubit_data[q],
                                      name=parameter + 'qubit_%d' % (n),
                                      array_id=array_id + 'qubit_%d' % (n),
                                      is_setpoint=False))

    data_set_new = DataSet(location=new_location + '_ordered_raw_data',
                           io=NewIO,
                           formatter=formatter)
    for array in set_array:
        data_set_new.add_array(array)
    for q in range(qubit_num):
        data_set_new.add_array(qubit_data_array[q])

    return data_set_new
def convert_to_probability(data_set,
                           threshold,
                           loop_num,
                           qubit_num=1,
                           name='frequency',
                           unit='GHz',
                           sweep_array=None):
    """Convert thresholded 0/1 states into probabilities per sweep point.

    First runs ``convert_to_01_state``, then regroups each measured
    array's segments by loop index (segment i belongs to loop
    ``i % loop_num``) and averages them into a probability per loop.
    Results are stored in a new DataSet at module-level ``new_location``.

    NOTE(review): ``set_array1``/``set_array2`` are only bound if matching
    setpoint arrays exist, otherwise the add_array calls raise NameError.
    Relies on module globals ``new_location``, ``NewIO`` and ``formatter``.
    ``unit`` is accepted but unused.
    """

    data_set = convert_to_01_state(data_set, threshold, loop_num, qubit_num,
                                   name, unit, sweep_array)
    qubit_data_array = []
    for parameter in data_set.arrays:
        data_array = data_set.arrays[parameter]
        dimension_1 = data_array.shape[0]
        arrayid = data_set.arrays[parameter].array_id
        if parameter[
                -3:] == 'set':  ## or data_set.arrays[parameter].is_setpoint
            # 1-D and 2-D setpoint arrays are passed through unchanged
            if len(data_array.shape) == 1:
                set_array1 = DataArray(preset_data=data_array.ndarray,
                                       name=parameter,
                                       array_id=arrayid,
                                       is_setpoint=True)
            if len(data_array.shape
                   ) == 2 and not parameter.startswith('index'):
                set_array2 = DataArray(preset_data=data_array.ndarray,
                                       name=parameter,
                                       array_id=arrayid,
                                       is_setpoint=True)

        elif parameter[-3:] != 'set':
            seg_num = int(data_set.arrays[parameter].shape[1])

            data = np.ndarray(shape=(dimension_1, loop_num))
            setpara = np.ndarray(shape=(dimension_1, loop_num))

            for k in range(dimension_1):
                #                data_k = []
                #                setpara_k = []
                # regroup the flat segment axis by loop index
                state = np.ndarray(shape=(loop_num, int(seg_num / loop_num)))
                for i in range(seg_num):
                    loop = i % loop_num
                    sweep = i // loop_num
                    state[loop][sweep] = data_array.ndarray[k][i]

                # probability = mean of the 0/1 states within each loop
                for j in range(loop_num):
                    setpara[k][j] = j
                    probability = np.average(state[j])
                    data[k][j] = probability

                # replace the default 0..loop_num-1 axis with the real sweep
                if loop_num > 1 and sweep_array is not None:
                    setpara[k] = sweep_array
            set_array3 = DataArray(preset_data=setpara,
                                   name=name,
                                   array_id=name + '_set',
                                   is_setpoint=True)
            #            if loop_num == 1:
            #                data = data.T[0]
            qubit_data_array.append(
                DataArray(preset_data=data,
                          name=parameter,
                          array_id=arrayid,
                          is_setpoint=False))

    data_set_new = DataSet(location=new_location,
                           io=NewIO,
                           formatter=formatter)
    data_set_new.add_array(set_array1)
    data_set_new.add_array(set_array2)
    if loop_num > 1:
        data_set_new.add_array(set_array3)
    for q in range(qubit_num):
        data_set_new.add_array(qubit_data_array[q])

    return data_set_new
]
# Build a second dataset from arrays defined earlier in this script.
# NOTE(review): arrays4 is assembled but arrays3 is what gets stored —
# confirm which collection is intended.
arrays4 = [data1, data2, data3]
data_set_2 = new_data(
    arrays=arrays3,
    location=None,
    loc_record={
        'name': 'T1',
        'label': 'Vread_sweep'
    },
    io=NewIO,
)

# persist the metadata next to the data
data_set_2.save_metadata()

# reload a previously saved measurement by its relative location
test_location = '2017-08-18/20-40-19_T1_Vread_sweep'

data_set_3 = DataSet(
    location=test_location,
    io=NewIO,
)
data_set_3.read()
# pull the underlying numpy arrays out of the loaded DataSet
AWGpara_array = data_set_3.arrays['AWGpara_set'].ndarray
index0_array = data_set_3.arrays['index0_set'].ndarray
digitizer_array = data_set_3.arrays['digitizer_digitizer'].ndarray

#
#print('loop.data_set: %s' % LP.data_set)
#
#data = LP.run()
#
def convert_to_01_state(data_set,
                        threshold,
                        loop_num,
                        qubit_num,
                        name='frequency',
                        unit='GHz',
                        sweep_array=None):
    """Convert ordered raw traces into binary 0/1 qubit states.

    The raw data is first reordered via ``convert_to_ordered_data``.
    Each segment of ``seg_size`` consecutive samples then becomes one
    0/1 value: 1 if any sample in the segment is at or below
    ``threshold``, otherwise 0.  Setpoint arrays are copied through
    unchanged into a new DataSet at module-level ``new_location``.

    Args:
        data_set: input DataSet holding the raw traces.
        threshold: level at or below which a sample counts as state 1.
        loop_num, qubit_num, name, unit, sweep_array: forwarded to
            ``convert_to_ordered_data``; ``name`` also names the
            generated setpoint array.

    Returns:
        The new DataSet with setpoint arrays and thresholded 0/1 data.

    NOTE(review): relies on module globals ``seg_size``, ``new_location``,
    ``NewIO`` and ``formatter`` not defined in this function; also
    ``set_array1``/``set_array2`` raise NameError in add_array if no
    matching setpoint arrays exist.
    """
    data_set = convert_to_ordered_data(data_set, loop_num, qubit_num, name,
                                       unit, sweep_array)

    qubit_data_array = []

    for parameter in data_set.arrays:
        data_array = data_set.arrays[parameter]
        dimension_1 = data_array.shape[0]
        arrayid = data_set.arrays[parameter].array_id
        if parameter[
                -3:] == 'set':  ## or data_set.arrays[parameter].is_setpoint
            # 1-D and 2-D setpoint arrays are passed through unchanged
            if len(data_array.shape) == 1:
                set_array1 = DataArray(preset_data=data_array.ndarray,
                                       name=parameter,
                                       array_id=arrayid,
                                       is_setpoint=True)
            elif len(data_array.shape
                     ) == 2 and not parameter.startswith('index'):
                set_array2 = DataArray(preset_data=data_array.ndarray,
                                       name=parameter,
                                       array_id=arrayid,
                                       is_setpoint=True)

        elif parameter[-3:] != 'set':
            seg_num = int(data_set.arrays[parameter].shape[1] / seg_size)
            data = np.ndarray(shape=(dimension_1, seg_num))
            setpara = np.ndarray(shape=(dimension_1, seg_num))

            for k in range(dimension_1):
                for j in range(seg_num):
                    setpara[k][j] = j
                    # State 1 if any sample in the segment dips to/below
                    # threshold.  BUGFIX: the previous trailing check
                    # `if i == seg_size - 1: data[k][j] = 0` overwrote a
                    # detected 1 with 0 whenever the dip happened on the
                    # segment's last sample; the for/else only assigns 0
                    # when the loop finished without a break.
                    for i in range(seg_size):
                        if data_array.ndarray[k][j * seg_size +
                                                 i] <= threshold:
                            data[k][j] = 1
                            break
                    else:
                        data[k][j] = 0

            set_array3 = DataArray(preset_data=setpara,
                                   name=name,
                                   array_id=name + '_set',
                                   is_setpoint=True)
            qubit_data_array.append(
                DataArray(preset_data=data,
                          name=parameter,
                          array_id=arrayid,
                          is_setpoint=False))

    data_set_new = DataSet(location=new_location,
                           io=NewIO,
                           formatter=formatter)
    data_set_new.add_array(set_array1)
    data_set_new.add_array(set_array2)
    if loop_num > 1:
        data_set_new.add_array(set_array3)
    for q in range(qubit_num):
        data_set_new.add_array(qubit_data_array[q])

    return data_set_new
Exemple #23
0
def majority_vote(data_set,
                  threshold,
                  qubit_num=1,
                  repetition=100,
                  name='frequency',
                  unit='GHz',
                  sweep_array=None,
                  average=False):
    """Apply three-qubit majority voting to thresholded single-shot data.

    The raw data set is first converted to 0/1 qubit states via
    ``convert_to_01_state``; each repetition is then assigned the majority
    outcome (at least 2 of 3 ones) of the three digitizer qubit arrays.

    Args:
        data_set: raw DataSet containing the digitizer arrays.
        threshold: level separating state 0 from state 1 in the raw traces
            (forwarded to ``convert_to_01_state``).
        qubit_num: forwarded to ``convert_to_01_state``.
            NOTE(review): the vote itself is hard-coded to
            ``digitizerqubit_1..3`` regardless of this value — confirm
            callers always use three qubits here.
        repetition: number of single-shot repetitions per sweep point.
        name: setpoint name forwarded to ``convert_to_01_state``.
        unit: forwarded to ``convert_to_01_state`` (otherwise unused here).
        sweep_array: optional sweep axis; its length fixes the second data
            dimension (defaults to 2 when absent).
        average: if True, the result array holds per-point averages of the
            votes instead of the raw per-repetition votes.

    Returns:
        A new DataSet (at module-global ``new_location``) holding the
        setpoint arrays plus a 'vote' data array.

    Raises:
        ValueError: if the converted data set contains no arrays.
    """
    data_set = convert_to_01_state(data_set, threshold, qubit_num, repetition,
                                   name, unit, sweep_array)

    set_arrays = []
    dimension_1 = None
    for parameter in data_set.arrays:
        array = data_set.arrays[parameter]
        # First dimension (outer sweep) is shared by all arrays.
        dimension_1 = array.ndarray.shape[0]
        if parameter.endswith('set'):
            set_arrays.append(
                DataArray(preset_data=array.ndarray,
                          name=parameter,
                          array_id=array.array_id,
                          is_setpoint=True))
    if dimension_1 is None:
        # Fix: the original raised an opaque NameError on an empty DataSet.
        raise ValueError('data_set contains no arrays')

    dimension_2 = len(sweep_array) if sweep_array is not None else 2
    vote_data = np.ndarray(shape=(dimension_1, dimension_2, repetition))
    average_vote_data = np.ndarray(shape=(dimension_1, dimension_2))

    # Hoisted out of the innermost loop: the three (hard-coded) voters.
    qubits = (data_set.digitizerqubit_1,
              data_set.digitizerqubit_2,
              data_set.digitizerqubit_3)
    for k in range(dimension_1):
        for m in range(dimension_2):  # renamed from ambiguous 'l' (E741)
            for repe in range(repetition):
                ones = sum(q[k][m][repe] for q in qubits)
                # Majority of 3 voters: at least two ones.
                vote_data[k][m][repe] = 1 if ones >= 2 else 0

            if average:
                average_vote_data[k][m] = np.average(vote_data[k][m])
                print('average: ', average_vote_data[k][m])

    data = vote_data if not average else average_vote_data

    vote_data_array = DataArray(preset_data=data,
                                name='vote',
                                array_id='vote',
                                is_setpoint=False)

    data_set_new = DataSet(location=new_location,
                           io=NewIO,
                           formatter=formatter)

    for array in set_arrays:
        data_set_new.add_array(array)
    data_set_new.add_array(vote_data_array)

    return data_set_new
def convert_to_ordered_data(data_set,
                            loop_num,
                            qubit_num,
                            name='frequency',
                            unit='GHz',
                            sweep_array=None):
    """Strip markers from interleaved digitizer traces and split per qubit.

    Raw arrays interleave data and marker samples (even/odd indices). For
    each outer sweep row the first active marker locates the start of the
    useful data, which is then chopped into per-qubit arrays and bundled
    with the setpoint arrays into a new DataSet.

    NOTE(review): reads module-level globals ``repetition``, ``seg_size``,
    ``new_location``, ``NewIO`` and ``formatter`` — they are not parameters;
    confirm they are set before calling. ``unit`` is currently unused.
    Also note ``set_array1``/``set_array2``/``set_array3`` are only bound
    inside matching branches; an input without the expected arrays would
    raise NameError at the ``add_array`` calls below.
    """
    #    Dimension = '1D'
    for parameter in data_set.arrays:
        data_array = data_set.arrays[parameter]
        dimension_1 = data_array.shape[0]
        arrayid = data_set.arrays[parameter].array_id
        if parameter.endswith('set'):
            # Setpoint arrays: keep 1D and 2D (non-index) axes as-is.
            if data_array.ndarray.ndim == 1:
                set_array1 = DataArray(preset_data=data_array.ndarray,
                                       name=parameter,
                                       array_id=arrayid,
                                       is_setpoint=True)
            elif data_array.ndarray.ndim == 2 and not parameter.startswith(
                    'index'):
                set_array2 = DataArray(preset_data=data_array.ndarray,
                                       name=parameter,
                                       array_id=arrayid,
                                       is_setpoint=True)

        elif not parameter.endswith('set'):

            # Useful samples per row: half the raw row is data (other half
            # markers), and repetition/(repetition+1) of it is payload.
            data_num = int(data_set.arrays[parameter].shape[1] / 2 /
                           (repetition + 1) * repetition)

            # Samples per individual qubit after de-multiplexing.
            qubit_data_num = int(data_num / qubit_num)

            data = np.ndarray(shape=(dimension_1, data_num))
            marker = np.ndarray(shape=(dimension_1, data_num))
            setpara = np.ndarray(shape=(dimension_1, qubit_data_num))

            qubit_data = np.ndarray(shape=(qubit_num, dimension_1,
                                           qubit_data_num))
            qubit_data_array = []
            for k in range(dimension_1):
                # Even samples are data, odd samples are markers.
                raw_data = data_array[k][::2]
                raw_marker = data_array[k][1::2]
                # Scan for the first active marker: start of valid data.
                for seg in range(seg_size * loop_num):
                    if raw_marker[seg] > 0.1:  ##  a better threshold ???
                        break
                data[k] = raw_data[seg:data_num + seg]
                marker[k] = raw_marker[seg:data_num + seg]
                if sweep_array is None:
                    # Fallback setpoints: evenly spaced sample indices.
                    setpara[k] = np.linspace(0, data_num - 1, qubit_data_num)
                else:
                    # Tile the sweep axis: seg_size repeats per point,
                    # stacked once per repetition, then flattened.
                    sa = np.vstack(
                        [np.repeat(sweep_array, int(seg_size), axis=0)] *
                        repetition)
                    setpara[k] = sa.reshape(sa.size, )

                if qubit_num > 1:
                    # Segments alternate between qubits; pick every
                    # qubit_num-th segment for each qubit.
                    data_reshape = data[k].reshape(int(data_num / seg_size),
                                                   seg_size)
                    for q in range(qubit_num):
                        qubit_data[q][k] = np.append(
                            np.array([]), data_reshape[q::qubit_num])
                elif qubit_num == 1:
                    qubit_data[0][k] = data[k]

            set_array3 = DataArray(preset_data=setpara,
                                   name=name,
                                   array_id=name + '_set',
                                   is_setpoint=True)
            for q in range(qubit_num):
                qubit_data_array.append(
                    DataArray(preset_data=qubit_data[q],
                              name=parameter + 'qubit_%d' % (q + 1),
                              array_id=arrayid + 'qubit_%d' % (q + 1),
                              is_setpoint=False))

    # Assemble the reordered DataSet at the module-global location.
    data_set_new = DataSet(location=new_location,
                           io=NewIO,
                           formatter=formatter)
    data_set_new.add_array(set_array1)
    data_set_new.add_array(set_array2)
    # The extra setpoint axis only exists for multi-loop sweeps.
    if loop_num != 1:
        data_set_new.add_array(set_array3)
    for q in range(qubit_num):
        data_set_new.add_array(qubit_data_array[q])
#    data_set_new.add_array(data_array4)
    return data_set_new