Example No. 1
    def test_incremental_write(self):
        formatter = GNUPlotFormat()
        location = self.locations[0]
        location2 = self.locations[1]  # use 2nd location for reading back in
        data = DataSet1D(name="test_incremental_write", location=location)
        path = location + '/x_set.dat'

        data_copy = DataSet1D(False)

        # empty the data and mark it as unmodified
        data.x_set[:] = float('nan')
        data.y[:] = float('nan')
        data.x_set.modified_range = None
        data.y.modified_range = None

        # simulate writing after every value comes in, even within
        # one row (x comes first, it's the setpoint)
        # we'll add a '*' after each write and check that they're
        # in the right places afterward, i.e. we don't write any given
        # row until it's done and we never totally rewrite the file
        self.stars_before_write = 0
        for i, (x, y) in enumerate(zip(data_copy.x_set, data_copy.y)):
            data.x_set[i] = x
            formatter.write(data, data.io, data.location)
            formatter.write(data, data.io, location2)
            self.add_star(path)

            data.y[i] = y
            formatter.write(data, data.io, data.location)
            data.x_set.clear_save()
            data.y.clear_save()
            formatter.write(data, data.io, location2)
            self.add_star(path)

            # we wrote to a second location without the stars, so we can read
            # back in and make sure that we get the right last_saved_index
            # for the amount of data we've read.
            reread_data = load_data(location=location2,
                                    formatter=formatter,
                                    io=data.io)
            self.assertEqual(repr(reread_data.x_set.tolist()),
                             repr(data.x_set.tolist()))
            self.assertEqual(repr(reread_data.y.tolist()),
                             repr(data.y.tolist()))
            self.assertEqual(reread_data.x_set.last_saved_index, i)
            self.assertEqual(reread_data.y.last_saved_index, i)

        starred_file = '\n'.join([
            '# x_set\ty', '# "X"\t"Y"', '# 5', '1\t3', '**2\t4', '**3\t5',
            '**4\t6', '**5\t7', '*'
        ])

        with open(path) as f:
            self.assertEqual(f.read(), starred_file)
        self.assertEqual(self.stars_before_write, 1)
Example No. 2
    def test_get_live(self):
        loc = 'live from New York!'

        class MockLive:
            pass

        live_data = MockLive()

        dm = MockDataManager()
        dm.location = loc
        dm.live_data = live_data

        data = load_data(data_manager=dm, location=loc)
        self.assertEqual(data, live_data)

        for nd in (None, NoData()):
            dm.live_data = nd
            with self.assertRaises(RuntimeError):
                load_data(data_manager=dm, location=loc)
            with self.assertRaises(RuntimeError):
                load_data(data_manager=dm)
Example No. 3
    def onOpenFile(self):
        options = QFileDialog.Options()
        # options |= QFileDialog.DontUseNativeDialog
        fileName, _ = QFileDialog.getOpenFileName(
            self,
            "QFileDialog.getOpenFileName()",
            "",
            "All Files (*);;Dataset Files (*.dat)",
            options=options)
        if fileName:
            dataset = load_data(location=fileName)
            self.data_array_widget.loadDataSet(dataset)
Example No. 4
    def test_dataset_with_missing_attrs(self):
        data1 = new_data(formatter=self.formatter,
                         location=self.loc_provider,
                         name='test_missing_attr')
        arr = DataArray(array_id='arr', preset_data=np.linspace(0, 10, 21))
        data1.add_array(arr)
        data1.write()
        # data2 = DataSet(location=data1.location, formatter=self.formatter)
        # data2.read()
        data2 = load_data(location=data1.location, formatter=self.formatter)
        # cannot use the check arrays equal as I expect the attributes
        # to not be equal
        np.testing.assert_array_equal(data2.arrays['arr'], data1.arrays['arr'])
Example No. 5
    def test_read_write(self):
        for f in self.formatters:
            print('test formatter %s' % f)
            dataset = DataSet2D(name="test_read_write")
            dataset.formatter = f()

            dataset.add_metadata(self.metadata)
            dataset.write(write_metadata=True)

            dataset2 = load_data(dataset.location, formatter=f())
            self.assertEqual(list(dataset.arrays.keys()),
                             list(dataset2.arrays.keys()))
            # strings should be read and written identically
            self.assertEqual(dataset.metadata['string'],
                             dataset2.metadata['string'])
Example No. 6
def import_dat_file(location: str) -> List[int]:
    """
    This imports a QCoDeS legacy DataSet
    """


    loaded_data = load_data(location)
    meas = setup_measurement(loaded_data)
    run_ids = []
    with meas.run() as datasaver:
        datasaver.dataset.add_metadata('snapshot', json.dumps(loaded_data.snapshot()))
        for arrayname, array in loaded_data.arrays.items():
            if not array.is_setpoint:
                run_id = store_array_to_database(datasaver, array)
                run_ids.append(run_id)
    return run_ids
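
A minimal usage sketch (not part of the scraped source): it assumes the setup_measurement and store_array_to_database helpers referenced above are importable alongside import_dat_file, and borrows a legacy location string that appears later in this section purely as an illustration.

# Hedged sketch: import one legacy .dat dataset into the default experiment.
# The location below is a placeholder; any path readable by load_data works.
run_ids = import_dat_file('2018-02-07/13-18-00/RB_experimentAllXY_sequence')
print(run_ids)  # one run id per non-setpoint array in the legacy DataSet
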
Example No. 7
def data_set_plot(data_set, data_location):

    Plot = MatPlot()

    raw_data_set = load_data(
        location=data_location,
        io=NewIO,
    )

    data_set_P = convert_to_probability(raw_data_set, threshold=0.025)
    x_data = data_set_P.arrays['vsg2_frequency_set'].ndarray
    P_data = data_set_P.arrays['digitizer'].ndarray.T[0]

    plt.plot(x_data, P_data)
Example No. 8
def import_dat_file(location: str,
                    exp: Optional[Experiment] = None) -> List[int]:
    """
    This imports a QCoDeS legacy DataSet into the database.

    Args:
        location: Path to file containing legacy dataset
        exp: Specify the experiment to store data to.
            If None the default one is used. See the
            docs of :class:`.Measurement` for more details.
    """

    loaded_data = load_data(location)
    meas = setup_measurement(loaded_data, exp=exp)
    run_ids = []
    with meas.run() as datasaver:
        datasaver.dataset.add_metadata('snapshot',
                                       json.dumps(loaded_data.snapshot()))
        for arrayname, array in loaded_data.arrays.items():
            if not array.is_setpoint:
                run_id = store_array_to_database(datasaver, array)
                run_ids.append(run_id)
    return run_ids
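
A short sketch of the exp argument in use (an assumption-laden illustration, not the original author's code): it presumes a QCoDeS database has already been initialised, uses the standard load_or_create_experiment helper, and treats the experiment name, sample name, and location below as placeholders.

from qcodes.dataset.experiment_container import load_or_create_experiment

# Placeholder experiment/sample names and location; adjust to the active database.
exp = load_or_create_experiment('legacy_import', sample_name='sample_1')
run_ids = import_dat_file('2018-04-06/11-26-17/RB_experimentAllXY_sequence', exp=exp)
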
Example No. 9
    def test_get_read(self):
        data = load_data(formatter=MockFormatter(), location='here!')
        self.assertEqual(data.has_read_data, True)
        self.assertEqual(data.has_read_metadata, True)
Example No. 10
    def test_load_false(self):
        with self.assertRaises(ValueError):
            load_data(False)
Example No. 11
    def test_no_saved_data(self):
        with self.assertRaises(IOError):
            load_data('_no/such/file_')
Example No. 12
'''
location37 = '2018-02-07/13-18-00/RB_experimentAllXY_sequence'
'''
'''

location38 = '2018-02-07/16-03-46/RB_experimentAllXY_sequence'

location39 = '2018-02-07/17-14-48/RB_experimentAllXY_sequence'

location40 = '2018-02-09/11-56-18/RB_experimentAllXY_sequence'

location41 = '2018-02-09/14-18-23/RB_experimentAllXY_sequence'

location = location41

ds = load_data(location=location, io=IO, formatter=formatter)
#DS = load_data(location = location1, io = IO)

#%%
'''
ds = DS
Qubit = 2
i = 0 if Qubit == 2 else 1

sweep_point = 31

ramsey_point = 11

fitting_point = 18

x = np.linspace(1, fitting_point, fitting_point)
Example No. 13
location3 = '2018-04-05/20-26-26/RB_experimentAllXY_sequence'
location3 = '2018-04-05/20-55-38/RB_experimentAllXY_sequence'
location3 = '2018-04-06/10-55-25/RB_experimentAllXY_sequence'
location3 = '2018-04-06/11-26-17/RB_experimentAllXY_sequence'

location3 = '2018-04-06/19-53-52/RB_experimentAllXY_sequence'

location3 = '2018-06-18/14-01-15/RB_experimentAllXY_sequence'

location3 = '2018-06-18/14-19-28/RB_experimentAllXY_sequence'

location3 = '2018-06-18/14-50-55/RB_experimentAllXY_sequence'

#location3 = '2018-04-04/18-38-25/RB_experimentAllXY_sequence'

ds = load_data(location=location3, io=IO_new, formatter=formatter)

#%%

Qubit = 2
i = 0 if Qubit == 2 else 1

fitting_point = 51
start_point = 0
'''
#x = np.linspace(1e-6, 25e-3, 60)[:fitting_point]
x = np.linspace(0, 1.5e-6, 31)[:fitting_point]
y = ds.probability_data[:, i, start_point:start_point + fitting_point].mean(axis=0)

pars, pcov = curve_fit(T1_fitting, x, y)
Example No. 14
from qcodes.data.io import DiskIO
from qcodes.data.data_set import new_data, DataSet, load_data
from qcodes.data.data_array import DataArray
import matplotlib.pyplot as plt
import matplotlib.widgets as widgets
from qcodes.plots.qcmatplotlib import MatPlot
from qcodes.plots.pyqtgraph import QtPlot
from mpldatacursor import datacursor
import numpy as np
#%%

NewIO = DiskIO(base_location='C:\\Users\\LocalAdmin\\Documents')
formatter = HDF5FormatMetadata()
try_location = '2017-09-04/17-23-05Finding_ResonanceRabi_Sweep'

DS = load_data(location=try_location, io=NewIO)

DS_P = convert_to_probability(DS, 0.025)

DS_new = new_data(location=try_location, io=NewIO)


x_data = np.linspace(1,10,10)
y_data = np.linspace(11,20,10)
#z_data = np.linspace(101,201,101)

Mplot = MatPlot(x_data,y_data)
Qplot = QtPlot(x_data,y_data)

Mplot = MatPlot()
location_new2 = '2018-09-06/13-46-30/RB_experimentAllXY_sequence'   #   Clifford + CZ

'''
very high but seems like an even odd effect
'''
location_new2 = '2018-09-06/23-39-09/RB_experimentAllXY_sequence'   #   Clifford + CZ


'''
non_interleave new
'''
location_new2 = '2018-09-04/11-24-17/RB_experiment2AllXY_sequence'   #   Clifford without CZ


DS4 = load_data(location=location_new2, io=IO_K2, formatter=formatter)

#DS = load_data(location = location_new2, io = IO_K, formatter = formatter)

#%% character benchmarking without normalization

ds = DS
fitting_points = 18
#seq_rep_num = 20
seq_rep_num = int(DS.sequence_number_set.shape[0]/16)
sequence_number = 16*seq_rep_num
#sequence_number = 112
repetition = 100
init_state = ['00', '01', '10', '11',]

x = np.array([len(clifford_sets[0][i]) for i in range(fitting_points)])
Example No. 16
location_new2 = '2018-07-21/19-03-59/RB_experimentAllXY_sequence'
location_new2 = '2018-07-21/23-23-55/RB_experimentAllXY_sequence'
location_new2 = '2018-07-22/02-18-53/RB_experimentAllXY_sequence'
location_new2 = '2018-07-23/18-36-11/RB_experimentAllXY_sequence'
location_new2 = '2018-07-23/19-35-23/RB_experimentAllXY_sequence'

#location_new2 = '2018-07-28/22-00-35/RB_experimentAllXY_sequence'
#location_new2 = '2018-07-28/23-36-24/RB_experimentAllXY_sequence'

location_new2 = '2018-08-19/23-43-55/RB_experimentAllXY_sequence'

location_new2 = '2018-08-26/13-44-11/RB_experimentAllXY_sequence'

location_new2 = '2018-08-28/16-16-04/RB_experimentAllXY_sequence'

DS = load_data(location=location_new2, io=IO_K, formatter=formatter)

#%%
NewIO = DiskIO(base_location='D:\\Data\\RB_experiment')

#DS = load_data(location = location5, io = NewIO)

DS = load_data(location=location8, io=IO, formatter=formatter)
DS2 = load_data(location=location6, io=IO, formatter=formatter)
#%%

ds = DS
Qubit = 2
i = 0 if Qubit == 2 else 1
ramsey_point = 11
fitting_point = 26
Example No. 17
def data_set_plot(data_set, data_location, sweep_type):

    Plot = MatPlot()

    threshold = 0.025
    name = 'Rabi'

    raw_data_set = load_data(
        location=data_location,
        io=data_IO,
    )
    """
    for 1D sweep outside a sequence, sequence is only one unit sequence
    """
    if sweep_type == 1:

        #        raw_data_set = load_data(location = data_location, io = data_IO,)

        data_set_P = convert_to_probability(raw_data_set,
                                            threshold=threshold,
                                            name=name)

        x_data = data_set_P.arrays['vsg2_frequency_set'].ndarray
        P_data = data_set_P.arrays['digitizer'].ndarray.T[0]

        plt.plot(x_data, P_data)
        datacursor()
    """
    for 2D sweep both inside sequence and outside a sequence
    """
    if sweep_type == 2:
        #        raw_data_set = load_data(location = data_location, io = data_IO,)

        data_set_P = convert_to_probability(raw_data_set,
                                            threshold=threshold,
                                            name=name)

        x_data = data_set_P.arrays['vsg2_frequency_set'].ndarray
        y_data = data_set_P.arrays[name + '_set'].ndarray[0]
        P_data = data_set_P.arrays['digitizer'].ndarray

        X, Y = np.meshgrid(x_data, y_data)

        plt.pcolor(X, Y, P_data.T)
        datacursor()
    """
    for 1D sweep inside a sequence, no qcodes-loop function
    """
    if sweep_type == 0:
        data_set_P = convert_to_probability(raw_data_set,
                                            threshold=threshold,
                                            name=name)
        x_data = data_set_P.arrays[name + '_set'].ndarray[0]

        P_data = data_set_P.arrays['digitizer'].ndarray[0]

        plt.plot(x_data, P_data)
        datacursor()

        return 0
formatter = HDF5FormatMetadata()

#%%     old data

location_15crot = '2017-10-25/19-36-23/BillCoish_experimentAllXY_sequence'
location_3init = '2017-10-25/20-04-38/BillCoish_experimentAllXY_sequence'
#location = '2017-12-12/17-47-41/RB_experimentAllXY_sequence'

#%%     new data

location_15crot = '2017-10-25/19-36-23/BillCoish_experimentAllXY_sequence'
location_3init = '2017-10-25/20-04-38/BillCoish_experimentAllXY_sequence'
#location = '2017-12-12/17-47-41/RB_experimentAllXY_sequence'

#%%
ds = load_data(location=location_3init, io=IO, formatter=formatter)

#%%     average trace

x = ds.index3_set[0, 0, 0, 0, :]
i = 0
y = ds.raw_data[:, 0, i, :, :].mean(axis=(0, 1))
y = ds.raw_data[10, 0, i, :, :].mean(axis=0)
y = ds.raw_data[10, 0, i, 58, :]
#%%
'''
Qubit = 2
i = 0 if Qubit == 2 else 1

fitting_point = 59
x = np.linspace(1e-6, 25e-3, 60)[:fitting_point]