def setDatadir(self, newindex):
        logging.info(f'Setting datadir with index: {newindex}')
        oldindex = self.datadirindex
        self.datadirindex = newindex
        datadir = self.datadirlist[newindex]

        self.io = DiskIO(datadir)
        logging.info('DataViewer: data directory %s' % datadir)
        self.logfile.setText('Log files at %s' % datadir)

        old_action = self.menuFolder.actions()[oldindex + 1]
        new_action = self.menuFolder.actions()[newindex + 1]
        old_action.setText(old_action.text()[2:])
        new_action.setText('>>' + new_action.text())
        self.updateLogs()
    def __init__(self, name, qubits, awg, awg2, pulsar,
                 **kw):  ## name = 'calibration_20170615_Xiao'...

        super().__init__(name, qubits, awg, awg2, pulsar, **kw)

        self.Pi_length = [qubit.Pi_pulse_length for qubit in qubits]
        self.X_length = 0
        self.Rabi_power = 0

        #        self.qubits_name = qubits_name

        #        self.calibration_sequence = Sequence()
        self.sweep_inside_sequence = False

        #        self.formatter = HDF5FormatMetadata()
        self.data_IO = DiskIO(base_location='C:\\Users\\LocalAdmin\\Documents')
        self.calibration_data_location = self.data_location + '_calibration'

        self.data_set = None

        self.calibration = 'Ramsey'
        self.experiment = 'AllXY'

        self.calibrated_parameter = None
Example #4
    def test_write_copy(self):
        data = DataSet1D(location=False)
        mockbase = os.path.abspath('some_folder')
        data.io = DiskIO(mockbase)

        mr = (2, 3)
        mr_full = (0, 4)
        lsi = 1
        data.x_set.modified_range = mr
        data.y.modified_range = mr
        data.x_set.last_saved_index = lsi
        data.y.last_saved_index = lsi

        with self.assertRaises(TypeError):
            data.write_copy()

        with self.assertRaises(TypeError):
            data.write_copy(path='some/path', io_manager=DiskIO('.'))

        with self.assertRaises(TypeError):
            data.write_copy(path='some/path', location='something/else')

        data.formatter = RecordingMockFormatter()
        data.write_copy(path='/some/abs/path')
        self.assertEqual(data.formatter.write_calls,
                         [(None, '/some/abs/path')])
        self.assertEqual(data.formatter.write_metadata_calls,
                         [(None, '/some/abs/path', False)])
        # check that the formatter gets called as if nothing has been saved
        self.assertEqual(data.formatter.modified_ranges, [{
            'x_set': mr_full,
            'y': mr_full
        }])
        self.assertEqual(data.formatter.last_saved_indices, [{
            'x_set': None,
            'y': None
        }])
        # but the dataset afterward has its original mods back
        self.assertEqual(data.x_set.modified_range, mr)
        self.assertEqual(data.y.modified_range, mr)
        self.assertEqual(data.x_set.last_saved_index, lsi)
        self.assertEqual(data.y.last_saved_index, lsi)

        # recreate the formatter to clear the calls attributes
        data.formatter = RecordingMockFormatter()
        data.write_copy(location='some/rel/path')
        self.assertEqual(data.formatter.write_calls,
                         [(mockbase, 'some/rel/path')])
        self.assertEqual(data.formatter.write_metadata_calls,
                         [(mockbase, 'some/rel/path', False)])

        mockbase2 = os.path.abspath('some/other/folder')
        io2 = DiskIO(mockbase2)

        with self.assertRaises(ValueError):
            # if location=False we need to specify it in write_copy
            data.write_copy(io_manager=io2)

        data.location = 'yet/another/path'
        data.formatter = RecordingMockFormatter()
        data.write_copy(io_manager=io2)
        self.assertEqual(data.formatter.write_calls,
                         [(mockbase2, 'yet/another/path')])
        self.assertEqual(data.formatter.write_metadata_calls,
                         [(mockbase2, 'yet/another/path', False)])
Example #5
def RB_fitting(m, A, B, p):
    #: Randomized-benchmarking power-law decay; the function name and argument order
    #: are inferred here because the original definition was truncated in this excerpt.
    return A * (p**m) + B


def sequence_decay(t, A, B, T):

    return A * np.exp(-t / T) + B


def Exp_Sin_decay(t, A, B, F, Phase, T):

    return A * np.exp(-t / T) * np.sin(2 * np.pi * F * t + Phase) + B
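
#: Minimal usage sketch (not part of the original script): fitting synthetic data to
#: sequence_decay with scipy.optimize.curve_fit. The data and the initial guess p0
#: below are illustrative only.
import numpy as np
from scipy.optimize import curve_fit

t_demo = np.linspace(0, 50e-6, 100)
y_demo = sequence_decay(t_demo, A=0.4, B=0.5, T=10e-6) + 0.01 * np.random.randn(100)
popt, pcov = curve_fit(sequence_decay, t_demo, y_demo, p0=[0.5, 0.5, 5e-6])
A_fit, B_fit, T_fit = popt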


#%%
IO = DiskIO(base_location='C:\\Users\\LocalAdmin\\Documents\\RB_experiment')
formatter = HDF5FormatMetadata()
'''
data measured with the Hadamard gate in the 2-qubit space
'''
location1 = '2018-01-09/11-35-49/RB_experimentAllXY_sequence'
location2 = '2018-01-09/15-00-49/RB_experimentAllXY_sequence'
location3 = '2018-01-09/17-00-07/RB_experimentAllXY_sequence'
location4 = '2018-01-10/16-22-55/RB_experimentAllXY_sequence'

location5 = '2018-01-16/14-39-54/RB_experimentAllXY_sequence'
'''
data measured with the Hadamard gate in the 2-qubit space
'''
location6 = '2018-01-16/15-26-33/RB_experimentAllXY_sequence'
location7 = '2018-01-16/15-38-47/RB_experimentAllXY_sequence'
Example #6

def T2_star_fitting(t, A, B, F, Phase, T):

    return A * np.exp(-(t / T)**2) * np.sin(2 * np.pi * F * t + Phase) + B


def T2_star_fitting2(t, A, B, T):

    return A * np.exp(-(t / T)**2) + B


#    return A*(np.exp(-(t/T)**2))*+B

#%%
IO = DiskIO(base_location='C:\\Users\\LocalAdmin\\Documents\\RB_experiment')
formatter = HDF5FormatMetadata()

IO_new = DiskIO(
    base_location=
    'K:\\ns\\qt\\spin-qubits\\data\\b059_data\\2018 data\\Data\\RB_experiment')

#%%
location = '2017-12-12/16-57-48/RB_experimentAllXY_sequence'
location2 = '2017-12-12/17-28-29/RB_experimentAllXY_sequence'
location3 = '2017-12-12/17-47-41/RB_experimentAllXY_sequence'

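#: Note: the reassignments below overwrite location3; only the last value is used.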
location3 = '2018-03-23/14-41-59/RB_experimentAllXY_sequence'
location3 = '2018-03-23/17-16-26/RB_experimentAllXY_sequence'

location3 = '2018-03-23/17-39-47/RB_experimentAllXY_sequence'
Example #7
Sweep_Value = P[1:5.5:0.5]

Sweep_2 = Q[2:10:1]
LP1 = Loop(sweep_values=Sweep_2).each(F)

#LP = Loop(sweep_values = Sweep_Value).each(LP1, TP,  E)
LP = Loop(sweep_values=Sweep_Value).each(F, E)

#%%
#LP.with_bg_task(task = Print,bg_final_task = None,min_delay=1).run()

#LP = Loop(sweep_values = Sweep_Value,).each(F)

print('loop.data_set: %s' % LP.data_set)

NewIO = DiskIO(base_location='C:\\Users\\LocalAdmin\\Documents')
formatter = HDF5FormatMetadata()

OldIO = DiskIO(base_location='D:\\文献\\QuTech\\QTlab\\xiaotest\\testIO')

## get_data_set should take parameters such as io, location, formatter, and others
data = LP.get_data_set(location=None, loc_record={'name': 'T1', 'label': 'Vread_sweep'},
                       io=NewIO, formatter=formatter)
#data = LP.get_data_set(data_manager=False, location=None, loc_record = {'name':'T1', 'label':'T_load_sweep'})
print('loop.data_set: %s' % LP.data_set)

#%%
DS = new_data(location='aaaaaaaaaa', io=NewIO)
def live_plotting():
    for para in data.arrays:
        DS.arrays[para] = data.arrays[para]
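
#: Hedged usage sketch (not in the original script): attach live_plotting as a
#: background task, so the arrays are copied into DS periodically while the loop
#: runs, mirroring the commented-out with_bg_task() call above. The min_delay
#: value is illustrative.
#data = LP.with_bg_task(task=live_plotting, min_delay=1).run()
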
class TestMockInstLoop(TestCase):
    def setUp(self):
        get_data_manager().restart(force=True)
        kill_processes()
        # TODO: figure out what's leaving DataManager in a weird state
        # and fix it
        get_data_manager().restart(force=True)
        time.sleep(0.1)

        self.model = AMockModel()

        self.gates = MockGates(model=self.model, server_name='')
        self.source = MockSource(model=self.model, server_name='')
        self.meter = MockMeter(model=self.model, server_name='')
        self.location = '_loop_test_'
        self.location2 = '_loop_test2_'
        self.io = DiskIO('.')

        c1 = self.gates.chan1
        self.loop = Loop(c1[1:5:1], 0.001).each(c1)
        self.loop_progress = Loop(c1[1:5:1], 0.001,
                                  progress_interval=1).each(c1)

        self.assertFalse(self.io.list(self.location))
        self.assertFalse(self.io.list(self.location2))

    def tearDown(self):
        for instrument in [self.gates, self.source, self.meter]:
            instrument.close()

        get_data_manager().close()
        self.model.close()

        self.io.remove_all(self.location)
        self.io.remove_all(self.location2)

    def check_empty_data(self, data):
        expected = repr([float('nan')] * 4)
        self.assertEqual(repr(data.gates_chan1.tolist()), expected)
        self.assertEqual(repr(data.gates_chan1_set.tolist()), expected)

    def check_loop_data(self, data):
        self.assertEqual(data.gates_chan1.tolist(), [1, 2, 3, 4])
        self.assertEqual(data.gates_chan1_set.tolist(), [1, 2, 3, 4])

        self.assertTrue(self.io.list(self.location))

    def test_background_and_datamanager(self):
        # make sure that an unpicklable instrument can indeed run in a loop
        # because the instrument itself is in a server

        # TODO: if we don't save the dataset (location=False) then we can't
        # sync it when we're done. Should fix that - for now that just means
        # you can only do in-memory loops if you set data_manager=False
        # TODO: this is the one place we don't do quiet=True - test that we
        # really print stuff?
        data = self.loop.run(location=self.location, background=True)
        self.check_empty_data(data)

        # wait for process to finish (ensures that this was run in the bg,
        # because otherwise there *is* no loop.process)
        self.loop.process.join()

        data.sync()
        self.check_loop_data(data)

    def test_local_instrument(self):
        # a local instrument should work in a foreground loop, but
        # not in a background loop (should give a RuntimeError)
        self.gates.close()  # so we don't have two gates with same name
        gates_local = MockGates(model=self.model, server_name=None)
        self.gates = gates_local
        c1 = gates_local.chan1
        loop_local = Loop(c1[1:5:1], 0.001).each(c1)

        # if spawn, pickle will happen
        if mp.get_start_method() == "spawn":
            with self.assertRaises(RuntimeError):
                loop_local.run(location=self.location,
                               quiet=True,
                               background=True)
        # allow for *nix
        # TODO(giulioungaretti) see what happens?
        # What is the expected behaviour?
        # The RuntimeError will never be raised here, as the fork start method
        # won't try to pickle anything at all.
        else:
            logging.error(
                "this should not be allowed, but for now we let it be")
            loop_local.run(location=self.location, quiet=True)

        data = loop_local.run(location=self.location2,
                              background=False,
                              quiet=True)
        self.check_loop_data(data)

    def test_background_no_datamanager(self):
        data = self.loop.run(location=self.location,
                             background=True,
                             data_manager=False,
                             quiet=True)
        self.check_empty_data(data)

        self.loop.process.join()

        data.sync()
        self.check_loop_data(data)

    def test_foreground_and_datamanager(self):
        data = self.loop.run(location=self.location,
                             background=False,
                             quiet=True)
        self.assertFalse(hasattr(self.loop, 'process'))

        self.check_loop_data(data)

    def test_foreground_no_datamanager_progress(self):
        data = self.loop_progress.run(location=self.location,
                                      background=False,
                                      data_manager=False,
                                      quiet=True)
        self.assertFalse(hasattr(self.loop, 'process'))

        self.check_loop_data(data)

    @patch('qcodes.loops.tprint')
    def test_progress_calls(self, tprint_mock):
        data = self.loop_progress.run(location=self.location,
                                      background=False,
                                      data_manager=False,
                                      quiet=True)
        self.assertFalse(hasattr(self.loop, 'process'))

        self.check_loop_data(data)
        expected_calls = len(self.loop_progress.sweep_values) + 1
        self.assertEqual(tprint_mock.call_count, expected_calls)

        # now run again with no progress interval and check that we get no
        # additional calls
        data = self.loop_progress.run(location=False,
                                      background=False,
                                      data_manager=False,
                                      quiet=True,
                                      progress_interval=None)
        self.assertFalse(hasattr(self.loop, 'process'))

        self.check_loop_data(data)
        self.assertEqual(tprint_mock.call_count, expected_calls)

    def test_foreground_no_datamanager(self):
        data = self.loop.run(location=self.location,
                             background=False,
                             data_manager=False,
                             quiet=True)
        self.assertFalse(hasattr(self.loop, 'process'))

        self.check_loop_data(data)

    def test_enqueue(self):
        c1 = self.gates.chan1
        loop = Loop(c1[1:5:1], 0.01).each(c1)
        data1 = loop.run(location=self.location,
                         quiet=True,
                         background=True,
                         data_manager=True)

        # second running of the loop should be enqueued, blocks until
        # the first one finishes.
        # TODO: check what it prints?
        data2 = loop.run(location=self.location2,
                         quiet=True,
                         background=True,
                         data_manager=True)

        data1.sync()
        data2.sync()
        self.assertEqual(data1.gates_chan1.tolist(), [1, 2, 3, 4])
        for v in data2.gates_chan1:
            self.assertTrue(np.isnan(v))

        loop.process.join()
        data2.sync()
        self.assertEqual(data2.gates_chan1.tolist(), [1, 2, 3, 4])

        # and while we're here, check that running a loop in the
        # foreground *after* the background clears its .process
        self.assertTrue(hasattr(loop, 'process'))
        loop.run_temp()
        self.assertFalse(hasattr(loop, 'process'))
Example #9
def time_trace(dev_name: str, channels: Dict[str, int], units: Dict[str, str],
               prefactors: Dict[str, Any], samplerate: int,
               sampleduration: Union[float, int]):
    """Records a time trace of data from DAQ analog input channels, converts data to desired units.
    
    Args:
        dev_name: DAQ device name (e.g. 'Dev1').
        channels: Dict of {channel_name: analog_input} (e.g. {'MAG': 0, 'SUSCX': 1}).
        units: Dict of {channel_name: physical_unit} (e.g. {'MAG': 'Phi0', 'SUSCX': 'Phi0/A'}).
        prefactors: Dict of {channel_name: Pint Quantity} from microscope.get_prefactors().
        samplerate: DAQ sampling rate (for each channel) in Hz.
        sampleduration: Sampling time in seconds.
        
    Returns:
        Dict: mdict
    """
    loc_provider = qc.FormatLocation(
        fmt='./data/{date}/#{counter}_{name}_{time}')
    loc = loc_provider(DiskIO('.'), record={'name': 'time_trace'})
    pathlib.Path(loc).mkdir(parents=True, exist_ok=True)
    prefactor_strs = {}
    for ch in channels:
        unit = units[ch]
        prefactors[ch].ito('{}/V'.format(unit))
        prefactor_strs.update({
            ch:
            '{} {}'.format(prefactors[ch].magnitude, prefactors[ch].units)
        })
    nsamples = int(samplerate * sampleduration)
    time = np.linspace(0, sampleduration, nsamples)
    mdict = {
        'time': {
            'array': time,
            'unit': 's'
        },
        'metadata': {
            #'channels': channels,
            #'units': units,
            #'prefactors': prefactor_strs,
            'samplerate': samplerate,
            'sampleduration': sampleduration,
            'location': loc
        }
    }
    with nidaqmx.Task('time_trace_ai_task') as ai_task:
        for inst in DAQAnalogInputs.instances():
            inst.close()
        daq_ai = DAQAnalogInputs('daq_ai',
                                 dev_name,
                                 samplerate,
                                 channels,
                                 ai_task,
                                 samples_to_read=nsamples,
                                 timeout=sampleduration + 10)
        data_v = daq_ai.voltage()
        daq_ai.close()
    for i, ch in enumerate(channels):
        mdict.update({
            ch: {
                'array': data_v[i] * prefactors[ch].magnitude,
                'unit': units[ch],
                'prefactor': prefactor_strs[ch]
            }
        })
    io.savemat(loc + '/time_trace.mat', mdict)
    return mdict
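
#: Hedged usage sketch (not in the original script): a hypothetical call to
#: time_trace, assuming an NI DAQ device named 'Dev1' and a dict of Pint-Quantity
#: prefactors (e.g. from microscope.get_prefactors()). All values are illustrative.
#mdict = time_trace('Dev1',
#                   channels={'MAG': 0, 'SUSCX': 1},
#                   units={'MAG': 'Phi0', 'SUSCX': 'Phi0/A'},
#                   prefactors=prefactors,
#                   samplerate=10000,
#                   sampleduration=1)
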
    def get_surface(self, x_vec: np.ndarray, y_vec: np.ndarray,
                    tdc_params: Dict[str, Any]) -> None:
        """Performs touchdowns on a grid and fits a plane to the resulting surface.

        Args:
            x_vec: 1D array of x positions (must be same length as y_vec).
            y_vec: 1D array of y positions (must be same length as x_vec).
            tdc_params: Dict of capacitive touchdown parameters as defined
                in measurement configuration file.
        """
        old_pos = self.scanner.position()
        #: True if touchdown doesn't occur for any point in the grid
        out_of_range = False
        #: True if the loop is exited before finishing
        premature_exit = False
        self.scanner.break_loop = False
        self.scanner.td_has_occurred = False
        self.snapshot(update=True)
        x_grid, y_grid = np.meshgrid(x_vec, y_vec, indexing='ij')
        td_grid = np.full((len(x_vec), len(y_vec)), np.nan, dtype=np.double)
        log.info('Acquiring a plane.')
        v_retract = self.scanner.voltage_retract[self.temp].to('V').magnitude
        fig = plt.figure(figsize=(8, 3))
        ax0 = fig.add_subplot(121, projection='3d')
        ax1 = fig.add_subplot(122, projection='3d')
        for ax in [ax0, ax1]:
            ax.set_xlabel('x position [V]')
            ax.set_ylabel('y position [V]')
            ax.set_zlabel('z position [V]')
        ax0.set_title('Sample Plane')
        ax1.set_title('Sample Surface')
        fig.canvas.draw()
        fig.show()
        for i in range(len(x_vec)):
            for j in range(len(y_vec)):
                #: If any of the safety limits in td_cap() are exceeded,
                #: or the loop is interrupted by the user.
                if self.scanner.break_loop and not self.scanner.td_has_occurred:
                    log.warning('Aborting get_surface().')
                    premature_exit = True
                    break  #: goes to outer break statement
                else:
                    self.scanner.goto([x_grid[i, j], y_grid[i, j], v_retract])
                    data, tdc_plot = self.td_cap(tdc_params, update_snap=False)
                    td_grid[i, j] = self.scanner.td_height
                    clear_output(wait=True)
                    if self.scanner.td_height is None:
                        out_of_range = True
                        premature_exit = True
                        log.warning(
                            'Touchdown out of range. Stopping get_surface().')
                        self.scanner.goto(old_pos)
                        break  #: goes to outer break statement
                    plt.close(fig)
                    fig = plt.figure(figsize=(8, 3))
                    ax0 = fig.add_subplot(121, projection='3d')
                    ax1 = fig.add_subplot(122, projection='3d')
                    for ax in [ax0, ax1]:
                        ax.scatter(x_grid[np.isfinite(td_grid)],
                                   y_grid[np.isfinite(td_grid)],
                                   td_grid[np.isfinite(td_grid)],
                                   cmap='viridis')
                        ax.set_xlabel('x position [V]')
                        ax.set_ylabel('y position [V]')
                        ax.set_zlabel('z position [V]')
                    ax0.set_title('Sample Plane')
                    ax1.set_title('Sample Surface')
                    fig.canvas.draw()
                    fig.show()
                    plt.close(tdc_plot.fig)
                    continue  #: skips outer break statement
                break  #: occurs only if out_of_range or loop is broken
        self.scanner.goto(old_pos)
        if not out_of_range and not premature_exit:
            self.scanner.metadata.update(
                {'td_grid': {
                    'x': x_grid,
                    'y': y_grid,
                    'z': td_grid
                }})
            # Create spline function to interpolate over surface:
            self.scanner.surface_interp = Rbf(x_grid,
                                              y_grid,
                                              td_grid,
                                              function='cubic')
            #: Fit a plane to the td_grid
            x = np.reshape(x_grid, (-1, 1))
            y = np.reshape(y_grid, (-1, 1))
            td = np.reshape(td_grid, (-1, 1))
            z = np.column_stack((x, y, np.ones_like(x)))
            plane, res, _, _ = lstsq(z, td)
            log.info('New plane : {}.'.format([plane[i][0] for i in range(3)]))
            ax0.plot_surface(x_grid,
                             y_grid,
                             plane[0] * x_grid + plane[1] * y_grid + plane[2],
                             cmap='viridis',
                             alpha=0.5)
            ax1.plot_surface(x_grid,
                             y_grid,
                             self.scanner.surface_interp(x_grid, y_grid),
                             cmap='viridis',
                             alpha=0.5)
            for i, axis in enumerate(['x', 'y', 'z']):
                self.scanner.metadata['plane'].update({axis: plane[i][0]})
            self.atto.surface_is_current = True
            loc_provider = qc.FormatLocation(
                fmt='./data/{date}/#{counter}_{name}_{time}')
            loc = loc_provider(DiskIO('.'), record={'name': 'surface'})
            pathlib.Path(loc).mkdir(parents=True, exist_ok=True)
            fig.suptitle(loc)
            fig.canvas.draw()
            fig.show()
            plt.savefig(loc + '/surface.png')
            mdict = {
                'plane': {
                    ax: self.scanner.metadata['plane'][ax]
                    for ax in ['x', 'y', 'z']
                },
                'td_grid': {
                    ax: self.scanner.metadata['td_grid'][ax]
                    for ax in ['x', 'y', 'z']
                }
            }
            io.savemat(loc + '/surface.mat', mdict)
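            #: Hedged note (not in the original): the plane saved above can be evaluated
            #: at an arbitrary scanner position (x, y) as
            #:     z = plane['x'] * x + plane['y'] * y + plane['z']
            #: with plane = self.scanner.metadata['plane'], mirroring the
            #: plane[0] * x_grid + plane[1] * y_grid + plane[2] surface plotted above.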
Example #11
    def iv_tek_mod_daq(self, ivm_params: Dict[str, Any]) -> Tuple[Dict[str, Any], Dict[str, Any]]:
        """Performs digital feedback on mod coil to measure flux vs. delay.

        AFG ch1 is used for pulse generator bias.
        AFG ch2 is used for comparator bias.
        DG ch1 is used for pulse generator trigger.

        Args:
            ivm_params: Dict of measurement parameters as defined in the config_measurements json file.

        Returns:
            Tuple[Dict]: data_dict, meta_dict
                Dictionaries containing data arrays and instrument metadata.
        """

        data_dict = {}
        meta_dict = {}
        daq_config = self.config['instruments']['daq']
        ai_channels = daq_config['channels']['analog_inputs']
        meas_channels = ivm_params['channels']
        channels = {}
        for ch in meas_channels:
            channels.update({ch: ai_channels[ch]})

        vmod = self.Q_(ivm_params['vmod_initial']).to('V').magnitude
        vcomp_set = self.Q_(ivm_params['vcomp_set']).to('V').magnitude
        vmod_low = self.Q_(ivm_params['vmod_low']).to('V').magnitude
        vmod_high = self.Q_(ivm_params['vmod_high']).to('V').magnitude

        tsettle = self.Q_(ivm_params['tsettle']).to('s').magnitude
        tavg = self.Q_(ivm_params['tavg']).to('s').magnitude
        time_constant = self.Q_(ivm_params['time_constant'])
        
        period = self.Q_(ivm_params['afg']['ch1']['period'])
        delay0, delay1 = [self.Q_(val).to('s').magnitude for val in ivm_params['dg']['range']]
        delay_vec = np.linspace(delay0, delay1, ivm_params['dg']['nsteps'])
        vmod_vec = np.full_like(delay_vec, np.nan, dtype=np.double)

        #: Set AFG pulse parameters
        for ch in [1, 2]:
            p = ivm_params['afg']['ch{}'.format(ch)]
            getattr(self.afg, 'voltage_high{}'.format(ch))('{}V'.format(self.Q_(p['high']).to('V').magnitude))
            getattr(self.afg, 'voltage_low{}'.format(ch))('{}V'.format(self.Q_(p['low']).to('V').magnitude))
            getattr(self.afg, 'pulse_period{}'.format(ch))('{}us'.format(self.Q_(p['period']).to('us').magnitude))
            getattr(self.afg, 'pulse_width{}'.format(ch))('{}us'.format(self.Q_(p['width']).to('us').magnitude))
            getattr(self.afg, 'pulse_trans_lead{}'.format(ch))('{}us'.format(self.Q_(p['lead']).to('us').magnitude))
            getattr(self.afg, 'pulse_trans_trail{}'.format(ch))('{}us'.format(self.Q_(p['trail']).to('us').magnitude))
            getattr(self.afg, 'pulse_delay{}'.format(ch))('{}us'.format(self.Q_(p['delay']).to('us').magnitude))

        #: Set delay generator parameters
        p = ivm_params['dg']
        self.dg.delay_B('A, {:e}'.format(delay0))
        self.dg.delay_C('T0, {:e}'.format(self.Q_(p['ch2']['delay']).to('s').magnitude))
        self.dg.delay_D('C, {:e}'.format(self.Q_(p['ch2']['width']).to('s').magnitude))

        self.dg.amp_out_AB(self.Q_(p['ch1']['voltage']).to('V').magnitude)
        self.dg.offset_out_AB(self.Q_(p['ch1']['offset']).to('V').magnitude)
        self.dg.amp_out_CD(self.Q_(p['ch2']['voltage']).to('V').magnitude)
        self.dg.offset_out_CD(self.Q_(p['ch2']['offset']).to('V').magnitude)

        self.MAG_lockin.time_constant(self.Q_(ivm_params['time_constant']).to('s').magnitude)

        #: Get instrument metadata and prefactors
        lockin_snap = self.MAG_lockin.snapshot(update=True)
        lockin_meta = {}
        for param in ['time_constant', 'sensitivity', 'phase', 'reserve', 'filter_slope']:
            lockin_meta.update({param: lockin_snap['parameters'][param]})
        meta_dict.update({'metadata':
                            {'lockin': lockin_meta,
                             'afg': self.afg.snapshot(update=True),
                             'dg': self.dg.snapshot(update=True),
                             'ivm_params': ivm_params}
                        })
        #prefactor = 1 / (10 / lockin_snap['parameters']['sensitivity']['value'])
        #prefactor /= ivm_params['channels']['lockinX']['gain']

        with nidaqmx.Task('ai_task') as ai_task, nidaqmx.Task('ao_task') as ao_task:
            ao = '{}/ao{}'.format(daq_config['name'], daq_config['channels']['analog_outputs']['mod'])
            ao_task.ao_channels.add_ao_voltage_chan(ao, 'mod')
            for ch, idx in channels.items():
                channel = '{}/ai{}'.format(daq_config['name'], idx)
                ai_task.ai_channels.add_ai_voltage_chan(channel, ch)

            figM = plt.figure(figsize=(4,3))
            axM  = plt.gca()
            plt.xlim(min(delay_vec), max(delay_vec))
            plt.xlabel(r'Delay time [$\mu$s]')
            plt.ylabel('Modulation Voltage [V]')

            figT = plt.figure(figsize=(4,3))
            axT = plt.gca()
            plt.xlabel('Iteration number')
            plt.ylabel('Modulation Voltage [V]')

            log.info('Starting iv_tek_mod_daq.')
            try:
                #: Sweep delay time
                for j in range(len(delay_vec)):
                    self.dg.delay_A('T0, {:e}'.format(delay_vec[j]))
                    self.dg.delay_B('A, {:e}'.format(period.to('s').magnitude - delay_vec[j]))
                    elapsed_time = 0
                    nsamples = 0
                    vmod_time = np.array([])
                    t0 = time.time()
                    time.sleep(0.01)
                    #: Do digital PID control
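                    #: (P is the proportional gain of the digital feedback; it is assumed
                    #: to be defined elsewhere in the original script, e.g. alongside ivm_params.)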
                    while elapsed_time < tsettle + tavg:
                        ai_data = ai_task.read()
                        vcomp = ai_data[0]
                        err = vcomp - vcomp_set
                        vmod += P * err
                        vmod = np.mod(vmod, vmod_high)
                        #vmod = max(vmod, vmod_low) if vmod < 0 else min(vmod, vmod_high)
                        ao_task.write(vmod)
                        elapsed_time = time.time() - t0
                        nsamples += 1
                        vmod_time = np.append(vmod_time, vmod)
                    avg_start_pt = int(nsamples * tavg // (tsettle + tavg))
                    vmod_vec[j] = np.mean(vmod_time[avg_start_pt:])

                    clear_artists(axM)
                    axM.plot(delay_vec, vmod_vec, 'bo-')
                    plt.tight_layout()
                    figM.canvas.draw()

                    clear_artists(axT)
                    axT.plot(vmod_time, 'bo')
                    plt.tight_layout()
                    figT.canvas.draw()

                    time.sleep(0.05)
            except KeyboardInterrupt:
                log.warning('Measurement aborted by user.')

        data_dict.update({
            'delay_vec': {'array': delay_vec, 'unit': 's'},
            'vmod_vec': {'array': vmod_vec, 'unit': 'V'}
            })
        if ivm_params['save']:
            #: Get/create data location
            loc_provider = qc.FormatLocation(fmt='{date}/#{counter}_{name}_{time}')
            loc = loc_provider(DiskIO('.'), record={'name': ivm_params['fname']})
            pathlib.Path(loc).mkdir(parents=True, exist_ok=True)
            #: Save arrays to mat
            io.savemat('{}/{}'.format(loc, ivm_params['fname']), data_dict)
            #: Save metadata to json
            with open(loc + '/metadata.json', 'w') as f:
                try:
                    json.dump(meta_dict, f, sort_keys=True, indent=4, skipkeys=True)
                except TypeError:
                    pass
            #: Save figures to png
            figM.suptitle(loc)
            figM.tight_layout(rect=[0, 0.03, 1, 0.95])
            figM.savefig('{}/{}mod_d.png'.format(loc, ivm_params['fname']))
            figT.suptitle(loc)
            figT.tight_layout(rect=[0, 0.03, 1, 0.95])
            figT.savefig('{}/{}mod_t.png'.format(loc, ivm_params['fname']))
            log.info('Data saved to {}.'.format(loc))

        return data_dict, meta_dict
Example #12
    def iv_mod_tek(self, ivm_params: Dict[str, Any]) -> Tuple[Dict[str, Any]]:
        """Measures IV characteristic at different mod coil voltages.

        Args:
            ivm_params: Dict of measurement parameters as defined in the config_measurements json file.

        Returns:
            Tuple[Dict]: data_dict, meta_dict
                Dictionaries containing data arrays and instrument metadata.
        """

        data_dict = {}
        meta_dict = {}
        mod_range = [self.Q_(value).to('V').magnitude for value in ivm_params['mod_range']]
        bias_range = [self.Q_(value).to('V').magnitude for value in ivm_params['bias_range']]
        mod_vec = np.linspace(mod_range[0], mod_range[1], ivm_params['ntek2'])
        bias_vec = np.linspace(bias_range[0], bias_range[1], ivm_params['ntek1'])
        mod_grid, bias_grid  = np.meshgrid(mod_vec, bias_vec)
        data_dict.update({
            'mod_vec': {'array': mod_vec, 'unit': 'V'},
            'bias_vec': {'array': bias_vec, 'unit': 'V'},
            'mod_grid': {'array': mod_grid, 'unit': 'V'},
            'bias_grid': {'array': bias_grid, 'unit': 'V'}
            })
        ivmX = np.full_like(mod_grid, np.nan, dtype=np.double)
        ivmY = np.full_like(mod_grid, np.nan, dtype=np.double)
        
        #: Set AFG output channels
        self.afg.voltage_low1('0V')
        self.afg.voltage_high1('{}V'.format(bias_range[0]))

        self.afg.voltage_low2('0V')
        self.afg.voltage_high2('{}V'.format(mod_range[0]))
        self.afg.voltage_offset2('0V')

        #: Set pulse parameters
        for ch in [1, 2]:
            p = ivm_params['afg']['ch{}'.format(ch)]
            getattr(self.afg, 'pulse_period{}'.format(ch))('{}us'.format(self.Q_(p['period']).to('us').magnitude))
            getattr(self.afg, 'pulse_width{}'.format(ch))('{}us'.format(self.Q_(p['width']).to('us').magnitude))
            getattr(self.afg, 'pulse_trans_lead{}'.format(ch))('{}us'.format(self.Q_(p['lead']).to('us').magnitude))
            getattr(self.afg, 'pulse_trans_trail{}'.format(ch))('{}us'.format(self.Q_(p['trail']).to('us').magnitude))
            getattr(self.afg, 'pulse_delay{}'.format(ch))('{}us'.format(self.Q_(p['delay']).to('us').magnitude))

        #: Get instrument metadata and prefactors
        lockin_snap = self.MAG_lockin.snapshot(update=True)
        lockin_meta = {}
        for param in ['time_constant', 'sensitivity', 'phase', 'reserve', 'filter_slope']:
            lockin_meta.update({param: lockin_snap['parameters'][param]})
        meta_dict.update({'metadata':
                            {'lockin': lockin_meta,
                             'afg': self.afg.snapshot(update=True),
                             'ivm_params': ivm_params}
                        })
        prefactor = 1 / (10 / lockin_snap['parameters']['sensitivity']['value'])
        prefactor /= ivm_params['channels']['lockinX']['gain']
        delay = ivm_params['delay_factor'] * lockin_snap['parameters']['time_constant']['value']

        fig, ax = plt.subplots(1, figsize=(4,3))
        ax.set_xlim(min(bias_range), max(bias_range))
        ax.set_xlabel('Bias [V]')
        ax.set_ylabel('Voltage [V]')
        ax.set_title(ivm_params['channels']['lockinX']['label'])
        log.info('Starting iv_mod_tek.')
        try:
            for j in range(len(mod_vec)):
                #: four independent arrays, one entry per bias point (the original
                #: tuple-multiplication made all four names alias the same array)
                dataX, dataY, dataX_avg, dataY_avg = [np.zeros(len(bias_vec)) for _ in range(4)]
                self.afg.voltage_offset2('{}V'.format(mod_vec[j]))
                for _ in range(ivm_params['navg']):
                    for i in range(len(bias_vec)):
                        self.afg.voltage_high1('{}V'.format(bias_vec[i]))
                        time.sleep(delay)
                        dataX[i] = self.MAG_lockin.X()
                        dataY[i] = self.MAG_lockin.Y()
                    dataX_avg += dataX
                    dataY_avg += dataY
                dataX_avg /= ivm_params['navg']
                dataY_avg /= ivm_params['navg']
                ivmX[:,j] = prefactor * dataX_avg
                ivmY[:,j] = prefactor * dataY_avg
                clear_artists(ax)
                ax.plot(bias_vec, prefactor * dataX_avg, 'bo-')
                plt.tight_layout()
                fig.canvas.draw()
            fig.show()

        except KeyboardInterrupt:
            log.warning('Measurement aborted by user.')

        figX = plt.figure(figsize=(4,3))    
        plt.pcolormesh(mod_grid, bias_grid, ivmX)
        plt.xlabel('Modulation [V]')
        plt.ylabel('Bias [V]')
        plt.title(ivm_params['channels']['lockinX']['label'])
        figX.tight_layout(rect=[0, 0.03, 1, 0.95])
        cbarX = plt.colorbar()
        cbarX.set_label('Voltage [V]')

        figY = plt.figure(figsize=(4,3))    
        plt.pcolormesh(mod_grid, bias_grid, ivmY)
        plt.xlabel('Modulation [V]')
        plt.ylabel('Bias [V]')
        plt.title(ivm_params['channels']['lockinY']['label'])
        figY.tight_layout(rect=[0, 0.03, 1, 0.95])
        cbarY = plt.colorbar()
        cbarY.set_label('Voltage [V]')

        data_dict.update({
            'lockinX': {'array': ivmX, 'unit': 'V'},
            'lockinY': {'array': ivmY, 'unit': 'V'}
        })

        if ivm_params['save']:
            #: Get/create data location
            loc_provider = qc.FormatLocation(fmt='{date}/#{counter}_{name}_{time}')
            loc = loc_provider(DiskIO('.'), record={'name': ivm_params['fname']})
            pathlib.Path(loc).mkdir(parents=True, exist_ok=True)
            #: Save arrays to mat
            io.savemat('{}/{}'.format(loc, ivm_params['fname']), data_dict)
            #: Save metadata to json
            with open(loc + '/metadata.json', 'w') as f:
                try:
                    json.dump(meta_dict, f, sort_keys=True, indent=4, skipkeys=True)
                except TypeError:
                    pass
            #: Save figures to png
            figX.suptitle(loc)
            figX.savefig('{}/{}X.png'.format(loc, ivm_params['fname']))
            figY.suptitle(loc)
            figY.savefig('{}/{}Y.png'.format(loc, ivm_params['fname']))
            log.info('Data saved to {}.'.format(loc))

        return data_dict, meta_dict
            loop = loop.loop((sp[sp.values]))

    play_task = Task(upload_play, seq)

    if loop is not None:
        m = loop.each(play_task, *params)
    else:
        m = Measure(play_task, *params)

    ds = m.run(loc_record={'name':name})
    return ds


path = 'C:/Projects/test/data'

io = DiskIO(path)
DataSet.default_io = io

station = Station()

# create "AWG1"
awgs = init_hardware()

# create channels P1, P2
p = init_pulselib(awgs)

v_param = lp.linspace(0, 200, 5, axis=0, unit = "mV", name = "vPulse")
t_wait = lp.linspace(20, 100, 3, axis=1, unit = "mV", name = "t_wait")


seg1 = p.mk_segment()
Example #14
data_array1 = DataArray(preset_data = data, name = 'digitizer', is_setpoint = True)

data_array2 = DataArray(preset_data = data, name = 'digitizer2')

data_array3 = DataArray(preset_data = data, name = 'digitizer3')

#data_array4 = DataArray(parameter=digitizer_param, is_setpoint=True)

data_array5 = DataArray(preset_data = data, name = 'digitizer5')

data_array6 = DataArray(preset_data = data1, name = 'digitizer6')
"""
#%%

NewIO = DiskIO(base_location='C:\\Users\\LocalAdmin\\Documents')
formatter = HDF5FormatMetadata()
try_location = 'trytrytry'
"""
#arrays = LP.containers()
arrays2 = []
arrays3 = [data_array1,]
arrays4 = [data_array1, data_array2, data_array3]

data_set = new_data(arrays=arrays3, location=try_location, loc_record = {'name':'T1', 'label':'Vread_sweep'}, io = NewIO,)

#data_set.save_metadata()
"""

#%% load data
NewIO = DiskIO(base_location='C:\\Users\\LocalAdmin\\Documents')
Example #15
def fft_noise(dev_name: str, channel: Dict[str, int], unit: str,
              prefactor: Any, samplerate: int,
              sampleduration: Union[float, int], navg: int, fmax: Union[float,
                                                                        int]):
    """Noise measurement of a single channel.
    
    Args:
        dev_name: DAQ device name (e.g. 'Dev1').
        channel: Dict of {channel_name: analog_input} (e.g. {'MAG': 0}).
        unit: Physical unit of the channel (e.g. 'Phi0').
        prefactor: Pint Quantity with dimensions of unit/V, from microscope.get_prefactors().
        samplerate: DAQ sampling rate in Hz.
        sampleduration: Sampling time in seconds.
        navg: Number of times to average the spectrum.
        fmax: Maximum frequency up to which the spectrum will be saved.
        
    Returns:
        Dict: mdict
    """
    loc_provider = qc.FormatLocation(
        fmt='./data/{date}/#{counter}_{name}_{time}')
    loc = loc_provider(DiskIO('.'), record={'name': 'fft_noise'})
    pathlib.Path(loc).mkdir(parents=True, exist_ok=True)
    prefactor_str = {}
    prefactor.ito('{}/V'.format(unit))
    prefactor_str.update({
        list(channel.keys())[0]:
        '{} {}'.format(prefactor.magnitude, prefactor.units)
    })
    mdict = {
        'metadata': {
            'channel': channel,
            'unit': unit,
            'prefactor': prefactor_str,
            'samplerate': samplerate,
            'sampleduration': sampleduration,
            'navg': navg,
            'fmax': fmax,
            'location': loc
        }
    }
    nsamples = int(samplerate * sampleduration)
    v_fft_avg = np.zeros((nsamples // 2, ))
    with nidaqmx.Task('fft_noise_ai_task') as ai_task:
        for inst in DAQAnalogInputs.instances():
            inst.close()
        daq_ai = DAQAnalogInputs('daq_ai',
                                 dev_name,
                                 samplerate,
                                 channel,
                                 ai_task,
                                 samples_to_read=nsamples,
                                 timeout=sampleduration + 10)
        for i in range(navg):
            data_v = daq_ai.voltage()[0].T
            Fs = nsamples / sampleduration
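            #: Scaling the two-sided FFT by sqrt(2 * sampleduration) / nsamples gives a
            #: single-sided amplitude spectral density in V/sqrt(Hz).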
            v_fft = np.fft.fft(data_v) / (nsamples /
                                          np.sqrt(2 * sampleduration))
            v_fft_abs = np.abs(v_fft[:nsamples // 2])
            freqs = np.fft.fftfreq(nsamples, d=1 / Fs)[:nsamples // 2]
            v_fft_avg += v_fft_abs
        daq_ai.close()
        v_fft_avg = v_fft_avg / navg
        sig_fft_avg = prefactor.magnitude * v_fft_avg
        mdict.update({
            'v_fft_avg': v_fft_avg[freqs < fmax],
            'sig_fft_avg': sig_fft_avg[freqs < fmax],
            'freqs': freqs[freqs < fmax]
        })
        fig, ax = plt.subplots(1, 2, figsize=(8, 4), tight_layout=True)
        ax[0].loglog(freqs, v_fft_avg, lw=1)
        ax[0].set_ylabel('V/$\\sqrt{Hz}$')
        ax[1].loglog(freqs, sig_fft_avg, lw=1)
        ax[1].set_ylabel('{}/$\\sqrt{{Hz}}$'.format(unit))
        fig.suptitle(loc, x=0.5, y=1)
        for i in [0, 1]:
            ax[i].set_xlabel('Frequency [Hz]')
            ax[i].grid()
        plt.savefig(loc + '/fft_noise.png')
        io.savemat(loc + '/fft_noise.mat', mdict)
        return mdict
Example #16
    def join(self, *args):
        return DiskIO('.').join(*args)
Example #17
LOOP = Loop(sweep_values=Sweep_Value2).loop(
    sweep_values=Sweep_Value1).each(AMP)

#LOOP = Loop(sweep_values = Sweep_Value1).each(DIG)

#Sweep_Value1 = T[-25:-27:0.1]
#Sweep_Value2 = LP[-558:-562:0.2]
#LOOP = Loop(sweep_values = Sweep_Value2).loop(sweep_values = Sweep_Value1).each(dig)

#LOOP = Loop(sweep_values = Sweep_Value1).each(AMP)

#Sweep_Value3 = Count[0:4000:1]
#LOOP = Loop(sweep_values = Sweep_Value3, delay = 0.5).each(AMP)

NewIO = DiskIO(base_location='D:\\Data\\RB_experiment')
NewIO = DiskIO(
    base_location=
    'K:\\ns\\qt\\spin-qubits\\data\\b059_data\\2018 data\\Data\\RB_experiment')

## get_data_set should take parameters such as io, location, formatter, and others
data = LOOP.get_data_set(
    location=None,
    loc_record={
        'name': 'DAC',
        'label': 'V_sweep'
    },
    io=NewIO,
)
print('loop.data_set: %s' % LOOP.data_set)
'''