def add_qubit_state_cal(self, filepath_to_0, filepath_to_1):
    """Load |0> and |1> reference measurements and store their IQ calibration points.

    Args:
        filepath_to_0: path to the h5 file of the qubit-in-|0> reference measurement.
        filepath_to_1: path to the h5 file of the qubit-in-|1> reference measurement.

    Sets self.state_0_cal and self.state_1_cal, each a (I, Q) tuple built from
    the averaged amplitude/phase datasets of the respective file
    (I = amp*sin(phase), Q = amp*cos(phase)).
    """
    calibrations = []
    for path in (filepath_to_0, filepath_to_1):
        cal_file = hdf.Data(path)
        amp = cal_file.data.amplitude_avg_0[:]
        pha = cal_file.data.phase_avg_0[:]
        calibrations.append((amp * np.sin(pha), amp * np.cos(pha)))
        cal_file.close_file()
    self.state_0_cal, self.state_1_cal = calibrations
def _save_opt_data(self):
    '''
    Saves optimized data in the h5 file and a respective view.

    Writes self.data as a '<label>_data_opt' vector in the analysis folder,
    plotted against the first discovered url (the coordinate axis). If
    self.errors is set, an additional '<label>_errors' vector and an
    error-bar view are written. Does nothing when no urls are known.
    '''
    if self.urls is not None:
        self.hf = store.Data(self.file_name)
        # optimized data vector, x-axis is the original coordinate dataset
        hdf_data_opt = self.hf.add_value_vector(
            self.data_label + '_data_opt',
            folder=self.cfg['analysis_folder'],
            x=self.hf.get_dataset(self.urls[0]))
        hdf_data_opt.append(np.array(self.data))
        self.optimized = True
        # NOTE(review): the try/else below uses NameError as a sentinel:
        # "self.errors is None" is folded into the no-errors case. A missing
        # attribute would raise AttributeError, which is NOT caught here --
        # presumably self.errors always exists by this point; confirm.
        try:
            self.errors
            if self.errors is None:
                raise NameError
        except NameError:
            # no errors available -- only the optimized data is stored
            pass
        else:
            # write errors
            hdf_error = self.hf.add_value_vector(
                self.data_label + '_errors',
                folder=self.cfg['analysis_folder'])
            hdf_error.append(np.array(self.errors))
            # error plot view (data with error bars vs. coordinate)
            joint_error_view = self.hf.add_view(
                self.data_label + '_err_plot',
                x=self.hf.get_dataset(self.urls[0]),
                y=hdf_data_opt,
                error=hdf_error)
        self.hf.close_file()
def __init__(self, hf_path):
    """Open the h5 file at hf_path and attempt to load the resonator datasets.

    Args:
        hf_path (str): path to the h5 measurement file.

    Sets self._datasets_loaded to indicate whether amplitude/phase/frequency
    data could be read (may legitimately fail during live-fitting).
    """
    self._hf = store.Data(hf_path)
    # one-shot flags -- presumably ensure fit-result datasets are created
    # only once per fit type; confirm against the fit methods
    self._first_circle = True
    self._first_lorentzian = True
    self._first_fano = True
    self._first_skewed_lorentzian = True
    self._do_prefilter_data = False
    self.pre_filter_params = []
    self._debug = False
    # these ds_url should always be present in a resonator measurement
    self.ds_url_amp = "/entry/data0/amplitude"
    self.ds_url_pha = "/entry/data0/phase"
    self.ds_url_freq = "/entry/data0/frequency"
    # these ds_url depend on the measurement and may not exist
    self.ds_url_power = "/entry/data0/power"
    '''
    catching error if datasets are empty while creating resonator object
    i.g. while live-fitting
    '''
    try:
        self._get_datasets()
        self._datasets_loaded = True
    except KeyError:
        # datasets not (yet) present -- caller can retry later
        self._datasets_loaded = False
def measure_2D_AWG(self, iterations=1):
    '''
    2D measurement where the x axis is the sequence played by the AWG and the
    y axis is swept via self.y_set_obj.

    Args:
        iterations (int): if > 1, the scan is run as a 3D measurement with an
            'iteration' z axis and averaged amplitude/phase datasets are
            written at the end (averages are only created at the end to get a
            consistent averaging base).

    Raises:
        ValueError: if the y axis is not configured, or if a multiplexed
            readout (ndev > 1) is used in the plain 2D branch.
    '''
    if self.y_set_obj is None:
        raise ValueError('y-axes parameters not properly set')
    # if stop button was pressed by now, abort without creating data files
    qkit.flow.sleep()
    if iterations > 1:
        self.z_vec = range(iterations)
        self.z_coordname = 'iteration'
        self.z_set_obj = lambda z: True
        self.z_unit = ''
        self.measure_3D_AWG()
        # re-open the finished file to append the averaged datasets
        hdf_file = hdf.Data(self._hdf.get_filepath())
        for j in range(self.ndev):
            amp = np.array(hdf_file["/entry/data0/amplitude_%i" % j])
            pha = np.array(hdf_file["/entry/data0/phase_%i" % j])
            amp_avg = sum(amp[i] for i in range(iterations)) / iterations
            pha_avg = sum(pha[i] for i in range(iterations)) / iterations
            # BUGFIX: these datasets were labeled with '% i', but 'i' is only
            # bound inside the generator expressions above (Python 3 scoping),
            # so this raised NameError / used a stale value. The device index
            # 'j' is the intended label, matching 'amplitude_%i' above.
            hdf_amp_avg = hdf_file.add_value_matrix('amplitude_avg_%i' % j,
                                                    x=self._hdf_y,
                                                    y=self._hdf_x,
                                                    unit='a.u.')
            hdf_pha_avg = hdf_file.add_value_matrix('phase_avg_%i' % j,
                                                    x=self._hdf_y,
                                                    y=self._hdf_x,
                                                    unit='rad')
            for i in range(len(self.y_vec)):
                hdf_amp_avg.append(amp_avg[i])
                hdf_pha_avg.append(pha_avg[i])
        hdf_file.close_file()
    else:
        self.mode = 3  # 1: 1D, 2: 2D, 3: 1D_AWG/2D_AWG, 4: 3D_AWG
        self._prepare_measurement_file()
        if self.ndev > 1:
            raise ValueError(
                'Multiplexed readout is currently not supported for 2D measurements'
            )
        if self.show_progress_bar:
            p = Progress_Bar(len(self.y_vec), name=self.dirname)
        try:  # measurement loop
            for it in range(len(self.y_vec)):
                # better done during measurement (waiting for trigger)
                qkit.flow.sleep()
                self.y_set_obj(self.y_vec[it])
                self._append_data(iteration=it)
                if self.show_progress_bar:
                    p.iterate()
        finally:
            self._end_measurement()
def create_logfile(self):
    """Create a fresh, timestamped h5 log file for this parameter.

    Builds the file name from the parameter name and the current time,
    opens the file, and sets up a timestamp coordinate, a value vector,
    their dataset urls, and a 'data_vs_time' view.
    """
    print('Create new log file for parameter %s.' % self.name)
    safe_name = self.name.replace(' ', '_')
    stamp = time.strftime('%d%m%Y%H%M%S')
    self.fname = os.path.join(self.log_path, safe_name + stamp + '.h5')
    #print self.fname
    self.hf = hdf_lib.Data(self.fname, mode='a')
    self.hdf_t = self.hf.add_coordinate('timestamps')
    self.hdf_v = self.hf.add_value_vector('values', x=self.hdf_t)
    self.url_timestamps = '/entry/data0/timestamps'
    self.url_values = '/entry/data0/values'
    view = self.hf.add_view('data_vs_time', x=self.hdf_t, y=self.hdf_v)  #fit
def __init__(self, h5_filepath, comment='', save_pdf=False):
    """Inits h5plot with a h5_filepath (string, absolute path),
    optional comment string, and optional save_pdf boolean.

    Walks all datasets under /entry of the file and saves a plot of each
    (unless the dataset's 'save_plot' attribute is False) into an 'images'
    folder next to the h5 file. Returns early when png saving is disabled
    or matplotlib is unavailable.
    """
    if not plot_enable or not qkit.module_available("matplotlib"):
        logging.warning(
            "matplotlib not installed. I can not save your measurement files as png. I will disable this function."
        )
        qkit.cfg['save_png'] = False
    if not qkit.cfg.get('save_png', True):
        return
    self.comment = comment
    self.save_pdf = save_pdf
    self.path = h5_filepath
    filepath = os.path.abspath(
        self.path)  #put filepath to platform standards
    self.filedir = os.path.dirname(
        filepath
    )  #return directory component of the given pathname, here filepath
    self.image_dir = os.path.join(self.filedir, 'images')
    try:
        os.mkdir(self.image_dir)
    except OSError:
        # directory may already exist; plotting continues either way
        logging.warning('Error creating image directory.')
        pass
    # open the h5 file and get the hdf_lib object
    self.hf = store.Data(self.path)
    # check for datasets
    for i, pentry in enumerate(self.hf['/entry'].keys()):
        key = '/entry/' + pentry
        for j, centry in enumerate(self.hf[key].keys()):
            try:
                self.key = '/entry/' + pentry + "/" + centry
                self.ds = self.hf[self.key]
                if self.ds.attrs.get('save_plot', True):
                    self.plt()  # this is the plot function
            except Exception as e:
                # one broken dataset must not stop the remaining plots
                print("Exception in qkit/gui/plot/plot.py while plotting")
                print(self.key)
                print(e)
    #close hf file
    self.hf.close()
    print('Plots saved in ' + self.image_dir)
def set_reference_uid(self, uid=None):
    """
    Set a UID to be shown as a reference trace in the "resistance_thickness" view.

    Args:
        uid (str): The UID of the h5 file from which the resistance and
            thickness data is to be displayed.

    Returns:
        The UID on success, or the string "Invalid UID" when the file cannot
        be opened or read (in which case the stored reference is cleared).
    """
    ref = None
    try:
        ref = hdf.Data(qkit.fid[uid])
        self._ref_resistance = ref.data.resistance[:]
        self._ref_thickness = ref.data.thickness[:]
        self._reference_uid = uid
        return self._reference_uid
    # BUGFIX: was a bare 'except:', which also swallowed KeyboardInterrupt
    # and SystemExit; narrowed to Exception (unknown uid -> KeyError from
    # qkit.fid, unreadable file / missing datasets -> IOError/KeyError).
    except Exception:
        self._reference_uid = None
        return "Invalid UID"
    finally:
        # BUGFIX: the file was leaked when reading the datasets failed after
        # a successful open; always close it here.
        if ref is not None:
            ref.close()
def _prepare_measurement_file(self):
    '''
    creates the output .h5-file with distinct dataset structures for each
    measurement type. at this point all measurement parameters are known
    and put in the output file
    '''
    self._data_file = hdf.Data(name=self._file_name, mode='a')
    # link the measurement object to the freshly created file, then persist it
    self._measurement_object.uuid = self._data_file._uuid
    self._measurement_object.hdf_relpath = self._data_file._relpath
    self._measurement_object.instruments = qkit.instruments.get_instrument_names(
    )
    self._measurement_object.save()
    # store the measurement object's JSON representation inside the file
    self._mo = self._data_file.add_textlist('measurement')
    self._mo.append(self._measurement_object.get_JSON())
    # instrument settings and logfile
    self._settings = self._data_file.add_textlist('settings')
    settings = waf.get_instrument_settings(self._data_file.get_filepath())
    self._settings.append(settings)
    self._log_file = waf.open_log_file(self._data_file.get_filepath())
def read_hdf_data(self, return_data=False):
    '''
    Read hdf data and set attributes self.coordinate, self.data.
    Return data as a numpy array if requested by 'return_data' argument.

    The first url is taken as the coordinate axis. With exactly two urls the
    second is the data column; with more, cfg['data_column'] selects it.
    '''
    try:
        self.hf = store.Data(self.file_name)
    except (IOError, NameError):
        logging.error('Could not read h5 file.')
        return
    # read hdf data: one float64 array per discovered url
    data = [np.array(self.hf[url], dtype=np.float64) for url in self.urls]
    self.hf.close()
    # fill coordinate and data attributes
    n_urls = len(self.urls)
    if n_urls < 2:
        logging.warning(
            'Coordinate and data attributes not assigned properly.')
    else:
        # for exactly two urls the data column is obvious; otherwise it is
        # taken from the config
        column = 1 if n_urls == 2 else self.cfg['data_column']
        self.coordinate = data[0]
        self.data = data[column]
        self.data_label = self.urls[column].split('/')[-1]
        self.data_url = self.urls[column]
    self.coordinate_label = self.urls[0].split('/')[-1]
    if return_data:
        return data, self.urls
def _prepare_monitoring_file(self):
    """
    Creates the output .h5-file with distinct the required datasets and views.
    At this point all measurement parameters are known and put in the output file.

    Datasets: a common time axis; quartz (rate, thickness), ohmmeter
    (resistance, conductance, deviations) and optional MFC records; optional
    ideal-trend, target-marker, fit-estimation and reference datasets; plus
    the views combining them.
    """
    self._data_file = hdf.Data(name=self._file_name, mode='a')
    # link the measurement object to the new file, then persist it
    self._measurement_object.uuid = self._data_file._uuid
    self._measurement_object.hdf_relpath = self._data_file._relpath
    self._measurement_object.instruments = qkit.instruments.get_instrument_names(
    )
    self._measurement_object.save()
    self._mo = self._data_file.add_textlist('measurement')
    self._mo.append(self._measurement_object.get_JSON())
    # write logfile and instrument settings
    self._write_settings_dataset()
    self._log = waf.open_log_file(self._data_file.get_filepath())
    ''' Time record '''
    self._data_time = self._data_file.add_value_vector('time',
                                                       x=None,
                                                       unit='s')
    ''' Quartz datasets '''
    self._data_rate = self._data_file.add_value_vector(
        'rate', x=self._data_time, unit='nm/s', save_timestamp=False)
    self._data_thickness = self._data_file.add_value_vector(
        'thickness', x=self._data_time, unit='nm', save_timestamp=False)
    ''' Ohmmeter datasets '''
    self._data_resistance = self._data_file.add_value_vector(
        'resistance', x=self._data_time, unit='Ohm', save_timestamp=False)
    self._data_conductance = self._data_file.add_value_vector(
        'conductance', x=self._data_time, unit='S', save_timestamp=False)
    self._data_deviation_abs = self._data_file.add_value_vector(
        'deviation_absolute',
        x=self._data_time,
        unit='Ohm',
        save_timestamp=False)
    self._data_deviation_rel = self._data_file.add_value_vector(
        'deviation_relative',
        x=self._data_time,
        unit='relative',
        save_timestamp=False)
    ''' MFC datasets '''
    # FIXME: units?
    if self.mfc:
        self._data_pressure = self._data_file.add_value_vector(
            'pressure', x=self._data_time, unit='ubar', save_timestamp=False)
        self._data_Ar_flow = self._data_file.add_value_vector(
            'Ar_flow', x=self._data_time, unit='sccm', save_timestamp=False)
        self._data_ArO_flow = self._data_file.add_value_vector(
            'ArO_flow', x=self._data_time, unit='sccm', save_timestamp=False)
    ''' Calculate ideal trend and create record '''
    self._thickness_coord = self._data_file.add_coordinate('thickness_coord',
                                                           unit='nm')
    self._thickness_coord.add(self.ideal_trend()[0])
    if self._show_ideal:
        self._data_ideal = self._data_file.add_value_vector(
            'ideal_resistance',
            x=self._thickness_coord,
            unit='Ohm',
            save_timestamp=False)
        self._data_ideal.append(self.ideal_trend()[1])
    ''' Create target marker '''
    # the six-point lists below draw a cross-hair shaped marker around the
    # target (thickness, resistance) point in the views
    if self._target_marker:
        self._target_thickness_line = self._data_file.add_value_vector(
            'target_thickness', x=None, unit='nm', save_timestamp=False)
        self._target_thickness_line.append([
            0.8 * self._target_thickness, self._target_thickness,
            self._target_thickness, self._target_thickness,
            self._target_thickness, 1.2 * self._target_thickness
        ])
        self._target_resistance_line = self._data_file.add_value_vector(
            'target_resistance', x=None, unit='Ohm', save_timestamp=False)
        self._target_resistance_line.append([
            self._target_resistance, self._target_resistance,
            1.2 * self._target_resistance, 0.8 * self._target_resistance,
            self._target_resistance, self._target_resistance
        ])
        self._target_conductance_line = self._data_file.add_value_vector(
            'target_conductance', x=None, unit='S', save_timestamp=False)
        self._target_conductance_line.append([
            1. / self._target_resistance, 1. / self._target_resistance,
            1. / 1.2 / self._target_resistance,
            1. / 0.8 / self._target_resistance,
            1. / self._target_resistance, 1. / self._target_resistance
        ])
    ''' Estimation datasets '''
    if self._fit_resistance:
        self._thickness_estimation = self._data_file.add_value_vector(
            'thickness_estimation', x=None, unit='nm', save_timestamp=False)
        self._resistance_estimation = self._data_file.add_value_vector(
            'resistance_estimation', x=None, unit='Ohm', save_timestamp=False)
        self._last_resistance_fit = self._data_file.add_value_vector(
            'last_resistance_fit', x=None, unit='Ohm', save_timestamp=False)
    ''' Reference dataset '''
    if not self._reference_uid == None:
        self._thickness_reference = self._data_file.add_value_vector(
            'thickness_reference', x=None, unit='nm', save_timestamp=False)
        self._thickness_reference.append(self._ref_thickness)
        self._resistance_reference = self._data_file.add_value_vector(
            'reference_' + self._reference_uid,
            x=None,
            unit='Ohm',
            save_timestamp=False)
        self._resistance_reference.append(self._ref_resistance)
    ''' Create Views '''
    # main view: resistance vs. thickness, with all optional overlays
    self._resist_view = self._data_file.add_view('resistance_thickness',
                                                 x=self._data_thickness,
                                                 y=self._data_resistance)
    if self._show_ideal:
        self._resist_view.add(x=self._thickness_coord, y=self._data_ideal)
    if self._target_marker:
        self._resist_view.add(x=self._target_thickness_line,
                              y=self._target_resistance_line)
    if not self._reference_uid == None:
        self._resist_view.add(x=self._thickness_reference,
                              y=self._resistance_reference)
    if self._fit_resistance:
        self._resist_view.add(x=self._thickness_coord,
                              y=self._last_resistance_fit)
    self._conductance_view = self._data_file.add_view(
        'conductance_thickness',
        x=self._data_thickness,
        y=self._data_conductance)
    if self._target_marker:
        self._conductance_view.add(x=self._target_thickness_line,
                                   y=self._target_conductance_line)
    self._deviation_abs_view = self._data_file.add_view(
        'deviation_absolute',
        x=self._data_thickness,
        y=self._data_deviation_abs)
    self._deviation_rel_view = self._data_file.add_view(
        'deviation_relative',
        x=self._data_thickness,
        y=self._data_deviation_rel)
    ''' Create comment '''
    if self.comment:
        self._data_file.add_comment(self.comment)
    ''' Open GUI '''
    if self.qviewkit_singleInstance and self.open_qviewkit and self._qvk_process:
        self._qvk_process.terminate()  # terminate an old qviewkit instance
def _store_fit_data(self, fit_params, fit_covariance):
    '''
    Appends fitted data to the h5 file in the specified analysis folder.
    As the fit is a fixed length array, a respective parameter axis is
    created and also stored in the h5 file. If data was optimized and
    stored with the method 'optimize', all relevant joint views are
    created in the h5 file.

    inputs:
    - fit_params: np array of the resulting fit parameters
    - fit_covariance: estimated covariances of the fit_params as returned by curve_fit
    '''
    try:
        self.hf = store.Data(self.file_name)
        #create coordinate and fit data vector
        hdf_x = self.hf.add_coordinate(self.data_label + '_coordinate',
                                       folder=self.cfg['analysis_folder'])
        hdf_x.add(self.x_vec)
        # dataset name reflects whether the fit ran on optimized data
        if self.optimized:
            hdf_y = self.hf.add_value_vector(
                self.data_label + '_opt_fit',
                folder=self.cfg['analysis_folder'],
                x=hdf_x)
        else:
            hdf_y = self.hf.add_value_vector(
                self.data_label + '_fit',
                folder=self.cfg['analysis_folder'],
                x=hdf_x)
        hdf_y.append(np.array(self.fvalues))
        #parameter entry including errors
        hdf_params = self.hf.add_coordinate(
            self.data_label + '_' + self.fit_function.__name__ + '_params',
            folder=self.cfg['analysis_folder'])
        hdf_params.add(np.array(list(fit_params) + list(fit_covariance)))
        # the analysis folder group name carries a trailing '0' in the h5
        # tree; backslashes are normalized for Windows paths
        if self.data_label + '_data_opt' in self.hf[os.path.join(
                '/entry/', self.cfg['analysis_folder'] + '0').replace(
                    '\\', '/')].keys():
            #create joint view with fit, data, and errors if existing
            # NOTE(review): NameError is used as a sentinel for
            # "self.errors is None"; a missing attribute would raise
            # AttributeError and fall through to the outer handler
            try:
                self.errors
                if self.errors is None:
                    raise NameError
            except NameError:
                #no errors
                joint_view = self.hf.add_view(self.data_label + '_opt_fit',
                                              x=hdf_x,
                                              y=hdf_y)  #fit
                joint_view.add(x=self.hf.get_dataset(self.urls[0]),
                               y=self.hf.get_dataset(
                                   os.path.join(
                                       '/entry/',
                                       self.cfg['analysis_folder'] + '0',
                                       self.data_label + '_data_opt').replace(
                                           '\\', '/')))  #data
            else:
                #including errors
                joint_error_view_fit = self.hf.add_view(
                    self.data_label + '_err_plot_fit',
                    x=self.hf.get_dataset(self.urls[0]),
                    y=self.hf.get_dataset(
                        os.path.join('/entry/',
                                     self.cfg['analysis_folder'] + '0',
                                     self.data_label + '_data_opt').replace(
                                         '\\', '/')),
                    error=self.hf.get_dataset(
                        os.path.join('/entry/',
                                     self.cfg['analysis_folder'] + '0',
                                     self.data_label + '_errors').replace(
                                         '\\', '/')))  #errorplot
                joint_error_view_fit.add(x=hdf_x, y=hdf_y)  #fit
        else:
            #no optimization
            joint_view = self.hf.add_view(self.data_label + '_fit',
                                          x=hdf_x,
                                          y=hdf_y)  #fit
            joint_view.add(x=self.hf.get_dataset(self.urls[0]),
                           y=self.hf.get_dataset(self.data_url))  #data
        self.hf.close_file()
    except NameError as m:
        logging.error(
            'Error while attempting to save fit data in h5 file: {:s}'.
            format(str(m)))
def discover_hdf_data(self, entries=None):
    '''
    Read hdf data file and store urls that match entries or seem reasonable.

    - Inputs: entries (optional): specifies entries in h5 file whose urls to
      be returned
    - entries can be a list of string keywords (entries) in the form
      ['string1','string2',...,'stringn']
    - when no entries are specified, discover_hdf_data looks for a frequency
      axis or a pulse length axis for the (first) coordinate axis and also
      searches amplitude_avg and phase_avg. If not present, the urls of
      'amplitude' and 'phase' data are discovered
    - TODO: a nice feature would be to save a hint in each h5 file which
      entries to use for quick fitting

    Result is stored in self.urls (list of str).
    '''
    try:
        self.hf = store.Data(self.file_name)
    except (IOError, NameError):
        logging.error('Could not read h5 file.')
        return
    keys = self.hf['/entry/data0'].keys()
    if self.cfg['show_output']:
        print('Available data entries:', keys
              )  # only show the available data entries, but analysis entries can still be accessed
    url_tree = '/entry/data0/'
    urls = []
    if entries is None:  #no entries specified
        for k in keys:  #go through all keys
            try:  #search for parameter axis by common name prefixes
                if str(k[:4]).lower() == 'freq' or str(
                        k[:2]).lower() == 'f ' or str(
                            k[:4]).lower() == 'puls' or str(
                                k[:4]).lower() == 'dacf' or str(
                                    k[:5]).lower() == 'delay' or str(
                                        k[:8]).lower() == 'pi pulse':
                    urls.append(url_tree + k)
                    break
            except IndexError:
                logging.error(
                    'Entries cannot be identified. Parameter names too short. Aborting.'
                )
                return
        if len(urls) == 0:
            logging.error('No parameter axis found. Aborting.')
            return
        #first look for amplitude_avg and phase_avg entries -> time domain
        for k in keys:
            try:
                if 'avg' in str(k).lower() and str(k[:3]).lower() == 'amp':
                    urls.append(url_tree + k)
                    break
            except IndexError:
                logging.error('Entries cannot be identified. Aborting.')
                return
        for k in keys:
            try:
                if 'avg' in str(k).lower() and str(k[:3]).lower() == 'pha':
                    urls.append(url_tree + k)
                    break
            except IndexError:
                logging.error('Entries cannot be identified. Aborting.')
                return
        #if nothing found previously, then use amplitude and phase entries
        if len(urls) != 3:
            for k in keys:
                try:
                    if str(k[:3]).lower() == 'amp':
                        urls.append(url_tree + k)
                        break
                except IndexError:
                    logging.error(
                        'Entries cannot be identified. Aborting.')
                    return
            for k in keys:
                try:
                    if str(k[:3]).lower() == 'pha':
                        urls.append(url_tree + k)
                        break
                except IndexError:
                    logging.error(
                        'Entries cannot be identified. Aborting.')
                    return
    else:  #use specified entries
        entrytypes = self.hf['/entry/'].keys()
        try:
            entrytypes.remove('views')
        except (ValueError, AttributeError):
            # 'views' may be absent, or keys() may be a non-list view
            pass
        # match each requested entry against every folder/dataset pair
        for et in entrytypes:
            for e in entries:
                for k in self.hf['/entry/' + et].keys():
                    try:
                        if e == et + k or (e == k and et == 'data0'):
                            urls.append('/entry/' + et + '/' + k)
                    except IndexError:
                        logging.error(
                            'Entries cannot be identified. No data for >> {:s} << found. Aborting.'
                            .format(str(e)))
                        return
    self.hf.close()
    #cast to real strings in case the urls ended up to be unicode
    self.urls = [str(u) for u in urls]
    if self.cfg['show_output']:
        print('Entries identified:', self.urls)
def _prepare_measurement_file(self):
    '''
    Creates the output .h5-file and all coordinates/datasets for the selected
    measurement mode (1: 1D, 2: 2D, 3: 1D_AWG/2D_AWG, 4: 3D_AWG), including
    optional I/Q time-trace and averaged datasets, then starts qviewkit and
    the readout.
    '''
    qkit.flow.start()
    if self.dirname is None:
        self.dirname = self.x_coordname
    self.ndev = len(self.readout.get_tone_freq(
    ))  # returns array of readout freqs (=1 for non-multiplexed readout)
    self._hdf = hdf.Data(name=self.dirname, mode='a')
    self._hdf_x = self._hdf.add_coordinate(self.x_coordname, unit=self.x_unit)
    self._hdf_x.add(self.x_vec)
    self._settings = self._hdf.add_textlist('settings')
    settings = waf.get_instrument_settings(self._hdf.get_filepath())
    self._settings.append(settings)
    self._log = waf.open_log_file(self._hdf.get_filepath())
    self._hdf_readout_frequencies = self._hdf.add_coordinate(
        self.multiplex_attribute, unit=self.multiplex_unit)
    self._hdf_readout_frequencies.add(self.readout.get_tone_freq())
    if self.ReadoutTrace:
        # time axis for the raw ADC traces
        self._hdf_TimeTraceAxis = self._hdf.add_coordinate(
            'recorded timepoint', unit='s')
        self._hdf_TimeTraceAxis.add(
            np.arange(self.sample.mspec.get_samples()) /
            self.readout.get_adc_clock())
    if self.mode == 1:  # 1D
        self._hdf_amp = []
        self._hdf_pha = []
        for i in range(self.ndev):
            self._hdf_amp.append(
                self._hdf.add_value_vector('amplitude_%i' % i,
                                           x=self._hdf_x,
                                           unit='a.u.'))
            self._hdf_pha.append(
                self._hdf.add_value_vector('phase_%i' % i,
                                           x=self._hdf_x,
                                           unit='rad'))
        if self.ReadoutTrace:
            self._hdf_I = self._hdf.add_value_matrix(
                'I_TimeTrace',
                x=self._hdf_x,
                y=self._hdf_TimeTraceAxis,
                unit='V',
                save_timestamp=False)
            self._hdf_Q = self._hdf.add_value_matrix(
                'Q_TimeTrace',
                x=self._hdf_x,
                y=self._hdf_TimeTraceAxis,
                unit='V',
                save_timestamp=False)
    elif self.mode == 2:  # 2D
        self._hdf_y = self._hdf.add_coordinate(self.y_coordname,
                                               unit=self.y_unit)
        self._hdf_y.add(self.y_vec)
        self._hdf_amp = []
        self._hdf_pha = []
        for i in range(self.ndev):
            self._hdf_amp.append(
                self._hdf.add_value_matrix('amplitude_%i' % i,
                                           x=self._hdf_x,
                                           y=self._hdf_y,
                                           unit='a.u.'))
            self._hdf_pha.append(
                self._hdf.add_value_matrix('phase_%i' % i,
                                           x=self._hdf_x,
                                           y=self._hdf_y,
                                           unit='rad'))
        if self.ReadoutTrace:
            # TODO: One dimension missing here? The traces are stored as
            # matrices over the y axis only; the x dimension is dropped.
            self._hdf_I = self._hdf.add_value_matrix(
                'I_TimeTrace',
                x=self._hdf_y,
                y=self._hdf_TimeTraceAxis,
                unit='V',
                save_timestamp=False)
            self._hdf_Q = self._hdf.add_value_matrix(
                'Q_TimeTrace',
                x=self._hdf_y,
                y=self._hdf_TimeTraceAxis,
                unit='V',
                save_timestamp=False)
    elif self.mode == 3:  # 1D_AWG/2D_AWG
        self._hdf_y = self._hdf.add_coordinate(self.y_coordname,
                                               unit=self.y_unit)
        self._hdf_y.add(self.y_vec)
        self._hdf_amp = []
        self._hdf_pha = []
        for i in range(self.ndev):
            self._hdf_amp.append(
                self._hdf.add_value_matrix('amplitude_%i' % i,
                                           x=self._hdf_y,
                                           y=self._hdf_x,
                                           unit='a.u.'))
            self._hdf_pha.append(
                self._hdf.add_value_matrix('phase_%i' % i,
                                           x=self._hdf_y,
                                           y=self._hdf_x,
                                           unit='rad'))
        if self.ReadoutTrace:
            self._hdf_I = self._hdf.add_value_box('I_TimeTrace',
                                                  x=self._hdf_y,
                                                  y=self._hdf_x,
                                                  z=self._hdf_TimeTraceAxis,
                                                  unit='V',
                                                  save_timestamp=False)
            self._hdf_Q = self._hdf.add_value_box('Q_TimeTrace',
                                                  x=self._hdf_y,
                                                  y=self._hdf_x,
                                                  z=self._hdf_TimeTraceAxis,
                                                  unit='V',
                                                  save_timestamp=False)
    elif self.mode == 4:  # 3D_AWG
        self._hdf_y = self._hdf.add_coordinate(self.y_coordname,
                                               unit=self.y_unit)
        self._hdf_y.add(self.y_vec)
        self._hdf_z = self._hdf.add_coordinate(self.z_coordname,
                                               unit=self.z_unit)
        self._hdf_z.add(self.z_vec)
        self._hdf_amp = []
        self._hdf_pha = []
        for i in range(self.ndev):
            self._hdf_amp.append(
                self._hdf.add_value_box('amplitude_%i' % i,
                                        x=self._hdf_z,
                                        y=self._hdf_y,
                                        z=self._hdf_x,
                                        unit='a.u.'))
            self._hdf_pha.append(
                self._hdf.add_value_box('phase_%i' % i,
                                        x=self._hdf_z,
                                        y=self._hdf_y,
                                        z=self._hdf_x,
                                        unit='rad'))
        if self.ReadoutTrace:
            self._hdf_I = self._hdf.add_value_box('I_TimeTrace',
                                                  x=self._hdf_z,
                                                  y=self._hdf_y,
                                                  z=self._hdf_TimeTraceAxis,
                                                  unit='V',
                                                  save_timestamp=False)
            # BUGFIX: Q_TimeTrace was created with x=self._hdf_y (the y axis
            # duplicated); it must use the same axes as I_TimeTrace above.
            self._hdf_Q = self._hdf.add_value_box('Q_TimeTrace',
                                                  x=self._hdf_z,
                                                  y=self._hdf_y,
                                                  z=self._hdf_TimeTraceAxis,
                                                  unit='V',
                                                  save_timestamp=False)
    if self.create_averaged_data:
        self._hdf_amp_avg = []
        self._hdf_pha_avg = []
        for i in range(self.ndev):
            self._hdf_amp_avg.append(
                self._hdf.add_value_vector('amplitude_avg_%i' % i,
                                           x=self._hdf_x,
                                           unit='a.u.'))
            self._hdf_pha_avg.append(
                self._hdf.add_value_vector('phase_avg_%i' % i,
                                           x=self._hdf_x,
                                           unit='rad'))
    if self.comment:
        self._hdf.add_comment(self.comment)
    # show at most the first 5 devices by default in qviewkit
    self._hdf.hf.hf.attrs['default_ds'] = ['data0/amplitude_%i' % i for i in range(min(5, self.ndev))] + \
                                          ['data0/phase_%i' % i for i in range(min(5, self.ndev))]
    if self.qviewkit_singleInstance and self.open_qviewkit and self._qvk_process:
        self._qvk_process.terminate()  # terminate an old qviewkit instance
    if self.open_qviewkit:
        self._qvk_process = qviewkit.plot(
            self._hdf.get_filepath(),
            datasets=[
                'amplitude_%i' % i for i in range(min(5, self.ndev))
            ] + ['phase_%i' % i for i in range(min(5, self.ndev))])
    try:
        self.readout.start()
    except AttributeError:
        # readout drivers without a start() method are tolerated
        pass
def _prepare_measurement_file(self):
    '''
    creates the output .h5-file with distinct dataset structures for each
    measurement type (1D vector, 2D matrix, 3D box per trace). At this point
    all measurement parameters are known and put in the output file.
    '''
    self._data_file = hdf.Data(name=self._file_name, mode='a')
    # link the measurement object to the new file, then persist it
    self._measurement_object.uuid = self._data_file._uuid
    self._measurement_object.hdf_relpath = self._data_file._relpath
    self._measurement_object.instruments = qkit.instruments.get_instrument_names(
    )
    self._measurement_object.save()
    self._mo = self._data_file.add_textlist('measurement')
    self._mo.append(self._measurement_object.get_JSON())
    # write logfile and instrument settings
    self._write_settings_dataset()
    self._log = waf.open_log_file(self._data_file.get_filepath())
    self._data_freq = self._data_file.add_coordinate('frequency', unit='Hz')
    self._data_freq.add(self._freqpoints)
    self._data = []  # empty list as we have a variable number of channels
    if self._scan_1D:
        for i in range(self.num_traces):
            self._data.append(
                self._data_file.add_value_vector(self.traces_names[i],
                                                 x=self._data_freq,
                                                 unit=self.units[i],
                                                 save_timestamp=True))
    if self._scan_2D:
        self._data_x = self._data_file.add_coordinate(self.x_coordname,
                                                      unit=self.x_unit)
        self._data_x.add(self.x_vec)
        for i in range(self.num_traces):
            self._data.append(
                self._data_file.add_value_matrix(self.traces_names[i],
                                                 x=self._data_x,
                                                 y=self._data_freq,
                                                 unit=self.units[i],
                                                 save_timestamp=True))
        if self.log_function is not None:  # use logging
            self._log_value = []
            for i in range(len(self.log_function)):
                self._log_value.append(
                    self._data_file.add_value_vector(
                        self.log_name[i],
                        x=self._data_x,
                        unit=self.log_unit[i],
                        dtype=self.log_dtype[i]))
    if self._scan_3D:
        self._data_x = self._data_file.add_coordinate(self.x_coordname,
                                                      unit=self.x_unit)
        self._data_x.add(self.x_vec)
        self._data_y = self._data_file.add_coordinate(self.y_coordname,
                                                      unit=self.y_unit)
        self._data_y.add(self.y_vec)
        for i in range(self.num_traces):
            self._data.append(
                # BUGFIX: y was 'self.data_y' (missing underscore), which
                # raised AttributeError in every 3D scan; the coordinate
                # created above is 'self._data_y'.
                self._data_file.add_value_box(self.traces_names[i],
                                              x=self._data_x,
                                              y=self._data_y,
                                              z=self._data_freq,
                                              unit=self.units[i],
                                              save_timestamp=True))
        if self.log_function is not None:  # use logging
            self._log_value = []
            for i in range(len(self.log_function)):
                self._log_value.append(
                    self._data_file.add_value_vector(
                        self.log_name[i],
                        x=self._data_x,
                        unit=self.log_unit[i],
                        dtype=self.log_dtype[i]))
    if self.comment:
        self._data_file.add_comment(self.comment)
    if self.qviewkit_singleInstance and self.open_qviewkit and self._qvk_process:
        self._qvk_process.terminate()  # terminate an old qviewkit instance
def _prepare_measurement_file(self):
    '''
    creates the output .h5-file with distinct dataset structures for each
    measurement type. at this point all measurement parameters are known
    and put in the output file

    Branches: 1D (real/imag/amp/phase vectors vs. frequency), 2D (matrices
    vs. x and frequency, plus optional midpoint vectors), 3D (boxes vs. x,
    y and frequency), or a time scan (matrices vs. trace number and time).
    '''
    self._data_file = hdf.Data(name=self._file_name, mode='a')
    # link the measurement object to the new file, then persist it
    self._measurement_object.uuid = self._data_file._uuid
    self._measurement_object.hdf_relpath = self._data_file._relpath
    self._measurement_object.instruments = qkit.instruments.get_instrument_names(
    )
    self._measurement_object.save()
    self._mo = self._data_file.add_textlist('measurement')
    self._mo.append(self._measurement_object.get_JSON())
    # write logfile and instrument settings
    self._write_settings_dataset()
    self._log = waf.open_log_file(self._data_file.get_filepath())
    if not self._scan_time:
        self._data_freq = self._data_file.add_coordinate('frequency',
                                                         unit='Hz')
        self._data_freq.add(self._freqpoints)
    if self._scan_1D:
        self._data_real = self._data_file.add_value_vector(
            'real', x=self._data_freq, unit='', save_timestamp=True)
        self._data_imag = self._data_file.add_value_vector(
            'imag', x=self._data_freq, unit='', save_timestamp=True)
        self._data_amp = self._data_file.add_value_vector(
            'amplitude',
            x=self._data_freq,
            unit='arb. unit',
            save_timestamp=True)
        self._data_pha = self._data_file.add_value_vector(
            'phase', x=self._data_freq, unit='rad', save_timestamp=True)
    if self._scan_2D:
        self._data_x = self._data_file.add_coordinate(self.x_coordname,
                                                      unit=self.x_unit)
        self._data_x.add(self.x_vec)
        self._data_amp = self._data_file.add_value_matrix(
            'amplitude',
            x=self._data_x,
            y=self._data_freq,
            unit='arb. unit',
            save_timestamp=True)
        self._data_pha = self._data_file.add_value_matrix(
            'phase',
            x=self._data_x,
            y=self._data_freq,
            unit='rad',
            save_timestamp=True)
        if self.log_function != None:  #use logging
            self._log_value = []
            for i in range(len(self.log_function)):
                self._log_value.append(
                    self._data_file.add_value_vector(
                        self.log_name[i],
                        x=self._data_x,
                        unit=self.log_unit[i],
                        dtype=self.log_dtype[i]))
        if self._nop < 10:
            """creates view: plot middle point vs x-parameter,
            for qubit measurements"""
            self._data_amp_mid = self._data_file.add_value_vector(
                'amplitude_midpoint',
                unit='arb. unit',
                x=self._data_x,
                save_timestamp=True)
            self._data_pha_mid = self._data_file.add_value_vector(
                'phase_midpoint',
                unit='rad',
                x=self._data_x,
                save_timestamp=True)
        #self._view = self._data_file.add_view("amplitude vs. " + self.x_coordname, x = self._data_x, y = self._data_amp[self._nop/2])
    if self._scan_3D:
        self._data_x = self._data_file.add_coordinate(self.x_coordname,
                                                      unit=self.x_unit)
        self._data_x.add(self.x_vec)
        self._data_y = self._data_file.add_coordinate(self.y_coordname,
                                                      unit=self.y_unit)
        self._data_y.add(self.y_vec)
        self._data_amp = self._data_file.add_value_box(
            'amplitude',
            x=self._data_x,
            y=self._data_y,
            z=self._data_freq,
            unit='arb. unit',
            save_timestamp=False)
        self._data_pha = self._data_file.add_value_box(
            'phase',
            x=self._data_x,
            y=self._data_y,
            z=self._data_freq,
            unit='rad',
            save_timestamp=False)
        if self.log_function != None:  #use logging
            self._log_value = []
            for i in range(len(self.log_function)):
                self._log_value.append(
                    self._data_file.add_value_vector(
                        self.log_name[i],
                        x=self._data_x,
                        unit=self.log_unit[i],
                        dtype=self.log_dtype[i]))
    if self._scan_time:
        # single frequency point: the VNA center frequency
        self._data_freq = self._data_file.add_coordinate('frequency',
                                                         unit='Hz')
        self._data_freq.add([self.vna.get_centerfreq()])
        # time axis spans one sweep, sampled at the VNA's nop
        self._data_time = self._data_file.add_coordinate('time', unit='s')
        self._data_time.add(
            np.arange(0, self._nop, 1) * self.vna.get_sweeptime() /
            (self._nop - 1))
        self._data_x = self._data_file.add_coordinate('trace_number',
                                                      unit='')
        self._data_x.add(np.arange(0, self.number_of_timetraces, 1))
        self._data_amp = self._data_file.add_value_matrix(
            'amplitude',
            x=self._data_x,
            y=self._data_time,
            unit='lin. mag.',
            save_timestamp=False)
        self._data_pha = self._data_file.add_value_matrix(
            'phase',
            x=self._data_x,
            y=self._data_time,
            unit='rad.',
            save_timestamp=False)
    if self.comment:
        self._data_file.add_comment(self.comment)
    if self.qviewkit_singleInstance and self.open_qviewkit and self._qvk_process:
        self._qvk_process.terminate()  #terminate an old qviewkit instance