def extract_data(self):
    """
    [2020-07-15] data extraction style copied from
    `multiplexed_readout_analysis`.

    This is a new style (sept 2019) data extraction.
    This could at some point move to a higher level class.
    """
    self.get_timestamps()
    self.timestamp = self.timestamps[0]
    data_fp = get_datafilepath_from_timestamp(self.timestamp)

    param_spec = {
        "data": ("Experimental Data/Data", "dset"),
        "value_units": ("Experimental Data", "attr:value_units"),
        "value_names": ("Experimental Data", "attr:value_names"),
    }
    # NOTE(review): this block uses `hd5` while sibling methods use `h5d` —
    # confirm both aliases exist at module level.
    rdd = self.raw_data_dict = hd5.extract_pars_from_datafile(
        data_fp, param_spec)

    # The hdf5 attributes come back as bytes; decode to python strings.
    for attr_key in ("value_names", "value_units"):
        rdd[attr_key] = np.array(rdd[attr_key], dtype=str)

    # Fields required by the base analysis class.
    rdd["folder"] = os.path.dirname(data_fp)
    rdd["timestamps"] = self.timestamps
    rdd["measurementstring"] = rdd["folder"]
def extract_data(self):
    """
    This is a new style (sept 2019) data extraction.
    This could at some point move to a higher level class.
    """
    self.get_timestamps()
    self.timestamp = self.timestamps[0]
    data_fp = get_datafilepath_from_timestamp(self.timestamp)

    param_spec = {
        'data': ('Experimental Data/Data', 'dset'),
        'combinations': ('Experimental Data/Experimental Metadata/combinations', 'dset'),
        'value_names': ('Experimental Data', 'attr:value_names'),
    }
    rdd = h5d.extract_pars_from_datafile(data_fp, param_spec)

    # The combinations dataset is stored as length-1 arrays; unwrap them.
    rdd['combinations'] = [entry[0] for entry in rdd['combinations']]

    # Fields required by the base analysis class.
    rdd['timestamps'] = self.timestamps
    rdd['folder'] = os.path.split(data_fp)[0]
    self.raw_data_dict = rdd
def extract_data(self):
    """Load the flipping dataset of the first timestamp in range."""
    self.raw_data_dict = {}
    self.timestamps = a_tools.get_timestamps_in_range(
        self.t_start, self.t_start, label=self.labels)
    self.raw_data_dict['timestamps'] = self.timestamps

    data_fp = a_tools.get_datafilepath_from_timestamp(self.timestamps[0])
    extracted = h5d.extract_pars_from_datafile(
        data_fp, {'data': ('Experimental Data/Data', 'dset')})

    # Column 0 holds the sweep axis; one data column per qubit follows.
    self.raw_data_dict['number_flips'] = extracted['data'][:, 0]
    for col, qubit in enumerate(self.qubits, start=1):
        self.raw_data_dict['{}_data'.format(qubit)] = extracted['data'][:, col]

    self.raw_data_dict['folder'] = os.path.dirname(data_fp)
def test_extract_pars_from_datafile():
    """Extract fitted params, raw data and MC begintime from one datafile."""
    fp = a_tools.get_datafilepath_from_timestamp('20190807_000109')
    param_spec = {
        'T1': ('Analysis/Fitted Params F|1>/tau', 'attr:value'),
        'uT1': ('Analysis/Fitted Params F|1>/tau', 'attr:stderr'),
        'data': ('Experimental Data/Data', 'dset'),
        'timestamp': ('MC settings/begintime', 'dset'),
    }
    extracted = h5d.extract_pars_from_datafile(fp, param_spec)

    assert extracted['T1'] == approx(3.385325226491e-05)
    assert extracted['uT1'] == approx(4.5737638947423746e-07)
    assert extracted['timestamp'].T[0] == approx(
        [2.019e+03, 8.000e+00, 7.000e+00, 0.000e+00, 1.000e+00,
         9.000e+00, 2.000e+00, 2.190e+02, 1.000e+00])
    assert extracted['data'][:, 0] == approx(
        np.arange(0, 165.001e-6, 3.75e-6))
def extract_data(self):
    """
    This is a new style (sept 2019) data extraction.
    This could at some point move to a higher level class.
    """
    self.get_timestamps()
    self.timestamp = self.timestamps[0]
    data_fp = get_datafilepath_from_timestamp(self.timestamp)

    rdd = h5d.extract_pars_from_datafile(data_fp, {
        'data': ('Experimental Data/Data', 'dset'),
        'value_names': ('Experimental Data', 'attr:value_names'),
    })

    # Fields required by the base analysis class.
    rdd['timestamps'] = self.timestamps
    rdd['folder'] = os.path.split(data_fp)[0]
    self.raw_data_dict = rdd
def extract_data(self):
    """
    Extract per-qubit and per-pair calibration parameters from the
    instrument-settings snapshot of a single datafile.

    Fills:
        self.raw_data_dict     : {qubit: {param: value}} plus
                                 'timestamps' and 'folder' entries
        self.raw_data_dict_2Q  : {pair: {param: value}} read from the
                                 'device' instrument
        self.raw_data_frame    : DataFrame view of the 1Q dict
        self.raw_data_frame_2Q : DataFrame view of the 2Q dict
    """
    self.raw_data_dict = {}
    data_fp = get_datafilepath_from_timestamp(self.t_start)

    # Single-qubit parameters: one attribute dict per qubit.
    for qubit in self.qubit_list:
        param_dict = self._extract_instr_attrs(
            data_fp, qubit, self.parameter_list)
        # 'None'/'0' mark parameters that were never set; use NaN so they
        # show up as missing values in the data frame.
        for key, val in param_dict.items():
            if val == 'None' or val == '0':
                param_dict[key] = np.nan
        # Store infidelities (1 - F); kept as strings, consistent with the
        # raw attribute values.
        param_dict['F_RB'] = str(1 - float(param_dict['F_RB']))
        param_dict['F_ssro'] = str(1 - float(param_dict['F_ssro']))
        self.raw_data_dict[qubit] = param_dict

    # Two-qubit gate parameters, stored on the 'device' instrument,
    # one dict per qubit pair.
    self.raw_data_dict_2Q = {}
    for pair in self.pairs:
        self.raw_data_dict_2Q[pair] = self._extract_instr_attrs(
            data_fp, 'device', self.parameter_list_2Q)

    # Convert the dicts to data frames (rows indexed by qubit / pair).
    self.raw_data_frame = pd.DataFrame.from_dict(self.raw_data_dict).T
    self.raw_data_frame_2Q = pd.DataFrame.from_dict(
        self.raw_data_dict_2Q).T

    # Parts added to be compatible with base analysis data requirements
    self.raw_data_dict['timestamps'] = self.t_start
    self.raw_data_dict['folder'] = os.path.split(data_fp)[0]

def _extract_instr_attrs(self, data_fp, instr_name, params):
    """Read the listed attributes of one instrument from the datafile."""
    attrs = {}
    for param in params:
        spec = {param: ('Instrument settings/{}'.format(instr_name),
                        'attr:{}'.format(param))}
        attrs[param] = h5d.extract_pars_from_datafile(data_fp, spec)[param]
    return attrs
def extract_data(self):
    """
    Extract pauli terms from multiple hd5 files.
    """
    self.raw_data_dict = {}
    self.timestamps = a_tools.get_timestamps_in_range(
        self.t_start, self.t_stop, label=self.labels)

    # Same spec for every file, so build it once outside the loop.
    param_spec = {
        'TFD_dict': ('Analysis/quantities_of_interest',
                     'attr:all_attr'),
        'tomo_dict': ('Analysis/quantities_of_interest/full_tomo_dict',
                      'attr:all_attr'),
    }
    for ts in self.timestamps:
        data_fp = get_datafilepath_from_timestamp(ts)
        self.raw_data_dict[ts] = h5d.extract_pars_from_datafile(
            data_fp, param_spec)

    # Fields required by the base analysis class; 'folder' refers to the
    # last extracted file.
    self.raw_data_dict['timestamps'] = self.timestamps
    self.raw_data_dict['folder'] = os.path.split(data_fp)[0]
def extract_data(self):
    """Load per-qubit data and the previously set qubit frequencies."""
    self.raw_data_dict = {}
    self.raw_data_dict['artificial_detuning'] = self.artificial_detuning
    self.timestamps = a_tools.get_timestamps_in_range(
        self.t_start, self.t_start, label=self.labels)
    self.raw_data_dict['timestamps'] = self.timestamps

    data_fp = a_tools.get_datafilepath_from_timestamp(self.timestamps[0])
    extracted = h5d.extract_pars_from_datafile(
        data_fp, {'data': ('Experimental Data/Data', 'dset')})

    # Column 0 holds the sweep axis; one data column per qubit follows.
    self.raw_data_dict['points'] = extracted['data'][:, 0]
    for idx, qubit in enumerate(self.qubits):
        self.raw_data_dict['{}_data'.format(qubit)] = \
            extracted['data'][:, idx + 1]
        self.raw_data_dict['{}_times'.format(qubit)] = self.times[idx]

        # Previously calibrated frequency, read from the instrument
        # settings snapshot of the same file.
        freq_key = '{}_freq_old'.format(qubit)
        spec = {freq_key: ('Instrument settings/{}'.format(qubit),
                           'attr:freq_qubit')}
        old_freq = h5d.extract_pars_from_datafile(data_fp, spec)
        self.raw_data_dict[freq_key] = float(old_freq[freq_key])

    self.raw_data_dict['folder'] = os.path.dirname(data_fp)
def test_get_datafilepath_from_timestamp_raises_no_data():
    """A timestamp without a matching datafile raises ValueError."""
    with pytest.raises(ValueError):
        a_tools.get_datafilepath_from_timestamp('20170412_183929')
def test_get_datafilepath_from_timestamp():
    """A valid timestamp resolves to its .hdf5 file inside the datadir."""
    data_fp = a_tools.get_datafilepath_from_timestamp('20170412_183928')
    print(data_fp)
    expected_fp = os.path.join(
        datadir, '20170412', '183928_Rabi-n1_q9', '183928_Rabi-n1_q9.hdf5')
    assert data_fp == expected_fp
def prepare_learner_data_for_restore(timestamp: str, value_names: set = None):
    """
    Load a measurement datafile and reshape it for restoring into an
    adaptive learner.

    NB: Telling the learner about too many points takes a significant
    time. Beyond 1000 pnts expect several minutes.

    Args:
        timestamp: timestamp of the datafile to load
        value_names: if given, only the data columns whose value name is
            in this set are returned as `Y`; otherwise all non-sweep
            columns are returned

    Returns:
        dict with keys "raw_data_dict", "bounds", "X", "Y"

    Usage example:

    [1]:
        import adaptive
        adaptive.notebook_extension(_inline_js=False)
        from pycqed.utilities.learners_utils import (tell_X_Y,
            prepare_learner_data_for_restore)

        ts = "20200219_194452"
        dummy_f = lambda X, Y: 0.
        dict_for_learner = prepare_learner_data_for_restore(ts,
            value_names={"Cost func"})
        learner = adaptive.Learner2D(dummy_f,
            bounds=dict_for_learner["bounds"])
        X = dict_for_learner["X"]
        Y = dict_for_learner["Y"]
        tell_X_Y(learner, X, Y)

        def plot(l):
            plot = l.plot(tri_alpha=1.)
            return (plot + plot.Image + plot.EdgePaths).cols(2)

    [2]:
        %%opts Overlay [height=500 width=700]
        a_plot = plot(learner)
        a_plot
    """
    VN_KEY = "value_names"
    SPN_KEY = "sweep_parameter_names"

    data_fp = a_tools.get_datafilepath_from_timestamp(timestamp)
    param_spec = {
        "data": ("Experimental Data/Data", "dset"),
        VN_KEY: ("Experimental Data", "attr:" + VN_KEY),
        SPN_KEY: ("Experimental Data", "attr:" + SPN_KEY),
    }
    raw_data_dict = h5d.extract_pars_from_datafile(data_fp, param_spec)

    # Attribute comes back as bytes; decode to python strings here
    # (ideally `extract_pars_from_datafile` would do this itself).
    raw_data_dict[VN_KEY] = np.array(raw_data_dict[VN_KEY], dtype=str)

    dim_domain = len(raw_data_dict[SPN_KEY])
    columns = raw_data_dict["data"].T

    if dim_domain == 1:
        # NOTE(review): this indexes a single element of the first sweep
        # column — confirm this is the intended 1D shape for the learner.
        X = columns[0][0]
        bounds = (np.min(X), np.max(X))
    else:
        X = columns[:dim_domain]
        bounds = [(np.min(axis_vals), np.max(axis_vals)) for axis_vals in X]
        # Shaping for the learner: points as rows.
        X = X.T

    if value_names is None:
        Y = columns[dim_domain:]
    else:
        selected = np.where([
            name in value_names for name in raw_data_dict[VN_KEY]
        ])[0]
        Y = columns[dim_domain + selected]
    Y = Y[0] if len(Y) == 1 else Y.T

    return {"raw_data_dict": raw_data_dict, "bounds": bounds, "X": X, "Y": Y}