def write_experiment_runs_to_text_files_conv_vs_restless(
        starttime, endtime, maxlength, filename_gateseq, number_of_gateseq):
    """
    Function for writing experiment runs to text files.

    At the moment this only works for an experiment that is repeated several
    times in time with the same parameters. It also assumes that the runs
    alternate, starting with conventional tuning, then restless, etc. It is
    specialised for a single experiment and is more of a one-off helper.
    ----------------------------------------------------------------------
    Parameters:
    starttime: string
        Formatted in the standard form as used in PycQED_py3,
        e.g. 20160614_000000.
    endtime: string
        Formatted in the standard form as used in PycQED_py3,
        e.g. 20160615_235959.
    maxlength: integer
        Maximum length of the germ power sequence.
    filename_gateseq: string
        Text file containing the list of experiments/gate sequences.
    number_of_gateseq: integer
        The number of gate sequences in each experiment.
    """
    from pycqed.analysis import measurement_analysis as ma
    from pycqed.analysis import analysis_toolbox as a_tools
    import measurement.pulse_sequences.gate_set_tomography as _gst
    from importlib import reload
    import h5py
    reload(_gst)

    experiment_timestamp_strings = a_tools.get_timestamps_in_range(
        starttime, endtime, label='GST')
    file_path_names = []
    for i in range(len(experiment_timestamp_strings)):
        file_path_names.append(
            a_tools.data_from_time(experiment_timestamp_strings[i]))
    # runs alternate between conventional and restless tuning
    file_path_names_conventional = file_path_names[0::2]
    file_path_names_restless = file_path_names[1::2]

    for i in range(len(file_path_names_conventional)):
        fn = file_path_names_conventional[i]
        suffix = fn[len(a_tools.datadir) + 10:]
        filename_input = fn + '\\' + suffix + '.hdf5'
        filename_output = ('GST_data_paper_L%s_conv_%s_run%s_17june'
                           % (maxlength, suffix, i) + '.txt')
        _gst.write_textfile_data_for_GST_input_adapt_develop(
            filename_input, filename_output, filename_gateseq,
            number_of_gateseq, zero_one_inverted='automatic')

    for i in range(len(file_path_names_restless)):
        fn = file_path_names_restless[i]
        suffix = fn[len(a_tools.datadir) + 10:]
        filename_input = fn + '\\' + suffix + '.hdf5'
        filename_output = ('GST_data_paper_L%s_rest_%s_run%s_17june'
                           % (maxlength, suffix, i) + '.txt')
        _gst.write_textfile_data_for_GST_input_adapt_develop(
            filename_input, filename_output, filename_gateseq,
            number_of_gateseq, zero_one_inverted='automatic')
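# Usage sketch (illustrative, not part of the original module): the call
# below shows how the helper above could be invoked for a day of alternating
# conventional/restless GST runs. The timestamps reuse the examples from the
# docstring; maxlength, the gate-sequence list file and the number of
# sequences are hypothetical placeholders and must match the actual GST
# experiment.
def _example_write_gst_runs_to_textfiles():
    write_experiment_runs_to_text_files_conv_vs_restless(
        starttime='20160614_000000',
        endtime='20160615_235959',
        maxlength=256,  # hypothetical maximum germ power length
        filename_gateseq='GST_gateseq_list.txt',  # hypothetical list file
        number_of_gateseq=731)  # hypothetical number of gate sequences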
def _fill_experimental_values_with_ReparkingRamsey(experimental_values,
                                                   timestamp, qubit):
    """
    Fills the experimental_values dictionary with the experimental values
    from ReparkingRamsey experiments.

    NOTE: if there are multiple measurements of the same transition at the
    same voltage, the (chronologically) last one will be used.

    Arguments:
        experimental_values: dictionary to fill with experimental data
        timestamp: timestamp of the ReparkingRamsey measurement
        qubit: qubit (qubit object) or qubit name (str)
    """
    path = a_tools.data_from_time(timestamp)
    filepath = a_tools.measurement_filename(path)
    data = h5py.File(filepath, "r")

    if "_ge_" in filepath:
        transition = "ge"
    elif "_ef_" in filepath:
        transition = "ef"
    else:
        log.warning("Transition not recognized. Only ge and ef transitions "
                    "are supported.")
        return

    if isinstance(qubit, str):
        qubit_name = qubit
    else:
        qubit_name = qubit.name
    if qubit_name not in filepath:
        return

    try:
        dc_voltages = np.array(data["Experimental Data"]
                               ["Experimental Metadata"]["dc_voltages"])
        apd = data["Analysis"]["Processed data"]["analysis_params_dict"]
        for i in range(len(dc_voltages)):
            freq = float(np.array(
                apd[f"{qubit_name}_{i}"]["exp_decay"].attrs["new_qb_freq"]))
            freq_std = float(np.array(
                apd[f"{qubit_name}_{i}"]["exp_decay"].attrs[
                    "new_qb_freq_stderr"]))
            voltage = dc_voltages[i]
            HamiltonianFittingAnalysis._fill_experimental_values(
                experimental_values, voltage, transition, freq)
    except Exception:
        log.warning(
            "Could not get reparking data from file {}".format(filepath))
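# Sketch (assumption): based on the reads in the helper above, a
# ReparkingRamsey HDF5 file is expected to contain roughly
#   Experimental Data/Experimental Metadata/dc_voltages
#   Analysis/Processed data/analysis_params_dict/<qubit>_<i>/exp_decay
# with 'new_qb_freq' (and its stderr) stored as attributes. The helper below
# is not part of the original code; it simply prints those entries for a
# given file path so the expected layout can be checked by hand.
def _example_inspect_reparking_file(filepath):
    import h5py
    with h5py.File(filepath, "r") as data:
        print(np.array(data["Experimental Data"]
                       ["Experimental Metadata"]["dc_voltages"]))
        apd = data["Analysis"]["Processed data"]["analysis_params_dict"]
        for key in apd:
            print(key, dict(apd[key]["exp_decay"].attrs))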
def test_save_quantities_of_interest(self):
    # Test based on the test below to get a dummy dataset
    ts = '20161124_162604'
    a = ba.BaseDataAnalysis()
    a.proc_data_dict['quantities_of_interest'] = {'a': 5}
    a.timestamps = [ts]
    a.save_quantities_of_interest()

    fn = a_tools.measurement_filename(a_tools.data_from_time(ts))
    with h5py.File(fn, 'r') as file:
        saved_val = float(
            file['Analysis']['quantities_of_interest'].attrs['a'])
    assert saved_val == 5
def _fill_experimental_values_with_Ramsey(experimental_values, timestamp,
                                          qubit, fluxlines_dict):
    """
    Fills the experimental_values dictionary with the experimental values
    from Ramsey experiments.

    NOTE: if there are multiple measurements of the same transition at the
    same voltage, the (chronologically) last one will be used.

    Arguments:
        experimental_values: dictionary containing the experimental values
        timestamp: timestamp of the Ramsey measurement
        qubit: qubit (qubit object) or qubit name (str)
        fluxlines_dict: dictionary containing the fluxline ids (necessary
            to determine the voltage)
    """
    path = a_tools.data_from_time(timestamp)
    filepath = a_tools.measurement_filename(path)
    data = h5py.File(filepath, "r")

    if "_ge_" in filepath:
        transition = "ge"
    elif "_ef_" in filepath:
        transition = "ef"
    else:
        raise ValueError("Transition not recognized. Only ge and ef "
                         "transitions are supported.")

    if isinstance(qubit, str):
        qubit_name = qubit
    else:
        qubit_name = qubit.name
    if qubit_name not in filepath:
        return

    try:
        apd = data["Analysis"]["Processed data"]["analysis_params_dict"]
        freq = apd[qubit_name]["exp_decay"].attrs["new_qb_freq"]
        freq_std = apd[qubit_name]["exp_decay"].attrs["new_qb_freq_stderr"]
        voltage = float(data["Instrument settings"]["DCSource"].attrs[
            fluxlines_dict[qubit_name].name])
        HamiltonianFittingAnalysis._fill_experimental_values(
            experimental_values, voltage, transition, freq)
    except KeyError:
        log.warning(f"Could not get ramsey data from file {filepath}")
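# Usage sketch (illustrative): the helper above is normally called via
# get_experimental_values_from_timestamps, but it can also be used directly
# to add a single Ramsey measurement to an existing dictionary. The timestamp
# and qubit name below are hypothetical placeholders; the nested structure of
# experimental_values is whatever _fill_experimental_values produces
# (voltage -> transition -> frequency is assumed here).
def _example_fill_from_ramsey(fluxlines_dict):
    experimental_values = {}
    HamiltonianFittingAnalysis._fill_experimental_values_with_Ramsey(
        experimental_values,
        timestamp='20230101_120000',  # hypothetical Ramsey timestamp
        qubit='qb1',                  # hypothetical qubit name
        fluxlines_dict=fluxlines_dict)
    return experimental_values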
def test_save_fit_results(self):
    # Strictly speaking an integration test, as it relies on the conditional
    # oscillation analysis, but the only thing tested here is whether the
    # value of the fit_result is saved.
    ts = '20181126_131143'
    a = ma2.Conditional_Oscillation_Analysis(t_start=ts)
    exp_val = a.fit_res['cos_fit_off'].params['amplitude'].value

    fn = a_tools.measurement_filename(a_tools.data_from_time(ts))
    with h5py.File(fn, 'r') as file:
        saved_val = float(file['Analysis']['cos_fit_off']['params']
                          ['amplitude'].attrs['value'])

    a.fit_res = {}
    a.save_fit_results()
    assert exp_val == saved_val
def get_experimental_values_from_timestamps(qubit, fluxlines_dict,
                                            timestamps, **kw):
    """
    Gets the experimental values from the database for a list of timestamps
    and returns them in the usual format.

    Arguments:
        qubit: qubit object or qubit name (str)
        fluxlines_dict: dictionary containing the fluxline ids (needed to
            read the voltage from Ramsey experiments)
        timestamps: list of timestamps to read the experimental values from

    Keyword Arguments:
        include_reparkings: bool, whether to also include data from
            ReparkingRamsey measurements (default: False)
        datadir: path to the directory containing the desired data
            (default: a_tools.datadir)
    """
    experimental_values = {}

    # temporarily switch the data directory if a custom one is provided
    default_datadir = a_tools.datadir
    a_tools.datadir = kw.get("datadir", default_datadir)
    include_reparkings = kw.get("include_reparkings", False)

    for timestamp in timestamps:
        path = a_tools.data_from_time(timestamp)
        if "_Ramsey_" in path:
            if fluxlines_dict is None:
                raise ValueError(
                    "fluxlines_dict must be specified to read the "
                    "experimental values from Ramsey experiments")
            HamiltonianFittingAnalysis._fill_experimental_values_with_Ramsey(
                experimental_values, timestamp, qubit, fluxlines_dict)
        elif ("_ReparkingRamsey_" in path) and include_reparkings:
            HamiltonianFittingAnalysis.\
                _fill_experimental_values_with_ReparkingRamsey(
                    experimental_values, timestamp, qubit)

    a_tools.datadir = default_datadir
    return experimental_values
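# Usage sketch (illustrative): collect experimental values from a set of
# Ramsey and ReparkingRamsey timestamps. The timestamps, qubit name and data
# directory below are hypothetical placeholders; fluxlines_dict must map
# qubit names to the fluxline parameters of the DC source, as used in
# _fill_experimental_values_with_Ramsey above. The function is called bare,
# as presented here; in PycQED it may instead be exposed as a staticmethod of
# HamiltonianFittingAnalysis.
def _example_get_experimental_values(fluxlines_dict):
    timestamps = ['20230101_120000', '20230101_123000']  # hypothetical
    return get_experimental_values_from_timestamps(
        qubit='qb1',                   # hypothetical qubit name
        fluxlines_dict=fluxlines_dict,
        timestamps=timestamps,
        include_reparkings=True,       # also use ReparkingRamsey data
        datadir='/data/measurements')  # hypothetical data directory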
def plot(self, sequences=0, segments=0, qubits=None, save=False,
         legend=True, **plot_kwargs):
    """
    Plots (a subset of) sequences / segments of the QuantumExperiment.

    :param sequences (int, list, "all"): sequences to plot. Can be "all"
        (plot all sequences), an integer (index of the sequence to plot),
        or a list of integers/str. If strings are in the list, only the
        sequences with the corresponding names are plotted.
    :param segments (int, list, "all"): segments to plot. If a single
        index i is provided, the ith segment is plotted for each sequence
        in `sequences`. Otherwise a list of lists of indices must be
        provided: the outer list corresponds to each sequence and the
        inner list to the indices of the segments to plot, e.g.
        segments=[[0, 1], [3]] plots segments 0 and 1 of sequence 0 and
        segment 3 of sequence 1. If the string 'all' is provided, all
        segments are plotted. Plots segment 0 by default.
    :param qubits (list): list of qubits to plot. Defaults to
        self.meas_objs. Qubits can be specified as qubit names or qubit
        objects.
    :param save (bool): whether or not to save the figures in the
        measurement folder.
    :param legend (bool): whether or not to show the legend.
    :param plot_kwargs: kwargs passed on to segment.plot(). By default,
        channel_map is taken from dev.get_channel_map(qubits) if
        available.
    :return: list of the figures/axes returned by segment.plot(), or None
        if nothing was returned (e.g. if show_and_close is True).
    """
    plot_kwargs = deepcopy(plot_kwargs)
    if sequences == "all":
        # plot all sequences
        sequences = self.sequences
    # if the provided sequence is not in a list or tuple, make it a list
    if np.ndim(sequences) == 0:
        sequences = [sequences]
    # get sequence objects from sequence name or index
    sequences = np.ravel([[s for i, s in enumerate(self.sequences)
                           if i == ind or s.name == ind]
                          for ind in sequences])
    if qubits is None:
        qubits = self.meas_objs
    qubits, _ = self.get_qubits(qubits)  # get qubit objects
    default_ch_map = \
        self.dev.get_channel_map(qubits) if self.dev is not None else \
        {qb.name: qb.get_channels() for qb in qubits}
    plot_kwargs.update(dict(channel_map=plot_kwargs.pop('channel_map',
                                                        default_ch_map)))
    plot_kwargs.update(dict(legend=legend))

    if segments == "all":
        # plot all segments
        segments = [range(len(seq.segments)) for seq in sequences]
    elif isinstance(segments, int):
        # single segment index: use it for every sequence
        segments = [[segments] for _ in sequences]

    figs_and_axs = []
    for seq, segs in zip(sequences, segments):
        for s in segs:
            s = list(seq.segments.keys())[s]
            if save:
                try:
                    from pycqed.analysis import analysis_toolbox as a_tools
                    folder = a_tools.data_from_time(
                        self.timestamp, folder=self.MC.datadir(),
                        auto_fetch=False)
                except Exception:
                    log.warning('Could not determine folder of current '
                                'experiment. Sequence plot will be saved '
                                'in the current directory.')
                    folder = "."
                import os
                save_path = os.path.join(folder,
                                         "_".join((seq.name, s)) + ".png")
                save_kwargs = dict(fname=save_path, bbox_inches="tight")
                plot_kwargs.update(dict(save_kwargs=save_kwargs,
                                        savefig=True))
            figs_and_axs.append(seq.segments[s].plot(**plot_kwargs))

    # avoid returning a list of Nones (if show_and_close is True)
    return [v for v in figs_and_axs if v is not None] or None
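# Usage sketch (illustrative): plot segment 0 of every sequence of an
# existing QuantumExperiment instance and save the figures into the
# measurement folder. `qe`, 'qb1' and 'qb2' are hypothetical placeholders
# for an instance and its measured qubits.
def _example_plot_first_segments(qe):
    return qe.plot(sequences='all', segments=0, qubits=['qb1', 'qb2'],
                   save=True, legend=False)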