def get_all(self):
    timestamp = a_tools.latest_data(contains='Define_vector_v1',
                                    return_timestamp=True)[0]
    # Note: if this fails during the initialization, just copy an old
    # folder with that name (and 0 field) into your data directory.
    # Don't wrap this in a try-except; that could hide unintentional
    # and wrong definitions of the vectors.
    params_dict = {'v1': 'Magnet.v1'}
    data = a_tools.get_data_from_timestamp_list([timestamp],
                                                params_dict,
                                                filter_no_analysis=False)
    # the vector is stored as a string like '[x y z]': strip the brackets
    # and parse it back into a float array
    v1 = np.fromstring(data['v1'][0][1:-1], dtype=float, sep=' ')
    self.v1(v1)
    timestamp = a_tools.latest_data(contains='Define_vector_v2',
                                    return_timestamp=True)[0]
    params_dict = {'v2': 'Magnet.v2'}
    data = a_tools.get_data_from_timestamp_list([timestamp],
                                                params_dict,
                                                filter_no_analysis=False)
    v2 = np.fromstring(data['v2'][0][1:-1], dtype=float, sep=' ')
    self.switch_state()
    self.v2(v2)
    # query the derived parameters (field vector, magnitude, angle, steps)
    self.field_vector()
    self.field()
    self.angle()
    self.field_steps()
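For reference, a minimal standalone sketch of the string-to-array round trip used above, assuming the vector is stored in the HDF5 file as a bracketed, space-separated string such as '[0. 0. 1.]' (the raw value here is made up):

import numpy as np

raw = '[0. 0. 1.]'  # assumed storage format of Magnet.v1
v1 = np.fromstring(raw[1:-1], dtype=float, sep=' ')  # strip brackets, parse floats
print(v1)  # -> [0. 0. 1.]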
Example #2
    def get_angle(self):
        # return 0.0  # Uncomment this line only during the first initialization!
        if self.switch_state_z() == 'SuperConducting':
            ## get the persistent field from the HDF5 file

            timestamp = a_tools.latest_data(
                contains='Switch_Z_is_changed_to_SuperConducting_state',
                return_timestamp=True)[0]
            params_dict = {'field_z': 'Magnet.field_z'}
            numeric_params = ['field_z']
            data = a_tools.get_data_from_timestamp_list(
                [timestamp],
                params_dict,
                numeric_params=numeric_params,
                filter_no_analysis=False)
            field_val_z = data['field_z'][0]
        else:  ## Normal conducting
            field_val_z = self.measure_field_z()

        if self.switch_state_y() == 'SuperConducting':
            ## get the persistent field from the HDF5 file

            timestamp = a_tools.latest_data(
                contains='Switch_Y_is_changed_to_SuperConducting_state',
                return_timestamp=True)[0]
            params_dict = {'field_y': 'Magnet.field_y'}
            numeric_params = ['field_y']
            data = a_tools.get_data_from_timestamp_list(
                [timestamp],
                params_dict,
                numeric_params=numeric_params,
                filter_no_analysis=False)
            field_val_y = data['field_y'][0]
        else:  ## Normal conducting
            field_val_y = self.measure_field_y()
        field = np.sqrt(field_val_z**2 + field_val_y**2)
        if field == 0:
            return 0
        elif field_val_z >= 0:
            return np.arcsin(field_val_y / field) * 360. / (2 * np.pi)
        else:
            return -np.arcsin(field_val_y / field) * 360. / (2 * np.pi)
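The sign convention used above can be checked with a small standalone sketch (field values are made up; np.degrees replaces the explicit 360/(2*pi) factor):

import numpy as np

def field_angle(field_val_z, field_val_y):
    # angle of the total field with respect to the z axis, in degrees,
    # with the sign flipped when the z component is negative
    field = np.sqrt(field_val_z**2 + field_val_y**2)
    if field == 0:
        return 0
    angle = np.degrees(np.arcsin(field_val_y / field))
    return angle if field_val_z >= 0 else -angle

print(field_angle(1.0, 1.0))   # 45.0
print(field_angle(-1.0, 1.0))  # -45.0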
Example #3
    def check_job(self):
        """
        Checks whether new jobs have been found and processes them
        Returns: True if a job was found and processed (done or failed),
            False otherwise.

        """
        try:
            timestamps, folders = a_tools.latest_data(newer_than=self.last_ts,
                                                      raise_exc=False,
                                                      return_timestamp=True,
                                                      n_matches=1000)
        except ValueError:
            # could not find any timestamps matching the criteria
            return False
        log.info(f"Searching jobs in: {timestamps[0]} ... {timestamps[-1]}.")
        found_jobs = False
        for folder, ts in zip(folders, timestamps):
            jobs_in_folder = []
            for file in os.listdir(folder):
                if file.endswith(".job"):
                    jobs_in_folder.append(os.path.join(folder, file))
            if len(jobs_in_folder) > 0:
                log.info(f"Found {len(jobs_in_folder)} job(s) in {ts}")
                found_jobs = True

            for filename in jobs_in_folder:
                if os.path.isfile(filename):
                    time.sleep(1)  # wait to make sure that the file is fully written

                    job = self.read_job(filename)
                    errl = len(self.job_errs)
                    os.rename(filename, filename + '.running')
                    self.run_job(job)
                    time.sleep(1)  # wait to make sure that files are written
                    t = self.get_datetimestamp()
                    if os.path.isfile(filename):
                        os.rename(filename, f"{filename}_{t}.loop_detected")
                        log.warning(f'A loop was detected! Job {filename} '
                                    f'tries to delegate plotting.')
                    if errl == len(self.job_errs):
                        os.rename(filename + '.running',
                                  f"{filename}_{t}.done")
                    else:
                        new_filename = f"{filename}_{t}.failed"
                        os.rename(filename + '.running', new_filename)
                        new_errors = self.job_errs[errl:]
                        self.write_to_job(new_filename, new_errors)
                    self.last_ts = ts
        if not found_jobs:
            log.info(f"No new job found.")
            return False
        else:
            return True
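The renames above act as a simple file-name state machine: a new foo.job becomes foo.job.running while it is processed, and ends up as foo.job_<timestamp>.done, .failed (with the errors appended), or .loop_detected. A minimal sketch of the happy path, using a throwaway directory and a made-up timestamp:

import os
import tempfile

with tempfile.TemporaryDirectory() as folder:
    job = os.path.join(folder, 'analysis.job')
    open(job, 'w').close()                                 # a new job appears
    os.rename(job, job + '.running')                       # picked up by the daemon
    t = '20230101_120000'                                  # made-up datetimestamp
    os.rename(job + '.running', job + '_' + t + '.done')   # finished without errors
    print(os.listdir(folder))  # ['analysis.job_20230101_120000.done']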
Example #4
    def start(self):
        """
        Starts the AnalysisDaemon
        Returns:

        """
        self.last_ts = a_tools.latest_data(older_than=self.t_start,
                                           return_path=False,
                                           return_timestamp=True,
                                           n_matches=1)[0]
        self.run()
Example #5
    def __init__(self, feedline=None, qubit_list: list = None,
                 t_start: str = None, metric: str = None, label: str = '',
                 options_dict: dict = None, parameter_list=None,
                 pairs: list = None, parameter_list_2Q: list = None, auto=True):

        super().__init__(t_start=t_start,
                         label=label,
                         options_dict=options_dict)
        if qubit_list is None and pairs is None:
            if feedline == '1':
                qubit_list = ['D1', 'Z1', 'X', 'D3', 'D4']
                pairs = [('D1', 'Z1'), ('Z1', 'D3'), ('X', 'D3'), ('D1', 'X'),
                         ('X', 'D4')]
            elif feedline == '2':
                qubit_list = ['D2', 'Z2']
                pairs = [('D2', 'Z2')]
                # in case feedline 2
            elif feedline == 'both':
                qubit_list = ['D1', 'D2', 'Z1', 'X', 'Z2', 'D3', 'D4']
                pairs = [('D1', 'Z1'), ('Z1', 'D3'), ('X', 'D3'), ('D1', 'X'),
                         ('X', 'D4'), ('Z2', 'D4'), ('D2', 'Z2'), ('D2', 'X')]
                # Both feedlines
            else:
                raise KeyError("feedline must be '1', '2' or 'both'")
        else:
            raise KeyError('qubit_list and pairs are set via the feedline '
                           'argument and should not be passed explicitly')

        if t_start is None:
            t_start = a_tools.latest_data(return_timestamp=True)[0]

        self.qubit_list = qubit_list
        self.pairs = pairs
        self.feedline = feedline  # for GBT we work per feedline
        self.t_start = t_start

        if parameter_list is None:
            # params you want to report. All taken from the qubit object.
            self.parameter_list = ['freq_res', 'freq_qubit',
                                   'anharmonicity', 'fl_dc_I0', 'T1',
                                   'T2_echo', 'T2_star', 'F_RB', 'F_ssro',
                                   'F_discr', 'ro_rel_events', 'ro_res_ext']
        else:
            self.parameter_list = parameter_list
        if parameter_list_2Q is None:
            # params you want to report. All taken from the device object.
            self.parameter_list_2Q = ['ro_lo_freq', 'ro_pow_LO']
        else:
            self.parameter_list_2Q = parameter_list_2Q
        if auto:
            self.run_analysis()
Example #6
    def get_field_y(self):
        # return 0.0  # Uncomment this line only during the first initialization!
        if self.switch_state_y() == 'SuperConducting':
            ## get the persistent field from the HDF5 file

            timestamp = a_tools.latest_data(
                contains='Switch_Y_is_changed_to_SuperConducting_state',
                return_timestamp=True)[0]
            params_dict = {'field': 'Magnet.field_y'}
            numeric_params = ['field']
            data = a_tools.get_data_from_timestamp_list(
                [timestamp],
                params_dict,
                numeric_params=numeric_params,
                filter_no_analysis=False)
            return data['field'][0]
        else:  ## Normal conducting
            meas_field = self.measure_field_y()
            return meas_field
Example #7
def plot_and_save_cz_amp_sweep(cphases, soft_sweep_params_dict, fit_res,
                               qbc_name, qbt_name, save_fig=True, show=True,
                               plot_guess=False, timestamp=None):

    sweep_param_name = list(soft_sweep_params_dict)[0]
    sweep_points = soft_sweep_params_dict[sweep_param_name]['values']
    unit = soft_sweep_params_dict[sweep_param_name]['unit']
    best_val = fit_res.model.func(np.pi, **fit_res.best_values)
    fit_points_init = fit_res.model.func(cphases, **fit_res.init_values)
    fit_points = fit_res.model.func(cphases, **fit_res.best_values)

    fig, ax = plt.subplots()
    ax.plot(cphases*180/np.pi, sweep_points, 'o-')
    ax.plot(cphases*180/np.pi, fit_points, '-r')
    if plot_guess:
        ax.plot(cphases*180/np.pi, fit_points_init, '--k')
    ax.hlines(best_val, cphases[0]*180/np.pi, cphases[-1]*180/np.pi)
    ax.vlines(180, sweep_points.min(), sweep_points.max())
    ax.set_ylabel('Flux pulse {} ({})'.format(sweep_param_name, unit))
    ax.set_xlabel('Conditional phase (deg)')
    ax.set_title('CZ {}-{}'.format(qbc_name, qbt_name))

    ax.text(0.5, 0.95, 'Best {} = {:.6f} ({})'.format(
        sweep_param_name, best_val*1e9 if unit=='s' else best_val, unit),
            horizontalalignment='center', verticalalignment='top',
            transform=ax.transAxes)
    if save_fig:
        import datetime
        import os
        fig_title = 'CPhase_amp_sweep_{}_{}'.format(qbc_name, qbt_name)
        fig_title = '{}--{:%Y%m%d_%H%M%S}'.format(
            fig_title, datetime.datetime.now())
        if timestamp is None:
            save_folder = a_tools.latest_data()
        else:
            save_folder = a_tools.get_folder(timestamp)
        filename = os.path.abspath(os.path.join(save_folder, fig_title+'.png'))
        fig.savefig(filename, bbox_inches='tight')
    if show:
        plt.show()
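A hypothetical usage sketch, assuming an lmfit model whose independent variable is the conditional phase; the sweep-parameter name, qubit names and data below are made up:

import numpy as np
import lmfit

cphases = np.linspace(0.6, 1.4, 11) * np.pi      # measured conditional phases (rad)
amps = 0.45 + 0.05 * (cphases - np.pi) / np.pi   # fake flux-pulse amplitudes
fit_res = lmfit.models.LinearModel().fit(amps, x=cphases)

soft_sweep_params_dict = {'amplitude': {'values': amps, 'unit': 'V'}}
plot_and_save_cz_amp_sweep(cphases, soft_sweep_params_dict, fit_res,
                           qbc_name='qb1', qbt_name='qb2',
                           save_fig=False, show=True)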
Example #8
def get_timestamps(data_dict=None,
                   t_start=None,
                   t_stop=None,
                   label='',
                   data_file_path=None,
                   **params):
    """
    Get timestamps (YYYYMMDD_hhmmss) of HDF files from a specified location.

    Args:
        data_dict (dict or OrderedDict): the extracted timestamps will be
            stored here
        t_start (str): timestamp of the form YYYYMMDD_hhmmss. This timestamp
            is returned if t_stop is None, and otherwise it is the first
            timestamp of the range [t_start, t_stop]
        t_stop (str): timestamp of the form YYYYMMDD_hhmmss. The last timestamp
            to be extracted, starting at t_start
        label (str): if specified, only those timestamps are returned for which
            this label is contained in the filename
        data_file_path (str): full path to a datafile for which the timestamp
            will be returned

    Keyword args (**params):
        passed to analysis_tools.py/latest_data and
        analysis_tools.py/get_timestamps_in_range

    Returns:
        data dict containing the timestamps
    """
    # Don't use a mutable default (data_dict=OrderedDict()) in the signature:
    # the default object is created once at definition time and would be
    # shared, and mutated, across calls.
    if data_dict is None:
        data_dict = OrderedDict()

    timestamps = None
    if data_file_path is None:
        if t_start is None:
            if isinstance(label, list):
                timestamps = [
                    a_tools.latest_data(contains=l,
                                        return_timestamp=True,
                                        **params)[0] for l in label
                ]
            else:
                timestamps = [
                    a_tools.latest_data(contains=label,
                                        return_timestamp=True,
                                        **params)[0]
                ]
        elif t_stop is None:
            if isinstance(t_start, list):
                timestamps = t_start
            else:
                timestamps = [t_start]
        else:
            timestamps = a_tools.get_timestamps_in_range(
                t_start,
                timestamp_end=t_stop,
                label=label if label != '' else None,
                **params)

    if timestamps is None or len(timestamps) == 0:
        raise ValueError('No data file found.')

    data_dict['timestamps'] = timestamps
    return data_dict
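A hypothetical usage sketch (timestamps and labels are made up; a_tools must point at a configured data directory):

dd = get_timestamps(label='Rabi')                 # latest 'Rabi' measurement
dd = get_timestamps(t_start='20230101_120000')    # one known timestamp
dd = get_timestamps(t_start='20230101_000000',
                    t_stop='20230102_000000',
                    label='T1')                   # all 'T1' runs in a range
print(dd['timestamps'])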
def update(self, force=False):
    if self.autoloadCheckbox.isChecked() or force:
        self.filename(a_tools.latest_data())
        self.filename(self._next_file())

def _prev_folder(self):
    ts = self._current_timestamp()
    if ts is not None:
        return a_tools.latest_data(older_than=ts)
    else:
        return None