def gaussian_fit(da, guess=(1, 0, 1), plot=False, title=None):
    def gaussian(x, a, x0, sigma):
        return a * _np.exp(-0.5 * ((x - x0) / sigma)**2)

    fit_func = gaussian

    x = da.coords[da.dims[0]].values.copy()
    y = da.values.copy()
    y_max = y.max()

    fit_params, _ = _curve_fit(
        fit_func, xdata=x, ydata=y / y_max,
        p0=guess)  #, bounds=([0, -_np.inf, 0], [_np.inf, _np.inf, _np.inf]))
    fit_params[0] *= y_max
    x_fit = _np.linspace(x.min(), x.max(), 100)
    y_fit = _xr.DataArray(gaussian(x_fit, *fit_params),
                          dims=da.dims,
                          coords=[x_fit])

    if plot is True:
        fig, ax = _plt.subplots()
        y_fit.plot(ax=ax, label='Fit')
        da.plot(ax=ax, label='Raw', linestyle='', marker='x')
        ax.set_title((title or '') + '\n' + str(fit_params))
        ax.legend()

    return fit_params
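A minimal usage sketch on synthetic data (a hypothetical example; it assumes numpy and xarray are installed, and that the module-level aliases _np, _xr, _plt and _curve_fit the function relies on are in place):

import numpy as np
import xarray as xr

x = np.linspace(-5, 5, 101)
y = 2.0 * np.exp(-0.5 * ((x - 1.0) / 0.8) ** 2) + 0.05 * np.random.randn(x.size)
da = xr.DataArray(y, dims=['x'], coords=[x])
a, x0, sigma = gaussian_fit(da)  # fitted (amplitude, center, width)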
Example #2
def rb_decay_rate(dataset, showPlot=False, xlim=None, ylim=None, saveFigPath=None):
    """
    Compute the Randomized Benchmarking (RB) decay rate given a data set
    containing counts for RB gate strings.  Note: currently this function
    only works for 1-qubit datasets having SPAM labels 'plus' and 'minus'.

    Parameters
    ----------
    dataset : DataSet
      The RB data set.

    showPlot : bool, optional
       Whether to show a plot of the fit to the RB data.

    xlim : (xmin,xmax), optional
       Specify x-axis limits for plot

    ylim : (ymin,ymax), optional
       Specify y-axis limits for plot

    saveFigPath : string, optional
       Pathname to save a plot of the fit to the RB data.

    Returns
    -------
    a,b : float
       The best-fit decay curve parameters a and b, as defined in
       the rb_decay function.
    """
    RBlengths = []
    RBsuccesses = []
    for key in list(dataset.keys()):
        dataLine = dataset[key]
        plus = dataLine['plus']
        minus = dataLine['minus']
        N = plus + minus
        RBlengths.append(len(key))
        RBsuccesses.append(1 - dataLine['plus']/float(N))
        if dataLine['plus']/float(N) > 1:
            print(key)
    a,b = _curve_fit(rb_decay,RBlengths,RBsuccesses)[0]
    if saveFigPath or showPlot:
        newplot = _plt.figure()
        newplotgca = newplot.gca()
        newplotgca.plot(RBlengths,RBsuccesses,'.')
        newplotgca.plot(range(max(RBlengths)),
                        rb_decay(_np.arange(max(RBlengths)),a,b),'+')
        newplotgca.set_xlabel('RB sequence length (non-Clifford)')
        newplotgca.set_ylabel('Success rate')
        newplotgca.set_title('RB success')
        if xlim:
            _plt.xlim(xlim)
        if ylim:
            _plt.ylim(ylim)
    if saveFigPath:
        newplot.savefig(saveFigPath)
    return a,b
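The rb_decay model fitted above is not shown in this snippet. A hedged sketch of the kind of zeroth-order decay it might be (a hypothetical stand-in, not necessarily this module's definition):

def rb_decay(m, a, b):
    # success probability decaying geometrically with sequence length m
    return a * b ** m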
Example #3
    def __init__(self, f, xdata, ydata, *args, **kwargs):
        params, cov = _curve_fit(f, xdata, ydata, *args, **kwargs)
        errors = [sp.sqrt(cov[i, i]) for i in range(len(cov))]

        if len(sp.where(cov == sp.inf)[0]) > 0:
            raise ValueError(
                "Fit unsuccessful, provide better initial parameters (p0)")

        self.params = params
        self.errors = errors
        self.xdata = sp.array(xdata)
        self.ydata = sp.array(ydata)
        self.f = lambda x: f(x, *params)
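A condensed, self-contained usage sketch; the original class name is not shown, so Fit below is a hypothetical stand-in, and numpy replaces the deprecated scipy array aliases (sp.sqrt, sp.array) used above:

import numpy as np
from scipy.optimize import curve_fit as _curve_fit

class Fit:  # hypothetical name
    def __init__(self, f, xdata, ydata, *args, **kwargs):
        params, cov = _curve_fit(f, xdata, ydata, *args, **kwargs)
        self.params = params
        self.errors = np.sqrt(np.diag(cov))  # 1-sigma parameter errors
        self.f = lambda x: f(x, *params)

def line(x, m, c):
    return m * x + c

fit = Fit(line, [0.0, 1.0, 2.0, 3.0], [0.1, 2.1, 3.9, 6.2], p0=[1.0, 0.0])
print(fit.params, fit.errors, fit.f(1.5))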
Example #4
    def _fit_2DGaussian(self, **kwargs):
        """
        2D guassian fit function. XP found this code online from
        http://stackoverflow.com/questions/21566379/fitting-a-2d-gaussian-function-using-scipy-optimize-curve-fit-valueerror-and-m
        Parameters
        ----------
        kwargs
        ------
        rho: float32, the initial guess for amplitude of the gaussian. default(0.3)
        x0: int, the initial guess for the column of the peak in pixels. default(300)
        y0: int, the initial guess for the row of the peak in pixels. default(250)
        w_a: float32, the initial guess for the bigger standard deviation. default(25)
        w_b: float32, the initial guess for the smaller standard deviation. default(20)
        Background: float32, the initial guess for the offset. default(0.02)
        plot: boolean, whether the resultant fit should be plotted. default(False)
        """
        rho = kwargs.get("rho", 0.3)
        x0 = kwargs.get("x0", 300)
        y0 = kwargs.get("y0", 250)
        w_a = kwargs.get("w_a", 25)
        w_b = kwargs.get("w_b", 20)
        initial_guess = (rho, x0, y0, w_a, w_b)

        # Get the row & column numbers
        row_num = len(self.data)
        col_num = len(self.data[0])

        # Create x and y indices
        x = _n.linspace(0, col_num - 1, col_num)
        y = _n.linspace(0, row_num - 1, row_num)
        x, y = _n.meshgrid(x, y)

        # The fitting
        popt, pcov = _curve_fit(self._twoD_Gaussian, (x, y), self.data.flatten(), p0=initial_guess)
        # Return the fitting parameter results, and convert the position to meters.
        self.rho = popt[0]
        self.x0 = popt[1]
        self.y0 = popt[2]
        self.w_a = abs(popt[3])
        self.w_b = abs(popt[4])
        self.rho_err = _n.sqrt(pcov[0][0])
        self.x0_err = _n.sqrt(pcov[1][1])
        self.y0_err = _n.sqrt(pcov[2][2])
        self.w_a_err = _n.sqrt(pcov[3][3])
        self.w_b_err = _n.sqrt(pcov[4][4])

        self.w_a_len = 100 * self.pixel_size * self.w_a
        self.w_b_len = 100 * self.pixel_size * self.w_b
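self._twoD_Gaussian is not shown above. A hedged sketch of a model consistent with the five-parameter initial guess (the real method may also include rotation and the Background offset the docstring mentions):

import numpy as _n

def _twoD_Gaussian(xy, rho, x0, y0, w_a, w_b):
    # elliptical 2D Gaussian, raveled to match self.data.flatten()
    x, y = xy
    g = rho * _n.exp(-0.5 * (((x - x0) / w_a) ** 2 + ((y - y0) / w_b) ** 2))
    return g.ravel()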
Example #5
    def compute_diffusion_coefficient(self,
                                      fit_begin=0.1,
                                      fit_end=0.9,
                                      nb_blocks=1,
                                      use_filtered=True):
        msd, err = self.msd
        block_length = len(msd)
        cut_begin = int(block_length * fit_begin)
        cut_end = int(block_length * fit_end)

        def func(x, m, c):
            return m * x + c

        popt, pcov = _curve_fit(func,
                                _np.arange(block_length)[cut_begin:cut_end],
                                msd[cut_begin:cut_end])
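        # Einstein relation in three dimensions: MSD(t) ~ 6*D*t,
        # so the diffusion coefficient D is the fitted slope divided by 6.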
        return popt[0] / 6.
Example #6
def curve_fit(likelihood: Likelihood, x0=None, bounds=None, **kwargs):
    """Use scipy's curve_fit to do LM to find the MAP.

    Parameters
    ----------
    likelihood
        In this case the likelihood must be a subclass of Chi2.
    x0
        The initial guess
    bounds
        A list of tuples of parameters bounds, or False if no bounds are to be set. If
        None, use the min/max bounds on each parameter in the likelihood.
    """

    def model(x, *p):
        return likelihood.reduce_model(params=p)

    if x0 is None:
        x0 = np.array([apar.fiducial for apar in likelihood.child_active_params])

    eps = kwargs.get("options", {}).get("eps", 1e-8)
    if bounds is None:
        bounds = (
            [apar.min + 2 * eps for apar in likelihood.child_active_params],
            [apar.max - 2 * eps for apar in likelihood.child_active_params],
        )

    elif not bounds:
        bounds = (-np.inf, np.inf)

    res = _curve_fit(
        model,
        xdata=np.linspace(0, 1, len(likelihood.data)),
        ydata=likelihood.data,
        p0=x0,
        sigma=likelihood.sigma,
        bounds=bounds,
        **kwargs,
    )

    return res
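Note that the xdata passed to curve_fit is a dummy axis: the model closure ignores x and evaluates the likelihood's model purely from the parameter vector. A minimal standalone sketch of the same trick (hypothetical data and model, not this library's API):

import numpy as np
from scipy.optimize import curve_fit

data = np.array([1.0, 2.1, 2.9])

def model(x, a):
    # x is ignored; curve_fit only needs the output shape to match ydata
    return a * np.array([1.0, 2.0, 3.0])

popt, pcov = curve_fit(model, xdata=np.linspace(0, 1, len(data)), ydata=data, p0=[1.0])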
Example #7
    def compute_analytic_error_bars(self,
                                    epsilon,
                                    delta,
                                    r_0,
                                    p0=[0.5, 0.5, 0.98]):
        """
        Compute error bars on RB fit parameters, including the RB decay rate
        using the quasi-analytic methods provided in W&F.

        *At present, this method is not fully supported.*

        Parameters
        ----------
        epsilon : float
            Specifies desired confidence interval half-width for each average
            survival probability estimate \hat{F}_m (See W&F Eq. 8).  
            E.g., epsilon = 0.01 means that the confidence interval width for
            \hat{F}_m is 0.02. See `create_K_m_sched` for further details.
    
        delta : float
            Specifies desired confidence level for confidence interval
            specified by epsilon.  delta = 1-0.6827 corresponds to a
            confidence level of 1 sigma.  (See W&F Eq. 8).  The smaller
            delta is, the larger each value of K_m will be. See 
            `create_K_m_sched` for further details.
    
        r_0 : float
            Estimate of upper bound of the RB number for the system in 
            question. The smaller r is, the smaller each value of K_m will be.
            However, if the system's actual RB number is larger than r_0, then
            the W&F-derived error bars cannot be assumed to be valid.  Addition
            ally, it is assumed that m_max*r_0 << 1.  See `create_K_m_sched`
            for further details.

        p0 : list, optional
            A list of [A,B,f] parameters to seed the RB curve fitting (matching
            the order in which the error bars are extracted below).  Usually
            the default values are fine.

        Returns
        -------
        None
        """
        print("WARNING: ANALYTIC BOOSTRAP ERROR BAR METHOD NOT YET" +
              "GUARANTEED TO BE STABLE.")
        print("ERROR BARS ONLY FOR ZEROTH ORDER FIT.")
        print('Processesing analytic bootstrap, following Wallman and' +
              'Flammia.\nThe error bars are reliable if and only if the' +
              'schedule for K_m has been chosen appropriately, given:')
        print('delta =', delta)
        print('epsilon =', epsilon)
        print('r_0 =', r_0)

        # W&F error bars are derived for Clifford gate-string data
        gstyp_list = ['clifford']

        for gstyp in gstyp_list:
            Ns = _np.array(self.dicts[gstyp]['counts'])
            sigma_list = _np.sqrt(epsilon**2 + 1. / Ns)
            results = _curve_fit(_rbutils.standard_fit_function,
                                 self.dicts[gstyp]['lengths'],
                                 self.dicts[gstyp]['successes'],
                                 p0=p0,
                                 sigma=sigma_list)
        self.dicts[gstyp]['WF fit full results'] = results
        self.dicts[gstyp]['A_error_WF'] = _np.sqrt(results[1][0, 0])
        self.dicts[gstyp]['B_error_WF'] = _np.sqrt(results[1][1, 1])
        self.dicts[gstyp]['f_error_WF'] = _np.sqrt(results[1][2, 2])
        self.dicts[gstyp]['r_error_WF'] = (self.d-1.)/self.d  \
                                   * self.dicts[gstyp]['f_error_WF']

        print("Analytic error bars computed.  Use print methods to access.")
Example #8
def custom_least_squares_fit(lengths,
                             asps,
                             n,
                             a=None,
                             b=None,
                             seed=None,
                             rtype='EI'):
    """
    Fits RB average success probabilities to the exponential decay a + Bp^m using least-squares fitting.

    Parameters
    ----------
    lengths : list
        The RB lengths to fit to (the 'm' values in a + Bp^m).

    asps : list
        The average survival probabilities to fit (the observed P_m values to fit
        to P_m = a + Bp^m).

    n : int
        The number of qubits the data was generated from.

    a : float, optional
        If not None, a value to fix a to.

    b : float, optional
        If not None, a value to fix b to.

    seed : list, optional
        Seeds for the variables in the fit, in the order [a,b,p] (with a and/or b
        dropped if set to a fixed value).

    rtype : {'EI','AGI'}, optional
        The RB error rate rescaling convention. 'EI' results in RB error rates that are associated
        with the entanglement infidelity, which is the error probability with stochastic errors (and
        is equal to the diamond distance). 'AGI' results in RB error rates that are associated with
        average gate infidelity.

    Returns
    -------
    Dict
        The fit results. If the item with the key 'success' is False, the fit has failed.
    """
    seed_dict = {}
    variable = {}
    variable['a'] = True
    variable['b'] = True
    variable['p'] = True
    lengths = _np.array(lengths, _np.int64)
    asps = _np.array(asps, 'd')

    # The fit to do if a fixed value for a is given
    if a is not None:

        variable['a'] = False

        if b is not None:

            variable['b'] = False

            def curve_to_fit(m, p):
                return a + b * p**m

            if seed is None:
                seed = 0.9
                seed_dict['a'] = None
                seed_dict['b'] = None
                seed_dict['p'] = seed

            try:
                fitout, junk = _curve_fit(curve_to_fit,
                                          lengths,
                                          asps,
                                          p0=seed,
                                          bounds=([0.], [1.]))
                p = fitout[0]
                success = True
            except Exception:
                success = False

        else:

            def curve_to_fit(m, b, p):
                return a + b * p**m

            if seed is None:
                seed = [1. - a, 0.9]
                seed_dict['a'] = None
                seed_dict['b'] = 1. - a
                seed_dict['p'] = 0.9
            try:
                fitout, junk = _curve_fit(curve_to_fit,
                                          lengths,
                                          asps,
                                          p0=seed,
                                          bounds=([-_np.inf,
                                                   0.], [+_np.inf, 1.]))
                b = fitout[0]
                p = fitout[1]
                success = True
            except Exception:
                success = False

    # The fit to do if a fixed value for a is not given
    else:

        if b is not None:

            variable['b'] = False

            def curve_to_fit(m, a, p):
                return a + b * p**m

            if seed is None:
                seed = [1 / 2**n, 0.9]
                seed_dict['a'] = 1 / 2**n
                seed_dict['b'] = None
                seed_dict['p'] = 0.9

            try:
                fitout, junk = _curve_fit(curve_to_fit,
                                          lengths,
                                          asps,
                                          p0=seed,
                                          bounds=([0., 0.], [1., 1.]))
                a = fitout[0]
                p = fitout[1]
                success = True
            except Exception:
                success = False

        else:

            def curve_to_fit(m, a, b, p):
                return a + b * p**m

            if seed is None:
                seed = [1 / 2**n, 1 - 1 / 2**n, 0.9]
                seed_dict['a'] = 1 / 2**n
                seed_dict['b'] = 1 - 1 / 2**n
                seed_dict['p'] = 0.9

            try:
                fitout, junk = _curve_fit(curve_to_fit,
                                          lengths,
                                          asps,
                                          p0=seed,
                                          bounds=([0., -_np.inf,
                                                   0.], [1., +_np.inf, 1.]))
                a = fitout[0]
                b = fitout[1]
                p = fitout[2]
                success = True
            except Exception:
                success = False

    estimates = {}
    if success:
        estimates['a'] = a
        estimates['b'] = b
        estimates['p'] = p
        estimates['r'] = _rbt.p_to_r(p, 2**n, rtype)

    results = {}
    results['estimates'] = estimates
    results['variable'] = variable
    results['seed'] = seed_dict
    # Todo : fix this.
    results['success'] = success

    return results
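A minimal usage sketch on synthetic single-qubit data (hypothetical numbers; it assumes the module-level imports the function relies on, _np and _rbt, are in place):

lengths = [0, 2, 4, 8, 16, 32, 64]
n = 1
p_true = 0.97
asps = [1 / 2**n + (1 - 1 / 2**n) * p_true**m for m in lengths]

out = custom_least_squares_fit(lengths, asps, n)
if out['success']:
    print(out['estimates']['p'], out['estimates']['r'])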
Example #9
def custom_least_squares_data_fitting(lengths, ASPs, n, A=None, B=None, seed=None, rtype='EI'):
    """
    Fits RB average success probabilities to the exponential decay A + Bp^m using least-squares fitting.

    Parameters
    ----------
    lengths : list
        The RB lengths to fit to (the 'm' values in A + Bp^m).

    ASPs : list
        The average survival probabilities to fit (the observed P_m values to fit
        to P_m = A + Bp^m).

    n : int
        The number of qubits the data is on.

    A : float, optional
        If not None, a value to fix A to.

    B : float, optional
        If not None, a value to fix B to.

    seed : list, optional
        Seeds for variables in the fit, in the order [A,B,p] (with A and/or B dropped if it is set
        to a fixed value).

    rtype : {'EI','AGI'}, optional
        The RB error rate rescaling convention. 'EI' results in RB error rates that are associated
        with the entanglement infidelity, which is the error probability with stochastic errors (and
        is equal to the diamond distance). 'AGI' results in RB error rates that are associated with
        average gate infidelity.

    Returns
    -------
    Dict
        The fit results. If the item with the key 'success' is False, the fit has failed.
    """
    seed_dict = {}
    variable = {}
    variable['A'] = True
    variable['B'] = True
    variable['p'] = True
    lengths = _np.array(lengths, int)
    ASPs = _np.array(ASPs, 'd')

    # The fit to do if a fixed value for A is given
    if A is not None:

        variable['A'] = False

        if B is not None:

            variable['B'] = False

            def curve_to_fit(m, p):
                return A + B * p**m

            if seed is None:
                seed = 0.9
                seed_dict['A'] = None
                seed_dict['B'] = None
                seed_dict['p'] = seed

            try:
                fitout, junk = _curve_fit(curve_to_fit, lengths, ASPs, p0=seed, bounds=([0.], [1.]))
                p = fitout[0]
                success = True
            except Exception:
                success = False

        else:

            def curve_to_fit(m, B, p):
                return A + B * p**m

            if seed is None:
                seed = [1. - A, 0.9]
                seed_dict['A'] = None
                seed_dict['B'] = 1. - A
                seed_dict['p'] = 0.9
            try:
                fitout, junk = _curve_fit(curve_to_fit, lengths, ASPs, p0=seed, bounds=([-_np.inf, 0.], [+_np.inf, 1.]))
                B = fitout[0]
                p = fitout[1]
                success = True
            except Exception:
                success = False

    # The fit to do if a fixed value for A is not given
    else:

        if B is not None:

            variable['B'] = False

            def curve_to_fit(m, A, p):
                return A + B * p**m

            if seed is None:
                seed = [1 / 2**n, 0.9]
                seed_dict['A'] = 1 / 2**n
                seed_dict['B'] = None
                seed_dict['p'] = 0.9

            try:
                fitout, junk = _curve_fit(curve_to_fit, lengths, ASPs, p0=seed, bounds=([0., 0.], [1., 1.]))
                A = fitout[0]
                p = fitout[1]
                success = True
            except Exception:
                success = False

        else:

            def curve_to_fit(m, A, B, p):
                return A + B * p**m

            if seed is None:
                seed = [1 / 2**n, 1 - 1 / 2**n, 0.9]
                seed_dict['A'] = 1 / 2**n
                seed_dict['B'] = 1 - 1 / 2**n
                seed_dict['p'] = 0.9

            try:
                fitout, junk = _curve_fit(curve_to_fit, lengths, ASPs, p0=seed,
                                          bounds=([0., -_np.inf, 0.], [1., +_np.inf, 1.]))
                A = fitout[0]
                B = fitout[1]
                p = fitout[2]
                success = True
            except Exception:
                success = False

    estimates = {}
    if success:
        estimates['A'] = A
        estimates['B'] = B
        estimates['p'] = p
        estimates['r'] = _rbt.p_to_r(p, 2**n, rtype)

    results = {}
    results['estimates'] = estimates
    results['variable'] = variable
    results['seed'] = seed_dict
    # Todo : fix this.
    results['success'] = success

    return results
Example #10
def fofRH_from_dry_wet_scattering(scatt_dry, scatt_wet, RH_dry, RH_wet, data_period=60, return_fits=False, verbose=False):
    """
    This function was originally written for ARM's AOS dry wet nephelometer proceedure. Programming will likely be needed to make it work for something else.

    Notes
    -----
    For each RH scan in the wet nephelometer an experimental f_RH curve is created by deviding
    scatt_wet by scatt_dry. This curve is then fit by a gamma as well as a kappa parametrizaton.
    Here the dry nephelometer is NOT considered as RH = 0 but its actuall RH (averaged over the
    time of the scann) is considered. I was hoping that this will eliminated a correlation between
    "the ratio" and the dry nephelometer's RH ... it didn't :-(

    Parameters
    ----------
    scatt_dry:  TimeSeries
    scatt_wet:  TimeSeries
    RH_dry:     TimeSeries
    RH_wet:     TimeSeries
    data_period: int,float
        measurement frequency. Might only be needed if return_fits = True ... double check
    return_fits: bool
        If the not just the fit results but also the corresponding curves are going to be returned

    Returns
    -------
    pandas.DataFrame containing the fit results
    pandas.DataFrame containing the fit curves (only if retun_fits == True)

    """

    # some modification to the kappa function so it can be used in the fit routine later
    f_RH_kappa_RH0 = lambda RH0: (lambda RH, k: f_RH_kappa(RH, k, RH0))
    f_RH_gamma_RH0 = lambda RH0: (lambda RH, g: f_RH_gamma(RH, g, RH0))
    # create the f(RH)/f(RH0) ratio


    f_RH = scatt_wet / scatt_dry
    f_RH.data.columns = ['f_RH']

    # get the start time for the next RH ramp (it always starts a few minutes after the full hour)
    start, end = f_RH.get_timespan()
    start_first_section = _np.datetime64('{:d}-{:02d}-{:02d} {:02d}:00:00'.format(start.year, start.month, start.day, start.hour))

    if (start.minute + start.second + start.microsecond) > 0:
        start_first_section += _np.timedelta64(1, 'h')

    # select one hour starting with time defined above. Also align/merge dry and wet RH to it
    i = -1
    fit_res_list = []
    results = _pd.DataFrame(columns=['kappa',
                                     'kappa_std',
                                     'f_RH_85_kappa',
                                     'f_RH_85_kappa_std',
                                     #                                   'f_RH_85_kappa_errp',
                                     #                                   'f_RH_85_kappa_errm',
                                     'gamma',
                                     'gamma_std',
                                     'f_RH_85_gamma',
                                     'f_RH_85_gamma_std',
                                     'wet_neph_max',
                                     'dry_neph_mean',
                                     'dry_neph_std',
                                     'wet_neph_min'], dtype = _np.float64)
    while i < 30:
        i += 1
        # stop if section end is later than end of file
        section_start = start_first_section + _np.timedelta64(i, 'h')
        section_end = start_first_section + _np.timedelta64(i, 'h') + _np.timedelta64(45, 'm')

        if (end - section_end) < _np.timedelta64(0, 's'):
            break

        if verbose:
            print('================')
            print('start of section: ', section_start)
            print('end of section: ', section_end)


        try:
            section = f_RH.zoom_time(section_start, section_end)
        except IndexError:
            if verbose:
                print('section has no data in it!')
            results.loc[section_start] = _np.nan
            continue



        df = section.data.copy().dropna()
        if df.shape[0] < 2:
            if verbose:
                print('no data in section.dropna()!')
            results.loc[section_start] = _np.nan
            continue


        section = section.merge(RH_wet)
        section = section.merge(RH_dry).data

        # this is needed to get the best parameterization
        dry_neph_mean = section.RH_NephVol_Dry.mean()
        dry_neph_std = section.RH_NephVol_Dry.std()
        wet_neph_min = section.RH_NephVol_Wet.min()
        wet_neph_max = section.RH_NephVol_Wet.max()
        # clean up
        section.dropna(inplace=True)
        section = section[section.f_RH != _np.inf]
        section = section[section.f_RH != -_np.inf]
        timestamps = section.index.copy()
        section.index = section.RH_NephVol_Wet
        section.drop('RH_NephVol_Wet', axis=1, inplace=True)
        section.drop('RH_NephVol_Dry', axis=1, inplace=True)

        # fitting!!
        if dry_neph_mean > wet_neph_max:
            if verbose:
                print('dry_neph_mean > wet_neph_max!!! something wrong with dry neph!!')
            results.loc[section_start] = _np.nan
            continue

        try:
            kappa, [k_varience] = _curve_fit(f_RH_kappa_RH0(dry_neph_mean), section.index.values, section.f_RH.values)
            # gamma, [varience] = curve_fit(gamma_paramterization, section.index.values, section.f_RH.values)
            gamma, [varience] = _curve_fit(f_RH_gamma_RH0(dry_neph_mean), section.index.values, section.f_RH.values)
        except Exception:
            # fit failed; record NaN for this section and move on
            if verbose:
                print('fit failed in this section!')
            results.loc[section_start] = _np.nan
            continue

        frame_this = {'kappa': kappa[0],
                      'kappa_std': _np.sqrt(k_varience[0]),
                      'f_RH_85_kappa': f_RH_kappa(85, kappa[0]),
                      'f_RH_85_kappa_std': - f_RH_kappa(85, kappa[0]) + f_RH_kappa(85, kappa[0] + _np.sqrt(k_varience[0])),
                      #         'f_RH_85_kappa_errp': f_RH_kappa(85, kappa[0] + _np.sqrt(k_varience[0])),
                      #         'f_RH_85_kappa_errm': f_RH_kappa(85, kappa[0] - _np.sqrt(k_varience[0])),
                      'gamma': gamma[0],
                      'gamma_std': _np.sqrt(varience[0]),
                      'f_RH_85_gamma': f_RH_gamma(85, gamma[0]),
                      'f_RH_85_gamma_std': - f_RH_gamma(85, gamma[0]) + f_RH_gamma(85, gamma[0] + _np.sqrt(varience[0])),
                      'dry_neph_mean': dry_neph_mean,
                      'dry_neph_std': dry_neph_std,
                      'wet_neph_min': wet_neph_min,
                      'wet_neph_max': wet_neph_max}

        results.loc[section_start] = frame_this

        if return_fits:
            # plotting preparation
            RH = section.index.values
            #     RH = _np.linspace(0,100,20)
            fit = f_RH_gamma_RH0(dry_neph_mean)(RH, gamma)
            fit_std_p = f_RH_gamma_RH0(dry_neph_mean)(RH, gamma + _np.sqrt(varience))
            fit_std_m = f_RH_gamma_RH0(dry_neph_mean)(RH, gamma - _np.sqrt(varience))

            fit_k = f_RH_kappa_RH0(dry_neph_mean)(RH, kappa)
            fit_k_std_p = f_RH_kappa_RH0(dry_neph_mean)(RH, kappa + _np.sqrt(k_varience))
            fit_k_std_m = f_RH_kappa_RH0(dry_neph_mean)(RH, kappa - _np.sqrt(k_varience))

            df['fit_gamma'] = _pd.Series(fit, index=df.index)
            df['fit_gamma_stdp'] = _pd.Series(fit_std_p, index=df.index)
            df['fit_gamma_stdm'] = _pd.Series(fit_std_m, index=df.index)

            df['fit_kappa'] = _pd.Series(fit_k, index=df.index)
            df['fit_kappa_stdp'] = _pd.Series(fit_k_std_p, index=df.index)
            df['fit_kappa_stdm'] = _pd.Series(fit_k_std_m, index=df.index)
            fit_res_list.append(df)

    if results.shape[0] == 0:
        results.loc[start] = _np.nan
    results = _timeseries.TimeSeries(results)
    results._data_period = 3600

    if return_fits:
        fit_res = _pd.concat(fit_res_list).sort_index()
        ts = _timeseries.TimeSeries(fit_res)
        ts._data_period = data_period
        fit_res = ts.close_gaps()
        return results, fit_res
    else:

        return results
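f_RH_kappa and f_RH_gamma are defined elsewhere in this module. A hedged sketch of common forms consistent with how they are called above (two-argument calls imply a default reference RH0; the module's actual definitions may differ):

def f_RH_gamma(RH, g, RH0=0):
    # gamma parametrization, normalized to the reference humidity RH0
    return ((100. - RH0) / (100. - RH)) ** g

def f_RH_kappa(RH, k, RH0=0):
    # kappa parametrization of the scattering enhancement, normalized to RH0
    return (1 + k * RH / (100. - RH)) / (1 + k * RH0 / (100. - RH0))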
Example #11
def rb_decay_rate(dataset,
                  showPlot=False,
                  xlim=None,
                  ylim=None,
                  saveFigPath=None):
    """
    Compute the Randomized Benchmarking (RB) decay rate given a data set
    containing counts for RB gate strings.  Note: currently this function
    only works for 1-qubit datasets having SPAM labels 'plus' and 'minus'.

    Parameters
    ----------
    dataset : DataSet
      The RB data set.
    
    showPlot : bool, optional
       Whether to show a plot of the fit to the RB data.

    xlim : (xmin,xmax), optional
       Specify x-axis limits for plot

    ylim : (ymin,ymax), optional
       Specify y-axis limits for plot

    saveFigPath : string, optional
       Pathname to save a plot of the fit to the RB data.

    Returns
    -------
    a,b : float
       The best-fit decay curve parameters a and b, as defined in 
       the rb_decay function.
    """
    RBlengths = []
    RBsuccesses = []
    for key in dataset.keys():
        dataLine = dataset[key]
        plus = dataLine['plus']
        minus = dataLine['minus']
        N = plus + minus
        RBlengths.append(len(key))
        RBsuccesses.append(1 - dataLine['plus'] / float(N))
        if dataLine['plus'] / float(N) > 1:
            print(key)
    a, b = _curve_fit(rb_decay, RBlengths, RBsuccesses)[0]
    if saveFigPath or showPlot:
        newplot = _plt.figure()
        newplotgca = newplot.gca()
        newplotgca.plot(RBlengths, RBsuccesses, '.')
        newplotgca.plot(range(max(RBlengths)),
                        rb_decay(_np.arange(max(RBlengths)), a, b), '+')
        newplotgca.set_xlabel('RB sequence length (non-Clifford)')
        newplotgca.set_ylabel('Success rate')
        newplotgca.set_title('RB success')
        if xlim:
            _plt.xlim(xlim)
        if ylim:
            _plt.ylim(ylim)
    if saveFigPath:
        newplot.savefig(saveFigPath)
    return a, b
Example #12
    def compute_mean_residence_time(self,
                                    nb_blocks=1,
                                    filter_artifacts=False,
                                    per_residue=False,
                                    return_average_time=False,
                                    use_filtered=True):
        if use_filtered: results = self.filtered_results
        else: results = self.initial_results

        def func(x, tau, lamda):
            return _np.exp(-(x / tau)**lamda)

        all_residence_time = []
        all_average_time = []
        block_length = int(self.nb_frames / nb_blocks)
        xdata = _np.arange(1, block_length + 1)
        for run in range(nb_blocks):
            s = slice(run * block_length, (run + 1) * block_length, 1)
            intervals = {
                key: _hf.intervals_binary(results[key][s])
                for key in results
            }
            residence_time = _np.zeros(block_length)
            average_time = _np.array([0., 0.])

            if per_residue:
                residence_time = {}
                del_res = []
                average_time = {}
                for pair in results:
                    segn, resn, resi, _, _, _ = _hf.deconst_key(pair, True)
                    residue_key = segn + '-' + resn + '-' + str(resi)
                    work_intervals = intervals[pair]
                    interval_lengths = _np.diff(work_intervals,
                                                1).astype(int).flatten()
                    if filter_artifacts:
                        interval_lengths = interval_lengths[(interval_lengths <
                                                             block_length)]
                    try:
                        average_time[residue_key] += _np.array(
                            [interval_lengths.sum(), interval_lengths.size])
                    except KeyError:
                        average_time[residue_key] = _np.array(
                            [interval_lengths.sum(), interval_lengths.size])
                        residence_time[residue_key] = _np.zeros(block_length)
                    for l in interval_lengths:
                        residence_time[residue_key][:l] += _np.arange(
                            1, l + 1)[::-1]

                for residue_key in residence_time:
                    residence_time[residue_key] /= _np.arange(
                        1, block_length + 1)[::-1]
                    residence_time[residue_key] /= residence_time[residue_key][
                        0]
                    if _np.isnan(residence_time[residue_key]).any():
                        del_res.append(residue_key)
                    average_time[residue_key] = average_time[residue_key][
                        0] / average_time[residue_key][1]
                    try:
                        (tau,
                         lamda), pcov = _curve_fit(func,
                                                   xdata,
                                                   residence_time[residue_key],
                                                   p0=(10.0, 0.5))
                        residence_time[residue_key] = tau / lamda * _gamma(
                            1 / lamda)
                    except Exception:
                        if average_time[residue_key] <= 2.0:
                            residence_time[residue_key] = 1.0
                        else:
                            try:
                                (tau, lamda), pcov = _curve_fit(
                                    func,
                                    xdata,
                                    residence_time[residue_key],
                                    p0=(block_length, 0.5))
                                residence_time[
                                    residue_key] = tau / lamda * _gamma(
                                        1 / lamda)
                            except Exception:
                                residence_time[residue_key] = block_length
                for key in del_res:
                    del residence_time[key]

            else:
                for water in results:
                    work_intervals = intervals[water]
                    interval_lengths = _np.diff(work_intervals,
                                                1).astype(int).flatten()
                    if filter_artifacts:
                        interval_lengths = interval_lengths[(interval_lengths <
                                                             block_length)]
                    average_time += _np.array(
                        [interval_lengths.sum(), interval_lengths.size])
                    for l in interval_lengths:
                        residence_time[:l] += _np.arange(1, l + 1)[::-1]
                residence_time /= _np.arange(1, block_length + 1)[::-1]
                residence_time /= residence_time[0]
                average_time = average_time[0] / average_time[1]
                try:
                    (tau, lamda), pcov = _curve_fit(func,
                                                    xdata,
                                                    residence_time,
                                                    p0=(10.0, 0.5))
                    residence_time = tau / lamda * _gamma(1 / lamda)
                except Exception:
                    if average_time <= 2.0:
                        residence_time = 1.0
                    else:
                        residence_time = block_length
            all_residence_time.append(residence_time)
            all_average_time.append(average_time)
        if per_residue:
            for key in residence_time:
                temp_residence_time = []
                temp_average_time = []
                for residence_dict in all_residence_time:
                    try:
                        temp_residence_time.append(residence_dict[key])
                    except KeyError:
                        pass
                for average_dict in all_average_time:
                    try:
                        temp_average_time.append(average_dict[key])
                    except KeyError:
                        pass
                residence_time[key] = (_np.mean(temp_residence_time),
                                       _np.std(temp_residence_time) /
                                       _np.sqrt(nb_blocks))
                average_time[key] = (_np.mean(temp_average_time),
                                     _np.std(temp_average_time) /
                                     _np.sqrt(nb_blocks))
        else:
            residence_time = (_np.mean(all_residence_time),
                              _np.std(all_residence_time) /
                              _np.sqrt(nb_blocks))
            average_time = (_np.mean(all_average_time),
                            _np.std(all_average_time) / _np.sqrt(nb_blocks))

        if return_average_time: return residence_time, average_time
        else: return residence_time
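Why tau / lamda * _gamma(1 / lamda): for the stretched-exponential survival function S(t) = exp(-(t/tau)**lamda) fitted above, the mean residence time is the integral of S(t) from 0 to infinity, which evaluates to (tau/lamda)*Gamma(1/lamda). A quick numerical check (assumes scipy):

import numpy as np
from scipy.special import gamma
from scipy.integrate import quad

tau, lamda = 10.0, 0.5
numeric, _ = quad(lambda t: np.exp(-(t / tau) ** lamda), 0, np.inf)
analytic = tau / lamda * gamma(1 / lamda)
# numeric == analytic == 20.0 (up to quadrature error)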
Example #13
def calculatePeakDisplacements(intensityProfiles, peakFitSettings, progressReporter=None, pInitial=None, **curveFitKwargs):
    """
    Fits an ODM FitFunction to the target Series of intensity profiles.
    
    Parameters
    ----------
    
    intensityProfiles : pandas.Series of 1D numpy.ndarray
        A series of intensityProfiles that will be curve fit
    peakFitSettings : ODAFitSettings instance
        The curve fit settings to use for curve fitting
    progressReporter : ProgressReporter instance
        The ProgressReporter to use for displaying progress information.
        A StdOutProgressReporter is used by default.
    pInitial : sequence, optional
        Initial curve fit parameters; if None, they are estimated from the
        reference intensity profile (or the first profile).
    curveFitKwargs : Keyword arguments that will be passed to the curve_fit
        function (scipy.optimize).
    
    
    Returns
    -------

    A dataframe with the calculated displacements that has the same index as the input
    intensity profile Series.
    """
    
    if not progressReporter:
        progressReporter = _StdOutProgressReporter()
    
    fitFunction = peakFitSettings.fitFunction
    index=intensityProfiles.index
    
    if pInitial is not None:        
        p0 = pInitial
    else:
        templateProfile = peakFitSettings.referenceIntensityProfile if peakFitSettings.referenceIntensityProfile is not None else intensityProfiles.iloc[0]
        estimatesDict = fitFunction.estimateInitialParameters(templateProfile, **peakFitSettings.estimatorValuesDict)
        p0 = list(estimatesDict.values())  # dict view -> list so curve_fit can index it
        
    xmin = peakFitSettings.xminBound
    xmax = peakFitSettings.xmaxBound
    xdata = _np.arange(len(intensityProfiles.iloc[0]))[xmin:xmax]
    
    progress = 0.0
    total = len(index)
    curveFitResults = total*[None]
    for i in range(total):
        ydata = intensityProfiles.iloc[i][xmin:xmax]
        popt, pcov = _curve_fit(fitFunction, xdata=xdata, ydata=ydata,
                                p0=p0, **curveFitKwargs)
        p0 = popt  # warm-start the next fit with this profile's solution

        curveFitResult = {}
        curveFitResult['popt'] = popt
        curveFitResult['pcov'] = pcov
        curveFitResult['chiSquare'] = _chisquare(ydata, fitFunction(xdata, *popt))[0]
        curveFitResult['curveFitResult'] = attrdict.AttrDict(curveFitResult)
        curveFitResult['displacement'] = fitFunction.getDisplacement(*popt)

        curveFitResults[i] = curveFitResult

        progress += 1
        progressReporter.progress(progress / total * 100)
    
    df = _pd.DataFrame(index=index,data=curveFitResults)
    
    progressReporter.done()
    
    return df
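The loop above warm-starts each fit with the previous profile's solution (p0 = popt), which speeds up and stabilizes tracking of slowly moving peaks. A minimal standalone illustration of the same pattern (hypothetical 1D Gaussian peak, not the ODM fit function):

import numpy as _np
from scipy.optimize import curve_fit as _curve_fit

def peak(x, a, x0, w):
    return a * _np.exp(-0.5 * ((x - x0) / w) ** 2)

xdata = _np.arange(64, dtype=float)
p0 = [1.0, 32.0, 3.0]
for shift in (0.0, 0.2, 0.4):  # successive "frames"
    ydata = peak(xdata, 1.0, 32.0 + shift, 3.0)
    popt, pcov = _curve_fit(peak, xdata, ydata, p0=p0)
    p0 = popt  # warm start for the next frame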
Example #14
def graph_it(x,
             y,
             graph_type=None,
             x_error=0,
             y_error=0,
             title="",
             x_title="",
             y_title="",
             size=(20, 10),
             sig_digi=3,
             coeff_x=0.8,
             coeff_y=0.8,
             error_fill_bet=True,
             plot_residuals=True,
             show_chi=True,
             coeff_text=(''),
             resid_x_error=0,
             resid_y_error=0,
             y_scale="",
             x_scale="",
             extra_code_main='',
             extra_code_residuals=''):
    """
    Plot and Customize two 1D arrays.
    :param x: Horizontal axis data.
    :param y: Vertical axis data.
    :param graph_type: Formula of fit. Example: lambda x,a,b: a*x+b.
    :param x_error: X Error bar size.
    :param y_error: Y Error bar size.
    :param title: Graphs title.
    :param x_title: X axis title.
    :param y_title: Y axis title.
    :param size: Graphs size.
    :param sig_digi: Significant digits length.
    :param coeff_x: X position of the legend.
    :param coeff_y: Y position of the legend.
    :param error_fill_bet: Fills a shaded ±1-sigma confidence band around the fit curve. Default is On.
    :param plot_residuals: Plots the residuals as another graph under the Data graph.
    :param show_chi: Show reduced chi-squared result.
    :param coeff_text: Tuple of units for graphs legend.
    :param resid_x_error: X residuals error bar size.
    :param resid_y_error: Y residuals error bar size.
    :param y_scale: An array of two iterables, one for the actual rescaling and one for labels;
                    most commonly used with generatePiAxis().
    :param x_scale: An array of two iterables, one for the actual rescaling and one for labels;
                    most commonly used with generatePiAxis().
    :param extra_code_main: Runs extra script after the main graph plot.
    :param extra_code_residuals: Runs extra script after the residuals graph plot.
    :return: A dict containing the figures, the fitted parameters ('params', as ufloats), and
             chi-squared results when a fit was performed.
    """
    _plt.rc('text', usetex=False)
    fig, ax = _plt.subplots(figsize=size)
    tick_fine = 0
    x = _np.array(x)
    y = _np.array(y)
    x_error = _np.array(x_error)
    y_error = _np.array(y_error)
    graph_dict = {}

    if graph_type is not None:
        popt, pcov = _curve_fit(graph_type, x, y, maxfev=100000)
        sigma_ab = _np.sqrt(_np.diagonal(pcov))  # type: _np.ndarray
        x_model = _expand_linspace(x.min(), x.max(), len(x) * 3)
        _plt.plot(x_model, graph_type(x_model, *popt), 'black')
        _plt.errorbar(x,
                      y,
                      xerr=x_error,
                      yerr=y_error,
                      fmt='s',
                      color='b',
                      visible=False,
                      alpha=0.6,
                      capsize=10,
                      capthick=0.5,
                      ecolor='k')
        if show_chi:
            if not y_error.any():
                print(
                    "No stat. error data provided, skipping chi squared calculation"
                )
            else:
                chi = _chi_squared(x, y, popt, y_error, graph_type)
                title += "\n$\\chi^2 = %s$" % str(chi)
                graph_dict['chi2'] = chi
                graph_dict['p-value'] = 1 - _chi2.cdf(chi, 1)
        if error_fill_bet:
            bound_upper = graph_type(x_model, *(popt + sigma_ab))
            bound_lower = graph_type(x_model, *(popt - sigma_ab))
            # plotting the confidence intervals
            _plt.fill_between(x_model,
                              bound_lower,
                              bound_upper,
                              color='midnightblue',
                              alpha=0.15)
        if coeff_text != ():
            coeff_text = list(coeff_text)
            coeff_text += [''] * (len(popt) - len(coeff_text))
            text_res = ""
            for i, param in enumerate(popt):
                text_res += ((graph_type.__code__.co_varnames[i + 1]) +
                             str((" = {:." + str(sig_digi) +
                                  "u}").format(_ufloat(popt[i], sigma_ab[i])) +
                                 coeff_text[i] + "\n"))
            _plt.text(coeff_x,
                      coeff_y,
                      text_res[:-2],
                      transform=ax.transAxes,
                      fontsize=20,
                      bbox=dict(boxstyle='round', facecolor='grey', alpha=0.5),
                      family='DejaVu Sans')
        tick_fine = 1

    _plt.scatter(x,
                 y,
                 facecolor='red',
                 marker='s',
                 edgecolor='black',
                 s=70,
                 alpha=1)
    _sns.set_style("whitegrid")
    ax.set_title(title)
    ax.set_ylabel(y_title)
    ax.set_xlabel(x_title)

    if x_scale != "":
        ax.set_xticks(x_scale[0])
        ax.set_xticklabels(x_scale[1])
        for tick in ax.xaxis.get_major_ticks():
            tick.label.set_fontsize(25)
    if y_scale != "":
        ax.set_yticks(y_scale[0])
        ax.set_yticklabels(y_scale[1])
        for tick in ax.yaxis.get_major_ticks():
            tick.label.set_fontsize(25)

    if tick_fine == 1:
        ax.set_xlim(x_model.min(), x_model.max())
    else:
        ax.set_xlim(
            _expand_linspace(x.min(), x.max(),
                             len(x) * 3).min(),
            _expand_linspace(x.min(), x.max(),
                             len(x) * 3).max())
    exec(extra_code_main)
    graph_dict['main_graph'] = (fig, ax)

    if plot_residuals and graph_type is not None:
        residuals = y - graph_type(x, *popt)
        fig, ax = _plt.subplots(figsize=(20, 5))
        _plt.scatter(x,
                     residuals,
                     facecolor='red',
                     marker='s',
                     edgecolor='black',
                     s=70,
                     alpha=1)
        ax.errorbar(x,
                    residuals,
                    xerr=resid_x_error,
                    fmt='s',
                    yerr=residuals * resid_y_error,
                    marker='s',
                    visible=False,
                    alpha=0.6,
                    capsize=10,
                    capthick=0.5,
                    ecolor='k')
        ax.set_xlim(x_model.min(), x_model.max())
        ax.set_ylim(residuals.min() - _np.abs(residuals.min() * 0.5),
                    residuals.max() + _np.abs(residuals.max() * 0.5))
        ax.set_title("Residuals Plot of " + title)
        ax.set_ylabel(y_title)
        ax.set_xlabel(x_title)
        graph_dict['resid_graph'] = (fig, ax)
    _mplcursors.cursor(hover=True)

    exec(extra_code_residuals)
    if tick_fine == 1:
        graph_dict['params'] = [_ufloat(popt[i], sigma_ab[i])
                                for i in range(len(popt))]
    return graph_dict
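A minimal usage sketch (hypothetical data; it assumes the module-level imports the function relies on — _np, _plt, _sns, _mplcursors, _ufloat, _chi_squared, _chi2 and _expand_linspace — are in place):

import numpy as np

x = np.linspace(0, 10, 20)
y = 3.0 * x + 1.0 + np.random.normal(0, 0.5, x.size)
out = graph_it(x, y,
               graph_type=lambda x, a, b: a * x + b,
               y_error=np.full(x.size, 0.5),
               title='Linear fit demo',
               coeff_text=(' [m/s]', ' [m]'))
print(out['params'])  # [a +/- sigma_a, b +/- sigma_b] as ufloats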