Example #1
def process_single(pars: GammaAnalysisParams,
                   overwrite_transition=False,
                   overwrite_entropy=False):
    """Does all the processing necessary for a single GammaAnalysisParams (i.e. csq mapping, transition fitting,
    entropy fitting and setting integration info"""
    if pars.csq_mapped:
        calculate_csq_map(pars.entropy_datnum,
                          csq_datnum=pars.csq_datnum,
                          overwrite=overwrite_entropy)
        if pars.transition_only_datnum:
            calculate_csq_map(pars.transition_only_datnum,
                              csq_datnum=pars.csq_datnum,
                              overwrite=overwrite_transition)

    if pars.transition_only_datnum is not None:
        print(f'Dat{pars.transition_only_datnum}')
        if pars.save_name + '_cold' not in get_dat(pars.transition_only_datnum).Transition.fit_names \
                or overwrite_transition:
            do_transition_only_calc(
                datnum=pars.transition_only_datnum,
                save_name=pars.save_name,
                theta=pars.force_theta,
                gamma=pars.force_gamma,
                center_func=pars.transition_center_func_name,
                width=pars.transition_fit_width,
                t_func_name=pars.transition_func_name,
                csq_mapped=pars.csq_mapped,
                data_rows=pars.transition_data_rows,
                overwrite=overwrite_transition)
    if pars.save_name not in get_dat(
            pars.entropy_datnum).Entropy.fit_names or overwrite_entropy:
        print(f'Dat{pars.entropy_datnum}')
        do_entropy_calc(pars.entropy_datnum,
                        save_name=pars.save_name,
                        setpoint_start=pars.setpoint_start,
                        t_func_name=pars.entropy_transition_func_name,
                        csq_mapped=pars.csq_mapped,
                        theta=pars.force_theta,
                        gamma=pars.force_gamma,
                        width=pars.entropy_fit_width,
                        data_rows=pars.transition_data_rows,
                        overwrite=overwrite_entropy)

    calculate_new_sf_only(
        pars.entropy_datnum,
        pars.save_name,
        dt=pars.force_dt,
        amp=pars.force_amp,
        from_square_transition=pars.sf_from_square_transition,
        transition_datnum=pars.transition_only_datnum,
        fit_name=pars.save_name)

    # Save to HDF
    d = get_dat(pars.entropy_datnum)
    save_gamma_analysis_params_to_dat(d,
                                      analysis_params=pars,
                                      name=pars.save_name)
    return None
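
# --- Hedged usage sketch (not from the source) ---------------------------------------
# Assumes GammaAnalysisParams is constructed with keyword fields matching the attributes
# read by process_single() above; the datnums and save_name are illustrative only.
pars = GammaAnalysisParams(
    entropy_datnum=2170,            # hypothetical datnum
    transition_only_datnum=2171,    # hypothetical datnum
    csq_datnum=2172,                # hypothetical datnum
    csq_mapped=True,
    save_name='forced_theta_linear',
    # ... plus the force_*/fit width/setpoint/row fields read above
)
process_single(pars, overwrite_transition=False, overwrite_entropy=False)
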
Example #2
def check_nrg_fit(datnum, existing_fit='forced_theta_linear'):
    """

    Args:
        datnum (): Dat to check
        existing_fit (): Name of the existing transition fit to get the Theta value from

    Returns:

    """
    dat = get_dat(datnum)
    x = dat.Data.x
    data = dat.Data.get_data('csq_mapped_avg')
    theta = dat.Transition.get_fit(name=existing_fit).best_values.theta
    nrg_fitter = NrgUtil(
        NRGParams(gamma=1,
                  theta=theta,
                  center=0,
                  amp=1.5,
                  lin=0.003,
                  const=0,
                  lin_occ=0))
    fit = nrg_fitter.get_fit(x=x, data=data)
    fig = p1d.plot(data=data, x=x, trace_name='data')
    fig.add_trace(p1d.trace(x=x, data=fit.eval_fit(x=x), name='fit'))
    fig.add_trace(p1d.trace(x=x, data=fit.eval_init(x=x), name='init'))
    fig.show()
    print(
        f'Dat{dat.datnum}: G/T = {fit.best_values.g / fit.best_values.theta:.2f}'
    )
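
# Hedged usage sketch (not from the source): dat 2170 is the gamma-broadened dat used in
# other examples here; 'forced_theta_linear' is simply the default existing fit name.
check_nrg_fit(2170, existing_fit='forced_theta_linear')
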
Example #3
def plot_multiple_dndt(params: List[Params]) -> go.Figure:
    fig = p1d.figure(xlabel='Sweep Gate/max(Gamma, T)',
                     ylabel=f'{DELTA}I*max(Gamma, T)',
                     title=f'{DELTA}I vs Sweep gate for various Gamma/T')
    for param in params:
        dat = get_dat(param.datnum)

        out = dat.SquareEntropy.get_Outputs(name='forced_theta_linear_non_csq')
        sweep_x = out.x / 100  # /100 to convert to real mV
        data_dndt = out.average_entropy_signal
        # transition_fit = tdat.Transition.get_fit(name='forced_theta_linear_non_csq')
        # gamma_over_t = transition_fit.best_values.g/transition_fit.best_values.theta
        gamma_over_t = param.gamma / param.theta

        rescale = max(param.gamma, param.theta)

        fig.add_trace(
            p1d.trace(x=sweep_x / rescale,
                      data=data_dndt * rescale,
                      mode='lines',
                      name=f'{gamma_over_t:.2f}'))
    fig.update_layout(legend_title='Gamma/Theta')
    fig.update_xaxes(range=[-0.2, 0.2])
    fig.update_layout(template='simple_white')
    return fig
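
# Hedged usage sketch (not from the source): assumes Params carries at least datnum,
# gamma and theta (the only fields used above); the values below are illustrative only.
params = [Params(datnum=2170, gamma=23.4, theta=4.4),
          Params(datnum=2164, gamma=0.4, theta=4.0)]
plot_multiple_dndt(params).show()
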
Example #4
 def _get_params_from_dat(self,
                          datnum,
                          fit_which: str = 'i_sense',
                          hot_or_cold: str = 'cold') -> NRGParams:
     dat = get_dat(datnum)
     orig_fit = dat.NrgOcc.get_fit(name=NRG_OCC_FIT_NAME)
     if fit_which == 'i_sense':
         data = get_avg_i_sense_data(dat,
                                     CSQ_DATNUM,
                                     center_func=_center_func,
                                     hot_or_cold=hot_or_cold)
         if hot_or_cold == 'hot':  # Then allow theta to vary, but hold gamma const
             params = U.edit_params(orig_fit.params, ['g', 'theta'],
                                    [None, None],
                                    vary=[False, True])
         else:
             params = orig_fit.params
         new_fit = dat.NrgOcc.get_fit(calculate_only=True,
                                      x=data.x,
                                      data=data.data,
                                      initial_params=params)
     elif fit_which == 'entropy':
         data = get_avg_entropy_data(dat,
                                     center_func=_center_func,
                                     csq_datnum=CSQ_DATNUM)
         new_fit = NrgUtil(inital_params=orig_fit).get_fit(
             data.x, data.data, which_data='dndt')
     else:
         raise NotImplementedError
     params = NRGParams.from_lm_params(new_fit.params)
     return params
Example #5
def run_weakly_coupled_nrg_fit(
    datnum: int,
    csq_datnum: Optional[int],
    center_func: Optional[Callable[[DatHDF], bool]] = None,
    overwrite: bool = False,
) -> FitInfo:
    """
    Runs a weakly coupled NRG fit (Gamma fixed near zero) on the averaged I_sense data of a dat.
    Args:
        datnum (): Dat to calculate for
        csq_datnum (): Num of dat to use for CSQ mapping  (will only calculate if necessary)
        center_func: Whether data should be centered first for dat
            (e.g. lambda dat: True if dat.Logs.dacs['ESC'] > -250 else False)
        overwrite (): NOTE: Only overwrites final Avg fit.

    Returns:

    """
    fit_name = 'csq_gamma_small' if csq_datnum is not None else 'gamma_small'
    dat = get_dat(datnum)
    avg_data = get_avg_i_sense_data(dat, csq_datnum, center_func=center_func)

    pars = get_initial_params(avg_data, which='nrg')
    pars['g'].value = 0.005
    pars['g'].vary = False
    pars['occ_lin'].vary = False
    fit = dat.NrgOcc.get_fit(which='avg',
                             name=fit_name,
                             initial_params=pars,
                             data=avg_data.data,
                             x=avg_data.x,
                             calculate_only=False,
                             check_exists=False,
                             overwrite=overwrite)
    return fit
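
# Hedged usage sketch (not from the source): dat 2164 is the weakly coupled dat used in
# other examples here; run without CSQ mapping so the fit is saved as 'gamma_small'.
fit = run_weakly_coupled_nrg_fit(2164, csq_datnum=None)
print(f'Dat2164: theta = {fit.best_values.theta:.2f} mV')
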
Example #6
def figure_1_add_NRG_fit_to_gamma_dndt() -> go.Figure:
    fit_name = 'forced_theta_linear_non_csq'
    dat = get_dat(2170)

    init_params = NRGParams(
        gamma=23.4352,
        theta=4.396,
        center=78.4,
        amp=0.675,
        lin=0.00121,
        const=7.367,
        lin_occ=0.0001453,
    )

    out = dat.SquareEntropy.get_Outputs(name=fit_name)
    x = out.x
    z = out.average_entropy_signal

    fig = p1d.figure(xlabel='V_P', ylabel=f'{DELTA}I (nA)')
    fig.add_trace(p1d.trace(x=x, data=z, name='Data', mode='lines'))

    dndt_init_params = copy.copy(init_params)
    dndt_init_params.amp = 0.0001  # To rescale the arbitrary NRG dndt scale
    dndt_init_params.theta = dat.SquareEntropy.get_fit(
        fit_name=fit_name).best_values.theta
    nrg_fitter = NrgUtil(inital_params=dndt_init_params)
    fit = nrg_fitter.get_fit(x=x, data=z, which_data='dndt')

    fig.add_trace(
        p1d.trace(x=x, data=fit.eval_fit(x=x), mode='lines', name='Fit'))
    fig.add_trace(
        p1d.trace(x=x, data=fit.eval_init(x=x), mode='lines', name='Init'))

    fig.show()
    return fig
Example #7
    def __init__(
        self,
        run,
        run_centers,
        datnum,
        overwrite_centers,
        sp_start,
        se_transition_func,
        se_fit_width,
        se_rows,
        use_tonly,
        tonly_datnum,
        tonly_func,
        tonly_width,
        tonly_rows,
        center_func,
        force_theta,
        force_gamma,
        force_dt,
        force_amp,
        int_from_se,
        use_csq,
        csq_datnum,
    ):
        self.run = triggered_by(
            self.components.but_run.id)  # i.e. True if run was the trigger
        self.run_centers = triggered_by(
            self.components.but_run_generate_centers.id)
        self.overwrite_centers = True if overwrite_centers else False
        self.datnum = datnum

        # SE fitting
        self.sp_start = sp_start if sp_start else 0.0
        self.ent_transition_func = se_transition_func
        self.ent_width = se_fit_width
        self.ent_rows = se_rows if se_rows else (None, None)

        # Tonly fitting
        self.use_tonly = use_tonly
        self.tonly_datnum = tonly_datnum
        self.tonly_func = tonly_func
        self.tonly_width = tonly_width
        self.tonly_rows = tonly_rows

        self.center_func = center_func
        self.force_theta = force_theta
        self.force_gamma = force_gamma

        # Integration info
        self.force_dt = force_dt
        self.force_amp = force_amp
        self.int_from_se = int_from_se

        # CSQ mapping
        self.csq_map = use_csq
        self.csq_datnum = csq_datnum

        # ## Post init
        self.dat = get_dat(self.datnum) if self.datnum else None
Example #8
def calc_transition(datnum: int, exp2hdf=FebMar21Exp2HDF) -> FitInfo:
    dat = get_dat(datnum, exp2hdf=exp2hdf, overwrite=False)
    try:
        tfit = dat.Transition.avg_fit
    except Exception as e:
        print(f'Dat{datnum} raised {e}')
        tfit = None
    return tfit
Example #9
    def __init__(self, datnum: int, se_name, e_names, t_names, int_names):
        self.datnum: Optional[int] = datnum
        self.se_name: str = se_name
        self.e_names: List[str] = listify_dash_input(e_names)
        self.t_names: List[str] = listify_dash_input(t_names)
        self.int_names: List[str] = listify_dash_input(int_names)

        # Generated
        self.dat = get_dat(datnum) if self.datnum is not None else None
Example #10
 def get_real_data(self, occupation=False) -> Data1D:
     if self.which_plot == 'weak':
         dat = get_dat(2164)
     elif self.which_plot == 'strong':
         dat = get_dat(2170)
         # dat = get_dat(2213)
     else:
         raise NotImplementedError
     if occupation:
         data = get_avg_i_sense_data(dat,
                                     CSQ_DATNUM,
                                     center_func=_center_func,
                                     hot_or_cold='cold')
     else:
         data = get_avg_entropy_data(dat,
                                     center_func=_center_func,
                                     csq_datnum=CSQ_DATNUM)
     return Data1D(x=data.x, data=data.data)
Example #11
def compare_nrg_with_i_sense_for_single_dat(
        datnum: int,
        csq_map_datnum: Optional[int] = None,
        show_2d_centering_comparison=False,
        show_1d_fit_comparison=True):
    """
    Runs (and optionally plots) a comparison of the 2D centering and of the 1D fitting of averaged data for NRG vs
    regular I_sense, either CSQ mapped or not.
    Args:
        datnum ():
        csq_map_datnum ():  Optional CSQ datnum for csq mapping
        show_2d_centering_comparison (): Whether to show the 2D plot with centers
        show_1d_fit_comparison (): Whether to show the 1D fit and fit info

    Returns:

    """
    dat = get_dat(datnum)
    if csq_map_datnum is not None:
        csq_dat = get_dat(csq_map_datnum)
        data = get_2d_i_sense_csq_mapped(dat, csq_dat)
        using_csq = True
    else:
        data = get_2d_data(dat)
        using_csq = False

    if show_2d_centering_comparison:
        fig = plot_2d_i_sense(data,
                              title_prepend=f'Dat{dat.datnum}: ',
                              trace_type='heatmap',
                              using_csq=using_csq)
        run_and_plot_center_comparsion(fig, data).show()

    fits = fit_2d_transition_data(data,
                                  fit_with='i_sense',
                                  initial_params=None)
    centers = [f.best_values.mid for f in fits]
    avg_data = average_data(data, centers)

    if show_1d_fit_comparison:
        fig = plot_single_transition(avg_data,
                                     title_prepend=f'Dat{dat.datnum}: ',
                                     using_csq=using_csq)
        run_and_plot_single_fit_comparison(fig, avg_data).show()
Example #12
 def transition_2d(self) -> go.Figure:
     if not self._correct_call_args():
         logger.warning(f'Bad call args to GraphCallback')
         return go.Figure()
     if not self.calculated_triggered or self.calculated.analysis_params.transition_only_datnum is None:
         dat = self.dat
         plotter = TwoD(dat=dat)
         out = dat.SquareEntropy.get_row_only_output(name='default',
                                                     calculate_only=True)
         x = out.x
         y = dat.Data.get_data('y')
         data = np.nanmean(out.cycled[:, (0, 2), :], axis=1)
         fig = plotter.plot(data=data,
                            x=x,
                            y=y,
                            title=f'Dat{dat.datnum}: 2D Cold Transition')
         if self.calculated_triggered:
             out = self.calculated.calculated_entropy_fit.output
             x = out.x
             data = np.nanmean(out.cycled[:, (0, 2), :], axis=1)
             ys = y_from_rows(
                 self.calculated.analysis_params.entropy_data_rows,
                 y,
                 mode='values')
             fig.add_trace(
                 plotter.trace(data=data,
                               x=x,
                               y=np.linspace(ys[0], ys[1],
                                             out.entropy_signal.shape[0])))
             for h in ys:
                 plotter.add_line(fig, h, mode='horizontal', color='black')
     else:
         dat = get_dat(
             self.calculated.analysis_params.transition_only_datnum)
         plotter = TwoD(dat=dat)
         x = dat.Transition.x
         y = dat.Data.get_data('y')
         data = dat.Transition.data
         ys = y_from_rows(
             self.calculated.analysis_params.transition_data_rows,
             y,
             mode='values')
         fig = plotter.plot(data=data,
                            x=x,
                            y=y,
                            title=f'Dat{dat.datnum}: 2D Transition only')
         for h in ys:
             plotter.add_line(fig, h, mode='horizontal', color='black')
     return fig
Example #13
def plot_integrated(
    datnum: int,
    params: Params,
) -> go.Figure:
    """

    Args:
        datnum (): To get the integration info/dndt from
        params (): Initial NRG params for fitting the cold I_sense data

    Returns:

    """
    dat = get_dat(datnum)

    out = dat.SquareEntropy.get_Outputs(name='forced_theta_linear_non_csq')
    sweep_x = out.x
    data_dndt = out.average_entropy_signal
    int_info = dat.Entropy.get_integration_info(
        name='forced_theta_linear_non_csq')
    data_int = int_info.integrate(data_dndt)
    data_isense_cold = np.nanmean(out.averaged[(0, 2), :], axis=0)

    fit = nrg_fit(sweep_x, data_isense_cold, init_params=params)
    params = Params(
        gamma=fit.best_values.g,
        theta=fit.best_values.theta,
        center=fit.best_values.mid,
        amp=fit.best_values.amp,
        lin=fit.best_values.lin,
        const=fit.best_values.const,
        lin_occ=fit.best_values.occ_lin,
    )

    nrg = NRG_func_generator('int_dndt')
    nrg_integrated = nrg(sweep_x, params.center, params.gamma,
                         params.theta)  # , amp=1, lin=0, const=0, occ_lin=0)

    fig = p1d.figure(xlabel='Sweep Gate (mV)', ylabel='Entropy (kB)')

    for data, label in zip([data_int, nrg_integrated], ['Data', 'NRG']):
        fig.add_trace(
            p1d.trace(x=sweep_x / 100, data=data, mode='lines',
                      name=label))  # /100 to convert to real mV

    for v in [np.log(2), np.log(3)]:
        p1d.add_line(fig,
                     v,
                     mode='horizontal',
                     color='black',
                     linewidth=1,
                     linetype='dash')

    fig.update_layout(template='simple_white')
    return fig
Example #14
def set_sf_from_transition(entropy_datnums,
                           transition_datnums,
                           fit_name,
                           integration_info_name,
                           dt_from_self=False,
                           fixed_dt=None,
                           fixed_amp=None,
                           experiment_name: Optional[str] = None):
    for enum, tnum in progressbar(zip(entropy_datnums, transition_datnums)):
        edat = get_dat(enum, exp2hdf=experiment_name)
        tdat = get_dat(tnum, exp2hdf=experiment_name)
        try:
            _set_amplitude_from_transition_only(edat,
                                                tdat,
                                                fit_name,
                                                integration_info_name,
                                                dt_from_self=dt_from_self,
                                                fixed_dt=fixed_dt,
                                                fixed_amp=fixed_amp)
        except (TypeError, U.NotFoundInHdfError):
            print(
                f'Failed to set scaling factor for dat{enum} using dat{tnum}')
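
# Hedged usage sketch (not from the source): the datnum lists are hypothetical and must be
# paired element-wise (entropy dat i is matched with transition dat i in the zip above).
set_sf_from_transition(entropy_datnums=[2170, 2176],
                       transition_datnums=[2171, 2177],
                       fit_name='forced_theta_linear',
                       integration_info_name='forced_theta_linear',
                       dt_from_self=False,
                       fixed_dt=None,
                       fixed_amp=None)
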
Example #15
    def calculate_centers(self) -> str:
        """Calculates center fits for dats with option to overwrite and DOES write to HDF. Can be extremely slow!

        Note: Currently only uses csq_mapped/force_theta/force_gamma... Does not use setpoint or width.
        Note: Saves center fits under the same name regardless of whether from CSQ mapping or not (should be very similar)
        """
        def get_x_data(dat: DatHDF) -> Tuple[np.ndarray, np.ndarray]:
            x = dat.Data.get_data('x')
            data = dat.Data.get_data('csq_mapped' if self.csq_map else 'i_sense')
            if data.ndim == 2:
                data = data[0]
            return x, data

        if not self.run_centers:
            raise PreventUpdate

        e_dat = self.dat
        if e_dat is None:
            raise PreventUpdate

        init_x, init_data = get_x_data(e_dat)
        calc_params = TransitionCalcParams(initial_x=init_x,
                                           initial_data=init_data,
                                           force_theta=self.force_theta,
                                           force_gamma=self.force_gamma,
                                           csq_mapped=self.csq_map)
        # Do Center calculations
        set_centers(e_dat,
                    self.center_func,
                    calc_params=calc_params,
                    se_data=True,
                    csq_mapped=self.csq_map)

        if self.use_tonly:
            t_dat = get_dat(self.tonly_datnum)
            init_x, init_data = get_x_data(t_dat)
            calc_params = TransitionCalcParams(initial_x=init_x,
                                               initial_data=init_data,
                                               force_theta=self.force_theta,
                                               force_gamma=self.force_gamma,
                                               csq_mapped=self.csq_map)
            set_centers(t_dat,
                        self.center_func,
                        se_data=False,
                        calc_params=calc_params,
                        csq_mapped=self.csq_map)

        return 'Done'
Example #16
    def _get_datas(self) -> Tuple[List[Data1D], List[float], List[float]]:
        datas = []
        if self.which_plot == 'data':
            gammas = []
            thetas = []
            for k in PARAM_DATNUM_DICT:
                dat = get_dat(k)
                data = get_avg_entropy_data(dat,
                                            center_func=_center_func,
                                            csq_datnum=CSQ_DATNUM)
                occ_data = get_avg_i_sense_data(dat,
                                                CSQ_DATNUM,
                                                center_func=_center_func)
                data.x = occ_data.x
                data.x -= dat.NrgOcc.get_x_of_half_occ(
                    fit_name=NRG_OCC_FIT_NAME)
                fit = dat.NrgOcc.get_fit(name=NRG_OCC_FIT_NAME)
                # data.x = data.x/fit.best_values.theta*kb*0.1  # So that difference in lever arm is taken into account
                gammas.append(fit.best_values.g)
                thetas.append(fit.best_values.theta)
                rescale = max(fit.best_values.g, fit.best_values.theta)
                datas.append(
                    Data1D(x=data.x / rescale, data=data.data * rescale))
        elif self.which_plot == 'nrg':
            gts = [0.1, 1, 5, 10, 25]
            theta = 1
            thetas = [theta] * len(gts)
            gammas = list(np.array(gts) * theta)
            for gamma in gammas:
                x_width = max([gamma, theta]) * 15
                pars = NRGParams(gamma=gamma, theta=theta)
                data = NrgUtil().data_from_params(params=pars,
                                                  x=np.linspace(
                                                      -x_width, x_width, 501),
                                                  which_data='dndt',
                                                  which_x='sweepgate')
                data.x -= get_x_of_half_occ(params=pars.to_lm_params())
                data.x *= 0.0001  # Convert back to NRG units
                data.data = data.data / np.sum(data.data) / np.mean(
                    np.diff(data.x)) * np.log(2)  # Convert to real entropy
                rescale = max(gamma, theta)
                data.x = data.x / rescale
                data.data = data.data * rescale
                datas.append(data)

        else:
            raise NotImplementedError

        return datas, gammas, thetas
Example #17
    def __init__(self, se_name, e_names, t_names, int_names, calculated,
                 datnum):
        super().__init__()  # Shutting up PyCharm
        self.se_name: str = se_name
        self.e_names: List[str] = listify_dash_input(e_names)
        self.t_names: List[str] = listify_dash_input(t_names)
        self.int_names: List[str] = listify_dash_input(int_names)
        self.datnum: Optional[int] = datnum

        self.calculated: StoreData = calculated
        self.calculated_triggered = triggered_by(
            self.components.store_calculated.id)

        # Generated
        self.dat = get_dat(datnum) if self.datnum is not None else None
Example #18
    def __init__(
        self,
        datnum,
        t_fit_names,  # plotting existing
        calculated,
    ):
        self.datnum: int = datnum
        # plotting existing
        self.t_fit_names: List[str] = listify_dash_input(t_fit_names)

        self.calculated: StoreData = calculated
        self.calculated_triggered = triggered_by(
            self.components.store_calculated.id)

        # ################# Post calculations
        self.dat = get_dat(self.datnum) if self.datnum is not None else None
Example #19
def do_calc(datnum):
    dat = get_dat(datnum)
    fit = dat.Transition.avg_fit
    mid = fit.best_values.mid
    width = fit.best_values.theta * 15
    x1, x2 = U.get_data_index(dat.Transition.avg_x, [mid - width, mid + width],
                              is_sorted=True)
    x = dat.Transition.avg_x[x1:x2]
    data = dat.Transition.avg_data[x1:x2]
    new_fit = dat.Transition.get_fit(which='avg',
                                     name='narrow_centered',
                                     overwrite=True,
                                     x=x,
                                     data=data)
    print(f'Fitting Dat{dat.datnum} from {x[0]:.2f} -> {x[-1]:.2f}mV')
    return new_fit
Example #20
def _temp_calculate_from_non_csq():
    """Used this to recalculate the csq mapped output (used same centers because they are not really going to change
    for gamma broadened anyway)"""
    dat = get_dat(2170)
    from dat_analysis.analysis_tools.csq_mapping import calculate_csq_map
    calculate_csq_map(2170, None, 2172)
    inps = dat.SquareEntropy.get_Inputs(
        x_array=dat.Data.get_data('x'),
        i_sense=dat.Data.get_data('csq_mapped'),
        save_name='forced_theta_linear')
    pp = dat.SquareEntropy.get_ProcessParams(
        name='forced_theta_linear_non_csq', save_name='forced_theta_linear')

    out = dat.SquareEntropy.get_Outputs(name='forced_theta_linear',
                                        inputs=inps,
                                        process_params=pp)
Example #21
 def one_d_data_subtract_fit(self):
     if self.datnum:
         dat = get_dat(self.datnum, exp2hdf=self.experiment_name)
         plotter = OneD(dat=dat)
         xlabel = 'Sweepgate /mV'
         fig = plotter.figure(xlabel=xlabel, ylabel='Current /nA', title=f'Data Subtract Fit: G={self.g:.2f}mV, '
                                                                         f'{THETA}={self.theta:.2f}mV, '
                                                                         f'{THETA}/G={self.theta / self.g:.2f}')
         for i, which in enumerate(self.which):
             if 'i_sense' in which:
                 x, data = _get_x_and_data(self.datnum, self.experiment_name, which)
                 nrg_func = NRG_func_generator(which='i_sense')
                 nrg_data = nrg_func(x, self.mid, self.g, self.theta, self.amp, self.lin, self.const, self.occ_lin)
                 data_sub_nrg = data - nrg_data
                 fig.add_trace(plotter.trace(x=x, data=data_sub_nrg, name=f'{which} subtract NRG', mode='lines'))
         return fig
     return go.Figure()
Example #22
def run_forced_theta_nrg_fit(
    datnum: int,
    csq_datnum: Optional[int],
    center_func: Optional[Callable[[DatHDF], bool]] = None,
    which_linear_theta_params: str = 'normal',
    overwrite: bool = False,
) -> FitInfo:
    """
    Runs an NRG fit of the averaged I_sense data with theta forced to the linear-theta value for the dat (Gamma free to vary).
    Args:
        datnum (): Dat to calculate for
        csq_datnum (): Num of dat to use for CSQ mapping  (will only calculate if necessary)
        center_func: Whether data should be centered first for dat
            (e.g. lambda dat: True if dat.Logs.dacs['ESC'] > -250 else False)
        which_linear_theta_params (): Which set of linear theta parameters to use (defaults to 'normal')
        overwrite (): NOTE: Only overwrites final Avg fit.

    Returns:

    """
    if csq_datnum is not None:
        fit_name = 'csq_forced_theta'
    else:
        fit_name = 'forced_theta'

    if center_func is None:
        center_func = lambda dat: False  # Default to no centering for gamma broadened
    dat = get_dat(datnum)
    avg_data = get_avg_i_sense_data(dat, csq_datnum, center_func=center_func)

    pars = get_initial_params(avg_data, which='nrg')
    theta = get_linear_theta(dat, which_params=which_linear_theta_params)
    pars['theta'].value = theta
    pars['theta'].vary = False
    pars['g'].value = 5
    pars['g'].max = theta * 50  # limit of NRG data
    pars['g'].min = theta / 10000  # limit of NRG data
    pars['occ_lin'].vary = True

    if abs((x := avg_data.x)[-1] -
           x[0]) > 1500:  # If it's a wider scan, only fit over middle 1500
        cond = np.where(np.logical_and(x > -750, x < 750))
        avg_data.x, avg_data.data = x[cond], avg_data.data[cond]
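
    # Hedged completion (the snippet above is truncated here): assumed to mirror
    # run_weakly_coupled_nrg_fit above, i.e. run/save the avg NrgOcc fit and return it.
    fit = dat.NrgOcc.get_fit(which='avg',
                             name=fit_name,
                             initial_params=pars,
                             data=avg_data.data,
                             x=avg_data.x,
                             calculate_only=False,
                             check_exists=False,
                             overwrite=overwrite)
    return fit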
Example #23
    def __init__(self, datnum: int, current_value):
        dat = get_dat(datnum) if datnum is not None else None

        min_ = 0
        max_ = 1
        step = 1
        marks = {}
        value = (0, 1)
        if dat is not None:
            yshape = dat.Data.get_data('y').shape[0]
            max_ = yshape - 1
            marks = {int(v): str(int(v)) for v in np.linspace(min_, max_, 5)}
            if current_value and all([min_ < v < max_ for v in current_value]):
                value = current_value
            else:
                value = (min_, max_)
        super().__init__(min=min_,
                         max=max_,
                         step=step,
                         marks=marks,
                         value=value)
Example #24
def do_calc(datnum):
    """Just a function which can be passed to a process pool for faster calculation"""
    save_name = 'SPS.0045'

    dat = get_dat(datnum)

    setpoints = [0.0045, None]

    # Get other inputs
    setpoint_times = square_wave_time_array(dat.SquareEntropy.square_awg)
    sp_start, sp_fin = [U.get_data_index(setpoint_times, sp) for sp in setpoints]
    logger.debug(f'Setpoint times: {setpoints}, Setpoint indices: {sp_start, sp_fin}')

    # Run Fits
    pp = dat.SquareEntropy.get_ProcessParams(name=None,  # Start from default and modify from there
                                             setpoint_start=sp_start, setpoint_fin=sp_fin,
                                             transition_fit_func=i_sense,
                                             save_name=save_name)
    out = dat.SquareEntropy.get_Outputs(name=save_name, inputs=None, process_params=pp, overwrite=False)
    dat.Entropy.get_fit(which='avg', name=save_name, data=out.average_entropy_signal, x=out.x, check_exists=False)
    # Also fit each row of the entropy signal (only the side effect of saving the fits is wanted)
    for i, row in enumerate(out.entropy_signal):
        dat.Entropy.get_fit(which='row', row=i, name=save_name,
                            data=row, x=out.x, check_exists=False)
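
# Hedged usage sketch (not from the source): passing do_calc to a process pool as the
# docstring above suggests; the datnum range is hypothetical.
from concurrent.futures import ProcessPoolExecutor

if __name__ == '__main__':
    with ProcessPoolExecutor() as pool:
        list(pool.map(do_calc, range(2150, 2160)))  # hypothetical datnums
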
Example #25
def _get_x_and_data(datnum, experiment_name, which: str) -> Tuple[np.ndarray, np.ndarray]:
    if datnum:
        dat = get_dat(datnum, exp2hdf=experiment_name)
        try:
            _ = dat.Logs.awg  # only checking existence; raises NotFoundInHdfError for non-SE dats
            se = True
        except NotFoundInHdfError:
            se = False
        if se:
            out = _get_output(datnum, experiment_name)
            x = out.x
            data = data_from_output(out, which)
        else:
            if 'i_sense' not in which:
                raise NotFoundInHdfError(f'Dat{datnum} is a Transition only dat and has no {which} data.')
            x = dat.Transition.x
            if dat.Logs.dacs['ESC'] >= ESC_GAMMA_LIMIT:
                data = np.nanmean(dat.Transition.data, axis=0)
            else:
                data = dat.Transition.avg_data
        return x, data
    else:
        raise RuntimeError(f'No datnum found to load data from')
Example #26
def _get_output(datnum, experiment_name) -> Output:
    def calculate_se_output(dat: DatHDF):
        if dat.Logs.dacs['ESC'] >= ESC_GAMMA_LIMIT:  # Gamma broadened so no centering
            logger.info(f'Dat{dat.datnum}: Calculating {OUTPUT_NAME} without centering')
            do_entropy_calc(dat.datnum, save_name=OUTPUT_NAME, setpoint_start=OUTPUT_SETPOINT, csq_mapped=False,
                            experiment_name=experiment_name,
                            center_for_avg=False)
        else:  # Not gamma broadened so needs centering
            logger.info(f'Dat{dat.datnum}: Calculating {OUTPUT_NAME} with centering')
            do_entropy_calc(dat.datnum, save_name=OUTPUT_NAME, setpoint_start=OUTPUT_SETPOINT, csq_mapped=False,
                            center_for_avg=True,
                            experiment_name=experiment_name,
                            t_func_name='i_sense')

    if datnum:
        dat = get_dat(datnum, exp2hdf=experiment_name)
        if OUTPUT_NAME not in dat.SquareEntropy.Output_names():
            with thread_lock:
                if OUTPUT_NAME not in dat.SquareEntropy.Output_names():  # check again in case a previous thread did this
                    calculate_se_output(dat)
        out = dat.SquareEntropy.get_Outputs(name=OUTPUT_NAME)
    else:
        raise RuntimeError(f'No datnum found to load data from')
    return out
Example #27
    # base_dt = 1.149
    # ######### 100mK
    # line_pars['slope'].value = 0.08866821
    # line_pars['intercept'].value = 64.3754
    # theta_for_dt = line.eval(x=-265, params=line_pars)  # Dat2101 is the setting where DCbias was done and dT is defined
    # base_dt = 0.0127 * 1000
    #####

    ######## 50mK
    # line_pars['slope'].value = 1.086e-4*1000
    # line_pars['intercept'].value = 0.05184*1000
    # theta_for_dt = line.eval(x=-270, params=line_pars)  # dT is calculated at -270mV ESC
    # base_dt = 22.8

    for par in all_params:
        dat = get_dat(par.transition_only_datnum)
        theta = line.eval(params=line_pars, x=dat.Logs.fds['ESC'])
        par.force_theta = theta
        par.force_dt = base_dt * theta / theta_for_dt  # Scale dT with same proportion as theta

        # par.force_dt = base_dt
        # if par.transition_only_datnum == 2136:
        #     par.force_amp = 0.52

    # Do processing
    general = AnalysisGeneral(params_list=all_params,
                              calculate=True,
                              overwrite_entropy=True,
                              overwrite_transition=True)
    run_processing(general, multiprocessed=True)
    for par in all_params:
Example #28
    # dndt_, freq = U.decimate(dndts[1], measure_freq=all_dats[1].Logs.measure_freq, numpnts=200, return_freq=True)
    x_ = U.get_matching_x(dndt_datas[1].x, dndt_)
    # dndt_signal(ax, xs=xs[1], datas=dndts[1])
    dndt_signal(ax, xs=x_, datas=dndt_, amp_sensitivity=amps[1])
    ax.set_xlim(-3, 3)
    # ax.set_title('dN/dT for gamma broadened')

    ax.plot(x_, nrg_fits[1].eval_fit(x=x_ * 100), label='NRG Fit')

    for ax in strong_fig.axes:
        ax.set_ylabel(ax.get_ylabel(), labelpad=5)
    plt.tight_layout()
    strong_fig.show()

    # Data for single hot/cold plot
    dat = get_dat(2164)
    # dat = get_dat(7334)

    _, avg_x = dat.NrgOcc.get_avg_data(check_exists=True, return_x=True)
    sweep_x = avg_x / 100  # Convert to real mV
    cold_data = get_avg_i_sense_data(dat,
                                     None,
                                     _center_func,
                                     False,
                                     hot_or_cold='cold')
    hot_data = get_avg_i_sense_data(dat,
                                    None,
                                    _center_func,
                                    False,
                                    hot_or_cold='hot')
Example #29
    plot_transition_fitting = False
    plot_transition_values = True
    plot_entropy_vs_gamma = True
    plot_entropy_vs_time = False
    plot_amp_comparison = False
    plot_csq_map_check = False
    plot_stacked_square_heated = False
    plot_stacked_transition = False
    plot_dot_tune = False
    print_info = False

    # For resetting dats if something goes badly wrong
    reset_dats = False
    if reset_dats:
        for datnum in transition_datnums + entropy_datnums + csq_datnums:
            get_dat(datnum, overwrite=True)

    # Calculations
    gamma_scans = True
    if gamma_scans:
        csq_map = False
        calculate = True
        overwrite = False
        theta = None
        gamma = 0
        width = None
        dt_from_self = True
        # dt = 1.111
        dt = 1.45 * 9.423
        amp = None
        x_func = lambda dat: dat.Logs.fds['ESC']
Example #30
                           data=real_data.data,
                           name=igor_name,
                           x_label='Occupation',
                           y_label=f'Scaled {DELTA}I (a.u.)'))
        save_infos.append(
            U.IgorSaveInfo(x=real_data.x,
                           data=fit_data.data,
                           name=f'{igor_name}_fit',
                           x_label='Occupation',
                           y_label=f'Scaled {DELTA}I (a.u.)'))

    U.save_multiple_save_info_to_itx(filename, save_infos)

    ############################# Weak signal in Gamma broadened
    save_infos = []
    dat = get_dat(2213)
    data2d = get_2d_data(dat, 'entropy')
    data2d.x = data2d.x / 100  # Convert to real mV

    data = Data1D(data=np.nanmean(data2d.data, axis=0), x=data2d.x)
    data_single = Data1D(data=data2d.data[0], x=data2d.x)

    fig = p1d.plot(data.data,
                   x=data.x,
                   xlabel='V_D (mV)',
                   ylabel='Delta I (nA)',
                   mode='lines',
                   trace_name='Avg Data')
    fig.add_trace(
        p1d.trace(data_single.data,
                  x=data_single.x,