def plot_vs_gamma(datnums: List[int], save_name: str, general: AnalysisGeneral, sf_name: Optional[str] = None, show=True):
    """Plot integrated entropy vs the x-axis defined by `general` for the given dats.

    Args:
        datnums: Dat numbers to load and plot.
        save_name: Name under which fits/outputs were previously saved.
        general: Provides x_func/x_label for the x-axis of the traces.
        sf_name: Optional alternative integration-info name; defaults to save_name.
        show: Whether to display the integrated figure before returning it.

    Returns:
        The integrated-entropy figure (with the 'Data' trace added).
    """
    dats = get_dats(datnums)
    int_info_name = sf_name if sf_name is not None else save_name
    fig = get_integrated_fig(dats, title_append=f' at ESS = {dats[0].Logs.fds["ESS"]}mV')
    fig.add_trace(get_integrated_trace(dats=dats,
                                       x_func=general.x_func,
                                       x_label=general.x_label,
                                       trace_name='Data',
                                       save_name=save_name,
                                       int_info_name=int_info_name,
                                       SE_output_name=save_name))
    # Built for its side effects (plot=True shows the comparison); its return value is unused.
    plot_fit_integrated_comparison(dats,
                                   x_func=general.x_func,
                                   x_label=general.x_label,
                                   int_info_name=int_info_name,
                                   fit_name=save_name,
                                   plot=True)
    if show:
        fig.show()
    return fig
class TestThreading(TestCase):
    """Checks that DatHDF instances are safe to use from multiple threads at once,
    both for different dats (low lock contention) and many handles to one dat (high contention)."""
    # NOTE(review): imports at class scope become class attributes; presumably intentional so the
    # test class is self-contained -- confirm against project convention.
    from dat_analysis.dat_object.make_dat import DatHandler, get_dat, get_dats
    from dat_analysis.data_standardize.exp_specific.Feb21 import Feb21Exp2HDF
    from concurrent.futures import ThreadPoolExecutor

    dat_dir = os.path.abspath('fixtures/dats/2021Feb')
    # Where to put outputs (i.e. DatHDFs)
    Testing_Exp2HDF = get_testing_Exp2HDF(dat_dir, output_dir, base_class=Feb21Exp2HDF)

    # Shared fixtures built once at class-definition time (not per test).
    pool = ThreadPoolExecutor(max_workers=5)
    different_dats = get_dats([717, 719, 720, 723, 724, 725], exp2hdf=Testing_Exp2HDF)
    single_dat = different_dats[0]
    same_dats = [single_dat] * 10  # ten references to ONE dat -> forces real lock contention

    def test_threaded_manipulate_test(self):
        """Test that running multiple threads through a method which changes an instance attribute
        works with thread locks"""

        def threaded_manipulate_test(dat: DatHDF):
            eq = dat._threaded_manipulate_test()
            return eq

        # First across six distinct dats, then across ten handles to the same dat.
        t1 = time.time()
        rets = list(self.pool.map(threaded_manipulate_test, self.different_dats))
        print(f'Time elapsed: {time.time()-t1:.2f}s, Returns = {rets}')
        self.assertTrue(all(rets))

        t1 = time.time()
        rets = list(self.pool.map(threaded_manipulate_test, self.same_dats))
        print(f'Time elapsed: {time.time()-t1:.2f}s, Returns = {rets}')
        self.assertTrue(all(rets))

    def test_threaded_reentrant_test(self):
        """Test that the reentrant lock allows a recursive method call to work properly"""
        # Single-threaded sanity check first.
        t1 = time.time()
        ret = self.single_dat._threaded_reentrant_test(i=0)
        print(f'Time elapsed: {time.time()-t1:.2f}s, Returns = {ret}')
        self.assertEqual(3, ret)

        def reentrant_test(dat: DatHDF, i):
            return dat._threaded_reentrant_test(i=i)

        # Expected values suggest _threaded_reentrant_test returns max(i, 3) via recursion
        # -- TODO confirm against the DatHDF implementation.
        t1 = time.time()
        rets = list(self.pool.map(reentrant_test, self.different_dats, [0, 7, 1, 5, 1, 7]))
        print(f'Time elapsed: {time.time()-t1:.2f}s, Returns = {rets}')
        self.assertEqual([3, 7, 3, 5, 3, 7], rets)

        t1 = time.time()
        rets = list(self.pool.map(reentrant_test, self.same_dats, [0, 7, 1, 5, 1, 7]))
        print(f'Time elapsed: {time.time()-t1:.2f}s, Returns = {rets}')
        self.assertEqual([3, 7, 3, 5, 3, 7], rets)
def setUp(self):
    """
    Clear previous test outputs and freshly (re)create the dats used by the tests.

    Note: This actually requires quite a lot of things to be working to run
    (get_dats does quite a lot of work)

    Returns:
        None
    """
    print('running setup')  # SetUp before tests
    # NOTE(review): output_dir, helpers and Testing_Exp2HDF are resolved as module-level
    # names here (not attributes of self) -- confirm they are defined in this module.
    helpers.clear_outputs(output_dir)
    # overwrite=True forces the DatHDFs to be rebuilt from scratch for each test run.
    self.dats = get_dats([717, 719, 720, 723, 724, 725], exp2hdf=Testing_Exp2HDF, overwrite=True)
def _thetas_from_datnums(datnums: Iterable[int], overwrite=False, exp2hdf=FebMar21Exp2HDF) -> Tuple[float, ...]:
    """Return the fitted theta for each datnum's averaged transition fit.

    Uses the 'narrow_centered' avg fit of each dat's Transition attribute.

    Args:
        datnums: Dat numbers to load.
        overwrite: Whether to rebuild the DatHDFs from experiment files.
        exp2hdf: Experiment-to-HDF converter class to use when loading.

    Returns:
        Tuple of theta values (one per datnum, in input order).
    """
    # Fix: annotation was Tuple[float] (a 1-tuple); the function returns a
    # variable-length tuple, which is spelled Tuple[float, ...].
    dats = get_dats(list(datnums), exp2hdf=exp2hdf, overwrite=overwrite)
    return tuple(
        dat.Transition.get_fit(which='avg', name='narrow_centered').best_values.theta
        for dat in dats
    )
def plot_gamma_dcbias(datnums: List[int], save_name: str, show_each_data=True):
    """
    Makes a figure for Theta vs DCbias with option to show the data which is being used to obtain thetas
    Args:
        datnums (): Datnums that form DCbias measurement (i.e. repeats at fixed Biases)
        save_name (): Name of fits etc to be loaded (must already exist)
        show_each_data (): Whether to show the fit for each dataset (i.e. to check everything looks good)

    Returns:
        go.Figure: A plotly figure of Theta vs DCbias
    """
    # NOTE(review): save_name and show_each_data are currently unused in the active code
    # path -- confirm whether the per-dataset fit display still needs wiring up.
    dats = get_dats(datnums)
    plotter = OneD(dats=dats)
    fig = plotter.figure(
        ylabel='Current /nA',
        title=f'Dats{dats[0].datnum}-{dats[-1].datnum}: DCbias in Gamma broadened')
    line = lm.models.LinearModel()
    params = line.make_params()
    # Every second dat only (odd indices); dats come in pairs per bias setting.
    for dat in dats[1::2]:
        # Subtract the fitted linear background (slope/intercept from the avg transition fit)
        # so only the transition feature remains.
        params['slope'].value = dat.Transition.avg_fit.best_values.lin
        params['intercept'].value = dat.Transition.avg_fit.best_values.const
        fig.add_trace(plotter.trace(
            x=dat.Transition.avg_x,
            data=dat.Transition.avg_data - line.eval(params=params, x=dat.Transition.avg_x),
            name=f'Dat{dat.datnum}: Bias={dat.Logs.fds["HO1/10M"] / 10:.1f}nA',
            mode='lines',
        ))
    fig.show()
    # Bug fix: previously fell off the end returning None despite the documented return value.
    return fig
def plot_transition_values(datnums: List[int], save_name: str, general: AnalysisGeneral, param_name: str = 'theta',
                           transition_only: bool = True, show=True):
    """Plot a fitted transition parameter (e.g. theta) vs ESC for the given dats.

    Args:
        datnums: Dat numbers to load and plot.
        save_name: Name of the previously-saved fit to read the parameter from.
        general: Provides x_func used for the x-axis of the trace.
        param_name: Which fit parameter to plot (default 'theta').
        transition_only: True for Transition-only scans, False for Entropy scans
            (parameter then comes from the SquareEntropy transition fit).
        show: Whether to display the figure before returning it.

    Returns:
        The figure with the 'Data' trace added.
    """
    # Fix: previously each branch called get_dats(datnums) twice with identical arguments
    # and duplicated the whole plotting code; the branches differed only in the title and
    # the from_square_entropy flag.
    dats = get_dats(datnums)
    title_append = (' vs ESC for Transition Only scans' if transition_only
                    else ' vs ESC for Entropy scans')
    fig = transition_fig(dats=dats, xlabel='ESC /mV', title_append=title_append, param=param_name)
    fig.add_trace(transition_trace(dats, x_func=general.x_func,
                                   from_square_entropy=not transition_only,
                                   fit_name=save_name, param=param_name, label='Data'))
    # print(
    #     f'Avg weakly coupled cold theta = '
    #     f'{np.mean([dat.Transition.get_fit(name=fit_name).best_values.theta for dat in dats if dat.Logs.fds["ESC"] <= -330])}')
    if show:
        fig.show()
    return fig
def plot_stacked_square_heated(datnums: List[int], save_name: str, plot=True): dats = get_dats(datnums) # Plot Integrated integrated_plot_info = PlotInfo( title_append='Integrated Entropy', ylabel='Entropy /kB', data_func=lambda dat: dat.Entropy.get_integrated_entropy( name=save_name, data=dat.SquareEntropy.get_Outputs( name=save_name, check_exists=True).average_entropy_signal), x_func=lambda dat: dat.SquareEntropy.get_Outputs(name=save_name, check_exists=True).x, trace_name=lambda dat: f'Dat{dat.datnum}') fit_plot_info = PlotInfo( title_append='Fit Entropy', ylabel='Entropy /kB', data_func=lambda dat: dat.SquareEntropy.get_Outputs( name=save_name, check_exists=True).average_entropy_signal, x_func=lambda dat: dat.SquareEntropy.get_Outputs(name=save_name, check_exists=True).x, trace_name=lambda dat: f'Dat{dat.datnum}') figs = [] for plot_info in [integrated_plot_info]: plotter = OneD(dats=dats) dat = dats[0] fig = plotter.figure( xlabel=dat.Logs.xlabel, ylabel=plot_info.ylabel, title= f'Dats{dats[0].datnum}-{dats[-1].datnum}: {plot_info.title_append}' ) for dat in dats: data = plot_info.data_func(dat) x = plot_info.x_func(dat) hover_infos = [ HoverInfo(name='Datnum', func=lambda dat: dat.datnum, precision='d', units=''), HoverInfo(name=dat.Logs.xlabel, func=lambda dat: plot_info.x_func(dat), precision='.2f', units='/mV'), HoverInfo(name=plot_info.ylabel, func=lambda dat: dat.datnum, precision='d', units=''), ] hover_funcs, template = _additional_data_dict_converter( hover_infos) hover_data = [] for func in hover_funcs: v = func(dat) if not hasattr(v, '__len__') or len( v) == 1: # Make sure a hover info for each x_coord v = [v] * len(x) hover_data.append(v) fig.add_trace( plotter.trace(x=x, data=data, name=plot_info.trace_name(dat), hover_data=hover_data, hover_template=template, mode='lines')) if plot: fig.show() figs.append(fig) return figs
# transition_dats = get_dats(ALL_DATS_Tonly) # fits = [] # for esc_range in [(-300, -230), (-280, -230), (-280, -250), (-290, -257)]: # fits.append( # linear_fit_thetas(dats=transition_dats, fit_name='forced_gamma_zero', # filter_func=lambda dat: True if esc_range[0] < dat.Logs.fds['ESC'] < esc_range[ # 1] else False, # show_plots=False) # ) # single_fit = linear_fit_thetas(dats=transition_dats, fit_name='forced_gamma_zero', # filter_func=lambda dat: True if (-282 < dat.logs.fds['esc'] < -265) or (-255 < dat.logs.fds['esc'] < -235) else False, # show_plots=False) from dat_analysis.analysis_tools.general_fitting import calculate_fit tdats = get_dats(VS_GAMMA_Tonly) linear_fit_thetas(dats=tdats, fit_name='forced_gamma_zero', filter_func=lambda dat: True if dat.Logs.dacs["ESC"] < -285 else False, show_plots=True, sweep_gate_divider=100) # print('done') p1d = OneD(dat=None) fig = p1d.figure(xlabel='ESC /mV', ylabel='dT') dats = get_dats(VS_GAMMA) escs = [dat.Logs.dacs['ESC'] for dat in dats] # dts = [dat.SquareEntropy.get_fit(fit_name='forced_gamma_zero_non_csq_hot').best_values.theta - # dat.SquareEntropy.get_fit(fit_name='forced_gamma_zero_non_csq_cold').best_values.theta for dat in dats]
# lin_occ: float # vary_theta: bool = False # vary_gamma: bool = False # datnum: Optional[int] = None if __name__ == '__main__': set_default_rcParams() from dat_analysis.dat_object.make_dat import get_dats, get_dat # csq_datnum = 2197 csq_datnum = None ############################################################################################# # Data for dN/dT # all_dats = get_dats([2164, 2170]) all_dats = get_dats([2164, 2167]) # fit_names = ['csq_gamma_small', 'csq_forced_theta'] fit_names = ['gamma_small', 'forced_theta'] # all_dats = get_dats([2164, 2216]) # Weak, Strong coupling # all_dats = get_dats([7334, 7356]) # Weak, Strong coupling # all_dats = get_dats([7334, 7360]) # Weak, Strong coupling tonly_dats = get_dats([dat.datnum + 1 for dat in all_dats]) dndt_datas, gts, nrg_fits, amps = [], [], [], [] for dat, fit_name in zip(all_dats, fit_names): dndt = get_avg_entropy_data(dat, center_func=_center_func, csq_datnum=csq_datnum) init_fit = dat.NrgOcc.get_fit(name=fit_name) # params = NRGParams.from_lm_params(init_fit.params)
# fit = dat.NrgOcc.get_fit(name=self.fit_save_name, initial_params=init_fit_pars, data=avg_data, x=avg_x, # check_exists=False, overwrite=False) # self.fit = fit # return fit # # def plot_nrg_fit(self) -> go.Figure: # if self.fit_save_name not in self.dat.Transition.fit_names: # self.generate_nrg_fit() # # dat = self.dat # avg_data, avg_x = dat.NrgOcc.get_avg_data(name=self.fit_save_name, return_x=True) # fig = p1d.plot(data=avg_data, x=avg_x, trace_name='Avg Data', title=f'Dat{dat.datnum}: NRG Transition Fit', # mode='lines', xlabel=dat.Logs.xlabel, ylabel='Current (nA)') # fit = dat.NrgOcc.get_fit(name=self.fit_save_name) # fig.add_trace(p1d.trace(data=fit.eval_fit(avg_x), x=avg_x, mode='lines', name='NRG Fit')) # return fig # if __name__ == '__main__': dat = get_dat(7437) dats = get_dats((7437, 7796 + 1)) all_data = AllTraceData(dats) all_data.heating_power_fig().show() all_data.save_to_itx() single_temp_data = all_data.get_dats_closest_to_temp(100) single_temp_data.save_to_itx() # GenerateNrgFit(dat).plot_nrg_fit().show()
    # NOTE(review): continuation of a plotting function whose def is above this chunk.
    plotter = OneD(dat=dat)
    fig = plotter.figure(xlabel=dat.Logs.ylabel,
                         ylabel=f'{name} {units}',
                         title=f'Dat{dat.datnum}: {name} vs {dat.Logs.ylabel}')
    trace = _get_param_trace(fits, param, dat.Data.get_data('y'))
    fig.add_trace(trace)
    return fig


def _get_param_trace(fits: List, param: str, y_array: np.ndarray):
    """Build a markers+lines trace of the fitted `param` value per row fit, plotted against y_array."""
    plotter = OneD(dat=None)
    trace = plotter.trace(x=y_array,
                          data=[getattr(fit.best_values, param) for fit in fits],
                          mode='markers+lines')
    return trace


if __name__ == '__main__':
    # Plot theta and center position per row for the HQPC tuning dats.
    fits = []
    dats = get_dats(HQPC_TUNING[7:])
    for dat in dats:
        # fig = plot_2d(dat, differentiated=True)
        # fig.show()
        # fits.append(fit_centers(dat))
        fig = plot_param(dat, 'theta')
        fig.show()
        fig = plot_param(dat, 'mid')
        fig.show()
    # for fit, dat in zip(fits, dats):
    #     print(f'Dat{dat.datnum}:\n'
    #           f'Slope: {fit.best_values["slope"]:.3f}\n'
    #           f'Intercept: {fit.best_values["intercept"]:.2f}\n')
    #
          f'\tWidth: {C.PM}{fit_width}mV\n'
          f'\tAmp: {fit.best_values.amp:.3f}{y_units}\n'
          f'\tTheta: {fit.best_values.theta:.2f}mV\n'
          f'\tLin: {fit.best_values.lin:.3g}{y_units}/mV\n'
          f'\tConst: {fit.best_values.const:.1f}{y_units}\n'
          f'\tCenter: {fit.best_values.mid:.1f}mV\n'
          + add_print)
    fig_fit.show()
    fig_minus.show()

    # Optionally also plot the fitted transition parameter (theta) vs ESC.
    if plot_transition_values:
        transition_only = True
        fit_name = save_name
        param = 'theta'
        if transition_only:
            all_dats = get_dats(transition_datnums)
            fig = transition_fig(
                dats=all_dats,
                xlabel='ESC /mV',
                title_append=' vs ESC for Transition Only scans',
                param=param)
            # NOTE(review): zip over a single datnum list means only 'Set 1' is ever used;
            # 'Set 2' is dead unless another list is added.
            for dnums, label in zip([transition_datnums], ['Set 1', 'Set 2']):
                all_dats = get_dats(dnums)
                fig.add_trace(
                    transition_trace(all_dats,
                                     x_func=x_func,
                                     from_square_entropy=False,
                                     fit_name=save_name,
                                     param=param,
                                     label=label))
            print(
# for param in ['mid', 'const', 'lin', 'theta', 'amp']: # fig = plot_per_row_of_transition_param(dats, param_name=param, # x=[dat.Logs.fds['ESS'] for dat in dats], xlabel='ESS /mV', # stdev_only=False) # fig.show(renderer='browser') # fig = plot_stdev_of_avg(dats[0]) # for dat in dats[1:]: # fig.add_trace(trace_stdev_of_avg(dat)) # fig = waterfall_stdev_of_avg(dats) # fig.show(renderer='browser') # datnums = [702, 703, 707, 708] datnums = [7436, 7435] all_dats = get_dats(datnums) plotter = OneD(dats=all_dats) fig = plotter.figure( xlabel='Time /s', ylabel='Current /Arbitrary', title= f'Dats{all_dats[0].datnum}-{all_dats[-1].datnum}: Transition ReadVsTime<br>Decimated to 10Hz', ) for dat, name, bias in zip(all_dats, [ 'On Transition 300uV', 'Off Transition 300uV', 'On Transition 500uV', 'Off Transition 500uV' ], [300, 300, 500, 500]): data = dat.Data.get_data('i_sense') numpts = data.shape[-1] time_elapsed = numpts / dat.Logs.measure_freq
# fig_x_func = lambda dat: np.linspace(dat.Data.get_data('sweepgates_y')[0][1]/10, # dat.Data.get_data('sweepgates_y')[0][2]/10, # dat.Data.get_data('y').shape[0]) # x_label = "HQPC bias /nA" # # # fig_x_func = lambda dat: dat.Data.get_data('y') # # x_label = dats[-1].Logs.ylabel # # for dat in dats[-1:]: # fig = dcbias_single_dat(dat, fig_x_func=fig_x_func, x_label=x_label) # fig.show() # Multi Dat DCbias # all_dats = get_dats((6449, 6456 + 1)) # all_dats = get_dats((6912, 6963 + 1)) all_dats = get_dats((6960, 6963 + 1)) # all_dats = get_dats((7437, 7844 + 1)) # with ProcessPoolExecutor() as pool: # dones = pool.map(temp_calc, [dat.datnum for dat in all_dats]) # for num in dones: # print(num) dats_by_temp = sort_by_temps(all_dats) figs = [] for temp, dats in progressbar(dats_by_temp.items()): dats = order_list(dats, [dat.Logs.fds['HO1/10M'] for dat in dats]) fig = dcbias_multi_dat(dats) fig.update_layout( title= f'Dats{min([dat.datnum for dat in dats])}-{max([dat.datnum for dat in dats])}: DC bias ' f'at {np.nanmean([dat.Logs.temps.mc*1000 for dat in dats]):.0f}mK')
# ALL = list(range(6811, 6910 + 1))# + list(range(6964, 7083+1)) # 20 positions along transition at 100, 500, 300, 50, ~10mK (all no heating) # ALL = list(range(6964, 7083 + 1)) ALL = list(range(7084, 7093+1)) # ALL = list(range(7094, 7112+1)) # ALL = list(range(7129, 7302+1)) for num in [7074, 7217, 7226, 7225, 7232, 7231, 7233, 7289, 7283, 7277, 7279, 7275, 7282, 7284, 7281, 7280]: if num in ALL: ALL.remove(num) # ALL.remove(7074) if __name__ == '__main__': # Loading all_dats = get_dats(ALL) dats_by_temp = sort_by_temps(all_dats) dats_by_coupling_gate = sort_by_coupling(all_dats) # Fitting run_simple_fits(all_dats) run_varying_width_fits(dats_by_temp, {500: 200, 400: 150, 300: 100, 200: 75, 100: 50, 50: 30, 10: 20}) # Checking # check_min_max_temps() # print(get_specific_dat(100, -230).datnum) # check_centers() # check_rough_broadening_vs_temp().show() # plotting fig1 = theta_slope_in_weakly_coupled(all_dats, show_intermediate=False)
# save_to_igor_itx(file_path=f'fig3_gamma_vs_coupling.itx', xs=[gamma_cg_vals], datas=[gts], # names=['gamma_over_ts'], # x_labels=['Coupling Gate (mV)'], # y_labels=['Gamma/kT']) # # # plotting gamma_vs_coupling # fig, ax = plt.subplots(1, 1) # ax = gamma_vs_coupling(ax, coupling_gates=gamma_cg_vals, gammas=gts) # ax.plot((x_ := np.linspace(-375, -235)), np.power(10, fit.eval(x=x_)), linestyle=':') # plt.tight_layout() # fig.show() # ########################################################################## # Data for integrated_entropy nrg_fit_name = 'csq_forced_theta' entropy_dats = get_dats([2164, 2121, 2167, 2133]) # tonly_dats = get_dats([dat.datnum + 1 for dat in entropy_dats]) tonly_dats = get_dats([dat.datnum for dat in entropy_dats]) datas, gts = [], [] fit_datas = [] for dat, tonly_dat in zip(entropy_dats, tonly_dats): data = get_integrated_data( dat, fit_name=nrg_fit_name, zero_point=-350, csq_datnum=csq_datnum, which_linear_theta_fit=which_linear_theta_fit) fit = tonly_dat.NrgOcc.get_fit(name=nrg_fit_name) gt = fit.best_values.g / \ fit.best_values.theta
# # Plot Data dN/dT # fig.add_trace(plotter.trace(x=occ_x, data=(data_dndt*1.40-0.25), mode='lines+markers', name='Data')) # # for i in range(40, 47, 1): # fig.add_trace(plotter.trace(data=dndts[i]/np.max(dndts[i]), x=occs[i], name=f'T = {ts[i]:.2g}', mode='lines')) # # [plotter.add_line(fig, v, color='black', linetype='dash') for v in [0, 1]] # # fig.update_layout(template='simple_white') # fig.show() # # fig.write_image('dndt_vs_Occ_many.svg') # dats = get_dats([ 2102, 7046, 7084, 7094 ]) # Last CD, This CD slow sweep, This CD same sweep, This CD fast sweep plotter = OneD(dats=dats) plotter.RESAMPLE_METHOD = 'downsample' # So jumps are still obvious instead of binning plotter.MAX_POINTS = 2000 single_figs = [] row = 0 fig = plotter.figure(xlabel='ACC /mV', ylabel='Current /nA', title=f'Comparing Transition Noise: Row{row}') for dat in dats: fig.add_trace( plotter.trace( data=dat.Transition.data[row],
# new_x = (x - mid - gamma*(-1.76567) - theta*(-1)) / gamma new_x = (x - mid) / gamma # new_x = (x - mid) return new_x, new_data if __name__ == '__main__': set_default_rcParams() from dat_analysis.dat_object.make_dat import get_dats, get_dat from temp import get_centered_x_at_half_occ # csq_datnum = 2197 csq_datnum = None # Data for weakly coupled dN/dTs # all_dats = get_dats([2097, 2103, 2107, 2109]) all_dats = get_dats([2164]) # all_dats = get_dats([2097, 2103, 2109]) all_dats = U.order_list(all_dats, [dat.Logs.fds['ESC'] for dat in all_dats]) datas = [ get_avg_entropy_data(dat, center_func=_center_func, csq_datnum=csq_datnum) for dat in all_dats ] xs = [data.x / 100 for data in datas] # /100 to convert to real mV dndts = [data.data for data in datas] U.save_to_igor_itx(
if __name__ == '__main__':
    # Re-process entropy/transition dats (CSQ mapping + averaged i_sense); the loop body
    # is currently disabled (pass) with the processing options kept for easy re-enabling.
    # compare_nrg_with_i_sense_for_single_dat(datnum=2164, csq_map_datnum=2166,
    #                                         show_2d_centering_comparsion=False,
    #                                         show_1d_fit_comparison=True)
    # run_weakly_coupled_csq_mapped_nrg_fit(2164, 2166)

    # entropy_dats = get_dats(range(2095, 2142 + 1, 2))
    # transition_dats = get_dats(range(2096, 2142 + 1, 2))
    #
    # entropy_dats = get_dats([2164, 2167, 2170, 2121, 2213])
    # transition_dats = get_dats([dat.datnum + 1 for dat in entropy_dats])
    entropy_dats = get_dats(range(5303, 5307 + 1))
    transition_dats = []

    all_dats = entropy_dats + transition_dats
    # csq_dats = get_dats((2185, 2208 + 1))  # CSQ dats, NOT correctly ordered
    single_csq = get_dat(2197)

    for dat in progressbar(entropy_dats + transition_dats):
        # data_2d = get_2d_i_sense_csq_mapped(dat, single_csq, overwrite=True)
        # get_avg_i_sense_data(dat, csq_datnum=single_csq.datnum,
        #                      center_func=_center_func,
        #                      overwrite=True)
        # get_avg_i_sense_data(dat, csq_datnum=None,
        #                      center_func=_center_func,
        #                      overwrite=True)
        pass
                         ylabel='Current (nA)',
                         title='CS current vs CSQ gate')
    # NOTE(review): continuation of a function defined above this chunk.
    x = dat.Data.x
    data = dat.Data.i_sense
    if cutoff:
        # Truncate everything above the cutoff gate voltage.
        upper_lim = U.get_data_index(x, cutoff)
        x, data = x[:upper_lim], data[:upper_lim]
    fig.add_trace(plotter.trace(data=data, x=x))
    fig.show()
    return Data1D(x=x, data=data)


if __name__ == '__main__':
    from dat_analysis.dat_object.make_dat import get_dat, get_dats

    dats = get_dats([5774, 5775, 5777
                     ])  # Gamma broadened entropy with 300, 100, 50uV CS bias
    names = ['Usual settings (300uV bias)', '100uV CS bias', '50uV CS bias']
    igor_names = [
        'gamma_300uv_csbias', 'gamma_100uv_csbias', 'gamma_50uv_csbias'
    ]
    filename = f'sup_gamma_varying_csbias.itx'

    # dats = get_dats([7356, 7428, 7845])  # Normal, 1.5 instead of 2.5nA heating, 50mK (and corresponding heat) vs 100mK
    # names = ['Usual settings', 'Reduced heating bias', 'Lower Fridge Temp']
    # igor_names = ['gamma_100mk_normalheat', 'gamma_100mk_lowheat', 'gamma_50mk_normalheat']
    # filename = f'sup_gamma_varying_heat.itx'

    # Ensure each dat has been initialised (fits etc.) before plotting/exporting.
    for dat in progressbar(dats):
        run_dat_init(dat, overwrite=False)

    # for dat in dats:
        # NOTE(review): continuation of a function (outer loop presumably over thetas) defined
        # above this chunk.
        mids = []
        for g in gs:
            params['g'].value = g
            occs = model.eval(x=x, params=params)
            # Gate voltage where occupation crosses 0.5 for this gamma.
            mids.append(x[U.get_data_index(occs, 0.5, is_sorted=True)])
        all_mids.append(mids)

    plotter = OneD(dat=None)
    fig = plotter.figure(xlabel='Gamma /mV',
                        ylabel='Shift of 0.5 OCC',
                        title='Shift of 0.5 Occupation vs Theta and G')
    fig.update_layout(legend=dict(title='Theta /mV'))
    # One line per theta: 0.5-occupation position as a function of gamma.
    for mids, theta in zip(all_mids, thetas):
        fig.add_trace(
            plotter.trace(data=mids, x=gs, name=f'{theta:.1f}', mode='lines'))
    fig.show()
    return fig


if __name__ == '__main__':
    nrg = NRGData.from_old_mat()
    # plotting_center_shift()

    # Print a summary of the measurement settings for each dat in the range.
    all_dats = get_dats((5780, 5795 + 1))
    for dat in all_dats:
        print(f'Dat{dat.datnum}\n'
              f'CSbias: {(dat.Logs.bds["CSBIAS/100"]+1.3)*10:.0f}uV\n'
              f'Repeats: {len(dat.Data.get_data("y"))}\n'
              f'ESP: {dat.Logs.fds["ESP"]:.1f}mV\n'
              f'ACC-Center: {np.nanmean(dat.Data.get_data("x")):.0f}mV\n')
    # NOTE(review): continuation of a fit-helper function defined above this chunk.
    out = dat.SquareEntropy.get_Outputs(name='sps_50', process_params=pp)
    fit = dat.Entropy.get_fit(which='avg',
                              name='sps_50',
                              x=out.x,
                              data=out.average_entropy_signal,
                              check_exists=False)
    return fit


if __name__ == '__main__':
    # Dats chosen for comparison at two fridge temperatures.
    chosen_dats = {
        8797: '100mK Similar',
        8710: '50mK Similar',
        8808: '100mK Different',
        8721: '50mK Different'
    }

    # DC-bias reference scans keyed by temperature (mK).
    dc_bias_dats = {
        100: get_dats((4284, 4295), datname='s2e', exp2hdf=Sep20.SepExp2HDF),
        50: get_dats((8593, 8599), datname='s2e', exp2hdf=Sep20.SepExp2HDF),
    }
    # Bias log keys differ between the two temperature datasets, hence the zip pairing.
    dc_bias_infos = {k: dcbias.DCbiasInfo.from_dats(dc_bias_dats[k], bias_key=bias_key, force_centered=False)
                     for k, bias_key in zip(dc_bias_dats, ['R2T(10M)', 'R2T/0.001'])}

    # dats = [get_dat(num, datname='s2e', exp2hdf=Sep20.SepExp2HDF, overwrite=False) for num in datnums]
    #
    # for dat in dats:
    #     if dat.Entropy._integration_info_exists('default') is False:
    #         if dat.datnum in [8797, 8808]:
    #             temp = 100
    #         elif dat.datnum in [8710, 8721]:
    #             temp = 50
    #         else: