def __init__(self, spectra_file, data_file, calibrated_offset_us,
             calibrated_source_to_detector_m, folder, norm_factor=1,
             baseline=False, norm_to_file=None, slice_start=None,
             slice_end=None, energy_min=1e-5, energy_max=1000,
             energy_step=0.01, database='ENDF_VII'):
    """
    Prepare the measured data for resonance fitting.

    Loads the experiment files, optionally slices and normalizes them, then
    interpolates the measured signal onto a regular energy grid using the
    previously calibrated instrument parameters.

    :param spectra_file: beam spectra file name
    :param data_file: measured data file name
    :param calibrated_offset_us: calibrated detector time offset (us)
    :param calibrated_source_to_detector_m: calibrated flight path length (m)
    :param folder: folder containing both files
    :param norm_factor: scaling factor applied during normalization
    :param baseline: True -> remove baseline/background by detrend
    :param norm_to_file: file to normalize against (skipped when None)
    :param slice_start: first image index kept (None -> from beginning)
    :param slice_end: last image index kept (None -> to end)
    :param energy_min: lower bound of interpolation grid (eV)
    :param energy_max: upper bound of interpolation grid (eV)
    :param energy_step: grid spacing (eV)
    :param database: cross-section database name (e.g. 'ENDF_VII')
    """
    self.experiment = Experiment(spectra_file=spectra_file,
                                 data_file=data_file,
                                 folder=folder)
    self.energy_min = energy_min
    self.energy_max = energy_max
    self.energy_step = energy_step
    self.database = database
    self.calibrated_offset_us = calibrated_offset_us
    self.calibrated_source_to_detector_m = calibrated_source_to_detector_m
    self.raw_layer = None
    self.experiment.slice(start=slice_start, end=slice_end)
    self.baseline = baseline
    if norm_to_file is not None:
        self.experiment.norm_to(norm_to_file, norm_factor=norm_factor)
    # Interpolate measured x/y onto the regular energy grid once, up front;
    # fitting then works entirely on (exp_x_interp, exp_y_interp).
    self.exp_x_interp, self.exp_y_interp = self.experiment.xy_scaled(
        energy_min=self.energy_min,
        energy_max=self.energy_max,
        energy_step=self.energy_step,
        x_type='energy',
        y_type='attenuation',
        offset_us=self.calibrated_offset_us,
        source_to_detector_m=self.calibrated_source_to_detector_m,
        baseline=self.baseline)
    # Fit-state placeholders, populated by later calls such as fit().
    self.fit_result = None
    self.fitted_density_gcm3 = None
    self.fitted_thickness_mm = None
    self.fitted_residual = None
    self.fitted_gap = None
    self.fitted_fjac = None
    self.fitted_layer = None
    self.fitted_simulation = None
    self.layer_list = None
    # (fix) removed a second, redundant `self.raw_layer = None` that merely
    # duplicated the assignment made above
    self.fitted_iso_result = None
    self.fitted_iso_residual = None
    self.params_for_fit = None
    self.params_for_iso_fit = None
    self.isotope_stack = {}
    self.sample_vary = None
    self.df = None
def test_xy_scaled(self):
    """xy_scaled() must resample x onto a grid with the requested step."""
    _experiment = Experiment(data_file=self.data_file,
                             spectra_file=self.spectra_file,
                             folder=self.folder,
                             source_to_detector_m=self.source_to_detector_m,
                             offset_us=self.offset_us)
    _x_grid, _y_grid = _experiment.xy_scaled(
        energy_min=self.energy_min,
        energy_max=self.energy_max,
        energy_step=self.energy_step,
        x_type='energy',
        y_type='attenuation',
        offset_us=0,
        source_to_detector_m=15,
        baseline=False)
    _actual_step = _x_grid[1] - _x_grid[0]
    # Grid spacing must match the requested step to within 0.1 %.
    self.assertAlmostEqual(_actual_step,
                           self.energy_step,
                           delta=self.energy_step / 1000)
def test_get_x(self):
    """get_x() must return the known energy and wavelength axes."""
    _experiment = Experiment(data_file='_data_xy_unit_test.txt',
                             spectra_file=self.spectra_file,
                             folder=self.folder,
                             source_to_detector_m=self.source_to_detector_m,
                             offset_us=self.offset_us)
    # Expected tails of the x-axis for each supported x_type.
    _expected_by_type = {
        'energy': [5.825324e+00, 5.821177e+00, 5.817034e+00, 5.812896e+00],
        'lambda': [0.118490, 0.118532, 0.118575, 0.118617],
    }
    for _x_type, _expected in _expected_by_type.items():
        _returned = _experiment.get_x(x_type=_x_type,
                                      offset_us=0.,
                                      source_to_detector_m=15)
        # Compare the last four points, matching the original assertions.
        for _idx in (-1, -2, -3, -4):
            self.assertAlmostEqual(_returned[_idx],
                                   _expected[_idx],
                                   delta=0.000001)
def test_get_y(self):
    """get_y() must return matching transmission/attenuation values."""
    _experiment = Experiment(data_file='_data_xy_unit_test.txt',
                             spectra_file=self.spectra_file,
                             folder=self.folder,
                             source_to_detector_m=self.source_to_detector_m,
                             offset_us=self.offset_us)
    # (y_type, expected values) pairs; attenuation is 1 - transmission.
    _cases = [
        ('transmission',
         [1.003423, 1.008694, 1.008373, 1.004356, 1.008168, 1.016091]),
        ('attenuation',
         [-0.003423, -0.008694, -0.008373, -0.004356, -0.008168, -0.016091]),
    ]
    for _y_type, _expected in _cases:
        _returned = _experiment.get_y(y_type=_y_type, baseline=False)
        # Check the last four points, as the original test did.
        for _idx in range(-1, -5, -1):
            self.assertAlmostEqual(_returned[_idx],
                                   _expected[_idx],
                                   delta=0.000001)
def test_load_txt_csv(self):
    """Loading the unit-test txt file must yield the exact stored column."""
    _experiment = Experiment(data_file='_data_xy_unit_test.txt',
                             spectra_file=self.spectra_file,
                             folder=self.folder,
                             source_to_detector_m=self.source_to_detector_m,
                             offset_us=self.offset_us)
    _expected = np.array(
        [1.003423, 1.008694, 1.008373, 1.004356, 1.008168, 1.016091])
    _returned = np.array(_experiment.data[0])
    # Exact equality: these values come straight from the file, unprocessed.
    for _idx, _value in enumerate(_expected):
        self.assertEqual(_returned[_idx], _value)
def __init__(
        self,
        # Initialize ResoFit.experiment
        spectra_file: str,
        data_file: str,
        folder: str,
        exp_source_to_detector_m,
        exp_offset_us,
        baseline: bool,
        baseline_deg: int,
        # Initialize ResoFit.simulation
        layer: fit_util.Layer,
        energy_min,
        energy_max,
        energy_step,
        database: str,
        x_type: str,
        y_type: str):
    """
    Pair a Simulation (ideal signal) with an Experiment (measured signal)
    so instrument parameters can later be calibrated against each other.

    :param spectra_file: beam spectra file name
    :param data_file: measured data file name
    :param folder: folder containing both files
    :param exp_source_to_detector_m: initial guess of flight path length (m)
    :param exp_offset_us: initial guess of detector time offset (us)
    :param baseline: True -> remove baseline/background by detrend
    :param baseline_deg: polynomial degree used for baseline removal
    :param layer: sample description, a fit_util.Layer()
    :param energy_min: lower bound of simulation grid (eV)
    :param energy_max: upper bound of simulation grid (eV)
    :param energy_step: simulation grid spacing (eV)
    :param database: cross-section database name
    :param x_type: default x-axis type for plotting/peak finding
    :param y_type: default y-axis type for plotting/peak finding
    """
    self.x_type = x_type
    self.y_type = y_type
    self.energy_min = energy_min
    self.energy_max = energy_max
    self.energy_step = energy_step
    # (fix) keep the baseline settings on the instance: export() reads
    # self.baseline later and previously hit AttributeError because the
    # values were only forwarded to Experiment, never stored here.
    self.baseline = baseline
    self.baseline_deg = baseline_deg
    self.simulation = Simulation(energy_min=energy_min,
                                 energy_max=energy_max,
                                 energy_step=energy_step,
                                 database=database)
    self.simulation.add_Layer(layer=layer)
    self.experiment = Experiment(
        spectra_file=spectra_file,
        data_file=data_file,
        folder=folder,
        source_to_detector_m=exp_source_to_detector_m,
        offset_us=exp_offset_us,
        baseline=baseline,
        baseline_deg=baseline_deg)
    # NOTE(review): `_exp_time_offset_us` is not defined in this view —
    # presumably a module-level constant; confirm it exists where this
    # class is declared.
    self.experiment.t_start_us = self.experiment.t_start_us + _exp_time_offset_us
    self.init_source_to_detector_m = exp_source_to_detector_m
    self.init_offset_us = exp_offset_us
    # Calibration results, populated by calibrate().
    self.calibrated_offset_us = None
    self.calibrated_source_to_detector_m = None
    self.calibrate_result = None
    self.params_to_calibrate = None
class Calibration(object):
    """Calibrate instrument parameters (flight path length and detector
    time offset) by least-squares matching a simulated resonance signal to
    the measured one, then detect, index, and analyze resonance peaks.

    Holds a ``Simulation`` (ideal signal) and an ``Experiment`` (measured
    signal); calibration results live on ``self.calibrated_*`` attributes.
    """

    def __init__(
            self,
            # Initialize ResoFit.experiment
            spectra_file: str,
            data_file: str,
            folder: str,
            exp_source_to_detector_m,
            exp_offset_us,
            baseline: bool,
            baseline_deg: int,
            # Initialize ResoFit.simulation
            layer: fit_util.Layer,
            energy_min,
            energy_max,
            energy_step,
            database: str,
            x_type: str,
            y_type: str):
        """
        Initialization with passed file location and sample info

        :param spectra_file: beam spectra file name
        :param data_file: measured data file name
        :param folder: folder containing both files
        :param exp_source_to_detector_m: initial guess of flight path (m)
        :param exp_offset_us: initial guess of detector time offset (us)
        :param baseline: True -> to remove baseline/background by detrend
        :type baseline: boolean
        :param baseline_deg: polynomial degree used for baseline removal
        :param layer: sample description, a fit_util.Layer()
        :param energy_min: lower bound of simulation grid (eV)
        :param energy_max: upper bound of simulation grid (eV)
        :param energy_step: simulation grid spacing (eV)
        :param database: cross-section database name
        :param x_type: default x-axis type for plotting/peak finding
        :param y_type: default y-axis type for plotting/peak finding
        """
        self.x_type = x_type
        self.y_type = y_type
        self.energy_min = energy_min
        self.energy_max = energy_max
        self.energy_step = energy_step
        self.simulation = Simulation(energy_min=energy_min,
                                     energy_max=energy_max,
                                     energy_step=energy_step,
                                     database=database)
        self.simulation.add_Layer(layer=layer)
        self.experiment = Experiment(
            spectra_file=spectra_file,
            data_file=data_file,
            folder=folder,
            source_to_detector_m=exp_source_to_detector_m,
            offset_us=exp_offset_us,
            baseline=baseline,
            baseline_deg=baseline_deg)
        # NOTE(review): `_exp_time_offset_us` is not defined in this view —
        # presumably a module-level constant; confirm.
        # NOTE(review): `baseline` is not stored on self here, yet export()
        # below reads `self.baseline` — that path will raise AttributeError;
        # consider adding `self.baseline = baseline`.
        self.experiment.t_start_us = self.experiment.t_start_us + _exp_time_offset_us
        self.init_source_to_detector_m = exp_source_to_detector_m
        self.init_offset_us = exp_offset_us
        # Calibration results, populated by calibrate().
        self.calibrated_offset_us = None
        self.calibrated_source_to_detector_m = None
        self.calibrate_result = None
        self.params_to_calibrate = None

    def calibrate(self,
                  source_to_detector_m=None,
                  offset_us=None,
                  vary='all',
                  each_step=False):
        """
        calibrate the instrumental parameters: source-to-detector-distance & detector delay

        :param each_step: boolean. True -> show values and chi^2 of each step
        :param source_to_detector_m: estimated distance in m
        :param offset_us: estimated time offset in us
        :param vary: vary one of or both of 'source_to_detector' and 'offset' to calibrate (default: 'all')
        :return: lmfit MinimizerResult
        """
        # Overwrite init values if input detected
        if source_to_detector_m is None:
            source_to_detector_m = self.init_source_to_detector_m
        if offset_us is None:
            offset_us = self.init_offset_us
        vary_type_list = ['source_to_detector', 'offset', 'all', 'none']
        if vary not in vary_type_list:
            raise ValueError(
                "'vary=' can only be one of '{}'".format(vary_type_list))
        # Ideal signal that the measured one is matched against.
        simu_x = self.simulation.get_x(
            x_type='energy',
            offset_us=offset_us,
            source_to_detector_m=source_to_detector_m)
        simu_y = self.simulation.get_y(y_type='attenuation')
        # Translate `vary` into per-parameter vary flags; 'none' also skips
        # the minimization entirely (_run = False).
        _run = True
        if vary == 'all':
            source_to_detector_vary_tag = True
            offset_vary_tag = True
        elif vary == 'source_to_detector':
            source_to_detector_vary_tag = True
            offset_vary_tag = False
        elif vary == 'offset':
            source_to_detector_vary_tag = False
            offset_vary_tag = True
        else:  # vary == 'none':
            source_to_detector_vary_tag = False
            offset_vary_tag = False
            _run = False
        self.params_to_calibrate = Parameters()
        self.params_to_calibrate.add('source_to_detector_m',
                                     value=source_to_detector_m,
                                     vary=source_to_detector_vary_tag)
        self.params_to_calibrate.add('offset_us',
                                     value=offset_us,
                                     vary=offset_vary_tag)
        # Print before
        print(
            "+----------------- Calibration -----------------+\nParams before:"
        )
        self.params_to_calibrate.pretty_print()
        # Use lmfit to obtain 'source_to_detector_m' & 'offset_us' to minimize 'y_gap_for_calibration'
        if _run:
            self.calibrate_result = minimize(
                y_gap_for_calibration,
                self.params_to_calibrate,
                method='leastsq',
                args=(simu_x, simu_y, self.energy_min, self.energy_max,
                      self.energy_step, self.experiment, 'energy',
                      'attenuation', each_step))
            # Print after
            print("\nParams after:")
            self.calibrate_result.__dict__['params'].pretty_print()
            # Print chi^2
            # self.calibrated_residual = self.calibrate_result.__dict__['residual']
            print("Calibration chi^2 : {}\n".format(
                self.calibrate_result.__dict__['chisqr']))
            # Store the fitted parameter values for later use.
            self.calibrated_offset_us = self.calibrate_result.__dict__[
                'params'].valuesdict()['offset_us']
            self.calibrated_source_to_detector_m = \
                self.calibrate_result.__dict__['params'].valuesdict()['source_to_detector_m']
            return self.calibrate_result
        else:
            # vary == 'none': accept the input values unchanged.
            self.calibrated_offset_us = offset_us
            self.calibrated_source_to_detector_m = source_to_detector_m
            print(
                "\ncalibrate() was not run as requested, input values used:\n"
                "calibrated_offset_us = {}\ncalibrated_source_to_detector_m = {}"
                .format(offset_us, source_to_detector_m))
            # self.experiment.xy_scaled(energy_min=self.energy_min,
            #                           energy_max=self.energy_max,
            #                           energy_step=self.energy_step,
            #                           x_type='energy',
            #                           y_type='attenuation',
            #                           offset_us=offset_us,
            #                           source_to_detector_m=source_to_detector_m,
            #                           )

    def __find_peak(self, thres, min_dist):
        """Run peak detection on the experiment; requires prior calibration.

        :param thres: peak-detection threshold
        :param min_dist: minimum distance between detected peaks
        :return: the experiment's peak dict
        """
        # load detected peak with x in image number
        # if self.calibrate_result is None:
        if self.calibrated_source_to_detector_m is None or self.calibrated_offset_us is None:
            raise ValueError("Instrument params have not been calibrated.")
        self.experiment.find_peak(x_type=self.x_type,
                                  y_type=self.y_type,
                                  thres=thres,
                                  min_dist=min_dist)
        # self.experiment.o_peak._scale_peak_df(energy_min=self.energy_min, energy_max=self.energy_max,
        #                                       )
        return self.experiment.o_peak.peak_dict

    def index_peak(self,
                   thres_exp,
                   min_dist_exp,
                   thres_map,
                   min_dist_map,
                   rel_tol,
                   impr_reso=True):
        """Detect experimental peaks (if not yet done), build the simulated
        peak map, and index experimental peaks against it within rel_tol.
        """
        if self.experiment.o_peak is None:
            self.__find_peak(thres=thres_exp, min_dist=min_dist_exp)
        # find peak map using Simulation.peak_map()
        _peak_map_dict = self.simulation.peak_map(
            thres=thres_map,
            min_dist=min_dist_map,
            impr_reso=impr_reso,
            x_type=self.x_type,
            y_type=self.y_type,
            offset_us=self.calibrated_offset_us,
            source_to_detector_m=self.calibrated_source_to_detector_m,
            t_unit=self.experiment.t_unit,
            t_start_us=self.experiment.t_start_us,
            time_resolution_us=self.experiment.time_resolution_us,
            num_offset=self.experiment.img_start)
        # pass peak map to Peak(); both sides must use the same axis types
        assert _peak_map_dict['x_type'] == self.experiment.o_peak.peak_dict[
            'x_type']
        assert _peak_map_dict['y_type'] == self.experiment.o_peak.peak_dict[
            'y_type']
        self.experiment.o_peak.peak_map_full = _peak_map_dict['peak_map']
        # index using Peak()
        self.experiment.o_peak.index_peak(_peak_map_dict, rel_tol=rel_tol)
        # return self.experiment.o_peak.peak_map_indexed

    def analyze_peak(self, fit_model, report=False, show_fit=False):
        """Fit the indexed peaks with `fit_model`; optionally report/plot."""
        if self.experiment.o_peak is None:
            raise AttributeError(
                "Please run 'Calibration.index_peak()' before peak analysis.")
        self.experiment.o_peak.analyze(report=report, fit_model=fit_model)
        if show_fit:
            self.experiment.o_peak.plot_fit()

    # NOTE(review): a fully commented-out draft of `calibrate_peak_pos()`
    # (advanced, peak-position-based calibration via
    # `y_gap_for_adv_calibration`) previously lived here. It referenced
    # attributes that no longer exist in this class
    # (`self.peak_map_indexed`, `self.baseline`, `experiment.x_raw/y_raw`),
    # so it has been condensed to this note; recover it from VCS history
    # if ever needed.

    def plot(self,
             x_type=None,
             y_type=None,
             t_unit='us',
             index_level='iso',
             peak_id='indexed',
             peak_exp='indexed',
             peak_height=True,
             before=False,
             interp=False,
             mixed=False,
             logx=True,
             logy=False,
             table=True,
             grid=True,
             save_fig=False):
        """Plot the calibration result: ideal vs. measured signal plus
        detected/indexed peak markers, optionally with a parameter table.

        :param x_type: x-axis type (None -> self.x_type)
        :param y_type: y-axis type (None -> self.y_type)
        :param t_unit: time unit used when x is a time axis
        :param index_level: 'iso' -> isotope-level peak names, else element
        :param peak_id: 'all' or 'indexed' ideal-peak set to mark
        :param peak_exp: 'all' or 'indexed' experimental peaks to mark
        :param peak_height: True -> draw vertical lines up to peak height
        :param before: True -> also plot raw data at the initial parameters
        :param interp: True -> plot interpolated instead of raw data
        :param mixed: True -> plot the simulated total signal
        :param logx/logy: axis scaling flags
        :param table: True -> add a before/after parameter table
        :param grid: True -> show grid
        :param save_fig: True -> save PNG and close the figure
        :return: the matplotlib axes
        """
        fit_util.check_if_in_list(peak_id, fit_util.peak_type_list)
        fit_util.check_if_in_list(peak_exp, fit_util.peak_type_list)
        fit_util.check_if_in_list(index_level, fit_util.index_level_list)
        if x_type is None:
            x_type = self.x_type
        if y_type is None:
            y_type = self.y_type
        # NOTE(review): `old_colors` is never used below.
        old_colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
        new_colors = [
            '#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
            '#e377c2', '#7f7f7f', '#bcbd22', '#17becf'
        ]
        marker_styles = [
            'o', 'v', '^', '<', '>', '8', 's', 'p', '*', 'h', 'H', 'D', 'd',
            'P', 'X'
        ]
        # One color/marker per peak family, cycled in step.
        color_cycle = cycle(new_colors)
        # color_cycle_2 = cycle(new_colors)
        # color_cycle_3 = cycle(new_colors)
        # color_cycle_4 = cycle(new_colors)
        style_cycle = cycle(marker_styles)
        simu_label = 'Ideal'
        exp_label = 'Exp'
        exp_before_label = 'Exp_init'
        exp_interp_label = 'Exp_interp'
        sample_name = ' & '.join(self.simulation.layer_list)
        fig_title = "Calibration result of sample ('{}')".format(sample_name)
        # NOTE(review): `fig` is unused; plotting goes through pyplot state.
        fig = plt.Figure()
        # plot table + graph
        if table:
            ax1 = plt.subplot2grid(shape=(10, 10),
                                   loc=(0, 1),
                                   rowspan=8,
                                   colspan=8)
        # plot graph only
        else:
            ax1 = plt.subplot(111)
        # Plot simulated total signal
        if mixed:
            _x = self.simulation.get_x(
                x_type=x_type,
                t_unit=t_unit,
                offset_us=self.calibrated_offset_us,
                source_to_detector_m=self.calibrated_source_to_detector_m,
                t_start_us=self.experiment.t_start_us,
                time_resolution_us=self.experiment.time_resolution_us,
                num_offset=self.experiment.slice_start)
            _y = self.simulation.get_y(y_type=y_type)
            ax1.plot(_x, _y, 'b-', label=simu_label, linewidth=1)
        """Plot options"""
        # 1.
        if before:
            # Plot the raw data before fitting
            _x_init = self.experiment.get_x(
                x_type=x_type,
                t_unit=t_unit,
                offset_us=self.init_offset_us,
                source_to_detector_m=self.init_source_to_detector_m,
            )
            _y_init = self.experiment.get_y(y_type=y_type)
            ax1.plot(_x_init,
                     _y_init,
                     linestyle='-',
                     linewidth=1,
                     marker='o',
                     markersize=2,
                     color='c',
                     label=exp_before_label)
        # 2.
        if interp:
            _exp_x_interp_calibrated, _exp_y_interp_calibrated = self.experiment.xy_scaled(
                x_type=x_type,
                y_type=y_type,
                energy_min=self.energy_min,
                energy_max=self.energy_max,
                energy_step=self.energy_step,
                t_unit=t_unit,
                offset_us=self.calibrated_offset_us,
                source_to_detector_m=self.calibrated_source_to_detector_m,
            )
            # plot the interpolated raw data
            ax1.plot(_exp_x_interp_calibrated,
                     _exp_y_interp_calibrated,
                     'r:',
                     label=exp_interp_label,
                     linewidth=1)
        else:
            # plot the calibrated raw data
            _x_cali = self.experiment.get_x(
                x_type=x_type,
                t_unit=t_unit,
                offset_us=self.calibrated_offset_us,
                source_to_detector_m=self.calibrated_source_to_detector_m)
            _y_cali = self.experiment.get_y(y_type=y_type)
            ax1.plot(_x_cali,
                     _y_cali,
                     linestyle='-',
                     linewidth=1,
                     marker='o',
                     markersize=2,
                     color='r',
                     label=exp_label)
        if peak_exp == 'all':
            # Mark every detected experimental peak with a black 'x'.
            # _peak_x_exp = fit_util.convert_exp_peak_df(x_type=x_type, peak_df=_peak_df_scaled, t_unit=t_unit)
            _peak_df_scaled = self.experiment.o_peak.peak_dict['df']
            _peak_x_exp = _peak_df_scaled['x']
            # if x_type == 'time':
            #     _peak_x_exp = fit_util.convert_s(x=_peak_x_exp, t_unit=t_unit)
            # _peak_y_exp = fit_util.convert_attenuation_to(y_type=y_type, y=_peak_df_scaled['y'])
            _peak_y_exp = _peak_df_scaled['y']
            ax1.scatter(
                _peak_x_exp,
                _peak_y_exp,
                c='k',
                marker='x',
                # s=30,
                # marker='o',
                # facecolors='none',
                # edgecolors='k',
                label='_nolegend_')
        # plot peaks detected and indexed
        if self.experiment.o_peak is not None:
            if self.experiment.o_peak.peak_map_indexed_dict is not None:
                # Tick marks sit just above (transmission) or just below
                # (attenuation) the signal; vlines start at the baseline.
                if y_type == 'transmission':
                    _start_point = 1
                    ax1.set_ylim(top=1.1, bottom=-0.01)
                    _pos = 1.05
                else:
                    _start_point = 0
                    ax1.set_ylim(top=1.01, bottom=-0.1)
                    _pos = -0.05
                _peak_map_indexed = self.experiment.o_peak.peak_map_indexed_dict[
                    'peak_map_indexed']
                _peak_map_full = self.experiment.o_peak.peak_map_full
                if index_level == 'iso':
                    # isotope names contain '-' (e.g. 'U-238')
                    _peak_name_list = [
                        _name for _name in _peak_map_indexed.keys()
                        if '-' in _name
                    ]
                else:
                    _peak_name_list = [
                        _name for _name in _peak_map_indexed.keys()
                        if '-' not in _name
                    ]
                if peak_id == 'all':
                    _current_peak_map = _peak_map_full
                    # _tag = 'ideal'
                else:  # peak_id == 'indexed'
                    _current_peak_map = _peak_map_indexed
                # NOTE(review): original indentation is ambiguous here; the
                # tag is placed outside the if/else so that peak_id='all'
                # does not leave `_tag` unbound — confirm intent.
                _tag = 'ideal'
                for _peak_name in _peak_name_list:
                    if len(_current_peak_map[_peak_name][_tag]) > 0:
                        _peak_x = _current_peak_map[_peak_name][_tag]['x']
                        _peak_y = _current_peak_map[_peak_name][_tag]['y']
                        if peak_exp == 'indexed':
                            _legend_name = '_nolegend_'
                        else:
                            _legend_name = _peak_name
                        _current_color = next(color_cycle)
                        _current_style = next(style_cycle)
                        # Tick mark per ideal peak position.
                        ax1.plot(_peak_x, [_pos] * len(_peak_x),
                                 '|',
                                 ms=10,
                                 color=_current_color,
                                 label=_legend_name)
                        if peak_height:
                            ax1.plot(
                                _peak_x,
                                _peak_y,
                                '_',
                                # marker=next(style_cycle_1),
                                # ms=4,
                                color=_current_color,
                                label='_nolegend_')
                            ax1.vlines(_peak_x,
                                       _start_point,
                                       _peak_y,
                                       color=_current_color,
                                       alpha=1,
                                       label='_nolegend_')
                        if peak_exp == 'indexed':
                            _peak_x_exp = _peak_map_indexed[_peak_name]['exp'][
                                'x']
                            _peak_y_exp = _peak_map_indexed[_peak_name]['exp'][
                                'y']
                            ax1.scatter(
                                _peak_x_exp,
                                _peak_y_exp,
                                marker=_current_style,
                                # ms=4,
                                color=_current_color,
                                label=_peak_name)
                        # NOTE(review): placement of this span-marker block
                        # relative to the `peak_exp` branch is a best guess
                        # from the mangled source — confirm.
                        if 'peak_span' in _peak_map_indexed[_peak_name].keys():
                            if len(_peak_map_indexed[_peak_name]['exp']) > 0:
                                _data_point_x = _peak_map_indexed[_peak_name][
                                    'peak_span']['x']
                                _data_point_y = _peak_map_indexed[_peak_name][
                                    'peak_span']['y']
                                ax1.scatter(_data_point_x,
                                            _data_point_y,
                                            label='_nolegend_')
        # Set plot limit and captions
        ax1 = fit_util.set_plt(ax1,
                               fig_title=fig_title,
                               grid=grid,
                               x_type=x_type,
                               y_type=y_type,
                               t_unit=t_unit,
                               logx=logx,
                               logy=logy)
        # Plot table
        if table:
            # ax2 = plt.subplot2grid(shape=(10, 7), loc=(0, 1), rowspan=4, colspan=5)
            # ax2.axis('off')
            # columns = list(self.calibrate_result.__dict__['params'].valuesdict().keys())
            columns_to_show = [r'$L$ (m)', r'$\Delta t$ ($\rm{\mu}$s)']
            rows = ['Before', 'After']
            _row_before = [self.init_source_to_detector_m, self.init_offset_us]
            _row_after = [
                self.calibrated_source_to_detector_m, self.calibrated_offset_us
            ]
            # for _each in columns:
            #     _row_after.append(self.calibrate_result.__dict__['params'].valuesdict()[_each])
            #     _row_before.append(self.params_to_calibrate.valuesdict()[_each])
            # NOTE(review): local `table` shadows the boolean parameter.
            table = ax1.table(
                rowLabels=rows,
                colLabels=columns_to_show,
                # colWidths=
                cellText=[_row_before, _row_after],  # rows of data values
                bbox=[0, -0.33, 1.0, 0.18]  # [left,bottom,width,height]
            )
            # table.scale(0.5, 1)
            table.auto_set_font_size(False)
            table.set_fontsize(10)
        plt.tight_layout()
        if save_fig:
            _sample_name = '_'.join(self.simulation.layer_list)
            _filename = 'calibration_' + _sample_name + '.png'
            plt.savefig(_filename, dpi=600, transparent=True)
            plt.close()
        return ax1

    def export(self,
               x_type='energy',
               y_type='attenuation',
               t_unit='us',
               index_level='iso',
               peak_id='indexed',
               before=False,
               interp=False,
               mixed=True):
        """Collect the same curves/peaks as plot() into a DataFrame, copy it
        to the clipboard, and return it.

        NOTE(review): `index_level` and `peak_id` are accepted but never
        used in this method — confirm whether filtering was intended.
        """
        simu_label = 'ideal'
        exp_label = 'exp_raw'
        exp_before_label = 'exp_init'
        exp_interp_label = 'exp_interp'
        _df = pd.DataFrame()
        _col_suffix = fit_util.get_df_col_name(x_type=x_type)
        # Simulated total signal
        if mixed:
            _x = self.simulation.get_x(
                x_type=x_type,
                t_unit=t_unit,
                offset_us=self.calibrated_offset_us,
                source_to_detector_m=self.calibrated_source_to_detector_m,
                t_start_us=self.experiment.t_start_us,
                time_resolution_us=self.experiment.time_resolution_us)
            _y = self.simulation.get_y(y_type=y_type)
            _df['x_' + simu_label] = _x
            _df['y_' + simu_label] = _y
        """Plot options"""
        # Raw data before fitting
        # NOTE(review): `self.baseline` is read here and below, but
        # __init__ never sets it — this path raises AttributeError.
        if before:
            _x_init = self.experiment.get_x(
                x_type=x_type,
                t_unit=t_unit,
                offset_us=self.init_offset_us,
                source_to_detector_m=self.init_source_to_detector_m)
            _y_init = self.experiment.get_y(y_type=y_type,
                                            baseline=self.baseline)
            _df['x_' + exp_before_label] = _x_init
            _df['y_' + exp_before_label] = _y_init
        # 2.
        if interp:
            _exp_x_interp_calibrated, _exp_y_interp_calibrated = self.experiment.xy_scaled(
                x_type=x_type,
                y_type=y_type,
                energy_min=self.energy_min,
                energy_max=self.energy_max,
                energy_step=self.energy_step,
                t_unit=t_unit,
                offset_us=self.calibrated_offset_us,
                source_to_detector_m=self.calibrated_source_to_detector_m,
                baseline=self.baseline)
            # Interpolated raw data
            _df['x_' + exp_interp_label +
                _col_suffix] = _exp_x_interp_calibrated
            _df['y_' + exp_interp_label] = _exp_y_interp_calibrated
        else:
            # plot the calibrated raw data
            _x_cali = self.experiment.get_x(
                x_type=x_type,
                t_unit=t_unit,
                offset_us=self.calibrated_offset_us,
                source_to_detector_m=self.calibrated_source_to_detector_m)
            _y_cali = self.experiment.get_y(y_type=y_type,
                                            baseline=self.baseline)
            _df['x_' + exp_label + _col_suffix] = pd.Series(_x_cali)
            _df['y_' + exp_label] = pd.Series(_y_cali)
        # plot peaks detected and indexed
        # NOTE(review): this reads `peak_map_indexed`/`peak_df_scaled`
        # attributes while plot() uses `peak_map_indexed_dict`/`peak_dict`
        # — confirm which Peak API is current.
        if self.experiment.o_peak and self.experiment.o_peak.peak_map_indexed is not None:
            _peak_df_scaled = self.experiment.o_peak.peak_df_scaled
            _peak_map_indexed = self.experiment.o_peak.peak_map_indexed
            _peak_map_full = self.experiment.o_peak.peak_map_full
            _x_peak_exp_all = fit_util.convert_exp_peak_df(
                x_type=x_type, peak_df=_peak_df_scaled, t_unit=t_unit)
            _y_peak_exp_all = fit_util.convert_attenuation_to(
                y_type=y_type, y=_peak_df_scaled['y'])
            # _df = pd.concat([_df, _peak_df_scaled], axis=1)
            _df['x_peak_exp_all'] = pd.Series(_x_peak_exp_all)
            _df['y_peak_exp_all'] = pd.Series(_y_peak_exp_all)
            x_tag = fit_util.get_peak_tag(x_type=x_type)
            for _peak_name in _peak_map_indexed.keys():
                if len(_peak_map_full[_peak_name]['ideal']) > 0:
                    _x_peak_ideal_all = _peak_map_full[_peak_name]['ideal'][
                        x_tag]
                    _y_peak_ideal_all = _peak_map_full[_peak_name]['ideal'][
                        'y']
                    _df['x_peak_ideal_all(' + _peak_name +
                        ')'] = _x_peak_ideal_all
                    _df['y_peak_ideal_all(' + _peak_name +
                        ')'] = _y_peak_ideal_all
                if len(_peak_map_indexed[_peak_name]['ideal']) > 0:
                    _x_peak_ideal_indexed = _peak_map_indexed[_peak_name][
                        'ideal'][x_tag]
                    _y_peak_ideal_indexed = _peak_map_indexed[_peak_name][
                        'ideal']['y']
                    _x_peak_exp_indexed = _peak_map_indexed[_peak_name]['exp'][
                        x_tag]
                    _y_peak_exp_indexed = _peak_map_indexed[_peak_name]['exp'][
                        'y']
                    _df['x_peak_exp(' + _peak_name +
                        ')'] = _x_peak_exp_indexed
                    _df['y_peak_exp(' + _peak_name +
                        ')'] = _y_peak_exp_indexed
                    _df['x_peak_ideal(' + _peak_name +
                        ')'] = _x_peak_ideal_indexed
                    _df['y_peak_ideal(' + _peak_name +
                        ')'] = _y_peak_ideal_indexed
        _df.to_clipboard(index=False)
        return _df
class FitResonance(object):
    """Fit neutron-resonance transmission data against an ImagingReso simulation.

    Workflow: construct (loads + normalizes + interpolates the experiment),
    then ``fit()`` layer thickness/density, optionally ``fit_iso()`` isotopic
    ratios, then ``molar_conc()`` / ``index_peak()`` / ``plot()`` / ``export()``.
    """

    def __init__(self,
                 spectra_file,
                 data_file,
                 calibrated_offset_us,
                 calibrated_source_to_detector_m,
                 folder,
                 norm_factor=1,
                 baseline=False,
                 norm_to_file=None,
                 slice_start=None,
                 slice_end=None,
                 energy_min=1e-5,
                 energy_max=1000,
                 energy_step=0.01,
                 database='ENDF_VII'):
        """Load the experimental data and pre-compute the interpolated x/y arrays.

        :param spectra_file: TOF spectra filename inside ``folder``.
        :param data_file: counts data filename inside ``folder``.
        :param calibrated_offset_us: calibrated TOF offset (us).
        :param calibrated_source_to_detector_m: calibrated flight path (m).
        :param folder: data folder path.
        :param norm_factor: normalization factor passed to ``norm_to``.
        :param baseline: True to remove baseline when scaling y.
        :param norm_to_file: open-beam file to normalize against (skipped if None).
        :param slice_start: first index kept by ``Experiment.slice``.
        :param slice_end: last index kept by ``Experiment.slice``.
        :param energy_min: lower bound of the interpolation grid (eV).
        :param energy_max: upper bound of the interpolation grid (eV).
        :param energy_step: grid step (eV).
        :param database: cross-section database name.
        """
        self.experiment = Experiment(spectra_file=spectra_file,
                                     data_file=data_file,
                                     folder=folder)
        self.energy_min = energy_min
        self.energy_max = energy_max
        self.energy_step = energy_step
        self.database = database
        self.calibrated_offset_us = calibrated_offset_us
        self.calibrated_source_to_detector_m = calibrated_source_to_detector_m
        self.raw_layer = None
        # Trim the raw data before any normalization/interpolation.
        self.experiment.slice(start=slice_start, end=slice_end)
        self.baseline = baseline
        if norm_to_file is not None:
            self.experiment.norm_to(norm_to_file, norm_factor=norm_factor)
        # Experimental signal resampled onto the regular energy grid used by
        # the simulation during fitting.
        self.exp_x_interp, self.exp_y_interp = self.experiment.xy_scaled(
            energy_min=self.energy_min,
            energy_max=self.energy_max,
            energy_step=self.energy_step,
            x_type='energy',
            y_type='attenuation',
            offset_us=self.calibrated_offset_us,
            source_to_detector_m=self.calibrated_source_to_detector_m,
            baseline=self.baseline)
        # Results populated by fit() / fit_iso() / plot().
        self.fit_result = None
        self.fitted_density_gcm3 = None
        self.fitted_thickness_mm = None
        self.fitted_residual = None
        self.fitted_gap = None
        self.fitted_fjac = None
        self.fitted_layer = None
        self.fitted_simulation = None
        self.layer_list = None
        self.fitted_iso_result = None
        self.fitted_iso_residual = None
        self.params_for_fit = None
        self.params_for_iso_fit = None
        self.isotope_stack = {}
        self.sample_vary = None
        self.df = None

    def fit(self, raw_layer: fit_util.Layer, vary='density', each_step=False):
        """Least-squares fit of layer thickness and/or density.

        :param raw_layer: starting sample description (thickness/density per layer).
        :param vary: 'density' (default), 'thickness' or 'none'.
        :param each_step: forwarded to the residual function for verbose stepping.
        :return: the lmfit ``MinimizerResult``.
        :raises ValueError: if ``vary`` is not one of the accepted strings.
        """
        if vary not in ['density', 'thickness', 'none']:
            raise ValueError(
                "'vary=' can only be one of ['density', 'thickness', 'none']")
        # Default vary is: 'density'
        self.sample_vary = vary
        thickness_vary_tag = False
        density_vary_tag = True
        if vary == 'thickness':
            thickness_vary_tag = True
            density_vary_tag = False
        if vary == 'none':
            density_vary_tag = False
        self.raw_layer = raw_layer

        '''Load params'''
        print(raw_layer)
        self.layer_list = list(raw_layer.info.keys())
        self.params_for_fit = Parameters()
        for _each_layer in self.layer_list:
            # NOTE(review): identity check against np.NaN assumes Layer stores
            # the literal np.NaN object for "unset" — confirm against Layer().
            if self.raw_layer.info[_each_layer]['density']['value'] is np.NaN:
                # Fall back to the tabulated element/isotope density.
                self.raw_layer.info[_each_layer]['density'][
                    'value'] = pt.elements.isotope(_each_layer).density
            self.params_for_fit.add(
                'thickness_mm_' + _each_layer,
                value=self.raw_layer.info[_each_layer]['thickness']['value'],
                vary=thickness_vary_tag,
                min=0)
            self.params_for_fit.add(
                'density_gcm3_' + _each_layer,
                value=self.raw_layer.info[_each_layer]['density']['value'],
                vary=density_vary_tag,
                min=0)

        # Print before
        print(
            "+----------------- Fitting ({}) -----------------+\nParams before:"
            .format(vary))
        self.params_for_fit.pretty_print()
        # Fitting
        self.fit_result = minimize(y_gap_for_fitting,
                                   self.params_for_fit,
                                   method='leastsq',
                                   args=(self.exp_x_interp, self.exp_y_interp,
                                         self.layer_list, self.energy_min,
                                         self.energy_max, self.energy_step,
                                         self.database, each_step))
        # Print after
        print("\nParams after:")
        self.fit_result.params.pretty_print()
        # Print chi^2
        self.fitted_residual = self.fit_result.residual
        print("Fitting chi^2 : {}\n".format(sum(self.fitted_residual**2)))

        '''Export fitted params as Layer()'''
        # Save the fitted 'density' or 'thickness' in Layer()
        self.fitted_layer = Layer()
        _fitted_values = self.fit_result.params.valuesdict()
        for _each_layer in self.layer_list:
            self.fitted_layer.add_layer(
                layer=_each_layer,
                thickness_mm=_fitted_values['thickness_mm_' + _each_layer],
                density_gcm3=_fitted_values['density_gcm3_' + _each_layer])

        '''Create fitted simulation'''
        self.fitted_simulation = Simulation(energy_min=self.energy_min,
                                            energy_max=self.energy_max,
                                            energy_step=self.energy_step,
                                            database=self.database)
        for each_layer in self.layer_list:
            self.fitted_simulation.add_layer(
                layer=each_layer,
                thickness_mm=self.fitted_layer.info[each_layer]['thickness']
                ['value'],
                density_gcm3=self.fitted_layer.info[each_layer]['density']
                ['value'])
        return self.fit_result

    def fit_iso(self, layer, each_step=False):
        """Fit the isotopic ratios of one fitted layer, constrained to sum to 1.

        Must be called after :meth:`fit` (uses ``self.fitted_simulation``).

        :param layer: layer name whose isotopic ratios are varied.
        :param each_step: forwarded to the residual function for verbose stepping.
        """
        self.params_for_iso_fit = Parameters()
        self.isotope_stack[layer] = {
            'list':
            self.fitted_simulation.o_reso.stack[layer][layer]['isotopes']
            ['list'],
            'ratios':
            self.fitted_simulation.o_reso.stack[layer][layer]['isotopes']
            ['isotopic_ratio']
        }
        # lmfit parameter names cannot contain '-', so 'Gd-155' -> '155Gd'.
        _formatted_isotope_list = []
        for _each_isotope in self.isotope_stack[layer]['list']:
            _split = _each_isotope.split('-')
            _formatted_isotope_list.append(''.join(_split[::-1]))
        _params_name_list = _formatted_isotope_list

        # Form Parameters() for fitting
        for _name_index, _param_name in enumerate(_params_name_list):
            self.params_for_iso_fit.add(
                _param_name,
                value=self.isotope_stack[layer]['ratios'][_name_index],
                min=0,
                max=1)

        # Constrain sum of isotope ratios to be 1: express the last ratio as
        # 1 minus all the others so it is never an independent variable.
        _constraint_param = _params_name_list[-1]
        _params_name_list_temp = _params_name_list[:]
        _params_name_list_temp.remove(_constraint_param)
        _constraint = '1-' + '-'.join(_params_name_list_temp)
        self.params_for_iso_fit[_constraint_param].set(expr=_constraint)

        # Print params before
        print(
            "+----------------- Fitting (isotopic at.%) -----------------+\nParams before:"
        )
        self.params_for_iso_fit.pretty_print()
        # Fitting
        self.fitted_iso_result = minimize(y_gap_for_iso_fitting,
                                          self.params_for_iso_fit,
                                          method='leastsq',
                                          args=(self.exp_x_interp,
                                                self.exp_y_interp, layer,
                                                _formatted_isotope_list,
                                                self.fitted_simulation,
                                                each_step))
        # Print params after
        print("\nParams after:")
        self.fitted_iso_result.params.pretty_print()
        # Print chi^2
        self.fitted_iso_residual = self.fitted_iso_result.residual
        print("Fit iso chi^2 : {}\n".format(self.fitted_iso_result.chisqr))
        return

    def molar_conc(self):
        """Compute molar concentration (mol/cm3) before and after the fit.

        Stores molar mass and molar concentration into both ``raw_layer`` and
        ``fitted_layer`` info dicts and prints a before/after table.

        :return: ``self.fitted_layer.info``.
        """
        molar_conc_units = 'mol/cm3'
        print(
            "Molar-conc. ({})\tBefore_fit\tAfter_fit".format(molar_conc_units))
        for _each_layer in self.layer_list:
            _stack_entry = self.fitted_simulation.o_reso.stack[_each_layer][
                _each_layer]
            molar_mass_value = _stack_entry['molar_mass']['value']
            molar_mass_units = _stack_entry['molar_mass']['units']
            # Adding molar_mass to fitted_layer info
            self.fitted_layer.info[_each_layer]['molar_mass'][
                'value'] = molar_mass_value
            self.fitted_layer.info[_each_layer]['molar_mass'][
                'units'] = molar_mass_units
            # Adding molar_mass to raw_layer info
            self.raw_layer.info[_each_layer]['molar_mass'][
                'value'] = molar_mass_value
            self.raw_layer.info[_each_layer]['molar_mass'][
                'units'] = molar_mass_units
            # Adding molar_concentration to fitted_layer info
            molar_conc_value = self.fitted_layer.info[_each_layer]['density'][
                'value'] / molar_mass_value
            self.fitted_layer.info[_each_layer]['molar_conc'][
                'value'] = molar_conc_value
            self.fitted_layer.info[_each_layer]['molar_conc'][
                'units'] = molar_conc_units
            # Calculate starting molar_concentration and store in raw_layer info
            start_molar_conc_value = self.raw_layer.info[_each_layer][
                'density']['value'] / molar_mass_value
            self.raw_layer.info[_each_layer]['molar_conc'][
                'value'] = start_molar_conc_value
            self.raw_layer.info[_each_layer]['molar_conc'][
                'units'] = molar_conc_units
            print("{}\t{}\t{}".format(_each_layer, start_molar_conc_value,
                                      molar_conc_value))
        print('\n')
        return self.fitted_layer.info

    def index_peak(self,
                   thres,
                   min_dist,
                   map_thres=0.01,
                   map_min_dist=20,
                   rel_tol=5e-3,
                   isotope=False):
        """Detect experimental peaks and index them against simulated peaks.

        :param thres: peak-detection threshold for the experimental data.
        :param min_dist: minimum distance between detected experimental peaks.
        :param map_thres: threshold used when mapping simulated peaks.
        :param map_min_dist: minimum distance for the simulated peak map.
        :param rel_tol: relative tolerance used to match exp. vs ideal peaks.
        :param isotope: currently unused (the isotope mapping call is disabled).
        :return: the indexed peak map.
        """
        if self.experiment.o_peak is None:
            self.experiment.find_peak(thres=thres, min_dist=min_dist)
        self.experiment._scale_peak_with_ev(
            energy_min=self.energy_min,
            energy_max=self.energy_max,
            offset_us=self.calibrated_offset_us,
            source_to_detector_m=self.calibrated_source_to_detector_m)
        assert self.experiment.o_peak.peak_df is not None
        assert self.experiment.o_peak.peak_df_scaled is not None
        _peak_map = self.fitted_simulation.peak_map(
            thres=map_thres,
            min_dist=map_min_dist,
            impr_reso=True,
            # isotope=isotope,
        )
        self.experiment.o_peak.peak_map_full = _peak_map
        self.experiment.o_peak.index_peak(peak_map=_peak_map, rel_tol=rel_tol)
        return self.experiment.o_peak.peak_map_indexed

    def plot(self,
             error=True,
             table=True,
             grid=True,
             before=False,
             interp=False,
             total=True,
             all_elements=False,
             all_isotopes=False,
             items_to_plot=None,
             peak_mark=True,
             peak_id='indexed',
             y_type='transmission',
             x_type='energy',
             t_unit='us',
             logx=False,
             logy=False,
             save_fig=False):
        """Plot the fit result and collect every plotted curve into ``self.df``.

        :param error: plot the fitting residual (shifted below the data).
        :param table: show a before/after parameter table under the axes.
        :param grid: show the plot grid.
        :param before: also plot the simulation built from the starting layer.
        :param interp: plot interpolated exp. data instead of raw exp. data.
        :param total: plot the total fitted simulation signal.
        :param all_elements: plot each element's individual signal.
        :param all_isotopes: plot each isotope's individual signal.
        :param items_to_plot: explicit list of items to plot (fit_util.Items).
        :param peak_mark: mark detected experimental peaks with 'x'.
        :param peak_id: 'indexed' or 'all' — which peak set to tick-mark.
        :param y_type: y-axis type passed to fit_util.set_plt.
        :param x_type: x-axis type passed to fit_util.set_plt.
        :param t_unit: time unit passed to fit_util.set_plt.
        :param logx: log-scale x axis.
        :param logy: log-scale y axis.
        :param save_fig: save to 'fitting_<sample>.png' instead of showing.
        :raises ValueError: on invalid ``peak_id`` or if fit() was never run.
        """
        # Form signals from fitted_layer
        if self.fitted_simulation is None:
            self.fitted_simulation = Simulation(energy_min=self.energy_min,
                                                energy_max=self.energy_max,
                                                energy_step=self.energy_step)
            for each_layer in self.layer_list:
                self.fitted_simulation.add_layer(
                    layer=each_layer,
                    thickness_mm=self.fitted_layer.info[each_layer]
                    ['thickness']['value'],
                    density_gcm3=self.fitted_layer.info[each_layer]['density']
                    ['value'])
        if peak_id not in ['indexed', 'all']:
            # FIX: message previously listed 'full' although 'all' is accepted.
            raise ValueError("'peak=' must be one of ['indexed', 'all'].")
        simu_x = self.fitted_simulation.get_x(x_type='energy')
        simu_y = self.fitted_simulation.get_y(y_type='attenuation')

        # Get plot labels
        simu_label = 'Fit'
        simu_before_label = 'Fit_init'
        exp_label = 'Exp'
        exp_interp_label = 'Exp_interp'
        sample_name = ' & '.join(self.layer_list)
        if self.sample_vary is None:
            raise ValueError("Vary type ['density'|'thickness'] is not set.")
        fig_title = 'Fitting result of sample (' + sample_name + ')'

        # Create pd.DataFrame
        self.df = pd.DataFrame()
        # Clear any left plt
        plt.close()
        # plot table + graph
        if table is True:
            ax1 = plt.subplot2grid(shape=(10, 10),
                                   loc=(0, 1),
                                   rowspan=8,
                                   colspan=8)
        # plot graph only
        else:
            ax1 = plt.subplot(111)

        # Plot after fitting
        if total is True:
            ax1.plot(simu_x, simu_y, 'b-', label=simu_label, linewidth=1)
            # Save to df
            _live_df_x_label = simu_label + '_eV'
            _live_df_y_label = simu_label + '_attenuation'
            self.df[_live_df_x_label] = simu_x
            self.df[_live_df_y_label] = simu_y

        """Plot options"""
        # 1.
        if before is True:
            # Plot before fitting: form signals from raw_layer
            simulation = Simulation(energy_min=self.energy_min,
                                    energy_max=self.energy_max,
                                    energy_step=self.energy_step)
            for each_layer in self.layer_list:
                simulation.add_layer(
                    layer=each_layer,
                    thickness_mm=self.raw_layer.info[each_layer]['thickness']
                    ['value'],
                    density_gcm3=self.raw_layer.info[each_layer]['density']
                    ['value'])
            simu_x = simulation.get_x(x_type='energy')
            simu_y_before = simulation.get_y(y_type='attenuation')
            ax1.plot(simu_x,
                     simu_y_before,
                     'c-.',
                     label=simu_before_label,
                     linewidth=1)
            # Save to df
            _live_df_x_label = simu_before_label + '_eV'
            _live_df_y_label = simu_before_label + '_attenuation'
            self.df[_live_df_x_label] = simu_x
            self.df[_live_df_y_label] = simu_y_before

        # 2.
        if interp is True:
            # Plot exp. data (interpolated)
            x_interp, y_interp = self.experiment.xy_scaled(
                energy_max=self.energy_max,
                energy_min=self.energy_min,
                energy_step=self.energy_step,
                x_type='energy',
                y_type='attenuation',
                baseline=self.baseline,
                offset_us=self.calibrated_offset_us,
                source_to_detector_m=self.calibrated_source_to_detector_m)
            ax1.plot(x_interp,
                     y_interp,
                     'r:',
                     label=exp_interp_label,
                     linewidth=1)
            # Save to df
            _live_df_x_label = exp_interp_label + '_eV'
            _live_df_y_label = exp_interp_label + '_attenuation'
            self.df[_live_df_x_label] = x_interp
            self.df[_live_df_y_label] = y_interp
        else:
            # Plot exp. data (raw)
            exp_x = self.experiment.get_x(
                x_type='energy',
                offset_us=self.calibrated_offset_us,
                source_to_detector_m=self.calibrated_source_to_detector_m)
            exp_y = self.experiment.get_y(y_type='attenuation',
                                          baseline=self.baseline)
            ax1.plot(exp_x,
                     exp_y,
                     linestyle='-',
                     linewidth=1,
                     marker='o',
                     markersize=2,
                     color='r',
                     label=exp_label)
            # Save to df
            _df = pd.DataFrame()
            _live_df_x_label = exp_label + '_eV'
            _live_df_y_label = exp_label + '_attenuation'
            _df[_live_df_x_label] = exp_x
            _df[_live_df_y_label] = exp_y
            # Concatenate since the length of raw and simu are not the same
            self.df = pd.concat([self.df, _df], axis=1)

        # 3.
        if error is True:
            # Plot fitting differences, shifted down so they sit below the data
            error_label = 'Diff.'
            _move_below_by = 0.2
            moved_fitted_residual = self.fitted_residual - _move_below_by
            ax1.plot(simu_x,
                     moved_fitted_residual,
                     'g-',
                     label=error_label,
                     linewidth=1,
                     alpha=1)
            # Save to df
            _live_df_x_label = error_label + '_eV'
            _live_df_y_label = error_label + '_attenuation'
            self.df[_live_df_x_label] = simu_x
            self.df[_live_df_y_label] = moved_fitted_residual

        # 4.
        if all_elements is True:
            # show signal from each element
            _stack_signal = self.fitted_simulation.o_reso.stack_signal
            _stack = self.fitted_simulation.o_reso.stack
            y_axis_tag = 'attenuation'
            for _layer in _stack.keys():
                for _element in _stack[_layer]['elements']:
                    _y_axis = _stack_signal[_layer][_element][y_axis_tag]
                    ax1.plot(simu_x,
                             _y_axis,
                             label="{}".format(_element),
                             linewidth=1,
                             alpha=0.85)
                    # Save to df
                    _live_df_x_label = _element + '_eV'
                    _live_df_y_label = _element + '_attenuation'
                    self.df[_live_df_x_label] = simu_x
                    self.df[_live_df_y_label] = _y_axis

        # 4.
        if all_isotopes is True:
            # show signal from each isotope
            _stack_signal = self.fitted_simulation.o_reso.stack_signal
            _stack = self.fitted_simulation.o_reso.stack
            y_axis_tag = 'attenuation'
            for _layer in _stack.keys():
                for _element in _stack[_layer]['elements']:
                    for _isotope in _stack[_layer][_element]['isotopes'][
                            'list']:
                        _y_axis = _stack_signal[_layer][_element][_isotope][
                            y_axis_tag]
                        ax1.plot(simu_x,
                                 _y_axis,
                                 label="{}".format(_isotope),
                                 linewidth=1,
                                 alpha=1)
                        # Save to df
                        _live_df_x_label = _isotope + '_eV'
                        _live_df_y_label = _isotope + '_attenuation'
                        self.df[_live_df_x_label] = simu_x
                        self.df[_live_df_y_label] = _y_axis

        # 5.
        if items_to_plot is not None:
            # plot specified from 'items_to_plot'
            y_axis_tag = 'attenuation'
            items = fit_util.Items(o_reso=self.fitted_simulation.o_reso,
                                   database=self.database)
            # shaped() must run before values(); its return is not needed here.
            items.shaped(items_list=items_to_plot)
            _signal_dict = items.values(y_axis_type=y_axis_tag)
            for _each_label in list(_signal_dict.keys()):
                ax1.plot(simu_x,
                         _signal_dict[_each_label],
                         '--',
                         label=_each_label,
                         linewidth=1,
                         alpha=1)
                # Save to df
                _live_df_x_label = _each_label + '_eV'
                _live_df_y_label = _each_label + '_attenuation'
                self.df[_live_df_x_label] = simu_x
                self.df[_live_df_y_label] = _signal_dict[_each_label]

        # plot peaks detected and indexed
        if self.experiment.o_peak and self.experiment.o_peak.peak_map_indexed is not None:
            _peak_df_scaled = self.experiment.o_peak.peak_df_scaled
            _peak_map_indexed = self.experiment.o_peak.peak_map_indexed
            _peak_map_full = self.experiment.o_peak.peak_map_full
            if peak_mark is True:
                ax1.plot(_peak_df_scaled['x'],
                         _peak_df_scaled['y'],
                         'kx',
                         label='_nolegend_')
            if error is False:
                ax1.set_ylim(ymin=-0.1)
            for _ele_name in _peak_map_indexed.keys():
                # FIX: string comparison with '==' (was identity 'is').
                if peak_id == 'all':
                    ax1.plot(
                        _peak_map_full[_ele_name]['ideal']['x'],
                        [-0.05] * len(_peak_map_full[_ele_name]['ideal']['x']),
                        '|',
                        ms=10,
                        label=_ele_name)
                elif peak_id == 'indexed':
                    ax1.plot(
                        _peak_map_indexed[_ele_name]['exp']['x'],
                        [-0.05] *
                        len(_peak_map_indexed[_ele_name]['exp']['x']),
                        '|',
                        ms=8,
                        label=_ele_name)
                if 'peak_span' in _peak_map_indexed[_ele_name].keys():
                    _data_point_x = _peak_map_indexed[_ele_name]['peak_span'][
                        'energy_ev']
                    _data_point_y = _peak_map_indexed[_ele_name]['peak_span'][
                        'y']
                    ax1.scatter(_data_point_x,
                                _data_point_y,
                                label='_nolegend_')

        # Set plot limit and captions
        fit_util.set_plt(ax=ax1,
                         fig_title=fig_title,
                         grid=grid,
                         x_type=x_type,
                         y_type=y_type,
                         t_unit=t_unit,
                         logx=logx,
                         logy=logy)

        # Plot table
        if table is True:
            if self.fitted_iso_result is None:
                columns = list(self.fit_result.params.valuesdict().keys())
            else:
                columns = self.fit_result.var_names
            # Build LaTeX column headers: d_<layer> (mm) or rho_<layer> (g/cm3).
            columns_to_show_dict = {}
            for _each in columns:
                _split = _each.split('_')
                if _split[0] == 'thickness':
                    _name_to_show = r'$d_{\rm{' + _split[-1] + '}}$' + ' (mm)'
                else:
                    _name_to_show = r'$\rho_{\rm{' + _split[
                        -1] + '}}$' + ' (g/cm$^3$)'
                columns_to_show_dict[_each] = _name_to_show
            columns_to_show = list(columns_to_show_dict.values())
            rows = ['Before', 'After']
            _row_before = []
            _row_after = []
            for _each in columns:
                _row_after.append(
                    round(self.fit_result.params.valuesdict()[_each], 3))
                _row_before.append(
                    round(self.params_for_fit.valuesdict()[_each], 3))
            if self.fitted_iso_result is not None:
                _iso_columns = list(
                    self.fitted_iso_result.params.valuesdict().keys())
                columns = columns + _iso_columns
                _iso_columns_to_show_dict = {}
                for _each_iso in _iso_columns:
                    # '155Gd' -> superscript mass number + element letter.
                    _num_str = re.findall(r'\d+', _each_iso)[0]
                    _name_str = _each_iso[0]
                    _sup_name = r"$^{" + _num_str + "}$" + _name_str
                    _iso_columns_to_show_dict[_each_iso] = _sup_name
                _iso_columns_to_show = list(_iso_columns_to_show_dict.values())
                columns_to_show = columns_to_show + _iso_columns_to_show
                for _each in _iso_columns:
                    _row_after.append(
                        round(
                            self.fitted_iso_result.params.valuesdict()[_each],
                            3))
                    _row_before.append(
                        round(self.params_for_iso_fit.valuesdict()[_each], 3))
            table = ax1.table(rowLabels=rows,
                              colLabels=columns_to_show,
                              cellText=[_row_before, _row_after],
                              loc='upper right',
                              bbox=[0, -0.33, 1.0, 0.18])
            table.auto_set_font_size(False)
            table.set_fontsize(10)
            plt.tight_layout()

        if save_fig:
            _sample_name = '_'.join(self.layer_list)
            _filename = 'fitting_' + _sample_name + '.png'
            plt.savefig(_filename, dpi=600, transparent=True)
            plt.close()
        else:
            plt.show()

    def export(self, filename=None):
        """Export the curves collected by plot() to CSV, or the clipboard.

        :param filename: CSV path; when None the DataFrame goes to the clipboard.
        :raises ValueError: if plot() has not been run yet (``self.df`` is None).
        """
        if self.df is None:
            raise ValueError(
                "pd.DataFrame is empty, please run required step: FitResonance.plot()"
            )
        elif filename is None:
            self.df.to_clipboard(excel=True)
        else:
            self.df.to_csv(filename)
# Script fragment: load two measurements and plot them with shared
# calibration constants (NOTE: `data_file1` and `folder` are defined earlier).
data_file2 = 'Gd_thick.csv'
data_file3 = 'spheres.csv'
spectra_file = 'Image002_Spectra.txt'
repeat = 1  # normalization factor (number of repeated acquisitions)
source_to_detector_m = 16.45  # 16#16.445359069030175#16.447496101100739
offset_us = 2.752  # 0#2.7120797253959119#2.7355447625559037
# transmission = False
baseline = False
energy_xmax = 150
lambda_xmax = None
x_axis = 'number'

# # Calibrate the peak positions
experiment1 = Experiment(data_file=data_file1,
                         spectra_file=spectra_file,
                         norm_factor=repeat,
                         folder=folder)
experiment2 = Experiment(data_file=data_file2,
                         spectra_file=spectra_file,
                         norm_factor=repeat,
                         folder=folder)
# Plot both raw measurements with identical axis/calibration settings.
experiment1.plot(offset_us=offset_us,
                 source_to_detector_m=source_to_detector_m,
                 x_type=x_axis,
                 baseline=baseline,
                 energy_xmax=energy_xmax,
                 lambda_xmax=lambda_xmax)
experiment2.plot(offset_us=offset_us,
                 source_to_detector_m=source_to_detector_m,
                 x_type=x_axis,
                 baseline=baseline,
                 energy_xmax=energy_xmax,
                 lambda_xmax=lambda_xmax)
# experiment1.export_raw(offset_us=offset_us)
# x1, y1 = experiment1.xy_scaled(offset_us=offset_us, source_to_detector_m=source_to_detector_m,
# Script: compare Ta resonance data taken with 0/5/10 mm of lead shielding
# (IPTS-20439). Loads one Experiment per sample configuration.
import numpy as np
import pprint
import matplotlib.pyplot as plt
from ResoFit.experiment import Experiment
import peakutils as pku
from ResoFit.simulation import Simulation
from scipy import signal
import scipy

folder = 'data/IPTS_20439/reso_data_20439'
sample_name = ['No Pb', '5mm Pb', '10mm Pb']
data_file = [
    'Ta_no_lead_6C.csv', 'Ta_5mm_lead_4C_norm_to_2C.csv', 'Ta_10mm_lead_6C.csv'
]
norm_to_file = [
    'blank_no_lead_6C.csv', 'blank_5mm_lead_4C_norm_to_2C.csv',
    'blank_10mm_lead_6C.csv'
]
norm_factor = [1, 1, 1]  # per-sample open-beam normalization factors
spectra_file = 'Ta_lead_10mm__0__040_Spectra.txt'
baseline = False
deg = 6  # polynomial degree for baseline removal
# x_axis = 'number'
logx = False
# # # Calibrate the peak positions
x_type = 'energy'
y_type = 'transmission'
source_to_detector_m = 16.45
offset_us = 0
fmt = '-'
lw = 1
exps = {}
ax0 = None  # shared axes; first plot creates it
for _index, each_name in enumerate(sample_name):
    exps[each_name] = Experiment(spectra_file=spectra_file,
                                 data_file=data_file[_index],
                                 folder=folder,
                                 source_to_detector_m=source_to_detector_m,
                                 offset_us=offset_us)
# Script: AgI and Ta/Pb measurements (IPTS-20784) with open-beam files for
# normalization. NOTE: the final Experiment(...) call is truncated here.
import numpy as np
import pprint
import matplotlib.pyplot as plt
from ResoFit.experiment import Experiment
import peakutils as pku
from ResoFit.simulation import Simulation
from scipy import signal
import scipy

folder = 'data/IPTS_20784/reso_data_20784'
data_file1 = 'AgI.csv'
data_file2 = 'Ta_Pb_whole.csv'
# data_file2 = 'spheres_background_1.csv'
spectra_file = 'Ta_lead_10mm__0__040_Spectra.txt'
norm_to_file1 = 'blank_region.csv'
norm_to_file2 = 'OB_Pb_whole.csv'
# #
# source_to_detector_m = 16.45 # 16#16.445359069030175#16.447496101100739
# offset_us = 2.752 # 0#2.7120797253959119#2.7355447625559037
# baseline = False
# energy_xmax = 150
# lambda_xmax = None
# x_axis = 'number'
# #
# # Calibrate the peak positions
x_type = 'energy'
source_to_detector_m = 16.45
offset_us = 0
fmt = '-'
experiment1 = Experiment(data_file=data_file1, spectra_file=spectra_file,
# x_axis = 'number' logx = False # # # Calibrate the peak positions x_type = 'energy' y_type = 'transmission' source_to_detector_m = 16.45 offset_us = 0 fmt = '-' lw = 1 exps = {} ax0 = None for _index, each_name in enumerate(sample_name): exps[each_name] = Experiment(spectra_file=spectra_file, data_file=data_file[_index], folder=folder) exps[each_name].norm_to(file=norm_to_file[_index], norm_factor=norm_factor[_index]) if ax0 is None: ax0 = exps[each_name].plot(x_type=x_type, y_type=y_type, source_to_detector_m=source_to_detector_m, offset_us=offset_us, logx=logx, baseline=baseline, deg=deg, fmt=fmt, lw=lw, label=each_name) else:
# Script: peak detection on the 'spheres' measurement (IPTS-19558);
# marks detected peaks on the raw-number plot.
import numpy as np
import pprint
import matplotlib.pyplot as plt
from ResoFit.experiment import Experiment
import peakutils as pku
from ResoFit.simulation import Simulation
from scipy import signal
import scipy

folder = 'data/IPTS_19558/reso_data_19558'
data_file1 = 'spheres.csv'
# data_file2 = 'spheres_background_1.csv'
spectra_file = 'Image002_Spectra.txt'
# #
# source_to_detector_m = 16.45 # 16#16.445359069030175#16.447496101100739
# offset_us = 2.752 # 0#2.7120797253959119#2.7355447625559037
# baseline = False
# energy_xmax = 150
# lambda_xmax = None
# x_axis = 'number'
# #
# # Calibrate the peak positions
experiment1 = Experiment(data_file=data_file1,
                         spectra_file=spectra_file,
                         folder=folder,
                         baseline=True)
# Drop the first 300 points (detector warm-up region) without reindexing.
experiment1.slice(start=300, reset_index=False)
peak_df = experiment1.find_peak()
experiment1.plot(x_axis='number', t_unit='s')
# Overlay detected peak positions as black crosses.
plt.plot(peak_df['x_num'], peak_df['y'], 'kx')
energy_min = 7
# Script fragment: run_42 transmission plots, each measurement normalized to
# its own open-beam file (NOTE: `data_file1/2`, `ob_file1/2`, `spectra_file`
# and `folder` are defined earlier in this script).
data_file3 = 'run_42_image1_big_bottom2.txt'
ob_file3 = 'run_42_image1_big_bottom2_ob_lines.txt'
data_file4 = 'run_42_image0_big_bottom.txt'
ob_file4 = 'run_42_image0_big_bottom_ob.txt'
repeat = 1  # normalization factor (number of repeated acquisitions)
source_to_detector_m = 15.  # 16#16.445359069030175#16.447496101100739
offset_us = 0  # 0#2.7120797253959119#2.7355447625559037
# transmission = False
baseline = False
energy_xmax = 150
lambda_xmax = None
x_axis = 'lambda'

# # Calibrate the peak positions
experiment1 = Experiment(data_file=data_file1,
                         spectra_file=spectra_file,
                         norm_factor=repeat,
                         folder=folder)
experiment1.norm_to(ob_file1)
experiment1.plot(offset_us=offset_us,
                 source_to_detector_m=source_to_detector_m,
                 x_axis=x_axis,
                 baseline=baseline,
                 energy_xmax=energy_xmax,
                 lambda_xmax=lambda_xmax,
                 transmission=True)
experiment2 = Experiment(data_file=data_file2,
                         spectra_file=spectra_file,
                         norm_factor=repeat,
                         folder=folder)
experiment2.norm_to(ob_file2)