def y_gap_for_fitting(params,
                      exp_x_interp,
                      exp_y_interp,
                      layer_list,
                      energy_min,
                      energy_max,
                      energy_step,
                      database,
                      each_step=False):
    parvals = params.valuesdict()
    simulation = Simulation(energy_min=energy_min,
                            energy_max=energy_max,
                            energy_step=energy_step,
                            database=database)
    for each_layer in layer_list:
        simulation.add_layer(
            layer=each_layer,
            thickness_mm=parvals['thickness_mm_' + each_layer],
            density_gcm3=parvals['density_gcm3_' + each_layer])
    simu_x = simulation.get_x(x_type='energy')
    simu_y = simulation.get_y(y_type='attenuation')
    gap = (exp_y_interp - simu_y)  # ** 2

    if each_step is True:
        for each_layer in layer_list:
            print(
                "Trying: density_gcm3_{}: {}    thickness_mm_{}: {}    chi^2: {}"
                .format(each_layer, parvals['density_gcm3_' + each_layer],
                        each_layer, parvals['thickness_mm_' + each_layer],
                        sum((exp_y_interp - simu_y)**2)))
    return gap


def y_gap_for_iso_fitting(params,
                          exp_x_interp,
                          exp_y_interp,
                          layer,
                          formatted_isotope_list,
                          fitted_simulation: Simulation,
                          each_step=False):
    parvals = params.valuesdict()
    isotope_ratio_list = [parvals[_iso] for _iso in formatted_isotope_list]

    fitted_simulation.set_isotopic_ratio(
        layer=layer, element=layer, new_isotopic_ratio_list=isotope_ratio_list)
    simu_x = fitted_simulation.get_x(x_type='energy')
    simu_y = fitted_simulation.get_y(y_type='attenuation')
    gap = (exp_y_interp - simu_y)  # ** 2

    if each_step is True:
        for each_iso in formatted_isotope_list:
            print("Trying: {}: {}    chi^2: {}".format(
                each_iso, parvals[each_iso], sum((exp_y_interp - simu_y)**2)))
    return gap
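
The two functions above follow lmfit's residual-callback convention: the first argument is an lmfit Parameters object, extra data is passed through 'args', and the return value is the residual array that minimize() squares and sums internally. Below is a minimal, self-contained sketch of that pattern using a generic straight-line fit (the function and parameter names are purely illustrative, not part of ResoFit):

# Generic illustration of the residual-callback pattern used by
# y_gap_for_fitting / y_gap_for_iso_fitting; requires only numpy and lmfit.
import numpy as np
from lmfit import Parameters, minimize


def y_gap_for_line(params, x, y_obs):
    parvals = params.valuesdict()
    y_model = parvals['slope'] * x + parvals['intercept']
    return y_obs - y_model  # residual array, analogous to 'gap' above


x = np.linspace(0, 10, 50)
y_obs = 2.0 * x + 1.0 + np.random.normal(scale=0.1, size=x.size)

params = Parameters()
params.add('slope', value=1.0, vary=True)
params.add('intercept', value=0.0, vary=True)

result = minimize(y_gap_for_line, params, method='leastsq', args=(x, y_obs))
print(result.params.valuesdict(), result.chisqr)
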
Example #4
class Calibration(object):
    def __init__(
            self,
            # Initialize ResoFit.experiment
            spectra_file: str,
            data_file: str,
            folder: str,
            exp_source_to_detector_m,
            exp_offset_us,
            baseline: bool,
            baseline_deg: int,
            # Initialize ResoFit.simulation
            layer: fit_util.Layer,
            energy_min,
            energy_max,
            energy_step,
            database: str,
            x_type: str,
            y_type: str):
        """
        Initialization with passed file location and sample info

        :param spectra_file:
        :type spectra_file:
        :param data_file:
        :type data_file:
        :param layer: Layer()
        :type layer:
        :param energy_min:
        :type energy_min:
        :param energy_max:
        :type energy_max:
        :param energy_step:
        :type energy_step:
        :param folder:
        :type folder:
        :param baseline: True -> to remove baseline/background by detrend
        :type baseline: boolean
        """
        self.x_type = x_type
        self.y_type = y_type
        self.energy_min = energy_min
        self.energy_max = energy_max
        self.energy_step = energy_step
        self.simulation = Simulation(energy_min=energy_min,
                                     energy_max=energy_max,
                                     energy_step=energy_step,
                                     database=database)
        self.simulation.add_Layer(layer=layer)
        self.experiment = Experiment(
            spectra_file=spectra_file,
            data_file=data_file,
            folder=folder,
            source_to_detector_m=exp_source_to_detector_m,
            offset_us=exp_offset_us,
            baseline=baseline,
            baseline_deg=baseline_deg)
        self.experiment.t_start_us = self.experiment.t_start_us + _exp_time_offset_us
        self.init_source_to_detector_m = exp_source_to_detector_m
        self.init_offset_us = exp_offset_us
        self.calibrated_offset_us = None
        self.calibrated_source_to_detector_m = None
        self.calibrate_result = None
        self.params_to_calibrate = None

    def calibrate(self,
                  source_to_detector_m=None,
                  offset_us=None,
                  vary='all',
                  each_step=False):
        """
        calibrate the instrumental parameters: source-to-detector-distance & detector delay
        :param each_step: boolean. True -> show values and chi^2 of each step
        :param source_to_detector_m: estimated distance in m
        :param offset_us: estimated time offset in us
        :param vary: vary one of or both of 'source_to_detector' and 'offset' to calibrate (default: 'all')

        :return: lmfit MinimizerResult
        """
        # Fall back to the initial values when no new input is given
        if source_to_detector_m is None:
            source_to_detector_m = self.init_source_to_detector_m
        if offset_us is None:
            offset_us = self.init_offset_us

        vary_type_list = ['source_to_detector', 'offset', 'all', 'none']
        if vary not in vary_type_list:
            raise ValueError(
                "'vary=' can only be one of {}".format(vary_type_list))
        simu_x = self.simulation.get_x(
            x_type='energy',
            offset_us=offset_us,
            source_to_detector_m=source_to_detector_m)
        simu_y = self.simulation.get_y(y_type='attenuation')
        _run = True
        if vary == 'all':
            source_to_detector_vary_tag = True
            offset_vary_tag = True
        elif vary == 'source_to_detector':
            source_to_detector_vary_tag = True
            offset_vary_tag = False
        elif vary == 'offset':
            source_to_detector_vary_tag = False
            offset_vary_tag = True
        else:  # vary == 'none':
            source_to_detector_vary_tag = False
            offset_vary_tag = False
            _run = False

        self.params_to_calibrate = Parameters()
        self.params_to_calibrate.add('source_to_detector_m',
                                     value=source_to_detector_m,
                                     vary=source_to_detector_vary_tag)
        self.params_to_calibrate.add('offset_us',
                                     value=offset_us,
                                     vary=offset_vary_tag)
        # Print before
        print(
            "+----------------- Calibration -----------------+\nParams before:"
        )
        self.params_to_calibrate.pretty_print()
        # Use lmfit to obtain 'source_to_detector_m' & 'offset_us' to minimize 'y_gap_for_calibration'
        if _run:
            self.calibrate_result = minimize(
                y_gap_for_calibration,
                self.params_to_calibrate,
                method='leastsq',
                args=(simu_x, simu_y, self.energy_min, self.energy_max,
                      self.energy_step, self.experiment, 'energy',
                      'attenuation', each_step))
            # Print after
            print("\nParams after:")
            self.calibrate_result.__dict__['params'].pretty_print()
            # Print chi^2
            # self.calibrated_residual = self.calibrate_result.__dict__['residual']
            print("Calibration chi^2 : {}\n".format(
                self.calibrate_result.__dict__['chisqr']))
            self.calibrated_offset_us = self.calibrate_result.__dict__[
                'params'].valuesdict()['offset_us']
            self.calibrated_source_to_detector_m = \
                self.calibrate_result.__dict__['params'].valuesdict()['source_to_detector_m']
            return self.calibrate_result
        else:
            self.calibrated_offset_us = offset_us
            self.calibrated_source_to_detector_m = source_to_detector_m
            print(
                "\ncalibrate() was not run as requested, input values used:\n"
                "calibrated_offset_us = {}\ncalibrated_source_to_detector_m = {}"
                .format(offset_us, source_to_detector_m))
            # self.experiment.xy_scaled(energy_min=self.energy_min,
            #                           energy_max=self.energy_max,
            #                           energy_step=self.energy_step,
            #                           x_type='energy',
            #                           y_type='attenuation',
            #                           offset_us=offset_us,
            #                           source_to_detector_m=source_to_detector_m,
            #                           )

    def __find_peak(self, thres, min_dist):
        # load detected peak with x in image number
        # if self.calibrate_result is None:
        if self.calibrated_source_to_detector_m is None or self.calibrated_offset_us is None:
            raise ValueError("Instrument params have not been calibrated.")
        self.experiment.find_peak(x_type=self.x_type,
                                  y_type=self.y_type,
                                  thres=thres,
                                  min_dist=min_dist)
        # self.experiment.o_peak._scale_peak_df(energy_min=self.energy_min, energy_max=self.energy_max,
        #                                       )
        return self.experiment.o_peak.peak_dict

    def index_peak(self,
                   thres_exp,
                   min_dist_exp,
                   thres_map,
                   min_dist_map,
                   rel_tol,
                   impr_reso=True):
        if self.experiment.o_peak is None:
            self.__find_peak(thres=thres_exp, min_dist=min_dist_exp)
        # find peak map using Simulation.peak_map()
        _peak_map_dict = self.simulation.peak_map(
            thres=thres_map,
            min_dist=min_dist_map,
            impr_reso=impr_reso,
            x_type=self.x_type,
            y_type=self.y_type,
            offset_us=self.calibrated_offset_us,
            source_to_detector_m=self.calibrated_source_to_detector_m,
            t_unit=self.experiment.t_unit,
            t_start_us=self.experiment.t_start_us,
            time_resolution_us=self.experiment.time_resolution_us,
            num_offset=self.experiment.img_start)
        # pass peak map to Peak()
        assert _peak_map_dict['x_type'] == self.experiment.o_peak.peak_dict[
            'x_type']
        assert _peak_map_dict['y_type'] == self.experiment.o_peak.peak_dict[
            'y_type']
        self.experiment.o_peak.peak_map_full = _peak_map_dict['peak_map']
        # index using Peak()
        self.experiment.o_peak.index_peak(_peak_map_dict, rel_tol=rel_tol)
        # return self.experiment.o_peak.peak_map_indexed

    def analyze_peak(self, fit_model, report=False, show_fit=False):
        if self.experiment.o_peak is None:
            raise AttributeError(
                "Please run 'Calibration.index_peak()' before peak analysis.")
        self.experiment.o_peak.analyze(report=report, fit_model=fit_model)
        if show_fit:
            self.experiment.o_peak.plot_fit()

    # def calibrate_peak_pos(self, thres=0.15, min_dist=2, vary='all', each_step=False):
    #     """
    #     calibrate the instrumental parameters: source-to-detector-distance & detector delay
    #     based on peak positions obtained from the instrument parameters after Calibration.calibrate().
    #
    #     :param thres:
    #     :type thres:
    #     :param min_dist:
    #     :type min_dist:
    #     :param vary: vary one of or both of 'source_to_detector' and 'offset' to calibrate (default: 'all')
    #     :type vary:
    #     :param each_step: True -> show values and chi^2 of each step
    #     :type each_step: boolean.
    #     :return: calibration result
    #     :rtype: lmfit MinimizerResult
    #     """
    #     if self.peak_map_indexed is None:
    #         raise ValueError('Calibrate must be run before running advanced calibration.')
    #     # self.init_source_to_detector_m = source_to_detector_m
    #     # self.init_offset_us = offset_us
    #     if vary not in ['source_to_detector', 'offset', 'all', 'none']:
    #         raise ValueError("'vary=' can only be one of ['source_to_detector', 'offset', 'all' 'none']")
    #     ideal_x = []
    #     for _ele in self.peak_map_indexed.keys():
    #         ideal_x = ideal_x + list(self.peak_map_indexed[_ele]['ideal']['x'])
    #     sorted(ideal_x)
    #     print(ideal_x)
    #
    #     source_to_detector_vary_tag = True
    #     offset_vary_tag = True
    #     if vary == 'source_to_detector':
    #         offset_vary_tag = False
    #     if vary == 'offset':
    #         source_to_detector_vary_tag = False
    #     if vary == 'none':
    #         source_to_detector_vary_tag = False
    #         offset_vary_tag = False
    #     self.params_to_calibrate = Parameters()
    #     self.params_to_calibrate.add('source_to_detector_m',
    #                                  value=self.calibrated_source_to_detector_m,
    #                                  vary=source_to_detector_vary_tag)
    #     self.params_to_calibrate.add('offset_us',
    #                                  value=self.calibrated_offset_us,
    #                                  vary=offset_vary_tag)
    #     # Print before
    #     print("-------Calibration(advanced)-------\nParams before:")
    #     self.params_to_calibrate.pretty_print()
    #     # Use lmfit to obtain 'source_to_detector_m' & 'offset_us' to minimize 'y_gap_for_calibration'
    #     self.calibrate_result = minimize(y_gap_for_adv_calibration,
    #                                      self.params_to_calibrate,
    #                                      method='leastsq',
    #                                      args=(ideal_x, thres, min_dist,
    #                                            self.experiment, each_step))
    #     # Print after
    #     print("Params after:")
    #     self.calibrate_result.__dict__['params'].pretty_print()
    #     # Print chi^2
    #     self.calibrated_residual = self.calibrate_result.__dict__['residual']
    #     print("Calibration chi^2 : {}\n".format(sum(self.calibrated_residual ** 2)))
    #     self.calibrated_offset_us = self.calibrate_result.__dict__['params'].valuesdict()['offset_us']
    #     self.calibrated_source_to_detector_m = \
    #         self.calibrate_result.__dict__['params'].valuesdict()['source_to_detector_m']
    #
    #     # Save the calibrated experimental x & y in Calibration class
    #     self.exp_x_raw_calibrated = self.experiment.x_raw(angstrom=False,
    #                                                       offset_us=self.calibrated_offset_us,
    #                                                       source_to_detector_m=self.calibrated_source_to_detector_m)
    #     self.exp_y_raw_calibrated = self.experiment.y_raw(transmission=False, baseline=self.baseline)
    #
    #     self.exp_x_interp_calibrated, self.exp_y_interp_calibrated = self.experiment.xy_scaled(
    #         energy_min=self.energy_min,
    #         energy_max=self.energy_max,
    #         energy_step=self.energy_step,
    #         offset_us=self.calibrated_offset_us,
    #         source_to_detector_m=self.calibrated_source_to_detector_m,
    #         baseline=self.baseline)
    #
    #     return self.calibrate_result

    def plot(self,
             x_type=None,
             y_type=None,
             t_unit='us',
             index_level='iso',
             peak_id='indexed',
             peak_exp='indexed',
             peak_height=True,
             before=False,
             interp=False,
             mixed=False,
             logx=True,
             logy=False,
             table=True,
             grid=True,
             save_fig=False):
        """"""
        fit_util.check_if_in_list(peak_id, fit_util.peak_type_list)
        fit_util.check_if_in_list(peak_exp, fit_util.peak_type_list)
        fit_util.check_if_in_list(index_level, fit_util.index_level_list)

        if x_type is None:
            x_type = self.x_type
        if y_type is None:
            y_type = self.y_type

        old_colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
        new_colors = [
            '#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
            '#e377c2', '#7f7f7f', '#bcbd22', '#17becf'
        ]
        marker_styles = [
            'o', 'v', '^', '<', '>', '8', 's', 'p', '*', 'h', 'H', 'D', 'd',
            'P', 'X'
        ]
        color_cycle = cycle(new_colors)
        # color_cycle_2 = cycle(new_colors)
        # color_cycle_3 = cycle(new_colors)
        # color_cycle_4 = cycle(new_colors)
        style_cycle = cycle(marker_styles)

        simu_label = 'Ideal'
        exp_label = 'Exp'
        exp_before_label = 'Exp_init'
        exp_interp_label = 'Exp_interp'
        sample_name = ' & '.join(self.simulation.layer_list)
        fig_title = "Calibration result of sample ('{}')".format(sample_name)
        fig = plt.figure()  # start a fresh pyplot figure for this plot

        # plot table + graph
        if table:
            ax1 = plt.subplot2grid(shape=(10, 10),
                                   loc=(0, 1),
                                   rowspan=8,
                                   colspan=8)
        # plot graph only
        else:
            ax1 = plt.subplot(111)

        # Plot simulated total signal
        if mixed:
            _x = self.simulation.get_x(
                x_type=x_type,
                t_unit=t_unit,
                offset_us=self.calibrated_offset_us,
                source_to_detector_m=self.calibrated_source_to_detector_m,
                t_start_us=self.experiment.t_start_us,
                time_resolution_us=self.experiment.time_resolution_us,
                num_offset=self.experiment.slice_start)
            _y = self.simulation.get_y(y_type=y_type)
            ax1.plot(_x, _y, 'b-', label=simu_label, linewidth=1)
        """Plot options"""
        # 1.
        if before:
            # Plot the raw data before fitting
            _x_init = self.experiment.get_x(
                x_type=x_type,
                t_unit=t_unit,
                offset_us=self.init_offset_us,
                source_to_detector_m=self.init_source_to_detector_m,
            )
            _y_init = self.experiment.get_y(y_type=y_type)
            ax1.plot(_x_init,
                     _y_init,
                     linestyle='-',
                     linewidth=1,
                     marker='o',
                     markersize=2,
                     color='c',
                     label=exp_before_label)

        # 2.
        if interp:
            _exp_x_interp_calibrated, _exp_y_interp_calibrated = self.experiment.xy_scaled(
                x_type=x_type,
                y_type=y_type,
                energy_min=self.energy_min,
                energy_max=self.energy_max,
                energy_step=self.energy_step,
                t_unit=t_unit,
                offset_us=self.calibrated_offset_us,
                source_to_detector_m=self.calibrated_source_to_detector_m,
            )
            # plot the interpolated raw data
            ax1.plot(_exp_x_interp_calibrated,
                     _exp_y_interp_calibrated,
                     'r:',
                     label=exp_interp_label,
                     linewidth=1)
        else:
            # plot the calibrated raw data
            _x_cali = self.experiment.get_x(
                x_type=x_type,
                t_unit=t_unit,
                offset_us=self.calibrated_offset_us,
                source_to_detector_m=self.calibrated_source_to_detector_m)
            _y_cali = self.experiment.get_y(y_type=y_type)
            ax1.plot(_x_cali,
                     _y_cali,
                     linestyle='-',
                     linewidth=1,
                     marker='o',
                     markersize=2,
                     color='r',
                     label=exp_label)
        if peak_exp == 'all':
            # _peak_x_exp = fit_util.convert_exp_peak_df(x_type=x_type, peak_df=_peak_df_scaled, t_unit=t_unit)
            _peak_df_scaled = self.experiment.o_peak.peak_dict['df']
            _peak_x_exp = _peak_df_scaled['x']
            # if x_type == 'time':
            #     _peak_x_exp = fit_util.convert_s(x=_peak_x_exp, t_unit=t_unit)
            # _peak_y_exp = fit_util.convert_attenuation_to(y_type=y_type, y=_peak_df_scaled['y'])
            _peak_y_exp = _peak_df_scaled['y']
            ax1.scatter(
                _peak_x_exp,
                _peak_y_exp,
                c='k',
                marker='x',
                # s=30,
                # marker='o',
                # facecolors='none',
                # edgecolors='k',
                label='_nolegend_')
        # plot peaks detected and indexed
        if self.experiment.o_peak is not None:
            if self.experiment.o_peak.peak_map_indexed_dict is not None:
                if y_type == 'transmission':
                    _start_point = 1
                    ax1.set_ylim(top=1.1, bottom=-0.01)
                    _pos = 1.05
                else:
                    _start_point = 0
                    ax1.set_ylim(top=1.01, bottom=-0.1)
                    _pos = -0.05
                _peak_map_indexed = self.experiment.o_peak.peak_map_indexed_dict[
                    'peak_map_indexed']
                _peak_map_full = self.experiment.o_peak.peak_map_full

                if index_level == 'iso':
                    _peak_name_list = [
                        _name for _name in _peak_map_indexed.keys()
                        if '-' in _name
                    ]
                else:
                    _peak_name_list = [
                        _name for _name in _peak_map_indexed.keys()
                        if '-' not in _name
                    ]

                if peak_id == 'all':
                    _current_peak_map = _peak_map_full
                    # _tag = 'ideal'
                else:  # peak_id == 'indexed'
                    _current_peak_map = _peak_map_indexed
                _tag = 'ideal'

                for _peak_name in _peak_name_list:
                    if len(_current_peak_map[_peak_name][_tag]) > 0:

                        _peak_x = _current_peak_map[_peak_name][_tag]['x']
                        _peak_y = _current_peak_map[_peak_name][_tag]['y']
                        if peak_exp == 'indexed':
                            _legend_name = '_nolegend_'
                        else:
                            _legend_name = _peak_name
                        _current_color = next(color_cycle)
                        _current_style = next(style_cycle)
                        ax1.plot(_peak_x, [_pos] * len(_peak_x),
                                 '|',
                                 ms=10,
                                 color=_current_color,
                                 label=_legend_name)
                        if peak_height:
                            ax1.plot(
                                _peak_x,
                                _peak_y,
                                '_',
                                # marker=next(style_cycle_1),
                                # ms=4,
                                color=_current_color,
                                label='_nolegend_')
                            ax1.vlines(_peak_x,
                                       _start_point,
                                       _peak_y,
                                       color=_current_color,
                                       alpha=1,
                                       label='_nolegend_')

                        if peak_exp == 'indexed':
                            _peak_x_exp = _peak_map_indexed[_peak_name]['exp'][
                                'x']
                            _peak_y_exp = _peak_map_indexed[_peak_name]['exp'][
                                'y']
                            ax1.scatter(
                                _peak_x_exp,
                                _peak_y_exp,
                                marker=_current_style,
                                # ms=4,
                                color=_current_color,
                                label=_peak_name)

                        if 'peak_span' in _peak_map_indexed[_peak_name].keys():
                            if len(_peak_map_indexed[_peak_name]['exp']) > 0:
                                _data_point_x = _peak_map_indexed[_peak_name][
                                    'peak_span']['x']
                                _data_point_y = _peak_map_indexed[_peak_name][
                                    'peak_span']['y']
                                ax1.scatter(_data_point_x,
                                            _data_point_y,
                                            label='_nolegend_')

        # Set plot limit and captions
        ax1 = fit_util.set_plt(ax1,
                               fig_title=fig_title,
                               grid=grid,
                               x_type=x_type,
                               y_type=y_type,
                               t_unit=t_unit,
                               logx=logx,
                               logy=logy)

        # Plot table
        if table:
            # ax2 = plt.subplot2grid(shape=(10, 7), loc=(0, 1), rowspan=4, colspan=5)
            # ax2.axis('off')
            # columns = list(self.calibrate_result.__dict__['params'].valuesdict().keys())
            columns_to_show = [r'$L$ (m)', r'$\Delta t$ ($\rm{\mu}$s)']
            rows = ['Before', 'After']
            _row_before = [self.init_source_to_detector_m, self.init_offset_us]
            _row_after = [
                self.calibrated_source_to_detector_m, self.calibrated_offset_us
            ]
            # for _each in columns:
            #     _row_after.append(self.calibrate_result.__dict__['params'].valuesdict()[_each])
            #     _row_before.append(self.params_to_calibrate.valuesdict()[_each])
            table = ax1.table(
                rowLabels=rows,
                colLabels=columns_to_show,  # colWidths=
                cellText=[_row_before, _row_after],  # rows of data values
                bbox=[0, -0.33, 1.0, 0.18]  # [left,bottom,width,height]
            )
            # table.scale(0.5, 1)
            table.auto_set_font_size(False)
            table.set_fontsize(10)
            plt.tight_layout()

        if save_fig:
            _sample_name = '_'.join(self.simulation.layer_list)
            _filename = 'calibration_' + _sample_name + '.png'
            plt.savefig(_filename, dpi=600, transparent=True)
            plt.close()
        return ax1

    def export(self,
               x_type='energy',
               y_type='attenuation',
               t_unit='us',
               index_level='iso',
               peak_id='indexed',
               before=False,
               interp=False,
               mixed=True):

        simu_label = 'ideal'
        exp_label = 'exp_raw'
        exp_before_label = 'exp_init'
        exp_interp_label = 'exp_interp'
        _df = pd.DataFrame()

        _col_suffix = fit_util.get_df_col_name(x_type=x_type)
        # Simulated total signal
        if mixed:
            _x = self.simulation.get_x(
                x_type=x_type,
                t_unit=t_unit,
                offset_us=self.calibrated_offset_us,
                source_to_detector_m=self.calibrated_source_to_detector_m,
                t_start_us=self.experiment.t_start_us,
                time_resolution_us=self.experiment.time_resolution_us)
            _y = self.simulation.get_y(y_type=y_type)
            _df['x_' + simu_label] = _x
            _df['y_' + simu_label] = _y
        """Plot options"""
        # Raw data before fitting
        if before:
            _x_init = self.experiment.get_x(
                x_type=x_type,
                t_unit=t_unit,
                offset_us=self.init_offset_us,
                source_to_detector_m=self.init_source_to_detector_m)
            _y_init = self.experiment.get_y(y_type=y_type,
                                            baseline=self.baseline)
            _df['x_' + exp_before_label] = _x_init
            _df['y_' + exp_before_label] = _y_init

        # 2.
        if interp:
            _exp_x_interp_calibrated, _exp_y_interp_calibrated = self.experiment.xy_scaled(
                x_type=x_type,
                y_type=y_type,
                energy_min=self.energy_min,
                energy_max=self.energy_max,
                energy_step=self.energy_step,
                t_unit=t_unit,
                offset_us=self.calibrated_offset_us,
                source_to_detector_m=self.calibrated_source_to_detector_m,
                baseline=self.baseline)
            # Interpolated raw data
            _df['x_' + exp_interp_label +
                _col_suffix] = _exp_x_interp_calibrated
            _df['y_' + exp_interp_label] = _exp_y_interp_calibrated
        else:
            # plot the calibrated raw data
            _x_cali = self.experiment.get_x(
                x_type=x_type,
                t_unit=t_unit,
                offset_us=self.calibrated_offset_us,
                source_to_detector_m=self.calibrated_source_to_detector_m)
            _y_cali = self.experiment.get_y(y_type=y_type,
                                            baseline=self.baseline)
            _df['x_' + exp_label + _col_suffix] = pd.Series(_x_cali)
            _df['y_' + exp_label] = pd.Series(_y_cali)

        # plot peaks detected and indexed
        if self.experiment.o_peak and self.experiment.o_peak.peak_map_indexed is not None:
            _peak_df_scaled = self.experiment.o_peak.peak_df_scaled
            _peak_map_indexed = self.experiment.o_peak.peak_map_indexed
            _peak_map_full = self.experiment.o_peak.peak_map_full
            _x_peak_exp_all = fit_util.convert_exp_peak_df(
                x_type=x_type, peak_df=_peak_df_scaled, t_unit=t_unit)
            _y_peak_exp_all = fit_util.convert_attenuation_to(
                y_type=y_type, y=_peak_df_scaled['y'])
            # _df = pd.concat([_df, _peak_df_scaled], axis=1)

            _df['x_peak_exp_all'] = pd.Series(_x_peak_exp_all)
            _df['y_peak_exp_all'] = pd.Series(_y_peak_exp_all)

            x_tag = fit_util.get_peak_tag(x_type=x_type)
            for _peak_name in _peak_map_indexed.keys():
                if len(_peak_map_full[_peak_name]['ideal']) > 0:
                    _x_peak_ideal_all = _peak_map_full[_peak_name]['ideal'][
                        x_tag]
                    _y_peak_ideal_all = _peak_map_full[_peak_name]['ideal'][
                        'y']
                    _df['x_peak_ideal_all(' + _peak_name +
                        ')'] = _x_peak_ideal_all
                    _df['y_peak_ideal_all(' + _peak_name +
                        ')'] = _y_peak_ideal_all
                if len(_peak_map_indexed[_peak_name]['ideal']) > 0:
                    _x_peak_ideal_indexed = _peak_map_indexed[_peak_name][
                        'ideal'][x_tag]
                    _y_peak_ideal_indexed = _peak_map_indexed[_peak_name][
                        'ideal']['y']
                    _x_peak_exp_indexed = _peak_map_indexed[_peak_name]['exp'][
                        x_tag]
                    _y_peak_exp_indexed = _peak_map_indexed[_peak_name]['exp'][
                        'y']
                    _df['x_peak_exp(' + _peak_name + ')'] = _x_peak_exp_indexed
                    _df['y_peak_exp(' + _peak_name + ')'] = _y_peak_exp_indexed
                    _df['x_peak_ideal(' + _peak_name +
                        ')'] = _x_peak_ideal_indexed
                    _df['y_peak_ideal(' + _peak_name +
                        ')'] = _y_peak_ideal_indexed

        _df.to_clipboard(index=False)

        return _df
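
For orientation, a hedged end-to-end sketch of driving the Calibration class above. Everything below that is not visible in the listing (file names, folder, instrument estimates, peak-detection settings, import paths) is an illustrative assumption; only the constructor/method names and argument shapes come from the code above, and export() can afterwards dump the plotted curves to a DataFrame / the clipboard.

# Assumed imports -- adjust to the actual package layout:
#   from ResoFit.calibration import Calibration
#   import ResoFit._utilities as fit_util
layer = fit_util.Layer()
layer.add_layer(layer='U', thickness_mm=0.05)           # sample definition as in the tests below

o_cali = Calibration(spectra_file='spectra.txt',        # hypothetical file name
                     data_file='sample.txt',            # hypothetical file name
                     folder='data',                     # hypothetical folder
                     exp_source_to_detector_m=16.45,    # assumed estimate (m)
                     exp_offset_us=2.7,                 # assumed estimate (us)
                     baseline=False,
                     baseline_deg=3,
                     layer=layer,
                     energy_min=7,
                     energy_max=150,
                     energy_step=0.01,
                     database='_data_for_unittest',     # database name used by the tests below
                     x_type='energy',
                     y_type='attenuation')

o_cali.calibrate(source_to_detector_m=16.45, offset_us=2.7, vary='all')
o_cali.index_peak(thres_exp=0.05, min_dist_exp=10,      # assumed peak-detection settings
                  thres_map=0.05, min_dist_map=10, rel_tol=0.01)
o_cali.plot(table=True, save_fig=False)
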
class TestItems(unittest.TestCase):
    layer_1 = 'U'
    thickness_1 = 0.05
    layer_2 = 'Ag'
    thickness_2 = 0.05
    layers = fit_util.Layer()
    layers.add_layer(layer=layer_1, thickness_mm=thickness_1)
    layers.add_layer(layer=layer_2, thickness_mm=thickness_2)
    database = '_data_for_unittest'
    simulation = Simulation(database=database)
    simulation.add_Layer(layers)
    items = fit_util.Items(simulation.o_reso)

    def test_raises(self):
        name = 'AG'
        pytest.raises(ValueError, fit_util._shape_items, name=name)
        name = 'aG'
        pytest.raises(ValueError, fit_util._shape_items, name=name)
        name = 'AgO'
        pytest.raises(ValueError, fit_util._shape_items, name=name)
        name = 'ag'
        pytest.raises(ValueError, fit_util._shape_items, name=name)
        name = ''
        pytest.raises(ValueError, fit_util._shape_items, name=name)
        name = []
        pytest.raises(ValueError, fit_util._shape_items, name=name)

    def test_isotope_format(self):
        name = '238-U'
        expected_path = ['U', 'U', '238-U']
        assert fit_util._shape_items(name) == expected_path
        name = '238U'
        assert fit_util._shape_items(name) == expected_path
        name = 'U-238'
        assert fit_util._shape_items(name) == expected_path
        name = 'U238'
        assert fit_util._shape_items(name) == expected_path

    def test_fill_iso_to_items(self):
        name = 'U*'
        expected_path_list = [['U', 'U', '233-U'], ['U', 'U', '234-U'],
                              ['U', 'U', '235-U'], ['U', 'U', '238-U']]
        assert fit_util._fill_iso_to_items(
            name, database=self.database) == expected_path_list
        name = 'U'
        pytest.raises(ValueError,
                      fit_util._fill_iso_to_items,
                      name=name,
                      database=self.database)

    def test_shape_items(self):
        name = 'U'
        expected_path = ['U', 'U']
        assert fit_util._shape_items(name) == expected_path
        name = 'u'
        expected_path = ['U', 'U']
        assert fit_util._shape_items(name) == expected_path
        name = 'Gd'
        expected_path = ['Gd', 'Gd']
        assert fit_util._shape_items(name) == expected_path

    def test_items_shaped(self):
        _input = ['Gd', ['U'], 'U-238', 'U*']
        expected = [['Gd', 'Gd'], ['U', 'U'], ['U', 'U', '233-U'],
                    ['U', 'U', '234-U'], ['U', 'U', '235-U'],
                    ['U', 'U', '238-U']]
        obtained = self.items.shaped(_input)
        assert obtained == expected

    def test_items_original(self):
        _input = [['Gd', 'Gd'], ['U', 'U'], ['U', 'U', '233-U'],
                  ['U', 'U', '234-U'], ['U', 'U', '235-U'],
                  ['U', 'U', '238-U']]
        expected = [['Gd', 'Gd'], ['U', 'U'], ['U', 'U', '233-U'],
                    ['U', 'U', '234-U'], ['U', 'U', '235-U'],
                    ['U', 'U', '238-U']]
        obtained = self.items.shaped(_input)
        assert obtained == expected


class TestPeaks(unittest.TestCase):
    energy_min = 7
    energy_max = 150
    energy_step = 0.01
    simulation = Simulation(energy_min=energy_min,
                            energy_max=energy_max,
                            energy_step=energy_step,
                            database='_data_for_unittest')
    simulation.add_layer(layer='U', thickness_mm=0.05)
    x = simulation.o_reso.stack_sigma['U']['U']['energy_eV']
    y = simulation.o_reso.stack_sigma['U']['U']['sigma_b']

    def test_findpeak1(self):
        peak_found = fit_util._find_peak(y=self.y,
                                         x=self.x,
                                         thres=0.015,
                                         min_dist=1,
                                         imprv_reso=False)
        peak_expected = {
            'x': [20.87, 36.68, 66.03, 80.75, 102.57, 116.91],
            'y': [
                9801.184720322835, 13337.61249582717, 4356.4307835150385,
                276.22478464487637, 6022.958717161858, 2003.9245670422251
            ],
        }
        self.assertDictEqual(peak_found, peak_expected)

    def test_findpeak2(self):
        peak_found = fit_util._find_peak(y=self.y,
                                         x=self.x,
                                         thres=0.015,
                                         min_dist=1,
                                         imprv_reso=True)
        peak_expected = {
            'x': [
                20.87274280816388, 36.68474982964769, 66.03358430830164,
                80.75548785869171, 102.56856740218703, 116.91012795048718
            ],
            'y': [
                9801.184720322835, 13337.61249582717, 4356.4307835150385,
                276.22478464487637, 6022.958717161858, 2003.9245670422251
            ]
        }
        self.assertDictEqual(peak_found, peak_expected)

    def test_findpeak3(self):
        peak_found = fit_util._find_peak(y=self.y,
                                         thres=0.015,
                                         min_dist=1,
                                         imprv_reso=False)
        peak_expected = {
            'x': [1387, 2968, 5903, 7375, 9557, 10991],
            'y': [
                9801.184720322835, 13337.61249582717, 4356.4307835150385,
                276.22478464487637, 6022.958717161858, 2003.9245670422251
            ]
        }
        self.assertDictEqual(peak_found, peak_expected)

    def test_findpeak4(self):
        peak_found = fit_util._find_peak(y=self.y,
                                         thres=0.015,
                                         min_dist=1,
                                         imprv_reso=True)
        peak_expected = {
            'x': [
                1387.2742808163878, 2968.47498296478, 5903.358430830144,
                7375.5487858692095, 9556.85674021868, 10991.012795048715
            ],
            'y': [
                9801.184720322835, 13337.61249582717, 4356.4307835150385,
                276.22478464487637, 6022.958717161858, 2003.9245670422251
            ]
        }
        print(peak_found)
        self.assertDictEqual(peak_found, peak_expected)

    def test_indexes(self):
        x = self.simulation.o_reso.stack_sigma['U']['U']['energy_eV']
        y = self.simulation.o_reso.stack_sigma['U']['U']['sigma_b']
        peak_df = fit_util.find_peak(y=y,
                                     x=x,
                                     x_name='x',
                                     thres=0.015,
                                     min_dist=1)
        peak_df_expected = {
            'x': [20.87, 36.68, 66.03, 80.75, 102.57, 116.91],
            'y': [
                9801.18472032, 13337.61249583, 4356.43078352, 276.22478464,
                6022.95871716, 2003.92456704
            ],
        }
        assert peak_df['x'].tolist() == pytest.approx(peak_df_expected['x'])
        assert peak_df['y'].tolist() == pytest.approx(peak_df_expected['y'])

        peak_df = fit_util.find_peak(y=y, x_name='x', thres=0.015, min_dist=1)
        peak_df_expected = {
            'x': [1387, 2968, 5903, 7375, 9557, 10991],
            'y': [
                9801.18472032, 13337.61249583, 4356.43078352, 276.22478464,
                6022.95871716, 2003.92456704
            ],
        }
        assert peak_df['x'].tolist() == pytest.approx(peak_df_expected['x'])
        assert peak_df['y'].tolist() == pytest.approx(peak_df_expected['y'])

Example #7
    def plot(self,
             error=True,
             table=True,
             grid=True,
             before=False,
             interp=False,
             total=True,
             all_elements=False,
             all_isotopes=False,
             items_to_plot=None,
             peak_mark=True,
             peak_id='indexed',
             y_type='transmission',
             x_type='energy',
             t_unit='us',
             logx=False,
             logy=False,
             save_fig=False):
        """

        :param error:
        :type error:
        :param table:
        :type table:
        :param grid:
        :type grid:
        :param before:
        :type before:
        :param interp:
        :type interp:
        :param total:
        :type total:
        :param all_elements:
        :type all_elements:
        :param all_isotopes:
        :type all_isotopes:
        :param items_to_plot:
        :type items_to_plot:
        :param peak_mark:
        :type peak_mark:
        :param peak_id:
        :type peak_id:
        :param y_type:
        :type y_type:
        :param x_type:
        :type x_type:
        :param t_unit:
        :type t_unit:
        :param logx:
        :type logx:
        :param logy:
        :type logy:
        :param save_fig:
        :type save_fig:
        :return:
        :rtype:
        """
        # Form signals from fitted_layer
        if self.fitted_simulation is None:
            self.fitted_simulation = Simulation(energy_min=self.energy_min,
                                                energy_max=self.energy_max,
                                                energy_step=self.energy_step)
            for each_layer in self.layer_list:
                self.fitted_simulation.add_layer(
                    layer=each_layer,
                    thickness_mm=self.fitted_layer.info[each_layer]
                    ['thickness']['value'],
                    density_gcm3=self.fitted_layer.info[each_layer]['density']
                    ['value'])
        if peak_id not in ['indexed', 'all']:
            raise ValueError("'peak_id=' must be one of ['indexed', 'all'].")
        simu_x = self.fitted_simulation.get_x(x_type='energy')
        simu_y = self.fitted_simulation.get_y(y_type='attenuation')

        # Get plot labels
        simu_label = 'Fit'
        simu_before_label = 'Fit_init'
        exp_label = 'Exp'
        exp_interp_label = 'Exp_interp'
        sample_name = ' & '.join(self.layer_list)
        if self.sample_vary is None:
            raise ValueError("Vary type ['density'|'thickness'] is not set.")
        fig_title = 'Fitting result of sample (' + sample_name + ')'

        # Create pd.DataFrame
        self.df = pd.DataFrame()

        # Clear any left plt
        plt.close()

        # plot table + graph
        if table is True:
            ax1 = plt.subplot2grid(shape=(10, 10),
                                   loc=(0, 1),
                                   rowspan=8,
                                   colspan=8)
        # plot graph only
        else:
            ax1 = plt.subplot(111)

        # Plot after fitting
        if total is True:
            ax1.plot(simu_x, simu_y, 'b-', label=simu_label, linewidth=1)

        # Save to df
        _live_df_x_label = simu_label + '_eV'
        _live_df_y_label = simu_label + '_attenuation'
        self.df[_live_df_x_label] = simu_x
        self.df[_live_df_y_label] = simu_y
        """Plot options"""

        # 1.
        if before is True:
            # Plot before fitting
            # Form signals from raw_layer
            simulation = Simulation(energy_min=self.energy_min,
                                    energy_max=self.energy_max,
                                    energy_step=self.energy_step)
            for each_layer in self.layer_list:
                simulation.add_layer(
                    layer=each_layer,
                    thickness_mm=self.raw_layer.info[each_layer]['thickness']
                    ['value'],
                    density_gcm3=self.raw_layer.info[each_layer]['density']
                    ['value'])
            simu_x = simulation.get_x(x_type='energy')
            simu_y_before = simulation.get_y(y_type='attenuation')
            ax1.plot(simu_x,
                     simu_y_before,
                     'c-.',
                     label=simu_before_label,
                     linewidth=1)
            # Save to df
            _live_df_x_label = simu_before_label + '_eV'
            _live_df_y_label = simu_before_label + '_attenuation'
            self.df[_live_df_x_label] = simu_x
            self.df[_live_df_y_label] = simu_y_before
        # 2.
        if interp is True:
            # Plot exp. data (interpolated)
            x_interp, y_interp = self.experiment.xy_scaled(
                energy_max=self.energy_max,
                energy_min=self.energy_min,
                energy_step=self.energy_step,
                x_type='energy',
                y_type='attenuation',
                baseline=self.baseline,
                offset_us=self.calibrated_offset_us,
                source_to_detector_m=self.calibrated_source_to_detector_m)
            ax1.plot(x_interp,
                     y_interp,
                     'r:',
                     label=exp_interp_label,
                     linewidth=1)
            # Save to df
            _live_df_x_label = exp_interp_label + '_eV'
            _live_df_y_label = exp_interp_label + '_attenuation'
            self.df[_live_df_x_label] = x_interp
            self.df[_live_df_y_label] = y_interp
        else:
            # Plot exp. data (raw)
            exp_x = self.experiment.get_x(
                x_type='energy',
                offset_us=self.calibrated_offset_us,
                source_to_detector_m=self.calibrated_source_to_detector_m)
            exp_y = self.experiment.get_y(y_type='attenuation',
                                          baseline=self.baseline)
            ax1.plot(exp_x,
                     exp_y,
                     linestyle='-',
                     linewidth=1,
                     marker='o',
                     markersize=2,
                     color='r',
                     label=exp_label)

            # Save to df
            _df = pd.DataFrame()
            _live_df_x_label = exp_label + '_eV'
            _live_df_y_label = exp_label + '_attenuation'
            _df[_live_df_x_label] = exp_x
            _df[_live_df_y_label] = exp_y
            # Concatenate since the length of raw and simu are not the same
            self.df = pd.concat([self.df, _df], axis=1)

        # 3.
        if error is True:
            # Plot fitting differences
            error_label = 'Diff.'
            _move_below_by = 0.2
            moved_fitted_residual = self.fitted_residual - _move_below_by
            ax1.plot(simu_x,
                     moved_fitted_residual,
                     'g-',
                     label=error_label,
                     linewidth=1,
                     alpha=1)
            # Save to df
            _live_df_x_label = error_label + '_eV'
            _live_df_y_label = error_label + '_attenuation'
            self.df[_live_df_x_label] = simu_x
            self.df[_live_df_y_label] = moved_fitted_residual
        # 4.
        if all_elements is True:
            # show signal from each elements
            _stack_signal = self.fitted_simulation.o_reso.stack_signal
            _stack = self.fitted_simulation.o_reso.stack
            y_axis_tag = 'attenuation'

            for _layer in _stack.keys():
                for _element in _stack[_layer]['elements']:
                    _y_axis = _stack_signal[_layer][_element][y_axis_tag]
                    ax1.plot(simu_x,
                             _y_axis,
                             label="{}".format(_element),
                             linewidth=1,
                             alpha=0.85)
                    # Save to df
                    _live_df_x_label = _element + '_eV'
                    _live_df_y_label = _element + '_attenuation'
                    self.df[_live_df_x_label] = simu_x
                    self.df[_live_df_y_label] = _y_axis
        # 4.
        if all_isotopes is True:
            # show signal from each isotopes
            _stack_signal = self.fitted_simulation.o_reso.stack_signal
            _stack = self.fitted_simulation.o_reso.stack
            y_axis_tag = 'attenuation'
            for _layer in _stack.keys():
                for _element in _stack[_layer]['elements']:
                    for _isotope in _stack[_layer][_element]['isotopes'][
                            'list']:
                        _y_axis = _stack_signal[_layer][_element][_isotope][
                            y_axis_tag]
                        ax1.plot(simu_x,
                                 _y_axis,
                                 label="{}".format(_isotope),
                                 linewidth=1,
                                 alpha=1)
                        # Save to df
                        _live_df_x_label = _isotope + '_eV'
                        _live_df_y_label = _isotope + '_attenuation'
                        self.df[_live_df_x_label] = simu_x
                        self.df[_live_df_y_label] = _y_axis
        # 5.
        if items_to_plot is not None:
            # plot specified from 'items_to_plot'
            y_axis_tag = 'attenuation'
            items = fit_util.Items(o_reso=self.fitted_simulation.o_reso,
                                   database=self.database)
            shaped_items = items.shaped(items_list=items_to_plot)
            _signal_dict = items.values(y_axis_type=y_axis_tag)
            for _each_label in list(_signal_dict.keys()):
                ax1.plot(simu_x,
                         _signal_dict[_each_label],
                         '--',
                         label=_each_label,
                         linewidth=1,
                         alpha=1)
                # Save to df
                _live_df_x_label = _each_label + '_eV'
                _live_df_y_label = _each_label + '_attenuation'
                self.df[_live_df_x_label] = simu_x
                self.df[_live_df_y_label] = _signal_dict[_each_label]

        # plot peaks detected and indexed
        if self.experiment.o_peak and self.experiment.o_peak.peak_map_indexed is not None:
            _peak_df_scaled = self.experiment.o_peak.peak_df_scaled
            _peak_map_indexed = self.experiment.o_peak.peak_map_indexed
            _peak_map_full = self.experiment.o_peak.peak_map_full
            if peak_mark is True:
                ax1.plot(_peak_df_scaled['x'],
                         _peak_df_scaled['y'],
                         'kx',
                         label='_nolegend_')
            if error is False:
                ax1.set_ylim(ymin=-0.1)
            for _ele_name in _peak_map_indexed.keys():
                if peak_id == 'all':
                    ax1.plot(_peak_map_full[_ele_name]['ideal']['x'], [-0.05] *
                             len(_peak_map_full[_ele_name]['ideal']['x']),
                             '|',
                             ms=10,
                             label=_ele_name)
                elif peak_id == 'indexed':
                    ax1.plot(_peak_map_indexed[_ele_name]['exp']['x'],
                             [-0.05] *
                             len(_peak_map_indexed[_ele_name]['exp']['x']),
                             '|',
                             ms=8,
                             label=_ele_name)
                if 'peak_span' in _peak_map_indexed[_ele_name].keys():
                    _data_point_x = _peak_map_indexed[_ele_name]['peak_span'][
                        'energy_ev']
                    _data_point_y = _peak_map_indexed[_ele_name]['peak_span'][
                        'y']
                    ax1.scatter(_data_point_x,
                                _data_point_y,
                                label='_nolegend_')

        # Set plot limit and captions
        fit_util.set_plt(ax=ax1,
                         fig_title=fig_title,
                         grid=grid,
                         x_type=x_type,
                         y_type=y_type,
                         t_unit=t_unit,
                         logx=logx,
                         logy=logy)

        # Plot table
        if table is True:
            if self.fitted_iso_result is None:
                columns = list(
                    self.fit_result.__dict__['params'].valuesdict().keys())
            else:
                columns = self.fit_result.__dict__['var_names']

            columns_to_show_dict = {}
            for _each in columns:
                _split = _each.split('_')
                if _split[0] == 'thickness':
                    _name_to_show = r'$d_{\rm{' + _split[-1] + '}}$' + ' (mm)'
                else:
                    _name_to_show = r'$\rho_{\rm{' + _split[
                        -1] + '}}$' + ' (g/cm$^3$)'
                columns_to_show_dict[_each] = _name_to_show
            columns_to_show = list(columns_to_show_dict.values())
            rows = ['Before', 'After']
            _row_before = []
            _row_after = []
            for _each in columns:
                _row_after.append(
                    round(
                        self.fit_result.__dict__['params'].valuesdict()[_each],
                        3))
                _row_before.append(
                    round(self.params_for_fit.valuesdict()[_each], 3))

            if self.fitted_iso_result is not None:
                _iso_columns = list(self.fitted_iso_result.__dict__['params'].
                                    valuesdict().keys())
                columns = columns + _iso_columns
                _iso_columns_to_show_dict = {}
                for _each_iso in _iso_columns:
                    _num_str = re.findall(r'\d+', _each_iso)[0]
                    _name_str = _each_iso[0]
                    _sup_name = r"$^{" + _num_str + "}$" + _name_str
                    _iso_columns_to_show_dict[_each_iso] = _sup_name
                _iso_columns_to_show = list(_iso_columns_to_show_dict.values())
                columns_to_show = columns_to_show + _iso_columns_to_show
                for _each in _iso_columns:
                    _row_after.append(
                        round(
                            self.fitted_iso_result.__dict__['params'].
                            valuesdict()[_each], 3))
                    _row_before.append(
                        round(self.params_for_iso_fit.valuesdict()[_each], 3))
            _table = ax1.table(rowLabels=rows,
                               colLabels=columns_to_show,
                               cellText=[_row_before, _row_after],
                               loc='upper right',
                               bbox=[0, -0.33, 1.0, 0.18])
            _table.auto_set_font_size(False)
            _table.set_fontsize(10)
            plt.tight_layout()

        if save_fig:
            _sample_name = '_'.join(self.layer_list)
            _filename = 'fitting_' + _sample_name + '.png'
            plt.savefig(_filename, dpi=600, transparent=True)
            plt.close()
        else:
            plt.show()
Example #8
    def fit(self, raw_layer: fit_util.Layer, vary='density', each_step=False):
        if vary not in ['density', 'thickness', 'none']:
            raise ValueError(
                "'vary=' can only be one of ['density', 'thickness', 'none']")
        # Default vary is: 'density'
        self.sample_vary = vary
        thickness_vary_tag = False
        density_vary_tag = True
        if vary == 'thickness':
            thickness_vary_tag = True
            density_vary_tag = False
        if vary == 'none':
            density_vary_tag = False
        self.raw_layer = raw_layer
        '''Load params'''
        print(raw_layer)
        self.layer_list = list(raw_layer.info.keys())
        self.params_for_fit = Parameters()
        for _each_layer in self.layer_list:
            if np.isnan(self.raw_layer.info[_each_layer]['density']['value']):
                self.raw_layer.info[_each_layer]['density'][
                    'value'] = pt.elements.isotope(_each_layer).density
            self.params_for_fit.add(
                'thickness_mm_' + _each_layer,
                value=self.raw_layer.info[_each_layer]['thickness']['value'],
                vary=thickness_vary_tag,
                min=0)
            self.params_for_fit.add(
                'density_gcm3_' + _each_layer,
                value=self.raw_layer.info[_each_layer]['density']['value'],
                vary=density_vary_tag,
                min=0)
        # Print before
        print(
            "+----------------- Fitting ({}) -----------------+\nParams before:"
            .format(vary))
        self.params_for_fit.pretty_print()
        # Fitting
        self.fit_result = minimize(y_gap_for_fitting,
                                   self.params_for_fit,
                                   method='leastsq',
                                   args=(self.exp_x_interp, self.exp_y_interp,
                                         self.layer_list, self.energy_min,
                                         self.energy_max, self.energy_step,
                                         self.database, each_step))
        # Print after
        print("\nParams after:")
        self.fit_result.__dict__['params'].pretty_print()
        # Print chi^2
        self.fitted_residual = self.fit_result.__dict__['residual']
        print("Fitting chi^2 : {}\n".format(sum(self.fitted_residual**2)))
        '''Export fitted params as Layer()'''

        # Save the fitted 'density' or 'thickness' in Layer()
        self.fitted_layer = Layer()
        for _each_layer in self.layer_list:
            self.fitted_layer.add_layer(
                layer=_each_layer,
                thickness_mm=self.fit_result.__dict__['params'].valuesdict()[
                    'thickness_mm_' + _each_layer],
                density_gcm3=self.fit_result.__dict__['params'].valuesdict()[
                    'density_gcm3_' + _each_layer])
        # self.fitted_fjac = self.fit_result.__dict__['fjac']
        # print(self.fit_result.__dict__['fjac'][0])
        '''Create fitted simulation'''

        self.fitted_simulation = Simulation(energy_min=self.energy_min,
                                            energy_max=self.energy_max,
                                            energy_step=self.energy_step,
                                            database=self.database)
        for each_layer in self.layer_list:
            self.fitted_simulation.add_layer(
                layer=each_layer,
                thickness_mm=self.fitted_layer.info[each_layer]['thickness']
                ['value'],
                density_gcm3=self.fitted_layer.info[each_layer]['density']
                ['value'])
        return self.fit_result
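The fit above is built on lmfit's Parameters container; a minimal sketch of that pattern follows (the layer name and values are illustrative, not taken from the example):

from lmfit import Parameters

# One Parameters() object holds thickness and density per layer; the 'vary'
# flag decides which quantity the minimizer is allowed to change.
params = Parameters()
params.add('thickness_mm_Gd', value=0.075, vary=False, min=0)  # kept fixed
params.add('density_gcm3_Gd', value=7.9, vary=True, min=0)     # fitted
print(params.valuesdict())  # dict-like mapping of current parameter values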
Example #9
class FitResonance(object):
    def __init__(self,
                 spectra_file,
                 data_file,
                 calibrated_offset_us,
                 calibrated_source_to_detector_m,
                 folder,
                 norm_factor=1,
                 baseline=False,
                 norm_to_file=None,
                 slice_start=None,
                 slice_end=None,
                 energy_min=1e-5,
                 energy_max=1000,
                 energy_step=0.01,
                 database='ENDF_VII'):
        self.experiment = Experiment(spectra_file=spectra_file,
                                     data_file=data_file,
                                     folder=folder)
        self.energy_min = energy_min
        self.energy_max = energy_max
        self.energy_step = energy_step
        self.database = database
        self.calibrated_offset_us = calibrated_offset_us
        self.calibrated_source_to_detector_m = calibrated_source_to_detector_m
        self.raw_layer = None
        self.experiment.slice(start=slice_start, end=slice_end)
        self.baseline = baseline
        if norm_to_file is not None:
            self.experiment.norm_to(norm_to_file, norm_factor=norm_factor)
        self.exp_x_interp, self.exp_y_interp = self.experiment.xy_scaled(
            energy_min=self.energy_min,
            energy_max=self.energy_max,
            energy_step=self.energy_step,
            x_type='energy',
            y_type='attenuation',
            offset_us=self.calibrated_offset_us,
            source_to_detector_m=self.calibrated_source_to_detector_m,
            baseline=self.baseline)

        self.fit_result = None
        self.fitted_density_gcm3 = None
        self.fitted_thickness_mm = None
        self.fitted_residual = None
        self.fitted_gap = None
        self.fitted_fjac = None
        self.fitted_layer = None
        self.fitted_simulation = None
        self.layer_list = None
        self.raw_layer = None
        self.fitted_iso_result = None
        self.fitted_iso_residual = None
        self.params_for_fit = None
        self.params_for_iso_fit = None
        self.isotope_stack = {}
        self.sample_vary = None
        self.df = None
        # self.peak_map_full = None
        # self.peak_map_indexed = None

    def fit(self, raw_layer: fit_util.Layer, vary='density', each_step=False):
        if vary not in ['density', 'thickness', 'none']:
            raise ValueError(
                "'vary=' can only be one of ['density', 'thickness', 'none']")
        # Default vary is: 'density'
        self.sample_vary = vary
        thickness_vary_tag = False
        density_vary_tag = True
        if vary == 'thickness':
            thickness_vary_tag = True
            density_vary_tag = False
        if vary == 'none':
            density_vary_tag = False
        self.raw_layer = raw_layer
        '''Load params'''
        print(raw_layer)
        self.layer_list = list(raw_layer.info.keys())
        self.params_for_fit = Parameters()
        for _each_layer in self.layer_list:
            if np.isnan(self.raw_layer.info[_each_layer]['density']['value']):
                self.raw_layer.info[_each_layer]['density'][
                    'value'] = pt.elements.isotope(_each_layer).density
            self.params_for_fit.add(
                'thickness_mm_' + _each_layer,
                value=self.raw_layer.info[_each_layer]['thickness']['value'],
                vary=thickness_vary_tag,
                min=0)
            self.params_for_fit.add(
                'density_gcm3_' + _each_layer,
                value=self.raw_layer.info[_each_layer]['density']['value'],
                vary=density_vary_tag,
                min=0)
        # Print before
        print(
            "+----------------- Fitting ({}) -----------------+\nParams before:"
            .format(vary))
        self.params_for_fit.pretty_print()
        # Fitting
        self.fit_result = minimize(y_gap_for_fitting,
                                   self.params_for_fit,
                                   method='leastsq',
                                   args=(self.exp_x_interp, self.exp_y_interp,
                                         self.layer_list, self.energy_min,
                                         self.energy_max, self.energy_step,
                                         self.database, each_step))
        # Print after
        print("\nParams after:")
        self.fit_result.__dict__['params'].pretty_print()
        # Print chi^2
        self.fitted_residual = self.fit_result.__dict__['residual']
        print("Fitting chi^2 : {}\n".format(sum(self.fitted_residual**2)))
        '''Export fitted params as Layer()'''

        # Save the fitted 'density' or 'thickness' in Layer()
        self.fitted_layer = Layer()
        for _each_layer in self.layer_list:
            self.fitted_layer.add_layer(
                layer=_each_layer,
                thickness_mm=self.fit_result.__dict__['params'].valuesdict()[
                    'thickness_mm_' + _each_layer],
                density_gcm3=self.fit_result.__dict__['params'].valuesdict()[
                    'density_gcm3_' + _each_layer])
        # self.fitted_fjac = self.fit_result.__dict__['fjac']
        # print(self.fit_result.__dict__['fjac'][0])
        '''Create fitted simulation'''

        self.fitted_simulation = Simulation(energy_min=self.energy_min,
                                            energy_max=self.energy_max,
                                            energy_step=self.energy_step,
                                            database=self.database)
        for each_layer in self.layer_list:
            self.fitted_simulation.add_layer(
                layer=each_layer,
                thickness_mm=self.fitted_layer.info[each_layer]['thickness']
                ['value'],
                density_gcm3=self.fitted_layer.info[each_layer]['density']
                ['value'])
        return self.fit_result

    def fit_iso(self, layer, each_step=False):
        """

        :param layer:
        :type layer:
        :param each_step:
        :type each_step:
        :return:
        :rtype:
        """
        self.params_for_iso_fit = Parameters()
        self.isotope_stack[layer] = {
            'list':
            self.fitted_simulation.o_reso.stack[layer][layer]['isotopes']
            ['list'],
            'ratios':
            self.fitted_simulation.o_reso.stack[layer][layer]['isotopes']
            ['isotopic_ratio']
        }
        _formatted_isotope_list = []
        # Form the list of lmfit parameter names: reverse the '-'-separated
        # parts of each isotope name and join them to obtain valid parameter
        # identifiers
        for _each_isotope in self.isotope_stack[layer]['list']:
            _split = _each_isotope.split('-')
            _formatted_isotope_list.append(''.join(_split[::-1]))
        _params_name_list = _formatted_isotope_list
        # Form Parameters() for fitting
        for _name_index in range(len(_params_name_list)):
            self.params_for_iso_fit.add(
                _params_name_list[_name_index],
                value=self.isotope_stack[layer]['ratios'][_name_index],
                min=0,
                max=1)
        # Constrain sum of isotope ratios to be 1

        # _params_name_list_temp = _params_name_list[:]
        # _constraint = '+'.join(_params_name_list_temp)
        # self.params_for_iso_fit.add('sum', expr=_constraint)
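        # Instead, the last isotope's ratio is expressed as 1 minus the sum of
        # the others, so the ratios always sum to 1 while the remaining
        # parameters stay free: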

        _constraint_param = _params_name_list[-1]
        _params_name_list_temp = _params_name_list[:]
        _params_name_list_temp.remove(_constraint_param)

        _constraint = '-'.join(_params_name_list_temp)
        _constraint = '1-' + _constraint
        self.params_for_iso_fit[_constraint_param].set(expr=_constraint)

        # Print params before
        print(
            "+----------------- Fitting (isotopic at.%) -----------------+\nParams before:"
        )
        self.params_for_iso_fit.pretty_print()
        # Fitting
        self.fitted_iso_result = minimize(
            y_gap_for_iso_fitting,
            self.params_for_iso_fit,
            method='leastsq',
            args=(self.exp_x_interp, self.exp_y_interp, layer,
                  _formatted_isotope_list, self.fitted_simulation, each_step))
        # Print params after
        print("\nParams after:")
        self.fitted_iso_result.__dict__['params'].pretty_print()
        # Print chi^2
        self.fitted_iso_residual = self.fitted_iso_result.__dict__['residual']
        print("Fit iso chi^2 : {}\n".format(
            self.fitted_iso_result.__dict__['chisqr']))

        return

    def molar_conc(self):
        molar_conc_units = 'mol/cm3'
        print(
            "Molar-conc. ({})\tBefore_fit\tAfter_fit".format(molar_conc_units))
        for _each_layer in self.layer_list:
            molar_mass_value = self.fitted_simulation.o_reso.stack[
                _each_layer][_each_layer]['molar_mass']['value']
            molar_mass_units = self.fitted_simulation.o_reso.stack[
                _each_layer][_each_layer]['molar_mass']['units']
            # Adding molar_mass to fitted_layer info
            self.fitted_layer.info[_each_layer]['molar_mass'][
                'value'] = molar_mass_value
            self.fitted_layer.info[_each_layer]['molar_mass'][
                'units'] = molar_mass_units
            # Adding molar_mass to raw_layer info
            self.raw_layer.info[_each_layer]['molar_mass'][
                'value'] = molar_mass_value
            self.raw_layer.info[_each_layer]['molar_mass'][
                'units'] = molar_mass_units
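            # molar concentration (mol/cm3) = density (g/cm3) / molar mass (g/mol);
            # e.g. natural Gd (illustrative): 7.90 / 157.25 ≈ 0.0502 mol/cm3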
            # Adding molar_concentration to fitted_layer info
            molar_conc_value = self.fitted_layer.info[_each_layer]['density'][
                'value'] / molar_mass_value
            self.fitted_layer.info[_each_layer]['molar_conc'][
                'value'] = molar_conc_value
            self.fitted_layer.info[_each_layer]['molar_conc'][
                'units'] = molar_conc_units
            # Calculate starting molar_concentration and fitted_layer info
            start_molar_conc_value = self.raw_layer.info[_each_layer][
                'density']['value'] / molar_mass_value
            self.raw_layer.info[_each_layer]['molar_conc'][
                'value'] = start_molar_conc_value
            self.raw_layer.info[_each_layer]['molar_conc'][
                'units'] = molar_conc_units
            # molar_conc_output[_each_layer] = {'Before_fit': start_molar_conc_value,
            #                                   'After_fit': molar_conc_value}
            print("{}\t{}\t{}".format(_each_layer, start_molar_conc_value,
                                      molar_conc_value))
        print('\n')

        return self.fitted_layer.info

    def index_peak(self,
                   thres,
                   min_dist,
                   map_thres=0.01,
                   map_min_dist=20,
                   rel_tol=5e-3,
                   isotope=False):
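        # Two-stage peak indexing: (1) detect peaks in the measured spectrum
        # and rescale their positions to energy, (2) build the theoretical
        # peak map from the fitted simulation and match experimental peaks to
        # it within the relative tolerance rel_tol.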
        if self.experiment.o_peak is None:
            self.experiment.find_peak(thres=thres, min_dist=min_dist)
        self.experiment._scale_peak_with_ev(
            energy_min=self.energy_min,
            energy_max=self.energy_max,
            offset_us=self.calibrated_offset_us,
            source_to_detector_m=self.calibrated_source_to_detector_m)
        assert self.experiment.o_peak.peak_df is not None
        assert self.experiment.o_peak.peak_df_scaled is not None

        _peak_map = self.fitted_simulation.peak_map(
            thres=map_thres,
            min_dist=map_min_dist,
            impr_reso=True,
            # isotope=isotope,
        )
        self.experiment.o_peak.peak_map_full = _peak_map
        self.experiment.o_peak.index_peak(peak_map=_peak_map, rel_tol=rel_tol)
        return self.experiment.o_peak.peak_map_indexed

    # def analyze_peak(self):
    #     pass

    def plot(self,
             error=True,
             table=True,
             grid=True,
             before=False,
             interp=False,
             total=True,
             all_elements=False,
             all_isotopes=False,
             items_to_plot=None,
             peak_mark=True,
             peak_id='indexed',
             y_type='transmission',
             x_type='energy',
             t_unit='us',
             logx=False,
             logy=False,
             save_fig=False):
        """

        :param error:
        :type error:
        :param table:
        :type table:
        :param grid:
        :type grid:
        :param before:
        :type before:
        :param interp:
        :type interp:
        :param total:
        :type total:
        :param all_elements:
        :type all_elements:
        :param all_isotopes:
        :type all_isotopes:
        :param items_to_plot:
        :type items_to_plot:
        :param peak_mark:
        :type peak_mark:
        :param peak_id:
        :type peak_id:
        :param y_type:
        :type y_type:
        :param x_type:
        :type x_type:
        :param t_unit:
        :type t_unit:
        :param logx:
        :type logx:
        :param logy:
        :type logy:
        :param save_fig:
        :type save_fig:
        :return:
        :rtype:
        """
        # Form signals from fitted_layer
        if self.fitted_simulation is None:
            self.fitted_simulation = Simulation(energy_min=self.energy_min,
                                                energy_max=self.energy_max,
                                                energy_step=self.energy_step)
            for each_layer in self.layer_list:
                self.fitted_simulation.add_layer(
                    layer=each_layer,
                    thickness_mm=self.fitted_layer.info[each_layer]
                    ['thickness']['value'],
                    density_gcm3=self.fitted_layer.info[each_layer]['density']
                    ['value'])
        if peak_id not in ['indexed', 'all']:
            raise ValueError("'peak=' must be one of ['indexed', 'full'].")
        simu_x = self.fitted_simulation.get_x(x_type='energy')
        simu_y = self.fitted_simulation.get_y(y_type='attenuation')

        # Get plot labels
        simu_label = 'Fit'
        simu_before_label = 'Fit_init'
        exp_label = 'Exp'
        exp_interp_label = 'Exp_interp'
        sample_name = ' & '.join(self.layer_list)
        if self.sample_vary is None:
            raise ValueError("Vary type ['density'|'thickness'] is not set.")
        fig_title = 'Fitting result of sample (' + sample_name + ')'

        # Create pd.DataFrame
        self.df = pd.DataFrame()

        # Clear any left plt
        plt.close()

        # plot table + graph
        if table is True:
            ax1 = plt.subplot2grid(shape=(10, 10),
                                   loc=(0, 1),
                                   rowspan=8,
                                   colspan=8)
        # plot graph only
        else:
            ax1 = plt.subplot(111)

        # Plot after fitting
        if total is True:
            ax1.plot(simu_x, simu_y, 'b-', label=simu_label, linewidth=1)

        # Save to df
        _live_df_x_label = simu_label + '_eV'
        _live_df_y_label = simu_label + '_attenuation'
        self.df[_live_df_x_label] = simu_x
        self.df[_live_df_y_label] = simu_y
        """Plot options"""

        # 1.
        if before is True:
            # Plot before fitting
            # Form signals from raw_layer
            simulation = Simulation(energy_min=self.energy_min,
                                    energy_max=self.energy_max,
                                    energy_step=self.energy_step)
            for each_layer in self.layer_list:
                simulation.add_layer(
                    layer=each_layer,
                    thickness_mm=self.raw_layer.info[each_layer]['thickness']
                    ['value'],
                    density_gcm3=self.raw_layer.info[each_layer]['density']
                    ['value'])
            simu_x = simulation.get_x(x_type='energy')
            simu_y_before = simulation.get_y(y_type='attenuation')
            ax1.plot(simu_x,
                     simu_y_before,
                     'c-.',
                     label=simu_before_label,
                     linewidth=1)
            # Save to df
            _live_df_x_label = simu_before_label + '_eV'
            _live_df_y_label = simu_before_label + '_attenuation'
            self.df[_live_df_x_label] = simu_x
            self.df[_live_df_y_label] = simu_y_before
        # 2.
        if interp is True:
            # Plot exp. data (interpolated)
            x_interp, y_interp = self.experiment.xy_scaled(
                energy_max=self.energy_max,
                energy_min=self.energy_min,
                energy_step=self.energy_step,
                x_type='energy',
                y_type='attenuation',
                baseline=self.baseline,
                offset_us=self.calibrated_offset_us,
                source_to_detector_m=self.calibrated_source_to_detector_m)
            ax1.plot(x_interp,
                     y_interp,
                     'r:',
                     label=exp_interp_label,
                     linewidth=1)
            # Save to df
            _live_df_x_label = exp_interp_label + '_eV'
            _live_df_y_label = exp_interp_label + '_attenuation'
            self.df[_live_df_x_label] = x_interp
            self.df[_live_df_y_label] = y_interp
        else:
            # Plot exp. data (raw)
            exp_x = self.experiment.get_x(
                x_type='energy',
                offset_us=self.calibrated_offset_us,
                source_to_detector_m=self.calibrated_source_to_detector_m)
            exp_y = self.experiment.get_y(y_type='attenuation',
                                          baseline=self.baseline)
            ax1.plot(exp_x,
                     exp_y,
                     linestyle='-',
                     linewidth=1,
                     marker='o',
                     markersize=2,
                     color='r',
                     label=exp_label)

            # Save to df
            _df = pd.DataFrame()
            _live_df_x_label = exp_label + '_eV'
            _live_df_y_label = exp_label + '_attenuation'
            _df[_live_df_x_label] = exp_x
            _df[_live_df_y_label] = exp_y
            # Concatenate since the length of raw and simu are not the same
            self.df = pd.concat([self.df, _df], axis=1)

        # 3.
        if error is True:
            # Plot fitting differences
            error_label = 'Diff.'
            _move_below_by = 0.2
            moved_fitted_residual = self.fitted_residual - _move_below_by
            ax1.plot(simu_x,
                     moved_fitted_residual,
                     'g-',
                     label=error_label,
                     linewidth=1,
                     alpha=1)
            # Save to df
            _live_df_x_label = error_label + '_eV'
            _live_df_y_label = error_label + '_attenuation'
            self.df[_live_df_x_label] = simu_x
            self.df[_live_df_y_label] = moved_fitted_residual
        # 4.
        if all_elements is True:
            # show signal from each elements
            _stack_signal = self.fitted_simulation.o_reso.stack_signal
            _stack = self.fitted_simulation.o_reso.stack
            y_axis_tag = 'attenuation'

            for _layer in _stack.keys():
                for _element in _stack[_layer]['elements']:
                    _y_axis = _stack_signal[_layer][_element][y_axis_tag]
                    ax1.plot(simu_x,
                             _y_axis,
                             label="{}".format(_element),
                             linewidth=1,
                             alpha=0.85)
                    # Save to df
                    _live_df_x_label = _element + '_eV'
                    _live_df_y_label = _element + '_attenuation'
                    self.df[_live_df_x_label] = simu_x
                    self.df[_live_df_y_label] = _y_axis
        # 4.
        if all_isotopes is True:
            # show signal from each isotopes
            _stack_signal = self.fitted_simulation.o_reso.stack_signal
            _stack = self.fitted_simulation.o_reso.stack
            y_axis_tag = 'attenuation'
            for _layer in _stack.keys():
                for _element in _stack[_layer]['elements']:
                    for _isotope in _stack[_layer][_element]['isotopes'][
                            'list']:
                        _y_axis = _stack_signal[_layer][_element][_isotope][
                            y_axis_tag]
                        ax1.plot(simu_x,
                                 _y_axis,
                                 label="{}".format(_isotope),
                                 linewidth=1,
                                 alpha=1)
                        # Save to df
                        _live_df_x_label = _isotope + '_eV'
                        _live_df_y_label = _isotope + '_attenuation'
                        self.df[_live_df_x_label] = simu_x
                        self.df[_live_df_y_label] = _y_axis
        # 5.
        if items_to_plot is not None:
            # plot specified from 'items_to_plot'
            y_axis_tag = 'attenuation'
            items = fit_util.Items(o_reso=self.fitted_simulation.o_reso,
                                   database=self.database)
            shaped_items = items.shaped(items_list=items_to_plot)
            _signal_dict = items.values(y_axis_type=y_axis_tag)
            for _each_label in list(_signal_dict.keys()):
                ax1.plot(simu_x,
                         _signal_dict[_each_label],
                         '--',
                         label=_each_label,
                         linewidth=1,
                         alpha=1)
                # Save to df
                _live_df_x_label = _each_label + '_eV'
                _live_df_y_label = _each_label + '_attenuation'
                self.df[_live_df_x_label] = simu_x
                self.df[_live_df_y_label] = _signal_dict[_each_label]

        # plot peaks detected and indexed
        if self.experiment.o_peak and self.experiment.o_peak.peak_map_indexed is not None:
            _peak_df_scaled = self.experiment.o_peak.peak_df_scaled
            _peak_map_indexed = self.experiment.o_peak.peak_map_indexed
            _peak_map_full = self.experiment.o_peak.peak_map_full
            if peak_mark is True:
                ax1.plot(_peak_df_scaled['x'],
                         _peak_df_scaled['y'],
                         'kx',
                         label='_nolegend_')
            if error is False:
                ax1.set_ylim(ymin=-0.1)
            for _ele_name in _peak_map_indexed.keys():
                if peak_id == 'all':
                    ax1.plot(_peak_map_full[_ele_name]['ideal']['x'], [-0.05] *
                             len(_peak_map_full[_ele_name]['ideal']['x']),
                             '|',
                             ms=10,
                             label=_ele_name)
                elif peak_id == 'indexed':
                    ax1.plot(_peak_map_indexed[_ele_name]['exp']['x'],
                             [-0.05] *
                             len(_peak_map_indexed[_ele_name]['exp']['x']),
                             '|',
                             ms=8,
                             label=_ele_name)
                if 'peak_span' in _peak_map_indexed[_ele_name].keys():
                    _data_point_x = _peak_map_indexed[_ele_name]['peak_span'][
                        'energy_ev']
                    _data_point_y = _peak_map_indexed[_ele_name]['peak_span'][
                        'y']
                    ax1.scatter(_data_point_x,
                                _data_point_y,
                                label='_nolegend_')

        # Set plot limit and captions
        fit_util.set_plt(ax=ax1,
                         fig_title=fig_title,
                         grid=grid,
                         x_type=x_type,
                         y_type=y_type,
                         t_unit=t_unit,
                         logx=logx,
                         logy=logy)

        # Plot table
        if table is True:
            if self.fitted_iso_result is None:
                columns = list(
                    self.fit_result.__dict__['params'].valuesdict().keys())
            else:
                columns = self.fit_result.__dict__['var_names']

            columns_to_show_dict = {}
            for _each in columns:
                _split = _each.split('_')
                if _split[0] == 'thickness':
                    _name_to_show = r'$d_{\rm{' + _split[-1] + '}}$' + ' (mm)'
                else:
                    _name_to_show = r'$\rho_{\rm{' + _split[
                        -1] + '}}$' + ' (g/cm$^3$)'
                columns_to_show_dict[_each] = _name_to_show
            columns_to_show = list(columns_to_show_dict.values())
            rows = ['Before', 'After']
            _row_before = []
            _row_after = []
            for _each in columns:
                _row_after.append(
                    round(
                        self.fit_result.__dict__['params'].valuesdict()[_each],
                        3))
                _row_before.append(
                    round(self.params_for_fit.valuesdict()[_each], 3))

            if self.fitted_iso_result is not None:
                _iso_columns = list(self.fitted_iso_result.__dict__['params'].
                                    valuesdict().keys())
                columns = columns + _iso_columns
                _iso_columns_to_show_dict = {}
                for _each_iso in _iso_columns:
                    _num_str = re.findall(r'\d+', _each_iso)[0]
                    _name_str = re.findall(r'[A-Za-z]+', _each_iso)[0]
                    _sup_name = r"$^{" + _num_str + "}$" + _name_str
                    _iso_columns_to_show_dict[_each_iso] = _sup_name
                _iso_columns_to_show = list(_iso_columns_to_show_dict.values())
                columns_to_show = columns_to_show + _iso_columns_to_show
                for _each in _iso_columns:
                    _row_after.append(
                        round(
                            self.fitted_iso_result.__dict__['params'].
                            valuesdict()[_each], 3))
                    _row_before.append(
                        round(self.params_for_iso_fit.valuesdict()[_each], 3))
            _table = ax1.table(rowLabels=rows,
                               colLabels=columns_to_show,
                               cellText=[_row_before, _row_after],
                               loc='upper right',
                               bbox=[0, -0.33, 1.0, 0.18])
            _table.auto_set_font_size(False)
            _table.set_fontsize(10)
            plt.tight_layout()

        if save_fig:
            _sample_name = '_'.join(self.layer_list)
            _filename = 'fitting_' + _sample_name + '.png'
            plt.savefig(_filename, dpi=600, transparent=True)
            plt.close()
        else:
            plt.show()

    def export(self, filename=None):
        if self.df is None:
            raise ValueError(
                "pd.DataFrame is empty, please run required step: FitResonance.plot()"
            )
        elif filename is None:
            self.df.to_clipboard(excel=True)
        else:
            self.df.to_csv(filename)
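A minimal end-to-end sketch of how FitResonance is intended to be used; the import paths, file names, calibration values and the Gd layer below are assumptions for illustration, not values taken from the examples above:

import numpy as np
from ResoFit.fitresonance import FitResonance  # assumed module path
import ResoFit._utilities as fit_util          # assumed module path for Layer

fit = FitResonance(spectra_file='Image002_Spectra.txt',
                   data_file='sample.csv',
                   folder='data',
                   calibrated_offset_us=2.75,
                   calibrated_source_to_detector_m=16.45,
                   baseline=True,
                   energy_min=7,
                   energy_max=150,
                   energy_step=0.01)

layer = fit_util.Layer()
# NaN density assumed here -> fit() falls back to the tabulated element density
layer.add_layer(layer='Gd', thickness_mm=0.075, density_gcm3=np.nan)

fit.fit(raw_layer=layer, vary='density')  # fit density (or thickness)
fit.fit_iso(layer='Gd')                   # then refine isotopic ratios
fit.molar_conc()                          # print molar concentrations
fit.plot(table=True, error=True)          # visualize; also fills self.df
fit.export('fit_result.csv')              # write the plotted curves to CSV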
Example #10
import matplotlib.pyplot as plt

from ResoFit.experiment import Experiment
from ResoFit.simulation import Simulation
from ResoFit._pulse_shape import NeutronPulse

overwrite_csv = False
source_to_detector_m = 16.45

simulation = Simulation(energy_min=78,
                        energy_max=82,
                        energy_step=0.01,
                        database='ENDF_VII')
simulation.add_layer(layer='Gd', thickness_mm=0.075)
simulation._convolve_beam_shapes(source_to_detector_m=source_to_detector_m,
                                 model_index=1,
                                 conv_proton=True,
                                 proton_params={})
# model_index:
# 1: 'ikeda_carpenter',
# 2: 'cole_windsor',
# 3: 'pseudo_voigt',
# 4: 'ikeda_carpenter_jparc',
# 5: 'cole_windsor_jparc'
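# For instance, the Cole-Windsor pulse shape could be selected by changing
# only the index (a sketch; other arguments kept as above):
# simulation._convolve_beam_shapes(source_to_detector_m=source_to_detector_m,
#                                  model_index=2,
#                                  conv_proton=True,
#                                  proton_params={})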

# folder = 'data/IPTS_19558/reso_data_19558'
# data_file1 = 'Gd_thin.csv'
# spectra_file = 'Image002_Spectra.txt'
# experiment1 = Experiment(data_file=data_file1,
#                          spectra_file=spectra_file,
#                          folder=folder,
# Imports required by the unit tests below
import unittest

import numpy as np


class TestSimulation(unittest.TestCase):
    energy_min = 7
    energy_max = 10
    energy_step = 1
    database = '_data_for_unittest'
    simulation = Simulation(energy_min=energy_min,
                            energy_max=energy_max,
                            energy_step=energy_step,
                            database='_data_for_unittest')

    def test_add_layer(self):
        simulation = self.simulation
        simulation.add_layer(layer='U', thickness_mm=0.15)
        _simu_x_returned = simulation.get_x(x_type='energy')
        _simu_y_returned = simulation.get_y(y_type='attenuation')
        _simu_x_expected = np.array([7., 8., 9., 10.])
        _simu_y_expected = np.array(
            [0.03699373, 0.00936537, 0.00854215, 0.00726004])
        self.assertAlmostEqual(_simu_x_returned[0],
                               _simu_x_expected[0],
                               delta=0.000001)
        self.assertAlmostEqual(_simu_x_returned[1],
                               _simu_x_expected[1],
                               delta=0.000001)
        self.assertAlmostEqual(_simu_x_returned[2],
                               _simu_x_expected[2],
                               delta=0.000001)
        self.assertAlmostEqual(_simu_x_returned[3],
                               _simu_x_expected[3],
                               delta=0.000001)
        self.assertAlmostEqual(_simu_y_returned[0],
                               _simu_y_expected[0],
                               delta=0.000001)
        self.assertAlmostEqual(_simu_y_returned[1],
                               _simu_y_expected[1],
                               delta=0.000001)
        self.assertAlmostEqual(_simu_y_returned[2],
                               _simu_y_expected[2],
                               delta=0.000001)
        self.assertAlmostEqual(_simu_y_returned[3],
                               _simu_y_expected[3],
                               delta=0.000001)

    def test_set_isotopic_ratio(self):
        simulation = self.simulation
        simulation.add_layer(layer='U', thickness_mm=0.15)
        simulation.set_isotopic_ratio('U', 'U', [0., 0., 0.99, 0.01])
        _isotopic_ratio_list_wrong_len = [0., 0.99, 0.01]
        self.assertRaises(
            ValueError,
            simulation.set_isotopic_ratio,
            layer='U',
            element='U',
            new_isotopic_ratio_list=_isotopic_ratio_list_wrong_len)
        _simu_x_returned = simulation.get_x(x_type='energy')
        _simu_y_returned = simulation.get_y(y_type='attenuation')
        _simu_x_expected = np.array([7., 8., 9., 10.])
        _simu_y_expected = np.array(
            [0.06464851, 0.01259978, 0.11890677, 0.02255858])
        self.assertAlmostEqual(_simu_x_returned[0],
                               _simu_x_expected[0],
                               delta=0.000001)
        self.assertAlmostEqual(_simu_x_returned[1],
                               _simu_x_expected[1],
                               delta=0.000001)
        self.assertAlmostEqual(_simu_x_returned[2],
                               _simu_x_expected[2],
                               delta=0.000001)
        self.assertAlmostEqual(_simu_x_returned[3],
                               _simu_x_expected[3],
                               delta=0.000001)
        self.assertAlmostEqual(_simu_y_returned[0],
                               _simu_y_expected[0],
                               delta=0.000001)
        self.assertAlmostEqual(_simu_y_returned[1],
                               _simu_y_expected[1],
                               delta=0.000001)
        self.assertAlmostEqual(_simu_y_returned[2],
                               _simu_y_expected[2],
                               delta=0.000001)
        self.assertAlmostEqual(_simu_y_returned[3],
                               _simu_y_expected[3],
                               delta=0.000001)

    def test_x_angstrom(self):
        simulation = self.simulation
        simulation.add_layer(layer='U', thickness_mm=0.15)
        _x_returned = simulation.get_x(x_type='lambda')
        _x_expected = np.array(
            [0.10809189, 0.10111071, 0.09532809, 0.09043617])
        self.assertAlmostEqual(_x_returned[0], _x_expected[0], delta=0.000001)
        self.assertAlmostEqual(_x_returned[1], _x_expected[1], delta=0.000001)
        self.assertAlmostEqual(_x_returned[2], _x_expected[2], delta=0.000001)
        self.assertAlmostEqual(_x_returned[3], _x_expected[3], delta=0.000001)

    def test_y_transmission(self):
        simulation = self.simulation
        simulation.add_layer(layer='U', thickness_mm=0.15)
        _y_returned = simulation.get_y(y_type='transmission')
        _y_expected = np.array(
            [0.96300627, 0.99063463, 0.99145785, 0.99273996])
        self.assertAlmostEqual(_y_returned[0], _y_expected[0], delta=0.000001)
        self.assertAlmostEqual(_y_returned[1], _y_expected[1], delta=0.000001)
        self.assertAlmostEqual(_y_returned[2], _y_expected[2], delta=0.000001)
        self.assertAlmostEqual(_y_returned[3], _y_expected[3], delta=0.000001)

    def test_xy_simu(self):
        simulation = self.simulation
        simulation.add_layer(layer='U', thickness_mm=0.15)
        _x_returned = simulation.get_x(x_type='lambda')
        _y_returned = simulation.get_y(y_type='transmission')
        _x_expected = np.array(
            [0.10809189, 0.10111071, 0.09532809, 0.09043617])
        self.assertAlmostEqual(_x_returned[0], _x_expected[0], delta=0.000001)
        self.assertAlmostEqual(_x_returned[1], _x_expected[1], delta=0.000001)
        self.assertAlmostEqual(_x_returned[2], _x_expected[2], delta=0.000001)
        self.assertAlmostEqual(_x_returned[3], _x_expected[3], delta=0.000001)
        _y_expected = np.array(
            [0.96300627, 0.99063463, 0.99145785, 0.99273996])
        self.assertAlmostEqual(_y_returned[0], _y_expected[0], delta=0.000001)
        self.assertAlmostEqual(_y_returned[1], _y_expected[1], delta=0.000001)
        self.assertAlmostEqual(_y_returned[2], _y_expected[2], delta=0.000001)
        self.assertAlmostEqual(_y_returned[3], _y_expected[3], delta=0.000001)
        _x_returned = simulation.get_x(x_type='energy')
        _y_returned = simulation.get_y(y_type='attenuation')
        _x_expected = np.array([7., 8., 9., 10.])
        self.assertAlmostEqual(_x_returned[0], _x_expected[0], delta=0.000001)
        self.assertAlmostEqual(_x_returned[1], _x_expected[1], delta=0.000001)
        self.assertAlmostEqual(_x_returned[2], _x_expected[2], delta=0.000001)
        self.assertAlmostEqual(_x_returned[3], _x_expected[3], delta=0.000001)
        _y_expected = np.array(
            [0.03699373, 0.00936537, 0.00854215, 0.00726004])
        self.assertAlmostEqual(_y_returned[0], _y_expected[0], delta=0.000001)
        self.assertAlmostEqual(_y_returned[1], _y_expected[1], delta=0.000001)
        self.assertAlmostEqual(_y_returned[2], _y_expected[2], delta=0.000001)
        self.assertAlmostEqual(_y_returned[3], _y_expected[3], delta=0.000001)

    def test_peak_map(self):
        pass
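To run this test suite directly, the standard unittest entry point can be added at module level (the repeated element-by-element assertions above could equally be condensed with np.testing.assert_allclose):

if __name__ == '__main__':
    unittest.main()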
import numpy as np
import pprint
import matplotlib.pyplot as plt
from ResoFit.experiment import Experiment
import peakutils as pku
from ResoFit.simulation import Simulation
from scipy import signal
import scipy
folder = 'data/IPTS_19558/reso_data_19558'
data_file1 = 'spheres.csv'
# data_file2 = 'spheres_background_1.csv'
spectra_file = 'Image002_Spectra.txt'
#
# source_to_detector_m = 16.45  # 16#16.445359069030175#16.447496101100739
# offset_us = 2.752  # 0#2.7120797253959119#2.7355447625559037
# baseline = False
# energy_xmax = 150
# lambda_xmax = None
# x_axis = 'number'
#
# # # Calibrate the peak positions
experiment1 = Experiment(data_file=data_file1,
                         spectra_file=spectra_file,
                         folder=folder,
                         baseline=True)
experiment1.slice(start=300, reset_index=False)
peak_df = experiment1.find_peak()
experiment1.plot(x_axis='number', t_unit='s')
plt.plot(peak_df['x_num'], peak_df['y'], 'kx')
energy_min = 7
import numpy as np
import pprint
import matplotlib.pyplot as plt
from ResoFit.experiment import Experiment
import peakutils as pku
from ResoFit.simulation import Simulation
from scipy import signal
import scipy
folder = 'data/IPTS_20439/reso_data_20439'
sample_name = [
    'Ta on MCP (6pC)',
    'Ta on 10mm Pb on MCP (6pC)',
    # 'Ta',
    # 'Ta 9C',
    # 'Ta Cd',
    'Ta + Cd on MCP (9pC)',
    'Ta inside ILL at 80C Cd on MCP (12pC)'
]
data_file = [
    'Ta_no_lead_2C_total_6C.csv',
    'Ta_10mm_lead_2C_total_6C.csv',
    # 'Ta.csv',
    # 'Ta_9C.csv',
    # 'Ta_Cd.csv',
    'Ta_Cd_9C.csv',
    'Ta_80C_12pC.csv'
]
norm_to_file = [
    'OB_no_lead_2C_total_6C.csv',
    'OB_10mm_lead_2C_total_6C.csv',