Example #1

import math

import numpy as np
import matplotlib.pyplot as plt
from lmfit import Parameters
from lmfit.models import GaussianModel

# note: `peaks` and `find_idx` are helper functions defined elsewhere in
# the source module
def gauss_fit_poiss_ph_region(pnr,
                              min_peak_sep,
                              th01=None,
                              threshold=None,
                              weighted=False,
                              plot=False):
    """
    improve the precision in the location of the peaks by fitting them
    using a sum of Gaussian distributions
    'poiss_ph' naming convention because it only fits the amplitudes to poissonian stats for n>=1
    :param pnr: 2D histogram, output of np.histogram, assumes it's a rectangular function (first bin) + sum of gaussians, but discards the first bin.
    :param min_peak_sep: Minimum distance between each detected peak.
    :param threshold: Normalized threshold, float between [0., 1.]
    :param weighted: if True, it associate a poissonian error to the
        frequencies
    """

    # unpack the histogram into x and y values
    # first bin corresponds to n=0 traces with no photon detection events, thus having zero area.
    f0 = pnr[0][0]  #n=0 freq
    frequencies = pnr[0][1:]

    # match the number of bins to the frequencies, find the step size
    x_val = pnr[1][1:]
    step = np.diff(x_val)[0]
    x0 = pnr[1][0] + step / 2  #n=0 bin
    x_val = x_val[:-1] + step / 2.

    # find a first approximation of the peak location using local differences
    peaks_pos, peak_height = peaks([frequencies, x_val], min_peak_sep,
                                   threshold)
    print('est peak pos = {}\nest peak hts = {}'.format(peaks_pos, peak_height))

    # detect min between n=0 and n=1
    if th01 is None:
        th01 = x_val[find_idx(x_val, peaks_pos[0]) +
                     np.argmin(frequencies[(x_val > peaks_pos[0])
                                           & (x_val < peaks_pos[1])])]

    print('th01 = {}'.format(th01))

    # constrain fitting region:
    x_val_ = x_val
    frequencies_ = frequencies

    mask = x_val > th01
    x_val = x_val[mask]
    frequencies = frequencies[mask]
    peak_mask = peaks_pos > th01
    peaks_pos = peaks_pos[peak_mask]
    peak_height = peak_height[peak_mask]

    # build a fitting model with a number of gaussian distributions matching
    # the number of peaks
    fit_model = np.sum([
        GaussianModel(prefix='g{}_'.format(k + 1))
        for k, _ in enumerate(peaks_pos)
    ])

    # Generate the initial conditions for the fit
    p = Parameters()
    p.add('n_bar', 0.2, min=0)

    p.add('A', np.max(peak_height) * min_peak_sep)
    # p.add('Delta_E', peaks_pos[-1] - peaks_pos[-2])
    p.add('Delta_E', 5)
    # p.add('g1_sigma', min_peak_sep / 5, min=0)
    p.add('sigma_p', min_peak_sep / np.sqrt(2) / np.pi, min=0)
    # n>=1 Centers
    p.add('g1_center', peaks_pos[0], min=0, vary=1)
    p.add('g2_center', peaks_pos[1], min=0)
    for k, j in enumerate(peaks_pos[2:]):
        p.add(
            'g{}_center'.format(k + 3),
            j,
            # expr='g{}_center + Delta_E'.format(k + 1)
        )

    # n>=1 amplitudes
    for k, j in enumerate(peak_height):
        p.add('g{}_amplitude'.format(k + 1),
              j * min_peak_sep / np.sqrt(2),
              expr='A * exp(-n_bar) * n_bar**{} / factorial({})'.format(
                  k + 1, k + 1),
              min=0)

    # n>=1 fixed widths
    for k, _ in enumerate(peak_height):
        p.add('g{}_sigma'.format(k + 1),
              min_peak_sep / np.sqrt(2) / np.pi,
              min=0,
              expr='sigma_p * sqrt({})'.format(k + 1)
              # expr='sigma_p'
              )

    if weighted:
        # generates the poissonian errors, correcting for zero values
        err = np.sqrt(frequencies)
        err[frequencies == 0] = 1

        result = fit_model.fit(frequencies,
                               x=x_val,
                               params=p,
                               weights=1 / err,
                               method='powell')
    else:
        result = fit_model.fit(frequencies, x=x_val, params=p)

    n_bar = result.params.valuesdict()['n_bar']
    print('poissonian probs from n=1,2,... = {}'.format(
        [np.exp(-n_bar) * n_bar**(k + 1) / math.factorial(k + 1)
         for k, _ in enumerate(peak_height)]))

    if plot:
        plt.figure(figsize=(10, 5))
        plt.errorbar(pnr[1][:-1] + step / 2,
                     pnr[0],
                     yerr=np.sqrt(pnr[0]),
                     linestyle='',
                     ecolor='black',
                     color='black')
        for k, _ in enumerate(result.components):
            plt.plot(x_val,
                     result.eval_components(x=x_val)['g{}_'.format(k + 1)])
        plt.axvline(th01, color='black', label='th01')
        plt.legend()
        print(result.fit_report())
    return result
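
# Added sketch (not from the original source): the core trick above is tying
# the Gaussian areas to Poissonian weights through lmfit `expr` constraints.
# A minimal, self-contained version on synthetic data; all values below are
# illustrative.
import math

import numpy as np
from lmfit import Parameters
from lmfit.models import GaussianModel

rng = np.random.default_rng(0)
x_demo = np.linspace(0.4, 3.6, 400)
sig_true, n_bar_true = 0.08, 0.5
y_demo = sum(np.exp(-n_bar_true) * n_bar_true**n / math.factorial(n)
             * np.exp(-(x_demo - n)**2 / (2 * sig_true**2))
             / (sig_true * np.sqrt(2 * np.pi))
             for n in (1, 2, 3))
y_demo = y_demo + rng.normal(0, 0.01, x_demo.size)

model_demo = (GaussianModel(prefix='g1_') + GaussianModel(prefix='g2_')
              + GaussianModel(prefix='g3_'))
p_demo = Parameters()
p_demo.add('n_bar', value=0.3, min=0)
p_demo.add('A', value=1.0, min=0)
for n in (1, 2, 3):
    p_demo.add('g{}_center'.format(n), value=float(n))
    p_demo.add('g{}_sigma'.format(n), value=0.1, min=0)
    p_demo.add('g{}_amplitude'.format(n),
               expr='A * exp(-n_bar) * n_bar**{0} / factorial({0})'.format(n))
result_demo = model_demo.fit(y_demo, x=x_demo, params=p_demo)
print(result_demo.params['n_bar'].value)  # should recover ~0.5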
Example #2
                              gamma_dos=gamma_dos)
    ADMRObject = ADMR([condObject])
    ADMRObject.Btheta_array = Btheta_array
    ADMRObject.runADMR()
    print("ADMR time : %.6s seconds" % (time.time() - start_total_time))

    diff_0 = rzz_0 - ADMRObject.rzz_array[0, :]
    diff_15 = rzz_15 - ADMRObject.rzz_array[1, :]
    diff_30 = rzz_30 - ADMRObject.rzz_array[2, :]
    diff_45 = rzz_45 - ADMRObject.rzz_array[3, :]

    return np.concatenate((diff_0, diff_15, diff_30, diff_45))


## Initialize
pars = Parameters()
pars.add("gamma_0", value=gamma_0_ini, vary=gamma_0_vary, min=0)
pars.add("gamma_dos", value=gamma_dos_ini, vary=gamma_dos_vary, min=0)
pars.add("gamma_k", value=gamma_k_ini, vary=gamma_k_vary, min=0)
pars.add("power", value=power_ini, vary=power_vary, min=2)
pars.add("mu", value=mu_ini, vary=mu_vary)
pars.add("M", value=M_ini, vary=M_vary, min=0.001)

## Run fit algorithm
out = minimize(residualFunc,
               pars,
               args=(bandObject, rzz_0, rzz_15, rzz_30, rzz_45))

## Display fit report
print(fit_report(out.params))
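
# Added sketch (illustrative, not the original model): the residual above
# concatenates the differences from several curves so that lmfit minimizes
# them simultaneously with shared parameters. Toy standalone version:
import numpy as np
from lmfit import Parameters, minimize, fit_report

def residualFunc_demo(pars, x, y1, y2):
    rate = pars['rate'].value
    diff_1 = y1 - pars['amp1'].value * np.exp(-rate * x)
    diff_2 = y2 - pars['amp2'].value * np.exp(-rate * x)
    return np.concatenate((diff_1, diff_2))

x_demo = np.linspace(0, 5, 50)
rng = np.random.default_rng(1)
y1_demo = 2.0 * np.exp(-0.7 * x_demo) + rng.normal(0, 0.02, x_demo.size)
y2_demo = 0.5 * np.exp(-0.7 * x_demo) + rng.normal(0, 0.02, x_demo.size)

pars_demo = Parameters()
pars_demo.add('amp1', value=1.0, min=0)
pars_demo.add('amp2', value=1.0, min=0)
pars_demo.add('rate', value=1.0, min=0)  # shared by both datasets
out_demo = minimize(residualFunc_demo, pars_demo, args=(x_demo, y1_demo, y2_demo))
print(fit_report(out_demo.params))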
Example #3
all_pixels = np.asarray(all_pixels)

all_pixels1 = all_pixels[0:25]
all_pixels2 = all_pixels[25:55]
all_pixels3 = all_pixels[55:]

pixel1 = all_pixels1[np.random.choice(all_pixels1.shape[0], 6, replace=False)]
pixel2 = all_pixels2[np.random.choice(all_pixels2.shape[0], 6, replace=False)]
pixel3 = all_pixels3[np.random.choice(all_pixels3.shape[0], 6, replace=False)]

pixel = np.concatenate((pixel1, pixel2, pixel3))

print(pixel)

#All parameters in my model
params = Parameters()
params.add_many(
    ('incli', 85, True),
    ('col_dens', 14, True),
    ('height', 11.5, True),
    ('dop_param', 7.0, True),
    ('csize', 0.1, False),
    ('r_0', 1, True),
    ('vel_max', 195, True),
    ('h_v', 4.5, True),
)

spectralpixel = [14, 36]  #spectralpixels to "count" an absorption

lam1 = np.arange(4825.12, 4886.37 + 1.25,
                 1.25)  #wavelengths to plot the rebinned data
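
# Added note (sketch): `Parameters.add_many` takes tuples in the order
# (name, value, vary, min, max, expr, brute_step), so ('csize', 0.1, False)
# means value=0.1 with vary=False. The add_many block above is equivalent to:
from lmfit import Parameters

params_alt = Parameters()
params_alt.add('incli', value=85, vary=True)
params_alt.add('col_dens', value=14, vary=True)
params_alt.add('height', value=11.5, vary=True)
params_alt.add('dop_param', value=7.0, vary=True)
params_alt.add('csize', value=0.1, vary=False)
params_alt.add('r_0', value=1, vary=True)
params_alt.add('vel_max', value=195, vary=True)
params_alt.add('h_v', value=4.5, vary=True)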
Example #4
def fit_jd_hist(
    hists: list,
    dt: float,
    D: list,
    fit_D: list,
    F: list,
    fit_F: list,
    sigma: list,
    fit_sigma: list,
    verbose=False,
):

    """
    Fits jd probability functions to a jd histograms.

    Parameters:
    hist (list): histogram values
    D (list): init values for MSD
    F (list): fractions for D, sum = 1
    sigma (float): localization precision guess
    funcs (dict): dictionary with functions sigma, gamma, center, amplitude

    Returns:
    popt (lmfit.minimizerResult): optimized parameters
    """

    from lmfit import Parameters, Parameter, minimize

    def residual(fit_params, data):
        res = cumulative_error_jd_hist(fit_params, data, len(D))
        return res

    fit_params = Parameters()
    # fit_params.add('sigma', value=sigma, vary=fit_sigma, min=0.)
    fit_params.add("dt", value=dt, vary=False)
    try:
        fit_params.add("max_lag", value=max([h.lag for h in hists]), vary=False)
    except TypeError as e:
        logger.error(f"problem with `hists`: expected `list`, got `{type(hists)}`")
        raise e

    for i, (d, f_d, f, f_f) in enumerate(zip(D, fit_D, F, fit_F)):
        fit_params.add(f"D{i}", value=d, vary=f_d, min=0.0)
        fit_params.add(f"F{i}", value=f, min=0.0, max=1.0, vary=f_f)

    f_expr = "1"
    for i, f in enumerate(F[:-1]):
        f_expr += f" - F{i}"

    fit_params[f"F{i+1}"] = Parameter(name=f"F{i+1}", min=0.0, max=1.0, expr=f_expr)

    for i, (s, f_s, min_s, max_s) in enumerate(
        zip(sigma, fit_sigma, (0, sigma[0]), (3 * sigma[0], D[-1]))
    ):
        fit_params.add(f"sigma{i}", value=s, min=min_s, max=max_s, vary=f_s)

    logger.debug("start minimize")

    minimizer_result = minimize(residual, fit_params, args=(hists,))

    if verbose:
        logger.info(f"completed in {minimizer_result.nfev} steps")
        minimizer_result.params.pretty_print()

    return minimizer_result
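
# Added sketch: the `expr` string built above ("1 - F0 - ...") is the usual
# lmfit way to constrain mixture fractions to sum to one; the last fraction
# is derived rather than varied. Minimal standalone version:
from lmfit import Parameters

p_demo = Parameters()
p_demo.add('F0', value=0.3, min=0.0, max=1.0)
p_demo.add('F1', value=0.2, min=0.0, max=1.0)
p_demo.add('F2', expr='1 - F0 - F1', min=0.0, max=1.0)  # derived, not varied
print(p_demo['F2'].value)  # -> 0.5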
Example #5
def load_dataset_from_hdf(fname):
    """Load dataset from HDF5 file and instantiate a `VoigtFit.Dataset' class."""
    with h5py.File(fname, 'r') as hdf:
        z_sys = hdf.attrs['redshift']
        ds = dataset.DataSet(z_sys)
        ds.velspan = hdf.attrs['velspan']
        ds.verbose = hdf.attrs['verbose']
        if 'name' in hdf.attrs.keys():
            ds.set_name(hdf.attrs['name'])
        else:
            ds.set_name('')

        # Load .data:
        data = hdf['data']
        for chunk in data.values():
            res = chunk.attrs['res']
            norm = chunk.attrs['norm']
            ds.add_data(chunk['wl'].value,
                        chunk['flux'].value,
                        res,
                        err=chunk['error'].value,
                        normalized=norm)

        # Load .regions:
        # --- this will be deprecated in later versions
        hdf_regions = hdf['regions']
        for reg in hdf_regions.values():
            region_lines = list()
            for line_tag, line_group in reg['lines'].items():
                act = line_group.attrs['active']
                # Add check for backward compatibility:
                if line_tag in dataset.lineList['trans']:
                    line_instance = dataset.Line(line_tag, active=act)
                    region_lines.append(line_instance)
                    ds.all_lines.append(line_tag)
                    ds.lines[line_tag] = line_instance
                else:
                    print(" [WARNING] - Anomaly detected for line:")
                    print("             %s" % line_tag)
                    print(" I suspect that the atomic linelist has changed...")
                    print("")

            # Instantiate the Region Class with the first Line:
            line_init = region_lines[0]
            v = reg.attrs['velspan']
            specID = reg.attrs['specID']
            Region = regions.Region(v, specID, line_init)
            if len(region_lines) == 1:
                # The first and only line has already been loaded
                pass

            elif len(region_lines) > 1:
                # Load the rest of the lines:
                for line in region_lines[1:]:
                    Region.lines.append(line)
            else:
                err_msg = "Something went wrong in this region: %s. No lines are defined!" % str(
                    reg.name)
                raise ValueError(err_msg)

            # Set region data and attributes:
            Region.res = reg.attrs['res']
            Region.normalized = reg.attrs['normalized']
            Region.cont_err = reg.attrs['cont_err']
            Region.new_mask = reg.attrs['new_mask']

            Region.wl = reg['wl'].value
            Region.flux = reg['flux'].value
            Region.mask = reg['mask'].value
            Region.err = reg['error'].value

            ds.regions.append(Region)

        # Load .molecules:
        molecules = hdf['molecules']
        if len(molecules) > 0:
            for molecule, band_data in molecules.items():
                bands = [[b, J] for b, J in band_data]
                ds.molecules[molecule] = bands
                # No need to call ds.add_molecule
                # lines are added above when defining the regions.

        # Load .components:
        components = hdf['components']
        if 'best_fit' in hdf:
            # --- Prepare fit parameters  [class: lmfit.Parameters]
            ds.best_fit = Parameters()

        for ion, comps in components.items():
            ds.components[ion] = list()
            if len(comps) > 0:
                for n, comp in enumerate(comps.values()):
                    if 'best_fit' in hdf:
                        # If 'best_fit' exists, use the best-fit values.
                        # The naming for 'best_fit' and 'components' is parallel
                        # so one variable in components can easily be identified
                        # in the best_fit data group by replacing the path:
                        pointer = comp.name
                        fit_pointer = pointer.replace('components', 'best_fit')
                        z = hdf[fit_pointer + '/z'].value
                        z_err = hdf[fit_pointer + '/z'].attrs['error']
                        b = hdf[fit_pointer + '/b'].value
                        b_err = hdf[fit_pointer + '/b'].attrs['error']
                        logN = hdf[fit_pointer + '/logN'].value
                        logN_err = hdf[fit_pointer + '/logN'].attrs['error']

                    else:
                        z = comp['z'].value
                        z_err = None
                        b = comp['b'].value
                        b_err = None
                        logN = comp['logN'].value
                        logN_err = None

                    # Extract component options:
                    opts = dict()
                    for varname in ['z', 'b', 'N']:
                        if varname == 'N':
                            hdf_name = 'logN'
                        else:
                            hdf_name = varname

                        tie = comp[hdf_name].attrs['tie_%s' % varname]
                        tie = None if tie == 'None' else tie
                        vary = comp[hdf_name].attrs['var_%s' % varname]
                        opts['tie_%s' % varname] = tie
                        opts['var_%s' % varname] = vary

                    # Add component to DataSet class:
                    ds.add_component(ion, z, b, logN, **opts)

                    if 'best_fit' in hdf:
                        # Add Parameters to DataSet.best_fit:
                        z_name = 'z%i_%s' % (n, ion)
                        b_name = 'b%i_%s' % (n, ion)
                        N_name = 'logN%i_%s' % (n, ion)
                        ds.best_fit.add(z_name, value=z, vary=opts['var_z'])
                        ds.best_fit[z_name].stderr = z_err
                        ds.best_fit.add(b_name,
                                        value=b,
                                        vary=opts['var_b'],
                                        min=0.,
                                        max=500.)
                        ds.best_fit[b_name].stderr = b_err
                        ds.best_fit.add(N_name,
                                        value=logN,
                                        vary=opts['var_N'],
                                        min=0.,
                                        max=40.)
                        ds.best_fit[N_name].stderr = logN_err

        if 'best_fit' in hdf:
            # Now the components have been defined in ds, so I can use them for the loop
            # to set the parameter ties:
            for ion, comps in ds.components.items():
                for n, comp in enumerate(comps):
                    z, b, logN, opts = comp
                    z_name = 'z%i_%s' % (n, ion)
                    b_name = 'b%i_%s' % (n, ion)
                    N_name = 'logN%i_%s' % (n, ion)

                    if opts['tie_z']:
                        ds.best_fit[z_name].expr = opts['tie_z']
                    if opts['tie_b']:
                        ds.best_fit[b_name].expr = opts['tie_b']
                    if opts['tie_N']:
                        ds.best_fit[N_name].expr = opts['tie_N']

        return ds
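
# Added sketch (hypothetical file layout, values illustrative): the loader
# exploits the parallel naming of the 'components' and 'best_fit' groups,
# mapping one onto the other with a plain string replace on the HDF5 path.
import h5py

with h5py.File('demo_layout.hdf5', 'w') as hdf:
    comp = hdf.create_dataset('components/FeII/comp0/z', data=2.3520)
    comp.attrs['tie_z'] = 'None'
    comp.attrs['var_z'] = True
    fit = hdf.create_dataset('best_fit/FeII/comp0/z', data=2.3521)
    fit.attrs['error'] = 1.2e-5

with h5py.File('demo_layout.hdf5', 'r') as hdf:
    pointer = hdf['components/FeII/comp0/z'].name  # '/components/FeII/comp0/z'
    fit_pointer = pointer.replace('components', 'best_fit')
    print(hdf[fit_pointer][()], hdf[fit_pointer].attrs['error'])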
Example #6
def _metabolite_fitting(ppm, spectrum):
    """Private function to fit a mixture of Voigt profile to
    citrate metabolites.

    """
    ppm_limits = (2.30, 2.90)
    idx_ppm = np.flatnonzero(np.bitwise_and(ppm > ppm_limits[0],
                                            ppm < ppm_limits[1]))
    sub_ppm = ppm[idx_ppm]
    sub_spectrum = spectrum[idx_ppm]

    f = interp1d(sub_ppm, sub_spectrum, kind='cubic')
    ppm_interp = np.linspace(sub_ppm[0], sub_ppm[-1], num=5000)

    # Define the default parameters
    # Define their bounds
    mu_bounds = (2.54, 2.68)
    delta_2_bounds = (.06, .16)
    delta_3_bounds = (.06, .16)
    # Define the default shifts
    ppm_cit = np.linspace(mu_bounds[0], mu_bounds[1], num=1000)
    mu_dft = ppm_cit[np.argmax(f(ppm_cit))]
    # Redefine the maximum to avoid too much motion
    mu_bounds = (mu_dft - 0.04, mu_dft + 0.04)
    # Redefine the limit of ppm to use for the fitting
    ppm_interp = np.linspace(mu_dft - .20, mu_dft + 0.20, num=5000)
    delta_2_dft = .1
    delta_3_dft = .1

    # Define the default amplitude
    alpha_1_dft = (f(mu_dft) /
                   _gaussian_profile(0., 1., 0., .01))
    alpha_2_dft = (f(mu_dft + delta_2_dft) /
                   _gaussian_profile(0., 1., 0., .01))
    alpha_3_dft = (f(mu_dft - delta_3_dft) /
                   _gaussian_profile(0., 1., 0., .01))
    # Create the vector for the default parameters
    # popt_default = np.array([alpha_1_dft, mu_dft, .01,
    #                          alpha_2_dft, delta_2_dft, .01,
    #                          alpha_3_dft, delta_3_dft, .01])
    # Define the list of parameters
    params = Parameters()
    params.add('alpha1', value=alpha_1_dft, min=0.1, max=100)
    params.add('alpha2', value=alpha_2_dft, min=0.1, max=100)
    params.add('alpha3', value=alpha_3_dft, min=0.1, max=100)
    params.add('mu1', value=mu_dft, min=mu_bounds[0], max=mu_bounds[1])
    params.add('delta2', value=delta_2_dft, min=delta_2_bounds[0],
               max=delta_2_bounds[1])
    params.add('delta3', value=delta_3_dft, min=delta_3_bounds[0],
               max=delta_3_bounds[1])
    params.add('sigma1', value=.01, min=.01, max=0.1)
    params.add('sigma2', value=.01, min=.01, max=0.03)
    params.add('sigma3', value=.01, min=.01, max=0.03)

    data = f(ppm_interp)
    res_citrate = minimize(residual, params, args=(ppm_interp, ),
                           kws={'data':data}, method='least_squares')
    # res_citrate = minimize(residual, res_citrate.params, args=(ppm_interp, ),
    #                        kws={'data':data}, method='differential_evolution')


    # ppm_limits = (2.90, 3.25)
    mu_dft = res_citrate.params['mu1'].value
    delta_4_bounds = (.55, .59)
    # delta_6_bounds = (.18, .20)

    delta_4_dft = .57
    # delta_6_dft = .19

    print(mu_dft + delta_4_dft)

    ppm_limits = (mu_dft + delta_4_bounds[0] - .02,
                  mu_dft + delta_4_bounds[1] + .02)
    idx_ppm = np.flatnonzero(np.bitwise_and(ppm > ppm_limits[0],
                                            ppm < ppm_limits[1]))
    sub_ppm = ppm[idx_ppm]
    sub_spectrum = spectrum[idx_ppm]

    f = interp1d(sub_ppm, sub_spectrum, kind='cubic')
    ppm_interp = np.linspace(sub_ppm[0], sub_ppm[-1], num=5000)

    delta_4_dft = ppm_interp[np.argmax(f(ppm_interp))] - mu_dft

    ppm_limits = (mu_dft + delta_4_dft - .04,
                  mu_dft + delta_4_dft + .04)
    idx_ppm = np.flatnonzero(np.bitwise_and(ppm > ppm_limits[0],
                                            ppm < ppm_limits[1]))
    sub_ppm = ppm[idx_ppm]
    sub_spectrum = spectrum[idx_ppm]

    f = interp1d(sub_ppm, sub_spectrum, kind='cubic')
    ppm_interp = np.linspace(sub_ppm[0], sub_ppm[-1], num=5000)

    print(sub_ppm)

    alpha_4_dft = (f(mu_dft + delta_4_dft) /
                   _gaussian_profile(0., 1., 0., .01))
    # alpha_6_dft = (f(mu_dft + delta_4_dft - delta_6_dft) /
    #                _gaussian_profile(0., 1., 0., .01))

    params = Parameters()
    params.add('alpha4', value=alpha_4_dft, min=0.0, max=100)
    params.add('mu1', value=mu_dft, vary=False)
    params.add('delta4', value=delta_4_dft, vary=False)
    #params.add('alpha6', value=alpha_6_dft, min=0.01, max=100)
    #params.add('delta6', value=delta_6_dft, min=delta_6_bounds[0],
    #           max=delta_6_bounds[1])
    params.add('sigma4', value=.01, min=.001, max=0.02)
    #params.add('sigma6', value=.005, min=.0001, max=0.005)

    data = f(ppm_interp)
    # res_choline = minimize(residual_choline, params, args=(ppm_interp, ),
    #                        kws={'data':data}, method='differential_evolution')
    res_choline = minimize(residual_choline, params, args=(ppm_interp, ),
                           kws={'data':data}, method='least_squares')


    # # Restart the optimisation by finding the maximum and setting the summit
    # delta_choline = res_choline.params['delta4']
    # idx_max_ch = np.flatnonzero(np.bitwise_and(ppm_interp >
    #                                            mu_dft - delta_choline,
    #                                            ppm_interp <
    #                                            mu_dft + delta_choline))
    # # Find the maximum associated with the max of the choline
    # print np.max(f(ppm_interp)[idx_max_ch])
    # delta_4_dft = ppm_interp[np.argmax(f(ppm_interp)[idx_max_ch])] - mu_dft
    # print delta_4_dft
    # params.add('delta4', value=delta_4_dft, vary=False)
    # res_choline = minimize(residual_choline, params, args=(ppm_interp, ),
    #                        kws={'data':data}, method='differential_evolution')

    # res_choline = minimize(residual_choline, params,
    #                        args=(ppm_interp, ), kws={'data':data},
    #                        method='least_squares')


    return res_citrate, res_choline
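
# Added sketch: the two-stage pattern above fits the citrate region first,
# then freezes mu1 (vary=False) for the choline fit. Toy standalone version
# with a single Gaussian as a stand-in model:
import numpy as np
from lmfit import Parameters, minimize

def residual_demo(pars, x, data):
    v = pars.valuesdict()
    return v['alpha'] * np.exp(-(x - v['mu'])**2 / (2 * v['sigma']**2)) - data

x_demo = np.linspace(-1.0, 1.0, 200)
data_demo = 1.3 * np.exp(-(x_demo - 0.1)**2 / (2 * 0.05**2))

stage1 = Parameters()
stage1.add('alpha', value=1.0, min=0.1, max=100)
stage1.add('mu', value=0.0, min=-0.5, max=0.5)
stage1.add('sigma', value=0.1, min=0.01, max=0.5)
res1 = minimize(residual_demo, stage1, args=(x_demo,),
                kws={'data': data_demo}, method='least_squares')

stage2 = Parameters()
stage2.add('mu', value=res1.params['mu'].value, vary=False)  # frozen
stage2.add('alpha', value=1.0, min=0.1, max=100)
stage2.add('sigma', value=0.1, min=0.01, max=0.5)
res2 = minimize(residual_demo, stage2, args=(x_demo,),
                kws={'data': data_demo}, method='least_squares')
print(res2.params['mu'].value)  # unchanged from stage 1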
Example #7
# define the maximum identical length that can share the linecuts
# (we need to concatenate them)
width_length = min(width_z.size, width_y.size, width_x.size)
# rows 0:3 for the widths, rows 3:6 for the corresponding cuts
linecuts = np.empty((6, width_length))
for idx, key in enumerate(linecuts_dict):  # order is maintained in OrderedDict
    linecuts[idx, :] = util.crop_pad_1d(
        linecuts_dict[key],
        output_length=width_length)  # implicit crop from the center

# create nb_fit sets of parameters, one per data set
fit_params = Parameters()
for idx in range(3):  # 3 linecuts in orthogonal directions, each fitted by a Gaussian
    fit_params.add("amp_%i" % (idx + 1), value=1, min=0.1, max=100)
    fit_params.add(
        "cen_%i" % (idx + 1),
        value=linecuts[idx, :].mean(),
        min=linecuts[idx, :].min(),
        max=linecuts[idx, :].max(),
    )
    fit_params.add("sig_%i" % (idx + 1), value=5, min=0.1, max=100)

# run the global fit to all the data sets
minimization = minimize(
    util.objective_lmfit,
    fit_params,
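
# (the minimize(...) call above is truncated in the source)
# Added sketch: `util.objective_lmfit` is not shown here; a typical lmfit
# objective for this kind of global fit evaluates one Gaussian per linecut
# and returns the flattened residuals of all datasets at once. Names below
# are hypothetical stand-ins, not bcdi's actual API.
import numpy as np

def objective_demo(params, linecuts):
    """Residuals over rows 3:6 (the cuts), rows 0:3 being the widths."""
    residuals = []
    for idx in range(3):
        x = linecuts[idx, :]          # width coordinate
        data = linecuts[idx + 3, :]   # corresponding cut
        amp = params['amp_%i' % (idx + 1)].value
        cen = params['cen_%i' % (idx + 1)].value
        sig = params['sig_%i' % (idx + 1)].value
        residuals.append(data - amp * np.exp(-(x - cen)**2 / (2 * sig**2)))
    return np.concatenate(residuals)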
Example #8
    def fit_solenoid(self, cfg_params, cfg_pickle, profile=False):
        """Main fitting function for FieldFitter class.

        The typical magnetic field geometry for the Mu2E experiment is determined by one or more
        solenoids, with some contaminating external fields.  The purpose of this function is to fit
        a set of sparse magnetic field data that would, in practice, be generated by a field
        measurement device.

        The following assumptions must hold for the input data:
           * The data is represented in a cylindrical coordinate system.
           * The data forms a series of planes, where all planes intersect at R=0.
           * All planes have the same R and Z values.
           * All positive Phi values have an associated negative phi value, which uniquely defines a
             single plane in R-Z space.

        Args:
           cfg_params (namedtuple): 'ns ms cns cms Reff func_version'
           cfg_pickle (namedtuple): 'use_pickle save_pickle load_name save_name recreate'
           profile (Optional[bool]): True if you want to exit after the model is built, before
               actual fitting is performed for profiling. Default is False.

        Returns:
            Nothing.  Generates class attributes after fitting, and saves parameter values, if
            saving is specified.
        """
        Reff         = cfg_params.Reff
        ns           = cfg_params.ns
        ms           = cfg_params.ms
        func_version = cfg_params.func_version
        Bz           = []
        Br           = []
        Bphi         = []
        RR           = []
        ZZ           = []
        PP           = []
        XX           = []
        YY           = []
        cns = cfg_params.cns
        cms = cfg_params.cms
        if func_version in [8, 9]:
            self.input_data.eval('Xp = X+1075', inplace=True)
            self.input_data.eval('Yp = Y-440', inplace=True)
            self.input_data.eval('Rp = sqrt(Xp**2+Yp**2)', inplace=True)
            self.input_data.eval('Phip = arctan2(Yp,Xp)', inplace=True)
            RRP = []
            PPP = []

        for phi in self.phi_steps:
            # determine phi and negative phi
            if phi == 0:
                nphi = np.pi
            else:
                nphi = phi-np.pi

            # select data with correct pair of phis
            input_data_phi = self.input_data[
                (np.isclose(self.input_data.Phi, phi)) | (np.isclose(self.input_data.Phi, nphi))
            ]
            # make radial values negative for negative phis to prevent degeneracy
            input_data_phi.loc[np.isclose(input_data_phi.Phi, nphi), 'R'] *= -1

            # convert B field components into 2D arrays
            # print input_data_phi
            piv_bz = input_data_phi.pivot('Z', 'R', 'Bz')
            piv_br = input_data_phi.pivot('Z', 'R', 'Br')
            piv_bphi = input_data_phi.pivot('Z', 'R', 'Bphi')
            piv_phi = input_data_phi.pivot('Z', 'R', 'Phi')
            if func_version == 6:
                piv_x = input_data_phi.pivot('Z', 'R', 'X')
                piv_y = input_data_phi.pivot('Z', 'R', 'Y')
            elif func_version in [8, 9]:
                piv_rp = input_data_phi.pivot('Z', 'R', 'Rp')
                piv_phip = input_data_phi.pivot('Z', 'R', 'Phip')

            # bookkeeping for field and position values
            R = piv_br.columns.values
            Z = piv_br.index.values
            Bz.append(piv_bz.values)
            Br.append(piv_br.values)
            Bphi.append(piv_bphi.values)
            RR_slice, ZZ_slice = np.meshgrid(R, Z)
            RR.append(RR_slice)
            ZZ.append(ZZ_slice)
            if func_version == 6:
                XX.append(piv_x.values)
                YY.append(piv_y.values)
            elif func_version in [8, 9]:
                RRP.append(piv_rp.values)
                PPP.append(piv_phip.values)
            # use_phis = np.sort(input_data_phi.Phi.unique())
            # formatting for correct phi ordering
            # if phi == 0:
            #     use_phis = use_phis[::-1]
            # print phi, use_phis
            # PP_slice = np.full_like(RR_slice, use_phis[0])
            # PP_slice[:, int(PP_slice.shape[1]/2):] = use_phis[1]
            PP.append(piv_phi.values)

        # combine all phi slices
        ZZ = np.concatenate(ZZ)
        RR = np.concatenate(RR)
        PP = np.concatenate(PP)
        Bz = np.concatenate(Bz)
        Br = np.concatenate(Br)
        Bphi = np.concatenate(Bphi)
        if func_version == 6:
            XX = np.concatenate(XX)
            YY = np.concatenate(YY)
        if func_version in [8, 9]:
            RRP = np.concatenate(RRP)
            PPP = np.concatenate(PPP)
        if profile:
            # terminate here if we are profiling the code for further optimization
            return ZZ, RR, PP, Bz, Br, Bphi

        # Choose the type of fitting function we'll be using.
        if func_version == 1:
            brzphi_3d_fast = ff.brzphi_3d_producer_modbessel(ZZ, RR, PP, Reff, ns, ms)
        elif func_version == 2:
            brzphi_3d_fast = ff.brzphi_3d_producer_bessel(ZZ, RR, PP, Reff, ns, ms)
        elif func_version == 3:
            brzphi_3d_fast = ff.brzphi_3d_producer_bessel_hybrid(ZZ, RR, PP, Reff, ns, ms)
        elif func_version == 4:
            brzphi_3d_fast = ff.brzphi_3d_producer_numba_v2(ZZ, RR, PP, Reff, ns, ms)
        elif func_version == 5:
            # factory = ffc.FunctionProducer(RR, PP, ZZ, ns, ms, 'modbessel', L=Reff)
            # brzphi_3d_fast = factory.get_fit_function()
            brzphi_3d_fast = ff.brzphi_3d_producer_modbessel_phase(ZZ, RR, PP, Reff, ns, ms)
        elif func_version == 6:
            brzphi_3d_fast = ff.brzphi_3d_producer_modbessel_phase_ext(ZZ, RR, PP, Reff, ns, ms,
                                                                       cns, cms)
        elif func_version == 7:
            brzphi_3d_fast = ff.brzphi_3d_producer_modbessel_phase_hybrid(ZZ, RR, PP, Reff, ns, ms,
                                                                          cns, cms)
        elif func_version == 8:
            brzphi_3d_fast = ff.brzphi_3d_producer_modbessel_phase_hybrid_disp2(ZZ, RR, PP, RRP,
                                                                                PPP, Reff, ns, ms,
                                                                                cns, cms)
        elif func_version == 9:
            brzphi_3d_fast = ff.brzphi_3d_producer_modbessel_phase_hybrid_disp3(ZZ, RR, PP, RRP,
                                                                                PPP, Reff, ns, ms,
                                                                                cns, cms)
        else:
            raise KeyError('func version {0} does not exist'.format(func_version))

        # Generate an lmfit Model
        if func_version == 6:
            self.mod = Model(brzphi_3d_fast, independent_vars=['r', 'z', 'phi', 'x', 'y'])
        elif func_version in [8,9]:
            self.mod = Model(brzphi_3d_fast, independent_vars=['r', 'z', 'phi', 'rp', 'phip'])
        else:
            self.mod = Model(brzphi_3d_fast, independent_vars=['r', 'z', 'phi'])

        # Load pre-defined starting values for parameters, or make a new set
        if cfg_pickle.use_pickle or cfg_pickle.recreate:
            self.params = pkl.load(open(self.pickle_path+cfg_pickle.load_name+'_results.p', "rb"))
        else:
            self.params = Parameters()

        if 'R' not in self.params:
            self.params.add('R', value=Reff, vary=False)
        if 'ns' not in self.params:
            self.params.add('ns', value=ns, vary=False)
        else:
            self.params['ns'].value = ns
        if 'ms' not in self.params:
            self.params.add('ms', value=ms, vary=False)
        else:
            self.params['ms'].value = ms

        for n in range(ns):
            # If function version 5, `D` parameter is a delta offset for phi
            if func_version in [5, 6, 7, 8, 9]:
                if 'D_{0}'.format(n) not in self.params:
                    self.params.add('D_{0}'.format(n), value=0, min=-np.pi*0.5, max=np.pi*0.5)
                else:
                    self.params['D_{0}'.format(n)].vary = True
            # Otherwise `D` parameter is a scaling constant, along with a `C` parameter
            else:
                if 'C_{0}'.format(n) not in self.params:
                    self.params.add('C_{0}'.format(n), value=1)
                else:
                    self.params['C_{0}'.format(n)].vary = True
                if 'D_{0}'.format(n) not in self.params:
                    self.params.add('D_{0}'.format(n), value=0.001)
                else:
                    self.params['D_{0}'.format(n)].vary = True

            for m in range(ms):
                if 'A_{0}_{1}'.format(n, m) not in self.params:
                    self.params.add('A_{0}_{1}'.format(n, m), value=0, vary=True)
                else:
                    self.params['A_{0}_{1}'.format(n, m)].vary = True
                if 'B_{0}_{1}'.format(n, m) not in self.params:
                    self.params.add('B_{0}_{1}'.format(n, m), value=0, vary=True)
                else:
                    self.params['B_{0}_{1}'.format(n, m)].vary = True
                # Additional terms used in func version 3
                if func_version == 3:
                    if 'E_{0}_{1}'.format(n, m) not in self.params:
                        self.params.add('E_{0}_{1}'.format(n, m), value=0, vary=True)
                    else:
                        self.params['E_{0}_{1}'.format(n, m)].vary = True
                    if 'F_{0}_{1}'.format(n, m) not in self.params:
                        self.params.add('F_{0}_{1}'.format(n, m), value=0, vary=True)
                    else:
                        self.params['F_{0}_{1}'.format(n, m)].vary = True
                    if m > 3:
                        self.params['E_{0}_{1}'.format(n, m)].vary = False
                        self.params['F_{0}_{1}'.format(n, m)].vary = False

        if func_version == 6:

            if 'cns' not in self.params:
                self.params.add('cns', value=cns, vary=False)
            else:
                self.params['cns'].value = cns
            if 'cms' not in self.params:
                self.params.add('cms', value=cms, vary=False)
            else:
                self.params['cms'].value = cms
            for cn in range(1, cns+1):
                for cm in range(1, cms+1):
                    if 'C_{0}'.format(cm-1+(cn-1)*cms) not in self.params:
                        self.params.add('C_{0}'.format(cm-1+(cn-1)*cms), value=0, vary=True)
                    else:
                        self.params['C_{0}'.format(cm-1+(cn-1)*cms)].vary = True

            if 'e1' not in self.params:
                self.params.add('e1', value=0, min=-np.pi*0.5, max=np.pi*0.5, vary=True)
            else:
                self.params['e1'].vary = True
            if 'e2' not in self.params:
                self.params.add('e2', value=0, min=-np.pi*0.5, max=np.pi*0.5, vary=True)
            else:
                self.params['e2'].vary = True

        if func_version in [7, 8, 9]:
            if 'cns' not in self.params:
                self.params.add('cns', value=cns, vary=False)
            else:
                self.params['cns'].value = cns
            if 'cms' not in self.params:
                self.params.add('cms', value=cms, vary=False)
            else:
                self.params['cms'].value = cms

            for cn in range(cns):
                if 'G_{0}'.format(cn) not in self.params:
                    self.params.add('G_{0}'.format(cn), value=0, min=-np.pi*0.5, max=np.pi*0.5,
                                    vary=True)
                else:
                    self.params['G_{0}'.format(cn)].vary = True

                for cm in range(cms):
                    if 'E_{0}_{1}'.format(cn, cm) not in self.params:
                        self.params.add('E_{0}_{1}'.format(cn, cm), value=0, vary=True)
                    else:
                        self.params['E_{0}_{1}'.format(cn, cm)].vary = True
                    if 'F_{0}_{1}'.format(cn, cm) not in self.params:
                        self.params.add('F_{0}_{1}'.format(cn, cm), value=0, vary=True)
                    else:
                        self.params['F_{0}_{1}'.format(cn, cm)].vary = True

            if func_version == 9:
                if 'X' not in self.params:
                    self.params.add('X', value=0, vary=True)
                if 'Y' not in self.params:
                    self.params.add('Y', value=0, vary=True)

        if not cfg_pickle.recreate:
            print('fitting with n={0}, m={1}, cn={2}, cm={3}'.format(ns, ms, cns, cms))
        else:
            print('recreating fit with n={0}, m={1}, cn={2}, cm={3}, pickle_file={4}'.format(
                ns, ms, cns, cms, cfg_pickle.load_name))
        start_time = time()
        if func_version not in [6, 8, 9]:
            if cfg_pickle.recreate:
                for param in self.params:
                    self.params[param].vary = False
                self.result = self.mod.fit(np.concatenate([Br, Bz, Bphi]).ravel(),
                                           r=RR, z=ZZ, phi=PP, params=self.params,
                                           method='leastsq', fit_kws={'maxfev': 1})
            elif cfg_pickle.use_pickle:
                mag = 1/np.sqrt(Br**2+Bz**2+Bphi**2)
                self.result = self.mod.fit(np.concatenate([Br, Bz, Bphi]).ravel(),
                                           weights=np.concatenate([mag, mag, mag]).ravel(),
                                           r=RR, z=ZZ, phi=PP, params=self.params,
                                           method='leastsq', fit_kws={'maxfev': 5000})
            else:
                mag = 1/np.sqrt(Br**2+Bz**2+Bphi**2)
                self.result = self.mod.fit(np.concatenate([Br, Bz, Bphi]).ravel(),
                                           weights=np.concatenate([mag, mag, mag]).ravel(),
                                           r=RR, z=ZZ, phi=PP, params=self.params,
                                           method='leastsq', fit_kws={'maxfev': 2000})
        elif func_version == 6:
            if cfg_pickle.recreate:
                for param in self.params:
                    self.params[param].vary = False
                self.result = self.mod.fit(np.concatenate([Br, Bz, Bphi]).ravel(),
                                           r=RR, z=ZZ, phi=PP, x=XX, y=YY, params=self.params,
                                           method='leastsq', fit_kws={'maxfev': 1})
            elif cfg_pickle.use_pickle:
                mag = 1/np.sqrt(Br**2+Bz**2+Bphi**2)
                self.result = self.mod.fit(np.concatenate([Br, Bz, Bphi]).ravel(),
                                           weights=np.concatenate([mag, mag, mag]).ravel(),
                                           r=RR, z=ZZ, phi=PP, x=XX, y=YY, params=self.params,
                                           method='leastsq', fit_kws={'maxfev': 7000})
            else:
                mag = 1/np.sqrt(Br**2+Bz**2+Bphi**2)
                self.result = self.mod.fit(np.concatenate([Br, Bz, Bphi]).ravel(),
                                           weights=np.concatenate([mag, mag, mag]).ravel(),
                                           r=RR, z=ZZ, phi=PP, x=XX, y=YY, params=self.params,
                                           method='leastsq', fit_kws={'maxfev': 2000})

        elif func_version in [8, 9]:
            if cfg_pickle.recreate:
                for param in self.params:
                    self.params[param].vary = False
                self.result = self.mod.fit(np.concatenate([Br, Bz, Bphi]).ravel(),
                                           r=RR, z=ZZ, phi=PP, rp=RRP, phip=PPP, params=self.params,
                                           method='leastsq', fit_kws={'maxfev': 1})
            elif cfg_pickle.use_pickle:
                mag = 1/np.sqrt(Br**2+Bz**2+Bphi**2)
                self.result = self.mod.fit(np.concatenate([Br, Bz, Bphi]).ravel(),
                                           weights=np.concatenate([mag, mag, mag]).ravel(),
                                           r=RR, z=ZZ, phi=PP, rp=RRP, phip=PPP, params=self.params,
                                           method='leastsq', fit_kws={'maxfev': 12000})
            else:
                mag = 1/np.sqrt(Br**2+Bz**2+Bphi**2)
                self.result = self.mod.fit(np.concatenate([Br, Bz, Bphi]).ravel(),
                                           weights=np.concatenate([mag, mag, mag]).ravel(),
                                           r=RR, z=ZZ, phi=PP, rp=RRP, phip=PPP, params=self.params,
                                           method='leastsq', fit_kws={'maxfev': 2000})

        self.params = self.result.params
        end_time = time()
        print(("Elapsed time was %g seconds" % (end_time - start_time)))
        report_fit(self.result, show_correl=False)
        if cfg_pickle.save_pickle:  # and not cfg_pickle.recreate:
            self.pickle_results(self.pickle_path+cfg_pickle.save_name)
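
# Added sketch: the "add if missing, otherwise unfreeze" idiom repeated all
# through fit_solenoid could be factored into a small helper (a suggestion,
# not part of the original class):
def add_or_vary(params, name, **kwargs):
    """Add `name` to an lmfit Parameters object, or unfreeze it if present."""
    if name not in params:
        params.add(name, **kwargs)
    else:
        params[name].vary = True

# e.g. add_or_vary(self.params, 'A_{0}_{1}'.format(n, m), value=0, vary=True)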
Example #9
def autobk(energy, mu=None, group=None, rbkg=1, nknots=None, e0=None,
           edge_step=None, kmin=0, kmax=None, kweight=1, dk=0.1,
           win='hanning', k_std=None, chi_std=None, nfft=2048, kstep=0.05,
           pre_edge_kws=None, nclamp=4, clamp_lo=1, clamp_hi=1,
           calc_uncertainties=True, err_sigma=1, _larch=None, **kws):
    """Use Autobk algorithm to remove XAFS background

    Parameters:
    -----------
      energy:    1-d array of x-ray energies, in eV, or group
      mu:        1-d array of mu(E)
      group:     output group (and input group for e0 and edge_step).
      rbkg:      distance (in Ang) for chi(R) above
                 which the signal is ignored. Default = 1.
      e0:        edge energy, in eV.  If None, it will be determined.
      edge_step: edge step.  If None, it will be determined.
      pre_edge_kws:  keyword arguments to pass to pre_edge()
      nknots:    number of knots in spline.  If None, it will be determined.
      kmin:      minimum k value   [0]
      kmax:      maximum k value   [full data range].
      kweight:   k weight for FFT.  [1]
      dk:        FFT window window parameter.  [0.1]
      win:       FFT window function name.     ['hanning']
      nfft:      array size to use for FFT [2048]
      kstep:     k step size to use for FFT [0.05]
      k_std:     optional k array for standard chi(k).
      chi_std:   optional chi array for standard chi(k).
      nclamp:    number of energy end-points for clamp [4]
      clamp_lo:  weight of low-energy clamp [1]
      clamp_hi:  weight of high-energy clamp [1]
      calc_uncertainties:  Flag to calculate uncertainties in
                            mu_0(E) and chi(k) [True]
      err_sigma: sigma level for uncertainties in mu_0(E) and chi(k) [1]

    Output arrays are written to the provided group.

    Follows the 'First Argument Group' convention.
    """
    msg = sys.stdout.write
    if _larch is not None:
        msg = _larch.writer.write
    if 'kw' in kws:
        kweight = kws.pop('kw')
    if len(kws) > 0:
        msg('Unrecognized arguments for autobk():\n')
        msg('    %s\n' % (', '.join(kws.keys())))
        return
    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                         fcn_name='autobk')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()

    energy = remove_dups(energy)
    # if e0 or edge_step are not specified, get them, either from the
    # passed-in group or from running pre_edge()
    group = set_xafsGroup(group, _larch=_larch)

    if edge_step is None and isgroup(group, 'edge_step'):
        edge_step = group.edge_step
    if e0 is None and isgroup(group, 'e0'):
        e0 = group.e0
    if e0 is None or edge_step is None:
        # need to run pre_edge:
        pre_kws = dict(nnorm=3, nvict=0, pre1=None,
                       pre2=-50., norm1=100., norm2=None)
        if pre_edge_kws is not None:
            pre_kws.update(pre_edge_kws)
        pre_edge(energy, mu, group=group, _larch=_larch, **pre_kws)
        if e0 is None:
            e0 = group.e0
        if edge_step is None:
            edge_step = group.edge_step
    if e0 is None or edge_step is None:
        msg('autobk() could not determine e0 or edge_step!: try running pre_edge() first\n')
        return

    # get array indices for rkbg and e0: irbkg, ie0
    ie0 = index_of(energy, e0)
    rgrid = np.pi/(kstep*nfft)
    if rbkg < 2*rgrid: rbkg = 2*rgrid
    irbkg = int(1.01 + rbkg/rgrid)

    # save ungridded k (kraw) and gridded k (kout)
    # and ftwin (*k-weighting) for FT in residual
    enpe = energy[ie0:] - e0
    kraw = np.sign(enpe)*np.sqrt(ETOK*abs(enpe))
    if kmax is None:
        kmax = max(kraw)
    else:
        kmax = max(0, min(max(kraw), kmax))
    kout  = kstep * np.arange(int(1.01+kmax/kstep), dtype='float64')
    iemax = min(len(energy), 2+index_of(energy, e0+kmax*kmax/ETOK)) - 1

    # interpolate provided chi(k) onto the kout grid
    if chi_std is not None and k_std is not None:
        chi_std = np.interp(kout, k_std, chi_std)
    # pre-load FT window
    ftwin = kout**kweight * ftwindow(kout, xmin=kmin, xmax=kmax,
                                     window=win, dx=dk, dx2=dk)
    # calc k-value and initial guess for y-values of spline params
    nspl = max(5, min(64, int(2*rbkg*(kmax-kmin)/np.pi) + 2))
    spl_y, spl_k, spl_e  = np.zeros(nspl), np.zeros(nspl), np.zeros(nspl)
    for i in range(nspl):
        q  = kmin + i*(kmax-kmin)/(nspl - 1)
        ik = index_nearest(kraw, q)
        i1 = min(len(kraw)-1, ik + 5)
        i2 = max(0, ik - 5)
        spl_k[i] = kraw[ik]
        spl_e[i] = energy[ik+ie0]
        spl_y[i] = (2*mu[ik+ie0] + mu[i1+ie0] + mu[i2+ie0] ) / 4.0

    # get spline representation: knots, coefs, order=3
    # coefs will be varied in fit.
    knots, coefs, order = splrep(spl_k, spl_y)

    # set fit parameters from initial coefficients
    params = Parameters()
    for i in range(len(coefs)):
        params.add(name=FMT_COEF % i, value=coefs[i], vary=(i < len(spl_y)))

    initbkg, initchi = spline_eval(kraw[:iemax-ie0+1], mu[ie0:iemax+1],
                                   knots, coefs, order, kout)

    # do fit
    result = minimize(__resid, params, method='leastsq',
                      gtol=1.e-5, ftol=1.e-5, xtol=1.e-5, epsfcn=1.e-5,
                      kws = dict(ncoefs=len(coefs), chi_std=chi_std,
                                 knots=knots, order=order,
                                 kraw=kraw[:iemax-ie0+1],
                                 mu=mu[ie0:iemax+1], irbkg=irbkg, kout=kout,
                                 ftwin=ftwin, kweight=kweight,
                                 nfft=nfft, nclamp=nclamp,
                                 clamp_lo=clamp_lo, clamp_hi=clamp_hi))

    # write final results
    coefs = [result.params[FMT_COEF % i].value for i in range(len(coefs))]
    bkg, chi = spline_eval(kraw[:iemax-ie0+1], mu[ie0:iemax+1],
                           knots, coefs, order, kout)
    obkg = np.copy(mu)
    obkg[ie0:ie0+len(bkg)] = bkg

    # outputs to group
    group = set_xafsGroup(group, _larch=_larch)
    group.bkg  = obkg
    group.chie = (mu-obkg)/edge_step
    group.k    = kout
    group.chi  = chi/edge_step
    group.e0   = e0

    # now fill in 'autobk_details' group
    details = Group(params=result.params)

    details.init_bkg = np.copy(mu)
    details.init_bkg[ie0:ie0+len(bkg)] = initbkg
    details.init_chi = initchi/edge_step
    details.knots_e  = spl_e
    details.knots_y  = np.array([coefs[i] for i in range(nspl)])
    details.init_knots_y = spl_y
    details.nfev = result.nfev
    details.kmin = kmin
    details.kmax = kmax
    group.autobk_details = details

    # uncertainties in mu0 and chi: can be fairly slow.
    if calc_uncertainties:
        nchi = len(chi)
        nmue = iemax-ie0 + 1
        redchi = result.redchi
        covar  = result.covar / redchi
        jac_chi = np.zeros(nchi*nspl).reshape((nspl, nchi))
        jac_bkg = np.zeros(nmue*nspl).reshape((nspl, nmue))

        cvals, cerrs = [], []
        for i in range(len(coefs)):
            par = result.params[FMT_COEF % i]
            cvals.append(getattr(par, 'value', 0.0))
            cdel = getattr(par, 'stderr', 0.0)
            if cdel is None:
                cdel = 0.0
            cerrs.append(cdel/2.0)
        cvals = np.array(cvals)
        cerrs = np.array(cerrs)

        # find derivatives by hand!
        _k = kraw[:nmue]
        _m = mu[ie0:iemax+1]
        for i in range(nspl):
            cval0 = cvals[i]
            cvals[i] = cval0 + cerrs[i]
            bkg1, chi1 = spline_eval(_k, _m, knots, cvals, order, kout)

            cvals[i] = cval0 - cerrs[i]
            bkg2, chi2 = spline_eval(_k, _m, knots, cvals, order, kout)

            cvals[i] = cval0
            jac_chi[i] = (chi1 - chi2) / (2*cerrs[i])
            jac_bkg[i] = (bkg1 - bkg2) / (2*cerrs[i])

        dfchi = np.zeros(nchi)
        dfbkg = np.zeros(nmue)
        for i in range(nspl):
            for j in range(nspl):
                dfchi += jac_chi[i]*jac_chi[j]*covar[i,j]
                dfbkg += jac_bkg[i]*jac_bkg[j]*covar[i,j]

        prob = 0.5*(1.0 + erf(err_sigma/np.sqrt(2.0)))
        dchi = t.ppf(prob, nchi-nspl) * np.sqrt(dfchi*redchi)
        dbkg = t.ppf(prob, nmue-nspl) * np.sqrt(dfbkg*redchi)

        group.delta_chi = dchi
        group.delta_bkg = 0.0*mu
        group.delta_bkg[ie0:ie0+len(dbkg)] = dbkg
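
# Added sketch: the double loop above computes var(f) = J^T C J pointwise
# for each output value. The same propagation, with the loop and an
# equivalent vectorized einsum (toy shapes, illustrative values):
import numpy as np

rng = np.random.default_rng(0)
nspl_demo, npts_demo = 4, 10
jac_demo = rng.normal(size=(nspl_demo, npts_demo))  # df/dcoef_i, like jac_chi
covar_demo = 0.01 * np.eye(nspl_demo)               # parameter covariance

df_loop = np.zeros(npts_demo)
for i in range(nspl_demo):
    for j in range(nspl_demo):
        df_loop += jac_demo[i] * jac_demo[j] * covar_demo[i, j]

df_vec = np.einsum('ip,ij,jp->p', jac_demo, covar_demo, jac_demo)
assert np.allclose(df_loop, df_vec)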
Example #10
def init_load(filename, FreeE, fit_params, fixed_params, shifts, anglestep, Fit: bool, Plot: bool, ani_ori: bool, fit_select:int, *args):
    # filename string,  value, path to parameter.dat file used for fitting
    # FreeE string,  of Free Energy Density
    # fit_params dict => Parameter + start value
    # fixed_params dict => Fixed parameters + value
    # shifts: float
    # anglestep: integer
    # Fit: bool, to control whether to fit or not
    # Plot: bool, same as Fit

    print(filename, FreeE, fit_params, fixed_params, shifts, anglestep, Fit, Plot)

    if ani_ori: #OOP
        maxBresDelta = 0.1  # Also adjustable by gui?
    else:
        maxBresDelta = 0.001  # Also adjustable by gui?

    shift = shifts

    # Add consts to fixed params dict
    fixed_params[mu0] = 4 * m.pi * 10 ** (-7)
    fixed_params[mub] = 9.27 * 10 ** (-24)
    fixed_params[hbar] = (6.62606957 * 10 ** (-34)) / (2 * m.pi)
    #gamma = g*mub/hbar 
    fixed_params[gamma] = fixed_params['g'] * fixed_params[mub] / fixed_params[hbar]

    # Load fitted Params and determine Lineshape

    Lineshape, fit_num = get_fit_options_from_file(filename)

    D = np.loadtxt(filename, dtype='float', skiprows=0) # Data
    if Lineshape == "Lorentz":
        R_raw = D[:, 3 * fit_select]
        Winkel = D[:, 3 * fit_num + 2].flatten()
    elif Lineshape == "Dyson":
        print("Dyson")
        R_raw = D[:, 4 * fit_select]
        Winkel = D[:, 4 * fit_num + 2].flatten()
    else: # Rest (not implemented yet)
        print("Please use either Lorentz or Dyson shape!\n-----------------------------------\nOr go to line 105 in tools/ani_tools.py and change the numbers to the correspondign column values")
        try:
            R_raw = D[:, 4 * fit_select]    # Bres column is 4
            Winkel = D[:, 4 * fit_num + 2].flatten() # Angle column is 4 + 2, the +2 is because of a linear function
        except Exception as e:
            print(e)
    
    B_inter = interp1d(Winkel, R_raw)  # interpolated B_res over the angle array (length len(Winkel))

    #FreeE = 'B*M*(sin(theta)*sin(thetaB)*cos(phi - phiB) + cos(theta)*cos(thetaB)) - K2p*sin(theta)**2*cos(phi - phiu)**2 - K4p*(cos(4*phi) + 3)*sin(theta)**4/8 - K4s*cos(theta)**4/2 - (-K2s + M**2*mu0/2)*sin(theta)**2'
    # Free energy formula + Baselgia approach
    F = sympify(FreeE)  # let symengine interpret the FreeE string as a function

    # Use Baselgia approach for Bres (Has no singularity at theta = 0!)
    halb_res = gamma ** 2 / M ** 2 * (
            F.diff(theta, 2) * (F.diff(phi, 2) / (sin(theta)) ** 2 + cos(theta) / sin(theta) * F.diff(theta)) - (
            F.diff(theta, phi) / sin(theta) - cos(theta) / sin(theta) * F.diff(phi) / sin(theta)) ** 2)
    eq_halb_res = Eq(omega ** 2, halb_res)

    # Formula for the resonance field B_RES; solution [1] is the positive root of the square-root function (I think)
    B_RES = solve(eq_halb_res, B)  # Solves the Baselgia approach for B
    B_RES = B_RES.args[1]  # Choose second solution of solve!

    # Now create rule as dict(). rule is then variable that contains every parameter information (params+const)
    rule = {**fit_params, **fixed_params}
    # ----->  [K2s, K2p, K4s, K4p, phiU, f, g, M, mu0, muB, hbar, gamma, omega]

    # Create angle arrays

    Winkel_min = min(Winkel)  # in deg; original data
    Winkel_max = max(Winkel)  # in deg; original data
    angle_step = anglestep  # define stepwidth (resolution)
    angle_RANGE_deg = np.arange(Winkel_min, Winkel_max, angle_step * 180 / m.pi)  # phi array in degrees, NOT shifted
    reference_data = B_inter(angle_RANGE_deg)  # interpolate the experimental data at the step width set by the program
    # Now the idea is, we have an array (B_inter_data_array) of real world data, which is shifted in +- direction,
    # we therefore need to shift this.
    # This reference_data is not shifted, the shift is introduced in angle_RANGE. The lmfit routine then uses
    # the information of the shifted angles to map/fit the interpolated data.

    #try:

    def correct_angle_RANGE(angle_RANGE, len_ref, count=0, add='-', lauf_var=0, shift=0):
        # note: relies on angle_min, angle_max and angle_step from the enclosing scope
        if add == '-':
            angle_RANGE = np.arange(angle_min + shift, angle_max + shift, m.pi / (1 / (angle_step / m.pi) - lauf_var), dtype='float')
        else:
            angle_RANGE = np.arange(angle_min + shift, angle_max + shift, m.pi / (1 / (angle_step / m.pi) + lauf_var), dtype='float')
        if len(angle_RANGE) != len_ref:
            print(len(angle_RANGE), len_ref, count)
            if count < 10:
                return correct_angle_RANGE(angle_RANGE, len_ref, count + 1, '-', lauf_var + 1)
            elif count < 20:
                if count == 10:
                    lauf_var = 0
                return correct_angle_RANGE(angle_RANGE, len_ref, count + 1, '+', lauf_var + 1)
            else:
                print('angle_RANGE and reference_data have different length, change Anglestep (~ +-1 ) and try again')
        else:
            return angle_RANGE

    is_OOP = ani_ori

    if Fit:
        angle_min = (Winkel_min + shift) * m.pi / 180  # define smallest value; shifted
        angle_max = (Winkel_max + shift) * m.pi / 180  # define biggest value; shifted
       # angle_RANGE = np.arange(angle_min, angle_max, angle_step, dtype='float')  # array of radians, with stepwidth = anglestep
        angle_RANGE = np.arange(Winkel_min + shift, Winkel_max + shift, angle_step*180/m.pi) * m.pi/180
        # There is the possibility, that the length of angle_RANGE can be smaller or bigger than reference_data
        if len(angle_RANGE) != len(reference_data):
            angle_RANGE = correct_angle_RANGE(angle_RANGE, len(reference_data),0 , '-', 0, shift*m.pi/180)

        # Then limit the array so that it doesn't exceed 0-360 degrees
        for i, l in enumerate(angle_RANGE):
            if l >= 359.99 * m.pi / 180:
                angle_RANGE[i] -= 360.0 * m.pi / 180
            elif l <= 0.0:
                angle_RANGE[i] += 360.0 * m.pi / 180
        print('Simulating Bres and fitting anisotropy constants. Your computer may be unresponsive')
        end_pfad = init_folder(filename)


        func_args = ', '.join(fit_params.keys())  # names of the fitted params, formatted for the string function
        func_str = create_str_func(func_args, is_OOP)
        print("FUNC ARGS:",func_args)
        exec(func_str,globals())  # This will create the function given by func_str: "Model_Fit_Fkt"

        # Then create Parameter dict() and model for lmfit
        params_Fit = Parameters()
        for name, value in fit_params.items():
            if name == 'phiU':
                params_Fit.add(name, value, min=0, max=2*m.pi)
            else:
                params_Fit.add(name, value)
        model = Model(Model_Fit_Fkt)

        # plot: bool, rules_start: dict, angle_RANGE: list, phi_array: list, B_inter: func, B_RES: sympy_object, F: sympy_object
        main_loop(Plot, rule, angle_RANGE, reference_data, B_RES, F, model, params_Fit, fixed_params, fit_params, maxBresDelta, end_pfad, is_OOP)
    else:   # Pre Fit
        angle_RANGE = angle_RANGE_deg * m.pi/180
        if len(angle_RANGE) != len(reference_data):
            angle_RANGE = correct_angle_RANGE(angle_RANGE, len(reference_data))

        print('Creating pre fit')
        result = create_pre_fit(rule, angle_RANGE, angle_RANGE_deg, reference_data, B_RES, F, is_OOP)
        return result
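
# Added sketch: the B_RES derivation above symbolically solves
# omega**2 == f(B) for B and keeps the positive root. A toy sympy analogue
# (the source uses symengine, whose solve returns a set-like object, hence
# `B_RES.args[1]`):
import sympy as sp

B, omega, a, b = sp.symbols('B omega a b', positive=True)
solutions = sp.solve(sp.Eq(omega**2, a * B**2 + b * B), B)
print(solutions)  # the positive root is the physical resonance field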
Example #11
    def fit_external(self, cns=1, cms=1, use_pickle=False, pickle_name='default',
                     line_profile=False, recreate=False):
        """For fitting an external field in cartesian space.

        Note:
            Not currently used or maintained.
        """

        a     = 3e4
        b     = 3e4
        c     = 3e4

        Bz    = []
        Bx    = []
        By    = []
        Bzerr = []
        Bxerr = []
        Byerr = []
        ZZ    = []
        XX    = []
        YY    = []
        for y in self.xy_steps:

            input_data_y = self.input_data[self.input_data.Y == y]

            piv_bz = input_data_y.pivot(index='Z', columns='X', values='Bz')
            piv_bx = input_data_y.pivot(index='Z', columns='X', values='Bx')
            piv_by = input_data_y.pivot(index='Z', columns='X', values='By')
            piv_bz_err = input_data_y.pivot(index='Z', columns='X', values='Bzerr')
            piv_bx_err = input_data_y.pivot(index='Z', columns='X', values='Bxerr')
            piv_by_err = input_data_y.pivot(index='Z', columns='X', values='Byerr')

            X = piv_bx.columns.values
            Z = piv_bx.index.values
            Bz.append(piv_bz.values)
            Bx.append(piv_bx.values)
            By.append(piv_by.values)
            Bzerr.append(piv_bz_err.values)
            Bxerr.append(piv_bx_err.values)
            Byerr.append(piv_by_err.values)
            XX_slice, ZZ_slice = np.meshgrid(X, Z)
            XX.append(XX_slice)
            ZZ.append(ZZ_slice)
            YY_slice = np.full_like(XX_slice, y)
            YY.append(YY_slice)

        ZZ = np.concatenate(ZZ)
        XX = np.concatenate(XX)
        YY = np.concatenate(YY)
        Bz = np.concatenate(Bz)
        Bx = np.concatenate(Bx)
        By = np.concatenate(By)
        Bzerr = np.concatenate(Bzerr)
        Bxerr = np.concatenate(Bxerr)
        Byerr = np.concatenate(Byerr)
        if line_profile:
            return ZZ, XX, YY, Bz, Bx, By

        b_external_3d_fast = ff.b_external_3d_producer(a, b, c, ZZ, XX, YY, cns, cms)
        self.mod = Model(b_external_3d_fast, independent_vars=['x', 'y', 'z'])

        if use_pickle or recreate:
            with open(pickle_name + '_results.p', 'rb') as f:
                self.params = pkl.load(f)
        else:
            self.params = Parameters()

        if 'cns' not in self.params:
            self.params.add('cns', value=cns, vary=False)
        else:
            self.params['cns'].value = cns
        if 'cms' not in self.params:
            self.params.add('cms', value=cms, vary=False)
        else:
            self.params['cms'].value = cms

        if 'epsilon1' not in self.params:
            self.params.add('epsilon1', value=0.1, min=0, max=2*np.pi, vary=True)
        else:
            self.params['epsilon1'].vary = True
        if 'epsilon2' not in self.params:
            self.params.add('epsilon2', value=0.1, min=0, max=2*np.pi, vary=True)
        else:
            self.params['epsilon2'].vary = True

        for cn in range(1, cns+1):
            for cm in range(1, cms+1):
                if 'C_{0}_{1}'.format(cn, cm) not in self.params:
                    self.params.add('C_{0}_{1}'.format(cn, cm), value=1, vary=True)
                else:
                    self.params['C_{0}_{1}'.format(cn, cm)].vary = True

        if not recreate:
            print('fitting external with cn={0}, cm={1}'.format(cns, cms))
        start_time = time()
        if recreate:
            for param in self.params:
                self.params[param].vary = False

            self.result = self.mod.fit(np.concatenate([Bx, By, Bz]).ravel(), x=XX, y=YY, z=ZZ,
                                       params=self.params, method='leastsq', fit_kws={'maxfev': 1})
        elif use_pickle:
            self.result = self.mod.fit(np.concatenate([Bx, By, Bz]).ravel(), x=XX, y=YY, z=ZZ,
                                       params=self.params, method='leastsq',
                                       fit_kws={'maxfev': 1000})
        else:
            self.result = self.mod.fit(np.concatenate([Bx, By, Bz]).ravel(), x=XX, y=YY, z=ZZ,
                                       params=self.params, method='leastsq')

        self.params = self.result.params
        end_time = time()
        if not recreate:
            print(("Elapsed time was %g seconds" % (end_time - start_time)))
            report_fit(self.result, show_correl=False)
        if not self.no_save and not recreate:
            self.pickle_results(pickle_name)
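
The `use_pickle`/`recreate` paths rely on lmfit `Parameters` objects being picklable: fitted values can be stored and later reloaded as the starting point of a new fit, or simply re-evaluated. A minimal sketch of the round trip (file name hypothetical):

import pickle as pkl
from lmfit import Parameters

params = Parameters()
params.add('C_1_1', value=1.0, vary=True)

# store fitted parameters for a later session
with open('default_results.p', 'wb') as f:
    pkl.dump(params, f)

# restore them as the starting point of the next fit
with open('default_results.p', 'rb') as f:
    restored = pkl.load(f)
print(restored['C_1_1'].value)  # 1.0
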
Example #12
def otwo_doublet_fitting(file_name, sky_file_name):
    sa_data = spectra_analysis(file_name, sky_file_name)
    y_shifted = sa_data['gd_shifted']
    orr = wavelength_solution(file_name)
    sn_data = sky_noise_weighting(file_name, sky_file_name)

    redshift = sa_data['redshift']

    # obtaining the OII range and region
    # lower and upper bound on wavelength range
    lower_lambda = (1 + redshift) * 3600
    upper_lambda = (1 + redshift) * 3750

    otr = [lower_lambda, upper_lambda]
    print(otr)

    orr_x = np.linspace(orr['begin'], orr['end'], orr['steps'])
    dt_region = [find_nearest(orr_x, x) for x in otr]
    otwo_region = y_shifted[dt_region[0]:dt_region[1]]

    print(orr_x)

    ot_x = orr_x[dt_region[0]:dt_region[1]]

    otwo_max_loc = np.argmax(otwo_region)
    otwo_max_val = np.max(otwo_region)

    # standard deviation of a range before the peak
    stdr_b = 50
    stdr_e = otwo_max_loc - 50
    stddev_lim = [stdr_b, stdr_e]

    stddev_x = ot_x[stddev_lim[0]:stddev_lim[1]]
    stddev_region = otwo_region[stddev_lim[0]:stddev_lim[1]]
    stddev_val = np.std(stddev_region)

    # fitting a gaussian doublet model to the data
    dblt_mu = [3727.092, 3729.875]  # the actual non-redshifted wavelengths

    dblt_val = ot_x[otwo_max_loc]

    dblt_rng = [dblt_val - 20, dblt_val + 20]
    dblt_rng = [find_nearest(orr_x, x) for x in dblt_rng]
    dblt_rng_vals = orr_x[dblt_rng[0]:dblt_rng[1]]

    dblt_rgn = y_shifted[dblt_rng[0]:dblt_rng[1]]

    rdst = sa_data['redshift']

    sky_weight = sn_data['inverse_sky']
    sky_weight = sky_weight[dt_region[0]:dt_region[1]]

    # initial guesses for the parameters (c, i1, r, sigma_gal, z)
    p0 = [0, otwo_max_val, 1.3, 3, rdst]
    c, i_val1, r, sigma_gal, z = p0

    sigma_sky = sn_data['sky_sigma']

    gss_pars = Parameters()
    gss_pars.add('c', value=c)
    gss_pars.add('i1', value=i_val1, min=0.0)
    gss_pars.add('r', value=r, min=0.5, max=1.5)
    gss_pars.add('i2', expr='i1/r', min=0.0)
    gss_pars.add('sigma_gal', value=sigma_gal)
    gss_pars.add('z', value=z)
    gss_pars.add('sigma_inst', value=sigma_sky, vary=False)

    gss_model = Model(f_doublet)
    gss_result = gss_model.fit(otwo_region,
                               x=ot_x,
                               params=gss_pars,
                               weights=sky_weight)

    opti_pms = gss_result.best_values
    init_pms = gss_result.init_values

    # working out signal to noise now
    sn_line_parms = Parameters()
    sn_line_parms.add('c', value=c)

    sn_line_model = Model(sn_line)
    sn_line_rslt = sn_line_model.fit(otwo_region, x=ot_x, params=sn_line_parms)
    sn_line_bpms = sn_line_rslt.best_values
    sn_line_data = sn_line_rslt.best_fit

    sn_gauss_parms = Parameters()
    sn_gauss_parms.add('c', value=c)
    sn_gauss_parms.add('i1', value=i_val1, min=0.0)
    sn_gauss_parms.add('mu', value=dblt_val)
    sn_gauss_parms.add('sigma1', value=sigma_gal)

    sn_gauss_model = Model(sn_gauss)
    sn_gauss_rslt = sn_gauss_model.fit(otwo_region,
                                       x=ot_x,
                                       params=sn_gauss_parms)
    sn_gauss_bpms = sn_gauss_rslt.best_values
    sn_gauss_data = sn_gauss_rslt.best_fit

    sn_line_csqs = chisq(sn_line_data, otwo_region, stddev_val)
    sn_gauss_csqs = chisq(sn_gauss_data, otwo_region, stddev_val)

    signal_noise = np.sqrt(sn_line_csqs['chisq'] - sn_gauss_csqs['chisq'])

    # saving data to text files
    curr_file_name = file_name.split('.')
    curr_file_name = curr_file_name[0].split('/')
    stk_f_n = curr_file_name[-1]

    data_dir = 'cube_results/' + stk_f_n
    if not os.path.exists(data_dir):
        os.mkdir(data_dir)

    file_writer.analysis_complete(data_dir, stk_f_n, gss_result, init_pms,
                                  opti_pms, sn_line_csqs, sn_gauss_csqs,
                                  signal_noise, sn_line_bpms, sn_line_data,
                                  sn_gauss_bpms, sn_gauss_data)

    return {
        'range': otr,
        'x_region': ot_x,
        'y_region': otwo_region,
        'doublet_range': dblt_rng_vals,
        'std_x': stddev_x,
        'std_y': stddev_region,
        'lm_best_fit': gss_result.best_fit,
        'lm_best_param': gss_result.best_values,
        'lm_init_fit': gss_result.init_fit,
        'sn_line': sn_line_rslt.best_fit,
        'sn_gauss': sn_gauss_rslt.best_fit
    }
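
The key move above is the algebraic constraint on the doublet amplitudes: `i2` is never varied directly but always evaluates to `i1/r`, with the ratio `r` bounded to a physically sensible [0.5, 1.5]. A self-contained sketch of the same pattern with a stand-in doublet model (the real `f_doublet` lives elsewhere in this codebase):

import numpy as np
from lmfit import Model, Parameters

def f_doublet(x, c, i1, i2, sigma_gal, z, sigma_inst):
    """Stand-in Gaussian doublet; assumed form, not the original."""
    mu = np.array([3727.092, 3729.875]) * (1 + z)
    sig2 = sigma_gal**2 + sigma_inst**2
    return (c + i1 * np.exp(-(x - mu[0])**2 / (2 * sig2))
              + i2 * np.exp(-(x - mu[1])**2 / (2 * sig2)))

pars = Parameters()
pars.add('c', value=0.0)
pars.add('i1', value=1.0, min=0.0)
pars.add('r', value=1.3, min=0.5, max=1.5)
pars.add('i2', expr='i1/r', min=0.0)           # amplitude tied to i1 via r
pars.add('sigma_gal', value=3.0)
pars.add('z', value=0.0)
pars.add('sigma_inst', value=1.0, vary=False)  # instrumental width held fixed

x = np.linspace(3650, 3800, 400)
rng = np.random.default_rng(0)
y = f_doublet(x, 0.0, 1.0, 0.8, 3.0, 0.0, 1.0) + rng.normal(0, 0.02, x.size)
result = Model(f_doublet).fit(y, x=x, params=pars)
print(result.params['r'].value)  # recovered amplitude ratio i1/i2 (true value 1.25)
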
Example #13
def sky_noise_weighting(file_name, sky_file_name):
    """ finding the sky noise from a small section of the cube data """
    cs_data = spectra_analysis(file_name, sky_file_name)
    cube_data = cs_data['gd_shifted']
    sn_data = cs_data['sky_noise']
    wl_soln = wavelength_solution(file_name)

    sn_data_min = np.min(sn_data)
    in_wt = 1 / (sn_data - sn_data_min + 1)  # inverse weights: the noisiest channels count least

    sky_regns = np.zeros(
        (len(in_wt), 2))  # storing regions of potential sky noise
    for i in range(len(in_wt)):
        data_acl = cube_data[i]
        data_sky = sn_data[i]
        data_prb = in_wt[i]

        if np.abs(data_prb) <= 1.00:
            sky_regns[i][0] = data_prb
            sky_regns[i][1] = data_sky

    # finding max peak in the sky-noise data and fitting a Gaussian to that
    # x-axis data
    x_range = np.linspace(wl_soln['begin'], wl_soln['end'], wl_soln['steps'])

    # Finding peaks with PeakUtils
    sky_peaks = peakutils.indexes(sn_data, thres=300, thres_abs=True)
    sky_peaks_x = peakutils.interpolate(x_range, sn_data, sky_peaks)

    if (sky_peaks_x.size != 0):
        sky_peak = sky_peaks_x[0]
        sky_peak_index = find_nearest(x_range, sky_peak)
    else:
        sky_peak = 6000
        sky_peak_index = 0

    sky_peak_loc = x_range[sky_peak_index]

    sky_peak_range = [sky_peak - 100, sky_peak + 100]
    sky_peak_range_loc = [find_nearest(x_range, x) for x in sky_peak_range]

    sky_rng_x = x_range[sky_peak_range_loc[0]:sky_peak_range_loc[1]]
    sky_rng_y = sn_data[sky_peak_range_loc[0]:sky_peak_range_loc[1]]

    sky_gauss_params = Parameters()
    sky_gauss_params.add('c', value=0)
    sky_gauss_params.add('i1', value=np.max(sky_rng_y), min=0.0)
    sky_gauss_params.add('mu', value=sky_peak_loc)
    sky_gauss_params.add('sigma1', value=3)

    sky_gauss_model = Model(sn_gauss)
    sky_gauss_rslt = sky_gauss_model.fit(sky_rng_y,
                                         x=sky_rng_x,
                                         params=sky_gauss_params)
    sky_gauss_best = sky_gauss_rslt.best_values

    sky_sigma = sky_gauss_best['sigma1']

    return {
        'inverse_sky': in_wt,
        'sky_regions': sky_regns,
        'sky_sigma': sky_sigma
    }
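
Note `thres_abs=True` in the `peakutils.indexes` call above: it makes `thres=300` an absolute amplitude cut on the sky spectrum instead of the default normalized fraction of the data range. A tiny illustration:

import numpy as np
import peakutils

y = np.zeros(200)
y[80:83] = [200., 500., 200.]

print(peakutils.indexes(y, thres=300, thres_abs=True))  # [81], absolute cut
print(peakutils.indexes(y, thres=0.5))                  # [81], normalized threshold
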
Example #14
def S11fit(frec,
           S11,
           ftype='A',
           fitbackground=True,
           trimwidth=5.,
           doplots=False,
           margin=51,
           oldpars=None,
           refitback=True,
           reusefitpars=False,
           fitwidth=None):
    """**MAIN FIT ROUTINE**

    Fits complex S11 data versus frequency to one of 4 models, adjusting for a multiplicative complex background.
    The fit proceeds in three steps.  First, the background signal is fitted with a window around the detected peak position removed.
    Then the model times the background is fitted to the full data set, keeping the background parameters fixed at the fitted values.  Finally, all background and
    model parameters are refitted once more, starting from the previously fitted values.  The fit model is:

    .. math:: \Gamma'(\omega)=(a+b\omega+c\omega^2)\exp(j(a'+b'\omega))\cdot
        \Gamma(\omega,Q_\\textrm{int},Q_\\textrm{ext},\\theta),


    Parameters
    ----------
    frec : array_like
        Array of X values (typically frequency)
    S11 : array_like
        Complex array of Z values (typically S11 data)
    ftype : {'A','B','-A','-B', 'X'}, optional
        Fit model function (A,B,-A,-B, see S11theo for formulas)
    fitbackground : bool, optional
        If "True" will attempt to fit and remove background.  If "False", will use a constant background equal to 1 and fit only model function to data.
    trimwidth : float, optional
        Number of linewidths around resonance (estimated pre-fit) to remove for background only fit.
    doplots : bool, optional
        If "True", shows debugging and intermediate plots
    margin : int, optional
        Smoothing window (odd integer > 3, or None/0 for no smoothing) applied to the signal for the initial guess procedures (the fit itself uses unsmoothed data)
    oldpars : lmfit.Parameters, optional
        Parameter data from previous fit (expects lmfit Parameter object). Used when "refitback" is "False" or "reusefitpars" is "True".
    refitback : bool, optional
        If set to False, does not fit the background but uses parameters provided in "oldpars".  If set to "True", fits background normally
    reusefitpars : bool, optional
        If set to True, uses parameters provided in "oldpars" as initial guess for fit parameters in main model fit (ignored by background fit)
    fitwidth : float, optional
        If set to a numerical value, will trim the signal to a certain number of widths around the resonance for all the fit

    Returns
    -------
    params : lmfit.Parameters
        Fitted parameter values
    freq : numpy.ndarray
        Array of frequency values within the fitted range
    S11 : numpy.ndarray
        Array of complex signal values within the fitted range
    finalresult : lmfit.MinimizerResult
        The full minimizer result object (see lmfit documentation for details)

    """
    # Convert frequency and S11 into arrays
    frec = np.array(frec)
    S11 = np.array(S11)

    # Smooth data for initial guesses
    if margin is None or margin == 0:  # if no smoothing is desired, pass None or 0 as margin
        margin = 0
        sS11 = S11.real + 1j * S11.imag
    elif not isinstance(margin, int) or margin % 2 == 0 or margin <= 3:
        raise ValueError(
            'margin has to be either None, 0, or an odd integer larger than 3')
    else:
        sReS11 = np.array(smooth(S11.real, margin, 3))
        sImS11 = np.array(smooth(S11.imag, margin, 3))
        sS11 = sReS11 + 1j * sImS11
    #Make smoothed phase vector removing 2pi jumps
    sArgS11 = np.angle(sS11)
    sArgS11 = np.unwrap(sArgS11)
    sdiffang = np.diff(sArgS11)

    #sdiffang = [ x if np.abs(x)<pi else -pi for x in np.diff(np.angle(sS11)) ]
    #Get resonance index from maximum of the derivative of the imaginary part
    #ires = np.argmax(np.abs(np.diff(sImS11)))
    #f0i=frec[ires]

    #Get resonance index from maximum of the derivative of the phase
    avgph = np.average(sdiffang)
    errvec = np.power(sdiffang - avgph, 2.)
    #ires = np.argmax(np.abs(sdiffang[margin:-margin]))+margin
    if margin == 0:
        ires = np.argmax(errvec[:-1])
    else:
        ires = np.argmax(errvec[margin:-margin]) + margin
    f0i = frec[ires]
    print("Max index: ", ires, " Max frequency: ", f0i)

    if doplots:
        plt.clf()
        plt.title('Original signal (Re,Im)')
        plt.plot(frec, S11.real)
        plt.plot(frec, S11.imag)
        plt.axis('auto')
        plt.show()

        plt.plot(np.angle(sS11))
        plt.title('Smoothed Phase')
        plt.axis('auto')
        plt.show()
        if margin == 0:
            plt.plot(sdiffang[0:-1])
        else:
            plt.plot(sdiffang[margin:-margin])
        plt.plot(sdiffang)
        plt.title('Diff of Smoothed Phase')
        plt.show()

    #Get peak width by finding width of spike in diffphase plot
    (imin, imax) = getwidth_phase(ires, errvec, margin)
    di = imax - imin
    print("Peak limits: ", imin, imax)
    print("Lower edge: ", frec[imin], " Center: ", frec[ires], " Upper edge: ",
          frec[imax], " Points in width: ", di)

    if doplots:
        plt.title('Smoothed (ph-phavg)^2')
        plt.plot(errvec)
        plt.plot([imin], [errvec[imin]], 'ro')
        plt.plot([imax], [errvec[imax]], 'ro')
        plt.show()

    if fitwidth is not None:
        i1 = max(int(ires - di * fitwidth), 0)
        i2 = min(int(ires + di * fitwidth), len(frec))
        frec = frec[i1:i2]
        S11 = S11[i1:i2]
        ires = ires - i1
        imin = imin - i1
        imax = imax - i1

    #Trim peak from data (trimwidth times the width)
    (backfrec, backsig) = trim(frec, S11, ires - trimwidth * di,
                               ires + trimwidth * di)

    if doplots:
        plt.title('Trimmed signal for background fit (Re,Im)')
        plt.plot(backfrec, backsig.real, backfrec, backsig.imag)
        plt.plot(frec, S11.real, frec, S11.imag)
        plt.show()

        plt.title('Trimmed signal for background fit (Abs)')
        plt.plot(backfrec, np.abs(backsig))
        plt.plot(frec, np.abs(S11))
        plt.show()

        plt.title('Trimmed signal for background fit (Phase)')
        plt.plot(backfrec, np.angle(backsig))
        plt.plot(frec, np.angle(S11))
        plt.show()

    if fitbackground:
        #Make initial background guesses
        b0 = (np.abs(sS11)[-1] - np.abs(sS11)[0]) / (frec[-1] - frec[0])
        #    a0 = np.abs(sS11)[0] - b0*frec[0]
        a0 = np.average(np.abs(sS11)) - b0 * backfrec[0]
        #    a0 = np.abs(sS11)[0] - b0*backfrec[0]
        c0 = 0.
        #    bp0 = ( np.angle(sS11[di])-np.angle(sS11[0]) )/(frec[di]-frec[0])
        xx = []
        for i in range(0, len(backfrec) - 1):
            df = backfrec[i + 1] - backfrec[i]
            dtheta = np.angle(backsig[i + 1]) - np.angle(backsig[i])
            if (np.abs(dtheta) > pi):
                continue
            xx.append(dtheta / df)
        #Remove infinite values in xx (from repeated frequency points for example)
        xx = np.array(xx)
        idx = np.isfinite(xx)
        xx = xx[idx]

        #    bp0 = np.average([ x if np.abs(x)<pi else 0 for x in np.diff(np.angle(backsig))] )/(frec[1]-frec[0])
        bp0 = np.average(xx)
        #   ap0 = np.angle(sS11[0]) - bp0*frec[0]
        #   ap0 = np.average(np.unwrap(np.angle(backsig))) - bp0*backfrec[0]
        ap0 = np.unwrap(np.angle(backsig))[0] - bp0 * backfrec[0]
        cp0 = 0.
        print(a0, b0, ap0, bp0)
    else:
        a0 = 0
        b0 = 0
        c0 = 0
        ap0 = 0
        bp0 = 0
        cp0 = 0

    params = Parameters()
    myvary = True
    params.add('a', value=a0, vary=myvary)
    params.add('b', value=b0, vary=myvary)
    params.add('c', value=c0, vary=myvary)
    params.add('ap', value=ap0, vary=myvary)
    params.add('bp', value=bp0, vary=myvary)
    params.add('cp', value=cp0, vary=myvary)

    if not fitbackground:
        if ftype == 'A' or ftype == 'B':
            params['a'].set(value=-1, vary=False)
        elif ftype == '-A' or ftype == '-B' or ftype == 'X':
            params['a'].set(value=1, vary=False)
        params['b'].set(value=0, vary=False)
        params['c'].set(value=0, vary=False)
        params['ap'].set(value=0, vary=False)
        params['bp'].set(value=0, vary=False)
        params['cp'].set(value=0, vary=False)
    elif not refitback and oldpars is not None:
        params['a'].set(value=oldpars['a'].value, vary=False)
        params['b'].set(value=oldpars['b'].value, vary=False)
        params['c'].set(value=oldpars['c'].value, vary=False)
        params['ap'].set(value=oldpars['ap'].value, vary=False)
        params['bp'].set(value=oldpars['bp'].value, vary=False)
        params['cp'].set(value=oldpars['cp'].value, vary=False)

    # do background fit

    params['cp'].set(value=0., vary=False)
    result = minimize(background2min, params, args=(backfrec, backsig))
    '''
    params = result.params
    params['a'].set(vary=False)
    params['b'].set(vary=False)
    params['c'].set(vary=False)
    params['ap'].set(vary=False)
    params['bp'].set(vary=False)
    params['cp'].set(vary=True)
    result = minimize(background2min, params, args=(backfrec, backsig))

    params = result.params
    params['a'].set(vary=True)
    params['b'].set(vary=True)
    params['c'].set(vary=True)
    params['ap'].set(vary=True)
    params['bp'].set(vary=True)
    params['cp'].set(vary=True)
    result = minimize(background2min, params, args=(backfrec, backsig))
    '''

    # write error report
    report_fit(result.params)

    #calculate final background and remove background from original data
    complexresidual = un_realimag(result.residual)
    # backgroundfit = backsig + complexresidual
    fullbackground = np.array([S11back(xx, result.params) for xx in frec])
    S11corr = -S11 / fullbackground
    if ftype == '-A' or ftype == '-B':
        S11corr = -S11corr

    if doplots:
        plt.title('Signal and fitted background (Re,Im)')
        plt.plot(frec, S11.real)
        plt.plot(frec, S11.imag)
        plt.plot(frec, fullbackground.real)
        plt.plot(frec, fullbackground.imag)
        plt.show()

        plt.title('Signal and fitted background (Phase)')
        plt.plot(frec, np.angle(S11))
        plt.plot(frec, np.angle(fullbackground))
        plt.show()

        plt.title('Signal and fitted background (Polar)')
        plt.plot(S11.real, S11.imag)
        plt.plot(fullbackground.real, fullbackground.imag)
        plt.show()

        plt.title('Signal with background removed (Re,Im)')
        plt.plot(frec, S11corr.real)
        plt.plot(frec, S11corr.imag)
        plt.show()

        plt.title('Signal with background removed (Phase)')
        ph = np.unwrap(np.angle(S11corr))
        plt.plot(frec, ph)
        plt.show()

        plt.title('Signal with background removed (Polar)')
        plt.plot(S11corr.real, S11corr.imag)
        plt.show()

    # Make initial guesses for peak fit
    #    ires = np.argmax(S11corr.real)
    #    f0i = frec[ires]
    #    imin = np.argmax(S11corr.imag)
    #    imax = np.argmin(S11corr.imag)

    ktot = np.abs(frec[imax] - frec[imin])
    if ftype == 'A':
        Tres = np.abs(S11corr[ires] + 1)
        kext0 = ktot * Tres / 2.
    elif ftype == '-A':
        Tres = np.abs(1 - S11corr[ires])
        kext0 = ktot * Tres / 2.
    elif ftype == '-B':
        Tres = np.abs(S11corr[ires])
        kext0 = (1 - Tres) * ktot
    elif ftype == 'B':
        Tres = np.abs(S11corr[ires])
        kext0 = (1 + Tres) * ktot
    elif ftype == 'X':
        Tres = np.abs(S11corr[ires])
        kext0 = (1 - Tres) * ktot
    kint0 = ktot - kext0
    if kint0 <= 0.:
        kint0 = kext0
    Qint0 = f0i / kint0
    Qext0 = f0i / kext0

    #Make new parameter object (includes previous fitted background values)
    params = result.params
    if reusefitpars and oldpars is not None:
        params.add('Qint', value=oldpars['Qint'].value, vary=True, min=0)
        params.add('Qext', value=oldpars['Qext'].value, vary=True, min=0)
        params.add('f0', value=oldpars['f0'].value, vary=True)
        params.add('theta', value=oldpars['theta'].value, vary=True)
    else:
        params.add('Qint', value=Qint0, vary=True, min=0)
        params.add('Qext', value=Qext0, vary=True, min=0)
        params.add('f0', value=f0i, vary=True)
        params.add('theta', value=0, vary=True)
    params['a'].set(vary=False)
    params['b'].set(vary=False)
    params['c'].set(vary=False)
    params['ap'].set(vary=False)
    params['bp'].set(vary=False)
    params['cp'].set(vary=False)

    #Do final fit
    finalresult = minimize(S11residual, params, args=(frec, S11, ftype))
    # write error report
    report_fit(finalresult.params)
    params = finalresult.params
    try:
        print('QLoaded = ',
              1 / (1. / params['Qint'].value + 1. / params['Qext'].value))
    except ZeroDivisionError:
        print('QLoaded = ', 0.)

    if doplots:
        plt.title('Pre-Final signal and fit (Re,Im)')
        plt.plot(frec, S11corr.real)
        plt.plot(frec, S11corr.imag)
        plt.plot(frec, S11theo(frec, params, ftype).real)
        plt.plot(frec, S11theo(frec, params, ftype).imag)
        plt.show()

        plt.title('Pre-Final signal and fit (Polar)')
        plt.plot(S11.real, S11.imag)
        plt.plot(
            S11func(frec, params, ftype).real,
            S11func(frec, params, ftype).imag)
        plt.gca().set_aspect('equal', 'datalim')
        plt.show()

    # Redo the final fit varying all parameters
    if refitback and fitbackground:
        params['a'].set(vary=True)
        params['b'].set(vary=True)
        params['c'].set(vary=True)
        params['ap'].set(vary=True)
        params['bp'].set(vary=True)
        params['cp'].set(vary=False)
        finalresult = minimize(S11residual, params, args=(frec, S11, ftype))

        # write error report
        report_fit(finalresult.params)
        params = finalresult.params
        try:
            print('QLoaded = ',
                  1 / (1. / params['Qint'].value + 1. / params['Qext'].value))
        except ZeroDivisionError:
            print('QLoaded = ', 0.)


    # calculate final result and background
    complexresidual = un_realimag(finalresult.residual)
    finalfit = S11 + complexresidual
    # newbackground = np.array([S11back(xx,finalresult.params) for xx in frec])

    if doplots:
        plt.title('Final signal and fit (Re,Im)')
        plt.plot(frec, S11.real)
        plt.plot(frec, S11.imag)
        plt.plot(frec, finalfit.real)
        plt.plot(frec, finalfit.imag)
        plt.show()

        plt.title('Final signal and fit (Polar)')
        plt.plot(S11.real, S11.imag)
        plt.plot(finalfit.real, finalfit.imag)
        plt.gca().set_aspect('equal', 'datalim')
        plt.show()

        plt.title('Final signal and fit (Abs)')
        plt.plot(frec, np.abs(S11))
        plt.plot(frec, np.abs(finalfit))
        plt.show()

    # chi2 = finalresult.chisqr

    return params, frec, S11, finalresult
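
A typical call, assuming the module above is imported and `frec`, `S11` hold a measured complex trace (hypothetical usage, not taken from the source):

params, fband, sband, result = S11fit(frec, S11, ftype='A', doplots=False)

f0 = params['f0'].value
Qint = params['Qint'].value
Qext = params['Qext'].value
print('f0 =', f0, ' QL =', 1. / (1. / Qint + 1. / Qext))
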
Example #15
def di_fit_simultaneous(x, z, centers, widths, x0bounds, 
                        constrain = None, fix = None, span = None, 
                        nboot=None, bootstat='bounds'):
    
    def di_bootstrap_eps(mboot, xx, zz, fit_params, jlow, jhigh, pp0, bbounds):
        """ bootstrap estimate of errors on epsilon for single curve fit 
            Following this: http://www.phas.ubc.ca/~oser/p509/Lec_20.pdf """
        
        # create zfit and resid, both of which have shape=z.shape
        zfit = di_sense_simple(xx, *fit_params)
        resid = zz - zfit
        
        boot_results = np.zeros((mboot,len(fit_params)))
        for k in range(mboot):
            ztest = zfit + np.random.choice(resid.flatten(), size=zfit.shape)
            out, _ = curve_fit(di_sense_simple, xx[jlow:jhigh], 
                                ztest[jlow:jhigh], p0=pp0, bounds=bbounds)
            boot_results[k] = out
        
        if bootstat=='bounds':
            return np.percentile(boot_results[:,-1], [2.5, 97.5])
        elif bootstat=='std':
            return np.array([boot_results[:,-1].std(), boot_results[:,-1].std()])
    
    def di_dataset(params, i, xx):
        
        x0 = params['x0_{0:d}'.format(i)]
        theta = params['theta_{0:d}'.format(i)]
        di0 = params['di0_{0:d}'.format(i)]
        di2 = params['di2_{0:d}'.format(i)]
        epsilon = params['epsilon_{0:d}'.format(i)]
        
        return di_sense_simple(xx, x0, theta, di0, di2, epsilon)
    
    def di_objective(params, xx, zz, idx0, idx1):
        """ calculate total residual for fits to several data sets held
            in a 2-D array """
        
        n,m = zz.shape
        resid = []
        # make residual per data set
        for i in range(n):
            resid.append(zz[i,idx0[i]:idx1[i]] - di_dataset(params, i, xx[i,idx0[i]:idx1[i]]))
        # now flatten this to a 1D array, as minimize() needs
        return np.concatenate(resid)
    
    # get the dimensions of z
    if(z.ndim==1):
        m = len(z)
        n = 1
        z.shape = (n,m)
    elif(z.ndim==2):
        n,m = z.shape
    else:
        raise ValueError('the shape of zarray is wrong')
    
    # deal with the shape of x
    # should have a number of rows = 1 or number of rows = len(z)
    
    if(x.ndim==1 or x.shape[0]==1):
        x = np.tile(x, (n,1))
    elif(x.shape[0]==n):
        pass
    else:
        raise ValueError('the shape of xarray is wrong')
        
    if(span):
        icenters = np.nanargmin(np.abs(x.transpose()-centers), axis=0)
        ilow = np.nanargmin(np.abs(x.transpose()-(centers-span)), axis=0)
        ihigh = np.nanargmin(np.abs(x.transpose()-(centers+span)), axis=0)
    else:
        ilow = np.zeros(n, dtype=int)
        ihigh = -1*np.ones(n, dtype=int)
    
    columns = ['x0', 'theta', 'di0', 'di2', 'epsilon']
    df = pd.DataFrame(columns=columns)
    
    # add constraints specified in the 'constrain' list
    if(constrain or fix):
        
        # create parameters, one per data set
        fit_params = Parameters()

        for i in range(n):
            fit_params.add('x0_{0:d}'.format(i), value=centers[i], min=x0bounds[0], max=x0bounds[1])
            fit_params.add('theta_{0:d}'.format(i), value=widths[i], min=0.05, max=10.0)
            fit_params.add('di0_{0:d}'.format(i), 
                            value=0.5*max(abs(z[i,ilow[i]:ihigh[i]].min()),
                                          abs(z[i,ilow[i]:ihigh[i]].max())), 
                           min=0.0, max=0.5)
            fit_params.add('di2_{0:d}'.format(i), value=(z[i,ilow[i]]+z[i,ihigh[i]])/2.0, 
                           min=-0.01, max=0.01)
            fit_params.add('epsilon_{0:d}'.format(i), value=0.0, min=-2.0, max=2.0)

        if(constrain):
            for p in constrain:
                for i in range(1,n):
                    fit_params['{0}_{1:d}'.format(p,i)].expr = '{0}_{1:d}'.format(p,0)
                    
        if(fix):
            for p in fix:
                for i in range(n):
                    fit_params['{0}_{1:d}'.format(p,i)].vary = False
                    
        # run the global fit to all the data sets
        m = minimize(di_objective, fit_params, args=(x, z, ilow, ihigh))
    
        valdict = m.params.valuesdict()
        for i in range(n):
            df.loc[i] = [valdict['{0}_{1:d}'.format(c, i)] for c in columns]
    else:
        # no parameters need to be fixed between data sets
        # fit them all separately (much faster)
        if nboot:
            eps_err = np.zeros((n,2))
        for i in range(n):
            p0 = [centers[i], widths[i], 
                  max(abs(z[i,ilow[i]:ihigh[i]].min()), abs(z[i,ilow[i]:ihigh[i]].max())),
                  (z[i,ilow[i]]+z[i,ihigh[i]])/2.0, 0.0]
            bounds = [(x0bounds[0], 0.05, 0.0, -0.05, -2.0), (x0bounds[1], 10.0, 0.5, 0.05, 2.0)]
            df.loc[i], _ = curve_fit(di_sense_simple, x[i,ilow[i]:ihigh[i]], 
                                             z[i,ilow[i]:ihigh[i]], p0=p0, bounds=bounds)
            
            if nboot:
                eps_err[i] = di_bootstrap_eps(nboot, x[i], z[i], df.loc[i],
                                     ilow[i], ihigh[i], p0, bounds)
        if nboot:
            return df, eps_err
            
    return df
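
The `constrain` branch above is the standard lmfit recipe for a global fit: each dataset gets its own copy of every parameter, and the copies to be shared are pointed at dataset 0 through `expr`. In isolation:

from lmfit import Parameters

fit_params = Parameters()
for i in range(3):
    fit_params.add('theta_{0:d}'.format(i), value=1.0, min=0.05, max=10.0)

# share the width across all datasets: theta_1 and theta_2 mirror theta_0
for i in range(1, 3):
    fit_params['theta_{0:d}'.format(i)].expr = 'theta_0'

print(fit_params['theta_2'].expr)  # 'theta_0'
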
Example #16
from numpy import exp, linspace, pi, random, sign, sin

from lmfit import Parameters

# Define the residual function, specify "true" parameter values, and generate
# a synthetic data set with some noise:


def residual(pars, x, data=None):
    argu = (x*pars['decay'])**2
    shift = pars['shift']
    if abs(shift) > pi/2:
        shift = shift - sign(shift)*pi
    model = pars['amp']*sin(shift + x/pars['period']) * exp(-argu)
    if data is None:
        return model
    return model - data


p_true = Parameters()
p_true.add('amp', value=14.0)
p_true.add('period', value=5.33)
p_true.add('shift', value=0.123)
p_true.add('decay', value=0.010)

x = linspace(0.0, 250.0, 2500)
noise = random.normal(scale=0.7215, size=x.size)
data = residual(p_true, x) + noise

###############################################################################
# Create fitting parameters and set initial values:
fit_params = Parameters()
fit_params.add('amp', value=13.0)
fit_params.add('period', value=2)
fit_params.add('shift', value=0.0)
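
The snippet is cut off before the last parameter: `residual` also reads `pars['decay']`, so a fit needs it. A minimal completion under that assumption (initial values chosen for illustration):

from lmfit import minimize, report_fit

fit_params.add('decay', value=0.02)

out = minimize(residual, fit_params, args=(x,), kws={'data': data})
report_fit(out)
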
Example #17
    def init_params(self):
        """
        Define all the fitting parameters like
        self.params.add('sig', value=0, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=None)
        """
        self.params = Parameters()
        # the scalar parameters all share the same settings; register them in a loop
        for pname in ('norm', 'D', 'phi', 'R', 'sbkg', 'cbkg', 'abkg', 'U', 'Hsig'):
            self.params.add(pname,
                            value=getattr(self, pname),
                            vary=0,
                            min=-np.inf,
                            max=np.inf,
                            expr=None,
                            brute_step=0.1)
        for mkey in self.__mpar__.keys():
            for key in self.__mpar__[mkey].keys():
                if key != 'Material':
                    for i in range(len(self.__mpar__[mkey][key])):
                        self.params.add('__%s_%s_%03d' % (mkey, key, i),
                                        value=self.__mpar__[mkey][key][i],
                                        vary=0,
                                        min=0.0,
                                        max=np.inf,
                                        expr=None,
                                        brute_step=0.1)
Example #18
def lc_fit(fn, period, epoc, objid, fit_t0=False):
    '''
    Function to fit a batman model to an input lc datafile to find the best 
    fit system parameters
    '''

    time0 = TIME()
    #Load time and flux data for the object
    DATA = np.loadtxt(fn)
    time, flux, err = DATA[:, 0], DATA[:, 3], DATA[:, 4]

    phase = ((time - epoc) / period) % 1  # convert time values into phases
    phase = np.array([p - 1 if p > 0.8 else p for p in phase], dtype=float)

    p_fit = phase[phase < 0.2]  # crop phase, flux and error arrays to
    f_fit = flux[phase < 0.2]   # the range (-0.2, 0.2)
    e_fit = err[phase < 0.2]

    params = Parameters()  #Parameter instance to hold fit parameters
    params.add('rp', value=0.05, min=0., max=1.)  #Planet:Star radius ratio
    params.add('a', value=10., min=0., max=100.)  #Semi-major axis
    params.add('inc', value=89., min=60., max=90.)  #Orbital inclination
    if fit_t0:
        params.add('t0', value=0.0, min=-0.1, max=0.1)  #Transit centre time

    res = minimize(lc_min,
                   params,
                   args=(p_fit, f_fit, e_fit, fit_t0),
                   method='leastsq')  #perform minimization
    chi2 = np.sum(res.residual**2) / res.nfree  # reduced chi-squared
    rp_best = res.params['rp'].value
    a_best = res.params['a'].value
    inc_best = res.params['inc'].value
    if fit_t0:
        t0_best = res.params['t0'].value
        print(
            'Best fit parameters: rp={:.6f}; a={:.6f}; inc={:.6f}; t0={:.6f}'.
            format(rp_best, a_best, inc_best, t0_best))
    else:
        print('Best fit parameters: rp={:.6f}; a={:.6f}; inc={:.6f}'.format(
            rp_best, a_best, inc_best))

    print('Minimization result: {}: {}; chi2={:.4f}'.format(
        res.success, res.message, chi2))

    #Produce a best fit model using the minimization results
    pm_best = bm.TransitParams()

    if fit_t0: pm_best.t0 = t0_best
    else: pm_best.t0 = 0.  #Time of transit centre

    pm_best.per = 1.  #Orbital period = 1 as phase folded
    pm_best.rp = rp_best  #Ratio of planet to stellar radius
    pm_best.a = a_best  #Semi-major axis (units of stellar radius)
    pm_best.inc = inc_best  #Orbital Inclination [deg]
    pm_best.ecc = 0.  #Orbital eccentricity (fix circular orbits)
    pm_best.w = 90.  #Longitude of periastron [deg] (unimportant as circular orbits)
    pm_best.u = [0.1, 0.3]  #Stellar LD coefficients
    pm_best.limb_dark = "quadratic"  #LD model

    p_best = np.linspace(-0.2, 0.2, 10000)  #Produce a model LC using
    m_best = bm.TransitModel(pm_best, p_best)  #the output best fit parameters
    f_best = m_best.light_curve(pm_best)

    p1 = p_best[np.where(f_best < 1)[0][0]]  #Phase of first contact
    p4 = p_best[np.where(f_best < 1)[0][-1]]  #Phase of final contact

    t_dur = (p4 - p1) * period * 24  #Transit duration [hours]
    t_depth = (1 - f_best.min()) * 100  #Transit depth [percent]

    #Produce binned data set for plotting
    bw = 10 / (1440 * period)  #Bin width - 10 mins in units of phase
    p_bin, f_bin, e_bin = lc_bin(p_fit, f_fit, bw)

    #Produce plot of data and best fit model LC
    plt.figure(figsize=(9, 7.5))

    plt.plot(p_fit,
             f_fit,
             marker='o',
             color='gray',
             linestyle='none',
             markersize=1)
    plt.plot(p_bin, f_bin, 'ro', markersize=5)
    plt.plot(p_best, f_best, 'g--', linewidth=2)

    plt.xlabel('Phase')
    plt.ylabel('Flux')
    plt.title(
        'Depth: {:.4f}%;  Duration: {:.4f} hours;  (Rp/Rs): {:.4f};  chi2: {:.4f}'
        .format(t_depth, t_dur, rp_best, chi2))
    plt.xlim((-3 * p4, 3 * p4))
    plt.ylim((f_bin.min() - t_depth / 200, 1 + t_depth / 200))

    #    plt.savefig('/home/astro/phrvdf/NGTS_fitting/quickfit_plots/NOI_{}_lcfit.png'.format(objid))
    time1 = TIME()
    print("Time taken: {:.4f} s".format(time1 - time0))

    plt.show()
    if fit_t0:
        return p_best, f_best, p_fit, f_fit, e_fit, rp_best, a_best, inc_best, t0_best, t_dur, t_depth, res
    else:
        return p_best, f_best, p_fit, f_fit, e_fit, rp_best, a_best, inc_best, t_dur, t_depth, res
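
`lc_min` is defined elsewhere in the source; a plausible sketch, assuming it returns the error-weighted residuals of a quadratic limb-darkened batman model built the same way as the best-fit model above:

import batman as bm

def lc_min(params, phase, flux, err, fit_t0=False):
    """Hypothetical residual function for the minimizer above."""
    pm = bm.TransitParams()
    pm.t0 = params['t0'].value if fit_t0 else 0.
    pm.per = 1.                      # phase-folded light curve
    pm.rp = params['rp'].value
    pm.a = params['a'].value
    pm.inc = params['inc'].value
    pm.ecc = 0.
    pm.w = 90.
    pm.u = [0.1, 0.3]
    pm.limb_dark = "quadratic"
    model = bm.TransitModel(pm, phase).light_curve(pm)
    return (flux - model) / err      # weighted residuals for leastsq
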
Example #19
prazneVrstice = 5  # blank/header rows to skip in the data files

# names of the data files
file1 = 'fS2xantanP1T20.csv'
file2 = 'fS2xantanP1T30.csv'

# import the data
omega1, G1_1, G2_1 = ft.podatki(file1, prazneVrstice)
omega2, G1_2, G2_2 = ft.podatki(file2, prazneVrstice)

# number of Maxwell elements
N1 = 6
N2 = 6

# set initial values and bounds of the parameters
par1 = Parameters()
for i in range(1,N1+1):
    gstr = 'g'+str(i)
    lstr = 'l'+str(i)
    par1.add(gstr,value=1,min=1.e-4)
    par1.add(lstr,value=1,min=1.e-4)

# error minimization - non-linear least squares method
out1 = minimize(ft.maxwell,par1,args=(N1,omega1,G1_1,G2_1),method='leastsq')

# extract the fitted parameters (older lmfit updated par1 in place; newer versions return them in out1.params)
g1, l1 = ft.koeficienti(par1,N1)

# set initial values and bounds of the parameters
par2 = Parameters()
for i in range(1,N2+1):
Example #20
def EMAFit(x, data, nSample, **kwargs):

    maxtry = 100
    if "maxtry" in kwargs:
        maxtry = kwargs["maxtry"]
    # initial guess of each parameter
    nW = len(x)
    ini_a = np.nanmax(data)
    maxxW = x[np.nanargmax(data)]

    # if maxxW <= x[0]:
    #     return [False,False]

    # Weights of each point; data - data + 1 gives unit weights while letting NaNs propagate
    DtWeights = data - data + 1
    # DtWeights[data > np.nanpercentile(data, 75.)] = 4.
    # DtWeights[data < np.nanpercentile(data, 25.)] = 4.

    HM = (np.max(data) - np.min(data)) / 2. + np.min(data)
    FWHM = np.absolute(x[np.nanargmin(np.absolute(data - HM))] -
                       maxxW) * 2. / 2.355
    minchisquare = -1
    #resx = np.absolute(x[1] - x[0])

    means = np.array([ini_a / 2., ini_a / 2. * 10., 50., 10., FWHM, FWHM / 5.])

    parnames = ['a', 'c', 'xa', 'xc', 'xsca', 'xscc', 'sigmaa', 'sigmac', 'b']

    if 'fixparam' in kwargs:
        fixpars = kwargs['fixparam']
        fixvals = kwargs['fixvalue']

    if 'consparam' in kwargs:
        conspars = kwargs['consparam']
        consvalsl = kwargs['consvaluel']
        consvalsu = kwargs['consvalueu']

    # DoSameSource = False
    # if 'sameSource' in kwargs:
    #     if kwargs['sameSource'] == True:
    #         DoSameSource = True

    PDom = False  # no precursor source
    SDom = False  # no primary source
    SameSource = False  # source location is the same for aerosols and precursors
    SameSigma = False

    if 'sameSource' in kwargs:
        if kwargs['sameSource'] == True:
            SameSource = True

    if 'sameSigma' in kwargs:
        if kwargs['sameSigma'] == True:
            SameSigma = True

    if 'pDom' in kwargs:
        if kwargs['pDom'] == True:
            PDom = True

    if 'sDom' in kwargs:
        if kwargs['sDom'] == True:
            SDom = True

    if 'solver' in kwargs:
        solver = kwargs['solver']
    else:
        solver = 'trust-constr'

    FoundSolution = False
    ntry = 0
    SampleDoubled = False

    while not FoundSolution and ntry < maxtry:

        ntry = ntry + 1
        SolutionUpdated = False

        design = lhs(6, samples=nSample)
        #as the parameters are supposed to be positive, we set the sigma of searching to be half of the initial guess
        stds = 0.5 * means

        for i in np.arange(6):
            design[:, i] = norm(loc=means[i], scale=stds[i]).ppf(design[:, i])

        # solution for one set of initial guesses
        for jfit in np.arange(nSample + 1):
            if jfit < nSample:
                fitpars = design[jfit, :]
            else:
                fitpars = means
            if np.min(fitpars) < 0:
                continue

            if solver == 'lmfit':

                params = Parameters()
                params.add('a', value=fitpars[0],
                           min=0.)  # x-direction integration of total burden
                params.add('xa', value=fitpars[2], min=1. / 3)
                params.add('xc', value=fitpars[3], min=1. / 3)
                # params.add('fc', value=inifc, min=0., max=1.)
                params.add('c', value=fitpars[1], min=0.)

                params.add('xsca', value=maxxW, min=np.min(x), max=np.max(x))
                params.add('xscc', value=maxxW, min=np.min(x), max=np.max(x))

                params.add('sigmaa',
                           value=fitpars[4],
                           min=0.5 / samplewd,  # samplewd: defined elsewhere in the source module
                           max=np.max([maxxW - np.min(x),
                                       np.max(x) - maxxW
                                       ]))  # standard deviation
                params.add('sigmac',
                           value=fitpars[5],
                           min=0.5 / samplewd,
                           max=np.max([maxxW - np.min(x),
                                       np.max(x) - maxxW
                                       ]))  # standard deviation
                params.add('b', value=np.min(data), min=0.,
                           max=np.max(data))  # background concentration

                out = minimize(residualEMA,
                               params,
                               args=(x, data),
                               iter_cb=EMACall)
                chisqr = np.sum(residualEMA(out.params, x, data, DtWeights)**2)

                if (minchisquare < 0) | (chisqr < minchisquare):
                    EMAout = out.params
                    minchisquare = chisqr

                if np.sqrt(minchisquare) / np.mean(data) < 0.01:
                    break

            elif solver == 'trust-constr':  # Do the scipy.optimize fit using "trust_constr" method

                # shared with the chi-square and constraint helpers via module globals
                global ukninds, kninds, knvals

                ukninds = np.arange(9).astype(int)
                kninds = np.array([]).astype(int)
                knvals = []

                x0 = np.zeros(9)
                x0[0:4] = fitpars[0:4]
                x0[6:8] = fitpars[4:6]
                x0[8] = np.min(data)
                x0[4] = maxxW  # , min = x[0], max = x[-1])
                x0[5] = maxxW  # , min = x[0], max = x[-1])

                lowbs = np.array(
                    [0., 0., 0., 0.,
                     np.min(x),
                     np.min(x), 0., 0., 0.])
                highbs = np.array([
                    np.inf, np.inf, np.inf, np.inf,
                    np.max(x),
                    np.max(x), np.inf, np.inf,
                    np.max(data)
                ])  #np.max([maxxW - np.min(x), np.max(x) - maxxW])

                if 'consparam' in kwargs:
                    for ipar in np.arange(len(consvalsl)):
                        lowbs[conspars[ipar]] = consvalsl[ipar]
                        highbs[conspars[ipar]] = consvalsu[ipar]
                        x0[conspars[ipar]] = np.mean(
                            [consvalsl[ipar], consvalsu[ipar]])

                # test if solutions are similar for different "fixsource" handling
                if PDom == True:
                    kninds = np.append(kninds,
                                       np.array([1, 3, 5, 7]).astype(int))
                    knvals = np.append(knvals, [0., 0., 0., 0.])

                    delinds = ~np.isin(ukninds, [1, 3, 5, 7])
                    ukninds = ukninds[delinds]
                    x0 = x0[delinds]

                if SDom == True:
                    kninds = np.append(kninds, np.array([0, 4, 6]).astype(int))
                    knvals = np.append(knvals, [0., 0., 0.])

                    delinds = ~np.isin(ukninds, [0, 4, 6])
                    ukninds = ukninds[delinds]
                    x0 = x0[delinds]

                if 'fixparam' in kwargs:
                    for ipar in np.arange(len(fixvals)):
                        if ~np.isin(fixpars[ipar], ukninds):
                            continue
                        kninds = np.append(kninds, int(fixpars[ipar]))
                        knvals = np.append(knvals, fixvals[ipar])
                        delinds = ~np.isin(ukninds, fixpars[ipar])
                        ukninds = ukninds[delinds]
                        x0 = x0[delinds]

                # process bounds to be consistent with x0 and ukninds...
                bounds = Bounds(lowbs[ukninds],
                                highbs[ukninds],
                                keep_feasible=True)
                lconstmat = np.empty((0, len(ukninds)))
                lconstmin = []
                lconstmax = []

                if SameSource == True:
                    # tie the two source locations together (xsca - xscc == 0)
                    # when both are still free parameters
                    if np.isin(4, ukninds) & np.isin(5, ukninds):
                        lconstmat = np.append(lconstmat,
                                              [np.zeros(len(ukninds))],
                                              axis=0)

                        lconstmat[-1, ukninds == 4] = 1
                        lconstmat[-1, ukninds == 5] = -1
                        lconstmin = np.append(lconstmin, 0.)
                        lconstmax = np.append(lconstmax, 0.)

                if SameSigma == True:
                    # likewise tie the two widths (sigmaa - sigmac == 0)
                    if np.isin(6, ukninds) & np.isin(7, ukninds):
                        lconstmat = np.append(lconstmat,
                                              [np.zeros(len(ukninds))],
                                              axis=0)
                        lconstmat[-1, ukninds == 6] = 1
                        lconstmat[-1, ukninds == 7] = -1
                        lconstmin = np.append(lconstmin, 0.)
                        lconstmax = np.append(lconstmax, 0.)

                nlconstr = NonlinearConstraint(EMANLConstr1, -np.inf, 0.)

                if (SameSource == True) | (SameSigma == True):
                    lconstr = LinearConstraint(lconstmat, lconstmin, lconstmax)
                    constraints = [lconstr, nlconstr]
                else:
                    constraints = nlconstr

                try:
                    res = scipyminimize(EMAchisqr1, x0, args=(x, data, DtWeights), method='trust-constr', \
                                        constraints=constraints,options={'verbose': 0,'maxiter':50000}, bounds=bounds)  #constraints=constraints,
                    pars = res.x
                    Hess = nd.Hessian(EMAchisqr1)(pars, x, data, DtWeights)
                    chisqr = EMAchisqr1(pars, x, data, DtWeights)

                    try:
                        invHess = np.linalg.inv(Hess)
                    except np.linalg.LinAlgError:
                        invHess = np.linalg.pinv(Hess)
                    if np.any(np.isnan(invHess)):
                        invHess = np.linalg.pinv(Hess)

                except Exception:
                    continue

                if np.any(np.isnan(invHess)):
                    continue

                sigmas = np.sqrt(invHess * chisqr / (data.size - pars.size))

                params = Parameters()

                for ikn in np.arange(len(kninds)):
                    params.add(parnames[kninds[ikn]],
                               value=knvals[ikn],
                               brute_step=0.)
                for iukn in np.arange(len(ukninds)):
                    params.add(parnames[ukninds[iukn]],
                               value=pars[iukn],
                               brute_step=sigmas[iukn, iukn])

                if (minchisquare < 0) | (chisqr < minchisquare):
                    SolutionUpdated = True

                    EMAout = params
                    minchisquare = chisqr
                    # update initial guess
                    means = np.array([EMAout['a'].value, EMAout['c'].value, EMAout['xa'].value, EMAout['xc'].value,\
                                      EMAout['sigmaa'].value, EMAout['sigmac'].value])

                if minchisquare < 0:
                    continue

                if np.sqrt(minchisquare) / np.mean(data) < 0.01:
                    FoundSolution = True
                    ntry = maxtry
                    break
            else:
                print(
                    "Unknown solver! Specify one of: 'trust-constr' or 'lmfit'."
                )
                return [False, False]

        #print('EMA',ntry,jfit,nSample,chisqr,minchisquare)
        if (SolutionUpdated == False) & (SampleDoubled == False):
            # no start improved the solution: widen the search fivefold
            nSample = int(nSample * 5)
            SampleDoubled = True
            continue

        if (SolutionUpdated == True) & (SampleDoubled == True):
            nSample = int(nSample / 5)
            SampleDoubled = False
            continue

        if (SolutionUpdated == False) & (SampleDoubled == True):
            nSample = int(nSample / 5)
            ntry = maxtry
            SampleDoubled = False
            continue
    if minchisquare < 0:
        return [False, False]
    else:
        return [EMAout, True]
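
The search strategy deserves a note: rather than one fit from one guess, EMAFit draws nSample starting points from a Latin hypercube, keeps the lowest chi-square, and widens the sample fivefold when no start improves. The guess generation alone looks like this (pyDOE's `lhs` and scipy's `norm`; the means are illustrative):

import numpy as np
from pyDOE import lhs
from scipy.stats import norm

means = np.array([1.0, 10.0, 50.0, 10.0, 5.0, 1.0])  # illustrative guesses
stds = 0.5 * means            # search width: half of each initial guess

design = lhs(6, samples=20)   # 20 points in the 6-dimensional unit hypercube
for i in range(6):
    # map each uniform column through a normal centred on the guess
    design[:, i] = norm(loc=means[i], scale=stds[i]).ppf(design[:, i])
# each row of `design` is now one candidate initial-parameter vector
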
Example #21
    def setUp(self):
        self.params = Parameters()
        self.params.add_many(('a', 1., True, None, None, None),
                             ('b', 2., True, None, None, None),
                             ('c', 3., True, None, None, '2. * a'))
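
Each tuple handed to `add_many` is positional: `(name, value, vary, min, max, expr)`, optionally followed by `brute_step`. Here `'c'` therefore starts at 3 but carries the constraint expression `'2. * a'`, so it tracks `a` instead of varying freely.
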
Example #22
def EMGFit(x, data, nSample, **kwargs):

    maxtry = 100
    if "maxtry" in kwargs:
        maxtry = kwargs["maxtry"]
    parnames = ['a', 'x0', 'xsc', 'sigma', 'b']
    # initial guess of each parameter
    nW = len(x)
    ini_a = np.nanmax(data)

    # Weights of each point; data - data + 1 gives unit weights while letting NaNs propagate
    DtWeights = data - data + 1
    # DtWeights[data>np.nanpercentile(data,75.)]=4.
    # DtWeights[data < np.nanpercentile(data, 25.)] = 4.

    maxxW = x[np.nanargmax(data)]
    # print(maxxW, x[0])
    # if maxxW <= x[0]:
    #     return [False,False]
    resx = np.absolute(x[1] - x[0])  # x resolution; needed below by the 'lmfit' branch

    HM = (np.max(data) - np.min(data)) / 2. + np.min(data)
    FWHM = np.absolute(x[np.nanargmin(np.absolute(data - HM))] -
                       maxxW) * 2. / 2.355

    minchisquare = -1
    means = np.array([ini_a, 50., FWHM])

    Gaussian = False

    GauDecay = False

    if 'GauDecay' in kwargs:
        if kwargs['GauDecay'] == True:
            GauDecay = True
            avgw10 = kwargs['minx0'] / 3600.

    if 'fixparam' in kwargs:
        fixpars = kwargs['fixparam']
        fixvals = kwargs['fixvalue']

    if 'solver' in kwargs:
        solver = kwargs['solver']
    else:
        solver = 'trust-constr'

    FoundSolution = False
    SampleDoubled = False
    ntry = 0

    while not FoundSolution and ntry < maxtry:

        ntry = ntry + 1
        SolutionUpdated = False
        design = lhs(3, samples=nSample)
        stds = 0.5 * means
        for i in np.arange(3):
            design[:, i] = norm(loc=means[i], scale=stds[i]).ppf(design[:, i])

        for jfit in np.arange(nSample + 1):

            if jfit < nSample:
                fitpars = design[jfit, :]
            else:
                fitpars = means

            if np.min(fitpars) < 0:
                continue

            if solver == 'lmfit':
                params = Parameters()
                params.add('a', value=fitpars[0],
                           min=0.)  # x-direction integration of total burden
                params.add('x0', value=fitpars[1], min=resx /
                           3)  # x-direction distance in one-lifetime

                params.add('xsc', value=maxxW, min=np.min(x), max=np.max(x))

                params.add('sigma',
                           value=fitpars[2],
                           min=0.5,
                           max=np.max([maxxW - np.min(x),
                                       np.max(x) - maxxW
                                       ]))  # # standard deviation
                params.add('b', value=np.min(data), min=0.,
                           max=np.max(data))  # background concentration

                out = minimize(residualEMG,
                               params,
                               args=(x, data),
                               iter_cb=EMGCall)

                chisqr = np.sum(residualEMG(out.params, x, data)**2)

                if (minchisquare < 0) | (chisqr < minchisquare):
                    EMGout = out.params
                    minchisquare = chisqr

                if np.sqrt(minchisquare) / np.mean(data) < 0.01:
                    break

            elif solver == 'trust-constr':  # Do the scipy.optimize fit using "trust_constr" method

                global uknindsg, knindsg, knvalsg

                uknindsg = np.arange(5).astype(int)
                knindsg = np.array([]).astype(int)
                knvalsg = []

                x0 = np.zeros(5)
                x0[0:2] = fitpars[0:2]
                x0[3] = fitpars[2]
                x0[2] = maxxW  # , min = x[0], max = x[-1])
                x0[4] = np.min(data)

                lbds = np.array([0., 0., np.min(x), 0., 0.])
                hbds = np.array([
                    np.inf, np.inf,
                    np.max(x), np.inf,
                    np.max(data)
                ])  #np.max([maxxW - np.min(x), np.max(x) - maxxW])

                # test if solutions are similar for different "fixsource" handling

                if 'fixparam' in kwargs:
                    for ipar in np.arange(len(fixvals)):

                        if (fixpars[ipar]
                                == 1) & (np.absolute(fixvals[ipar]) <= 1.e-20):
                            Gaussian = True

                        if ~np.isin(fixpars[ipar], uknindsg):
                            continue
                        knindsg = np.append(knindsg, int(fixpars[ipar]))
                        knvalsg = np.append(knvalsg, fixvals[ipar])
                        delinds = ~np.isin(uknindsg, fixpars[ipar])
                        uknindsg = uknindsg[delinds]
                        x0 = x0[delinds]

                        # process bounds to be consistent with x0 and ukninds...

                bounds = Bounds(lbds[uknindsg],
                                hbds[uknindsg],
                                keep_feasible=True)

                nlconstr = NonlinearConstraint(EMGNLConstr1, -np.inf, 100.)

                try:

                    if Gaussian == True:
                        res = scipyminimize(EMGchisqr2, x0, args=(x, data, DtWeights), method='trust-constr', \
                                            options={'verbose': 0 ,'maxiter':50000}, \
                                            bounds=bounds)  # constraints=[lconstr, nlconstr],
                        Hess = nd.Hessian(EMGchisqr2)(res.x, x, data)

                        pars = res.x

                        chisqr = EMGchisqr2(pars, x, data)
                    else:

                        res = scipyminimize(EMGchisqr1, x0, args=(x, data, DtWeights), method='trust-constr', \
                                            constraints=[nlconstr],options={'verbose': 0,'maxiter':50000}, \
                                            bounds=bounds)  # constraints=[nlconstr],
                        Hess = nd.Hessian(EMGchisqr1)(res.x, x, data,
                                                      DtWeights)

                        pars = res.x

                        chisqr = EMGchisqr1(pars, x, data, DtWeights)

                    try:
                        invHess = np.linalg.inv(Hess)
                    except np.linalg.LinAlgError:
                        invHess = np.linalg.pinv(Hess)
                    if np.any(np.isnan(invHess)):
                        invHess = np.linalg.pinv(Hess)

                except Exception:
                    # fit failed for this starting point; move to the next sample
                    continue

                if np.any(np.isnan(invHess)):
                    continue

                # cov ~ inv(Hessian) * chisqr / dof; the square roots of the
                # diagonal are the 1-sigma uncertainties stored as brute_step
                sigmas = np.sqrt(invHess * chisqr / (data.size - pars.size))

                params = Parameters()
                for ikn in np.arange(len(knindsg)):
                    params.add(parnames[knindsg[ikn]],
                               value=knvalsg[ikn],
                               brute_step=0.)
                for iukn in np.arange(len(uknindsg)):
                    params.add(parnames[uknindsg[iukn]],
                               value=pars[iukn],
                               brute_step=sigmas[iukn, iukn])

                if (minchisquare < 0) | (chisqr < minchisquare):
                    SolutionUpdated = True

                    EMGout = params
                    minchisquare = chisqr
                    # update initial guess
                    means = np.array([
                        EMGout['a'].value, EMGout['x0'].value,
                        EMGout['sigma'].value
                    ])

                if np.sqrt(minchisquare) / np.mean(data) < 0.01:
                    FoundSolution = True
                    ntry = maxtry
                    break

            else:

                print(
                    "Unknown solver! Specify one of: 'lmfit', 'trust-constr'."
                )
                return [False, False]

        if not SolutionUpdated and not SampleDoubled:

            nSample = int(nSample * 5)
            SampleDoubled = True
            continue

        if SolutionUpdated and SampleDoubled:
            nSample = int(nSample / 5)
            SampleDoubled = False
            continue

        if not SolutionUpdated and SampleDoubled:
            nSample = int(nSample / 5)
            ntry = maxtry
            SampleDoubled = False
            continue

    if minchisquare < 0:
        return [False, False]
    else:

        return [EMGout, True]
Example #23
# number of integer digits in the initial epoch and in the time stamps
len_init_t0 = len(str(int(init_t0)))
len_times = len(str(int(times.mean())))

# Check if `init_t0` is in JD or MJD
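# (Digit-count heuristic: a JD epoch such as 2455000 has 7 integer digits, an
#  MJD such as 55000 has 5, and a "simplified" MJD such as 5000 has 4;
#  2400000.5 is the JD-to-MJD offset, 2450000.5 the JD-to-simplified offset.)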
if len_init_t0 == 7 and len_times != 7:
    if len_times == 5:
        init_t0 = init_t0 - 2400000.5
    elif len_times == 4:
        init_t0 = init_t0 - 2450000.5
    else:
        raise ValueError('Cannot reconcile `init_t0` ({}) with `times.mean()` ({}): unexpected digit counts'.format(int(init_t0), int(times.mean())))

# Check if `init_t0` is in MJD or Simplified-MJD
if len(str(int(init_t0))) > len(str(int(times.mean()))):
    init_t0 = init_t0 - 50000

print('Initializing Parameters')
initialParams = Parameters()
initialParams.add_many(
    ('period'   , init_period, False),
    ('deltaTc'  , -0.00005    , True  ,-0.05, 0.05),
    ('deltaEc'  , 0.00005     , True  ,-0.05, 0.05),
    ('inc'      , init_inc   , False, 80.0, 90.),
    ('aprs'     , init_aprs  , False, 0.0, 100.),
    ('tdepth'   , init_tdepth, True, 0.0, 0.3 ),
    ('edepth'   , init_fpfs  , True, 0.0, 0.1),
    ('ecc'      , init_ecc   , False, 0.0, 1.0 ),
    ('omega'    , init_omega , False, -180, 180 ),
    ('u1'       , init_u1    , True , 0.0, 1.0 ),
    ('u2'       , init_u2    , True,  0.0, 1.0 ),
    ('tCenter'  , init_t0    , False),
    ('intcept0' , 1.0        , False),
    ('slope0'   , 0          , False),
)
Example #24
def EMG2DFit(x, y, data, x01h, samplewd, nSample, **kwargs):

    # data: 2-D distribution of a column quantity (e.g. AOD, or SO2 in mol/m2)
    # x, y: always in grid resolution (dxy == 1)
    # samplewd: resolution, i.e. how many km are represented by 1 grid cell
    # 7 parameters to fit, similar in the two different parameterizations;
    # the initial guess of each parameter is drawn below by Latin hypercube sampling

    npar = 7  # (a, x0, ux, uy, sigma, b, eta)

    GauDecay = False
    if 'GauDecay' in kwargs:
        if kwargs['GauDecay'] == True:
            GauDecay = True

    if 'fixparam' in kwargs:
        fixpars = kwargs['fixparam']
        fixvals = kwargs['fixvalue']

    # if GauDecay == False:
    #     eta=kwargs['eta']

    if 'solver' in kwargs:
        solver = kwargs['solver']
    else:
        solver = 'trust-constr'

    #total emission (kg)
    ini_a = np.sum(data)  #dxy==1
    #location of maxima
    maxxW = x[np.nanargmax(data)]
    maxyW = y[np.nanargmax(data)]

    if np.array(maxxW).size > 1:
        maxxW = maxxW[0]
        maxyW = maxyW[0]
    #take the across-wind data at maxima to estimate sigma

    ydata = data[x == maxxW]
    peaky = y[x == maxxW]
    HM = (np.max(ydata) - np.min(ydata)) / 2. + np.min(ydata)
    FWHM = np.absolute(peaky[np.nanargmin(np.absolute(ydata - HM))] -
                       maxyW) * 2. / 2.355
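    # 2.355 ~ 2*sqrt(2*ln(2)): doubling the half-maximum offset gives the FWHM,
    # and dividing by 2.355 converts it into the Gaussian sigma initial guess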

    minchisquare = -1

    #latin hypercube construction of initials
    design = lhs(3, samples=nSample)
    if GauDecay == False:
        means = np.array([ini_a, x01h, FWHM])
    else:
        means = np.array([ini_a / x01h, x01h, FWHM])

    stds = nSample * means

    for i in np.arange(3):
        design[:, i] = norm(loc=means[i], scale=stds[i]).ppf(design[:, i])

    for jfit in np.arange(nSample + 1):

        if jfit < nSample:
            fitpars = design[jfit, :]
        else:
            fitpars = means

        if np.min(fitpars) < 0:
            continue
        try:
            if solver == 'trust-constr':  # Do the scipy.optimize fit using "trust_constr" method

                # initials and bounds
                x0 = np.zeros(npar)  # (a, x0, ux, uy, sigma, b, eta)
                x0[0:2] = fitpars[0:2]
                x0[2] = maxxW
                x0[3] = maxyW
                x0[4] = fitpars[2]
                x0[5] = np.min(data)
                x0[6] = 1.5 * samplewd

                bounds = Bounds([0., 1. / 3, np.min(x), np.min(y), 0.5, 0.,0.], \
                                [np.inf, np.inf, np.max(x), np.max(y), np.inf, np.max(data),np.inf])

                if 'fixparam' in kwargs:
                    for ipar in np.arange(len(fixvals)):
                        bounds.lb[fixpars[ipar]] = fixvals[ipar]
                        bounds.ub[fixpars[ipar]] = fixvals[ipar]
                        x0[fixpars[ipar]] = fixvals[ipar]

                if GauDecay == False:
                    # EMG Fitting

                    nlconstr = NonlinearConstraint(EMG2DConstr, -np.inf, 20.)
                    res = scipyminimize(EMG2Dchisqr, x0, args=(x, y, data), method='trust-constr', \
                                        constraints=[nlconstr], options={'verbose': 0,'maxiter':50000}, \
                                        bounds=bounds)  # constraints=[lconstr, nlconstr],
                    pars = res.x
                    Hess = nd.Hessian(EMG2Dchisqr)(pars, x, y, data)

                    chisqr = EMG2Dchisqr(pars, x, y, data)

                elif GauDecay == True:
                    # 2-D gaussian decay fitting
                    res = scipyminimize(GauDecay2Dchisqr, x0, args=(x, y, data), method='trust-constr', \
                                        options={'verbose': 0,'maxiter':50000}, bounds=bounds)  # constraints=[lconstr, nlconstr],
                    pars = res.x
                    Hess = nd.Hessian(GauDecay2Dchisqr)(pars, x, y, data)

                    chisqr = GauDecay2Dchisqr(pars, x, y, data)
                else:
                    print(
                        "Something went wrong, the type of fitting is not determined!"
                    )
                    return [False, False]

                sigmas = np.sqrt(
                    np.linalg.inv(Hess) * chisqr / (data.size - pars.size))

                params = Parameters()
                params.add('x0', value=pars[1], brute_step=sigmas[1, 1])
                params.add('ux', value=pars[2], brute_step=sigmas[2, 2])
                params.add('uy', value=pars[3], brute_step=sigmas[3, 3])
                params.add('sigma', value=pars[4], brute_step=sigmas[4, 4])
                if GauDecay == True:
                    params.add('Qu', value=pars[0], brute_step=sigmas[
                        0, 0])  # x-direction integration of total burden
                else:
                    params.add('a', value=pars[0], brute_step=sigmas[
                        0, 0])  # x-direction integration of total burden
                    params.add('eta', value=pars[6], brute_step=sigmas[6, 6])
                params.add('b', value=pars[5], brute_step=sigmas[5, 5])

                if (minchisquare < 0) | (chisqr < minchisquare):
                    EMGout = params
                    minchisquare = chisqr

                if np.sqrt(minchisquare) / np.mean(data) < 0.01:
                    break

            else:

                print(
                    "Unknown solver! Only 'trust-constr' is supported here."
                )
                return [False, False]

        except Exception:
            print("Unsuccessful fitting, trying the next initial guess!")
            continue

    if minchisquare < 0:
        return [False, False]
    else:

        return [EMGout, True]
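
# A minimal usage sketch for EMG2DFit (illustrative only): it assumes the
# module-level helpers used above (EMG2Dchisqr, EMG2DConstr, lhs, norm,
# scipyminimize, nd) are importable; the plume grid below is synthetic.
import numpy as np

nx, ny = 60, 40
xg, yg = np.meshgrid(np.arange(nx, dtype=float), np.arange(ny, dtype=float))
x, y = xg.ravel(), yg.ravel()
data = 5.0 * np.exp(-((x - 20.) / 8.) ** 2 - ((y - 20.) / 4.) ** 2) + 0.1

fit_result, ok = EMG2DFit(x, y, data, x01h=8.0, samplewd=1.0, nSample=20)
if ok:
    print(fit_result['x0'].value, fit_result['sigma'].value)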
Example #25
    offset = pars['line_off'].value
    model = yg + offset + x * slope
    if data is None:
        return model
    if sigma is None:
        return (model - data)

    return (model - data) / sigma


n = 201
xmin = 0.
xmax = 20.0
x = linspace(xmin, xmax, n)

p_true = Parameters()
p_true.add('amp_g', value=21.0)
p_true.add('cen_g', value=8.1)
p_true.add('wid_g', value=1.6)
p_true.add('line_off', value=-1.023)
p_true.add('line_slope', value=0.62)

data = (gaussian(x, p_true['amp_g'].value, p_true['cen_g'].value,
                 p_true['wid_g'].value) + random.normal(scale=0.23, size=n) +
        x * p_true['line_slope'].value + p_true['line_off'].value)

if HASPYLAB:
    pylab.plot(x, data, 'r+')

p_fit = Parameters()
p_fit.add('amp_g', value=10.0)
Example #26
def initialize_Objective_Function_PUF_Does_Flips(initial_mutation_penalties, Temperature, mutation_range_high, mutation_range_low):
    # Function to initialize the model parameters and package them in a Parameters object for lmfit.
    # Note that parameters are initialized and fit as exp(-ddGparam/kT) because there is no reason
    # to make the additional computation of the partition function from the ddG values during fitting.
    # Inputs:
    # initial_mutation_penalties -- provided as ddG values in kcal/mol
    # Temperature -- temperature (in degrees Celsius) at which the experimental ddGs were collected
    # mutation_range_high -- upper bounds on the amount a base penalty can change from the initial value, in kcal/mol
    # mutation_range_low -- lower bounds on the amount a base penalty can change from the initial value, in kcal/mol
    # initial_flip_params -- provided as ddG values in kcal/mol (not an input of this variant)
    # Outputs:
    # params -- Parameters object for lmfit
    # param_names -- names of the parameters to be fit

    #########################################################################
    # Define the ddG conversion factor (-kT)*2.30258509299 (the 2.3025 factor converts from ln to log10)
    ddG_conversion_factor = -(Temperature+273.15)*0.0019872041*2.30258509299
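    # Sanity check: at 25 C this factor is -(298.15)*(0.0019872041)*(2.30258509299)
    # ~ -1.364 kcal/mol per log10 unit, so 10**(ddG/ddG_conversion_factor) == exp(-ddG/RT)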

    #########################################################################
    # Define fit parameters and initialize as relative Ka effects
    # (Ka_rel = Kd,WT / Kd,register = exp(-ddG/RT)) -- using relative Ka's allows the parameters
    # for a register to be multiplied and then go straight into the sum of the partition function
    fitParameters = pd.DataFrame(index=['upperbound', 'initial', 'lowerbound'],
                                 columns=['oneA', 'oneC', 'oneT', 'oneG', 'twoA', 'twoC', 'twoT', 'twoG', 
                                          'threeA', 'threeC', 'threeT', 'threeG', 'fourA', 'fourC', 'fourT', 'fourG', 
                                          'fiveA', 'fiveC', 'fiveT', 'fiveG', 'sixA', 'sixC', 'sixT', 'sixG', 
                                          'sevenA', 'sevenC', 'sevenT', 'sevenG', 'eightA', 'eightC', 'eightT', 'eightG',
                                          'nineA', 'nineC', 'nineT', 'nineG'])
        
    #########################################################################
    # Initialize mutational penalties
    # Compute upper and lower bounds for positional penalties
    initial_mutation_penalties_low = 10**((initial_mutation_penalties-mutation_range_low)/(ddG_conversion_factor))
    initial_mutation_penalties_high = 10**((initial_mutation_penalties+mutation_range_high)/(ddG_conversion_factor))
    initial_mutation_penalties = 10**(initial_mutation_penalties/(ddG_conversion_factor))

    k=0
    for i in ['one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine']:
        for j in ['A', 'C', 'G', 'T']:
            fitParameters.loc[:, i+j] = [initial_mutation_penalties_low[k],initial_mutation_penalties[k],initial_mutation_penalties_high[k]]
            k = k+1

    #########################################################################
    # Define consensus bases
    consensus_bases = ['oneT', 'twoG', 'threeT', 'fourA', 'fiveT', 'sixA', 'sevenT', 'eightA', 'nineT']

    #########################################################################
    # Define the names of the fit parameters.
    param_names = fitParameters.columns.tolist()
    
    #########################################################################
    # Store initial fit parameters in Parameters object for fitting with lmfit.
    params = Parameters()
    for param in param_names:
        if param in consensus_bases:
            params.add(param, value=fitParameters.loc['initial', param],
                   min = fitParameters.loc['lowerbound', param],
                   max = fitParameters.loc['upperbound', param], vary = False)
        else:
            params.add(param, value=fitParameters.loc['initial', param],
                   min = fitParameters.loc['lowerbound', param],
                   max = fitParameters.loc['upperbound', param])
        
    return params, param_names
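
# A minimal usage sketch (hypothetical numbers): the 36 penalties cover the
# 9 positions x 4 bases defined above; ranges (kcal/mol) and temperature
# (Celsius) are illustrative, and the module's pandas/lmfit imports are assumed.
import numpy as np

penalties = np.full(36, 1.0)   # initial ddG penalties, kcal/mol
rng_hi = np.full(36, 2.0)      # allowed increase from the initial value
rng_lo = np.full(36, 2.0)      # allowed decrease from the initial value
params, names = initialize_Objective_Function_PUF_Does_Flips(
    penalties, 37.0, rng_hi, rng_lo)
print(len(names), params['oneA'].value)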
Example #27
def simple_flux_from_greybody(lambdavector,
                              Trf=None,
                              b=None,
                              Lrf=None,
                              zin=None,
                              ngal=None):
    '''
	Return flux densities at any wavelength of interest (in the range 1-10000 micron),
	assuming a galaxy (at given redshift) graybody spectral energy distribution (SED),
	with a power law replacing the Wien part of the spectrum to account for the
	variability of dust temperatures within the galaxy. The two different functional
	forms are stitched together by imposing that the two functions and their first
	derivatives coincide. The code contains the nitty-gritty details explicitly.

	Inputs:
	alphain = spectral index of the power law replacing the Wien part of the spectrum, to account for the variability of dust temperatures within a galaxy [default = 2; see Blain 1999 and Blain et al. 2003]
	betain = spectral index of the emissivity law for the graybody [default = 2; see Hildebrand 1985]
	Trf = rest-frame temperature [in K; default = 20K]
	Lrf = rest-frame FIR bolometric luminosity [in L_sun; default = 10^10]
	zin = galaxy redshift [default = 0.001]
	lambdavector = array of wavelengths of interest [in microns; default = (24, 70, 160, 250, 350, 500)];

	AUTHOR:
	Lorenzo Moncelsi [[email protected]]

	HISTORY:
	20June2012: created in IDL
	November2015: converted to Python
	'''

    nwv = len(lambdavector)
    nuvector = c * 1.e6 / lambdavector  # Hz

    nsed = 1e4
    lambda_mod = loggen(1e3, 8.0, nsed)  # microns
    nu_mod = c * 1.e6 / lambda_mod  # Hz

    #Lorenzo's version had: H0=70.5, Omega_M=0.274, Omega_L=0.726 (Hinshaw et al. 2009)
    #cosmo = Planck15#(H0 = 70.5 * u.km / u.s / u.Mpc, Om0 = 0.273)
    conversion = 4.0 * np.pi * (
        1.0E-13 * cosmo.luminosity_distance(zin) * 3.08568025E22
    )**2.0 / L_sun  # 4 * pi * D_L^2    units are L_sun/(Jy x Hz)

    Lir = Lrf / conversion  # Jy x Hz

    Ain = np.zeros(ngal) + 1.0e-36  #good starting parameter
    betain = np.zeros(ngal) + b
    alphain = np.zeros(ngal) + 2.0

    fit_params = Parameters()
    fit_params.add('Ain', value=Ain)
    #fit_params.add('Tin', value= Trf/(1.+zin), vary = False)
    #fit_params.add('betain', value= b, vary = False)
    #fit_params.add('alphain', value= alphain, vary = False)

    #pdb.set_trace()
    #THE LM FIT IS HERE
    #Pfin = minimize(sedint, fit_params, args=(nu_mod,Lir.value,ngal))
    Pfin = minimize(sedint,
                    fit_params,
                    args=(nu_mod, Lir.value, ngal, Trf / (1. + zin), b,
                          alphain))

    #pdb.set_trace()
    flux_mJy = sed(Pfin.params, nuvector, ngal, Trf / (1. + zin), b, alphain)

    return flux_mJy
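
# A minimal usage sketch (illustrative only): it assumes this module's globals
# (c, L_sun, cosmo, loggen) and the sed/sedint helpers are available, so the
# values below are placeholders rather than a validated call.
import numpy as np

lambdavector = np.array([24., 70., 160., 250., 350., 500.])  # microns
flux_mJy = simple_flux_from_greybody(lambdavector, Trf=20., b=2.0,
                                     Lrf=np.array([1.e10]), zin=0.5, ngal=1)
print(flux_mJy)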
Example #28
def i_sense_fit_simultaneous(x, z, centers, widths, x0bounds, constrain=None, span=None):
    """ fit multiple sensor current data simultaneously
        with the option to force one or more parameters to the same value across all 
        datasets """
        
    def i_sense_dataset(params, i, xx):
        # x0, theta, i0, i1, i2
        
        x0 = params['x0_{0:d}'.format(i)]
        theta = params['theta_{0:d}'.format(i)]
        i0 = params['i0_{0:d}'.format(i)]
        i1 = params['i1_{0:d}'.format(i)]
        i2 = params['i2_{0:d}'.format(i)]
        
        return i_sense(xx, x0, theta, i0, i1, i2)
    
    def i_sense_objective(params, xx, zz, idx0, idx1):
        """ calculate total residual for fits to several data sets held
            in a 2-D array"""
        
        n,m = zz.shape
        resid = []
        # make residual per data set
        for i in range(n):
            resid.append(zz[i,idx0[i]:idx1[i]] - i_sense_dataset(params, i, xx[i,idx0[i]:idx1[i]]))
        # now flatten this to a 1D array, as minimize() needs
        return np.concatenate(resid)
    
    # get the dimensions of z
    if(z.ndim==1):
        m = len(z)
        n = 1
        z.shape = (n,m)
    elif(z.ndim==2):
        n,m = z.shape
    else:
        raise ValueError('the shape of zarray is wrong')
    
    # deal with the shape of x
    # should have a number of rows = 1 or number of rows = len(z)
    
    if(x.ndim==1 or x.shape[0]==1):
        x = np.tile(x, (n,1))
    elif(x.shape[0]==n):
        pass
    else:
        raise ValueError('the shape of xarray is wrong')
        
    if(span):
        icenters = np.nanargmin(np.abs(x.transpose()-centers), axis=0)
        ilow = np.nanargmin(np.abs(x.transpose()-(centers-span)), axis=0)
        ihigh = np.nanargmin(np.abs(x.transpose()-(centers+span)), axis=0)
    else:
        ilow = np.zeros(n, dtype=int)
        ihigh = -1 * np.ones(n, dtype=int)
    
    columns = ['x0', 'theta', 'i0', 'i1', 'i2']
    df = pd.DataFrame(columns=columns)
    
    # add constraints specified in the 'constrain' list
    if(constrain):
        
        # create parameters, one per data set
        fit_params = Parameters()

        for i in range(n):
            fit_params.add('x0_{0:d}'.format(i), value=centers[i], min=x0bounds[0], max=x0bounds[1])
            fit_params.add('theta_{0:d}'.format(i), value=widths[i], min=0.05, max=10.0)
            fit_params.add('i0_{0:d}'.format(i), 
                            value=abs(z[i,ilow[i]:ihigh[i]].max()-z[i,ilow[i]:ihigh[i]].min()),
                            min=0.001, max=10.0)
            fit_params.add('i1_{0:d}'.format(i), value=0.1, min=0.0, max=10.0)
            fit_params.add('i2_{0:d}'.format(i), value=z[i,ilow[i]:ihigh[i]].mean(), min=0.0, max=20.0)

        for p in constrain:
            for i in range(1,n):
                fit_params['{0}_{1:d}'.format(p,i)].expr = '{0}_{1:d}'.format(p,0)

        # run the global fit to all the data sets
        result = minimize(i_sense_objective, fit_params, args=(x, z, ilow, ihigh))

        valdict = result.params.valuesdict()
        for i in range(n):
            df.loc[i] = [valdict['{0}_{1:d}'.format(c, i)] for c in columns]
    else:
        # no parameters need to be fixed between data sets
        # fit them all separately (much faster)
        for i in range(n):
            p0 = [centers[i], widths[i], abs(z[i,ilow[i]:ihigh[i]].max()-z[i,ilow[i]:ihigh[i]].min()),
                      0.1, z[i,ilow[i]:ihigh[i]].mean()]
            bounds = [(x0bounds[0], 0.05, 0.001, 0.0, 0.0), (x0bounds[1], 10.0, 10.0, 10.0, 20.0)]
            df.loc[i], _ = curve_fit(i_sense, x[i,ilow[i]:ihigh[i]], z[i,ilow[i]:ihigh[i]], p0=p0, bounds=bounds)
                          
    return df
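
# A minimal usage sketch with synthetic sensor traces (i_sense itself is
# defined elsewhere in this module, so the numbers below are illustrative).
import numpy as np

x = np.linspace(-1., 1., 201)
centers = [-0.2, 0.0, 0.2]
z = np.vstack([0.5 + 0.1 * np.tanh((x - x0) / 0.1) for x0 in centers])
df = i_sense_fit_simultaneous(x, z, centers, widths=[0.1, 0.1, 0.1],
                              x0bounds=(-0.5, 0.5), constrain=['theta'],
                              span=0.6)
print(df)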
Example #29
	    def fit_lm(self):
	        # use Levenberg Mardquart method


	         # define objective function: returns the array to be minimized
	        def fcn2min(params, x, y, yerr):

	            n = len(x)
	            model = np.zeros(n,dtype=ctypes.c_double)
	            model = np.require(model,dtype=ctypes.c_double,requirements='C')

	            occultquadC( x,params['RpRs'].value,params['aRs'].value, params['period'].value, params['inc'].value,
	                        params['gamma1'].value, params['gamma2'].value, params['ecc'].value, params['omega'].value,
	                        params['tmid'].value, n, model )

	            model *= (params['a0'] + x*params['a1'] + x*x*params['a2'])

	            return (model - y)/yerr

	        #Rp,aR,P,i,u1,u2,e,omega,tmid,a0,a1,a2 = self.p_init
	        v = [ (i[0] != i[1]) for i in self.bounds ] # boolean array to vary parameters
	        pnames = ['RpRs','aRs','period','inc','gamma1','gamma2','ecc','omega','tmid','a0','a1','a2']
	        params = Parameters()

	        for j in range(len(self.p_init)):

	            # the algorithm does not like the distance between min and max to be zero
	            if v[j]:
	                params.add(pnames[j], value=self.p_init[j], vary=v[j], min=self.bounds[j][0], max=self.bounds[j][1])
	            else:
	                if self.bounds[j][0] is None:

	                    if self.bounds[j][1] is None:  # no bounds at all
	                        params.add(pnames[j], value=self.p_init[j], vary=True)
	                    else:  # upper bound only
	                        params.add(pnames[j], value=self.p_init[j], vary=True, max=self.bounds[j][1])

	                elif self.bounds[j][1] is None:

	                    if self.bounds[j][0] is None:  # no lower bound
	                        params.add(pnames[j], value=self.p_init[j], vary=True)
	                    else:  # lower bound only
	                        params.add(pnames[j], value=self.p_init[j], vary=True, min=self.bounds[j][0])

	                else:
	                    params.add(pnames[j], value=self.p_init[j], vary=v[j])


	         # do the fit, here with the default leastsq method
	        result = lminimize(fcn2min, params, args=(self.t,self.y,self.yerr))


	        params = result.params
	        n = len(self.t)
	        model = np.zeros(n,dtype=ctypes.c_double)
	        model = np.require(model,dtype=ctypes.c_double,requirements='C')
	        occultquadC( self.t,params['RpRs'].value,params['aRs'].value, params['period'].value, params['inc'].value,
	                    params['gamma1'].value, params['gamma2'].value, params['ecc'].value, params['omega'].value,
	                    params['tmid'].value, n, model )

	        self.final_model = model
	        self.residuals = result.residual
	        self.params = result.params
	        self.result = result


	        A0 = params['a0'].value
	        A1 = params['a1'].value
	        A2 = params['a2'].value
	        self.amcurve = A0 + self.t*A1 + self.t*self.t*A2
	        self.final_curve = self.final_model/self.amcurve
	        self.phase = (self.t - params['tmid'].value) / params['period'].value
Example #30
def gauss_fit(pnr, min_peak_sep, threshold=None, weighted=False, plot=False):
    """
    improve the precision in the location of the peaks by fitting them
    using a sum of Gaussian distributions
    NOTE: unlike gauss_fit_interp, it does not assume a Poissonian distribution
    :param pnr: 2D histogram, output of np.histogram, assumes it's a sum of gaussians
    :param min_peak_sep: Minimum distance between each detected peak.
    :param threshold: Normalized threshold, float between [0., 1.]
    :param weighted: if True, associates a Poissonian error with the
        frequencies
    """

    # unpack the histogram into x and y values
    frequencies = pnr[0]

    # match the number of bins to the frequencies, find the step size
    x_val = pnr[1]
    step = np.diff(x_val)[0]
    x_val = x_val[:-1] + step / 2.

    # find a first approximation of the peak location using local differences
    peaks_pos, peak_height = peaks(pnr, min_peak_sep, threshold)

    # build a fitting model with a number of gaussian distributions matching
    # the number of peaks
    fit_model = np.sum([
        GaussianModel(prefix='g{}_'.format(k + 1))
        for k, _ in enumerate(peaks_pos)
    ])

    # Generate the initial conditions for the fit
    p = Parameters()

    p.add('A', np.max(peak_height) * min_peak_sep)
    p.add('g1_sigma', min_peak_sep / 5, min=0)
    p.add('sigma_p', min_peak_sep / np.sqrt(2) / np.pi, min=0)

    # Centers
    p.add('g1_center', peaks_pos[0], min=0)
    p.add('g2_center', peaks_pos[1], min=0)
    [
        p.add(
            'g{}_center'.format(k + 3),
            j,
            # expr='g{}_center + Delta_E'.format(k + 1)
        ) for k, j in enumerate(peaks_pos[2:])
    ]

    # amplitudes
    [
        p.add(
            'g{}_amplitude'.format(k + 1),
            j * min_peak_sep / np.sqrt(2),
            # expr='A * exp(-n_bar) * n_bar**{} / factorial({})'.format(k, k),
            min=0) for k, j in enumerate(peak_height)
    ]

    # fixed width
    [
        p.add(
            'g{}_sigma'.format(k + 2),
            min_peak_sep / np.sqrt(2) / np.pi,
            min=0,
            # expr='sigma_p * sqrt({})'.format(k + 1)
            # expr='sigma_p'
        ) for k, _ in enumerate(peak_height[1:])
    ]

    if weighted:
        # generates the poissonian errors, correcting for zero values
        err = np.sqrt(frequencies)
        err[frequencies == 0] = 1

        result = fit_model.fit(frequencies,
                               x=x_val,
                               params=p,
                               weights=1 / err,
                               method='powell')
    else:
        result = fit_model.fit(frequencies, x=x_val, params=p)

    if plot:
        plt.figure(figsize=(10, 5))
        plt.errorbar(pnr[1][:-1] + step / 2,
                     pnr[0],
                     yerr=np.sqrt(pnr[0]),
                     linestyle='',
                     ecolor='black',
                     color='black')
        plt.plot(x_val, result.eval(x=x_val))
        [
            plt.scatter(x_val,
                        result.eval_components(x=x_val)['g{}_'.format(k + 1)],
                        marker='.') for k, _ in enumerate(result.components)
        ]
        # plt.axvline(th01,color='black', label='th01')
        plt.legend()
        print(result.fit_report())
    return result
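
# A minimal usage sketch: build a photon-number-like histogram from synthetic
# pulse areas and fit it (it assumes the peaks() helper and the lmfit imports
# used above are in scope; peak spacing and threshold are illustrative).
import numpy as np

rng = np.random.default_rng(0)
areas = np.concatenate([rng.normal(mu, 0.12, 3000) for mu in (0., 1., 2.)])
pnr = np.histogram(areas, bins=150)
result = gauss_fit(pnr, min_peak_sep=0.5, threshold=0.1)
print(result.best_values)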