Example #1
def make_gauss(folder):

    df = pd.read_csv(folder + "data.csv")
    print(df)
    # print(df['20.0'])
    df.drop(["Unnamed: 0", "t"], axis=1, inplace=True)

    # print(df)

    xs = np.array([float(x) for x in df.columns])
    ys = np.array([sum(df[x]) for x in df.columns])

    def f(x, sigma, amp, mu, base):
        return base + amp * np.exp(-((x - mu) / sigma)**2 / 2)

    gmodel = models.Model(f)
    # params = gmodel.guess(ys, x=xs)
    params = Parameters()
    params.add_many(('sigma', 5, True, 1), ('amp', 1000, True, 1),
                    ('mu', 35, True, 1), ('base', 200, True, 1))
    fit_result = gmodel.fit(ys, params=params, x=xs)
    print(folder)
    print(fit_result.fit_report())
    print()

    # plot the summed data and the fit on a module-level matplotlib Axes `ax`
    # defined elsewhere in the source project
    ax.plot(xs, ys, label=folder[:-1])
    ax.plot(xs, fit_result.eval(), label=folder[:-1] + 'fit')
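For reference, a minimal self-contained sketch of the same fitting pattern, with synthetic data standing in for the project's data.csv and the plotting axes omitted (all names here are illustrative, not from the source project):

import numpy as np
from lmfit import Model, Parameters

def f(x, sigma, amp, mu, base):
    return base + amp * np.exp(-((x - mu) / sigma)**2 / 2)

# synthetic stand-in for the summed CSV columns
xs = np.linspace(10.0, 60.0, 101)
rng = np.random.default_rng(0)
ys = f(xs, sigma=5.0, amp=1000.0, mu=35.0, base=200.0) + rng.normal(0.0, 10.0, xs.size)

gmodel = Model(f)
params = Parameters()
# each tuple is (name, value, vary, min); max, expr and brute_step keep their defaults
params.add_many(('sigma', 5, True, 1), ('amp', 1000, True, 1),
                ('mu', 35, True, 1), ('base', 200, True, 1))
fit_result = gmodel.fit(ys, params=params, x=xs)
print(fit_result.fit_report())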
Example #2
    def _setup_params_from(self, initial_guess, data):  # 5 ms
        params = Parameters()
        x, y, z = self._make_center_priors(data, initial_guess)  # 4.47 ms
        min_index = data.medium_index * 1.001
        n = Parameter(name='n',
                      value=initial_guess['n'],
                      min=min_index,
                      max=2.33)
        r = Parameter(name='r', value=initial_guess['r'], min=0.05, max=5)
        params.add_many(x, y, z, n, r)
        if self.theory == 'mieonly':
            alpha_val = self._alpha_guess(initial_guess)
            params.add(name='alpha', value=alpha_val, min=0.05, max=1.0)
        elif self.theory == 'mielens':
            angle_val = self._lens_guess(initial_guess)
            params.add(name='lens_angle', value=angle_val, min=0.05, max=1.1)
        if self.theory == 'mielensalpha':
            alpha_val = self._alpha_guess(initial_guess)
            angle_val = self._lens_guess(initial_guess)
            params.add(name='alpha', value=alpha_val, min=0.05, max=1.0)
            params.add(name='lens_angle', value=angle_val, min=0.05, max=1.1)

        if 'illum_wavelen' in initial_guess:
            wavelength = initial_guess['illum_wavelen']
            params.add(name='illum_wavelen',
                       value=wavelength,
                       min=.1,
                       max=2.000)
        return params
Example #3
def calc_A_unbinned(data, bg_params, sig_params):
    '''Given input data and the true distribution parameters, calculate the 95% UL for the unbinned
    data.  The bg and signal parameters are held fixed.  The best-fit A value is determined first,
    then the 95% UL is determined by scanning for the correct value of A that leads to a p-value of
    0.05.  This procedure must be run many times and averaged to get the mean UL value and error
    bands.'''

    mu    = sig_params[0]
    sigma = sig_params[1]
    alpha = bg_params[0]
    beta  = bg_params[1]
    gamma = bg_params[2]

    params = Parameters()
    params.add_many(
        ('C'     , 0.01  , True  , 0    , 1    , None , None) ,
        ('mu'    , mu    , False , None , None , None , None) ,
        ('sigma' , sigma , False , None , None , None , None) ,
        ('alpha' , alpha , False , None , None , None , None) ,
        ('beta'  , beta  , False , None , None , None , None) ,
        ('gamma' , gamma , False , None , None , None , None)
    )

    bg_sig_model = Model(bg_sig_pdf, params)

    # Obtain the best fit value for A
    mle_fitter = NLLFitter(bg_sig_model)
    mle_res = mle_fitter.fit(np.asarray(data), calculate_corr=False,
                             verbose=False)

    return mle_res.x[0]
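Each tuple passed to add_many above follows lmfit's positional order (name, value, vary, min, max, expr, brute_step). As a sketch, the 'C' entry is equivalent to the explicit keyword form:

from lmfit import Parameters

params = Parameters()
# equivalent to the tuple ('C', 0.01, True, 0, 1, None, None)
params.add('C', value=0.01, vary=True, min=0, max=1)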
Example #4
def fitz_star(spec, zguess, fluxpoly=True):
   
   flux_median = np.median(spec['flux'])
   parameters = Parameters()
   #                    (Name,      Value,            Vary,   Min,  Max,   Expr)
   parameters.add_many(('z',        zguess,           True,  None,  None,  None),
                       ('eigen1',   flux_median*0.1,  True,  None,  None,  None),
                       ('eigen2',   flux_median*0.1,  True,  None,  None,  None),
                       ('eigen3',   flux_median*0.1,  True,  None,  None,  None),
                       ('eigen4',   flux_median*0.1,  True,  None,  None,  None),
                       ('eigen5',   flux_median*0.1,  True,  None,  None,  None),
                       ('eigen6',   flux_median*0.1,  True,  None,  None,  None),
                       ('eigen7',   flux_median*0.1,  True,  None,  None,  None),
                       ('eigen8',   flux_median*0.1,  True,  None,  None,  None),
                       ('eigen9',   flux_median*0.1,  True,  None,  None,  None),
                       ('eigen10',   flux_median*0.1,  True,  None,  None,  None),
                       ('eigen11',   flux_median*0.1,  True,  None,  None,  None),
                       ('fluxcal0', 0.0,              fluxpoly,  None,  None,  None),
                       ('fluxcal1', 0.0,              fluxpoly,  None,  None,  None),
                       ('fluxcal2', 0.0,              fluxpoly,  None,  None,  None))
   
   star_model = Model(eigensum_star, missing='drop')
   result = star_model.fit(spec['flux'], wave=spec['wave'], weights=1/spec['error'],
                             params=parameters, missing='drop')
   return result
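Note how the fluxpoly argument is passed straight through as the vary flag for the three flux-calibration terms, so they can be frozen at 0.0 from the call site. A hypothetical call (assuming spec is a table with 'flux', 'wave' and 'error' columns):

# fit with the flux-calibration polynomial held fixed
result = fitz_star(spec, zguess=0.5, fluxpoly=False)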
Example #5
File: DLfit.py Project: Radioteddy/MerPy
def params_creating(filename, w):
    """
    Returns Parameters object with initial guesses

    Parameters:
    ----------
    filename: str
        Name of the file. May be in various text formats; an example of the data structure is in the file init_guess.init
    w: 1darray(dtype=float)
        array of experimental frequencies

    Returns:
    --------
    parameter: Parameters()
    """
    parameter = Parameters()
    e0, gamma, A = read_from_file(filename, w)
    n = len(e0)
    for i in range(n):
        str1 = 'E0_' + str(i + 1)
        str2 = 'gamma_' + str(i + 1)
        str3 = 'A_' + str(i + 1)
        parameter.add_many((str1, e0[i, 0], True, e0[i, 1], e0[i, 2]),
                           (str2, gamma[i, 0], True, gamma[i, 1], gamma[i, 2]),
                           (str3, A[i, 0], True, A[i, 1], A[i, 2]))
    return parameter
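A hedged sketch of the naming pattern the loop produces, using made-up (value, min, max) rows for the E0 column in place of read_from_file output:

import numpy as np
from lmfit import Parameters

# made-up (value, min, max) rows standing in for the file contents
e0 = np.array([[10.0, 8.0, 12.0], [20.0, 18.0, 22.0]])
parameter = Parameters()
for i in range(len(e0)):
    parameter.add_many(('E0_' + str(i + 1), e0[i, 0], True, e0[i, 1], e0[i, 2]))
print(list(parameter))  # ['E0_1', 'E0_2']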
Example #6
def approximate_angles(goal,
                       side_lengths,
                       angle_guesses=(90, 90, 90),
                       report=True,
                       iter_cb=None):
    """Given the desired end effector location (goal), the lengths of the arm segments, and the initial guesses of the
    angles, approximate the angles the servos must be set to."""

    # The base servo angle can be calculated and fixed.
    base = np.rad2deg(np.arctan(goal[0] / goal[2]))

    # define the starting values of the parameters of the minimized function
    base = Parameter('base', base, vary=False)  # base is exact
    shoulder = Parameter('shoulder', angle_guesses[0], min=0, max=180)
    elbow = Parameter('elbow', angle_guesses[1], min=0, max=180)
    wrist = Parameter('wrist', angle_guesses[2], min=0, max=180)
    params = Parameters()
    params.add_many(base, shoulder, elbow, wrist)

    result = minimize(end_effector,
                      params,
                      args=(goal, side_lengths),
                      method="leastsq",
                      iter_cb=iter_cb)

    if report:
        print(fit_report(result.params))

    return result
Example #7
def parameters(ini_values):
    from lmfit import Parameters
	
    k1, k2, k3, k4, k5, k6, k7, k8, k9, k10, k11, UA, mu_0, E, q, prim_stab_0, LDH_0 = ini_values
	
    p = Parameters()
    #          (         Name,       Value,  Vary,    Min,     Max)    
    p.add_many((         'k1',          k1,  True,    1.6,     2.1),
               (         'k2',          k2,  True,    8.0,    46.0),
               (         'k3',          k3,  True,    0.0,     6.0),
               (         'k4',          k4,  True,    0.0,     2.1),
               (         'k5',          k5,  True,    0.0,    0.03),
               (         'k6',          k6,  True,    0.0,    39.0),
               (         'k7',          k7,  True,    0.0,     2.7),
               (         'k8',          k8,  True,    0.0,     7.9),
               (         'k9',          k9,  True,    0.0,    13.1),
               (        'k10',         k10,  True,    0.7,    10.9),
               (        'k11',         k11,  True,    2.0,     3.6),
               (         'UA',          UA,  True,  275.0,   402.0),
               (       'mu_0',        mu_0, False,    0.0,     0.1),
               (          'E',           E, False, 5000.0,    None),
               (          'q',           q, False,    0.0,    17.0),
               ('prim_stab_0', prim_stab_0, False,    0.5,     1.3),
               (      'LDH_0',       LDH_0, False,   None,    None))
    return p
Example #8
def mle_ou(t, s):
    """Maximum-likelihood estimator for standard OU"""
    def log_likelihood(q):
        """Calculates log likelihood of a standard OU path"""
        K = q["kappa"]
        theta = q["theta"]
        sigma = q["sigma"]
        dt = np.diff(t)
        mu = VasicekModel.mean(s[:-1], dt, K, theta)
        sigma = VasicekModel.std(dt, K, sigma)  # transition std; deliberately reuses the name sigma
        return -np.sum(scipy.stats.norm.logpdf(s[1:], loc=mu, scale=sigma))

    params = Parameters()
    params.add_many(
        ("kappa", 0.5, True, 1e-6, None),
        ("theta", np.mean(s), True, 0.0, None),
        ("sigma", est_sigma_quadratic_variation(t, s), True, 1e-8, None),
    )

    result = minimize(
        log_likelihood,
        params,
        method="L-BFGS-B",
        options={
            "maxiter": 500,
            "disp": False
        },
    )
    return result
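Because log_likelihood returns a single summed NLL rather than a residual array, a scalar minimizer such as L-BFGS-B (or Nelder-Mead) is needed; leastsq would expect a residual vector. A minimal sketch of the same pattern on a toy objective:

from lmfit import Parameters, minimize

p = Parameters()
p.add_many(('a', 1.0, True, 0, None))
# scalar objective: minimize (a - 3)^2
out = minimize(lambda q: (q['a'].value - 3.0) ** 2, p, method='L-BFGS-B')
print(out.params['a'].value)  # approximately 3.0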
Example #9
def heartrate_model(heartrate, power, **kwargs):
    """
    Source:
    De Smet et al., Heart rate modelling as a potential physical finess assessment for runners and cyclists.
    http://ceur-ws.org/Vol-1842/paper_13.pdf
    """
    # Initial model parameters
    model_params = Parameters()
    model_params.add_many(
        ('hr_rest', kwargs.get('hr_rest', 75)),
        ('hr_max', kwargs.get('hr_max', 200)),
        ('dhr', kwargs.get('dhr', 0.30)),
        ('tau_rise', kwargs.get('tau_rise', 24)),
        ('tau_fall', kwargs.get('tau_fall', 30)),
        ('hr_drift', kwargs.get('hr_drift', 3 * 10**-5)),
    )

    model = minimize(
        fcn=_heartrate_model_residuals,
        params=model_params,
        method='nelder',  # Nelder-Mead
        args=(power, heartrate))

    predictions = _heartrate_model_predict(model.params, power)

    return model, predictions
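Note the two-element (name, value) tuples above: with only a name and value supplied, add_many leaves vary=True and the bounds open, so every model parameter here is free. A quick check of that behaviour:

from lmfit import Parameters

params = Parameters()
params.add_many(('hr_rest', 75), ('hr_max', 200))
print(params['hr_rest'].vary, params['hr_rest'].min)  # True -inf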
Example #10
def confidence_interval():
    """Return the result of the confidence interval (ci) calculation."""
    def residual(pars, x, data=None):
        argu = (x * pars['decay'])**2
        shift = pars['shift']
        if abs(shift) > np.pi / 2:
            shift = shift - np.sign(shift) * np.pi
        model = pars['amp'] * np.sin(shift +
                                     x / pars['period']) * np.exp(-argu)
        if data is None:
            return model
        return model - data

    p_true = Parameters()
    p_true.add_many(('amp', 14.0), ('period', 5.33), ('shift', 0.123),
                    ('decay', 0.010))

    x = np.linspace(0.0, 250.0, 2500)
    data = residual(p_true, x) + np.random.normal(scale=0.7215, size=x.size)

    fit_params = Parameters()
    fit_params.add_many(('amp', 13.0), ('period', 2), ('shift', 0.0),
                        ('decay', 0.02))

    mini = Minimizer(residual,
                     fit_params,
                     fcn_args=(x, ),
                     fcn_kws={'data': data})
    out = mini.leastsq()
    ci = conf_interval(mini, out)
    return ci
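To inspect the returned dictionary, lmfit's report_ci helper prints it as a table (a usage sketch):

from lmfit import report_ci

ci = confidence_interval()
report_ci(ci)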
Example #11
def generate_q0_via_nll_unbinned_constrained(bg, data, bg_params):
    '''Perform two nll fits to data, one for bg+signal, one for bg-only.
    Use these values to create the q0 statistic.'''

    data = np.asarray(data)
    bg = np.asarray(bg)
    _bg_params = bg_params.copy()
    for p in _bg_params:
        _bg_params[p].vary = False

    bg_model = Model(bg_pdf, _bg_params)
    mc_bg_only_fitter = NLLFitter(bg_model, verbose=False)
    mc_bg_only_fitter.fit(bg, calculate_corr=False)

    bg_nll = bg_model.calc_nll(None, data)

    _sig_params = Parameters()
    _sig_params.add_many(
        ('C', 0.1, True, 0, 1, None, None),
        ('mu', 125.77, False, 120, 130, None, None),
        ('sigma', 2.775, False, 1, 4, None, None),
        ('a1', _bg_params['a1'].value, False, -1, 1, None, None),
        ('a2', _bg_params['a2'].value, False, -1, 1, None, None),
        ('a3', _bg_params['a3'].value, False, -1, 1, None, None))

    bg_sig_model = Model(bg_sig_pdf, _sig_params)

    mc_bg_sig_fitter = NLLFitter(bg_sig_model, verbose=False)
    mc_bg_sig_result = mc_bg_sig_fitter.fit(data, calculate_corr=False)
    bg_sig_nll = mc_bg_sig_result.fun
    q0 = 2 * max(bg_nll - bg_sig_nll, 0)

    return q0
Example #12
    def assess(self):
        # TODO: mutually exclusive args
        # TODO: check for when best and worst are not extrema

        if self.optimal_fit:
            points_params = Parameters()
            worst = Parameter("worst", value=self.worst, vary=False)
            lower_middle = Parameter("lower_middle")
            middle = Parameter("middle")
            upper_middle = Parameter("upper_middle")
            best = Parameter("best", value=self.best, vary=False)
            for param in [lower_middle, middle, upper_middle]:
                param.set(min=self.data.min(), max=self.data.max())
            points_params.add_many(worst, lower_middle, middle, upper_middle,
                                   best)

            def objective(params):
                v = params.valuesdict()
                u = Utility(name="u", points=list(v.values()))
                u.fit()
                chisqr = u.result.chisqr
                return chisqr

            self.optimal_points = minimize(objective,
                                           points_params,
                                           method=self.method)
            v = self.optimal_points.params.valuesdict()
            self.points = np.array(list(v.values()))
        return self.points
Example #13
def build_params_linear():
	params = Parameters()
	params.add_many(
		('a0', 1.),
		('a1', 1.),
		)
	return params
Example #14
 def test_fittheory(self):
     """
     Test the fitting of the available analytical functions
     """
     testspec = generate_cdespectrum()
     testspec = testspec.subspectrum(2500., 3700., clone=True)
     testfitter = Fitter.fromspectrum(testspec)
     testpars = Parameters()
     #        (Name,    Value,  Vary,   Min,    Max,     Expr)
     testpars.add_many(
         ('H', 0.1, True, 0.0, None, None, None),
         ('xR', 3000., True, 3000. - 50., 3000. + 50., None, None),
         ('w', 50.0, True, 0.0, None, None, None),
         ('tau', 50.0, True, 0.0, None, None, None))
     testfitter.add_analytical('flipped_egh',
                               testpars,
                               funcname='test fEGH')
     testpars = Parameters()
     #    (Name,    Value,  Vary,   Min,    Max,     Expr)
     testpars.add_many(
         ('peak', 1.0, True, 0.0, None, None, None),
         ('pos', 3300., True, 3300. - 300., 3300. + 300., None, None),
         ('fwhm', 500.0, True, 0.0, None, None, None),
     )
     testfitter.add_analytical('gaussian',
                               testpars,
                               funcname='test gaussian')
     testfitter.perform_fit()
Example #15
def Fit_frvsT(temp, freq, fitpara):
    # create a set of Parameters
    params = Parameters()
    print(fitpara)
    params.add_many(
        ('CPWC', fitpara[0], False, None, None, None),
        ('CPWG', fitpara[1], False, None, None, None),
        ('thick', fitpara[2], False, None, None, None),
        ('BCS', fitpara[3], False, None, None, None),
        ('Tc', fitpara[4], True, None, None, None),
        ('f0', fitpara[5], False, None, None, None),
        ('sigman', fitpara[6], False, None, None, None),
        ('A', fitpara[7], True, fitpara[7] * 0.9, fitpara[7] * 1.1, None))

    # do fit, here with leastsq model
    result = minimize(Fit_frvsT_Func, params, args=(temp, freq))

    # calculate final result
    residual = result.residual

    Tc = result.params['Tc'].value
    Tc_err = np.abs(result.params['Tc'].stderr / Tc)
    A = result.params['A'].value
    A_err = np.abs(result.params['A'].stderr / A)
    print(fit_report(result))
    return Tc, Tc_err, A, A_err, fit_report(result)
Example #16
def generate_initial_params(data_bg_mul2, data_bg_mul8, seed=5):

    # fit to the data distributions

    bg_params = Parameters()
    bg_params.add_many(
        ('alpha', -1.80808e+01, True, 1e-20, 20, None, None),
        ('beta', -8.21174e-02, True, -10, -1e-20, None, None),
        ('gamma', 8.06289e-01, True, 1e-20, 10, None, None)
    )

    bg_model = Model(bg_pdf, bg_params)
    bg_fitter = NLLFitter(bg_model)
    bg_result = bg_fitter.fit(data_bg_mul2, calculate_corr=False)

    n_bg = len(data_bg_mul8)

    gRandom.SetSeed(seed)

    # Set up bg sampling
    bg_pdf_ROOT = functools.partial(bg_pdf, doROOT=True)
    tf1_bg_pdf = TF1("tf1_bg_pdf", bg_pdf_ROOT, 2800, 13000, 3)
    tf1_bg_pdf.SetParameters(*bg_result.x)
    mc_bg = [tf1_bg_pdf.GetRandom() for i in range(n_bg)]

    be_bg = bayesian_blocks(mc_bg, p0=0.02)
    be_bg[-1] += 0.1
    be_bg = np.append(be_bg, [13000])
    be_bg[0] = 2800
    # print be_bg
    # hist(data_bg_mul8, bins=be_bg, scale='binwidth')
    # plt.show()

    return bg_result, n_bg, be_bg
Example #17
def fitting_bb_data(all_data):
    """ Fits the black body portion of the spectrum for each datafile.

    This definition goes through all of the Run classes for a given dataset and filters out the
    peaks to fit a black body curve to the filtered data. The LMFIT fitting routine is used as a wrapper for the SCIPY optmize tools to fit the Planck's Black Body curve. This function feeds in initial guesses for the parameters and returns a best fitted parameters for the curve.  Keyword Arguments:
    all_data -- List of Run classes

    """

    filtered_data = []
    for dat in all_data:
        counts = np.asarray(dat.calibrated_counts)
        lam = np.asarray(dat.calibrated_lam)
        bb = sig.medfilt(counts, kernel_size=81)

        p = Parameters()
        #          (Name   ,        Value,    Vary,    Min,     Max,    Expr)
        p.add_many(('T', 5000., True, None, None, None),
                   ('scale', 1E-15, True, None, None, None),
                   ('shift', 0.0, False, None, None, None))

        func = Model(pbb)
        result = func.fit(bb, lam=lam, params=p)

        dat.bb = bb
        dat.bb_err = 0.0
        dat.temp = result.params['T'].value
        dat.temp_err = result.params['T'].stderr
        dat.aic = result.aic
        filtered_data.append(dat)

    return filtered_data
Example #18
def main():
    #Input the name of the file to be read from the command line
    filename = str(sys.argv[1])
    #Read out the temperatures and times associated with them
    time, temps = np.loadtxt(filename, skiprows=1, usecols=(0, 1), unpack=True)
    plt.plot(time, temps, 'x', label="Heating Data")
    plt.legend()
    plt.xlabel("Time (min)")
    plt.ylabel("Temperature (C)")
    plt.show()

    exponentialModel = Model(exponentialFunction)
    p = Parameters()
    #The parameters in the equation are coupled, and so we can't solve for any of their actual values without knowing one
    #We choose an estimate of 30000 J/min for the heating rate
    p.add_many(
        ('h', 30000, False, None, None, None),
        ('k', 6000, True, 0, 10000, None),
        ('c', 600000, True, 0, 1000000, None),
        ('Tout', 16, False, None, None, None),
    )
    #Perform a least-squares regression to the data using the model function and parameters
    exponentialFit = exponentialModel.fit(temps, x=time, params=p)
    print(exponentialFit.fit_report())

    #Plot data and save the resulting figure.
    plt.plot(time, temps, 'x', label="Heating Data")
    plt.plot(time, exponentialFit.best_fit, label='Fit to Data')
    plt.legend()
    plt.xlabel("Time (min)")
    plt.ylabel("Temperature (C)")
    plt.grid()
    plt.suptitle("Fitting of Room Heating")
    plt.savefig("Room_Temp_Fitting.png")
    plt.show()
Example #20
def smooth_and_remove_step(x_lst, y_lst, x_min_flt,x_max_flt,rmv_step_bool):
    '''
    Takes the entire data set, x and y,
    cuts the spectra down so that x_min < x < x_max,
    THEN
    removes a step function from y_lst.
    '''
    
    # Restrict the fit
    x_fit = []
    y_fit = []
    
    top_lst = []
    bottom_lst = []
    
    for x,y in zip(x_lst, y_lst):
        # Restrict the fitting region
        if x_min_flt < x < x_max_flt:
            x_fit.append(float(x))
            y_fit.append(float(y))
        
        # Find top and bottom of step 
        if x < x_min_flt + 7:
            bottom_lst.append(float(y))
        elif x > x_max_flt - 7:
            top_lst.append(float(y))
    
    x_fit = np.asarray(x_fit)
    y_fit = np.asarray(y_fit)   
  
    top = np.mean(np.asarray(top_lst))
    bottom = np.mean(np.asarray(bottom_lst))
    delta = top-bottom
    
    if rmv_step_bool:
        # Step Parameters
        step_at = 100
        step_width = 1
        pp = Parameters()
        pp.add_many(('amplitude', delta),
                    ('sigma', step_width),
                    ('center', step_at))
        step = StepModel(form='erf', prefix='', independent_vars=['x'])

        y_fit = np.asarray([yy - bottom - step.eval(x=xx, params=pp)
                            for xx, yy in zip(x_fit, y_fit)])
    
    # rest is the same as smooth_the_data 
    
    # now we find the parameters using the - d^2/dx^2
    ysmooth = interp.interp1d(x_fit, y_fit, kind='cubic')
    # differentiate x 2
    yp = np.gradient(ysmooth(x_fit))
    ypp = np.gradient(yp)
    # we want the peaks of -d2/dx2 
    ypp = np.asarray([-x for x in ypp])
    
    return x_fit, y_fit, ysmooth, yp, ypp
Example #21
def parameters(ini_values):
    from lmfit import Parameters
    from Adjust_Kinetics import params
    p = Parameters()
    pi = params(ini_values)
    for i,tup in enumerate(pi):
        p.add_many(tup)
    return p
Example #22
def build_params():
	params = Parameters()
	params.add_many(
		('a0', 1.),
		('a1', 1.),
		('a2', 0.1),
		)
	return params
Example #23
    def test_add_many_params(self):
        # test that we can add many parameters, but only parameters are added.
        a = Parameter('a', 1)
        b = Parameter('b', 2)

        p = Parameters()
        p.add_many(a, b)

        assert_(list(p.keys()) == ['a', 'b'])
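add_many accepts Parameter instances, as tested here, and plain tuples interchangeably, so the two forms can be mixed (a sketch):

from lmfit import Parameter, Parameters

p = Parameters()
p.add_many(Parameter('a', 1), ('b', 2, True, 0, 10))
assert list(p.keys()) == ['a', 'b']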
Example #24
def sipm_phdfit(x, y, npk, nz_pe=0):
    """
    Function to fit a SiPM phd array with a Generalized Poisson 

    Parameters
    ----------
    x : float array
        Typically the pulse height in mV
    y : int array
        The number of events per bin
    npk : int scalar
        The number of distinguishable peaks in the PHD - for initial parameter guess
    nz_pe : If 0 - include pedestal in PHD
            If 1 - exclude pedestal

    Returns
    -------
    result : lmfit result object 
        Including initial guess, best fit, and all fit parameters

    """
    ymax = y.max()
    #find peaks in the PHD to npk
    for i in range(50):
        peaks, p_prop = find_peaks(y,
                                   prominence=ymax * (1 - i / 50),
                                   height=ymax / 10)
        if len(peaks) >= npk: break
    #now estimate the initial fit parameters
    mu = np.sum(p_prop['peak_heights'] *
                np.arange(nz_pe, npk + nz_pe)) / np.sum(p_prop['peak_heights'])
    nev = np.sum(y)
    xtalk = 0.5  #based on CHEC-S devices
    v_pe = np.mean(np.diff(x[peaks]))
    xoff = x[peaks[0]] - v_pe * nz_pe
    v_n = v_pe * 0.2
    v_gain = v_pe * 0.1
    thresh = 0

    smod = Model(sipm_fitfunc)
    pars = Parameters()
    # parameter constraints
    #           (Name,     Value,  Vary,   Min,  Max,  Expr, Brute Step)
    pars.add_many(('xoff', xoff, True, -50.0, 50.0, None, None),
                  ('mu', mu, True, 0.01, 50.0, None, None),
                  ('nev', nev, True, 1, 1e8, None, None),
                  ('xtalk', xtalk, True, 0.0, 0.75, None, None),
                  ('v_pe', v_pe, True, 0.0, 50.0, None, None),
                  ('v_n', v_n, True, 0.0, 50.0, None, None),
                  ('v_gain', v_gain, True, 0.0, 50.0, None, None),
                  ('thresh', thresh, False, 0, 500, None, None),
                  ('nz_pe', nz_pe, False, 0, 1, None, None))
    #solve
    result = smod.fit(y, params=pars, x=x, method='leastsq')
    return result
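A hypothetical call, assuming x and y hold a binned pulse-height distribution with three distinguishable peaks:

result = sipm_phdfit(x, y, npk=3, nz_pe=0)
print(result.fit_report())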
Example #25
def test_resample():
    p = Parameters()
    #           (Name,  Value,  Vary,   Min,  Max,  Expr)
    p.add_many(('amplitude',    100,  True, None, None,  None),
               ('decay'    ,   6e-7,  True,  None, None,  None),
               ('nu'       ,  1.0, True, None, None, None))
    x = linspace(0,500,50)
    y = strExp(x,p)
    y_err = abs(y - (y * normal(0.0,0.1,len(y))))

    return resample(x,y,yerr=None,max_replacement=100)
Example #26
 def test_fitlab(self):
   """
   Test the fitting of lab data
   """
   testspec = generate_cdespectrum()
   testfitter = Fitter(testspec.x.value,2.*testspec.y.value)
   testpars = Parameters()
   #                 (Name,  Value,  Vary,   Min,     Max,     Expr)
   testpars.add_many(('mul', 1.0,    True,   0.0,     None,    None))
   testfitter.add_empirical(testspec,testpars)
   testfitter.perform_fit()
Example #27
def test_peakfit():
    from lmfit.utilfuncs import gauss

    def residual(pars, x, data=None):
        g1 = gauss(x, pars['a1'].value, pars['c1'].value, pars['w1'].value)
        g2 = gauss(x, pars['a2'].value, pars['c2'].value, pars['w2'].value)
        model = g1 + g2
        if data is None:
            return model
        return (model - data)

    n = 601
    xmin = 0.
    xmax = 15.0
    noise = np.random.normal(scale=.65, size=n)
    x = np.linspace(xmin, xmax, n)

    org_params = Parameters()
    org_params.add_many(('a1', 12.0, True, None, None, None),
                        ('c1', 5.3, True, None, None, None),
                        ('w1', 1.0, True, None, None, None),
                        ('a2', 9.1, True, None, None, None),
                        ('c2', 8.1, True, None, None, None),
                        ('w2', 2.5, True, None, None, None))

    data = residual(org_params, x) + noise

    fit_params = Parameters()
    fit_params.add_many(('a1', 8.0, True, None, 14., None),
                        ('c1', 5.0, True, None, None, None),
                        ('w1', 0.7, True, None, None, None),
                        ('a2', 3.1, True, None, None, None),
                        ('c2', 8.8, True, None, None, None))

    fit_params.add('w2', expr='2.5*w1')

    myfit = Minimizer(residual,
                      fit_params,
                      fcn_args=(x, ),
                      fcn_kws={'data': data})

    myfit.prepare_fit()

    init = residual(fit_params, x)

    myfit.leastsq()

    print(' N fev = ', myfit.nfev)
    print(myfit.chisqr, myfit.redchi, myfit.nfree)

    report_fit(fit_params)

    fit = residual(fit_params, x)
    check_paras(fit_params, org_params)
Example #28
File: Gold.py Project: bzwartsenberg/Aa
def FD2p(x, data, T): #generate parameters for FD2
    params = FD2Guess(x,data,T)
    p = Parameters()
    p.add_many(('EF',   params[0],  True,   x[0],   x[-1],  None),
               ('T',    params[1],  False,  None,   None,   None),
               ('sig',  params[2],  True,   0.0,    None,   None),
               ('A',    params[3],  True,   0.0,    None,   None),
               ('dy1',  params[4],  True,   None,   None,   None),
               ('dy2',  params[5],  False,  None,   None,   None),
               ('c',    params[6],  True,   None,   None,   None))
    return p
Example #29
    def on_fit_data_triggered(self):
        try:
            print("start fitting")
            import operator
            # list_of_models = [Model(v.getModelFunction(),prefix=k, nan_policy="propagate") for k,v in self.noise_components.items()]
            list_of_models = list()
            list_of_params = list()
            for name, component in self.noise_components.items():
                if not component.enabled:
                    continue
                model, param = component.getFittingModelAndParams(
                    logMode=False)
                list_of_models.append(model)
                parameters = list(param.values())
                list_of_params.extend(parameters)
                # print()
            # list_of_models = [v.getFittingModelAndParams for k,v in self.noise_components.items()]
            # print("list of parameters")
            print(list_of_params)
            fit_model = reduce(operator.add, list_of_models)
            fit_parameters = Parameters()
            fit_parameters.add_many(*list_of_params)
            print()
            print(fit_parameters)

            data = self._displayData
            result = fit_model.fit(data, fit_parameters, f=self._displayFreq)
            print(result.fit_report())
            # print("best values")
            # print(result.params)
            # print(result.init_fit)
            # print("init fit")
            # print(result.init_fit)
            # print("best fit")
            # print(result.best_fit)
            # freq, data_converted = self.coordinate_transform.convert(self._displayFreq, result.init_fit)
            # self._initFitDataCurve.setData(freq, data_converted)
            # freq, data_converted = self.coordinate_transform.convert(self._displayFreq, result.best_fit)
            # self._bestFitDataCurve.setData(freq, data_converted)

            for name, param in result.best_values.items():
                try:
                    component_name, param_name = name.split(
                        BaseNoiseComponent.PREFIX_SPLITTER)
                    component = self.noise_components[component_name]
                    setattr(component, param_name, param)

                except Exception as e:
                    print("exception while setting fitted values")
                    print(e)

        except Exception as e:
            print("Exception occured while fitting data")
            print(str(e))
Example #31
def fitting_bb_data(all_data):
    """ Fits the black body portion of the spectrum for each datafile.

    This definition goes through all of the Run classes for a given dataset and filters out the
    peaks to fit a black body curve to the filtered data. The LMFIT fitting routine is used as a wrapper for the SCIPY optmize tools to fit the Planck's Black Body curve. This function feeds in initial guesses for the parameters and returns a best fitted parameters for the curve.  Keyword Arguments:
    all_data -- List of Run classes

    """

    #cal_data = np.loadtxt(os.getcwd()+'/suspect_calibration_data/CalibrationFile.txt')
    filtered_data = []
    #count = 0
    for dat in all_data:
        counts = np.asarray(dat.calibrated_counts)
        lam = np.asarray(dat.calibrated_lam)
        bb = sig.medfilt(counts, kernel_size=81)

        p = Parameters()
        #          (Name   ,        Value,    Vary,    Min,     Max,    Expr)
        p.add_many(('T', 5000., True, None, None, None),
                   ('scale', 1E-11, True, None, None, None),
                   ('shift', 0.0, False, None, None, None))

        func = Model(pbb)
        result = func.fit(bb, lam=lam, params=p)
        #print(dat.filename)
        #print(result.fit_report())

        dat.bb = bb
        dat.temp = result.params['T'].value
        dat.temp_err = result.params['T'].stderr
        dat.aic = result.aic
        filtered_data.append(dat)

    # pp = PdfPages('Filter_Test.pdf')
    # plt.figure()
    # plt.plot(lam,counts, 'k-', label='Raw Data')
    # plt.plot(lam[1::], corrected_counts, 'm-', label='Corrected Data')
    # ##plt.plot(cal_lam[1::], deconc_counts[1], 'm-', label='Deconc Data')
    # ##plt.plot(cal_lam[1::], rebinned_counts, 'c--', label='Deconc Data')
    # ##plt.plot(lam[1::], bb, 'r-', label='Filtered Data')
    # plt.plot(lam[1::], result.best_fit, 'b-', label='Fit Filtered Data')
    # plt.legend()
    # plt.savefig(pp, format='pdf')

    # plt.figure()
    ## plt.plot(lamp_det_lam[1::], cal_calculated, 'ko', label='Calculated')
    # plt.plot(cal_lam, cal_counts, 'b*', label='From File')
    # plt.legend()
    # plt.savefig(pp, format='pdf')
    #pp.close()

    return filtered_data
Example #32
def sineFITwLINEARbackground(param1, param2, **kwargs):
    """
    Bounded LS minimisation for the fitting of a sinefunction to a given dataset *with a linear background instead of a constant background*. It is assumed that you can represent param2 as a function of param1. The lmfit package is used to perform the bounded LS minimisation.
    
    DANGER this routine assumes you have a fixed frequency of 1 [unit**-1]. For example:
    - if param1 is time and param2 is flux, you will have a sine with a frequency of 1 c/d. 
    - if param1 is position and param2 is flux, you will have a sine with a frequency of 1 c/pix.
    
    Returns: The optimal lmfit parameter class.
    
    TODO provide options through kwargs to set the boundaries
    
    Returns: The optimal lmfit parameter class.
    
    @param param1: param1 measurements [???]
    @type param1: numpy array of length N
    @param param2: param2 measurements [???]
    @type param2: numpy array of length N
    
    @return: paramSINE
    @rtype: lmfit parameter class
    
    @kwargs: show_ME: Boolean to indicate if you want to see the report_fit - Default is False [Boolean]
    """
    show_ME = kwargs.get('show_ME', False)
    # Determination of the guesses to start your bounded LS fit with.
    constantGUESS = np.median(param2)  # param2
    amplitudeGUESS = np.max(np.abs(constantGUESS - param2)) / 2.  # param2
    frequencyGUESS = 1.  # DANGER Here is the frequency assumption.
    phaseGUESS = 0.1  # Using the param1[np.where(param2==np.max(param2))]-param1[0]%1-0.5 is best, when there is no scatter on param2
    slopeGUESS, constantGUESS = np.polyfit(param1, param2, 1)

    paramSINE = Parameters()
    #Make a params class for lmfit.
    #		  	(Name,		Value,		Vary,	Min,				Max,				Expr)
    paramSINE.add_many(
        ('amplitude', amplitudeGUESS, True, amplitudeGUESS * 0.1,
         amplitudeGUESS * 1.2, None),
        (
            'frequency', frequencyGUESS, False, frequencyGUESS - 0.05,
            frequencyGUESS + 0.05, None
        ),  # DANGER Here is the frequency assumption. (It is set to non-vary.)
        ('constant', constantGUESS, True, -abs(constantGUESS) * 1.5,
         abs(constantGUESS) * 1.5, None),
        ('phase', phaseGUESS, True, 0., 1., None),
        ('slope', slopeGUESS, True, -2 * slopeGUESS, +2 * slopeGUESS, None))
    resultSIN = minimize(BRITE.fitfunctions.ff_lmfitlmfit_sinslope_vs_data,
                         paramSINE,
                         args=(param1, param2))
    if show_ME:
        report_fit(paramSINE, show_correl=False)

    return paramSINE
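A hypothetical call following the docstring, with param1 as time and param2 as flux:

best_pars = sineFITwLINEARbackground(time, flux, show_ME=True)
print(best_pars['amplitude'].value)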
Example #33
 def test_fitres(self):
   """
   Test the returning of fit results
   """
   testspec = generate_cdespectrum()
   testfitter = Fitter(testspec.x.value,2.*testspec.y.value)
   testpars = Parameters()
   #                 (Name,  Value,  Vary,   Min,     Max,     Expr)
   testpars.add_many(('mul', 1.0,    True,   0.0,     None,    None))
   testfitter.add_empirical(testspec,testpars)
   testfitter.perform_fit()
   res = testfitter.fit_results()
Example #35
def gaussian_peak_fitting(all_data, peak_wavelength):
    all_data = fitting_bb_data(all_data)
    for dat in all_data:
        counts = np.asarray(dat.corrected_counts)
        lam = np.asarray(dat.wavelengths)
        peak_indexes = [
            i for i in range(np.size(lam))
            if lam[i] > (peak_wavelength - 10.) and lam[i] < (peak_wavelength +
                                                              10.)
        ]

        ### Before BB substraction
        peak_lam = lam[peak_indexes[0]:peak_indexes[-1]]
        peak_counts = counts[peak_indexes[0]:peak_indexes[-1]]
        p = Parameters()
        #          (Name   ,        Value,    Vary,    Min,     Max,    Expr)
        p.add_many(('amp', 15000, True, None, None, None),
                   ('cen', peak_wavelength, True, None, None, None),
                   ('wid', 3.0, True, None, None, None),
                   ('scale', 0.0, True, None, None, None))

        func = Model(gaussian)
        result = func.fit(peak_counts, x=peak_lam, params=p)
        print(result.fit_report())

        ### After BB subtraction
        peak_bb_sub_counts = peak_counts - np.asarray(
            dat.bb[peak_indexes[0]:peak_indexes[-1]])
        p = Parameters()
        #          (Name   ,        Value,    Vary,    Min,     Max,    Expr)
        p.add_many(('amp', 14000., True, None, None, None),
                   ('cen', peak_wavelength, True, None, None, None),
                   ('wid', 3.0, True, None, None, None),
                   ('scale', 0.0, True, None, None, None))

        func = Model(gaussian)
        result_bb = func.fit(peak_bb_sub_counts, x=peak_lam, params=p)
        print("With Black Body Subtraction")
        print(result_bb.fit_report())

        pp = PdfPages('Gaussian_Fit.pdf')
        plt.figure()
        #plt.plot(lam,counts, 'k-', label='Raw Data')
        plt.plot(peak_lam, peak_counts, 'ro', label='Peak Data')
        plt.plot(peak_lam, result.best_fit, 'b-', label='Best Fit Peak Data')
        plt.plot(peak_lam, peak_bb_sub_counts, 'ko', label='Peak BB Sub Data')
        plt.plot(peak_lam,
                 result_bb.best_fit,
                 'm-',
                 label='Best Fit Peak BB Sub Data')
        plt.legend()
        plt.savefig(pp, format='pdf')
        pp.close()
Example #36
def test_peakfit():
    from lmfit.utilfuncs import gaussian
    def residual(pars, x, data=None):
        g1 = gaussian(x, pars['a1'].value, pars['c1'].value, pars['w1'].value)
        g2 = gaussian(x, pars['a2'].value, pars['c2'].value, pars['w2'].value)
        model = g1 + g2
        if data is None:
            return model
        return (model - data)

    n    = 601
    xmin = 0.
    xmax = 15.0
    noise = np.random.normal(scale=.65, size=n)
    x = np.linspace(xmin, xmax, n)

    org_params = Parameters()
    org_params.add_many(('a1', 12.0, True, None, None, None),
                        ('c1',  5.3, True, None, None, None),
                        ('w1',  1.0, True, None, None, None),
                        ('a2',  9.1, True, None, None, None),
                        ('c2',  8.1, True, None, None, None),
                        ('w2',  2.5, True, None, None, None))

    data  = residual(org_params, x) + noise


    fit_params = Parameters()
    fit_params.add_many(('a1',  8.0, True, None, 14., None),
                        ('c1',  5.0, True, None, None, None),
                        ('w1',  0.7, True, None, None, None),
                        ('a2',  3.1, True, None, None, None),
                        ('c2',  8.8, True, None, None, None))

    fit_params.add('w2', expr='2.5*w1')

    myfit = Minimizer(residual, fit_params,
                      fcn_args=(x,), fcn_kws={'data':data})

    myfit.prepare_fit()

    init = residual(fit_params, x)


    myfit.leastsq()

    print(' N fev = ', myfit.nfev)
    print(myfit.chisqr, myfit.redchi, myfit.nfree)

    report_fit(fit_params)

    fit = residual(fit_params, x)
    check_paras(fit_params, org_params)
Example #37
def fit_experimental(time, YI, initial, x0, constants):
    def obj_func(params, time, YI):
        # unpack parameters to be fitted
        v = params.valuesdict()

        # create vector for lstsq solution
        constant_vector = array(
            (v['a'], v['b'], v['c'], v['d'], v['e'], v['f'], v['g']),
            dtype=float)

        # join each fit into a single residual array
        all_YI = array([])
        all_fits = array([])

        for i in range(len(time)):
            print('Busy with fit %d' % i)

            # simulate the Metrastat
            concentrations = odeint(metrastat,
                                    initial[i],
                                    time[i],
                                    args=(v['k12'], constants[i]))

            # use the simulation to estimate the YI
            fit = dot(array(concentrations), constant_vector)

            # update residual function
            all_YI = one_row(YI[i], all_YI)
            all_fits = one_row(fit, all_fits)

        print('\n\n\nNext iteration:\n\n')

        return all_YI - all_fits

    # set initial parameter values
    params = Parameters()

    #               (Name,   Value,  Vary,   Min,    Max,    Expr)
    params.add_many(('a', x0[0], True, None, None, None),
                    ('b', x0[1], True, None, None, None),
                    ('c', x0[2], True, None, None, None),
                    ('d', x0[3], True, None, None, None),
                    ('e', x0[4], True, None, None, None),
                    ('f', x0[5], True, None, None, None),
                    ('g', x0[6], True, None, None, None),
                    ('k12', x0[7], True, None, None, None))

    # do fit with leastsq model
    minimize(obj_func, params, args=(time, YI))

    return params.valuesdict()
Example #38
 def test_plotfit(self):
   """
   Test the plotting of fit results
   """
   testspec = generate_cdespectrum()
   testfitter = Fitter(testspec.x.value,2.*testspec.y.value)
   testpars = Parameters()
   #                 (Name,  Value,  Vary,   Min,     Max,     Expr)
   testpars.add_many(('mul', 1.0,    True,   0.0,     None,    None))
   testfitter.add_empirical(testspec,testpars)
   testfitter.perform_fit()
   fig = plt.figure()
   ax = fig.add_subplot(111)
   testfitter.plot_fitresults(ax)
   plt.close()
Example #39
    def time_confinterval(self):
        np.random.seed(0)
        x = np.linspace(0.3,10,100)
        y = 1/(0.1*x)+2+0.1*np.random.randn(x.size)

        p = Parameters()
        p.add_many(('a', 0.1), ('b', 1))

        def residual(p):
            a = p['a'].value
            b = p['b'].value

            return 1/(a*x)+b-y

        minimizer = Minimizer(residual, p)
        out = minimizer.leastsq()
        return conf_interval(minimizer, out)
Example #41
 def test_fitres_tofile(self):
   """
   Test the dumping of fit results to a file
   """
   testspec = generate_cdespectrum()
   testfitter = Fitter(testspec.x.value,2.*testspec.y.value)
   testpars = Parameters()
   #                 (Name,  Value,  Vary,   Min,     Max,     Expr)
   testpars.add_many(('mul', 1.0,    True,   0.0,     None,    None))
   testfitter.add_empirical(testspec,testpars)
   testpars=Parameters()
   #                (Name,    Value,  Vary,   Min,    Max,     Expr)
   testpars.add_many(
                    ('peak',   1.0,     True,   0.0,        None, None),
                    ('pos',    3000., True,   3000.-200.,  3000.+200., None),
                    ('fwhm',   50.0,  True,    0.0,        None, None),
                    )
   testfitter.add_analytical('gaussian',testpars,funcname='test gaussian')
   testfitter.perform_fit()
   res = testfitter.fitresults_tofile('testfile')
Example #42
 def test_fittheory(self):
   """
   Test the fitting of the available analytical functions
   """
   testspec = generate_cdespectrum()
   testfitter = Fitter.fromspectrum(testspec)
   testpars = Parameters()
   #                 (Name,    Value,  Vary,  Min,    Max, Expr)
   testpars.add_many(('lor1',   1.67, False,  None,   None,   None),
                     ('lor2',   195., False,   None,   None,   None),
                     ('lor3',    1.5, False,   None,   None,   None),
                     ('peak', 0.05,  True,   0.0,    0.1,    None),
                     ('pos',  2139.9,  True,   2129.9, 2149.9, None))
   testfitter.add_analytical('cde_lorentzian',testpars,funcname='test lorentzian')
   testpars=Parameters()
   #                (Name,    Value,  Vary,   Min,    Max,     Expr)
   testpars.add_many(
                    ('H',      1.0,    True,   0.0,        None, None),
                    ('xR',     3000.,  True,   3000.-50.,  3000.+50., None),
                    ('w',      50.0,  True,    0.0,        None, None),
                    ('tau',    50.0,  True,    0.0,        None, None)
                    )
   testfitter.add_analytical('flipped_egh',testpars,funcname='test fEGH')
   testpars=Parameters()
   #                (Name,    Value,  Vary,   Min,    Max,     Expr)
   testpars.add_many(
                    ('peak',   1.0,     True,   0.0,        None, None),
                    ('pos',    3000., True,   3000.-200.,  3000.+200., None),
                    ('fwhm',   50.0,  True,    0.0,        None, None),
                    )
   testfitter.add_analytical('gaussian',testpars,funcname='test gaussian')
   testfitter.perform_fit()
Example #43
def test_peakfit():
    def residual(pars, x, data=None):
        g1 = gaussian(x, pars['a1'], pars['c1'], pars['w1'])
        g2 = gaussian(x, pars['a2'], pars['c2'], pars['w2'])
        model = g1 + g2
        if data is None:
            return model
        return (model - data)

    n = 601
    xmin = 0.
    xmax = 15.0
    noise = np.random.normal(scale=.65, size=n)
    x = np.linspace(xmin, xmax, n)

    org_params = Parameters()
    org_params.add_many(('a1', 12.0, True, None, None, None),
                        ('c1', 5.3, True, None, None, None),
                        ('w1', 1.0, True, None, None, None),
                        ('a2', 9.1, True, None, None, None),
                        ('c2', 8.1, True, None, None, None),
                        ('w2', 2.5, True, None, None, None))

    data = residual(org_params, x) + noise

    fit_params = Parameters()
    fit_params.add_many(('a1', 8.0, True, None, 14., None),
                        ('c1', 5.0, True, None, None, None),
                        ('w1', 0.7, True, None, None, None),
                        ('a2', 3.1, True, None, None, None),
                        ('c2', 8.8, True, None, None, None))

    fit_params.add('w2', expr='2.5*w1')

    myfit = Minimizer(residual, fit_params, fcn_args=(x,),
                      fcn_kws={'data': data})

    myfit.prepare_fit()
    out = myfit.leastsq()
    check_paras(out.params, org_params)
Example #44
class MinimizerClassSuite:
    """
    Benchmarks for the Minimizer class
    """
    def setup(self):
        self.x = np.linspace(1, 10, 250)
        np.random.seed(0)
        self.y = (3.0 * np.exp(-self.x / 2)
                  - 5.0 * np.exp(-(self.x - 0.1) / 10.)
                  + 0.1 * np.random.randn(len(self.x)))

        self.p = Parameters()
        self.p.add_many(('a1', 4., True, 0., 10.),
                        ('a2', 4., True, -10., 10.),
                        ('t1', 3., True, 0.01, 10.),
                        ('t2', 3., True, 0.01, 20.))

        self.p_emcee = deepcopy(self.p)
        self.p_emcee.add('noise', 0.2, True, 0.001, 1.)

        self.mini_de = Minimizer(Minimizer_Residual,
                                 self.p,
                                 fcn_args=(self.x, self.y),
                                 kws={'seed': 1,
                                      'polish': False,
                                      'maxiter': 100})

        self.mini_emcee = Minimizer(Minimizer_lnprob,
                                    self.p_emcee,
                                    fcn_args=(self.x, self.y))

    def time_differential_evolution(self):
        return self.mini_de.minimize(method='differential_evolution')

    def time_emcee(self):
        return self.mini_emcee.emcee(self.p_emcee, steps=100, seed=1)
Example #45
  def test_fittheory_convolved(self):
    """
    Test the fitting of the available analytical functions with convolution enabled
    """
    testspec = generate_cdespectrum()
    testspec1 = testspec.subspectrum(2000.,2300.,clone=True)
    testpsf1 = convolution.Gaussian1DKernel(5)
    print(testspec1)
    testfitter1 = Fitter.fromspectrum(testspec1,psf=testpsf1)
    testpars = Parameters()
    #                 (Name,    Value,  Vary,  Min,    Max, Expr)
    testpars.add_many(('lor1',   1.67, False,  None,   None,   None),
                      ('lor2',   195., False,   None,   None,   None),
                      ('lor3',    1.5, False,   None,   None,   None),
                      ('peak', 0.05,  True,   0.0,    0.1,    None),
                      ('pos',  2139.9,  True,   2129.9, 2149.9, None))
    testfitter1.add_analytical('cde_lorentzian',testpars,funcname='test lorentzian')
    testfitter1.perform_fit()

    testspec2 = testspec.subspectrum(2500.,3700.,clone=True)
    testpsf2 = convolution.Gaussian1DKernel(5)
    testfitter2 = Fitter.fromspectrum(testspec2,psf=testpsf2)
    testpars=Parameters()
    #                (Name,    Value,  Vary,   Min,    Max,     Expr)
    testpars.add_many(
                     ('H',      1.0,    True,   0.0,        None, None),
                     ('xR',     3000.,  True,   3000.-50.,  3000.+50., None),
                     ('w',      50.0,  True,    0.0,        None, None),
                     ('tau',    50.0,  True,    0.0,        None, None)
                     )
    testfitter2.add_analytical('flipped_egh',testpars,funcname='test fEGH')
    testfitter2.perform_fit()
    testfitter3 = Fitter.fromspectrum(testspec2,psf=testpsf2)
    testpars=Parameters()
    #                (Name,    Value,  Vary,   Min,    Max,     Expr)
    testpars.add_many(
                     ('peak',   1.0,     True,   0.0,        None, None),
                     ('pos',    3000., True,   3000.-200.,  3000.+200., None),
                     ('fwhm',   50.0,  True,    0.0,        None, None),
                     )
    testfitter3.add_analytical('gaussian',testpars,funcname='test gaussian')
    testfitter3.perform_fit()
Example #46
from lmfit import minimize, Minimizer, Parameters

# EXAMPLE 1 #
# taken from: https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.brute.html

# create a set of Parameters
params = Parameters()
params.add_many(
        ('a', 2, False, None, None, None),
        ('b', 3, False, None, None, None),
        ('c', 7, False, None, None, None),
        ('d', 8, False, None, None, None),
        ('e', 9, False, None, None, None),
        ('f', 10, False, None, None, None),
        ('g', 44, False, None, None, None),
        ('h', -1, False, None, None, None),
        ('i', 2, False, None, None, None),
        ('j', 26, False, None, None, None),
        ('k', 1, False, None, None, None),
        ('l', -2, False, None, None, None),
        ('scale', 0.5, False, None, None, None),
        ('x', 0.0, True, -4.0, 4.0, None, 0.25),
        ('y', 0.0, True, -4.0, 4.0, None, 0.25),
    )

# define functions
def f1(p):
    par = p.valuesdict()
    return (par['a'] * par['x']**2 + par['b'] * par['x'] * par['y'] +
            par['c'] * par['y']**2 + par['d']*par['x'] + par['e']*par['y'] + par['f'])
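The trailing 0.25 in the 'x' and 'y' tuples above is the seventh positional field, brute_step, which sets the grid spacing when the objective is scanned with the brute method. A sketch of running the scan:

from lmfit import Minimizer

fitter = Minimizer(f1, params)
result = fitter.minimize(method='brute')
print(result.brute_x0)  # grid point with the lowest objective value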
Example #47
class DecompositionFitter(object):
    combinations = [(dist_1_key, dist_2_key) for dist_1_key in _keys for dist_2_key in _keys]

    def __init__(self, relations):
        self.data = relations.to_vector()

        self.params = Parameters()
        self.params.add_many(
            ("before_dist_1_beginning_dist_2_beginning", 0.5, True, 0.0, 1.0, None),
            ("similarity_dist_1_beginning_dist_2_beginning", 0.5, True, 0.0, 1.0, None),
            (
                "after_dist_1_beginning_dist_2_beginning",
                None,
                False,
                None,
                None,
                "1 - before_dist_1_beginning_dist_2_beginning",
            ),
            ("before_dist_1_beginning_dist_2_ending", 0.5, True, 0.0, 1.0, None),
            ("similarity_dist_1_beginning_dist_2_ending", 0.5, True, 0.0, 1.0, None),
            (
                "after_dist_1_beginning_dist_2_ending",
                None,
                False,
                None,
                None,
                "1 - before_dist_1_beginning_dist_2_ending",
            ),
            ("before_dist_1_ending_dist_2_beginning", 0.5, True, 0.0, 1.0, None),
            ("similarity_dist_1_ending_dist_2_beginning", 0.5, True, 0.0, 1.0, None),
            (
                "after_dist_1_ending_dist_2_beginning",
                None,
                False,
                None,
                None,
                "1 - before_dist_1_ending_dist_2_beginning",
            ),
            ("before_dist_1_ending_dist_2_ending", 0.5, True, 0.0, 1.0, None),
            ("similarity_dist_1_ending_dist_2_ending", 0.5, True, 0.0, 1.0, None),
            ("after_dist_1_ending_dist_2_ending", None, False, None, None, "1 - before_dist_1_ending_dist_2_ending"),
        )

        minimize(self.fitness, self.params)

        for param_key in self.params:
            self.params[param_key].value = round(self.params[param_key].value, 6)

    def fitness(self, params):
        model = numpy.zeros(13)

        before_dist_1_beginning_dist_2_beginning = params["before_dist_1_beginning_dist_2_beginning"].value
        similarity_dist_1_beginning_dist_2_beginning = params["similarity_dist_1_beginning_dist_2_beginning"].value
        after_dist_1_beginning_dist_2_beginning = params["after_dist_1_beginning_dist_2_beginning"].value
        before_dist_1_beginning_dist_2_ending = params["before_dist_1_beginning_dist_2_ending"].value
        similarity_dist_1_beginning_dist_2_ending = params["similarity_dist_1_beginning_dist_2_ending"].value
        after_dist_1_beginning_dist_2_ending = params["after_dist_1_beginning_dist_2_ending"].value
        before_dist_1_ending_dist_2_beginning = params["before_dist_1_ending_dist_2_beginning"].value
        similarity_dist_1_ending_dist_2_beginning = params["similarity_dist_1_ending_dist_2_beginning"].value
        after_dist_1_ending_dist_2_beginning = params["after_dist_1_ending_dist_2_beginning"].value
        before_dist_1_ending_dist_2_ending = params["before_dist_1_ending_dist_2_ending"].value
        similarity_dist_1_ending_dist_2_ending = params["similarity_dist_1_ending_dist_2_ending"].value
        after_dist_1_ending_dist_2_ending = params["after_dist_1_ending_dist_2_ending"].value

        same_dist_1_beginning_dist_2_beginning = similarity_dist_1_beginning_dist_2_beginning * (
            1 - fabs(before_dist_1_beginning_dist_2_beginning - after_dist_1_beginning_dist_2_beginning)
        )

        same_dist_1_beginning_dist_2_ending = similarity_dist_1_beginning_dist_2_ending * (
            1 - fabs(before_dist_1_beginning_dist_2_ending - after_dist_1_beginning_dist_2_ending)
        )

        same_dist_1_ending_dist_2_beginning = similarity_dist_1_ending_dist_2_beginning * (
            1 - fabs(before_dist_1_ending_dist_2_beginning - after_dist_1_ending_dist_2_beginning)
        )

        same_dist_1_ending_dist_2_ending = similarity_dist_1_ending_dist_2_ending * (
            1 - fabs(before_dist_1_ending_dist_2_ending - after_dist_1_ending_dist_2_ending)
        )

        model[0] = (
            before_dist_1_beginning_dist_2_beginning
            * before_dist_1_beginning_dist_2_ending
            * before_dist_1_ending_dist_2_beginning
            * before_dist_1_ending_dist_2_ending
        )

        model[1] = (
            before_dist_1_beginning_dist_2_beginning
            * before_dist_1_beginning_dist_2_ending
            * same_dist_1_ending_dist_2_beginning
            * before_dist_1_ending_dist_2_ending
        )

        model[2] = (
            before_dist_1_beginning_dist_2_beginning
            * before_dist_1_beginning_dist_2_ending
            * after_dist_1_ending_dist_2_beginning
            * before_dist_1_ending_dist_2_ending
        )

        model[3] = (
            before_dist_1_beginning_dist_2_beginning
            * before_dist_1_beginning_dist_2_ending
            * after_dist_1_ending_dist_2_beginning
            * same_dist_1_ending_dist_2_ending
        )

        model[4] = (
            before_dist_1_beginning_dist_2_beginning
            * before_dist_1_beginning_dist_2_ending
            * after_dist_1_ending_dist_2_beginning
            * after_dist_1_ending_dist_2_ending
        )

        model[5] = (
            same_dist_1_beginning_dist_2_beginning
            * before_dist_1_beginning_dist_2_ending
            * after_dist_1_ending_dist_2_beginning
            * before_dist_1_ending_dist_2_ending
        )

        model[6] = (
            same_dist_1_beginning_dist_2_beginning
            * before_dist_1_beginning_dist_2_ending
            * after_dist_1_ending_dist_2_beginning
            * same_dist_1_ending_dist_2_ending
        )

        model[7] = (
            same_dist_1_beginning_dist_2_beginning
            * before_dist_1_beginning_dist_2_ending
            * after_dist_1_ending_dist_2_beginning
            * after_dist_1_ending_dist_2_ending
        )

        model[8] = (
            after_dist_1_beginning_dist_2_beginning
            * before_dist_1_beginning_dist_2_ending
            * after_dist_1_ending_dist_2_beginning
            * before_dist_1_ending_dist_2_ending
        )

        model[9] = (
            after_dist_1_beginning_dist_2_beginning
            * before_dist_1_beginning_dist_2_ending
            * after_dist_1_ending_dist_2_beginning
            * same_dist_1_ending_dist_2_ending
        )

        model[10] = (
            after_dist_1_beginning_dist_2_beginning
            * before_dist_1_beginning_dist_2_ending
            * after_dist_1_ending_dist_2_beginning
            * after_dist_1_ending_dist_2_ending
        )

        model[11] = (
            after_dist_1_beginning_dist_2_beginning
            * same_dist_1_beginning_dist_2_ending
            * after_dist_1_ending_dist_2_beginning
            * after_dist_1_ending_dist_2_ending
        )

        model[12] = (
            after_dist_1_beginning_dist_2_beginning
            * after_dist_1_beginning_dist_2_ending
            * after_dist_1_ending_dist_2_beginning
            * after_dist_1_ending_dist_2_ending
        )

        return model - self.data

    def compare(self, dist_1_key="beginning", dist_2_key="beginning"):
        before = self.params["before_dist_1_" + dist_1_key + "_dist_2_" + dist_2_key].value
        after = self.params["after_dist_1_" + dist_1_key + "_dist_2_" + dist_2_key].value
        similarity = self.params["similarity_dist_1_" + dist_1_key + "_dist_2_" + dist_2_key].value
        # before, similarity, after = round(before, 6), round(similarity, 6), round(after, 6)
        same = similarity * (1 - fabs(before - after))
        return before, same, after

    def get_composition_data(self):
        data = []
        for key in self.combinations:
            before, same, after = self.compare(*key)
            data.append(before)
            data.append(same)
        return data

    def check(self):
        from spatiotemporal.temporal_events import FormulaCreator

        print(self.data)
        print(FormulaCreator(self).calculate_relations().to_vector())
        print()
Example #48
class TestParameters(unittest.TestCase):

    def setUp(self):
        self.params = Parameters()
        self.params.add_many(('a', 1., True, None, None, None),
                             ('b', 2., True, None, None, None),
                             ('c', 3., True, None, None, '2. * a'))

    def test_expr_was_evaluated(self):
        self.params.update_constraints()
        assert_almost_equal(self.params['c'].value,
                            2 * self.params['a'].value)

    def test_deepcopy(self):
        # check that a simple copy works
        b = deepcopy(self.params)
        assert_(self.params == b)

        # check that we can add a symbol to the interpreter
        self.params['b'].expr = 'sin(1)'
        self.params['b'].value = 10
        assert_almost_equal(self.params['b'].value, np.sin(1))
        assert_almost_equal(self.params._asteval.symtable['b'], np.sin(1))

        # check that the symbols in the interpreter are still the same after
        # deepcopying
        b = deepcopy(self.params)

        unique_symbols_params = self.params._asteval.user_defined_symbols()
        unique_symbols_b = b._asteval.user_defined_symbols()
        assert_(unique_symbols_b == unique_symbols_params)
        for unique_symbol in unique_symbols_b:
            if self.params._asteval.symtable[unique_symbol] is np.nan:
                continue

            assert_(self.params._asteval.symtable[unique_symbol]
                    ==
                    b._asteval.symtable[unique_symbol])

    def test_add_many_params(self):
        # test that we can add many parameters, but only parameters are added.
        a = Parameter('a', 1)
        b = Parameter('b', 2)

        p = Parameters()
        p.add_many(a, b)

        assert_(list(p.keys()) == ['a', 'b'])

    def test_expr_and_constraints_GH265(self):
        # test that parameters are reevaluated if they have bounds and expr
        # see GH265
        p = Parameters()

        p['a'] = Parameter('a', 10, True)
        p['b'] = Parameter('b', 10, True, 0, 20)

        assert_equal(p['b'].min, 0)
        assert_equal(p['b'].max, 20)

        p['a'].expr = '2 * b'
        assert_almost_equal(p['a'].value, 20)

        p['b'].value = 15
        assert_almost_equal(p['b'].value, 15)
        assert_almost_equal(p['a'].value, 30)

        p['b'].value = 30
        assert_almost_equal(p['b'].value, 20)
        assert_almost_equal(p['a'].value, 40)

    def test_pickle_parameter(self):
        # test that we can pickle a Parameter
        p = Parameter('a', 10, True, 0, 1)
        pkl = pickle.dumps(p)

        q = pickle.loads(pkl)

        assert_(p == q)

    def test_pickle_parameters(self):
        # test that we can pickle a Parameters object
        p = Parameters()
        p.add('a', 10, True, 0, 100)
        p.add('b', 10, True, 0, 100, 'a * sin(1)')
        p.update_constraints()
        p._asteval.symtable['abc'] = '2 * 3.142'

        pkl = pickle.dumps(p, -1)
        q = pickle.loads(pkl)

        q.update_constraints()
        assert_(p == q)
        assert_(p is not q)

        # now test if the asteval machinery survived
        assert_(q._asteval.symtable['abc'] == '2 * 3.142')

    def test_isclose(self):
        assert_(isclose(1., 1+1e-5, atol=1e-4, rtol=0))
        assert_(not isclose(1., 1+1e-5, atol=1e-6, rtol=0))
        assert_(isclose(1e10, 1.00001e10, rtol=1e-5, atol=1e-8))
        assert_(not isclose(0, np.inf))
        assert_(not isclose(-np.inf, np.inf))
        assert_(isclose(np.inf, np.inf))
        assert_(not isclose(np.nan, np.nan))
Example #49
class TestParameters(unittest.TestCase):

    def setUp(self):
        self.params = Parameters()
        self.params.add_many(('a', 1., True, None, None, None),
                             ('b', 2., True, None, None, None),
                             ('c', 3., True, None, None, '2. * a'))

    def test_expr_was_evaluated(self):
        self.params.update_constraints()
        assert_almost_equal(self.params['c'].value,
                            2 * self.params['a'].value)

    def test_copy(self):
        # check simple Parameters.copy() does not fail
        # on non-trivial Parameters
        p1 = Parameters()
        p1.add('t', 2.0, min=0.0, max=5.0)
        p1.add('x', 10.0)
        p1.add('y', expr='x*t + sqrt(t)/3.0')

        p2 = p1.copy()
        assert(isinstance(p2, Parameters))
        assert('t' in p2)
        assert('y' in p2)
        assert(p2['t'].max < 6.0)
        assert(np.isinf(p2['x'].max) and p2['x'].max > 0)
        assert(np.isinf(p2['x'].min) and p2['x'].min < 0)
        assert('sqrt(t)' in p2['y'].expr)
        assert(p2._asteval is not None)
        assert(p2._asteval.symtable is not None)
        assert((p2['y'].value > 20) and (p2['y'].value < 21))


    def test_deepcopy(self):
        # check that a simple copy works
        b = deepcopy(self.params)
        assert_(self.params == b)

        # check that we can add a symbol to the interpreter
        self.params['b'].expr = 'sin(1)'
        self.params['b'].value = 10
        assert_almost_equal(self.params['b'].value, np.sin(1))
        assert_almost_equal(self.params._asteval.symtable['b'], np.sin(1))

        # check that the symbols in the interpreter are still the same after
        # deepcopying
        b = deepcopy(self.params)

        unique_symbols_params = self.params._asteval.user_defined_symbols()
        unique_symbols_b = b._asteval.user_defined_symbols()
        assert_(unique_symbols_b == unique_symbols_params)
        for unique_symbol in unique_symbols_b:
            if self.params._asteval.symtable[unique_symbol] is np.nan:
                continue

            assert_(self.params._asteval.symtable[unique_symbol]
                    ==
                    b._asteval.symtable[unique_symbol])

    def test_add_many_params(self):
        # test that we can add many parameters, but only parameters are added.
        a = Parameter('a', 1)
        b = Parameter('b', 2)

        p = Parameters()
        p.add_many(a, b)

        assert_(list(p.keys()) == ['a', 'b'])

    def test_expr_and_constraints_GH265(self):
        # test that parameters are reevaluated if they have bounds and expr
        # see GH265
        p = Parameters()

        p['a'] = Parameter('a', 10, True)
        p['b'] = Parameter('b', 10, True, 0, 20)

        assert_equal(p['b'].min, 0)
        assert_equal(p['b'].max, 20)

        p['a'].expr = '2 * b'
        assert_almost_equal(p['a'].value, 20)

        p['b'].value = 15
        assert_almost_equal(p['b'].value, 15)
        assert_almost_equal(p['a'].value, 30)

        p['b'].value = 30
        assert_almost_equal(p['b'].value, 20)
        assert_almost_equal(p['a'].value, 40)

    def test_pickle_parameter(self):
        # test that we can pickle a Parameter
        p = Parameter('a', 10, True, 0, 1)
        pkl = pickle.dumps(p)

        q = pickle.loads(pkl)

        assert_(p == q)

    def test_pickle_parameters(self):
        # test that we can pickle a Parameters object
        p = Parameters()
        p.add('a', 10, True, 0, 100)
        p.add('b', 10, True, 0, 100, 'a * sin(1)')
        p.update_constraints()
        p._asteval.symtable['abc'] = '2 * 3.142'

        pkl = pickle.dumps(p, -1)
        q = pickle.loads(pkl)

        q.update_constraints()
        assert_(p == q)
        assert_(p is not q)

        # now test if the asteval machinery survived
        assert_(q._asteval.symtable['abc'] == '2 * 3.142')

        # check that unpickling of Parameters is not affected by expr that
        # refer to Parameter that are added later on. In the following
        # example var_0.expr refers to var_1, which is a Parameter later
        # on in the Parameters OrderedDict.
        p = Parameters()
        p.add('var_0', value=1)
        p.add('var_1', value=2)
        p['var_0'].expr = 'var_1'
        pkl = pickle.dumps(p)
        q = pickle.loads(pkl)

    def test_isclose(self):
        assert_(isclose(1., 1+1e-5, atol=1e-4, rtol=0))
        assert_(not isclose(1., 1+1e-5, atol=1e-6, rtol=0))
        assert_(isclose(1e10, 1.00001e10, rtol=1e-5, atol=1e-8))
        assert_(not isclose(0, np.inf))
        assert_(not isclose(-np.inf, np.inf))
        assert_(isclose(np.inf, np.inf))
        assert_(not isclose(np.nan, np.nan))
Example #50
        spcorr[:,0] = spforcorr[:,0]    
        spcorr[:,1] = interpolate.splev(spforcorr[:,0],tck,der=0)
        
        if sptarget is None: #in such case we return the corrected spectrum
            return spcorr
        return (spcorr[:,1] - sptarget[:,1])
    ###########################################################################        
        
    # Now we choose the portion of spectra to fit
    DiamondtoFit = corrdiamond[np.where((corrdiamond[:,0]> 655) & (corrdiamond[:,0] < 670))]
    SampletoFit =  corrsample[np.where((corrsample[:,0]> 655) & (corrsample[:,0] < 670))]
   
    # Now we enter the model parameters
    params = Parameters()
    params.add_many(('xshift',   1,   True,  -15,      15,  None),
                    ('yshift',   0,   True, None,    None,  None),
                    ('yshiftcarr',   1e-2,   True, None,    None,  None))
        
    # Now we chose the algorithm and run the optimization
    algorithm = "leastsq"
    result = minimize(residual, params,method = algorithm, args=(DiamondtoFit, SampletoFit))

    cds = residual(result.params,corrdiamond)    
    
    # To apply the correction for x shift, we need to interpolate to create new datasets
    tck = interpolate.splrep(corrsample[:,0],corrsample[:,1],s=0)
    tck2 = interpolate.splrep(cds[:,0],cds[:,1],s=0)
        
    # The following rows contain the spectra corrected from x and y shifts
    diamondfinal = np.zeros((len(x),2))
    samplefinal = np.zeros((len(x),2))
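The excerpt stops right after allocating the output arrays; presumably the two splines are then evaluated onto a common axis. A hedged sketch of that final step, assuming x holds the shared wavenumber grid used earlier in this function:

# Sample the shift-corrected spectra onto the common grid x (assumed).
diamondfinal[:, 0] = x
diamondfinal[:, 1] = interpolate.splev(x, tck2, der=0)
samplefinal[:, 0] = x
samplefinal[:, 1] = interpolate.splev(x, tck, der=0)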
Example #51
         spcorr[:,0] = spforcorr[:,0]    
         spcorr[:,1] = interpolate.splev(spcorr[:,0],tck,der=0)
         
         if sptarget is None: #in such case we return the corrected spectrum
             return spcorr
         return (spcorr[:,1] - sptarget[:,1])
     #######################################################################        
     
     # Now we choose the portion of spectra to fit
     DiamondtoFit = corrdiamond[np.where((corrdiamond[:,0]> dconvfit[0,0]) & (corrdiamond[:,0] < dconvfit[0,1]))]
     SampletoFit =  corrsample[np.where((corrsample[:,0]> dconvfit[0,0]) & (corrsample[:,0] < dconvfit[0,1]))]
    
     # Now we enter the model parameters
     params = Parameters()
     params.add_many(('xshift',   1,   True,  -15,      15,  None),
                     ('yshift',   1e-1,   True, None,    None,  None),
                     ('yshiftcarr',   1e-5,   True, None,    None,  None))
         
     # Now we chose the algorithm and run the optimization
     algorithm = "leastsq"
     result = minimize(residual, params,method = algorithm, args=(DiamondtoFit, SampletoFit))
 
     cds = residual(result.params, corrdiamond)
     
     # To apply the correction for x shift, we need to interpolate to create new datasets
     tck = interpolate.splrep(corrsample[:,0],corrsample[:,1],s=0)
     tck2 = interpolate.splrep(cds[:,0],cds[:,1],s=0)
     
     # The following rows contain the spectra corrected from x and y shifts
     diamondfinal = np.zeros((len(x),2))
     samplefinal = np.zeros((len(x),2))
Example #52
class TestParameters(unittest.TestCase):

    def setUp(self):
        self.params = Parameters()
        self.params.add_many(('a', 1., True, None, None, None),
                             ('b', 2., True, None, None, None),
                             ('c', 3., True, None, None, '2. * a'))

    def test_expr_was_evaluated(self):
        self.params.update_constraints()
        assert_almost_equal(self.params['c'].value,
                            2 * self.params['a'].value)

    def test_copy(self):
        # check simple Parameters.copy() does not fail
        # on non-trivial Parameters
        p1 = Parameters()
        p1.add('t', 2.0, min=0.0, max=5.0)
        p1.add('x', 10.0)
        p1.add('y', expr='x*t + sqrt(t)/3.0')

        p2 = p1.copy()
        assert(isinstance(p2, Parameters))
        assert('t' in p2)
        assert('y' in p2)
        assert(p2['t'].max < 6.0)
        assert(np.isinf(p2['x'].max) and p2['x'].max > 0)
        assert(np.isinf(p2['x'].min) and p2['x'].min < 0)
        assert('sqrt(t)' in p2['y'].expr)
        assert(p2._asteval is not None)
        assert(p2._asteval.symtable is not None)
        assert((p2['y'].value > 20) and (p2['y'].value < 21))

    def test_copy_function(self):
        # check copy(Parameters) does not fail
        p1 = Parameters()
        p1.add('t', 2.0, min=0.0, max=5.0)
        p1.add('x', 10.0)
        p1.add('y', expr='x*t + sqrt(t)/3.0')

        p2 = copy(p1)
        assert(isinstance(p2, Parameters))

        # change the 'x' value in the original
        p1['x'].value = 4.0

        assert(p2['x'].value > 9.8)
        assert(p2['x'].value < 10.2)
        assert(np.isinf(p2['x'].max) and p2['x'].max > 0)

        assert('t' in p2)
        assert('y' in p2)
        assert(p2['t'].max < 6.0)

        assert(np.isinf(p2['x'].min) and p2['x'].min < 0)
        assert('sqrt(t)' in p2['y'].expr)
        assert(p2._asteval is not None)
        assert(p2._asteval.symtable is not None)
        assert((p2['y'].value > 20) and (p2['y'].value < 21))

        assert(p1['y'].value < 10)

    def test_deepcopy(self):
        # check that a simple copy works
        b = deepcopy(self.params)
        assert_(self.params == b)

        # check that we can add a symbol to the interpreter
        self.params['b'].expr = 'sin(1)'
        self.params['b'].value = 10
        assert_almost_equal(self.params['b'].value, np.sin(1))
        assert_almost_equal(self.params._asteval.symtable['b'], np.sin(1))

        # check that the symbols in the interpreter are still the same after
        # deepcopying
        b = deepcopy(self.params)

        unique_symbols_params = self.params._asteval.user_defined_symbols()
        unique_symbols_b = b._asteval.user_defined_symbols()
        assert_(unique_symbols_b == unique_symbols_params)
        for unique_symbol in unique_symbols_b:
            if self.params._asteval.symtable[unique_symbol] is np.nan:
                continue

            assert_(self.params._asteval.symtable[unique_symbol]
                    ==
                    b._asteval.symtable[unique_symbol])

    def test_add_many_params(self):
        # test that we can add many parameters, but only parameters are added.
        a = Parameter('a', 1)
        b = Parameter('b', 2)

        p = Parameters()
        p.add_many(a, b)

        assert_(list(p.keys()) == ['a', 'b'])

    def test_expr_and_constraints_GH265(self):
        # test that parameters are reevaluated if they have bounds and expr
        # see GH265
        p = Parameters()

        p['a'] = Parameter('a', 10, True)
        p['b'] = Parameter('b', 10, True, 0, 20)

        assert_equal(p['b'].min, 0)
        assert_equal(p['b'].max, 20)

        p['a'].expr = '2 * b'
        assert_almost_equal(p['a'].value, 20)

        p['b'].value = 15
        assert_almost_equal(p['b'].value, 15)
        assert_almost_equal(p['a'].value, 30)

        p['b'].value = 30
        assert_almost_equal(p['b'].value, 20)
        assert_almost_equal(p['a'].value, 40)

    def test_pickle_parameter(self):
        # test that we can pickle a Parameter
        p = Parameter('a', 10, True, 0, 1)
        pkl = pickle.dumps(p)

        q = pickle.loads(pkl)

        assert_(p == q)

    def test_pickle_parameters(self):
        # test that we can pickle a Parameters object
        p = Parameters()
        p.add('a', 10, True, 0, 100)
        p.add('b', 10, True, 0, 100, 'a * sin(1)')
        p.update_constraints()
        p._asteval.symtable['abc'] = '2 * 3.142'

        pkl = pickle.dumps(p, -1)
        q = pickle.loads(pkl)

        q.update_constraints()
        assert_(p == q)
        assert_(p is not q)

        # now test if the asteval machinery survived
        assert_(q._asteval.symtable['abc'] == '2 * 3.142')

        # check that unpickling of Parameters is not affected by expr that
        # refer to Parameter that are added later on. In the following
        # example var_0.expr refers to var_1, which is a Parameter later
        # on in the Parameters OrderedDict.
        p = Parameters()
        p.add('var_0', value=1)
        p.add('var_1', value=2)
        p['var_0'].expr = 'var_1'
        pkl = pickle.dumps(p)
        q = pickle.loads(pkl)

    def test_params_usersyms(self):
        # test passing usersyms to Parameters()
        def myfun(x):
            return x**3

        params = Parameters(usersyms={"myfun": myfun})
        params.add("a", value=2.3)
        params.add("b", expr="myfun(a)")

        xx = np.linspace(0, 1, 10)
        yy = 3 * xx + np.random.normal(scale=0.002, size=len(xx))

        model = Model(lambda x, a: a * x)
        result = model.fit(yy, params=params, x=xx)
        assert_(np.isclose(result.params['a'].value, 3.0, rtol=0.025))
        assert_(result.nfev > 3)
        assert_(result.nfev < 300)

    def test_set_symtable(self):
        # test that we use Parameter.set(value=XXX) and have
        # that new value be used in constraint expressions
        pars = Parameters()
        pars.add('x', value=1.0)
        pars.add('y', expr='x + 1')

        assert_(np.isclose(pars['y'].value, 2.0))
        pars['x'].set(value=3.0)
        assert_(np.isclose(pars['y'].value, 4.0))

    def test_dumps_loads_parameters(self):
        # test that we can dumps() and then loads() a Parameters
        pars = Parameters()
        pars.add('x', value=1.0)
        pars.add('y', value=2.0)
        pars['x'].expr = 'y / 2.0'

        dumps = pars.dumps()

        newpars = Parameters().loads(dumps)
        newpars['y'].value = 100.0
        assert_(np.isclose(newpars['x'].value, 50.0))

    def test_isclose(self):
        assert_(np.isclose(1., 1+1e-5, atol=1e-4, rtol=0))
        assert_(not np.isclose(1., 1+1e-5, atol=1e-6, rtol=0))
        assert_(np.isclose(1e10, 1.00001e10, rtol=1e-5, atol=1e-8))
        assert_(not np.isclose(0, np.inf))
        assert_(not np.isclose(-np.inf, np.inf))
        assert_(np.isclose(np.inf, np.inf))
        assert_(not np.isclose(np.nan, np.nan))

    def test_expr_with_bounds(self):
        "test an expression with bounds, without value"
        pars = Parameters()
        pars.add('c1', value=0.2)
        pars.add('c2', value=0.2)
        pars.add('c3', value=0.2)
        pars.add('csum', value=0.8)
        # this should not raise TypeError:
        pars.add('c4', expr='csum-c1-c2-c3', min=0, max=1)
        assert_(np.isclose(pars['c4'].value, 0.2))

    def test_invalid_expr_exceptions(self):
        "test if an exception is raised for invalid expressions (GH486)"""
        p1 = Parameters()
        p1.add('t', 2.0, min=0.0, max=5.0)
        p1.add('x', 10.0)
        with self.assertRaises(SyntaxError):
            p1.add('y', expr='x*t + sqrt(t)/')
        assert(len(p1['y']._expr_eval.error) > 0)
        p1.add('y', expr='x*t + sqrt(t)/3.0')
        p1['y'].set(expr='x*3.0 + t**2')
        assert('x*3' in p1['y'].expr)
        assert(len(p1['y']._expr_eval.error) == 0)
        with self.assertRaises(SyntaxError):
            p1['y'].set(expr='t+')
        assert(len(p1['y']._expr_eval.error) > 0)
        assert_almost_equal(p1['y'].value, 34.0)

    def test_eval(self):
        # check that eval() works with usersyms and parameter values
        def myfun(x):
            return 2.0 * x
        p = Parameters(usersyms={"myfun": myfun})
        p.add("a", value=4.0)
        p.add("b", value=3.0)
        assert_almost_equal(p.eval("myfun(2.0) * a"), 16)
        assert_almost_equal(p.eval("b / myfun(3.0)"), 0.5)
Example #53
File: match-iir.py  Project: antorsae/axo
delta_time = numpy.zeros(samples)
delta_time[0] = 1.0
x_freq = numpy.linspace(0.0, sample_rate/2, (samples//2)+1)

pre1_filters = '-eadb:-{adb} -el:RThighshelf,{RThighshelf_A},{RThighshelf_f0},{RThighshelf_Q} -el:RTlowshelf,{RTlowshelf_A},{RTlowshelf_f0},{RTlowshelf_Q}'
fmin = 20
fmax = 22000

pre1 = Parameters()

#               (Name,              Value,      Vary,   Min,    Max,    Expr)
pre1.add_many(  ('adb',             9.28136887,     True,   0,      None,   None),
                ('RThighshelf_A',   4.38211718,     True,   0,      None,   None),
                ('RThighshelf_f0',  134.645409,     True,   100,    200,    None),
                ('RThighshelf_Q',   0.48178881,     True,   0.1,    3,      None),
                ('RTlowshelf_A',    3.42318251,     True,   0,      None,   None),
                ('RTlowshelf_f0',   1747.22195,     True,   1600,   1800,   None),
                ('RTlowshelf_Q',    0.48331721,     True,   0.1,    3,      None),
                ('sample_rate',     sample_rate,    False,  None,   None,   None))



#out = minimize(residual, pre1, args=(pre1_filters, x_freq, fmin, fmax, delta_time), kws={'data':pre1_reference}, method='nelder')
#pre1 = out.params
#print(fit_report(out))

pre1_matched = residual(pre1, pre1_filters, x_freq, fmin, fmax, delta_time)

plotFilter("pre1.png", "Pre 1", fmin, fmax, x_freq, pre1_reference, pre1_matched)

Example #54
import numpy as np
from lmfit import Parameters

from stress_to_spike import stress_to_group_current, stress_to_fr_inst
from gen_function import stress_to_current, get_interp_stress
import cy_lif_model as lif_model
from model_constants import MC_GROUPS, REF_DISPL, REF_STIM
from fit_model import get_data_dicts, get_single_residual


TEST_DATA_PATH = './data/test/'


# Commonly used constants
params = {
    'tau_arr': np.array([8, 500, 1000, np.inf]),
    'k_arr': np.array([1.35, 2, .15, 1.5])}
lmpars = Parameters()
lmpars.add_many(('tau1', 8), ('tau2', 200), ('tau3', 1832), ('tau4', np.inf),
                ('k1', 0.782), ('k2', 0.304), ('k3', 0.051), ('k4', 0.047))
interp_static_displ = .35
extrap_static_displ = .65
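lmpars mirrors the plain-dict params above in lmfit form; here is a small hedged sketch of pulling the values back out as arrays (lmpars_to_arrays is a hypothetical helper, not part of the original module):

def lmpars_to_arrays(lmpars):
    # Collect tau1..tau4 and k1..k4 from the Parameters object into arrays.
    tau_arr = np.array([lmpars['tau%d' % i].value for i in range(1, 5)])
    k_arr = np.array([lmpars['k%d' % i].value for i in range(1, 5)])
    return tau_arr, k_arr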


def load_test_csv(vname_list):
    data = {}
    for vname in vname_list:
        data[vname] = np.genfromtxt('%s%s.csv' % (TEST_DATA_PATH, vname),
                                    delimiter=',')
    return data


def save_test_csv(data):
    for key, item in data.items():
        np.savetxt('%s%s.csv' % (TEST_DATA_PATH, key), item, delimiter=',')
Example #55
    interestspectra = sample[np.where((sample[:,0] > lb)&(sample[:,0] < hb))]
    ese0 = interestspectra[:,2]/abs(interestspectra[:,1]) # relative errors; we assume the error treatment was done correctly beforehand (if not, set sigma = None)
    interestspectra[:,1] = interestspectra[:,1]/np.amax(interestspectra[:,1])*100 # normalise spectra to their maximum, easier to handle afterwards
    sigma = abs(ese0*interestspectra[:,1]) # recalculate the absolute errors
    #sigma = None # uncomment this if you are not sure about the errors

    xfit = interestspectra[:,0] # region to be fitted
    data = interestspectra[:,1] # region to be fitted

    params = Parameters()
    ####################### FOR MELT:
    ####################### COMMENT IF NOT WANTED
    #               (Name,  Value,  Vary,   Min,  Max,  Expr)
    params.add_many(('a1',   1,   True, 0,      None,  None),
                    ('f1',   5200,  True, 750,    None,  None),
                    ('l1',   1,  True, 0,      None,  None),
                    ('a2',   1,  True, 0,      None,  None),
                    ('f2',   5400,  True, None,   None,  None),
                    ('l2',   1,  True, None,   None,  None))  
                         
    result = minimize(residual_melt, params, args=(xfit, data)) # fit the data with the leastsq algorithm from scipy
    model = fit_report(result) # the fit report
    yout, peak1, peak2 = residual_melt(result.params, xfit) # the model and the individual peaks
    
    #### We now calculate the different areas up to 4700 cm-1 and those of the Gaussians
    # Select interest areas for calculating the areas of OH and H2Omol peaks
    intarea45 = sample[np.where((sample[:,0]> 4100) & (sample[:,0]<4700))]
    area4500 = np.trapz(intarea45[:,1],intarea45[:,0])
    esearea4500 = 1/sqrt(area4500) # We assume that RELATIVE errors on areas are globally equal to 1/sqrt(Area)
      
    # now for the gaussians
    # unpack parameters:
Example #56
 ax1 = plt.subplot(gs[0])
 ax2 = plt.subplot(gs[1])
 
 ax1.set_title('RT')
 ax2.set_title(temperature[lg])
 
 ax1.plot(inputRT[:,0], inputRT[:,1],'k-')
 ax2.plot(inputHT[:,0],inputHT[:,1],'r-')
 params = Parameters()
 #               (Name,  Value,  Vary,   Min,  Max,  Expr)
 params.add_many(('a1',  10,    True,  0,     None,  None),
                 ('f1',  1278,  True,  1259,  1300,  None),
                 ('l1',  5,     True,  0,     50,    None),
                 ('e1',  0.5,   True,  0,     1,     None),
                 ('a2',  100,   True,  0,     None,  None),
                 ('f2',  1332,  True,  1300,  1400,  None),
                 ('l2',  5,     True,  0,     50,    None),
                 ('e2',  0.5,   True,  0,     1,     None),
                 ('a3',  2,     True,  0,     None,  None),
                 ('f3',  1882,  True,  1800,  1920,  None),
                 ('l3',  4,     True,  0,     50,    None),
                 ('e3',  0.5,   True,  0,     1,     None))
                 
 paramsHT = Parameters()
 #               (Name,  Value,  Vary,   Min,  Max,  Expr)
 paramsHT.add_many(('a1',  10,    True,  0,     None,  None),
                   ('f1',  1261,  True,  1249,  1280,  None),
                   ('l1',  5,     True,  0,     50,    None),
                   ('e1',  0.5,   True,  0,     1,     None),
                   ('a2',  100,   True,  0,     None,  None),
                   ('f2',  1304,  True,  1300,  1330,  None),
                   ('l2',  5,     True,  0,     50,    None),
Example #57
    g2 = gauss(x, pars['a2'].value, pars['c2'].value, pars['w2'].value)
    model = g1 + g2
    if data is None:
        return model
    return (model - data)

n    = 601
xmin = 0.
xmax = 15.0
noise = random.normal(scale=.65, size=n)
x = linspace(xmin, xmax, n)

fit_params = Parameters()
fit_params.add_many(('a1', 12.0, True, None, None, None),
                    ('c1',  5.3, True, None, None, None),
                    ('w1',  1.0, True, None, None, None),
                    ('a2',  9.1, True, None, None, None),
                    ('c2',  8.1, True, None, None, None),
                    ('w2',  2.5, True, None, None, None))

data  = residual(fit_params, x) + noise

pylab.plot(x, data, 'r+')

fit_params = Parameters()
fit_params.add_many(('a1',  8.0, True, None, 14., None),
                    ('c1',  5.0, True, None, None, None),
                    ('w1',  0.7, True, None, None, None),
                    ('a2',  3.1, True, None, None, None),
                    ('c2',  8.8, True, None, None, None))

fit_params.add('w2', expr='2.5*w1')
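With w2 tied to 2.5*w1 by the constraint expression, the refit presumably follows; a hedged sketch consistent with the residual(pars, x, data=None) signature used above (assumes lmfit's minimize and fit_report are in scope):

out = minimize(residual, fit_params, args=(x,), kws={'data': data})
print(fit_report(out))
pylab.plot(x, residual(out.params, x), 'b-')  # overlay the fitted model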
Example #58
def lame_lmfit_gaussian_centering(imageCube, yguess=15, xguess=15, subArraySize=10, init_params=None, nSig=False, useMoments=False, method='leastsq'):
    """Class methods are similar to regular functions.

    Note:
        Do not include the `self` parameter in the ``Args`` section.

    Args:
        param1: The first parameter.
        param2: The second parameter.

    Returns:
        True if successful, False otherwise.

    """
    
    imageSize  = imageCube.shape[1]
    nFrames    = imageCube.shape[0]  # needed to size the per-frame output arrays below
    
    nparams    = 6
    if init_params is None:
        useMoments = True
        init_params = moments(imageCube[0])
    
    ihg, iyc, ixc, iyw, ixw, ibg  = arange(nparams)
    lmfit_init_params = Parameters()
    lmfit_init_params.add_many(
        ('height'  , init_params[ihg], True  , 0.0 , inf   ),
        ('center_y', init_params[iyc], True  , 0.0 , imageSize),
        ('center_x', init_params[ixc], True  , 0.0 , imageSize),
        ('width_y' , init_params[iyw], True  , 0.0 , imageSize),
        ('width_x' , init_params[ixw], True  , 0.0 , imageSize),
        ('offset'  , init_params[ibg], True))
    
    gfit_model = Model(gaussian, independent_vars=['yy', 'xx'])
    
    yy0, xx0 = indices(imageCube[0].shape)
    
    npix   = subArraySize//2
    ylower = yguess - npix
    yupper = yguess + npix
    xlower = xguess - npix
    xupper = xguess + npix
    
    ylower, xlower, yupper, xupper = int32([ylower, xlower, yupper, xupper])
    
    yy = yy0[ylower:yupper, xlower:xupper]
    xx = xx0[ylower:yupper, xlower:xupper]
    
    heights, ycenters, xcenters, ywidths, xwidths, offsets = zeros((nparams, nFrames))
    
    for k, image in enumerate(imageCube):
        subFrameNow = image[ylower:yupper, xlower:xupper]
        subFrameNow[isnan(subFrameNow)] = median(subFrameNow)
        
        subFrameNow = gaussianFilter(subFrameNow, nSig) if not isinstance(nSig, bool) else subFrameNow
        
        init_params = moments(subFrameNow) if useMoments else init_params
        
        gfit_res    = gfit_model.fit(subFrameNow, params=lmfit_init_params, xx=xx, yy=yy, method=method)
        
        heights[k]  = gfit_res.best_values['height']
        ycenters[k] = gfit_res.best_values['center_y']
        xcenters[k] = gfit_res.best_values['center_x']
        ywidths[k]  = gfit_res.best_values['width_y']
        xwidths[k]  = gfit_res.best_values['width_x']
        offsets[k]  = gfit_res.best_values['offset']
    
    return heights, ycenters, xcenters, ywidths, xwidths, offsets
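The function above assumes a gaussian model with independent variables yy and xx, plus a moments() initializer, from the surrounding module; a minimal sketch of a gaussian consistent with the parameter names used here (an assumption, not the original helper):

import numpy as np

def gaussian(yy, xx, height, center_y, center_x, width_y, width_x, offset):
    # Elliptical 2D Gaussian on the index grids yy, xx, plus a constant offset.
    return offset + height * np.exp(
        -0.5 * (((yy - center_y) / width_y) ** 2
                + ((xx - center_x) / width_x) ** 2))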
Example #59
    if data is None:
        return model
    return model - data


n = 601
xmin = 0.0
xmax = 15.0
noise = random.normal(scale=0.65, size=n)
x = linspace(xmin, xmax, n)

fit_params = Parameters()
fit_params.add_many(
    ("a1", 12.0, True, None, None, None),
    ("c1", 5.3, True, None, None, None),
    ("w1", 1.0, True, None, None, None),
    ("a2", 9.1, True, None, None, None),
    ("c2", 8.1, True, None, None, None),
    ("w2", 2.5, True, None, None, None),
)

data = residual(fit_params, x) + noise

if HASPYLAB:
    pylab.plot(x, data, "r+")

fit_params = Parameters()
fit_params.add_many(
    ("a1", 8.0, True, None, 14.0, None),
    ("c1", 5.0, True, None, None, None),
    ("w1", 0.7, True, None, None, None),
    ("a2", 3.1, True, None, None, None),