Beispiel #1
0
def gaussian_constant_delta_chi_squared(light_curve, num_attempts=1):
    """ Compute the difference in chi-squared between a Gaussian and a straight (constant) line. """

    # Best (lowest) chi-squared found over all Gaussian fit attempts.
    best_gaussian_chisqr = 1E6
    for attempt in range(num_attempts):
        params = Parameters()

        # Seed the peak position near the brightest (minimum-magnitude)
        # epoch; fall back to that exact epoch if the draw leaves the span.
        t0 = np.random.normal(light_curve.mjd[np.argmin(light_curve.mag)], 1.)
        if not (light_curve.mjd.min() <= t0 <= light_curve.mjd.max()):
            t0 = light_curve.mjd[np.argmin(light_curve.mag)]
        params.add('A', value=np.random.uniform(-1., -20.), min=-1E4, max=0.)
        params.add('mu', value=t0, min=light_curve.mjd.min(), max=light_curve.mjd.max())
        params.add('sigma', value=abs(np.random.normal(10., 2.)), min=1.)
        params.add('B', value=np.random.normal(np.median(light_curve.mag), 0.5))

        fit = minimize(gaussian_error_func, params, args=(light_curve.mjd, light_curve.mag, light_curve.error))

        best_gaussian_chisqr = min(best_gaussian_chisqr, fit.chisqr)

    # Best chi-squared for the constant (flat-line) model.
    best_constant_chisqr = 1E6
    for attempt in range(num_attempts):
        params = Parameters()
        params.add('b', value=np.random.normal(np.median(light_curve.mag), 0.5))
        fit = minimize(constant_error_func, params, args=(light_curve.mjd, light_curve.mag, light_curve.error))

        best_constant_chisqr = min(best_constant_chisqr, fit.chisqr)

    return best_constant_chisqr - best_gaussian_chisqr
Beispiel #2
0
def pp_lmfit_fitting(plan, real_perf_values, method='leastsq'):
    '''least squares or differential evolution fitting with bounds'''
    real_perf_values = np.array(real_perf_values)

    # Scale the training loads and performances into the model's range.
    load_scale_factor = calc_pp_load_scale_factor(plan)
    perf_scale_factor = calc_pp_perf_scale_factor(real_perf_values)
    scaled_plan = [load_scale_factor * load for load in plan]
    scaled_perfs = [perf_scale_factor * perf for perf in real_perf_values]

    args = (scaled_plan,
            np.array(scaled_perfs),
            calc_residuals,
            unpack_pp_lmfit_parms,
            pp_performance_over_time2)

    # PerPot model parameters with their bounds.
    params = lmfit.Parameters()
    params.add(name='perfpot', value=0.5, min=0, max=1)
    params.add(name='straindelay', value=4.0, min=0.001, max=30)
    params.add(name='responsedelay', value=2.0, min=0.001, max=30)
    params.add(name='overflowdelay', value=15, min=0.001, max=30)

    # lmfit.minimize updates params in place (legacy API).
    lmfit.minimize(objective_f, params, method=method, args=args)

    model_perfs = pp_performance_over_time(scaled_plan,
                                           0.0,
                                           0.0,
                                           params['perfpot'],
                                           params['straindelay'],
                                           params['responsedelay'],
                                           params['overflowdelay'])
    # Score only the days with an actual (positive) measurement.
    model_perfs = filter_model_perfs_2_real_perfs(model_perfs, real_perf_values)
    scaled_perfs = [perf for perf in scaled_perfs if perf > 0.0]
    assert(len(model_perfs) == len(scaled_perfs))
    rmse = calc_rmse(scaled_perfs, model_perfs)

    fitted = (params['perfpot'].value,
              params['straindelay'].value,
              params['responsedelay'].value,
              params['overflowdelay'].value)
    return ((fitted, rmse),
            load_scale_factor,
            perf_scale_factor)
Beispiel #3
0
def do_lmfit(data, params, B=None, errs=None, dojac=True):
    """
    Fit the model to the data
    data may contain 'flagged' or 'masked' data with the value of np.NaN
    input: data - pixel information
           params - and lmfit.Model instance
    return: fit results, modified model
    """
    # Work on a copy so the caller's initial conditions survive the fit.
    params = copy.deepcopy(params)
    data = np.array(data)
    # Indices of the finite (unmasked) pixels only.
    mask = np.where(np.isfinite(data))

    def residual(params, **kwargs):
        model_fn = ntwodgaussian_lmfit(params)  # function describing the model
        model = model_fn(*mask)  # model evaluated at the unmasked pixels
        residuals = model - data[mask]
        # Optionally whiten the residuals with the matrix B.
        return residuals if B is None else residuals.dot(B)

    kws = {'x': mask[0], 'y': mask[1], 'B': B, 'errs': errs}
    if dojac:
        result = lmfit.minimize(residual, params, kws=kws, Dfun=jacobian)
    else:
        result = lmfit.minimize(residual, params, kws=kws)

    # Undo the whitening so result.residual is once again (model - data).
    if B is not None:
        result.residual = result.residual.dot(inv(B))
    return result, params
Beispiel #4
0
def psd_fit_errors(f, psd, white=1., ftol=1e-8):
    """Fit a knee + white-noise PSD model in log space; return fitted
    values (f_knee, white level, alpha) with their standard errors."""
    log_Pj = log(psd)

    # Full model: knee frequency, white level (in log) and slope alpha.
    params = lmfit.Parameters()
    params.add("f_knee", value=0.1, min=0.0, max=25.)
    params.add("log_white", value=log(white), vary=True)
    params.add("alpha", value=2., min=0.0, max=20.)
    out = lmfit.minimize(_residuals, params, args=(f, log_Pj), ftol=ftol, scale_covar=True)

    fk = params["f_knee"].value
    log_w = params["log_white"].value
    alpha = params["alpha"].value
    fkstd = params["f_knee"].stderr
    log_wstd = params["log_white"].stderr
    alphastd = params["alpha"].stderr

    # Implausible knee or slope: refit with a pure white-noise model.
    if fk > 10. or alpha > 10.:
        params = lmfit.Parameters()
        params.add("log_white", value=log(white), vary=True)
        out = lmfit.minimize(_residuals_white, params, args=(f, log_Pj))
        log_w = params["log_white"].value
        log_wstd = params["log_white"].stderr
        fk = 0.
        alpha = 0.
        fkstd = 0.
        alphastd = 0.

    # Convert the log white level back to linear units; propagate the error.
    w = exp(log_w)
    wstd = abs(log_wstd * w)
    return fk, w, alpha, fkstd, wstd, alphastd
def isotropic(filename, v0, x0, rho, g=9.81):
    """Fit a linear (isotropic) spring model to a results file.

    Parameters: filename (results file path), v0/x0 (reference volume and
    spring position), rho (density), g (gravity).
    Returns [sample_name, stiffness, intercept], or None on any failure
    (missing/unreadable file, bad data) — best-effort by design.
    """
    try:
        results = read_results_file(filename)
        # Normalise volume and spring position against their references.
        Volume_array_normalized = v0 - results['External_volume']
        spring_position_array_normalized = x0 - results['Ylow']
        params = Parameters()
        params.add('A', value=1)
        params.add('B', value=0)
        try:
            result = minimize(residual_isotropic, params, args=(spring_position_array_normalized, Volume_array_normalized))
        except TypeError:
            # Fall back to a gradient-free method when leastsq rejects the input.
            result = minimize(residual_isotropic, params, args=(spring_position_array_normalized, Volume_array_normalized), method="nelder")
        v = result.params.valuesdict()
        # (Removed dead code: a huge np.arange(..., step=1e-7) theoretical
        # curve was computed here but never used.)
        # report_fit(result.params, min_correl=0.5)
        # Sample name: strip the directory and the trailing "_<suffix>" part.
        filename_list = filename.split("\\")
        txt = filename_list[-1].split("_")
        txt = "_".join(txt[0:-1])
        # Stiffness from the fitted slope A.
        k = -rho * g / v['A']
        print("Stiffness: " + str(k))
        list_results = [txt, k, v['B']]
        return list_results
    except Exception:
        # Narrowed from a bare except: still best-effort, but no longer
        # swallows SystemExit/KeyboardInterrupt.
        return None
def test_bounded_jacobian():
    """Check that a user-supplied Jacobian (Dfun) is actually used."""
    pars = Parameters()
    pars.add('x0', value=2.0)
    pars.add('x1', value=2.0, min=1.5)

    global jac_count
    jac_count = 0

    def resid(params):
        # Rosenbrock-style residual vector.
        x0, x1 = params['x0'], params['x1']
        return np.array([10 * (x1 - x0 * x0), 1 - x0])

    def jac(params):
        # Analytic Jacobian; count calls to prove it is invoked.
        global jac_count
        jac_count += 1
        x0 = params['x0']
        return np.array([[-20 * x0, 10], [-1, 0]])

    # Without Dfun the Jacobian must never be called.
    out0 = minimize(resid, pars, Dfun=None)
    assert_paramval(out0.params['x0'], 1.2243, tol=0.02)
    assert_paramval(out0.params['x1'], 1.5000, tol=0.02)
    assert jac_count == 0

    # With Dfun it must be called repeatedly during the fit.
    out1 = minimize(resid, pars, Dfun=jac)
    assert_paramval(out1.params['x0'], 1.2243, tol=0.02)
    assert_paramval(out1.params['x1'], 1.5000, tol=0.02)
    assert jac_count > 5
Beispiel #7
0
def ff_lmfit_fitting(plan, real_perf_values, method='leastsq'):
    '''least squares or differential evolution fitting with bounds'''
    real_perf_values = np.array(real_perf_values)
    args = (plan,
            real_perf_values,
            calc_residuals,
            unpack_ff_lmfit_parms,
            ff_performance_over_time2)

    # Fitness-fatigue parameters with bounds; initial performance is
    # seeded from the first measured value.
    params = lmfit.Parameters()
    params.add(name='initial_p',
               value=real_perf_values[0],
               min=0,
               max=max(real_perf_values))
    params.add(name='k_1', value=1.0, min=0.01, max=5.0)
    params.add(name='tau_1', value=30.0, min=1.00, max=70.0)
    params.add(name='k_2', value=1.0, min=0.01, max=5.0)
    params.add(name='tau_2', value=15.0, min=1.00, max=70.0)

    # lmfit.minimize updates params in place (legacy API).
    lmfit.minimize(objective_f, params, method=method, args=args)

    model_perfs = ff_performance_over_time(plan,
                                           params['initial_p'],
                                           params['k_1'],
                                           params['tau_1'],
                                           params['k_2'],
                                           params['tau_2'])
    # Score only the days with an actual (positive) measurement.
    model_perfs = filter_model_perfs_2_real_perfs(model_perfs, real_perf_values)
    real_perf_values = [perf for perf in real_perf_values if perf > 0.0]
    assert(len(model_perfs) == len(real_perf_values))
    rmse = calc_rmse(real_perf_values, model_perfs)

    fitted = (params['initial_p'].value,
              params['k_1'].value,
              params['tau_1'].value,
              params['k_2'].value,
              params['tau_2'].value)
    return fitted, rmse
Beispiel #8
0
def cross_validate(model, fitData, pars, outf, folds=5):
    residual1 = partial(residual, model)
    train_errors = []
    test_errors = []
    test_rms = []
    for i, (train_index, test_index) in enumerate(KFold(len(fitData),
                                                        n_folds=folds,
                                                        shuffle=True)):
        trainData = [fitData[x] for x in train_index]
        testData = [fitData[x] for x in test_index]

        fit_result = minimize(residual1, pars, args=([], trainData, []))
        train_errors.append(np.abs(fit_result.residual).mean())
        

        errors = [model(d, pars) - d["exc"] for d in testData]
        test_errors.append(np.abs(errors).mean())
        test_rms.append(  LA.norm(errors)/math.sqrt(len(errors)) )
    outf.write('Train '+ repr(np.mean(train_errors)) + ' +- ' + 
          repr(np.std(train_errors)) + 
          ' Test  '+ repr(np.mean(test_errors)) + ' +- ' + 
          repr(np.std(test_errors))+ '\n')
    outf.write('Test RMS  '+ repr(np.mean(test_rms)) + ' +- ' + 
          repr(np.std(test_rms))+ '\n')
    print "Train:", np.mean(train_errors),"+-",np.std(train_errors), \
        "Test:", np.mean(test_errors),"+-",np.std(test_errors)
    print "TestRMS:", np.mean(test_rms),"+-",np.std(test_rms)
    fit_result = minimize(residual1, pars, args=([], fitData, []))
    return fit_result
Beispiel #9
0
    def best_fit(self, params=None):
        """
        Calculates the best fit model by minimizing over the parameters:
        - spline fitting to the continuum
        - rotational broadening

        Raises
        ------
        ValueError
            If ``self.opt`` is not a supported optimizer ('lm' or
            'nelder'). Previously an unknown value fell through both
            branches and crashed with a NameError on ``out``.
        """
        if params is None:
            params = lmfit.Parameters()

        # Rotational broadening parameters
        params.add('vsini', value=1.0, min=0.0, max=10.0)

        # Spline parameters
        params = add_spline_positions(params, self.knot_x)

        # Perform fit
        if self.opt == 'lm':
            out = lmfit.minimize(self.objective, params)
            self.best_chisq = np.sum(self.objective(out.params)**2)
        elif self.opt == 'nelder':
            out = lmfit.minimize(self.objective, params, method='nelder')
            self.best_chisq = self.objective(out.params)
        else:
            raise ValueError("Unknown optimizer: {!r}".format(self.opt))

        self.best_params = out.params

        return self.best_chisq
Beispiel #10
0
    def optimize_density_and_scaling(self, density_min, density_max, bkg_min, bkg_max, iterations,
                                      callback_fcn = None, output_txt=None):
        """Optimize sample density and background scaling simultaneously.

        Each lmfit trial rescales the background, recomputes the spectra
        and the S(Q) optimization, then scores how well the low-r part of
        F(r) matches the ideal -4*pi*rho*r behaviour.

        Parameters: density_min/density_max and bkg_min/bkg_max bound the
        two fitted parameters; iterations is forwarded to optimize_sq;
        callback_fcn is forwarded as its fcn_callback.
        NOTE(review): output_txt is accepted but never used below — confirm intent.
        """
        params = Parameters()
        params.add("density", value=self.density, min=density_min, max=density_max)
        params.add("background_scaling", value=self.background_scaling, min=bkg_min, max=bkg_max)

        # Trial counter used only for progress output.
        self.iteration = 0

        def optimization_fcn(params):
            # Current trial values.
            density = params['density'].value
            background_scaling = params['background_scaling'].value

            # Apply the trial background scaling and rebuild all spectra.
            self.background_spectrum.scaling = background_scaling
            self.calculate_spectra()
            self.optimize_sq(iterations,fcn_callback=callback_fcn)

            # Compare F(r) below r_cutoff against the expected low-r slope.
            r, fr = self.limit_spectrum(self.fr_spectrum, 0, self.r_cutoff).data

            output = (-fr - 4 * np.pi * convert_density_to_atoms_per_cubic_angstrom(self.composition, density) *
                      r) ** 2

            self.write_output(u'{} X: {:.3f} Den: {:.3f}'.format(self.iteration, np.sum(output)/(r[1]-r[0]), density))
            self.iteration+=1
            return output

        # minimize mutates params in place; fitted values are reported below.
        minimize(optimization_fcn, params)
        self.write_fit_results(params)
    def __init__(self, relations):
        self.data = relations.to_vector()
        # Each tuple below is (name, value, vary, min, max, expr), the form
        # expected by Parameters.add_many; lmfit.minimizer.minimize (built
        # on scipy.optimize) consumes these parameters.
        self.params = Parameters()
        # Every "after_*" weight is constrained via expr to
        # 1 - the matching "before_*" weight, so each pair sums to one.
        self.params.add_many(
            ('before_dist_1_beginning_dist_2_beginning', 0.5, True, 0.0, 1.0, None),
            ('similarity_dist_1_beginning_dist_2_beginning', 0.5, True, 0.0, 1.0, None),
            ('after_dist_1_beginning_dist_2_beginning', None, False, None, None,
             '1 - before_dist_1_beginning_dist_2_beginning'),
            ('before_dist_1_beginning_dist_2_ending', 0.5, True, 0.0, 1.0, None),
            ('similarity_dist_1_beginning_dist_2_ending', 0.5, True, 0.0, 1.0, None),
            ('after_dist_1_beginning_dist_2_ending', None, False, None, None,
             '1 - before_dist_1_beginning_dist_2_ending'),
            ('before_dist_1_ending_dist_2_beginning', 0.5, True, 0.0, 1.0, None),
            ('similarity_dist_1_ending_dist_2_beginning', 0.5, True, 0.0, 1.0, None),
            ('after_dist_1_ending_dist_2_beginning', None, False, None, None,
             '1 - before_dist_1_ending_dist_2_beginning'),
            ('before_dist_1_ending_dist_2_ending', 0.5, True, 0.0, 1.0, None),
            ('similarity_dist_1_ending_dist_2_ending', 0.5, True, 0.0, 1.0, None),
            ('after_dist_1_ending_dist_2_ending', None, False, None, None,
             '1 - before_dist_1_ending_dist_2_ending')
        )

        # Minimise the fitness function over the free parameters.
        minimize(self.fitness, self.params)

        # Round fitted values for stable downstream comparisons.
        for param_key in self.params:
            self.params[param_key].value = round(self.params[param_key].value, 6)
Beispiel #12
0
def optimize(rawdata, mode):

    x=np.array([rawdata[0],rawdata[2]])
    data=np.array([rawdata[1],rawdata[3]])
    seq=rawdata[1]
    
    #Single Impulse
    if mode == 1:
        [lambdaval,deltaval]=singleImpulse(rawdata[1],10)
        if deltaval > -1:
            deltaval=rawdata[0][deltaval]
    #Multi Impulse Begin
    if mode == 0:
        [lambdaval, deltaval, peaks] = multiImpulse(rawdata[1], 10)
        for i in deltaval:
            if i > -1:
                i = rawdata[0][i]
    #Multi Impulse End
    Gmin=int(max(data[0]))
    ninit=data[0][0]
    params=Parameters()
    if mode < 3:
        params.add('alpha',value=0.3,min=0.0)
    params.add('beta',value=0.05,min=0.0)
    if mode < 4:
        params.add('gamma',value=0.05,min=0.0,max=1.0)
    params.add('G',value=2*Gmin,min=Gmin,max=100000)
    params.add('n',value=ninit,vary=False)
    
    #SpikeM
    if mode == 5:
        params.add('eps', value=0.0, min=0.0)

    #Single Impulse
    if mode == 1:
        params.add('lambdaval',value=lambdaval,min=0,max=lambdaval+1)
        params.add('delta',value=deltaval,vary=False)
    
    #Multi Impulse Begin
    if mode == 0:
        if peaks > 5:
            print 'Too many peaks.(' + str(peaks) + ')'
        for i in range(peaks):
            params.add('lambdaval'+str(i), value=lambdaval[i], min=0, max=lambdaval[i]+1)
            params.add('delta'+str(i), value=deltaval[i], vary=False)
    #Multi Impulse End
    #print x
    #print data
    
    #Single Impulse
    if mode > 0:
        result=minimize(fcn2minSingleImpulse,params,args=(x,data,mode))
    else:
    #Multi Impulse
        result=minimize(fcn2minMultiImpulse,params,args=(x,data,peaks))
    return result
    def main(self, print_to_screen):
        """ Loop over all our A-Ci measured curves and fit the Farquhar model
        parameters to this data

        For each fitgroup, Jmax-vs-T and then Vcmax-vs-T are fitted
        independently, forward-run, and written to the output file.

        Parameters
        ----------
        print_to_screen : logical
            print fitting result to screen? Default is no!
        """
        all_data = self.read_data(self.infname, infile_type="meas")
        fp = self.open_output_files(self.ofname)
        wr = self.write_file_hdr(fp, self.header)

        # Loop over all the measured data and fit the model params.
        for id in np.unique(all_data["fitgroup"]):
            data = all_data[np.where(all_data["fitgroup"] == id)]

            # Fit Jmax vs T first
            # NOTE(review): engine= is the legacy lmfit keyword (method= in
            # newer releases) — confirm the pinned lmfit version.
            params = self.setup_model_params(peaked=self.peaked)
            result = minimize(self.residual, params, engine="leastsq", args=(data, data["Jmax"]))
            if print_to_screen:
                self.print_fit_to_screen(result)

            # Did we resolve the error bars during the fit? If yes then
            # move onto the next A-Ci curve
            if result.errorbars:
                self.succes_count += 1

            (peak_fit) = self.forward_run(result, data)
            if self.peaked:
                Topt = self.calc_Topt(result.params["Hd"].value, result.params["Ea"].value, result.params["delS"].value)
            else:
                Topt = -9999.9  # not calculated

            self.report_fits(wr, result, data, data["Jmax"], peak_fit, "Jmax", Topt, id)

            # Fit Vcmax vs T next (fresh parameter set, same procedure)
            params = self.setup_model_params(peaked=self.peaked)
            result = minimize(self.residual, params, engine="leastsq", args=(data, data["Vcmax"]))
            if print_to_screen:
                self.print_fit_to_screen(result)

            # Did we resolve the error bars during the fit? If yes then
            # move onto the next A-Ci curve
            if result.errorbars:
                self.succes_count += 1

            (peak_fit) = self.forward_run(result, data)
            if self.peaked:
                Topt = self.calc_Topt(result.params["Hd"].value, result.params["Ea"].value, result.params["delS"].value)
            else:
                Topt = -9999.9  # not calculated

            self.report_fits(wr, result, data, data["Vcmax"], peak_fit, "Vcmax", Topt, id)

        fp.close()
Beispiel #14
0
def fit(P, model, x, z, data, weights=None, fit_range=None, redchi_marker=None):
	"""Two-stage profile fit: a single-sersic pass followed by a
	two-component (sersic2) pass; returns the lmfit result (and, when
	redchi_marker is given, residuals restricted to [0, redchi_marker])."""
	# NOTE(review): `pars` is prepared (copied and fixed) in both stages but
	# `P` is what is actually passed to lm.minimize — confirm whether `pars`
	# was intended; as written, the fix_params calls have no effect.
	pars = copy_params(P, False)
	fix_params(pars, {'nD':1., 'nB':None, 'ReB':None, 'MeB':None})
	exp_out = lm.minimize(residual, P, args=(sersic, x, z, data, weights, [10, x[-1]]))

	pars = copy_params(exp_out.params, False)
	fix_params(pars, {'nD':1.})
	out = lm.minimize(residual, P, args=(sersic2, x, z, data, weights, fit_range))
	if redchi_marker is not None:
		# total = sersic2(out.params, x, z, data, weights, fit_range, False)
		# Residuals recomputed over [0, redchi_marker] only.
		res_excl = residual(out.params, sersic2, x, z, data, weights, [0, redchi_marker])
		return out, res_excl
	else: return out
Beispiel #15
0
def fit_it(params, args, method='nelder', kwargs=None):
    """ Carries out the fit.

    Parameters
    ----------
    params : lmfit.Parameters() instance
        Call load_params to generate.
    args : tuple
        Arguments to pass to the function to minimize. Must contain a
        wavelength array as first argument, optional second and third argument
        will be interpreted as data and errors, respectively.
        arrays are optional.
    kwargs : tuple
        keyword arguments, will be passed directly to the lmfit.minimize()
        function. See lmfit docs for options.

    Returns
    -------
    result : lmfit.Minimizer() object
    """
    # For testing:
    x = args[0]
    data = args[1]
    errs = args[2]
    earlymodel = build_model(params, x)
    # Now: fitting.
    print params
    try:
        result = lf.minimize(build_model, params, args=args, method=method)
        result = lf.minimize(build_model, params, args=args, method=method)
        result = lf.minimize(build_model, params, args=args, method='lestsq')
    except:
        result = lf.minimize(build_model, params, args=args, method=method)
        result = lf.minimize(build_model, params, args=args, method='leastsq')
    lf.report_errors(params)
    # Now: moar plots
    latemodel = build_model(result.params, x)
    #import matplotlib.pyplot as plt
    #plt.clf()
    #plt.errorbar(x, data, yerr=errs, color='black', label='Data', lw=2.)
    #plt.axhline(y=0., color='black')
    #plt.plot(x, earlymodel, lw=1.6, label='Guess', color='green')
    #plt.plot(x, latemodel, lw=1.6, label='Fit', color='orange')
    #plt.legend(fancybox=True, shadow=True)
    #plt.grid()
    #plt.title('Plot of initial guess and LMfit best fit.')
    #plt.xlabel(u'Wavelegth')
    #plt.ylabel('Flux')
    #plt.show()

    return result
def polar_plot(dataset, fig_name=None, polar=True, ax=None,
               reset_scaling=False,
               fit_mask=None):
    """Polar plot of the mean auger/photo amplitudes with a fitted model.

    Parameters
    ----------
    dataset : object exposing auger_amplitudes, photo_amplitudes, det_factors
    fig_name : matplotlib figure name (used when ax is None)
    polar : create the new axis with a polar projection
    ax : existing axis to draw on; a fresh figure is created when None
    reset_scaling : temporarily replace det_factors with ones and show the
        implied per-detector calibration (restored before returning)
    fit_mask : boolean mask (length 16) selecting the detectors used in the
        model fit; defaults to all detectors

    Returns the matplotlib figure containing the plot.
    """
    # Fixed: the original used a mutable default (np.ones(...)) shared
    # across calls; build the all-True mask per call instead.
    if fit_mask is None:
        fit_mask = np.ones(16, dtype=bool)
    if ax is None:
        fig = plt.figure(fig_name)
        fig.clf()
        ax = fig.add_subplot(111, polar=polar)
    else:
        fig = ax.figure
    phi = np.linspace(0, 2 * np.pi, 16, endpoint=False)
    phi_line = np.linspace(0, 2 * np.pi, 2**10)

    if reset_scaling:
        # Remember the calibration so it can be restored before returning.
        det_factors = dataset.det_factors.copy()
        dataset.det_factors = np.ones_like(det_factors)

    auger = dataset.auger_amplitudes.mean(axis=0)
    photo = dataset.photo_amplitudes.mean(axis=0)

    if reset_scaling:
        # Show the raw points, then rescale both channels so the auger
        # channel peaks at its maximum.
        det_calib = auger.max() / auger
        ax.plot(phi, auger, 'gx')
        auger *= det_calib

        ax.plot(phi, photo, 'rx')
        photo *= det_calib

    ax.plot(phi, auger, 'gs', label='auger')
    ax.plot(phi, photo, 'ro', label='photo')

    # Fit the photo-line model with beta frozen at 2.
    params = cookie_box.initial_params(photo)
    params['beta'].vary = False
    params['beta'].value = 2
    lmfit.minimize(cookie_box.model_function, params,
                   args=(phi[fit_mask], photo[fit_mask]))
    lmfit.report_fit(params)

    ax.plot(phi_line, cookie_box.model_function(params, phi_line),
            '-m', label='{:.1f} % lin {:.1f} deg'.format(
            params['linear'].value*100,
            np.rad2deg(params['tilt'].value)))

    ax.grid(True)
    ax.legend(loc='center', bbox_to_anchor=(0, 0), fontsize='medium')
    plt.tight_layout()

    if reset_scaling:
        dataset.det_factors = det_factors

    return fig
def get_bestd(wb, peak_idx, initial, Dguess=-12):
    """Returns best-fit initial and isotropic D """
    x, y = wb.xy_picker(peak_idx=peak_idx, wholeblock=False, heights_instead=False)
    # One profile length per direction; the same starting log10(D) for all.
    lengths = [wb.profiles[idx].len_microns for idx in range(3)]
    guesses = [Dguess for _ in range(3)]
    # best fit initial and corresponding D
    params = diffusion.params_setup3D(lengths, guesses, wb.time_seconds,
                                      initial=initial, isotropic=True, vinit=False)
    # minimize updates params in place; only the fitted D is needed.
    minimize(func2min, params, args=(x, y),
             kws={'raypaths': wb.raypaths, 'show_plot': False})
    return params['log10Dx'].value
Beispiel #18
0
 def fit(self,initial,k,va,vb,vi):
     """Fit the residual model. For k == 1 run a single minimize pass;
     otherwise run a first pass, build a refreshed guess from the fitted
     values via self.guess, and refit from that guess. The final lmfit
     result is stored in self.result."""
     if k==1:
         self.result = minimize(self.residual,initial) #ftol=1e-6
     else:
         # First pass: minimize refines `initial` in place (legacy lmfit).
         minimize(self.residual,initial)
         A = initial['A'].value
         B = initial['B'].value
         alpha = initial['alpha'].value
         beta = initial['beta'].value
         N_white = initial['N_white'].value
         fc = initial['fc'].value
         i = initial['i'].value
         # Second pass starts from a guess rebuilt out of the first fit.
         p2 = self.guess(A,B,alpha,beta,fc,i,N_white=N_white,va=va,vb=vb,vi=vi)
         self.result = minimize(self.residual,p2)
Beispiel #19
0
def test_simple():
    """Fit a noisy decaying sine wave and check the recovered parameters."""
    # Synthetic data: known decaying sine plus Gaussian noise.
    np.random.seed(1)
    x = np.linspace(0, 15, 301)
    data = (5. * np.sin(2 * x - 0.1) * np.exp(-x*x*0.025) +
            np.random.normal(size=len(x), scale=0.2))

    def fcn2min(params, x, data):
        """Residuals of the decaying-sine model against the data."""
        model = (params['amp'] * np.sin(x * params['omega'] + params['shift'])
                 * np.exp(-x * x * params['decay']))
        return model - data

    # Starting guesses with sensible bounds.
    params = Parameters()
    params.add('amp', value=10, min=0)
    params.add('decay', value=0.1)
    params.add('shift', value=0.0, min=-pi/2., max=pi/2)
    params.add('omega', value=3.0)

    # Fit with the default leastsq method.
    result = minimize(fcn2min, params, args=(x, data))

    # True values, in Parameters insertion order: amp, decay, shift, omega.
    for para, val in zip(result.params.values(), [5, 0.025, -.1, 2]):
        check(para, val)
def herm_gauss_fitting(posn):
    """Fit a Gauss-Hermite profile to the spectrum at pixel (y, x); return
    the spectral-axis value of the fitted peak along with y and x."""
    y, x = posn
    spec = subcube[:, y, x].value
    spec_axis = subcube.spectral_axis.value
    chanwidth = np.abs(spec_axis[1] - spec_axis[0])

    # Initial guesses: amplitude/position from the data maximum, a width
    # of ~30 channels, and no skew or kurtosis.
    p_gh = Parameters()
    p_gh.add('amp', value=spec.max(), vary=True)
    p_gh.add('center', value=spec_axis[spec.argmax()], min=np.min(spec_axis),
             max=np.max(spec_axis))
    p_gh.add('sig', value=30 * chanwidth, min=chanwidth, max=None)
    p_gh.add('skew', value=0, vary=True, min=None, max=None)
    p_gh.add('kurt', value=0, vary=True, min=None, max=None)

    def gausserr_gh(p, x, y):
        # Residuals of the Gauss-Hermite model against the spectrum.
        return gaussfunc_gh(p, x) - y

    fitout_gh = minimize(gausserr_gh, p_gh, args=(spec_axis, spec))
    fit_gh = gaussfunc_gh(fitout_gh.params, spec_axis)

    verbose = False  # flip on for a quick visual check of the fit
    if verbose:
        import matplotlib.pyplot as p
        p.plot(spec_axis, fit_gh)

    return spec_axis[fit_gh.argmax()], y, x
def fit_objective(objective, fit_params, x, data, params, pde=False, **kwargs):
    """Minimize `objective` over fit_params; defaults to least_squares."""
    # Honour an explicit method from the caller, otherwise least_squares.
    kwargs.setdefault('method', 'least_squares')
    return lmfit.minimize(objective, fit_params,
                          args=(x, data, params, pde), **kwargs)
Beispiel #22
0
def fit_truncated(profile, infoDF, fit_result, break_bounds, break_R, fix_brk=False):
	"""Fit a broken (truncated) exponential disc to the bulge-subtracted
	profile. Returns ([[mu01, h1], [mu02, h2]], fitted break radius)."""
	R, I, W = profile.R.values, profile.M.values, profile.M_err_down.values
	# Subtract the previously fitted bulge in flux space, back to mags.
	bulge, _ = F.sersic(fit_result, infoDF.zp, R, comp=True)
	I = F.convert_mag(F.convert_I(I, infoDF.zp) - bulge, infoDF.zp)
	# Keep only valid points beyond the inner break bound.
	mask = (~np.isnan(I)) & (R > break_bounds[0])
	R = R[mask]
	I = I[mask]
	W = W[mask]
	# Weighted residuals of the truncated-disc model.
	res = lambda P, x,y,w,z: (y - trunc_mod(P, x, z)) / w
	
	P = lm.Parameters()
	hmin, hmax = 0.01, 20.0
	mumin, mumax = 14.0, 30.0

	# Outer disc is parameterised directly (mu02, h2); the inner disc is
	# derived through the break radius Rbr and the offsets deltah/deltamu.
	P.add('mu02', value=25., vary=True, min=mumin, max=mumax)
	P.add('h2', value= 5., min=hmin, max=hmax)
	P.add('Rbr', value = break_R, vary=not fix_brk, min=break_bounds[1], max=break_bounds[2])
	P.add('deltah', value=1./300, max=(1./hmin)-(1./hmax), min=(-1./hmin)+(1./hmax))
	P.add('deltamu', expr='1.086 * Rbr * deltah')
	P.add('invh1', expr='(1./h2) + deltah')
	P.add('mu01', expr='mu02 - deltamu')
	# minimize updates P in place (legacy lmfit behaviour).
	result = lm.minimize(res, P, args=(R, I, W, infoDF.zp))
	innerresult = [P['mu01'].value, 1./P['invh1'].value]
	outerresult = [P['mu02'].value, P['h2'].value]
	# lm.report_fit(result.params, show_correl=False)
	return np.array([innerresult, outerresult]), P['Rbr'].value
Beispiel #23
0
 def fit(self, initial):
     """
     Fit using the data and model given at
     instantiation. Parameter initial is a Parameters object
     containing initial values. It is modified by lmfit.
     """
     # Store the full lmfit result; fitted values also live in `initial`.
     self.result = lmfit.minimize(self.residual, initial, method=self.method)
Beispiel #24
0
    def fit(self, data_x, data_y, data_dy, width=None):
        """
        Fit peaks in the data, returns x_axis points, baseline (background)
        and fit (peaks) data points. The parameters of the fit (peaks
        parameters) can be extracted from the params variable.

        Raises
        ------
        ValueError
            If ``self.baseline`` is not 'linear' or 'quadratic'.
            (Previously an unknown baseline crashed later with a NameError
            on ``yb``.)
        """
        self._initialize(data_x, data_y)
        if width is not None:
            self._restrict_width(width[0], width[1])
        result = minimize(self.residual, self.params,
                          args=(data_x, data_y, data_dy))
        self.params = result.params

        # Dense x grid for a smooth rendering of the fitted peaks.
        x = numpy.linspace(data_x[0], data_x[-1], 1000)
        y0 = self.fit_func(self.params, x)

        # The baseline is evaluated on the original data grid.
        if self.baseline == 'linear':
            yb = self._linear(self.params, data_x)
        elif self.baseline == 'quadratic':
            yb = self._quadratic(self.params, data_x)
        else:
            raise ValueError("Unsupported baseline: {!r}".format(self.baseline))

        functions = {'x_axis' : x, 'baseline': yb, 'fit': y0}

        return functions
Beispiel #25
0
def fit_microlensing_event(light_curve, initial_params=None):
    """ Fit a microlensing event to the light curve.

    Parameters
    ----------
    light_curve : object with mjd, mag, error arrays
    initial_params : dict, optional
        Overrides for any of the random initial guesses
        ('tE', 't0', 'u0', 'm0').

    Returns a dict with the fitted Parameters and the lmfit result.
    """
    # Fixed: the original used a mutable default argument ({}), which is
    # shared across calls; use None and build a fresh dict per call.
    if initial_params is None:
        initial_params = {}

    # Guess the event peak near the brightest (minimum-magnitude) epoch,
    # clamping the draw back into the observed time span.
    t0 = np.random.normal(light_curve.mjd[np.argmin(light_curve.mag)], 2.)
    if t0 > light_curve.mjd.max() or t0 < light_curve.mjd.min():
        t0 = light_curve.mjd[np.argmin(light_curve.mag)]

    initial_tE = initial_params.get("tE", np.random.uniform(2, 500))
    initial_t0 = initial_params.get("t0", t0)
    initial_u0 = initial_params.get("u0", 10**np.random.uniform(-3, 0.12))
    initial_m0 = initial_params.get("m0", np.random.normal(np.median(light_curve.mag), 0.5))

    params = Parameters()
    params.add('tE', value=initial_tE, min=2., max=1000.)
    params.add('t0', value=initial_t0, min=light_curve.mjd.min(), max=light_curve.mjd.max())
    params.add('u0', value=initial_u0, min=0.0, max=1.34)
    params.add('m0', value=initial_m0)

    # minimize updates params in place (legacy lmfit API).
    result = minimize(microlensing_error_func, params, args=(light_curve.mjd, light_curve.mag, light_curve.error))

    return {"tE" : params["tE"], \
            "t0" : params["t0"], \
            "u0" : params["u0"], \
            "m0" : params["m0"], \
            "result" : result}
Beispiel #26
0
 def fit_lattice(self):
   """
   Start a non-linear least squared fit for lattice parameters.
   """
   # minimize refines self.latt_par in place; keep the result object too.
   self.lattice = minimize(self._residual_lattice, self.latt_par)    
   # Recompute reciprocal-space coordinates with the fitted lattice.
   self.qx = self._q_x()
   self.qz = self._q_z()
Beispiel #27
0
def NIST_Test(DataSet, method='leastsq', start='start2', plot=True):
    """Fit one NIST StRD reference dataset and check the result.

    Parameters: DataSet (NIST dataset name), method (lmfit fit method),
    start (which NIST start vector to seed from), plot (show data + fit
    when pylab is available).
    Returns True when the fit reproduces the certified values to more
    than two significant digits.
    """
    NISTdata = ReadNistData(DataSet)
    resid, npar, dimx = Models[DataSet]
    y = NISTdata['y']
    x = NISTdata['x']

    # One parameter b1..bN, seeded from the chosen NIST start vector.
    # (Removed unused reads of the certified values/stderrs here; the
    # comparison below uses NISTdata directly.)
    params = Parameters()
    for i in range(npar):
        pname = 'b%i' % (i+1)
        pval1 = NISTdata[start][i]
        params.add(pname, value=pval1)

    myfit = minimize(resid, params, method=method, args=(x,), kws={'y':y})

    # Number of matching significant digits against the certified results.
    digs = Compare_NIST_Results(DataSet, myfit, params, NISTdata)

    if plot and HASPYLAB:
        fit = -resid(params, x, )
        pylab.plot(x, y, 'ro')
        pylab.plot(x, fit, 'k+-')
        pylab.show()

    # Success means at least 3 matching significant digits.
    return digs > 2
def fit_circle(x, y, xc=0.0, yc=0.0):
    """
    Fit a circle to the shape of an arc with coordinates x, y

    Optionally provide initial guesses for the circle parameters:
    xc, yc, Rc
    """
    # Fit the centre; lmfit updates params in place (legacy API).
    params = lmfit.Parameters()
    params.add("xc", value=xc)
    params.add("yc", value=yc)
    lmfit.minimize(model_minus_data, params, args=(x, y))
    lmfit.report_errors(params)
    # The radius follows from the fitted centre and the data points.
    fitted_xc = params["xc"].value
    fitted_yc = params["yc"].value
    radius = Rc_from_data(x, y, fitted_xc, fitted_yc)
    return radius, fitted_xc, fitted_yc
def parab_select(mag, std, sel, it):
    """Select reference stars by sigma-clipping an RMS-vs-magnitude fit.

    Fits the module-level ``residual`` noise model to (mag, std) pairs,
    marks stars whose residual exceeds 3*std of the model residuals as
    False in ``sel``, saves a diagnostic plot, and returns ``sel``.

    Parameters: mag/std are per-star magnitudes and RMS scatter, ``sel``
    is a boolean mask mutated in place, ``it`` labels the output figure.
    NOTE(review): depends on module globals ``residual``, ``param``,
    ``np`` and ``plt``.
    """
    # Keep an untouched copy of std for plotting the discarded stars.
    std_aux = std[:]

    from lmfit import minimize, Parameters, Parameter, report_fit

    params = Parameters()
    params.add('a', value= 0.003)#, min=0.003, max = 0.015)
    params.add('b', value= 1e-12)#, min =1.0e-12, max=1e-7)
    params.add('c', value= 0.5)#,  min =0.3, max =0.7)
    params.add('d', value= 1e-9)#, min =1e-11, max=1e-7)
    params.add('e', value= 1.2)#,  min =0.8, max =1.6)

    # Fit only stars with a valid std (999999 acts as a bad-value flag).
    # NOTE(review): `out` is unused afterwards; the calls below reuse
    # `params`, which only holds the fitted values if this lmfit version
    # updates parameters in place (pre-0.9 behavior) -- confirm.
    out = minimize(residual, params, args=(mag[std < 999999], std[std < 999999]))
    res = residual(params, mag, std)
    # 3-sigma clip on the model residuals of the valid stars.
    sel[abs(res)>3.0*np.std(residual(params, mag[std < 999999], std[std < 999999]))] = False

    fig = plt.figure()
    fig.clf()
    ax = fig.add_subplot(1,1,1)
    ax.errorbar(mag[sel], std[sel], fmt='ok', mec='k', markersize=4, label='Selected stars')
    ax.errorbar(mag[~sel], std_aux[~sel],fmt='or', mec='r', markersize=4, label='Discarded stars')
    # Smooth curve of the fitted noise model across the magnitude range.
    mag_fit = np.linspace(np.sort(mag)[0], np.sort(mag)[-1], 500)
    ax.errorbar(mag_fit, residual(params, mag_fit), label='Fitted error',fmt='-b')
    ax.set_xlabel(r'$\overline{m}$ (mag)')
    ax.set_ylabel(r'$\sigma$')
    # NOTE(review): sentinel here is 9999 while the fit uses 999999 --
    # likely an inconsistency; confirm which flag value is intended.
    ax.set_ylim((std[std<9999].min(), std[std<9999].max()))
    ax.set_yscale('log')
    fig.savefig(param['output_path'] + '/RMSvsMAG/ref_stars_selection_{}.eps'.format(it), bbox_inches='tight', pad_inches=0.05)
    plt.close(fig)
    return sel
Beispiel #30
0
def ruby_fit(data_x, data_y):
    """Fit the ruby model to the spectrum (data_x, data_y).

    Returns the optimized lmfit parameters and the fitted model curve
    evaluated at data_x.
    """
    # Build starting parameters from the data, then minimize the
    # "ruby" objective against the measured spectrum.
    initial = ruby_init(data_x, data_y)
    fit = minimize(objective, initial, args=(data_y, data_x, "ruby"))
    fitted_y = ruby_model(fit.params, data_x)
    return fit.params, fitted_y
def schoolfield_low(Subset, Temp, Trait, n):
    """
     Low temperature inactivation reduced Schoolfield model
     (Mechanistic)

     Optimises parameter values using minimize from the lmfit package,
     restarting `n` times from randomly perturbed initial guesses and
     keeping the best fit by AIC.

     Calls Functions:
     fit_measure(resid_func, out, Temp, Trait)
     calc_AICc(out, Temp)

     Returns a list of:
     Optimised Parameters: B0, E, El, Tl
     BIC
     AIC
     AICc
     Rsquared
     adjusted Rsquared
     (or None when no restart converged)
     """

    # variable values
    # Temp = np.array(Subset.ConTemp_K)
    # Trait = np.array(Subset.OriginalTraitValue)

    # estimated parameters - can change
    B0 = np.array(Subset.B0)[0]
    E = np.array(Subset.E)[0]
    El = np.array(Subset.El)[0]
    Tl = np.array(Subset.Tl)[0]

    # estimated params - cannot change (kept as the centers of the
    # random perturbations applied on each restart)
    B0_orig = B0
    E_orig = E
    El_orig = El
    Tl_orig = Tl

    # temp peak - using as an upper bound for Tl
    Tpeak = np.array(Subset.Tpeak)[0]

    # an initial bestfit list with an arbitrarily large AIC
    # layout: [B0, E, El, Tl, BIC, AIC, AICc] -- index 5 (AIC) is the
    # comparison slot used below
    bestfit = [0, 0, 0, 0, 0, 100000, 0]

    # DNC - Did Not Converge flag
    # this ensures the above "best" does not get returned if none converge
    DNC = True
    #.............................................................................
    # repeat multiple times to get the best convergence
    for i in range(n):
        # this try and except block handles errors (our estimated params may not converge)
        # this ensures the code runs for n times without stopping even if it hits an error
        try:
            # first iteration uses the supplied estimates unchanged;
            # later iterations perturb them randomly
            if i != 0:
                B0 = np.random.normal(B0_orig)
                E = abs(np.random.normal(E_orig))
                El = abs(np.random.normal(El_orig))
                Tl = np.random.normal(Tl_orig)

            # create dictionary of parameters. Can modify attributes of each.
            params = Parameters()
            # add with tuples:(NAME, VALUE, VARY, MIN, MAX, EXPR, BRUTE_STEP)
            params.add_many(("B0", B0, True, 0, 10, None, None),
                            ("E", E, True, 0, 3, None, None),
                            ("El", El, True, 0, 3, None, None),
                            ("Tl", Tl, True, 270, Tpeak, None, None))
            # minimize residuals
            out = minimize(school_low_resids, params, args=(Temp, Trait))
            #...............................................................
            # write error report
            #A = report_fit(out.params)
            #..............................................................
            #...............................................................
            ## store results of best fit (based on aic score)
            if out.aic < bestfit[5]:
                # if try gets to this point, it has converged at least once
                DNC = False
                # calculate goodness of fit measures
                goodness_of_fit = fit_measure(school_low_resids, out, Temp,
                                              Trait)
                # calculate AICc
                AICc = calc_AICc(out, Temp)
                # bestfit takes final params and measures of fit
                bestfit = [
                    out.params["B0"].value, out.params["E"].value,
                    out.params["El"].value, out.params["Tl"].value, out.bic,
                    out.aic, AICc
                ]
                # merge best fit and goodness of fit
                bestfit = bestfit + goodness_of_fit
            # calculate final result to test plot
            #final = Trait + out.residual
        # NOTE(review): intentionally broad best-effort handler -- any
        # failed restart is silently skipped so the loop completes n times
        except Exception as e:
            pass
            #print(e)
        #except IOError:
        #pass

    # print(final)
    # print(bestfit)
    # plt.plot(Temp, Trait, 'o')
    # plt.plot(Temp, final, 'r')
    # plt.show()

    # only return a result if at least one restart converged
    if not DNC:
        return bestfit
    else:
        return None
Beispiel #32
0
def profile_fit(snr, kevs, prfs, epss, r_prfs, mu, eta2=None, B0=None,
    r_trans=0, amp=1, multishift=False, ab_fit=None, mu_free=False, eta2_free=False,
    B0_free=True, rminarc_f = 1.2, model_kws=None, **lmfit_kws):
    """Fit measured thin rim profiles to model profiles, with single
    translation and amplitude shift for profiles at all energies
    (yet another convenience wrapper...)

    The model_kws 'get_prfs', 'irad_adapt', 'rminarc' are set/overriden so that
    profile fitting will work correctly.

    Inputs:
        snr, kevs as usual
        prfs, epss, r_prfs are lists of profiles, errors, r_grids
        mu, B0, eta2 (float) initial guesses
        mu_free, eta2_free (bool) which parameters shall vary in fits?
        ab_fit (float) if specified, allow ab to be a fit parameter
            (if you don't want it to vary, specify via model_kws instead)
            (BUT, enable idamp=True for this to work)
        **lmfit_kws takes extra kwargs for lmfit.minimize (includes lsq kws)

        model_kws={'icut':True, 'verbose':True, 'rminarc':60, ...}
    Output:
        lmfit.Minimizer with fit information / parameters
    """
    assert len(kevs) == len(prfs) == len(epss) == len(r_prfs)

    # Force the model keywords profile fitting depends on.
    if model_kws is None:
        model_kws = {}
    model_kws['get_prfs'] = True
    model_kws['irad_adapt'] = False
    model_kws['get_fwhms'] = False
    model_kws['get_data'] = False

    # require rminarc > r_prfs at all energies, default safety factor 1.2
    rminarc = [max(r_prf) - min(r_prf) for r_prf in r_prfs]
    model_kws['rminarc'] = rminarc_f * np.array(rminarc)

    # Explicit maps replace the previous fragile locals() lookups,
    # which silently break under renaming or inlining.
    init_vals = {'mu': mu, 'B0': B0, 'eta2': eta2}
    vary_flags = {'mu': mu_free, 'B0': B0_free, 'eta2': eta2_free}

    p = lmfit.Parameters()
    for pstr in ['mu', 'B0', 'eta2']:
        pval = init_vals[pstr]
        if pval is None:
            # fall back to the remnant's default initial guess
            pval = snr.par_init[pstr]
        p.add(pstr, value=pval, vary=vary_flags[pstr],
              min=snr.par_lims[pstr][0], max=snr.par_lims[pstr][1])

    if ab_fit is not None:
        p.add('ab', value=ab_fit, vary=True, min=0, max=1e3)

    if multishift:
        # one translation/amplitude pair per energy band
        assert len(kevs) == len(r_trans) == len(amp)
        for n in range(len(amp)):  # range works on both Py2/Py3 (was xrange)
            p.add('r_trans_{:d}'.format(n), value=r_trans[n], vary=True)
            p.add('amp_{:d}'.format(n), value=amp[n], vary=True)
    else:
        # single shared translation/amplitude shift
        p.add('r_trans', value=r_trans, vary=True)
        p.add('amp', value=amp, min=0, vary=True)

    res = lmfit.minimize(prf_objective_func, p,
                         args=(prfs, epss, r_prfs, multishift, kevs, snr),
                         kws=model_kws, **lmfit_kws)
    return res
Beispiel #33
0
def fitbandy(t,
             cf,
             err=None,
             mode='standard',
             modes=1,
             init=None,
             fix=None,
             doplot=False,
             marker='o',
             qv=None,
             h_plot=None,
             ax=None,
             output='pars',
             xl=None,
             ylim=None,
             color=None):
    """Fit a sum of `modes` stretched-exponential components (V2a) to a
    g2 correlation function.

    Parameters
    ----------
    t, cf : array-like
        Delay times and measured correlation values.
    err : array-like, optional
        Uncertainties on cf; used as fit weights when given.
    mode : str
        Option string; may contain 'off' (vary baseline 'a') and
        'semilogx' (log-scaled weighting).
    modes : int
        Number of relaxation modes.
    init : dict, optional
        Initial guesses {name: (value, min, max)}; missing entries get
        defaults.  (Fixed: previously a mutable default argument.)
    fix : dict, optional
        Parameter values held fixed during the fit.
    doplot : False or str
        Plot option string ('fit', 'data', 'leg', 'legf', 'legd',
        'log', 'report', ...).
    output : str
        'pars' -> (3*modes+1, 2) array of values and stderrs,
        'fit'  -> the lmfit result object.

    NOTE(review): `ylim` is accepted but never used -- confirm intent.
    """
    # BUG FIX: `init={}` was a mutable default that is mutated below,
    # leaking guesses between successive calls. Use None as sentinel.
    if init is None:
        init = {}

    #make initial guess for parameters
    for i in range(modes):
        for s in 'tgb':
            vn = s + '{}'.format(i)
            if vn not in init.keys():
                if s == 't':
                    # spread the mode timescales across the t range
                    t0 = np.percentile(t, 100 / (modes + 1) * (i + 1))
                    init[vn] = (t0, t0 / 100, t0 * 100)
                elif s == 'g':
                    init[vn] = (1, .2, 1.8)
                elif s == 'b':
                    init[vn] = (.1, 0, 1)
    if 'a' not in init.keys():
        init['a'] = (0., 0., .2)

    #initialize parameters
    pars = lmfit.Parameters()
    for i in range(modes):
        for s in 'tgb':
            vn = s + '{}'.format(i)
            pars.add(vn,
                     value=init[vn][0],
                     min=init[vn][1],
                     max=init[vn][2],
                     vary=1)
    # baseline offset is fixed unless 'off' is requested in mode
    pars.add('a',
             value=init['a'][0],
             min=init['a'][1],
             max=init['a'][2],
             vary=0)

    if 'off' in mode:
        pars['a'].set(vary=1)

    if fix is not None:
        for vn in fix.keys():
            pars[vn].set(value=fix[vn], vary=0)

    # fit weights: 1/err when errors are available, else 1/t
    if err is not None:
        wgt = 1. / err
    else:
        wgt = 1. / t

    if 'semilogx' in mode:
        if err is not None:
            wgt = 1. / np.log10(err)
        else:
            wgt = 1. / np.log10(t)

    # define residual function
    def res(pars, x, data=None, eps=None):
        """2D Residual function to minimize
        """
        v = pars.valuesdict()
        # the baseline 'a' enters only through the first mode
        for i in range(modes):
            if i == 0:
                model = V2a(x,
                            t=v['t{}'.format(i)],
                            b=v['b{}'.format(i)],
                            g=v['g{}'.format(i)],
                            a=v['a'])
            else:
                model += V2a(x,
                             t=v['t{}'.format(i)],
                             b=v['b{}'.format(i)],
                             g=v['g{}'.format(i)],
                             a=0)
        if eps is not None:
            resid = np.abs(data - model) / np.abs(eps)
        else:
            resid = np.abs(data - model)
        return resid

    out = lmfit.minimize(res,
                         pars,
                         args=(t, ),
                         kws={
                             'data': cf,
                             'eps': err
                         },
                         nan_policy='omit')

    # do all the plotting
    if doplot:
        if xl is None:
            if ax is None:
                ax = plt.gca()
            xl = ax.get_xlim()
            if xl[0] == 0:
                xl = (np.min(t) * 0.9, np.max(t) * 1.1)
        xf = np.logspace(np.log10(xl[0]), np.log10(xl[1]), 50)

        # evaluate the fitted model on a smooth log-spaced grid
        for i in range(modes):
            if i == 0:
                g2f = V2a(xf,
                          t=out.params['t{}'.format(i)].value,
                          b=out.params['b{}'.format(i)].value,
                          g=out.params['g{}'.format(i)].value,
                          a=out.params['a'].value)
            else:
                g2f += V2a(xf,
                           t=out.params['t{}'.format(i)].value,
                           b=out.params['b{}'.format(i)].value,
                           g=out.params['g{}'.format(i)].value,
                           a=0)

        if 'legf' in doplot:
            pard = {
                't': r'$t: {:.2g}\mathrm{{s}},\,$',
                'g': r'$\gamma: {:.2g},\,$',
                'b': r'$\mathrm{{b}}: {:.2g},\,$',
                'a': r'$\mathrm{{a}}: {:.3g},\,$'
            }
            labstr_fit = ''
            for i in range(modes):
                for vn in 'tgba':
                    if vn == 'a' and i > 0:
                        continue
                    elif vn == 'a' and i == 0:
                        vnn = 'a'
                    else:
                        vnn = vn + str(i)
                    if vnn in out.var_names:
                        labstr_fit += pard[vn].format(out.params[vnn].value)
                    elif fix is not None and vnn in fix.keys():
                        labstr_fit += 'fix ' + pard[vn].format(fix[vnn])
                    else:
                        labstr_fit += pard[vn].format(0)
        else:
            labstr_fit = ''

        if 'legd' in doplot:
            if qv is not None:
                labstr_data = r'$\mathsf{{q}} = {:.3f}\,\mathsf{{nm}}^{{-1}}$'.format(
                    qv * 10)
            else:
                labstr_data = 'data'
        else:
            labstr_data = ''

        pl = []
        if 'fit' in doplot:
            if 'log' in doplot:
                # NOTE(review): lmfit MinimizerResult normally has no
                # `best_values` attribute (that belongs to Model fits);
                # this branch may raise -- confirm lmfit version.
                g2f = np.log(np.sqrt(out.best_values['b'] / (g2f - 1)))
                xf = xf / out.best_values['t']
            pl.append(ax.plot(xf, g2f, '-', label=labstr_fit, linewidth=1))

        if 'data' in doplot:
            if 'log' in doplot:
                t = t / out.best_values['t']
                cf = np.sqrt((cf - 1) / out.best_values['b'])
                # NOTE(review): in the 'log' case the data are plotted
                # twice (here and below) -- confirm this is intended.
                pl.append(ax.plot(t, cf, marker, markersize=2.5))
            pl.append(ax.plot(t, cf, marker, label=labstr_data,
                              markersize=2.5))

        if color is None:
            if h_plot is not None:
                color = h_plot.get_color()
            elif 'data' in doplot:
                color = pl[0][0].get_color()
            else:
                color = 'gray'

        if 'log' in doplot:
            ax.set_xscale('linear')
            ax.set_yscale('linear')
        else:
            ax.set_xscale('log')
            ax.set_yscale('linear')

        for p in pl:
            p[0].set_color(color)

        if 'leg' in doplot:
            ax.legend()

        if 'report' in doplot:
            # NOTE(review): MinimizerResult has no fit_report() method in
            # most lmfit versions; lmfit.fit_report(out) is the function
            # form -- confirm before relying on this branch.
            print(out.fit_report())

        ax.set_xlabel(r'delay time $\tau$ [s]')
        ax.set_ylabel(r'$g_2(\tau)$')

        niceplot(ax)
    if output == 'pars':
        pars_arr = np.zeros((3 * modes + 1, 2))
        for i, vn in enumerate(pars.keys()):
            pars_arr[i, 0] = out.params[vn].value
            # NOTE(review): stderr can be None when uncertainties were
            # not estimated; the multiplication would then raise.
            pars_arr[i, 1] = 1. * out.params[vn].stderr
        return pars_arr
    elif output == 'fit':
        return out
Beispiel #34
0
    ax.set_ylabel("Events/bin")
    n, bins, patches = ax.hist(ys, bins=bin_num)

    if (fit_suppress):
        pass
    else:
        y_data = n
        x_data = (bins[1:] + bins[0:-1]) / 2

        init_param = Parameters()
        init_param.add("area", value=1E3)
        init_param.add("mean", value=0.0)
        init_param.add("sigma", value=0.24)

        fit_result = minimize(residual,
                              init_param,
                              args=(x_data, y_data, np.sqrt(y_data)))
        print("\n----------- Show fit reasult -----------")
        report_fit(fit_result)
        fit_mean = fit_result.params.valuesdict()["mean"]
        fit_mean_err = np.sqrt(fit_result.covar.diagonal()[1])
        fit_sigma = fit_result.params.valuesdict()["sigma"]
        fit_sigma_err = np.sqrt(fit_result.covar.diagonal()[2])

        plot_x = np.linspace(x_data[0], x_data[-1], 100)
        ax.plot(
            plot_x,
            func(fit_result.params, plot_x),
            label=
            "fit result (mean:{0:1.2f}+/-{1:1.2f}[mV], sigma:{2:1.2f}+/-{3:1.2f}[mV])"
            .format(fit_mean, fit_mean_err, fit_sigma, fit_sigma_err),
Beispiel #35
0
def fit_2comp(data_low,
              data_upp,
              vaxis_11,
              vaxis_22,
              ra_range=[0, 1],
              dec_range=[0, 1],
              cutoff=0.010,
              varyv=2,
              writefits=False,
              interactive=False,
              mode='single'):
    """
	fits_2comp(data_low, data_upp)
	Use Monte Carlo approach to derive the temperature based on two transitions.
	Also locate two velocity components, if there are any, and derive temperatures respectively.
	The velocity components are pre-identified based on GBT data by Zoey.
	"""
    if mode == 'single':
        trot = pylab.np.zeros([naxisy, naxisx])
        trot_error = pylab.np.zeros([naxisy, naxisx])
        linew11 = pylab.np.zeros([naxisy, naxisx])
        #linew22 = pylab.np.zeros([naxisy,naxisx])
        #linewratio = pylab.np.zeros([naxisy,naxisx])
        peakv = pylab.np.zeros([naxisy, naxisx])
    elif mode == 'double':
        trot = pylab.np.zeros([2, naxisy, naxisx])
        trot_error = pylab.np.zeros([2, naxisy, naxisx])
        linew11 = pylab.np.zeros([2, naxisy, naxisx])
        #linew22 = pylab.np.zeros([2,naxisy,naxisx])
        #linewratio = pylab.np.zeros([2,naxisy,naxisx])
        peakv = pylab.np.zeros([2, naxisy, naxisx])

    #lookup = fits.open('intrinsiclw_lookup.fits')
    #metalu = lookup[0].data
    #hdrlu = lookup[0].header
    #crval1 = hdrlu['CRVAL1']
    #cdelt1 = hdrlu['CDELT1']
    #crpix1 = hdrlu['CRPIX1']
    #crval2 = hdrlu['CRVAL2']
    #cdelt2 = hdrlu['CDELT2']
    #crpix2 = hdrlu['CRPIX2']

    if interactive:
        pylab.ion()
        f = pylab.figure(figsize=(14, 8))
        ax = f.add_subplot(111)

    for i in dec_range:
        for j in ra_range:
            #print 'Start to fit pixel (%d,%d)' % (j,i)
            spec_low = data_low[10:-10, i, j]
            spec_upp = data_upp[25:-20, i, j]

            vaxis_low = vaxis_11[10:-10]
            vaxis_upp = vaxis_22[25:-20]

            spec_low = spec_low[::-1]
            spec_upp = spec_upp[::-1]
            vaxis_low = vaxis_low[::-1]
            vaxis_upp = vaxis_upp[::-1]

            spec = pylab.np.concatenate((spec_low, spec_upp))
            vaxis = pylab.np.concatenate((vaxis_low, vaxis_upp + 40.0))

            #if interactive:
            #pylab.plot(vaxis, spec, 'k+', label='Original')
            #pylab.legend()
            #pylab.show()

            unsatisfied = True
            while unsatisfied:
                if interactive:
                    f.clear()
                    pylab.plot(vaxis, spec, 'k-', label='Original')
                    cutoff_line = [cutoff] * len(vaxis)
                    cutoff_line_minus = [-1.0 * cutoff] * len(vaxis)
                    pylab.plot(vaxis, cutoff_line, 'r-')
                    pylab.plot(vaxis, cutoff_line_minus, 'r-')
                    #clickvalue = []
                    if mode == 'single':
                        cid = f.canvas.mpl_connect('button_press_event',
                                                   onclick)
                        raw_input('Click on the plot to select Vlsr...')
                        print clickvalue
                        if len(clickvalue) >= 1:
                            print 'Please select at least one velocity! The last one will be used.'
                            vlsr1 = clickvalue[-1]
                        elif len(clickvalue) == 0:
                            vlsr1 = 0.0
                        print 'The Vlsr is %0.2f' % vlsr1
                        raw_input('Press any key to start fitting...')
                        f.canvas.mpl_disconnect(cid)
                        vlsr2 = 0.0
                    elif mode == 'double':
                        cid = f.canvas.mpl_connect('button_press_event',
                                                   onclick)
                        raw_input('Click on the plot to select Vlsrs...')
                        print clickvalue
                        if len(clickvalue) >= 2:
                            print 'Please select at least two velocities! The last two will be used.'
                            vlsr1, vlsr2 = clickvalue[-2], clickvalue[-1]
                        elif len(clickvalue) == 1:
                            vlsr1 = clickvalue[-1]
                            vlsr2 = 0.0
                        elif len(clickvalue) == 0:
                            vlsr1, vlsr2 = 0.0, 0.0
                        print 'Or input two velocities manually:'
                        manualv = raw_input()
                        manualv = manualv.split()
                        if len(manualv) == 2:
                            vlsr1, vlsr2 = pylab.np.float_(manualv)
                        else:
                            print 'Invalid input...'
                        print 'The two Vlsrs are %0.2f km/s and %0.2f km/s.' % (
                            vlsr1, vlsr2)
                        raw_input('Press any key to start fitting...')
                        f.canvas.mpl_disconnect(cid)
                    else:
                        vlsr1, vlsr2 = 0.0, 0.0
                else:
                    if mode == 'single':
                        if spec_low.max() >= cutoff:
                            vlsr1 = __xcorrelate__(spec_low, vaxis_low)
                            # If vlsr is out of range:
                            if vlsr1 <= 82 or vlsr1 >= 92:
                                vlsr1 = 0.0
                            # If the intensity at vlsr is smaller than cutoff
                            if spec_low[pylab.np.abs(
                                    vaxis_low - vlsr1).argmin()] <= cutoff:
                                vlsr1 = 0.0
                            # If the intensity at both satellites is smaller than cutoff
                            if spec_low[pylab.np.abs(
                                    vaxis_low - vlsr1 + 7.47385).argmin(
                                    )] <= cutoff and spec_low[pylab.np.abs(
                                        vaxis_low - vlsr1 -
                                        7.56923).argmin()] <= cutoff:
                                vlsr1 = 0.0
                            # If the intensity of (2,2) is smaller than cutoff
                            if spec_upp[pylab.np.abs(
                                    vaxis_upp - vlsr1).argmin()] <= cutoff:
                                vlsr1 = 0.0
                        else:
                            vlsr1 = 0.0
                        vlsr2 = 0.0
                    elif mode == 'double':
                        vlsr1, vlsr2 = 85.4, 87.4
                    else:
                        vlsr1, vlsr2 = 0.0, 0.0

                # 17 parameters, but only 7 are indenpendent
                params = Parameters()
                if vlsr1 != 0:
                    params.add('peaki', value=0.030, min=0, max=0.050)
                    params.add('tau11', value=1.0, min=0, max=10.0)
                    if varyv > 0:
                        params.add('peakv',
                                   value=vlsr1,
                                   min=vlsr1 - varyv * onevpix,
                                   max=vlsr1 + varyv * onevpix)
                    elif varyv == 0:
                        params.add('peakv', value=vlsr1, vary=False)
                    params.add('sigmav', value=1.0, min=0, max=5.0)
                    params.add('peaki_s1',
                               expr="peaki*(1-exp(-tau11_s1))/(1-exp(-tau11))")
                    params.add('tau11_s1', expr='tau11*0.278')
                    params.add('peakv_s1', expr='peakv-7.47385')
                    params.add('sigmav_s1', expr='sigmav')
                    params.add('peaki_s2', expr='peaki_s1')
                    params.add('tau11_s2', expr='tau11_s1')
                    params.add('peakv_s2', expr='peakv+7.56923')
                    params.add('sigmav_s2', expr='sigmav')
                    params.add('peaki_upp',
                               expr='peaki*(1-exp(-tau22))/(1-exp(-tau11))',
                               min=0,
                               max=0.050)
                    params.add('tau22', value=1.0, min=0, max=10.0)
                    params.add('peakv_upp', expr='peakv+40.0')
                    params.add('sigmav_upp', expr='sigmav')
                    params.add('Trot',
                               value=0.0,
                               expr='-41.5/log(0.282*tau22/tau11)',
                               min=0)
                # another 17 parameters for the second component
                if vlsr2 != 0:
                    params.add('peaki_c2', value=0.030, min=0, max=0.050)
                    params.add('tau11_c2', value=1.0, min=0, max=10.0)
                    if varyv > 0:
                        params.add('peakv_c2',
                                   value=vlsr2,
                                   min=vlsr2 - varyv * onevpix,
                                   max=vlsr2 + varyv * onevpix)
                    elif varyv == 0:
                        params.add('peakv_c2', value=vlsr2, vary=False)
                    params.add('sigmav_c2', value=1.0, min=0, max=5.0)
                    params.add(
                        'peaki_s1_c2',
                        expr="peaki_c2*(1-exp(-tau11_s1_c2))/(1-exp(-tau11_c2))"
                    )
                    params.add('tau11_s1_c2', expr='tau11_c2*0.278')
                    params.add('peakv_s1_c2', expr='peakv_c2-7.47385')
                    params.add('sigmav_s1_c2', expr='sigmav_c2')
                    params.add('peaki_s2_c2', expr='peaki_s1_c2')
                    params.add('tau11_s2_c2', expr='tau11_s1_c2')
                    params.add('peakv_s2_c2', expr='peakv_c2+7.56923')
                    params.add('sigmav_s2_c2', expr='sigmav_c2')
                    params.add(
                        'peaki_upp_c2',
                        expr='peaki_c2*(1-exp(-tau22_c2))/(1-exp(-tau11_c2))',
                        min=0,
                        max=0.050)
                    params.add('tau22_c2', value=1.0, min=0, max=10.0)
                    params.add('peakv_upp_c2', expr='peakv_c2+40.0')
                    params.add('sigmav_upp_c2', expr="sigmav_c2")
                    params.add('Trot_c2',
                               value=0.0,
                               expr='-41.5/log(0.282*tau22_c2/tau11_c2)',
                               min=0)

                # do fit, here with leastsq model
                if vlsr1 != 0 and vlsr2 != 0:
                    try:
                        result = minimize(__model_11_2c__,
                                          params,
                                          args=(vaxis, spec))
                    except RuntimeError:
                        print 'Pixel (%d, %d) fitting failed...' % (j, i)
                        continue
                elif vlsr1 != 0 or vlsr2 != 0:
                    try:
                        result = minimize(__model_11__,
                                          params,
                                          args=(vaxis, spec))
                    except RuntimeError:
                        print 'Pixel (%d, %d) fitting failed...' % (j, i)
                        continue
                else:
                    unsatisfied = False
                    continue
                #print params['Trot'].value

                if interactive:
                    final = spec + result.residual
                    report_fit(params)
                    pylab.plot(vaxis, final, 'r', label='Fitting result')
                    if vlsr1 != 0 and vlsr2 != 0:
                        final_c1 = __model_11__(params, vaxis, spec) + spec
                        final_c2 = final - final_c1
                        pylab.plot(vaxis,
                                   final_c1,
                                   'm--',
                                   label='1st component',
                                   linewidth=2)
                        pylab.plot(vaxis,
                                   final_c2,
                                   'c--',
                                   label='2nd component',
                                   linewidth=2)
                        pylab.text(0.05,
                                   0.9,
                                   '1st Trot=%.1f K' % params['Trot'].value,
                                   transform=ax.transAxes,
                                   color='m')
                        pylab.text(0.05,
                                   0.8,
                                   '2nd Trot=%.1f K' % params['Trot_c2'].value,
                                   transform=ax.transAxes,
                                   color='c')
                    elif vlsr1 != 0 or vlsr2 != 0:
                        pylab.text(0.05,
                                   0.9,
                                   'Trot=%.1f K' % params['Trot'].value,
                                   transform=ax.transAxes,
                                   color='r')
                    pylab.legend()
                    pylab.show()
                    print 'Is the fitting ok? y/n'
                    yn = raw_input()
                    if yn == 'y':
                        unsatisfied = False
                    else:
                        unsatisfied = True
                    #raw_input('Press any key to continue...')
                    f.clear()
                else:
                    unsatisfied = False

                if writefits:
                    # write the temperature
                    if mode == 'single':
                        trot[i, j] = params['Trot'].value
                        trot[pylab.np.where(trot > 50.)] = 50.
                        trot[pylab.np.where(trot < 0.)] = 0.
                        trot_error[i, j] = params['Trot'].stderr
                        trot_error[pylab.np.where(trot_error > 10.)] = 10.
                        trot_error[pylab.np.where(trot_error < 0.)] = 0.
                        linew11[i, j] = params['sigmav'].value
                        peakv[i, j] = params['peakv'].value
                    if mode == 'double':
                        trot[:, i, j] = [
                            params['Trot'].value, params['Trot_c2'].value
                        ]
                        trot[pylab.np.where(trot > 50.)] = 50.
                        trot[pylab.np.where(trot < 0.)] = 0.
                        trot_error[:, i, j] = [
                            params['Trot'].stderr, params['Trot_c2'].value
                        ]
                        trot_error[pylab.np.where(trot_error > 10.)] = 10.
                        trot_error[pylab.np.where(trot_error < 0.)] = 0.
                        linew11[:, i, j] = [
                            params['sigmav'].value, params['sigmav_c2'].value
                        ]
                        peakv[:, i, j] = [
                            params['peakv'].value, params['peakv_c2'].value
                        ]

                    #col_index = np.argmin(params['tau11'].value)
                    #linew_intrinsic = (np.where(linew11))

    if writefits:
        hdrt = hdr1
        hdrt.remove('naxis3')
        hdrt.remove('crpix3')
        hdrt.remove('cdelt3')
        hdrt.remove('crval3')
        hdrt.remove('ctype3')
        hdrt['naxis'] = 2
        hdrt['bunit'] = 'K'
        fits.writeto('Trot_fit.fits', trot, header=hdrt, clobber=True)
        fits.writeto('Trot_error.fits', trot_error, header=hdrt, clobber=True)
        hdrt['bunit'] = 'km/s'
        fits.writeto('linew_low.fits', linew11, header=hdrt, clobber=True)
        fits.writeto('peakv.fits', peakv, header=hdrt, clobber=True)
Beispiel #36
0
    def fit_stress_model_dod(self):
        """Fit the depth-of-discharge (DoD) cycle-degradation model.

        For every CSV file in self.data_file_paths: read (DoD,
        cycle-count) data, infer the cell chemistry from the second
        column name, and least-squares fit the chemistry-specific
        stress model with lmfit.  Appends the optimized coefficients
        k_d1..k_d3 to self.opt_params and the chemistry string to
        self.chemistry; also accumulates the raw curves in
        self.cyc_nb_v / self.dod_v.

        Exits via sys.exit for an unsupported chemistry.
        """
        print("---- DoD degradation model ------")

        for file in self.data_file_paths:
            # imports data
            data_cyc = read_csv(file)
            dod = data_cyc['DoD']
            cyc_nb = data_cyc.iloc[:, 1]

            self.cyc_nb_v.append(cyc_nb)
            self.dod_v.append(dod)

            # chemistry is encoded in the second column name, e.g. "x_y_LFP"
            chemistry = data_cyc.columns[1].split('_')[2]
            # stress per cycle: 20 % capacity loss spread over cyc_nb cycles
            stress_data = 0.2 * np.divide(1, cyc_nb)

            # parameters definition
            params = Parameters()

            # the initial values below come from several tests; they need
            # to be not too far from the solution for the fit to converge
            if chemistry == "LFP":
                params.add('k_d1', value=1e5)
                params.add('k_d2', value=-0.5)
                params.add('k_d3', value=0, vary=False)
            elif chemistry == "NMC":
                params.add('k_d1', value=1e+04)
                params.add('k_d2', value=-1)
                params.add('k_d3', value=3e+02)
            elif chemistry == "LMO":
                params.add('k_d1', value=1e+05)
                params.add('k_d2', value=-0.5)
                params.add('k_d3', value=-1e+05)

            if chemistry == "LFP":
                residuals = residuals_exp_deg_model_after_1_dod
            # BUG FIX: the original condition was
            # `chemistry == "LMO" or "NMC"`, which is always truthy
            # ("NMC" is a non-empty string), so the unsupported-chemistry
            # exit below was unreachable.
            elif chemistry in ("LMO", "NMC"):
                residuals = residuals_emp_deg_model_after_1_dod
            else:
                sys.exit(
                    "The chemistry is incorrect or not supported by the model")

            # least square minimisation; leastsq seems to work better than
            # least_squares for those types of equations
            opt_param = minimize(fcn=residuals,
                                 params=params,
                                 args=(dod, stress_data, 1),
                                 method='leastsq')

            # stores optimisation output
            self.opt_params.append(opt_param.params['k_d1'].value)
            self.opt_params.append(opt_param.params['k_d2'].value)
            self.opt_params.append(opt_param.params['k_d3'].value)

            # stores chemistry
            self.chemistry.append(chemistry)

            # prints results (k_d3 is fixed at 0 for LFP, so omit it)
            list_opt_params = [
                '{:.2e}'.format(opt_param.params['k_d1'].value),
                '{:.2e}'.format(opt_param.params['k_d2'].value),
                '{:.2e}'.format(opt_param.params['k_d3'].value)
            ]

            print("Chemistry:", chemistry)
            print("k_d1 = ", list_opt_params[0])
            print("k_d2 = ", list_opt_params[1])
            if chemistry != "LFP":
                print("k_d3 = ", list_opt_params[2])
Beispiel #37
0
        #     x0 = input('Enter approximate x0 value! ')
        #     x0 = float(x0) - 10
        selector = SelectFromCollection(ax, pts)

        input('Press any key to accept selected points')
        data2 = selector.xys[selector.ind]
        # print("Selected points:")
        # print(selector.xys[selector.ind])
        # print(type(selector.xys[selector.ind]))
        selector.disconnect()

        par = lm.Parameters()
        par.add('k', value=1)
        par.add('n', value=1)

        result1 = lm.minimize(premica, par, args=(data[:, 0], data[:, 1]))
        lm.report_fit(result1.params)
        tem = []
        for i in result1.params:
            tem.append(((str(
                result1.params[i]).split(',')[1]).split('=')[1]).split('+/-'))
        print(tem)
        # print(tem[2][0])
        final = data[:, 1] + result1.residual

        result2 = lm.minimize(premica, par, args=(data2[:, 0], data2[:, 1]))
        lm.report_fit(result2.params)
        tem2 = []
        for i in result2.params:
            tem2.append(((str(
                result2.params[i]).split(',')[1]).split('=')[1]).split('+/-'))
def fit_pg_likelihood(prob,
                      npix,
                      err=None,
                      kv=None,
                      init=None,
                      fix=None,
                      method='Nelder-mead'):
    """ Fit the Poisson-Gamma distribution using the likelihood ratio approach.

    Parameters
    ----------
    prob : ndarray
        2D probability array; row 1 is taken as the <kb> axis and rows
        ``kv + 2`` as the distributions to fit.
    npix : int
        Number of pixels; scales the likelihood-ratio statistic.
    err : ndarray, optional
        Errors; converted to inverse-variance weights and passed to the
        residual as ``eps`` (currently unused in the residual — TODO
        confirm intent).
    kv : ndarray of int, optional
        <k> values to fit; defaults to all available rows.
    init : dict, optional
        Initial guesses as ``{name: (value, min, max)}``.
    fix : dict, optional
        Parameter values to hold fixed, ``{name: value}``.
    method : str
        lmfit minimization method.

    Returns
    -------
    pars_arr : ndarray
        Fitted parameter stored as 1/M.
    gof : ndarray
        Goodness-of-fit statistics ``[chisqr, redchi, bic, aic]``.
    out : lmfit.minimizer.MinimizerResult
        The raw minimizer result.
    report : str
        ``lmfit.fit_report(out)``.
    """
    # BUG FIX: the original signature used a mutable default `init={}`,
    # which is mutated below and therefore shared across calls.
    if init is None:
        init = {}
    if kv is None:
        kv = np.arange(prob.shape[0] - 2)

    kb = prob[1]
    prob = prob[kv + 2]

    # make initial guess for parameters
    for vn in ['M']:
        if vn not in init:
            if vn == 'M':
                init[vn] = (4, 1, None)

    # initialize fit parameters
    pars = lmfit.Parameters()
    pars.add('M', value=init['M'][0], min=init['M'][1], max=init['M'][2])

    if fix is not None:
        for vn in fix:
            pars[vn].set(value=fix[vn], vary=0)

    # convert errors to inverse-variance weights
    if err is not None:
        err = np.abs(err)
        wgt = err.copy()
        wgt[wgt > 0] = 1. / wgt[wgt > 0]**2
    else:
        wgt = None

    # independent variable is <k>
    def likelihood_ratio(pars, prob, eps=None):
        """Scalar -2*log-likelihood-ratio statistic to minimize."""
        v = pars.valuesdict()

        chi2 = 0
        for i in range(kv.size):
            probi = prob[i]
            ind = np.where(probi)[0]  # only bins with non-zero probability
            pg = poisgam(kb[ind], v['M'], kv[i], ind_var='kb')
            chi2 += np.sum(probi[ind] * np.log(pg / probi[ind]))
        chi2 *= -2 * npix
        return chi2

    out = lmfit.minimize(likelihood_ratio,
                         pars,
                         args=(prob, ),
                         kws={'eps': wgt},
                         method=method,
                         nan_policy='omit')

    # report the reciprocal of the fitted parameter (1/M)
    pars_arr = np.zeros((1, 2))
    for i, vn in enumerate(['M']):
        pars_arr[i, 0] = 1. / out.params[vn].value
        # pars_arr[i,1] = pars_arr[i,0]**2*out.params[vn].stderr
    gof = np.array([out.chisqr, out.redchi, out.bic, out.aic])

    return pars_arr, gof, out, lmfit.fit_report(out)
Beispiel #39
0
    max=expect_em_line_pos +
    30)  #Starting position calculated from redshift value of galaxy.
spaxel_params.add(
    "FWHM", value=2.8,
    vary=False)  #galaxy_info["LSF"], vary=False) # Line Spread Function
spaxel_params.add("Gauss_bkg", value=0.01)
spaxel_params.add("Gauss_grad", value=0.0001)

# Loop through spectra from list format of data.
if fit_spaxel == True:
    for y, x in tqdm(zip(non_zero_index[0], non_zero_index[1]),
                     total=len(non_zero_index[0])):
        get_data_residuals = []
        fit_results = minimize(spaxel_by_spaxel,
                               spaxel_params,
                               args=(wavelength, res_cube[:, y, x],
                                     np.repeat(np.nanstd(res_cube[:, y, x], 0),
                                               len(wavelength)), z),
                               nan_policy="propagate")
        pos = y * x_data + x
        gauss_A[pos] = fit_results.params["Amp"].value
        obj_residuals[pos] = fit_results.residual
        data_residuals[pos] = fit_results.residual * np.repeat(
            np.nanstd(res_cube[:, y, x], 0), len(wavelength))
        g_bkg[pos] = fit_results.params["Gauss_bkg"].value
        g_grad[pos] = fit_results.params["Gauss_grad"].value

    list_of_rN = np.array([np.nanstd(d_r) for d_r in data_residuals])

    A_rN = np.array([A / rN for A, rN in zip(gauss_A, list_of_rN)])
    gauss_F = np.array(gauss_A) * np.sqrt(2 * np.pi) * 1.19
Beispiel #40
0
def chemical_potential(n_e: u.m ** -3, T: u.K):
    r"""
    Calculate the ideal chemical potential.

    Parameters
    ----------
    n_e: ~astropy.units.Quantity
        Electron number density.

    T : ~astropy.units.Quantity
        The temperature.

    Returns
    -------
    beta_mu: ~astropy.units.Quantity
        The dimensionless ideal chemical potential. That is the ratio of
        the ideal chemical potential to the thermal energy.

    Raises
    ------
    TypeError
        If argument is not a `~astropy.units.Quantity`.

    ~astropy.units.UnitConversionError
        If argument is in incorrect units.

    ValueError
        If argument contains invalid values.

    Warns
    -----
    ~astropy.units.UnitsWarning
        If units are not provided, SI units are assumed.

    Notes
    -----
    The ideal chemical potential is given by [1]_:

    .. math::
        \chi_a = I_{1/2}(\beta \mu_a^{ideal})

    where :math:`\chi` is the degeneracy parameter, :math:`I_{1/2}` is the
    Fermi integral with order 1/2, :math:`\beta` is the inverse thermal
    energy :math:`\beta = 1/(k_B T)`, and :math:`\mu_a^{ideal}`
    is the ideal chemical potential.

    The definition for the ideal chemical potential is implicit, so it must
    be obtained numerically by solving for the Fermi integral for values
    of chemical potential approaching the degeneracy parameter. Since values
    returned from the Fermi_integral are complex, a nonlinear
    Levenberg-Marquardt least squares method is used to iteratively approach
    a value of :math:`\mu` which minimizes
    :math:`I_{1/2}(\beta \mu_a^{ideal}) - \chi_a`

    This function returns :math:`\beta \mu^{ideal}` the dimensionless
    ideal chemical potential.

    Warning: at present this function is limited to relatively small
    arguments due to limitations in the `~mpmath` package's implementation
    of `~mpmath.polylog`, which PlasmaPy uses in calculating the Fermi
    integral.

    References
    ----------
    .. [1] Bonitz, Michael. Quantum kinetic theory. Stuttgart: Teubner, 1998.

    Example
    -------
    >>> from astropy import units as u
    >>> chemical_potential(n_e=1e21*u.cm**-3,T=11000*u.K)
    <Quantity 2.00039985e-12>

    """
    # deBroglie wavelength
    lambdaDB = thermal_deBroglie_wavelength(T)
    # degeneracy parameter chi = n_e * lambda_dB^3
    degen = (n_e * lambdaDB ** 3).to(u.dimensionless_unscaled)

    def residual(params, data, eps_data):
        """Residual function for fitting parameters to Fermi_integral."""
        alpha = params['alpha'].value
        # note that alpha = mu / (k_B * T)
        model = Fermi_integral(alpha, 0.5)
        complexResidue = (data - model) / eps_data
        # BUG FIX: `np.float` was deprecated in NumPy 1.20 and removed in
        # 1.24; view the complex residual as its float64 components instead.
        return complexResidue.view(np.float64)

    # setting parameters for fitting along with bounds
    alphaGuess = 1 * u.dimensionless_unscaled
    params = Parameters()
    params.add('alpha', value=alphaGuess, min=0.0)
    # calling minimize function from lmfit to fit by minimizing the residual
    data = np.array([degen])  # result of Fermi_integral - degen should be zero
    eps_data = np.array([1e-15])  # numerical error
    minFit = minimize(residual, params, args=(data, eps_data))
    beta_mu = minFit.params['alpha'].value * u.dimensionless_unscaled
    return beta_mu
Beispiel #41
0
def fit_force_extension(e, f, bps, model_func=None, params=None, min_e=None,
                        max_e=None, max_f=None, max_e_dyn_L0=True,
                        verbose=False, return_model_func=False, **kwargs):
    """
    Fit a model function (e.g. a worm-like chain) to DNA force-extension
    data.

    Parameters
    ----------
    e : 1D numpy.ndarray of type float
        The extension (m).
    f : 1D numpy.ndarray of type float
        The force (N).
    bps : float
        Number of basepairs of the DNA; used (via `get_DNA_fit_params`) to
        derive the start value of the contour length L_0. An upper estimate
        works if the exact number is unknown.
    model_func : func, optional
        Model with signature model_func(e, **params) returning f. Defaults
        to `worm_like_chain`.
    params : lmfit.Parameters, optional
        Fit parameters; defaults to `get_DNA_fit_params(bps, **kwargs)`.
    min_e, max_e : float, optional
        Extension window (m) used to select the data to fit.
    max_f : float, optional
        Maximum force (N) used to select the data (default 15e-12, up to
        where the wlc model is valid).
    max_e_dyn_L0 : bool
        Dynamically cap the fitted extension range at the current contour
        length 'L_0' during each residual evaluation (never exceeding
        `max_e`).
    verbose : bool
        Print the fit report and DNA-related info.
    return_model_func : bool
        Also return the model function that was used.
    **kwargs
        Forwarded to `get_DNA_fit_params`.

    Returns
    -------
    lmfit.minimizer.MinimizerResult
        If `return_model_func` is False.
    (lmfit.minimizer.MinimizerResult, model_func)
        If `return_model_func` is True.
    """
    # Fall back to the default model and parameter set when not supplied.
    if not model_func:
        model_func = worm_like_chain
    if not params:
        params = get_DNA_fit_params(bps, **kwargs)

    # Restrict the data to the region where the model is meaningful:
    # avoid nan values and forces above ~15 pN.
    e, f = crop_x_y(e, f, min_x=min_e, max_x=max_e,
                    max_y=(max_f or 15e-12), include_bounds=False)

    # The residual function compares model_func(e, **params) against f.
    # Optionally crop the variates to the current contour length 'L_0'
    # on every evaluation.
    kws = {'max_x_param': 'L_0'} if max_e_dyn_L0 else {}
    fit_result = minimize(residual, params, args=(model_func, e, f), kws=kws)

    if verbose:
        print(fit_report(fit_result))
        print('[[DNA related info]]')
        print('    Number of base-pairs: {:.0f}'.format(
            np.round(fit_result.params['L_0'] / params['L_0'] * bps)))

    return (fit_result, model_func) if return_model_func else fit_result
def briere(Subset, Temp, Trait, n):
    """
     Briere model (Phenomenological)

     Optimises parameter values using minimize from the lmfit package,
     restarting up to `n` times from gaussian-resampled start values and
     keeping the attempt with the lowest AIC.

     Calls Functions:
     fit_measure(resid_func, out, Temp, Trait)
     calc_AICc(out, Temp)

     Returns a list of:
     Optimised Parameters: B0, T0, Tm
     BIC
     AIC
     AICc
     Rsquared
     adjusted Rsquared
     or None if no attempt converged.
     """
    # starting parameter estimates (taken from the first row of the subset)
    B0_orig = np.array(Subset.b_B0)[0]
    T0_orig = np.array(Subset.b_T0)[0]
    Tm_orig = np.array(Subset.b_Tm)[0]

    # temperature of the trait peak in Celsius, used to bound T0 and Tm
    Tpeak = np.array(Subset.Tpeak)[0] - 273.15

    bestfit = None     # result list of the best (lowest-AIC) fit so far
    # BUG FIX: the original compared `out.aic < bestfit[5]`, but after the
    # first successful fit, index 5 of the 6-element result list held AICc,
    # not AIC, so subsequent attempts were judged against the wrong
    # statistic. Track the best AIC in its own variable instead.
    best_aic = np.inf

    B0, T0, Tm = B0_orig, T0_orig, Tm_orig
    for i in range(n):
        try:
            # resample start values for every attempt after the first
            if i != 0:
                B0 = np.random.normal(B0_orig)
                T0 = np.random.normal(T0_orig)
                Tm = np.random.normal(Tm_orig)

            params = Parameters()
            # add with tuples: (NAME VALUE VARY MIN  MAX  EXPR  BRUTE_STEP)
            params.add_many(("B0", B0, True, 0, 1, None, None),
                            ("T0", T0, True, -10, Tpeak, None, None),
                            ("Tm", Tm, True, Tpeak, 100, None, None))

            # minimize residuals
            out = minimize(briere_resids, params, args=(Temp, Trait))

            # keep the attempt with the lowest AIC
            if out.aic < best_aic:
                best_aic = out.aic
                AICc = calc_AICc(out, Temp)
                goodness_of_fit = fit_measure(briere_resids, out, Temp, Trait)
                bestfit = [
                    out.params["B0"].value, out.params["T0"].value,
                    out.params["Tm"].value, out.bic, out.aic, AICc
                ] + goodness_of_fit
        # NOTE(review): only IOError is caught, as in the original; fit
        # failures raising other exceptions will propagate — confirm intent.
        except IOError:
            pass

    # None signals that no attempt converged
    return bestfit
def cubic(Subset, FinalID, Temp, Trait, n):
    """
     General Cubic Polynomial model (Phenomenological).

     Optimises parameter values using minimize from the lmfit package,
     restarting up to `n` times from gaussian-resampled start values and
     keeping the attempt with the lowest AIC.

     Calls Functions:
     fit_measure(resid_func, out, Temp, Trait)
     calc_AICc(out, Temp)

     Returns a list of:
     FinalID
     Optimised Parameters: B0, B1, B2, B3
     BIC
     AIC
     AICc
     Rsquared
     adjusted Rsquared
     or None if no attempt converged.
     """
    # starting parameter estimates (taken from the first row of the subset)
    B0_orig = np.array(Subset.c_B0)[0]
    B1_orig = np.array(Subset.c_B1)[0]
    B2_orig = np.array(Subset.c_B2)[0]
    B3_orig = np.array(Subset.c_B3)[0]

    bestfit = None     # result list of the best (lowest-AIC) fit so far
    best_aic = np.inf  # AIC of that fit, tracked explicitly for clarity

    B0, B1, B2, B3 = B0_orig, B1_orig, B2_orig, B3_orig
    for i in range(n):
        try:
            # resample start values for every attempt after the first
            if i != 0:
                B0 = np.random.normal(B0_orig)
                B1 = np.random.normal(B1_orig)
                B2 = np.random.normal(B2_orig)
                B3 = np.random.normal(B3_orig)

            params = Parameters()
            # add with tuples: (NAME VALUE VARY MIN  MAX  EXPR  BRUTE_STEP)
            params.add_many(("B0", B0, True, None, None, None, None),
                            ("B1", B1, True, None, None, None, None),
                            ("B2", B2, True, None, None, None, None),
                            ("B3", B3, True, None, None, None, None))

            # minimize residuals
            out = minimize(cubic_resids, params, args=(Temp, Trait))

            # keep the attempt with the lowest AIC
            if out.aic < best_aic:
                best_aic = out.aic
                AICc = calc_AICc(out, Temp)
                goodness_of_fit = fit_measure(cubic_resids, out, Temp, Trait)
                bestfit = [
                    FinalID, out.params["B0"].value, out.params["B1"].value,
                    out.params["B2"].value, out.params["B3"].value, out.bic,
                    out.aic, AICc
                ] + goodness_of_fit
        # BUG FIX: the original bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; catch only ordinary exceptions.
        except Exception:
            print("Error")

    # None signals that no attempt converged
    return bestfit
Beispiel #44
0
def ChrisFit_2GB_ColCorr_LMfit(params,
                               wavelengths,
                               fluxes,
                               errors,
                               instruments,
                               limits=[False]):
    """Colour-corrected two-grey-body chi-squared terms, for use with lmfit.

    Performs an initial two-grey-body fit (Powell method), divides each
    observed flux by the colour correction implied by that fit, then
    evaluates the two-grey-body model at the fitted parameters and returns
    the per-band chi-squared array of the corrected fluxes against it.

    NOTE(review): `limits=[False]` is a mutable default argument; it is
    only read here, never mutated, so it is harmless in this function.
    NOTE(review): `c`, `h` and `k` are read from module scope — presumably
    the speed of light, Planck and Boltzmann constants in SI units; confirm.
    """
    # Perform initial fit, to produce colour-corrected fluxes
    corr_result = lmfit.minimize(ChrisFit_2GB_LMfit,
                                 params,
                                 args=(wavelengths, fluxes, errors),
                                 method='powell')
    params = corr_result.params

    # Loop over each wavelength, and use initial fit to colour-correct fluxes
    fluxes_corr = np.empty([wavelengths.shape[0]])
    for w in range(0, wavelengths.shape[0]):
        corr_output = ChrisFit_ColourCorrection(wavelengths[w],
                                                instruments[w],
                                                params['T_w'].value,
                                                params['T_c'].value,
                                                params['M_w'].value,
                                                params['M_c'].value,
                                                beta=params['beta'].value)
        fluxes_corr[w] = np.divide(fluxes[w], corr_output[0])

    # Extract parameters (temperatures, masses, distance, opacity law)
    T_w = params['T_w'].value
    T_c = params['T_c'].value
    M_w = params['M_w'].value
    M_c = params['M_c'].value
    D = params['D'].value
    lambda_0 = params['lambda_0'].value
    kappa_0 = params['kappa_0'].value
    beta = params['beta'].value

    # Convert wavelength to frequency, and find kappa
    # (power-law dust opacity normalized at nu_0)
    nu = np.divide(c, wavelengths)
    nu_0 = np.divide(c, lambda_0)
    kappa_nu = kappa_0 * (np.divide(nu, nu_0))**beta

    # Evaluate hot dust Planck function
    B_w_prefactor = np.divide((2.0 * h * nu**3.0), c**2.0)
    B_w_e = np.divide((h * nu), (k * T_w))
    B_w = B_w_prefactor * (np.e**B_w_e - 1)**-1.0

    # Evaluate cold dust Planck function
    B_c_prefactor = np.divide((2.0 * h * nu**3.0), c**2.0)
    B_c_e = np.divide((h * nu), (k * T_c))
    B_c = B_c_prefactor * (np.e**B_c_e - 1)**-1.0

    # Calculate fluxes of fit, and find chi_squared
    # (2E30 kg presumably converts solar masses to kg; 3.26 * 9.5E15 m
    #  presumably converts parsecs to metres — TODO confirm)
    M_w_kilograms = M_w * 2E30
    M_c_kilograms = M_c * 2E30
    D_metres = D * 3.26 * 9.5E15
    fit = (1E26 * kappa_nu * D_metres**-2.0 * M_w_kilograms *
           B_w) + (1E26 * kappa_nu * D_metres**-2.0 * M_c_kilograms * B_c)
    chi_squared = (np.divide((fluxes_corr - fit)**2.0, errors**2.0))

    # Adjust chi-squared to account for limits: bands flagged as limits
    # contribute zero when the model falls below the measured flux
    if (True in limits) == True:
        chi_squared[np.where((np.array(limits) == True)
                             & (fit - fluxes < 0))] = 0.0

    return chi_squared
Beispiel #45
0
data_yerr = data1_yerr

#pyplot.plot(data1_x,data1_y)
#pyplot.plot(data2_x,data2_y)
#pyplot.plot(data_x,data_y)

params = lmfit.Parameters()
params.add('amplitude', value=119786)
params.add('gamma', value=30.63)
params.add('offset', value=0.104)
params.add('beta', value=0.76)
params.add('Omega', value=30.704, vary=False)
params.add('B', value=1.68, max=4.0)
params.add('center', value=160)

result = lmfit.minimize(micro_fit, params, args=(data_x, data_y, data1_yerr))

fit_values = data_y + result.residual

lmfit.report_errors(params)

normalization = params['amplitude'] / (params['gamma'] / 2.0)**2

pyplot.errorbar(data_x - params['center'],
                data_y / normalization,
                data_yerr / normalization,
                linestyle='None',
                markersize=3.0,
                fmt='o')
pyplot.plot(
    np.arange(100, 220, 0.1) - params['center'],
Beispiel #46
0
    def fit_RCRa_2fc(self, base_f, base_y):
        """Two-stage fit of the R-C-Ra circuit values around the crossover.

        Locates the crossover frequency where the real and imaginary parts
        of `base_y` are closest, then fits twice: first on data below the
        crossover (with 'r' and 'c' free), then below twice the crossover
        (with 'ra' free as well). The fitted values are written back into
        ``self.values`` after each stage.
        """
        # Find the crossover frequency; only look above 3e8 Hz to avoid
        # detecting the crossover associated with the leak.
        high_f = base_f > 3e8
        closeness = np.abs(base_y[high_f].real - base_y[high_f].imag)
        fc = base_f[high_f][np.argmin(closeness)]
        if fc < 3.1e8:
            fc = 1e9

        # Two fitting windows: up to fc, then up to 2*fc.
        windows = [base_f < fc, base_f < fc * 2]

        for stage, window in enumerate(windows):
            omega = 2. * np.pi * base_f[window]
            admittance = base_y[window]

            # Create fit parameters; 'ra' is only freed in the second stage.
            fit_params = Parameters()
            fit_params.add('r',
                           value=self.values['r'],
                           min=100,
                           max=100e3,
                           vary=stage in (0, 1))
            fit_params.add('c',
                           value=self.values['c'],
                           min=1e-15,
                           max=1e-12,
                           vary=stage in (0, 1))
            fit_params.add('ra',
                           value=self.values['ra'],
                           min=1,
                           max=10e3,
                           vary=stage == 1)

            # The remaining circuit elements are held fixed.
            # TODO: could generate these automatically
            fit_params.add('l',
                           value=self.values['l'],
                           min=0.,
                           max=1e-6,
                           vary=False)
            fit_params.add('gl',
                           value=self.values['gl'],
                           min=0.,
                           max=1000e-6,
                           vary=False)
            fit_params.add('ca',
                           value=self.values['ca'],
                           min=0.,
                           max=1e-12,
                           vary=False)
            fit_params.add('rasup',
                           value=self.values['rasup'],
                           min=0.,
                           max=10e3,
                           vary=False)

            # Execute the fit and store the results for the next stage.
            result = minimize(self.objective, fit_params,
                              args=(omega, admittance))
            for name in self.values:
                self.values[name] = result.params[name].value
Beispiel #47
0
def fitT1IRabs(params, TI, data):
    """Fit signal-vs-TI data to the T1IRabs model.

    Minimizes the T1IRabs residual with lmfit and returns the fitted
    curve, reconstructed as data + residual.
    """
    fit = lmfit.minimize(T1IRabs, params, args=(TI, data))
    return data + fit.residual
Beispiel #48
0
def ChrisFit(source_name,
             wavelengths,
             fluxes,
             errors,
             instruments,
             components,
             distance,
             limits=[False],
             beta=2.0,
             kappa_0=0.051,
             lambda_0=500E-6,
             guess_mass=False,
             redshift=0.0,
             col_corr=True,
             min_temp=5.0,
             plotting=True,
             plot_pdf=True,
             bootstrapping=False,
             verbose=True,
             algorithm='leastsq',
             output_dir=False,
             percentile=False):

    # Announce the name of the source being processed
    if verbose == True:
        print(' ')
        print('Fitting source: ' + str(source_name))

    # Set boolean depending upon number of components in fit
    components = int(components)
    if components == 1:
        warm_boolean = False
    elif components == 2:
        warm_boolean = True

    # Deal with free or fixed beta
    if beta == 'free':
        beta_boolean = True
        beta = 2.0
    else:
        beta_boolean = False
        beta = float(beta)

    # Ensure input is in numpy arrays where necessary
    wavelengths = np.array(wavelengths)
    fluxes = np.array(fluxes)
    errors = np.array(errors)

    # Use provided guess mass, or crudely estimate sensible initial guess for dust mass
    if guess_mass != False:
        M_c_guess = float(guess_mass)
    else:
        M_c_guess = 5E-9 * distance**2.0

    # Package parameters for initial fit
    params = lmfit.Parameters()
    params.add('beta', value=beta, vary=beta_boolean)
    params.add('D', value=distance, vary=False)
    params.add('lambda_0', value=lambda_0, vary=False)
    params.add('kappa_0', value=kappa_0, vary=False)
    params.add('components', value=components, vary=False, min=1, max=2)
    if warm_boolean == False:
        params.add('T_c', value=20.0, vary=True, min=10.0, max=200.0)
        params.add('T_w', value=0, vary=False)
        params.add('M_c',
                   value=M_c_guess,
                   vary=True,
                   min=np.divide(M_c_guess, 1E6))
        params.add('M_w', value=0, vary=False)
    elif warm_boolean == True:
        params.add('T_c', value=20.0, vary=True, min=min_temp, max=200.0)
        params.add('T_offset', value=30.0, vary=True, min=0.0, max=50.0)
        params.add('T_w', expr='T_c + T_offset')
        params.add('M_c',
                   value=M_c_guess,
                   vary=True,
                   min=np.divide(M_c_guess, 1E4))
        params.add('M_ratio', value=1E-2, vary=True, min=1E-6, max=1E4)
        params.add('M_w', expr='M_c * M_ratio')

    # Perform initial, using LMfit
    if verbose == True:
        print('Performing initial fit...')
    if algorithm == 'leastsq':
        result = lmfit.minimize(ChrisFit_2GB_LMfit,
                                params,
                                args=(wavelengths, fluxes, errors, limits),
                                method=algorithm,
                                maxfev=1000000,
                                xtol=1E-14,
                                ftol=1E-14)
    else:
        result = lmfit.minimize(ChrisFit_2GB_LMfit,
                                params,
                                args=(wavelengths, fluxes, errors, limits),
                                method=algorithm)

    # If required, use initial fit to perform colour corrections, and then re-fit to corrected fluxes
    if col_corr == False:
        fluxes_corr = np.copy(fluxes)
    if col_corr == True:
        if verbose == True:
            print('Performing colour-corrected fit...')

        # Loop over each wavelength, and use initial fit to colour-correct fluxes
        fluxes_corr = np.empty([wavelengths.shape[0]])
        for w in range(0, wavelengths.shape[0]):
            corr_output = ChrisFit_ColourCorrection(
                wavelengths[w],
                instruments[w],
                result.params['T_w'].value,
                result.params['T_c'].value,
                result.params['M_w'].value,
                result.params['M_c'].value,
                beta=result.params['beta'].value)
            fluxes_corr[w] = np.divide(fluxes[w], corr_output[0])
            #print 'Band: '+str(1E6*wavelengths[w])+'um;   Correction: '+str(100.0*(1.0-(1.0/corr_output[0])))[:6]+'%'

        # Perform colour-corrected fit, using LMfit
        if algorithm == 'leastsq':
            result = lmfit.minimize(ChrisFit_2GB_LMfit,
                                    result.params,
                                    args=(wavelengths, fluxes_corr, errors,
                                          limits),
                                    method=algorithm,
                                    maxfev=1000000,
                                    xtol=1E-14,
                                    ftol=1E-14)
        else:
            result = lmfit.minimize(ChrisFit_2GB_LMfit,
                                    result.params,
                                    args=(wavelengths, fluxes_corr, errors,
                                          limits),
                                    method=algorithm)

    # Extract best-fit values, and make sure that warm and cold components are ordered correctly
    beta = result.params['beta'].value
    T_order, M_both = np.array([
        result.params['T_w'].value, result.params['T_c'].value
    ]), np.array([result.params['M_w'].value, result.params['M_c'].value])
    if components == 1:
        T_w = np.min(T_order)
        T_c = np.max(T_order)
        M_w = 0.0
        M_c = M_both[np.where(T_order == T_c)][0]
        M_d = M_c
    elif components == 2:
        T_w = np.max(T_order)
        T_c = np.min(T_order)
        M_w = M_both[np.where(T_order == T_w)][0]
        M_c = M_both[np.where(T_order == T_c)][0]
        M_d = M_w + M_c
    if verbose == True:
        print(' ')
        print('Best-fit cold dust temp of: ' + str(T_c)[0:5] + ' K')
        print('Best-fit cold dust mass of: ' + str(np.log10(M_c))[0:5] +
              ' log10 Msol')
        if components == 2:
            print('Best-fit warm dust temp of: ' + str(T_w)[0:5] + ' K')
            print('Best-fit warm dust mass of: ' + str(np.log10(M_w))[0:5] +
                  ' log10 Msol')
        if beta == 'free':
            print('Best-fit beta of: ' + str(beta)[0:4])

    # Calculate chi-squared of fit
    fit = ChrisFit_2GB_Flux(wavelengths,
                            T_w,
                            T_c,
                            M_w,
                            M_c,
                            distance,
                            kappa_0=kappa_0,
                            lambda_0=lambda_0,
                            beta=beta)
    chi_squared = (np.divide((fluxes_corr - fit)**2.0, errors**2.0))
    if (True in limits) == True:
        chi_squared[np.where((np.array(limits) == True)
                             & (fit - fluxes < 0))] = 0.0

    # Calculate residuals
    residuals = np.zeros([wavelengths.shape[0]])
    for w in range(0, wavelengths.shape[0]):
        residuals[w] = (ChrisFit_2GB_Flux(wavelengths[w],
                                          T_w,
                                          T_c,
                                          M_w,
                                          M_c,
                                          distance,
                                          kappa_0=kappa_0,
                                          lambda_0=lambda_0,
                                          beta=beta) - fluxes_corr[w]
                        )  # / errors[w]

    # Commence bootstrapping, if required
    if bootstrapping != False:
        if verbose == True:
            print(' ')
            print('Bootstrapping fit...')
        if str(bootstrapping) == 'True':
            bs_iter = 1000
            bootstrapping = True
        else:
            bs_iter = int(bootstrapping)
            bootstrapping = True

        # Generate peturbation values
        bs_peturbs = np.zeros([fluxes_corr.shape[0], bs_iter])
        for w in range(0, fluxes_corr.shape[0]):
            bs_peturbs[w, :] = np.array(
                np.random.normal(loc=0.0, scale=errors[w], size=bs_iter))

        # Start bootstrap iterations
        bs_T_w_array, bs_T_c_array, bs_M_w_array, bs_M_c_array, bs_beta_array = np.zeros(
            [bs_iter]), np.zeros([bs_iter]), np.zeros([bs_iter]), np.zeros(
                [bs_iter]), np.zeros([bs_iter])
        for b in range(0, bs_iter):
            if np.mod(b, 100) == 0:
                if verbose == True:
                    print('Bootstrap iterations: ' + str(b) + ' - ' +
                          str(b + 100))

            # Peturb corrected fluxes within errors
            bs_fluxes = np.copy(fluxes_corr)
            for w in range(0, fluxes_corr.shape[0]):
                bs_fluxes[w] += bs_peturbs[w, b]

            # Repackage variables for bootstrap
            bs_params = lmfit.Parameters()
            bs_params.add('beta', value=beta, vary=beta_boolean)
            bs_params.add('D', value=distance, vary=False)
            bs_params.add('lambda_0', value=lambda_0, vary=False)
            bs_params.add('kappa_0', value=kappa_0, vary=False)
            bs_params.add('components',
                          value=components,
                          vary=False,
                          min=1,
                          max=2)
            if warm_boolean == False:
                bs_params.add('T_c',
                              value=20.0,
                              vary=True,
                              min=10.0,
                              max=200.0)
                bs_params.add('T_w', value=0, vary=False)
                bs_params.add('M_c',
                              value=M_c_guess,
                              vary=True,
                              min=np.divide(M_c_guess, 1E6))
                bs_params.add('M_w', value=0, vary=False)
            elif warm_boolean == True:
                bs_params.add('T_c',
                              value=20.0,
                              vary=True,
                              min=min_temp,
                              max=200.0)
                bs_params.add('T_offset',
                              value=30.0,
                              vary=True,
                              min=10.0,
                              max=50.0)
                bs_params.add('T_w', expr='T_c + T_offset')
                bs_params.add('M_c',
                              value=M_c_guess,
                              vary=True,
                              min=np.divide(M_c_guess, 1E6))
                bs_params.add('M_ratio',
                              value=1E-2,
                              vary=True,
                              min=1E-6,
                              max=1.0)
                bs_params.add('M_w', expr='M_c * M_ratio')

            # Perform bootstrap fit
            if algorithm == 'leastsq':
                bs_result = lmfit.minimize(ChrisFit_2GB_LMfit,
                                           bs_params,
                                           args=(wavelengths, bs_fluxes,
                                                 errors, limits),
                                           method=algorithm,
                                           maxfev=1000,
                                           xtol=2E-9,
                                           ftol=2E-9)
            else:
                bs_result = lmfit.minimize(ChrisFit_2GB_LMfit,
                                           bs_params,
                                           args=(wavelengths, bs_fluxes,
                                                 errors, limits),
                                           method=algorithm)

            # Retrieve output values, and ensure they are in correct order
            bs_T_order, bs_M_order = np.array([
                bs_result.params['T_w'].value, bs_result.params['T_c'].value
            ]), np.array(
                [bs_result.params['M_w'].value, bs_result.params['M_c'].value])
            if components == 1:
                bs_T_w_array[b] = np.min(bs_T_order)
                bs_T_c_array[b] = np.max(bs_T_order)
                bs_M_w_array[b] = 0.0
                bs_M_c_array[b] = bs_M_order[np.where(
                    bs_T_order == bs_T_c_array[b])][0]
            elif components == 2:
                bs_T_w_array[b] = np.max(bs_T_order)
                bs_T_c_array[b] = np.min(bs_T_order)
                bs_M_w_array[b] = bs_M_order[np.where(
                    bs_T_order == bs_T_w_array[b])][0]
                bs_M_c_array[b] = bs_M_order[np.where(
                    bs_T_order == bs_T_c_array[b])][0]
            bs_beta_array[b] = bs_result.params['beta'].value

        # Sigma-clip temperature and beta output
        bs_T_w_clip = ChrisFuncs.SigmaClip(bs_T_w_array, median=True)
        bs_T_w_sigma, bs_T_w_mu = bs_T_w_clip[0], bs_T_w_clip[1]
        bs_T_c_clip = ChrisFuncs.SigmaClip(bs_T_c_array, median=True)
        bs_T_c_sigma, bs_T_c_mu = bs_T_c_clip[0], bs_T_c_clip[1]
        bs_beta_clip = ChrisFuncs.SigmaClip(bs_beta_array, median=True)
        bs_beta_sigma, bs_beta_mu = bs_beta_clip[0], bs_beta_clip[1]

        # Find sigma-clip bootstrapped dust masses
        bs_M_w_sigma = ChrisFuncs.SigmaClip(np.log10(bs_M_w_array),
                                            median=True)[0]
        bs_M_c_sigma = ChrisFuncs.SigmaClip(np.log10(bs_M_c_array),
                                            median=True)[0]
        bs_M_w_mu = 10.0**ChrisFuncs.SigmaClip(np.log10(bs_M_w_array),
                                               median=True)[1]
        bs_M_c_mu = 10.0**ChrisFuncs.SigmaClip(np.log10(bs_M_c_array),
                                               median=True)[1]
        bs_M_d_mu = 10.0**ChrisFuncs.SigmaClip(np.log10(bs_M_w_array +
                                                        bs_M_c_array),
                                               median=True)[1]
        if components == 1:
            bs_M_d_sigma = bs_M_c_sigma
        elif components == 2:
            bs_M_d_sigma = ChrisFuncs.SigmaClip(np.log10(bs_M_w_array +
                                                         bs_M_c_array),
                                                median=True)[0]

        # Calculate uncertainties relative to best-fit values
        bs_T_w_sigma_up, bs_T_w_sigma_down = np.abs(T_w - (
            bs_T_w_mu + bs_T_w_sigma)), np.abs(T_w -
                                               (bs_T_w_mu - bs_T_w_sigma))
        bs_T_c_sigma_up, bs_T_c_sigma_down = np.abs(T_c - (
            bs_T_c_mu + bs_T_c_sigma)), np.abs(T_c -
                                               (bs_T_c_mu - bs_T_c_sigma))
        bs_M_w_sigma_up, bs_M_w_sigma_down = np.abs(M_w - (
            bs_M_w_mu + bs_M_w_sigma)), np.abs(M_w -
                                               (bs_M_w_mu - bs_M_w_sigma))
        bs_M_c_sigma_up, bs_M_c_sigma_down = np.abs(M_c - (
            bs_M_c_mu + bs_M_c_sigma)), np.abs(M_c -
                                               (bs_M_c_mu - bs_M_c_sigma))
        bs_M_d_sigma_up, bs_M_d_sigma_down = np.abs(M_d - (
            bs_M_d_mu + bs_M_d_sigma)), np.abs(M_d -
                                               (bs_M_d_mu - bs_M_d_sigma))
        bs_beta_sigma_up, bs_beta_sigma_down = np.abs(beta - (
            bs_beta_mu + bs_beta_sigma)), np.abs(beta -
                                                 (bs_beta_mu - bs_beta_sigma))

        # Translate mass uncertainties into log space
        bs_M_w_sigma_log = ChrisFuncs.SigmaClip(np.log10(bs_M_w_array),
                                                median=True)[0]
        bs_M_c_sigma_log = ChrisFuncs.SigmaClip(np.log10(bs_M_c_array),
                                                median=True)[0]
        if components == 1:
            bs_M_d_sigma_log = bs_M_c_sigma_log
        elif components == 2:
            bs_M_d_sigma_log = ChrisFuncs.SigmaClip(np.log10(bs_M_w_array +
                                                             bs_M_c_array),
                                                    median=True)[0]

        # Calculate uncertainties as a percentile, if requested
        if percentile > 0:
            bs_T_w_sigma = (np.sort(
                np.abs(ChrisFuncs.Nanless(bs_T_w_array) - T_w)))[int(
                    (np.divide(float(percentile), 100.0)) *
                    ChrisFuncs.Nanless(bs_T_w_array).shape[0])]
            bs_T_c_sigma = (np.sort(
                np.abs(ChrisFuncs.Nanless(bs_T_c_array) - T_c)))[int(
                    (np.divide(float(percentile), 100.0)) *
                    ChrisFuncs.Nanless(bs_T_c_array).shape[0])]
            bs_M_w_sigma = (np.sort(
                np.abs(ChrisFuncs.Nanless(bs_M_w_array) - M_w)))[int(
                    (np.divide(float(percentile), 100.0)) *
                    ChrisFuncs.Nanless(bs_M_w_array).shape[0])]
            bs_M_c_sigma = (np.sort(
                np.abs(ChrisFuncs.Nanless(bs_M_c_array) - M_c)))[int(
                    (np.divide(float(percentile), 100.0)) *
                    ChrisFuncs.Nanless(bs_M_c_array).shape[0])]
            bs_beta_sigma = (np.sort(
                np.abs(ChrisFuncs.Nanless(bs_beta_array) - beta)))[int(
                    (np.divide(float(percentile), 100.0)) *
                    ChrisFuncs.Nanless(bs_beta_array).shape[0])]
            if components == 1:
                bs_M_d_sigma = bs_M_c_sigma
            elif components == 2:
                bs_M_d_array = bs_M_w_array + bs_M_c_array
                bs_M_d_sigma = (np.sort(
                    np.abs(ChrisFuncs.Nanless(bs_M_d_array) - M_d)))[int(
                        (np.divide(float(percentile), 100.0)) *
                        ChrisFuncs.Nanless(bs_M_d_array).shape[0])]
            bs_M_w_sigma_log = np.log10(np.divide((bs_M_w_sigma + M_w), M_w))
            bs_M_c_sigma_log = np.log10(np.divide((bs_M_c_sigma + M_c), M_c))
            bs_M_d_sigma_log = np.log10(np.divide((bs_M_d_sigma + M_d), M_d))

        # Reporty uncertainties
        if verbose == True:
            print(' ')
            print('Cold dust temp uncertainty of: ' + str(bs_T_c_sigma)[0:5] +
                  ' K')
            print('Cold dust mass uncertainty of: ' +
                  str(bs_M_c_sigma_log)[0:5] + ' log10 Msol')
            if components == 2:
                print('Warm dust temp uncertainty of: ' +
                      str(bs_T_w_sigma)[0:5] + ' K')
                print('Warm dust mass uncertainty of: ' +
                      str(bs_M_w_sigma_log)[0:5] + ' log10 Msol')
            if beta == 'free':
                print('Beta uncertainty of: ' + str(bs_beta_sigma)[0:4])

    # Return NaN values if bootstrapping not requested
    elif bootstrapping == False:
        bs_T_w_sigma, bs_T_c_sigma, bs_M_w_sigma, bs_M_c_sigma, bs_M_d_sigma, bs_beta_sigma = np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, np.NaN
        bs_T_w_mu, bs_T_c_mu, bs_M_w_mu, bs_M_c_mu, bs_M_d_mu, bs_beta_mu = np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, np.NaN
        bs_T_w_array, bs_T_c_array, bs_M_w_array, bs_M_c_array, bs_M_d_array, bs_beta_array = np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, np.NaN
        bs_T_w_sigma_down, bs_T_w_sigma_up, bs_T_c_sigma_down, bs_T_c_sigma_up, bs_M_w_sigma_down, bs_M_w_sigma_up, bs_M_c_sigma_down, bs_M_c_sigma_up, bs_M_d_sigma_down, bs_M_d_sigma_up, bs_beta_sigma_down, bs_beta_sigma_up = np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, np.NaN
        bs_M_w_sigma_log, bs_M_c_sigma_log, bs_M_d_sigma_log = np.NaN, np.NaN, np.NaN

    # Carry out plotting, if required
    if plotting == False:
        fig, ax = [], []
    if plotting == True:
        plt.close('all')
        font_family = 'serif'
        fig = plt.figure(figsize=(8, 6))
        ax = fig.add_axes([0.125, 0.125, 0.825, 0.825])

        # Generate fit components
        fit_wavelengths = np.linspace(10E-6, 10000E-6, num=10000)
        fit_fluxes_w = ChrisFit_2GB_Flux(fit_wavelengths,
                                         T_w,
                                         0.0,
                                         M_w,
                                         0.0,
                                         distance,
                                         kappa_0=kappa_0,
                                         lambda_0=lambda_0,
                                         beta=beta)
        fit_fluxes_c = ChrisFit_2GB_Flux(fit_wavelengths,
                                         0.0,
                                         T_c,
                                         0.0,
                                         M_c,
                                         distance,
                                         kappa_0=kappa_0,
                                         lambda_0=lambda_0,
                                         beta=beta)
        fit_fluxes_tot = ChrisFit_2GB_Flux(fit_wavelengths,
                                           T_w,
                                           T_c,
                                           M_w,
                                           M_c,
                                           distance,
                                           kappa_0=kappa_0,
                                           lambda_0=lambda_0,
                                           beta=beta)

        # Plot fits
        ax.plot(fit_wavelengths * 1E6,
                fit_fluxes_w,
                ls='--',
                lw=1.0,
                c='black')
        ax.plot(fit_wavelengths * 1E6,
                fit_fluxes_c,
                ls='--',
                lw=1.0,
                c='black')
        ax.plot(fit_wavelengths * 1E6, fit_fluxes_tot, ls='-', lw=1.5, c='red')

        # Assemble strings for plot text in various circumstances
        chi_squared_string = '$\chi^{2}$ = ' + str(
            np.around(np.sum(chi_squared), decimals=3))[0:5]
        if bootstrapping == False:
            T_c_string = 'T$_{c}$ = ' + str(np.around(T_c,
                                                      decimals=3))[0:5] + ' K'
            M_c_string = ',   M$_{c}$ = ' + str(
                np.around(np.log10(M_c),
                          decimals=3))[0:5] + ' log$_{10}$M$_{\odot}$'
            T_w_string = ''
            M_w_string = ''
            M_d_string = ''
            if components == 2:
                T_w_string = 'T$_{w}$ = ' + str(np.around(
                    T_w, decimals=3))[0:5] + ' K'
                M_w_string = ',   M$_{w}$ = ' + str(
                    np.around(np.log10(M_w),
                              decimals=3))[0:5] + ' log$_{10}$M$_{\odot}$'
                M_d_string = ',   M$_{d}$ = ' + str(
                    np.around(np.log10(M_d),
                              decimals=3))[0:5] + ' log$_{10}$M$_{\odot}$'
            if beta_boolean == True:
                beta_string = ',   $\\beta$ = ' + str(
                    np.around(beta, decimals=2))[0:4]
        elif bootstrapping == True:
            T_c_string = 'T$_{c}$ = (' + str(np.around(
                T_c, decimals=3))[0:5] + ' $\pm$ ' + str(
                    np.around(bs_T_c_sigma, decimals=3))[0:5] + ') K'
            M_c_string = ',   M$_{c}$ = (' + str(
                np.around(np.log10(M_c), decimals=3))[0:5] + ' $\pm$ ' + str(
                    np.around(bs_M_c_sigma_log,
                              decimals=3))[0:5] + ') log$_{10}$M$_{\odot}$'
            T_w_string = ''
            M_w_string = ''
            M_d_string = ''
            if components == 2:
                T_w_string = 'T$_{w}$ = (' + str(np.around(
                    T_w, decimals=3))[0:5] + ' $\pm$ ' + str(
                        np.around(bs_T_w_sigma, decimals=3))[0:5] + ') K'
                M_w_string = ',   M$_{w}$ = (' + str(
                    np.around(
                        np.log10(M_w), decimals=3))[0:5] + ' $\pm$ ' + str(
                            np.around(
                                bs_M_w_sigma_log,
                                decimals=3))[0:5] + ') log$_{10}$M$_{\odot}$'
                M_d_string = ',   M$_{d}$ = (' + str(
                    np.around(
                        np.log10(M_d), decimals=3))[0:5] + ' $\pm$ ' + str(
                            np.around(
                                bs_M_d_sigma_log,
                                decimals=3))[0:5] + ') log$_{10}$M$_{\odot}$'
            if beta_boolean == True:
                beta_string = ',   $\\beta$ = ' + str(
                    np.around(beta, decimals=2))[0:4] + ' $\pm$ ' + str(
                        np.around(bs_beta_sigma, decimals=3))[0:4]
        if beta_boolean == False:
            beta_string = ''

        # Place text on figure
        ax.text(0.035,
                0.925,
                source_name,
                fontsize=15,
                fontweight='bold',
                transform=ax.transAxes,
                family=font_family)
        if components == 1:
            ax.text(0.035,
                    0.865,
                    T_c_string + M_c_string,
                    fontsize=14,
                    transform=ax.transAxes,
                    family=font_family)
            ax.text(0.035,
                    0.805,
                    chi_squared_string + beta_string + M_d_string,
                    fontsize=14,
                    transform=ax.transAxes,
                    family=font_family)
        if components == 2:
            ax.text(0.035,
                    0.865,
                    T_c_string + M_c_string,
                    fontsize=14,
                    transform=ax.transAxes,
                    family=font_family)
            ax.text(0.035,
                    0.805,
                    T_w_string + M_w_string,
                    fontsize=14,
                    transform=ax.transAxes,
                    family=font_family)
            ax.text(0.035,
                    0.745,
                    chi_squared_string + beta_string + M_d_string,
                    fontsize=14,
                    transform=ax.transAxes,
                    family=font_family)

        # Set up figure axes
        ax.set_xscale('log')
        ax.set_yscale('log')
        ax.set_xlabel(r'Wavelength ($\mu$m)',
                      fontsize=17.5,
                      fontname=font_family)
        ax.set_ylabel('Flux Density (Jy)', fontsize=17.5, fontname=font_family)

        # Format font of tick labels
        for xlabel in ax.get_xticklabels():
            xlabel.set_fontproperties(
                matplotlib.font_manager.FontProperties(family=font_family,
                                                       size=15))
        for ylabel in ax.get_yticklabels():
            ylabel.set_fontproperties(
                matplotlib.font_manager.FontProperties(family=font_family,
                                                       size=15))

        # Create seperature flux and error arrays for plot
        fluxes_plot, errors_plot = np.copy(fluxes_corr), np.copy(errors)
        errors_up, errors_down = np.copy(errors), np.copy(errors)

        # Format errorbars deal with negative fluxes
        errors_plot[np.where(fluxes_plot <= 0)] -= fluxes_plot[np.where(
            fluxes_plot <= 0)]
        fluxes_plot[np.where(fluxes_plot <= 0)] = 1E-50

        # Format errobars to account for non-detections
        det = np.where(fluxes_plot > errors_plot)
        errors_down[np.where(
            errors_down > fluxes_plot)] = 0.999 * fluxes_plot[np.where(
                errors_down > fluxes_plot)]

        # Plot datapoints
        if (True in limits) == False:
            ax.errorbar(wavelengths * 1E6,
                        fluxes_plot,
                        yerr=[errors_down, errors_up],
                        ecolor='black',
                        elinewidth=1.15,
                        capthick=1.15,
                        marker='x',
                        color='black',
                        markersize=5.0,
                        markeredgewidth=1.15,
                        linewidth=0)
        else:
            lim_true, lim_false = np.where(np.array(limits) == True), np.where(
                np.array(limits) == False)
            ax.errorbar(wavelengths[lim_false] * 1E6,
                        fluxes_plot[lim_false],
                        yerr=[errors_down[lim_false], errors_up[lim_false]],
                        ecolor='black',
                        elinewidth=1.15,
                        capthick=1.15,
                        marker='x',
                        color='black',
                        markersize=5.0,
                        markeredgewidth=1.15,
                        linewidth=0)
            ax.errorbar(wavelengths[lim_true] * 1E6,
                        fluxes_plot[lim_true],
                        yerr=[errors_down[lim_true], errors_up[lim_true]],
                        ecolor='gray',
                        elinewidth=1.15,
                        capthick=1.15,
                        marker='x',
                        color='gray',
                        markersize=5.0,
                        markeredgewidth=1.15,
                        linewidth=0)

        # Scale x-axes to account for wavelengths provided
        xlim_min = 1E6 * 10.0**(np.floor(np.log10(np.min(wavelengths))))
        xlim_max = 1E6 * 10.0**(np.ceil(np.log10(np.max(wavelengths))))
        ax.set_xlim(xlim_min, xlim_max)

        # Scale y-axes to account for range of values and non-detections
        ylim_min = 10.0**(
            -1.0 +
            np.round(np.log10(np.min(fluxes_plot[det] - errors_plot[det]))))
        ylim_max = 10.0**(1.0 + np.ceil(
            np.log10(1.1 * np.max(fluxes_plot[det] + errors_plot[det]))))
        ax.set_ylim(ylim_min, ylim_max)

        # Save figures to designated'Output' folder
        comp_strings = ['Eh', 'One', 'Two']
        if output_dir == False:
            if not os.path.exists('Output'):
                os.mkdir('Output')
            fig.savefig(os.path.join(
                'Output', source_name + ' ' + comp_strings[components] +
                ' Component.png'),
                        dpi=175.0)
            if plot_pdf == True:
                fig.savefig(
                    os.path.join(
                        'Output', source_name + ' ' +
                        comp_strings[components] + ' Component.pdf'))
        if output_dir != False:
            fig.savefig(os.path.join(
                output_dir, source_name + ' ' + comp_strings[components] +
                ' Component.png'),
                        dpi=175.0)
            if plot_pdf == True:
                fig.savefig(
                    os.path.join(
                        output_dir, source_name + ' ' +
                        comp_strings[components] + ' Component.pdf'))

    # Enter placeholder value for one-component warm output, and return output
    if components == 1:
        T_w = np.NaN
        M_w = np.NaN
        bs_T_w_sigma = np.NaN
        bs_M_w_sigma = np.NaN
    if verbose == True:
        print(' ')
        print(' ')
    return chi_squared,\
    [T_c, M_c, T_w, M_w, M_d, beta],\
    [bs_T_c_sigma, bs_M_c_sigma_log, bs_T_w_sigma, bs_M_w_sigma_log, bs_M_d_sigma_log, bs_beta_sigma],\
    fluxes_corr,\
    residuals,\
    [fig, ax],\
    [bs_T_c_mu, bs_M_c_mu, bs_T_w_mu, bs_M_w_mu, bs_M_d_mu, bs_beta_mu],\
    [bs_T_c_array, bs_M_c_array, bs_T_w_array, bs_M_w_array, bs_M_w_array+bs_M_c_array, bs_beta_array],\
    [[bs_T_c_sigma_down, bs_T_c_sigma_up],[bs_M_c_sigma_down, bs_M_c_sigma_up], [bs_T_w_sigma_down, bs_T_w_sigma_up], [bs_M_w_sigma_down, bs_M_w_sigma_up], [bs_M_d_sigma_down, bs_M_d_sigma_up], [bs_beta_sigma_down, bs_beta_sigma_up]]
Beispiel #49
0
def rabi_flop_fit_thermal(params, t, data):
    """Residual function for lmfit: thermal Rabi-flop model minus the data.

    Parameters
    ----------
    params : lmfit.Parameters
        Must provide 'nbar', 'detuning', 'time_2pi' and 'excitation_scaling'.
    t : array-like
        Times at which the evolution is evaluated.
    data : array-like
        Measured excitation values to subtract from the model.

    Returns
    -------
    array-like
        Model prediction minus ``data``, as lmfit.minimize expects.
    """
    nbar = params['nbar'].value
    detuning = params['detuning'].value
    time_2pi = params['time_2pi'].value
    scaling = params['excitation_scaling'].value
    predicted = flop.compute_evolution_thermal(
        nbar, detuning, time_2pi, t, excitation_scaling=scaling)
    return predicted - data


'''
perform the fit
'''
# Boolean mask selecting the samples inside the requested fitting window
# (element-wise AND expressed as multiplication of the comparison arrays).
region = (fitting_region[0] <= times) * (times <= fitting_region[1])
# Fit the thermal Rabi-flop model to the windowed data; lmfit updates
# `params` in place with the best-fit values.
result = lmfit.minimize(rabi_flop_fit_thermal,
                        params,
                        args=(times[region], prob[region]))
# Evaluate the best-fit model on the fine time grid (shifted by
# offset_time) for plotting.
fit_values = flop.compute_evolution_thermal(
    params['nbar'].value,
    params['detuning'].value,
    params['time_2pi'].value,
    detailed_times - offset_time,
    excitation_scaling=params['excitation_scaling'].value)
# Print fitted parameter values and their uncertainties.
lmfit.report_errors(params)
'''
make the plot
'''
pyplot.figure()
# Dashed black curve showing the initial-guess evolution.
# NOTE(review): this plot call appears truncated here — confirm the
# remaining arguments against the full original script.
pyplot.plot(detailed_times,
            guess_evolution,
            '--k',
def test_bounded_parameters():
    """Fit a bounded decaying sine wave and compare best-fit values,
    covariance, stderr, and correlations to lmfit v0.9.10 reference numbers."""
    # synthesize noisy data from a known decaying sine wave
    np.random.seed(1)
    xgrid = np.linspace(0, 15, 301)
    noisy = (5. * np.sin(2 * xgrid - 0.1) * np.exp(-xgrid * xgrid * 0.025) +
             np.random.normal(size=len(xgrid), scale=0.2))

    def objective(pars, x, data):
        """Decaying sine model residual: model(x) - data."""
        model = (pars['amp'] * np.sin(x * pars['omega'] + pars['shift']) *
                 np.exp(-x * x * pars['decay']))
        return model - data

    # parameter set with box constraints on every parameter
    pars = Parameters()
    pars.add('amp', value=10, min=0, max=50)
    pars.add('decay', value=0.1, min=0, max=10)
    pars.add('shift', value=0.0, min=-pi / 2., max=pi / 2.)
    pars.add('omega', value=3.0, min=0, max=np.inf)

    # run the fit with the default (leastsq) method
    fit = minimize(objective, pars, args=(xgrid, noisy))

    # best-fit values should recover the true parameters
    for par, truth in zip(fit.params.values(), [5, 0.025, -.1, 2]):
        check(par, truth)

    # covariance matrix [cf. lmfit v0.9.10]
    ref_covar = np.array(
        [[1.42428250e-03, 9.45395985e-06, -4.33997922e-05, 1.07362106e-05],
         [9.45395985e-06, 1.84110424e-07, -2.90588963e-07, 7.19107184e-08],
         [-4.33997922e-05, -2.90588963e-07, 9.53427031e-05, -2.37750362e-05],
         [1.07362106e-05, 7.19107184e-08, -2.37750362e-05, 9.60952336e-06]])
    assert_allclose(fit.covar, ref_covar, rtol=1e-6)

    # standard errors [cf. lmfit v0.9.10]
    ref_stderr = {'amp': 0.03773967,
                  'decay': 4.2908e-04,
                  'shift': 0.00976436,
                  'omega': 0.00309992}
    for name, err in ref_stderr.items():
        assert_almost_equal(fit.params[name].stderr, err, decimal=6)

    # pairwise correlations [cf. lmfit v0.9.10]
    ref_correl = [('amp', 'decay', 0.5838166760743324),
                  ('amp', 'shift', -0.11777303073961824),
                  ('amp', 'omega', 0.09177027400788784),
                  ('decay', 'shift', -0.0693579417651835),
                  ('decay', 'omega', 0.05406342001021014),
                  ('shift', 'omega', -0.7854644476455469)]
    for first, second, rho in ref_correl:
        assert_almost_equal(fit.params[first].correl[second], rho, decimal=6)
Beispiel #51
0
    amp = params['amp'].value

    y_model = offset + amp * np.sin(x * omega) * np.exp(-x / decay)
    return y_model - ydata


params = lmfit.Parameters()

# Initial guesses for the decaying-sine model; 'decay' is bounded below
# at zero so the exponential envelope cannot diverge.
params.add('offset', 2.0)
params.add('omega', 3.3)
params.add('amp', 2.5)
params.add('decay', 1.0, min=0)

# Scalar minimizer used for both fits below.
method = 'L-BFGS-B'

# Standard fit: minimize the plain sum of squared residuals.
o1 = lmfit.minimize(resid, params, args=(x, yn), method=method)
print("# Fit using sum of squares:\n")
lmfit.report_fit(o1)

# Robust fit: residuals reduced via the negative log-likelihood of a
# Cauchy distribution, which down-weights outliers.
o2 = lmfit.minimize(resid,
                    params,
                    args=(x, yn),
                    method=method,
                    reduce_fcn='neglogcauchy')
print("\n\n# Robust Fit, using log-likelihood with Cauchy PDF:\n")
lmfit.report_fit(o2)

# Plot clean data, noisy data, and both best-fit curves.
# Best fit = data + residual, since resid returns model - data.
plt.plot(x, y, 'ko', lw=2)
plt.plot(x, yn, 'k--*', lw=1)
plt.plot(x, yn + o1.residual, 'r-', lw=2)
plt.plot(x, yn + o2.residual, 'b-', lw=2)
Beispiel #52
0
def fit2(template,
         spec1,
         spec2,
         mask=None,
         params=None,
         isshow=False,
         isprint=False):
    """Simultaneously fit two channels of one observation against a template.

    Some spectra are observed by multichannel instruments, so multiple
    spectral files correspond to one observation. This function fits the
    two parts of the spectrum simultaneously, using 2 polynomial scale
    curves (one per channel), 1 shared shift, and 2 Gaussian-broadening
    sigma parameters. The mask window format should be like this:
    [[l1, r1], [l2, r2], ...]

    Parameters
    ----------
    template : Model
        Template model for the first channel; a second copy is loaded from
        its filename for the other channel.
    spec1, spec2 : spectrum objects
        The two channel spectra (must expose .wave, .flux_unit, .err_unit
        and .filename).
    mask : list of [left, right] wavelength windows, optional
        Regions whose residuals are replaced by a constant (ignored in fit).
    params, isshow, isprint : optional
        Not referenced in this body — NOTE(review): confirm whether these
        are vestigial or intended for future use.

    Returns
    -------
    lmfit.MinimizerResult
        The result returned by lmfit's minimize().
    """
    temp1 = template
    fname = template.filename
    # Independent second copy of the template for the second channel.
    temp2 = Model(fname)
    # Resample each template copy onto its spectrum's wavelength grid.
    temp1.reset_zoom(spec1.wave)
    temp2.reset_zoom(spec2.wave)
    pars = Parameters()
    # Order-5 polynomial scale curves, one per channel; coefficients below
    # are the starting guesses.
    ascale_valst = [3.12, 0.284, -0.023, 0.2, -0.0086, 0.127]
    ascalepar = set_pars(pars,
                         prefix='a_scale',
                         order=5,
                         valuelst=ascale_valst)
    bscale_valst = [3.9, 0.16, -0.019, -0.03, 0.055, -0.02]
    bscalepar = set_pars(pars,
                         prefix='b_scale',
                         order=5,
                         valuelst=bscale_valst)
    # One broadening sigma per channel, bounded away from zero.
    asigmapar = set_pars(pars,
                         prefix='a_sigma',
                         order=[1],
                         valuelst=[1.0e-4],
                         minlst=[1.0e-8])
    bsigmapar = set_pars(pars,
                         prefix='b_sigma',
                         order=[1],
                         valuelst=[1.0e-4],
                         minlst=[1.0e-8])
    # A single shift parameter shared by both channels.
    shiftpar = set_pars(pars,
                        prefix='shift',
                        order=[1],
                        valuelst=[-2.7713e-04])
    temp1.set_lmpar_name(ascalepar, None, shiftpar)
    temp2.set_lmpar_name(bscalepar, None, shiftpar)
    # Concatenate both channels into single arrays for the joint fit.
    wave = np.append(spec1.wave, spec2.wave)
    flux = np.append(spec1.flux_unit, spec2.flux_unit)
    err = np.append(spec1.err_unit, spec2.err_unit)
    # Indices of wavelengths falling inside the mask windows.
    arg_mask = func.mask(wave, mask)

    def residual(pars, x, data, eps=None):
        """Joint residual: Gaussian-broaden each observed spectrum with its
        own sigma, compute per-channel residuals against the templates,
        concatenate, and neutralize masked wavelengths."""
        # print('Flag 1')
        # print(asigmapar)
        arrsigma1 = read_lmpar(pars, dic_parnames=asigmapar)
        arrsigma2 = read_lmpar(pars, dic_parnames=bsigmapar)
        flux_unit1 = np.array(
            convol.gauss_filter(spec1.wave, spec1.flux_unit, arrsigma1))
        flux_unit2 = np.array(
            convol.gauss_filter(spec2.wave, spec2.flux_unit, arrsigma2))
        # print(len(spec2.wave))
        # print(len(flux_unit1))
        # print(len(spec2.err_unit))
        residual1 = temp1.residual(pars,
                                   spec1.wave,
                                   flux_unit1,
                                   eps=spec1.err_unit)
        residual2 = temp2.residual(pars,
                                   spec2.wave,
                                   flux_unit2,
                                   eps=spec2.err_unit)
        all_res = np.append(residual1, residual2)
        # Masked windows contribute a constant residual of 1.0.
        all_res[arg_mask] = 1.0
        return all_res

    out = minimize(residual, pars, args=(wave, flux, err))
    # report_fit(out)
    # Evaluate best-fit scale curves and shift for plotting.
    scale1_par = temp1.get_scale_par(out.params)
    scale1 = temp1.get_scale(spec1.wave, scale1_par)
    scale2_par = temp2.get_scale_par(out.params)
    scale2 = temp2.get_scale(spec2.wave, scale2_par)
    shift_par = temp1.get_shift_par(out.params)
    temp_new_wave = temp1.get_wave(shift_par)
    plt.plot(temp_new_wave, temp1.flux)
    arrsigma1 = read_lmpar(out.params, dic_parnames=asigmapar)
    arrsigma2 = read_lmpar(out.params, dic_parnames=bsigmapar)
    flux_unit1 = convol.gauss_filter(spec1.wave, spec1.flux_unit, arrsigma1)
    flux_unit2 = convol.gauss_filter(spec2.wave, spec2.flux_unit, arrsigma2)
    # Overplot the broadened, de-scaled observed spectra.
    plt.plot(spec1.wave, flux_unit1 / scale1)
    plt.plot(spec2.wave, flux_unit2 / scale2)
    # plt.plot(wave[arg_mask], flux[arg_mask], color='red')
    # Convert the fitted shift into a velocity; c is presumably the speed
    # of light as a unit-bearing quantity (the .to('km/s') below implies
    # astropy-style units) — TODO confirm against imports.
    shift = out.params['shift1'].value * c
    shifterr = out.params['shift1'].stderr * c
    bluename = os.path.basename(spec1.filename)
    redname = os.path.basename(spec2.filename)
    print('-' * 20 + ' velocity ' + '-' * 20)
    print(bluename + '  ' + redname)
    print(shift.to('km/s'))
    print(shifterr.to('km/s'))
    plt.show()
    return out
        assert (resid**2).sum() == (np.abs(resid)**2).sum()
        return resid

    diskmod = model(x1, x2, y1, y2, data.max())
    print("Initial disk model max: {0} data max: {1}".format(
        diskmod.max(), data.max()))

    parameters = lmfit.Parameters()
    parameters.add('x1', value=x1)
    parameters.add('x2', value=x2)
    parameters.add('y1', value=y1)
    parameters.add('y2', value=y2)
    parameters.add('scale', value=data.max())
    parhist[band][len(parameters)] = []
    result = lmfit.minimize(residual,
                            parameters,
                            epsfcn=epsfcn,
                            kws={'maskptsrc': False})
    print("Basic fit parameters (linear model):")
    result.params.pretty_print()
    #print("red Chi^2: {0:0.3g}".format(result.chisqr / (ndata - result.nvarys)))
    print("red Chi^2: {0:0.3g}".format(result.redchi))
    print(result.message)
    print()

    bestdiskmod_beam = model(**result.params)

    # Create a "beam" that is really the vertical x horizontal scale height
    # to be convolved with the observed beam
    # old versions for QA2 cutout parameters.add('kernelmajor', value=0.064)
    # old versions for QA2 cutout parameters.add('kernelminor', value=0.043)
    parameters.add('kernelmajor', value=0.054, min=0.01, max=0.3)
Beispiel #54
0
def fit_lamost():
    """Fit LAMOST medium-resolution spectra against a template model.

    Reads every extension (starting at index 3) of the FITS files in
    ``namelst``, splits the spectra into blue/red arms via the EXTNAME
    header, then fits each exposure's blue and red spectra simultaneously
    with a shared Doppler-shift parameter.  Prints a fit report per
    exposure and the resulting radial velocities in km/s.
    """
    bluelst, redlst = [], []
    namelst = glob.glob('/home/zzx/workspace/data/stellar_X/*.fits')
    # NOTE(review): the glob result is immediately discarded in favor of a
    # single hard-coded file -- presumably a debugging shortcut; confirm
    # before using this on the full data set.
    namelst = [
        '/home/zzx/workspace/data/stellar_X/med-58409-TD045606N223435B01_sp16-102.fits'
    ]
    for name in namelst:
        print(name)
        size = len(fits.open(name))
        for ind in range(3, size):
            spec = specio.Spectrum(name, ind)
            spec.clean_cosmic_ray()
            if 'B' in spec.header['EXTNAME']:
                bluelst.append(spec)
            else:
                redlst.append(spec)
    # Use the first file's extensions 3 (blue) and 11 (red) as templates.
    name = namelst[0]
    model_blue = Model(name, 3)
    model_blue.clean_cosmic_ray()
    model_red = Model(name, 11)
    model_red.clean_cosmic_ray()
    params = Parameters()
    # One shift parameter shared by both arms; separate scale polynomials
    # and Gaussian-broadening sigmas per arm.
    shiftparname = set_pars(params, 'shift', [1], valuelst=[0.0])
    scalevalst = [
        0.99608100, -0.00931768, 0.00319284, 5.5658e-04, -4.4060e-04, 0.0
    ]
    bscaleparname = set_pars(params, 'b_scale', 5, valuelst=scalevalst)
    rscaleparname = set_pars(params, 'r_scale', 5, valuelst=scalevalst)
    bsigmapar = set_pars(params, 'b_sigma', [1], valuelst=[0.0004])
    rsigmapar = set_pars(params, 'r_sigma', [1], valuelst=[0.0004])
    model_blue.set_lmpar_name(bscaleparname, bsigmapar, shiftparname)
    model_red.set_lmpar_name(rscaleparname, rsigmapar, shiftparname)
    shiftlst, shifterrlst = [], []

    def residual(pars, x1, data1, eps1, x2, data2, eps2):
        # Joint residual vector of the red and blue arms.
        res1 = model_blue.residual(pars, x1, data1, eps1)
        res2 = model_red.residual(pars, x2, data2, eps2)
        return np.append(res2, res1)

    for ind in range(len(redlst)):
        bspec = bluelst[ind]
        rspec = redlst[ind]
        arg1 = func.select(bspec.wave, [[4920, 5300]])
        bnw = bspec.wave[arg1]
        bnf = bspec.flux[arg1]
        arg2 = func.select(rspec.wave, [[6320, 6860]])
        rnw = rspec.wave[arg2]
        rnf = rspec.flux[arg2]
        # A flat 1% uncertainty is used instead of the measured errors,
        # which effectively makes the fit an unweighted least squares.
        bne = np.ones(len(bnw), dtype=np.float64) * 0.01
        rne = np.ones(len(rnw), dtype=np.float64) * 0.01
        out = minimize(residual, params, args=(bnw, bnf, bne, rnw, rnf, rne))
        report_fit(out)
        shiftlst.append(out.params['shift1'].value * c)
        shifterrlst.append(out.params['shift1'].stderr * c)

        plt.figure()
        spec_fit_blue = model_blue.get_spectrum(out.params, model_blue.wave)
        spec_fit_red = model_red.get_spectrum(out.params, model_red.wave)
        plt.plot(bnw, bnf)
        plt.plot(model_blue.wave, spec_fit_blue)

        plt.figure()
        plt.plot(rnw, rnf)
        plt.plot(model_red.wave, spec_fit_red)
        plt.show()

    for ind, value in enumerate(shiftlst):
        print(value.to('km/s'), shifterrlst[ind].to('km/s'))
# Synthetic data: double-exponential decay plus Gaussian noise.
# NOTE(review): `x` and HASPYLAB are defined earlier in the file.
y = (3.0*np.exp(-x/2) - 5.0*np.exp(-(x-0.1) / 10.) +
     0.1*np.random.randn(x.size))
if HASPYLAB:
    plt.plot(x, y, 'b')
    plt.show()

p = lmfit.Parameters()
# Amplitudes a1/a2 and decay times t1/t2 (the trailing True marks t2 varying).
p.add_many(('a1', 4), ('a2', 4), ('t1', 3), ('t2', 3., True))


def residual(p):
    """Return model(p) - y for the double-exponential decay model."""
    v = p.valuesdict()
    return v['a1']*np.exp(-x/v['t1']) + v['a2']*np.exp(-(x-0.1) / v['t2']) - y


# Locate the minimum first with a derivative-free Nelder-Mead search.
mi = lmfit.minimize(residual, p, method='nelder', nan_policy='omit')
lmfit.printfuncs.report_fit(mi.params, min_correl=0.5)
if HASPYLAB:
    plt.figure()
    plt.plot(x, y, 'b')
    # residual + y reconstructs the best-fit model curve.
    plt.plot(x, residual(mi.params) + y, 'r', label='best fit')
    plt.legend(loc='best')
    plt.show()

# Place bounds on the ln(sigma) parameter that emcee will automatically add
# to estimate the true uncertainty in the data since is_weighted=False
mi.params.add('__lnsigma', value=np.log(0.1), min=np.log(0.001), max=np.log(2))

# MCMC sampling around the Nelder-Mead solution to map the posterior.
res = lmfit.minimize(residual, method='emcee', nan_policy='omit', burn=300,
                     steps=1000, thin=20, params=mi.params, is_weighted=False)
Beispiel #56
0
def fit(template,
        wave,
        flux,
        err,
        params=None,
        show=False,
        isprint=False,
        mask=None):
    """Fit an observed spectrum against *template* with lmfit.

    Parameters
    ----------
    template : Model
        Template object providing ``get_spectrum`` and ``set_lmpar_name``.
    wave, flux, err : array
        Observed wavelengths, fluxes and flux uncertainties.
    params : lmfit.Parameters, optional
        Starting parameters; built from defaults when None.
    show : bool
        Plot data and best-fit model after the fit.
    isprint : bool
        Print the lmfit fit report.
    mask : list of [lo, hi] pairs, optional
        Wavelength windows to include in the fit (passed to ``func.mask``).

    Returns
    -------
    lmfit.MinimizerResult
        The minimization result; parameters in ``out.params``.
    """
    print('run fit')
    if params is None:
        params = Parameters()
        # One shift parameter and a 5th-order scale polynomial; no extra
        # Gaussian broadening (sigma parameters deliberately disabled).
        shiftparname = set_pars(params, 'shift', [1], valuelst=[0.0])
        scalevalst = [1.0, -1.0, -1.0, 0.22, -0.1, -0.13]
        scaleparname = set_pars(params, 'scale', 5, valuelst=scalevalst)
        template.set_lmpar_name(scaleparname, None, shiftparname)
    if mask is not None:
        # Indices of the wavelength windows retained for the fit.
        argsel = func.mask(wave, mask)

    def residual(pars, x, data, eps):
        # Weighted residual between the shifted/scaled template and data.
        flux_fit1 = template.get_spectrum(pars, x)
        if mask is not None:
            return ((flux_fit1 - data) / eps)[argsel]
        # BUG FIX: previously referenced the undefined name `flux_fit2`
        # (its computation was commented out), raising NameError whenever
        # mask was None; compare against the observed data instead.
        return (flux_fit1 - data) / eps

    out = minimize(residual, params, args=(wave, flux, err), method='leastsq')
    if isprint:
        report_fit(out)
    if show:
        plt.figure()
        plt.plot(wave, flux)
        spec_fit = template.get_spectrum(out.params, wave)
        plt.plot(wave, spec_fit)
        plt.show()
    return out
Beispiel #57
0
def fit_sed_lmfit_hz(xdata,
                     flux,
                     guesses=(0, 0),
                     err=None,
                     blackbody_function='blackbody',
                     quiet=True,
                     sc=1e20,
                     **kwargs):
    """
    Parameters
    ----------
    xdata : array
        Array of the frequencies of the data
    flux : array
        The fluxes corresponding to the xdata values.  Should be in
        erg/s/cm^2/Hz
    guesses : (Temperature,Column) or (Temperature,Beta,Column)
        The input guesses.  3 parameters are used for modified blackbody
        fitting, two for temperature fitting.
    blackbody_function: str
        The blackbody function to fit, either 'blackbody', 'modified', or
        'modified_blackbody'
    quiet : bool
        quiet flag passed to mpfit
    sc : float
        A numerical parameter to enable the fitter to function properly.
        It is unclear what values this needs to take, 1e20 seems to work
        by bringing the units from erg/s/cm^2/Hz to Jy, i.e. bringing them
        into the "of order 1" regime.  This does NOT affect the output *units*,
        though it may affect the quality of the fit.

    Returns
    -------
    lm : lmfit parameters
        The lmfit-py result structure.  Each parameter has many properties.

    Examples
    --------
    >>> from astropy import units as u
    >>> import numpy as np
    >>> wavelengths = np.array([20,70,160,250,350,500,850,1100]) * u.um
    >>> frequencies = wavelengths.to(u.Hz, u.spectral())
    >>> temperature = 15 * u.K
    >>> column = 1e22 * u.cm**-2
    >>> flux = modified_blackbody(frequencies, temperature, beta=1.75,
    ...                           column=column)
    >>> err = 0.1 * flux
    >>> np.random.seed(0)
    >>> noise = np.random.randn(frequencies.size) * err
    >>> tguess, bguess, nguess = 20.,2.,21.5
    >>> bbunit = u.erg/u.s/u.cm**2/u.Hz
    >>> lm = fit_sed_lmfit_hz(frequencies.to(u.Hz).value,
    ...                       (flux+noise).to(bbunit).value,
    ...                       err=err.to(bbunit).value,
    ...                       blackbody_function='modified',
    ...                       guesses=(tguess,bguess,nguess))
    >>> print(lm.params)
    
    >>> # If you want to fit for a fixed beta, do this:
    >>> import lmfit
    >>> parlist = [(n,lmfit.Parameter(x))
    ...            for n,x in zip(('T','beta','N'),(20.,2.,21.5))]
    >>> parameters = lmfit.Parameters(OrderedDict(parlist))
    >>> parameters['beta'].vary = False
    >>> lm = fit_sed_lmfit_hz(frequencies.to(u.Hz).value,
    ...                       flux.to(bbunit).value,
    ...                       err=err.to(bbunit).value,
    ...                       blackbody_function='modified',
    ...                       guesses=parameters)
    >>> print(lm.params)
    """
    try:
        import lmfit
    except ImportError:
        # BUG FIX: previously only printed and fell through, causing a
        # confusing NameError on the first use of `lmfit` below.
        print("Cannot import lmfit: cannot use lmfit-based fitter.")
        raise

    # Map the user-facing function name to the actual blackbody callable;
    # an unknown name raises KeyError (unchanged behavior).
    bbfd = {
        'blackbody': _blackbody_hz,
        'modified': _modified_blackbody_hz,
        'modified_blackbody': _modified_blackbody_hz
    }

    bbf = bbfd[blackbody_function]

    def lmfitfun(x, y, err):
        """Build the residual closure: scaled (data - model), optionally
        weighted by the (scaled) errors."""
        if err is None:

            def f(p):
                return (y * sc - bbf(x, *[p[par].value
                                          for par in p], **kwargs) * sc)
        else:

            def f(p):
                return (y * sc -
                        bbf(x, *[p[par].value
                                 for par in p], **kwargs) * sc) / (err * sc)

        return f

    if not isinstance(guesses, lmfit.Parameters):
        guesspars = lmfit.Parameters()
        # Tuple guesses are zipped against (T, beta, N); a 2-tuple simply
        # omits the N parameter (plain blackbody fit).
        for n, x in zip(('T', 'beta', 'N'), guesses):
            guesspars.add(value=x, name=n)
    else:
        guesspars = guesses

    minimizer = lmfit.minimize(lmfitfun(xdata, np.array(flux), err), guesspars)

    return minimizer
#pyplot.plot(x_cooled, y_cooled,'ob')

pyplot.axis([0, 0.2, -0.6, 0.6])

pyplot.ylabel('Parity flop with cooling')

# Uncertainty estimate derived from the parity signal amplitude;
# NOTE(review): the /20 presumably reflects the number of shots -- confirm.
yerr = np.sqrt(1 - y_cooled**2) / 20

params = lmfit.Parameters()

# Damped-cosine model: amplitude, decay time, frequency and phase.
params.add('A', value=0.4)
params.add('tau', value=1)
params.add('freq', value=50)
params.add('phase', value=0.0)

result = lmfit.minimize(cosine_fit, params, args=(x_cooled, y_cooled, yerr))

# data + residual reconstructs the best-fit model at the sample points.
fit_values = y_cooled + result.residual

lmfit.report_errors(params)

# Reduced chi-squared with 4 fitted parameters (Python 2 print statement).
red_chi = np.sum(result.residual**2) / (np.size(y_cooled) - 4)
print red_chi

# Dense grid for a smooth model curve overlay.
x_plot = np.linspace(x_cooled.min(), x_cooled.max(), 1000)

pyplot.errorbar(x_cooled, y_cooled, yerr)

pyplot.plot(x_plot, cosine_model(params, x_plot))

plot2 = figure.add_subplot(212)
def spectra_fit(fit_params, *args, **kwargs):
    print '\nperforming minimization'

    fit_kws = {'nan_policy': 'omit'}
    if len(args) in (1, 2, 3):
        sim_data_1, meas_data_full_1, meas_data_1 = args[0]
        if len(args) == 1:
            print '    single sprectrum fit'
            res = lmfit.minimize(minimize,
                                 fit_params,
                                 args=((sim_data_1, meas_data_1), ),
                                 **fit_kws)

    if len(args) in (2, 3):
        sim_data_2, meas_data_full_2, meas_data_2 = args[1]
        if len(args) == 2:
            print '    double spectrum fit'
            res = lmfit.minimize(minimize,
                                 fit_params,
                                 args=((sim_data_1, meas_data_1),
                                       (sim_data_2, meas_data_2)),
                                 **fit_kws)

    if len(args) == 3:
        sim_data_3, meas_data_full_3, meas_data_3 = args[2]
        if len(args) == 3:
            print '    triple spectrum fit'
            res = lmfit.minimize(minimize,
                                 fit_params,
                                 args=((sim_data_1, meas_data_1),
                                       (sim_data_2, meas_data_2),
                                       (sim_data_3, meas_data_3)),
                                 **fit_kws)

    if kwargs['print_info']:
        print '\n', res.message
        print lmfit.fit_report(res)

    if kwargs['show_plots']:
        if len(args) == 1:
            sim_with_res = plot_fitted_spectra(
                res.params['shift'].value, res.params['spread'].value,
                ((res.params['alpha_1'].value, ),
                 (res.params['beta_1'].value, ),
                 (res.params['gamma_1'].value, ), (res.params['c1_1'].value, ),
                 (res.params['c2_1'].value, ),
                 (res.params['y_scale_1'].value, ), (sim_data_1, ),
                 (meas_data_full_1, ), (meas_data_1, )))
        if len(args) == 2:
            sim_with_res = plot_fitted_spectra(
                res.params['shift'].value, res.params['spread'].value,
                ((res.params['alpha_1'].value, res.params['alpha_2'].value),
                 (res.params['beta_1'].value, res.params['beta_2'].value),
                 (res.params['gamma_1'].value, res.params['gamma_2'].value),
                 (res.params['c1_1'].value, res.params['c1_2'].value),
                 (res.params['c2_1'].value, res.params['c2_2'].value),
                 (res.params['y_scale_1'].value,
                  res.params['y_scale_2'].value), (sim_data_1, sim_data_2),
                 (meas_data_full_1, meas_data_full_2),
                 (meas_data_1, meas_data_2)))
        if len(args) == 3:
            sim_with_res = plot_fitted_spectra(
                res.params['shift'].value, res.params['spread'].value,
                ((res.params['alpha_1'].value, res.params['alpha_2'].value,
                  res.params['alpha_3'].value),
                 (res.params['beta_1'].value, res.params['beta_2'].value,
                  res.params['beta_3'].value),
                 (res.params['gamma_1'].value, res.params['gamma_2'].value,
                  res.params['gamma_3'].value),
                 (res.params['c1_1'].value, res.params['c1_2'].value,
                  res.params['c1_3'].value),
                 (res.params['c2_1'].value, res.params['c2_2'].value,
                  res.params['c2_3'].value),
                 (res.params['y_scale_1'].value, res.params['y_scale_2'].value,
                  res.params['y_scale_3']),
                 (sim_data_1, sim_data_2, sim_data_3),
                 (meas_data_full_1, meas_data_full_2, meas_data_full_3),
                 (meas_data_1, meas_data_2, meas_data_3)))

    # get shift term
    shift_term = res.params['shift'].value
    spread_term = res.params['spread'].value
    #print res.var_names
    #print res.covar
    #print res.params['spread'].correl
    return shift_term, spread_term, res, sim_with_res
Beispiel #60
0
def bayes_fit(xdata, ydata, distribution, burn=100, steps=1000, thin=20):
    """Identify and fit an arbitrary number of peaks in a 1-d spectrum array.

    Parameters
    ----------
    xdata : 1-d array
        X data.

    ydata : 1-d array
        Y data.

    Returns
    -------
    results : lmfit.MinimizerResults.
        results of the fit. To get parameters, use `results.params`.
    """
    # Locate candidate peaks via continuous-wavelet-transform matching.
    peak_indices = find_peaks_cwt(ydata, widths=np.arange(1, 100))

    # Seed one (center, height, width) triple per detected peak.
    parameters = lmfit.Parameters()
    for i, idx in enumerate(peak_indices):
        parameters.add(name='peak_{}_center'.format(i), value=xdata[idx])
        parameters.add(name='peak_{}_height'.format(i), value=ydata[idx])
        parameters.add(name='peak_{}_width'.format(i), value=.1)

    # Maximum-likelihood fit of the residual function.
    ML_results = lmfit.minimize(residual,
                                parameters,
                                args=[distribution, xdata],
                                kws={'ydata': ydata})

    # Extra noise parameter so the Bayesian step can estimate uncertainty.
    ML_results.params.add('noise', value=1, min=0.001, max=2)

    def lnprob(params=ML_results.params):
        # Gaussian log-likelihood with the fitted noise scale.
        noise = params['noise']
        return -0.5 * np.sum((residual(params, distribution, xdata, ydata) /
                              noise)**2 + np.log(2 * np.pi * noise**2))

    # MCMC sampling of the posterior around the ML solution.
    sampler = lmfit.Minimizer(lnprob, ML_results.params)
    bayes_results = sampler.emcee(burn=burn,
                                  steps=steps,
                                  thin=thin,
                                  params=ML_results.params)

    return bayes_results, parameters