def decon_known(df, center):
    """
    Use multiple make_lor calls to do a multiple-peak deconvolution with
    several Lorentzians plus one arc-tangent step function.

    Notice: parameters are NOT automatically deleted if they are not used in
    the deconvolution of data with fewer peaks than the last time.

    Parameters
    ----------
    df : pandas.DataFrame
        Column-wise dataframe recording the data you want to deconvolve.
    center : array
        Array recording the centers of all peaks.

    Returns
    -------
    dict
        {'out': lmfit.model.ModelResult} -- the fit result of the composite
        multiple-peak model used for the deconvolution.
    """
    # arc-tangent step for the absorption edge; `paras`, `norm`, `make_lor`
    # and `inflection` are defined at module level
    arctan_mod = StepModel(form='atan', prefix='arctan_')
    paras.update(arctan_mod.make_params())
    paras['arctan_center'].set(value=inflection(norm.Energy, df), vary=False, min=0.0)
    paras['arctan_amplitude'].set(value=1.0, vary=False)
    paras['arctan_sigma'].set(value=1.0, min=0.01)

    # add one Lorentzian per requested peak center
    mod = arctan_mod
    for i in range(len(center)):
        this = make_lor(df, i, center, 2.0)['model']
        mod = mod + this
        paras.update(make_lor(df, i, center, 2.0)['paras'])

    out = mod.fit(df, params=paras, x=norm.Energy, weights=df)
    return {'out': out}
def fitlogistic(x, y, dias):
    # fit a logistic function
    model = StepModel(form="logistic")

    # parameters to fit, guessed by lmfit
    parameters = model.guess(y, x=x)
    output = model.fit(y, parameters, x=x)

    amplitude = output.params["amplitude"].value
    amplitude = math.floor(amplitude)
    center = output.params["center"].value
    sigma = output.params["sigma"].value

    fit = []
    xfit = []
    cumulative = []
    for i in range(61, dias):
        if i == 61:
            xfit.append(i)
            alpha = (i - center) / sigma
            value = amplitude * (1 - (1 / (1 + math.exp(alpha))))
            fit.append(value)
            cumulative.append(0)
        else:
            xfit.append(i)
            alpha = (i - center) / sigma
            value = amplitude * (1 - (1 / (1 + math.exp(alpha))))
            fit.append(value)
            c = value - fit[i - 62]
            cumulative.append(c)
    return amplitude, center, sigma, xfit, fit, cumulative, output.fit_report()
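# A minimal usage sketch for fitlogistic (not from the original project;
# it assumes `math`, numpy and lmfit's StepModel are imported as in the
# snippet above). The day axis starts at 61 to match the offset hard-coded
# inside the function, and the data are purely synthetic.
import math
import numpy as np
from lmfit.models import StepModel

days = np.arange(61, 121, dtype=float)
true_total, true_center, true_sigma = 1000.0, 90.0, 5.0
cases = true_total / (1.0 + np.exp(-(days - true_center) / true_sigma))

amplitude, center, sigma, xfit, fit, cumulative, report = fitlogistic(days, cases, 121)
print(amplitude, center, sigma)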
def alignment_plot(self, yt, pitch, yf):
    '''Make a pretty, three-panel plot at the end of an auto-alignment'''
    BMMuser = user_ns['BMMuser']
    close_all_plots()
    fig = plt.figure(tight_layout=True)  # , figsize=(9,6))
    gs = gridspec.GridSpec(1, 3)

    if self.orientation == 'parallel':
        motor = 'xafs_y'
    else:
        motor = 'xafs_x'

    t = fig.add_subplot(gs[0, 0])
    tt = user_ns['db'][yt].table()
    yy = tt[motor]
    signal = tt['It'] / tt['I0']
    if float(signal[2]) > list(signal)[-2]:
        ss = -(signal - signal[2])
        self.inverted = 'inverted '
    else:
        ss = signal - signal[2]
        self.inverted = ''
    mod = StepModel(form='erf')
    pars = mod.guess(ss, x=numpy.array(yy))
    out = mod.fit(ss, pars, x=numpy.array(yy))
    t.scatter(yy, out.data)
    t.plot(yy, out.best_fit, color='red')
    t.scatter(out.params['center'].value, out.params['amplitude'].value / 2,
              s=120, marker='x', color='green')
    t.set_xlabel(f'{motor} (mm)')
    t.set_ylabel(f'{self.inverted}data and error function')

    p = fig.add_subplot(gs[0, 1])
    tp = user_ns['db'][pitch].table()
    xp = tp['xafs_pitch']
    signal = tp['It'] / tp['I0']
    target = signal.idxmax()
    p.plot(xp, signal)
    p.scatter(xp[target], signal.max(), s=120, marker='x', color='green')
    p.set_xlabel('xafs_pitch (deg)')
    p.set_ylabel('It/I0')
    p.set_title(f'alignment of spinner {self.current()}')

    f = fig.add_subplot(gs[0, 2])
    tf = user_ns['db'][yf].table()
    yy = tf[motor]
    signal = (tf[BMMuser.xs1] + tf[BMMuser.xs2] + tf[BMMuser.xs3] + tf[BMMuser.xs4]) / tf['I0']
    # if BMMuser.element in ('Zr', 'Sc', 'Nb'):
    #     com = signal.idxmax()
    #     centroid = yy[com]
    # else:
    com = int(center_of_mass(signal)[0]) + 1
    centroid = yy[com]
    f.plot(yy, signal)
    f.scatter(centroid, signal[com], s=120, marker='x', color='green')
    f.set_xlabel(f'{motor} (mm)')
    f.set_ylabel('If/I0')

    fig.canvas.draw()
    fig.canvas.flush_events()
    plt.show()
def align_linear(self, force=False, drop=None):
    '''Fit an error function to the linear scan against It. Plot the
    result. Move to the centroid of the error function.'''
    if self.orientation == 'parallel':
        motor = user_ns['xafs_liny']
    else:
        motor = user_ns['xafs_linx']
    yield from linescan(motor, 'it', -2.3, 2.3, 51, pluck=False)
    close_last_plot()
    table = user_ns['db'][-1].table()
    yy = table[motor.name]
    signal = table['It'] / table['I0']
    if drop is not None:
        yy = yy[:-drop]
        signal = signal[:-drop]
    if float(signal[2]) > list(signal)[-2]:
        ss = -(signal - signal[2])
        self.inverted = 'inverted '
    else:
        ss = signal - signal[2]
        self.inverted = ''
    mod = StepModel(form='erf')
    pars = mod.guess(ss, x=numpy.array(yy))
    out = mod.fit(ss, pars, x=numpy.array(yy))
    print(whisper(out.fit_report(min_correl=0)))
    target = out.params['center'].value
    yield from mv(motor, target)
    self.y_plot(yy, out)
def wafer_edge(motor='x'):
    '''Fit an error function to the linear scan against It. Plot the
    result. Move to the centroid of the error function.'''
    if motor == 'x':
        motor = user_ns['xafs_linx']
    else:
        motor = user_ns['xafs_liny']
    yield from linescan(motor, 'it', -2, 2, 41, pluck=False)
    close_last_plot()
    table = user_ns['db'][-1].table()
    yy = table[motor.name]
    signal = table['It'] / table['I0']
    if float(signal[2]) > list(signal)[-2]:
        ss = -(signal - signal[2])
    else:
        ss = signal - signal[2]
    mod = StepModel(form='erf')
    pars = mod.guess(ss, x=numpy.array(yy))
    out = mod.fit(ss, pars, x=numpy.array(yy))
    print(whisper(out.fit_report(min_correl=0)))
    out.plot()
    target = out.params['center'].value
    yield from mv(motor, target)
    yield from resting_state_plan()
    print(f'Edge found at X={user_ns["xafs_x"].position} and Y={user_ns["xafs_y"].position}')
def find_fit_sigmoid(x, y):
    model_gompertz = lm.models.Model(gompertz)
    params_gompertz = lm.Parameters()
    params_gompertz.add('asymptote', value=1E-3, min=1E-8)
    params_gompertz.add('displacement', value=1E-3, min=1E-8)
    params_gompertz.add('step_center', value=1E-3, min=1E-8)
    result_gompertz = model_gompertz.fit(y, params_gompertz, x=x)

    step_mod = StepModel(form='erf', prefix='step_')
    line_mod = LinearModel(prefix='line_')
    params_stln = line_mod.make_params(intercept=y.min(), slope=0)
    params_stln += step_mod.guess(y, x=x, center=90)
    model_stln = step_mod + line_mod
    result_stln = model_stln.fit(y, params_stln, x=x)

    ret_result = None
    ret_model = None
    if result_stln.chisqr < result_gompertz.chisqr:
        ret_result = result_stln
        ret_model = model_stln
    else:
        ret_result = result_gompertz
        ret_model = model_gompertz
    return ret_result, ret_model
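# find_fit_sigmoid above relies on a module-level `gompertz` model function
# that is not shown, and on lmfit imported as `lm`. A hedged sketch of a
# Gompertz function consistent with the parameter names it uses
# (asymptote, displacement, step_center) might look like this; the exact
# parameterization in the original project may differ.
import numpy as np
import lmfit as lm
from lmfit.models import StepModel, LinearModel


def gompertz(x, asymptote, displacement, step_center):
    # Gompertz sigmoid: asymptote * exp(-displacement * exp(-step_center * x))
    return asymptote * np.exp(-displacement * np.exp(-step_center * x))

# usage (with some time axis x and sigmoidal signal y):
# result, model = find_fit_sigmoid(x, y)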
def gauss_step_const(signal, guess):
    """
    Fits high contrast data very well
    """
    if guess == False:
        return [0, 0]
    else:
        amp, centre, stdev, offset = guess

        data = np.array([range(len(signal)), signal]).T
        X = data[:, 0]
        Y = data[:, 1]

        # gauss_mod = Model(gaussian)
        gauss_mod = Model(gaussian)
        const_mod = ConstantModel()
        step_mod = StepModel(prefix='step')

        pars = gauss_mod.make_params(height=amp, center=centre, width=stdev / 3., offset=offset)
        # pars = gauss_mod.make_params(amplitude=amp, center=centre, sigma=stdev / 3.)
        gauss_mod.set_param_hint('sigma', value=stdev / 3., min=stdev / 2., max=stdev)

        pars += step_mod.guess(Y, x=X, center=centre)
        pars += const_mod.guess(Y, x=X)

        mod = const_mod + gauss_mod + step_mod
        result = mod.fit(Y, pars, x=X)

        # write error report
        # print(result.fit_report())
        print("contrast fit", result.redchi)

        return X, result.best_fit, result.redchi
def align_y(self, force=False, drop=None):
    '''Fit an error function to the xafs_y scan against It. Plot the
    result. Move to the centroid of the error function.'''
    xafs_y = user_ns['xafs_y']
    db = user_ns['db']
    yield from linescan(xafs_y, 'it', -1, 1, 31, pluck=False)
    close_last_plot()
    table = db[-1].table()
    yy = table['xafs_y']
    signal = table['It'] / table['I0']
    if drop is not None:
        yy = yy[:-drop]
        signal = signal[:-drop]
    if float(signal[2]) > list(signal)[-2]:
        ss = -(signal - signal[2])
        self.inverted = 'inverted '
    else:
        ss = signal - signal[2]
        self.inverted = ''
    mod = StepModel(form='erf')
    pars = mod.guess(ss, x=numpy.array(yy))
    out = mod.fit(ss, pars, x=numpy.array(yy))
    print(whisper(out.fit_report(min_correl=0)))
    self.y_plot(yy, out)
    target = out.params['center'].value
    yield from mv(xafs_y, target)
def smooth_and_remove_step(x_lst, y_lst, x_min_flt, x_max_flt, rmv_step_bool):
    '''
    Takes the entire data set (x and y), cuts the spectra down so that
    x_min < x < x_max, THEN removes a step function from y_lst.
    '''
    # Restrict the fit
    x_fit = []
    y_fit = []
    top_lst = []
    bottom_lst = []
    for x, y in zip(x_lst, y_lst):
        # Restrict the fitting region
        if x_min_flt < x < x_max_flt:
            x_fit.append(float(x))
            y_fit.append(float(y))
            # Find top and bottom of step
            if x < x_min_flt + 7:
                bottom_lst.append(float(y))
            elif x > x_max_flt - 7:
                top_lst.append(float(y))

    x_fit = np.asarray(x_fit)
    y_fit = np.asarray(y_fit)
    top = np.mean(np.asarray(top_lst))
    bottom = np.mean(np.asarray(bottom_lst))
    delta = top - bottom

    if rmv_step_bool:
        # Step parameters
        step_at = 100
        step_width = 1
        pp = Parameters()
        pp.add_many(('amplitude', delta),
                    ('sigma', step_width),
                    ('center', step_at))
        step = StepModel(form='erf', prefix='', independent_vars=['x'])
        y_fit = np.asarray([yy - bottom - step.eval(x=xx, params=pp)
                            for xx, yy in zip(x_fit, y_fit)])

    # rest is the same as smooth_the_data
    # now we find the parameters using the -d^2/dx^2
    ysmooth = interp.interp1d(x_fit, y_fit, kind='cubic')
    # differentiate x 2
    yp = np.gradient(ysmooth(x_fit))
    ypp = np.gradient(yp)
    # we want the peaks of -d2/dx2
    ypp = np.asarray([-x for x in ypp])

    return x_fit, y_fit, ysmooth, yp, ypp
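# A minimal usage sketch for smooth_and_remove_step (not from the original
# project; it assumes numpy, scipy.interpolate as interp, and lmfit's
# Parameters and StepModel are imported as in the snippet above). The
# synthetic spectrum below, a peak sitting on a soft step near x = 100, is
# purely illustrative.
import numpy as np
import scipy.interpolate as interp
from lmfit import Parameters
from lmfit.models import StepModel

x = np.linspace(60, 140, 400)
y = np.exp(-(x - 110.0) ** 2 / 8.0) + 0.5 / (1.0 + np.exp(-(x - 100.0)))

x_fit, y_fit, ysmooth, yp, ypp = smooth_and_remove_step(x, y, 80.0, 135.0, True)
print(x_fit[np.argmax(ypp)])   # rough peak location from -d2y/dx2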
def get_zero_model():
    # x grid and an all-zero signal of matching length
    xdata = np.linspace(-100, 200, 301)
    ydata = np.zeros(301)
    model = StepModel(form='linear', prefix='step_')
    zero_model = model.fit(ydata, x=xdata)
    return zero_model
def predictive_model(data: pd.DataFrame, interesting_rows, day_zero_n_patients: int = 20,
                     days_in_future: int = 30, aggregated: bool = False):
    data = data[interesting_rows].iloc[:, :]
    from lmfit.models import StepModel, ExponentialModel

    fig = plt.figure(figsize=(10, 5))
    for c in range(len(data.index)):
        if aggregated:
            values = data.values[c, 4:][data.iloc[c, 4:] > day_zero_n_patients]
        else:
            values = np.concatenate(
                ([0], np.diff(data.values[c, 4:][data.iloc[c, 4:] > day_zero_n_patients])))
        n = values.shape[0]
        x = np.asarray(range(values.shape[0]), dtype='float64')
        y = np.asarray(values, dtype='float64')
        if len(x) == 0:
            continue

        label = "{}-{}".format(data.values[c, 0], data.values[c, 1])
        plt.plot(x, y, label=label)
        if data.values[c, 1] in ["China", "US"]:
            continue

        try:
            model_step = StepModel()
            model_exp = ExponentialModel()
            params_step = model_step.guess(y, x=x)
            params_exp = model_exp.guess(y, x=x)
            result_step = model_step.fit(y, params_step, x=x)
            result_exp = model_exp.fit(y, params_exp, x=x)
        except Exception:
            continue

        x_pred = np.asarray(range(days_in_future))
        plt.plot(x_pred, model_step.eval(result_step.params, x=x_pred), ':',
                 label='fit-{}'.format(label))
        plt.plot(x_pred, model_exp.eval(result_exp.params, x=x_pred), '.',
                 label='fit-{}'.format(label))
        # print(result.fit_report())
        # result.plot_fit()

    plt.legend(prop={"size": 7})
    plt.yscale('log')
    plt.xticks(rotation=45)
    plt.grid(which='both')

    now = datetime.now()
    dt_string = now.strftime("%d%m%Y-%H%M%S")
def make_model(centers, center_pm=None, amplitude=None, fwhm=None, cdf=False, **kwargs):
    """
    Build a model to be fitted, consisting of len(centers) Gaussians.

    Parameters
    ----------
    centers : array-like of float
        Initial center positions of the Gaussians from which to build the model.
    center_pm : float
        Each center may vary within the initial center position +/- the
        center_pm value. Defaults to 10 kDa.
    amplitude : float
        Initial value of the amplitude (area) of the Gaussians.
        Defaults to 1 / len(centers).
    fwhm : float
        Initial full width at half maximum of the Gaussians in kDa.
        Defaults to 10 kDa. Remark: 'fwhm = 2.3548 * sigma'.
    cdf : bool
        Build the model for the cumulative distribution function.
    **kwargs : dict
        Keyword arguments to adjust the model creation. Possible parameters:
        'sigma_center_line_pars' : array-like containing [slope, intercept]
        of the linear relation of sigma to center. If set, the sigma of each
        Gaussian is constrained to the expression 'slope * center + intercept'.

    Returns
    -------
    lmfit.model.CompositeModel
    """
    center_pm = 10 if center_pm is None else center_pm
    amplitude = 1 / len(centers) if amplitude is None else amplitude
    fwhm = 10 if fwhm is None else fwhm
    sigma = fwhm / 2.3548
    sigma_center_line_pars = kwargs.pop('sigma_center_line_pars', None)

    for i, center in enumerate(centers):
        prefix = 'p{}_'.format(i + 1)

        # Decide on Model
        if cdf:
            mod = StepModel(prefix=prefix, form='erf')
        else:
            mod = GaussianModel(prefix=prefix)

        # Create Composite Model
        if i == 0:
            model = mod
        else:
            model += mod

        model.set_param_hint('{}amplitude'.format(prefix), value=amplitude, min=0, max=1)
        model.set_param_hint('{}center'.format(prefix), value=center,
                             min=max(0, center - center_pm), max=center + center_pm)
        if sigma_center_line_pars is None:
            kwargs_sigma = {'value': sigma, 'min': 0, 'max': 10 * sigma}
        else:
            expr = '{}center * {} + {}'.format(prefix, *sigma_center_line_pars)
            kwargs_sigma = {'expr': expr}
        model.set_param_hint('{}sigma'.format(prefix), **kwargs_sigma)

    return model
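# A minimal usage sketch for make_model (not from the original project; it
# assumes lmfit's GaussianModel and StepModel are imported as above). The
# two-component mass distribution and the centers in kDa are synthetic and
# purely illustrative.
import numpy as np
from lmfit.models import GaussianModel, StepModel

x = np.linspace(0, 200, 400)                      # kDa axis
y = (0.6 * np.exp(-(x - 60) ** 2 / (2 * 4.0 ** 2)) / (4.0 * np.sqrt(2 * np.pi))
     + 0.4 * np.exp(-(x - 120) ** 2 / (2 * 5.0 ** 2)) / (5.0 * np.sqrt(2 * np.pi)))

model = make_model(centers=[55, 125], fwhm=12)    # two Gaussians
params = model.make_params()                      # picks up the param hints
result = model.fit(y, params, x=x)
print(result.params['p1_center'].value, result.params['p2_center'].value)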
def test_stepmodel_erf():
    x, y = get_data()
    stepmod = StepModel(form='linear')
    const = ConstantModel()
    pars = stepmod.guess(y, x)
    pars = pars + const.make_params(c=3 * y.min())
    mod = stepmod + const

    out = mod.fit(y, pars, x=x)

    assert out.nfev > 5
    assert out.nvarys == 4
    assert out.chisqr > 1
    assert out.params['c'].value > 3
    assert out.params['center'].value > 1
    assert out.params['center'].value < 4
    assert out.params['amplitude'].value > 50
    assert out.params['sigma'].value > 0.2
    assert out.params['sigma'].value < 1.5
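# The test above depends on a get_data() helper that is not shown here.
# This is a hedged sketch of synthetic data consistent with the assertions
# (a step of height ~110 rising near x ≈ 3 on a constant background of ~12),
# not necessarily the project's own helper.
import numpy as np


def get_data():
    x = np.linspace(0, 10, 201)
    dat = np.ones_like(x)
    dat[:48] = 0.0
    dat[48:77] = np.arange(77 - 48) / (77.0 - 48)   # linear ramp between x≈2.4 and x≈3.8
    dat = dat + 5.0e-2 * np.random.randn(x.size)    # a little noise
    dat = 110.2 * dat + 12.0                        # scale and offset
    return x, dat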
def Step(signal, guess):
    if guess == False:
        return [0, 0, 0]
    else:
        amp, centre, stdev, offset = guess

        data = np.array([range(len(signal)), signal]).T
        X = data[:, 0]
        Y = data[:, 1]

        step_mod = StepModel(prefix='step')
        const_mod = ConstantModel(prefix='const_')

        pars = step_mod.guess(Y, x=X, center=centre)
        pars += const_mod.guess(Y, x=X)

        mod = step_mod + const_mod
        result = mod.fit(Y, pars, x=X)

        # write error report
        # print result.fit_report()

        return X, result.best_fit, result.redchi, 0
def get_fit(df, country):
    x, y = (df[df[country] > 0][country].index.values,
            df[df[country] > 0][country].values)
    mod = StepModel(form='logistic')
    pars = mod.guess(y, x=x)

    # Give no weight
    # fit = mod.fit(y, pars, x=x)

    # Give weight to highest points
    # fit = mod.fit(y, pars, x=x, weights=(1 / (y + 1e-3))[::-1])

    # Or give weight to newest points
    fit = mod.fit(y, pars, x=x, weights=(1 / (x + 1e-3))[::-1])

    # Or give weight to least and highest points using sech
    # y_max = y.max()
    # coe = 10 / y_max
    # fit = mod.fit(y, pars, x=x, weights=(1 - 1/np.cosh(coe*(y - y_max / 2))))

    # Or give weight to least and highest points using polynomial
    # y_max = y.max()
    # fit = mod.fit(y, pars, x=x, weights=pow(y - y_max / 2, 4) / pow(y_max / 2, 4))

    return fit
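# A minimal usage sketch for get_fit (not from the original project). It
# assumes a DataFrame indexed by day number with one column per country;
# the column name "Country_X" and the logistic case curve are purely
# illustrative.
import numpy as np
import pandas as pd
from lmfit.models import StepModel

days = np.arange(60)
cases = 5000 / (1 + np.exp(-(days - 30) / 4))        # synthetic logistic growth
df = pd.DataFrame({'Country_X': cases}, index=days)

fit = get_fit(df, 'Country_X')
print(fit.params['center'].value, fit.params['amplitude'].value)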
def GaussStepConst(signal, guess):
    """
    Fits high contrast data very well
    """
    if guess == False:
        return [0, 0, 0]
    else:
        amp, centre, stdev, offset = guess

        data = np.array([range(len(signal)), signal]).T
        X = data[:, 0]
        Y = data[:, 1]

        # gauss_mod = Model(gaussian)
        gauss_mod = Model(gaussian)
        const_mod = ConstantModel()
        step_mod = StepModel(prefix='step')

        gauss_mod.set_param_hint('width', value=stdev / 2., min=stdev / 3., max=stdev)
        gauss_mod.set_param_hint('fwhm', expr='2.3548*width')
        pars = gauss_mod.make_params(height=amp, center=centre, width=stdev / 2., offset=offset)
        pars += step_mod.guess(Y, x=X, center=centre)
        pars += const_mod.guess(Y, x=X)
        pars['width'].vary = False

        mod = const_mod + gauss_mod + step_mod
        result = mod.fit(Y, pars, x=X)

        # write error report
        # print result.fit_report()

        fwhm = result.best_values['width'] * 2.3548
        return X, result.best_fit, result.redchi, fwhm
    if funccenter[i + 1] - funccenter[i] > 0.8:
        b.append(i)
        b.append(i + 1)

for j in b:
    if j not in b_new:
        b_new.append(j)
funccenter = funccenter[b_new]

## First fitting attempt
gaussnum = len(funccenter)
funcnum = gaussnum

# initial guess x0f, lower bound lb, upper bound ub
x0f = np.zeros((funcnum, 4))
lb = np.zeros((funcnum, 4))
ub = np.zeros((funcnum, 4))

# initial guess for the arctangent step function
step1 = StepModel(form='arctan', prefix='step1_')
# pars.update(step2.guess(y,x=x))
pars = step1.make_params()
pars['step1_center'].set(e0 + 6, min=e0 + 3, max=e0 + 8)
pars['step1_amplitude'].set(0.5, min=0.1, max=1)
pars['step1_sigma'].set(0.5, min=0.3, max=0.8)
mod = step1

# initial guess for the Gaussians
x0f[:funcnum, 2] = funccenter[:]
for n0 in range(0, funcnum):
    x0f[n0, 0:2] = [0.5, 0.5]
    lb[n0, :] = [0.2, 0.2, x0f[n0, 2] - 1, 0]
    ub[n0, :] = [3, 0.7, x0f[n0, 2] + 1, 0.1]
    gauss = GaussianModel(prefix='g%s_' % int(n0 + 1))
    pars.update(gauss.make_params())
    pars['g%s_amplitude' % int(n0 + 1)].set(x0f[n0][0],
# <examples/doc_builtinmodels_stepmodel.py>
import matplotlib.pyplot as plt
import numpy as np

from lmfit.models import LinearModel, StepModel

x = np.linspace(0, 10, 201)
y = np.ones_like(x)
y[:48] = 0.0
y[48:77] = np.arange(77-48)/(77.0-48)
np.random.seed(0)
y = 110.2 * (y + 9e-3*np.random.randn(x.size)) + 12.0 + 2.22*x

step_mod = StepModel(form='erf', prefix='step_')
line_mod = LinearModel(prefix='line_')

pars = line_mod.make_params(intercept=y.min(), slope=0)
pars += step_mod.guess(y, x=x, center=2.5)

mod = step_mod + line_mod
out = mod.fit(y, pars, x=x)

print(out.fit_report())

plt.plot(x, y, 'b')
plt.plot(x, out.init_fit, 'k--', label='initial fit')
plt.plot(x, out.best_fit, 'r-', label='best fit')
plt.legend(loc='best')
plt.show()
# <end examples/doc_builtinmodels_stepmodel.py>
#!/usr/bin/env python

# <examples/doc_builtinmodels_stepmodel.py>
import matplotlib.pyplot as plt
import numpy as np

from lmfit.models import LinearModel, StepModel

x = np.linspace(0, 10, 201)
y = np.ones_like(x)
y[:48] = 0.0
y[48:77] = np.arange(77-48)/(77.0-48)
np.random.seed(0)
y = 110.2 * (y + 9e-3*np.random.randn(len(x))) + 12.0 + 2.22*x

step_mod = StepModel(form='erf', prefix='step_')
line_mod = LinearModel(prefix='line_')

pars = line_mod.make_params(intercept=y.min(), slope=0)
pars += step_mod.guess(y, x=x, center=2.5)

mod = step_mod + line_mod
out = mod.fit(y, pars, x=x)
print(out.fit_report())

plt.plot(x, y, 'b')
plt.plot(x, out.init_fit, 'k--')
plt.plot(x, out.best_fit, 'r-')
plt.show()
# <end examples/doc_builtinmodels_stepmodel.py>
def fit_Voigt_and_step(x_lst, y_lst, x_min_flt, x_max_flt, pre,
                       width_1, width_2, print_all_fits_bool, place_to_save_str):
    '''
    x_lst = x axis
    y_lst = spectra to fit
    x_min_flt = beginning of the fitting region
    x_max_flt = end of the fitting region
    print_all_fits_bool = Bool, do you want to save all plots
    place_to_save_str = string that is the filename where we're saving the data

    returns result object
    '''
    # Restrict the fit
    x_bkp = x_lst
    y_bkp = y_lst
    x_lst, y_lst, ysmooth, yp, ypp = smooth_and_remove_step(
        x_lst, y_lst, x_min_flt, x_max_flt, True)

    '''
    *******************************************************
    Section of bad code that it'd take too long to do right
    *******************************************************
    '''
    step_at = 95
    step_width = 10
    prefp = pre
    prefs = "stp"
    prefc = 'c'
    w_guess = 3  # sigma
    '''
    *******************************************************
    Section of bad code that it'd take too long to do right
    *******************************************************
    '''

    # this is the money
    # defines the model that'll be fit
    peak = VoigtModel(prefix=prefp, independent_vars=['x'], nan_policy='raise')
    step = StepModel(prefix=prefs, independent_vars=['x'], form='logistic')
    const = ConstantModel(prefix=prefc, independent_vars=['x'], nan_policy='raise', form='logistic')
    mod = peak + step + const
    # mod = peak + const

    # guess parameters
    x_max = x_lst[np.argmax(y_lst)]
    y_max = y_lst[np.argmax(y_lst)]

    # Peak
    # here we set up the peak fitting guess; the fitter will make a parameter object out of these hints
    # (value_max, value_max_min and value_max_max are tuning constants defined elsewhere)
    mod.set_param_hint(prefp + 'amplitude', value=value_max * y_max,
                       min=.6 * value_max_min * y_max, max=4 * value_max_max * y_max, vary=True)
    # mod.set_param_hint(prefp+'center', value=x_max, min=x_max*(1-wiggle_room), max=x_max*(1+wiggle_room), vary=True)
    mod.set_param_hint(prefp + 'center', value=x_max, min=x_max * .97, max=x_max * 1.03, vary=True)
    # Basically FWHM/3.6
    if pre == 'one':
        # fitting with only one peak
        mod.set_param_hint(prefp + 'sigma', value=width_1, min=.25 * width_2, max=2 * width_1, vary=True)
    else:
        # fitting with two peaks
        mod.set_param_hint(prefp + 'sigma', value=width_2, min=0, max=width_1, vary=True)

    # Constant
    top = []
    bottom = []
    for a, b in zip(x_lst, y_lst):
        if a > 135:
            top.append(b)
        elif a < 93:
            bottom.append(b)
    top = np.mean(np.asarray(top))
    bottom = np.mean(np.asarray(bottom))
    mod.set_param_hint(prefc + 'c', value=bottom, min=-3 * bottom, max=3 * bottom, vary=True)

    # restrict the fit again
    x_fit = []
    y_fit = []
    for a, b in zip(x_lst, y_lst):
        if 80 < a < 135:
            x_fit.append(a)
            y_fit.append(b)
    top = y_fit[0]
    bottom = y_fit[-1]

    # Step
    # Step height
    delta = 2 * abs(top - bottom)
    if delta == 0:
        delta = 1
    mod.set_param_hint(prefs + 'amplitude', value=delta, min=-3 * delta, max=3 * delta, vary=True)
    # Characteristic width
    mod.set_param_hint(prefs + 'sigma', value=3, min=1, max=3, vary=False)
    # The halfway point...
    mod.set_param_hint(prefs + 'center', value=step_at, min=step_at - step_width,
                       max=step_at + step_width, vary=False)

    result = mod.fit(y_fit, x=x_fit, params=mod.make_params())

    # If print all fits ...
    if print_all_fits_bool:
        x_dense = np.arange(x_min_flt, x_max_flt, (x_max_flt - x_min_flt) / 300.0).tolist()

        # each component
        for x in result.best_values:
            if prefp in x:
                # Get peak
                peak.set_param_hint(x, value=result.best_values[str(x)])
            elif prefs in x:
                # Get step
                step.set_param_hint(x, value=result.best_values[str(x)])

        # Data - 'background'
        y_m_background = []
        for a, b in zip(x_lst, y_lst):
            y_m_background.append(b - result.eval(x=a) + peak.eval(x=a, params=peak.make_params()))

        peak_only = [peak.eval(x=yy, params=peak.make_params()) for yy in x_dense]
        # stp_only = [result.best_values['stpamplitude'] + result.best_values['cc']]*len(x_dense)

        # sum of them
        # y_fit = [a+b for a,b in zip(peak_only,stp_only)]
        y_fit = [a for a in peak_only]

        plt.plot(x_dense, peak_only, 'g', label='Peak Only')
        # plt.plot(x_dense, stp_only, 'g--', label=None)
        # plt.plot(x_dense, y_fit, 'g', label="Fit Result")
        plt.plot(x_lst, y_lst, 'bx', label="Data")
        plt.plot(x_lst, y_m_background, 'ko', label="Data-Background")
        plt.title("Fit vs Data")
        plt.xlabel("Inv Cm")
        plt.ylabel("counts")
        plt.legend()
        plt.savefig(place_to_save_str + "Voigt&Step")
        plt.clf()

    return result
from typing import Dict, Union

import pandas as pd
import lmfit
from lmfit.models import StepModel

from covid19_data_analyzer.data_functions.analysis.factory_functions import (
    batch_fit_model,
    fit_data_model,
    predict_trend,
)

LOGISTIC_MODEL = StepModel(form="logistic")


def fit_data_logistic_curve(
    covid19_data: pd.DataFrame,
    parent_region: str,
    region: str,
    data_set: str = "confirmed",
    sigma: Union[int, float] = 5,
) -> Dict[str, Union[lmfit.model.ModelResult, pd.DataFrame]]:
    """
    Implementation of fit_data_model, with settings specific to the
    logistic curve model.

    Parameters
    ----------
    covid19_data : pd.DataFrame
        Full covid19 data from a data_source
# In[45]:

spark_prices = spark_prices[0:50]
utilities = utilities[0:50]

# In[46]:

df = pd.DataFrame({"sparks": spark_prices, "utilities": utilities})
df = df.sort_values(by="sparks")
df = df.round(2)
spark_prices = list(df["sparks"])
utilities = list(df["utilities"])

# In[47]:

model = StepModel(form='linear', prefix='step_')
fitted_model = model.fit(utilities, x=spark_prices)

# print results
# plot data and best-fit
fitted_model.plot()

# In[64]:

lmodel = Model(two_lines)

# In[74]:

params = lmodel.make_params(offset1=0, slope1=0,
    absorption coef for the known sample.

    Returns
    -------
    float
        returns the energy where the point of inflection of df happens.
    """
    energy_diff = np.diff(energy)
    df_diff = np.diff(df)
    slope = (df_diff / energy_diff)
    slope_min = np.amin(-1 * slope)
    index = np.where(-slope_min - slope == 0)[0]
    return energy[index].item()


# make parameters for the deconvolution of known
arctan_mod = StepModel(form='atan', prefix='arctan_')
paras = arctan_mod.make_params()

# construct the model with 5 arctangents and 5 Lorentzians
atan2 = StepModel(form='atan', prefix='atan2_')
atan3 = StepModel(form='atan', prefix='atan3_')
atan4 = StepModel(form='atan', prefix='atan4_')
atan5 = StepModel(form='atan', prefix='atan5_')
atan6 = StepModel(form='atan', prefix='atan6_')
lor2 = LorentzianModel(prefix='l2_')
lor3 = LorentzianModel(prefix='l3_')
lor4 = LorentzianModel(prefix='l4_')
lor5 = LorentzianModel(prefix='l5_')
lor6 = LorentzianModel(prefix='l6_')
model = atan2 + atan3 + atan4 + atan5 + atan6 + lor2 + lor3 + lor4 + lor5 + lor6
model.set_param_hint('l2_amplitude', min=0.0)
model.set_param_hint('l3_amplitude', min=0.0)
def computeRegressionVars(timeseries):
    timeseries = sorted(timeseries, key=lambda srs: srs["t"])
    y = np.array([val["y"] for val in timeseries], dtype=np.uint32)
    t = np.array([val["t"] for val in timeseries], dtype=np.uint64) / 1000 / 24 / 3600
    print(y)
    day0 = t[0]
    t = t - t[0]
    dayz = t[-1]
    print("Days detected", dayz)

    f = interpolate.interp1d(t, y)
    xdata = np.arange(0, dayz, 1, dtype=np.uint16)
    ydata = f(xdata)
    print(xdata)
    print(ydata)

    # model data as Step + Line
    step_mod = StepModel(form='logistic', prefix='step_')
    model = step_mod

    # make named parameters, giving initial values:
    for sig in [0.1, 7, .5, 4, 2]:
        pars = model.make_params(line_intercept=ydata.min(), line_slope=0,
                                 step_center=xdata.mean(), step_amplitude=ydata.std(),
                                 step_sigma=sig)
        # fit data to this model with these parameters
        try:
            print("Fitting curve...")
            out = model.fit(ydata, pars, x=xdata)
            print("curve fitted!")
        except Exception as e:
            print(e)
            print("Fit exception hit, retrying with other inits")
        # composite_err = (out.params["step_sigma"].stderr/out.params["step_sigma"].value) + (out.params["step_center"].stderr/out.params["step_center"].value) + (out.params["step_amplitude"].stderr/out.params["step_amplitude"].value)
        # print("composite error = ", composite_err)
        if out.params["step_sigma"].value > 0 and out.params["step_sigma"].value < 30:
            break

    print(fit_report(out))

    amplitudeErr = (out.params["step_amplitude"].stderr / out.params["step_amplitude"].value)
    amplitude = out.params["step_amplitude"].value
    centerErr = (out.params["step_center"].stderr / out.params["step_center"].value)
    center = out.params["step_center"].value
    sigmaErr = (out.params["step_sigma"].stderr / out.params["step_sigma"].value)
    sigma = out.params["step_sigma"].value

    # print("Time Series Day 0", day0)
    # print("Projected Total Cases", amplitude)
    # print("Projected Turning Point", center)
    # print("Projected Sigma", sigma)

    return {
        "day0": day0 * 1000 * 24 * 3600,
        "amplitude": amplitude,
        "amplitud_err": amplitudeErr,
        "center": center,
        "center_err": centerErr,
        "sigma": sigma,
        "sigma_err": sigmaErr,
    }
def vec_latency_VX_vs_T(traces, participants, conditions, journal,
                        sw_c=['NS', 'NS'], sw_e=['PS', 'AS'],
                        adj_axis=300, crit=0.01, Out_crit=1.5,
                        close_policy=False, fig_width=12):

    def nan_helper(y):
        """
        Helper to handle indices and logical indices of NaNs.

        Main reason for this code? => Avoid errors when using lmfit.
        See its use below for an example.
        """
        return np.isnan(y), lambda z: z.nonzero()[0]

    for cond in conditions:
        # excluded participants
        if cond != 'Healthy':
            subjs = participants['ctl'][cond][participants['ctl'][cond] != 2]
        else:
            subjs = participants['ctl'][cond]

        for s in subjs:
            fig, ax = plt.subplots(1, 1, figsize=(fig_width, fig_width / 1.6180))
            for c, switches, col_code, VA_col in zip(
                    ['ctl', 'exp'], [sw_c, sw_e],
                    [['turquoise', 'b'], ['r', 'g']], ['b', 'k']):
                latencies = []
                for side, col, l_style in zip(['left', 'right'], col_code, ['-', '--']):
                    Y = np.nanmean(np.concatenate(
                        (traces[c][cond][s][switches[0]][side],
                         traces[c][cond][s][switches[1]][side]), axis=0), axis=0)
                    X = np.arange(Y.size) - adj_axis

                    # Y interpolation of NaNs
                    nans, x = nan_helper(Y)
                    Y[nans] = np.interp(x(nans), x(~nans), Y[~nans])

                    # _ = ax.plot(X, Y, color=col, linestyle=l_style,
                    #             linewidth=3, label='mean {} side velocity'.format(side))
                    # error_vec = Out_crit*np.nanstd(np.concatenate((traces[c][cond][s][switches[0]][side],
                    #                                                traces[c][cond][s][switches[1]][side]), axis=0), axis=0)
                    # _ = ax.fill_between(X, Y-error_vec, Y+error_vec, facecolor=col, alpha=0.3)

                    # Mean trace smoothing using the Levenberg-Marquardt algorithm
                    mod = StepModel(form='erf')
                    pars = mod.guess(Y, x=X)
                    out = mod.fit(Y, pars, x=X)
                    ax.plot(np.asarray(X), out.best_fit, color=col)

                    current = np.concatenate(
                        (traces[c][cond][s][switches[0]][side],
                         traces[c][cond][s][switches[1]][side]), axis=0).shape[0]
                    sum_of = np.concatenate(
                        (traces[c][cond][s][switches[0]]['left'],
                         traces[c][cond][s][switches[1]]['left']), axis=0).shape[0] \
                        + np.concatenate(
                            (traces[c][cond][s][switches[0]]['right'],
                             traces[c][cond][s][switches[1]]['right']), axis=0).shape[0]
                    perc_side = current / sum_of

                    list_l = []
                    for tps in range(len(X)):
                        if perc_side * abs(out.best_fit[tps]) > crit:
                            list_l.append(X[tps])
                    if len(list_l) != 0:
                        latencies.append(list_l[0])
                    else:
                        ax.text(adj_axis, 1.5, "NO LATENCY FOUND !", color='r', fontsize=15)

                ax.axvline(np.mean(latencies), color=VA_col, linewidth=3,
                           label='latency {} = {} ms'.format(c, np.mean(latencies)))

            ax.set_ylabel('Smooth eye velocity (°/s)', fontsize=14)
            ax.set_xlabel('Time', fontsize=11)
            ax.set_xlim([-100, 1000])
            ax.set_ylim([-8, 8])
            _ = ax.set_title('{} {} dep rule = {}'.format(
                cond, int(s), journal[c][cond][s][:, 1][0][0]))
            _ = ax.legend(bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.)

            if close_policy:
                plt.close('all')
def get_county_fit(df, tp):
    x, y = df.index.values, df[tp].values
    mod = StepModel(form='logistic')
    pars = mod.guess(y, x=x)
    fit = mod.fit(y, pars, x=x, weights=(1 / (x + 1e-3))[::-1])
    return fit
def fit_Voigt_and_step(x_lst, y_lst, x_min_flt, x_max_flt,
                       print_all_fits_bool, place_to_save_str):
    '''
    x_lst = x axis
    y_lst = spectra to fit
    x_min_flt = beginning of the fitting region
    x_max_flt = end of the fitting region
    print_all_fits_bool = Bool, do you want to save all plots
    place_to_save_str = string that is the filename where we're saving the data
    '''
    import numpy as np
    # for smoothing the curves
    import scipy.interpolate as interp  # import splev
    from lmfit.models import VoigtModel, StepModel, ConstantModel
    from lmfit import CompositeModel

    # Restrict the fit
    x_fit = []
    y_fit = []
    for x, y in zip(x_lst, y_lst):
        if x_min_flt < x < x_max_flt:
            x_fit.append(float(x))
            y_fit.append(float(y))
    x_fit = np.asarray(x_fit)
    y_fit = np.asarray(y_fit)

    # now we find the parameters using the -d^2/dx^2
    ysmooth = interp.interp1d(x_fit, y_fit, kind='cubic')
    # differentiate x 2
    yp = np.gradient(ysmooth(x_fit))
    ypp = np.gradient(yp)
    # we want the peaks of -d2/dx2
    ypp = np.asarray([-x for x in ypp])

    '''
    *******************************************************
    Section of bad code that it'd take too long to do right
    *******************************************************
    '''
    step_at = 100
    step_width = 3
    prefp = "one"
    prefs = "stp"
    prefc = 'c'
    w_guess = 3  # sigma
    '''
    *******************************************************
    Section of bad code that it'd take too long to do right
    *******************************************************
    '''

    # this is the money
    # defines the model that'll be fit
    peak = VoigtModel(prefix=prefp, independent_vars=['x'], nan_policy='raise')
    step = StepModel(prefix=prefs, independent_vars=['x'], nan_policy='raise')
    const = ConstantModel(prefix=prefc, independent_vars=['x'], nan_policy='raise', form='logistic')
    mod = peak + step + const

    # guess parameters
    x_max = x_fit[np.argmax(y_fit)]
    y_max = y_fit[np.argmax(y_fit)]

    # Peak
    # here we set up the peak fitting guess; the fitter will make a parameter object out of these hints
    mod.set_param_hint(prefp + 'amplitude', value=4 * y_max, min=y_max, max=30 * y_max, vary=True)
    # mod.set_param_hint(prefp+'center', value=x_max, min=x_max*(1-wiggle_room), max=x_max*(1+wiggle_room), vary=True)
    mod.set_param_hint(prefp + 'center', value=x_max, vary=True)
    # Basically FWHM/3.6
    mod.set_param_hint(prefp + 'sigma', value=w_guess, min=0, max=5 * w_guess, vary=True)

    # Step
    # Step height
    delta = abs(y_fit[-1] - y_fit[0])
    mod.set_param_hint(prefs + 'amplitude', value=delta, min=delta * .9, max=delta * 1.1, vary=True)
    # Characteristic width
    mod.set_param_hint(prefs + 'sigma', value=2, min=1, max=3, vary=True)
    # The halfway point...
    mod.set_param_hint(prefs + 'center', value=step_at, min=step_at - step_width,
                       max=step_at + step_width, vary=True)

    # Constant
    mod.set_param_hint(prefc + 'c', value=y_fit[-1], min=0, max=2 * y_fit[0], vary=True)

    result = mod.fit(y_fit, x=x_fit, params=mod.make_params())

    # If print all fits ...
    if print_all_fits_bool:
        x_dense = np.arange(x_min_flt, x_max_flt, (x_max_flt - x_min_flt) / 300.0).tolist()

        result.plot_fit(xlabel='Inv Cm', ylabel='counts', datafmt='xb', numpoints=len(x_fit) * 10)

        for x in result.best_values:
            if prefp in x:
                # Get peak
                peak.set_param_hint(x, value=result.best_values[str(x)])
            elif prefs in x:
                # Get step
                step.set_param_hint(x, value=result.best_values[str(x)])

        comp = [result.best_values['cc'] + peak.eval(x=yy, params=peak.make_params()) for yy in x_dense]
        plt.plot(x_dense, comp, 'green', label=None)

        comp = [result.best_values['stpamplitude'] + result.best_values['cc']] * len(x_dense)
        plt.plot(x_dense, comp, 'green', label=None)

        # comp = [result.best_values['cc'] + step.eval(x=yy, params=step.make_params()) for yy in x_dense]
        # plt.plot(x_dense, comp, 'green', label=None)

        plt.title("Fit vs Data")
        plt.legend()
        plt.savefig(place_to_save_str)
        plt.clf()

    return result.best_values
C = []
for i in range(N):
    # C.append(X[:,i].dot(y_label[i]))
    C.append(X.T[i].sum())
C = np.array(C)
print(C.shape)

dC = [0.5 for i in C]
print(dC)

ig = [5., 6., 5., 1.]

# model data as Step + Line
step_mod = StepModel(form='linear', prefix='step_')
line_mod = LinearModel(prefix='line_')
model = step_mod + line_mod

# make named parameters, giving initial values:
pars = model.make_params(line_intercept=C.min(), line_slope=0,
                         step_center=x.mean(), step_amplitude=C.std(),
                         step_sigma=2.0)

# fit data to this model with these parameters
out = model.fit(C, pars, x=x)

# print results