def exponential_fit_offset(x, y, amp_guess=1, decay_guess=1, offset_guess=0,
                           errors=True):
    """
    Simple helper function that speeds up a single exponential fit with
    offset. Uses lmfit.

    Parameters
    ----------
    x, y : (float)
        Same-length x, y arrays of data to be fitted

    Returns
    -------
    Returns amp, decay and offset, with uncertainties (uses the
    uncertainties package if available)

    !!!!! not tested or working !!!!!
    """
    from lmfit.models import ExpressionModel
    mod = ExpressionModel("offset + amp * exp(-x/decay)")
    par = mod.make_params(amp=amp_guess, decay=decay_guess,
                          offset=offset_guess)
    out = mod.fit(y, params=par, x=x)
    a = out.params['amp']
    d = out.params['decay']
    o = out.params['offset']
    if errors:
        try:
            from uncertainties import ufloat
            return (ufloat(a.value, a.stderr), ufloat(d.value, d.stderr),
                    ufloat(o.value, o.stderr))
        except ImportError:
            return a.value, a.stderr, d.value, d.stderr, o.value, o.stderr
    else:
        return a.value, d.value, o.value
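# Usage sketch (not from the original source): fit synthetic decaying data
# with exponential_fit_offset. Assumes numpy; the data values, seed, and the
# _demo_* name are illustrative only.
def _demo_exponential_fit_offset():
    import numpy as np
    rng = np.random.default_rng(0)
    x = np.linspace(0, 10, 200)
    y = 0.5 + 2.0 * np.exp(-x / 3.0) + rng.normal(0, 0.02, x.size)
    # returns amp, decay, offset (as ufloats if uncertainties is installed)
    return exponential_fit_offset(x, y, amp_guess=2, decay_guess=3,
                                  offset_guess=0.5)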
def model_decay_curve(dfout, Column):
    '''This function models the decay curve for core and new genes.'''
    # Model the core gene curve using an exponential decay function:
    # Fc = Kc*exp(-N/τc) + Ω
    print(f'\n\nFitting Exponential Decay function to {Column}')
    print('Using Exponential Decay Function: K*exp-(N/\u03C4) + \u03A9')

    # Initialize model
    Custom_Exponential = ExpressionModel(
        'A * exp(-x/tau) + omega',
        independent_vars=['x']
    )

    # Initialize custom parameters
    Expression_Params = Custom_Exponential.make_params()
    # add params with tuples: (NAME, VALUE, VARY, MIN, MAX, EXPR, BRUTE_STEP)
    Expression_Params.add_many(
        ('A', 5, True, 0, None, None, 0.1),
        ('tau', 5, True, 0, None, None, 0.1),
        ('omega', 5, True, 0, None, None, 0.1)
    )

    EDM_Fit = Custom_Exponential.fit(
        dfout[Column], Expression_Params, x=dfout['n']
    )
    dfout[f'{Column}_EDM'] = EDM_Fit.best_fit
    omega = EDM_Fit.best_values['omega']

    return dfout, omega
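# Usage sketch (illustrative, not from the source): model_decay_curve expects
# a DataFrame with an 'n' column (genome count) plus the column to fit; the
# 'CoreGenes' name and values below are hypothetical. Assumes pandas and
# numpy, and that ExpressionModel has been imported from lmfit.models.
def _demo_model_decay_curve():
    import numpy as np
    import pandas as pd
    n = np.arange(1, 31)
    df = pd.DataFrame({'n': n,
                       'CoreGenes': 8 * np.exp(-n / 4) + 3})
    df, omega = model_decay_curve(df, 'CoreGenes')
    print('fitted omega (asymptote):', omega)
    return df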
def CreateModel(stim_events):
    '''
    Creates an lmfit model object with a gamma variate at every stimulus
    timepoint. Parameter values seem to work well both for neurons and
    astrocytes (in case of astrocyte/isoflurane the first gamma variate will
    just cover the whole time course and all others get almost zero
    amplitude).
    '''
    script = """
def gammavar(ymax, tmax, a, x, t0):
    x_input = x * (x > t0) + (t0 + tmax) * (x <= t0)
    return (exp(log(ymax)+a*(1+log((x_input-t0)/tmax)-(x_input-t0)/tmax))) * (x > t0)
"""
    model = ExpressionModel('gammavar(ymax0, tmax0, a0, x, 0.02)',
                            init_script=script,
                            independent_vars=['x'])
    k = 1
    for i in stim_events[1:]:
        model += ExpressionModel(f'gammavar(ymax{k}, tmax{k}, a{k}, x, {i})',
                                 init_script=script,
                                 independent_vars=['x'])
        k += 1
    params = model.make_params()
    params['ymax0'].set(value=1, min=0.002, max=10)
    params['tmax0'].set(value=2, min=0.1, max=8)
    params['a0'].set(1, min=0.05, max=5)
    for i in range(1, len(stim_events)):
        params[f'tmax{i}'].set(expr='tmax0')
        params[f'a{i}'].set(expr='a0')
        params[f'ymax{i}'].set(0.8, min=0.002, max=10)
    return model, params
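# Usage sketch (illustrative): build a multi-gamma-variate model for a few
# hypothetical stimulus times and fit a synthetic trace. Assumes numpy and
# that ExpressionModel has been imported from lmfit.models; the times and
# the _demo_* name are made up.
def _demo_CreateModel():
    import numpy as np
    stim_events = [0.02, 5.0, 10.0]   # hypothetical stimulus times (s)
    model, params = CreateModel(stim_events)
    t = np.linspace(0, 20, 400)
    trace = model.eval(params, x=t)   # synthetic data from the initial guess
    result = model.fit(trace, params, x=t)
    return result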
def exp_fits(x, x0, xend, t_res):
    """
    Fits an exponential signal increase starting from x0 to xend, optionally
    with an exponential washout term of the contrast agent. Output contains
    various parameters computed from the fit.
    """
    # model = ExpressionModel('B*(1 - exp(-ktrans * (x)))')  # without washout
    model = ExpressionModel('B*(1 - exp(-ktrans * (x))) * exp(-w * x)',
                            nan_policy='propagate')  # with washout
    params = model.make_params()
    params['ktrans'].set(value=0.001, min=0.0001, max=0.1)
    params['B'].set(value=0.1, min=0.01, max=50)
    params['w'].set(value=0.0001, min=0, max=0.001)

    signal = x[x0:xend] - x[x0]
    time = np.arange(0, signal.size, 1) * t_res
    result = model.fit(signal, params, x=time)

    # derivative of the model function at point x=0
    slope = result.best_values['B'] * result.best_values['ktrans']
    # integral = np.sum(result.best_fit)
    maximum = np.max(result.best_fit)
    TTP = np.argmax(result.best_fit) * t_res  # time to peak

    return result, slope, maximum, TTP, signal, time
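# Usage sketch (illustrative): exp_fits takes the full signal array plus the
# start/end indices of the uptake and the time resolution in seconds; the
# numbers below are made up. Assumes numpy.
def _demo_exp_fits():
    import numpy as np
    t_res = 1.5                                    # hypothetical frame time (s)
    t = np.arange(0, 300) * t_res
    curve = 10 * (1 - np.exp(-0.01 * t)) * np.exp(-0.0002 * t)
    result, slope, maximum, TTP, signal, time = exp_fits(curve, 0, 300, t_res)
    print('initial slope:', slope, 'TTP (s):', TTP)
    return result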
def findEfermiByArcTan(energy, intensity):
    """
    Searches for the Efermi energy by fitting the XANES with an arctan.

    :param energy:
    :param intensity:
    :return: best_params = {'a':..., 'x0':...}, arctan_y
    """
    assert len(energy) == len(intensity), \
        f'{len(energy)} != {len(intensity)} ' + str(energy.shape) + ' ' + str(intensity.shape)
    last = np.mean(intensity[-5:])
    efermi0, _, _ = findExpEfermi(energy, intensity, 0.5 * last)
    mod = ExpressionModel('b/(1+exp(-a*(x - x0)))+c')
    params = mod.make_params(a=0.3, x0=efermi0, b=last, c=0)  # starting values
    params['a'].set(min=0)
    params['b'].set(min=0)
    result = mod.fit(intensity, params, x=energy)
    return result.best_values, result.best_fit
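# Usage sketch (illustrative): fit a synthetic absorption edge. Relies on the
# findExpEfermi helper from the same module for the initial edge estimate;
# the spectrum below is synthetic and the energies are made up. Assumes numpy.
def _demo_findEfermiByArcTan():
    import numpy as np
    energy = np.linspace(7100, 7200, 300)
    intensity = 1.0 / (1 + np.exp(-0.3 * (energy - 7125))) \
        + np.random.default_rng(0).normal(0, 0.01, energy.size)
    best_values, arctan_y = findEfermiByArcTan(energy, intensity)
    print('edge position x0:', best_values['x0'])
    return best_values, arctan_y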
def CreateModel_simple(stim_events):
    script = """
def gammavar(ymax, tmax, a, x, t0):
    x_input = x * (x > t0) + (t0 + tmax) * (x <= t0)
    return (exp(log(ymax)+a*(1+log((x_input-t0)/tmax)-(x_input-t0)/tmax))) * (x > t0)
"""
    model = ExpressionModel('gammavar(ymax0, tmax0, a0, x, 0.02)',
                            init_script=script,
                            independent_vars=['x'])
    params = model.make_params()
    params['ymax0'].set(value=1, min=0.002, max=10)
    params['tmax0'].set(value=2, min=0.1, max=20)
    params['a0'].set(1, min=0.05, max=5)
    return model, params
def substractBase(x, y, peakInterval, baseFitInterval, model,
                  usePositiveConstrains, extrapolate=None,
                  useStartParams=None):
    """
    Fit base by Cauchy function and subtract it from y.

    :param x: argument
    :param y: function values
    :param peakInterval: interval of peak search (not included in base fitting)
    :param baseFitInterval: interval of base fit. Usually it includes peakInterval
    :param model: 'cauchy' or 'bezier' or 'arctan'
    :param usePositiveConstrains: add constraint y_base <= y
    :param extrapolate: {'left': percent_dx_left, 'right': percent_dx_right}
    :return: x_peak, y_sub - peak with subtracted base (on interval
        peakInterval); x_base, y_base - base on baseFitInterval; y_peak - peak
        part of original func; y_sub_full - y_sub expanded to baseFitInterval;
        info - optimization info
    """
    assert model in ['cauchy', 'bezier', 'arctan']
    assert len(x) == len(y)
    if extrapolate is None:
        extrapolate = {}
    ind_peak = (x >= peakInterval[0]) & (x <= peakInterval[1])
    ind_base_full = (x >= baseFitInterval[0]) & (x <= baseFitInterval[1])
    ind_base = ind_base_full & ~ind_peak
    x_peak = x[ind_peak]
    y_peak = y[ind_peak]
    x_base = x[ind_base]
    y_base = y[ind_base]
    x_base_full = x[ind_base_full]
    y_base_full = y[ind_base_full]
    # make x_fit, y_fit by extrapolating the base inside the peak interval
    # (linear extrapolation from both ends)
    if usePositiveConstrains:
        ind_base_left = (x < peakInterval[0]) & ind_base_full
        b1, a1 = linearReg(x[ind_base_left], y[ind_base_left])
        ind_base_right = (x > peakInterval[1]) & ind_base_full
        b2, a2 = linearReg(x[ind_base_right], y[ind_base_right])
        y_gap = np.max([a1 * x_peak + b1, a2 * x_peak + b2], axis=0).reshape(-1)
        assert len(y_gap) == len(x_peak)
        x_fit = x_base_full
        y_fit = np.concatenate((y[ind_base_left], y_gap, y[ind_base_right]))
        assert len(x_fit) == len(y_fit), str(len(x_fit)) + " " + str(len(y_fit))
    else:
        x_fit = x_base
        y_fit = y_base

    x1 = x_base[0]
    x2 = x_base[-1]
    y1 = y_base[0]
    y2 = y_base[-1]
    if 'left' in extrapolate:
        n = np.where(x_base <= x1 + (x2 - x1) / 10)[0][-1] + 1
        if n < 2:
            n = 2
        slope, intercept, _, _, _ = scipy.stats.linregress(x_base[:n], y_base[:n])
        percent = extrapolate['left']
        count = int(np.round(len(x_base) * percent))
        first = x1 - (x2 - x1) * percent
        last = x1 - (x2 - x1) / count
        new_x = np.linspace(first, last, count)
        x_base = np.insert(x_base, 0, new_x)
        y_base = np.insert(y_base, 0, new_x * slope + intercept)
    if 'right' in extrapolate:
        n = len(x_base) - np.where(x_base >= x2 - (x2 - x1) / 10)[0][0]
        if n < 2:
            n = 2
        slope, intercept, _, _, _ = scipy.stats.linregress(x_base[-n:], y_base[-n:])
        percent = extrapolate['right']
        count = int(np.round(len(x_base) * percent))
        last = x2 + (x2 - x1) * percent
        first = x2 + (x2 - x1) / count
        new_x = np.linspace(first, last, count)
        x_base = np.append(x_base, new_x)
        y_base = np.append(y_base, new_x * slope + intercept)
    assert (len(x_peak) >= 2) and (len(x_base) >= 2), \
        'len(x_peak) = ' + str(len(x_peak)) + ' len(x_base) = ' + str(len(x_base))
    minx = np.min(x)
    maxx = np.max(x)
    maxy = np.max(y)
    if model == 'cauchy':
        fff = lambda x, a, b, g, d: a / ((x - b)**2 + g) + d
        mod = ExpressionModel('a/((x-b)**2+g) + d')
        b0 = x2 + x2 - x1
        g0 = 1
        a0 = (y2 - y1) / (1 / ((x2 - b0)**2 + g0) - 1 / ((x1 - b0)**2 + g0))
        d0 = y1 - a0 / ((x1 - b0)**2 + g0)
        params = mod.make_params(a=a0, b=b0, g=g0, d=d0)
        param_order = {'a': 0, 'b': 1, 'g': 2, 'd': 3}
        start0 = [params['a'].value, params['b'].value, params['g'].value,
                  params['d'].value]
        result = mod.fit(y_fit, params, x=x_fit)
        start = [result.params['a'].value, result.params['b'].value,
                 result.params['g'].value, result.params['d'].value]
        bounds = [[0, 1e3 * maxy], [minx, maxx + (maxx - minx) * 10],
                  [0, (maxx - minx) * 10], [-maxy, maxy]]
    elif model == 'arctan':
        fff = lambda x, a, b, c, x0, d: \
            b / (1 + np.exp(-a * (x - x0))) + c + d * (x - x_base[0])
        mod = ExpressionModel('b/(1+exp(-a*(x - x0)))+c+d*(x-' + str(x_base[0]) + ')')
        efermi0, _, _ = findExpEfermi(x, y, 0.5 * np.mean(y[-5:]))
        if efermi0 < x_peak[0]:
            efermi0 = x_peak[0]
        a0 = 1
        b0 = y[-1] - y[0]
        c0 = y[0]
        x00 = efermi0
        d0 = (y_peak[0] - y_base[0]) / (x_peak[0] - x_base[0])
        params = mod.make_params(a=a0, b=b0, c=c0, x0=x00, d=d0)
        param_order = {'a': 0, 'b': 1, 'c': 2, 'x0': 3, 'd': 4}
        start0 = [params['a'].value, params['b'].value, params['c'].value,
                  params['x0'].value, params['d'].value]
        assert np.all(x[1:] - x[:-1] > 0), str(x)
        max_dy = np.max((y[1:] - y[:-1]) / (x[1:] - x[:-1]))
        params['a'].set(min=0)
        params['a'].set(max=max_dy / (np.max(y) - np.min(y)) * 10)
        params['b'].set(min=0)
        params['x0'].set(min=x_peak[0])
        params['d'].set(min=0)
        params['d'].set(max=3 * (y_peak[0] - y_base[0]) / (x_peak[0] - x_base[0]))
        dist = np.max([abs(x00 - minx), abs(x00 - maxx), maxx - minx])
        bounds = [[0, a0 * 100], [0, maxy * 10], [-maxy, maxy],
                  [minx - dist, maxx + dist * 10],
                  [0, 3 * (y_peak[0] - y_base[0]) / (x_peak[0] - x_base[0])]]
        # TODO: remove lmfit, because scipy.optimize.minimize works better
        if useStartParams is None:
            result = mod.fit(y_fit, params, x=x_fit)
            # result.plot(); plt.show()
            # print(result.fit_report())
            start = [result.params['a'].value, result.params['b'].value,
                     result.params['c'].value, result.params['x0'].value,
                     result.params['d'].value]
        else:
            start = useStartParams
    else:
        # Bezier base: least-squares cubic Bezier through the base points
        Mtk = lambda n, t, k: t**k * (1 - t)**(n - k) * scipy.special.comb(n, k)
        BezierCoeff = lambda ts: [[Mtk(3, t, k) for k in range(4)] for t in ts]
        t = np.linspace(0, 1, len(x_base))
        Pseudoinverse = np.linalg.pinv(BezierCoeff(t))
        data = np.column_stack((x_base, y_base))
        control_points = Pseudoinverse.dot(data)
        tPlot = np.linspace(0, 1, len(x_base_full))
        Bezier = np.array(BezierCoeff(tPlot)).dot(control_points)
        # evaluate the Bezier base on x_base_full and x_peak by interpolation
        app_y_base_full = np.interp(x_base_full, Bezier[:, 0], Bezier[:, 1])
        app_y_base_inside_peak = np.interp(x_peak, Bezier[:, 0], Bezier[:, 1])
        assert not usePositiveConstrains
        return x_peak, y_peak - app_y_base_inside_peak, x_base_full, \
            app_y_base_full, y_peak, y_base_full - app_y_base_full

    def func(params):
        y_app = fff(x_base, *params)
        return np.linalg.norm(y_app - y_base)

    if useStartParams is None:
        res = scipy.optimize.minimize(func, start0, bounds=bounds)
        # print(func(start), res.fun)
        if res.fun < func(start):
            for name in result.params:
                # print(f'Setting {name} = ', res.x[param_order[name]])
                result.params[name].set(res.x[param_order[name]])
            # print(result.params)
            start = res.x
    info = {'optimParam': start, 'optimVal': func(start)}
    if usePositiveConstrains:
        # while True:
        #     if np.all(fff(x_peak, *start) <= y_peak): break
        #     dx = np.max(x_peak) - np.min(x_peak)
        #     dy = np.max(y_peak) - np.min(y_peak)
        #     start[1] += dx*0.01
        #     start[3] -= dy*0.01
        constrains = tuple()
        for i in range(len(x_peak)):
            cons_fun = lambda params, i=i: fff(x_peak[i], *params)
            constrains += (scipy.optimize.NonlinearConstraint(
                cons_fun, -maxy, y_peak[i]),)
        # print(bounds)
        res = scipy.optimize.minimize(func, start, bounds=bounds,
                                      constraints=constrains)
        params = res.x
        app_y_base_inside_peak = fff(x_peak, *params)
        app_y_base_full = fff(x_base_full, *params)
        info = {'optimParam': params, 'optimVal': res.fun}
    else:
        app_y_base_inside_peak = mod.eval(result.params, x=x_peak)
        app_y_base_full = mod.eval(result.params, x=x_base_full)
    return x_peak, y_peak - app_y_base_inside_peak, x_base_full, \
        app_y_base_full, y_peak, y_base_full - app_y_base_full, info
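# Usage sketch (illustrative): subtract a Cauchy baseline under a synthetic
# Gaussian peak. Uses model='cauchy' without positive constraints, so the
# linearReg and findExpEfermi helpers are not needed; the data and intervals
# are made up. Assumes numpy, scipy, and the lmfit import used above.
def _demo_substractBase():
    import numpy as np
    x = np.linspace(0, 10, 500)
    base = 1.0 / ((x - 15)**2 + 20) + 0.1
    y = base + 0.05 * np.exp(-(x - 5)**2 / 0.5)
    out = substractBase(x, y, peakInterval=[4, 6], baseFitInterval=[2, 8],
                        model='cauchy', usePositiveConstrains=False)
    x_peak, y_sub = out[0], out[1]   # peak with the fitted base removed
    return x_peak, y_sub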
"""
@author: miseminger
"""
import matplotlib.pyplot as plt
import numpy as np
from lmfit.models import ExpressionModel

x = np.array([0, 1.25, 2.5, 5, 10, 20])  # protein concentration (nM)
y = np.array([0.1475, 0.334, 0.444, 0.532, 0.598, 0.6615])  # mean absorbance
y_err = np.array([0.0175, 0.003, 0.013, 0.003, 0.013, 0.0235])  # standard error of the mean

fit_model = ExpressionModel('B * x**h / (Kd**h + x**h)',
                            independent_vars=['x'])  # Hill function
params = fit_model.make_params(B=0.7, h=0.8, Kd=1.6)  # include guesses for parameters
params['B'].min = 0.5  # set minimum values for parameters
params['h'].min = 0.5
params['Kd'].min = 1

result = fit_model.fit(y, params, x=x, weights=1.0 / y_err)
print(result.fit_report())

# plot
x_mod = np.linspace(0, 25, num=1000)
B_best = result.params['B'].value
h_best = result.params['h'].value
Kd_best = result.params['Kd'].value
y_best = B_best * x_mod**h_best / (Kd_best**h_best + x_mod**h_best)
plt.errorbar(x, y, yerr=y_err, fmt='o')  # data with error bars
plt.plot(x_mod, y_best)                  # best-fit Hill curve
plt.xlabel('protein concentration (nM)')
plt.ylabel('mean absorbance')
plt.show()
def ground_state():
    masses = list()
    # this reads in the params of the run
    # list_of_files = glob.glob('./Data/*.dat')  # create the list of files
    list_of_files = glob.glob('*.dat')
    for file_name in list_of_files:
        print(file_name)
        f = open(file_name, 'r')
        # f = open('test.dat', 'r')
        params = f.readline()
        params = list(params.split())
        params = list(map(int, params))
        f.close()
        Nconf = params[0]
        Nt = params[1]
        eof = Nconf * Nt
        numeffs = Nt - 1

        corr = np.loadtxt(file_name, skiprows=1)  # read in correlator data

        jack = list()
        jackerr = list()
        jackavg = list()
        # it = t-1
        for i in range(Nt):
            tmp = list(corr[i:eof:Nt, 1])
            err, avg, jkl = jackknife_err(tmp)
            jack.append(jkl)
            jackerr.append(err)
            jackavg.append(avg)
        Cavg = np.array(jackavg)
        Cerr = np.array(jackerr)

        tmin, tmax = 12, Nt // 2
        times = np.arange(tmin, tmax)
        x = times
        # note 'x' is the default independent variable in lmfit;
        # ExpressionModel is smart enough to determine the fit parameters,
        # A1 and m1, from the expression.
        mod = ExpressionModel('A1 * exp(-m1*x)')
        params = mod.make_params(A1=1., m1=0.3)
        dof = x.shape[0] - len(params)
        # weights in lmfit multiply the residual, so use 1/error
        out = mod.fit(Cavg[tmin:tmax], params, x=times,
                      weights=1.0 / Cerr[tmin:tmax])
        print(out.fit_report())
        plt.errorbar(x, Cavg[tmin:tmax], Cerr[tmin:tmax], fmt='b.')
        plt.scatter(x, Cavg[tmin:tmax])
        plt.plot(x, out.best_fit, 'r-')
        plt.show()
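# jackknife_err is used above but not defined in this file; below is a
# minimal sketch of a standard delete-one jackknife estimator, an assumption
# inferred only from how it is called (err, avg, jkl = jackknife_err(tmp)).
def _jackknife_err_sketch(samples):
    import numpy as np
    samples = np.asarray(samples, dtype=float)
    n = samples.size
    # delete-one jackknife averages
    jkl = (samples.sum() - samples) / (n - 1)
    avg = jkl.mean()
    # standard jackknife error of the mean
    err = np.sqrt((n - 1) / n * np.sum((jkl - avg)**2))
    return err, avg, jkl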
def first_excited_state():
    masses = list()
    # this reads in the params of the run
    # list_of_files = glob.glob('./Data/*.dat')  # create the list of files
    list_of_files = glob.glob('*.dat')
    for file_name in list_of_files:
        print(file_name)
        f = open(file_name, 'r')
        params = f.readline()
        params = list(params.split())
        params = list(map(int, params))
        f.close()
        Nconf = params[0]
        Nt = params[1]
        eof = Nconf * Nt
        numeffs = Nt - 1

        corr = np.loadtxt(file_name, skiprows=1)  # read in correlator data

        jack = list()
        jackerr = list()
        jackavg = list()
        # it = t-1
        for i in range(Nt):
            tmp = list(corr[i:eof:Nt, 1])
            err, avg, jkl = jackknife_err(tmp)
            jack.append(jkl)
            jackerr.append(err)
            jackavg.append(avg)
        Cavg = np.array(jackavg)
        Cerr = np.array(jackerr)
        sigma = sigma_mat(Nconf, Nt, Cavg, corr)

        tmin, tmax = 12, Nt // 2
        times = np.arange(tmin, tmax)
        x = times
        # two-exponential model: ground state plus first excited state
        mod = ExpressionModel('A1 * exp(-m1*x) + A2 * exp(-m2*x)')
        params = mod.make_params(A1=1., A2=1., m1=0.3, m2=0.6)
        dof = x.shape[0] - len(params)
        # weights in lmfit multiply the residual, so use 1/error
        out = mod.fit(Cavg[tmin:tmax], params, x=times,
                      weights=1.0 / Cerr[tmin:tmax])
        print(out.fit_report())
        plt.errorbar(x, Cavg[tmin:tmax],
                     np.sqrt(np.diag(sigma[tmin:tmax, tmin:tmax])), fmt='b.')
        plt.scatter(x, Cavg[tmin:tmax])
        plt.plot(x, out.best_fit, 'r-')
        plt.show()
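# sigma_mat is also external; below is a plausible sketch of the correlator
# covariance matrix it appears to return (its diagonal is used for the error
# bars above). This is an assumption, not the original implementation.
def _sigma_mat_sketch(Nconf, Nt, Cavg, corr):
    import numpy as np
    # reshape the flat correlator column into (Nconf, Nt): row = conf*Nt + t
    C = corr[:Nconf * Nt, 1].reshape(Nconf, Nt)
    dC = C - Cavg[None, :]
    # covariance of the mean: sigma_ij = sum_k dC_ki dC_kj / (Nconf*(Nconf-1))
    return dC.T.dot(dC) / (Nconf * (Nconf - 1))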