# Pre-allocate the lists of models
Pmodels = [[]] * Nmax
Vmodels = [[]] * Nmax

# The basic model for the components (can be e.g. dl.dd_rice)
basisModel = dl.dd_gauss

# Model construction
for n in range(Nmax):
    # Construct the n-Gaussian model
    Pmodels[n] = dl.lincombine(*[basisModel] * (n + 1))
    # Construct the corresponding dipolar signal model
    Vmodels[n] = dl.dipolarmodel(t, r, Pmodel=Pmodels[n])

# Fit the models to the data
fits = [[]] * Nmax
for n in range(Nmax):
    fits[n] = dl.fit(Vmodels[n], Vexp, reg=False)

#%%

# Extract the values of the Akaike information criterion for each fit
aic = np.array([fit.stats['aic'] for fit in fits])

# Compute the relative difference in AIC
aic -= aic.min()

# Plotting
fig = plt.figure(figsize=[6, 6])
gs = GridSpec(1, 3, figure=fig)
ax1 = fig.add_subplot(gs[0, :-1])

for n in range(Nmax):
    # Evaluate the n-Gaussian distance distribution model
    Pfit = fits[n].evaluate(Pmodels[n], *[r] * (n + 1))
# Distance vector
r = np.linspace(2,5,150)  # nm

# Construct the dipolar models for the individual signals
V1model = dl.dipolarmodel(ts[0],r)
V2model = dl.dipolarmodel(ts[1],r)

# Make the global model by joining the individual models
globalmodel = dl.merge(V1model,V2model)

# Link the distance distribution into a global parameter
globalmodel = dl.link(globalmodel,P=['P_1','P_2'])

# Fit the model to the data
fit = dl.fit(globalmodel,Vs)

# %%

plt.figure(figsize=[10,7])
violet = '#4550e6'
for n in range(len(fit.model)):

    # Extract fitted dipolar signal
    Vfit = fit.model[n]
    Vci = fit.modelUncert[n].ci(95)

    # Extract fitted distance distribution
    Pfit = fit.P
    scale = np.trapz(Pfit,r)
    Pci95 = fit.PUncert.ci(95)/scale
r = np.linspace(2, 6, 100)

# 4-pulse DEER can have up to four different dipolar pathways
Nmax = 4

# Create the 4-pulse DEER signal models with increasing number of pathways
experiment = dl.ex_4pdeer(𝜏1, 𝜏2)
Vmodels = [
    dl.dipolarmodel(t, r, npathways=n + 1, experiment=experiment)
    for n in range(Nmax)
]

# Fit the individual models to the data
fits = [[]] * Nmax
for n, Vmodel in enumerate(Vmodels):
    fits[n] = dl.fit(Vmodel, Vexp)

#%%

# Extract the values of the Akaike information criterion for each fit
aic = np.array([fit.stats['aic'] for fit in fits])

# Compute the relative difference in AIC
aic = aic - aic.min() + 1  # add one to allow plotting on a logarithmic scale

# Plotting
colors = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red']
fig = plt.figure(figsize=[8, 9])
gs = GridSpec(1, 3, figure=fig)
ax1 = fig.add_subplot(gs[0, :-1])

for n in range(len(Vmodels)):
    # Get the fits of the dipolar signal models
                                    description='Dissociation constant')

titrmodel = dl.relate(titrmodel,
                      weight_2_1=lambda weight_1_1: 1 - weight_1_1,
                      weight_1_1=lambda Kdis: chemicalequilibrium(Kdis, L[0]),
                      weight_2_2=lambda weight_1_2: 1 - weight_1_2,
                      weight_1_2=lambda Kdis: chemicalequilibrium(Kdis, L[1]),
                      weight_2_3=lambda weight_1_3: 1 - weight_1_3,
                      weight_1_3=lambda Kdis: chemicalequilibrium(Kdis, L[2]),
                      weight_2_4=lambda weight_1_4: 1 - weight_1_4,
                      weight_1_4=lambda Kdis: chemicalequilibrium(Kdis, L[3]),
                      weight_2_5=lambda weight_1_5: 1 - weight_1_5,
                      weight_1_5=lambda Kdis: chemicalequilibrium(Kdis, L[4]))

# Fit the model to the data
fit = dl.fit(titrmodel, Vs, regparam=0.5)

# %%

# Evaluate the dose-response curve at the fit with confidence bands
xAfcn = lambda Kdis: np.squeeze(np.array([chemicalequilibrium(Kdis, Ln) for Ln in L]))
xBfcn = lambda Kdis: np.squeeze(np.array([1 - chemicalequilibrium(Kdis, Ln) for Ln in L]))
xAfit = xAfcn(fit.Kdis)
xBfit = xBfcn(fit.Kdis)
xAci = fit.propagate(xAfcn, lb=np.zeros_like(L), ub=np.ones_like(L)).ci(95)
xBci = fit.propagate(xBfcn, lb=np.zeros_like(L), ub=np.ones_like(L)).ci(95)

# Plot the dose-response curve
plt.plot(L, xAfit, '-o')
# Load the experimental data
t, Vexp = np.load('../data/example_data_#1.npy')

# Pre-process
Vexp = dl.correctphase(Vexp)
Vexp = Vexp / np.max(Vexp)

# Distance vector
r = np.linspace(2, 5.5, 80)

# Construct the 4-pulse DEER dipolar model
Vmodel = dl.dipolarmodel(t, r)
Vmodel.reftime.set(par0=0.5, lb=0.0, ub=1.0)

# Fit the model to the data using covariance-based uncertainty
fit_cm = dl.fit(Vmodel, Vexp)

# Fit the model to the data using bootstrapped uncertainty
fit_bs = dl.fit(Vmodel, Vexp, bootstrap=10)

# Compute the covariance-based uncertainty bands of the distance distribution
Pci50_cm = fit_cm.PUncert.ci(50)
Pci95_cm = fit_cm.PUncert.ci(95)

# Compute the bootstrapped uncertainty bands of the distance distribution
Pci50_bs = fit_bs.PUncert.ci(50)
Pci95_bs = fit_bs.PUncert.ci(95)

#%%

# Plot the results
# %%

# Load the experimental data
t, Vexp = np.load('../data/example_5pdeer_#1.npy')

# Distance vector
r = np.linspace(2, 5, 200)  # nm

# Construct dipolar model with two dipolar pathways
Vmodel = dl.dipolarmodel(t, r, npathways=2)

# The refocusing time of the second pathway can be well estimated by visual inspection
Vmodel.reftime2.set(lb=3, ub=4, par0=3.5)

# Fit the model to the data
fit = dl.fit(Vmodel, Vexp)

# %%

# Extract fitted dipolar signal
Vfit = fit.model
Vci = fit.modelUncert.ci(95)

# Extract fitted distance distribution
Pfit = fit.P
scale = np.trapz(Pfit, r)
Pci95 = fit.PUncert.ci(95) / scale
Pci50 = fit.PUncert.ci(50) / scale
Pfit = Pfit / scale

# Extract the unmodulated contribution
import deerlab as dl

# %%

# Load the experimental data
t,Vexp = np.load('../data/example_4pdeer_#1.npy')

# Distance vector
r = np.linspace(2,5,100)  # nm

# Construct the model
Vmodel = dl.dipolarmodel(t,r)

# Fit the model to the data
# For the sake of runtime, this example uses only 20 bootstrap samples
fit = dl.fit(Vmodel,Vexp,bootstrap=20)

#%%

# Extract fitted dipolar signal
Vfit = fit.model
Vci = fit.modelUncert.ci(95)

# Extract fitted distance distribution
Pfit = fit.P
scale = np.trapz(Pfit,r)
Pci95 = fit.PUncert.ci(95)/scale
Pci50 = fit.PUncert.ci(50)/scale
Pfit = Pfit/scale
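# The snippet below is a minimal plotting sketch (not part of the original example)
# showing one way to display the normalized distribution together with the
# bootstrapped 95% and 50% confidence bands computed above; styling is arbitrary.
plt.figure(figsize=[6, 4])
plt.plot(r, Pfit, label='Fit')
plt.fill_between(r, Pci95[:, 0], Pci95[:, 1], alpha=0.2, label='95% CI')
plt.fill_between(r, Pci50[:, 0], Pci50[:, 1], alpha=0.4, label='50% CI')
plt.xlabel('Distance $r$ (nm)')
plt.ylabel('$P(r)$ (nm$^{-1}$)')
plt.legend(frameon=False)
plt.show()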
# %%

# Load the experimental dataset
t,V = np.load('../data/example_data_#1.npy')

# Pre-process
V = dl.correctphase(V)
V = V/np.max(V)

# Construct the dipolar signal model
r = np.linspace(1,7,100)
Vmodel = dl.dipolarmodel(t,r)

# Fit the model to the data
fit = dl.fit(Vmodel,V)

fit.plot(axis=t)
plt.ylabel('V(t)')
plt.xlabel('Time $t$ (μs)')
plt.show()

# From the fit results, extract the distribution and its 95% confidence intervals
Pfit = fit.P
Pci95 = fit.PUncert.ci(95)

# Select a bimodal Gaussian model for the distance distribution
Pmodel = dl.dd_gauss2

# Fit the Gaussian model to the non-parametric distance distribution
fit = dl.fit(Pmodel,Pfit,r)
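# The lines below are an illustrative sketch (not part of the original script):
# they evaluate the fitted bimodal Gaussian model on the distance vector and
# overlay it on the non-parametric distribution it was fitted to, reusing the
# FitResult.evaluate() pattern employed in the multi-Gauss example above.
Pfit_gauss = fit.evaluate(Pmodel, r)
plt.plot(r, Pfit, '.', color='grey', label='Non-parametric fit')
plt.plot(r, Pfit_gauss, label='Bimodal Gaussian fit')
plt.xlabel('Distance $r$ (nm)')
plt.ylabel('$P(r)$ (nm$^{-1}$)')
plt.legend(frameon=False)
plt.show()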
V5p = V5p / np.max(V5p)

# Run fit
r = np.linspace(2, 4.5, 100)

# Construct the individual dipolar signal models
V4pmodel = dl.dipolarmodel(t4p, r, npathways=1)
V5pmodel = dl.dipolarmodel(t5p, r, npathways=2)
V5pmodel.reftime2.set(lb=3, ub=3.5, par0=3.2)

# Make the joint model with the distribution as a global parameter
globalmodel = dl.merge(V4pmodel, V5pmodel)
globalmodel = dl.link(globalmodel, P=['P_1', 'P_2'])

# Fit the model to the data (with fixed regularization parameter)
fit = dl.fit(globalmodel, [V4p, V5p], regparam=0.5)

# %%

plt.figure(figsize=[10, 7])
violet = '#4550e6'

# Extract fitted distance distribution
Pfit = fit.P
scale = np.trapz(Pfit, r)
Pci95 = fit.PUncert.ci(95) / scale
Pci50 = fit.PUncert.ci(50) / scale
Pfit = Pfit / scale

for n, (t, V) in enumerate(zip([t4p, t5p], [V4p, V5p])):

    # Extract fitted dipolar signal
def profile_analysis(model, y, *args, parameters='all', grids=None, samples=50, noiselvl=None, verbose=False, **kargs):
    r"""
    Profile likelihood analysis for uncertainty quantification

    Parameters
    ----------
    model : :ref:`Model`
        Model object describing the data. All non-linear model parameters are profiled by default.

    y : array_like or list of array_like
        Experimental dataset(s).

    args : positional arguments
        Any other positional arguments to be passed to the ``fit`` function. See the
        documentation of the ``fit`` function for further details.

    parameters : string or list thereof
        Model parameters to profile. If set to ``'all'`` all non-linear parameters in the model are analyzed.

    samples : integer scalar
        Number of points to take to estimate the profile function. Ignored if ``grids`` is specified.

    grids : dict of array_like
        Grids of values on which to evaluate the profile for each parameter. Must be a dictionary of grids
        with keys corresponding to the names of the model parameters to be profiled. Overrides the ``samples`` argument.

    noiselvl : float, optional
        Noise level(s) of the datasets. If set to ``None`` it is determined automatically.

    verbose : boolean, optional
        Specifies whether to print the progress of the bootstrap analysis on the
        command window, the default is false.

    kargs : keyword-argument pairs
        Any other keyword-argument pairs to be passed to the ``fit`` function. See the
        documentation of the ``fit`` function for further details.

    Returns
    -------
    profuq : dict of :ref:`UQResult`
        Dictionary containing the profile uncertainty quantification for each profiled non-linear model
        parameter. The respective parameter's results can be accessed via the model parameter name.
    """
    if noiselvl is None:
        noiselvl = noiselevel(y)

    # Optimize the whole model to fit the data
    if verbose:
        print('Profile analysis routine started.')
        print('Performing full model fit...')
    fitresult = fit(model, y, *args, **kargs)

    # Prepare the statistical threshold function
    threshold = lambda coverage: noiselvl**2*chi2.ppf(coverage, df=1) + fitresult.cost

    if parameters == 'all':
        parameters = model._parameter_list()
    elif not isinstance(parameters, list):
        parameters = [parameters]

    # Loop over all parameters in the model
    uqresults = {}
    for parameter in parameters:

        if np.any(getattr(model, parameter).linear):
            if verbose:
                print(f"Skipping linear parameter '{parameter}'.")
            uqresults[parameter] = None
            continue
        if getattr(model, parameter).frozen:
            if verbose:
                print(f"Skipping frozen parameter '{parameter}'.")
            uqresults[parameter] = None
            continue

        # Construct the values of the model parameter to profile
        if grids is None:
            start = np.maximum(getattr(model, parameter).lb, getattr(fitresult, parameter) - 10*getattr(fitresult, f'{parameter}Uncert').std)
            stop = np.minimum(getattr(model, parameter).ub, getattr(fitresult, parameter) + 10*getattr(fitresult, f'{parameter}Uncert').std)
            grid = np.linspace(start, stop, samples)
        else:
            grid = grids[parameter]

        if verbose:
            tqdm.write(f"Profiling model parameter '{parameter}':", end='')

        # Calculate the profile objective function for the parameter
        profile = np.zeros(len(grid))
        for n, value in enumerate(tqdm(grid, disable=not verbose)):

            # Freeze the model parameter at the current value
            getattr(model, parameter).freeze(value)

            # Optimize the rest
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                fitresult_ = fit(model, y, *args, **kargs)

            # Extract the objective function value
            profile[n] = fitresult_.cost

            # Unfreeze the parameter
            getattr(model, parameter).unfreeze()

        profile = {'x': np.squeeze(grid), 'y': profile}
        uqresults[parameter] = UQResult('profile', data=getattr(fitresult, parameter), profiles=profile, threshold=threshold, noiselvl=noiselvl)
        uqresults[parameter].profile = uqresults[parameter].profile[0]

    return uqresults
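# Minimal usage sketch for profile_analysis (an illustration based on the signature and
# docstring above, not code taken verbatim from the DeerLab examples). 'Vmodel' and
# 'Vexp' stand for a dipolar model and dataset such as those constructed in the example
# scripts earlier; 'mod' is assumed to be the modulation-depth parameter of that model.
profuq = dl.profile_analysis(Vmodel, Vexp, parameters='mod', samples=25, verbose=True)
profile = profuq['mod'].profile       # dict with the profiled grid 'x' and objective values 'y'
plt.plot(profile['x'], profile['y'])  # visualize the likelihood profile
plt.xlabel('Modulation depth')
plt.ylabel('Profile objective function')
plt.show()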
globalModel.eta.set(lb=0.468, ub=0.57, par0=0.520)
globalModel.kdis.set(lb=0.0, ub=0.09, par0=0.01)
globalModel.kld.set(lb=0.0, ub=1, par0=0.12)
globalModel.Dld.set(lb=2, ub=4, par0=2.5)
globalModel.rmean_dis.set(lb=3, ub=6.35, par0=3.7)
globalModel.rmean_ld.set(lb=1, ub=8, par0=2.6)
globalModel.width_dis.set(lb=0.25, ub=0.74, par0=0.44)
globalModel.width_ld.set(lb=0.2, ub=2, par0=0.7)
globalModel.lam1.set(lb=0.3, ub=0.5, par0=0.4)
globalModel.lam2.set(lb=0.0, ub=0.2, par0=0.08)
globalModel.reftime1.set(lb=0.1, ub=0.3, par0=0.2)
globalModel.reftime2_1.set(lb=3.2, ub=3.8, par0=3.4)
globalModel.reftime2_2.set(lb=2.0, ub=2.5, par0=2.2)

# Fit the model to the data
fit = dl.fit(globalModel, Vs, nonlin_tol=1e-3)

# Plot the results
plt.figure(figsize=[9, 9])
violet = '#4550e6'
orange = 'tab:orange'

plt.subplot(3, 2, 1)
plt.plot(ts[0], Vs[0], '.', color='grey', label='Data')
plt.plot(ts[0], fit.model[0], label='Fit')
plt.fill_between(ts[0], fit.modelUncert[0].ci(95)[:, 0], fit.modelUncert[0].ci(95)[:, 1], alpha=0.3)
plt.ylim([0.2, 1])
plt.legend(frameon=False, loc='best')
Vmodels = [[]] * Nsignals
for n in range(Nsignals):
    Vmodels[n] = dl.dipolarmodel(ts[n], r, Pmodel)
    Vmodels[n].reftime.set(lb=0, ub=0.5, par0=0.2)

# Combine the individual signal models into a single global model
globalmodel = dl.merge(*Vmodels)

# Link the global parameters together
globalmodel = dl.link(globalmodel,
                      meanA=['meanA_1', 'meanA_2', 'meanA_3'],
                      meanB=['meanB_1', 'meanB_2', 'meanB_3'],
                      widthA=['widthA_1', 'widthA_2', 'widthA_3'],
                      widthB=['widthB_1', 'widthB_2', 'widthB_3'])

# Fit the datasets to the model globally
fit = dl.fit(globalmodel, Vexps)

# Extract the fitted fractions
fracAfit = [fit.fracA_1, fit.fracA_2, fit.fracA_3]
fracBfit = [1 - fit.fracA_1, 1 - fit.fracA_2, 1 - fit.fracA_3]

plt.figure(figsize=(10, 8))
for i in range(Nsignals):

    # Get the fitted signals and confidence bands
    Vfit = fit.model[i]
    Vfit_ci = fit.modelUncert[i].ci(95)

    # Get the fitted distributions of the two states
    PAfit = fracAfit[i] * dl.dd_gauss(r, fit.meanA, fit.widthA)
    PBfit = fracBfit[i] * dl.dd_gauss(r, fit.meanB, fit.widthB)