Example #1
def polyfit(x, y, u=None):
    '''Determine the weighted least-squares fit for a 2nd-order polynomial'''

    x = np.asarray(x)
    y = np.asarray(y)

    if u is not None:
        u = np.array(u, dtype=float)   # copy, so the caller's array is not modified
        u[u == 0] = 1                  # avoid division by zero
        weight = 1./u
    else:
        weight = np.ones_like(x)

    params = Parameters()
    params.add('a', value=0.0)
    params.add('b', value=(y.max()-y.min())/(x.max()-x.min()))
    params.add('c', value=0.0)

    def residual(pars, x, data=None, w=None):
        model = pars['a'].value + pars['b'].value*x + pars['c'].value*x**2
        if data is None:
            return model
        return (model - data) * w      # weight the residual, as the docstring promises

    myfit = Minimizer(residual, params, fcn_args=(x,),
                      fcn_kws={'data': y, 'w': weight})
    myfit.leastsq()

    return [params['c'].value, params['b'].value, params['a'].value]
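A minimal driver for polyfit above (a sketch; x, y, u here are illustrative, and numpy plus lmfit's Parameters/Minimizer must be imported, as the snippet itself assumes):

import numpy as np
from lmfit import Minimizer, Parameters

x = np.linspace(-3, 3, 51)
y = 0.5*x**2 - 1.2*x + 0.3 + np.random.normal(scale=0.05, size=x.size)
u = np.full_like(y, 0.05)        # per-point uncertainties

c, b, a = polyfit(x, y, u=u)     # coefficients returned highest-order first
print(c, b, a)                   # expect roughly 0.5, -1.2, 0.3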
Example #2
def NIST_Test(DataSet, start='start2', plot=True):

    NISTdata = ReadNistData(DataSet)
    resid, npar, dimx = Models[DataSet]
    y = NISTdata['y']
    x = NISTdata['x']

    params = Parameters()
    for i in range(npar):
        pname = 'b%i' % (i+1)
        cval  = NISTdata['cert_values'][i]   # certified value (not used directly here)
        cerr  = NISTdata['cert_stderr'][i]   # certified stderr (not used directly here)
        pval1 = NISTdata[start][i]
        params.add(pname, value=pval1)


    myfit = Minimizer(resid, params, fcn_args=(x,), fcn_kws={'y':y},
                      scale_covar=True)

    myfit.prepare_fit()
    myfit.leastsq()

    digs = Compare_NIST_Results(DataSet, myfit, params, NISTdata)

    if plot and HASPYLAB:
        fit = -resid(params, x)
        pylab.plot(x, y, 'r+')
        pylab.plot(x, fit, 'ko--')
        pylab.show()

    return digs > 2
Example #3
class LmModel(object):
    """
    Base class for all models.

    Models take x and y arrays; subclasses implement func(paras),
    which returns the model evaluated at x.
    """
    def __init__(self, x, y):
        self.x, self.y = x, y
        self.parameters = Parameters()
        self.min = Minimizer(self.residual, self.parameters)

    def print_para(self):
        for par in self.parameters.values():
            print(par)

    def func(self, paras):
        raise NotImplementedError

    def est_startvals(self):
        raise NotImplementedError

    def residual(self, paras):
        return self.func(paras) - self.y

    def fit(self):
        self.min.leastsq()
        self.y_model = self.func(self.parameters)
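A hypothetical subclass (a sketch, not part of the original source) shows how the base class is meant to be used: est_startvals() populates self.parameters, and func() evaluates the model at self.x.

class LineModel(LmModel):
    """Straight line y = m*x + b (illustrative only)."""
    def est_startvals(self):
        # crude slope/intercept guesses from the endpoints
        self.parameters.add('m', value=(self.y[-1]-self.y[0])/(self.x[-1]-self.x[0]))
        self.parameters.add('b', value=self.y[0])

    def func(self, paras):
        return paras['m'].value*self.x + paras['b'].value

# usage: model = LineModel(x, y); model.est_startvals(); model.fit()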
Example #4
def autobk(energy, mu, rbkg=1, nknots=None, group=None, e0=None,
           kmin=0, kmax=None, kw=1, dk=0, win=None, vary_e0=True,
           chi_std=None, nfft=2048, kstep=0.05, _larch=None):
    if _larch is None:
        raise Warning("cannot calculate autobk spline -- larch broken?")

    # get array indices for rbkg and e0: irbkg, ie0
    rgrid = np.pi/(kstep*nfft)
    if rbkg < 2*rgrid: rbkg = 2*rgrid
    irbkg = int(1.01 + rbkg/rgrid)
    if e0 is None:
        e0 = find_e0(energy, mu, group=group, _larch=_larch)
    ie0 = _index_nearest(energy, e0)

    # save ungridded k (kraw) and gridded k (kout)
    # and ftwin (*k-weighting) for FT in residual
    kraw = np.sqrt(ETOK*(energy[ie0:] - e0))
    if kmax is None:
        kmax = max(kraw)
    kout  = kstep * np.arange(int(1.01+kmax/kstep))
    ftwin = kout**kw * ftwindow(kout, xmin=kmin, xmax=kmax,
                                window=win, dx=dk)

    # calc k-value and initial guess for y-values of spline params
    nspline = max(4, min(60, 2*int(rbkg*(kmax-kmin)/np.pi) + 1))
    spl_y  = np.zeros(nspline)
    spl_k  = np.zeros(nspline)
    for i in range(nspline):
        q = kmin + i*(kmax-kmin)/(nspline - 1)
        ik = _index_nearest(kraw, q)
        i1 = min(len(kraw)-1, ik + 5)
        i2 = max(0, ik - 5)
        spl_k[i] = kraw[ik]
        spl_y[i] = (2*mu[ik] + mu[i1] + mu[i2] ) / 4.0
    # get spline representation: knots, coefs, order=3
    # coefs will be varied in fit.
    knots, coefs, order = splrep(spl_k, spl_y)

    # set fit parameters from initial coefficients
    fparams = Parameters()
    for i, v in enumerate(coefs):
        fparams.add("c%i" % i, value=v, vary=i<len(spl_y))

    fitkws = dict(knots=knots, order=order, kraw=kraw, mu=mu[ie0:],
                  irbkg=irbkg, kout=kout, ftwin=ftwin, nfft=nfft)
    # do fit
    fit = Minimizer(__resid, fparams, fcn_kws=fitkws)
    fit.leastsq()

    # write final results
    coefs = [p.value for p in fparams.values()]
    bkg, chi = spline_eval(kraw, mu[ie0:], knots, coefs, order, kout)
    obkg  = np.zeros(len(mu))
    obkg[:ie0] = mu[:ie0]
    obkg[ie0:] = bkg
    if _larch.symtable.isgroup(group):
        setattr(group, 'bkg',  obkg)
        setattr(group, 'chie', mu-obkg)
        setattr(group, 'k',    kout)
        setattr(group, 'chi',  chi)
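The unpacking on the splrep line above works because scipy's splrep returns the spline as a (knots, coefficients, order) tuple; a minimal standalone sketch:

import numpy as np
from scipy.interpolate import splrep, splev

xs = np.linspace(0, 10, 21)
ys = np.sin(xs)
knots, coefs, order = splrep(xs, ys)    # the 'tck' tuple: knots t, coefficients c, degree k
print(len(knots), len(coefs), order)    # order is 3 (cubic) by default
ysmooth = splev(xs, (knots, coefs, order))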
Example #5
def test_peakfit():
    from lmfit.utilfuncs import gaussian
    def residual(pars, x, data=None):
        g1 = gaussian(x, pars['a1'].value, pars['c1'].value, pars['w1'].value)
        g2 = gaussian(x, pars['a2'].value, pars['c2'].value, pars['w2'].value)
        model = g1 + g2
        if data is None:
            return model
        return (model - data)

    n    = 601
    xmin = 0.
    xmax = 15.0
    noise = np.random.normal(scale=.65, size=n)
    x = np.linspace(xmin, xmax, n)

    org_params = Parameters()
    org_params.add_many(('a1', 12.0, True, None, None, None),
                        ('c1',  5.3, True, None, None, None),
                        ('w1',  1.0, True, None, None, None),
                        ('a2',  9.1, True, None, None, None),
                        ('c2',  8.1, True, None, None, None),
                        ('w2',  2.5, True, None, None, None))

    data  = residual(org_params, x) + noise


    fit_params = Parameters()
    fit_params.add_many(('a1',  8.0, True, None, 14., None),
                        ('c1',  5.0, True, None, None, None),
                        ('w1',  0.7, True, None, None, None),
                        ('a2',  3.1, True, None, None, None),
                        ('c2',  8.8, True, None, None, None))

    fit_params.add('w2', expr='2.5*w1')

    myfit = Minimizer(residual, fit_params,
                      fcn_args=(x,), fcn_kws={'data':data})

    myfit.prepare_fit()

    init = residual(fit_params, x)


    myfit.leastsq()

    print(' N fev = ', myfit.nfev)
    print(myfit.chisqr, myfit.redchi, myfit.nfree)

    report_fit(fit_params)

    fit = residual(fit_params, x)
    check_paras(fit_params, org_params)
Example #6
	def __FitEvent(self):
		try:
			dt = 1000./self.Fs 	# time-step in ms.
			# edat=np.asarray( np.abs(self.eventData),  dtype='float64' )
			edat=self.dataPolarity*np.asarray( self.eventData,  dtype='float64' )

			# control numpy error reporting
			np.seterr(invalid='ignore', over='ignore', under='ignore')

			ts = np.array([ t*dt for t in range(0,len(edat)) ], dtype='float64')

			# estimate initial guess for events
			initguess=self._characterizeevent(edat, np.abs(util.avg(edat[:10])), self.baseSD, self.InitThreshold, 6.)
			self.nStates=len(initguess)-1

			# setup fit params
			params=Parameters()

			for i in range(1, len(initguess)):
				params.add('a'+str(i-1), value=initguess[i][0]-initguess[i-1][0]) 
				params.add('mu'+str(i-1), value=initguess[i][1]*dt) 
				params.add('tau'+str(i-1), value=dt*7.5)

			params.add('b', value=initguess[0][0])
			

			optfit=Minimizer(self.__objfunc, params, fcn_args=(ts,edat,))
			optfit.prepare_fit()

	
			optfit.leastsq(xtol=self.FitTol,ftol=self.FitTol,maxfev=self.FitIters)

			if optfit.success:
				self.__recordevent(optfit)
			else:
				#print optfit.message, optfit.lmdif_message
				self.rejectEvent('eFitConvergence')
		except KeyboardInterrupt:
			self.rejectEvent('eFitUserStop')
			raise
		except InvalidEvent:
			self.rejectEvent('eInvalidEvent')
		except:
			self.rejectEvent('eFitFailure')
			raise
Example #7
def test_constraints1():
    def residual(pars, x, sigma=None, data=None):
        yg = gaussian(x, pars['amp_g'], pars['cen_g'], pars['wid_g'])
        yl = lorentzian(x, pars['amp_l'], pars['cen_l'], pars['wid_l'])

        model =  yg +  yl + pars['line_off'] + x * pars['line_slope']
        if data is None:
            return model
        if sigma is None:
            return (model - data)
        return (model - data)/sigma


    n = 601
    xmin = 0.
    xmax = 20.0
    x = linspace(xmin, xmax, n)

    data = (gaussian(x, 21, 8.1, 1.2) +
            lorentzian(x, 10, 9.6, 2.4) +
            random.normal(scale=0.23,  size=n) +
            x*0.5)


    pfit = Parameters()
    pfit.add(name='amp_g',  value=10)
    pfit.add(name='cen_g',  value=9)
    pfit.add(name='wid_g',  value=1)

    pfit.add(name='amp_tot',  value=20)
    pfit.add(name='amp_l',  expr='amp_tot - amp_g')
    pfit.add(name='cen_l',  expr='1.5+cen_g')
    pfit.add(name='wid_l',  expr='2*wid_g')

    pfit.add(name='line_slope', value=0.0)
    pfit.add(name='line_off', value=0.0)

    sigma = 0.021  # estimate of data error (for all data points)

    myfit = Minimizer(residual, pfit,
                      fcn_args=(x,), fcn_kws={'sigma':sigma, 'data':data},
                      scale_covar=True)

    myfit.prepare_fit()
    init = residual(myfit.params, x)

    result = myfit.leastsq()

    print(' Nfev = ', result.nfev)
    print(result.chisqr, result.redchi, result.nfree)

    report_fit(result.params)
    pfit = result.params
    fit = residual(result.params, x)
    assert(pfit['cen_l'].value == 1.5 + pfit['cen_g'].value)
    assert(pfit['amp_l'].value == pfit['amp_tot'].value - pfit['amp_g'].value)
    assert(pfit['wid_l'].value == 2 * pfit['wid_g'].value)
Example #8
def test_derive():
    def func(pars, x, data=None):
        model = pars['a'] * np.exp(-pars['b'] * x) + pars['c']
        if data is None:
            return model
        return model - data

    def dfunc(pars, x, data=None):
        v = np.exp(-pars['b']*x)
        return np.array([v, -pars['a']*x*v, np.ones(len(x))])

    def f(var, x):
        return var[0] * np.exp(-var[1] * x) + var[2]

    params1 = Parameters()
    params1.add('a', value=10)
    params1.add('b', value=10)
    params1.add('c', value=10)

    params2 = Parameters()
    params2.add('a', value=10)
    params2.add('b', value=10)
    params2.add('c', value=10)

    a, b, c = 2.5, 1.3, 0.8
    x = np.linspace(0, 4, 50)
    y = f([a, b, c], x)
    data = y + 0.15*np.random.normal(size=len(x))

    # fit without analytic derivative
    min1 = Minimizer(func, params1, fcn_args=(x,), fcn_kws={'data': data})
    out1 = min1.leastsq()

    # fit with analytic derivative
    min2 = Minimizer(func, params2, fcn_args=(x,), fcn_kws={'data': data})
    out2 = min2.leastsq(Dfun=dfunc, col_deriv=1)

    check_wo_stderr(out1.params['a'], out2.params['a'].value, 0.00005)
    check_wo_stderr(out1.params['b'], out2.params['b'].value, 0.00005)
    check_wo_stderr(out1.params['c'], out2.params['c'].value, 0.00005)
Example #9
	def fitevent(self, edat, initguess):
		try:
			dt = 1000./self.Fs 	# time-step in ms.

			# control numpy error reporting
			np.seterr(invalid='ignore', over='ignore', under='ignore')

			ts = np.array([ t*dt for t in range(0,len(edat)) ], dtype='float64')

			self.nStates=len(initguess)
			initRCConst=dt*5.

			# setup fit params
			params=Parameters()

			for i in range(0, len(initguess)):
				params.add('a'+str(i), value=initguess[i][0]) 
				params.add('mu'+str(i), value=initguess[i][1]) 
				if self.LinkRCConst:				
					if i==0:
						params.add('tau'+str(i), value=initRCConst)
					else:
						params.add('tau'+str(i), value=initRCConst, expr='tau0')
				else:
					params.add('tau'+str(i), value=initRCConst)

			params.add('b', value=self.baseMean )
			

			igdict=params.valuesdict()

			optfit=Minimizer(self._objfunc, params, fcn_args=(ts,edat,))
			optfit.prepare_fit()
			result=optfit.leastsq(xtol=self.FitTol,ftol=self.FitTol,maxfev=self.FitIters)

			if result.success:
				tt=[init[0] for init, final in zip(igdict.items(), (result.params.valuesdict()).items()) if init==final]
				if len(tt) > 0:
					self.flagEvent('wInitGuessUnchanged')

				self._recordevent(result)
			else:
				#print optfit.message, optfit.lmdif_message
				self.rejectEvent('eFitConvergence')
		except KeyboardInterrupt:
			self.rejectEvent('eFitUserStop')
			raise
		except InvalidEvent:
			self.rejectEvent('eInvalidEvent')
		except:
			self.rejectEvent('eFitFailure')
Example #10
    def time_confinterval(self):
        np.random.seed(0)
        x = np.linspace(0.3,10,100)
        y = 1/(0.1*x)+2+0.1*np.random.randn(x.size)

        p = Parameters()
        p.add_many(('a', 0.1), ('b', 1))

        def residual(p):
            a = p['a'].value
            b = p['b'].value

            return 1/(a*x)+b-y

        minimizer = Minimizer(residual, p)
        out = minimizer.leastsq()
        return conf_interval(minimizer, out)
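The same pattern as a standalone script, finishing with lmfit's ci_report to tabulate the intervals (a sketch; the benchmark class wrapping the method above is omitted):

import numpy as np
from lmfit import Minimizer, Parameters, conf_interval, ci_report

np.random.seed(0)
x = np.linspace(0.3, 10, 100)
y = 1/(0.1*x) + 2 + 0.1*np.random.randn(x.size)

p = Parameters()
p.add_many(('a', 0.1), ('b', 1))

def residual(p):
    return 1/(p['a'].value*x) + p['b'].value - y

minimizer = Minimizer(residual, p)
out = minimizer.leastsq()
ci = conf_interval(minimizer, out)
print(ci_report(ci))    # tabulated confidence intervals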
Example #11
def test_peakfit():
    def residual(pars, x, data=None):
        g1 = gaussian(x, pars['a1'], pars['c1'], pars['w1'])
        g2 = gaussian(x, pars['a2'], pars['c2'], pars['w2'])
        model = g1 + g2
        if data is None:
            return model
        return (model - data)

    n = 601
    xmin = 0.
    xmax = 15.0
    noise = np.random.normal(scale=.65, size=n)
    x = np.linspace(xmin, xmax, n)

    org_params = Parameters()
    org_params.add_many(('a1', 12.0, True, None, None, None),
                        ('c1', 5.3, True, None, None, None),
                        ('w1', 1.0, True, None, None, None),
                        ('a2', 9.1, True, None, None, None),
                        ('c2', 8.1, True, None, None, None),
                        ('w2', 2.5, True, None, None, None))

    data = residual(org_params, x) + noise

    fit_params = Parameters()
    fit_params.add_many(('a1', 8.0, True, None, 14., None),
                        ('c1', 5.0, True, None, None, None),
                        ('w1', 0.7, True, None, None, None),
                        ('a2', 3.1, True, None, None, None),
                        ('c2', 8.8, True, None, None, None))

    fit_params.add('w2', expr='2.5*w1')

    myfit = Minimizer(residual, fit_params, fcn_args=(x,),
                      fcn_kws={'data': data})

    myfit.prepare_fit()
    out = myfit.leastsq()
    check_paras(out.params, org_params)
Example #12
def test_ci_report():
    """test confidence interval report"""

    def residual(pars, x, data=None):
        argu = (x*pars['decay'])**2
        shift = pars['shift']
        if abs(shift) > np.pi/2:
            shift = shift - np.sign(shift)*np.pi
        model = pars['amp']*np.sin(shift + x/pars['period']) * np.exp(-argu)
        if data is None:
            return model
        return model - data

    p_true = Parameters()
    p_true.add('amp', value=14.0)
    p_true.add('period', value=5.33)
    p_true.add('shift', value=0.123)
    p_true.add('decay', value=0.010)

    n = 2500
    xmin = 0.
    xmax = 250.0
    x = np.linspace(xmin, xmax, n)
    data = residual(p_true, x) + np.random.normal(scale=0.7215, size=n)

    fit_params = Parameters()
    fit_params.add('amp', value=13.0)
    fit_params.add('period', value=2)
    fit_params.add('shift', value=0.0)
    fit_params.add('decay', value=0.02)

    mini = Minimizer(residual, fit_params, fcn_args=(x,),
                     fcn_kws={'data': data})
    out = mini.leastsq()
    report = fit_report(out)
    assert(len(report) > 500)

    ci, tr = conf_interval(mini, out, trace=True)
    report = ci_report(ci)
    assert(len(report) > 250)
Example #13
def feffit(paramgroup, datasets, rmax_out=10, path_outputs=True, _larch=None, **kws):
    """execute a Feffit fit: a fit of feff paths to a list of datasets

    Parameters:
    ------------
      paramgroup:   group containing parameters for fit
      datasets:     FeffitDataSet group, or list of FeffitDataSet groups.
      rmax_out:     maximum R value to calculate output arrays.
      path_outputs: Flag to set whether all Path outputs should be written.

    Returns:
    ---------
      a fit results group.  This will contain subgroups of:

        datasets: an array of FeffitDataSet groups used in the fit.
        params:   This will be identical to the input parameter group.
        fit:      an object which points to the low-level fit.

     Statistical parameters will be put into the params group.  Each
     dataset will have a 'data' and 'model' subgroup, each with arrays:
        k            wavenumber array of k
        chi          chi(k).
        kwin         window Omega(k) (length of input chi(k)).
        r            uniform array of R, out to rmax_out.
        chir         complex array of chi(R).
        chir_mag     magnitude of chi(R).
        chir_pha     phase of chi(R).
        chir_re      real part of chi(R).
        chir_im      imaginary part of chi(R).
    """


    def _resid(params, datasets=None, paramgroup=None, **kwargs):
        """ this is the residual function"""
        params2group(params, paramgroup)
        return concatenate([d._residual(paramgroup) for d in datasets])

    if isNamedClass(datasets, FeffitDataSet):
        datasets = [datasets]

    params = group2params(paramgroup, _larch=_larch)

    for ds in datasets:
        if not isNamedClass(ds, FeffitDataSet):
            print( "feffit needs a list of FeffitDataSets")
            return
        ds.prepare_fit()

    fit = Minimizer(_resid, params,
                    fcn_kws=dict(datasets=datasets,
                                 paramgroup=paramgroup),
                    scale_covar=True, **kws)

    result = fit.leastsq()

    params2group(result.params, paramgroup)
    dat = concatenate([d._residual(paramgroup, data_only=True) for d in datasets])

    n_idp = 0
    for ds in datasets:
        n_idp += ds.n_idp

    # here we rescale chi-square and reduced chi-square to n_idp
    npts =  len(result.residual)
    chi_square  = result.chisqr * n_idp*1.0 / npts
    chi_reduced = chi_square/(n_idp*1.0 - result.nvarys)
    rfactor = (result.residual**2).sum() / (dat**2).sum()
    # calculate 'aic', 'bic' rescaled to n_idp
    # note that neg2_loglikel is -2*log(likelihood)
    neg2_loglikel = n_idp * np.log(chi_square / n_idp)
    aic = neg2_loglikel + 2 * result.nvarys
    bic = neg2_loglikel + np.log(n_idp) * result.nvarys


    # With scale_covar = True, Minimizer() scales the uncertainties
    # by reduced chi-square assuming params.nfree is the correct value
    # for degrees-of-freedom. But n_idp-params.nvarys is a better measure,
    # so we rescale uncertainties here.

    covar = getattr(result, 'covar', None)
    # print("COVAR " , covar)
    if covar is not None:
        err_scale = (result.nfree / (n_idp - result.nvarys))
        for name in result.var_names:
            p = result.params[name]
            if isParameter(p) and p.vary:
                p.stderr *= sqrt(err_scale)

        # next, propagate uncertainties to constraints and path parameters.
        result.covar *= err_scale
        vsave, vbest = {}, []

        # 1. save current params
        for vname in result.var_names:
            par = result.params[vname]
            vsave[vname] = par
            vbest.append(par.value)

        # 2. get correlated uncertainties, set params accordingly
        uvars = correlated_values(vbest, result.covar)
        # 3. evaluate constrained params, save stderr
        for nam, obj in result.params.items():
            eval_stderr(obj, uvars,  result.var_names, result.params)

        # 4. evaluate path params, save stderr
        for ds in datasets:
            for p in ds.pathlist:
                p.store_feffdat()
                for pname in ('degen', 's02', 'e0', 'ei',
                              'deltar', 'sigma2', 'third', 'fourth'):
                    obj = p.params[PATHPAR_FMT % (pname, p.label)]
                    eval_stderr(obj, uvars,  result.var_names, result.params)


        # restore saved parameters again
        for vname in result.var_names:
            # setattr(params, vname, vsave[vname])
            params[vname] = vsave[vname]

        # clear any errors evaluating uncertainties
        if _larch is not None and (len(_larch.error) > 0):
            _larch.error = []

    # reset the parameters group with the newly updated uncertainties
    params2group(result.params, paramgroup)

    # here we create outputs arrays for chi(k), chi(r):
    for ds in datasets:
        ds.save_ffts(rmax_out=rmax_out, path_outputs=path_outputs)

    out = Group(name='feffit results', datasets=datasets,
                fitter=fit, fit_details=result, chi_square=chi_square,
                n_independent=n_idp, chi_reduced=chi_reduced,
                rfactor=rfactor, aic=aic, bic=bic, covar=covar)

    for attr in ('params', 'nvarys', 'nfree', 'ndata', 'var_names', 'nfev',
                 'success', 'errorbars', 'message', 'lmdif_message'):
        setattr(out, attr, getattr(result, attr, None))
    return out
Example #14
def test_constraints2():
    """add a user-defined function to symbol table"""
    def residual(pars, x, sigma=None, data=None):
        yg = gaussian(x, pars['amp_g'].value,
                      pars['cen_g'].value, pars['wid_g'].value)
        yl = lorentzian(x, pars['amp_l'].value,
                        pars['cen_l'].value, pars['wid_l'].value)

        slope = pars['line_slope'].value
        offset = pars['line_off'].value
        model =  yg +  yl + offset + x * slope
        if data is None:
            return model
        if sigma is None:
            return (model - data)
        return (model - data)/sigma


    n = 601
    xmin = 0.
    xmax = 20.0
    x = linspace(xmin, xmax, n)

    data = (gaussian(x, 21, 8.1, 1.2) +
            lorentzian(x, 10, 9.6, 2.4) +
            random.normal(scale=0.23,  size=n) +
            x*0.5)

    pfit = Parameters()
    pfit.add(name='amp_g',  value=10)
    pfit.add(name='cen_g',  value=9)
    pfit.add(name='wid_g',  value=1)

    pfit.add(name='amp_tot',  value=20)
    pfit.add(name='amp_l',  expr='amp_tot - amp_g')
    pfit.add(name='cen_l',  expr='1.5+cen_g')
    pfit.add(name='line_slope', value=0.0)
    pfit.add(name='line_off', value=0.0)

    sigma = 0.021  # estimate of data error (for all data points)

    myfit = Minimizer(residual, pfit,
                      fcn_args=(x,), fcn_kws={'sigma':sigma, 'data':data},
                      scale_covar=True)

    def width_func(wpar):
        """ """
        return 2*wpar

    myfit.params._asteval.symtable['wfun'] = width_func

    try:
        myfit.params.add(name='wid_l', expr='wfun(wid_g)')
    except:
        assert(False)

    result = myfit.leastsq()

    print(' Nfev = ', result.nfev)
    print(result.chisqr, result.redchi, result.nfree)
    report_fit(result.params)
    pfit = result.params
    fit = residual(result.params, x)
    assert(pfit['cen_l'].value == 1.5 + pfit['cen_g'].value)
    assert(pfit['amp_l'].value == pfit['amp_tot'].value - pfit['amp_g'].value)
    assert(pfit['wid_l'].value == 2 * pfit['wid_g'].value)
Example #15
def pre_edge(energy, mu=None, group=None, e0=None, step=None,
             nnorm=None, nvict=0, pre1=None, pre2=-50,
             norm1=100, norm2=None, make_flat=True, emin_area=None,
             _larch=None):
    """pre edge subtraction, normalization for XAFS

    This performs a number of steps:
       1. determine E0 (if not supplied) from max of deriv(mu)
       2. fit a line or low-order polynomial to the region below the edge
       3. fit a polynomial to the region above the edge
       4. extrapolate the two curves to E0 to determine the edge jump
       5. estimate the area from emin_area to norm2, to get norm_area

    Arguments
    ----------
    energy:  array of x-ray energies, in eV, or group (see note)
    mu:      array of mu(E)
    group:   output group
    e0:      edge energy, in eV. If None, it will be determined here.
    step:    edge jump.  If None, it will be determined here.
    pre1:    low E range (relative to E0) for pre-edge fit
    pre2:    high E range (relative to E0) for pre-edge fit
    nvict:   energy exponent to use for pre-edge fit.  See Notes.
    norm1:   low E range (relative to E0) for post-edge fit
    norm2:   high E range (relative to E0) for post-edge fit
    nnorm:   degree of polynomial (ie, nnorm+1 coefficients will be found) for
             post-edge normalization curve. Default=None (see note)
    make_flat: boolean (Default True) to calculate flattened output.
    emin_area: energy threshold for area normalization (see note)


    Returns
    -------
      None

    The following attributes will be written to the output group:
        e0          energy origin
        edge_step   edge step
        norm        normalized mu(E), using polynomial
        norm_area   normalized mu(E), using integrated area
        flat        flattened, normalized mu(E)
        pre_edge    determined pre-edge curve
        post_edge   determined post-edge, normalization curve
        dmude       derivative of mu(E)

    (if the output group is None, _sys.xafsGroup will be written to)

    Notes
    -----
    1  If the first argument is a Group, it must contain 'energy' and 'mu'.
       If it exists, group.e0 will be used as e0.
       See First Argument Group in Documentation.
    2  nvict gives an exponent to the energy term for the fits to the pre-edge
       and the post-edge region.  For the pre-edge, a line (m * energy + b) is
       fit to mu(energy)*energy**nvict over the pre-edge region,
       energy=[e0+pre1, e0+pre2].  For the post-edge, a polynomial of order
       nnorm will be fit to mu(energy)*energy**nvict of the post-edge region
       energy=[e0+norm1, e0+norm2].
    3  nnorm will default to 2 if norm2-norm1 > 400, to 1 if 100 < norm2-norm1 < 400,
       and to 0 if norm2-norm1 < 100.
    4  norm_area will be estimated so that the area between emin_area and norm2
       is equal to (norm2-emin_area).  By default emin_area will be set to the
       *nominal* edge energy for the element and edge - 3*core_level_width

    """


    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                         fcn_name='pre_edge')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()

    pre_dat = preedge(energy, mu, e0=e0, step=step, nnorm=nnorm,
                      nvict=nvict, pre1=pre1, pre2=pre2, norm1=norm1,
                      norm2=norm2)


    group = set_xafsGroup(group, _larch=_larch)

    e0    = pre_dat['e0']
    norm  = pre_dat['norm']
    norm1 = pre_dat['norm1']
    norm2 = pre_dat['norm2']
    # generate flattened spectra, by fitting a quadratic to .norm
    # and removing that.
    flat = norm
    ie0 = index_nearest(energy, e0)
    p1 = index_of(energy, norm1+e0)
    p2 = index_nearest(energy, norm2+e0)
    if p2-p1 < 2:
        p2 = min(len(energy), p1 + 2)

    if make_flat and p2-p1 > 4:
        enx, mux = remove_nans2(energy[p1:p2], norm[p1:p2])
        # enx, mux = (energy[p1:p2], norm[p1:p2])
        fpars = Parameters()
        ncoefs = len(pre_dat['norm_coefs'])
        fpars.add('c0', value=0, vary=True)
        fpars.add('c1', value=0, vary=(ncoefs>1))
        fpars.add('c2', value=0, vary=(ncoefs>2))
        fit = Minimizer(flat_resid, fpars, fcn_args=(enx, mux))
        result = fit.leastsq(xtol=1.e-6, ftol=1.e-6)

        fc0 = result.params['c0'].value
        fc1 = result.params['c1'].value
        fc2 = result.params['c2'].value

        flat_diff   = fc0 + energy * (fc1 + energy * fc2)
        flat        = norm - (flat_diff  - flat_diff[ie0])
        flat[:ie0]  = norm[:ie0]


    group.e0 = e0
    group.norm = norm
    group.norm_poly = 1.0*norm
    group.flat = flat
    group.dmude = np.gradient(mu)/np.gradient(energy)
    group.edge_step  = pre_dat['edge_step']
    group.edge_step_poly = pre_dat['edge_step']
    group.pre_edge   = pre_dat['pre_edge']
    group.post_edge  = pre_dat['post_edge']

    group.pre_edge_details = Group()
    group.pre_edge_details.pre1   = pre_dat['pre1']
    group.pre_edge_details.pre2   = pre_dat['pre2']
    group.pre_edge_details.nnorm  = pre_dat['nnorm']
    group.pre_edge_details.norm1  = pre_dat['norm1']
    group.pre_edge_details.norm2  = pre_dat['norm2']
    group.pre_edge_details.nvict  = pre_dat['nvict']
    group.pre_edge_details.pre1_input  = pre_dat['pre1_input']
    group.pre_edge_details.norm2_input  = pre_dat['norm2_input']
    group.pre_edge_details.pre_slope  = pre_dat['precoefs'][0]
    group.pre_edge_details.pre_offset = pre_dat['precoefs'][1]

    for i in range(MAX_NNORM):
        if hasattr(group, 'norm_c%i' % i):
            delattr(group, 'norm_c%i' % i)
    for i, c in enumerate(pre_dat['norm_coefs']):
        setattr(group.pre_edge_details, 'norm_c%i' % i, c)

    # guess element and edge
    group.atsym = getattr(group, 'atsym', None)
    group.edge = getattr(group, 'edge', None)

    if group.atsym is None or group.edge is None:
        _atsym, _edge = guess_edge(group.e0, _larch=_larch)
        if group.atsym is None: group.atsym = _atsym
        if group.edge is None:  group.edge = _edge

    # calculate area-normalization
    if emin_area is None:
        emin_area = (xray_edge(group.atsym, group.edge).edge
                     - 2*core_width(group.atsym, group.edge))
    i1 = index_of(energy, emin_area)
    i2 = index_of(energy, e0+norm2)
    en = energy[i1:i2]
    area_step = max(1.e-15, simps(norm[i1:i2], en) / en.ptp())
    group.edge_step_area = group.edge_step_poly * area_step
    group.norm_area = norm/area_step
    group.pre_edge_details.emin_area = emin_area

    return
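The flattening step above fits a quadratic c0 + c1*E + c2*E**2 to the normalized spectrum and subtracts it beyond E0. The same lmfit pattern in isolation (a sketch on synthetic data; flat_resid is spelled out here, while the source imports it):

import numpy as np
from lmfit import Minimizer, Parameters

def flat_resid(pars, en, mu):
    """residual of a quadratic in energy against normalized mu"""
    return mu - (pars['c0'].value + en*(pars['c1'].value + en*pars['c2'].value))

enx = np.linspace(7200, 7800, 301)      # post-edge energies (eV), synthetic
mux = 1.0 + 2.e-4*(enx - 7200) + np.random.normal(scale=0.005, size=enx.size)

fpars = Parameters()
fpars.add('c0', value=0, vary=True)
fpars.add('c1', value=0, vary=True)
fpars.add('c2', value=0, vary=True)

result = Minimizer(flat_resid, fpars, fcn_args=(enx, mux)).leastsq(xtol=1.e-6, ftol=1.e-6)
fc0, fc1, fc2 = (result.params[name].value for name in ('c0', 'c1', 'c2'))
flat_diff = fc0 + enx*(fc1 + enx*fc2)   # the curve that pre_edge subtracts past e0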
Example #16
myfit.prepare_fit()
# 
for scale_covar in (True, False):
    myfit.scale_covar = scale_covar
    print('  ====  scale_covar = ', myfit.scale_covar, ' ===')
    for sigma in (0.1, 0.2, 0.23, 0.5):
        myfit.userkws['sigma'] = sigma

        p_fit['amp_g'].value = 10
        p_fit['cen_g'].value = 9
        p_fit['wid_g'].value = 1
        p_fit['line_slope'].value = 0.0
        p_fit['line_off'].value = 0.0

        myfit.leastsq()
        print('  sigma          = ', sigma)
        print('  chisqr         = ', myfit.chisqr)
        print('  reduced_chisqr = ', myfit.redchi)

        report_errors(p_fit, modelpars=p_true, show_correl=False)
        print('  ==============================')

        
# if HASPYLAB:
#     fit = residual(p_fit, x)
#     pylab.plot(x, fit, 'k-')
#     pylab.show()
#

Example #17
def test_derive():
    def func(pars, x, data=None):
        a = pars['a'].value
        b = pars['b'].value
        c = pars['c'].value

        model = a * np.exp(-b * x) + c
        if data is None:
            return model
        return (model - data)

    def dfunc(pars, x, data=None):
        a = pars['a'].value
        b = pars['b'].value
        c = pars['c'].value
        v = np.exp(-b*x)
        return [v, -a*x*v, np.ones(len(x))]

    def f(var, x):
        return var[0] * np.exp(-var[1] * x) + var[2]

    params1 = Parameters()
    params1.add('a', value=10)
    params1.add('b', value=10)
    params1.add('c', value=10)

    params2 = Parameters()
    params2.add('a', value=10)
    params2.add('b', value=10)
    params2.add('c', value=10)

    a, b, c = 2.5, 1.3, 0.8
    x = np.linspace(0, 4, 50)
    y = f([a, b, c], x)
    data = y + 0.15*np.random.normal(size=len(x))

    # fit without analytic derivative
    min1 = Minimizer(func, params1, fcn_args=(x,), fcn_kws={'data':data})
    min1.leastsq()
    fit1 = func(params1, x)

    # fit with analytic derivative
    min2 = Minimizer(func, params2, fcn_args=(x,), fcn_kws={'data':data})
    min2.leastsq(Dfun=dfunc, col_deriv=1)
    fit2 = func(params2, x)

    print ('''Comparison of fit to exponential decay
    with and without analytic derivatives, to
       model = a*exp(-b*x) + c
    for a = %.2f, b = %.2f, c = %.2f
    ==============================================
    Statistic/Parameter|   Without   | With      |
    ----------------------------------------------
    N Function Calls   |   %3i       |   %3i     |
    Chi-square         |   %.4f    |   %.4f  |
       a               |   %.4f    |   %.4f  |
       b               |   %.4f    |   %.4f  |
       c               |   %.4f    |   %.4f  |
    ----------------------------------------------
    ''' %  (a, b, c,
            min1.nfev,   min2.nfev,
            min1.chisqr, min2.chisqr,
            params1['a'].value, params2['a'].value,
            params1['b'].value, params2['b'].value,
            params1['c'].value, params2['c'].value ))

    check_wo_stderr(params1['a'], params2['a'].value, 0.000001)
    check_wo_stderr(params1['b'], params2['b'].value, 0.000001)
    check_wo_stderr(params1['c'], params2['c'].value, 0.000001)
Example #18
                             yBinSize=yBinSize,
                             normFactor=normFactor)

if not isinstance(points, np.ndarray): points = np.array(points)

y, x = 0, 1


def residuals_func(model_params, times, flux, fluxerr):
    model = transit_line_model(model_params, times)
    return (model - flux) / fluxerr


partial_residuals = partial(residuals_func,
                            times=timeSliceKmod,
                            flux=fluxSliceK / np.median(fluxSliceK),
                            fluxerr=ferrSliceK / np.median(fluxSliceK))

mle0 = Minimizer(partial_residuals, initialParams)

start = time()
fitResult = mle0.leastsq()
print("LMFIT operation took {} seconds".format(time() - start))

report_errors(fitResult.params)

# # plt.scatter([p[0] for p in points], [p[1] for p in points], s=0.1, c=interpolFluxes)
# plt.scatter(points.T[x], points.T[y], s=0.1, c=interpolFluxes)
# plt.colorbar()
# plt.show()
Example #19
	def __FitEvent(self):
		try:
			varyBlockedCurrent=True

			i0=np.abs(self.baseMean)
			i0sig=self.baseSD
			dt = 1000./self.Fs 	# time-step in ms.
			# edat=np.asarray( np.abs(self.eventData),  dtype='float64' )
			edat=self.dataPolarity*np.asarray( self.eventData,  dtype='float64' )

			blockedCurrent=min(edat)
			tauVal=dt

			estart 	= self.__eventStartIndex( self.__threadList( edat, range(0,len(edat)) ), i0, i0sig ) - 1
			eend 	= self.__eventEndIndex( self.__threadList( edat, range(0,len(edat)) ), i0, i0sig ) - 2

			# For long events, fix the blocked current to speed up the fit
			#if (eend-estart) > 1000:
			#	blockedCurrent=np.mean(edat[estart+50:eend-50])

			# control numpy error reporting
			np.seterr(invalid='ignore', over='ignore', under='ignore')

			ts = np.array([ t*dt for t in range(0,len(edat)) ], dtype='float64')

			#pl.plot(ts,edat)
			#pl.show()

			params=Parameters()

			# print self.absDataStartIndex

			params.add('mu1', value=estart * dt)
			params.add('mu2', value=eend * dt)
			params.add('a', value=(i0-blockedCurrent), vary=varyBlockedCurrent)
			params.add('b', value = i0)
			params.add('tau1', value = tauVal)

			if self.LinkRCConst:
				params.add('tau2', value = tauVal, expr='tau1')
			else:
				params.add('tau2', value = tauVal)


			optfit=Minimizer(self.__objfunc, params, fcn_args=(ts,edat,))
			optfit.prepare_fit()

			result=optfit.leastsq(xtol=self.FitTol,ftol=self.FitTol,maxfev=self.FitIters)

			# print optfit.params['b'].value, optfit.params['b'].value - optfit.params['a'].value, optfit.params['mu1'].value, optfit.params['mu2'].value
			if result.success:
				if result.params['mu1'].value < 0.0 or result.params['mu2'].value < 0.0:
					# print 'eInvalidFitParams1', optfit.params['b'].value, optfit.params['b'].value - optfit.params['a'].value, optfit.params['mu1'].value, optfit.params['mu2'].value
					self.rejectEvent('eInvalidResTime')
				# The start of the event is set past the length of the data
				elif result.params['mu1'].value > ts[-1]:
					# print 'eInvalidFitParams2', optfit.params['b'].value, optfit.params['b'].value - optfit.params['a'].value, optfit.params['mu1'].value, optfit.params['mu2'].value
					self.rejectEvent('eInvalidEventStart')
				else:
					self.mdOpenChCurrent 	= result.params['b'].value 
					self.mdBlockedCurrent	= result.params['b'].value - result.params['a'].value
					self.mdEventStart		= result.params['mu1'].value 
					self.mdEventEnd			= result.params['mu2'].value
					self.mdRCConst1			= result.params['tau1'].value
					self.mdRCConst2			= result.params['tau2'].value
					self.mdAbsEventStart	= self.mdEventStart + self.absDataStartIndex * dt

					self.mdBlockDepth		= self.mdBlockedCurrent/self.mdOpenChCurrent
					self.mdResTime			= self.mdEventEnd - self.mdEventStart
					
					self.mdRedChiSq			= result.chisqr/( np.var(result.residual) * (len(self.eventData) - result.nvarys -1) )

					# if (eend-estart) > 1000:
					# 	print blockedCurrent, self.mdBlockedCurrent, self.mdOpenChCurrent, self.mdResTime, self.mdRiseTime, self.mdRedChiSq, optfit.chisqr
					# if self.mdBlockDepth > self.BlockRejectRatio:
					# 	# print 'eBlockDepthHigh', optfit.params['b'].value, optfit.params['b'].value - optfit.params['a'].value, optfit.params['mu1'].value, optfit.params['mu2'].value
					# 	self.rejectEvent('eBlockDepthHigh')
						
					if math.isnan(self.mdRedChiSq):
						self.rejectEvent('eInvalidChiSq')
					if self.mdBlockDepth < 0 or self.mdBlockDepth > 1:
						self.rejectEvent('eInvalidBlockDepth')
					if self.mdRCConst1 <= 0 or self.mdRCConst2 <= 0:
						self.rejectEvent('eInvalidRCConstant')

					#print i0, i0sig, [optfit.params['a'].value, optfit.params['b'].value, optfit.params['mu1'].value, optfit.params['mu2'].value, optfit.params['tau'].value]
			else:
				# print optfit.message, optfit.lmdif_message
				self.rejectEvent('eFitConvergence')

		except KeyboardInterrupt:
			self.rejectEvent('eFitUserStop')
			raise
		except:
			# print optfit.message, optfit.lmdif_message
			self.rejectEvent('eFitFailure')
Example #20
params1.add('b', value=10)
params1.add('c', value=10)

params2 = Parameters()
params2.add('a', value=10)
params2.add('b', value=10)
params2.add('c', value=10)

a, b, c = 2.5, 1.3, 0.8
x = np.linspace(0,4,50)
y = f([a, b, c], x)
data = y + 0.15*np.random.normal(size=len(x))

# fit without analytic derivative
min1 = Minimizer(func, params1, fcn_args=(x,), fcn_kws={'data':data})
min1.leastsq()
fit1 = func(params1, x)

# fit with analytic derivative
min2 = Minimizer(func, params2, fcn_args=(x,), fcn_kws={'data':data})
min2.leastsq(Dfun=dfunc, col_deriv=1)
fit2 = func(params2, x)

print '''Comparison of fit to exponential decay
with and without analytic derivatives, to
   model = a*exp(-b*x) + c
for a = %.2f, b = %.2f, c = %.2f
==============================================
Statistic/Parameter|   Without   | With      |
----------------------------------------------
N Function Calls   |   %3i       |   %3i     |
Example #21
                      'data': data
                  })

myfit.prepare_fit()
#
for scale_covar in (True, False):
    myfit.scale_covar = scale_covar
    print('  ====  scale_covar = ', myfit.scale_covar, ' ===')
    for sigma in (0.1, 0.2, 0.23, 0.5):
        myfit.userkws['sigma'] = sigma

        p_fit['amp_g'].value = 10
        p_fit['cen_g'].value = 9
        p_fit['wid_g'].value = 1
        p_fit['line_slope'].value = 0.0
        p_fit['line_off'].value = 0.0

        out = myfit.leastsq()
        print('  sigma          = ', sigma)
        print('  chisqr         = ', out.chisqr)
        print('  reduced_chisqr = ', out.redchi)

        report_fit(out.params, modelpars=p_true, show_correl=False)
        print('  ==============================')

# if HASPYLAB:
#     fit = residual(p_fit, x)
#     pylab.plot(x, fit, 'k-')
#     pylab.show()
#
Example #22
def test_derive():
    def func(pars, x, data=None):
        a = pars['a'].value
        b = pars['b'].value
        c = pars['c'].value

        model = a * np.exp(-b * x) + c
        if data is None:
            return model
        return (model - data)

    def dfunc(pars, x, data=None):
        a = pars['a'].value
        b = pars['b'].value
        c = pars['c'].value
        v = np.exp(-b * x)
        return [v, -a * x * v, np.ones(len(x))]

    def f(var, x):
        return var[0] * np.exp(-var[1] * x) + var[2]

    params1 = Parameters()
    params1.add('a', value=10)
    params1.add('b', value=10)
    params1.add('c', value=10)

    params2 = Parameters()
    params2.add('a', value=10)
    params2.add('b', value=10)
    params2.add('c', value=10)

    a, b, c = 2.5, 1.3, 0.8
    x = np.linspace(0, 4, 50)
    y = f([a, b, c], x)
    data = y + 0.15 * np.random.normal(size=len(x))

    # fit without analytic derivative
    min1 = Minimizer(func, params1, fcn_args=(x, ), fcn_kws={'data': data})
    min1.leastsq()
    fit1 = func(params1, x)

    # fit with analytic derivative
    min2 = Minimizer(func, params2, fcn_args=(x, ), fcn_kws={'data': data})
    min2.leastsq(Dfun=dfunc, col_deriv=1)
    fit2 = func(params2, x)

    print('''Comparison of fit to exponential decay
    with and without analytic derivatives, to
       model = a*exp(-b*x) + c
    for a = %.2f, b = %.2f, c = %.2f
    ==============================================
    Statistic/Parameter|   Without   | With      |
    ----------------------------------------------
    N Function Calls   |   %3i       |   %3i     |
    Chi-square         |   %.4f    |   %.4f  |
       a               |   %.4f    |   %.4f  |
       b               |   %.4f    |   %.4f  |
       c               |   %.4f    |   %.4f  |
    ----------------------------------------------
    ''' % (a, b, c, min1.nfev, min2.nfev, min1.chisqr, min2.chisqr,
           params1['a'].value, params2['a'].value, params1['b'].value,
           params2['b'].value, params1['c'].value, params2['c'].value))

    check_wo_stderr(params1['a'], params2['a'].value, 0.000001)
    check_wo_stderr(params1['b'], params2['b'].value, 0.000001)
    check_wo_stderr(params1['c'], params2['c'].value, 0.000001)
Example #23
    return model - data


n = 601
random.seed(0)
x = linspace(0, 20.0, n)

data = (gaussian(x, 21, 6.1, 1.2) + lorentzian(x, 10, 9.6, 1.3) +
        random.normal(scale=0.1, size=n))

pfit = Parameters()
pfit.add(name='amp_g', value=10)
pfit.add(name='amp_l', value=10)
pfit.add(name='cen_g', value=5)
pfit.add(name='peak_split', value=2.5, min=0, max=5, vary=True)
pfit.add(name='cen_l', expr='peak_split+cen_g')
pfit.add(name='wid_g', value=1)
pfit.add(name='wid_l', expr='wid_g')

mini = Minimizer(residual, pfit, fcn_args=(x, data))
out = mini.leastsq()

report_fit(out.params)

best_fit = data + out.residual

if HASPYLAB:
    plt.plot(x, data, 'bo')
    plt.plot(x, best_fit, 'r--')
    plt.show()
Example #24
params1.add('b', value=10)
params1.add('c', value=10)

params2 = Parameters()
params2.add('a', value=10)
params2.add('b', value=10)
params2.add('c', value=10)

a, b, c = 2.5, 1.3, 0.8
x = np.linspace(0, 4, 50)
y = f([a, b, c], x)
data = y + 0.15 * np.random.normal(size=len(x))

# fit without analytic derivative
min1 = Minimizer(func, params1, fcn_args=(x, ), fcn_kws={'data': data})
out1 = min1.leastsq()
fit1 = func(out1.params, x)

# fit with analytic derivative
min2 = Minimizer(func, params2, fcn_args=(x, ), fcn_kws={'data': data})
out2 = min2.leastsq(Dfun=dfunc, col_deriv=1)
fit2 = func(out2.params, x)

print '''Comparison of fit to exponential decay
with and without analytic derivatives, to
   model = a*exp(-b*x) + c
for a = %.2f, b = %.2f, c = %.2f
==============================================
Statistic/Parameter|   Without   | With      |
----------------------------------------------
N Function Calls   |   %3i       |   %3i     |
Example #25
    def extractfeatures_inside(self, DICOMImages, image_pos_pat, image_ori_pat, series_path, phases_series, VOI_mesh):
        """ Start pixVals for collection pixel values at VOI """
        pixVals = []
        deltaS = {}
        
        # necessary to read point coords
        VOIPnt = [0,0,0]
        ijk = [0,0,0]
        pco = [0,0,0]
        
        for i in range(len(DICOMImages)):
            abspath_PhaseID = series_path+os.sep+str(phases_series[i]) 
            print(phases_series[i])
            
            # Get total number of files
            load = Inputs_init()
            [len_listSeries_files, FileNms_slices_sorted_stack] = load.ReadDicomfiles(abspath_PhaseID)
            mostleft_slice = FileNms_slices_sorted_stack.slices[0]
            
            # Get dicom header, retrieve
            dicomInfo_series = dicom.read_file(abspath_PhaseID+os.sep+str(mostleft_slice)) 
            
            # (0008,0031) AT S Series Time            # hh.mm.ss.frac
            seriesTime = str(dicomInfo_series[0x0008,0x0031].value) 
            # (0008,0033) AT S Image Time             # hh.mm.ss.frac
            imageTime = str(dicomInfo_series[0x0008,0x0033].value)
            
            # (0008,0032) AT S Acquisition Time       # hh.mm.ss.frac
            ti = str(dicomInfo_series[0x0008,0x0032].value) 
            
            acquisitionTimepoint = datetime.time(hour=int(ti[0:2]), minute=int(ti[2:4]), second=int(ti[4:6]))
            self.timepoints.append( datetime.datetime.combine(datetime.date.today(), acquisitionTimepoint) )
            
            # find mapping to Dicom space  
            [transformed_image, transform_cube] = Display().dicomTransform(DICOMImages[i], image_pos_pat, image_ori_pat)
            
            ### Get inside of VOI            
            [VOI_scalars, VOIdims] = self.createMaskfromMesh(VOI_mesh, transformed_image)
            print "\n VOIdims"
            print VOIdims
            
            # get non zero elements
            image_scalars = transformed_image.GetPointData().GetScalars()
            numpy_VOI_imagedata = vtk_to_numpy(image_scalars)     
            
            numpy_VOI_imagedata = numpy_VOI_imagedata.reshape(VOIdims[2], VOIdims[1], VOIdims[0]) 
            numpy_VOI_imagedata = numpy_VOI_imagedata.transpose(2,1,0)
            
            print "Shape of VOI_imagedata: "
            print numpy_VOI_imagedata.shape
            
            #################### HERE GET IT AND MASK IT OUT
            self.nonzeroVOIextracted = nonzero(VOI_scalars)
            print(self.nonzeroVOIextracted)
            
            VOI_imagedata = numpy_VOI_imagedata[self.nonzeroVOIextracted]     
            
            print "shape of VOI_imagedata  Clipped:"
            print VOI_imagedata.shape
        
            for j in range( len(VOI_imagedata) ):
                pixValx = VOI_imagedata[j]
                pixVals.append(pixValx)
                        
            # Now collect pixVals
            print "Saving %s" % 'delta'+str(i)
            deltaS['delta'+str(i)] = pixVals
            pixVals = []
                    
        print(self.timepoints)
        
        # Collecting timepoints in proper format
        t_delta = []
        t_delta.append(0)
        total_time = 0
        for i in range(len(DICOMImages)-1):
            current_time = self.timepoints[i+1]
            previous_time = self.timepoints[i]
            difference_time =current_time - previous_time
            timestop = divmod(difference_time.total_seconds(), 60)
            t_delta.append( t_delta[i] + timestop[0]+timestop[1]*(1./60))
            total_time = total_time+timestop[0]+timestop[1]*(1./60)
            
        # finally print t_delta
        print(t_delta)
        t = array(t_delta)
        print("total_time")
        print(total_time)
        
        ##############################################################
        # Finished sampling deltaS
        # Apply lmfit to deltaS
        # first sample the mean
        data_deltaS = []; t_deltaS = []; mean_deltaS = []; sd_deltaS = []; se_deltaS = []; n_deltaS = []
        
        # append So and to
        data_deltaS.append( 0 )       
        t_deltaS.append(0)
        mean_deltaS.append( mean(deltaS['delta0']) )
        sd_deltaS.append(0)
        se_deltaS.append(0)
        n_deltaS.append( len(deltaS['delta0']) )
        
        for k in range(1,len(DICOMImages)):
            deltaS_i =  ( mean(array(deltaS['delta'+str(k)]).astype(float)) -  mean(deltaS['delta0']) )/  mean(deltaS['delta0'])
            data_deltaS.append( deltaS_i )
            t_deltaS.append(k)
            print('delta'+str(k))
            print(data_deltaS[k])
            
            ##############################################################
            # Calculate data_error
            # estimate the population mean and SD from our samples to find SE
            # SE tells us the distribution of individual scores around the sampled mean.
            mean_deltaS_i = mean(array(deltaS['delta'+str(k)]))
            std_deltaS_i = std(array(deltaS['delta'+str(k)]))
            n_deltaS_i = len(array(deltaS['delta'+str(k)]))
                
            sd_deltaS.append( std_deltaS_i )
            mean_deltaS.append( mean_deltaS_i )
            
            # Standard Error of the mean SE
            # the smaller the variability in the data, the more confident we are that one value (the mean) accurately reflects them.
            se_deltaS.append(std_deltaS_i/sqrt(n_deltaS_i))
            n_deltaS.append(n_deltaS_i)
                        
        # make array for data_deltaS
        data = array(data_deltaS)
        
        print "\n================\nMean and SE (i.e VOI sample data)"
        print mean_deltaS
        print se_deltaS
        
        # create a set of Parameters
        params = Parameters()
        params.add('amp',   value= 10,  min=0)
        params.add('alpha', value= 1, min=0) 
        params.add('beta', value= 0.05, min=0.0001, max=0.9)
        
        # do fit, here with leastsq model
        # define objective function: returns the array to be minimized
        def fcn2min(params, t, data):
            global model, model_res, x
            """ model EMM for Bilateral DCE-MRI, subtract data"""
            # unpack parameters:
            #  extract .value attribute for each parameter
            amp = params['amp'].value    # Upper limit of deltaS
            alpha = params['alpha'].value    # rate of signal increase min-1
            beta = params['beta'].value        # rate of signal decrease min-1
                    
            model = amp * (1- exp(-alpha*t)) * exp(-beta*t)
            
            x = linspace(0, t[4], 101)
            model_res = amp * (1- exp(-alpha*x)) * exp(-beta*x)
        
            return model - data
        
        #####
        myfit = Minimizer(fcn2min,  params, fcn_args=(t,), fcn_kws={'data':data})
        myfit.prepare_fit()
        myfit.leastsq()
            
        # On a successful fit using the leastsq method, several goodness-of-fit statistics
        # and values related to the uncertainty in the fitted variables will be calculated
        print "myfit.success"
        print myfit.success
        print "myfit.residual"
        print myfit.residual
        print "myfit.chisqr"
        print myfit.chisqr
        print "myfit.redchi"
        print myfit.redchi
            
        # calculate final result
        final = data + myfit.residual
        # write error report
        report_errors(params)
        
        # Calculate R-square
        # R_square = sum( y_fitted - y_mean)/ sum(y_data - y_mean)
        R_square = sum( (model - mean(data))**2 )/ sum( (data - mean(data))**2 )
        print "R^2"
        print R_square
        
        self.amp = params['amp'].value
        self.alpha = params['alpha'].value
        self.beta = params['beta'].value
        
        ##################################################
        # Now Calculate Extract parameters from model
        self.iAUC1 = params['amp'].value *( ((1-exp(-params['beta'].value*t[1]))/params['beta'].value) + (exp((-params['alpha'].value+params['beta'].value)*t[1])-1)/(params['alpha'].value+params['beta'].value) )
        print "iAUC1"
        print self.iAUC1
        
        self.Slope_ini = params['amp'].value*params['alpha'].value
        print "Slope_ini"
        print self.Slope_ini
    
        self.Tpeak = (1/params['alpha'].value)*log(1+(params['alpha'].value/params['beta'].value))
        print "Tpeak"
        print self.Tpeak
    
        self.Kpeak = -params['amp'].value * params['alpha'].value * params['beta'].value
        print "Kpeak"
        print self.Kpeak
    
        self.SER = exp( (t[4]-t[1])*params['beta'].value) * ( (1-exp(-params['alpha'].value*t[1]))/(1-exp(-params['alpha'].value*t[4])) )
        print "SER"
        print self.SER
        
        ##################################################
        # Now Calculate enhancement Kinetic based features
        # Based on the course of signal intensity within the lesion
        print "\n Saving %s" % 'Crk'
        So = array(deltaS['delta0']).astype(float)
        Crk = {'Cr0': mean(So)}  
        C = {}
        Carray = []
        
        for k in range(1,len(DICOMImages)):
            Sk = array(deltaS['delta'+str(k)]).astype(float)
            Cr = 0
            for j in range( len(So) ):
                # extract average enhancement over the lesion at each time point
                Cr = Cr + (Sk[j] - So[j])/So[j]
                Carray.append((Sk[j] - So[j])/So[j])
                
            # compile
            C['C'+str(k)] = Carray
            Crk['Cr'+str(k)] = Cr/len(Sk)
        
        # Extract Fii_1
        for k in range(1,5):
            currentCr = array(Crk['Cr'+str(k)]).astype(float)
            print(currentCr)
            if( self.maxCr < currentCr):
                self.maxCr = float(currentCr)
                self.peakCr = int(k)
                
        print "Maximum Upate (Fii_1) = %d " %  self.maxCr
        print "Peak Cr (Fii_2) = %d " %  self.peakCr
        
        # Uptake rate
        self.UptakeRate = float(self.maxCr/self.peakCr)    
        print "Uptake rate (Fii_3) "
        print self.UptakeRate
        
        # WashOut Rate
        if( self.peakCr == 4):
            self.washoutRate = 0
        else:
            self.washoutRate = float( (self.maxCr - array(Crk['Cr'+str(4)]).astype(float))/(4-self.peakCr) )
        print "WashOut rate (Fii_4) "
        print self.washoutRate


        ##################################################
        # Now Calculate enhancement-variance Kinetic based features
        # Based on Crk['Cr'+str(k)] = Cr/len(Sk)
        print "\n Saving %s" % 'Vrk'
        Vrk = {}
        
        for k in range(1,5):
            Ci = array(C['C'+str(k)]).astype(float)    
            Cri = array(Crk['Cr'+str(k)]).astype(float)
            Vr = 0
            for j in range( len(Ci) ):
                # extract average enhancement over the lesion at each time point
                Vr = Vr + (Ci[j] - Cri)**2
            # compile
            Vrk['Vr'+str(k)] = Vr/(len(Ci)-1)
        
        # Extract Fiii_1
        for k in range(1,5):
            currentVr = array(Vrk['Vr'+str(k)]).astype(float)
            if( self.maxVr < currentVr):
                print(currentVr)
                self.maxVr = float(currentVr)
                self.peakVr = int(k)
        
        print "Maximum Variation of enhan (Fiii_1) = %d " %  self.maxVr
        print "Peak Vr (Fii_2) = %d " %  self.peakVr
        
        # Vr_increasingRate 
        self.Vr_increasingRate = self.maxVr/self.peakVr    
        print "Vr_increasingRate (Fiii_3)" 
        print self.Vr_increasingRate
        
        # Vr_decreasingRate
        if( self.peakVr == 4):
            Vr_decreasingRate = 0
        else:
            Vr_decreasingRate = float((self.maxVr - array(Vrk['Vr'+str(4)]).astype(float))/(4-self.peakVr))
        print "Vr_decreasingRate (Fiii_4) "
        print Vr_decreasingRate
        
        # Vr_post_1 
        self.Vr_post_1 = float( array(Vrk['Vr'+str(1)]).astype(float))
        print "Vr_post_1 (Fiii_5)"
        print self.Vr_post_1
 
        ##################################################
        # organize into dataframe
        self.dynamicEMM_inside = DataFrame( data=array([[ self.amp, self.alpha, self.beta, self.iAUC1, self.Slope_ini, self.Tpeak, self.Kpeak, self.SER, self.maxCr, self.peakCr, self.UptakeRate, self.washoutRate, self.maxVr, self.peakVr, self.Vr_increasingRate, self.Vr_post_1]]), 
                                columns=['A.inside', 'alpha.inside', 'beta.inside', 'iAUC1.inside', 'Slope_ini.inside', 'Tpeak.inside', 'Kpeak.inside', 'SER.inside', 'maxCr.inside', 'peakCr.inside', 'UptakeRate.inside', 'washoutRate.inside', 'maxVr.inside', 'peakVr.inside','Vr_increasingRate.inside', 'Vr_post_1.inside'])

        #############################################################
        # try to plot results
        pylab.figure()
        pylab.errorbar(t, data, yerr=se_deltaS, fmt='ro', label='data+SE') # data 'ro' red dots as markers
        pylab.plot(t, final, 'b+', label='data+residuals')    # data+residuals 'b+' blue pluses
        pylab.plot(t, model, 'b', label='model')    # model fit 'b' blue
        pylab.plot(x, model_res, 'k', label='model fit')    # model fit 'k' black
        pylab.xlabel(" post-contrast time (min)")
        pylab.ylabel("delta S(t)")
        pylab.legend()
        
        return self.dynamicEMM_inside
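
# The fit in this example minimizes an EMM objective (fcn2min); its definition
# is not shown in this fragment, but a minimal sketch of the model it evaluates
# (the full version appears in a later example in this collection; assumes
# numpy's exp is imported, as elsewhere in these examples):
def fcn2min(params, t, data):
    amp = params['amp'].value      # upper limit of deltaS
    alpha = params['alpha'].value  # rate of signal increase (min^-1)
    beta = params['beta'].value    # rate of signal decrease (min^-1)
    model = amp * (1 - exp(-alpha * t)) * exp(-beta * t)
    return model - data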
Example #26
0
def pre_edge(energy,
             mu=None,
             group=None,
             e0=None,
             step=None,
             nnorm=None,
             nvict=0,
             pre1=None,
             pre2=None,
             norm1=None,
             norm2=None,
             make_flat=True,
             _larch=None):
    """pre edge subtraction, normalization for XAFS

    This performs a number of steps:
       1. determine E0 (if not supplied) from max of deriv(mu)
       2. fit a line or low-order polynomial to the region below the edge
       3. fit a polynomial to the region above the edge
       4. extrapolate the two curves to E0 and take their difference
          to determine the edge jump

    Arguments
    ----------
    energy:  array of x-ray energies, in eV, or group (see note 1)
    mu:      array of mu(E)
    group:   output group
    e0:      edge energy, in eV. If None, it will be determined here.
    step:    edge jump.  If None, it will be determined here.
    pre1:    low E range (relative to E0) for pre-edge fit
    pre2:    high E range (relative to E0) for pre-edge fit
    nvict:   energy exponent to use for pre-edge fit.  See Notes.
    norm1:   low E range (relative to E0) for post-edge fit
    norm2:   high E range (relative to E0) for post-edge fit
    nnorm:   degree of polynomial (ie, nnorm+1 coefficients will be found) for
             post-edge normalization curve. See Notes.
    make_flat: boolean (Default True) to calculate flattened output.

    Returns
    -------
      None: The following attributes will be written to the output group:
        e0          energy origin
        edge_step   edge step
        norm        normalized mu(E), using polynomial
        norm_area   normalized mu(E), using integrated area
        flat        flattened, normalized mu(E)
        pre_edge    determined pre-edge curve
        post_edge   determined post-edge, normalization curve
        dmude       derivative of mu(E)

    (if the output group is None, _sys.xafsGroup will be written to)

    Notes
    -----
      1. Supports `First Argument Group` convention, requiring group members `energy` and `mu`.
      2. Support `Set XAFS Group` convention within Larch or if `_larch` is set.
      3. pre_edge: a line is fit to mu(energy)*energy**nvict over the region,
         energy=[e0+pre1, e0+pre2]. pre1 and pre2 default to None, which will set
             pre1 = e0 - 2nd energy point, rounded to 5 eV
             pre2 = roughly pre1/3.0, rounded to 5 eV
      4. post-edge: a polynomial of order nnorm is fit to mu(energy)*energy**nvict
         between energy=[e0+norm1, e0+norm2]. nnorm, norm1, norm2 default to None,
         which will set:
              norm2 = max energy - e0, rounded to 5 eV
              norm1 = roughly min(150, norm2/3.0), rounded to 5 eV
              nnorm = 2 if norm2-norm1>350, 1 if norm2-norm1>50, or 0 if less.
      5. flattening fits a quadratic curve (no matter nnorm) to the post-edge
         normalized mu(E) and subtracts that curve from it.
    """

    energy, mu, group = parse_group_args(energy,
                                         members=('energy', 'mu'),
                                         defaults=(mu, ),
                                         group=group,
                                         fcn_name='pre_edge')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()

    pre_dat = preedge(energy,
                      mu,
                      e0=e0,
                      step=step,
                      nnorm=nnorm,
                      nvict=nvict,
                      pre1=pre1,
                      pre2=pre2,
                      norm1=norm1,
                      norm2=norm2)

    group = set_xafsGroup(group, _larch=_larch)

    e0 = pre_dat['e0']
    norm = pre_dat['norm']
    norm1 = pre_dat['norm1']
    norm2 = pre_dat['norm2']
    # generate flattened spectra, by fitting a quadratic to .norm
    # and removing that.
    flat = norm
    ie0 = index_nearest(energy, e0)
    p1 = index_of(energy, norm1 + e0)
    p2 = index_nearest(energy, norm2 + e0)
    if p2 - p1 < 2:
        p2 = min(len(energy), p1 + 2)

    if make_flat and p2 - p1 > 4:
        enx, mux = remove_nans2(energy[p1:p2], norm[p1:p2])
        # enx, mux = (energy[p1:p2], norm[p1:p2])
        fpars = Parameters()
        ncoefs = len(pre_dat['norm_coefs'])
        fpars.add('c0', value=0, vary=True)
        fpars.add('c1', value=0, vary=(ncoefs > 1))
        fpars.add('c2', value=0, vary=(ncoefs > 2))
        fit = Minimizer(flat_resid, fpars, fcn_args=(enx, mux))
        result = fit.leastsq(xtol=1.e-6, ftol=1.e-6)

        fc0 = result.params['c0'].value
        fc1 = result.params['c1'].value
        fc2 = result.params['c2'].value

        flat_diff = fc0 + energy * (fc1 + energy * fc2)
        flat = norm - (flat_diff - flat_diff[ie0])
        flat[:ie0] = norm[:ie0]

    group.e0 = e0
    group.norm = norm
    group.norm_poly = 1.0 * norm
    group.flat = flat
    group.dmude = np.gradient(mu) / np.gradient(energy)
    group.edge_step = pre_dat['edge_step']
    group.edge_step_poly = pre_dat['edge_step']
    group.pre_edge = pre_dat['pre_edge']
    group.post_edge = pre_dat['post_edge']

    group.pre_edge_details = Group()
    for attr in ('pre1', 'pre2', 'norm1', 'norm2', 'nnorm', 'nvict'):
        setattr(group.pre_edge_details, attr, pre_dat.get(attr, None))

    group.pre_edge_details.pre_slope = pre_dat['precoefs'][0]
    group.pre_edge_details.pre_offset = pre_dat['precoefs'][1]

    for i in range(MAX_NNORM):
        if hasattr(group, 'norm_c%i' % i):
            delattr(group, 'norm_c%i' % i)
    for i, c in enumerate(pre_dat['norm_coefs']):
        setattr(group.pre_edge_details, 'norm_c%i' % i, c)

    # guess element and edge
    group.atsym = getattr(group, 'atsym', None)
    group.edge = getattr(group, 'edge', None)

    if group.atsym is None or group.edge is None:
        _atsym, _edge = guess_edge(group.e0)
        if group.atsym is None: group.atsym = _atsym
        if group.edge is None: group.edge = _edge
    return
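
# flat_resid, used by the flattening fit above, is not defined in this
# fragment; a minimal sketch consistent with how it is called (a quadratic in
# energy minus the normalized data; an assumption, not the library's exact code):
def flat_resid(pars, en, mu):
    c0, c1, c2 = pars['c0'].value, pars['c1'].value, pars['c2'].value
    return mu - (c0 + en * (c1 + en * c2))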
Example #27
0
    def featureMap(self, DICOMImages, img_features, time_points, featuresKeys, caseLabeloutput, path_outputFolder):
        """Extracts feature maps per pixel based on request from vector of keywords featuresKeys """
        ## Retrieve image data
        VOIshape = img_features['VOI0'].shape
        print VOIshape
        self.init_features(img_features, featuresKeys)
        data_deltaS=[]  
        self.allvar_F_r_i=[]
        
        # append So and to
        data_deltaS.append( 0 )  
        
        # Based on the course of signal intensity within the lesion
        So = array(img_features['VOI0']).astype(float)
        Crk = {'Cr0': mean(So)}  
        C = {}
        Carray = []
        
        # iterate point-by-point to extract feature map       
        for i in range(VOIshape[0]):
            for j in range(VOIshape[1]):
                for k in range(VOIshape[2]):
                    for timep in range(1, len(DICOMImages)):
                        pix_deltaS = (img_features['VOI'+str(timep)][i,j,k].astype(float) - img_features['VOI0'][i,j,k].astype(float))/img_features['VOI0'][i,j,k].astype(float)
                        if pix_deltaS<0: pix_deltaS=0 
                        data_deltaS.append( pix_deltaS )
                        
                        F_r_i =  array(img_features['VOI'+str(timep)]).astype(float)
                        n_F_r_i, min_max_F_r_i, mean_F_r_i, var_F_r_i, skew_F_r_i, kurt_F_r_i = stats.describe(F_r_i)
                        self.allvar_F_r_i.append(var_F_r_i)
                    
                    data = array(data_deltaS)
                    #print data                        
                    
                    # create a set of Parameters
                    params = Parameters()
                    params.add('amp',   value= 10,  min=0)
                    params.add('alpha', value= 1, min=0) 
                    params.add('beta', value= 0.05, min=0.0001, max=0.9)
                    
                    # do fit, here with leastsq self.model
                    myfit = Minimizer(self.fcn2min,  params, fcn_args=(time_points,), fcn_kws={'data':data})
                    myfit.prepare_fit()
                    myfit.leastsq()
 
                    ####################################                              
                    # Calculate R-square: R_square = sum((y_fitted - y_mean)**2) / sum((y_data - y_mean)**2)
                    R_square = sum( (self.model - mean(data))**2 )/ sum( (data - mean(data))**2 )
                    #print "R^2:"
                    #print R_square
                    self.R_square_map[i,j,k] = R_square
                    
                    if 'amp' in featuresKeys:
                        amp = params['amp'].value
                        print "amp:"
                        print amp
                        self.amp_map[i,j,k] = amp
                    
                    if 'beta' in featuresKeys:
                        beta = params['beta'].value
                        self.beta_map[i,j,k] = beta
                        
                    if 'alpha' in featuresKeys:
                        alpha = params['alpha'].value
                        print "alpha:"
                        print alpha
                        self.alpha_map[i,j,k] = alpha
                        
                    if 'iAUC1' in featuresKeys:
                        iAUC1 = params['amp'].value *( ((1-exp(-params['beta'].value*time_points[1]))/params['beta'].value) + (exp((-params['alpha'].value+params['beta'].value)*time_points[1])-1)/(params['alpha'].value+params['beta'].value) )
                        print "iAUC1"
                        print iAUC1
                        self.iAUC1_map[i,j,k] = iAUC1
                        
                    if 'Slope_ini' in featuresKeys:
                        Slope_ini = params['amp'].value*params['alpha'].value
                        print "Slope_ini"
                        print Slope_ini
                        self.Slope_ini_map[i,j,k] = Slope_ini
                    
                    if 'Tpeak' in featuresKeys:
                        Tpeak = (1/params['alpha'].value)*log(1+(params['alpha'].value/params['beta'].value))
                        self.Tpeak_map[i,j,k] = Tpeak
                    
                    if 'Kpeak' in featuresKeys:
                        Kpeak = -params['amp'].value * params['alpha'].value * params['beta'].value
                        self.Kpeak_map[i,j,k] = Kpeak
                    
                    if 'SER' in featuresKeys:
                        SER = exp( (time_points[4]-time_points[1])*params['beta'].value) * ( (1-exp(-params['alpha'].value*time_points[1]))/(1-exp(-params['alpha'].value*time_points[4])) )
                        print "SER"
                        print SER
                        self.SER_map[i,j,k] = SER
                        
                    if 'maxCr' in featuresKeys:
                        print "Maximum Upate (Fii_1) = %d " %  self.maxCr
                        self.maxC_map[i,j,k] = self.maxCr
                        
                    if 'peakCr' in featuresKeys:
                        print "Peak Cr (Fii_2) = %d " %  self.peakCr
                        self.peakCr_map[i,j,k] = self.peakCr    
                        
                    if 'UptakeRate' in featuresKeys:
                        self.UptakeRate = float(self.maxCr/self.peakCr)    
                        print "Uptake rate (Fii_3) "
                        print self.UptakeRate
                        self.UptakeRate_map[i,j,k] = self.UptakeRate
                        
                    if 'washoutRate' in featuresKeys:          
                        if( self.peakCr == 4):
                            self.washoutRate = 0
                        else:
                            self.washoutRate = float( (self.maxCr - array(Crk['Cr'+str(4)]).astype(float))/(4-self.peakCr) )
                        print "WashOut rate (Fii_4) "
                        print self.washoutRate
                        self.washoutRate_map[i,j,k] = self.washoutRate
                        
                    if 'var_F_r_i' in featuresKeys:  
                        print("Variance F_r_i: {0:8.6f}".format( mean(self.allvar_F_r_i) ))
                        self.allvar_F_r_i_map[i,j,k] = mean(self.allvar_F_r_i)
                    
                    data_deltaS=[]
                    data_deltaS.append( 0 )
                    
        # convert feature maps to image
        if 'beta' in featuresKeys:       
            beta_map_stencil = self.convertfeatureMap2vtkImage(self.beta_map, self.imageStencil, 1000) 
            print path_outputFolder
            print caseLabeloutput
            # ## save mask as metafile image
            os.chdir(path_outputFolder)
            vtkmask_w = vtk.vtkMetaImageWriter()
            vtkmask_w.SetInput(beta_map_stencil )
            vtkmask_w.SetFileName( 'beta_'+os.sep+caseLabeloutput+'.mhd' )
            vtkmask_w.Write()
            vtkmask_w.Update()
            
            self.xImagePlaneWidget.SetWindowLevel(640,75)
            self.yImagePlaneWidget.SetWindowLevel(640,75)
            self.zImagePlaneWidget.SetWindowLevel(640,75)
            self.renderer1.Render()            
            self.visualize_map(beta_map_stencil)    
            
            
        if 'Tpeak' in featuresKeys:
            Tpeak_map_stencil = self.convertfeatureMap2vtkImage(self.Tpeak_map, self.imageStencil, 1) 
            print path_outputFolder
            print caseLabeloutput
            # ## save mask as metafile image
            os.chdir(path_outputFolder)
            vtkmask_w = vtk.vtkMetaImageWriter()
            vtkmask_w.SetInput( Tpeak_map_stencil )
            vtkmask_w.SetFileName( 'Tpeak_'+os.sep+caseLabeloutput+'.mhd' )
            vtkmask_w.Write()
            vtkmask_w.Update()
            
            self.xImagePlaneWidget.SetWindowLevel(240,35)
            self.yImagePlaneWidget.SetWindowLevel(240,35)
            self.zImagePlaneWidget.SetWindowLevel(240,35)
            self.renderer1.Render()
            self.visualize_map(Tpeak_map_stencil)    
        
        if 'Kpeak' in featuresKeys:
            Kpeak_map_stencil = self.convertfeatureMap2vtkImage(self.Kpeak_map, self.imageStencil, 1) 
            print path_outputFolder
            print caseLabeloutput
            # ## save mask as metafile image
            os.chdir(path_outputFolder)
            vtkmask_w = vtk.vtkMetaImageWriter()
            vtkmask_w.SetInput( Kpeak_map_stencil )
            vtkmask_w.SetFileName( 'Kpeak_'+os.sep+caseLabeloutput+'.mhd' )
            vtkmask_w.Write()
            vtkmask_w.Update()
            
            self.xImagePlaneWidget.SetWindowLevel(118,15)
            self.yImagePlaneWidget.SetWindowLevel(118,15)
            self.zImagePlaneWidget.SetWindowLevel(118,15)
            self.renderer1.Render()           
            self.visualize_map(Kpeak_map_stencil) 
            
               
        
        return
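
    # The three map-export blocks above repeat the same stencil/write/render
    # steps; a small hypothetical helper (not in the original) could factor
    # them out, e.g.
    # self._export_and_show_map(self.beta_map, 1000, 'beta', caseLabeloutput,
    #                           path_outputFolder, 640, 75):
    def _export_and_show_map(self, fmap, scale, prefix, caseLabeloutput,
                             path_outputFolder, window, level):
        stencil = self.convertfeatureMap2vtkImage(fmap, self.imageStencil, scale)
        os.chdir(path_outputFolder)
        writer = vtk.vtkMetaImageWriter()
        writer.SetInput(stencil)
        writer.SetFileName(prefix + '_' + os.sep + caseLabeloutput + '.mhd')
        writer.Write()
        writer.Update()
        for w in (self.xImagePlaneWidget, self.yImagePlaneWidget,
                  self.zImagePlaneWidget):
            w.SetWindowLevel(window, level)
        self.renderer1.Render()
        self.visualize_map(stencil)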
                     
        
        
Example #28
0
                      'data': data
                  })

myfit.prepare_fit()
#
for scale_covar in (True, False):
    myfit.scale_covar = scale_covar
    print '  ====  scale_covar = ', myfit.scale_covar, ' ==='
    for sigma in (0.1, 0.2, 0.23, 0.5):
        myfit.userkws['sigma'] = sigma

        p_fit['amp_g'].value = 10
        p_fit['cen_g'].value = 9
        p_fit['wid_g'].value = 1
        p_fit['line_slope'].value = 0.0
        p_fit['line_off'].value = 0.0

        myfit.leastsq()
        print '  sigma          = ', sigma
        print '  chisqr         = ', myfit.chisqr
        print '  reduced_chisqr = ', myfit.redchi

        report_errors(p_fit, modelpars=p_true, show_correl=False)
        print '  =============================='

# if HASPYLAB:
#     fit = residual(p_fit, x)
#     pylab.plot(x, fit, 'k-')
#     pylab.show()
#
Example #29
0
def feffit(paramgroup,
           datasets,
           rmax_out=10,
           path_outputs=True,
           _larch=None,
           **kws):
    """execute a Feffit fit: a fit of feff paths to a list of datasets

    Parameters:
    ------------
      paramgroup:   group containing parameters for fit
      datasets:     Feffit Dataset group or list of Feffit Dataset group.
      rmax_out:     maximum R value to calculate output arrays.
      path_outputs: Flag to set whether all Path outputs should be written.

    Returns:
    ---------
      a fit results group.  This will contain subgroups of:

        datasets: an array of FeffitDataSet groups used in the fit.
        params:   This will be identical to the input parameter group.
        fit:      an object which points to the low-level fit.

     Statistical parameters will be put into the params group.  Each
     dataset will have a 'data' and 'model' subgroup, each with arrays:
        k            wavenumber array of k
        chi          chi(k).
        kwin         window Omega(k) (length of input chi(k)).
        r            uniform array of R, out to rmax_out.
        chir         complex array of chi(R).
        chir_mag     magnitude of chi(R).
        chir_pha     phase of chi(R).
        chir_re      real part of chi(R).
        chir_im      imaginary part of chi(R).
    """
    def _resid(params, datasets=None, paramgroup=None, **kwargs):
        """ this is the residual function"""
        params2group(params, paramgroup)
        return concatenate([d._residual(paramgroup) for d in datasets])

    if isNamedClass(datasets, FeffitDataSet):
        datasets = [datasets]

    params = group2params(paramgroup)
    for ds in datasets:
        if not isNamedClass(ds, FeffitDataSet):
            print("feffit needs a list of FeffitDataSets")
            return
        ds.prepare_fit(params=params)

    fit = Minimizer(_resid,
                    params,
                    fcn_kws=dict(datasets=datasets, paramgroup=paramgroup),
                    scale_covar=True,
                    **kws)

    result = fit.leastsq()

    params2group(result.params, paramgroup)
    dat = concatenate(
        [d._residual(paramgroup, data_only=True) for d in datasets])

    n_idp = 0
    for ds in datasets:
        n_idp += ds.n_idp

    # here we rescale chi-square and reduced chi-square to n_idp
    npts = len(result.residual)
    chi_square = result.chisqr * n_idp * 1.0 / npts
    chi_reduced = chi_square / (n_idp * 1.0 - result.nvarys)
    rfactor = (result.residual**2).sum() / (dat**2).sum()
    # calculate 'aic', 'bic' rescaled to n_idp
    # note that neg2_loglikel is -2*log(likelihood)
    neg2_loglikel = n_idp * np.log(chi_square / n_idp)
    aic = neg2_loglikel + 2 * result.nvarys
    bic = neg2_loglikel + np.log(n_idp) * result.nvarys

    # With scale_covar = True, Minimizer() scales the uncertainties
    # by reduced chi-square assuming params.nfree is the correct value
    # for degrees-of-freedom. But n_idp-params.nvarys is a better measure,
    # so we rescale uncertainties here.

    covar = getattr(result, 'covar', None)
    # print("COVAR " , covar)
    if covar is not None:
        err_scale = (result.nfree / (n_idp - result.nvarys))
        for name in result.var_names:
            p = result.params[name]
            if isParameter(p) and p.vary:
                p.stderr *= sqrt(err_scale)

        # next, propagate uncertainties to constraints and path parameters.
        result.covar *= err_scale
        vsave, vbest = {}, []

        # 1. save current params
        for vname in result.var_names:
            par = result.params[vname]
            vsave[vname] = par
            vbest.append(par.value)

        # 2. get correlated uncertainties, set params accordingly
        uvars = correlated_values(vbest, result.covar)
        # 3. evaluate constrained params, save stderr
        for nam, obj in result.params.items():
            eval_stderr(obj, uvars, result.var_names, result.params)

        # 4. evaluate path params, save stderr
        for ds in datasets:
            for p in ds.pathlist:
                p.store_feffdat()
                for pname in ('degen', 's02', 'e0', 'ei', 'deltar', 'sigma2',
                              'third', 'fourth'):
                    obj = p.params[PATHPAR_FMT % (pname, p.label)]
                    eval_stderr(obj, uvars, result.var_names, result.params)

        # restore saved parameters again
        for vname in result.var_names:
            # setattr(params, vname, vsave[vname])
            params[vname] = vsave[vname]

        # clear any errors evaluating uncertainties
        if _larch is not None and (len(_larch.error) > 0):
            _larch.error = []

    # reset the parameters group with the newly updated uncertainties
    params2group(result.params, paramgroup)

    # here we create outputs arrays for chi(k), chi(r):
    for ds in datasets:
        ds.save_ffts(rmax_out=rmax_out, path_outputs=path_outputs)

    out = Group(name='feffit results',
                datasets=datasets,
                fitter=fit,
                fit_details=result,
                chi_square=chi_square,
                n_independent=n_idp,
                chi_reduced=chi_reduced,
                rfactor=rfactor,
                aic=aic,
                bic=bic,
                covar=covar)

    for attr in ('params', 'nvarys', 'nfree', 'ndata', 'var_names', 'nfev',
                 'success', 'errorbars', 'message', 'lmdif_message'):
        setattr(out, attr, getattr(result, attr, None))
    return out
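
# A short numeric sketch (hypothetical values) of the rescaling above: leastsq
# sees npts residual points, but only n_idp of them are statistically
# independent, so the fit statistics are rescaled before reporting.
npts, n_idp, nvarys, chisqr = 400, 25.0, 7, 180.0   # assumed example values
chi_square = chisqr * n_idp / npts                  # 11.25
chi_reduced = chi_square / (n_idp - nvarys)         # 0.625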
Example #30
0
    params.add('bigw2', value=bigw2, min=0, max=2 * np.pi)
    params.add('inc2', value=inc2, min=0, max=2 * np.pi)
    params.add('e2', value=0, vary=False)
    params.add('a2', value=a2, min=0)
    params.add('P2', value=period, vary=False)
    params.add('T2', value=T2, min=0)

    #params.add('pscale', value=1)

    #do fit, minimizer uses LM for least square fitting of model to data
    minner = Minimizer(triple_model,
                       params,
                       fcn_args=(xpos_all, ypos_all, t_all, error_maj_all,
                                 error_min_all, error_pa_all),
                       nan_policy='omit')
    result = minner.leastsq(xtol=1e-5, ftol=1e-5)
    params_inner.append([
        period, result.params['a2'], result.params['e2'], result.params['w2'],
        result.params['bigw2'], result.params['inc2'], result.params['T2']
    ])
    params_outer.append([
        result.params['P'], result.params['a'], result.params['e'],
        result.params['w'], result.params['bigw'], result.params['inc'],
        result.params['T']
    ])
    chi2.append(result.redchi)
params_inner = np.array(params_inner)
params_outer = np.array(params_outer)
chi2 = np.array(chi2)

plt.plot(params_inner[:, 0], 1 / chi2, 'o-')
def test_constraints(with_plot=True):
    with_plot = with_plot and WITHPLOT

    def residual(pars, x, sigma=None, data=None):
        yg = gaussian(x, pars['amp_g'], pars['cen_g'], pars['wid_g'])
        yl = lorentzian(x, pars['amp_l'], pars['cen_l'], pars['wid_l'])

        model =  yg +  yl + pars['line_off'] + x * pars['line_slope']
        if data is None:
            return model
        if sigma is None:
            return (model - data)
        return (model - data) / sigma


    n = 201
    xmin = 0.
    xmax = 20.0
    x = linspace(xmin, xmax, n)

    data = (gaussian(x, 21, 8.1, 1.2) +
            lorentzian(x, 10, 9.6, 2.4) +
            random.normal(scale=0.23,  size=n) +
            x*0.5)

    if with_plot:
        pylab.plot(x, data, 'r+')

    pfit = Parameters()
    pfit.add(name='amp_g',  value=10)
    pfit.add(name='cen_g',  value=9)
    pfit.add(name='wid_g',  value=1)

    pfit.add(name='amp_tot',  value=20)
    pfit.add(name='amp_l',  expr='amp_tot - amp_g')
    pfit.add(name='cen_l',  expr='1.5+cen_g')
    pfit.add(name='wid_l',  expr='2*wid_g')

    pfit.add(name='line_slope', value=0.0)
    pfit.add(name='line_off', value=0.0)

    sigma = 0.021  # estimate of data error (for all data points)

    myfit = Minimizer(residual, pfit,
                      fcn_args=(x,), fcn_kws={'sigma':sigma, 'data':data},
                      scale_covar=True)

    myfit.prepare_fit()
    init = residual(myfit.params, x)

    result = myfit.leastsq()

    print(' Nfev = ', result.nfev)
    print( result.chisqr, result.redchi, result.nfree)

    report_fit(result.params, min_correl=0.3)

    fit = residual(result.params, x)
    if with_plot:
        pylab.plot(x, fit, 'b-')
    assert(result.params['cen_l'].value == 1.5 + result.params['cen_g'].value)
    assert(result.params['amp_l'].value == result.params['amp_tot'].value - result.params['amp_g'].value)
    assert(result.params['wid_l'].value == 2 * result.params['wid_g'].value)

    # now, change fit slightly and re-run
    myfit.params['wid_l'].expr = '1.25*wid_g'
    result = myfit.leastsq()
    report_fit(result.params, min_correl=0.4)
    fit2 = residual(result.params, x)
    if with_plot:
        pylab.plot(x, fit2, 'k')
        pylab.show()

    assert(result.params['cen_l'].value == 1.5 + result.params['cen_g'].value)
    assert(result.params['amp_l'].value == result.params['amp_tot'].value - result.params['amp_g'].value)
    assert(result.params['wid_l'].value == 1.25 * result.params['wid_g'].value)
Example #32
0
	def __FitEvent(self):
		try:
			varyBlockedCurrent=True

			i0=np.abs(self.baseMean)
			i0sig=self.baseSD
			dt = 1000./self.Fs 	# time-step in ms.
			# edat=np.asarray( np.abs(self.eventData),  dtype='float64' )
			edat=self.dataPolarity*np.asarray( self.eventData,  dtype='float64' )

			blockedCurrent=min(edat)
			tauVal=dt

			estart 	= self.__eventStartIndex( self.__threadList( edat, range(0,len(edat)) ), i0, i0sig ) - 1
			eend 	= self.__eventEndIndex( self.__threadList( edat, range(0,len(edat)) ), i0, i0sig ) - 2

			# For long events, fix the blocked current to speed up the fit
			if (eend-estart) > 1000:
				blockedCurrent=np.mean(edat[estart+50:eend-50])

			# control numpy error reporting
			np.seterr(invalid='ignore', over='ignore', under='ignore')

			ts = np.array([ t*dt for t in range(0,len(edat)) ], dtype='float64')

			params=Parameters()

			# print self.absDataStartIndex

			params.add('mu1', value=estart * dt)
			params.add('mu2', value=eend * dt)
			params.add('a', value=(i0-blockedCurrent), vary=varyBlockedCurrent)
			params.add('b', value = i0)
			params.add('tau1', value = tauVal)

			if self.LinkRCConst:
				params.add('tau2', value = tauVal, expr='tau1')
			else:
				params.add('tau2', value = tauVal)


			optfit=Minimizer(self.__objfunc, params, fcn_args=(ts,edat,))
			optfit.prepare_fit()

			result=optfit.leastsq(xtol=self.FitTol,ftol=self.FitTol,maxfev=self.FitIters)

			# print optfit.params['b'].value, optfit.params['b'].value - optfit.params['a'].value, optfit.params['mu1'].value, optfit.params['mu2'].value
			if result.success:
				if result.params['mu1'].value < 0.0 or result.params['mu2'].value < 0.0:
					# print 'eInvalidFitParams1', optfit.params['b'].value, optfit.params['b'].value - optfit.params['a'].value, optfit.params['mu1'].value, optfit.params['mu2'].value
					self.rejectEvent('eInvalidResTime')
				# The start of the event is set past the length of the data
				elif result.params['mu1'].value > ts[-1]:
					# print 'eInvalidFitParams2', optfit.params['b'].value, optfit.params['b'].value - optfit.params['a'].value, optfit.params['mu1'].value, optfit.params['mu2'].value
					self.rejectEvent('eInvalidEventStart')
				else:
					self.mdOpenChCurrent 	= result.params['b'].value 
					self.mdBlockedCurrent	= result.params['b'].value - result.params['a'].value
					self.mdEventStart		= result.params['mu1'].value 
					self.mdEventEnd			= result.params['mu2'].value
					self.mdRCConst1			= result.params['tau1'].value
					self.mdRCConst2			= result.params['tau2'].value
					self.mdAbsEventStart	= self.mdEventStart + self.absDataStartIndex * dt

					self.mdBlockDepth		= self.mdBlockedCurrent/self.mdOpenChCurrent
					self.mdResTime			= self.mdEventEnd - self.mdEventStart
					
					self.mdRedChiSq			= result.chisqr/( np.var(result.residual) * (len(self.eventData) - result.nvarys -1) )

					# if (eend-estart) > 1000:
					# 	print blockedCurrent, self.mdBlockedCurrent, self.mdOpenChCurrent, self.mdResTime, self.mdRiseTime, self.mdRedChiSq, optfit.chisqr
					# if self.mdBlockDepth > self.BlockRejectRatio:
					# 	# print 'eBlockDepthHigh', optfit.params['b'].value, optfit.params['b'].value - optfit.params['a'].value, optfit.params['mu1'].value, optfit.params['mu2'].value
					# 	self.rejectEvent('eBlockDepthHigh')
						
					if math.isnan(self.mdRedChiSq):
						self.rejectEvent('eInvalidChiSq')
					if self.mdBlockDepth < 0 or self.mdBlockDepth > 1:
						self.rejectEvent('eInvalidBlockDepth')
					if self.mdRCConst1 <= 0 or self.mdRCConst2 <= 0:
						self.rejectEvent('eInvalidRCConstant')

					#print i0, i0sig, [optfit.params['a'].value, optfit.params['b'].value, optfit.params['mu1'].value, optfit.params['mu2'].value, optfit.params['tau'].value]
			else:
				# print optfit.message, optfit.lmdif_message
				self.rejectEvent('eFitConvergence')

		except KeyboardInterrupt:
			self.rejectEvent('eFitUserStop')
			raise
		except:
			# print optfit.message, optfit.lmdif_message
			self.rejectEvent('eFitFailure')
pfit.add(name='cen_g', value=9)
pfit.add(name='wid_g', value=1)
pfit.add(name='amp_tot', value=20)
pfit.add(name='amp_l', expr='amp_tot - amp_g')
pfit.add(name='cen_l', expr='1.5+cen_g')
pfit.add(name='wid_l', expr='2*wid_g')
pfit.add(name='line_slope', value=0.0)
pfit.add(name='line_off', value=0.0)

sigma = 0.021  # estimate of data error (for all data points)

myfit = Minimizer(residual, pfit,
                  fcn_args=(x,), fcn_kws={'sigma': sigma, 'data': data},
                  scale_covar=True)

myfit.prepare_fit()
init = residual(myfit.params, x)

if HASPYLAB:
    pylab.plot(x, init, 'b--')

result = myfit.leastsq()

report_fit(result)

fit = residual(result.params, x)

if HASPYLAB:
    pylab.plot(x, fit, 'k-')
    pylab.show()
Example #34
0
# print config JSON info
OutputFormatter.printConfig(configJson)

config = Config(configJson)

# retrieve params from config
equationModel = config.getEquationModel()
params = config.getParams()
data = config.getData()
exp = config.getExp()

OutputFormatter.printExperimentalData(data, exp)

minimizer = Minimizer(equationModel.residual, params, fcn_args=(data, exp, verbose))
out = minimizer.leastsq()

# show output
#lmfit.printfuncs.report_fit(out.params)

print(lmfit.fit_report(out))

# confidence
# ci = lmfit.conf_interval(minimizer, out)

# show output
# lmfit.printfuncs.report_ci(ci)

calc = equationModel.model(params, data, False)

# print results
Example #36
0
def pre_edge(energy, mu=None, group=None, e0=None, step=None,
             nnorm=2, nvict=0, pre1=None, pre2=-50,
             norm1=100, norm2=None, make_flat=True, _larch=None):
    """pre edge subtraction, normalization for XAFS

    This performs a number of steps:
       1. determine E0 (if not supplied) from max of deriv(mu)
       2. fit a line or low-order polynomial to the region below the edge
       3. fit a polynomial to the region above the edge
       4. extrapolate the two curves to E0 to determine the edge jump

    Arguments
    ----------
    energy:  array of x-ray energies, in eV, or group (see note)
    mu:      array of mu(E)
    group:   output group
    e0:      edge energy, in eV.  If None, it will be determined here.
    step:    edge jump.  If None, it will be determined here.
    pre1:    low E range (relative to E0) for pre-edge fit
    pre2:    high E range (relative to E0) for pre-edge fit
    nvict:   energy exponent to use for pre-edge fit.  See Notes.
    norm1:   low E range (relative to E0) for post-edge fit
    norm2:   high E range (relative to E0) for post-edge fit
    nnorm:   degree of polynomial (ie, nnorm+1 coefficients will be found) for
             post-edge normalization curve. Default=2 (quadratic), max=5
    make_flat: boolean (Default True) to calculate flattened output.


    Returns
    -------
      None

    The following attributes will be written to the output group:
        e0          energy origin
        edge_step   edge step
        norm        normalized mu(E)
        flat        flattened, normalized mu(E)
        pre_edge    determined pre-edge curve
        post_edge   determined post-edge, normalization curve
        dmude       derivative of mu(E)

    (if the output group is None, _sys.xafsGroup will be written to)

    Notes
    -----
     1 nvict gives an exponent to the energy term for the fits to the pre-edge
       and the post-edge region.  For the pre-edge, a line (m * energy + b) is
       fit to mu(energy)*energy**nvict over the pre-edge region,
       energy=[e0+pre1, e0+pre2].  For the post-edge, a polynomial of order
       nnorm will be fit to mu(energy)*energy**nvict of the post-edge region
       energy=[e0+norm1, e0+norm2].

     2 If the first argument is a Group, it must contain 'energy' and 'mu'.
       If it exists, group.e0 will be used as e0.
       See First Argument Group in Documentation
    """


    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                         fcn_name='pre_edge')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()

    pre_dat = preedge(energy, mu, e0=e0, step=step, nnorm=nnorm,
                      nvict=nvict, pre1=pre1, pre2=pre2, norm1=norm1,
                      norm2=norm2)


    group = set_xafsGroup(group, _larch=_larch)

    e0    = pre_dat['e0']
    norm  = pre_dat['norm']
    norm1 = pre_dat['norm1']
    norm2 = pre_dat['norm2']
    # generate flattened spectra, by fitting a quadratic to .norm
    # and removing that.
    flat = norm
    ie0 = index_nearest(energy, e0)
    p1 = index_of(energy, norm1+e0)
    p2 = index_nearest(energy, norm2+e0)
    if p2-p1 < 2:
        p2 = min(len(energy), p1 + 2)

    if make_flat and p2-p1 > 4:
        enx, mux = remove_nans2(energy[p1:p2], norm[p1:p2])
        # enx, mux = (energy[p1:p2], norm[p1:p2])
        fpars = Parameters()
        ncoefs = len(pre_dat['norm_coefs'])
        fpars.add('c0', value=0, vary=True)
        fpars.add('c1', value=0, vary=(ncoefs>1))
        fpars.add('c2', value=0, vary=(ncoefs>2))
        fit = Minimizer(flat_resid, fpars, fcn_args=(enx, mux))
        result = fit.leastsq(xtol=1.e-6, ftol=1.e-6)

        fc0 = result.params['c0'].value
        fc1 = result.params['c1'].value
        fc2 = result.params['c2'].value

        flat_diff   = fc0 + energy * (fc1 + energy * fc2)
        flat        = norm - (flat_diff  - flat_diff[ie0])
        flat[:ie0]  = norm[:ie0]


    group.e0 = e0
    group.norm = norm
    group.flat = flat
    group.dmude = np.gradient(mu)/np.gradient(energy)
    group.edge_step  = pre_dat['edge_step']
    group.pre_edge   = pre_dat['pre_edge']
    group.post_edge  = pre_dat['post_edge']

    group.pre_edge_details = Group()
    group.pre_edge_details.pre1   = pre_dat['pre1']
    group.pre_edge_details.pre2   = pre_dat['pre2']
    group.pre_edge_details.nnorm  = pre_dat['nnorm']
    group.pre_edge_details.norm1  = pre_dat['norm1']
    group.pre_edge_details.norm2  = pre_dat['norm2']
    group.pre_edge_details.nvict  = pre_dat['nvict']
    group.pre_edge_details.pre1_input  = pre_dat['pre1_input']
    group.pre_edge_details.norm2_input  = pre_dat['norm2_input']
    group.pre_edge_details.pre_slope  = pre_dat['precoefs'][0]
    group.pre_edge_details.pre_offset = pre_dat['precoefs'][1]

    for i in range(MAX_NNORM):
        if hasattr(group, 'norm_c%i' % i):
            delattr(group, 'norm_c%i' % i)
    for i, c in enumerate(pre_dat['norm_coefs']):
        setattr(group.pre_edge_details, 'norm_c%i' % i, c)
    return
Example #37
0
params1 = Parameters()
params1.add('a', value=10)
params1.add('b', value=10)
params1.add('c', value=10)

params2 = Parameters()
params2.add('a', value=10)
params2.add('b', value=10)
params2.add('c', value=10)

a, b, c = 2.5, 1.3, 0.8
x = np.linspace(0, 4, 50)
y = f([a, b, c], x)
data = y + 0.15*np.random.normal(size=len(x))

# fit without analytic derivative
min1 = Minimizer(func, params1, fcn_args=(x,), fcn_kws={'data': data})
out1 = min1.leastsq()
fit1 = func(out1.params, x)

# fit with analytic derivative
min2 = Minimizer(func, params2, fcn_args=(x,), fcn_kws={'data': data})
out2 = min2.leastsq(Dfun=dfunc, col_deriv=1)
fit2 = func(out2.params, x)

print('''Comparison of fit to exponential decay
with and without analytic derivatives, to
   model = a*exp(-b*x) + c
for a = %.2f, b = %.2f, c = %.2f
==============================================
Statistic/Parameter|   Without   | With      |
----------------------------------------------
N Function Calls   |   %3i       |   %3i     |
----------------------------------------------
''' % (a, b, c, out1.nfev, out2.nfev))
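
# f, func and dfunc are not shown in this fragment; minimal sketches consistent
# with the model a*exp(-b*x) + c used above (assumptions, not the original code):
def f(vars, x):
    return vars[0] * np.exp(-vars[1] * x) + vars[2]

def func(pars, x, data=None):
    model = f([pars['a'].value, pars['b'].value, pars['c'].value], x)
    if data is None:
        return model
    return model - data

def dfunc(pars, x, data=None):
    # rows are d(residual)/d(parameter), matching leastsq(Dfun=dfunc, col_deriv=1)
    a, b = pars['a'].value, pars['b'].value
    v = np.exp(-b * x)
    return np.array([v, -a * x * v, np.ones_like(x)])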
Example #38
0
class FitModel(object):
    """base class for fitting models

    only supports polynomial background (offset, slope, quad)

    """
    invalid_bkg_msg = """Warning: unrecoginzed background option '%s'
expected one of the following:
   %s
"""
    def __init__(self, background=None, **kws):
        self.params = Parameters()
        self.has_initial_guess = False
        self.bkg = None
        self.initialize_background(background=background, **kws)

    def initialize_background(self, background=None,
                              offset=0, slope=0, quad=0):
        """initialize background parameters"""
        if background is None:
            return
        if background not in VALID_BKGS:
            print( self.invalid_bkg_msg % (repr(background),
                                          ', '.join(VALID_BKGS)))

        kwargs = {'offset':offset}
        if background.startswith('line'):
            kwargs['slope'] = slope
        if background.startswith('quad'):
            kwargs['quad'] = quad

        self.bkg = PolyBackground(**kwargs)

        for nam, par in self.bkg.params.items():
            self.params[nam] = par

    def calc_background(self, x):
        if self.bkg is None:
            return 0
        return self.bkg.calculate(x)

    def __objective(self, params, y=None, x=None, dy=None, **kws):
        """fit objective function"""
        bkg = 0
        if x is not None: bkg = self.calc_background(x)
        if y is None:     y   = 0.0
        if dy is None:    dy  = 1.0
        model = self.model(self.params, x=x, dy=dy, **kws)
        return (model + bkg - y)/dy

    def model(self, params, x=None, **kws):
        raise NotImplementedError

    def guess_starting_values(self, y, x=None, **kws):
        raise NotImplementedError

    def fit_report(self, params=None, **kws):
        if params is None:
            params = self.params
        return lmfit.fit_report(params, **kws)

    def fit(self, y, x=None, dy=None, **kws):
        fcn_kws={'y':y, 'x':x, 'dy':dy}
        fcn_kws.update(kws)
        if not self.has_initial_guess:
            self.guess_starting_values(y, x=x, **kws)
        self.minimizer = Minimizer(self.__objective, self.params,
                                   fcn_kws=fcn_kws, scale_covar=True)
        self.minimizer.prepare_fit()
        self.init = self.model(self.params, x=x, **kws)
        self.minimizer.leastsq()
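
# PolyBackground and VALID_BKGS are referenced above but not defined in this
# fragment; a minimal sketch of what they might look like (an assumption, not
# the library's code; assumes numpy as np):
VALID_BKGS = ('constant', 'linear', 'quadratic')

class PolyBackground(object):
    def __init__(self, offset=0, slope=None, quad=None):
        self.params = Parameters()
        self.params.add('bkg_offset', value=offset)
        if slope is not None:
            self.params.add('bkg_slope', value=slope)
        if quad is not None:
            self.params.add('bkg_quad', value=quad)

    def calculate(self, x):
        out = self.params['bkg_offset'].value * np.ones_like(x)
        if 'bkg_slope' in self.params:
            out += self.params['bkg_slope'].value * x
        if 'bkg_quad' in self.params:
            out += self.params['bkg_quad'].value * x**2
        return out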
                                     temp_night_fit, delta_T_fit, T_star,
                                     spider_params)

print('Initializing Parameters')
initialParams = Parameters()
initialParams.add_many(
    ('nrg_ratio', np.max([nrg_ratio_fit, 0.0]), True, 0.0, 1.0),
    ('temp_night', temp_night_fit, True, 0.0, np.inf),
    ('delta_T', delta_T_fit, True, 0.0, np.inf))

from functools import partial

partial_residuals = partial(chisq_lmfit, args=args)

mle0 = Minimizer(partial_residuals, initialParams, nan_policy='omit')
fitResult = mle0.leastsq(initialParams)


def logprior_func(p):
    for key, val in p.items():
        # Uniform prior: reject parameter values outside their bounds
        if not (val.min <= val.value <= val.max): return -np.inf

    # Establish that the limb darkening parameters
    #  cannot sum to 1 or greater
    #  Kipping et al 201? and Espinoza et al 201?
    if 'u1' in p.keys() and 'u2' in p.keys():
        if p['u1'] + p['u2'] >= 1: return -np.inf

    return 0
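
# A short usage sketch for logprior_func (hypothetical parameters):
#   p = Parameters()
#   p.add('u1', value=0.3, min=0.0, max=1.0)
#   p.add('u2', value=0.4, min=0.0, max=1.0)
#   logprior_func(p)   # -> 0: both in bounds and u1 + u2 < 1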
def test_constraints():
    def residual(pars, x, sigma=None, data=None):
        yg = gauss(x, pars['amp_g'].value, pars['cen_g'].value,
                   pars['wid_g'].value)
        yl = loren(x, pars['amp_l'].value, pars['cen_l'].value,
                   pars['wid_l'].value)

        slope = pars['line_slope'].value
        offset = pars['line_off'].value
        model = yg + yl + offset + x * slope
        if data is None:
            return model
        if sigma is None:
            return (model - data)
        return (model - data) / sigma

    n = 601
    xmin = 0.
    xmax = 20.0
    x = linspace(xmin, xmax, n)

    data = (gauss(x, 21, 8.1, 1.2) + loren(x, 10, 9.6, 2.4) +
            random.normal(scale=0.23, size=n) + x * 0.5)

    pfit = [
        Parameter(name='amp_g', value=10),
        Parameter(name='cen_g', value=9),
        Parameter(name='wid_g', value=1),
        Parameter(name='amp_tot', value=20),
        Parameter(name='amp_l', expr='amp_tot - amp_g'),
        Parameter(name='cen_l', expr='1.5+cen_g'),
        Parameter(name='wid_l', expr='2*wid_g'),
        Parameter(name='line_slope', value=0.0),
        Parameter(name='line_off', value=0.0)
    ]

    sigma = 0.021  # estimate of data error (for all data points)

    myfit = Minimizer(residual,
                      pfit,
                      fcn_args=(x, ),
                      fcn_kws={
                          'sigma': sigma,
                          'data': data
                      },
                      scale_covar=True)

    myfit.prepare_fit()
    init = residual(myfit.params, x)

    myfit.leastsq()

    print(' Nfev = ', myfit.nfev)
    print(myfit.chisqr, myfit.redchi, myfit.nfree)

    report_fit(myfit.params)
    pfit = myfit.params
    fit = residual(myfit.params, x)
    assert (pfit['cen_l'].value == 1.5 + pfit['cen_g'].value)
    assert (pfit['amp_l'].value == pfit['amp_tot'].value - pfit['amp_g'].value)
    assert (pfit['wid_l'].value == 2 * pfit['wid_g'].value)
def residual(pars, x, data):
    model = (gaussian(x, pars['amp_g'].value, pars['cen_g'].value,
                      pars['wid_g'].value) +
             lorentzian(x, pars['amp_l'].value, pars['cen_l'].value,
                        pars['wid_l'].value))
    return (model - data)


n = 601
random.seed(0)
x = linspace(0, 20.0, n)

data = (gaussian(x,   21, 6.1, 1.2) +
        lorentzian(x, 10, 9.6, 1.3) +
        random.normal(scale=0.1,  size=n))

pfit = Parameters()
pfit.add(name='amp_g',  value=10)
pfit.add(name='amp_l',  value=10)
pfit.add(name='cen_g',  value=5)
pfit.add(name='peak_split',  value=2.5, min=0, max=5, vary=True)
pfit.add(name='cen_l',  expr='peak_split+cen_g')
pfit.add(name='wid_g',  value=1)
pfit.add(name='wid_l',  expr='wid_g')

mini = Minimizer(residual, pfit, fcn_args=(x, data))
out  = mini.leastsq()

report_fit(out.params)

best_fit = data + out.residual
plt.plot(x, data, 'bo')
plt.plot(x, best_fit, 'r--')
plt.show()
def test_constraints(with_plot=True):
    with_plot = with_plot and WITHPLOT

    def residual(pars, x, sigma=None, data=None):
        yg = gaussian(x, pars['amp_g'], pars['cen_g'], pars['wid_g'])
        yl = lorentzian(x, pars['amp_l'], pars['cen_l'], pars['wid_l'])

        model = yg + yl + pars['line_off'] + x * pars['line_slope']
        if data is None:
            return model
        if sigma is None:
            return (model - data)
        return (model - data) / sigma

    n = 201
    xmin = 0.
    xmax = 20.0
    x = linspace(xmin, xmax, n)

    data = (gaussian(x, 21, 8.1, 1.2) + lorentzian(x, 10, 9.6, 2.4) +
            random.normal(scale=0.23, size=n) + x * 0.5)

    if with_plot:
        pylab.plot(x, data, 'r+')

    pfit = Parameters()
    pfit.add(name='amp_g', value=10)
    pfit.add(name='cen_g', value=9)
    pfit.add(name='wid_g', value=1)

    pfit.add(name='amp_tot', value=20)
    pfit.add(name='amp_l', expr='amp_tot - amp_g')
    pfit.add(name='cen_l', expr='1.5+cen_g')
    pfit.add(name='wid_l', expr='2*wid_g')

    pfit.add(name='line_slope', value=0.0)
    pfit.add(name='line_off', value=0.0)

    sigma = 0.021  # estimate of data error (for all data points)

    myfit = Minimizer(residual,
                      pfit,
                      fcn_args=(x, ),
                      fcn_kws={
                          'sigma': sigma,
                          'data': data
                      },
                      scale_covar=True)

    myfit.prepare_fit()
    init = residual(myfit.params, x)

    result = myfit.leastsq()

    print(' Nfev = ', result.nfev)
    print(result.chisqr, result.redchi, result.nfree)

    report_fit(result.params, min_correl=0.3)

    fit = residual(result.params, x)
    if with_plot:
        pylab.plot(x, fit, 'b-')
    assert (result.params['cen_l'].value == 1.5 + result.params['cen_g'].value)
    assert (result.params['amp_l'].value == result.params['amp_tot'].value -
            result.params['amp_g'].value)
    assert (result.params['wid_l'].value == 2 * result.params['wid_g'].value)

    # now, change fit slightly and re-run
    myfit.params['wid_l'].expr = '1.25*wid_g'
    result = myfit.leastsq()
    report_fit(result.params, min_correl=0.4)
    fit2 = residual(result.params, x)
    if with_plot:
        pylab.plot(x, fit2, 'k')
        pylab.show()

    assert (result.params['cen_l'].value == 1.5 + result.params['cen_g'].value)
    assert (result.params['amp_l'].value == result.params['amp_tot'].value -
            result.params['amp_g'].value)
    assert (result.params['wid_l'].value == 1.25 *
            result.params['wid_g'].value)
    def extractfeatures_inside(self, DICOMImages, image_pos_pat, image_ori_pat, series_path, phases_series, VOI_mesh):
        """ Start pixVals for collection pixel values at VOI """
        pixVals = []
        deltaS = {}

        # necessary to read point coords
        VOIPnt = [0, 0, 0]
        ijk = [0, 0, 0]
        pco = [0, 0, 0]

        for i in range(len(DICOMImages)):
            abspath_PhaseID = series_path + os.sep + str(phases_series[i])
            print phases_series[i]

            # Get total number of files
            load = Inputs_init()
            [len_listSeries_files, FileNms_slices_sorted_stack] = load.ReadDicomfiles(abspath_PhaseID)
            mostleft_slice = FileNms_slices_sorted_stack.slices[0]

            # Get dicom header, retrieve
            dicomInfo_series = dicom.read_file(abspath_PhaseID + os.sep + str(mostleft_slice))

            # (0008,0031) AT S Series Time            # hh.mm.ss.frac
            seriesTime = str(dicomInfo_series[0x0008, 0x0031].value)
            # (0008,0033) AT S Image Time             # hh.mm.ss.frac
            imageTime = str(dicomInfo_series[0x0008, 0x0033].value)

            # (0008,0032) AT S Acquisition Time       # hh.mm.ss.frac
            ti = str(dicomInfo_series[0x0008, 0x0032].value)

            acquisitionTimepoint = datetime.time(hour=int(ti[0:2]), minute=int(ti[2:4]), second=int(ti[4:6]))
            self.timepoints.append(datetime.datetime.combine(datetime.date.today(), acquisitionTimepoint))

            # find mapping to Dicom space
            [transformed_image, transform_cube] = Display().dicomTransform(DICOMImages[i], image_pos_pat, image_ori_pat)

            ### Get inside of VOI
            [VOI_scalars, VOIdims] = self.createMaskfromMesh(VOI_mesh, transformed_image)
            print "\n VOIdims"
            print VOIdims

            # get non zero elements
            image_scalars = transformed_image.GetPointData().GetScalars()
            numpy_VOI_imagedata = vtk_to_numpy(image_scalars)

            numpy_VOI_imagedata = numpy_VOI_imagedata.reshape(VOIdims[2], VOIdims[1], VOIdims[0])
            numpy_VOI_imagedata = numpy_VOI_imagedata.transpose(2, 1, 0)

            print "Shape of VOI_imagedata: "
            print numpy_VOI_imagedata.shape

            #################### HERE GET IT AND MASK IT OUT
            self.nonzeroVOIextracted = nonzero(VOI_scalars)
            print self.nonzeroVOIextracted

            VOI_imagedata = numpy_VOI_imagedata[self.nonzeroVOIextracted]

            print "shape of VOI_imagedata  Clipped:"
            print VOI_imagedata.shape

            for j in range(len(VOI_imagedata)):
                pixValx = VOI_imagedata[j]
                pixVals.append(pixValx)

            # Now collect pixVals
            print "Saving %s" % "delta" + str(i)
            deltaS["delta" + str(i)] = pixVals
            pixVals = []

        print self.timepoints

        # Collecting timepoints in proper format
        t_delta = []
        t_delta.append(0)
        total_time = 0
        for i in range(len(DICOMImages) - 1):
            current_time = self.timepoints[i + 1]
            previous_time = self.timepoints[i]
            difference_time = current_time - previous_time
            timestop = divmod(difference_time.total_seconds(), 60)
            t_delta.append(t_delta[i] + timestop[0] + timestop[1] * (1.0 / 60))
            total_time = total_time + timestop[0] + timestop[1] * (1.0 / 60)

        # finally print t_delta
        print t_delta
        t = array(t_delta)
        print "total_time"
        print total_time

        ##############################################################
        # Finished sampling deltaS
        # APply lmfit to deltaS
        # first sample the mean
        data_deltaS = []
        t_deltaS = []
        mean_deltaS = []
        sd_deltaS = []
        se_deltaS = []
        n_deltaS = []

        # append So and to
        data_deltaS.append(0)
        t_deltaS.append(0)
        mean_deltaS.append(mean(deltaS["delta0"]))
        sd_deltaS.append(0)
        se_deltaS.append(0)
        n_deltaS.append(len(deltaS["delta0"]))

        for k in range(1, len(DICOMImages)):
            deltaS_i = (mean(array(deltaS["delta" + str(k)]).astype(float)) - mean(deltaS["delta0"])) / mean(
                deltaS["delta0"]
            )
            data_deltaS.append(deltaS_i)
            t_deltaS.append(k)
            print "delta" + str(k)
            print data_deltaS[k]

            ##############################################################
            # Calculate data_error
            # estimate the population mean and SD from our samples to find SE
            # SE tells us how precisely the sample mean estimates the population mean.
            mean_deltaS_i = mean(array(deltaS["delta" + str(k)]))
            std_deltaS_i = std(array(deltaS["delta" + str(k)]))
            n_deltaS_i = len(array(deltaS["delta" + str(k)]))

            sd_deltaS.append(std_deltaS_i)
            mean_deltaS.append(mean_deltaS_i)

            # Standard Error of the mean SE
            # the smaller the variability in the data, the more confident we are that one value (the mean) accurately reflects them.
            se_deltaS.append(std_deltaS_i / sqrt(n_deltaS_i))
            n_deltaS.append(n_deltaS_i)

        # make array for data_deltaS
        data = array(data_deltaS)

        print "\n================\nMean and SE (i.e VOI sample data)"
        print mean_deltaS
        print se_deltaS

        # create a set of Parameters
        params = Parameters()
        params.add("amp", value=10, min=0)
        params.add("alpha", value=1, min=0)
        params.add("beta", value=0.05, min=0.0001, max=0.9)

        # do fit, here with leastsq model
        # define objective function: returns the array to be minimized
        def fcn2min(params, t, data):
            global model, model_res, x
            """ model EMM for Bilateral DCE-MRI, subtract data"""
            # unpack parameters:
            #  extract .value attribute for each parameter
            amp = params["amp"].value  # Upper limit of deltaS
            alpha = params["alpha"].value  # rate of signal increase min-1
            beta = params["beta"].value  # rate of signal decrease min-1

            model = amp * (1 - exp(-alpha * t)) * exp(-beta * t)

            x = linspace(0, t[4], 101)
            model_res = amp * (1 - exp(-alpha * x)) * exp(-beta * x)

            return model - data

        #####
        myfit = Minimizer(fcn2min, params, fcn_args=(t,), fcn_kws={"data": data})
        myfit.prepare_fit()
        myfit.leastsq()

        # On a successful fit using the leastsq method, several goodness-of-fit statistics
        # and values related to the uncertainty in the fitted variables will be calculated
        print "myfit.success"
        print myfit.success
        print "myfit.residual"
        print myfit.residual
        print "myfit.chisqr"
        print myfit.chisqr
        print "myfit.redchi"
        print myfit.redchi

        # calculate final result
        final = data + myfit.residual
        # write error report
        report_errors(params)

        # Calculate R-square
        # R_square = sum((y_fitted - y_mean)**2) / sum((y_data - y_mean)**2)
        R_square = sum((model - mean(data)) ** 2) / sum((data - mean(data)) ** 2)
        print "R^2"
        print R_square

        self.amp = params["amp"].value
        self.alpha = params["alpha"].value
        self.beta = params["beta"].value

        ##################################################
        # Now Calculate Extract parameters from model
        self.iAUC1 = params["amp"].value * (
            ((1 - exp(-params["beta"].value * t[1])) / params["beta"].value)
            + (exp((-params["alpha"].value + params["beta"].value) * t[1]) - 1)
            / (params["alpha"].value + params["beta"].value)
        )
        print "iAUC1"
        print self.iAUC1

        self.Slope_ini = params["amp"].value * params["alpha"].value
        print "Slope_ini"
        print self.Slope_ini

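        # Tpeak is where dS/dt = 0 for S(t) = amp*(1-exp(-alpha*t))*exp(-beta*t):
        # alpha*exp(-alpha*t) = beta*(1-exp(-alpha*t))  =>  t = (1/alpha)*log(1 + alpha/beta)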
        self.Tpeak = (1 / params["alpha"].value) * log(1 + (params["alpha"].value / params["beta"].value))
        print "Tpeak"
        print self.Tpeak

        self.Kpeak = -params["amp"].value * params["alpha"].value * params["beta"].value
        print "Kpeak"
        print self.Kpeak

        self.SER = exp((t[4] - t[1]) * params["beta"].value) * (
            (1 - exp(-params["alpha"].value * t[1])) / (1 - exp(-params["alpha"].value * t[4]))
        )
        print "SER"
        print self.SER

        ##################################################
        # Now Calculate enhancement Kinetic based features
        # Based on the course of signal intensity within the lesion
        print "\n Saving %s" % "Crk"
        So = array(deltaS["delta0"]).astype(float)
        Crk = {"Cr0": mean(So)}
        C = {}
        Carray = []

        for k in range(1, len(DICOMImages)):
            Sk = array(deltaS["delta" + str(k)]).astype(float)
            Cr = 0
            for j in range(len(So)):
                # extract average enhancement over the lesion at each time point
                Cr = Cr + (Sk[j] - So[j]) / So[j]
                Carray.append((Sk[j] - So[j]) / So[j])

            # compile
            C["C" + str(k)] = Carray
            Crk["Cr" + str(k)] = Cr / len(Sk)

        # Extract Fii_1
        for k in range(1, 5):
            currentCr = array(Crk["Cr" + str(k)]).astype(float)
            print currentCr
            if self.maxCr < currentCr:
                self.maxCr = float(currentCr)
                self.peakCr = int(k)

        print "Maximum Upate (Fii_1) = %d " % self.maxCr
        print "Peak Cr (Fii_2) = %d " % self.peakCr

        # Uptake rate
        self.UptakeRate = float(self.maxCr / self.peakCr)
        print "Uptake rate (Fii_3) "
        print self.UptakeRate

        # WashOut Rate
        if self.peakCr == 4:
            self.washoutRate = 0
        else:
            self.washoutRate = float((self.maxCr - array(Crk["Cr" + str(4)]).astype(float)) / (4 - self.peakCr))
        print "WashOut rate (Fii_4) "
        print self.washoutRate

        ##################################################
        # Now Calculate enhancement-variance Kinetic based features
        # Based on Crk['Cr'+str(k)] = Cr/len(Sk)
        print "\n Saving %s" % "Vrk"
        Vrk = {}

        for k in range(1, 5):
            Ci = array(C["C" + str(k)]).astype(float)
            Cri = array(Crk["Cr" + str(k)]).astype(float)
            Vr = 0
            for j in range(len(Ci)):
                # accumulate squared deviation from the mean enhancement Cri
                Vr = Vr + (Ci[j] - Cri) ** 2
            # compile
            Vrk["Vr" + str(k)] = Vr / (len(Ci) - 1)

        # Extract Fiii_1
        for k in range(1, 5):
            currentVr = array(Vrk["Vr" + str(k)]).astype(float)
            if self.maxVr < currentVr:
                print currentVr
                self.maxVr = float(currentVr)
                self.peakVr = int(k)

        print "Maximum Variation of enhan (Fiii_1) = %d " % self.maxVr
        print "Peak Vr (Fii_2) = %d " % self.peakVr

        # Vr_increasingRate
        self.Vr_increasingRate = self.maxVr / self.peakVr
        print "Vr_increasingRate (Fiii_3)"
        print self.Vr_increasingRate

        # Vr_decreasingRate
        if self.peakVr == 4:
            self.Vr_decreasingRate = 0
        else:
            self.Vr_decreasingRate = float((self.maxVr - array(Vrk["Vr" + str(4)]).astype(float)) / (4 - self.peakVr))
        print "Vr_decreasingRate (Fiii_4) "
        print self.Vr_decreasingRate

        # Vr_post_1
        self.Vr_post_1 = float(array(Vrk["Vr" + str(1)]).astype(float))
        print "Vr_post_1 (Fiii_5)"
        print self.Vr_post_1
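
        # Vectorized alternative (a sketch, assuming every deltaS['deltaK']
        # list has the same length as deltaS['delta0']): the Cr/Vr loops above
        # reduce to a few numpy array expressions per time point.
        def _cr_vr_vectorized(deltaS, k):
            S0 = array(deltaS["delta0"]).astype(float)
            Sk = array(deltaS["delta" + str(k)]).astype(float)
            rel = (Sk - S0) / S0      # per-voxel relative enhancement
            cr = rel.mean()           # matches Crk['Cr'+str(k)]
            vr = rel.var(ddof=1)      # matches Vrk['Vr'+str(k)] (sample variance)
            return cr, vr

        # e.g.: cr1, vr1 = _cr_vr_vectorized(deltaS, 1)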

        ##################################################
        # organize into dataframe
        self.dynamicEMM_inside = DataFrame(
            data=array(
                [
                    [
                        self.amp,
                        self.alpha,
                        self.beta,
                        self.iAUC1,
                        self.Slope_ini,
                        self.Tpeak,
                        self.Kpeak,
                        self.SER,
                        self.maxCr,
                        self.peakCr,
                        self.UptakeRate,
                        self.washoutRate,
                        self.maxVr,
                        self.peakVr,
                        self.Vr_increasingRate,
                        self.Vr_decreasingRate,
                        self.Vr_post_1,
                    ]
                ]
            ),
            columns=[
                "A.inside",
                "alpha.inside",
                "beta.inside",
                "iAUC1.inside",
                "Slope_ini.inside",
                "Tpeak.inside",
                "Kpeak.inside",
                "SER.inside",
                "maxCr.inside",
                "peakCr.inside",
                "UptakeRate.inside",
                "washoutRate.inside",
                "maxVr.inside",
                "peakVr.inside",
                "Vr_increasingRate.inside",
                "Vr_decreasingRate.inside",
                "Vr_post_1.inside",
            ],
        )

        #############################################################
        # try to plot results
        pylab.figure()
        pylab.errorbar(t, data, yerr=se_deltaS, fmt="ro", label="data+SE")  # data 'ro' red dots as markers
        pylab.plot(t, final, "b+", label="data+residuals")  # data+residuals 'b+' blue pluses
        pylab.plot(t, model, "b", label="model")  # model fit 'b' blue
        pylab.plot(x, model_res, "k", label="model (fine grid)")  # fitted curve on a fine grid, 'k' black
        pylab.xlabel(" post-contrast time (min)")
        pylab.ylabel("delta S(t)")
        pylab.legend()

        return self.dynamicEMM_inside
    def extractfeatures_contour(self, DICOMImages, image_pos_pat, image_ori_pat, series_path, phases_series, VOI_mesh):
        """ Start pixVals for collection pixel values at VOI """
        pixVals = []
        deltaS = {}
        
        # necessary to read point coords
        VOIPnt = [0,0,0]
        ijk = [0,0,0]
        pco = [0,0,0]
        
        for i in range(len(DICOMImages)):
            abspath_PhaseID = series_path+os.sep+str(phases_series[i]) 
            print phases_series[i]
             
            # Get total number of files
            [len_listSeries_files, FileNms_slices_sorted_stack] = processDicoms.ReadDicomfiles(abspath_PhaseID)
            mostleft_slice = FileNms_slices_sorted_stack.slices[0]
            
            # Get dicom header, retrieve
            dicomInfo_series = dicom.read_file(abspath_PhaseID+os.sep+str(mostleft_slice)) 
            
            # (0008,0031) TM Series Time            # hhmmss.frac
            seriesTime = str(dicomInfo_series[0x0008,0x0031].value) 
            # (0008,0033) TM Image Time             # hhmmss.frac
            imageTime = str(dicomInfo_series[0x0008,0x0033].value)
            
            # (0008,0032) TM Acquisition Time       # hhmmss.frac
            ti = str(dicomInfo_series[0x0008,0x0032].value) 
            
            acquisitionTimepoint = datetime.time(hour=int(ti[0:2]), minute=int(ti[2:4]), second=int(ti[4:6]))
            self.timepoints.append( datetime.datetime.combine(datetime.date.today(), acquisitionTimepoint) )
            
            # find mapping to Dicom space  
            [transformed_image, transform_cube] = Display().dicomTransform(DICOMImages[i], image_pos_pat, image_ori_pat)
 
            for j in range( VOI_mesh.GetNumberOfPoints() ):
                VOI_mesh.GetPoint(j, VOIPnt)      
                
                # extract pixID at location VOIPnt
                pixId = transformed_image.FindPoint(VOIPnt[0], VOIPnt[1], VOIPnt[2])
                im_pt = [0,0,0]
                
                transformed_image.GetPoint(pixId,im_pt)           
                inorout = transformed_image.ComputeStructuredCoordinates( im_pt, ijk, pco)
                if inorout != 0:  # the point falls inside the structured image extent
                    pixValx = transformed_image.GetScalarComponentAsFloat( ijk[0], ijk[1], ijk[2], 0)
                    pixVals.append(pixValx)
                        
            # Now collect pixVals
            print "Saving %s" % 'delta'+str(i)
            deltaS['delta'+str(i)] = pixVals
            pixVals = []
                    
        print self.timepoints
        
        # Collecting timepoints in proper format
        t_delta = []
        t_delta.append(0)
        total_time = 0
        for i in range(len(DICOMImages)-1):
            current_time = self.timepoints[i+1]
            previous_time = self.timepoints[i]
            difference_time = current_time - previous_time
            timestop = divmod(difference_time.total_seconds(), 60)  # (minutes, seconds)
            t_delta.append( t_delta[i] + timestop[0] + timestop[1]*(1./60) )
            total_time = total_time + timestop[0] + timestop[1]*(1./60)
            
        # finally print t_delta
        print t_delta
        t = array(t_delta)
        print "total_time"
        print total_time
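
        # A small helper sketch (an assumption about the format, not original
        # code): DICOM TM values are 'HHMMSS.FFFFFF'; the fixed slicing above
        # drops the fractional seconds, which a strptime-based parser keeps.
        def _parse_dicom_tm(tm):
            fmt = '%H%M%S.%f' if '.' in tm else '%H%M%S'
            return datetime.datetime.strptime(tm.strip(), fmt).time()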
        
        ##############################################################
        # Finished sampling deltaS
        # Apply lmfit to deltaS
        # first sample the mean
        data_deltaS = []; t_deltaS = []; mean_deltaS = []; sd_deltaS = []; se_deltaS = []; n_deltaS = []
        
        # append S0 and t0
        data_deltaS.append( 0 )       
        t_deltaS.append(0)
        mean_deltaS.append( mean(deltaS['delta0']) )
        sd_deltaS.append(0)
        se_deltaS.append(0)
        n_deltaS.append( len(deltaS['delta0']) )
        
        for k in range(1,len(DICOMImages)):
            deltaS_i =  ( mean(array(deltaS['delta'+str(k)]).astype(float)) -  mean(deltaS['delta0']) )/  mean(deltaS['delta0'])
            data_deltaS.append( deltaS_i )
            t_deltaS.append(k)
            print 'delta'+str(k)
            print  data_deltaS[k]
            
            ##############################################################
            # Calculate data error
            # estimate the population mean and SD from our sample to find the SE;
            # the SD describes the spread of individual scores, while the SE
            # describes how far the sample mean is likely to be from the true mean.
            mean_deltaS_i = mean(array(deltaS['delta'+str(k)]))
            std_deltaS_i = std(array(deltaS['delta'+str(k)]))
            n_deltaS_i = len(array(deltaS['delta'+str(k)]))
                
            sd_deltaS.append( std_deltaS_i )
            mean_deltaS.append( mean_deltaS_i )
            
            # Standard Error of the mean SE
            # the smaller the variability in the data, the more confident we are that one value (the mean) accurately reflects them.
            se_deltaS.append(std_deltaS_i/sqrt(n_deltaS_i))
            n_deltaS.append(n_deltaS_i)
                        
        # make array for data_deltaS
        data = array(data_deltaS)
        
        print "\n================\nMean and SE (i.e VOI sample data)"
        print mean_deltaS
        print se_deltaS
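
        # Note (a sketch flagging an assumption in the loop above): numpy's
        # std() defaults to the population SD (ddof=0); the sample SD used in
        # the usual SE formula divides by n-1 instead.
        def _sem(values):
            v = array(values).astype(float)
            return v.std(ddof=1) / sqrt(len(v))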
        
        # create a set of Parameters
        params = Parameters()
        params.add('amp',   value= 10,  min=0)
        params.add('alpha', value= 1, min=0) 
        params.add('beta', value= 0.05, min=0.0001, max=0.9)
        
        # do fit, here with leastsq model
        # define objective function: returns the array to be minimized
        def fcn2min(params, t, data):
            """ model EMM for Bilateral DCE-MRI, subtract data """
            global model, model_res, x  # stash the model curves for the plots/statistics below
            # unpack parameters: extract the .value attribute for each parameter
            amp = params['amp'].value        # upper limit of deltaS
            alpha = params['alpha'].value    # rate of signal increase, min^-1
            beta = params['beta'].value      # rate of signal decrease, min^-1

            model = amp * (1 - exp(-alpha*t)) * exp(-beta*t)

            # model on a fine grid, for plotting the fitted curve
            x = linspace(0, t[4], 101)
            model_res = amp * (1 - exp(-alpha*x)) * exp(-beta*x)

            return model - data
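
        # The globals above expose the last model evaluation for plotting and
        # statistics; an alternative sketch (not the author's method) is to
        # re-evaluate the model after the fit from the fitted parameters:
        def emm_curve(pars, tt):
            return (pars['amp'].value * (1 - exp(-pars['alpha'].value * tt))
                    * exp(-pars['beta'].value * tt))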
        
        #####
        myfit = Minimizer(fcn2min,  params, fcn_args=(t,), fcn_kws={'data':data})
        myfit.prepare_fit()
        myfit.leastsq()
            
        # On a successful fit using the leastsq method, several goodness-of-fit statistics
        # and values related to the uncertainty in the fitted variables will be calculated
        print "myfit.success"
        print myfit.success
        print "myfit.residual"
        print myfit.residual
        print "myfit.chisqr"
        print myfit.chisqr
        print "myfit.redchi"
        print myfit.redchi
            
        # final result at the data points (unused in this method's plot)
        # final = data + myfit.residual
        # write error report
        report_errors(params)
        
        # Calculate R-square
        # R_square = sum((y_fitted - y_mean)**2) / sum((y_data - y_mean)**2)
        R_square = sum( (model - mean(data))**2 )/ sum( (data - mean(data))**2 )
        print "R^2"
        print R_square
        
        self.amp = params['amp'].value
        self.alpha = params['alpha'].value
        self.beta = params['beta'].value
        
        ##################################################
        # Now extract derived parameters from the fitted model
        # iAUC1: area under the model curve from 0 to t[1]
        self.iAUC1 = params['amp'].value *( ((1-exp(-params['beta'].value*t[1]))/params['beta'].value) + (exp(-(params['alpha'].value+params['beta'].value)*t[1])-1)/(params['alpha'].value+params['beta'].value) )
        print "iAUC1"
        print self.iAUC1
        
        self.Slope_ini = params['amp'].value*params['alpha'].value
        print "Slope_ini"
        print self.Slope_ini
    
        self.Tpeak = (1/params['alpha'].value)*log(1+(params['alpha'].value/params['beta'].value))
        print "Tpeak"
        print self.Tpeak
    
        self.Kpeak = -params['amp'].value * params['alpha'].value * params['beta'].value
        print "Kpeak"
        print self.Kpeak
    
        self.SER = exp( (t[4]-t[1])*params['beta'].value) * ( (1-exp(-params['alpha'].value*t[1]))/(1-exp(-params['alpha'].value*t[4])) )
        print "SER"
        print self.SER
        
        ##################################################
        # Now Calculate enhancement Kinetic based features
        # Based on the course of signal intensity within the lesion
        print "\n Saving %s" % 'Crk'
        So = array(deltaS['delta0']).astype(float)
        Crk = {'Cr0': mean(So)}  
        C = {}
        
        for k in range(1,len(DICOMImages)):
            Sk = array(deltaS['delta'+str(k)]).astype(float)
            print Sk
            Cr = 0
            Carray = []
            for j in range( len(So) ):
                # relative enhancement of each voxel at time point k
                Cr = Cr + (Sk[j] - So[j])/So[j]
                Carray.append((Sk[j] - So[j])/So[j])
                
            # compile
            C['C'+str(k)] = Carray
            Crk['Cr'+str(k)] = float(Cr/len(Sk))
        
        # Extract Fii_1
        for k in range(1,5):
            currentCr = array(Crk['Cr'+str(k)]).astype(float)
            print currentCr
            if( self.maxCr < currentCr):
                self.maxCr = float(currentCr)
                self.peakCr = int(k)
                
        print "Maximum Upate (Fii_1) = %d " %  self.maxCr
        print "Peak Cr (Fii_2) = %d " %  self.peakCr
        
        # Uptake rate
        self.UptakeRate = float(self.maxCr/self.peakCr)    
        print "Uptake rate (Fii_3) "
        print self.UptakeRate
        
        # WashOut Rate
        if( self.peakCr == 4):
            self.washoutRate = 0
        else:
            self.washoutRate = float( (self.maxCr - array(Crk['Cr'+str(4)]).astype(float))/(4-self.peakCr) )
        print "WashOut rate (Fii_4) "
        print self.washoutRate


        ##################################################
        # Now Calculate enhancement-variance Kinetic based features
        # Based on Crk['Cr'+str(k)] = Cr/len(Sk)
        print "\n Saving %s" % 'Vrk'
        Vrk = {}
        
        for k in range(1,5):
            Ci = array(C['C'+str(k)]).astype(float)    
            Cri = array(Crk['Cr'+str(k)]).astype(float)
            Vr = 0
            for j in range( len(Ci) ):
                # accumulate squared deviation from the mean enhancement Cri
                Vr = Vr + (Ci[j] - Cri)**2
            # compile
            Vrk['Vr'+str(k)] = Vr/(len(Ci)-1)
        
        # Extract Fiii_1
        for k in range(1,5):
            currentVr = array(Vrk['Vr'+str(k)]).astype(float)
            if( self.maxVr < currentVr):
                print currentVr
                self.maxVr = float(currentVr)
                self.peakVr = int(k)
        
        print "Maximum Variation of enhan (Fiii_1) = %d " %  self.maxVr
        print "Peak Vr (Fii_2) = %d " %  self.peakVr
        
        # Vr_increasingRate 
        self.Vr_increasingRate = self.maxVr/self.peakVr    
        print "Vr_increasingRate (Fiii_3)" 
        print self.Vr_increasingRate
        
        # Vr_decreasingRate
        if( self.peakVr == 4):
            self.Vr_decreasingRate = 0
        else:
            self.Vr_decreasingRate = float((self.maxVr - array(Vrk['Vr'+str(4)]).astype(float))/(4-self.peakVr))
        print "Vr_decreasingRate (Fiii_4) "
        print self.Vr_decreasingRate
        
        # Vr_post_1 
        self.Vr_post_1 = float( array(Vrk['Vr'+str(1)]).astype(float))
        print "Vr_post_1 (Fiii_5)"
        print self.Vr_post_1
 
        ##################################################
        # organize into dataframe
        self.dynamicEMM_contour = DataFrame(
            data=array([[self.amp, self.alpha, self.beta, self.iAUC1, self.Slope_ini,
                         self.Tpeak, self.Kpeak, self.SER, self.maxCr, self.peakCr,
                         self.UptakeRate, self.washoutRate, self.maxVr, self.peakVr,
                         self.Vr_increasingRate, self.Vr_decreasingRate, self.Vr_post_1]]),
            columns=['A.contour', 'alpha.contour', 'beta.contour', 'iAUC1.contour',
                     'Slope_ini.contour', 'Tpeak.contour', 'Kpeak.contour', 'SER.contour',
                     'maxCr.contour', 'peakCr.contour', 'UptakeRate.contour',
                     'washoutRate.contour', 'maxVr.contour', 'peakVr.contour',
                     'Vr_increasingRate.contour', 'Vr_decreasingRate.contour',
                     'Vr_post_1.contour'])

        #############################################################
        # try to plot results
        pylab.figure()
        pylab.errorbar(t, data, yerr=se_deltaS, fmt='ro', label='data+SE') # data 'ro' red dots as markers
        pylab.plot(t, model, 'b', label='model')    # model fit 'b' blue
        pylab.plot(x, model_res, 'k', label='model (fine grid)')    # fitted curve on a fine grid, 'k' black
        pylab.xlabel(" post-contrast time (min)")
        pylab.ylabel("delta S(t)")
        pylab.legend()
        
        return self.dynamicEMM_contour
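
# Hypothetical usage sketch (the class name, the 'inside' method signature and
# the pandas import alias are assumptions; the class definition is not shown
# in this excerpt): the two feature frames can be combined side by side.
#
#   extractor = DynamicFeatureExtractor()   # hypothetical class name
#   inside  = extractor.extractfeatures_inside(...)
#   contour = extractor.extractfeatures_contour(DICOMImages, image_pos_pat,
#                                               image_ori_pat, series_path,
#                                               phases_series, VOI_mesh)
#   features = pandas.concat([inside, contour], axis=1)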