Example #1
0
File: fit_NIST.py — Project: burlyc/lmfit-py
def NIST_Test(DataSet, start='start2', plot=True):
    """Fit one NIST StRD dataset with leastsq and check certified digits.

    Parameters
    ----------
    DataSet : str
        Name of the NIST dataset; key into ``Models`` and the NIST data files.
    start : str, optional
        Which NIST-supplied starting-value set to use (e.g. 'start1'/'start2').
    plot : bool, optional
        If True and pylab is importable, plot the data and the fitted model.

    Returns
    -------
    bool
        True when more than two certified digits are reproduced by the fit.
    """
    NISTdata = ReadNistData(DataSet)
    resid, npar, dimx = Models[DataSet]
    y = NISTdata['y']
    x = NISTdata['x']

    # Build parameters b1..bN from the requested NIST starting values.
    # The certified values/stderr stay inside NISTdata and are consumed
    # later by Compare_NIST_Results, so they are not copied out here
    # (the original code bound them to unused locals).
    params = Parameters()
    for i in range(npar):
        params.add('b%i' % (i + 1), value=NISTdata[start][i])

    myfit = Minimizer(resid, params, fcn_args=(x,), fcn_kws={'y': y},
                      scale_covar=True)

    myfit.prepare_fit()
    myfit.leastsq()

    digs = Compare_NIST_Results(DataSet, myfit, params, NISTdata)

    if plot and HASPYLAB:
        # NOTE(review): calling resid without y presumably returns -model,
        # hence the negation to recover the fitted curve — confirm against
        # the Models definitions.
        fit = -resid(params, x)
        pylab.plot(x, y, 'r+')
        pylab.plot(x, fit, 'ko--')
        pylab.show()

    return digs > 2
Example #2
0
def test_constraints1():
    """Verify that algebraic parameter constraints hold after a leastsq fit.

    Fits a gaussian + lorentzian + linear baseline to synthetic data where
    the lorentzian parameters are expressed as constraints on the gaussian
    ones, then asserts the constraint expressions are still satisfied
    exactly by the fitted parameter values.
    """
    def residual(pars, x, sigma=None, data=None):
        """Composite model; returns model, raw residual, or scaled residual."""
        yg = gaussian(x, pars['amp_g'], pars['cen_g'], pars['wid_g'])
        yl = lorentzian(x, pars['amp_l'], pars['cen_l'], pars['wid_l'])

        model = yg + yl + pars['line_off'] + x * pars['line_slope']
        if data is None:
            return model
        if sigma is None:
            return model - data
        return (model - data) / sigma

    n = 601
    xmin = 0.
    xmax = 20.0
    x = linspace(xmin, xmax, n)

    # synthetic "truth": two peaks + noise + linear trend
    data = (gaussian(x, 21, 8.1, 1.2) + lorentzian(x, 10, 9.6, 2.4) +
            random.normal(scale=0.23, size=n) + x * 0.5)

    pfit = Parameters()
    pfit.add(name='amp_g', value=10)
    pfit.add(name='cen_g', value=9)
    pfit.add(name='wid_g', value=1)

    # lorentzian parameters are tied to the gaussian ones via constraint
    # expressions, so only amp_tot and the gaussian parameters vary freely
    pfit.add(name='amp_tot', value=20)
    pfit.add(name='amp_l', expr='amp_tot - amp_g')
    pfit.add(name='cen_l', expr='1.5+cen_g')
    pfit.add(name='wid_l', expr='2*wid_g')

    pfit.add(name='line_slope', value=0.0)
    pfit.add(name='line_off', value=0.0)

    sigma = 0.021  # estimate of data error (for all data points)

    myfit = Minimizer(residual,
                      pfit,
                      fcn_args=(x, ),
                      fcn_kws={
                          'sigma': sigma,
                          'data': data
                      },
                      scale_covar=True)

    myfit.prepare_fit()
    result = myfit.leastsq()

    print(' Nfev = ', result.nfev)
    print(result.chisqr, result.redchi, result.nfree)

    report_fit(result.params)
    pfit = result.params
    # exact float equality is valid here: the constrained values are
    # recomputed from the same expressions with the same float operations
    assert pfit['cen_l'].value == 1.5 + pfit['cen_g'].value
    assert pfit['amp_l'].value == pfit['amp_tot'].value - pfit['amp_g'].value
    assert pfit['wid_l'].value == 2 * pfit['wid_g'].value
def test_constraints1():
    """Fit a two-peak model whose lorentzian parameters are constraint
    expressions of the gaussian ones, and assert the ties survive the fit."""
    def residual(pars, x, sigma=None, data=None):
        # model = gaussian peak + lorentzian peak + linear baseline
        peak_g = gaussian(x, pars['amp_g'], pars['cen_g'], pars['wid_g'])
        peak_l = lorentzian(x, pars['amp_l'], pars['cen_l'], pars['wid_l'])
        model = peak_g + peak_l + pars['line_off'] + x * pars['line_slope']
        if data is None:
            return model
        diff = model - data
        if sigma is None:
            return diff
        return diff / sigma

    npts = 601
    x = linspace(0., 20.0, npts)

    # synthetic data: two peaks, gaussian noise, linear trend
    data = (gaussian(x, 21, 8.1, 1.2)
            + lorentzian(x, 10, 9.6, 2.4)
            + random.normal(scale=0.23, size=npts)
            + x * 0.5)

    pfit = Parameters()
    pfit.add(name='amp_g', value=10)
    pfit.add(name='cen_g', value=9)
    pfit.add(name='wid_g', value=1)

    # lorentzian parameters derived from the gaussian ones via expressions
    pfit.add(name='amp_tot', value=20)
    pfit.add(name='amp_l', expr='amp_tot - amp_g')
    pfit.add(name='cen_l', expr='1.5+cen_g')
    pfit.add(name='wid_l', expr='2*wid_g')

    pfit.add(name='line_slope', value=0.0)
    pfit.add(name='line_off', value=0.0)

    sigma = 0.021  # estimate of data error (for all data points)

    myfit = Minimizer(residual, pfit, fcn_args=(x,),
                      fcn_kws=dict(sigma=sigma, data=data),
                      scale_covar=True)

    myfit.prepare_fit()
    start_model = residual(myfit.params, x)  # model at the initial guess

    result = myfit.leastsq()

    print(' Nfev = ', result.nfev)
    print(result.chisqr, result.redchi, result.nfree)

    report_fit(result.params)
    pfit = result.params
    final_model = residual(result.params, x)  # model at the solution
    assert pfit['cen_l'].value == 1.5 + pfit['cen_g'].value
    assert pfit['amp_l'].value == pfit['amp_tot'].value - pfit['amp_g'].value
    assert pfit['wid_l'].value == 2 * pfit['wid_g'].value
Example #4
0
def test_peakfit():
    """Fit two overlapping gaussians where w2 is constrained to 2.5*w1.

    Generates noisy two-peak data from known parameters, fits from a
    perturbed starting point, and checks the recovered parameters with
    check_paras.
    """
    from lmfit.utilfuncs import gauss

    def residual(pars, x, data=None):
        """Sum of two gaussians; returns model or residual vs data."""
        g1 = gauss(x, pars['a1'].value, pars['c1'].value, pars['w1'].value)
        g2 = gauss(x, pars['a2'].value, pars['c2'].value, pars['w2'].value)
        model = g1 + g2
        if data is None:
            return model
        return model - data

    n = 601
    xmin = 0.
    xmax = 15.0
    noise = np.random.normal(scale=.65, size=n)
    x = np.linspace(xmin, xmax, n)

    # "true" parameters used to synthesize the data
    org_params = Parameters()
    org_params.add_many(('a1', 12.0, True, None, None, None),
                        ('c1', 5.3, True, None, None, None),
                        ('w1', 1.0, True, None, None, None),
                        ('a2', 9.1, True, None, None, None),
                        ('c2', 8.1, True, None, None, None),
                        ('w2', 2.5, True, None, None, None))

    data = residual(org_params, x) + noise

    # perturbed starting point; a1 has an upper bound of 14
    fit_params = Parameters()
    fit_params.add_many(('a1', 8.0, True, None, 14., None),
                        ('c1', 5.0, True, None, None, None),
                        ('w1', 0.7, True, None, None, None),
                        ('a2', 3.1, True, None, None, None),
                        ('c2', 8.8, True, None, None, None))

    # w2 is not free: constrained to track w1
    fit_params.add('w2', expr='2.5*w1')

    myfit = Minimizer(residual,
                      fit_params,
                      fcn_args=(x, ),
                      fcn_kws={'data': data})

    myfit.prepare_fit()
    myfit.leastsq()

    print(' N fev = ', myfit.nfev)
    print(myfit.chisqr, myfit.redchi, myfit.nfree)

    report_fit(fit_params)
    # (unused locals `init`/`fit` from the original were removed)
    check_paras(fit_params, org_params)
Example #5
0
    def __FitEvent(self):
        """Fit a multi-state step-response model to the current event.

        Builds initial guesses by characterizing the (polarity-corrected)
        event data, runs a leastsq fit, and records the event on success
        or rejects it with an appropriate code otherwise.
        """
        try:
            dt = 1000. / self.Fs  # time-step in ms.
            # apply polarity so the event is analyzed with a consistent sign
            edat = self.dataPolarity * np.asarray(self.eventData,
                                                  dtype='float64')

            # control numpy error reporting
            np.seterr(invalid='ignore', over='ignore', under='ignore')

            # time axis in ms (vectorized; replaces a per-sample list comp)
            ts = np.arange(len(edat), dtype='float64') * dt

            # estimate initial guess for events
            initguess = self._characterizeevent(edat,
                                                np.abs(util.avg(edat[:10])),
                                                self.baseSD,
                                                self.InitThreshold, 6.)
            self.nStates = len(initguess) - 1

            # per-state amplitude step (a), onset time (mu, in ms) and
            # RC time constant (tau), plus a global baseline (b)
            params = Parameters()
            for i in range(1, len(initguess)):
                params.add('a' + str(i - 1),
                           value=initguess[i][0] - initguess[i - 1][0])
                params.add('mu' + str(i - 1), value=initguess[i][1] * dt)
                params.add('tau' + str(i - 1), value=dt * 7.5)

            params.add('b', value=initguess[0][0])

            optfit = Minimizer(self.__objfunc, params, fcn_args=(ts, edat))
            optfit.prepare_fit()

            optfit.leastsq(xtol=self.FitTol,
                           ftol=self.FitTol,
                           maxfev=self.FitIters)

            if optfit.success:
                self.__recordevent(optfit)
            else:
                self.rejectEvent('eFitConvergence')
        except KeyboardInterrupt:
            self.rejectEvent('eFitUserStop')
            raise
        except InvalidEvent:
            self.rejectEvent('eInvalidEvent')
        except Exception:
            # narrowed from a bare `except:`; the error is still re-raised
            self.rejectEvent('eFitFailure')
            raise
Example #6
0
File: test_nose.py — Project: omdv/lmfit-py
def test_peakfit():
    """Fit noisy two-gaussian data with w2 tied to 2.5*w1 and verify the
    recovered parameters against the generating ones."""
    from lmfit.utilfuncs import gaussian

    def residual(pars, x, data=None):
        # sum of two gaussian peaks
        model = (gaussian(x, pars['a1'].value, pars['c1'].value,
                          pars['w1'].value)
                 + gaussian(x, pars['a2'].value, pars['c2'].value,
                            pars['w2'].value))
        if data is None:
            return model
        return model - data

    npts = 601
    noise = np.random.normal(scale=.65, size=npts)
    x = np.linspace(0., 15.0, npts)

    # generating ("true") parameter set
    org_params = Parameters()
    org_params.add_many(('a1', 12.0, True, None, None, None),
                        ('c1', 5.3, True, None, None, None),
                        ('w1', 1.0, True, None, None, None),
                        ('a2', 9.1, True, None, None, None),
                        ('c2', 8.1, True, None, None, None),
                        ('w2', 2.5, True, None, None, None))

    data = residual(org_params, x) + noise

    # perturbed starting point (a1 bounded above by 14)
    fit_params = Parameters()
    fit_params.add_many(('a1', 8.0, True, None, 14., None),
                        ('c1', 5.0, True, None, None, None),
                        ('w1', 0.7, True, None, None, None),
                        ('a2', 3.1, True, None, None, None),
                        ('c2', 8.8, True, None, None, None))

    fit_params.add('w2', expr='2.5*w1')  # constrained width

    myfit = Minimizer(residual, fit_params, fcn_args=(x,),
                      fcn_kws=dict(data=data))

    myfit.prepare_fit()

    initial_model = residual(fit_params, x)  # model at the starting guess

    myfit.leastsq()

    print(' N fev = ', myfit.nfev)
    print(myfit.chisqr, myfit.redchi, myfit.nfree)

    report_fit(fit_params)

    final_model = residual(fit_params, x)  # model at the solution
    check_paras(fit_params, org_params)
Example #7
0
	def fitevent(self, edat, initguess):
		"""Fit the multi-state event model to edat starting from initguess.

		On a successful fit the event is recorded (flagging it first if any
		parameter came back unchanged from its initial guess); otherwise it
		is rejected with a diagnostic code.
		"""
		try:
			dt = 1000./self.Fs 	# time-step in ms.

			# control numpy error reporting
			np.seterr(invalid='ignore', over='ignore', under='ignore')

			# time axis in ms (vectorized; replaces a per-sample list comp)
			ts = np.arange(len(edat), dtype='float64') * dt

			self.nStates = len(initguess)
			initRCConst = dt * 5.

			# per-state amplitude (a), onset time (mu) and RC constant (tau);
			# with LinkRCConst every tau after the first is tied to tau0
			params = Parameters()
			for i in range(len(initguess)):
				params.add('a' + str(i), value=initguess[i][0])
				params.add('mu' + str(i), value=initguess[i][1])
				if self.LinkRCConst and i != 0:
					params.add('tau' + str(i), value=initRCConst, expr='tau0')
				else:
					params.add('tau' + str(i), value=initRCConst)

			params.add('b', value=self.baseMean)  # baseline level

			igdict = params.valuesdict()  # snapshot of the initial guess

			optfit = Minimizer(self._objfunc, params, fcn_args=(ts, edat))
			optfit.prepare_fit()
			result = optfit.leastsq(xtol=self.FitTol, ftol=self.FitTol,
						maxfev=self.FitIters)

			if result.success:
				# any (name, value) pair identical before and after the fit
				# suggests the optimizer never moved that parameter
				tt = [init[0] for init, final in
					zip(igdict.items(), result.params.valuesdict().items())
					if init == final]
				if len(tt) > 0:
					self.flagEvent('wInitGuessUnchanged')

				self._recordevent(result)
			else:
				#print optfit.message, optfit.lmdif_message
				self.rejectEvent('eFitConvergence')
		except KeyboardInterrupt:
			self.rejectEvent('eFitUserStop')
			raise
		except InvalidEvent:
			self.rejectEvent('eInvalidEvent')
		except Exception:
			# narrowed from a bare `except:` that also swallowed SystemExit;
			# unexpected fit errors are still converted into a rejection
			self.rejectEvent('eFitFailure')
Example #8
0
File: adept.py — Project: forstater/mosaic
	def fitevent(self, edat, initguess):
		"""Fit the ADEPT multi-state model to edat using initguess.

		Records the event on convergence (flagging it when any parameter is
		returned unchanged from the starting guess) or rejects it otherwise.
		"""
		try:
			dt = 1000./self.Fs 	# time-step in ms.

			# control numpy error reporting
			np.seterr(invalid='ignore', over='ignore', under='ignore')

			# sample times in ms, computed vectorized instead of per element
			ts = np.arange(len(edat), dtype='float64') * dt

			self.nStates = len(initguess)
			initRCConst = dt * 5.

			# setup fit params: amplitude a_i, onset mu_i, RC constant tau_i
			params = Parameters()
			for i in range(0, len(initguess)):
				params.add('a' + str(i), value=initguess[i][0])
				params.add('mu' + str(i), value=initguess[i][1])
				if self.LinkRCConst:
					# link all RC constants to tau0 via a constraint expr
					if i == 0:
						params.add('tau' + str(i), value=initRCConst)
					else:
						params.add('tau' + str(i), value=initRCConst, expr='tau0')
				else:
					params.add('tau' + str(i), value=initRCConst)

			params.add('b', value=self.baseMean)  # baseline level

			igdict = params.valuesdict()  # initial-guess snapshot

			optfit = Minimizer(self._objfunc, params, fcn_args=(ts, edat))
			optfit.prepare_fit()
			result = optfit.leastsq(xtol=self.FitTol, ftol=self.FitTol,
						maxfev=self.FitIters)

			if result.success:
				# parameters whose (name, value) pair is identical pre/post fit
				tt = [init[0] for init, final in
					zip(igdict.items(), result.params.valuesdict().items())
					if init == final]
				if len(tt) > 0:
					self.flagEvent('wInitGuessUnchanged')

				self._recordevent(result)
			else:
				#print optfit.message, optfit.lmdif_message
				self.rejectEvent('eFitConvergence')
		except KeyboardInterrupt:
			self.rejectEvent('eFitUserStop')
			raise
		except InvalidEvent:
			self.rejectEvent('eInvalidEvent')
		except Exception:
			# was a bare `except:` (also caught SystemExit); narrowed so only
			# genuine runtime errors are mapped to a fit-failure rejection
			self.rejectEvent('eFitFailure')
Example #9
0
	def __FitEvent(self):
		"""Characterize the current event, fit the multi-state model to it,
		and record or reject the event depending on convergence."""
		try:
			dt = 1000./self.Fs 	# time-step in ms.
			# edat=np.asarray( np.abs(self.eventData),  dtype='float64' )
			# polarity correction gives the event a consistent sign
			edat = self.dataPolarity * np.asarray(self.eventData, dtype='float64')

			# control numpy error reporting
			np.seterr(invalid='ignore', over='ignore', under='ignore')

			# time axis in ms (vectorized; replaces a per-sample list comp)
			ts = np.arange(len(edat), dtype='float64') * dt

			# estimate initial guess for events
			initguess = self._characterizeevent(edat, np.abs(util.avg(edat[:10])), self.baseSD, self.InitThreshold, 6.)
			self.nStates = len(initguess) - 1

			# per-state amplitude step (a), onset time (mu, ms), RC constant
			# (tau), plus the global baseline (b)
			params = Parameters()
			for i in range(1, len(initguess)):
				params.add('a' + str(i - 1), value=initguess[i][0] - initguess[i - 1][0])
				params.add('mu' + str(i - 1), value=initguess[i][1] * dt)
				params.add('tau' + str(i - 1), value=dt * 7.5)

			params.add('b', value=initguess[0][0])

			optfit = Minimizer(self.__objfunc, params, fcn_args=(ts, edat))
			optfit.prepare_fit()

			optfit.leastsq(xtol=self.FitTol, ftol=self.FitTol, maxfev=self.FitIters)

			if optfit.success:
				self.__recordevent(optfit)
			else:
				#print optfit.message, optfit.lmdif_message
				self.rejectEvent('eFitConvergence')
		except KeyboardInterrupt:
			self.rejectEvent('eFitUserStop')
			raise
		except InvalidEvent:
			self.rejectEvent('eInvalidEvent')
		except Exception:
			# narrowed from a bare `except:`; still re-raised after rejecting
			self.rejectEvent('eFitFailure')
			raise
Example #10
0
def test_peakfit():
    """Recover two gaussian peaks from noisy data with w2 tied to w1."""
    def residual(pars, x, data=None):
        # two-gaussian model
        first_peak = gaussian(x, pars['a1'], pars['c1'], pars['w1'])
        second_peak = gaussian(x, pars['a2'], pars['c2'], pars['w2'])
        if data is None:
            return first_peak + second_peak
        return first_peak + second_peak - data

    npts = 601
    noise = np.random.normal(scale=.65, size=npts)
    x = np.linspace(0., 15.0, npts)

    # parameters used to generate the synthetic data
    org_params = Parameters()
    org_params.add_many(('a1', 12.0, True, None, None, None),
                        ('c1', 5.3, True, None, None, None),
                        ('w1', 1.0, True, None, None, None),
                        ('a2', 9.1, True, None, None, None),
                        ('c2', 8.1, True, None, None, None),
                        ('w2', 2.5, True, None, None, None))

    data = residual(org_params, x) + noise

    # deliberately offset starting values; a1 capped at 14
    fit_params = Parameters()
    fit_params.add_many(('a1', 8.0, True, None, 14., None),
                        ('c1', 5.0, True, None, None, None),
                        ('w1', 0.7, True, None, None, None),
                        ('a2', 3.1, True, None, None, None),
                        ('c2', 8.8, True, None, None, None))

    fit_params.add('w2', expr='2.5*w1')  # width constraint

    myfit = Minimizer(residual, fit_params, fcn_args=(x,),
                      fcn_kws=dict(data=data))

    myfit.prepare_fit()
    out = myfit.leastsq()
    check_paras(out.params, org_params)
Example #11
0
File: test_nose.py — Project: lmfit/lmfit-py
def test_peakfit():
    """Two-peak gaussian fit; the second width is constrained to 2.5*w1
    and the fitted parameters are validated with check_paras."""
    def residual(pars, x, data=None):
        model = (gaussian(x, pars['a1'], pars['c1'], pars['w1'])
                 + gaussian(x, pars['a2'], pars['c2'], pars['w2']))
        return model if data is None else model - data

    num = 601
    noise = np.random.normal(scale=.65, size=num)
    x = np.linspace(0., 15.0, num)

    # ground-truth parameters for the synthetic dataset
    org_params = Parameters()
    org_params.add_many(('a1', 12.0, True, None, None, None),
                        ('c1', 5.3, True, None, None, None),
                        ('w1', 1.0, True, None, None, None),
                        ('a2', 9.1, True, None, None, None),
                        ('c2', 8.1, True, None, None, None),
                        ('w2', 2.5, True, None, None, None))

    data = residual(org_params, x) + noise

    # starting guess, intentionally off; a1 bounded above at 14
    fit_params = Parameters()
    fit_params.add_many(('a1', 8.0, True, None, 14., None),
                        ('c1', 5.0, True, None, None, None),
                        ('w1', 0.7, True, None, None, None),
                        ('a2', 3.1, True, None, None, None),
                        ('c2', 8.8, True, None, None, None))

    fit_params.add('w2', expr='2.5*w1')

    minimizer = Minimizer(residual, fit_params,
                          fcn_args=(x,), fcn_kws={'data': data})

    minimizer.prepare_fit()
    out = minimizer.leastsq()
    check_paras(out.params, org_params)
Example #12
0
File: fit_NIST.py — Project: burlyc/lmfit-py
def NIST_Test(DataSet, start='start2', plot=True):
    """Fit the named NIST StRD dataset and report digit agreement.

    Parameters: DataSet selects the model/data, start selects the NIST
    starting-value set, plot optionally shows data vs fit (needs pylab).
    Returns True when more than two certified digits are matched.
    """
    NISTdata = ReadNistData(DataSet)
    resid, npar, dimx = Models[DataSet]
    y = NISTdata['y']
    x = NISTdata['x']

    # Parameters b1..bN seeded from the chosen NIST starting values.
    # Certified values/stderr are checked later inside
    # Compare_NIST_Results, so the unused local copies were dropped.
    params = Parameters()
    for i in range(npar):
        params.add('b%i' % (i + 1), value=NISTdata[start][i])

    myfit = Minimizer(resid,
                      params,
                      fcn_args=(x, ),
                      fcn_kws={'y': y},
                      scale_covar=True)

    myfit.prepare_fit()
    myfit.leastsq()

    digs = Compare_NIST_Results(DataSet, myfit, params, NISTdata)

    if plot and HASPYLAB:
        # NOTE(review): resid without y appears to return -model, hence
        # the negation — confirm against the Models definitions.
        fit = -resid(params, x)
        pylab.plot(x, y, 'r+')
        pylab.plot(x, fit, 'ko--')
        pylab.show()

    return digs > 2
Example #13
0
    def extractfeatures_contour(self, DICOMImages, image_pos_pat, image_ori_pat, series_path, phases_series, VOI_mesh):
        """Extract DCE-MRI kinetic features sampled on the VOI contour.

        For each DCE phase: read the per-phase DICOM header for acquisition
        timing, map the image into DICOM space, sample the pixel values at
        every VOI mesh point, then fit an EMM enhancement model (lmfit
        leastsq) to the mean relative enhancement and derive kinetic and
        enhancement-variance features. Results are stored on self and
        returned as a one-row DataFrame (self.dynamicEMM_contour).

        NOTE(review): Python 2 code; assumes at least 5 timepoints (t[4],
        Cr4/Vr4 are used) and that self.timepoints / self.maxCr / self.peakCr
        / self.maxVr / self.peakVr are initialized elsewhere — confirm.
        """
        pixVals = []
        deltaS = {}
        
        # necessary to read point coords (reused as out-parameters below)
        VOIPnt = [0,0,0]
        ijk = [0,0,0]
        pco = [0,0,0]
        
        for i in range(len(DICOMImages)):
            abspath_PhaseID = series_path+os.sep+str(phases_series[i]) 
            print phases_series[i]
             
            # Get total number of files
            [len_listSeries_files, FileNms_slices_sorted_stack] = processDicoms.ReadDicomfiles(abspath_PhaseID)
            mostleft_slice = FileNms_slices_sorted_stack.slices[0]
            
            # Get dicom header, retrieve
            dicomInfo_series = dicom.read_file(abspath_PhaseID+os.sep+str(mostleft_slice)) 
            
            # (0008,0031) AT S Series Time            # hh.mm.ss.frac
            seriesTime = str(dicomInfo_series[0x0008,0x0031].value) 
            # (0008,0033) AT S Image Time             # hh.mm.ss.frac
            imageTime = str(dicomInfo_series[0x0008,0x0033].value)
            
            # (0008,0032) AT S Acquisition Time       # hh.mm.ss.frac
            ti = str(dicomInfo_series[0x0008,0x0032].value) 
            
            # parse hhmmss into a datetime anchored on today's date so
            # timepoint differences can be computed with timedelta
            acquisitionTimepoint = datetime.time(hour=int(ti[0:2]), minute=int(ti[2:4]), second=int(ti[4:6]))
            self.timepoints.append( datetime.datetime.combine(datetime.date.today(), acquisitionTimepoint) )
            
            # find mapping to Dicom space  
            [transformed_image, transform_cube] = Display().dicomTransform(DICOMImages[i], image_pos_pat, image_ori_pat)
 
            # sample the image intensity at every point of the VOI mesh
            for j in range( VOI_mesh.GetNumberOfPoints() ):
                VOI_mesh.GetPoint(j, VOIPnt)      
                
                # extract pixID at location VOIPnt
                pixId = transformed_image.FindPoint(VOIPnt[0], VOIPnt[1], VOIPnt[2])
                im_pt = [0,0,0]
                
                transformed_image.GetPoint(pixId,im_pt)           
                # inorout == 0 means the point falls outside the image grid
                inorout = transformed_image.ComputeStructuredCoordinates( im_pt, ijk, pco)
                if(inorout == 0):
                    pass
                else:
                    pixValx = transformed_image.GetScalarComponentAsFloat( ijk[0], ijk[1], ijk[2], 0)
                    pixVals.append(pixValx)
                        
            # Now collect pixVals (one list of contour samples per phase)
            print "Saving %s" % 'delta'+str(i)
            deltaS['delta'+str(i)] = pixVals
            pixVals = []
                    
        print self.timepoints
        
        # Collecting timepoints in proper format:
        # cumulative elapsed minutes relative to the first acquisition
        t_delta = []
        t_delta.append(0)
        total_time = 0
        for i in range(len(DICOMImages)-1):
            current_time = self.timepoints[i+1]
            previous_time = self.timepoints[i]
            difference_time =current_time - previous_time
            timestop = divmod(difference_time.total_seconds(), 60)
            t_delta.append( t_delta[i] + timestop[0]+timestop[1]*(1./60))
            total_time = total_time+timestop[0]+timestop[1]*(1./60)
            
        # finally print t_delta
        print t_delta
        t = array(t_delta)
        print "total_time"
        print total_time
        
        ##############################################################
        # Finished sampling deltaS
        # APply lmfit to deltaS
        # first sample the mean
        data_deltaS = []; t_deltaS = []; mean_deltaS = []; sd_deltaS = []; se_deltaS = []; n_deltaS = []
        
        # append So and to (baseline phase contributes zero enhancement)
        data_deltaS.append( 0 )       
        t_deltaS.append(0)
        mean_deltaS.append( mean(deltaS['delta0']) )
        sd_deltaS.append(0)
        se_deltaS.append(0)
        n_deltaS.append( len(deltaS['delta0']) )
        
        # relative enhancement per phase: (mean_k - mean_0) / mean_0
        for k in range(1,len(DICOMImages)):
            deltaS_i =  ( mean(array(deltaS['delta'+str(k)]).astype(float)) -  mean(deltaS['delta0']) )/  mean(deltaS['delta0'])
            data_deltaS.append( deltaS_i )
            t_deltaS.append(k)
            print 'delta'+str(k)
            print  data_deltaS[k]
            
            ##############################################################
            # Calculate data_error
            # estimate the population mean and SD from our samples to find SE
            # SE tells us the distribution of individual scores around the sampled mean.
            mean_deltaS_i = mean(array(deltaS['delta'+str(k)]))
            std_deltaS_i = std(array(deltaS['delta'+str(k)]))
            n_deltaS_i = len(array(deltaS['delta'+str(k)]))
                
            sd_deltaS.append( std_deltaS_i )
            mean_deltaS.append( mean_deltaS_i )
            
            # Standard Error of the mean SE
            # the smaller the variability in the data, the more confident we are that one value (the mean) accurately reflects them.
            se_deltaS.append(std_deltaS_i/sqrt(n_deltaS_i))
            n_deltaS.append(n_deltaS_i)
                        
        # make array for data_deltaS
        data = array(data_deltaS)
        
        print "\n================\nMean and SE (i.e VOI sample data)"
        print mean_deltaS
        print se_deltaS
        
        # create a set of Parameters for the EMM enhancement model
        params = Parameters()
        params.add('amp',   value= 10,  min=0)
        params.add('alpha', value= 1, min=0) 
        params.add('beta', value= 0.05, min=0.0001, max=0.9)
        
        # do fit, here with leastsq model
        # define objective function: returns the array to be minimized
        # NOTE(review): fcn2min leaks `model`, `model_res` and `x` as module
        # globals (used for plotting below); the string after the `global`
        # statement is NOT a docstring, just a no-op expression.
        def fcn2min(params, t, data):
            global model, model_res, x
            """ model EMM for Bilateral DCE-MRI, subtract data"""
            # unpack parameters:
            #  extract .value attribute for each parameter
            amp = params['amp'].value    # Upper limit of deltaS
            alpha = params['alpha'].value    # rate of signal increase min-1
            beta = params['beta'].value        # rate of signal decrease min-1
                    
            model = amp * (1- exp(-alpha*t)) * exp(-beta*t)
            
            # dense time grid for a smooth plotted curve
            x = linspace(0, t[4], 101)
            model_res = amp * (1- exp(-alpha*x)) * exp(-beta*x)
        
            return model - data
        
        #####
        myfit = Minimizer(fcn2min,  params, fcn_args=(t,), fcn_kws={'data':data})
        myfit.prepare_fit()
        myfit.leastsq()
            
        # On a successful fit using the leastsq method, several goodness-of-fit statistics
        # and values related to the uncertainty in the fitted variables will be calculated
        print "myfit.success"
        print myfit.success
        print "myfit.residual"
        print myfit.residual
        print "myfit.chisqr"
        print myfit.chisqr
        print "myfit.redchi"
        print myfit.redchi
            
        # calculate final result
        #final = data + myfit.residual
        # write error report
        report_errors(params)
        
        # Calculate R-square
        # R_square = sum( y_fitted - y_mean)/ sum(y_data - y_mean)
        # (uses the `model` global left behind by the last fcn2min call)
        R_square = sum( (model - mean(data))**2 )/ sum( (data - mean(data))**2 )
        print "R^2"
        print R_square
        
        # store fitted EMM parameters on the instance
        self.amp = params['amp'].value
        self.alpha = params['alpha'].value
        self.beta = params['beta'].value
        
        ##################################################
        # Now Calculate Extract parameters from model
        # iAUC1: initial area under the enhancement curve up to t[1]
        self.iAUC1 = params['amp'].value *( ((1-exp(-params['beta'].value*t[1]))/params['beta'].value) + (exp((-params['alpha'].value+params['beta'].value)*t[1])-1)/(params['alpha'].value+params['beta'].value) )
        print "iAUC1"
        print self.iAUC1
        
        # initial slope of the enhancement curve at t=0
        self.Slope_ini = params['amp'].value*params['alpha'].value
        print "Slope_ini"
        print self.Slope_ini
    
        # time to peak enhancement of the EMM curve
        self.Tpeak = (1/params['alpha'].value)*log(1+(params['alpha'].value/params['beta'].value))
        print "Tpeak"
        print self.Tpeak
    
        # curvature-related quantity at the peak
        self.Kpeak = -params['amp'].value * params['alpha'].value * params['beta'].value
        print "Kpeak"
        print self.Kpeak
    
        # signal-enhancement ratio between first and last post-contrast times
        self.SER = exp( (t[4]-t[1])*params['beta'].value) * ( (1-exp(-params['alpha'].value*t[1]))/(1-exp(-params['alpha'].value*t[4])) )
        print "SER"
        print self.SER
        
        ##################################################
        # Now Calculate enhancement Kinetic based features
        # Based on the course of signal intensity within the lesion
        print "\n Saving %s" % 'Crk'
        So = array(deltaS['delta0']).astype(float)
        Crk = {'Cr0': mean(So)}  
        C = {}
        
        # per-pixel relative enhancement C and its lesion average Cr per phase
        for k in range(1,len(DICOMImages)):
            Sk = array(deltaS['delta'+str(k)]).astype(float)
            print Sk
            Cr = 0
            Carray = []
            for j in range( len(So) ):
                # extract average enhancement over the lesion at each time point
                Cr = Cr + (Sk[j] - So[j])/So[j]
                Carray.append((Sk[j] - So[j])/So[j])
                
            # compile
            C['C'+str(k)] = Carray
            Crk['Cr'+str(k)] = float(Cr/len(Sk))
        
        # Extract Fii_1: maximum mean enhancement and the phase it occurs at
        for k in range(1,5):
            currentCr = array(Crk['Cr'+str(k)]).astype(float)
            print currentCr
            if( self.maxCr < currentCr):
                self.maxCr = float(currentCr)
                self.peakCr = int(k)
                
        print "Maximum Upate (Fii_1) = %d " %  self.maxCr
        print "Peak Cr (Fii_2) = %d " %  self.peakCr
        
        # Uptake rate
        self.UptakeRate = float(self.maxCr/self.peakCr)    
        print "Uptake rate (Fii_3) "
        print self.UptakeRate
        
        # WashOut Rate (zero when the peak is at the last sampled phase)
        if( self.peakCr == 4):
            self.washoutRate = 0
        else:
            self.washoutRate = float( (self.maxCr - array(Crk['Cr'+str(4)]).astype(float))/(4-self.peakCr) )
        print "WashOut rate (Fii_4) "
        print self.washoutRate


        ##################################################
        # Now Calculate enhancement-variance Kinetic based features
        # Based on Crk['Cr'+str(k)] = Cr/len(Sk)
        print "\n Saving %s" % 'Vrk'
        Vrk = {}
        
        # sample variance of per-pixel enhancement about the lesion mean
        for k in range(1,5):
            Ci = array(C['C'+str(k)]).astype(float)    
            Cri = array(Crk['Cr'+str(k)]).astype(float)
            Vr = 0
            for j in range( len(Ci) ):
                # extract average enhancement over the lesion at each time point
                Vr = Vr + (Ci[j] - Cri)**2
            # compile
            Vrk['Vr'+str(k)] = Vr/(len(Ci)-1)
        
        # Extract Fiii_1: maximum enhancement variance and its phase
        for k in range(1,5):
            currentVr = array(Vrk['Vr'+str(k)]).astype(float)
            if( self.maxVr < currentVr):
                print currentVr
                self.maxVr = float(currentVr)
                self.peakVr = int(k)
        
        print "Maximum Variation of enhan (Fiii_1) = %d " %  self.maxVr
        print "Peak Vr (Fii_2) = %d " %  self.peakVr
        
        # Vr_increasingRate 
        self.Vr_increasingRate = self.maxVr/self.peakVr    
        print "Vr_increasingRate (Fiii_3)" 
        print self.Vr_increasingRate
        
        # Vr_decreasingRate (zero when the variance peaks at the last phase)
        if( self.peakVr == 4):
            self.Vr_decreasingRate = 0
        else:
            self.Vr_decreasingRate = float((self.maxVr - array(Vrk['Vr'+str(4)]).astype(float))/(4-self.peakVr))
        print "Vr_decreasingRate (Fiii_4) "
        print self.Vr_decreasingRate
        
        # Vr_post_1 
        self.Vr_post_1 = float( array(Vrk['Vr'+str(1)]).astype(float))
        print "Vr_post_1 (Fiii_5)"
        print self.Vr_post_1
 
        ##################################################
        # orgamize into dataframe (one row, '.contour'-suffixed feature names)
        self.dynamicEMM_contour = DataFrame( data=array([[ self.amp, self.alpha, self.beta, self.iAUC1, self.Slope_ini, self.Tpeak, self.Kpeak, self.SER, self.maxCr, self.peakCr, self.UptakeRate, self.washoutRate, self.maxVr, self.peakVr, self.Vr_increasingRate, self.Vr_decreasingRate, self.Vr_post_1]]), 
                                columns=['A.contour', 'alpha.contour', 'beta.contour', 'iAUC1.contour', 'Slope_ini.contour', 'Tpeak.contour', 'Kpeak.contour', 'SER.contour', 'maxCr.contour', 'peakCr.contour', 'UptakeRate.contour', 'washoutRate.contour', 'maxVr.contour', 'peakVr.contour','Vr_increasingRate.contour', 'Vr_decreasingRate.contour', 'Vr_post_1.contour'])

        #############################################################
        # try to plot results (uses globals set by the last fcn2min call)
        pylab.figure()
        pylab.errorbar(t, data, yerr=se_deltaS, fmt='ro', label='data+SE') # data 'ro' red dots as markers
        pylab.plot(t, model, 'b', label='model')    # model fit 'b' blue
        pylab.plot(x, model_res, 'k', label='model fit')    # model fit 'k' blakc
        pylab.xlabel(" post-contrast time (min)")
        pylab.ylabel("delta S(t)")
        pylab.legend()
        
        return self.dynamicEMM_contour
Example #14
0
    def extractfeatures_inside(self, DICOMImages, image_pos_pat, image_ori_pat, series_path, phases_series, VOI_mesh):
        """ Start pixVals for collection pixel values at VOI """
        pixVals = []
        deltaS = {}
        
        # necessary to read point coords
        VOIPnt = [0,0,0]
        ijk = [0,0,0]
        pco = [0,0,0]
        
        for i in range(len(DICOMImages)):
            abspath_PhaseID = series_path+os.sep+str(phases_series[i]) 
            print phases_series[i]
            
            # Get total number of files
            load = Inputs_init()
            [len_listSeries_files, FileNms_slices_sorted_stack] = load.ReadDicomfiles(abspath_PhaseID)
            mostleft_slice = FileNms_slices_sorted_stack.slices[0]
            
            # Get dicom header, retrieve
            dicomInfo_series = dicom.read_file(abspath_PhaseID+os.sep+str(mostleft_slice)) 
            
            # (0008,0031) AT S Series Time            # hh.mm.ss.frac
            seriesTime = str(dicomInfo_series[0x0008,0x0031].value) 
            # (0008,0033) AT S Image Time             # hh.mm.ss.frac
            imageTime = str(dicomInfo_series[0x0008,0x0033].value)
            
            # (0008,0032) AT S Acquisition Time       # hh.mm.ss.frac
            ti = str(dicomInfo_series[0x0008,0x0032].value) 
            
            acquisitionTimepoint = datetime.time(hour=int(ti[0:2]), minute=int(ti[2:4]), second=int(ti[4:6]))
            self.timepoints.append( datetime.datetime.combine(datetime.date.today(), acquisitionTimepoint) )
            
            # find mapping to Dicom space  
            [transformed_image, transform_cube] = Display().dicomTransform(DICOMImages[i], image_pos_pat, image_ori_pat)
            
            ### Get inside of VOI            
            [VOI_scalars, VOIdims] = self.createMaskfromMesh(VOI_mesh, transformed_image)
            print "\n VOIdims"
            print VOIdims
            
            # get non zero elements
            image_scalars = transformed_image.GetPointData().GetScalars()
            numpy_VOI_imagedata = vtk_to_numpy(image_scalars)     
            
            numpy_VOI_imagedata = numpy_VOI_imagedata.reshape(VOIdims[2], VOIdims[1], VOIdims[0]) 
            numpy_VOI_imagedata = numpy_VOI_imagedata.transpose(2,1,0)
            
            print "Shape of VOI_imagedata: "
            print numpy_VOI_imagedata.shape
            
            #################### HERE GET IT AND MASK IT OUT
            self.nonzeroVOIextracted = nonzero(VOI_scalars)
            print self.nonzeroVOIextracted
            
            VOI_imagedata = numpy_VOI_imagedata[self.nonzeroVOIextracted]     
            
            print "shape of VOI_imagedata  Clipped:"
            print VOI_imagedata.shape
        
            for j in range( len(VOI_imagedata) ):
                pixValx = VOI_imagedata[j]
                pixVals.append(pixValx)
                        
            # Now collect pixVals
            print "Saving %s" % 'delta'+str(i)
            deltaS['delta'+str(i)] = pixVals
            pixVals = []
                    
        print self.timepoints
        
        # Collecting timepoints in proper format
        t_delta = []
        t_delta.append(0)
        total_time = 0
        for i in range(len(DICOMImages)-1):
            current_time = self.timepoints[i+1]
            previous_time = self.timepoints[i]
            difference_time =current_time - previous_time
            timestop = divmod(difference_time.total_seconds(), 60)
            t_delta.append( t_delta[i] + timestop[0]+timestop[1]*(1./60))
            total_time = total_time+timestop[0]+timestop[1]*(1./60)
            
        # finally print t_delta
        print t_delta
        t = array(t_delta)
        print "total_time"
        print total_time
        
        ##############################################################
        # Finished sampling deltaS
        # APply lmfit to deltaS
        # first sample the mean
        data_deltaS = []; t_deltaS = []; mean_deltaS = []; sd_deltaS = []; se_deltaS = []; n_deltaS = []
        
        # append So and to
        data_deltaS.append( 0 )       
        t_deltaS.append(0)
        mean_deltaS.append( mean(deltaS['delta0']) )
        sd_deltaS.append(0)
        se_deltaS.append(0)
        n_deltaS.append( len(deltaS['delta0']) )
        
        for k in range(1,len(DICOMImages)):
            deltaS_i =  ( mean(array(deltaS['delta'+str(k)]).astype(float)) -  mean(deltaS['delta0']) )/  mean(deltaS['delta0'])
            data_deltaS.append( deltaS_i )
            t_deltaS.append(k)
            print 'delta'+str(k)
            print  data_deltaS[k]
            
            ##############################################################
            # Calculate data_error
            # estimate the population mean and SD from our samples to find SE
            # SE tells us the distribution of individual scores around the sampled mean.
            mean_deltaS_i = mean(array(deltaS['delta'+str(k)]))
            std_deltaS_i = std(array(deltaS['delta'+str(k)]))
            n_deltaS_i = len(array(deltaS['delta'+str(k)]))
                
            sd_deltaS.append( std_deltaS_i )
            mean_deltaS.append( mean_deltaS_i )
            
            # Standard Error of the mean SE
            # the smaller the variability in the data, the more confident we are that one value (the mean) accurately reflects them.
            se_deltaS.append(std_deltaS_i/sqrt(n_deltaS_i))
            n_deltaS.append(n_deltaS_i)
                        
        # make array for data_deltaS
        data = array(data_deltaS)
        
        print "\n================\nMean and SE (i.e VOI sample data)"
        print mean_deltaS
        print se_deltaS
        
        # create a set of Parameters
        params = Parameters()
        params.add('amp',   value= 10,  min=0)
        params.add('alpha', value= 1, min=0) 
        params.add('beta', value= 0.05, min=0.0001, max=0.9)
        
        # do fit, here with leastsq model
        # define objective function: returns the array to be minimized
        def fcn2min(params, t, data):
            global model, model_res, x
            """ model EMM for Bilateral DCE-MRI, subtract data"""
            # unpack parameters:
            #  extract .value attribute for each parameter
            amp = params['amp'].value    # Upper limit of deltaS
            alpha = params['alpha'].value    # rate of signal increase min-1
            beta = params['beta'].value        # rate of signal decrease min-1
                    
            model = amp * (1- exp(-alpha*t)) * exp(-beta*t)
            
            x = linspace(0, t[4], 101)
            model_res = amp * (1- exp(-alpha*x)) * exp(-beta*x)
        
            return model - data
        
        #####
        myfit = Minimizer(fcn2min,  params, fcn_args=(t,), fcn_kws={'data':data})
        myfit.prepare_fit()
        myfit.leastsq()
            
        # On a successful fit using the leastsq method, several goodness-of-fit statistics
        # and values related to the uncertainty in the fitted variables will be calculated
        print "myfit.success"
        print myfit.success
        print "myfit.residual"
        print myfit.residual
        print "myfit.chisqr"
        print myfit.chisqr
        print "myfit.redchi"
        print myfit.redchi
            
        # calculate final result
        final = data + myfit.residual
        # write error report
        report_errors(params)
        
        # Calculate R-square
        # R_square = sum( y_fitted - y_mean)/ sum(y_data - y_mean)
        R_square = sum( (model - mean(data))**2 )/ sum( (data - mean(data))**2 )
        print "R^2"
        print R_square
        
        self.amp = params['amp'].value
        self.alpha = params['alpha'].value
        self.beta = params['beta'].value
        
        ##################################################
        # Now Calculate Extract parameters from model
        self.iAUC1 = params['amp'].value *( ((1-exp(-params['beta'].value*t[1]))/params['beta'].value) + (exp((-params['alpha'].value+params['beta'].value)*t[1])-1)/(params['alpha'].value+params['beta'].value) )
        print "iAUC1"
        print self.iAUC1
        
        self.Slope_ini = params['amp'].value*params['alpha'].value
        print "Slope_ini"
        print self.Slope_ini
    
        self.Tpeak = (1/params['alpha'].value)*log(1+(params['alpha'].value/params['beta'].value))
        print "Tpeak"
        print self.Tpeak
    
        self.Kpeak = -params['amp'].value * params['alpha'].value * params['beta'].value
        print "Kpeak"
        print self.Kpeak
    
        self.SER = exp( (t[4]-t[1])*params['beta'].value) * ( (1-exp(-params['alpha'].value*t[1]))/(1-exp(-params['alpha'].value*t[4])) )
        print "SER"
        print self.SER
        
        ##################################################
        # Now Calculate enhancement Kinetic based features
        # Based on the course of signal intensity within the lesion
        print "\n Saving %s" % 'Crk'
        So = array(deltaS['delta0']).astype(float)
        Crk = {'Cr0': mean(So)}  
        C = {}
        Carray = []
        
        for k in range(1,len(DICOMImages)):
            Sk = array(deltaS['delta'+str(k)]).astype(float)
            Cr = 0
            for j in range( len(So) ):
                # extract average enhancement over the lesion at each time point
                Cr = Cr + (Sk[j] - So[j])/So[j]
                Carray.append((Sk[j] - So[j])/So[j])
                
            # compile
            C['C'+str(k)] = Carray
            Crk['Cr'+str(k)] = Cr/len(Sk)
        
        # Extract Fii_1
        for k in range(1,5):
            currentCr = array(Crk['Cr'+str(k)]).astype(float)
            print currentCr
            if( self.maxCr < currentCr):
                self.maxCr = float(currentCr)
                self.peakCr = int(k)
                
        print "Maximum Upate (Fii_1) = %d " %  self.maxCr
        print "Peak Cr (Fii_2) = %d " %  self.peakCr
        
        # Uptake rate
        self.UptakeRate = float(self.maxCr/self.peakCr)    
        print "Uptake rate (Fii_3) "
        print self.UptakeRate
        
        # WashOut Rate
        if( self.peakCr == 4):
            self.washoutRate = 0
        else:
            self.washoutRate = float( (self.maxCr - array(Crk['Cr'+str(4)]).astype(float))/(4-self.peakCr) )
        print "WashOut rate (Fii_4) "
        print self.washoutRate


        ##################################################
        # Now Calculate enhancement-variance Kinetic based features
        # Based on Crk['Cr'+str(k)] = Cr/len(Sk)
        print "\n Saving %s" % 'Vrk'
        Vrk = {}
        
        for k in range(1,5):
            Ci = array(C['C'+str(k)]).astype(float)    
            Cri = array(Crk['Cr'+str(k)]).astype(float)
            Vr = 0
            for j in range( len(Ci) ):
                # extract average enhancement over the lesion at each time point
                Vr = Vr + (Ci[j] - Cri)**2
            # compile
            Vrk['Vr'+str(k)] = Vr/(len(Ci)-1)
        
        # Extract Fiii_1
        for k in range(1,5):
            currentVr = array(Vrk['Vr'+str(k)]).astype(float)
            if( self.maxVr < currentVr):
                print currentVr
                self.maxVr = float(currentVr)
                self.peakVr = int(k)
        
        print "Maximum Variation of enhan (Fiii_1) = %d " %  self.maxVr
        print "Peak Vr (Fii_2) = %d " %  self.peakVr
        
        # Vr_increasingRate 
        self.Vr_increasingRate = self.maxVr/self.peakVr    
        print "Vr_increasingRate (Fiii_3)" 
        print self.Vr_increasingRate
        
        # Vr_decreasingRate
        if( self.peakVr == 4):
            Vr_decreasingRate = 0
        else:
            Vr_decreasingRate = float((self.maxVr - array(Vrk['Vr'+str(4)]).astype(float))/(4-self.peakVr))
        print "Vr_decreasingRate (Fiii_4) "
        print Vr_decreasingRate
        
        # Vr_post_1 
        self.Vr_post_1 = float( array(Vrk['Vr'+str(1)]).astype(float))
        print "Vr_post_1 (Fiii_5)"
        print self.Vr_post_1
 
        ##################################################
        # orgamize into dataframe
        self.dynamicEMM_inside = DataFrame( data=array([[ self.amp, self.alpha, self.beta, self.iAUC1, self.Slope_ini, self.Tpeak, self.Kpeak, self.SER, self.maxCr, self.peakCr, self.UptakeRate, self.washoutRate, self.maxVr, self.peakVr, self.Vr_increasingRate, self.Vr_post_1]]), 
                                columns=['A.inside', 'alpha.inside', 'beta.inside', 'iAUC1.inside', 'Slope_ini.inside', 'Tpeak.inside', 'Kpeak.inside', 'SER.inside', 'maxCr.inside', 'peakCr.inside', 'UptakeRate.inside', 'washoutRate.inside', 'maxVr.inside', 'peakVr.inside','Vr_increasingRate.inside', 'Vr_post_1.inside'])

        #############################################################
        # try to plot results
        pylab.figure()
        pylab.errorbar(t, data, yerr=se_deltaS, fmt='ro', label='data+SE') # data 'ro' red dots as markers
        pylab.plot(t, final, 'b+', label='data+residuals')    # data+residuals 'b+' blue pluses
        pylab.plot(t, model, 'b', label='model')    # model fit 'b' blue
        pylab.plot(x, model_res, 'k', label='model fit')    # model fit 'k' blakc
        pylab.xlabel(" post-contrast time (min)")
        pylab.ylabel("delta S(t)")
        pylab.legend()
        
        return self.dynamicEMM_inside
예제 #15
0
# Build starting parameters for a Gaussian-plus-line fit.
# NOTE(review): `residual`, `x` and `data` are not defined in this fragment;
# they must come from earlier module-level code -- confirm before running.
p_fit = Parameters()
p_fit.add('amp_g', value=10.0)
p_fit.add('cen_g', value=9)
p_fit.add('wid_g', value=1)
p_fit.add('line_slope', value=0.0)
p_fit.add('line_off', value=0.0)

myfit = Minimizer(residual,
                  p_fit,
                  fcn_args=(x, ),
                  fcn_kws={
                      'sigma': 0.2,
                      'data': data
                  })

myfit.prepare_fit()
# Survey how the covariance-scaling flag and the assumed data uncertainty
# interact: refit the same data for every (scale_covar, sigma) combination.
for scale_covar in (True, False):
    myfit.scale_covar = scale_covar
    print '  ====  scale_covar = ', myfit.scale_covar, ' ==='
    for sigma in (0.1, 0.2, 0.23, 0.5):
        # change sigma by mutating the minimizer's stored keyword args
        myfit.userkws['sigma'] = sigma

        # reset starting values so each fit begins from the same point,
        # independent of the previous fit's result
        p_fit['amp_g'].value = 10
        p_fit['cen_g'].value = 9
        p_fit['wid_g'].value = 1
        p_fit['line_slope'].value = 0.0
        p_fit['line_off'].value = 0.0

        myfit.leastsq()
        print '  sigma          = ', sigma
예제 #16
0
        Parameter(name='cen_g', value=9),
        Parameter(name='wid_g', value=1),
        Parameter(name='frac', value=0.50),
        Parameter(name='amp_l', expr='amp_g'),
        Parameter(name='cen_l', expr='cen_g'),
        Parameter(name='wid_l', expr='wid_g'),
        Parameter(name='line_slope', value=0.0),
        Parameter(name='line_off', value=0.0)]

# First fit: minimize `residual` over `pfit` with the Nelder-Mead simplex.
# NOTE(review): `residual`, `pfit`, `p_fit`, `x`, `data`, `max_x` and
# `p_true` are defined earlier in the file (out of view here) -- verify.
sigma = 0.021  # estimate of data error (for all data points)

myfit = Minimizer(residual, pfit, # iter_cb=per_iteration,
                  fcn_args=(x,), fcn_kws={'sigma':sigma, 'data':data},
                  scale_covar=True)

myfit.prepare_fit()
init = residual(myfit.params, x)   # model curve at the starting values

if HASPYLAB:
    pylab.plot(x, init, 'b--')

# fit with Nelder-Mead simplex method
# (informational list of scalar methods; only 'Nelder-Mead' is used here)
supported_methods = ('BFGS', 'COBYLA', 'SLSQP', 'Powell', 'Nelder-Mead')
myfit.scalar_minimize(method='Nelder-Mead')


print(' Nfev = ', myfit.nfev)
# print( myfit.chisqr, myfit.redchi, myfit.nfree)
# report_errors(myfit.params, modelpars=p_true)

fit = residual(myfit.params, x)
# Second fit: rebuild the starting values/constraints on p_fit and refit
# with leastsq under a different assumed data error.
p_fit.add('cen_g', value=max_x)
p_fit.add('wid_g', value=2.0)
p_fit.add('frac',  value=0.50)
p_fit.add('amp_l', expr='0.5*amp_g')
p_fit.add('cen_l', value=12.5)
p_fit.add('wid_l', expr='2.5*wid_g')
p_fit.add('line_slope', value=0.0)
p_fit.add('line_off', value=0.0)

sigma = 0.041  # estimate of data error (for all data points)

# params=None here: the parameter set is supplied to prepare_fit() below
myfit = Minimizer(residual, None,            # iter_cb=per_iteration,
                  fcn_args=(x,), fcn_kws={'sigma':sigma, 'data':data},
                  scale_covar=True)

myfit.prepare_fit(params=p_fit)
init = residual(p_fit, x)   # model curve at the starting values

if HASPYLAB:
    pylab.plot(x, init, 'b--')

myfit.leastsq()

print(' Nfev = ', myfit.nfev)
print( myfit.chisqr, myfit.redchi, myfit.nfree)

report_errors(myfit.params, modelpars=p_true)

fit = residual(myfit.params, x)
if HASPYLAB:
예제 #18
0
    def __FitEvent(self):
        """Fit the step-response model to one event and store its metadata.

        Estimates event start/end times (mu1, mu2 in ms), blockade amplitude
        (a), open-channel baseline (b) and rise time (tau) by least squares.
        On success the fitted values are stored on self (mdOpenChCurrent,
        mdBlockedCurrent, mdEventStart, mdEventEnd, mdRiseTime,
        mdAbsEventStart, mdBlockDepth, mdResTime, mdRedChiSq); otherwise the
        event is rejected with a reason code via self.rejectEvent.
        """
        try:
            # 'a' (the blockade amplitude) is always varied in the fit.
            # NOTE(review): for long events only the *initial guess* of the
            # blocked current is refined below -- the parameter stays free.
            varyBlockedCurrent = True

            i0 = np.abs(self.baseMean)   # open-channel current magnitude
            i0sig = self.baseSD
            dt = 1000. / self.Fs  # time-step in ms.
            # restore the analysis polarity of the raw event samples
            edat = self.dataPolarity * np.asarray(self.eventData,
                                                  dtype='float64')

            blockedCurrent = min(edat)
            tauVal = dt

            # initial guesses for the event boundary sample indices
            estart = self.__eventStartIndex(
                self.__threadList(edat, range(0, len(edat))), i0, i0sig) - 1
            eend = self.__eventEndIndex(
                self.__threadList(edat, range(0, len(edat))), i0, i0sig) - 2

            # For long events, refine the blocked-current estimate from the
            # interior samples to speed up the fit
            if (eend - estart) > 1000:
                blockedCurrent = np.mean(edat[estart + 50:eend - 50])

            # control numpy error reporting
            np.seterr(invalid='ignore', over='ignore', under='ignore')

            # time axis in ms, one entry per sample
            ts = np.array([t * dt for t in range(0, len(edat))],
                          dtype='float64')

            params = Parameters()
            params.add('mu1', value=estart * dt)   # event start (ms)
            params.add('mu2', value=eend * dt)     # event end (ms)
            params.add('a',
                       value=(i0 - blockedCurrent),
                       vary=varyBlockedCurrent)    # blockade amplitude
            params.add('b', value=i0)              # open-channel baseline
            params.add('tau', value=tauVal)        # rise time (ms)

            optfit = Minimizer(self.__objfunc, params, fcn_args=(
                ts,
                edat,
            ))
            optfit.prepare_fit()

            optfit.leastsq(xtol=self.FitTol,
                           ftol=self.FitTol,
                           maxfev=self.FitIters)

            if optfit.success:
                # negative event times are unphysical
                if optfit.params['mu1'].value < 0.0 or optfit.params[
                        'mu2'].value < 0.0:
                    self.rejectEvent('eInvalidResTime')
                # The start of the event is set past the length of the data
                elif optfit.params['mu1'].value > ts[-1]:
                    self.rejectEvent('eInvalidEventStart')
                else:
                    self.mdOpenChCurrent = optfit.params['b'].value
                    self.mdBlockedCurrent = optfit.params[
                        'b'].value - optfit.params['a'].value
                    self.mdEventStart = optfit.params['mu1'].value
                    self.mdEventEnd = optfit.params['mu2'].value
                    self.mdRiseTime = optfit.params['tau'].value
                    # absolute start = fitted start + offset of this window
                    self.mdAbsEventStart = self.mdEventStart + self.absDataStartIndex * dt

                    self.mdBlockDepth = self.mdBlockedCurrent / self.mdOpenChCurrent
                    self.mdResTime = self.mdEventEnd - self.mdEventStart

                    # chi-squared normalized by the residual variance and
                    # the degrees of freedom
                    self.mdRedChiSq = optfit.chisqr / (
                        np.var(optfit.residual) *
                        (len(self.eventData) - optfit.nvarys - 1))

                    if math.isnan(self.mdRedChiSq):
                        self.rejectEvent('eInvalidChiSq')
                    if self.mdBlockDepth < 0 or self.mdBlockDepth > 1:
                        self.rejectEvent('eInvalidBlockDepth')
            else:
                self.rejectEvent('eFitConvergence')

        except KeyboardInterrupt:
            # flag the event, then let the user interrupt propagate
            self.rejectEvent('eFitUserStop')
            raise
        except Exception:
            # was a bare `except:`; narrowed so SystemExit/GeneratorExit are
            # no longer silently converted into a fit failure
            self.rejectEvent('eFitFailure')
def test_constraints(with_plot=True):
    """Fit a Gaussian + Lorentzian model whose Lorentzian parameters are
    tied to other parameters by constraint expressions, then verify the
    constraints still hold exactly after fitting."""
    with_plot = with_plot and WITHPLOT

    def objective(pars, x, sigma=None, data=None):
        # composite model: gaussian + lorentzian on a linear baseline
        model = (gaussian(x, pars['amp_g'], pars['cen_g'], pars['wid_g']) +
                 lorentzian(x, pars['amp_l'], pars['cen_l'], pars['wid_l']) +
                 pars['line_off'] + x * pars['line_slope'])
        if data is None:
            return model
        diff = model - data
        if sigma is None:
            return diff
        return diff / sigma

    # synthetic data: two known peaks + gaussian noise + linear trend
    x = linspace(0., 20.0, 201)
    data = (gaussian(x, 21, 8.1, 1.2) +
            lorentzian(x, 10, 9.6, 2.4) +
            random.normal(scale=0.23, size=201) +
            x * 0.5)

    if with_plot:
        pylab.plot(x, data, 'r+')

    # free parameters, then expression-constrained lorentzian parameters
    pfit = Parameters()
    for pname, pval in (('amp_g', 10), ('cen_g', 9), ('wid_g', 1),
                        ('amp_tot', 20)):
        pfit.add(name=pname, value=pval)
    for pname, pexpr in (('amp_l', 'amp_tot - amp_g'),
                         ('cen_l', '1.5+cen_g'),
                         ('wid_l', '2*wid_g')):
        pfit.add(name=pname, expr=pexpr)
    pfit.add(name='line_slope', value=0.0)
    pfit.add(name='line_off', value=0.0)

    sigma = 0.021  # estimate of data error (for all data points)

    myfit = Minimizer(objective, pfit,
                      fcn_args=(x,), fcn_kws={'sigma': sigma, 'data': data},
                      scale_covar=True)

    myfit.prepare_fit()
    init = objective(myfit.params, x)

    result = myfit.leastsq()

    print(' Nfev = ', result.nfev)
    print(result.chisqr, result.redchi, result.nfree)

    report_fit(result.params, min_correl=0.3)

    fit = objective(result.params, x)
    if with_plot:
        pylab.plot(x, fit, 'b-')
    # the constrained parameters must satisfy their expressions exactly
    assert result.params['cen_l'].value == 1.5 + result.params['cen_g'].value
    assert result.params['amp_l'].value == result.params['amp_tot'].value - result.params['amp_g'].value
    assert result.params['wid_l'].value == 2 * result.params['wid_g'].value

    # now, change one constraint slightly and re-run the fit
    myfit.params['wid_l'].expr = '1.25*wid_g'
    result = myfit.leastsq()
    report_fit(result.params, min_correl=0.4)
    fit2 = objective(result.params, x)
    if with_plot:
        pylab.plot(x, fit2, 'k')
        pylab.show()

    assert result.params['cen_l'].value == 1.5 + result.params['cen_g'].value
    assert result.params['amp_l'].value == result.params['amp_tot'].value - result.params['amp_g'].value
    assert result.params['wid_l'].value == 1.25 * result.params['wid_g'].value
예제 #20
0
    def extractfeatures_inside(self, DICOMImages, image_pos_pat, image_ori_pat, series_path, phases_series, VOI_mesh):
        """ Start pixVals for collection pixel values at VOI """
        pixVals = []
        deltaS = {}

        # necessary to read point coords
        VOIPnt = [0, 0, 0]
        ijk = [0, 0, 0]
        pco = [0, 0, 0]

        for i in range(len(DICOMImages)):
            abspath_PhaseID = series_path + os.sep + str(phases_series[i])
            print phases_series[i]

            # Get total number of files
            load = Inputs_init()
            [len_listSeries_files, FileNms_slices_sorted_stack] = load.ReadDicomfiles(abspath_PhaseID)
            mostleft_slice = FileNms_slices_sorted_stack.slices[0]

            # Get dicom header, retrieve
            dicomInfo_series = dicom.read_file(abspath_PhaseID + os.sep + str(mostleft_slice))

            # (0008,0031) AT S Series Time            # hh.mm.ss.frac
            seriesTime = str(dicomInfo_series[0x0008, 0x0031].value)
            # (0008,0033) AT S Image Time             # hh.mm.ss.frac
            imageTime = str(dicomInfo_series[0x0008, 0x0033].value)

            # (0008,0032) AT S Acquisition Time       # hh.mm.ss.frac
            ti = str(dicomInfo_series[0x0008, 0x0032].value)

            acquisitionTimepoint = datetime.time(hour=int(ti[0:2]), minute=int(ti[2:4]), second=int(ti[4:6]))
            self.timepoints.append(datetime.datetime.combine(datetime.date.today(), acquisitionTimepoint))

            # find mapping to Dicom space
            [transformed_image, transform_cube] = Display().dicomTransform(DICOMImages[i], image_pos_pat, image_ori_pat)

            ### Get inside of VOI
            [VOI_scalars, VOIdims] = self.createMaskfromMesh(VOI_mesh, transformed_image)
            print "\n VOIdims"
            print VOIdims

            # get non zero elements
            image_scalars = transformed_image.GetPointData().GetScalars()
            numpy_VOI_imagedata = vtk_to_numpy(image_scalars)

            numpy_VOI_imagedata = numpy_VOI_imagedata.reshape(VOIdims[2], VOIdims[1], VOIdims[0])
            numpy_VOI_imagedata = numpy_VOI_imagedata.transpose(2, 1, 0)

            print "Shape of VOI_imagedata: "
            print numpy_VOI_imagedata.shape

            #################### HERE GET IT AND MASK IT OUT
            self.nonzeroVOIextracted = nonzero(VOI_scalars)
            print self.nonzeroVOIextracted

            VOI_imagedata = numpy_VOI_imagedata[self.nonzeroVOIextracted]

            print "shape of VOI_imagedata  Clipped:"
            print VOI_imagedata.shape

            for j in range(len(VOI_imagedata)):
                pixValx = VOI_imagedata[j]
                pixVals.append(pixValx)

            # Now collect pixVals
            print "Saving %s" % "delta" + str(i)
            deltaS["delta" + str(i)] = pixVals
            pixVals = []

        print self.timepoints

        # Collecting timepoints in proper format
        t_delta = []
        t_delta.append(0)
        total_time = 0
        for i in range(len(DICOMImages) - 1):
            current_time = self.timepoints[i + 1]
            previous_time = self.timepoints[i]
            difference_time = current_time - previous_time
            timestop = divmod(difference_time.total_seconds(), 60)
            t_delta.append(t_delta[i] + timestop[0] + timestop[1] * (1.0 / 60))
            total_time = total_time + timestop[0] + timestop[1] * (1.0 / 60)

        # finally print t_delta
        print t_delta
        t = array(t_delta)
        print "total_time"
        print total_time

        ##############################################################
        # Finished sampling deltaS
        # APply lmfit to deltaS
        # first sample the mean
        data_deltaS = []
        t_deltaS = []
        mean_deltaS = []
        sd_deltaS = []
        se_deltaS = []
        n_deltaS = []

        # append So and to
        data_deltaS.append(0)
        t_deltaS.append(0)
        mean_deltaS.append(mean(deltaS["delta0"]))
        sd_deltaS.append(0)
        se_deltaS.append(0)
        n_deltaS.append(len(deltaS["delta0"]))

        for k in range(1, len(DICOMImages)):
            deltaS_i = (mean(array(deltaS["delta" + str(k)]).astype(float)) - mean(deltaS["delta0"])) / mean(
                deltaS["delta0"]
            )
            data_deltaS.append(deltaS_i)
            t_deltaS.append(k)
            print "delta" + str(k)
            print data_deltaS[k]

            ##############################################################
            # Calculate data_error
            # estimate the population mean and SD from our samples to find SE
            # SE tells us the distribution of individual scores around the sampled mean.
            mean_deltaS_i = mean(array(deltaS["delta" + str(k)]))
            std_deltaS_i = std(array(deltaS["delta" + str(k)]))
            n_deltaS_i = len(array(deltaS["delta" + str(k)]))

            sd_deltaS.append(std_deltaS_i)
            mean_deltaS.append(mean_deltaS_i)

            # Standard Error of the mean SE
            # the smaller the variability in the data, the more confident we are that one value (the mean) accurately reflects them.
            se_deltaS.append(std_deltaS_i / sqrt(n_deltaS_i))
            n_deltaS.append(n_deltaS_i)

        # make array for data_deltaS
        data = array(data_deltaS)

        print "\n================\nMean and SE (i.e VOI sample data)"
        print mean_deltaS
        print se_deltaS

        # create a set of Parameters
        params = Parameters()
        params.add("amp", value=10, min=0)
        params.add("alpha", value=1, min=0)
        params.add("beta", value=0.05, min=0.0001, max=0.9)

        # do fit, here with leastsq model
        # define objective function: returns the array to be minimized
        def fcn2min(params, t, data):
            global model, model_res, x
            """ model EMM for Bilateral DCE-MRI, subtract data"""
            # unpack parameters:
            #  extract .value attribute for each parameter
            amp = params["amp"].value  # Upper limit of deltaS
            alpha = params["alpha"].value  # rate of signal increase min-1
            beta = params["beta"].value  # rate of signal decrease min-1

            model = amp * (1 - exp(-alpha * t)) * exp(-beta * t)

            x = linspace(0, t[4], 101)
            model_res = amp * (1 - exp(-alpha * x)) * exp(-beta * x)

            return model - data

        #####
        myfit = Minimizer(fcn2min, params, fcn_args=(t,), fcn_kws={"data": data})
        myfit.prepare_fit()
        myfit.leastsq()

        # On a successful fit using the leastsq method, several goodness-of-fit statistics
        # and values related to the uncertainty in the fitted variables will be calculated
        print "myfit.success"
        print myfit.success
        print "myfit.residual"
        print myfit.residual
        print "myfit.chisqr"
        print myfit.chisqr
        print "myfit.redchi"
        print myfit.redchi

        # calculate final result
        final = data + myfit.residual
        # write error report
        report_errors(params)

        # Calculate R-square
        # R_square = sum( y_fitted - y_mean)/ sum(y_data - y_mean)
        R_square = sum((model - mean(data)) ** 2) / sum((data - mean(data)) ** 2)
        print "R^2"
        print R_square

        self.amp = params["amp"].value
        self.alpha = params["alpha"].value
        self.beta = params["beta"].value

        ##################################################
        # Now Calculate Extract parameters from model
        self.iAUC1 = params["amp"].value * (
            ((1 - exp(-params["beta"].value * t[1])) / params["beta"].value)
            + (exp((-params["alpha"].value + params["beta"].value) * t[1]) - 1)
            / (params["alpha"].value + params["beta"].value)
        )
        print "iAUC1"
        print self.iAUC1

        self.Slope_ini = params["amp"].value * params["alpha"].value
        print "Slope_ini"
        print self.Slope_ini

        self.Tpeak = (1 / params["alpha"].value) * log(1 + (params["alpha"].value / params["beta"].value))
        print "Tpeak"
        print self.Tpeak

        self.Kpeak = -params["amp"].value * params["alpha"].value * params["beta"].value
        print "Kpeak"
        print self.Kpeak

        self.SER = exp((t[4] - t[1]) * params["beta"].value) * (
            (1 - exp(-params["alpha"].value * t[1])) / (1 - exp(-params["alpha"].value * t[4]))
        )
        print "SER"
        print self.SER

        ##################################################
        # Now Calculate enhancement Kinetic based features
        # Based on the course of signal intensity within the lesion
        print "\n Saving %s" % "Crk"
        So = array(deltaS["delta0"]).astype(float)
        Crk = {"Cr0": mean(So)}
        C = {}
        Carray = []

        for k in range(1, len(DICOMImages)):
            Sk = array(deltaS["delta" + str(k)]).astype(float)
            Cr = 0
            for j in range(len(So)):
                # extract average enhancement over the lesion at each time point
                Cr = Cr + (Sk[j] - So[j]) / So[j]
                Carray.append((Sk[j] - So[j]) / So[j])

            # compile
            C["C" + str(k)] = Carray
            Crk["Cr" + str(k)] = Cr / len(Sk)

        # Extract Fii_1
        for k in range(1, 5):
            currentCr = array(Crk["Cr" + str(k)]).astype(float)
            print currentCr
            if self.maxCr < currentCr:
                self.maxCr = float(currentCr)
                self.peakCr = int(k)

        print "Maximum Upate (Fii_1) = %d " % self.maxCr
        print "Peak Cr (Fii_2) = %d " % self.peakCr

        # Uptake rate
        self.UptakeRate = float(self.maxCr / self.peakCr)
        print "Uptake rate (Fii_3) "
        print self.UptakeRate

        # WashOut Rate
        if self.peakCr == 4:
            self.washoutRate = 0
        else:
            self.washoutRate = float((self.maxCr - array(Crk["Cr" + str(4)]).astype(float)) / (4 - self.peakCr))
        print "WashOut rate (Fii_4) "
        print self.washoutRate

        ##################################################
        # Now Calculate enhancement-variance Kinetic based features
        # Based on Crk['Cr'+str(k)] = Cr/len(Sk)
        print "\n Saving %s" % "Vrk"
        Vrk = {}

        for k in range(1, 5):
            Ci = array(C["C" + str(k)]).astype(float)
            Cri = array(Crk["Cr" + str(k)]).astype(float)
            Vr = 0
            for j in range(len(Ci)):
                # extract average enhancement over the lesion at each time point
                Vr = Vr + (Ci[j] - Cri) ** 2
            # compile
            Vrk["Vr" + str(k)] = Vr / (len(Ci) - 1)

        # Extract Fiii_1
        for k in range(1, 5):
            currentVr = array(Vrk["Vr" + str(k)]).astype(float)
            if self.maxVr < currentVr:
                print currentVr
                self.maxVr = float(currentVr)
                self.peakVr = int(k)

        print "Maximum Variation of enhan (Fiii_1) = %d " % self.maxVr
        print "Peak Vr (Fii_2) = %d " % self.peakVr

        # Vr_increasingRate
        self.Vr_increasingRate = self.maxVr / self.peakVr
        print "Vr_increasingRate (Fiii_3)"
        print self.Vr_increasingRate

        # Vr_decreasingRate
        if self.peakVr == 4:
            self.Vr_decreasingRate = 0
        else:
            self.Vr_decreasingRate = float((self.maxVr - array(Vrk["Vr" + str(4)]).astype(float)) / (4 - self.peakVr))
        print "Vr_decreasingRate (Fiii_4) "
        print self.Vr_decreasingRate

        # Vr_post_1
        self.Vr_post_1 = float(array(Vrk["Vr" + str(1)]).astype(float))
        print "Vr_post_1 (Fiii_5)"
        print self.Vr_post_1

        ##################################################
        # orgamize into dataframe
        self.dynamicEMM_inside = DataFrame(
            data=array(
                [
                    [
                        self.amp,
                        self.alpha,
                        self.beta,
                        self.iAUC1,
                        self.Slope_ini,
                        self.Tpeak,
                        self.Kpeak,
                        self.SER,
                        self.maxCr,
                        self.peakCr,
                        self.UptakeRate,
                        self.washoutRate,
                        self.maxVr,
                        self.peakVr,
                        self.Vr_increasingRate,
                        self.Vr_decreasingRate,
                        self.Vr_post_1,
                    ]
                ]
            ),
            columns=[
                "A.inside",
                "alpha.inside",
                "beta.inside",
                "iAUC1.inside",
                "Slope_ini.inside",
                "Tpeak.inside",
                "Kpeak.inside",
                "SER.inside",
                "maxCr.inside",
                "peakCr.inside",
                "UptakeRate.inside",
                "washoutRate.inside",
                "maxVr.inside",
                "peakVr.inside",
                "Vr_increasingRate.inside",
                "Vr_decreasingRate.inside",
                "Vr_post_1.inside",
            ],
        )

        #############################################################
        # try to plot results
        pylab.figure()
        pylab.errorbar(t, data, yerr=se_deltaS, fmt="ro", label="data+SE")  # data 'ro' red dots as markers
        pylab.plot(t, final, "b+", label="data+residuals")  # data+residuals 'b+' blue pluses
        pylab.plot(t, model, "b", label="model")  # model fit 'b' blue
        pylab.plot(x, model_res, "k", label="model fit")  # model fit 'k' blakc
        pylab.xlabel(" post-contrast time (min)")
        pylab.ylabel("delta S(t)")
        pylab.legend()

        return self.dynamicEMM_inside
예제 #21
0
파일: models1d.py 프로젝트: jbzdak/lmfit-py
class FitModel(object):
    """Base class for 1-D fitting models.

    Holds an lmfit ``Parameters`` set and an optional polynomial
    background (offset, slope, quadratic term).  Subclasses implement
    :meth:`model` (and optionally :meth:`guess_starting_values`).

    only supports polynomial background (offset, slop, quad)

    """
    invalid_bkg_msg = """Warning: unrecoginzed background option '%s'
expected one of the following:
   %s
"""

    def __init__(self, background=None, **kws):
        self.params = Parameters()
        # self.bkg stays None unless initialize_background creates one;
        # calc_background checks this so models without a background work
        # (previously self.bkg was unset and calc_background raised
        # AttributeError whenever background was None).
        self.bkg = None
        self.initialize_background(background=background, **kws)

    def initialize_background(self,
                              background=None,
                              offset=0,
                              slope=0,
                              quad=0):
        """initialize background parameters"""
        if background is None:
            return
        if background not in VALID_BKGS:
            # warn but continue: PolyBackground below gets whatever
            # components the (possibly misspelled) name selects
            print(self.invalid_bkg_msg % (repr(background),
                                          ', '.join(VALID_BKGS)))

        kwargs = {'offset': offset}
        if background.startswith('line'):
            kwargs['slope'] = slope
        if background.startswith('quad'):
            kwargs['quad'] = quad

        self.bkg = PolyBackground(**kwargs)

        # expose the background parameters through this model's Parameters
        for nam, par in self.bkg.params.items():
            self.params[nam] = par

    def calc_background(self, x):
        """return the background evaluated at x, or 0 if no background is set"""
        if self.bkg is None:
            return 0
        return self.bkg.calculate(x)

    def __objective(self, params, y=None, x=None, dy=None, **kws):
        """fit objective function: weighted residual (model + bkg - y) / dy"""
        bkg = 0
        if x is not None:
            bkg = self.calc_background(x)
        if y is None:
            y = 0.0
        if dy is None:
            dy = 1.0
        model = self.model(self.params, x=x, dy=dy, **kws)
        return (model + bkg - y) / dy

    def model(self, params, x=None, **kws):
        """evaluate the model -- must be overridden by subclasses"""
        raise NotImplementedError

    def guess_starting_values(self, params, y, x=None, **kws):
        """estimate starting parameter values from data -- override in subclasses"""
        raise NotImplementedError

    def fit_report(self, params=None):
        """return an lmfit fit report for params (default: self.params)"""
        if params is None:
            params = self.params
        return lmfit.fit_report(params)

    def fit(self, y, x=None, dy=None, **kws):
        """fit data y (sampled at x, with uncertainties dy) with this model"""
        fcn_kws = {'y': y, 'x': x, 'dy': dy}
        fcn_kws.update(kws)
        self.minimizer = Minimizer(self.__objective,
                                   self.params,
                                   fcn_kws=fcn_kws,
                                   scale_covar=True)
        self.minimizer.prepare_fit()
        # record the model evaluated at the starting parameter values
        self.init = self.model(self.params, x=x, **kws)
        self.minimizer.leastsq()
예제 #22
0
	def __FitEvent(self):
		# Fit the event model (self.__objfunc) to the current event trace and
		# populate the event metadata (md*) attributes; reject the event (via
		# self.rejectEvent) on fit failure or unphysical fitted parameters.
		try:
			varyBlockedCurrent=True

			# baseline (open-channel) current and its standard deviation
			i0=np.abs(self.baseMean)
			i0sig=self.baseSD
			dt = 1000./self.Fs 	# time-step in ms.
			# edat=np.asarray( np.abs(self.eventData),  dtype='float64' )
			# apply the configured polarity to the raw event trace
			edat=self.dataPolarity*np.asarray( self.eventData,  dtype='float64' )

			blockedCurrent=min(edat)
			tauVal=dt

			# initial start/end indices estimated from threshold crossings
			estart 	= self.__eventStartIndex( self.__threadList( edat, range(0,len(edat)) ), i0, i0sig ) - 1
			eend 	= self.__eventEndIndex( self.__threadList( edat, range(0,len(edat)) ), i0, i0sig ) - 2

			# For long events, fix the blocked current to speed up the fit
			#if (eend-estart) > 1000:
			#	blockedCurrent=np.mean(edat[estart+50:eend-50])

			# control numpy error reporting
			np.seterr(invalid='ignore', over='ignore', under='ignore')

			# time axis in ms
			ts = np.array([ t*dt for t in range(0,len(edat)) ], dtype='float64')

			#pl.plot(ts,edat)
			#pl.show()

			# starting values: mu1/mu2 = event start/end times, a = baseline
			# minus minimum current, b = open-channel current, tau1/tau2 = RC
			# time constants
			params=Parameters()

			# print self.absDataStartIndex

			params.add('mu1', value=estart * dt)
			params.add('mu2', value=eend * dt)
			params.add('a', value=(i0-blockedCurrent), vary=varyBlockedCurrent)
			params.add('b', value = i0)
			params.add('tau1', value = tauVal)

			if self.LinkRCConst:
				# tie tau2 to tau1 via a constraint expression
				params.add('tau2', value = tauVal, expr='tau1')
			else:
				params.add('tau2', value = tauVal)


			optfit=Minimizer(self.__objfunc, params, fcn_args=(ts,edat,))
			optfit.prepare_fit()

			result=optfit.leastsq(xtol=self.FitTol,ftol=self.FitTol,maxfev=self.FitIters)

			# print optfit.params['b'].value, optfit.params['b'].value - optfit.params['a'].value, optfit.params['mu1'].value, optfit.params['mu2'].value
			if result.success:
				# sanity-check the fitted event boundaries
				if result.params['mu1'].value < 0.0 or result.params['mu2'].value < 0.0:
					# print 'eInvalidFitParams1', optfit.params['b'].value, optfit.params['b'].value - optfit.params['a'].value, optfit.params['mu1'].value, optfit.params['mu2'].value
					self.rejectEvent('eInvalidResTime')
				# The start of the event is set past the length of the data
				elif result.params['mu1'].value > ts[-1]:
					# print 'eInvalidFitParams2', optfit.params['b'].value, optfit.params['b'].value - optfit.params['a'].value, optfit.params['mu1'].value, optfit.params['mu2'].value
					self.rejectEvent('eInvalidEventStart')
				else:
					# store fitted metadata
					self.mdOpenChCurrent 	= result.params['b'].value 
					self.mdBlockedCurrent	= result.params['b'].value - result.params['a'].value
					self.mdEventStart		= result.params['mu1'].value 
					self.mdEventEnd			= result.params['mu2'].value
					self.mdRCConst1			= result.params['tau1'].value
					self.mdRCConst2			= result.params['tau2'].value
					# event start in absolute (whole-trace) time
					self.mdAbsEventStart	= self.mdEventStart + self.absDataStartIndex * dt

					self.mdBlockDepth		= self.mdBlockedCurrent/self.mdOpenChCurrent
					self.mdResTime			= self.mdEventEnd - self.mdEventStart
					
					# reduced chi-squared, normalized by the residual variance
					self.mdRedChiSq			= result.chisqr/( np.var(result.residual) * (len(self.eventData) - result.nvarys -1) )

					# if (eend-estart) > 1000:
					# 	print blockedCurrent, self.mdBlockedCurrent, self.mdOpenChCurrent, self.mdResTime, self.mdRiseTime, self.mdRedChiSq, optfit.chisqr
					# if self.mdBlockDepth > self.BlockRejectRatio:
					# 	# print 'eBlockDepthHigh', optfit.params['b'].value, optfit.params['b'].value - optfit.params['a'].value, optfit.params['mu1'].value, optfit.params['mu2'].value
					# 	self.rejectEvent('eBlockDepthHigh')
						
					# reject unphysical results
					if math.isnan(self.mdRedChiSq):
						self.rejectEvent('eInvalidChiSq')
					if self.mdBlockDepth < 0 or self.mdBlockDepth > 1:
						self.rejectEvent('eInvalidBlockDepth')
					if self.mdRCConst1 <= 0 or self.mdRCConst2 <= 0:
						self.rejectEvent('eInvalidRCConstant')

					#print i0, i0sig, [optfit.params['a'].value, optfit.params['b'].value, optfit.params['mu1'].value, optfit.params['mu2'].value, optfit.params['tau'].value]
			else:
				# print optfit.message, optfit.lmdif_message
				self.rejectEvent('eFitConvergence')

		except KeyboardInterrupt:
			# allow the user to abort; re-raise so the caller can stop cleanly
			self.rejectEvent('eFitUserStop')
			raise
		except:
			# print optfit.message, optfit.lmdif_message
			# NOTE(review): bare except deliberately converts any error during
			# fitting into a rejected event
	 		self.rejectEvent('eFitFailure')
예제 #23
0
    def featureMap(self, DICOMImages, img_features, time_points, featuresKeys, caseLabeloutput, path_outputFolder):
        """Extract per-voxel kinetic feature maps for the features named in featuresKeys.

        For every voxel in the VOI, fits the empirical enhancement model
        (residual via self.fcn2min) to the relative signal-enhancement time
        course, then stores the requested fitted/derived features into the
        corresponding self.*_map arrays.  Selected maps ('beta', 'Tpeak',
        'Kpeak') are also written out as MetaImage (.mhd) files and rendered.

        Parameters (as used here -- confirm against callers):
          DICOMImages       -- sequence of time-point images; only len() is used
          img_features      -- dict with 'VOI<k>' 3-D arrays, one per time point
          time_points       -- acquisition times, passed to the fit
          featuresKeys      -- iterable of feature-name strings to compute
          caseLabeloutput   -- label used in output file names
          path_outputFolder -- directory for the .mhd output files
        """
        ## Retrieve image data
        VOIshape = img_features['VOI0'].shape
        print VOIshape
        self.init_features(img_features, featuresKeys)
        data_deltaS=[]  
        self.allvar_F_r_i=[]
        
        # append So and to
        data_deltaS.append( 0 )  
        
        # Based on the course of signal intensity within the lesion
        So = array(img_features['VOI0']).astype(float)
        Crk = {'Cr0': mean(So)}  
        C = {}
        Carray = []
        
        # iterate point-by-point to extract feature map       
        for i in range(VOIshape[0]):
            for j in range(VOIshape[1]):
                for k in range(VOIshape[2]):
                    for timep in range(1, len(DICOMImages)):
                        # relative enhancement at this voxel, clamped at 0
                        pix_deltaS = (img_features['VOI'+str(timep)][i,j,k].astype(float) - img_features['VOI0'][i,j,k].astype(float))/img_features['VOI0'][i,j,k].astype(float)
                        if pix_deltaS<0: pix_deltaS=0 
                        data_deltaS.append( pix_deltaS )
                        
                        # whole-VOI intensity statistics at this time point;
                        # only the variance is kept.  NOTE(review): this
                        # recomputes the same whole-VOI statistics for every
                        # voxel -- confirm intent.
                        F_r_i =  array(img_features['VOI'+str(timep)]).astype(float)
                        n_F_r_i, min_max_F_r_i, mean_F_r_i, var_F_r_i, skew_F_r_i, kurt_F_r_i = stats.describe(F_r_i)
                        self.allvar_F_r_i.append(var_F_r_i)
                    
                    data = array(data_deltaS)
                    #print data                        
                    
                    # create a set of Parameters
                    params = Parameters()
                    params.add('amp',   value= 10,  min=0)
                    params.add('alpha', value= 1, min=0) 
                    params.add('beta', value= 0.05, min=0.0001, max=0.9)
                    
                    # do fit, here with leastsq self.model
                    myfit = Minimizer(self.fcn2min,  params, fcn_args=(time_points,), fcn_kws={'data':data})
                    myfit.prepare_fit()
                    myfit.leastsq()
 
                    ####################################                              
                    # Calculate R-square: R_square = sum( y_fitted - y_mean)/ sum(y_data - y_mean)
                    # NOTE(review): self.model is used here as the fitted curve
                    # array -- presumably populated by self.fcn2min during the
                    # fit; confirm.
                    R_square = sum( (self.model - mean(data))**2 )/ sum( (data - mean(data))**2 )
                    #print "R^2:"
                    #print R_square
                    self.R_square_map[i,j,k] = R_square
                    
                    if 'amp' in featuresKeys:
                        amp = params['amp'].value
                        print "amp:"
                        print amp
                        self.amp_map[i,j,k] = amp
                    
                    if 'beta' in featuresKeys:
                        beta = params['beta'].value
                        self.beta_map[i,j,k] = beta
                        
                    if 'alpha' in featuresKeys:
                        alpha = params['alpha'].value
                        print "alpha:"
                        print alpha
                        self.alpha_map[i,j,k] = alpha
                        
                    # NOTE(review): the derived features below index 't', which
                    # is not defined in this method (the parameter is named
                    # 'time_points'); presumably a module/class global -- TODO
                    # confirm, otherwise these branches raise NameError.
                    if 'iAUC1' in featuresKeys:
                        iAUC1 = params['amp'].value *( ((1-exp(-params['beta'].value*t[1]))/params['beta'].value) + (exp((-params['alpha'].value+params['beta'].value)*t[1])-1)/(params['alpha'].value+params['beta'].value) )
                        print "iAUC1"
                        print iAUC1
                        self.iAUC1_map[i,j,k] = iAUC1
                        
                    if 'Slope_ini' in featuresKeys:
                        Slope_ini = params['amp'].value*params['alpha'].value
                        print "Slope_ini"
                        print Slope_ini
                        self.Slope_ini_map[i,j,k] = Slope_ini
                    
                    if 'Tpeak' in featuresKeys:
                        Tpeak = (1/params['alpha'].value)*log(1+(params['alpha'].value/params['beta'].value))
                        self.Tpeak_map[i,j,k] = Tpeak
                    
                    if 'Kpeak' in featuresKeys:
                        Kpeak = -params['amp'].value * params['alpha'].value * params['beta'].value
                        self.Kpeak_map[i,j,k] = Kpeak
                    
                    if 'SER' in featuresKeys:
                        SER = exp( (t[4]-t[1])*params['beta'].value) * ( (1-exp(-params['alpha'].value*t[1]))/(1-exp(-params['alpha'].value*t[4])) )
                        print "SER"
                        print SER
                        self.SER_map[i,j,k] = SER
                        
                    # NOTE(review): maxCr/peakCr are read from self and are not
                    # recomputed per voxel here -- confirm they are set upstream.
                    if 'maxCr' in featuresKeys:
                        print "Maximum Upate (Fii_1) = %d " %  self.maxCr
                        self.maxC_map[i,j,k] = self.maxCr
                        
                    if 'peakCr' in featuresKeys:
                        print "Peak Cr (Fii_2) = %d " %  self.peakCr
                        self.peakCr_map[i,j,k] = self.peakCr    
                        
                    if 'UptakeRate' in featuresKeys:
                        self.UptakeRate = float(self.maxCr/self.peakCr)    
                        print "Uptake rate (Fii_3) "
                        print self.UptakeRate
                        self.UptakeRate_map[i,j,k] = self.UptakeRate
                        
                    if 'washoutRate' in featuresKeys:          
                        # washout undefined when the peak is the last time point
                        if( self.peakCr == 4):
                            self.washoutRate = 0
                        else:
                            self.washoutRate = float( (self.maxCr - array(Crk['Cr'+str(4)]).astype(float))/(4-self.peakCr) )
                        print "WashOut rate (Fii_4) "
                        print self.washoutRate
                        self.washoutRate_map[i,j,k] = self.washoutRate
                        
                    if 'var_F_r_i' in featuresKeys:  
                        print("Variance F_r_i: {0:8.6f}".format( mean(self.allvar_F_r_i) ))
                        self.allvar_F_r_i_map[i,j,k] = mean(self.allvar_F_r_i)
                    
                    # reset the per-voxel time course for the next voxel
                    data_deltaS=[]
                    data_deltaS.append( 0 )
                    
        # convert feature maps to image
        if 'beta' in featuresKeys:       
            # scale factor 1000 -- presumably to preserve precision in the
            # vtk image; confirm against convertfeatureMap2vtkImage
            beta_map_stencil = self.convertfeatureMap2vtkImage(self.beta_map, self.imageStencil, 1000) 
            print path_outputFolder
            print caseLabeloutput
            # ## save mask as metafile image
            # NOTE(review): os.chdir changes the process-wide working directory
            os.chdir(path_outputFolder)
            vtkmask_w = vtk.vtkMetaImageWriter()
            vtkmask_w.SetInput(beta_map_stencil )
            vtkmask_w.SetFileName( 'beta_'+os.sep+caseLabeloutput+'.mhd' )
            vtkmask_w.Write()
            vtkmask_w.Update()
            
            self.xImagePlaneWidget.SetWindowLevel(640,75)
            self.yImagePlaneWidget.SetWindowLevel(640,75)
            self.zImagePlaneWidget.SetWindowLevel(640,75)
            self.renderer1.Render()            
            self.visualize_map(beta_map_stencil)    
            
            
        if 'Tpeak' in featuresKeys:
            Tpeak_map_stencil = self.convertfeatureMap2vtkImage(self.Tpeak_map, self.imageStencil, 1) 
            print path_outputFolder
            print caseLabeloutput
            # ## save mask as metafile image
            os.chdir(path_outputFolder)
            vtkmask_w = vtk.vtkMetaImageWriter()
            vtkmask_w.SetInput( Tpeak_map_stencil )
            vtkmask_w.SetFileName( 'Tpeak_'+os.sep+caseLabeloutput+'.mhd' )
            vtkmask_w.Write()
            vtkmask_w.Update()
            
            self.xImagePlaneWidget.SetWindowLevel(240,35)
            self.yImagePlaneWidget.SetWindowLevel(240,35)
            self.zImagePlaneWidget.SetWindowLevel(240,35)
            self.renderer1.Render()
            self.visualize_map(Tpeak_map_stencil)    
        
        if 'Kpeak' in featuresKeys:
            Kpeak_map_stencil = self.convertfeatureMap2vtkImage(self.Kpeak_map, self.imageStencil, 1) 
            print path_outputFolder
            print caseLabeloutput
            # ## save mask as metafile image
            os.chdir(path_outputFolder)
            vtkmask_w = vtk.vtkMetaImageWriter()
            vtkmask_w.SetInput( Kpeak_map_stencil )
            vtkmask_w.SetFileName( 'Kpeak_'+os.sep+caseLabeloutput+'.mhd' )
            vtkmask_w.Write()
            vtkmask_w.Update()
            
            self.xImagePlaneWidget.SetWindowLevel(118,15)
            self.yImagePlaneWidget.SetWindowLevel(118,15)
            self.zImagePlaneWidget.SetWindowLevel(118,15)
            self.renderer1.Render()           
            self.visualize_map(Kpeak_map_stencil) 
            
               
        
        return
                     
        
        
예제 #24
0
class FitModel(object):
    """Base class for 1-D fitting models.

    Manages an lmfit ``Parameters`` collection and an optional polynomial
    background (offset, slope, quadratic).  Subclasses must implement
    :meth:`model`; they may implement :meth:`guess_starting_values` and
    should set ``self.has_initial_guess = True`` once starting values are
    assigned, so repeated fits do not re-guess.

    only supports polynomial background (offset, slop, quad)

    """
    invalid_bkg_msg = """Warning: unrecoginzed background option '%s'
expected one of the following:
   %s
"""
    def __init__(self, background=None, **kws):
        self.params = Parameters()
        self.has_initial_guess = False
        self.bkg = None
        self.initialize_background(background=background, **kws)

    def initialize_background(self, background=None,
                              offset=0, slope=0, quad=0):
        """initialize background parameters"""
        if background is None:
            return
        if background not in VALID_BKGS:
            # warn but continue: PolyBackground below gets whatever
            # components the (possibly misspelled) name selects
            print( self.invalid_bkg_msg % (repr(background),
                                          ', '.join(VALID_BKGS)))

        kwargs = {'offset':offset}
        if background.startswith('line'):
            kwargs['slope'] = slope
        if background.startswith('quad'):
            kwargs['quad'] = quad

        self.bkg = PolyBackground(**kwargs)

        # expose the background parameters through this model's Parameters
        for nam, par in self.bkg.params.items():
            self.params[nam] = par

    def calc_background(self, x):
        """return the background evaluated at x, or 0 if no background is set"""
        if self.bkg is None:
            return 0
        return self.bkg.calculate(x)

    def __objective(self, params, y=None, x=None, dy=None, **kws):
        """fit objective function: weighted residual (model + bkg - y) / dy"""
        bkg = 0
        if x is not None:
            bkg = self.calc_background(x)
        if y is None:
            y = 0.0
        if dy is None:
            dy = 1.0
        model = self.model(self.params, x=x, dy=dy, **kws)
        return (model + bkg - y)/dy

    def model(self, params, x=None, **kws):
        """evaluate the model -- must be overridden by subclasses"""
        raise NotImplementedError

    def guess_starting_values(self, y, x=None, **kws):
        """estimate starting parameter values from data -- override in subclasses.

        Signature matches the call in :meth:`fit` (the previous declaration
        took a spurious leading ``params`` argument, which made ``fit``
        pass ``y`` in the ``params`` slot).
        """
        raise NotImplementedError

    def fit_report(self, params=None, **kws):
        """return an lmfit fit report for params (default: self.params)"""
        if params is None:
            params = self.params
        return lmfit.fit_report(params, **kws)

    def fit(self, y, x=None, dy=None, **kws):
        """fit data y (sampled at x, with uncertainties dy) with this model"""
        fcn_kws={'y':y, 'x':x, 'dy':dy}
        fcn_kws.update(kws)
        # let the subclass pick starting values once, before the first fit
        if not self.has_initial_guess:
            self.guess_starting_values(y, x=x, **kws)
        self.minimizer = Minimizer(self.__objective, self.params,
                                   fcn_kws=fcn_kws, scale_covar=True)
        self.minimizer.prepare_fit()
        # record the model evaluated at the starting parameter values
        self.init = self.model(self.params, x=x, **kws)
        self.minimizer.leastsq()
예제 #25
0
def test_constraints(with_plot=True):
    """Check that algebraic parameter constraints survive fitting.

    Synthesizes a noisy gaussian + lorentzian + linear-slope data set,
    ties 'amp_l', 'cen_l' and 'wid_l' to other parameters through
    constraint expressions, fits with leastsq, then swaps one constraint
    expression and refits.  After each fit the constrained values must
    satisfy their expressions exactly.
    """
    with_plot = with_plot and WITHPLOT

    def residual(pars, xvals, sigma=None, data=None):
        # composite model: gaussian + lorentzian on a linear background
        g_part = gaussian(xvals, pars['amp_g'].value, pars['cen_g'].value,
                          pars['wid_g'].value)
        l_part = lorentzian(xvals, pars['amp_l'].value, pars['cen_l'].value,
                            pars['wid_l'].value)
        model = (g_part + l_part + pars['line_off'].value
                 + xvals * pars['line_slope'].value)
        # return the bare model, the raw residual, or the weighted residual
        if data is None:
            return model
        if sigma is None:
            return model - data
        return (model - data) / sigma

    npts = 201
    x = linspace(0., 20.0, npts)

    data = (gaussian(x, 21, 8.1, 1.2) + lorentzian(x, 10, 9.6, 2.4) +
            random.normal(scale=0.23, size=npts) + x * 0.5)

    if with_plot:
        pylab.plot(x, data, 'r+')

    pfit = Parameters()
    # free parameters
    for pname, pval in (('amp_g', 10), ('cen_g', 9), ('wid_g', 1),
                        ('amp_tot', 20)):
        pfit.add(name=pname, value=pval)
    # constrained parameters, tied through algebraic expressions
    for pname, pexpr in (('amp_l', 'amp_tot - amp_g'),
                         ('cen_l', '1.5+cen_g'),
                         ('wid_l', '2*wid_g')):
        pfit.add(name=pname, expr=pexpr)
    pfit.add(name='line_slope', value=0.0)
    pfit.add(name='line_off', value=0.0)

    sigma = 0.021  # estimate of data error (for all data points)

    myfit = Minimizer(residual, pfit,
                      fcn_args=(x, ),
                      fcn_kws={'sigma': sigma, 'data': data},
                      scale_covar=True)

    myfit.prepare_fit()
    init = residual(myfit.params, x)

    myfit.leastsq()

    print(' Nfev = ', myfit.nfev)
    print(myfit.chisqr, myfit.redchi, myfit.nfree)

    report_fit(myfit.params, min_correl=0.3)

    fit = residual(myfit.params, x)
    if with_plot:
        pylab.plot(x, fit, 'b-')
    # the constrained values must satisfy their expressions exactly
    assert myfit.params['cen_l'].value == 1.5 + myfit.params['cen_g'].value
    assert (myfit.params['amp_l'].value ==
            myfit.params['amp_tot'].value - myfit.params['amp_g'].value)
    assert myfit.params['wid_l'].value == 2 * myfit.params['wid_g'].value

    # now, change fit slightly and re-run
    myfit.params['wid_l'].expr = '1.25*wid_g'
    myfit.leastsq()
    report_fit(myfit.params, min_correl=0.4)
    fit2 = residual(myfit.params, x)
    if with_plot:
        pylab.plot(x, fit2, 'k')
        pylab.show()

    # constraints must still hold after the expression change and refit
    assert myfit.params['cen_l'].value == 1.5 + myfit.params['cen_g'].value
    assert (myfit.params['amp_l'].value ==
            myfit.params['amp_tot'].value - myfit.params['amp_g'].value)
    assert myfit.params['wid_l'].value == 1.25 * myfit.params['wid_g'].value