Example #1
def i_sense1d(x,
              z,
              params: lm.Parameters = None,
              func: Callable = i_sense,
              auto_bin=False):
    """Fits charge transition data with function passed
    Other functions could be i_sense_digamma for example"""
    transition_model = lm.Model(func)
    z = pd.Series(z, dtype=np.float32)
    x = pd.Series(x, dtype=np.float32)
    # skip rows that do not have enough non-NaN data points
    if np.count_nonzero(~np.isnan(z)) > 10:
        z, x = CU.remove_nans(z, x)
        if auto_bin and len(z) > FIT_NUM_BINS:
            logger.debug(f'Binning data of len {len(z)} before fitting')
            bin_size = int(np.ceil(len(z) / FIT_NUM_BINS))
            x, z = CU.bin_data([x, z], bin_size)
        if params is None:
            params = get_param_estimates(x, z)[0]

        if func in [i_sense_digamma, i_sense_digamma_quad] and 'g' not in params.keys():
            _append_param_estimate_1d(params, ['g'])
        if func == i_sense_digamma_quad and 'quad' not in params.keys():
            _append_param_estimate_1d(params, ['quad'])

        result = transition_model.fit(z, x=x, params=params, nan_policy='omit')
        return result
    else:
        return None
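A minimal, self-contained sketch of the same lm.Model pattern; `step` is a hypothetical stand-in for i_sense, which is defined elsewhere in that project:

import numpy as np
import lmfit as lm

def step(x, mid, amp, const):
    # hypothetical stand-in for i_sense: a smooth charge-transition step
    return const + amp * np.tanh(x - mid)

x = np.linspace(-5, 5, 200)
z = step(x, mid=0.3, amp=1.0, const=2.0) + np.random.normal(0, 0.05, x.size)

model = lm.Model(step)
params = model.make_params(mid=0.0, amp=1.0, const=0.0)
result = model.fit(z, x=x, params=params, nan_policy='omit')
print(result.fit_report())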
Example #2
def neon_init(x_list, y_list):
	""" Initialize parameters for neon peaks 
	x_list: list of x peaks 
	y_list: list of y peaks
	returns: params 
	"""
	params = Parameters()
	BG     = 100.
	params.add("BG", value = BG)
	n      = len(x_list)
	A_variables = []
	X_variables = []
	W_variables = []
	MU_variables = []
	for i in range(n):
		A_variables.append("A%d"%i)
		X_variables.append("X%d"%i)
		W_variables.append("W%d"%i)
		MU_variables.append("MU%d"%i)
	W  = np.ones(n)
	MU = W*0.5
	for i in range(n):
		params.add(X_variables[i], value = x_list[i], min = x_list[i]-2., max = x_list[i]+2.)
		params.add(A_variables[i], value = y_list[i])
		params.add(W_variables[i], value = W[i])
		params.add(MU_variables[i], value = MU[i])
	print "number of params: %d"%len(params.keys())
	return params
Example #3
def neon_init(x_list, y_list):
    """ Initialize parameters for neon peaks 
	x_list: list of x peaks 
	y_list: list of y peaks
	returns: params 
	"""
    params = Parameters()
    BG = 100.
    params.add("BG", value=BG)
    n = len(x_list)
    A_variables = []
    X_variables = []
    W_variables = []
    MU_variables = []
    for i in range(n):
        A_variables.append("A%d" % i)
        X_variables.append("X%d" % i)
        W_variables.append("W%d" % i)
        MU_variables.append("MU%d" % i)
    W = np.ones(n)
    MU = W * 0.5
    for i in range(n):
        params.add(X_variables[i],
                   value=x_list[i],
                   min=x_list[i] - 2.,
                   max=x_list[i] + 2.)
        params.add(A_variables[i], value=y_list[i])
        params.add(W_variables[i], value=W[i])
        params.add(MU_variables[i], value=MU[i])
    print "number of params: %d" % len(params.keys())
    return params
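A hypothetical usage sketch; in practice the peak positions and heights would come from a peak finder:

x_peaks = [585.2, 640.2, 703.2]
y_peaks = [1200.0, 950.0, 400.0]
params = neon_init(x_peaks, y_peaks)
for name in params.keys():
    print(name, params[name].value, params[name].min, params[name].max)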
Example #4
    def button_click(self):
        print('Fit Button Pressed')
        self.x = np.array([])
        self.y = np.array([])

        for k in range(self.no_of_rows):

            hlp = self.tableWidget.item(k, 0)
            if hlp is not None:
                # np.float was removed in NumPy 1.24; use the builtin float
                self.x = np.append(self.x, float(hlp.text()))
            else:
                break
            hlp = self.tableWidget.item(k, 1)
            if hlp is not None:
                self.y = np.append(self.y, float(hlp.text()))

        print(self.x)
        print(self.y)

        params = Parameters()
        params.add('amplitude',
                   value=np.max(self.y),
                   min=(np.max(self.y) - np.min(self.y)) / 2.0,
                   max=(np.max(self.y) - np.min(self.y)))
        params.add('waist',
                   value=(np.max(self.x) - np.min(self.x)) / 2.0,
                   min=10.0,
                   max=2000)
        params.add('x_offset',
                   value=np.mean(self.x),
                   min=np.min(self.x),
                   max=np.max(self.x))
        params.add('y_offset',
                   value=np.min(self.y),
                   min=0.00,
                   max=np.max(self.y),
                   vary=False)

        # do fit, here with leastsq model
        minner = Minimizer(fcn2min, params, fcn_args=(self.x, self.y))
        result = minner.minimize()

        # Store the Confidence data from the fit
        con_report = lmfit.fit_report(result.params)

        # write error report
        self.textbox.setText("")
        for k in params.keys():
            my_str = str(result.params[k].value)
            self.textbox.append(str(k) + " = " + my_str + "\n")
        self.textbox.append(
            con_report)  # include the confidence data in the textbox

        self.canvas.x = self.x
        self.canvas.y = self.y

        self.canvas.plot(fit_plot=result)
        print(params)
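The Minimizer/fit_report pattern from this handler in a self-contained form; the Gaussian beam profile below is an assumed stand-in for the fcn2min defined elsewhere in this GUI code:

import numpy as np
import lmfit
from lmfit import Minimizer, Parameters

def fcn2min(params, x, data):
    # assumed residual: Gaussian beam profile minus measured data
    v = params.valuesdict()
    model = v['y_offset'] + v['amplitude'] * np.exp(-2 * (x - v['x_offset'])**2 / v['waist']**2)
    return model - data

x = np.linspace(0.0, 1000.0, 200)
data = 0.1 + 0.9 * np.exp(-2 * (x - 480.0)**2 / 150.0**2) + np.random.normal(0, 0.02, x.size)

params = Parameters()
params.add('amplitude', value=data.max(), min=0.0)
params.add('waist', value=200.0, min=10.0, max=2000)
params.add('x_offset', value=x.mean(), min=x.min(), max=x.max())
params.add('y_offset', value=data.min(), vary=False)

result = Minimizer(fcn2min, params, fcn_args=(x, data)).minimize()
print(lmfit.fit_report(result.params))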
Example #5
    def test_add_many_params(self):
        # test that we can add many parameters, and that only Parameter
        # objects are added
        a = Parameter('a', 1)
        b = Parameter('b', 2)

        p = Parameters()
        p.add_many(a, b)

        assert_(list(p.keys()) == ['a', 'b'])
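For comparison, add_many also accepts plain (name, value, vary, min, max) tuples, which avoids building Parameter objects first:

p = Parameters()
p.add_many(('a', 1),
           ('b', 2, True, 0, 10))
assert list(p.keys()) == ['a', 'b']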
Example #6
def ssf_init(B0=None,
             E=None,
             Eh=None,
             El=None,
             Th=None,
             Tl=None,
             randomise=True):
    """ Initialise full schoolfield parameters

    Parameters
    ----------
    B0: float
        Normalisation constant
    E: float
        Activation energy
    Eh: float
        High temperature deactivation energy
    El: float
        Low temperature deactivation energy
    Th: float
        Temperature of high temperature deactivation
    Tl: float
        Temperature of low temperature deactivation

    Returns
    -------
    params: lmfit.Parameter.Parameters object
        parameter object with parameter constraints

    """


    params = Parameters()
    params.add("B0", value=B0, vary=True, min=-np.inf, max=np.inf)
    params.add("E", value=E, vary=True, min=10E-3, max=np.inf)
    params.add("Eh", value=Eh, vary=True, min=10E-3, max=np.inf)
    params.add("El", value=El, vary=True, min=10E-3, max=np.inf)
    params.add("Th", value=Th, vary=True, min=273.15, max=np.inf)
    params.add("Tl", value=Tl, vary=True, min=273.15, max=np.inf)

    if randomise:
        # note: standard-normal draws will usually violate bounds such as
        # min=273.15 for Th/Tl; lmfit may clamp the value to the bound
        for i in params.keys():
            params[i].value = np.random.normal()

    return params
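A hypothetical call with literature-style starting values, keeping them by passing randomise=False:

params = ssf_init(B0=1.0, E=0.65, Eh=3.0, El=1.0, Th=310.0, Tl=280.0,
                  randomise=False)
params.pretty_print()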
Example #7
    def sample_data_button_click(self):
        print('sample data button pressed')
        self.x = np.array([])
        self.y = np.array([])

        hlp = self.sample_data()

        self.x = hlp[:, 0]
        self.y = hlp[:, 1]

        print(self.x)
        print(self.y)

        params = Parameters()
        params.add('amplitude',
                   value=np.max(self.y),
                   min=(np.max(self.y) - np.min(self.y)) / 2.0,
                   max=(np.max(self.y) - np.min(self.y)))
        params.add('waist',
                   value=(np.max(self.x) - np.min(self.x)) / 2.0,
                   min=10.0,
                   max=2000)
        params.add('x_offset',
                   value=np.mean(self.x),
                   min=np.min(self.x),
                   max=np.max(self.x))
        params.add('y_offset',
                   value=0.0,
                   min=0.00,
                   max=np.max(self.y),
                   vary=False)

        # do fit, here with leastsq model
        minner = Minimizer(fcn2min, params, fcn_args=(self.x, self.y))
        result = minner.minimize()

        # Store the Confidence data from the fit
        con_report = lmfit.fit_report(result.params)

        # write error report
        self.textbox.setText("")
        for k in params.keys():
            my_str = str(result.params[k].value)
            self.textbox.append(str(k) + " = " + my_str + "\n")
        self.textbox.append(
            con_report)  # include the confidence data in the textbox

        self.canvas.x = self.x
        self.canvas.y = self.y

        (fit_x, fit_y) = fcn2min(result.params, self.x, None, plot_fit=True)
        self.canvas.plot(fit_plot=[fit_x, fit_y])
        print(params)
Example #8
    def button_click(self):
        print('Fit Button Pressed')
        self.x = self.conv2list(self.text_x.toPlainText())
        self.y = self.conv2list(self.text_y.toPlainText())

        print(self.x)
        print(self.y)

        # convert
        offset = self.x[0]
        self.x = (self.x - self.x[0]) * 1.0 / 10.0 * 25.4 * 1000.0  # in um

        params = Parameters()
        params.add('amplitude',
                   value=np.max(self.y),
                   min=(np.max(self.y) - np.min(self.y)) / 2.0,
                   max=(np.max(self.y) - np.min(self.y)))
        params.add('waist',
                   value=(np.max(self.x) - np.min(self.x)) / 2.0,
                   min=10.0,
                   max=2000)
        params.add('x_offset',
                   value=np.mean(self.x),
                   min=np.min(self.x),
                   max=np.max(self.x))
        params.add('y_offset',
                   value=np.min(self.y),
                   min=0.00,
                   max=np.max(self.y),
                   vary=False)

        # do fit, here with leastsq model
        minner = Minimizer(fcn2min, params, fcn_args=(self.x, self.y))
        result = minner.minimize()

        # Store the Confidence data from the fit
        con_report = lmfit.fit_report(result.params)

        # write error report
        self.textbox.setText("")
        for k in params.keys():
            my_str = str(result.params[k].value)
            self.textbox.append(str(k) + " = " + my_str + "\n")
        self.textbox.append(
            con_report)  # include the confidence data in the textbox

        self.canvas.x = self.x
        self.canvas.y = self.y

        (fit_x, fit_y) = fcn2min(result.params, self.x, None, plot_fit=True)
        self.canvas.plot(fit_plot=[fit_x, fit_y])
        print(params)
Example #9
def xFitInitParams2(refParams, data_range, resultParams=None):
    """
    Initialize parameters for the next fitting iteration using the results of the previous fit
    and, if necessary, the default values of a reference set of parameters    

    Parameters
    ----------
    refParams : lmfit Parameters object
        Reference Parameters object, containing default values and a fixed
        number of Parameter objects, as defined from the model fit function.
    data_range : Range
        Range of indices of nData datasets to include in fit.
    resultParams : lmfit Parameters object, optional
        Parameters object yielded by the previously performed fit, if any.
        The default is None.

    Returns
    -------
    fitParams : lmfit Parameters object
        Initialized fit parameters for the next iteration.

    """
    # Initialize lmfit Parameters object
    fitParams = Parameters()
    # For those parameters that were computed in the last run, use the
    # best-fit values from that run as initial values for the next one
    if resultParams is not None:
        for key in resultParams.keys():
            # try:
            fitParams[key] = cp.copy(resultParams[key])
            # except KeyError: # in case fitParams has been modified since last fitting run
            #     continue

    # Create additional fit parameters, e.g. if the number of datasets has been extended
    for spec_idx in data_range:
        # loop over indices of datasets, in order to create fit parameters for each of them
        for k in refParams.keys():
            if k in ['A', 'xp']:
                # fit parameters that differ between datasets get individual names
                par_key = f'{k}{spec_idx}'
                if par_key not in fitParams.keys():
                    fitParams.add(par_key, value=refParams[k].value,
                                  min=refParams[k].min, vary=refParams[k].vary)
            elif resultParams is None:
                # if no fit has been performed yet, all other (shared) parameters
                # are assigned the "generic" name from refParams
                fitParams[k] = cp.copy(refParams[k])
    return fitParams
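A small demonstration under assumed names: a reference set with per-dataset parameters 'A' and 'xp' plus one shared parameter 'w', expanded from two datasets to three while re-using the first result:

refParams = Parameters()
refParams.add('A', value=1.0, min=0)
refParams.add('xp', value=0.5, min=0)
refParams.add('w', value=2.0)

p1 = xFitInitParams2(refParams, range(2))        # A0, xp0, A1, xp1 plus shared w
p2 = xFitInitParams2(refParams, range(3), p1)    # adds A2, xp2; w is carried over from p1
print(list(p2.keys()))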
Example #10
    def button_profile_click(self):
        print('Fit Profile Button Pressed')
        self.x2 = self.conv2list(self.text_x_profile.toPlainText())
        self.y2 = self.conv2list(self.text_y_profile.toPlainText())

        print(self.x2)
        print(self.y2)

        # convert
        self.x2 = (self.x2) * 1000.0  # in um

        params = Parameters()
        params.add('waist', value=200.0, min=10.0, max=2000)
        params.add('x_offset',
                   value=np.mean(self.x2),
                   min=-3000.0e3,
                   max=+3000.0e3)
        params.add('my_lambda', value=780e-9 / 1e-6,
                   vary=False)  # all units are in um

        # do fit, here with leastsq model
        minner = Minimizer(fcn2min_profile,
                           params,
                           fcn_args=(self.x2, self.y2))
        result = minner.minimize()

        # Store the Confidence data from the fit
        con_report = lmfit.fit_report(result.params)

        # write error report
        self.textbox_profile.setText("")
        for k in params.keys():
            my_str = str(result.params[k].value)
            self.textbox_profile.append(str(k) + " = " + my_str + "\n")
        self.textbox_profile.append(
            con_report)  # include the confidence data in the textbox

        self.canvas_profile.x = self.x2
        self.canvas_profile.y = self.y2

        (fit_x, fit_y) = fcn2min_profile(result.params,
                                         self.x2,
                                         None,
                                         plot_fit=True)
        self.canvas_profile.plot(fit_plot=[fit_x, fit_y])
        print(params)
Example #11
def run_reactive_model(y, inits, mu, std, ntrials=5000, maxfun=5000, ftol=1.e-3, xtol=1.e-3, all_params=1, ssdlist=[200, 250, 300, 350, 400, 'rt'], learn=False, acc_vector=None, **kwargs):

	#########################################################
	#                  FITTING LEARN FX                     #
	#########################################################

	p = Parameters()

	vary = 1 if all_params else 0


	#use this when fitting across all parameters.
	for key, val in inits.items():
		p.add(key, value=val, vary=vary)


	#to fit only the learning terms, those should be the only terms added to the params dictionary.
	#p.add('cor_lr', value= inits['cor_lr'], vary=vary)
	#p.add('err_lr', value= inits['err_lr'], vary=vary)

	popt = Minimizer(ssre_minfunc, p, fcn_args=(y, ntrials, mu, std),
		fcn_kws={'learn':learn, 'acc': acc_vector}, method='Nelder-Mead')
	popt.fmin(maxfun=maxfun, ftol=ftol, xtol=xtol, full_output=True, disp=False)

	params = pd.Series({k: p[k].value for k in p.keys()})
	res = popt.residual
	res[-1] = res[-1] / 10
	y[-1] = y[-1] / 10
	yhat = y + res

	pred = pd.DataFrame.from_dict({'ssdlist': ssdlist, 'ydata': y, 'residuals': res,
		'yhat': yhat, 'chi': popt.chisqr}, orient='columns')

	return pred, params
Example #12
class DarkGreyModel(ABC):
    '''
    Abstract Base Class for DarkGrey Models
    '''
    
    def __init__(self, params, rec_duration):
        '''
        Initialises the model instance

        Parameters
        ----------
        params : dict
            A dictionary of parameters for the fitting. Key - value pairs should follow the 
            `lmfit.Parameters` declaration: 
            e.g. {'A' : {'value': 10, 'min': 0, 'max': 30}} - sets the initial value and the bounds 
            for parameter `A` 
        rec_duration : float
            The duration of each measurement record in hours 
        '''
        
        self.result = None
        
        # convert the params dict into lmfit parameters
        if isinstance(params, Parameters):
            self.params = deepcopy(params)
        else:
            self.params = Parameters()
            for k, v in params.items():
                self.params.add(k, **v)

        # set the number of records based on the measured variable's values
        self.rec_duration = rec_duration
            
    def fit(self, X, y, method, ic_params=None, obj_func=None):
        '''
        Fits the model by minimising the objective function value

        Parameters
        ----------
        X : dict
            A dictionary of input values for the fitting - these values are fixed during the fit.
        y : np.array
            The measured variable's values for the minimiser to fit to
        method : str
            Name of the fitting method to use. Valid values are described in:
            `lmfit.minimize`
        ic_params : dict
            The initial condition parameters - if passed in, these will overwrite
            the initial conditions in self.params
        obj_func : function
            The objective function that is passed to `lmfit.minimize`.
            It must have (params, *args, **kwargs) as its signature.
            Default: `def_obj_func`

        Returns
        -------
        self : DarkGreyModel
            The fitted model. The optimized parameters and goodness-of-fit
            statistics are available via `self.result`, an `lmfit.MinimizerResult`.
        '''

        # overwrite initial conditions
        if ic_params is not None:
            for k, v in ic_params.items():
                if k in self.params:
                    self.params[k].value = v      
                else:
                    logger.warning(f'Key `{k}` not found in initial conditions params')


        # we are passing X, y to minimise as kwargs 
        self.result = minimize(obj_func or self.def_obj_func, 
                               self.params, 
                               kws={'model': self.model, 'X': X, 'y': y}, 
                               method=method)

        self.params = self.result.params

        return self

    def predict(self, X, ic_params=None):
        '''
        Generates a prediction based on the result parameters and X.

        Parameters
        ----------
        X : dict
            A dictionary of input values
        ic_params : dict
            The initial condition parameters - if passed in, these will overwrite
            the initial conditions in self.params

        Returns
        -------
        The results of the model
        '''
       
        if ic_params is not None:
            for k, v in ic_params.items():
                self.params[k].value = v     

        return self.model(self.params, X)
    
    def model(self, params, X):
        '''
        A system of differential equations describing the thermal model.
        Concrete subclasses must implement this method.
        '''
        pass

    def lock(self):
        '''
        Locks the parameters by setting `vary` to False
        '''

        for param in self.params.keys():
            self.params[param].vary = False

        return self

    @staticmethod
    def def_obj_func(params, *args, **kwargs):
        '''
        Default objective function
        Computes the residual between measured data and fitted data
        The model, X and y are passed in as kwargs by `lmfit.minimize`
        '''
        return ((kwargs['model'](params=params, X=kwargs['X']).Z - kwargs['y'])).ravel()        
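A minimal hypothetical subclass sketch: a single thermal mass whose temperature Z relaxes toward an ambient input Ta, integrated with a forward Euler step, and returned with a .Z attribute so the default objective function above works unchanged. All names and data here are illustrative:

import numpy as np
from types import SimpleNamespace

class SingleNodeModel(DarkGreyModel):

    def model(self, params, X):
        # dZ/dt = (Ta - Z) / (R * C), integrated with forward Euler
        R, C = params['R'].value, params['C'].value
        Ta = np.asarray(X['Ta'], dtype=float)
        Z = np.empty_like(Ta)
        Z[0] = params['Z0'].value
        for i in range(1, len(Ta)):
            Z[i] = Z[i - 1] + self.rec_duration * (Ta[i - 1] - Z[i - 1]) / (R * C)
        return SimpleNamespace(Z=Z)

# synthetic data from a "true" model, then a fit starting elsewhere
Ta = 20 + 5 * np.sin(np.linspace(0, 4 * np.pi, 96))
truth = SingleNodeModel({'R': {'value': 2.0}, 'C': {'value': 3.0},
                         'Z0': {'value': 18.0}}, rec_duration=0.25)
y = truth.predict({'Ta': Ta}).Z + np.random.normal(0, 0.05, Ta.size)

fitted = SingleNodeModel({'R': {'value': 1.0, 'min': 0.1},
                          'C': {'value': 1.0, 'min': 0.1},
                          'Z0': {'value': 20.0}}, rec_duration=0.25)
fitted.fit(X={'Ta': Ta}, y=y, method='leastsq')
print(fitted.params)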
Example #13
fit_params.add('decay', value=0.02)

out = minimize(residual, fit_params, args=(x,), kws={'data':data})

fit = residual(fit_params, x)

print(' N fev = ', out.nfev)
print(out.chisqr, out.redchi, out.nfree)

report_fit(fit_params)
#ci=calc_ci(out)
ci, tr = conf_interval(out, trace=True)
report_ci(ci)

if HASPYLAB:
    names = list(fit_params.keys())
    i = 0
    gs = pylab.GridSpec(4, 4)
    sx = {}
    sy = {}
    for fixed in names:
        j = 0
        for free in names:
            if j in sx and i in sy:
                ax = pylab.subplot(gs[i, j], sharex=sx[j], sharey=sy[i])
            elif i in sy:
                ax = pylab.subplot(gs[i, j], sharey=sy[i])
                sx[j] = ax
            elif j in sx:
                ax = pylab.subplot(gs[i, j], sharex=sx[j])
                sy[i] = ax
Example #14
        def residual(params: Parameters):
            ### if a result already exists in the database, skip the calculation
            result = self.db.session.query(Result).filter(Result.task == task) \
                .filter(Result.parameter == str(params)).first()
            if result is not None:
                R = result.residual
                if R is not None:
                    return json.loads(R)
            ###

            ### save ppf file and run NPT
            ppf = PPF(string=task.ppf)
            paras = OrderedDict()
            for k, v in params.items():
                print(v)
                paras[restore_para_name(k)] = v.value
            ppf.set_nb_paras(paras)

            # TODO Fit several torsions one by one
            if torsions is not None and len(torsions) > 0:
                from config import Config
                print('Fit torsion based on new non-bonded parameters')
                for n, torsion in enumerate(torsions):
                    print(torsion)
                    ppf.fit_torsion(Config.DFF_ROOT,
                                    torsion[0],
                                    torsion[1],
                                    torsion[2],
                                    torsion[3],
                                    dfi_name='fit_torsion-%i-%i' %
                                    (task.iteration + 1, n))
            if modify_torsions is not None:
                for torsion in modify_torsions:
                    ppf.modify_torsion(torsion[0], torsion[1], torsion[2])

            ### new iteration
            task.iteration += 1
            self.db.session.commit()

            ppf_out = os.path.join(self.CWD,
                                   '%s-%i.ppf' % (task.name, task.iteration))
            ppf.write(ppf_out)

            if not task.npt_started():
                ### save gtx_dirs and gtx_cmds for running jobs on gtx queue
                gtx_dirs = []
                gtx_cmds = []
                ###
                for target in task.targets:
                    if not target.need_npt:
                        continue
                    if target.npt_started():
                        continue
                    cmds = target.run_npt(ppf_out,
                                          paras,
                                          drde_dict=self.drde_dict)
                    ### save gtx_dirs and gtx_cmds for running jobs on gtx queue
                    if cmds != []:
                        gtx_dirs.append(target.dir_npt)
                        gtx_cmds = cmds

                os.chdir(self.CWD)

                if gtx_dirs != []:
                    from .models import npt, jobmanager
                    commands_list = npt.gmx.generate_gpu_multidir_cmds(
                        gtx_dirs,
                        gtx_cmds,
                        n_parallel=self.n_parallel,
                        n_gpu=jobmanager.ngpu,
                        n_procs=jobmanager.nprocs)
                    for i, commands in enumerate(commands_list):
                        sh = os.path.join(task.dir, '_job.npt-%i.sh' % i)
                        jobmanager.generate_sh(task.dir,
                                               commands,
                                               name='%s-%i-%i' %
                                               (task.name, task.iteration, i),
                                               sh=sh)
                        jobmanager.submit(sh)

            if not task.vacuum_started():
                gtx_dirs = []
                gtx_cmds = []
                for target in task.targets:
                    if not target.need_vacuum:
                        continue
                    if target.vacuum_started():
                        continue
                    cmds = target.run_vacuum(ppf_out,
                                             paras,
                                             drde_dict=self.drde_dict)
                    ### save gtx_dirs and gtx_cmds for running jobs on gtx queue
                    if cmds != []:
                        gtx_dirs.append(target.dir_vacuum)
                        gtx_cmds = cmds

                os.chdir(self.CWD)

                if gtx_dirs != []:
                    from .models import vacuum, jobmanager
                    commands_list = vacuum.gmx.generate_gpu_multidir_cmds(
                        gtx_dirs,
                        gtx_cmds,
                        n_parallel=self.n_parallel,
                        n_gpu=jobmanager.ngpu,
                        n_procs=jobmanager.nprocs)
                    for i, commands in enumerate(commands_list):
                        sh = os.path.join(task.dir, '_job.vacuum-%i.sh' % i)
                        jobmanager.generate_sh(task.dir,
                                               commands,
                                               name='%s-%i-VAC%i' %
                                               (task.name, task.iteration, i),
                                               sh=sh)
                        jobmanager.submit(sh)

            while True:
                if task.npt_finished() and task.vacuum_finished():
                    break
                else:
                    current_time = time.strftime('%m-%d %H:%M')
                    print(current_time + ' Job still running. Wait ...')
                    time.sleep(60)

            Dens = []
            Hvap = []
            R_dens = []
            R_hvap = []
            targets = task.targets.all()
            for target in targets:
                if target.wDens > 1E-4:
                    dens = target.get_density()
                    R_dens.append((dens - target.density) / target.density *
                                  100 * target.wDens)  # deviation  percent
                    Dens.append(dens)
                if target.wHvap > 1E-4:
                    hvap = target.get_hvap()
                    R_hvap.append((hvap - target.hvap) / target.hvap * 100 *
                                  target.wHvap)  # deviation percent
                    Hvap.append(hvap)
            R = R_dens + R_hvap
            os.chdir(self.CWD)

            ### expansivity
            if weight_expansivity != 0:
                R_expa = []
                for i_mol in range(len(targets) // 2):
                    target_T1 = targets[2 * i_mol]
                    target_T2 = targets[2 * i_mol + 1]
                    res_Kt = ((target_T1.sim_dens - target_T2.sim_dens) / (target_T1.density - target_T2.density) - 1) \
                             * 100 * weight_expansivity
                    R_expa.append(res_Kt)

                R += R_expa

            # parameter penalty
            R_pena = []
            for k, v in params.items():
                if k.endswith('r0') or k.endswith('e0'):
                    res = (v.value - adj_nb_paras[restore_para_name(k)]
                           ) / adj_nb_paras[restore_para_name(k)]
                elif k.endswith('bi'):
                    res = v.value - adj_nb_paras[restore_para_name(k)]
                else:
                    res = v.value
                penalty = get_penalty_for_para(k)
                R_pena.append(res * penalty * np.sqrt(len(R_dens)))
            R += R_pena

            ### save result to database
            result = Result(task=task)
            result.iteration = task.iteration
            result.ppf = str(ppf)
            result.parameter = str(params)
            result.residual = json.dumps(R)
            self.db.session.add(result)
            self.db.session.commit()
            ###

            ### write current parameters and residual to log
            txt = '\nITERATION %i, RSQ %.2f\n' % (
                task.iteration, np.sum(list(map(lambda x: x**2, R))))
            txt += '\nPARAMETERS:\n'
            for k, v in self.drde_dict.items():
                txt += '%10.5f  %-12s  Fixed\n' % (v, k)
            for k, v in params.items():
                txt += '%10.5f  %-12s  %10.5f\n' % (
                    v.value, restore_para_name(k), init_params[k])
            txt += '\n%8s %8s %10s %8s %8s %8s %3s %3s %s %s\n' % (
                'RESIDUAL', 'Property', 'Deviation', 'Expt.', 'Simu.',
                'Weight', 'T', 'P', 'Molecule', 'SMILES')

            targets_dens = task.targets.filter(Target.wDens > 1E-4).all()
            for i, r in enumerate(R_dens):
                target = targets_dens[i]
                prop = 'density'
                weight = target.wDens
                txt += '%8.2f %8s %8.2f %% %8.3f %8.3f %8.2f %3i %3i %s %s\n' % (
                    r, prop, r / weight, target.density, Dens[i], weight,
                    target.T, target.P, target.name, target.smiles)

            targets_hvap = task.targets.filter(Target.wHvap > 1E-4).all()
            for i, r in enumerate(R_hvap):
                target = targets_hvap[i]
                prop = 'hvap'
                weight = target.wHvap
                txt += '%8.2f %8s %8.2f %% %8.1f %8.1f %8.2f %3i %3i %s %s\n' % (
                    r, prop, r / weight, target.hvap, Hvap[i], weight,
                    target.T, target.P, target.name, target.smiles)

            if weight_expansivity != 0:
                for i, r in enumerate(R_expa):
                    target = targets[i * 2]
                    prop = 'expan'
                    weight = weight_expansivity
                    txt += '%8.2f %8s %8.2f %% %8s %8s %8.2f %3s %3s %s %s\n' % (
                        r, prop, r / weight, '', '', weight, '', '',
                        target.name, target.smiles)

            for i, r in enumerate(R_pena):
                prop = 'penalty'
                k = list(params.keys())[i]
                txt += '%8.2f %8s %10s %8s %8s %8.2f\n' % (
                    r, prop, k, '', '', get_penalty_for_para(k))

            print(txt)
            with open(LOG, 'a') as log:
                log.write(txt)
            ###

            return R
Example #15
        def jacobian(params: Parameters):
            ### if a result already exists in the database, skip the calculation
            result = self.db.session.query(Result).filter(Result.task == task) \
                .filter(Result.parameter == str(params)).first()
            if result is not None:
                J = result.jacobian
                if J is not None:
                    return json.loads(J)
            ###

            paras = OrderedDict()
            for k, v in params.items():
                paras[restore_para_name(k)] = v.value

            J_dens = []
            J_hvap = []
            targets = task.targets.all()
            for target in targets:
                if target.wDens > 1E-4:
                    dDdp_list = target.get_dDens_list_from_paras(paras)
                    J_dens.append([
                        i / target.density * 100 * target.wDens
                        for i in dDdp_list
                    ])  # deviation  percent
                if target.wHvap > 1E-4:
                    dHdp_list = target.get_dHvap_list_from_paras(paras)
                    J_hvap.append([
                        i / target.hvap * 100 * target.wHvap for i in dHdp_list
                    ])  # deviation  percent
            J = J_dens + J_hvap
            os.chdir(self.CWD)

            ### expansivity
            if weight_expansivity != 0:
                J_expa = []
                for i_mol in range(len(targets) // 2):
                    target_T1 = targets[2 * i_mol]
                    target_T2 = targets[2 * i_mol + 1]
                    dExpa = (target_T1.dDdp_array - target_T2.dDdp_array) / (target_T1.density - target_T2.density) \
                            * 100 * weight_expansivity
                    J_expa.append(list(dExpa))

                J += J_expa

            ### parameter penalty
            J_pena = []
            for k, v in params.items():
                if k.endswith('r0') or k.endswith('e0'):
                    d = 1 / adj_nb_paras[restore_para_name(k)]
                else:
                    d = 1
                penalty = get_penalty_for_para(k)
                J_pena.append(d * penalty * np.sqrt(len(J_dens)))
            J_pena = [list(a) for a in np.diag(J_pena)
                      ]  # convert list to diagonal matrix
            J += J_pena

            ### save result to database
            result = self.db.session.query(Result).filter(Result.task == task) \
                .filter(Result.iteration == task.iteration).first()

            result.jacobian = json.dumps(J)
            self.db.session.commit()
            ###

            ### write Jacobian to log
            txt = '\nJACOBIAN MATRIX:\n'
            for k in params.keys():
                txt += '%10s' % restore_para_name(k)
            txt += '\n'

            targets_dens = task.targets.filter(Target.wDens > 1E-4).all()
            for i, row in enumerate(J_dens):
                name = targets_dens[i].name
                prop = 'density'
                for item in row:
                    txt += '%10.2f' % item
                txt += ' %8s %s\n' % (prop, name)

            targets_hvap = task.targets.filter(Target.wHvap > 1E-4).all()
            for i, row in enumerate(J_hvap):
                name = targets_hvap[i].name
                prop = 'hvap'
                for item in row:
                    txt += '%10.2f' % item
                txt += ' %8s %s\n' % (prop, name)

            if weight_expansivity != 0:
                for i, row in enumerate(J_expa):
                    name = targets[2 * i].name
                    prop = 'expan'
                    for item in row:
                        txt += '%10.2f' % item
                    txt += ' %8s %s\n' % (prop, name)

            for i, row in enumerate(J_pena):
                name = restore_para_name(list(params.keys())[i])
                prop = 'penalty'
                for item in row:
                    txt += '%10.2f' % item
                txt += ' %8s %s\n' % (prop, name)

            print(txt)
            with open(LOG, 'a') as log:
                log.write(txt)
            ###

            return J
Example #16
    def load_lmfit_parameters(self, x, y, zerolev, err_zerolev, n_comps, wide_component = False, A_limits = 0.30, mu_precission = 2, sigma_limit = 5):
                      
        #Scale parameters
        ind_max = argmax(y)
        self.fit_dict['x_scaler'], self.fit_dict['y_scaler'] = x[ind_max], y[ind_max]

        #Scale the range
        self.fit_dict['x_n'] = x - self.fit_dict.x_scaler
        self.fit_dict['y_n'] = y / self.fit_dict.y_scaler
        self.fit_dict['zerolev_n'] = zerolev / self.fit_dict.y_scaler
        self.fit_dict['sigZerolev_n'] = err_zerolev / self.fit_dict.y_scaler
          
        #Get line maxima and minima
        peak_wave, peak_flux, minima_wave, minima_flux = self.get_lines_peaks(ind_max, n_comps)
        
        #Store peaks location for log        
        self.fit_dict['maxLambdas'] = peak_wave + self.fit_dict['x_scaler']
        self.fit_dict['maxPeaks'] = peak_flux * self.fit_dict['y_scaler']
        self.fit_dict['params_lmfit_wide'] = None
          
        #Lmfit dictionary        
        params = Parameters()
        for i in range(n_comps):  
            index = str(i)
            params.add('A'     + index, value = peak_flux[i] - mean(self.fit_dict.zerolev_n), min = 0.0)
            params.add('mu'    + index, value = peak_wave[i], min = peak_wave[i] - mu_precission, max = peak_wave[i] + mu_precission)
            params.add('sigma' + index, value = 1, min = 0)
            params.add('fwhm'  + index, expr = '2.354820045 * {sigma}'.format(sigma = 'sigma'  + index))
            params.add('area_G' + index, expr = '{A} * {sigma} * {sqrt2pi}'.format(A = 'A'  + index, sigma = 'sigma' + index, sqrt2pi = self.sqrt2pi))
            
        #For blended components we set the same sigma: #WARNING: We could not just delete this
        if n_comps > 1:
            small_components = list(range(n_comps))
            Highest_index = argmax(self.fit_dict.maxPeaks)
            
            del small_components[Highest_index]
            
            for indx in small_components: #We set the same sigma               
                expresion = 'sigma{index_big} * ((mu{index_small} + {scaler}) / (mu{index_big} + {scaler}))'.format(
                                index_big = Highest_index, index_small = str(indx), scaler = self.fit_dict['x_scaler'])
                params['sigma' + str(indx)].set(expr = expresion) 
                      
        #Special condition: wide component in Halpha
        wide_params_list = []        
        if self.fit_dict.add_wide_component:
            
            #Additional fitter
            params_W = Parameters()
            
            #TRICK TO ADD AN ADDITIONAL VALUE
            n_nindex = str(n_comps)               
            params_W.add('A'  + n_nindex,       value  = 0.2, min = 0)
            params_W.add('mu' + n_nindex,       value = 0.0)
            params_W.add('sigma' + n_nindex,    value = 6, min = 3, max = 20.0)
            params_W.add('fwhm' + n_nindex,     expr = '2.354820045 * {sigma}'.format(sigma = 'sigma'  + n_nindex))
            params_W.add('area_G' + n_nindex,    expr = '{A} * {sigma} * {sqrt2pi}'.format(A = 'A'  + n_nindex, sigma = 'sigma' + n_nindex, sqrt2pi = self.sqrt2pi))
            wide_params_list = list(params_W.keys())
            
            #Update for Nitrogen relation: Mode 1 adjusts the fluxes
            params['area_G0'].set(expr = 'area_G2 / {N2_ratio}'.format(N2_ratio = 2.94))
            
            self.fit_dict['params_lmfit_wide'] = params_W
            
        #Store the data 
        self.fit_dict['params_lmfit'] = params
        self.fit_dict['parameters_list'] = array(list(params.keys()) + wide_params_list)
 
        return
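The constraint-expression mechanism used above, shown in isolation (lmfit's expression interpreter already provides sqrt and pi):

from lmfit import Parameters

p = Parameters()
p.add('A0', value=2.0, min=0)
p.add('sigma0', value=1.5, min=0)
p.add('fwhm0', expr='2.354820045 * sigma0')        # FWHM tied to sigma
p.add('area_G0', expr='A0 * sigma0 * sqrt(2*pi)')  # Gaussian area tied to A and sigma
print(p['fwhm0'].value, p['area_G0'].value)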
Example #17
    def Load_lmfit_parameters(self, x, y, zerolev, err_zerolev, n_comps, wide_component = False, A_limits = 0.30, mu_precission = 2, sigma_limit = 5):
        
        #Scale parameters
        ind_max = argmax(y)
        self.Fitting_dict['x_scaler'] = x[ind_max]
        self.Fitting_dict['y_scaler'] = y[ind_max]
        
        
        #Scale the range
        self.Fitting_dict['x_norm']             = x - self.Fitting_dict['x_scaler']
        self.Fitting_dict['y_norm']             = y / self.Fitting_dict['y_scaler']
        self.Fitting_dict['zerolev_norm']       = zerolev / self.Fitting_dict['y_scaler']
        self.Fitting_dict['sig_zerolev_norm']   = err_zerolev / self.Fitting_dict['y_scaler']
        
        #Get line maxima and minima
        peak_wave, peak_flux, minima_wave, minima_flux = self.get_lines_peaks(ind_max, n_comps)
        
        #Store peaks location for log
        self.Fitting_dict['peak_waves']  = peak_wave + self.Fitting_dict['x_scaler']
        self.Fitting_dict['peak_Maxima'] = peak_flux * self.Fitting_dict['y_scaler']
         
        #Lmfit dictionary        
        params = Parameters()
        for i in range(n_comps):  
            index = str(i)
            params.add('A'     + index, value = peak_flux[i] - mean(self.Fitting_dict['zerolev_norm']), min = 0.0)
            params.add('mu'    + index, value = peak_wave[i], min = peak_wave[i] - mu_precission, max = peak_wave[i] + mu_precission)
            params.add('sigma' + index, value = 1, min = 0)
            params.add('fwhm'  + index, expr = '2.354820045 * {sigma}'.format(sigma = 'sigma'  + index))
            params.add('FluxG' + index, expr = '{A} * {sigma} * {sqrt2pi}'.format(A = 'A'  + index, sigma = 'sigma' + index, sqrt2pi = self.s2pi))
            
        #For blended components we set the same wavelength:
        if n_comps > 1:
            
            print(self.Fitting_dict['blended wavelengths'])
            
            Highest_index       = argmax(self.Fitting_dict['peak_Maxima'])
            small_components    = list(range(n_comps))
            del small_components[Highest_index]
            
            for indx in small_components:
                
                #We set the same sigma
                index_small = str(indx)
                expresion = 'sigma{index_big} * ( (mu{index_small} + {scaller}) / (mu{index_big} + {scaller}) )'.format(index_big = Highest_index, index_small = index_small, scaller = self.Fitting_dict['x_scaler'])
                params['sigma' + index_small].set(expr = expresion) 
            
#                 #We force the theoretical - biggest mu
#                 expresion = '{mu_small} - mu{index_big}'.format(mu_small = self.Fitting_dict['blended wavelengths'][indx] - self.Fitting_dict['x_scaler'], index_big = Highest_index)
#                 params['mu' + index_small].set(expr = expresion) 
          
        #Special condition: wide component in Halpha
        Wide_params_list = []
        params_W = None
        if self.Fitting_dict['Add_wideComponent']:
            
            #Additional fitter
            params_W = Parameters()
            
            #TRICK TO ADD AN ADDITIONAL VALUE
            n_nindex = str(n_comps)               
            params_W.add('A'  + n_nindex,       value  = 0.2, min = 0)
            params_W.add('mu' + n_nindex,       value = 0.0)
            params_W.add('sigma' + n_nindex,    value = 6, min = 3, max = 20.0)
            params_W.add('fwhm' + n_nindex,     expr = '2.354820045 * {sigma}'.format(sigma = 'sigma'  + n_nindex))
#             params_W.add('FluxG'+ n_nindex,     expr = '({A}*{fwhm})/(2.35*0.3989)'.format(A = 'A'  + n_nindex, fwhm = 'fwhm' + n_nindex))
            params_W.add('FluxG' + n_nindex, expr = '{A} * {sigma} * (2*3.1415)**0.5'.format(A = 'A'  + n_nindex, sigma = 'sigma' + n_nindex))
            Wide_params_list = list(params_W.keys())
            

            #Update for Nitrogen relation
#             params['FluxG0'].set(expr = 'FluxG2 / {N2_ratio}'.format(N2_ratio = 2.94))
            expression = '(A2*sigma2) / ({N2_ratio}*sigma0) '.format(N2_ratio = 2.94)
            params['A0'].set(expr = expression) 

        #Store the data
        self.Fitting_dict['lmfit_params']       = params
        self.Fitting_dict['lmfit_params_wide']  = params_W
        self.Fitting_dict['parameters_list']    = array(list(params.keys()) + Wide_params_list)
 
        return
Example #18
def fit_PRF_on_concatenated_data(data_shared,voxels_in_this_slice,n_TRs,n_slices,fit_on_all_data,plotbool,raw_design_matrices, dm_for_BR,
	valid_regressors, n_pixel_elements_convolved, n_pixel_elements_raw,plotdir,voxno,slice_no,randint,roi,TR,model,hrf_params_shared,all_results_shared,conditions,
	results_frames,	postFix=[],max_eccentricity=1,max_xy = 5,orientations=['0','45','90','135','180','225','270','315','X'],stim_radius = 7.5):
	"""
	stim_radius lijkt niet veel uit te maken.
	"""
	# grab data for this fit procedure from shared memory
	time_course = np.array(data_shared[:,voxels_in_this_slice][:,voxno])
	hrf_params = np.array(hrf_params_shared[:,voxels_in_this_slice][:,voxno])

	n_orientations = len(orientations)

	# to plot the time course:
	#   %pylab
	#   shell
	# then input:
	#   pl.plot(range(0, 3000), time_course)

	# already initialize the final PRF dict
	PRFs = {}

	if fit_on_all_data:

		#########################################################################################################################################################################################################################
		#### Instantiate parameters 
		#########################################################################################################################################################################################################################

		## initiate search space with Ridge prefit
		Ridge_start_params, PRFs['Ridge'], BR_predicted = fitRidge_for_Dumoulin(dm_for_BR, time_course, valid_regressors=valid_regressors, n_pixel_elements=n_pixel_elements_convolved, alpha=1e14)
		# params['xo_%s'%conditions[0]].value = Ridge_start_params['xo']
		# params['yo_%s'%conditions[0]].value = Ridge_start_params['yo']

		## initiate parameters:
		params = Parameters()
		
		# one baseline parameter
		params.add('baseline',value=0.0)

		# two location parameters
		# xo_yo_search_width_in_degrees = 2

		# these lines work with PRF_01 etc.
		params.add('xo_%s'%conditions[0], value = Ridge_start_params['xo'])
		params.add('yo_%s'%conditions[0], value = Ridge_start_params['yo'])

		# # fit method with ecc boundary
		# params.add('xo_%s'%conditions[0], value = 0.0,min=-max_ecc,max=max_ecc)# if xo_%s>0 else -(sqrt(max_ecc**2-abs(yo_%s)**2) - abs(delta_xo_%s))'%(tuple(np.repeat(conditions[0],5))))
		# params.add('yo_%s'%conditions[0], value = 0.0,min=0,expr='(sqrt(max_ecc**2-abs(xo_%s)**2) - delta_yo_%s'%(tuple(np.repeat(conditions[0],2))))# if yo_%s>0 else -(sqrt(max_ecc**2-abs(xo_%s)**2) - abs(delta_yo_%s))'%(tuple(np.repeat(conditions[0],5))))

		# # these parameters ensure a maximum ecc
		# params.add('delta_yo_%s'%conditions[0], value=0.0,min=0)#,expr='sqrt(max_ecc**2-abs(xo_%s)**2)*2 - delta_delta_yo_%s'%(tuple(np.repeat(conditions[0],2))))
		# params.add('max_ecc',value=max_ecc,vary=False)
		# params.add('sign_yo_%s'%conditions[0],value=0.01)

		# V3 like eccen-sd relation
		# intercept = 0.7 /stim_radius
		# slope = 0.3 / stim_radius
		# start_size = intercept + np.linalg.norm(Ridge_start_params['xo'],Ridge_start_params['yo']) * slope
		params.add('sigma_center_%s'%conditions[0],value=0.1,min=0.0)#min=0.01 # this means initialization at 0.1 * 7.5 = 0.75 degrees, with minimum of 0.075 degrees
		params.add('amp_center_%s'%conditions[0],value=0.05,min=0.0)#min=0.01 # this is initialized at 0.001

		# surround parameters
		params.add('delta_sigma_%s'%conditions[0],value=0.4,min=0.0) # this difference parameter ensures that the surround is always larger than the center#,min=0.0000000001
		params.add('sigma_surround_%s'%conditions[0],value=0.3,expr='sigma_center_%s+delta_sigma_%s'%(conditions[0],conditions[0])) # surround size should roughly be 5 times that of the center
		params.add('delta_amplitude_%s'%conditions[0],value=0.045,min=0.0) # this difference parameter ensures that the surround is never deeper than the center is high,min=0.0000000001
		params.add('amp_surround_%s'%conditions[0],value=-0.005,max=0.0,expr='-amp_center_%s+delta_amplitude_%s'%(conditions[0],conditions[0])) # initialized at 10% of center amplitude #max=-0.0000000001,

		# when fitting an OG model, set all surround and delta parameters to 0 and to not vary and set the expression to None, otherwise it will start to vary anyway
		if model == 'OG':	
			params['amp_surround_%s'%conditions[0]].value,params['amp_surround_%s'%conditions[0]].vary,params['amp_surround_%s'%conditions[0]].expr = 0, False, None
			params['delta_amplitude_%s'%conditions[0]].vary, params['delta_sigma_%s'%conditions[0]].vary,params['sigma_surround_%s'%conditions[0]].vary = False, False, False

		# params['delta_yo_%s'%conditions[0]].value = sqrt(max_ecc**2-abs(Ridge_start_params['xo'])**2) - Ridge_start_params['yo']
		# params['delta_delta_yo_%s'%conditions[0]].value =  sqrt(max_ecc**2-abs(Ridge_start_params['xo'])**2)*2 + params['delta_yo_%s'%conditions[0]].value

	else:

		#########################################################################################################################################################################################################################
		#### INITIATING PARAMETERS with all results
		#########################################################################################################################################################################################################################

		# grab data for this fit procedure from shared memory
		all_results = np.array(all_results_shared[:,voxels_in_this_slice][:,voxno])
		## initiate parameters:
		params = Parameters()

		# shared baseline param:
		params.add('baseline', value = all_results[results_frames['baseline']])

		# location parameters
		for condition in conditions:
			params.add('xo_%s'%condition, value = all_results[results_frames['xo']])
			params.add('yo_%s'%condition, value = all_results[results_frames['yo']])

			# center parameters:
			params.add('sigma_center_%s'%condition,value=all_results[results_frames['sigma_center']]/stim_radius,min=0.0) # this means initialization at 0.05/2 * 15 = 1.5 degrees, ,min=0.0084
			params.add('amp_center_%s'%condition,value=all_results[results_frames['amp_center']],min=0.0) # this is initialized at 0.001 ,min=0.0000000001

			# surround parameters
			params.add('sigma_surround_%s'%condition,value=all_results[results_frames['sigma_surround']]/stim_radius,expr='sigma_center_%s+delta_sigma_%s'%(condition,condition)) # surround size should roughly be 5 times that of the center
			params.add('amp_surround_%s'%condition,value=all_results[results_frames['amp_surround']],max=0.0,expr='-amp_center_%s+delta_amplitude_%s'%(condition,condition)) # initialized at 10% of center amplitudemax=-0.0000000001
			params.add('delta_sigma_%s'%condition,value=all_results[results_frames['delta_sigma']],min=0.0) # this difference parameter ensures that the surround is always larger than the centermin=0.0000000001
			params.add('delta_amplitude_%s'%condition,value=all_results[results_frames['delta_amplitude']],min=0.0) # this difference parameter ensures that the surround is never deeper than the center is highmin=0.0000000001

			# when fitting an OG model, set all surround and delta parameters to 0 and to not vary and set the expression to None, otherwise it will start to vary anyway
			if model == 'OG':	
				params['amp_surround_%s'%condition].value,params['amp_surround_%s'%condition].vary,params['amp_surround_%s'%condition].expr = 0, False, None
				params['delta_amplitude_%s'%condition].vary, params['delta_sigma_%s'%condition].vary,params['sigma_surround_%s'%condition].vary = False, False, False


		g = gpf(design_matrix = raw_design_matrices[conditions[0]], max_eccentricity = max_eccentricity, n_pixel_elements = n_pixel_elements_raw, rtime = TR, ssr = 1,slice_no=slice_no)
		
		# recreate PRFs
		this_surround_PRF = g.twoD_Gaussian(all_results[results_frames['xo']],all_results[results_frames['yo']],
			all_results[results_frames['sigma_surround']]/stim_radius) * all_results[results_frames['amp_surround']]
		this_center_PRF = g.twoD_Gaussian(all_results[results_frames['xo']], all_results[results_frames['yo']],
			all_results[results_frames['sigma_center']]/stim_radius) * all_results[results_frames['amp_center']]
		PRFs['All_fit'] = this_center_PRF + this_surround_PRF

	#########################################################################################################################################################################################################################
	#### Prepare fit object and function
	#########################################################################################################################################################################################################################

	# initiate model prediction object
	ssr = np.round(1/(TR/float(n_slices)))

	gpfs = {}
	for condition in conditions:
		gpfs[condition] = gpf(design_matrix = raw_design_matrices[condition], max_eccentricity = max_eccentricity, n_pixel_elements = n_pixel_elements_raw, rtime = TR, ssr = ssr,slice_no=slice_no)

	def residual(params):
		
		# initiate model prediction at baseline value
		combined_model_prediction = np.ones_like(time_course) * params['baseline'].value

		# now loop over conditions, create prediction and add to total prediction
		for condition in conditions:
			combined_model_prediction +=  gpfs[condition].hrf_model_prediction(params['xo_%s'%condition].value, params['yo_%s'%condition].value, 
				params['sigma_center_%s'%condition].value,hrf_params)[0] * params['amp_center_%s'%condition].value
			combined_model_prediction +=  gpfs[condition].hrf_model_prediction(params['xo_%s'%condition].value, params['yo_%s'%condition].value, 
				params['sigma_surround_%s'%condition].value,hrf_params)[0] * params['amp_surround_%s'%condition].value
		return time_course - combined_model_prediction
	#########################################################################################################################################################################################################################
	#### evaluate fit
	#########################################################################################################################################################################################################################

	# optimize parameters; note that modern lmfit returns a MinimizerResult and
	# leaves the input params untouched, so take the fitted values from the result
	fit_result = minimize(residual, params, args=(), kws={}, method='powell')
	params = fit_result.params

	#########################################################################################################################################################################################################################
	#### Recreate resulting predictions and PRFs with optimized parameters
	#########################################################################################################################################################################################################################

	# initiate model prediction at baseline value
	combined_model_prediction = np.ones_like(time_course) * params['baseline'].value

	# now loop over conditions, create prediction and add to total prediction
	model_predictions = {}
	for condition in conditions:
		this_center_model_prediction = gpfs[condition].hrf_model_prediction(params['xo_%s'%condition].value, params['yo_%s'%condition].value, 
			params['sigma_center_%s'%condition].value,hrf_params)[0] * params['amp_center_%s'%condition].value
		this_surround_model_prediction = gpfs[condition].hrf_model_prediction(params['xo_%s'%condition].value, params['yo_%s'%condition].value, 
			params['sigma_surround_%s'%condition].value, hrf_params)[0] * params['amp_surround_%s'%condition].value
		model_predictions[condition] = this_center_model_prediction + this_surround_model_prediction
		combined_model_prediction += model_predictions[condition]

		# recreate PRFs
		this_center_PRF = gpfs[condition].twoD_Gaussian(params['xo_%s'%condition].value, params['yo_%s'%condition].value,
			params['sigma_center_%s'%condition].value) * params['amp_center_%s'%condition].value
		this_surround_PRF = gpfs[condition].twoD_Gaussian(params['xo_%s'%condition].value, params['yo_%s'%condition].value,
			params['sigma_surround_%s'%condition].value) * params['amp_surround_%s'%condition].value
		PRFs[condition] = this_center_PRF + this_surround_PRF

	#########################################################################################################################################################################################################################
	#### Get fit diagnostics
	#########################################################################################################################################################################################################################

	reconstruction_radius = 10
	this_ssr = 1000 
	t = np.linspace(-reconstruction_radius,reconstruction_radius,this_ssr*reconstruction_radius)
	
	fwhms = {}
	surround_sizes = {}
	for condition in conditions:
		PRF_2D =  params['amp_center_%s'%condition].value * np.exp(-t**2/(2*params['sigma_center_%s'%condition].value**2)) + params['amp_surround_%s'%condition].value * np.exp(-t**2/(2*(params['sigma_surround_%s'%condition].value)**2))
		## then, we fit a spline through this line, and get the roots (the fwhm points) of the spline:
		spline=interpolate.UnivariateSpline(range(len(PRF_2D)),PRF_2D-np.max(PRF_2D)/2,s=0)
		## and compute the distance between them
		try:
			fwhms[condition] = ((np.diff(spline.roots())/len(t)*reconstruction_radius) * stim_radius)[0]
		except Exception:
			## when this procedure fails, set fwhm to 0:
			fwhms[condition] = 0
		
		## now find the surround size in the same way
		if (model == 'OG') or (params['amp_surround_%s'%condition].value == 0):
			surround_sizes[condition] = 0
		else:
			spline = interpolate.UnivariateSpline(range(len(PRF_2D)),PRF_2D+np.min(PRF_2D),s=0)
			surround_sizes[condition] = ((np.diff(spline.roots())/len(t)*reconstruction_radius) * stim_radius)[0]

	## EVALUATE OVERALL MODEL FIT QUALITY
	stats = {}
	stats['spearman'] = spearmanr(time_course,combined_model_prediction)[0]
	stats['pearson'] = pearsonr(time_course,combined_model_prediction)[0]
	stats['RSS'] = np.sum((time_course - combined_model_prediction)**2)
	stats['r_squared'] = 1 - stats['RSS']/np.sum((time_course - np.mean(time_course)) ** 2) 
	stats['kendalls_tau'] = kendalltau(time_course,combined_model_prediction)[0]

	## CREATE SEPARATE RESULTS DICT PER CONDITION
	results = {}
	for condition in conditions:
		results[condition] = {}
		results[condition]['baseline'] = params['baseline'].value
		# params from fit
		for key in params.keys():
			if condition in key:
				# leave out the condition in the keys (as the results frames are identical across conditions)
				new_key = key[:-len(condition)-1]
				results[condition][new_key] = params[key].value

		results[condition]['ecc'] = np.linalg.norm([params['xo_%s'%condition].value,params['yo_%s'%condition].value]) * stim_radius
		results[condition]['sigma_center'] *= stim_radius
		results[condition]['sigma_surround'] *= stim_radius

		# derived params
		results[condition]['polar'] = np.arctan2(params['yo_%s'%condition].value,params['xo_%s'%condition].value)
		results[condition]['fwhm'] = fwhms[condition]
		results[condition]['surround_size'] = surround_sizes[condition]
		results[condition]['SI'] = ((params['amp_surround_%s'%condition].value * (params['sigma_surround_%s'%condition].value**2) ) 
			/ (params['amp_center_%s'%condition].value * (params['sigma_center_%s'%condition].value**2) ))
		
		# if the resulting PRF falls outside of the stimulus radius,
		# set the multiplier to (near) zero so that it falls off the retmaps
		if results[condition]['ecc'] < (stim_radius):
			multiplier = stats['r_squared']
		else:
			multiplier = 0.001

		# here for only voxels within stim region:
		results[condition]['real_polar_stim_region'] = np.cos(results[condition]['polar'])*np.arctanh(multiplier)
		results[condition]['imag_polar_stim_region'] = np.sin(results[condition]['polar'])*np.arctanh(multiplier)
		results[condition]['real_eccen_stim_region'] = np.cos(results[condition]['ecc'])*np.arctanh(multiplier)
		results[condition]['imag_eccen_stim_region'] = np.sin(results[condition]['ecc'])*np.arctanh(multiplier)
		results[condition]['real_fwhm_stim_region'] = np.cos(results[condition]['fwhm'])*np.arctanh(multiplier)
		results[condition]['imag_fwhm_stim_region'] = np.sin(results[condition]['fwhm'])*np.arctanh(multiplier)
		
		# and for all voxels:
		results[condition]['real_polar'] = np.cos(results[condition]['polar'])*np.arctanh(stats['r_squared'])
		results[condition]['imag_polar'] = np.sin(results[condition]['polar'])*np.arctanh(stats['r_squared'])
		results[condition]['real_eccen'] = np.cos(results[condition]['ecc'])*np.arctanh(stats['r_squared'])
		results[condition]['imag_eccen'] = np.sin(results[condition]['ecc'])*np.arctanh(stats['r_squared'])
		results[condition]['real_fwhm'] = np.cos(results[condition]['fwhm'])*np.arctanh(stats['r_squared'])
		results[condition]['imag_fwhm'] = np.sin(results[condition]['fwhm'])*np.arctanh(stats['r_squared'])

	#########################################################################################################################################################################################################################
	#### Plot results
	#########################################################################################################################################################################################################################

	if plotbool and (stats['r_squared'] > 0.4):

		n_TRs = n_TRs[0]
		n_runs = int(len(time_course) / n_TRs)
		if fit_on_all_data:
			plot_conditions = ['Ridge',conditions[0]]
		else:
			plot_conditions = ['All_fit',conditions[0]]
			#plot_conditions = conditions[0] + ['All_fit']
		plot_dir = os.path.join(plotdir, '%s'%roi)
		if not os.path.isdir(plot_dir): 
			try:
				os.mkdir(plot_dir)
			except OSError:
				# directory may already exist (e.g. created by a parallel process)
				pass

		f=pl.figure(figsize=(20,8)); rowi = (n_runs+4)

		import colorsys
		colors = np.array([colorsys.hsv_to_rgb(c,0.6,0.9) for c in np.linspace(0,1,3+1)])[:-1]

		for runi in range(n_runs):
			s = f.add_subplot(rowi,1,runi+1)
			pl.plot(time_course[n_TRs*runi:n_TRs*(runi+1)],'-ok',linewidth=0.75,markersize=2.5)#,label='data'
			if not fit_on_all_data:
				for ci, condition in enumerate(conditions):
					pl.plot(model_predictions[condition][n_TRs*runi:n_TRs*(runi+1)]+params['baseline'].value,color=colors[ci],label='%s model'%condition,linewidth=2)				
				pl.plot([0,n_TRs],[params['baseline'].value,params['baseline'].value],color=colors[0],linewidth=1)	
			else:
				pl.plot(combined_model_prediction[n_TRs*runi:n_TRs*(runi+1)],color=colors[0],label='model',linewidth=2)	
			sn.despine(offset=10)
			pl.xlim(0,n_TRs*1.1)
			if runi == (n_runs-1):
				pl.xlabel('TRs')
			else:
				pl.xticks([])
			if runi == (n_runs//2):
				pl.legend(loc='best',fontsize=8)
				if 'psc' in postFix:
					pl.ylabel('% signal change')
				else:
					pl.ylabel('unknown unit')
			pl.yticks([int(np.min(time_course)),0,int(np.max(time_course))])	
			pl.ylim([int(np.min(time_course)),int(np.max(time_course))])


		rowi = (n_runs+2)//2
		k = 0
		for ci, condition in enumerate(plot_conditions):
			k+= 1
			s = f.add_subplot(rowi,len(plot_conditions)*2,(rowi-1)*len(plot_conditions)*2+k,aspect='equal')
			pl.imshow(PRFs[condition],origin='lower',interpolation='nearest',cmap=cm.coolwarm)

			pl.axis('off')
			s.set_title('%s PRF'%condition)
			
			k+= 1
			if condition not in ('Ridge', 'All_fit'):
				s = f.add_subplot(rowi,len(plot_conditions)*2,(rowi-1)*len(plot_conditions)*2+k)
				pl.imshow(np.ones((n_pixel_elements_raw,n_pixel_elements_raw)),cmap='gray')
				pl.clim(0,1)
				if model == 'OG':
					s.text(n_pixel_elements_raw/2,n_pixel_elements_raw/2, "\n%s PARAMETERS: \n\nbaseline: %.2f\nsize: %.2f\namplitude: %.6f\n\n\nDERIVED QUANTIFICATIONS: \n\nr-squared: %.2f\necc: %.2f\nFWHM: %.2f"%
						(condition,results[condition]['baseline'],results[condition]['sigma_center'],results[condition]['amp_center'],
							stats['r_squared'],results[condition]['ecc'],results[condition]['fwhm']),
						horizontalalignment='center',verticalalignment='center',fontsize=10,bbox={'facecolor':'white', 'alpha':1, 'pad':10})
				elif model == 'DoG':
					s.text(n_pixel_elements_raw/2,n_pixel_elements_raw/2, "\n%s PARAMETERS: \n\nbaseline: %.2f\nsd center: %.2f\nsd surround: %.2f\namp center: %.6f\namp surround: %.6f\n\nDERIVED QUANTIFICATIONS: \n\nr squared: %.2f\necc: %.2f\nFWHM: %.2f\nsurround size: %.2f\nsuppression index: %.2f"
						%(condition,results[condition]['baseline'],results[condition]['sigma_center'],results[condition]['sigma_surround'],results[condition]['amp_center'],
						results[condition]['amp_surround'],stats['r_squared'],results[condition]['ecc'],results[condition]['fwhm'],results[condition]['surround_size'],
						results[condition]['SI']),horizontalalignment='center',verticalalignment='center',fontsize=10,bbox={'facecolor':'white', 'alpha':1, 'pad':10})
				pl.axis('off')

		# pl.tight_layout()
		pl.savefig(os.path.join(plot_dir, 'vox_%d_%d_%d.pdf'%(slice_no,voxno,n_pixel_elements_raw)))
		pl.close()

	return results, stats
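
The FWHM above comes from a spline-roots trick: subtract half the profile maximum so the half-maximum crossings become the roots of an interpolating spline, then take the distance between them. A minimal, self-contained sketch of that trick (hypothetical Gaussian profile; the sigma value is arbitrary):

import numpy as np
from scipy import interpolate

sigma = 1.5
t = np.linspace(-10, 10, 10000)
profile = np.exp(-t**2 / (2 * sigma**2))

# shift down by half the maximum so the half-max crossings become roots
spline = interpolate.UnivariateSpline(t, profile - profile.max() / 2, s=0)
left, right = spline.roots()

# for a Gaussian, FWHM = 2*sqrt(2*ln 2)*sigma ~ 2.3548*sigma
print(right - left, 2 * np.sqrt(2 * np.log(2)) * sigma)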
Example #19
class UltraFast_TB(object):
    def __init__(self, times=None,traces=None,wavelengths=None, 
                 input_params=None,reaction_matrix=None,
                 method='leastsq',alpha=0,gamma=0):
                             
        self.times = times
        self.traces = traces
        self.wavelengths = wavelengths
        self.reaction_matrix = reaction_matrix
        self.input_params = input_params

        try:
            self.no_species = self.reaction_matrix.shape[0]
        except AttributeError:
            self.no_species = None

        self.last_residuals = None
        self.fitted_ks = None
        self.fitted_c0 = None
        self.fitted_C = None
        self.fitted_traces = None
        self.fitted_spectra = None

        self.no_resampled_points = None    
        self.resampled_C = None
        self.resampled_times = None
        self.output = None
    
        self.method = method
        if alpha:
            self.regressor = Ridge(fit_intercept=False,alpha=alpha)
        elif gamma:
            self.regressor = Lasso(fit_intercept=False,alpha=gamma)
        else:
            self.regressor = LinearRegression(fit_intercept=False)
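        # alpha > 0 selects an L2-penalised (Ridge) spectral fit, gamma > 0 an
        # L1-penalised (Lasso) fit; with neither set we fall back to ordinary
        # unregularised least squares. fit_intercept=False throughout because
        # the OD offset is handled as an explicit fit parameter instead.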
            
        # if we are fitting against multiple traces they must be measured at
        # the same wavelengths. Note if we happen to be measuring at the same
        # number of wavelengths but the wavelengths being measured are
        # different we will pass this test but the results will still be
        # meaningless
        no_wavelengths_measured = [st.shape[1] for st in self.traces]

        assert len(set(no_wavelengths_measured)) == 1
        
    def apply_svd(self, n):
        """
        Replaces spectral traces with their SVD transformed equivalents
        truncating at the nth component
        """
    
        ## should really handle svd sensibly if we have multiple traces
        ## fitting multiple traces simultaneously requires they all have the
        ## same basis. Could pick the first trace to define the basis
        #svd_trace, s, self.rs_vectors = np.linalg.svd(self.traces[0], full_matrices=True)
        #transformed_traces = [svd_trace[:,:n]]
        #if len(self.traces > 1):
        #    # haven't tested this at all it's probably a bug filled mess
        #    # idea is to represent all the traces with the principle components
        #    # defined by the first set of traces
        #    transformed_traces += [self.rs_vectors.dot(t)[:,:n] for t in self.traces[1:]] 

        # or look for an svd-like transformation to apply to the entire block of traces?

        # either way the current approach is totally dodgy if fitting against
        # multiple svd transformed traces

        transformed_traces = []
        # wavelengths now correspond to principle components
        
        for trace in self.traces:
            U,s,V = np.linalg.svd(trace, full_matrices=True)
            transformed_traces.append(U[:,:n])
        
        self.traces = transformed_traces
        self.wavelengths = np.arange(n)
        
    def get_spectra(self, conc_traces,spectral_trace):
        """Extraction of predicted spectra given concentration traces and spectral_traces"""
        # linear fit of the fitted_concs to the spectra CANNOT fit intercept here!
        self.regressor.fit(conc_traces,spectral_trace)
        fitted_spectra = self.regressor.coef_
        return fitted_spectra
    
    
    def get_traces(self, conc_traces, spectra):
        """Extraction of fitted spectral traces given concentration traces and spectral traces"""
        # linear fit of the fitted_concs to the spectra CANNOT fit intercept here!
        #self.regressor.fit(conc_traces,spectral_trace)
        #fitted_spectral_traces = self.regressor.predict(conc_traces)
        fitted_spectral_traces = spectra.dot(conc_traces.T)        
        return fitted_spectral_traces.T
     
    
    def dc_dt(self,C,t,K):
        """
        Rate function for the given reaction matrix.
        
        Rows of the reaction matrix correspond reactant species
        Columns of the reaction correspond to product species
        
        e.g. reaction_matrix = [[0, 1, 0],
                                [0, 0, 1],
                                [0, 0, 0]]
              
        Corresponds to the reaction scheme A->B->C.
        
        The generated rate function has three arguments:
            C an array of floats giving the concentration of each species
            t a float giving the current time (not used but necessary for ODEs)
            K an lmfit Parameters object defining with float values 
            representing the rate constants.
        
        And returns:
            dc/dt an array floats corresponding to the derivative of the concentration
            of each species with time at time=t
            
        The above example reaction matrix would give rise to dc/dt = [-k1[A], k1[A]-k2[B], k2[B]]
        """
        
        # dc/dt built up by separately computing the positive and negative contributions.
        # In our example positive_dcdt = [0, k1[A], k2[B]]  and negative_dcdt = [-k1[A],-k2[B],0]
        reaction_matrix = np.array(self.reaction_matrix, dtype=int)
        C = np.array(C)
        #K = np.array(K.valuesdict().values())

        # need to have the same number of rate parameters in K
        # as indicated in reaction_matrix!
        assert len(K) == np.sum(reaction_matrix)
        
        # need to be careful about dtypes:
        # reaction matrix dtype is int, rate matrix must be dtype float
        # (astype copies and converts the values; reassigning .dtype would
        # merely reinterpret the raw integer bytes)
        rate_matrix = reaction_matrix.astype(np.float64)
        rate_matrix[reaction_matrix==1] = K
        
        positive_dcdt = rate_matrix.T.dot(C)
        negative_dcdt = rate_matrix.T.sum(axis=0)*C
            
        return positive_dcdt - negative_dcdt
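
        # Worked example (comments only, values hypothetical): for
        # reaction_matrix [[0,1,0],[0,0,1],[0,0,0]] and K = [k1, k2],
        # rate_matrix.T is [[0,0,0],[k1,0,0],[0,k2,0]], so with
        # concentrations [A, B, C]:
        #   positive_dcdt = [0, k1*A, k2*B]
        #   negative_dcdt = [k1*A, k2*B, 0]
        # giving dc/dt = [-k1*A, k1*A - k2*B, k2*B], as in the docstring above.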
       
        
    def C(self,t,K,c0):
        """
        Concentration function returns concentrations at the times given in t
        Uses odeint to integrate dc/dt using rate constants k over times t at
        initial concentrations c0
        Implicitly uses self.dc_dt
        """
        #ode(self.dc_dt,c0,t,args=(k,)).set_integrator('lsoda')
        #ode(self.dc_dt,c0,t,args=(k,)).set_integrator('vode', method='bdf', order=15)
        
        # if we have any negative times we assume they occur before the 
        # reaction starts hence all negative times are assigned concentration 
        # c0
        
        ## could switch to something like ode15s that the original matlab code
        ## uses - can odeint cope with equations as stiff as we need?
        ## to use integrate.ode need order of arguments in dc_dt to switch
        
        #r = scipy.integrate.ode(self.dc_dt)
        #r = r.set_integrator('vode', method='bdf', order=15,nsteps=3000)
        #r = r.set_initial_value(c0)
        #r = r.set_f_params((K,))
        #r.integrate(t)
        
        static_times = t[t<0]
        dynamic_times = t[t>=0]

        static_C = np.array([c0 for _ in static_times])

        # odeint always takes the first time point as t0
        # our t0 is always 0 (removing t0 occurs before we integrate)
        # so if the first time point is not 0 we add it 
                
        if not dynamic_times.any() or dynamic_times[0]:
            #fancy indexing returns a copy so we can do this
            dynamic_times = np.hstack([[0],dynamic_times])            
            dynamic_C = odeint(self.dc_dt,c0,dynamic_times,args=(K,))[1:]
        else:
            dynamic_C = odeint(self.dc_dt,c0,dynamic_times,args=(K,))
            
        if static_C.any():
            return np.vstack([static_C,dynamic_C])
        else:
            return dynamic_C
   
    def _get_K(self, params):
        no_datasets = len(self.traces)
        n_Ks = int(np.sum(self.reaction_matrix))
       
        K=[]
        for d in range(1,no_datasets+1):
            k_keys = ['k{i}{d}'.format(i=i,d=d) for i in range(1,n_Ks+1)]
            dataset_K = np.array([params[key].value for key in k_keys])
            K.append(dataset_K)
        return K
    
    def _get_C0(self, params):
        no_datasets = len(self.traces)
        n_c0s = self.no_species
        
        C0 = []
        for d in range(1,no_datasets+1):
            c0_keys = ['c0{i}{d}'.format(i=i,d=d) for i in range(1,n_c0s+1)]
            dataset_c0 = np.array([params[key].value for key in c0_keys])
            C0.append(dataset_c0)
            
        return C0
        
    def _get_T0(self, params):
        no_datasets = len(self.traces)
        
        T0_keys = ['t0{d}'.format(d=d) for d in range(1,no_datasets+1)]
        T0 = np.array([params[key].value for key in T0_keys])
        return T0
        
    def _get_OD_offset(self, params):        
        no_datasets = len(self.traces)
        
        OD_keys = ['OD_offset{d}'.format(d=d) for d in range(1,no_datasets+1)]
        OD = np.array([params[key].value for key in OD_keys])
        return OD
        
    # define a function that will measure the error between the fit and the real data:
    def errfunc(self, params):
        """
        Master error function
        
        Computes residuals for a given rate function, rate constants and initial concentrations
        by linearly fitting the integrated concentrations to the provided spectra.
        
        As we wish to simultaneously fit multiple data sets T and S contain multiple
        arrays of times and spectral traces respectively.
        
        params an lmfit Parameters object representing the rate constants, 
        initial concentrations,initial times, and OD offsets.
        
        implicit dependence on:
        dc_dt - function to be integrated by odeint to get the concentrations
        
        self.times - the array over which integration occurs
                     since we have several data sets and each one has its own 
                     array of time points, self.times is an array of arrays.
        self.traces - the spectral data, it is an array of array of arrays
        """

        K = self._get_K(params)
        T0 = self._get_T0(params)
        C0 = self._get_C0(params)        
        OD_offset = self._get_OD_offset(params)
        
        offset_times = [t-t0 for t,t0 in zip(self.times,T0)]
        offset_traces = [st - od for st,od in zip(self.traces,OD_offset)]

        # calculated concentrations for the different time sets
        fitted_conc_traces = []   
        for t ,k, c0 in zip(offset_times,K,C0):
            conc_traces = self.C(t,k,c0)

            if np.isnan(conc_traces).any():
                # sklearn removed Imputer in 0.22; SimpleImputer (from
                # sklearn.impute) is the equivalent median column imputer
                fix = SimpleImputer(missing_values=np.nan, strategy='median')
                conc_traces = fix.fit_transform(conc_traces)
                warnings.warn('Nan found in predicted concentrations')

            fitted_conc_traces.append(conc_traces)
        
        # spectra fitted against all data sets
        # REQUIRES spectral traces to be measured at the SAME WAVELENGTHS!
        
        fitted_spectra = self.get_spectra(np.vstack(fitted_conc_traces),
                                          np.vstack(offset_traces))
                                                
        fitted_spectral_traces = [self.get_traces(c, fitted_spectra) for c in
                                        fitted_conc_traces]
            
        self.residuals = [fst -t for fst,t in zip(fitted_spectral_traces,
                                                  offset_traces)]
            
        all_residuals = np.vstack(self.residuals).ravel()
        
        return all_residuals
        
#        # compute the residuals for each dataset of times/traces
#        for times,traces in zip(self.times,self.traces):
#            offset_times = times-t0
#            fitted_conc_traces = self.C(offset_times, K)
#            
#            # handle case where we have poor parameters causing concentrations
#            # that are higher than floating point allows by replacing them with
#            # the  median concentration for that species.
#            # We expect these instances to be a very poor fit and hence that 
#            # this procedure will not affect the final fitted rate constants
#            if np.isnan(fitted_conc_traces).any():
#                fix = Imputer(missing_values='NaN', strategy='median',axis=0) 
#                fitted_conc_traces  = fix.fit_transform(fitted_conc_traces )
#                warnings.warn('Nan found in predicted concentrations')
#                
#            offset_traces = traces - OD_offset
#            fitted_spectra = self.get_spectra(fitted_conc_traces,
#                                              offset_traces)
#            fitted_spectral_traces = self.get_traces(fitted_conc_traces, 
#                                                     fitted_spectra)
#            current_residuals = fitted_spectral_traces - traces
#            self.residuals.append(current_residuals)
# 
#        # do we need to worry about the order of the flattened residuals? 
#        # e.g. if switch current_residuals for current_residuals.T 
#        # would it matter?
# 
#        # combine residuals for each data set and flatten
#
#        all_residuals = np.hstack(self.residuals).ravel()
#        
#        return all_residuals
    
    def printfunc(self, params, iter, resid, *args, **kwargs):
        """
        Method passed to minimize if we are debugging to print out the
        values of the parameters as minimisation is occurring
        """

        print(iter)  
        print(params.valuesdict())
    
    def fit(self, debug=False):
        """Master fitting function"""
        
        self.expand_params()

        if debug:
            self.output = minimize(self.errfunc, self.input_params, 
                                   method=self.method,iter_cb=self.printfunc)
        else:
            self.output = minimize(self.errfunc, self.input_params,
                                   method=self.method)

        fitted_params = self.output.params
      
        fitted_K = self._get_K(fitted_params)
        fitted_T0 = self._get_T0(fitted_params)
        fitted_OD_offset = self._get_OD_offset(fitted_params)
        fitted_C0 = self._get_C0(fitted_params)
        
        offset_traces = [traces - od for traces,od in zip(self.traces,
                                                          fitted_OD_offset)]
                                                        
        offset_times = [times - t0 for times,t0 in zip(self.times, 
                                                       fitted_T0)]
                                                       
        fitted_C = [self.C(t, fitted_k, c0) for t,fitted_k,c0 in zip(offset_times,
                                                                  fitted_K,
                                                                  fitted_C0)]
       
        fitted_spectra = self.get_spectra(np.vstack(fitted_C),
                                          np.vstack(offset_traces))
                                          
        
        fitted_traces = [self.get_traces(c, fitted_spectra) for c in fitted_C]
         
        self.fitted_spectra = fitted_spectra
        self.fitted_traces = fitted_traces
        self.fitted_ks = fitted_K
        self.fitted_t0 = fitted_T0
        self.fitted_c0 = fitted_C0        
        self.fitted_OD_offset = fitted_OD_offset
        self.fitted_C = fitted_C  
        
        # create master resampled data
        if self.no_resampled_points:
            no_points = self.no_resampled_points
        else:
            no_points = max([len(t) for t in offset_times])*5

        max_time = max(np.hstack(offset_times))
        min_time = min(np.hstack(offset_times))
        
        if min_time > 0:
            min_time = 0
        
        resampled_times = np.linspace(min_time, max_time, no_points)
        
        self.resampled_C = [self.C(resampled_times,k,c0) for k,c0 in zip(fitted_K,
                                                                         fitted_C0)]
        self.resampled_traces = [self.get_traces(c,self.fitted_spectra) for c in
                                        self.resampled_C]
          
        self.resampled_times = [resampled_times + t0 for t0 in fitted_T0]
     
    def expand_params(self):
        """
        If only a single set of parameters has been provided then we expand 
        the parameters by constructing a set for each dataset
        """
        
        no_datasets = len(self.traces)
        no_species = self.reaction_matrix.shape[0]
        
        t0_keys = [key for key in self.input_params.keys() if 't0' in key]
        od_keys = [key for key in self.input_params.keys() if 'OD' in key]
        k_keys = [key for key in self.input_params.keys() if 'k' in key]
        c0_keys = [key for key in self.input_params.keys() if 'c0' in key]
       
        enum_keys = list(enumerate(self.input_params.keys()))
        first_t0 = next(i for i,key in enum_keys if 't0' in key)
        first_od = next(i for i,key in  enum_keys if 'OD' in key)
        first_k = next(i for i,key in enum_keys if 'k' in key)
        first_c0 = next(i for i,key in enum_keys if 'c0' in key)
        
        t0_params = [self.input_params.pop(k) for k in t0_keys]
        od_params = [self.input_params.pop(k) for k in od_keys]
        k_params = [self.input_params.pop(k) for k in k_keys]
        c0_params = [self.input_params.pop(k) for k in c0_keys]
        
        if len(t0_keys) == 1 and t0_keys[0] == 't0':            
            p = t0_params[0]
            new_t0_params = []            
            for d in range(1,no_datasets+1):
                new_p = copy.deepcopy(p)
                new_p.name += str(d)
                new_t0_params.append(new_p)
            t0_params = new_t0_params
            
        if len(od_keys) == 1 and od_keys[0] == 'OD_offset':             
            p = od_params[0]
            new_od_params = []
            for d in range(1,no_datasets+1):
                new_p = copy.deepcopy(p)
                new_p.name += str(d)
                new_od_params.append(new_p)
            od_params = new_od_params
            
        # TODO - this is not adequate - what if the first rate parameter 
        # isn't k1?
        if len(k_keys) == self.reaction_matrix.sum() and k_keys[0] == 'k1':
            new_k_params = []
            for p in k_params:
                for d in range(1,no_datasets+1):
                    new_p = copy.deepcopy(p)                    
                    new_p.name += str(d)
                    new_k_params.append(new_p)
            k_params = new_k_params
            
        if len(c0_keys) == no_species and c0_keys[0] == 'c01':
            new_c0_params = []
            for p in c0_params:
                for d in range(1,no_datasets+1):
                    new_p = copy.deepcopy(p)
                    new_p.name += str(d)
                    new_c0_params.append(new_p)
            c0_params = new_c0_params
            
        # as lmfit parameters objects are ordered dictionaries the order
        # that we do this actually matters and will influence the fitting;
        # we would like to allow the user to specify the order and respect the
        # order they choose.

        # NB The ideal order is to have the parameters whose initial values are
        # better optimised after the parameters whose initial values are worse
           
        expanded_params = sorted([(t0_params,first_t0),
                                  (od_params,first_od),
                                  (k_params,first_k),
                                  (c0_params,first_c0)], key=lambda e:e[1])
        expanded_params, loc = zip(*expanded_params)
                       
        for ep in expanded_params:
            self.input_params.add_many(*ep)
    
    # TODO order is not yet ideal - would like explicitly given parameters
    # to be optimised last        
    def init_sequential(self, no_species):
        """Initialises parameters for a sequential fit"""        
        
        if self.no_species is not None and self.no_species != no_species:
            raise UserWarning('Inconsistent number of species')
        
        if self.reaction_matrix is not None:
            raise UserWarning('Reaction matrix already specified')

        self.reaction_matrix = np.zeros([no_species, no_species])
        self.no_species = no_species
        
        no_datasets = len(self.traces)
        
        for i in range(no_species-1):       
            self.reaction_matrix[i,i+1] = 1
        
        if self.input_params is None:
            self.input_params = Parameters()
        
        # if no rate constants set, assign n-1 rate constants to a default
        # of 0.1 for each dataset
        if not any(['k' in key for key in self.input_params.valuesdict()]):
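            # add_many takes (name, value, vary, min, max, expr) tuples: each
            # rate constant starts at 0.1, is varied, and is bounded below by 0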
            rate_constants = [('k{i}{d}'.format(i=n,d=d),0.1,True,0,None,None) 
                               for n in range(1,no_species)
                               for d in range(1,no_datasets+1)]                  
            self.input_params.add_many(*rate_constants)
          
        # if no t0s assign t0 to a default of 0 and flag them not to be
        # optimised for each dataset
        if not any(['t0' in key for key in self.input_params.valuesdict()]):
            t0 = [('t0{d}'.format(d=d),0,False,None,None,None)
                  for d in range(1,no_datasets+1)]
            self.input_params.add_many(*t0)
        
        # if no OD_offsets assign OD_offset to a default of 0 and flag them
        # not to be optimised for each dataset          
        if not any(['OD' in key for key in self.input_params.valuesdict()]):
            OD_offset = [('OD_offset{d}'.format(d=d),0,False,None,None,None)
                         for d in range(1,no_datasets+1)]
            self.input_params.add_many(*OD_offset)
 
        # if no c0s assign c0 to a default of [1,0,0,...] and flag them
        # not to be optimised for each dataset
        if not any(['c0' in key for key in self.input_params.valuesdict()]):
            C0 = [('c0{i}{d}'.format(i=n,d=d),0,False,0,1,None)
                         for n in range(1,no_species+1)
                         for d in range(1,no_datasets+1)]
        
            self.input_params.add_many(*C0)
            
            for d in range(1,no_datasets+1):
                self.input_params['c01{d}'.format(d=d)].value = 1
                
    def fit_sequential(self, no_species, debug=False):
        """
        Utility function to fit assuming a sequential reaction model
        
        Sets the reaction matrix up for a sequential model then calls the 
        master fit() method
        """
 
        self.init_sequential(no_species)
        self.fit(debug)
            
    # TODO order is not yet ideal - would like explicitly given parameters
    # to be optimised last
    def init_parallel(self, no_species):
        """Initialises parameters for a parallel fit"""
        if self.no_species is not None and self.no_species != no_species:
            raise UserWarning('Inconsistent number of species')
         
        if self.reaction_matrix is not None:
            raise UserWarning('Reaction matrix already specified')

        self.reaction_matrix = np.zeros([no_species, no_species])
        self.no_species = no_species
                
        no_datasets = len(self.traces)
        
        for i in range(0,no_species-1,2):
            self.reaction_matrix[i,i+1] = 1
        
        
        if self.input_params is None:
            self.input_params = Parameters()
        
        # if no rate constants set, assign n-1 rate constants to a default
        # of 0.1 for each dataset
        if not any(['k' in key for key in self.input_params.valuesdict()]):
            rate_constants = [('k{i}{d}'.format(i=n,d=d),0.1,True,0,None,None) 
                              for n in range(1,no_species,2)
                              for d in range(1,no_datasets+1)]
            self.input_params.add_many(*rate_constants)

        # if no t0s assign n t0s to a default of 0 and flag them not to be
        # optimised
        if not any(['t0' in key for key in self.input_params.valuesdict()]):
            t0 = [('t0{i}'.format(i=n),0,False,None,None,None)
                  for n in range(1,no_datasets+1)]
            self.input_params.add_many(*t0)
        
        # if no OD_offsets assign OD_offset to a default of 0 and flag them
        # not to be optimised for each dataset
        if not any(['OD' in key for key in self.input_params.valuesdict()]):
            OD_offset = [('OD_offset{i}'.format(i=n),0,False,None,None,None)
                         for n in range(1,no_datasets+1)]
            self.input_params.add_many(*OD_offset)

        # if no c0s assign c0 to a default of [1,0,0,...] and flag them
        # not to be optimised for each dataset
        if not any(['c0' in key for key in self.input_params.valuesdict()]):
            C0 = [('c0{i}{d}'.format(i=n,d=d),0,False,0,1,None)
                         for n in range(1,no_species+1)
                         for d in range(1,no_datasets+1)]
        
            self.input_params.add_many(*C0)
            
            for n in range(1,no_species,2):
                for d in range(1,no_datasets+1):
                    self.input_params['c0{i}{d}'.format(i=n,d=d)].value = 1
                    
    def fit_parallel(self, no_species,debug=False):
        """
        Utility function to fit assuming a parallel reaction model
        
        Sets the reaction matrix up for a parallel model then calls the 
        master fit() method
        """
        
        self.init_parallel(no_species)
        self.fit(debug)        
    
    def tex_reaction_scheme(self):
        """Returns a Latex representation of the current reaction scheme"""
        
        if self.reaction_matrix is None or self.input_params is None:
            return 'undefined'
            
        species = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
        eqn = []
        
        reactants, products = self.reaction_matrix.nonzero()
        for r,p,k in zip(reactants, products,self.input_params.keys()):
            eqn.append( species[r] + r'\xrightarrow{{' + k + '}}' + species[p])
        
        latex_eqn = r'$' + ','.join(eqn) + r'$'
        return latex_eqn        
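
A minimal usage sketch for the class above, under stated assumptions: one synthetic dataset shaped (times x wavelengths), a two-species sequential scheme, and the module-level imports the class presupposes (numpy, scipy.integrate.odeint, the sklearn regressors, lmfit); the data here are placeholders, not real measurements.

import numpy as np

# hypothetical data: one dataset, 50 time points, 20 wavelengths
times = [np.linspace(0, 10, 50)]
traces = [np.random.rand(50, 20)]

tb = UltraFast_TB(times=times, traces=traces, wavelengths=np.arange(20))
tb.fit_sequential(no_species=2)   # A -> B: one rate constant per dataset
print(tb.fitted_ks)               # fitted rate constants, one array per dataset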
Example #20
class lmfitdata(nddata):
    r"""Inherits from an nddata and enables curve fitting through use of a sympy expression.

    The user creates a lmfitdata class object from an existing nddata
    class object, and on this lmfitdata object can define the
    :func:`functional_form` of the curve it would like to fit to the
    data of the original nddata.
    This functional form must be provided as a sympy expression, with
    one of its variables matching the name of the dimension that the
    user would like to fit to.
    """

    def __init__(self, *args, **kwargs):
        # copied from fitdata
        fit_axis = None
        if "fit_axis" in list(kwargs.keys()):
            fit_axis = kwargs.pop("fit_axis")
        if isinstance(args[0], nddata):
            # move nddata attributes into the current instance
            myattrs = normal_attrs(args[0])
            for j in range(0, len(myattrs)):
                self.__setattr__(myattrs[j], args[0].__getattribute__(myattrs[j]))
        else:
            nddata.__init__(self, *args, **kwargs)
        if fit_axis is None:
            if len(self.dimlabels) == 1:
                fit_axis = self.dimlabels[0]
            else:
                raise IndexError(
                    "Right now, we can only auto-determine the fit axis if there is a single axis"
                )

        self.fit_axis = fit_axis
        self.set_to = None
        self.set_indices = None
        self.active_indices = None
        self.expression = None
        return

    @property
    def functional_form(self):
        r"""A property of the myfitclass class which is set by the user,
        takes as input a sympy expression of the desired fit
        expression"""
        print("Getting symbolic function")
        return self.expression

    @functional_form.setter
    def functional_form(self, this_expr):
        """generate parameter descriptions and a numpy (lambda) function from a sympy expresssion

        Parameters
        ==========
        this_expr: sympy expression
        """
        assert issympy(
            this_expr
        ), "for now, the functional form must be a sympy expression"
        self.expression = this_expr
        # {{{ decide which symbols are parameters vs. variables
        if self.expression is None:
            raise ValueError("what expression are you fitting with??")
        all_symbols = self.expression.atoms(sp.Symbol)
        axis_names = set([sp.Symbol(j, real=True) for j in self.dimlabels])
        variable_symbols = axis_names & all_symbols
        self.parameter_symbols = all_symbols - variable_symbols
        this_axis = variable_symbols
        variable_symbols = tuple(variable_symbols)
        self.variable_names = tuple([str(j) for j in variable_symbols])
        parameter_symbols = tuple(self.parameter_symbols)
        self.parameter_names = tuple([str(j) for j in self.parameter_symbols])
        self.fit_axis = set(self.dimlabels)
        self.symbol_list = [str(j) for j in parameter_symbols]
        logging.debug(
            strm(
                "all symbols are",
                all_symbols,
                "axis names are",
                axis_names,
                "variable names are",
                self.variable_names,
                "parameter names are",
                self.parameter_names,
            )
        )
        self.symbolic_vars = all_symbols - axis_names
        self.fit_axis = list(self.fit_axis)[0]
        # }}}
        self.symbolic_vars = list(self.symbolic_vars)
        args = self.symbolic_vars + [str(*this_axis)]
        self.fitfunc_multiarg = sp.lambdify(
            args,
            self.expression,
            modules=[{"ImmutableMatrix": np.ndarray}, "numpy", "scipy"],
        )
        self.fitfunc_multiarg_v2 = sp.lambdify(
            variable_symbols + parameter_symbols,
            self.expression,
            modules=[{"ImmutableMatrix": np.ndarray}, "numpy", "scipy"],
        )

        def fn(p, x):
            p = self.add_inactive_p(p)
            assert len(p) == len(
                self.parameter_names
            ), "length of parameter passed to fitfunc doesnt match number of symbolic parameters"
            return self.fitfunc_multiarg(*tuple(list(p) + [x]))

        self.fitfunc = fn
        self.pars = Parameters()
        for this_name in self.parameter_names:
            self.pars.add(this_name)

    def add_inactive_p(self, p):
        if self.set_indices is not None:
            # {{{uncollapse the function
            temp = p.copy()
            p = np.zeros(len(self.symbol_list))
            p[self.active_mask] = temp
            # }}}
            p[self.set_indices] = self.set_to
        return p

    def set_guess(self, *args, **kwargs):
        """set both the guess and the bounds

        Parameters
        ==========
        guesses: dict of dicts
            each dict has a keyword giving the parameter and a value
            that comprises a dict with guesses (value) and/or constraints
            (min/max)

            Can be passed either as the only argument, or a kwarg called
            guesses, or as the kwargs themselves.
        """
        if len(args) == 1 and type(args[0]) == dict:
            guesses = args[0]
        elif len(kwargs) == 1 and "guesses" in kwargs.keys():
            guesses = kwargs["guesses"]
        else:
            guesses = kwargs
        self.guess_dict = {}
        for this_name in self.pars.keys():
            if this_name in guesses.keys():
                if type(guesses[this_name]) is dict:
                    self.guess_dict[this_name] = {}
                    for k, v in guesses[this_name].items():
                        setattr(self.pars[this_name], k, v)
                        self.guess_dict[this_name][k] = v
                elif np.isscalar(guesses[this_name]):
                    self.pars[this_name].value = guesses[this_name]
                    self.guess_dict[this_name] = {"value":guesses[this_name]}
                else:
                    raise ValueError("what are the keys to your guesses???")
        for j in self.pars:
            logging.info(strm("fit param ---", j))
        logging.info(strm(self.pars))
        return

    def guess(self):
        r"""Old code that we are preserving here -- provide the guess for our
        parameters; by default, based on pseudoinverse"""
        if hasattr(self, "guess_dict"):
            self.guess_dictionary = {
                k: self.guess_dict[k]["value"] for k in self.guess_dict.keys()
            }
            return [self.guess_dictionary[k] for k in self.parameter_names]
        else:
            return [1.0] * len(self.parameter_names)

    def settoguess(self):
        "a debugging function, to easily plot the initial guess"
        self.fit_coeff = np.real(self.guess())
        return self

    def _taxis(self, taxis):
        r"You can enter None, to get the fit along the same range as the data, an integer to give the number of points, or a range of data, which will return 300 points"
        if taxis is None:
            taxis = self.getaxis(self.fit_axis).copy()
        elif isinstance(taxis, int):
            taxis = np.linspace(
                self.getaxis(self.fit_axis).min(),
                self.getaxis(self.fit_axis).max(),
                taxis,
            )
        elif not np.isscalar(taxis) and len(taxis) == 2:
            taxis = np.linspace(taxis[0], taxis[1], 300)
        return taxis

    def eval(self, taxis=None, set_what=None, set_to=None):
        """Calculate the fit function along the axis taxis.

        Parameters
        ----------
        taxis: ndarray, int
            :if ndarray: the new axis coordinates along which we want to calculate the fit.
            :if int: number of evenly spaced points along the t-axis along the fit
        set_what: 'str', optional
            forcibly sets a specific symbol
        set_to: double, optional
            the specific value(int) you are assigning the symbol you included

        Returns
        -------
        self: nddata
            the fit function evaluated along the axis coordinates that were passed
        """
        if isinstance(set_what, dict):
            set_to = list(set_what.values())
            set_what = list(set_what.keys())
        if taxis is None:
            taxis = self.getaxis(self.fit_axis)
        else:
            taxis = self._taxis(taxis)
        if hasattr(self, "fit_coeff") and self.fit_coeff is not None:
            p = self.fit_coeff.copy()
        else:
            p = np.array([np.nan] * len(self.parameter_names))
        # {{{LOCALLY apply any forced values
        if set_what is not None:
            if self.set_indices is not None:
                raise ValueError(
                    "You're trying to set indices in an eval"
                    " function for a function that was fit constrained; this"
                    " is not currently supported"
                )
            set_indices, set_to, active_mask = self.gen_indices(set_what, set_to)
            p[set_indices] = set_to
        # }}}
        # {{{ make a new blank np.array with the fit axis expanded to fit taxis
        newdata = ndshape(self)
        newdata[self.fit_axis] = np.size(taxis)
        newdata = newdata.alloc()
        newdata.set_plot_color(self.get_plot_color())
        # }}}
        # {{{keep all axis labels the same, except the expanded one
        newdata.axis_coords = list(newdata.axis_coords)
        newdata.labels([self.fit_axis], list([taxis]))
        # }}}
        newdata.data[:] = self.fitfunc(p, taxis).flatten()
        return newdata

    def fit(self):
        r"""actually run the fit"""
        # we can ignore set_what, since I think there's a mechanism in
        # lmfit to take care of that (it's for fixing parameters)
        # but the rest of what it's doing is to pull apart the
        # error, axis, etc, to be fed to minimize.
        #
        # It also automatically converts complex data to real data, and
        # does other things for error handling -- let's not just throw this out
        #
        # I think that a lot of this could be copied with little modification
        #
        # But you  should read through and see what the previous fit method is doing
        # and then copy over what you can
        x = self.getaxis(self.fit_axis)
        if np.iscomplex(self.data.flatten()[0]):
            logging.debug(strm("Warning, taking only real part of fitting data!"))
        y = np.real(self.data)
        sigma = self.get_error()
        out = minimize(
            self.residual,
            self.pars,
            args=(x, y, sigma),
        )
        # can you capture the following as a string? maybe return it?
        report_fit(out, show_correl=True)
        # {{{ capture the result for ouput, etc
        self.fit_coeff = [out.params[j].value for j in self.symbol_list]
        assert out.success
        self.covariance = out.covar
        # }}}
        return

    def run_lambda(self, pars):
        """actually run the lambda function we separate this in case we want
        our function to involve something else, as well (e.g. taking a Fourier
        transform)"""
        logging.info(strm(*(self.getaxis(j) for j in self.variable_names)))
        return self.fitfunc_multiarg_v2(
            *(self.getaxis(j) for j in self.variable_names), **pars.valuesdict()
        )

    def residual(self, pars, x, y, sigma=None):
        "calculate the residual OR if data is None, return fake data"
        fit = self.run_lambda(pars)
        if sigma is not None:
            # boolean masks must be combined with &, not the `and` keyword
            valid = (sigma != 0.0) & np.isfinite(sigma)
            normalization = np.sum(1.0 / sigma[valid])
            sigma[sigma == 0.0] = 1
            sigma[~np.isfinite(sigma)] = 1
        try:
            # as noted here: https://stackoverflow.com/questions/6949370/scipy-leastsq-dfun-usage
            # this needs to be fit - y, not vice versa
            if sigma is not None:
                retval = (fit - y) / sigma * normalization
            else:
                retval = fit - y
        except ValueError as e:
            raise ValueError(
                strm(
                    "your error (",
                    np.shape(sigma),
                    ") probably doesn't match y (",
                    np.shape(y),
                    ") and fit (",
                    np.shape(fit),
                    ")",
                )
                + explain_error(e)
            )
        return retval

    def copy(self):
        namelist = []
        vallist = []
        for j in dir(self):
            if self._contains_symbolic(j):
                namelist.append(j)
                vallist.append(self.__getattribute__(j))
                self.__delattr__(j)
        new = deepcopy(self)
        for j in range(0, len(namelist)):
            new.__setattr__(namelist[j], vallist[j])
        for j in range(0, len(namelist)):
            self.__setattr__(namelist[j], vallist[j])
        return new

    def gen_indices(self, this_set, set_to):
        r"""pass this this_set and this_set\_to parameters, and it will return:
        indices,values,mask
        indices --> gives the indices that are forced
        values --> the values they are forced to
        mask --> p[mask] are actually active in the fit"""
        if not isinstance(this_set, list):
            this_set = [this_set]
        if not isinstance(set_to, list):
            set_to = [set_to]
        if len(this_set) != len(set_to):
            raise ValueError(
                strm(
                    "length of this_set=",
                    this_set,
                    "and set_to",
                    set_to,
                    "are not the same!",
                )
            )
        logging.debug("*** *** *** *** *** ***")
        logging.debug(str(this_set))
        logging.debug("*** *** *** *** *** ***")
        set_indices = list(map(self.symbol_list.index, this_set))
        active_mask = np.ones(len(self.symbol_list), dtype=bool)
        active_mask[set_indices] = False
        return set_indices, set_to, active_mask

    def output(self, *name):
        r"""give the fit value of a particular symbol, or a dictionary of all values.

        Parameters
        -----------
        name: str (optional)
            name of the symbol.
            If no name is passed, then output returns a dictionary of the
            resulting values.

        Returns
        -------
        retval: dict or float
            Either a dictionary of all the values, or the value itself
        """
        if not hasattr(self, "fit_coeff") or self.fit_coeff is None:
            return None
        p = self.fit_coeff.copy()
        if self.set_indices is not None:
            temp = p.copy()
            p = np.zeros(len(self.symbol_list))
            p[self.active_mask] = temp
            p[self.set_indices] = self.set_to
        if len(name) == 1:
            try:
                return p[self.symbol_list.index(name[0])]
            except ValueError:
                raise ValueError(
                    strm(
                        "While running output: couldn't find",
                        name,
                        "in",
                        self.symbol_list,
                    )
                )
        elif len(name) == 0:
            return {self.symbol_list[j]: p[j] for j in range(len(p))}
        else:
            raise ValueError(
                strm("You can't pass", len(name), "arguments to .output()")
            )

    def latex(self):
        r"""show the latex string for the function, with all the symbols substituted by their values"""
        # this should actually be generic to fitdata
        p = self.fit_coeff
        retval = self.function_string
        printfargs = []
        allsymb = []
        locations = []
        # {{{ I replace the symbols manually
        #     Note that I came back and tried to use sympy to do this,
        #     but then realize that sympy will automatically simplify,
        #     e.g. numbers in the denominator, so it ends up changing the
        #     way the function looks.  Though this is a pain, it's
        #     better.
        for j in range(0, len(self.symbol_list)):
            symbol = sympy_latex(self.symbolic_vars[j]).replace("$", "")
            logging.debug(strm('DEBUG: replacing symbol "', symbol, '"'))
            location = retval.find(symbol)
            while location != -1:
                if retval[location - 1] == "-":
                    newstring = (
                        retval[: location - 1]
                        + dp(-1 * p[j])
                        + retval[location + len(symbol) :]
                    )  # replace the symbol in the written function with the appropriate number
                else:
                    newstring = (
                        retval[:location] + dp(p[j]) + retval[location + len(symbol) :]
                    )  # replace the symbol in the written function with the appropriate number
                logging.debug(
                    strm(
                        r"trying to replace", retval[location : location + len(symbol)]
                    )
                )
                retval = newstring
                locations += [location]
                allsymb += [symbol]
                location = retval.find(symbol)
        # }}}
        logging.debug(
            strm(
                r"trying to generate",
                self.function_string,
                "\n",
                retval,
                "\n",
                [allsymb[x] for x in np.argsort(locations)],
                "\n",
                printfargs,
            )
        )
        return retval

    @property
    def function_string(self):
        r"""A property of the myfitclass class which stores a string
        output of the functional form of the desired fit expression
        provided in func:`functional_form` in LaTeX format"""
        retval = sympy_latex(self.expression).replace("$", "")
        return r"$f(%s)=" % (sympy_latex(sympy_symbol(self.fit_axis))) + retval + r"$"

    @function_string.setter
    def function_string(self, value):
        raise ValueError(
            "You cannot set the string directly -- change the functional_form property instead!"
        )
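
A minimal usage sketch for the class above, assuming a pyspecdata-style nddata with a single "t" axis (the axis name, expression, and guesses are illustrative, not from the source; treat the nddata construction as pseudocode if your version differs):

import numpy as np
import sympy as sp

t_axis = np.linspace(0, 5, 100)
A, R, t = sp.symbols("A R t", real=True)

# hypothetical mono-exponential decay data on an nddata with a "t" dimension
data = lmfitdata(nddata(3.0 * np.exp(-1.2 * t_axis), "t").labels("t", t_axis))
data.functional_form = A * sp.exp(-R * t)            # sympy expression to fit
data.set_guess(A={"value": 1.0, "min": 0.0}, R=1.0)  # guesses and bounds
data.fit()
print(data.output())   # dict of fitted parameter values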
Example #21
def main(argv, SHOW, BLOCK):
	global RECORD, REPLAY, mod
	print_logo()
	fn = argv[1]
	cfg_filename = argv[2]
	detector = argv[3]
#	hdf = h5py.File(fn,'r')
	
	# the allowed keys will be available as cfg members after reading parameter file 
	allowed_keys={"res_param":DictType,"T":FloatType}
	# EXAMPLE OF CFG FILE :
	""""
	# Parameters for resolution function
	# usage : res_param = {detector_number:[mu,wG,wL],...,n:[mun,wGn,wLn]}
	res_param ={
	1:[0.6552,2.604,4.53],
	2:[0.6319,2.603,4.013],
	..........................
	}
	#Temperature (important : floating type is mandatory)
	T = 297.0
	"""
	
	cfg = read_configuration_file(cfg_filename,allowed_keys= allowed_keys)
	interactive_Entry = True 
	mod=None
	const=None
	CONVOLUTION_METHOD="PSEUDOVOIGT"

	while(1):
		if interactive_Entry:
#			( scan_num , detect_num, 
#			  Ene_array ,Intens_array, Intens_Err) = interactive_extract_data_from_h5(hdf)
			( scan_num , detect_num, 
			  Ene_array ,Intens_array, Intens_Err) = get_data_from_txt(fn,detector)

			if  CONVOLUTION_METHOD=="PSEUDOVOIGT":
				# we build here a pseudo_voigt for convolution, based on configuration parameters peculiar to the detector 
				mu,gaussian_w,lorentz_w , base_line = cfg.res_param[int(detect_num)]
				convolution_Function = PseudoVoigt( mu,lorentz_w ,gaussian_w , base_line)
			else:
				raise Exception, ("I don't know your convolution model=%s, develop it in the code" % CONVOLUTION_METHOD)

			mod = Model(cfg.T,Ene_array,convolution_Function )

			xy, noel = interactive_GUI_get_init_peak_params(Ene_array,Intens_array)
			const=None
			skip = (xy == [])  # xy is a list : [ e0, height0, e1, height....]
			if noel :   # means: the energy range did not contain zero, and the elastic peak has not been set by
									# the above GUI routine. We are going to ask for it now and prepend Ec, Ael to xy
				if REPLAY != 0:
					exec(getinstruction(REPLAY))
					xy=[[Ec,Ael]]+xy
				while(1):
					try:
						Ec=float(raw_input('Enter overall scan shift (Ec) : '))
						Ael=float(raw_input('Enter intensity of elastic line (Ael) : '))
						xy=[[Ec,Ael]]+xy
						if RECORD:
							open("interactive_session.log","a").write("Ec,Ael=.%s   # in completion for noel=True \n"%str((Ec,Ael)))

						break
					except:
						print " INPUT ERROR, TRY AGAIN "
						pass
			if not skip:
				# setting up parameter list  : ( position, height, width, position, height.... )
				param_list = np.zeros([len(xy),3    ],"d")
				param_list[:,:2]=xy
				wel,wj =0.1,0.1 #widths of elastic and excitation peaks (initial guess)
				param_list[0,2]=wel
				param_list[1:,2]=wj

				# setting up the model
				params_and_functions = Params_and_Functions()
				params_and_functions.setParams(param_list.flatten())
				# //////////////////////////// contributions
				params_and_functions.setContribution(shape_class=LineModel) # elastic line
				for i in range(len(xy)-1):
					params_and_functions.setContribution(shape_class=LineModel)
				params_and_functions.normalise(mod) 
				print '--------------------------------------------------------------'
				print 'Input parameters :'
				params_and_functions.print_params(cfg.T, File=sys.stdout)

				mod.set_Params_and_Functions(params_and_functions)
			
		else:
			skip=False
		
		if not skip:
			#*************** TRA *********************************************************
			# """
			t0=time.time()
			Fit_Parameters = Parameters()
			pname_dict     = build_param_name_dict(params_and_functions)
			init_params    = params_and_functions.par_array
			print " init params: ",init_params
			if const is None:
				const = default_build_constrains(params_and_functions ,position=3,intensity=2,irange=[0.,params_and_functions.maxheight()*2],width=3)
				print const
				vary_means = {0:False, 1: True, 2: True, 3: False}#To be modified to accept only True and False (0,1)
				for ii in range(len(init_params)):
					# Fit_Parameters.add(pname_dict[ii], value = init_params[ii], min = const[1][ii], max = const[2][ii], vary=True)
					Fit_Parameters.add(pname_dict[ii], value = init_params[ii])
				
				pp = [Fit_Parameters[kk].value for kk in Fit_Parameters.keys()]
				
				Fitted_result = minimize(residual_fit, Fit_Parameters, args=(Ene_array, Intens_array, Intens_Err))
				chisq = Fitted_result.chisqr
				refined_param = []
				sigmapar      = []
				for k in Fitted_result.params.keys():
					refined_param.append(Fitted_result.params[k].value)
					sigmapar.append(Fitted_result.params[k].stderr)
				const = default_build_constrains(params_and_functions,position=2, # refined_param[0] is assumed to be the centre of the elastic line
							  prange=[0+(refined_param[0]),Ene_array[-1]*1.2],intensity=2,irange=[0.,params_and_functions.maxheight()*2],width=2,wrange=[0.,2.5])  #XXX
				vary_means = {0:False, 1: True, 2: True, 3: False}  # maps constraint codes to vary flags; TODO: use plain booleans
				for ii in range(len(init_params)):
					Fit_Parameters.add(pname_dict[ii], value = init_params[ii], min = const[1][ii], max = const[2][ii], vary=vary_means[const[0][ii]])
				pp = [Fit_Parameters[k].value for k in Fit_Parameters.keys()]
				
				Fitted_result = minimize(residual_fit, Fit_Parameters, args=(Ene_array, Intens_array, Intens_Err))
				chisq = Fitted_result.chisqr
				refined_param = []
				sigmapar      = []
				for k in Fitted_result.params.keys():
					refined_param.append(Fitted_result.params[k].value)
					sigmapar.append(Fitted_result.params[k].stderr)
			else:
				vary_means = {0:False, 1: True, 2: True, 3: False}  # maps constraint codes to vary flags; TODO: use plain booleans
				for ii in range(len(init_params)):
					Fit_Parameters.add(pname_dict[ii], value = init_params[ii], min = const[1][ii], max = const[2][ii], vary=vary_means[const[0][ii]])
					
				pp = [Fit_Parameters[kk].value for kk in Fit_Parameters.keys()]
				
				Fitted_result = minimize(residual_fit, Fit_Parameters, args=(Ene_array, Intens_array, Intens_Err))
				chisq = Fitted_result.chisqr
				refined_param = []
				sigmapar      = []
				for k in Fitted_result.params.keys():
					refined_param.append(Fitted_result.params[k].value)
					sigmapar.append(Fitted_result.params[k].stderr)
			t1=time.time()
			print "DOne!"
			
			print 'Exec time for calculation : %f'%(t1-t0)
			print 'number of iteration in Levenberg-Marquardt : %d'%mod.count
			print 'Exec time per iteration : %f'%((t1-t0)/mod.count)
			mod.params_and_functions.par_array[:] =  refined_param  # Note: we update the values in place; the object reference itself is unchanged
			print 'root-mean-square deviation : %f' % np.sqrt(np.mean((Intens_array-mod.Ft_I(refined_param,Ene_array ))**2))

			plotted_datas = Plot(mod,refined_param,Ene_array,Intens_array,Intens_Err , show_graph=SHOW)  # this function is also used
			# just to grab the data columns: Ldat = [E-Center, A, Err, tot, el, inel1, inel2, ...]

			print '--------------------------------------------------------------'
			print 'Output parameters :'
			params_and_functions.print_params(cfg.T,sigmapar, File=sys.stdout)   # on the screen

			output_dir =  fn.split(".")[0] + '_fit'
			output_stripped_name  = os.path.basename(fn).split(".")[0]
			if not os.path.exists(output_dir):
				os.mkdir(output_dir)
			out_name = '%s/%s.h5'%(output_dir,output_stripped_name )	
			datasetname = "data_%s_%s"%(scan_num,detect_num )

			out_param_fn = os.path.join('%s'%output_dir, '%s_%s.param'%(output_stripped_name, detect_num))
			out = open(out_param_fn,'w')
			elC = params_and_functions.print_params_h5(cfg.T,sigmapar, File=out_name,datasetname= datasetname)  # to the HDF5 file
			elC = params_and_functions.print_params(cfg.T,sigmapar, File=out)  # to the .param file
			out.close()
			cs = np.column_stack(plotted_datas)
			cs[:,0] -= elC
			np.savetxt('%s/%s_%s.dat'%(output_dir,output_stripped_name,detect_num), cs , fmt='%12.4e', delimiter=' ')

			file_print ( output_dir, output_stripped_name       ,  scan_num , detect_num)

			try:
				plt.show(block=BLOCK)
			except TypeError:  # older matplotlib without the block keyword
				plt.show()

			interactive_Entry=True
			if not REPLAY==0:
				exec(getinstruction(REPLAY))
			else:
				r = raw_input('Fit another spectrum (y/n, default [y])?\nOr change temperature (t)?\nOr refine the previous fit with different constraints (r)?\n')
				if RECORD:
					open("interactive_session.log","a").write("r='%s'   # in asking Would you like to fit another spectrum :y,n,r,t \n"%r)
			if r in ['n','N']:
				print 'Bye Bye'
				break
			elif r in ['t','T']:
				
				if not REPLAY==0:
					exec(getinstruction(REPLAY))
				else:
					T = raw_input('Temperature ? [297.0]: ')
					if RECORD :
						open("interactive_session.log","a").write("T='%s'   # in asking Temperature ? [297.0] \n"%T)

				if T == '':
					cfg.T = 297.0
				else :
					cfg.T = float(T)
			elif r in ['r','R']:
				const = interactive_define_ext_constrains(params_and_functions,const) # this function might change internal values
																					  # of params_and_functions.par_array
				interactive_Entry=False
			else:
				pass # will continue as default
			plt.close()
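The two fitting passes above decode integer constraint codes from default_build_constrains into lmfit bounds and vary flags. A minimal, self-contained sketch of that decoding step follows; the arrays, parameter names, and the Gaussian residual are illustrative assumptions, not the script's actual helpers.

import numpy as np
from lmfit import Parameters, minimize

# Hypothetical constraint arrays in the style of default_build_constrains:
# codes holds per-parameter constraint codes, lower/upper the bounds.
codes = [0, 1, 2]
lower = [-np.inf, 0.0, 0.0]
upper = [np.inf, 10.0, 2.5]
values = [0.0, 1.0, 0.1]
names = ['center', 'height', 'width']

# Same code-to-vary mapping as in the script above
vary_means = {0: False, 1: True, 2: True, 3: False}

params = Parameters()
for name, code, val, lo, hi in zip(names, codes, values, lower, upper):
    params.add(name, value=val, min=lo, max=hi, vary=vary_means[code])

def residual(p, x, y):
    # Toy Gaussian standing in for the script's convolved line model
    model = p['height'].value * np.exp(-(x - p['center'].value)**2
                                       / (2 * p['width'].value**2))
    return y - model

x = np.linspace(-1.0, 1.0, 101)
y = 1.0 * np.exp(-x**2 / (2 * 0.1**2))
result = minimize(residual, params, args=(x, y))
print(result.params['height'].value, result.params['width'].value)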
示例#22
0
l2, = ax2.plot(t, pz, lw=2, color='k')
ax2.legend(['Pole-zero/Tau Corrected'])
l3, = ax3.plot(t, ff, lw=2, color='blue')
ax3.legend(['Trapezoidal Filter Output'])
l4, = ax4.plot(t, zc, lw=2, color='green')
ax4.legend(['CFD Output'])
#l5,= ax4.plot(t[20:],zc[1],lw=2,color='purple')
ax2.set_xlim(0, 2000)
#ax1.set_ylim(pulse.min()-margin,pulse.max()+margin)
#ax2.set_ylim(ff.min()-margin,ff.max()+margin)
#plt.axis([0,2000,-100,100])
#ax1.autoscale(axis='y')
#ax2.autoscale(axis='y')

axDict = dict()
key_len = len(variables.keys())
for k in variables.keys():
    axDict[k] = plt.axes([0.15, key_len * 0.04 + 0.13, 0.65, 0.03])
    key_len -= 1

#axamp = plt.axes([0.15,0.05, 0.65, 0.03])
#axmean = plt.axes([0.15,0.09, 0.65, 0.03])
#axsigma = plt.axes([0.15,0.13, 0.65, 0.03])

axlen = plt.axes([0.15, 0.05, 0.65, 0.03])
axgap = plt.axes([0.15, 0.09, 0.65, 0.03])
axtau = plt.axes([0.15, 0.13, 0.65, 0.03])
slideDict = dict()
key_len = len(variables.keys())
for k in variables.keys():
    slideDict[k] = Slider(axDict[k],
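The fragment above cuts off inside the Slider constructor. A minimal, self-contained sketch of the intended pattern of one slider per fit variable is given below; the variables dict and the update rule are illustrative assumptions.

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider

# Hypothetical fit variables as (initial, min, max); a stand-in for the
# 'variables' dict used by the fragment above.
variables = {'length': (100.0, 10.0, 500.0),
             'gap': (50.0, 0.0, 200.0),
             'tau': (20.0, 1.0, 100.0)}

fig, ax = plt.subplots()
plt.subplots_adjust(bottom=0.1 + 0.05 * len(variables))
t = np.linspace(0, 2000, 500)
line, = ax.plot(t, np.sin(t / variables['tau'][0]), lw=2)

# One slider per variable, stacked from the bottom of the figure upwards
sliders = {}
for i, (name, (init, lo, hi)) in enumerate(variables.items()):
    sax = plt.axes([0.15, 0.05 + 0.04 * i, 0.65, 0.03])
    sliders[name] = Slider(sax, name, lo, hi, valinit=init)

def update(_):
    # Recompute the displayed curve from the current slider values
    line.set_ydata(np.sin(t / sliders['tau'].val))
    fig.canvas.draw_idle()

for s in sliders.values():
    s.on_changed(update)

plt.show()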
示例#23
0
class UDFParametersModel(QtCore.QAbstractTableModel):

    def __init__(self, params, parent=None):
        super(UDFParametersModel, self).__init__(parent)
        if params is not None:
            self.params = params
        else:
            self.params = Parameters()

    def rowCount(self, parent=QtCore.QModelIndex()):
        return len(self.params) + 1

    def columnCount(self, parent=QtCore.QModelIndex()):
        return 5

    def flags(self, index):
        row = index.row()
        col = index.column()

        if row == 0 and col == 0:
            retval = QtCore.Qt.ItemIsEditable | QtCore.Qt.ItemIsEnabled

        if row == 0 and col > 0:
            retval = QtCore.Qt.NoItemFlags

        #parameter name
        if col == 0 and row > 0:
            retval = (QtCore.Qt.ItemIsEditable |
                      QtCore.Qt.ItemIsEnabled |
                      QtCore.Qt.ItemIsSelectable)

        #parameter value
        if col == 1 and row > 0:
            retval = (QtCore.Qt.ItemIsEditable |
                      QtCore.Qt.ItemIsUserCheckable |
                      QtCore.Qt.ItemIsEnabled |
                      QtCore.Qt.ItemIsSelectable)

        #min/max values
        if (col == 2 or col == 3) and row > 0:
            retval = (QtCore.Qt.ItemIsEditable |
                      QtCore.Qt.ItemIsEnabled |
                      QtCore.Qt.ItemIsSelectable)

        #expr
        if col == 4 and row > 0:
            retval = (QtCore.Qt.ItemIsEditable |
                      QtCore.Qt.ItemIsEnabled |
                      QtCore.Qt.ItemIsSelectable)

        return retval

#     def layersAboutToBeInserted(self, start, end):
#         self.beginInsertRows(QtCore.QModelIndex(), start, end)
#
#     def layersFinishedBeingInserted(self):
#         self.endInsertRows()
#
#     def layersAboutToBeRemoved(self, start, end):
#         self.beginRemoveRows(QtCore.QModelIndex(), start, end)
#
#     def layersFinishedBeingRemoved(self):
#         self.endRemoveRows()

    def setData(self, index, value, role=QtCore.Qt.EditRole):
        row = index.row()
        col = index.column()
        names = curvefitter.names(self.params)

        if row:
            name = names[row - 1]

        if role == QtCore.Qt.CheckStateRole:
            if row > 0 and col == 1:
                if value == QtCore.Qt.Checked:
                    self.params[name].vary = False
                else:
                    self.params[name].vary = True

        if role == QtCore.Qt.EditRole:
            if row == 0 and col == 0:
                currentparams = self.rowCount() - 1

                validator = QtGui.QIntValidator()
                voutput = validator.validate(value, 1)
                if voutput[0] == QtGui.QValidator.State.Acceptable and int(voutput[1]) >= 0:
                    newparams = int(voutput[1])

                    if newparams == currentparams:
                        return True

                    if newparams > currentparams:
                        self.beginInsertRows(
                            QtCore.QModelIndex(),
                            currentparams + 1,
                            newparams)
                    if newparams < currentparams:
                        self.beginRemoveRows(
                            QtCore.QModelIndex(),
                            newparams + 1,
                            currentparams)

                    if newparams > currentparams:
                        for i in range(currentparams, newparams):
                            self.params.add('p%d'%i, 0, True, -np.inf, np.inf, None)
                        self.endInsertRows()

                    if newparams < currentparams:
                        remove_names = names[newparams:]
                        for remove_name in remove_names:
                            # map() is lazy in Python 3, so pop explicitly
                            self.params.pop(remove_name)
                        self.endRemoveRows()

                    self.modelReset.emit()
            if row > 0 and col in [1, 2, 3]:
                validator = QtGui.QDoubleValidator()
                voutput = validator.validate(value, 1)
                if voutput[0] == QtGui.QValidator.State.Acceptable:
                    number = float(voutput[1])
                else:
                    return False

                if col == 1:
                    self.params[name].value = number
                if col == 2:
                    self.params[name].min = number
                if col == 3:
                    self.params[name].max = number
            if row > 0 and col == 0:
                #changing a parameter name requires building a new Parameters dict
                if not valid_symbol_name(value):
                    return False

                p = Parameters()
                param = self.params[name]
                newparam = Parameter(value, param.value, param.vary,
                                     param.min, param.max, param.expr)

                for k, v in self.params.items():
                    if k == name:
                        p[value] = newparam
                    else:
                        p[k] = v

                self.params = p

            if row > 0 and col == 4:
                #set an expression
                param = self.params[name]
                param.expr = value

        self.dataChanged.emit(index, index)
        return True

    def data(self, index, role=QtCore.Qt.DisplayRole):
        if not index.isValid():
            return False

        row = index.row()
        col = index.column()
        names = curvefitter.names(self.params)

        if row:
            name = names[row - 1]

        if role == QtCore.Qt.DisplayRole:
            if col == 0:
                if row == 0:
                    return str(len(self.params))
                else:
                    return name
            elif col == 1 and row > 0:
                return str(self.params[name].value)
            elif col == 2 and row > 0:
                return str(self.params[name].min)
            elif col == 3 and row > 0:
                return str(self.params[name].max)
            elif col == 4 and row > 0:
                return str(self.params[name].expr)

        if role == QtCore.Qt.CheckStateRole:
            if row > 0 and col == 1:
                if self.params[name].vary:
                    return QtCore.Qt.Unchecked
                else:
                    return QtCore.Qt.Checked

    def headerData(self, section, orientation, role=QtCore.Qt.DisplayRole):
        """ Set the headers to be displayed. """
        if role != QtCore.Qt.DisplayRole:
            return None

        if orientation == QtCore.Qt.Vertical:
            if section == 0:
                return 'number of parameters'
            else:
                names = list(self.params.keys())
                return names[section - 1]

        if orientation == QtCore.Qt.Horizontal:
            if section == 0:
                return 'name'
            if section == 1:
                return 'value'
            if section == 2:
                return 'lower limit'
            if section == 3:
                return 'upper limit'
            if section == 4:
                return 'expr'
        return None
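A minimal usage sketch for hooking the model above to a table view; it assumes PyQt5 and that the module-level dependencies used by the class (curvefitter, valid_symbol_name, Parameter, np) are importable.

import sys
from PyQt5 import QtWidgets
from lmfit import Parameters

if __name__ == '__main__':
    params = Parameters()
    params.add('amplitude', value=1.0, min=0.0)
    params.add('decay', value=0.5)

    app = QtWidgets.QApplication(sys.argv)
    view = QtWidgets.QTableView()
    view.setModel(UDFParametersModel(params))  # model class defined above
    view.setWindowTitle('lmfit parameters')
    view.show()
    sys.exit(app.exec_())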
示例#24
0
class xpvic_fit:
    """
    Class of objects allowing to easily batch fit neutrons diffraction data with
    one or more pseudo-Voigt Ikeda-Carpenter functional forms.
    """
    def __init__(self,
                 data,
                 refParams,
                 h,
                 fit_interval=[-.1, .1],
                 data_range=None,
                 num_fit_func=1):
        """
        Initialize attributes of the class.

        Parameters
        ----------
        data : pandas DataFrame
            Dataframe where each row of the column called "spectra" contains a
            dataframe with ENS data, where hh0 is the x-axis data and Inorm is
            the y-axis data. See the makeData() method.
        refParams : lmfit Parameters object
            Reference Parameters object, containing default values and a fixed
            number of Parameter objects, as defined by the model fit function.
        h : positive integer
            Peak position in (h h 0) reciprocal space.
        fit_interval : list of two floats
            Interval around the peak position within which data are fitted.
        data_range : range
            Range of indices of data rows to include in the fit.
        num_fit_func : positive integer
            Number of fit functions used for each curve.

        Attributes
        ----------
        self.xdata_selection : Boolean NumPy array
            Array with the same size as data.spectra[row_index].hh0, True where
            data should be used for fitting and False elsewhere.
        self.data_range : range
            Range of indices of data rows to include in the fit.

        """

        # Metadata of the fit: peak position in (h h 0) reciprocal space
        self.h = h
        peak_position = -float(h)
        self.hkl = f"({self.h} {self.h} 0)"

        # Number of fitting function for each curve
        self.num_fit_func = num_fit_func

        # Create range of data to fit and plot
        if data_range is None:
            self.data_range = range(
                len(data))  # by default, use the full range of data
        else:
            self.data_range = data_range
        self.plot_range = self.data_range  # use the same range for plotting

        # Number of spectra to use for the fit
        self.num_spec = len(self.data_range)

        # x-axis data selection
        dat_idx = self.data_range[-1]
        self.xdata_selection = np.logical_and(
            data.spectra[dat_idx].hh0 > peak_position + fit_interval[0],
            data.spectra[dat_idx].hh0 < peak_position + fit_interval[1])
        self.plot_lim = .15
        self.xdata_plot_selection = np.logical_or(
            np.logical_and(
                data.spectra[dat_idx].hh0 > peak_position - self.plot_lim,
                data.spectra[dat_idx].hh0 < peak_position + fit_interval[0]),
            np.logical_and(
                data.spectra[dat_idx].hh0 > peak_position + fit_interval[1],
                data.spectra[dat_idx].hh0 < peak_position + self.plot_lim))

        # Data to fit
        self.data = data

        # Set of reference parameters to use for the fit
        self.refParams = refParams

        # Number of shared free parameters in the fit
        self.freeSharedPrms = 0
        for key in refParams.keys():
            if refParams[key].vary is True:
                self.freeSharedPrms += 1

        # Create x- and y-axis arrays of data for batch fitting.
        self.makeData()

    def makeData(self):
        """
        Create x- and y-axis arrays of data for batch fitting.
            
        Returns
        -------
        X : NumPy array
            Array of x-axis arrays for batch fitting.
        Y : NumPy array
            Array of y-axis arrays for batch fitting.
    
        """
        # Initialize arrays with zero lines and as many columns as there are data in each spectrum.
        # This is only useful if weights should be computed differently for different spectra:
        # self.X = np.empty((0,len(self.data.spectra[self.data_range[0]].hh0)))
        # self.Y = np.empty(self.X.shape)
        # self.dY = np.empty(self.X.shape)


        # Create x, y and dy data arrays
        self.X = np.stack([
            self.data.spectra[idx].hh0[self.xdata_selection]
            for idx in self.data_range
        ])
        self.Xplot = np.stack([
            self.data.spectra[idx].hh0[self.xdata_plot_selection]
            for idx in self.data_range
        ])
        self.Y = np.stack([
            self.data.spectra[idx].Inorm[self.xdata_selection]
            for idx in self.data_range
        ])
        self.Yplot = np.stack([
            self.data.spectra[idx].Inorm[self.xdata_plot_selection]
            for idx in self.data_range
        ])
        self.dY = np.stack([
            self.data.spectra[idx].dInorm[self.xdata_selection]
            for idx in self.data_range
        ])

        # Compute weights from data errors
        self.weights = 1 / (self.dY)
        # Record positions of np.inf values in self.weights, then reset them to zero
        if np.any(self.weights == np.inf):
            inf_positions = np.argwhere(self.weights == np.inf)
            self.weights[self.weights == np.inf] = 0
            warnings.warn(
                "Infinite values were encountered in 'weights', at positions "
                f"{inf_positions}. They were reset to zero.")

        for row, idx in enumerate(self.data_range):
            if not np.any(self.dY[row]):
                warnings.warn(
                    f"All errors are zero in spectrum with index {idx}; "
                    "using all ones as weights.")
                self.weights[row] = np.ones(self.weights[row].shape)

    def initParams(self, resultParams=None, xp=None, A=None, fixParams={}):
        """
        Initialize parameters for the next fitting iteration using the results of the previous fit
        and, if necessary, the default values of a reference set of parameters    
    
        Parameters
        ----------
        resultParams : lmfit Parameters object, optional
            Parameters object yielded by the previously performed fit, if any.
            The default is None.
        xp : NumPy array of length self.num_fit_func
            Initial values of the independent parameter xp, which contains the
            peak positions.
        A : NumPy array of length self.num_fit_func
            Initial values of the independent parameter A, which contains the
            peak amplitudes.
        fixParams : dict, optional
            Maps a parameter name (currently only 'A') to a fixed ratio between
            successive sub-peaks, enforced through lmfit expressions.

        Returns
        -------
        self.init_params : lmfit Parameters object
            Parameters to be used in the fit of all curves.
        """

        # Set default values of xp and A arrays when fitting with only one pVIC function:
        # Default value of xp is np.array([self.refParams['xp'].value])
        if xp is None:
            xp = np.array([
                self.refParams['xp'].value + 0.015 *
                (self.num_fit_func - 1 - 2 * idx)
                for idx in range(self.num_fit_func)
            ])

        # Default value of A is np.array([self.refParams['A'].value])
        if A is None:
            A = np.array([
                self.refParams['A'].value / np.sqrt(self.num_fit_func)
                for _ in range(self.num_fit_func)
            ])

        if type(fixParams) is not dict:
            raise TypeError("The fixParams argument must be a dictionary.")

        # Initialize lmfit Parameters object
        self.init_params = Parameters()
        # For those parameters that have been computed in the last run,
        # use as initial values for the next run the best fit values obtained in the last
        if resultParams is not None:
            for key in resultParams.keys():
                # try:
                self.init_params[key] = cp.copy(resultParams[key])
                # except KeyError: # in case self.init_params has been modified since last fitting run
                #     continue

        # Create additional fit parameters, e.g. if the number of datasets has been extended
        for spec_idx in self.data_range:
            # loop over indices of datasets, in order to create fit parameters for each of them
            for key in self.refParams.keys():
                if key in ['A', 'xp']:
                    # fit parameters that are different for each dataset are assigned individual names
                    for fidx in range(self.num_fit_func):
                        par_key_base = f"{key}{spec_idx}"
                        par_key = f"{par_key_base}_{fidx}"
                        # if par_key not in self.init_params.keys():
                        try:
                            init_value = (xp if key == 'xp' else A)[fidx]
                            self.init_params.add(par_key,
                                                 value=init_value,
                                                 min=self.refParams[key].min,
                                                 vary=self.refParams[key].vary)
                        except (TypeError, IndexError):
                            raise TypeError(
                                f"{key} must be an iterable object "
                                f"of length {self.num_fit_func}")

                        if fidx > 0 and key == 'A' and key in fixParams:
                            self.init_params[par_key].expr = (
                                f"{par_key_base}_{fidx-1}/{fixParams[key]}")

                # For the shared fit parameters, if they have not been previously computed
                elif resultParams is None:
                    # They are assigned the "generic" name from self.refParams
                    self.init_params[key] = cp.copy(self.refParams[key])

    def performFit(self, with_weights=True):
        """
        Perform the fit using the xnpVIC_residual function, the residual
        function for fitting multiple curves with the pseudo-Voigt
        Ikeda-Carpenter functional form.

        Returns
        -------
        self.result : MinimizerResult object from the lmfit module
            Result of the minimize function, containing the fit results.

        """
        if with_weights is True:
            self.result = minimize(xnpVIC_residual,
                                   self.init_params,
                                   args=(self.X, self.Y, self.data_range),
                                   kws={
                                       'weights': self.weights,
                                       'nFunc': self.num_fit_func
                                   })
        else:
            self.result = minimize(xnpVIC_residual,
                                   self.init_params,
                                   args=(self.X, self.Y, self.data_range),
                                   kws={'nFunc': self.num_fit_func})

    def bestFitParams(self):
        """
        Create NumPy array of best fit parameter values for each fitted curve.
    
        Returns
        -------
        self.bestparams : NumPy array
            Array of arrays of best fit parameter values for each fitted curve.
    
        """

        self.bestparams = np.zeros(
            (self.num_spec, self.num_fit_func, len(self.refParams)))
        # self.bestparams shape: (# of spectra) x (# of fit functions) x (# of pVIC parameters)
        for spec_idx in range(self.num_spec):
            for fidx in range(self.num_fit_func):
                for par_idx, refKey in enumerate(self.refParams.keys()):
                    par_key = f"{refKey}{self.data_range[spec_idx]}_{fidx}"
                    # the parameter name concatenates the generic name from
                    # self.refParams, the spectrum index, and the sub-function index
                    try:
                        self.bestparams[spec_idx][fidx][
                            par_idx] = self.result.params[par_key].value
                    except KeyError:
                        self.bestparams[spec_idx][fidx][
                            par_idx] = self.result.params[refKey].value

    def plotMultipleFits(self, title=None, plotSubFits=False):
        """
        Plot multiple datasets with the corresponding fits.    
        """

        # (Re)compute best fit parameters if they don't exist or if something
        # went wrong during the first computation and all of them are zero
        if not hasattr(self, 'bestparams') or not np.any(self.bestparams):
            self.bestFitParams()

        fig, ax = plt.subplots()
        for spec_idx in self.plot_range:  #
            dat_idx = list(self.data_range).index(spec_idx)  #
            # print(f"data_range index = {self.data_range[dat_idx]}; \
            #       plot_range index = {spec_idx}") # Just to check that the right labels are displayed in the legend
            fieldlabel = f"{self.data['H (T)'][spec_idx]:.3g}T"
            p = plt.errorbar(self.X[dat_idx],
                             self.Y[dat_idx],
                             self.dY[dat_idx],
                             marker='o',
                             elinewidth=1,
                             linewidth=0,
                             label=f"expt {fieldlabel}")
            pcolor = p[-1][0].get_color()[0, :3]
            plt.plot(self.Xplot[dat_idx],
                     self.Yplot[dat_idx],
                     marker='x',
                     linewidth=0,
                     color=pcolor,
                     label=f"excluded")

            plot_center = -float(self.h)
            plot_lim = self.plot_lim
            fit_xrange = np.linspace(plot_center - plot_lim,
                                     plot_center + plot_lim,
                                     num=400)
            best_subfit = np.zeros((self.num_fit_func, fit_xrange.shape[0]))

            for fidx in range(self.num_fit_func):
                best_subfit[fidx] = pVIC(fit_xrange,
                                         *self.bestparams[dat_idx][fidx])
                if plotSubFits is True:
                    plt.plot(fit_xrange,
                             best_subfit[fidx],
                             '-',
                             label=f"subfit {fieldlabel} {fidx}")

            bestfit = np.sum(best_subfit, axis=0)
            plt.plot(fit_xrange,
                     bestfit,
                     '-',
                     color=pcolor,
                     label=f"fit {fieldlabel}")
            plt.legend(loc='best')

        freeParams = [
            k for k in list(self.init_params.keys())
            if self.init_params[k].vary
        ]
        if title is None:
            plt.title(f"TmVO$_4$ neutrons {len(freeParams)} free parameters")
        else:
            plt.title(title)
        plt.xlabel("$h$ in ($h$ $h$ 0)")
        plt.ylabel("$I$ (a.u.)")

        # Set the format of the y-axis tick labels
        ax.yaxis.set_major_formatter(FormatStrFormatter('%.2g'))

        # Show the figure only after titles and labels have been applied
        plt.show()
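The class above relies on xnpVIC_residual and pVIC, which are not shown. As a sketch only: a multi-spectrum residual compatible with the parameter-naming scheme used by initParams might look like the following, where the Gaussian is a stand-in for the pVIC line shape and shared parameters are omitted.

import numpy as np

def gaussian_peak(x, xp, A, w=0.02):
    # Stand-in for the pseudo-Voigt Ikeda-Carpenter shape pVIC(x, *params)
    return A * np.exp(-(x - xp)**2 / (2 * w**2))

def xnpVIC_residual(params, X, Y, data_range, weights=None, nFunc=1):
    # Concatenate (optionally weighted) residuals over all spectra.
    # Per-spectrum parameters follow the naming scheme of initParams,
    # e.g. 'A{spec_idx}_{fidx}' and 'xp{spec_idx}_{fidx}'.
    res = []
    for row, spec_idx in enumerate(data_range):
        model = np.zeros_like(X[row])
        for fidx in range(nFunc):
            A = params['A%d_%d' % (spec_idx, fidx)].value
            xp = params['xp%d_%d' % (spec_idx, fidx)].value
            model += gaussian_peak(X[row], xp, A)
        r = Y[row] - model
        if weights is not None:
            r = r * weights[row]
        res.append(r)
    return np.concatenate(res)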
示例#25
0
fit_params.add('decay', value=0.02)

out = minimize(residual, fit_params, args=(x, ), kws={'data': data})

fit = residual(fit_params, x)

print(' N fev = ', out.nfev)
print(out.chisqr, out.redchi, out.nfree)

report_fit(fit_params)
#ci=calc_ci(out)
ci, tr = conf_interval(out, trace=True)
report_ci(ci)

if HASPYLAB:
    names = fit_params.keys()
    i = 0
    gs = pylab.GridSpec(4, 4)
    sx = {}
    sy = {}
    for fixed in names:
        j = 0
        for free in names:
            if j in sx and i in sy:
                ax = pylab.subplot(gs[i, j], sharex=sx[j], sharey=sy[i])
            elif i in sy:
                ax = pylab.subplot(gs[i, j], sharey=sy[i])
                sx[j] = ax
            elif j in sx:
                ax = pylab.subplot(gs[i, j], sharex=sx[j])
                sy[i] = ax
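The fragment above is truncated mid-loop; the GridSpec grid it assembles is typically filled with the traces returned by conf_interval. A hedged sketch of the companion conf_interval2d call, which maps the probability surface for a parameter pair, follows; the API differs across lmfit versions (newer releases take a minimizer and a result object), and 'amp' is a placeholder parameter name.

from lmfit import conf_interval2d
import pylab

# Chi-square probability surface over a grid for two parameters;
# 'decay' is the parameter added above, 'amp' is a placeholder.
cx, cy, grid = conf_interval2d(out, 'amp', 'decay', 30, 30)
pylab.contourf(cx, cy, grid, levels=pylab.linspace(0, 1, 11))
pylab.xlabel('amp')
pylab.ylabel('decay')
pylab.colorbar(label='probability')
pylab.show()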
示例#26
0
def fit_PRF_on_averaged_data(time_course,ci_time_course, design_matrix, n_pixel_elements_raw, n_pixel_elements_convolved, model='OG',plotbool=False, tr_times = [],TR = 1.5,  
						plotdir=[], voxno=[], dm_for_BR = [], valid_regressors = [], slice_no=[], randint=True, roi='unknown_roi',hrf_params=[],all_results=0,
						max_eccentricity = 1,ellipsoid=False,stim_duration_TR=24,n_slices=30,results_frames = [],max_ecc = 1.6,stim_radius=7.5,postFix=[]):
	"""Fit a population receptive field (PRF) model to trial-averaged time courses."""

	
	orientations = ['0','45','90','135','180','225','270','315','X']
	n_orientations = len(orientations)

	#########################################################################################################################################################################################################################
	#### Initiate Parameters
	#########################################################################################################################################################################################################################

	## initiate parameters:
	params = Parameters()

	# two location parameters
	params.add('xo', value= 0.0 )
	params.add('yo', value= 0.0)
	params.add('ecc',value=0.0,min=0,max=max_ecc,expr='sqrt(xo**2+yo**2)')

	# and a baseline
	params.add('baseline',value=0.0)

	# center parameters:
	params.add('sigma_center',value=0.1,min=0.0000000001) # initialization at 0.1/2 * 15 = 0.75 degrees sd, about 1.8 degrees FWHM
	params.add('amp_center',value=0.05,min=0.0000000001) # initial center amplitude

	# surround parameters
	params.add('sigma_surround',value=0.5,expr='sigma_center+delta_sigma') # surround size should roughly be 5 times that of the center
	params.add('amp_surround',value=-0.001,max=-0.0000000001,expr='-amp_center+delta_amplitude') # initialized at a small negative amplitude
	params.add('delta_sigma',value=0.4,min=0.0000000001) # this difference parameter ensures that the surround is always larger than the center
	params.add('delta_amplitude',value=0.049,min=0.0000000001) # this difference parameter ensures that the surround is never deeper than the center is high


	# when fitting an OG model, set all surround and delta parameters to 0 and to not vary and set the expression to None, otherwise it will start to vary anyway
	if model == 'OG':	
		params['amp_surround'].value,params['amp_surround'].vary,params['amp_surround'].expr = 0, False, None
		params['delta_amplitude'].vary, params['delta_sigma'].vary,params['sigma_surround'].vary = False, False, False


	#########################################################################################################################################################################################################################
	#### Prepare data
	#########################################################################################################################################################################################################################

	# add empty periods between trials in dm in order to let the model prediction die down
	tr_per_trial = len(time_course)/n_orientations
	add_empty_trs = 20
	padded_dm = np.zeros((len(time_course)+add_empty_trs*n_orientations,n_pixel_elements_raw,n_pixel_elements_raw))
	padd_mask = np.zeros(len(padded_dm)).astype(bool)
	for i in range(n_orientations):
		padd_mask[i*tr_per_trial+add_empty_trs*i:(i+1)*tr_per_trial+add_empty_trs*i] = True		
		padded_dm[i*tr_per_trial+add_empty_trs*i:(i+1)*tr_per_trial+add_empty_trs*i,:,:] = design_matrix[i*tr_per_trial:(i+1)*tr_per_trial,:,:]


	#########################################################################################################################################################################################################################
	#### Prepare fit object and function
	#########################################################################################################################################################################################################################

	# initiate model prediction object
	ssr = n_slices
	g = gpf(design_matrix = padded_dm, max_eccentricity = max_eccentricity, n_pixel_elements = n_pixel_elements_raw, rtime = TR, ssr = ssr,slice_no=slice_no)#, add_empty_trs=add_empty_trs,tr_per_trial=tr_per_trial,n_orientations=n_orientations)

	# initiate fit functionality
	def residual(params, time_course,padd_mask):

		center_model_prediction =  g.hrf_model_prediction(params['xo'].value, params['yo'].value, params['sigma_center'].value,hrf_params)[0] * params['amp_center'].value
		surround_model_prediction =  g.hrf_model_prediction(params['xo'].value, params['yo'].value, params['sigma_surround'].value,hrf_params)[0] * params['amp_surround'].value

		combined_model_prediction =  params['baseline'].value + center_model_prediction + surround_model_prediction 
		return time_course - combined_model_prediction[padd_mask]

	#########################################################################################################################################################################################################################
	#### initialize parameters
	#########################################################################################################################################################################################################################

	if np.size(all_results) == 1:

		## initiate search space with Ridge prefit
		Ridge_start_params, BR_PRF, BR_predicted = fitRidge_for_Dumoulin(dm_for_BR, time_course, valid_regressors=valid_regressors, n_pixel_elements=n_pixel_elements_convolved, alpha=1e14)
		params['xo'].value  = Ridge_start_params['xo']
		params['yo'].value = Ridge_start_params['yo']

	else:

		params['xo'].value = all_results[results_frames['xo']]
		params['yo'].value = all_results[results_frames['yo']]
		params['sigma_center'].value = all_results[results_frames['sigma_center']]
		params['sigma_surround'].value = all_results[results_frames['sigma_surround']]
		params['amp_center'].value = all_results[results_frames['amp_center']]
		params['amp_surround'].value = all_results[results_frames['amp_surround']]
		params['delta_sigma'].value = all_results[results_frames['delta_sigma']]
		params['delta_amplitude'].value = all_results[results_frames['delta_amplitude']]
		params['baseline'].value = all_results[results_frames['baseline']]

		surround_PRF =  g.twoD_Gaussian(params['xo'].value, params['yo'].value,params['sigma_surround'].value) * params['amp_surround'].value
		center_PRF =  g.twoD_Gaussian(params['xo'].value, params['yo'].value,params['sigma_center'].value) * params['amp_center'].value
		BR_PRF = center_PRF + surround_PRF

	#########################################################################################################################################################################################################################
	#### Evaluate fit
	#########################################################################################################################################################################################################################

	# find optimal parameters:
	minimize(residual, params, args=(), kws={'time_course':time_course,'padd_mask':padd_mask},method='powell')

	#########################################################################################################################################################################################################################
	#### Recreate resulting predictions and PRFs with optimized parameters
	#########################################################################################################################################################################################################################

	trimmed_center_mp = (g.hrf_model_prediction(params['xo'].value, params['yo'].value, params['sigma_center'].value,hrf_params)[0] * params['amp_center'].value)[padd_mask]
	trimmed_surround_mp = (g.hrf_model_prediction(params['xo'].value, params['yo'].value, params['sigma_surround'].value,hrf_params)[0] * params['amp_surround'].value)[padd_mask]
	trimmed_mp = params['baseline'].value + trimmed_center_mp + trimmed_surround_mp 


	raw_center_mp = (g.raw_model_prediction(params['xo'].value, params['yo'].value, params['sigma_center'].value)* params['amp_center'].value)[padd_mask]
	raw_surround_mp = (g.raw_model_prediction(params['xo'].value, params['yo'].value, params['sigma_surround'].value)* params['amp_surround'].value)[padd_mask]
	raw_mp = params['baseline'].value + raw_center_mp + raw_surround_mp

	surround_PRF = g.twoD_Gaussian(params['xo'].value, params['yo'].value,params['sigma_surround'].value) * params['amp_surround'].value
	center_PRF = g.twoD_Gaussian(params['xo'].value, params['yo'].value,params['sigma_center'].value) * params['amp_center'].value
	PRF = center_PRF + surround_PRF

	#########################################################################################################################################################################################################################
	#### Get fit diagnostics
	#########################################################################################################################################################################################################################

	## In a DoG model, the center region is determined by the subtraction of the positive and the negative gaussian. 
	## The size of the positive gaussian is therefore not directly linked to the size of the positive region. 
	## Therefore, the FWHM measure is more appropriate. To get it, we first create the PRF at center position,
	## and select from it the line that runs right through the middle:
	reconstruction_radius = 10
	this_ssr = 1000 
	t = np.linspace(-reconstruction_radius,reconstruction_radius,this_ssr*reconstruction_radius)
	PRF_2D =  params['amp_center'].value * np.exp(-t**2/(2*params['sigma_center'].value**2)) + params['amp_surround'].value * np.exp(-t**2/(2*(params['sigma_surround'].value)**2))
	## then, we fit a spline through this line, and get the roots (the fwhm points) of the spline:
	spline=interpolate.UnivariateSpline(range(len(PRF_2D)),PRF_2D-np.max(PRF_2D)/2,s=0)
	## and compute the distance between them
	try:
		fwhm = ((np.diff(spline.roots())/len(t)*reconstruction_radius) * stim_radius)[0]
	except IndexError:
		## when no two half-maximum crossings are found, set fwhm to 0:
		fwhm = 0

	## now find the surround size in the same way
	if model == 'OG' or params['amp_surround'].value == 0:
		surround_size = 0
	else:
		spline=interpolate.UnivariateSpline(range(len(PRF_2D)),PRF_2D+np.min(PRF_2D),s=0)
		surround_size = ((np.diff(spline.roots())/len(t)*reconstruction_radius) * stim_radius)[0]

	## EVALUATE FIT QUALITY
	# RSS = np.sum((time_course - trimmed_mp)**2)
	stats = {}
	stats['spearman'] = spearmanr(time_course,trimmed_mp)[0]
	stats['pearson'] = pearsonr(time_course,trimmed_mp)[0]
	stats['RSS'] = np.sum((time_course - trimmed_mp)**2)
	stats['r_squared'] = 1 - stats['RSS']/np.sum((time_course - np.mean(time_course)) ** 2) 
	stats['kendalls_tau'] = kendalltau(time_course,trimmed_mp)[0]

	## SETUP RESULTS DICT
	results={}
	for key in params.keys():
		results[key] = params[key].value
	
	results['ecc'] *= stim_radius  # np.linalg.norm([params['xo'].value,params['yo'].value]) * stim_radius
	results['fwhm'] = fwhm
	results['surround_size'] = surround_size 
	results['polar'] = np.arctan2(params['yo'].value,params['xo'].value)
	# if the resulting PRF falls outside of the stimulus radius,
	# set the complex value here to ~0 so that it drops out of the retinotopy maps
	if results['ecc'] < (0.9*stim_radius):
		multiplier = stats['r_squared']
	else:
		multiplier = 0.001
	results['real_polar'] = np.cos(results['polar'])*np.arctanh(multiplier)
	results['imag_polar'] = np.sin(results['polar'])*np.arctanh(multiplier)
	results['real_eccen'] = np.cos(results['ecc'])*np.arctanh(multiplier)
	results['imag_eccen'] = np.sin(results['ecc'])*np.arctanh(multiplier)
	results['real_fwhm'] = np.cos(results['fwhm'])*np.arctanh(multiplier)
	results['imag_fwhm'] = np.sin(results['fwhm'])*np.arctanh(multiplier)
	results['SI'] = (params['amp_surround'].value * (params['sigma_surround'].value**2) ) / (params['amp_center'].value * (params['sigma_center'].value**2) )



	#########################################################################################################################################################################################################################
	#### Plot results
	#########################################################################################################################################################################################################################

	# print stats['r_squared']
	if plotbool:  # optionally gate also on fit quality, e.g. stats['r_squared'] > 0.1

		plot_dir = os.path.join(plotdir, '%s'%roi)
		if not os.path.isdir(plot_dir): os.mkdir(plot_dir)

		f=pl.figure(figsize=(18,6)); ri = 2
		sn.set(style="ticks")
		minval = np.min(time_course - ci_time_course)
		maxval = np.max(time_course + ci_time_course)
		for di in range(9):

			this_timecourse = time_course[di*len(time_course)/len(orientations):(di+1)*len(time_course)/len(orientations)]
			this_ci = ci_time_course[di*len(time_course)/len(orientations):(di+1)*len(time_course)/len(orientations)]
			s=f.add_subplot(ri,len(orientations),di+1)
			pl.axhline(results['baseline'],linestyle='-',linewidth=1,color='k')
			pl.plot(tr_times,this_timecourse,'k',linewidth=2,label='data')
			pl.fill_between(tr_times,this_timecourse-this_ci,this_timecourse+this_ci,color='k',alpha=0.15)
			pl.plot(tr_times,trimmed_mp[di*len(trimmed_mp)/len(orientations):(di+1)*len(trimmed_mp)/len(orientations)],'m',linewidth=2,label = 'model')
			pl.plot(tr_times,results['baseline']+trimmed_surround_mp[di*len(trimmed_surround_mp)/len(orientations):(di+1)*len(trimmed_surround_mp)/len(orientations)],'b',linestyle='--',linewidth=1,label = 'surround mp')
			pl.plot(tr_times,results['baseline']+trimmed_center_mp[di*len(trimmed_center_mp)/len(orientations):(di+1)*len(trimmed_center_mp)/len(orientations)],'r',linestyle='--',linewidth=1,label = 'center mp')
			pl.ylim(minval,maxval)
			pl.xlim(0,np.max(tr_times))

			if di == 0:
				if 'psc' in postFix:
					pl.ylabel('% signal change')
				else:
					pl.ylabel('unknown unit')
			else:
				pl.yticks([])
				# pl.tick_params(axis='y',which='both',left='off',right='off',labelleft='off') 
			if di == (len(orientations)-1):
				leg = s.legend(fancybox = True, loc = 'best')
				leg.get_frame().set_alpha(0.5)
				if leg:
					for t in leg.get_texts():
						t.set_fontsize(8)  # the legend text fontsize
			sn.despine(offset=10)

			pl.title(orientations[di])
			# if di == 4:
			# 	pl.xlabel('time (s)')
			# 	# pl.text(len(tr_times)/2.0,np.min(zip(*all_averaged_data))*0.8,'stimulus',verticalalignment='center',fontsize=10)
			# 	pl.xticks([0,int(stim_duration_TR*TR)],['0','%d'%int(stim_duration_TR*TR)])
			# else:
			pl.xticks([])

		s = f.add_subplot(ri,6,ri*6-5)
		# s = f.add_subplot(ri,2,3)
		pl.imshow(BR_PRF,origin='lower',interpolation='nearest',cmap=cm.coolwarm)
		pl.axis('off')
		s.set_title('Ridge PRF')

		s = f.add_subplot(ri,6,ri*6-4)
		# s = f.add_subplot(ri,7,7)
		pl.imshow(PRF,origin='lower',interpolation='nearest',cmap=cm.coolwarm)
		pl.axis('off')
		s.set_title('Direct model PRF')
		
		# pl.tight_layout()
		# pl.savefig(os.path.join(plot_dir, 'vox_%d_%d_%d.pdf'%(slice_no,voxno,n_pixel_elements_raw)))
		# pl.close()

		s = f.add_subplot(ri,6,ri*6-1)
		pl.imshow(np.ones((n_pixel_elements_raw,n_pixel_elements_raw)),cmap='gray')
		pl.clim(0,1)
		# s.text(n_pixel_elements_raw/2,n_pixel_elements_raw/2, '\nHRF parameters: \n\na1: %.2f\na2: %.2f\nb1: %.2f\nb2: %.2f\nc: %.2f'
		# 	 %(hrf_params['hrf_a1'],hrf_params['hrf_a2'],hrf_params['hrf_b1'],hrf_params['hrf_b2'],hrf_params['hrf_c']),horizontalalignment='center',verticalalignment='center',fontsize=12,bbox={'facecolor':'white', 'alpha':1, 'pad':10})
		# s.text(n_pixel_elements_raw/2,n_pixel_elements_raw/2, '\nHRF parameters: \n\n1: %.2f\n2: %.2f\n3: %.2f\nb2: %.2f\nc: %.2f'
		# 	 %(hrf_params['hrf_a1'],hrf_params['hrf_a2'],hrf_params['hrf_b1'],hrf_params['hrf_b2'],hrf_params['hrf_c']),horizontalalignment='center',verticalalignment='center',fontsize=12,bbox={'facecolor':'white', 'alpha':1, 'pad':10})
		pl.axis('off')

		s = f.add_subplot(ri,6,ri*6-2)
		pl.imshow(np.ones((n_pixel_elements_raw,n_pixel_elements_raw)),cmap='gray')
		pl.clim(0,1)
		s.text(n_pixel_elements_raw/2,n_pixel_elements_raw/2, '\nFIT PARAMETERS: \n\nsd center: %.2f\nsd surround: %.2f\namp center: %.6f\namp surround: %.6f\nbaseline: %.6f\n\nDERIVED QUANTIFICATIONS: \n\nr squared: %.2f\necc: %.2f\nFWHM: %.2f\nsurround size: %.2f\nsuppression index: %.2f'
			 %(params['sigma_center'].value,params['sigma_surround'].value,params['amp_center'].value,params['amp_surround'].value,params['baseline'].value,stats['r_squared'],results['ecc'],results['fwhm'],results['surround_size'],results['SI']),horizontalalignment='center',verticalalignment='center',fontsize=12,bbox={'facecolor':'white', 'alpha':1, 'pad':10})

		pl.axis('off')

		with sn.axes_style("dark"):
			s = f.add_subplot(ri,6,ri*6-3)
			# pl.axhline(0,linestyle='--',linewidth=2,color='w')
			t = np.linspace(-1,1,n_pixel_elements_raw)
			PRF_2D =  params['amp_center'].value * np.exp(-t**2/(2*params['sigma_center'].value**2)) + params['amp_surround'].value * np.exp(-t**2/(2*(params['sigma_surround'].value)**2))
			PRF_2D_surround =  params['amp_surround'].value * np.exp(-t**2/(2*(params['sigma_surround'].value)**2))
			PRF_2D_center =  params['amp_center'].value * np.exp(-t**2/(2*params['sigma_center'].value**2))
			spline=interpolate.UnivariateSpline(range(len(PRF_2D)),PRF_2D-np.max(PRF_2D)/2,s=0)
			spline_surround=interpolate.UnivariateSpline(range(len(-PRF_2D_surround)),-PRF_2D_surround-np.max(-PRF_2D_surround)/2,s=0)
			spline_center=interpolate.UnivariateSpline(range(len(PRF_2D_center)),PRF_2D_center-np.max(PRF_2D_center)/2,s=0)
			pl.plot(PRF_2D,'k',linewidth=2)
			pl.plot(PRF_2D_surround,'--b',linewidth=1)
			pl.plot(PRF_2D_center,'--r',linewidth=1)
			pix_per_degree = n_pixel_elements_raw / (stim_radius*2)
			pl.xticks(np.array([-10,0,10])*pix_per_degree+n_pixel_elements_raw/2,[-10,0,10])
			# pl.yticks([-0.15,0,0.15])
			pl.fill_between(spline_center.roots(),np.min(PRF_2D),np.max(PRF_2D),color='r',alpha=0.1)
			pl.fill_between(spline_surround.roots(),np.min(PRF_2D),np.max(PRF_2D),color='b',alpha=0.1)
			pl.fill_between(spline.roots(),np.min(PRF_2D),np.max(PRF_2D),color='k',alpha=0.5)
			pl.ylabel('a.u.')
			pl.xlabel('visual degrees')
			# pl.text(n_pixel_elements_raw/2,-0.05,'FWHM',color='w',horizontalalignment='center',verticalalignment='center',fontsize=12,fontweight='bold')
			s.set_title('2D PRF profile')
			pl.ylim(np.min(PRF_2D),np.max(PRF_2D))

		with sn.axes_style("darkgrid"):
			xx = np.arange(0,32,TR/float(ssr))
			hrf_kernel = hrf_params[0] * he.hrf.spmt(xx) + hrf_params[1] * he.hrf.dspmt(xx) + hrf_params[2] * he.hrf.ddspmt(xx)
			# hrf_kernel = doubleGamma(np.arange(0,32,TR/float(ssr)),hrf_params['hrf_a1'],hrf_params['hrf_a2'],hrf_params['hrf_b1'],hrf_params['hrf_b2'],hrf_params['hrf_c'])
			hrf_kernel /= np.abs(hrf_kernel).sum()
			s = f.add_subplot(ri,6,ri*6)
			pl.plot(hrf_kernel)
			s.set_title('HRF-kernel')
			sn.despine(offset=5)
			pl.xticks(np.linspace(0,len(hrf_kernel),16),np.arange(0,32,2))
			pl.xlabel('time (s)')

		pl.savefig(os.path.join(plot_dir, 'vox_%d_%d_%d.pdf'%(slice_no,voxno,n_pixel_elements_raw)))
		pl.close()

	# self.results_frames = {'polar':0,'delta_amplitude':1,'ecc':2,'xo':3,'yo':4,'real_eccen':5,'amp_center':6,'surround_size':7,'imag_polar':8,'amp_surround':9,'sigma_surround':10,'real_fwhm':11,'imag_eccen':12,'imag_fwhm':13,'real_polar':14,'SI':15,'delta_sigma':16,'sigma_center':17,'fwhm':18,'baseline':19}

	return results, stats
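As a standalone illustration of the spline-roots FWHM trick used in the function above (arbitrary example parameters, not the experiment's values):

import numpy as np
from scipy import interpolate

# Difference-of-Gaussians profile with arbitrary example parameters
amp_c, sig_c = 0.05, 0.1    # center amplitude and sd
amp_s, sig_s = -0.01, 0.5   # surround amplitude (negative) and sd
t = np.linspace(-10, 10, 4001)
prf = amp_c * np.exp(-t**2 / (2 * sig_c**2)) + amp_s * np.exp(-t**2 / (2 * sig_s**2))

# Shift the profile down by half its maximum; the roots of an interpolating
# spline are then the half-maximum crossings, whose distance is the FWHM.
spline = interpolate.UnivariateSpline(t, prf - prf.max() / 2, s=0)
roots = spline.roots()
fwhm = roots[-1] - roots[0] if len(roots) >= 2 else 0.0
print('FWHM: %.3f degrees' % fwhm)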
示例#27
0
def fit_PRF_on_concatenated_data(data_shared,voxels_in_this_slice,n_TRs,n_slices,fit_on_all_data,plotbool,raw_design_matrices, dm_for_BR,
	valid_regressors, n_pixel_elements_convolved, n_pixel_elements_raw,plotdir,voxno,slice_no,randint,roi,TR,model,hrf_params_shared,all_results_shared,conditions,
	results_frames,	postFix=[],max_eccentricity=1,max_xy = 5,orientations=['0','45','90','135','180','225','270','315','X'],stim_radius = 7.5,
	nuisance_regressors = []):
	"""
	"""

	# grab data for this fit procedure from shared memory
	time_course = np.array(data_shared[:,voxels_in_this_slice][:,voxno])
	hrf_params = np.array(hrf_params_shared[:,voxels_in_this_slice][:,voxno])

	n_orientations = len(orientations)

	# already initialize the final PRF dict
	PRFs = {}

	if fit_on_all_data:

		#########################################################################################################################################################################################################################
		#### Instantiate parameters 
		#########################################################################################################################################################################################################################

		## initiate search space with Ridge prefit
		Ridge_start_params, PRFs['Ridge'], BR_predicted = fitRidge_for_Dumoulin(dm_for_BR, time_course, valid_regressors=valid_regressors, n_pixel_elements=n_pixel_elements_convolved, alpha=1e14)

		## initiate parameters:
		params = Parameters()
		
		# one baseline parameter
		params.add('baseline',value=0.0)

		# two location parameters
		params.add('xo_%s'%conditions[0], value = Ridge_start_params['xo'])
		params.add('yo_%s'%conditions[0], value = Ridge_start_params['yo'])

		params.add('sigma_center_%s'%conditions[0],value=0.1,min=1e-20) # initialization at 0.1 * 7.5 = 0.75 degrees
		params.add('amp_center_%s'%conditions[0],value=0.05,min=1e-20) # initial center amplitude

		# surround parameters
		params.add('sigma_surround_%s'%conditions[0],value=0.3,expr='sigma_center_%s+delta_sigma_%s'%(conditions[0],conditions[0])) # surround size should roughly be 5 times that of the center
		params.add('delta_sigma_%s'%conditions[0],value=0.4,min=1e-20) # ensures that the surround is always larger than the center
		params.add('amp_surround_%s'%conditions[0],value=-0.005,max=1e-20,expr='-amp_center_%s+delta_amplitude_%s'%(conditions[0],conditions[0])) # initialized at a small negative amplitude
		params.add('delta_amplitude_%s'%conditions[0],value=0.045,min=1e-20) # ensures that the surround is never deeper than the center is high

		# when fitting an OG model, set all surround and delta parameters to 0 and to not vary and set the expression to None, otherwise it will start to vary anyway
		if model == 'OG':	
			params['amp_surround_%s'%conditions[0]].value,params['amp_surround_%s'%conditions[0]].vary,params['amp_surround_%s'%conditions[0]].expr = 0, False, None
			params['delta_sigma_%s'%conditions[0]].vary,params['sigma_surround_%s'%conditions[0]].vary =  False, False
			params['delta_amplitude_%s'%conditions[0]].vary = False
	else:

		#########################################################################################################################################################################################################################
		#### Initialize parameters from previous fit results
		#########################################################################################################################################################################################################################

		# grab data for this fit procedure from shared memory
		all_results = np.array(all_results_shared[:,voxels_in_this_slice][:,voxno])

		## initiate parameters:
		params = Parameters()

		# shared baseline param:
		params.add('baseline', value = all_results[results_frames['baseline']])

		# location parameters
		for condition in conditions:
			params.add('xo_%s'%condition, value = all_results[results_frames['xo']])
			params.add('yo_%s'%condition, value = all_results[results_frames['yo']])

			# center parameters:
			params.add('sigma_center_%s'%condition,value=all_results[results_frames['sigma_center']]/stim_radius,min=1e-20) # previous fit value, rescaled from degrees to model units
			params.add('amp_center_%s'%condition,value=all_results[results_frames['amp_center']],min=1e-20)

			# surround parameters
			params.add('sigma_surround_%s'%condition,value=all_results[results_frames['sigma_surround']]/stim_radius,expr='sigma_center_%s+delta_sigma_%s'%(condition,condition)) # surround size should roughly be 5 times that of the center
			params.add('amp_surround_%s'%condition,value=all_results[results_frames['amp_surround']],max=-1e-20,expr='-amp_center_%s+delta_amplitude_%s'%(condition,condition))
			params.add('delta_sigma_%s'%condition,value=all_results[results_frames['delta_sigma']],min=1e-20) # ensures that the surround is always larger than the center
			params.add('delta_amplitude_%s'%condition,value=all_results[results_frames['delta_amplitude']],min=1e-20) # ensures that the surround is never deeper than the center is high

			# when fitting an OG model, set all surround and delta parameters to 0 and to not vary and set the expression to None, otherwise it will start to vary anyway
			if model == 'OG':	
				params['amp_surround_%s'%condition].value,params['amp_surround_%s'%condition].vary,params['amp_surround_%s'%condition].expr = 0, False, None
				params['delta_sigma_%s'%condition].vary,params['sigma_surround_%s'%condition].vary = False, False
				params['delta_amplitude_%s'%condition].vary=False

		g = gpf(design_matrix = raw_design_matrices[conditions[0]], max_eccentricity = max_eccentricity, n_pixel_elements = n_pixel_elements_raw, rtime = TR, ssr = 1,slice_no=slice_no)
		
		# recreate PRFs
		this_surround_PRF = g.twoD_Gaussian(all_results[results_frames['xo']],all_results[results_frames['yo']],
			all_results[results_frames['sigma_surround']]/stim_radius) * all_results[results_frames['amp_surround']]
		this_center_PRF = g.twoD_Gaussian(all_results[results_frames['xo']], all_results[results_frames['yo']],
			all_results[results_frames['sigma_center']]/stim_radius) * all_results[results_frames['amp_center']]
		PRFs['All_fit'] = this_center_PRF + this_surround_PRF

	#########################################################################################################################################################################################################################
	#### Prepare fit object and function
	#########################################################################################################################################################################################################################

	# initiate model prediction object
	ssr = np.round(1/(TR/float(n_slices)))
	
	gpfs = {}
	for condition in conditions:
		gpfs[condition] = gpf(design_matrix = raw_design_matrices[condition], max_eccentricity = max_eccentricity, n_pixel_elements = n_pixel_elements_raw, rtime = TR, ssr = ssr,slice_no=slice_no)

	def residual(params,recreate=False):

		# combine all stimulus regressors
		combined_model_prediction = np.ones_like(time_course)*params['baseline'].value
		for ci,condition in enumerate(conditions):
			combined_model_prediction += gpfs[condition].hrf_model_prediction(params['xo_%s'%condition].value, params['yo_%s'%condition].value, 
				params['sigma_center_%s'%condition].value,hrf_params)[0] * params['amp_center_%s'%condition].value
			combined_model_prediction += gpfs[condition].hrf_model_prediction(params['xo_%s'%condition].value, params['yo_%s'%condition].value, 
				params['sigma_surround_%s'%condition].value, hrf_params)[0] * params['amp_surround_%s'%condition].value

		return time_course - combined_model_prediction

	#########################################################################################################################################################################################################################
	#### Evaluate fit
	#########################################################################################################################################################################################################################

	# optimize parameters
	minimize(residual, params, args=(), kws={},method='powell')
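	# note: for scalar optimizers such as 'powell', lmfit collapses the residual array returned by
	# residual() to a single scalar (the sum of squares), so this minimizes the sum of squared residuals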

	#########################################################################################################################################################################################################################
	#### Recreate resulting predictions and PRFs with optimized parameters
	#########################################################################################################################################################################################################################

	# initiate model prediction at baseline value
	combined_model_prediction = np.ones_like(time_course) * params['baseline'].value

	# now loop over conditions, create prediction and add to total prediction
	model_predictions = {}
	for ci,condition in enumerate(conditions):
		this_center_model_prediction = gpfs[condition].hrf_model_prediction(params['xo_%s'%condition].value, params['yo_%s'%condition].value, 
			params['sigma_center_%s'%condition].value,hrf_params)[0] * params['amp_center_%s'%condition].value
		this_surround_model_prediction = gpfs[condition].hrf_model_prediction(params['xo_%s'%condition].value, params['yo_%s'%condition].value, 
			params['sigma_surround_%s'%condition].value, hrf_params)[0] * params['amp_surround_%s'%condition].value
		model_predictions[condition] = this_center_model_prediction + this_surround_model_prediction
		combined_model_prediction += model_predictions[condition]

		# recreate PRFs
		this_center_PRF = gpfs[condition].twoD_Gaussian(params['xo_%s'%condition].value, params['yo_%s'%condition].value,
			params['sigma_center_%s'%condition].value) * params['amp_center_%s'%condition].value
		this_surround_PRF = gpfs[condition].twoD_Gaussian(params['xo_%s'%condition].value, params['yo_%s'%condition].value,
			params['sigma_surround_%s'%condition].value) * params['amp_surround_%s'%condition].value
		PRFs[condition] = this_center_PRF + this_surround_PRF


	#########################################################################################################################################################################################################################
	#### Get fit diagnostics
	#########################################################################################################################################################################################################################

	# compute the FWHM, necessary when fitting a DoG model
	reconstruction_radius = 10
	this_ssr = 1000 
	t = np.linspace(-reconstruction_radius,reconstruction_radius,this_ssr*reconstruction_radius)
	
	fwhms = {}
	surround_sizes = {}
	for condition in conditions:
		PRF_2D = params['amp_center_%s'%condition].value * np.exp(-t**2/(2*params['sigma_center_%s'%condition].value**2)) + params['amp_surround_%s'%condition].value * np.exp(-t**2/(2*(params['sigma_surround_%s'%condition].value)**2))
		## then, we fit a spline through this line, and get the roots (the fwhm points) of the spline:
		spline=interpolate.UnivariateSpline(range(len(PRF_2D)),PRF_2D-np.max(PRF_2D)/2,s=0)
		## and compute the distance between them
		try:
			fwhms[condition] = ((np.diff(spline.roots())/len(t)*reconstruction_radius) * stim_radius)[0]
		except Exception:
			## when this procedure fails, set fwhm to 0:
			fwhms[condition] = 0
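		# the spline-root trick above: the roots of (profile - max/2) are the two half-maximum
		# crossings in sample-index units; their separation, rescaled to stimulus units, gives the
		# width at half maximum (for a pure Gaussian, FWHM = 2*sqrt(2*ln 2)*sigma, about 2.355*sigma)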
		
		## now find the surround size in the same way
		if (model == 'OG') or (params['amp_surround_%s'%condition].value == 0):
			surround_sizes[condition] = 0
		else:
			spline = interpolate.UnivariateSpline(range(len(PRF_2D)),PRF_2D+np.min(PRF_2D),s=0)
			surround_sizes[condition] = ((np.diff(spline.roots())/len(t)*reconstruction_radius) * stim_radius)[0]

	## EVALUATE OVERALL MODEL FIT QUALITY
	stats = {}
	stats['spearman'] = spearmanr(time_course,combined_model_prediction)[0]
	stats['pearson'] = pearsonr(time_course,combined_model_prediction)[0]
	stats['RSS'] = np.sum((time_course - combined_model_prediction)**2)
	stats['r_squared'] = 1 - stats['RSS']/np.sum((time_course - np.mean(time_course)) ** 2) 
	stats['kendalls_tau'] = kendalltau(time_course,combined_model_prediction)[0]

	## CREATE SEPARATE RESULTS DICT PER CONDITION
	results = {}
	for condition in conditions:
		results[condition] = {}
		results[condition]['baseline'] = params['baseline'].value
		# params from fit
		for key in params.keys():
			if condition in key:
				# leave out the condition in the keys (as the results frames are identical across conditions)
				new_key = key[:-len(condition)-1]
				results[condition][new_key] = params[key].value

		results[condition]['ecc'] = np.linalg.norm([params['xo_%s'%condition].value,params['yo_%s'%condition].value]) * stim_radius
		results[condition]['sigma_center'] *= stim_radius
		results[condition]['sigma_surround'] *= stim_radius

		# derived params
		results[condition]['polar'] = np.arctan2(params['yo_%s'%condition].value,params['xo_%s'%condition].value)
		results[condition]['fwhm'] = fwhms[condition]
		results[condition]['surround_size'] = surround_sizes[condition]
		results[condition]['SI'] = ((params['amp_surround_%s'%condition].value * (params['sigma_surround_%s'%condition].value**2) ) 
			/ (params['amp_center_%s'%condition].value * (params['sigma_center_%s'%condition].value**2) ))
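		# the suppression index is the surround-to-center volume ratio: the volume under a 2D Gaussian
		# amp * exp(-r**2/(2*sigma**2)) is 2*pi*amp*sigma**2, so the 2*pi factors cancel and
		# SI = (amp_surround * sigma_surround**2) / (amp_center * sigma_center**2)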
		
		# if the resulting PRF center falls outside of the stimulus radius, set the multiplier
		# to a near-zero value (0.001) so that the voxel effectively drops out of the retinotopic maps
		if results[condition]['ecc'] < stim_radius:
			multiplier = stats['r_squared']
		else:
			multiplier = 0.001

		# here for only voxels within stim region:
		results[condition]['real_polar_stim_region'] = np.cos(results[condition]['polar'])*np.arctanh(multiplier)
		results[condition]['imag_polar_stim_region'] = np.sin(results[condition]['polar'])*np.arctanh(multiplier)
		
		# and for all voxels:
		results[condition]['real_polar'] = np.cos(results[condition]['polar'])*np.arctanh(stats['r_squared'])
		results[condition]['imag_polar'] = np.sin(results[condition]['polar'])*np.arctanh(stats['r_squared'])
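		# np.arctanh stretches r_squared from [0, 1) to [0, inf), so well-fit voxels dominate when
		# these complex polar components are later averaged (presumably across voxels)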


	#########################################################################################################################################################################################################################
	#### Plot results
	#########################################################################################################################################################################################################################

	if plotbool and (stats['r_squared'] > 0.0):

		n_TRs = n_TRs[0]
		n_runs = int(len(time_course) / n_TRs)
		if fit_on_all_data:
			plot_conditions = ['Ridge','All']
		else:
			plot_conditions = conditions + ['All_fit']
		plot_dir = os.path.join(plotdir, '%s'%roi)
		if not os.path.isdir(plot_dir): os.mkdir(plot_dir)

		f=pl.figure(figsize=(20,8)); rowi = (n_runs+4)

		import colorsys
		colors = np.array([colorsys.hsv_to_rgb(c,0.6,0.9) for c in np.linspace(0,1,3+1)])[:-1]

		for runi in range(n_runs):
			s = f.add_subplot(rowi,1,runi+1)
			pl.plot(time_course[n_TRs*runi:n_TRs*(runi+1)],'-ok',linewidth=0.75,markersize=2.5)#,label='data'
			if not fit_on_all_data:
				for ci, condition in enumerate(conditions):
					pl.plot(model_predictions[condition][n_TRs*runi:n_TRs*(runi+1)]+params['baseline'].value,color=colors[ci],label='%s model'%condition,linewidth=2)				
				pl.plot([0,n_TRs],[params['baseline'].value,params['baseline'].value],color=colors[0],linewidth=1)	
			else:
				pl.plot(combined_model_prediction[n_TRs*runi:n_TRs*(runi+1)],color=colors[0],label='model',linewidth=2)	
			sn.despine(offset=10)
			pl.xlim(0,850)
			if runi == (n_runs-1):
				pl.xlabel('TRs')
			else:
				pl.xticks([])
			if runi == (n_runs//2): # integer division, so the comparison also holds under Python 3
				pl.legend(loc='best',fontsize=8)
				if 'psc' in postFix:
					pl.ylabel('% signal change')
				else:
					pl.ylabel('unknown unit')
			pl.yticks([int(np.min(time_course)),0,int(np.max(time_course))])	
			pl.ylim([int(np.min(time_course)),int(np.max(time_course))])


		rowi = (n_runs+2)//2 # integer division: add_subplot needs an int row count
		k = 0
		for ci, condition in enumerate(plot_conditions):
			k+= 1
			s = f.add_subplot(rowi,len(plot_conditions)*2,(rowi-1)*len(plot_conditions)*2+k,aspect='equal')
			pl.imshow(PRFs[condition],origin='lower',interpolation='nearest',cmap=cm.coolwarm)

			pl.axis('off')
			s.set_title('%s PRF'%condition)
			
			k+= 1
			if condition not in ('Ridge', 'All_fit'):
				s = f.add_subplot(rowi,len(plot_conditions)*2,(rowi-1)*len(plot_conditions)*2+k)
				pl.imshow(np.ones((n_pixel_elements_raw,n_pixel_elements_raw)),cmap='gray')
				pl.clim(0,1)
				if model == 'OG':
					s.text(n_pixel_elements_raw/2,n_pixel_elements_raw/2, "\n%s PARAMETERS: \n\nbaseline: %.2f\nsize: %.2f\namplitude: %.6f\n\n\nDERIVED QUANTIFICATIONS: \n\nr-squared: %.2f\necc: %.2f\nFWHM: %.2f"%
						(condition,results[condition]['baseline'],results[condition]['sigma_center'],results[condition]['amp_center'],
							stats['r_squared'],results[condition]['ecc'],results[condition]['fwhm']),
						horizontalalignment='center',verticalalignment='center',fontsize=10,bbox={'facecolor':'white', 'alpha':1, 'pad':10})
				elif model == 'DoG':
					s.text(n_pixel_elements_raw/2,n_pixel_elements_raw/2, "\n%s PARAMETERS: \n\nbaseline: %.2f\nsd center: %.2f\nsd surround: %.2f\namp center: %.6f\namp surround: %.6f\n\nDERIVED QUANTIFICATIONS: \n\nr squared: %.2f\necc: %.2f\nFWHM: %.2f\nsurround size: %.2f\nsuppression index: %.2f"
						%(condition,results[condition]['baseline'],results[condition]['sigma_center'],results[condition]['sigma_surround'],results[condition]['amp_center'],
						results[condition]['amp_surround'],stats['r_squared'],results[condition]['ecc'],results[condition]['fwhm'],results[condition]['surround_size'],
						results[condition]['SI']),horizontalalignment='center',verticalalignment='center',fontsize=10,bbox={'facecolor':'white', 'alpha':1, 'pad':10})
				pl.axis('off')

		# pl.tight_layout()
		pl.savefig(os.path.join(plot_dir, 'vox_%d_%d_%d.pdf'%(slice_no,voxno,n_pixel_elements_raw)))
		pl.close()

	return results, stats
Example #28
0
def manglespec3(SpectrumObject, spec_mjd, wanted_filters, wanted_flux, data_table, verbose = False):
    """

    :param verbose:
    :param spec_mjd:
    :param wanted_filters:
    :param wanted_flux:
    :param data_table:
    :param SpectrumObject:

    :return:
    """
    original_spectrum_flux = data_table[data_table["mask"]]["spec_filterflux"].data
    scaled_spectrum_flux = data_table[data_table["mask"]]["mangledspec_filterflux"].data

    if len(scaled_spectrum_flux) != len(wanted_flux):
        # the weight fit below requires exactly one weight per wanted filter
        raise ValueError("number of scaled spectrum fluxes does not match number of wanted fluxes")

    params = Parameters()
    for i, flux_tuple in enumerate(zip(scaled_spectrum_flux, wanted_flux)):
        # initial weight per filter: ratio of the wanted flux to the current synthetic flux
        params.add(wanted_filters[i].filter_name, value=flux_tuple[1] / flux_tuple[0])

    paramlist = np.array([params[key].value for key in params.keys()])
    data_table["weights"] = Column(np.append(1, np.append(paramlist, 1)), name="weights")

    mc_l, mc_u = functions.calc_linear_terms(data_table[data_table["mask"]], key="weights", verbose=verbose)
    weight_l = mc_l[0] * data_table["lambda_eff"][0] + mc_l[1]
    weight_u = mc_u[0] * data_table["lambda_eff"][-1] + mc_u[1]
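    # the first and last spline knots sit outside the fitted passbands; their weights are set by
    # linearly extrapolating the fitted weights in effective wavelength (weight = m*lambda_eff + c,
    # with slope m and intercept c as returned by calc_linear_terms)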

    weights = np.append(np.append(weight_l, paramlist), weight_u)
    data_table["weights"] = weights

    ## Do the fit
    out = minimize(manglemin, params, args=(SpectrumObject, data_table), kws={"verbose": verbose})
    # out = minimize(manglemin, params, args=(SpectrumObject, data_table), epsfcn=1e-5)
    if verbose: print(fit_report(out))

    paramlist = np.array([out.params[key].value for key in out.params.keys()])

    mc_l, mc_u = functions.calc_linear_terms(data_table, key="weights")
    data_table["weights"][0] = mc_l[0] * data_table["lambda_eff"][0] + mc_l[1]
    data_table["weights"][-1] = mc_u[0] * data_table["lambda_eff"][-1] + mc_u[1]
    weights = data_table["weights"].data
    final_spl = interpolate.CubicSpline(data_table["lambda_eff"], data_table["weights"], bc_type="clamped")
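    # bc_type="clamped" forces the first derivative of the mangling spline to zero at both
    # endpoints, so the weighting flattens out at the edges rather than extrapolating steeply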

    SpectrumObject.flux = final_spl(SpectrumObject.wavelength) * SpectrumObject.flux / SpectrumObject.scale_factor

    data_table["fitflux"] = data_table["fitflux"] / SpectrumObject.scale_factor
    data_table["spec_filterflux"] = data_table["spec_filterflux"] / SpectrumObject.scale_factor

    # data_table[0]["mangledspec_filterflux"] = data_table[0]["mangledspec_filterflux"] / SpectrumObject.scale_factor
    # data_table[-1]["mangledspec_filterflux"] = data_table[-1]["mangledspec_filterflux"] / SpectrumObject.scale_factor

    # data_table["mangledspec_filterflux"] = data_table["mangledspec_filterflux"] / SpectrumObject.scale_factor
    data_table["mangledspec_filterflux"] = calculate_fluxes(data_table, SpectrumObject)
    fit_dict = OrderedDict()

    fit_dict["SpectrumObject"] = SpectrumObject
    fit_dict["final_spl"] = final_spl
    fit_dict["data_table"] = data_table

    return fit_dict