Example #1
from __future__ import print_function
import numpy as np
from scipy import stats
import statsmodels.api as sm
from statsmodels.base.model import GenericLikelihoodModel


# Load the Spector and Mazzeo program-effectiveness dataset and append a
# constant column to the exogenous regressors.
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)

# Fit an ordinary Probit model, then reuse its bound loglike and score in a
# GenericLikelihoodModel; the generic fit should match the direct Probit fit.
probit_mod = sm.Probit(data.endog, data.exog)
probit_res = probit_mod.fit()
loglike = probit_mod.loglike
score = probit_mod.score
mod = GenericLikelihoodModel(data.endog, data.exog, loglike, score)
res = mod.fit(method="nm", maxiter=500)

def probitloglike(params, endog, exog):
    """
    Log likelihood for the probit
    """
    q = 2 * endog - 1
    X = exog
    return np.sum(stats.norm.logcdf(q * np.dot(X, params)))

# Same fit with a standalone log-likelihood function; endog and exog are
# forwarded to it through fargs at fit time.
mod = GenericLikelihoodModel(data.endog, data.exog, loglike=probitloglike)
res = mod.fit(method="nm", fargs=(data.endog, data.exog), maxiter=500)
print(res.params)
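
# For comparison (added sketch, not part of the original example): both generic
# fits maximize the same probit log-likelihood, so their estimates should agree
# with the direct Probit fit up to the tolerance of the Nelder-Mead optimizer.
print(probit_res.params)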

Example #2
from functools import partial

import numpy as np
from statsmodels.base.model import GenericLikelihoodModel

# posterior, posterior_jacobian, and posterior_hessian are the bin-free SSD
# log-posterior and its first and second derivatives; they are assumed to be
# defined elsewhere in the same module as this function.


def optimize_IcIsIr(dt,
                    guessParams=[-1, -1, -1],
                    deadtime=1.e-5,
                    method='nm',
                    prior=[np.nan] * 3,
                    prior_sig=[np.nan] * 3,
                    **kwargs):
    """
    This function optimizes the loglikelihood for the bin-free SSD analysis.
    It returns the model fit for the most likely I1, I2, Ir.


    INPUTS:
        dt - Float, [seconds], list of photon inter-arrival times
        params - Floats, [1/seconds], list of initial guess for I1, I2, Ir
                                    if the guess is <0 then equally distribute the total flux amongst all params with a guess <0
        deadtime - float, [seconds], MKID deadtime after photon event
        method - optimization method. 'nm'=Nelder-Mead, 'ncg'=Newton-conjugate gradient, etc...
        paramsFixed - booleans, fix param_i during optimization to the initial guess

        **kwargs - additional kwargs to GenericLikelihoodModel.fit()
                   www.statsmodels.org/stable/dev/generated/statsmodels.base.model.LikelihoodModel.fit.html

    OUTPUTS:
        GenericLikelihoodModelResults Object
        See www.statsmodels.org/stable/dev/generated/statsmodels.base.model.GenericLikelihoodModelResults.html
    """

    # Fill in missing priors: None means no prior at all, and short lists are
    # padded with NaNs up to the three parameters (Ic, Is, Ir).
    if prior is None:
        prior = [np.nan] * 3
    prior = np.append(prior, [np.nan] * (3 - len(prior)))
    if prior_sig is None:
        prior_sig = [np.nan] * 3
    prior_sig = np.append(prior_sig, [np.nan] * (3 - len(prior_sig)))

    # Provide a reasonable guess everywhere that guessParams < 0.
    # If a prior is given, use it as the guess; otherwise distribute the
    # average flux equally among the params with a guess < 0,
    # e.g. guess=[-1, 30, -1] and I_avg=330  -->  guess=[150, 30, 150].
    guessParams = np.asarray(guessParams, dtype=float)
    assert len(guessParams) == 3, "Must provide a guess for Ic, Is, Ir. Choose -1 for automatic guess."

    # Replace any None entries in the prior with np.nan and force a float
    # array so np.isfinite behaves, then use the prior as the guess wherever
    # one was given.
    prior = np.array([np.nan if p is None else p for p in prior], dtype=float)
    guessParams[np.isfinite(prior)] = prior[np.isfinite(prior)]
    if np.any(guessParams < 0):
        I_avg = len(dt) / np.sum(dt)  # average count rate [1/seconds]
        I_guess = ((I_avg - np.sum(guessParams[guessParams >= 0]))
                   / np.sum(guessParams < 0))
        guessParams[guessParams < 0] = max(I_guess, 0)

    # Bind the data, deadtime, and priors into the log-posterior and its
    # first and second derivatives.
    loglike = partial(posterior,
                      dt=dt,
                      deadtime=deadtime,
                      prior=prior,
                      prior_sig=prior_sig)
    score = partial(posterior_jacobian,
                    dt=dt,
                    deadtime=deadtime,
                    prior=prior,
                    prior_sig=prior_sig)
    hess = partial(posterior_hessian,
                   dt=dt,
                   deadtime=deadtime,
                   prior=prior,
                   prior_sig=prior_sig)

    # Set up the statsmodels model. endog holds the photon inter-arrival
    # times; exog is a dummy array of ones whose field names label the three
    # fitted parameters.
    endog = np.zeros(len(dt), dtype=[('dt', 'float64')])
    endog['dt'] = dt
    names = ['Ic', 'Is', 'Ir']
    exog = np.ones(len(endog),
                   dtype={'names': names, 'formats': ['float64'] * len(names)})
    model = GenericLikelihoodModel(endog,
                                   exog=exog,
                                   loglike=loglike,
                                   score=score,
                                   hessian=hess)
    # Change the default disp kwarg to False unless the caller overrides it.
    kwargs.setdefault('disp', False)

    # Fit the model starting from the guess.
    return model.fit(guessParams, method=method, **kwargs)
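

# Minimal usage sketch (not part of the original module): simulate photon
# inter-arrival times at roughly 300 counts/s and let optimize_IcIsIr pick its
# own starting guesses (all entries of guessParams < 0). This assumes that
# posterior, posterior_jacobian, and posterior_hessian are available in the
# same module, as noted above.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    dt_sim = rng.exponential(scale=1.0 / 300.0, size=5000)  # seconds between photons

    fit = optimize_IcIsIr(dt_sim, guessParams=[-1, -1, -1], method='nm', maxiter=1000)
    print(fit.params)  # fitted [Ic, Is, Ir] in counts/s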