Code Example #1
are not identified.'''
                etext.append(wstr)
            elif predclose_frac > 0.1:  #TODO: get better diagnosis
                wstr = \
'''Possibly complete quasi-separation: A fraction %4.2f of observations can be
perfectly predicted. This might indicate that there is complete
quasi-separation. In this case some parameters will not be identified.''' % predclose_frac
                etext.append(wstr)

            if etext:
                smry.add_extra_txt(etext)

        return smry
class DiscreteResultsWrapper(lm.RegressionResultsWrapper):
    pass
wrap.populate_wrapper(DiscreteResultsWrapper, DiscreteResults)

if __name__ == "__main__":
    import numpy as np
    import scikits.statsmodels.api as sm
    # Scratch work for negative binomial models
    # dvisits was written using an R package, I can provide the dataset
    # on request until the copyright is cleared up
    #TODO: request permission to use dvisits
    data2 = np.genfromtxt('../datasets/dvisits/dvisits.csv', names=True)
    # note that this has missing values for Accident
    endog = data2['doctorco']
    exog = data2[['sex', 'age', 'agesq', 'income', 'levyplus', 'freepoor',
                  'freerepa', 'illness', 'actdays', 'hscore', 'chcond1',
                  'chcond2']].view(float).reshape(len(data2), -1)
    exog = sm.add_constant(exog, prepend=True)
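
A note on the excerpt above: the second warning fires when more than 10% of
observations are predicted (almost) perfectly, which is a symptom of
quasi-separation in a binary model. Below is a minimal sketch of that fraction
check, assuming predclose_frac is the share of fitted values lying within a
small tolerance of the observed outcomes; the tolerance and the toy numbers
are illustrative, not taken from the listing.

import numpy as np

# Toy fitted probabilities and 0/1 outcomes, purely for illustration.
observed = np.array([0., 0., 1., 1., 1.])
fitted = np.array([1e-6, 0.40, 1 - 1e-6, 0.97, 0.98])

predclose_frac = np.mean(np.abs(fitted - observed) < 1e-4)  # 0.40 here
if predclose_frac > 0.1:  # same threshold as the elif branch in the excerpt
    print('Possibly complete quasi-separation: A fraction %4.2f of '
          'observations can be perfectly predicted.' % predclose_frac)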
Code Example #2
            print('not available yet')


class GLMResultsWrapper(lm.RegressionResultsWrapper):
    _attrs = {
        'resid_anscombe': 'rows',
        'resid_deviance': 'rows',
        'resid_pearson': 'rows',
        'resid_response': 'rows',
        'resid_working': 'rows'
    }
    _wrap_attrs = wrap.union_dicts(lm.RegressionResultsWrapper._wrap_attrs,
                                   _attrs)


wrap.populate_wrapper(GLMResultsWrapper, GLMResults)

if __name__ == "__main__":
    import scikits.statsmodels.api as sm
    import numpy as np
    data = sm.datasets.longley.load()
    #data.exog = add_constant(data.exog)
    GLMmod = GLM(data.endog, data.exog).fit()
    GLMT = GLMmod.summary(returns='tables')
    ##    GLMT[0].extend_right(GLMT[1])
    ##    print(GLMT[0])
    ##    print(GLMT[2])
    GLMTp = GLMmod.summary(title='Test GLM')
    """
From Stata
. webuse beetle
Code Example #3
#add warnings/notes, added to text format only
        etext =[]
        wstr = \
'''If the model instance has been used for another fit with different fit
parameters, then the fit options might not be the correct ones anymore.'''
        etext.append(wstr)

        if etext:
            smry.add_extra_txt(etext)

        return smry

class RLMResultsWrapper(lm.RegressionResultsWrapper):
    pass
wrap.populate_wrapper(RLMResultsWrapper, RLMResults)

if __name__ == "__main__":
    #NOTE: This is to be removed
    #Delivery Time Data is taken from Montgomery and Peck
    import scikits.statsmodels.api as sm

    #delivery time(minutes)
    endog = np.array([16.68, 11.50, 12.03, 14.88, 13.75, 18.11, 8.00, 17.83,
    79.24, 21.50, 40.33, 21.00, 13.50, 19.75, 24.00, 29.00, 15.35, 19.00,
    9.50, 35.10, 17.90, 52.32, 18.75, 19.83, 10.75])

#number of cases, distance (Feet)
    exog = np.array([[7, 3, 3, 4, 6, 7, 2, 7, 30, 5, 16, 10, 4, 6, 9, 10, 6,
    7, 3, 17, 10, 26, 9, 8, 4], [560, 220, 340, 80, 150, 330, 110, 210, 1460,
    605, 688, 215, 255, 462, 448, 776, 200, 132, 36, 770, 140, 810, 450, 635,
Code Example #4
        wstr = \
'''If the model instance has been used for another fit with different fit
parameters, then the fit options might not be the correct ones anymore.'''
        etext.append(wstr)

        if etext:
            smry.add_extra_txt(etext)

        return smry


class RLMResultsWrapper(lm.RegressionResultsWrapper):
    pass


wrap.populate_wrapper(RLMResultsWrapper, RLMResults)

if __name__ == "__main__":
    #NOTE: This is to be removed
    #Delivery Time Data is taken from Montgomery and Peck
    import scikits.statsmodels.api as sm

    #delivery time(minutes)
    endog = np.array([
        16.68, 11.50, 12.03, 14.88, 13.75, 18.11, 8.00, 17.83, 79.24, 21.50,
        40.33, 21.00, 13.50, 19.75, 24.00, 29.00, 15.35, 19.00, 9.50, 35.10,
        17.90, 52.32, 18.75, 19.83, 10.75
    ])

    #number of cases, distance (Feet)
    exog = np.array([[
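
The excerpt breaks off inside the exog array, but the warning string near the
top deserves a gloss: as I read it, it flags that when fit is called again on
the same model instance with different options, the fit options reported in
the summary may no longer match the ones used for the results being
summarized. A comment-only illustration with generic names, not taken from the
listing:

# res1 = model.fit(maxiter=50)    # res1 was produced with maxiter=50
# res2 = model.fit(maxiter=500)   # same model instance, different fit options
# A summary of res1 printed now may report options that are stale, which is
# the situation the warning above is guarding against.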
Code Example #5
        fcasterr = np.sqrt(sigma2 * np.cumsum(ma_rep**2))

        const = norm.ppf(1 - alpha/2.)
        conf_int = np.c_[forecast - const*fcasterr, forecast + const*fcasterr]

        return forecast, fcasterr, conf_int

class ARMAResultsWrapper(wrap.ResultsWrapper):
    _attrs = {}
    _wrap_attrs = wrap.union_dicts(tsbase.TimeSeriesResultsWrapper._wrap_attrs,
                                    _attrs)
    _methods = {}
    _wrap_methods = wrap.union_dicts(
                        tsbase.TimeSeriesResultsWrapper._wrap_methods,
                        _methods)
wrap.populate_wrapper(ARMAResultsWrapper, ARMAResults)

if __name__ == "__main__":
    import numpy as np
    import scikits.statsmodels.api as sm

    # simulate arma process
    from scikits.statsmodels.tsa.arima_process import arma_generate_sample
    y = arma_generate_sample([1., -.75], [1., .25], nsample=1000)
    arma = ARMA(y)
    res = arma.fit(trend='nc', order=(1, 1))

    np.random.seed(12345)
    y_arma22 = arma_generate_sample([1., -.85, .35], [1, .25, -.9],
                                    nsample=1000)
    arma22 = ARMA(y_arma22)
    res22 = arma22.fit(trend='nc', order=(2, 2))
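
The forecast band in this excerpt is the usual MA-representation interval: the
h-step standard error is the square root of sigma2 times the cumulative sum of
the squared psi-weights, and the bounds are the point forecast plus or minus
the standard normal quantile times that standard error. A small numeric sketch
of those three lines, with made-up sigma2, ma_rep, and forecast values:

import numpy as np
from scipy.stats import norm

sigma2 = 1.0                             # illustrative innovation variance
ma_rep = np.array([1.0, 0.5, 0.25])      # illustrative psi-weights
forecast = np.array([0.20, 0.10, 0.05])  # illustrative point forecasts
alpha = 0.05

fcasterr = np.sqrt(sigma2 * np.cumsum(ma_rep ** 2))
const = norm.ppf(1 - alpha / 2.)
conf_int = np.c_[forecast - const * fcasterr, forecast + const * fcasterr]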
Code Example #6
File: tsa_model.py Project: takluyver/statsmodels
        self._data.predict_dates = dates

class TimeSeriesModelResults(base.LikelihoodModelResults):
    def __init__(self, model, params, normalized_cov_params, scale=1.):
        self._data = model._data
        super(TimeSeriesModelResults,
                self).__init__(model, params, normalized_cov_params, scale)

class TimeSeriesResultsWrapper(wrap.ResultsWrapper):
    _attrs = {}
    _wrap_attrs = wrap.union_dicts(base.LikelihoodResultsWrapper._wrap_attrs,
                                    _attrs)
    _methods = {'predict' : 'dates'}
    _wrap_methods = wrap.union_dicts(base.LikelihoodResultsWrapper._wrap_methods,
                                     _methods)
wrap.populate_wrapper(TimeSeriesResultsWrapper,
                      TimeSeriesModelResults)

if __name__ == "__main__":
    import scikits.statsmodels.api as sm
    import datetime
    import pandas

    data = sm.datasets.macrodata.load()

    #make a DataFrame
    #TODO: attach a DataFrame to some of the datasets, for quicker use
    dates = [str(int(x[0])) +':'+ str(int(x[1])) \
             for x in data.data[['year','quarter']]]
    try:
        import scikits.timeseries as ts
        ts_dates = date_array(start_date=Date(year=1959, quarter=1, freq='Q'),
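
The excerpt is cut off inside the date_array call, but the visible part builds
quarterly labels of the form 'YYYY:Q' from the macrodata year and quarter
columns. A standalone sketch of that comprehension, using a made-up structured
array in place of data.data:

import numpy as np

rows = np.array([(1959.0, 1.0), (1959.0, 2.0), (1959.0, 3.0)],
                dtype=[('year', float), ('quarter', float)])
dates = [str(int(x[0])) + ':' + str(int(x[1]))
         for x in rows[['year', 'quarter']]]
# ['1959:1', '1959:2', '1959:3']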
Code Example #7
File: arima_model.py Project: zed/statsmodels
        conf_int = np.c_[forecast - const * fcasterr,
                         forecast + const * fcasterr]

        return forecast, fcasterr, conf_int


class ARMAResultsWrapper(wrap.ResultsWrapper):
    _attrs = {}
    _wrap_attrs = wrap.union_dicts(tsbase.TimeSeriesResultsWrapper._wrap_attrs,
                                   _attrs)
    _methods = {}
    _wrap_methods = wrap.union_dicts(
        tsbase.TimeSeriesResultsWrapper._wrap_methods, _methods)


wrap.populate_wrapper(ARMAResultsWrapper, ARMAResults)

if __name__ == "__main__":
    import numpy as np
    import scikits.statsmodels.api as sm

    # simulate arma process
    from scikits.statsmodels.tsa.arima_process import arma_generate_sample
    y = arma_generate_sample([1., -.75], [1., .25], nsample=1000)
    arma = ARMA(y)
    res = arma.fit(trend='nc', order=(1, 1))

    np.random.seed(12345)
    y_arma22 = arma_generate_sample([1., -.85, .35], [1, .25, -.9],
                                    nsample=1000)
    arma22 = ARMA(y_arma22)
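
In the __main__ block, arma_generate_sample takes the coefficients of the AR
and MA lag polynomials, each including the leading 1, so the AR side enters
with flipped sign. If I read that convention correctly, the first call
corresponds to the process sketched below, and trend='nc' fits the ARMA(1, 1)
without a constant term.

# Illustrative reading of the lag-polynomial arguments (not library docs):
#   ar = [1., -.75]  ->  (1 - 0.75 L) y_t
#   ma = [1., .25]   ->  (1 + 0.25 L) e_t
# i.e.  y_t = 0.75 * y_{t-1} + e_t + 0.25 * e_{t-1}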
Code Example #8
File: var_model.py Project: collinstocks/statsmodels
        idx = np.argsort(np.abs(roots))[::-1]  # sort by modulus, largest first
        return roots[idx]

class VARResultsWrapper(wrap.ResultsWrapper):
    _attrs = {'bse' : 'columns_eq', 'cov_params' : 'cov',
              'params' : 'columns_eq', 'pvalues' : 'columns_eq',
              'tvalues' : 'columns_eq', 'sigma_u' : 'cov_eq',
              'sigma_u_mle' : 'cov_eq',
              'stderr' : 'columns_eq'}
    _wrap_attrs = wrap.union_dicts(tsbase.TimeSeriesResultsWrapper._wrap_attrs,
                                    _attrs)
    _methods = {}
    _wrap_methods = wrap.union_dicts(tsbase.TimeSeriesResultsWrapper._wrap_methods,
                                     _methods)
    _wrap_methods.pop('cov_params') # not yet a method in VARResults
wrap.populate_wrapper(VARResultsWrapper, VARResults)

class FEVD(object):
    """
    Compute and plot Forecast error variance decomposition and asymptotic
    standard errors
    """
    def __init__(self, model, P=None, periods=None):
        self.periods = periods

        self.model = model
        self.neqs = model.neqs
        self.names = model.names

        self.irfobj = model.irf(var_decomp=P, periods=periods)
        self.orth_irfs = self.irfobj.orth_irfs
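
Two details in this excerpt are easy to miss: the roots are reordered by
decreasing modulus, and _wrap_methods.pop('cov_params') drops that entry
because, per the inline comment, cov_params is not yet a method on VARResults.
A tiny numeric sketch of the sorting idiom, with made-up roots:

import numpy as np

roots = np.array([0.5 + 0.5j, 2.0, -1.2])
idx = np.argsort(np.abs(roots))[::-1]  # indices ordered by decreasing modulus
roots[idx]                             # 2.0, -1.2, 0.5+0.5j in that order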
Code Example #9
File: tsa_model.py Project: zed/statsmodels
    def __init__(self, model, params, normalized_cov_params, scale=1.):
        self._data = model._data
        super(TimeSeriesModelResults,
              self).__init__(model, params, normalized_cov_params, scale)


class TimeSeriesResultsWrapper(wrap.ResultsWrapper):
    _attrs = {}
    _wrap_attrs = wrap.union_dicts(base.LikelihoodResultsWrapper._wrap_attrs,
                                   _attrs)
    _methods = {'predict': 'dates'}
    _wrap_methods = wrap.union_dicts(
        base.LikelihoodResultsWrapper._wrap_methods, _methods)


wrap.populate_wrapper(TimeSeriesResultsWrapper, TimeSeriesModelResults)

if __name__ == "__main__":
    import scikits.statsmodels.api as sm
    import datetime
    import pandas

    data = sm.datasets.macrodata.load()

    #make a DataFrame
    #TODO: attach a DataFrame to some of the datasets, for quicker use
    dates = [str(int(x[0])) +':'+ str(int(x[1])) \
             for x in data.data[['year','quarter']]]
    try:
        import scikits.timeseries as ts
        ts_dates = date_array(start_date=Date(year=1959, quarter=1, freq='Q'),
Code Example #10
'''Possibly complete quasi-separation: A fraction %4.2f of observations can be
perfectly predicted. This might indicate that there is complete
quasi-separation. In this case some parameters will not be identified.''' % predclose_frac
                etext.append(wstr)

            if etext:
                smry.add_extra_txt(etext)

        return smry


class DiscreteResultsWrapper(lm.RegressionResultsWrapper):
    pass


wrap.populate_wrapper(DiscreteResultsWrapper, DiscreteResults)

if __name__ == "__main__":
    import numpy as np
    import scikits.statsmodels.api as sm
    # Scratch work for negative binomial models
    # dvisits was written using an R package, I can provide the dataset
    # on request until the copyright is cleared up
    #TODO: request permission to use dvisits
    data2 = np.genfromtxt('../datasets/dvisits/dvisits.csv', names=True)
    # note that this has missing values for Accident
    endog = data2['doctorco']
    exog = data2[[
        'sex', 'age', 'agesq', 'income', 'levyplus', 'freepoor', 'freerepa',
        'illness', 'actdays', 'hscore', 'chcond1', 'chcond2'
    ]].view(float).reshape(len(data2), -1)
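
The exog construction above selects named fields from the structured array
returned by np.genfromtxt(..., names=True) and flattens them into an (n, k)
float matrix with .view(float).reshape(...). That idiom relies on older NumPy
behaviour for multi-field selections; a sketch of an equivalent that also
works on recent NumPy, with a made-up two-field array standing in for the
dvisits data:

import numpy as np

rec = np.array([(1.0, 25.0), (0.0, 40.0), (1.0, 31.0)],
               dtype=[('sex', float), ('age', float)])
exog = np.column_stack([rec[name] for name in ('sex', 'age')])  # shape (3, 2)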
Code Example #11
            print('not available yet')
        elif returns == 'html':
            print('not available yet')


class GLMResultsWrapper(lm.RegressionResultsWrapper):
    _attrs = {
        'resid_anscombe' : 'rows',
        'resid_deviance' : 'rows',
        'resid_pearson' : 'rows',
        'resid_response' : 'rows',
        'resid_working' : 'rows'
    }
    _wrap_attrs = wrap.union_dicts(lm.RegressionResultsWrapper._wrap_attrs,
                                   _attrs)
wrap.populate_wrapper(GLMResultsWrapper, GLMResults)

if __name__ == "__main__":
    import scikits.statsmodels.api as sm
    import numpy as np
    data = sm.datasets.longley.load()
    #data.exog = add_constant(data.exog)
    GLMmod = GLM(data.endog, data.exog).fit()
    GLMT = GLMmod.summary(returns='tables')
    ##    GLMT[0].extend_right(GLMT[1])
    ##    print(GLMT[0])
    ##    print(GLMT[2])
    GLMTp = GLMmod.summary(title='Test GLM')


    """
Code Example #12
        'params': 'columns_eq',
        'pvalues': 'columns_eq',
        'tvalues': 'columns_eq',
        'sigma_u': 'cov_eq',
        'sigma_u_mle': 'cov_eq',
        'stderr': 'columns_eq'
    }
    _wrap_attrs = wrap.union_dicts(tsbase.TimeSeriesResultsWrapper._wrap_attrs,
                                   _attrs)
    _methods = {}
    _wrap_methods = wrap.union_dicts(
        tsbase.TimeSeriesResultsWrapper._wrap_methods, _methods)
    _wrap_methods.pop('cov_params')  # not yet a method in VARResults


wrap.populate_wrapper(VARResultsWrapper, VARResults)


class FEVD(object):
    """
    Compute and plot Forecast error variance decomposition and asymptotic
    standard errors
    """
    def __init__(self, model, P=None, periods=None):
        self.periods = periods

        self.model = model
        self.neqs = model.neqs
        self.names = model.names

        self.irfobj = model.irf(var_decomp=P, periods=periods)
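
The excerpt ends inside FEVD.__init__, which stores the orthogonalized impulse
responses. The decomposition itself is conventionally built from them by
cumulating squared orthogonalized IRF coefficients and normalizing each row so
the shares sum to one. A rough sketch of that textbook computation; the shapes
and names are assumptions, not statsmodels internals:

import numpy as np

# orth_irfs[l, i, j]: response of variable i to orthogonalized shock j at lag l
orth_irfs = np.random.standard_normal((10, 3, 3))    # made-up responses

contrib = np.cumsum(orth_irfs ** 2, axis=0)           # cumulative squared responses
fevd = contrib / contrib.sum(axis=2, keepdims=True)   # share of shock j in var i
# each fevd[h, i, :] sums to one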