Example #1
def test_brentq_expanding():
    cases = [
        (0, {}),
        (50, {}),
        (-50, {}),
        (500000, dict(low=10000)),
        (-50000, dict(upp=-1000)),
        (500000, dict(low=300000, upp=700000)),
        (-50000, dict(low=-70000, upp=-1000))
        ]

    funcs = [(func, None),
             (func, True),
             (funcn, None),
             (funcn, False)]

    for f, inc in funcs:
        for a, kwds in cases:
            kw = {'increasing': inc}
            kw.update(kwds)
            res = brentq_expanding(f, args=(a,), **kw)
            #print '%10d'%a, ['dec', 'inc'][f is func], res - a
            assert_allclose(res, a, rtol=1e-5)

    # wrong sign for start bounds
    # doesn't raise yet during development TODO: activate this
    # it kind of works in some cases, but not correctly or in a useful way
    #assert_raises(ValueError, brentq_expanding, func, args=(-500,), start_upp=-1000)
    #assert_raises(ValueError, brentq_expanding, func, args=(500,), start_low=1000)

    # low upp given, but doesn't bound root, leave brentq exception
    # ValueError: f(a) and f(b) must have different signs
    assert_raises(ValueError, brentq_expanding, funcn, args=(-50000,), low=-40000, upp=-10000)

    # max_it too low to find root bounds
    # ValueError: f(a) and f(b) must have different signs
    assert_raises(ValueError, brentq_expanding, func, args=(-50000,), max_it=2)

    # maxiter_bq too low
    # RuntimeError: Failed to converge after 3 iterations.
    assert_raises(RuntimeError, brentq_expanding, func, args=(-50000,), maxiter_bq=3)

    # cannot determine whether increasing, all 4 low trial points return nan
    assert_raises(ValueError, brentq_expanding, func_nan, args=(-20, 0.6))

    # test for full_output
    a = 500
    val, info = brentq_expanding(func, args=(a,), full_output=True)
    assert_allclose(val, a, rtol=1e-5)
    info1 = {'iterations': 63, 'start_bounds': (-1, 1),
             'brentq_bounds': (100, 1000), 'flag': 'converged',
             'function_calls': 64, 'iterations_expand': 3, 'converged': True}
    for k in info1:
        assert_equal(info1[k], info.__dict__[k])

    assert_allclose(info.root, a, rtol=1e-5)
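
A minimal usage sketch of brentq_expanding for reference (assumptions: it is importable from statsmodels.tools.rootfinding, and func is the increasing cubic used by the test above):

from statsmodels.tools.rootfinding import brentq_expanding

def func(x, a):
    return (x - a) ** 3

root = brentq_expanding(func, args=(300.,))                    # bracket expands automatically
root_lo = brentq_expanding(func, args=(500000.,), low=10000)   # supply one known bound
val, info = brentq_expanding(func, args=(500.,), full_output=True)
print(val, info.converged, info.iterations)
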
Example #2
    def solve_power(self, **kwds):
        '''solve for any one of the parameters of a t-test

        for t-test the keywords are:
            effect_size, nobs, alpha, power

        exactly one needs to be ``None``, all others need numeric values

        *attaches*

        cache_fit_res : list
            Cache of the result of the root finding procedure for the latest
            call to ``solve_power``, mainly for debugging purposes.
            The first element is the success indicator, one if successful.
            The remaining elements contain the return information from each
            solver that was tried (up to three).


        '''
        #TODO: maybe use explicit kwds,
        #    nicer but requires inspect? and not generic across tests
        #    I'm duplicating this in the subclass to get an informative docstring
        key = [k for k, v in iteritems(kwds) if v is None]
        #print kwds, key
        if len(key) != 1:
            raise ValueError('need exactly one keyword that is None')
        key = key[0]

        if key == 'power':
            del kwds['power']
            return self.power(**kwds)

        if kwds['effect_size'] == 0:
            import warnings
            from statsmodels.tools.sm_exceptions import HypothesisTestWarning
            warnings.warn('Warning: Effect size of 0 detected',
                          HypothesisTestWarning)
            if key == 'power':
                return kwds['alpha']
            if key == 'alpha':
                return kwds['power']
            else:
                raise ValueError(
                    'Cannot detect an effect-size of 0. Try changing your effect-size.'
                )

        self._counter = 0

        def func(x):
            kwds[key] = x
            fval = self._power_identity(**kwds)
            self._counter += 1
            #print self._counter,
            if self._counter > 500:
                raise RuntimeError('possible endless loop (500 NaNs)')
            if np.isnan(fval):
                return np.inf
            else:
                return fval

        #TODO: I'm using the following so I get a warning when start_ttp is not defined
        try:
            start_value = self.start_ttp[key]
        except KeyError:
            start_value = 0.9
            import warnings
            from statsmodels.tools.sm_exceptions import ValueWarning
            warnings.warn(
                'Warning: using default start_value for {0}'.format(key),
                ValueWarning)

        fit_kwds = self.start_bqexp[key]
        fit_res = []
        #print vars()
        try:
            val, res = brentq_expanding(func, full_output=True, **fit_kwds)
            failed = False
            fit_res.append(res)
        except ValueError:
            failed = True
            fit_res.append(None)

        success = None
        if (not failed) and res.converged:
            success = 1
        else:
            # try backup
            # TODO: check more cases to make this robust
            if not np.isnan(start_value):
                val, infodict, ier, msg = optimize.fsolve(
                    func, start_value, full_output=True)  #scalar
                #val = optimize.newton(func, start_value) #scalar
                fval = infodict['fvec']
                fit_res.append(infodict)
            else:
                ier = -1
                fval = 1
                fit_res.append([None])

            if ier == 1 and np.abs(fval) < 1e-4:
                success = 1
            else:
                #print infodict
                if key in ['alpha', 'power', 'effect_size']:
                    val, r = optimize.brentq(func,
                                             1e-8,
                                             1 - 1e-8,
                                             full_output=True)  #scalar
                    success = 1 if r.converged else 0
                    fit_res.append(r)
                else:
                    success = 0

        if success != 1:
            import warnings
            from statsmodels.tools.sm_exceptions import (ConvergenceWarning,
                                                         convergence_doc)
            warnings.warn(convergence_doc, ConvergenceWarning)

        #attach fit_res, for reading only, should be needed only for debugging
        fit_res.insert(0, success)
        self.cache_fit_res = fit_res
        return val
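
For orientation, a hedged usage sketch of the solve_power interface described in the docstring above; it assumes the concrete t-test power class is statsmodels.stats.power.TTestPower and uses the keyword set named in the docstring (effect_size, nobs, alpha, power):

from statsmodels.stats.power import TTestPower

pw = TTestPower()
# Exactly one keyword is None; solve_power solves for it via root finding.
nobs = pw.solve_power(effect_size=0.5, nobs=None, alpha=0.05, power=0.8)
print(nobs)
# cache_fit_res is attached for debugging; the first element is the success flag.
print(pw.cache_fit_res[0])
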
Example #3

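# Note: this snippet is partial; a hedged reconstruction of the module-level
# pieces it relies on follows (the brentq_expanding import path, the DEBUG flag
# and the increasing test function `func` are assumptions, inferred from the
# other examples on this page).
from statsmodels.tools.rootfinding import brentq_expanding

DEBUG = False

def func(x, a):
    f = (x - a)**3
    if DEBUG:
        print('evaluating at %g, fval = %g' % (x, f))
    return f
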
def funcn(x, a):
    f = -(x - a)**3
    if DEBUG:
        print('evaluating at %g, fval = %g' % (x, f))
    return f

def func2(x, a):
    f = (x - a)**3
    print('evaluating at %g, fval = %f' % (x, f))
    return f

if __name__ == '__main__':
    run_all = False
    if run_all:
        print(brentq_expanding(func, args=(0,), increasing=True))

        print(brentq_expanding(funcn, args=(0,), increasing=False))
        print(brentq_expanding(funcn, args=(-50,), increasing=False))

        print(brentq_expanding(func, args=(20,)))
        print(brentq_expanding(funcn, args=(20,)))
        print(brentq_expanding(func, args=(500000,)))

        # one bound
        print(brentq_expanding(func, args=(500000,), low=10000))
        print(brentq_expanding(func, args=(-50000,), upp=-1000))

        print(brentq_expanding(funcn, args=(500000,), low=10000))
        print(brentq_expanding(funcn, args=(-50000,), upp=-1000))
Example #4
def test_brentq_expanding():
    cases = [(0, {}), (50, {}), (-50, {}), (500000, dict(low=10000)),
             (-50000, dict(upp=-1000)), (500000, dict(low=300000, upp=700000)),
             (-50000, dict(low=-70000, upp=-1000))]

    funcs = [(func, None), (func, True), (funcn, None), (funcn, False)]

    for f, inc in funcs:
        for a, kwds in cases:
            kw = {'increasing': inc}
            kw.update(kwds)
            res = brentq_expanding(f, args=(a,), **kw)
            #print '%10d'%a, ['dec', 'inc'][f is func], res - a
            assert_allclose(res, a, rtol=1e-5)

    # wrong sign for start bounds
    # doesn't raise yet during development TODO: activate this
    # it kind of works in some cases, but not correctly or in a useful way
    #assert_raises(ValueError, brentq_expanding, func, args=(-500,), start_upp=-1000)
    #assert_raises(ValueError, brentq_expanding, func, args=(500,), start_low=1000)

    # low upp given, but doesn't bound root, leave brentq exception
    # ValueError: f(a) and f(b) must have different signs
    assert_raises(ValueError,
                  brentq_expanding,
                  funcn,
                  args=(-50000, ),
                  low=-40000,
                  upp=-10000)

    # max_it too low to find root bounds
    # ValueError: f(a) and f(b) must have different signs
    assert_raises(ValueError,
                  brentq_expanding,
                  func,
                  args=(-50000, ),
                  max_it=2)

    # maxiter_bq too low
    # RuntimeError: Failed to converge after 3 iterations.
    assert_raises(RuntimeError,
                  brentq_expanding,
                  func,
                  args=(-50000, ),
                  maxiter_bq=3)

    # cannot determine whether increasing, all 4 low trial points return nan
    assert_raises(ValueError, brentq_expanding, func_nan, args=(-20, 0.6))

    # test for full_output
    a = 500
    val, info = brentq_expanding(func, args=(a, ), full_output=True)
    assert_allclose(val, a, rtol=1e-5)
    info1 = {
        'iterations': 63,
        'start_bounds': (-1, 1),
        'brentq_bounds': (100, 1000),
        'flag': 'converged',
        'function_calls': 64,
        'iterations_expand': 3,
        'converged': True
    }
    for k in info1:
        assert_equal(info1[k], info.__dict__[k])

    assert_allclose(info.root, a, rtol=1e-5)
Example #5
def funcn(x, a):
    f = -(x - a)**3
    if DEBUG:
        print('evaluating at %g, fval = %g' % (x, f))
    return f


def func2(x, a):
    f = (x - a)**3
    print('evaluating at %g, fval = %f' % (x, f))
    return f

if __name__ == '__main__':
    run_all = False
    if run_all:
        print(brentq_expanding(func, args=(0,), increasing=True))

        print(brentq_expanding(funcn, args=(0,), increasing=False))
        print(brentq_expanding(funcn, args=(-50,), increasing=False))

        print(brentq_expanding(func, args=(20,)))
        print(brentq_expanding(funcn, args=(20,)))
        print(brentq_expanding(func, args=(500000,)))

        # one bound
        print(brentq_expanding(func, args=(500000,), low=10000))
        print(brentq_expanding(func, args=(-50000,), upp=-1000))

        print(brentq_expanding(funcn, args=(500000,), low=10000))
        print(brentq_expanding(funcn, args=(-50000,), upp=-1000))
Example #6
    def solve_power(self, **kwds):
        '''solve for any one of the parameters of a t-test

        for t-test the keywords are:
            effect_size, nobs, alpha, power

        exactly one needs to be ``None``, all others need numeric values

        *attaches*

        cache_fit_res : list
            Cache of the result of the root finding procedure for the latest
            call to ``solve_power``, mainly for debugging purposes.
            The first element is the success indicator, one if successful.
            The remaining elements contain the return information from each
            solver that was tried (up to three).


        '''
        #TODO: maybe use explicit kwds,
        #    nicer but requires inspect? and not generic across tests
        #    I'm duplicating this in the subclass to get an informative docstring
        key = [k for k, v in iteritems(kwds) if v is None]
        #print kwds, key;
        if len(key) != 1:
            raise ValueError('need exactly one keyword that is None')
        key = key[0]

        if key == 'power':
            del kwds['power']
            return self.power(**kwds)

        self._counter = 0
        def func(x):
            kwds[key] = x
            fval = self._power_identity(**kwds)
            self._counter += 1
            #print self._counter,
            if self._counter > 500:
                raise RuntimeError('possible endless loop (500 NaNs)')
            if np.isnan(fval):
                return np.inf
            else:
                return fval

        #TODO: I'm using the following so I get a warning when start_ttp is not defined
        try:
            start_value = self.start_ttp[key]
        except KeyError:
            start_value = 0.9
            print('Warning: using default start_value for {0}'.format(key))

        fit_kwds = self.start_bqexp[key]
        fit_res = []
        #print vars()
        try:
            val, res = brentq_expanding(func, full_output=True, **fit_kwds)
            failed = False
            fit_res.append(res)
        except ValueError:
            failed = True
            fit_res.append(None)

        success = None
        if (not failed) and res.converged:
            success = 1
        else:
            # try backup
            #TODO: check more cases to make this robust
            val, infodict, ier, msg = optimize.fsolve(func, start_value,
                                                      full_output=True) #scalar
            #val = optimize.newton(func, start_value) #scalar
            fval = infodict['fvec']
            fit_res.append(infodict)
            if ier == 1 and np.abs(fval) < 1e-4:
                success = 1
            else:
                #print infodict
                if key in ['alpha', 'power', 'effect_size']:
                    val, r = optimize.brentq(func, 1e-8, 1-1e-8,
                                             full_output=True) #scalar
                    success = 1 if r.converged else 0
                    fit_res.append(r)
                else:
                    success = 0

        if success != 1:
            import warnings
            from statsmodels.tools.sm_exceptions import (ConvergenceWarning,
                convergence_doc)
            warnings.warn(convergence_doc, ConvergenceWarning)

        #attach fit_res, for reading only, should be needed only for debugging
        fit_res.insert(0, success)
        self.cache_fit_res = fit_res
        return val