Example #1
# Assumed imports for these snippets; in the fit_* examples `optim` is used
# like scipy.optimize.fmin_l_bfgs_b (approx_grad/bounds arguments, tuple
# result), which the "# fmin_l_bfgs_b" comment in Example #7 also suggests:
import numpy as np
from scipy.special import gammaln
from scipy.optimize import fmin_l_bfgs_b as optim


def fit_mvpolya(X, initial_params=None):
    # Fit a multivariate Polya (Dirichlet-multinomial) distribution to the
    # count matrix X (one observation per row) by maximum likelihood.
    infinitesimal = np.finfo(float).eps

    def log_likelihood(params, *args):
        alpha = params
        X = args[0]

        # Dirichlet-multinomial log-likelihood, summed over the rows of X.
        res = np.sum([np.sum(gammaln(row + alpha))
                      - np.sum(gammaln(alpha))
                      + gammaln(np.sum(alpha))
                      - gammaln(np.sum(row + alpha))
                      + gammaln(np.sum(row) + 1)
                      - np.sum(gammaln(row + 1)) for row in X])

        return -res

    if initial_params is None:
        #initial_params = np.zeros(X.shape[1]) + 1.0
        initial_params = np.mean(X, 0) + infinitesimal

    bounds = [(infinitesimal, None)] * X.shape[1]
    optimres = optim(log_likelihood,
                     x0=initial_params,
                     args=(X,),
                     approx_grad=1,
                     bounds=bounds)

    params = optimres[0]
    return {'params': params}
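
A minimal usage sketch, assuming the imports above (the data simulation is illustrative, not from the original source):

rng = np.random.default_rng(0)
true_alpha = np.array([2.0, 5.0, 3.0])
ps = rng.dirichlet(true_alpha, size=200)            # one probability vector per row
X = np.array([rng.multinomial(50, p) for p in ps])  # count matrix, shape (200, 3)
print(fit_mvpolya(X)['params'])                     # should land near true_alpha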
Example #2
# Additional assumed imports for the fit_nbinom variants below:
from scipy.special import factorial, psi


def fit_nbinom(X, initial_params=None):
    # Copyright (C) 2014 Gokcen Eraslan
    # https://github.com/gokceneraslan/fit_nbinom/blob/master/fit_nbinom.py
    # X is a numpy array representing the data
    # initial_params is a numpy array with the initial values of the
    # size and prob parameters
    infinitesimal = np.finfo(float).eps

    def log_likelihood(params, *args):
        r, p = params
        X = args[0]
        N = X.size

        #MLE estimate based on the formula on Wikipedia:
        # http://en.wikipedia.org/wiki/Negative_binomial_distribution#Maximum_likelihood_estimation
        result = np.sum(gammaln(X + r)) \
            - np.sum(np.log(factorial(X))) \
            - N*(gammaln(r)) \
            + N*r*np.log(p) \
            + np.sum(X*np.log(1-(p if p < 1 else 1-infinitesimal)))

        return -result

    def log_likelihood_deriv(params, *args):
        r, p = params
        X = args[0]
        N = X.size

        pderiv = (N * r) / p - np.sum(X) / (1 - (p if p < 1 else 1 - infinitesimal))
        rderiv = np.sum(psi(X + r)) \
            - N*psi(r) \
            + N*np.log(p)

        return np.array([-rderiv, -pderiv])

    if initial_params is None:
        #reasonable initial values (from fitdistr function in R)
        m = np.mean(X)
        v = np.var(X)
        size = (m**2) / (v - m) if v > m else 10

        #convert mu/size parameterization to prob/size
        p0 = size / ((size + m) if size + m != 0 else 1)
        r0 = size
        initial_params = np.array([r0, p0])

    bounds = [(infinitesimal, None), (infinitesimal, 1)]
    optimres = optim(
        log_likelihood,
        x0=initial_params,
        #fprime=log_likelihood_deriv,
        args=(X, ),
        approx_grad=1,
        bounds=bounds)

    params = optimres[0]
    return {'size': params[0], 'prob': params[1]}
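
A usage sketch (illustrative data; numpy's negative_binomial shares this size/prob convention, counting failures before `n` successes):

rng = np.random.default_rng(0)
X = rng.negative_binomial(n=5, p=0.3, size=1000)
print(fit_nbinom(X))   # expect 'size' near 5 and 'prob' near 0.3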
Example #3
def optim_h0(self, hzy):
    # Method lifted from a class in the source project (hence `self`);
    # `qfunc` is defined elsewhere there. In this and the next example,
    # `optim` is used like scipy.optimize.minimize (Example #4 reads
    # `res.x` off the result object).
    fi_tips = self.calc_fi()
    fi_opt = np.zeros(len(fi_tips), dtype=float)
    hi_opt = np.zeros(len(fi_tips), dtype=float)
    for i, pop in enumerate(fi_tips.keys()):
        fi_opt[i] = fi_tips[pop]
        hi_opt[i] = hzy[pop]
    val = optim(qfunc, np.mean(hi_opt), args=(fi_opt, hi_opt))
    return val
Example #4
# Assumed imports for this example:
from scipy.stats import chi2
import statsmodels.api as sm


def optimize_hapflk_qreg(sample,
                         K,
                         Npop,
                         probs=np.linspace(0.05, 0.95, num=19)):
    # Match the sample's percentiles to chi-square quantiles to estimate df,
    # then robust-regress theoretical on observed quantiles.
    oq = np.percentile(sample, q=list(100 * probs))
    res = optim(qfunc, K, args=(oq, probs))
    mydf = res.x[0]
    tq = chi2.ppf(probs, df=mydf)
    ## robust regression
    reg = sm.RLM(tq, np.vstack([np.ones(len(oq)), oq]).T)
    res = reg.fit()
    mu, beta = res.params
    return {'mu': mu, 'beta': beta, 'df': mydf}
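
`qfunc` is not shown in these snippets; a hypothetical stand-in, consistent with how this example calls it, would minimize the mismatch between observed and chi-square quantiles over the candidate df:

def qfunc(df, oq, probs):
    # Hypothetical: squared distance between the observed quantiles and the
    # chi-square quantiles implied by the candidate df.
    return np.sum((chi2.ppf(probs, df=df) - oq) ** 2)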
Example #5
def fit_nbinom(X, initial_params=None):
    infinitesimal = np.finfo(float).eps
    X = X[X > 0]  # this variant drops zero counts before fitting

    def log_likelihood(params, *args):
        r, p = params
        X = args[0]
        N = X.size

        result = np.sum(gammaln(X + r)) - np.sum(np.log(factorial(X))) \
            - N*(gammaln(r)) + N*r*np.log(p) + np.sum(X*np.log(1-(p if p < 1 else 1-infinitesimal)))

        return -result

    if initial_params is None:
        #reasonable initial values (from fitdistr function in R)
        m = np.mean(X)
        v = np.var(X)
        size = (m**2) / (v - m) if v > m else 10

        #convert mu/size parameterization to prob/size
        p0 = size / ((size + m) if size + m != 0 else 1)
        r0 = size
        initial_params = np.array([r0, p0])

    bounds = [(infinitesimal, None), (infinitesimal, 1)]
    optimres = optim(log_likelihood,
                     x0=initial_params,
                     args=(X, ),
                     approx_grad=True,
                     bounds=bounds)

    params = optimres[0]
    # Convert the fitted size/prob pair back to a mean and variance.
    mu = params[0] * (1 - params[1]) / (params[1] if params[1] > 0 else infinitesimal)
    sigmasqr = params[0] * (1 - params[1]) / ((params[1]**2) if params[1] > 0 else infinitesimal)

    return {'mu': mu, 'size': params[0], 'prob': params[1], 'var': sigmasqr}
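
The closing conversions are the standard size/prob-to-mean/variance formulas for this parameterization; a quick illustrative check:

size, prob = 5.0, 0.3
mu = size * (1 - prob) / prob            # mean of NB(size, prob)
var = size * (1 - prob) / prob ** 2      # variance of NB(size, prob)
assert np.isclose(prob, size / (size + mu))   # the inverse map used for p0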
Example #6
def calculate_R2(x0, *args):
    # Despite its name, this returns the sum of squared residuals (the
    # quantity minimized below); `func`, `stl_range`, `expected`, and `asf2`
    # are defined elsewhere in the original script.
    global stl_range
    global expected
    calculated = func(x0, stl_range)
    return np.sum((calculated - expected) ** 2)

bounds = [
    (0, None),

    (None, None),
    (None, None),
    (None, None),
    (None, None),
    (None, None),

    (0.001, None),
    (0.001, None),
    (0.001, None),
    (0.001, None),
    (0.001, None),
]

x0 = asf2

lastx, lastR2, info = optim(calculate_R2, x0, approx_grad=True,
                            pgtol=10e-24, factr=2, iprint=-1, bounds=bounds)

print(lastR2)
print("\t".join([("%.10g" % fl).replace(".", ",") for fl in lastx]))
print(info)
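
The three values unpacked from optim above match scipy.optimize.fmin_l_bfgs_b's (x, f, d) return triple; a tiny self-contained illustration with a made-up quadratic objective:

from scipy.optimize import fmin_l_bfgs_b

x, f, d = fmin_l_bfgs_b(lambda v: (v[0] - 3.0) ** 2, x0=[0.0], approx_grad=True)
print(x, f, d['warnflag'])   # x near [3.], f near 0, warnflag 0 on success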
Example #7
def fit_nbinom(X, initial_params=None):

    infinitesimal = np.finfo(float).eps

    def log_likelihood(params, *args):
        r, p = params
        X = args[0]
        N = X.size

        # MLE estimate based on the formula on Wikipedia:
        # http://en.wikipedia.org/wiki/Negative_binomial_distribution#Maximum_likelihood_estimation
        result = np.sum(gammaln(X + r)) \
            - np.sum(np.log(factorial(X))) \
            - N*(gammaln(r)) \
            + N*r*np.log(p) \
            + np.sum(X*np.log(1-(p if p < 1 else 1-infinitesimal)))

        # Guard against non-finite values when the optimizer probes extreme
        # parameters; since this function returns the *negative*
        # log-likelihood, a log-likelihood of -inf maps to a large positive
        # penalty and +inf to a large negative one.
        if np.isnan(result):
            return 0.0
        if np.isinf(result):
            return 100000 if result < 0 else -100000

        return -result

    def log_likelihood_deriv(params, *args):
        r, p = params
        X = args[0]
        N = X.size

        pderiv = (N * r) / p - np.sum(X) / (1 - (p if p < 1 else 1 - infinitesimal))
        rderiv = np.sum(psi(X + r)) \
            - N*psi(r) \
            + N*np.log(p)

        return np.array([-rderiv, -pderiv])

    if initial_params is None:
        #reasonable initial values (from fitdistr function in R)
        m = np.mean(X)
        v = np.var(X)
        size = (m**2) / (v - m) if v > m else 10

        #convert mu/size parameterization to prob/size
        p0 = size / ((size + m) if size + m != 0 else 1)
        r0 = size
        initial_params = np.array([r0, p0])

    bounds = [(infinitesimal, None), (infinitesimal, 1)]

    # fmin_l_bfgs_b
    optimres = optim(
        log_likelihood,
        x0=initial_params,
        #fprime=log_likelihood_deriv,
        args=(X, ),
        approx_grad=1,
        bounds=bounds)

    params = optimres[0]
    return {'size': params[0], 'prob': params[1]}
Example #8
# Additional assumed imports for the typed variant:
from numbers import Number
from typing import Optional, Tuple


def fit_nbinom(
    X: np.ndarray, initial_params: Optional[Tuple[Number, Number]] = None
) -> Tuple[float, float]:
    """Fit a negative binomial distribution.

    Parameters
    ----------
    X
        data to fit
    initial_params
        Tuple with initial `size` and `prob` parameters.

    Returns
    -------
    Tuple with the fitted `size` and `prob` parameters.
    """
    infinitesimal = np.finfo(float).eps

    def log_likelihood(params, *args):
        r, p = params
        X = args[0]
        N = X.size

        # MLE estimate based on the formula on Wikipedia:
        # http://en.wikipedia.org/wiki/Negative_binomial_distribution#Maximum_likelihood_estimation
        result = (
            np.sum(gammaln(X + r))
            - np.sum(np.log(factorial(X)))
            - N * (gammaln(r))
            + N * r * np.log(p)
            + np.sum(X * np.log(1 - (p if p < 1 else 1 - infinitesimal)))
        )

        return -result

    def log_likelihood_deriv(params, *args):
        r, p = params
        X = args[0]
        N = X.size

        pderiv = (N * r) / p - np.sum(X) / (1 - (p if p < 1 else 1 - infinitesimal))
        rderiv = np.sum(psi(X + r)) - N * psi(r) + N * np.log(p)

        return np.array([-rderiv, -pderiv])

    if initial_params is None:
        # reasonable initial values (from fitdistr function in R)
        m = np.mean(X)
        v = np.var(X)
        size = (m**2) / (v - m) if v > m else 10

        # convert mu/size parameterization to prob/size
        p0 = size / ((size + m) if size + m != 0 else 1)
        r0 = size
        initial_params = (r0, p0)

    initial_params = np.array(initial_params)

    bounds = [(infinitesimal, None), (infinitesimal, 1)]
    optimres = optim(
        log_likelihood,
        x0=initial_params,
        # fprime=log_likelihood_deriv,
        args=(X,),
        approx_grad=1,
        bounds=bounds,
    )

    params = optimres[0]
    return params[0], params[1]
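
A usage sketch for this variant (illustrative data), which returns a tuple rather than a dict:

rng = np.random.default_rng(1)
X = rng.negative_binomial(n=8, p=0.5, size=500)
size, prob = fit_nbinom(X)
print(f"size={size:.2f} prob={prob:.2f}")   # roughly 8 and 0.5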
Example #9
def optim_root(self, hzy):
    # Companion method to optim_h0 in Example #3; `qfunc_2pars` is likewise
    # defined elsewhere in the source project.
    val = optim(qfunc_2pars, [0.25, 0.5], args=(self, hzy))
    return val