Example #1
def incomplete_beta_ps(a, b, value):
    """Power series for incomplete beta
    Use when b*x is small and value not too close to 1.
    Based on Cephes library by Steve Moshier (incbet.c)
    """
    one = aet.constant(1, dtype="float64")
    ai = one / a
    u = (one - b) * value
    t1 = u / (a + one)
    t = u
    threshold = np.MachAr().eps * ai
    s = aet.constant(0, dtype="float64")

    def _step(i, t, s):
        t *= (i - b) * value / i
        step = t / (a + i)
        s += step
        return ((t, s), until(aet.abs_(step) < threshold))

    (t, s), _ = scan(_step,
                     sequences=[aet.arange(2, 302)],
                     outputs_info=[e for e in aet.cast((t, s), "float64")])

    s = s[-1] + t1 + ai

    t = gammaln(a +
                b) - gammaln(a) - gammaln(b) + a * aet.log(value) + aet.log(s)
    return aet.exp(t)
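
A note that applies to every snippet on this page: np.MachAr was deprecated in NumPy 1.22 and removed in later releases; np.finfo exposes the same machine parameters. A minimal sketch of the drop-in replacement for the eps lookups used throughout:

import numpy as np

# np.finfo provides the same machine parameters as the old np.MachAr object.
eps = np.finfo(np.float64).eps     # equivalent to np.MachAr().eps
tiny = np.finfo(np.float64).tiny   # smallest positive normal float64
print(eps, tiny)                   # 2.220446049250313e-16 2.2250738585072014e-308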
Example #2
def incomplete_beta(a, b, value):
    '''Incomplete beta implementation
    Power series and continued fraction expansions chosen for best numerical
    convergence across the board based on inputs.
    '''
    machep = tt.constant(np.MachAr().eps, dtype='float64')
    one = tt.constant(1, dtype='float64')
    w = one - value

    ps = incomplete_beta_ps(a, b, value)

    flip = tt.gt(value, (a / (a + b)))
    aa, bb = a, b
    a = tt.switch(flip, bb, aa)
    b = tt.switch(flip, aa, bb)
    xc = tt.switch(flip, value, w)
    x = tt.switch(flip, w, value)

    tps = incomplete_beta_ps(a, b, x)
    tps = tt.switch(tt.le(tps, machep), one - machep, one - tps)

    # Choose which continued fraction expansion for best convergence.
    small = tt.lt(x * (a + b - 2.0) - (a - one), 0.0)
    cfe = incomplete_beta_cfe(a, b, x, small)
    w = tt.switch(small, cfe, cfe / xc)

    # Direct incomplete beta accounting for flipped a, b.
    t = tt.exp(a * tt.log(x) + b * tt.log(xc) + gammaln(a + b) - gammaln(a) -
               gammaln(b) + tt.log(w / a))

    t = tt.switch(flip, tt.switch(tt.le(t, machep), one - machep, one - t), t)
    return tt.switch(
        tt.and_(flip, tt.and_(tt.le((b * x), one), tt.le(x, 0.95))), tps,
        tt.switch(tt.and_(tt.le(b * value, one), tt.le(value, 0.95)), ps, t))
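
What incomplete_beta(a, b, value) computes is the regularized incomplete beta function I_x(a, b). As a hedged numerical cross-check, independent of the symbolic graph above, SciPy exposes the same quantity directly:

from scipy.special import betainc

a, b, x = 2.5, 3.0, 0.4
print(betainc(a, b, x))  # reference value to compare incomplete_beta(a, b, x) against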
Example #3
def compute_smallbone_reduction(N, Ex, v_star, tol=None):
    """ Uses a pivoted QR decomposition to calculate a reduced stoichiometric
    matrix, then calculates a link matrix as described in Smallbone *et al.* 2007.

    Returns:
    Nr, L, P

    """
    q, r, p = sp.linalg.qr((N @ np.diag(v_star) @ Ex).T,
                           pivoting=True)

    # Construct permutation matrix
    P = np.zeros((len(p), len(p)), dtype=int)
    for i, pi in enumerate(p):
        P[i, pi] = 1

    # Get the matrix rank from the R factor (unless a tolerance was supplied)
    if tol is None:
        maxabs = np.max(np.abs(np.diag(r)))
        maxdim = max(N.shape)
        tol = maxabs * maxdim * np.MachAr().eps
    # Find the rows of r whose entries are all below tol
    rank = (~(np.abs(r) < tol).all(1)).sum()

    Nr = P[:rank] @ N
    L = N @ np.linalg.pinv(Nr)

    return Nr, L, P
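
The tolerance rule here (largest |R_ii| times the largest matrix dimension times machine epsilon) mirrors the default cutoff np.linalg.matrix_rank applies to singular values. A self-contained sketch on a toy rank-2 matrix, cross-checked against matrix_rank:

import numpy as np
from scipy.linalg import qr

A = np.array([[1.0, 2.0, 3.0],
              [2.0, 4.0, 6.0],   # a multiple of the first row, so rank(A) == 2
              [0.0, 1.0, 1.0]])
q, r, p = qr(A.T, pivoting=True)
tol = np.max(np.abs(np.diag(r))) * max(A.shape) * np.finfo(float).eps
rank = (~(np.abs(r) < tol).all(1)).sum()
assert rank == np.linalg.matrix_rank(A) == 2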
Example #4
def MachAr_impl():
    f = np.MachAr()
    _mach_ar_data = tuple([getattr(f, x) for x in _mach_ar_supported])

    def impl():
        return MachAr(*_mach_ar_data)

    return impl
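
This snippet looks like the implementation half of a Numba @overload registration (the decorator itself is not shown, so that framing is an assumption). A minimal self-contained sketch of the pattern, using a hypothetical machine_eps() helper instead of np.MachAr:

import numpy as np
from numba import njit
from numba.extending import overload


def machine_eps():  # hypothetical pure-Python helper
    return np.finfo(np.float64).eps


@overload(machine_eps)
def machine_eps_impl():
    eps = np.finfo(np.float64).eps  # captured once, at compile time

    def impl():  # this is what runs inside nopython code
        return eps

    return impl


@njit
def demo():
    return machine_eps()


print(demo())  # 2.220446049250313e-16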
Example #5
    def __init__(self, N, Ex, v_star, x_star):
        """A class to handle the stochiometric analysis and matrix reduction
        required to ensure the relevant matrices are invertable.

        Parameters
        ----------
        N : np.array
            The full stoichiometric matrix of the considered model. Must be of
            dimensions MxN
        Ex : np.array
            An NxM array of the elasticity coefficients for the given linlog
            model.
        v_star : np.array
            A length N vector specifying the original steady-state flux
            solution of the model.
        x_star : np.array
            A length M vector specifying the original steady-state metabolite
            concentrations.

        """

        self.N = N
        self.x_star = x_star
        self.v_star = v_star

        self.nm, self.nr = N.shape

        assert Ex.shape == (self.nr, self.nm), "Ex is the wrong shape"
        assert len(v_star) == self.nr, "v_star is the wrong length"
        assert len(x_star) == self.nm, "x_star is the wrong length"

        q, r, p = sp.linalg.qr((N @ np.diag(v_star) @ Ex).T, pivoting=True)

        # # Construct permutation matrix
        # self.P = np.zeros((len(p), len(p)), dtype=int)
        # for i, pi in enumerate(p):
        #     self.P[i, pi] = 1

        # Get the matrix rank from the r matrix
        maxabs = np.max(np.abs(np.diag(r)))
        maxdim = max(N.shape)
        tol = maxabs * maxdim * np.MachAr().eps
        # Find where the rows of r are all less than tol
        self.rank = (~(np.abs(r) < tol).all(1)).sum()

        # Permutation vector
        self.p = np.sort(p[:self.rank])

        self.z_star = self.x_to_z(self.x_star)
        self.Nr = Nr = N[self.p]
        self.L = np.diag(1 / self.x_star) @ N @ np.linalg.pinv(Nr) @ np.diag(
            self.z_star)

        # Sanity checks
        assert np.linalg.matrix_rank(Nr) == self.rank
        assert np.allclose(Nr @ v_star, 0)
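
Example #3 and Example #5 are two variants of the same reduction: the first selects the independent rows of N with an explicit permutation matrix (P[:rank] @ N), the second with a sorted pivot vector (N[np.sort(p[:rank])]). A small sketch showing the two selections pick the same rows, just in a different order:

import numpy as np

rng = np.random.default_rng(0)
N = rng.normal(size=(4, 6))
p = np.array([2, 0, 3, 1])   # a hypothetical pivot order, as scipy.linalg.qr would return
rank = 3

P = np.zeros((len(p), len(p)), dtype=int)
for i, pi in enumerate(p):
    P[i, pi] = 1

rows_via_matrix = P[:rank] @ N            # rows 2, 0, 3 in pivot order
rows_via_vector = N[np.sort(p[:rank])]    # rows 0, 2, 3 in original order
assert np.allclose(rows_via_matrix[np.argsort(p[:rank])], rows_via_vector)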
Example #6
    def test_issue_25(self):
        def g_fun(x):
            out = np.zeros((2, 2), dtype=float)
            out[0, 0] = x[0]
            out[0, 1] = x[1]
            out[1, 0] = x[0]
            out[1, 1] = x[1]
            return out

        dg_dx = nd.Jacobian(g_fun)  # TODO:  method='reverse' fails
        x = np.array([1, 2])

        tv = [[[1., 0.], [0., 1.]], [[1., 0.], [0., 1.]]]
        _EPS = np.MachAr().eps
        epsilon = _EPS**(1. / 4)
        assert_allclose(nd.approx_fprime(x, g_fun, epsilon), tv)
        dg = dg_dx(x)
        assert_allclose(dg, tv)
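
For reference, the expected value tv can be reproduced with a plain forward-difference Jacobian; the sketch below assumes nothing about numdifftools and just spells out what the test asserts:

import numpy as np


def g_fun(x):
    out = np.zeros((2, 2))
    out[:, 0] = x[0]
    out[:, 1] = x[1]
    return out


def fd_jacobian(f, x, h=1e-6):
    f0 = f(x)
    jac = np.empty(f0.shape + (x.size,))
    for k in range(x.size):
        xk = np.array(x, dtype=float)
        xk[k] += h
        jac[..., k] = (f(xk) - f0) / h
    return jac


print(fd_jacobian(g_fun, np.array([1.0, 2.0])))
# ~ [[[1, 0], [0, 1]], [[1, 0], [0, 1]]]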
Example #7
    def _sp_subvector_error_out_of_range(
            self, radius, dimensions, subdimensions):
        dist = SubvectorLength(dimensions, subdimensions)
        sq_r = radius * radius

        normalization = 1.0 - dist.cdf(radius)
        b = (dimensions - subdimensions) / 2.0
        aligned_integral = beta(subdimensions / 2.0 + 1.0, b) * (1.0 - betainc(
            subdimensions / 2.0 + 1.0, b, sq_r))
        cross_integral = beta((subdimensions + 1) / 2.0, b) * (1.0 - betainc(
            (subdimensions + 1) / 2.0, b, sq_r))

        numerator = (sq_r * normalization + (
            aligned_integral - 2.0 * radius * cross_integral) / beta(
            subdimensions / 2.0, b))
        with np.errstate(invalid='ignore'):
            return np.where(
                numerator > np.MachAr().eps,
                numerator / normalization, np.zeros_like(normalization))
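
The closing np.where guard still evaluates numerator / normalization for every element, which is why the invalid-division warning is silenced. A toy sketch of the same guarded-division pattern (the values here are made up):

import numpy as np

numerator = np.array([0.0, 1e-20, 0.5])
normalization = np.array([0.0, 0.0, 2.0])
eps = np.finfo(float).eps

with np.errstate(invalid="ignore", divide="ignore"):
    out = np.where(numerator > eps,
                   numerator / normalization,
                   np.zeros_like(normalization))
print(out)  # [0.   0.   0.25]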
Example #8
def incomplete_beta_cfe(a, b, x, small):
    """Incomplete beta continued fraction expansions
    based on Cephes library by Steve Moshier (incbet.c).
    small: Choose element-wise which continued fraction expansion to use.
    """
    BIG = aet.constant(4.503599627370496e15, dtype="float64")
    BIGINV = aet.constant(2.22044604925031308085e-16, dtype="float64")
    THRESH = aet.constant(3.0 * np.MachAr().eps, dtype="float64")

    zero = aet.constant(0.0, dtype="float64")
    one = aet.constant(1.0, dtype="float64")
    two = aet.constant(2.0, dtype="float64")

    r = one
    k1 = a
    k3 = a
    k4 = a + one
    k5 = one
    k8 = a + two

    k2 = aet.switch(small, a + b, b - one)
    k6 = aet.switch(small, b - one, a + b)
    k7 = aet.switch(small, k4, a + one)
    k26update = aet.switch(small, one, -one)
    x = aet.switch(small, x, x / (one - x))

    pkm2 = zero
    qkm2 = one
    pkm1 = one
    qkm1 = one
    r = one

    def _step(i, pkm1, pkm2, qkm1, qkm2, k1, k2, k3, k4, k5, k6, k7, k8, r):
        xk = -(x * k1 * k2) / (k3 * k4)
        pk = pkm1 + pkm2 * xk
        qk = qkm1 + qkm2 * xk
        pkm2 = pkm1
        pkm1 = pk
        qkm2 = qkm1
        qkm1 = qk

        xk = (x * k5 * k6) / (k7 * k8)
        pk = pkm1 + pkm2 * xk
        qk = qkm1 + qkm2 * xk
        pkm2 = pkm1
        pkm1 = pk
        qkm2 = qkm1
        qkm1 = qk

        old_r = r
        r = aet.switch(aet.eq(qk, zero), r, pk / qk)

        k1 += one
        k2 += k26update
        k3 += two
        k4 += two
        k5 += one
        k6 -= k26update
        k7 += two
        k8 += two

        big_cond = aet.gt(aet.abs_(qk) + aet.abs_(pk), BIG)
        biginv_cond = aet.or_(aet.lt(aet.abs_(qk), BIGINV),
                              aet.lt(aet.abs_(pk), BIGINV))

        pkm2 = aet.switch(big_cond, pkm2 * BIGINV, pkm2)
        pkm1 = aet.switch(big_cond, pkm1 * BIGINV, pkm1)
        qkm2 = aet.switch(big_cond, qkm2 * BIGINV, qkm2)
        qkm1 = aet.switch(big_cond, qkm1 * BIGINV, qkm1)

        pkm2 = aet.switch(biginv_cond, pkm2 * BIG, pkm2)
        pkm1 = aet.switch(biginv_cond, pkm1 * BIG, pkm1)
        qkm2 = aet.switch(biginv_cond, qkm2 * BIG, qkm2)
        qkm1 = aet.switch(biginv_cond, qkm1 * BIG, qkm1)

        return (
            (pkm1, pkm2, qkm1, qkm2, k1, k2, k3, k4, k5, k6, k7, k8, r),
            until(aet.abs_(old_r - r) < (THRESH * aet.abs_(r))),
        )

    (pkm1, pkm2, qkm1, qkm2, k1, k2, k3, k4, k5, k6, k7, k8, r), _ = scan(
        _step,
        sequences=[aet.arange(0, 300)],
        outputs_info=[
            e for e in aet.cast((pkm1, pkm2, qkm1, qkm2, k1, k2, k3, k4, k5,
                                 k6, k7, k8, r), "float64")
        ],
    )

    return r[-1]
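
The BIG / BIGINV rescaling inside _step keeps the partial numerators and denominators in floating-point range without changing the result, because scaling pk and qk by a common factor leaves the convergent pk / qk untouched; a one-line toy check:

import numpy as np

pk, qk = 1.2e300, 3.4e300   # made-up near-overflow convergent terms
BIGINV = 2.22044604925031308085e-16
assert np.isclose((pk * BIGINV) / (qk * BIGINV), pk / qk)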
Example #9
This package contains functions that can be used inside 'value' elements.

As an example, the Ramp class is used in a way to allow the user to specify
'ramp(10,.5)' to ramp from the current value to a value of 10 using an exponent
of 0.5 over the duration of the waveform element.
"""

from scipy.interpolate import interp1d
import numpy as np
import math

import physical
from physical import unit
from physical.sympy_util import has_sympy

machine_arch = np.MachAr()
from logging import log, info, debug, warn, critical, DEBUG, root as rootlog

from ..tools.dict import Dict


class step_iter:
    def __init__(self, ti, tf, dt, fun):
        self.t = ti
        self.tf = tf
        self.tf_m1 = tf - dt  # time after last full step
        self.tf_stop = tf + 0.5 * dt
        self.dt = dt
        self.fun = fun

    def __iter__(self):
Example #10
def machar(*args):
    return np.MachAr()
Example #11
    return T0


def phase(t, P, T0=0.0):
    """
    Phase-fold times t given period P and epoch T0; result in (0, 1].
    """
    Phase = ((t - T0) % P) / P
    # ensure > 0
    Phase[Phase <= 0.] += 1
    return Phase
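
A quick usage sketch for phase(), assuming the function defined just above: fold some times over a made-up 2.5-day period.

import numpy as np

t = np.linspace(0.0, 10.0, 5)
print(phase(t, P=2.5, T0=1.0))   # phases in (0, 1]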


# Lower level functions

f = np.MachAr()
machep = f.eps


def eccan(Ecc, M, Tol=1.0e-8, Nmax=50):
    """
    Calculate eccentric anomaly from eccentricity Ecc and mean
    anomaly M using Newton-Raphson process.
    """
    if M < Tol: return M
    x = Ecc * np.sin(M) / (1 - Ecc * np.cos(M))
    Eo = M + x * (1 - x * x / 2.)
    Diff = 1
    Flag = 0
    i = 0
    while (Diff > Tol):
Example #12
# Module to calculate relative positions and radial velocities for
# binary system. Written by Suzanne Aigrain.

import numpy

f = numpy.MachAr()
machep = f.eps


def eccan(Ecc, M, Tol=1.0e-8, Nmax=50):
    """Calculate eccentric anomaly using Newton-Raphson process."""
    if M < Tol: return M
    x = Ecc * numpy.sin(M) / (1 - Ecc * numpy.cos(M))
    Eo = M + x * (1 - x * x / 2.)
    Diff = 1
    Flag = 0
    i = 0
    while (Diff > Tol):
        En = Eo + (M + Ecc * numpy.sin(Eo) - Eo) / (1 - Ecc * numpy.cos(Eo))
        Diff = abs((En - Eo) / Eo)
        Eo = En
        i += 1
        if i >= Nmax:
            if Flag == 1:
                print(Ecc, M)
                print('Eccan did not converge')
                return M
            Flag = 1
            i = 0
            Eo = M
            Diff = 1
Example #13
import numpy as np
import scipy.linalg as sla
small = np.MachAr().eps

def sinefitm(time, data, w = None, \
             fmin = None, fmax = None, nfreq = None):
    '''
    Least squares fit of sine curve to data. Can process multiple
    time-series simultaneously. Returns trial frequencies, reduced chi2,
    amplitudes of sine and cosine components, and dc level.
    '''
    if fmin is None:
        fmin = 1. / (np.nanmax(time) - np.nanmin(time))
    if fmax is None:
        fmax = 0.5 / np.nanmin(time[1:] - time[:-1])
    if nfreq is None:
        nfreq = int(fmax/fmin)
    freq = np.r_[fmin:fmax:nfreq*1j]
    sha = data.shape
    if len(sha) == 1:
        data = data.reshape(1, sha[0])
    elif sha[1] < sha[0]:
        data = data.reshape(sha[1], sha[0])
    nobj, nobs = data.shape
    if w is None:
        w = np.ones(nobs)
    rchi2 = np.zeros((nobj,nfreq)) + np.nan
    dc = np.zeros((nobj,nfreq)) + np.nan
    amps = np.zeros((nobj,nfreq)) + np.nan
    ampc = np.zeros((nobj,nfreq)) + np.nan
    sumw = w.sum()
Example #14
#       MC, Oxford, 23 October 2007
# V1.1.1: Added STATUS keyword. Provide more informative error messages on failure.
#       MC, Windoek, 5 October 2008
# V1.1.2: Renamed CAP_QUADVA to avoid potential naming conflicts.
#       MC, Paranal, 8 November 2013
# V2.0.0: Translated from IDL into Python. MC, Paranal, 14 November 2013
# V2.0.1: Fixed possible program stop. MC, Oxford, 27 January 2014
# V2.0.2: Support both Python 2.6/2.7 and Python 3.x. MC, Oxford, 25 May 2014
# 
#----------------------------------------------------------------------

from __future__ import print_function

import numpy as np    

EPS = 100.*np.MachAr().eps

#----------------------------------------------------------------------
def _qva_split(interval):

    # If breakpoints are specified, split subintervals in
    # half as needed to get a minimum of 10 subintervals.
    v = interval
    while True:
        npts = interval.size
        if npts > 10: 
            break
        v = np.zeros(npts*2 - 1)
        v[0::2] = interval
        v[1::2] = interval[:-1] + 0.5*np.diff(interval)
        interval = v
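
The scraped snippet cuts off before _qva_split's return statement, so here is a standalone sketch of the same midpoint-doubling idea (hypothetical name, same loop logic):

import numpy as np


def split_until_enough(interval, min_pts=10):
    interval = np.asarray(interval, dtype=float)
    while interval.size <= min_pts:
        v = np.zeros(interval.size * 2 - 1)
        v[0::2] = interval
        v[1::2] = interval[:-1] + 0.5 * np.diff(interval)
        interval = v
    return interval


print(split_until_enough([0.0, 1.0, 2.0]))   # 17 breakpoints after three doublings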
Example #15
    def linear_hessian(self, params, step_size=None):
        """
        Compute the Hessian, quickly. This takes advantage of the fact that many components of utility are constant
        when computing the Hessian, which changes two parameters at a time. The ASCs also do not really need to be
        recomputed, as the changes to the coefficients are minuscule. This only works for utility functions that are
        strictly linear-in-parameters, which the MNL utility function is.

        The results of this function should be identical to statsmodels.tools.numdiff.approx_hess3, as the algorithm
        used is the same (equation 9 from Ridout 2009 - note that Ridout 2009 is about complex step differentiation, but
        eq. 9 is an example of finite-step differentiation).

        Ridout, M. S. (2009). Statistical Applications of the Complex-Step Method of Numerical Differentiation.
            _The American Statistician_, 63(1), 66–74. https://doi.org/10.1198/tast.2009.0013
        """
        utility = self.full_utility(params)

        # this is the step size for the finite-difference approximation. it is copied from
        # statsmodels.tools.numdiff.approx_hess3
        if step_size is None:
            step_size = np.MachAr().eps**(1.0 / 4) * np.maximum(
                np.abs(params), 0.1)
        elif isinstance(step_size, float):
            step_size = np.full(len(params), step_size)

        nthreads = max_thread_count()
        LOG.info(f"computing Hessian using {nthreads} threads")
        # we need the outer product to scale the finite diff appx below
        prod = np.outer(step_size, step_size)
        hess = np.zeros_like(prod)

        task_queue = queue.Queue()
        result_queue = queue.Queue()
        stop_threads = threading.Event()

        n = len(params)

        # parallelized by rows, each thread calcs rows at a time, b/c there is some computation that's common across
        # a row.
        def worker():
            while not stop_threads.is_set():
                try:
                    i = task_queue.get(timeout=10)
                except queue.Empty:
                    continue
                else:
                    step_i = self.alternatives[:, i] * step_size[i]
                    LOG.info(f"Hessian starting row {i} / {n}")
                    for j in range(i, n):
                        step_j = self.alternatives[:, j] * step_size[j]
                        elem = (self.negative_log_likelihood_for_utility(
                            utility + step_i + step_j) -
                                self.negative_log_likelihood_for_utility(
                                    utility + step_i - step_j) -
                                (self.negative_log_likelihood_for_utility(
                                    utility - step_i + step_j) -
                                 self.negative_log_likelihood_for_utility(
                                     utility - step_i - step_j))) / (
                                         4 * prod[i, j])

                        result_queue.put((i, j, elem))
                    # only mark as done once per row
                    task_queue.task_done()

        def consumer():
            while not stop_threads.is_set():
                try:
                    i, j, elem = result_queue.get(timeout=10)
                except queue.Empty:
                    continue
                else:
                    hess[i, j] = hess[j, i] = elem
                    result_queue.task_done()

        for i in range(nthreads):
            threading.Thread(target=worker).start()
        threading.Thread(target=consumer).start()

        for i in range(n):
            task_queue.put(i)

        task_queue.join()
        result_queue.join()
        stop_threads.set()

        return hess
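
The formula inside the worker is a four-point central second difference (the "eq. 9" the docstring cites). A serial, thread-free sketch of the same formula on a toy quadratic whose exact Hessian is [[2, 3], [3, 4]]:

import numpy as np


def f(x):
    return x[0] ** 2 + 3 * x[0] * x[1] + 2 * x[1] ** 2


def hessian_fd(f, x, step=None):
    x = np.asarray(x, dtype=float)
    if step is None:
        # same step-size rule as above, via np.finfo instead of np.MachAr
        step = np.finfo(float).eps ** 0.25 * np.maximum(np.abs(x), 0.1)
    n = x.size
    hess = np.zeros((n, n))
    for i in range(n):
        ei = np.zeros(n)
        ei[i] = step[i]
        for j in range(i, n):
            ej = np.zeros(n)
            ej[j] = step[j]
            hess[i, j] = hess[j, i] = (
                f(x + ei + ej) - f(x + ei - ej)
                - f(x - ei + ej) + f(x - ei - ej)
            ) / (4 * step[i] * step[j])
    return hess


print(hessian_fd(f, [1.0, 2.0]))   # ≈ [[2., 3.], [3., 4.]]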
Example #16
this distribution for specifics.

NO WARRANTY IS EXPRESSED OR IMPLIED.  USE AT YOUR OWN RISK.
Brett G. Olivier
"""

"""
This module contains simplified methods derived from the Pysces model class
Brett G. Olivier June 2010
"""

import os, copy, time
import numpy
from pysces import PyscesStoich
from pysces import PyscesParse
mach_spec = numpy.MachAr()
pscParser = PyscesParse.PySCeSParser(debug=0)

class PyscesInputFileParser(object):
    """
    This class contains the PySCeS model loading and Stoichiometric Analysis methods
    """
    ModelDir = None
    ModelFile = None
    ModelOutput = None
    __settings__ = None
    N = None

    def __init__(self, model_file, directory, output_dir=None):
        self.ModelDir = directory
        self.ModelFile = model_file
Example #17
"""
Created on 22. apr. 2015

@author: pab
"""
import unittest
# from functools import partial
from numdifftools.multicomplex import bicomplex
from numdifftools.example_functions import get_function
import numpy as np

EPS = np.MachAr().eps


def _default_base_step(x, scale, epsilon=None):
    h = (10 * EPS)**(1. / scale) * np.maximum(np.log1p(np.abs(x)), 0.1)
    return h


class BicomplexTester(unittest.TestCase):
    def test_init(self):
        z = bicomplex(1, 2)
        self.assertEqual(z.z1, 1)
        self.assertEqual(z.z2, 2)

    def test_neg(self):
        z = bicomplex(1, 2)
        z2 = -z
        self.assertEqual(z2.z1, -z.z1)
        self.assertEqual(z2.z2, -z.z2)
Example #18
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.

import numpy as np
from functools import lru_cache
from scipy.stats import norm, uniform
sq2 = np.sqrt(2)

__all__ = ['standard', 'interactions']

marr = np.MachAr()


class InvalidPtable(Exception):
    pass


def _pbalance(A, B):
    '''Copy the lower triangular part of B to that of A, 0.5 on the
    diagonal, and A_ij = 1 - A_ji for i < j'''

    assert A.shape == B.shape
    I = np.indices(A.shape)
    ltri = I[0] > I[1]
    utri = I[1] > I[0]