Example #1
def sarkar_embedding_3D(tree, root, **kwargs):
    '''
    Embed a tree in the 3D Poincare ball with a Sarkar-style construction;
    see sarkar_embedding below for the meaning of the keyword arguments
    (here the distortion parameter is passed as "eps").
    '''
    eps = kwargs.get("eps",0.1)
    weighted = kwargs.get("weighted", True)
    tau = kwargs.get("tau")
    max_deg = max(deg for _, deg in tree.degree)

    if tau is None:
        tau = (1+eps)/eps * mpm.log(2*max_deg/ mpm.pi)
    prc = kwargs.get("precision")
    if prc is None:
        prc = _embedding_precision(tree,root,eps)
    mpm.mp.dps = prc
    
    n = tree.order()
    emb = mpm.zeros(n,3)
    place = []

    # place the children of root
    fib = fib_2D_code(tree.degree[root])
    for i, v in enumerate(tree[root]):
        r = mpm.tanh(tau*tree[root][v].get("weight",1.))
        v_emb = r * mpm.matrix([[fib[i,0],fib[i,1],fib[i,2]]])
        emb[v,:]=v_emb
        place.append((root,v))
    
    while place:
        u, v = place.pop() # u is the parent of v
        u_emb, v_emb = emb[u,:], emb[v,:]

        # reflect and rotate so that embedding(v) is at (0,0,0) 
        # and embedding(u) is in the direction of (0,0,1)
        u_emb = poincare_reflect0(v_emb, u_emb, precision=prc)
        R = rotate_3D_mp(mpm.matrix([[0.,0.,1.]]), u_emb)
        #u_emb = (R.T * u_emb).T

        # place children of v 
        fib = fib_2D_code(tree.degree[v])
        i=0
        for w in tree[v]:
            if w == u: # i=0 is for u (parent of v)
                continue            
            i+=1
            r = mpm.tanh(tau*tree[w][v].get("weight",1.))
            w_emb = r * mpm.matrix([[fib[i,0],fib[i,1],fib[i,2]]])

            #undo reflection and rotation
            w_emb = (R * w_emb.T).T
            w_emb = poincare_reflect0(v_emb, w_emb, precision=prc)
            emb[w,:] = w_emb
            place.append((v,w))
    return emb
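
A minimal usage sketch for the 3D embedding above, assuming the helper functions it calls (fib_2D_code, poincare_reflect0, rotate_3D_mp, _embedding_precision) are importable from the same module and that networkx is installed; the tree is only an illustration:

import networkx as nx

# A small star tree with nodes 0..4, centered (and rooted) at node 0.
T = nx.star_graph(4)
emb = sarkar_embedding_3D(T, root=0, eps=0.1)
print(emb.rows, emb.cols)  # 5 x 3 mpmath matrix; each row is a point in the unit ball
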
Example #2
def tanh_warp_arb(X, l1, l2, lw, x0):
    r"""Warps the `X` coordinate with the tanh model

    .. math::

        l = \frac{l_1 + l_2}{2} - \frac{l_1 - l_2}{2}\tanh\frac{x-x_0}{l_w}

    Parameters
    ----------
    X : :py:class:`Array`, (`M`,) or scalar float
        `M` locations to evaluate length scale at.
    l1 : positive float
        Small-`X` saturation value of the length scale.
    l2 : positive float
        Large-`X` saturation value of the length scale.
    lw : positive float
        Length scale of the transition between the two length scales.
    x0 : float
        Location of the center of the transition between the two length scales.

    Returns
    -------
    l : :py:class:`Array`, (`M`,) or scalar float
        The value of the length scale at the specified point.
    """
    if isinstance(X, scipy.ndarray):
        if isinstance(X, scipy.matrix):
            X = scipy.asarray(X, dtype=float)
        return 0.5 * ((l1 + l2) - (l1 - l2) * scipy.tanh((X - x0) / lw))
    else:
        return 0.5 * ((l1 + l2) - (l1 - l2) * mpmath.tanh((X - x0) / lw))
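
A short usage sketch for tanh_warp_arb on the scalar mpmath path (assuming scipy and mpmath are imported as the snippet expects, and a SciPy version that still exposes the scipy.ndarray alias used in the type check): far below x0 the warp saturates at l1, far above x0 at l2, and at x0 it returns the midpoint (l1 + l2)/2.

import mpmath

print(tanh_warp_arb(mpmath.mpf(-100), 1.0, 3.0, 0.5, 0.0))  # ~1.0, the small-X saturation value
print(tanh_warp_arb(mpmath.mpf(100), 1.0, 3.0, 0.5, 0.0))   # ~3.0, the large-X saturation value
print(tanh_warp_arb(mpmath.mpf(0), 1.0, 3.0, 0.5, 0.0))     # 2.0, the midpoint at x0
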
Example #4
def logistic_gaussian(m, v):
    if m == oo:
        if v == oo:
            return oo
        return Float('1.0')
    if v == oo:
        return Float('0.5')
    mpmath.mp.dps = 500
    mmpf = m._to_mpmath(500)
    vmpf = v._to_mpmath(500)
    # The integration routine below is obtained by substituting x = atanh(t)
    # into the definition of logistic_gaussian
    #
    # f = lambda x: mpmath.exp(-(x - mmpf) * (x - mmpf) / (2 * vmpf)) / (1 + mpmath.exp(-x))
    # result = 1 / mpmath.sqrt(2 * mpmath.pi * vmpf) * mpmath.quad(f, [-mpmath.inf, mpmath.inf])
    #
    # Such substitution makes mpmath.quad call much faster.
    tanhm = mpmath.tanh(mmpf)
    # Not really a precise threshold, but fine for our data
    if tanhm == mpmath.mpf('1.0'):
        return Float('1.0')
    f = lambda t: mpmath.exp(-(mpmath.atanh(t) - mmpf) ** 2 / (2 * vmpf)) / ((1 - t) * (1 + t + mpmath.sqrt(1 - t * t)))
    coef = 1 / mpmath.sqrt(2 * mpmath.pi * vmpf)
    integral, err = mpmath.quad(f, [-1, 1], error=True)
    result = coef * integral
    if mpmath.mpf('1e50') * abs(err) > abs(integral):
        print(f"Suspiciously big error when evaluating an integral for logistic_gaussian({m}, {v}).")
        print(f"Integral: {integral}")
        print(f"Integral error estimate: {err}")
        print(f"Coefficient: {coef}")
        print(f"Result (Coefficient * Integral): {result}")
    return Float(result)
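
For reference, the algebra behind the substitution mentioned in the comment can be checked directly against the integrand f above: with $x = \operatorname{atanh}(t)$ one has $dx = dt/(1 - t^2)$ and $e^{-x} = \sqrt{(1-t)/(1+t)}$, so

    \frac{dx}{1 + e^{-x}}
      = \frac{dt}{(1 - t^2)\left(1 + \sqrt{\tfrac{1-t}{1+t}}\right)}
      = \frac{dt}{(1 - t)\left(1 + t + \sqrt{1 - t^2}\right)},

which is exactly the non-Gaussian factor in f; the Gaussian factor becomes $\exp(-(\operatorname{atanh}(t) - m)^2/(2v))$ and the limits $(-\infty, \infty)$ map to $(-1, 1)$.
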
Example #5
def sf(x, loc=0, scale=1):
    """
    Survival function of the logistic distribution.
    """
    with mpmath.extradps(5):
        x = mpmath.mpf(x)
        loc = mpmath.mpf(loc)
        scale = mpmath.mpf(scale)
        z = (x - loc) / scale
        p = (1 - mpmath.tanh(z / 2)) / 2
    return p
Example #6
def cdf(x, loc=0, scale=1):
    """
    CDF of the logistic distribution.
    """
    with mpmath.extradps(5):
        x = mpmath.mpf(x)
        loc = mpmath.mpf(loc)
        scale = mpmath.mpf(scale)
        z = (x - loc) / scale
        p = (1 + mpmath.tanh(z / 2)) / 2
    return p
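
Both sf and cdf above rely on the identity 1/(1 + exp(-z)) = (1 + tanh(z/2))/2 for the logistic distribution; a quick sanity check (a sketch, not part of the library):

import mpmath

z = mpmath.mpf('0.7')
print((1 + mpmath.tanh(z / 2)) / 2)   # logistic CDF written with tanh
print(1 / (1 + mpmath.exp(-z)))       # direct logistic CDF, same value
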
Example #7
def v(x, t, s, b):
    # c is an artificial parameter for findroot.
    c = 10**(-10)
    if x > b:
        return j * mp.findroot(lambda y: re(w(j * y, s, b) - (x - t)),
                               [-s / 2 + c, 0 - c], "bisect")
    elif x < -b:
        return j * mp.findroot(lambda y: re(w(j * y, s, b) - (x - t)),
                               [0 + c, s / 2 - c], "bisect")
    else:
        tolerant = 10**(-20)
        normparam = 10**11
        return 1 / 2 + j * mp.findroot(
            lambda y: tanh(re(w(1 / 2 + j * y, s, b) - (x - t)) / normparam),
            0,
            tol=tolerant)
Example #8
def sigmoid(x):
    return mpmath.tanh(x)
Example #9
def pearsonr_ci(r, n, alpha, alternative='two-sided'):
    """
    Confidence interval of Pearson's correlation coefficient.

    This function uses Fisher's transformation to compute the confidence
    interval of Pearson's correlation coefficient.

    Examples
    --------
    Imports:

    >>> import mpmath
    >>> mpmath.mp.dps = 20
    >>> from mpsci.stats import pearsonr, pearsonr_ci

    Sample data:

    >>> a = [2, 4, 5, 7, 10, 11, 12, 15, 16, 20]
    >>> b = [2.53, 2.41, 3.60, 2.69, 3.19, 4.05, 3.71, 4.65, 4.33, 4.70]

    Compute the correlation coefficient:

    >>> r, p = pearsonr(a, b)
    >>> r
    mpf('0.893060379514729854846')
    >>> p
    mpf('0.00050197523992669206603645')

    Compute the 95% confidence interval for r:

    >>> rlo, rhi = pearsonr_ci(r, n=len(a), alpha=0.05)
    >>> rlo
    mpf('0.60185206817708369265664')
    >>> rhi
    mpf('0.97464778383702233502275')

    """
    if alternative not in ['two-sided', 'less', 'greater']:
        raise ValueError("alternative must be 'two-sided', 'less', or "
                         "'greater'.")

    with mpmath.mp.extradps(5):
        zr = mpmath.atanh(r)
        n = mpmath.mp.mpf(n)
        alpha = mpmath.mp.mpf(alpha)
        s = mpmath.sqrt(1 / (n - 3))
        if alternative == 'two-sided':
            h = normal.invcdf(1 - alpha / 2)
            zlo = zr - h * s
            zhi = zr + h * s
            rlo = mpmath.tanh(zlo)
            rhi = mpmath.tanh(zhi)
        elif alternative == 'less':
            h = normal.invcdf(1 - alpha)
            zhi = zr + h * s
            rhi = mpmath.tanh(zhi)
            rlo = -mpmath.mp.one
        else:
            # alternative == 'greater'
            h = normal.invcdf(1 - alpha)
            zlo = zr - h * s
            rlo = mpmath.tanh(zlo)
            rhi = mpmath.mp.one
        return rlo, rhi
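
The construction above is the Fisher-transformation interval the docstring refers to: $z_r = \operatorname{atanh}(r)$ is treated as approximately normal with standard error $1/\sqrt{n - 3}$, so the two-sided limits are

    r_{\mathrm{lo}},\, r_{\mathrm{hi}} = \tanh\!\left(\operatorname{atanh}(r) \mp \frac{z_{1-\alpha/2}}{\sqrt{n - 3}}\right),

while the one-sided alternatives use $z_{1-\alpha}$ and fix the other endpoint at $-1$ or $+1$.
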
Example #10
 def DDs(self, x):
     return -2 * pow(mpmath.sech(x), 2) * mpmath.tanh(x)
Example #11
 def s(self, x):
     return mpmath.tanh(x)
Example #12
 def disp_rel_mp(self, W, K, M_A):
     return self.m0_mp(W, M_A)**2 * W**4 + 1/self.R1 * 1/self.R2 * self.m1_mp(W) * self.m2_mp(W) * (1 - (W - M_A)**2)**2 - \
         0.5 * W**2 * self.m0_mp(W, M_A) * (1 - (W - M_A)**2) * (1/self.R1 * self.m1_mp(W) + 1/self.R2 * self.m2_mp(W)) * \
         (mp.tanh(self.m0_mp(W, M_A) * K) + mp.tanh(self.m0_mp(W, M_A) * K)**(-1))
Example #13
def sarkar_embedding(tree, root, **kwargs):
    ''' 
    Embed a tree in the Poincare disc using Sarkar's algorithm
    from "Low Distortion Delaunay Embedding of Trees in Hyperbolic Plane".
        Args:
            tree (networkx.Graph) : The tree represented with int node labels.
                  Weighted trees should have the edge attribute "weight"
            root (int): The node to use as the root of the embedding 
        Keyword Args:
            weighted (bool): True if the tree is weighted (default True)
            tau (float): the scaling factor for distances. 
                        By default it is calculated based on statistics of the tree.
            epsilon (float): parameter >0 controlling distortion bound (default 0.1).
            precision (int): number of bits of precision to use.
                            By default it is calculated based on tau and epsilon.
        Returns:
            size N x 2 mpmath.matrix containing the coordinates of embedded nodes
    '''
    eps = kwargs.get("epsilon",0.1)
    weighted = kwargs.get("weighted", True)
    tau = kwargs.get("tau")
    max_deg = max(deg for _, deg in tree.degree)

    if tau is None:
        tau = (1+eps)/eps * mpm.log(2*max_deg/ mpm.pi)
    prc = kwargs.get("precision")
    if prc is None:
        prc = _embedding_precision(tree,root,eps)
    mpm.mp.dps = prc
    
    n = tree.order()
    emb = mpm.zeros(n,2)
    place = []

    # place the children of root
    for i, v in enumerate(tree[root]):
        if weighted: 
            r = mpm.tanh( tau*tree[root][v]["weight"])
        else:
            r = mpm.tanh(tau)
        theta = 2*i*mpm.pi / tree.degree[root]
        emb[v,0] = r*mpm.cos(theta)
        emb[v,1] = r*mpm.sin(theta)
        place.append((root,v))
    
    # TODO parallelize this
    while place:
        u, v = place.pop() # u is the parent of v
        p, x = emb[u,:], emb[v,:]
        rp = poincare_reflect0(x, p, precision=prc)
        arg = mpm.acos(rp[0]/mpm.norm(rp))
        if rp[1] < 0:
            arg = 2*mpm.pi - arg
            
        theta = 2*mpm.pi / tree.degree[v]
        i=0
        for w in tree[v]:
            if w == u: continue
            i+=1
            if weighted:
                r = mpm.tanh(tau*tree[v][w]["weight"])
            else:
                r = mpm.tanh(tau)
            w_emb = r * mpm.matrix([mpm.cos(arg+theta*i),mpm.sin(arg+theta*i)]).T
            w_emb = poincare_reflect0(x, w_emb, precision=prc)
            emb[w,:] = w_emb
            place.append((v,w))
    return emb
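
A minimal usage sketch for sarkar_embedding, with the same caveat as the 3D example above about the helper functions coming from the same module; the tree is illustrative only:

import networkx as nx

# Unweighted balanced binary tree with nodes 0..14, rooted at node 0.
T = nx.balanced_tree(2, 3)
emb = sarkar_embedding(T, 0, weighted=False, epsilon=0.1)
print(emb.rows, emb.cols)  # 15 x 2 mpmath matrix; each row is a point in the Poincare disc
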
Example #14
 def eval(self, z):
     return mpmath.tanh(z)
Example #15
 def eval(self, z):
     return mpmath.tanh(z)
Example #16
def Activation(x, xm, eta):
    return 0.5 * (mpmath.tanh(eta * (x - xm)) + 1.0)
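
A tiny usage sketch for Activation: it is a smooth step centered at xm, with eta controlling the steepness of the transition.

import mpmath

print(Activation(-5.0, 0.0, 2.0))  # close to 0 well below xm
print(Activation(0.0, 0.0, 2.0))   # exactly 0.5 at xm
print(Activation(5.0, 0.0, 2.0))   # close to 1 well above xm
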