Example #1
def test_errprint():
    flag = sc.errprint(True)
    try:
        assert_(isinstance(flag, bool))
        with warnings.catch_warnings(record=True) as w:
            sc.loggamma(0)
            assert_(w[-1].category is sc.SpecialFunctionWarning)
    finally:
        sc.errprint(flag)
Example #2
def test_errprint():
    with suppress_warnings() as sup:
        sup.filter(DeprecationWarning, "`errprint` is deprecated!")
        flag = sc.errprint(True)

    try:
        assert_(isinstance(flag, bool))
        with pytest.warns(sc.SpecialFunctionWarning):
            sc.loggamma(0)
    finally:
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, "`errprint` is deprecated!")
            sc.errprint(flag)
Example #3
def spectral_density(self, k):  # noqa: D102
    k = np.array(k, dtype=np.double)
    # for nu > 20 we just use an approximation of the gaussian model
    if self.nu > 20.0:
        return ((self.len_scale / np.sqrt(np.pi))**self.dim *
                np.exp(-(k * self.len_scale)**2) *
                (1 + (((k * self.len_scale)**2 - self.dim / 2.0)**2 -
                      self.dim / 2.0) / self.nu))
    return (self.len_scale / np.sqrt(np.pi))**self.dim * np.exp(
        -(self.nu + self.dim / 2.0) *
        np.log(1.0 + (k * self.len_scale)**2 / self.nu) +
        sps.loggamma(self.nu + self.dim / 2.0) - sps.loggamma(self.nu) -
        self.dim * np.log(np.sqrt(self.nu)))
Example #5
def poisson_gamma(data, sum_w, sum_w2, a=1, b=0):
    """
    Log-likelihood based on the poisson-gamma mixture. This is a Poisson likelihood using a Gamma prior.
    This implementation is based on the implementation of Austin Schneider ([email protected])
    -- Input variables --
    data = data histogram
    sum_w = MC histogram
    sum_w2 = Uncertainty map (sum of weights squared in each bin)
    a, b = hyperparameters of gamma prior for MC counts; default values of a = 1 and b = 0 corresponds to LEff (eq 3.16) https://doi.org/10.1007/JHEP06(2019)030
           a = 0 and b = 0 corresponds to LMean (Table 2) https://doi.org/10.1007/JHEP06(2019)030

    -- Output --
    llh = LLH values in each bin

    -- Notes --
    Shape of data, sum_w, sum_w2 and llh are identical
    """

    llh = np.ones(data.shape) * -np.inf  # Binwise LLH values

    bad_bins = np.logical_or(
        sum_w <= 0, sum_w2 < 0)  # Bins where the MC sum is non-positive or the variance is negative

    # LLH would be 0 for the bad bins if the data is 0
    zero_llh = np.logical_and(data == 0, bad_bins)
    llh[zero_llh] = 0  # Zero LLH for these bins if data is also 0

    good_bins = ~bad_bins
    poisson_bins = np.logical_and(
        sum_w2 == 0, good_bins
    )  # In the limit that sum_w2 == 0, the llh converges to poisson

    llh[poisson_bins] = poissonLLH(
        data[poisson_bins],
        sum_w[poisson_bins])  # Poisson LLH since limiting case

    # Calculate hyperparameters for the gamma posterior for MC counts
    regular_bins = np.logical_and(
        good_bins, ~poisson_bins
    )  # Bins on which the poisson_gamma LLH would be evaluated
    alpha = sum_w[regular_bins]**2. / sum_w2[regular_bins] + a
    beta = sum_w[regular_bins] / sum_w2[regular_bins] + b

    k = data[regular_bins]
    # Poisson-gamma LLH
    L = alpha * np.log(beta) + special.loggamma(
        k + alpha).real - special.loggamma(k + 1.0).real - (
            k + alpha) * np.log1p(beta) - special.loggamma(alpha).real
    llh[regular_bins] = L

    return llh
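A minimal usage sketch (my own, not from the source). `poissonLLH` is not shown above, so a standard per-bin Poisson log-likelihood is assumed as a stand-in:

import numpy as np
from scipy import special

def poissonLLH(k, lam):
    # assumed stand-in: per-bin Poisson log-likelihood, k*log(lam) - lam - log(k!)
    return k * np.log(lam) - lam - special.loggamma(k + 1).real

data = np.array([3.0, 0.0, 5.0])
sum_w = np.array([2.5, 0.0, 4.8])   # second bin has no MC -> "bad" bin; its data is 0 -> llh 0
sum_w2 = np.array([0.3, 0.0, 0.0])  # third bin has zero variance -> Poisson limiting case
print(poisson_gamma(data, sum_w, sum_w2))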
Example #6
def fnGASfilter(aData, aFactors, vOmega, mAlpha, mBeta, dNu, dSigma2):

    # data, which is used for the observation driven model
    dfY = aData.values

    # initialize size
    iT = aData.shape[0]
    iK = aData.shape[1]

    # Initialize time varying parameter
    aF = np.zeros(shape=(iT, 4))  # (408 x 4)
    aF[:, 3] = aFactors.iloc[:, 3]
    aF[0, :3] = aFactors.iloc[0, :3]  # first is equal to first estimated betas

    mSigma2 = np.identity(iK) * dSigma2

    # Initialize vector of loglikelihood contributions
    vLikelihoodValues = np.ones(iT)
    vScore = np.zeros(shape=(iT, 3))
    vX = NSM()

    mAlpha.resize(3, 3)
    mBeta.resize(3, 3)

    # filter variances and compute likelihood contributions by
    # a loop over the monthly observations
    for t in range(0, iT - 1):

        # Compute the residuals
        vE = dfY[t, :] - vX @ aF[t, :3]  # shape = (360,1)

        # Compute likelihood contribution
        vLikelihoodValues[t] = (scs.loggamma((dNu + iK) / 2)) \
            - (scs.loggamma(dNu/2)) \
            - ((iK / 2) * np.log((dNu-2) * np.pi)) \
            - (0.5 * np.log(np.sum(np.diag(mSigma2)))) \
            - (0.5*(dNu + 360)*np.log(1 + np.power((dNu), -1) * (vE.T @ npl.inv(mSigma2) @ vE)))

        vNablat = ((dNu + 360) /
                   dNu) * (1 + dNu**(-1) * vE @ npl.inv(mSigma2) @ vE)**(
                       -1) * (vX.T @ npl.inv(mSigma2) @ vE)

        mSt = (dNu + 360) * (
            (dNu + 360 + 2)**(-1)) * (vX.T @ npl.inv(mSigma2) @ vX)

        vScore[t] = vNablat @ (mSt**(-1))

        # update the filter
        aF[t + 1, :3] = vOmega + (mBeta @ aF[t, :3]) + (mAlpha @ vScore[t])

    return (vLikelihoodValues[:-1], aF)
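For reference, each `vLikelihoodValues[t]` has the shape of a standardized multivariate Student-t log-density (my reading; note the snippet hardcodes the dimension as 360 in two places where `iK` is used elsewhere, and uses `np.sum(np.diag(mSigma2))` where the log-determinant of the scale matrix would normally enter):

\ell_t = \log\Gamma\!\left(\frac{\nu + K}{2}\right) - \log\Gamma\!\left(\frac{\nu}{2}\right)
       - \frac{K}{2}\log\big((\nu - 2)\pi\big) - \frac{1}{2}\log\lvert\Sigma\rvert
       - \frac{\nu + K}{2}\,\log\!\left(1 + \frac{\varepsilon_t^{\top}\Sigma^{-1}\varepsilon_t}{\nu}\right)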
Example #7
    def fit(self, X, delta=10, report_status=True, stop_at_conv=True, num_steps=100, num_burn_in_steps=20, alpha=1, gamma=1):
        """ Infers the parameters
        
        Arguments:
            X {np.array} -- Matrix of Data, where each row repesent one data point and each column the number of times
                            a catgory was observed in the data  
        
        Keyword Arguments:
            delta {int} -- If the loglikelihood does not change in a defined interval by delta the algorithm stops (default: {10})
            report_status {bool} -- print status at current iteration (default: {True})
            stop_at_conv {bool} -- stop if converged (default: {True})
            num_steps {int} -- maximal number of steps (default: {100})
            num_burn_in_steps {int} -- minimal number of steps (default: {20})
            alpha {int} -- Dirchichlet Parameter for pi, which can be seen as prior knowledge for the number of clusters (default: {1})
            gamma {int} -- Dirichlet Paramters for theta (default: {1})
        """
        I = len(X[0])
        # Initialization
        self.ll_list = []
        self.K_seen = 1
        self.Z = np.zeros(len(X))
        self.theta = np.zeros((self.K_seen, I))
        self.theta[0] = np.random.dirichlet(I*[gamma])

        # Useful private counts to have:
        self._logpx_n_unseen = np.zeros(len(X))
        for n in range(len(X)):
            first_gamma = np.sum(loggamma(gamma + X[n]))
            second_gamma = loggamma(np.sum([gamma]*I))
            third_gamma = loggamma(np.sum(gamma+X[n]))
            four_gamma = np.sum(loggamma([gamma]*I))
            self._logpx_n_unseen[n] = first_gamma + \
                second_gamma - third_gamma - four_gamma

        self._m_k = np.zeros(self.K_seen)
        for k in range(self.K_seen):
            self._m_k[k] = np.sum(self.Z == k)

        self._gamma_km = np.zeros((self.K_seen, I))
        for k in range(self.K_seen):
            for m in range(I):
                self._gamma_km[k, m] = gamma + np.sum(X[self.Z == k][:, m])

        # Start: run at least num_burn_in_steps iterations, then continue until
        # convergence (when stop_at_conv) or until num_steps further steps have run
        i = 1
        while i <= num_burn_in_steps or \
                ((not stop_at_conv or not self._isConverged(delta=delta)) and
                 i <= num_burn_in_steps + num_steps):
            self._sample_Z(X, alpha, gamma)
            self._sample_theta()
            self._print_status(X, i, report_status=report_status)
            i += 1
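For reference, the per-point quantity cached in `_logpx_n_unseen` is the Dirichlet-multinomial marginal likelihood of x_n under a symmetric Dirichlet(gamma) prior, without the multinomial coefficient (my reading of the four loggamma terms):

\log p(x_n) = \sum_{m=1}^{I}\log\Gamma(\gamma + x_{nm}) + \log\Gamma(I\gamma)
            - \log\Gamma\!\Big(I\gamma + \sum_{m=1}^{I} x_{nm}\Big) - I\,\log\Gamma(\gamma)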
Example #8
    def score(self, X=None):
        """ Likelihood of the model

        Parameters
        ----------
        X : np.array[n_vocabulary, n_docs]
            Word-document data matrix.

        Returns
        -------
        loglikelihood : float
        """
        if X is None:
            nw = self.num_words
            nd = self.num_documents
            wtm = self.word_topic_matrix
            tdm = self.topic_document_matrix
        else:
            tdm = self._transform(X)

        loglikeli = self.num_topics * loggamma(self.beta_ * self.num_words)
        loglikeli -= self.num_topics * self.num_words * loggamma(self.beta_)
        loglikeli += self.num_documents * loggamma(
            self.alpha_ * self.num_topics)
        loglikeli -= self.num_documents * self.num_topics * loggamma(
            self.alpha_)

        loglikeli += loggamma(self.word_topic_matrix + self.beta_).sum()
        loglikeli -= loggamma(
            self.word_topic_matrix.sum(0) + self.beta_ * self.num_words).sum()
        loglikeli += loggamma(tdm + self.alpha_).sum()
        loglikeli -= loggamma(tdm.sum(0) + self.alpha_ * self.num_topics).sum()
        return loglikeli
Example #9
def log_prob(mu_r, ks, with_prior=True, sumall=True):
    mu, r = mu_r
    if r <= 0 or mu <= 0:
        return -9e99
    loglhood = loggamma(r + ks) - loggamma(r) - loggamma(ks + 1) + r * np.log(
        r / (r + mu)) + ks * np.log(mu / (r + mu))
    if sumall:
        loglhood = np.sum(loglhood)
    if with_prior:
        logprior = -np.log(r)
    else:
        logprior = 0
    lprob = loglhood + logprior
    return lprob
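The `loglhood` expression is the negative-binomial log-pmf with mean mu and dispersion r (my reading of the loggamma terms):

\log p(k \mid r, \mu) = \log\Gamma(k + r) - \log\Gamma(r) - \log\Gamma(k + 1)
                      + r\log\frac{r}{r + \mu} + k\log\frac{\mu}{r + \mu}

The `with_prior` branch adds \log\pi(r) = -\log r, a scale-invariant prior on the dispersion.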
Example #10
def eval_integrand(a, b, c, d, z, s):
    """
    Evaluates the integrand of the Fox's \overbar{H} function in (z,s)
    :param a (list of tuples): first vector of length n. Each element is a tuple (\alpha_j, A_j, a_j)
    :param b (list of tuples): second vector of length p - n. Each element is a tuple (\alpha_j, A_j)
    :param c (list of tuples): third vector of length m. Each element is a tuple (\beta_j, B_j)
    :param d (list of tuples): fourth vector of length q - m. Each element is a tuple (\beta_j, B_j, b_j)
    :param z (float): value of the \overbar{H} function to evaluate
    :param s (complex): value where to evaluate the integrand
    :return (float): the value of the integrand in (z,s)
    """
    assert all(len(x) == 3 for x in a)
    assert all(len(x) == 2 for x in b)
    assert all(len(x) == 2 for x in c)
    assert all(len(x) == 3 for x in d)
    assert z != 0

    n = len(a)
    m = len(c)
    p = n + len(b)
    q = m + len(d)

    nom1_exp = 0.0
    for i in range(0, n):
        v = a[i]
        nom1_exp += v[2] * loggamma(1.0 - v[0] + v[1] * s)

    nom2_exp = 0.0
    for i in range(0, m):
        v = c[i]
        nom2_exp += loggamma(v[0] - v[1] * s)

    nom_exp = nom1_exp + nom2_exp

    den1_exp = 0.0
    for i in range(0, p - n):
        v = b[i]
        den1_exp += loggamma(v[0] - v[1] * s)

    den2_exp = 0.0
    for i in range(0, q - m):
        v = d[i]
        den2_exp += v[2] * loggamma(1.0 - v[0] + v[1] * s)

    den_exp = den1_exp + den2_exp

    M = exp(nom_exp - den_exp) * np.power(z + 0j, s)

    return M
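A minimal call sketch with single-term coefficient vectors (illustrative values of my own; this assumes `from numpy import exp` — the snippet uses a bare `exp` — and `from scipy.special import loggamma`):

a = [(0.5, 1.0, 1.0)]  # one (alpha_j, A_j, a_j) tuple, so n = 1
b = []                 # p - n = 0
c = [(0.5, 1.0)]       # one (beta_j, B_j) tuple, so m = 1
d = []                 # q - m = 0
print(eval_integrand(a, b, c, d, z=1.0, s=0.25 + 0.5j))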
Example #11
def log_pc_gmm(K_max, N_max, D, *, R=1e+3, lmd_min=1e-3):
    """log PC of GMM.

    Calculate (log) parametric complexity of Gaussian mixture model.

    Args:
        K_max (int): max number of clusters.
        N_max (int): max number of data.
        D (int): dimension of data.
        R (float): upper bound of ||mean||^2.
        lmd_min (float): lower bound of the eigenvalues of the covariance matrix.

    Returns:
        np.ndarray: array of (log) parametric complexity.
            returns[K, N] = log C(K, N)
    """
    log_PC_array = np.zeros([K_max + 1, N_max + 1])
    r1_min = D + 1

    # N = 0
    log_PC_array[:, 0] = -np.inf

    # K = 0
    log_PC_array[0, :] = -np.inf

    # K = 1
    # N <= r1_min
    log_PC_array[1, :r1_min] = -np.inf
    # N > r1_min
    N_list = np.arange(r1_min, N_max + 1)
    log_PC_array[1, r1_min:] = _log_pc_gaussian(N_list,
                                                D=D,
                                                R=R,
                                                lmd_min=lmd_min)

    # K > 1
    for k in range(2, K_max + 1):
        for n in range(1, N_max + 1):
            r1 = np.arange(n + 1)
            r2 = n - r1
            log_PC_array[k, n] = logsumexp(
                sum([
                    loggamma(n + 1), (-1) * loggamma(r1 + 1),
                    (-1) * loggamma(r2 + 1), r1 * np.log(r1 / n + 1e-100),
                    r2 * np.log(r2 / n + 1e-100), log_PC_array[1, r1],
                    log_PC_array[k - 1, r2]
                ]))

    return log_PC_array
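The K > 1 loop is a log-space evaluation of the usual binomial-split recursion for mixture parametric complexity, with `loggamma` supplying the log-binomial coefficient and the 1e-100 guarding 0 * log 0 (my reading):

\log \mathcal{C}(K, N) = \operatorname{logsumexp}_{r_1 + r_2 = N}
    \Big[\log\binom{N}{r_1} + r_1\log\frac{r_1}{N} + r_2\log\frac{r_2}{N}
         + \log\mathcal{C}(1, r_1) + \log\mathcal{C}(K - 1, r_2)\Big]

where \log\binom{N}{r_1} = \log\Gamma(N + 1) - \log\Gamma(r_1 + 1) - \log\Gamma(r_2 + 1).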
Example #12
def func_call(s, *p):
    s = s + 0j
    base = s * np.log(p[0]**2) + np.log(p[1]**2)
    #constant = loggamma(p[2]) - loggamma(p[3])
    off = N_base + N_constant
    plus = np.sum([
        loggamma(p[off + 2 * k] + p[off + 2 * k + 1] * s)
        for k in range(N_plus)
    ])
    off = N_base + N_constant + 2 * N_plus
    minus = np.sum([
        loggamma(p[off + 2 * k] + p[off + 2 * k + 1] * s)
        for k in range(N_minus)
    ])
    return np.exp(base + plus - minus)
Example #13
def FromGamma(a, b):

    if a <= 1:
        raise ValueError(
            'Error in conversion of Gamma distribution: a must be greater than 1'
        )

    mu = np.sqrt(b) * np.exp(spec.loggamma(a - 0.5) - spec.loggamma(a))

    sig = np.sqrt(b / (a - 1) -
                  b * np.exp(2 *
                             (spec.loggamma(a - 0.5) - spec.loggamma(a))))

    return mu, sig
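A quick usage sketch (my own). The two formulas are consistent with reading (a, b) as the shape and rate of a Gamma-distributed precision tau and returning the mean and standard deviation of sigma = 1/sqrt(tau):

import numpy as np
from scipy import special as spec

mu, sig = FromGamma(3.0, 2.0)
print(mu, sig)  # E[sigma] and Std[sigma] for tau ~ Gamma(shape=3, rate=2)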
Example #14
def test_loggamma():
    if NumCpp.NO_USE_BOOST:
        return

    value = np.random.rand(1).item()
    assert (roundScaler(NumCpp.log_gamma_Scaler(value), NUM_DECIMALS_ROUND) ==
            roundScaler(sp.loggamma(value), NUM_DECIMALS_ROUND))

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArray(shape)
    data = np.random.rand(shape.rows, shape.cols)
    cArray.setArray(data)
    assert np.array_equal(roundArray(NumCpp.log_gamma_Array(cArray), NUM_DECIMALS_ROUND),
                          roundArray(sp.loggamma(data), NUM_DECIMALS_ROUND))
Example #15
def Zpdf_upper(x, n, m, g, alpha, xi):
    #This function returns the density of Z_{g + m} at the supplied points

    F = sumrvs_cdf_upper(x, alpha, xi)
    f = sumrvs_pdf_upper(x, alpha, xi)

    densities = np.exp(
        np.log(m) + loggamma(n - g + 1) - loggamma(m + 1) -
        loggamma(n - g - m + 1)) * F**(m - 1) * (1 - F)**(n - g - m) * f

    #O(1) computation but may run into numerical precision issues
    #densities = np.exp(np.log(m) + loggamma(n - g + 1) - loggamma(m + 1) - loggamma(n - g - m + 1)
    #                  + (m - 1)*np.log(F) + (n - g - m)*np.log(1 - F) + np.log(f))

    return densities
Example #16
def func(sr, si, *p):
    s = sr + 1j * si
    base = s * np.log(p[0]**2) + np.log(p[1]**2) + loggamma(p[2] * s)
    #constant = loggamma(p[2]) - loggamma(p[3])
    off = N_base + N_constant
    plus = np.sum([
        loggamma(p[off + 2 * k] + p[off + 2 * k + 1] * s)
        for k in range(N_plus)
    ])
    off = N_base + N_constant + 2 * N_plus
    minus = np.sum([
        loggamma(p[off + 2 * k] + p[off + 2 * k + 1] * s)
        for k in range(N_minus)
    ])
    return base + plus - minus
Example #17
    def local_score(self, node, parents):
        """
        Method to compute the score associated to having
        a certain set of parents associated to a node.

        :param node: int representing the node in question.
        :param parents: np.array of ints representing the parent nodes.
        """
        # number of categories of this node
        c = self.num_categories[node]

        # number of categories of the parent nodes
        dims = self.num_categories[parents]

        # number of parent states (i.e. multiplying the number of
        # categories of each variable together)
        r = np.prod(dims)

        # conditional cell coeffs for node given parents (node)
        n_jk = np.zeros((r, c))
        n_j = np.zeros(r)
        my_parents = self.dataset[:, parents]
        my_child = self.dataset[:, node]

        # populate the conditional cell coeffs
        for i in range(self.sampleSize):
            parent_values = my_parents[i]
            child_value = my_child[i]
            row_index = self.get_row_index(dims, parent_values)

            n_jk[row_index][child_value] += 1
            n_j[row_index] += 1

        # finally compute the score
        score = self.get_prior_for_structure(len(parents))

        cell_prior = self.sample_prior / (c * r)
        row_prior = self.sample_prior / r

        for j in range(r):
            score -= special.loggamma(row_prior + n_j[j])
            for k in range(c):
                score += special.loggamma(cell_prior + n_jk[j][k])

        score += r * special.loggamma(row_prior)
        score -= c * r * special.loggamma(cell_prior)

        return score
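The loops evaluate a BDeu-style local score in log form; with row prior alpha_j = sample_prior / r and cell prior alpha_jk = sample_prior / (r c), the result is (my reading):

\text{score} = \log p(\text{structure})
             + \sum_{j=1}^{r}\Big[\log\Gamma(\alpha_j) - \log\Gamma(\alpha_j + N_j)\Big]
             + \sum_{j=1}^{r}\sum_{k=1}^{c}\Big[\log\Gamma(\alpha_{jk} + N_{jk}) - \log\Gamma(\alpha_{jk})\Big]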
Example #18
def _vmf_kld(k, d):
    return np.array([
        (k * ((sp.iv(d / 2.0 + 1.0, k) + sp.iv(d / 2.0, k) * d /
               (2.0 * k)) / sp.iv(d / 2.0, k) - d / (2.0 * k)) +
         d * np.log(k) / 2.0 - np.log(sp.iv(d / 2.0, k)) -
         sp.loggamma(d / 2 + 1) - d * np.log(2) / 2).real
    ])
Example #19
def likelihood(theta):
    """ Simple Gaussian_shell Likelihood"""
    sigma = 0.1
    radius = 2
    A = -1
    logL, logL_temp, r0, sigma0, logf0 = 0, 0, 0, 0, 0
    mu = zeros(len(theta))
    logsqrttwopi = log(sqrt(2*pi))
    logpi = log(pi)
    ndims = len(theta)
    i = 0

    if A == -1:
        r0      = (radius + sqrt(radius**2 + 4*(ndims-1)*sigma**2))/2
        logf0   = -(radius - r0)**2/2/sigma**2 + (ndims-1)*log(r0)
        logf0  += log(ndims) + ndims/2*logpi - loggamma(1+ndims/2)
        sigma0  = sigma*sqrt((1+radius/sqrt(radius**2 + 4*(ndims-1)*sigma**2))/2)
        A       = logf0 + logsqrttwopi + log(sigma0)
        

    #Gaussian normalisation
    logL = -A - ( (sqrt( sum( (mu-theta)**2 ) ) -radius)**2 /(2*sigma*sigma) )

    r2 = sum(theta**2)
    #for i in range(1, len(phi)-1):
        #phi(i) = acos(theta(i-1)/sqrt(sum(theta(i-1:)**2)))

    return logL, [r2] # float, array-like
Example #20
def func(m, n, l, lamb, Delay):
    forCoeffi = -lamb * Delay
    ret_sum = 0
    for i in range(m - n - l + 1):
        tmp_log = i * np.log(lamb * Delay) - spys.loggamma(i + 1)
        ret_sum += np.power(np.e, forCoeffi + tmp_log)
    return ret_sum
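Since the loop sums the Poisson(lamb * Delay) pmf over counts 0 through m - n - l, the result should match a Poisson CDF; a quick cross-check (my own sketch, with the imports the snippet implies):

import numpy as np
import scipy.special as spys
from scipy import stats

print(func(10, 2, 3, 1.5, 2.0))                  # sums pmf terms for counts 0..5
print(stats.poisson.cdf(10 - 2 - 3, 1.5 * 2.0))  # should agree (~0.916)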
Example #21
def loglikelihood(data, x):
    theta = x[:-1]
    alph = x[-1]
    alp_1 = 1 / alph
    dif = Mod(theta, obs_time)
    mu = np.diff(dif)
    a_mu = alph * mu
    #v1 = np.sum(loggamma(data + alp_1)  -  loggamma(alp_1))
    #v1 = log_gam(data, alp_1)
    #v2 = np.sum((alp_1 + data)*np.log(1 + a_mu))
    #v3 = np.sum(np.log(a_mu)*data)
    v1 = loggamma(data + alp_1) - loggamma(alp_1)
    v2 = (alp_1 + data) * np.log(1 + a_mu)
    v3 = np.log(a_mu) * data

    return np.sum(v1 - v2 + v3)
Example #22
def KL_guu(k, d):
    kld = k * ((sp.iv(d / 2.0 + 1.0, k) \
                + sp.iv(d / 2.0, k) * d / (2.0 * k)) / sp.iv(d / 2.0, k) - d / (2.0 * k)) \
          + d * np.log(k) / 2.0 - np.log(sp.iv(d / 2.0, k)) \
          - sp.loggamma(d / 2 + 1) - d * np.log(2) / 2

    return kld
Example #23
def logLikelihoodPart(hyperParams):
    #    mu = hyperParams[:,0]
    nu = hyperParams[:, 1]
    alpha = hyperParams[:, 2]
    beta = hyperParams[:, 3]
    res = -loggamma(alpha) + alpha * np.log(beta) + 0.5 * np.log(nu)
    return res
Example #24
    def compute_KLD(self, tup, batch_sz):
        kappa = tup['kappa']
        d = self.lat_dim

        rt_bag = []
        # const = torch.log(torch.tensor(3.1415926)) * d / 2 + torch.log(torch.tensor(2.0)) \
        #         - torch.tensor(sp.loggamma(d / 2).real) - (d / 2) * torch.log(torch.tensor(2 * 3.1415926))

        const = torch.tensor(
            np.log(np.pi) * d / 2 + np.log(2) - sp.loggamma(d / 2).real -
            (d / 2) * np.log(2 * np.pi)).to(device)
        d = torch.tensor([d], dtype=torch.float).to(device)
        batchsz = kappa.size()[0]

        rt_tensor = torch.zeros(batchsz)
        for k_idx in range(batchsz):
            k = kappa[k_idx]
            # print(k)
            # print(k)
            # print(d)
            first = k * bessel_iv(d / 2, k) / bessel_iv(d / 2 - 1, k)
            second = (d / 2 - 1) * torch.log(k) - torch.log(
                bessel_iv(d / 2 - 1, k))
            combin = first + second + const
            rt_tensor[k_idx] = combin
            # rt_bag.append(combin)
        return rt_tensor.to(device)
Example #25
def _vmf_kld(k, d):
    tmp = (k * ((sp.iv(d / 2.0 + 1.0, k) + sp.iv(d / 2.0, k) * d / (2.0 * k)) / sp.iv(d / 2.0, k) - d / (2.0 * k)) \
           + d * np.log(k) / 2.0 - np.log(sp.iv(d / 2.0, k)) \
           - sp.loggamma(d / 2 + 1) - d * np.log(2) / 2).real
    if tmp != tmp:  # NaN is the only value not equal to itself
        exit()
    return np.array([tmp])
Example #26
def fVWU3U4U5(v, w, u3, u4, u5, alpha1, alpha2, alpha3, alpha4, alpha5):
    # This function implements the density just before eq. (2)
    
    if (u4/w - u5 < u3 < (u4 + u5)*v and u4 > 0 and u5 > 0 and v > 0 and w > 0):
        
        f1 = (u3 + u5)*(u4 + u5)/np.exp(loggamma(alpha1) + loggamma(alpha2) + loggamma(alpha3) + loggamma(alpha4) + loggamma(alpha5))
        f2 = (v*(u4 + u5) - u3)**(alpha1 - 1)
        f3 = (w*(u3 + u5) - u4)**(alpha2 - 1)
        f4 = u3**(alpha3 - 1)*u4**(alpha4 - 1)*u5**(alpha5 - 1)
        f5 = np.exp(-(u3*w + u4*v + u5*(v + w + 1)))
        
        return f1*f2*f3*f4*f5
       
    else:
        
        return 0
Example #27
    def get_initialspectra(self,t,E):
        """Get neutrino spectra/luminosity curves after oscillation.

        Parameters
        ----------
        t : float
            Time to evaluate initial and oscillated spectra.
        E : float or ndarray
            Energies to evaluate the initial and oscillated spectra.

        Returns
        -------
        initialspectra : dict
            Dictionary of model spectra, keyed by neutrino flavor."""
    
        initialspectra = {}
        for flavor in Flavor:
            L = self.luminosity[flavor](t)
            Ea = self.meanE[flavor](t)          # <E_nu(t)>
            Ea = Ea*1e6 * 1.60218e-12       # Convert MeV to erg.
            a = self.pinch[flavor](t)           # alpha_nu(t)
            E[E==0] = np.finfo(float).eps       # Avoid division by zero.

            # For numerical stability, evaluate log PDF then exponentiate.
            initialspectra[flavor] = \
                np.exp(np.log(L) - (2+a)*np.log(Ea) + (1+a)*np.log(1+a) 
                       - loggamma(1+a) + a*np.log(E) - (1+a)*(E/Ea)) / (u.erg * u.s)

        return initialspectra
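Exponentiating the log-domain expression gives the familiar alpha-pinched spectrum (my reading of the terms):

\Phi_\nu(E) = \frac{L}{E_a^{2}}\,\frac{(1+a)^{1+a}}{\Gamma(1+a)}
              \left(\frac{E}{E_a}\right)^{a} e^{-(1+a)E/E_a}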
Example #28
    def correlation(self, r):
        r"""Matérn correlation function.

        .. math::
           \mathrm{cor}(r) =
           \frac{2^{1-\nu}}{\Gamma\left(\nu\right)} \cdot
           \left(\sqrt{\nu}\cdot\frac{r}{\ell}\right)^{\nu} \cdot
           \mathrm{K}_{\nu}\left(\sqrt{\nu}\cdot\frac{r}{\ell}\right)
        """
        r = np.array(np.abs(r), dtype=np.double)
        # for nu > 20 we just use the gaussian model
        if self.nu > 20.0:
            return np.exp(-(r / self.len_scale)**2 / 4)
        # calculate by log-transformation to prevent numerical errors
        r_gz = r[r > 0.0]
        res = np.ones_like(r)
        # with np.errstate(over="ignore", invalid="ignore"):
        res[r > 0.0] = np.exp(
            (1.0 - self.nu) * np.log(2) - sps.loggamma(self.nu) + self.nu *
            np.log(np.sqrt(self.nu) * r_gz / self.len_scale)) * sps.kv(
                self.nu,
                np.sqrt(self.nu) * r_gz / self.len_scale)
        # if nu >> 1 we get errors for the farfield, there 0 is approached
        res[np.logical_not(np.isfinite(res))] = 0.0
        # covariance is positive
        res = np.maximum(res, 0.0)
        return res
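A standalone check of the log-transform trick (my own sketch, outside the class): for nu = 0.5 the Matérn correlation with this sqrt(nu) scaling reduces to exp(-r / (sqrt(2) * len_scale)), so both forms can be verified against it:

import numpy as np
from scipy import special as sps

nu, len_scale = 0.5, 1.0
r = np.array([0.5, 1.0, 2.0])
x = np.sqrt(nu) * r / len_scale
direct = 2**(1 - nu) / sps.gamma(nu) * x**nu * sps.kv(nu, x)
log_form = np.exp((1.0 - nu) * np.log(2) - sps.loggamma(nu) + nu * np.log(x)) * sps.kv(nu, x)
print(np.allclose(direct, log_form))                               # True
print(np.allclose(direct, np.exp(-r / (np.sqrt(2) * len_scale))))  # True for nu = 0.5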
Example #29
File: source.py Project: nuberoi/ASTERIA
    def __init__(self,
                 name,
                 model,
                 progenitor_mass,
                 progenitor_distance,
                 time={},
                 luminosity={},
                 mean_energy={},
                 pinch={}):

        self.name = name
        self.model = model
        self.progenitor_mass = progenitor_mass
        self.progenitor_distance = progenitor_distance
        self.time = time
        self.luminosity = luminosity
        self.mean_energy = mean_energy
        self.pinch = pinch

        # Energy PDF function is assumed to be like a gamma function,
        # parameterized by mean energy and pinch parameter alpha. True for
        # nearly all CCSN models.
        self.energy_pdf = lambda a, Ea, E: \
            np.exp((1 + a) * np.log(1 + a) - loggamma(1 + a) + a * np.log(E) - \
                   (1 + a) * np.log(Ea) - (1 + a) * (E / Ea))

        self.v_energy_pdf = np.vectorize(self.energy_pdf,
                                         excluded=['E'],
                                         signature='(1,n),(1,n)->(m,n)')

        # Energy CDF, useful for random energy sampling.
        self.energy_cdf = lambda a, Ea, E: gdtr(1., a + 1., (a + 1.) *
                                                (E / Ea))
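The lambda is the Gamma(a + 1, scale = Ea / (1 + a)) density in E, which is easy to sanity-check numerically (my own sketch):

import numpy as np
from scipy.special import loggamma, gdtr

energy_pdf = lambda a, Ea, E: np.exp((1 + a) * np.log(1 + a) - loggamma(1 + a)
                                     + a * np.log(E) - (1 + a) * np.log(Ea)
                                     - (1 + a) * (E / Ea))
a, Ea = 2.5, 12.0
E = np.linspace(1e-3, 300.0, 30000)
print(np.trapz(energy_pdf(a, Ea, E), E))    # ~1.0: the PDF is normalized
print(gdtr(1.0, a + 1.0, (a + 1.0) * 1.0))  # energy CDF evaluated at E = Ea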
Example #30
def fingerprint(p):
    ret = np.log(p[0]**2)  ## A constant factor
    ret += s_values * np.log(p[1]**2)  ## C^s for some C; with the previous term this covers most prefactors
    ret += loggamma(p[2] + p[3] * s_values)  ## A flexible gamma
    ret += loggamma(p[4] + p[5] * s_values)  ## A flexible gamma
    hyp = [
        complex(
            hyper([p[6] * s + p[7], p[8] + p[9] * s], [p[10] + p[11] * s],
                  p[12])) for s in s_values
    ]  ## slow generalised hypergeometric
    ret += np.log(hyp)
    # s_values**2 * np.log(p[6]**2) #+ s**3 * np.log(p[3]**2) + s**4 * np.log(p[4]**2)  ## Strange series terms
    #ret += np.log(1 + p[5]*s_values + p[6]*s_values**2 + p[7]*s_values**3 + p[8]*s_values**4) ## Log of polynomial
    return ret
Example #31
File: liq.py Project: AyyerLab/diffuser
    def liqlatt(self, sigma_A, gamma_A):
        '''Apply liquidization transform to reciprocal lattice'''
        if self.abc[0] == 0:
            msg = 'Provide rlatt_vox to apply liqlatt (recip. lattice voxel dimensions)'
            raise AttributeError(msg)

        s_sq = (2 * cp.pi * sigma_A * self.dgen.qrad)**2

        n_max = 0
        if self.slimits.max() > 2 * np.pi * sigma_A / self.res_max:
            n_max = np.where(
                self.slimits > 2. * np.pi * sigma_A / self.res_max)[0][0] + 1

        if n_max == 0:
            #bzone = cp.zeros(self.abc)
            #bzone[self.abc[0]//2, self.abc[1]//2, self.abc[2]//2] = 1
            #return cp.tile(bzone, ncells)
            print('Returning ones array')
            return cp.ones_like(s_sq)

        liq = cp.zeros_like(s_sq)
        for n in range(1, n_max):
            weight = cp.exp(-s_sq + n * cp.log(s_sq) -
                            float(special.loggamma(n + 1)))
            factor = self.corrdisp_factor(gamma_A / n)
            liq += weight * factor
            sys.stderr.write('\rLiquidizing: %d/%d' % (n, n_max - 1))
        sys.stderr.write('\n')

        return liq
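Each shell's weight is a Poisson term in s^2 evaluated in log space (my reading):

w_n = \exp\big(-s^2 + n\ln s^2 - \ln\Gamma(n + 1)\big) = e^{-s^2}\,\frac{(s^2)^{n}}{n!}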
Example #32
def log_density_log_alpha_j(log_alpha_j, Y_j, Sigma_cho, Sigma_inv, mu):
    alpha_j = np.exp(log_alpha_j)
    n = Y_j.shape[0]
    lp = (+((alpha_j - 1) * np.log(Y_j).sum(axis=0)).sum() -
          (n * loggamma(alpha_j)).sum() + log_density_mvnormal(
              log_alpha_j, mu, np.triu(Sigma_cho[0]), Sigma_inv))
    return lp
Example #33
def test_identities2():
    # test the identity loggamma(z + 1) = log(z) + loggamma(z)
    x = np.array([-99.5, -9.5, -0.5, 0.5, 9.5, 99.5])
    y = x.copy()
    x, y = np.meshgrid(x, y)
    z = (x + 1J*y).flatten()
    dataset = np.vstack((z, np.log(z) + loggamma(z))).T

    def f(z):
        return loggamma(z + 1)

    FuncData(f, dataset, 0, 1, rtol=1e-14, atol=1e-14).check()
Example #34
def test_gh_6536():
    z = loggamma(complex(-3.4, +0.0))
    zbar = loggamma(complex(-3.4, -0.0))
    assert_allclose(z, zbar.conjugate(), rtol=1e-15, atol=0)
Example #35
File: special.py Project: BranYang/scipy
def time_loggamma_asymptotic(self):
    loggamma(self.large_z)
Example #36
def test_errstate_pyx_basic():
    olderr = sc.geterr()
    with sc.errstate(singular='raise'):
        with assert_raises(sc.SpecialFunctionError):
            sc.loggamma(0)
    assert_equal(olderr, sc.geterr())
Example #37
def f(z):
    return loggamma(z).real
Example #38
def f(z):
    return loggamma(z + 1)
Example #39
def f(z):
    return np.exp(loggamma(z))