import numpy as np
from scipy.special import expm1


def copula_bv_frank(u, v, theta):
    '''Frank bivariate copula.'''
    if not theta > 0:
        raise ValueError('theta needs to be strictly positive')
    cdfv = -np.log(1 + expm1(-theta * u) * expm1(-theta * v) / expm1(-theta)) / theta
    cdfv = np.minimum(cdfv, 1)  # necessary for example if theta=100
    return cdfv
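A minimal usage sketch (with the imports above; theta = 2.0 is an arbitrary choice): any bivariate copula must satisfy C(u, 1) = u, which gives a quick correctness check.

u = np.array([0.2, 0.5, 0.9])
v = np.ones_like(u)
print(copula_bv_frank(u, v, theta=2.0))   # ~[0.2, 0.5, 0.9], since C(u, 1) = u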
import numpy as np
import scipy.special as sc


def _genextreme_fitstart(self, data, fitstart):
    # pab
    d = np.asarray(data)
    # Probability weighted moments (Hosking-style starting values)
    log = np.log
    n = len(d)
    d.sort()
    koeff1 = np.r_[0:n] / (n - 1)
    koeff2 = koeff1 * (np.r_[0:n] - 1) / (n - 2)
    b2 = np.dot(koeff2, d) / n
    b1 = np.dot(koeff1, d) / n
    b0 = d.mean()
    z = (2 * b1 - b0) / (3 * b2 - b0) - log(2) / log(3)
    shape = 7.8590 * z + 2.9554 * z ** 2
    # np.exp(gammaln(1 + shape)) = Gamma(1 + shape);
    # sc.expm1(gammaln(1 + shape)) = Gamma(1 + shape) - 1
    scale = (2 * b1 - b0) * shape / (np.exp(sc.gammaln(1 + shape)) *
                                     (1 - 2 ** (-shape)))
    loc = b0 + scale * sc.expm1(sc.gammaln(1 + shape)) / shape
    return shape, loc, scale
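A quick sanity check of these starting values (with the imports above, calling the method directly with a dummy self): Gumbel data corresponds to GEV shape 0, so the estimate should come out near shape 0, loc 0, scale 1.

rng = np.random.default_rng(0)
data = rng.gumbel(size=1000)              # Gumbel = GEV with shape 0
shape, loc, scale = _genextreme_fitstart(None, data, None)
print(shape, loc, scale)                  # roughly 0, 0, 1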
import numpy as np
from scipy import special


def nexpm1(a, x):
    r"""
    Evaluates :math:`\frac{\exp(a x) - 1}{a}` safely.

    Parameters
    ----------
    a : numpy.ndarray
        Scale factors.
    x : numpy.ndarray
        Input values.

    Returns
    -------
    y : numpy.ndarray
        Elementwise evaluation of the desired function.
    """
    # For a == 0 the limit is x; the inner np.where guards the
    # division so it never divides by zero.
    fltr = a == 0
    return np.where(fltr, x, special.expm1(a * x) / np.where(fltr, 1, a))
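A small usage check (with the imports above): the a == 0 entries return x, matching the a → 0 limit of (exp(a x) − 1)/a.

a = np.array([0.0, 1e-12, 1.0])
x = np.array([2.0, 2.0, 2.0])
print(nexpm1(a, x))   # ~[2.0, 2.0, 6.389], i.e. x where a == 0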
import numpy as np
from numpy.linalg import inv
from scipy.special import expit, expm1


def newton_method(phi: np.ndarray, group: np.ndarray, num_of_points: int) -> np.ndarray:
    """
    Newton's method
    :param phi: Φ matrix
    :param group: group of each data point
    :param num_of_points: number of data points
    :return: weight vector omega
    """
    # info_log and get_delta_j (the gradient) are helpers defined
    # elsewhere in this project
    info_log("== Newton's method ==")

    # Set up initial guess of omega
    omega = np.random.rand(3, 1)
    info_log(f'Initial omega:\n{omega}')

    # Set up D matrix for the Hessian
    d = np.zeros((num_of_points * 2, num_of_points * 2))

    # Get optimal weight vector omega
    count = 0
    while True:
        count += 1
        old_omega = omega.copy()

        # Fill the diagonal of D with exp(-phi @ omega) * sigmoid(phi @ omega)^2,
        # which equals sigmoid'(phi @ omega)
        product = phi.dot(omega)
        diagonal = (expm1(-product) + 1) * np.power(expit(product), 2)
        np.fill_diagonal(d, diagonal)

        # Set up Hessian matrix
        hessian = phi.T.dot(d.dot(phi))

        # Update omega
        try:
            # Use Newton's method
            omega += inv(hessian).dot(get_delta_j(phi, omega, group))
        except np.linalg.LinAlgError:
            # Use gradient descent if the Hessian is singular
            omega += get_delta_j(phi, omega, group)

        if np.linalg.norm(omega - old_omega) < 0.0001 or count > 1000:
            break

    return omega
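The diagonal above uses the identity exp(−z)·σ(z)² = σ(z)(1 − σ(z)), the derivative of the logistic sigmoid, with exp(−z) written as expm1(−z) + 1; a quick numerical confirmation:

import numpy as np
from scipy.special import expit, expm1

z = np.linspace(-5.0, 5.0, 11)
lhs = (expm1(-z) + 1) * expit(z) ** 2   # exp(-z) * sigmoid(z)^2
rhs = expit(z) * (1.0 - expit(z))       # sigmoid'(z)
print(np.allclose(lhs, rhs))            # True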
import numpy as np
from scipy.special import expm1


def ntail(l, u):
    # samples a column vector of length=length(l)=length(u)
    # from the standard multivariate normal distribution,
    # truncated over the region [l,u], where l>0 and
    # l and u are column vectors;
    # uses acceptance-rejection from Rayleigh distr.
    # similar to Marsaglia (1964);
    c = l ** 2 / 2
    n = len(l)
    f = expm1(c - u ** 2 / 2)
    x = c - np.log(1 + np.random.uniform(size=n) * f)  # sample using Rayleigh
    # keep list of rejected
    I = np.random.uniform(size=n) ** 2 * x > c
    while np.any(I):  # while there are rejections
        cy = c[I]  # find the thresholds of rejected
        y = cy - np.log(1 + np.random.uniform(size=len(cy)) * f[I])
        idx = (np.random.uniform(size=len(cy)) ** 2) * y < cy  # accepted
        tmp = I.copy()
        I[tmp] = idx  # mark the list of elements in x to update
        x[I] = y[idx]  # store the accepted
        I[tmp] = np.logical_not(idx)  # remove accepted from list
    x = np.sqrt(2 * x)  # this Rayleigh transform can be delayed till the end
    return x
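A usage sketch (with the imports above; the bounds are arbitrary): every draw must land inside [l, u].

l = np.full(10000, 3.0)
u = np.full(10000, 4.0)
s = ntail(l, u)
print(s.min() >= 3.0, s.max() <= 4.0)   # True True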
import numpy as np
import scipy.special as scisp


def neg_loglikelihood(beta, Y, X):
    """
    Summary
    -------
    Loss function of the logistic regression.

    Parameters
    ----------
    beta: 'numpy array'
        Parameters of the logistic regression.
    Y: 'numpy array'
        Response variable vector.
    X: 'numpy matrix'
        Matrix of covariates.

    Returns
    -------
    Loss function.
    """
    # sum without NAs; note 1 + expm1(z) == exp(z), so the last term
    # is log(1 + exp(X @ beta))
    return -np.nansum(Y * np.matmul(X, beta)
                      - scisp.log1p(1 + scisp.expm1(np.matmul(X, beta))))
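Since 1 + expm1(z) = exp(z), the last term equals log(1 + exp(z)); a check against numpy's stable np.logaddexp(0, z):

import numpy as np
import scipy.special as scisp

z = np.array([-30.0, 0.0, 5.0])
print(np.allclose(scisp.log1p(1 + scisp.expm1(z)), np.logaddexp(0.0, z)))  # True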
def _cdf(self, x, c, q):
    return -special.expm1(-x**c + log(q))
def inverse(self, phi, theta):
    return -np.log1p(np.exp(-phi) * expm1(-theta)) / theta
def inverse(self, phi, theta):
    phi = np.asarray(phi)
    return -np.log1p(np.exp(-phi) * expm1(-theta)) / theta
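These inverse methods undo the Frank-generator evaluate seen elsewhere in this collection; a standalone round-trip check with both formulas inlined (theta = 2.0 is arbitrary):

import numpy as np
from scipy.special import expm1

theta = 2.0
t = np.array([0.1, 0.5, 0.9])
phi = -(np.log(-expm1(-theta * t)) - np.log(-expm1(-theta)))  # evaluate
t_back = -np.log1p(np.exp(-phi) * expm1(-theta)) / theta      # inverse
print(np.allclose(t_back, t))                                 # True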
from scipy.special import expm1


def cexpm1(x, y):
    # complex expm1: real and imaginary parts of exp(x + iy) - 1
    z = expm1(x + 1j * y)
    return z.real, z.imag
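A small check, relying on scipy.special.expm1 accepting complex arguments: exp(iπ) − 1 should be −2.

import numpy as np

re, im = cexpm1(0.0, np.pi)
print(re, abs(im) < 1e-15)   # -2.0 True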
def evaluate(self, t, theta):
    return -(np.log(-expm1(-theta * t)) - np.log(-expm1(-theta)))
def _cdf(self, x):
    return -special.expm1(-0.5 * x**2)
def evaluate(self, t, theta):
    t = np.asarray(t)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", RuntimeWarning)
        val = -(np.log(-expm1(-theta * t)) - np.log(-expm1(-theta)))
    return val
# Excerpt: npmat (presumably numpy.matlib), scspf (scipy.special) and
# the relerr helper are defined earlier in the script.

# a set of n random numbers
a = npmat.rand(n)
print('a = ', a)

# cube root
sc_c = scspf.cbrt(a)  # SciPy.special
np_c = np.cbrt(a)     # NumPy

# max and min of the relative error
relerr_vec = relerr(sc_c, np_c)
print('max(relerr(sc_c, np_c)) = ', np.max(relerr_vec))
print('min(relerr(sc_c, np_c)) = ', np.min(relerr_vec))

# expm1(a) = exp(a) - 1
sc_c = scspf.expm1(a)
np_c = np.exp(a) - 1
print('SciPy expm1(a) = ', sc_c)
print('NumPy exp(a)-1 = ', np_c)

# max and min of the relative error
relerr_vec = relerr(sc_c, np_c)
print('max(relerr(sc_c, np_c)) = ', np.max(relerr_vec))
print('min(relerr(sc_c, np_c)) = ', np.min(relerr_vec))

# -------------------------------------
# Copyright (c) 2021 Tomonori Kouya
# All rights reserved.
# -------------------------------------
def _pmf(self, k, mu):
    # pmf(k, mu) / (1 - exp(-mu)): Poisson renormalized to k >= 1
    return -poisson.pmf(k, mu) / expm1(-mu)
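Since −expm1(−μ) = 1 − e^(−μ) = 1 − P(K = 0), this is the zero-truncated Poisson pmf; it should sum to 1 over k ≥ 1:

import numpy as np
from scipy.stats import poisson
from scipy.special import expm1

mu, k = 2.0, np.arange(1, 50)
print(np.isclose((-poisson.pmf(k, mu) / expm1(-mu)).sum(), 1.0))  # True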
def evaluate(self, t, theta):
    t = np.asarray(t)
    return -(np.log(-expm1(-theta * t)) - np.log(-expm1(-theta)))
def _stats(self, mu):
    mean = mu * exp(mu) / expm1(mu)
    var = mean * (1.0 - mu / expm1(mu))
    g1 = None  # skewness and kurtosis not implemented
    g2 = None
    return mean, var, g1, g2
def evaluate(self, t, theta):
    return -(np.log(-expm1(-theta * t)) - np.log(-expm1(-theta)))
def _cdf(self, r, c):
    rc = r + c
    return -sc.expm1(-(rc * rc - c * c) / 2.0)  # pylint: disable=no-member
def _cdf(self, x, c):
    return -special.expm1(-x**c)
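Throughout these _cdf methods, −expm1(−z) is preferred over 1 − exp(−z) because it avoids catastrophic cancellation for small z; for the Weibull-type CDF above:

import numpy as np
from scipy import special

x, c = 1e-12, 1.0
print(-special.expm1(-x ** c))   # ~1e-12 to full precision
print(1.0 - np.exp(-x ** c))     # only ~4 significant digits survive the cancellation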