def to_unconstrained_arr(p):
    """Numerically stable transform from positive reals to the real line.

    Implements the inverse softplus, ``log(exp(p) - 1)``, rewritten so it
    neither overflows for large ``p`` nor loses precision for small ``p``.
    Autograd friendly and fully vectorized.

    Args
    ----
    p : scalar (int or float) or array of values in (0, +\\infty)

    Returns
    -------
    ans : value(s) in (-\\infty, +\\infty), same shape as p
    """
    ## Scalar path (ints are accepted too; they previously fell through to
    ## the array path and crashed on 0-d boolean-mask assignment).
    if isinstance(p, (int, float)):
        if p > 10.0:
            # exp(p) - 1 ~ exp(p): factor out p to avoid overflow.
            return p + ag_np.log1p(-ag_np.exp(-p))
        return ag_np.log(ag_np.expm1(p))
    ## Array path. The previous implementation assigned into a preallocated
    ## array through boolean masks; in-place item assignment is not supported
    ## by autograd's ArrayBox, so despite the docstring it broke under
    ## differentiation. ag_np.where keeps the computation functional.
    ## where() evaluates BOTH branches everywhere, so clamp each branch's
    ## input to the region where it is well behaved; otherwise the unused
    ## side can produce inf/NaN values and NaN gradients.
    small = ag_np.minimum(p, 10.0)   # only consumed where p <= 10
    large = ag_np.maximum(p, 10.0)   # only consumed where p > 10
    return ag_np.where(
        p > 10.0,
        large + ag_np.log1p(-ag_np.exp(-large)),
        ag_np.log(ag_np.expm1(small)),
    )
def _log_hazard(self, params, T, *Xs): lambda_params = params[self._LOOKUP_SLICE["lambda_"]] log_lambda_ = np.dot(Xs[0], lambda_params) rho_params = params[self._LOOKUP_SLICE["rho_"]] log_rho_ = np.dot(Xs[1], rho_params) return log_rho_ - log_lambda_ + np.expm1(log_rho_) * (np.log(T) - log_lambda_)
def _log_hazard(self, params, T, Xs): lambda_params = params["lambda_"] log_lambda_ = Xs["lambda_"] @ lambda_params rho_params = params["rho_"] log_rho_ = Xs["rho_"] @ rho_params return log_rho_ - log_lambda_ + np.expm1(log_rho_) * (np.log(T) - log_lambda_)
def _log_hazard(self, params, T, Xs): lambda_params = params["lambda_"] log_lambda_ = np.dot(Xs["lambda_"], lambda_params) rho_params = params["rho_"] log_rho_ = np.dot(Xs["rho_"], rho_params) return log_rho_ - log_lambda_ + np.expm1(log_rho_) * (np.log(T) - log_lambda_)
def _log_hazard(self, params: DictBox, T: Union[float, ndarray], Xs: DataframeSliceDict) -> ArrayBox:
    """Log hazard at time(s) T.

    ``log l`` and ``log r`` are the linear predictors of the
    ``lambda_`` / ``rho_`` covariate slices; the result is
    ``log r - log l + (r - 1) * (log T - log l)`` with ``r - 1``
    computed as ``expm1(log r)``.
    """
    log_lambda_ = Xs["lambda_"] @ params["lambda_"]
    log_rho_ = Xs["rho_"] @ params["rho_"]
    time_term = np.expm1(log_rho_) * (np.log(T) - log_lambda_)
    return log_rho_ - log_lambda_ + time_term
def _log_hazard(self, params, T, *Xs): alpha_params = params[self._LOOKUP_SLICE["alpha_"]] log_alpha_ = np.dot(Xs[0], alpha_params) alpha_ = np.exp(log_alpha_) beta_params = params[self._LOOKUP_SLICE["beta_"]] log_beta_ = np.dot(Xs[1], beta_params) beta_ = np.exp(log_beta_) return log_beta_ - log_alpha_ + np.expm1(log_beta_) * (np.log(T) - log_alpha_) - np.log1p((T / alpha_) ** beta_)
def _log_hazard(self, params, T, *Xs): alpha_params = params[self._LOOKUP_SLICE["alpha_"]] log_alpha_ = np.dot(Xs[0], alpha_params) alpha_ = np.exp(log_alpha_) beta_params = params[self._LOOKUP_SLICE["beta_"]] log_beta_ = np.dot(Xs[1], beta_params) beta_ = np.exp(log_beta_) return log_beta_ - log_alpha_ + np.expm1(log_beta_) * (np.log(T) - log_alpha_) - np.log1p((T / alpha_) ** beta_)
def _log_hazard(self, params: Union[DictBox, Dict[str, np.array]], T: Union[float, np.array], Xs: DataframeSlicer) -> Union[np.array, ArrayBox]:
    """Log hazard at time(s) T.

    Builds the linear predictors ``log l = Xs["lambda_"] @ params["lambda_"]``
    and ``log r = Xs["rho_"] @ params["rho_"]`` and returns
    ``log r - log l + (r - 1) * (log T - log l)``, with ``r - 1``
    evaluated as ``expm1(log r)``.
    """
    log_lambda_ = Xs["lambda_"] @ params["lambda_"]
    log_rho_ = Xs["rho_"] @ params["rho_"]
    delta = np.log(T) - log_lambda_
    return log_rho_ - log_lambda_ + np.expm1(log_rho_) * delta
def _log_hazard(self, params, T, Xs): alpha_params = params["alpha_"] log_alpha_ = np.dot(Xs["alpha_"], alpha_params) alpha_ = np.exp(log_alpha_) beta_params = params["beta_"] log_beta_ = np.dot(Xs["beta_"], beta_params) beta_ = np.exp(log_beta_) return (log_beta_ - log_alpha_ + np.expm1(log_beta_) * (np.log(T) - log_alpha_) - np.logaddexp(beta_ * (np.log(T) - np.log(alpha_)), 0))
def decaycos_self_product(w, tau, phi=0.0, L=128.0, **kwargs):
    r"""Squared integral

    $$
    \|\cos (\omega \xi + \phi) \exp \tau \xi\|_v^2
    = \frac{1}{2} \int_0^L e^{-2 \xi \tau} \cos(2 \xi \omega + 2 \phi) \dd \xi
    + \frac{1}{2} \int_0^L e^{-2 \xi \tau} \dd \xi
    $$

    where the second (cross-free) integral has the closed form

    $$
    \frac{1}{2}\int_0^L e^{-2 \xi \tau}\dd \xi = \frac{1-e^{-2L\tau}}{4 \tau}.
    $$
    """
    # First term: the doubled-argument decaycos integral.
    oscillating = decaycos_int(2.0 * w, 2.0 * tau, 2.0 * phi, L=L, **kwargs)
    # Second term: (1 - e^{-2 L tau}) / (4 tau), via expm1 so that the
    # numerator keeps precision when 2*L*tau is small.
    decay = -np.expm1(-2.0 * L * tau) / (4.0 * tau)
    return 0.5 * oscillating + decay
def test_expm1():
    # Gradient checks for expm1 (scaled to catch missing chain-rule factors),
    # on the first and second derivative, at positive random points.
    def scaled_expm1(x):
        return 3.0 * np.expm1(x)
    check_grads(scaled_expm1, abs(npr.randn()))
    check_grads(grad(scaled_expm1), abs(npr.randn()))
def test_expm1():
    # Verify expm1's gradient and second derivative numerically; the 3.0
    # scale factor would expose a dropped chain-rule multiplier.
    def scaled_expm1(x):
        return 3.0 * np.expm1(x)
    check_grads(scaled_expm1, abs(npr.randn()))
    check_grads(grad(scaled_expm1), abs(npr.randn()))
def test_expm1():
    # Numerical gradient check for expm1 at a random positive point; the
    # 3.0 scaling would expose a missing chain-rule factor.
    def scaled_expm1(x):
        return 3.0 * np.expm1(x)
    check_grads(scaled_expm1)(abs(npr.randn()))
def expm1d_naive(x):
    """Compute (e**x - 1) / x directly, with no special-casing.

    NOTE(review): divides by x as-is, so x == 0 produces a warning and
    nan — presumably this is the deliberately naive baseline for a
    stable `expm1d` defined elsewhere; confirm before hardening.
    """
    numerator = np.expm1(x)
    return numerator / x
def test_expm1():
    # Check derivatives of expm1 numerically at a random positive point;
    # scaling by 3.0 would reveal a dropped chain-rule multiplier.
    def scaled_expm1(x):
        return 3.0 * np.expm1(x)
    check_grads(scaled_expm1)(abs(npr.randn()))