def _rvs(self, a, b):
    # We use the inverse transform method:
    # z ~ ppf(U), where U ~ Uniform(cdf(a), cdf(b)).
    #           ~ Uniform(arctan(a), arctan(b)) / pi + 1/2
    u = random.uniform(self._random_state, shape=self._size,
                       minval=np.arctan(a), maxval=np.arctan(b))
    return np.tan(u)
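
# A minimal NumPy sketch of the same inverse-transform idea outside the
# distribution class (the method above assumes an rv-style class providing
# self._random_state and self._size). For the standard Cauchy,
# cdf(x) = arctan(x) / pi + 1/2, so u ~ Uniform(arctan(a), arctan(b)) and
# tan(u) is exactly ppf(Uniform(cdf(a), cdf(b))).
import numpy as np

def truncated_cauchy_rvs(a, b, size, seed=None):
    rng = np.random.default_rng(seed)
    u = rng.uniform(np.arctan(a), np.arctan(b), size=size)
    return np.tan(u)

samples = truncated_cauchy_rvs(-1.0, 4.0, size=10_000)
assert samples.min() >= -1.0 and samples.max() <= 4.0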
def exchange_energy_density(self, n, zeta):
    """Exchange energy density."""
    y = jnp.pi * n / self.k
    e_x = self.A * self.k * (
        jnp.log(1 + (y ** 2) * ((1 + zeta) ** 2))
        - 2 * y * (1 + zeta) * jnp.arctan(y * (1 + zeta))
        + jnp.log(1 + (y ** 2) * ((-1 + zeta) ** 2))
        - 2 * y * (-1 + zeta) * jnp.arctan(y * (-1 + zeta))
    ) / (4 * (jnp.pi ** 2))
    return e_x / n
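
# Consistency sketch (assuming self.A and self.k play the roles of the
# exponential-Coulomb amplitude and kappa): at zero spin polarization
# (zeta = 0), the expression above reduces to the unpolarized form
# A * k * (log(1 + y**2) - 2 * y * arctan(y)) / (2 * pi**2) / n.
import jax.numpy as jnp

A_c, k_c, n_c = 1.0, 1.0, 0.5
y_c = jnp.pi * n_c / k_c
polarized_zeta0 = A_c * k_c * (
    2 * jnp.log(1 + y_c**2) - 4 * y_c * jnp.arctan(y_c)) / (4 * jnp.pi**2) / n_c
unpolarized = A_c * k_c * (
    jnp.log(1 + y_c**2) - 2 * y_c * jnp.arctan(y_c)) / (2 * jnp.pi**2) / n_c
assert jnp.isclose(polarized_zeta0, unpolarized)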
def _arc(x, y0, y1, r):
    """
    Compute the area within an arc of a circle.

    The arc is defined by the two points (x, y0) and (x, y1) in the
    following manner: the circle is of radius r and is positioned at the
    origin. The origin and each individual point define a line that
    intersects the circle at some point. The angle between these two
    points on the circle, measured from y0 to y1, defines the sides of a
    wedge of the circle. The area returned is the area of this wedge. If
    the area is traversed clockwise the area is negative; otherwise it is
    positive.
    """
    return 0.5 * r**2 * (np.arctan(y1 / x) - np.arctan(y0 / x))
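
# Quick sanity check of _arc (a sketch; note that arctan(y/x) assumes the
# points lie in the right half-plane, x > 0): with r = 2, the wedge from
# (1, 0) to (1, 1) spans arctan(1) - arctan(0) = pi/4 radians, so its area
# should be 0.5 * r**2 * pi/4 = pi/2.
import numpy as np

assert np.isclose(_arc(x=1.0, y0=0.0, y1=1.0, r=2.0), np.pi / 2)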
def V(t, x):
    # need to use jnp (instead of np) for autodiff
    xa, xc, yc = x
    dev_a = xa - equ_len
    dev_c = jnp.sqrt(xc**2 + yc**2) - equ_len
    dev_ang = np.pi / 2 - jnp.arctan(xc / yc) - sad_ang
    return .5 * ((dev_a**2 + dev_c**2) / sep
                 + k * (dev_ang**2 - del_ang**2)**2)
def update(i, state, inp):
    z_c, P_c, Q = state
    y, x = inp
    N = y.shape[0]  # frame size
    A = jnp.array([[1, N],
                   [0, 1]])
    I = jnp.eye(2)
    n = jnp.arange(N) - (N - 1) / 2

    # predict
    z_p = A @ z_c
    P_p = A @ P_c @ A.T + Q
    phi_p = z_p[0, 0] + n * z_p[1, 0]  # linear approx.
    s_p = y * jnp.exp(-1j * phi_p)

    # decision: use pilots while training, else the nearest constellation point
    d = jnp.where(
        train(i), x,
        const[jnp.argmin(jnp.abs(const[None, :] - s_p[:, None]), axis=-1)])

    # innovation: phase and frequency error from decision-directed statistics
    scd_p = s_p * d.conj()
    sumscd_p = jnp.sum(scd_p)
    e = jnp.array([[jnp.arctan(sumscd_p.imag / sumscd_p.real)],
                   [(jnp.sum(n * scd_p)).imag / (jnp.sum(n * n * scd_p)).real]])

    # update
    G = P_p @ jnp.linalg.pinv(P_p + R)
    z_c = z_p + G @ e
    P_c = (I - G) @ P_p
    Q = jnp.where(akf(i),
                  alpha * Q + (1 - alpha) * (G @ e @ e.T @ G),
                  Q)

    out = (z_p[1, 0], phi_p)
    state = (z_c, P_c, Q)
    return state, out
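
# A minimal sketch of the decision-directed phase estimator used in `update`
# (toy data; `train`, `const`, `R`, `alpha`, and `akf` above are assumed to
# be defined elsewhere in the module): for symbols rotated by a small common
# phase, arctan(Im/Re) of sum(s * conj(d)) recovers that phase.
import jax.numpy as jnp

d_toy = jnp.array([1 + 1j, -1 + 1j, -1 - 1j, 1 - 1j])  # toy QPSK symbols
true_phase = 0.05
s_toy = d_toy * jnp.exp(1j * true_phase)
z = jnp.sum(s_toy * d_toy.conj())
assert jnp.isclose(jnp.arctan(z.imag / z.real), true_phase, atol=1e-5)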
def log_prob(self, value):
    if self._validate_args:
        self._validate_sample(value)
    low = (self.low - self.loc) / self.scale
    # pi / 2 is arctan of self.high when that arg is supported
    normalize_term = np.log(np.pi / 2 - np.arctan(low)) + np.log(self.scale)
    return -np.log1p(((value - self.loc) / self.scale) ** 2) - normalize_term
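
# A quick normalization check of the left-truncated Cauchy log-density above
# (a sketch with made-up loc/scale/low): exp(log_prob) should integrate to 1
# over [low, inf).
import numpy as np
from scipy.integrate import quad

loc, scale, low_val = 1.5, 2.0, 0.0
z_low = (low_val - loc) / scale
normalize_term = np.log(np.pi / 2 - np.arctan(z_low)) + np.log(scale)
pdf = lambda v: np.exp(-np.log1p(((v - loc) / scale) ** 2) - normalize_term)
total, _ = quad(pdf, low_val, np.inf)
assert np.isclose(total, 1.0)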
def sample(self, key, sample_shape=()):
    # We use the inverse transform method:
    # z ~ inv_cdf(U), where U ~ Uniform(cdf(low), cdf(high)).
    #               ~ Uniform(arctan(low), arctan(high)) / pi + 1/2
    size = sample_shape + self.batch_shape
    minval = -np.arctan(self.base_loc)
    maxval = np.pi / 2
    u = minval + random.uniform(key, shape=size) * (maxval - minval)
    return self.base_loc + np.tan(u)
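
# Sketch check that the sampler matches the CDF it inverts (plain NumPy, with
# a made-up base_loc): the empirical fraction of samples below t should
# approximate the CDF of a unit-scale Cauchy centered at base_loc and
# truncated below at 0.
import numpy as np

rng = np.random.default_rng(0)
base_loc, n_samples = 1.3, 200_000
u = (-np.arctan(base_loc)
     + rng.uniform(size=n_samples) * (np.pi / 2 + np.arctan(base_loc)))
samples = base_loc + np.tan(u)

t = 2.0
cdf_t = ((np.arctan(t - base_loc) + np.arctan(base_loc))
         / (np.pi / 2 + np.arctan(base_loc)))
assert abs((samples <= t).mean() - cdf_t) < 0.01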
def sample(self, key, sample_shape=()):
    """
    ** References: **
        1. A New Unified Approach for the Simulation of a Wide Class of
           Directional Distributions,
           John T. Kent, Asaad M. Ganeiber & Kanti V. Mardia (2018)
    """
    assert is_prng_key(key)
    phi_key, psi_key = random.split(key)

    corr = self.correlation
    conc = jnp.stack((self.phi_concentration, self.psi_concentration))

    eig = 0.5 * (conc[0] - corr**2 / conc[1])
    eig = jnp.stack((jnp.zeros_like(eig), eig))
    eigmin = jnp.where(eig[1] < 0, eig[1],
                       jnp.zeros_like(eig[1], dtype=eig.dtype))
    eig = eig - eigmin
    b0 = self._bfind(eig)

    total = _numel(sample_shape)
    phi_den = log_I1(0, conc[1]).squeeze(0)
    batch_size = _numel(self.batch_shape)
    phi_shape = (total, 2, batch_size)
    phi_state = SineBivariateVonMises._phi_marginal(
        phi_shape,
        phi_key,
        jnp.reshape(conc, (2, batch_size)),
        jnp.reshape(corr, (batch_size,)),
        jnp.reshape(eig, (2, batch_size)),
        jnp.reshape(b0, (batch_size,)),
        jnp.reshape(eigmin, (batch_size,)),
        jnp.reshape(phi_den, (batch_size,)),
    )

    phi = jnp.arctan2(phi_state.phi[:, 1:], phi_state.phi[:, :1])

    alpha = jnp.sqrt(conc[1]**2 + (corr * jnp.sin(phi))**2)
    beta = jnp.arctan(corr / conc[1] * jnp.sin(phi))

    psi = VonMises(beta, alpha).sample(psi_key)

    phi_psi = jnp.concatenate(
        (
            (phi + self.phi_loc + pi) % (2 * pi) - pi,
            (psi + self.psi_loc + pi) % (2 * pi) - pi,
        ),
        axis=1,
    )
    phi_psi = jnp.transpose(phi_psi, (0, 2, 1))
    return phi_psi.reshape(*sample_shape, *self.batch_shape,
                           *self.event_shape)
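
# Sketch of the conditional reparameterization behind `alpha` and `beta`
# above (plain NumPy, made-up parameters): for fixed phi, the psi-dependent
# part of the sine model's exponent, kappa * cos(psi) + corr * sin(phi) *
# sin(psi), equals alpha * cos(psi - beta), i.e. a von Mises in psi.
import numpy as np

kappa, corr_val, phi_val = 2.0, 0.8, 0.6
psi = np.linspace(-np.pi, np.pi, 101)
alpha = np.sqrt(kappa**2 + (corr_val * np.sin(phi_val))**2)
beta = np.arctan(corr_val / kappa * np.sin(phi_val))
lhs = kappa * np.cos(psi) + corr_val * np.sin(phi_val) * np.sin(psi)
assert np.allclose(lhs, alpha * np.cos(psi - beta))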
def exchange_energy_density(
        self,
        density,
        amplitude=constants.EXPONENTIAL_COULOMB_AMPLITUDE,
        kappa=constants.EXPONENTIAL_COULOMB_KAPPA,
        epsilon=1e-15):
    """Exchange energy density for uniform gas with exponential coulomb.

    Equation 17 in the following paper provides the exchange energy per
    length for 1d uniform gas with exponential coulomb interaction.

    One-dimensional mimicking of electronic structure: The case for
    exponentials. Physical Review B 91.23 (2015): 235141.
    https://arxiv.org/pdf/1504.05620.pdf

    y = pi * density / kappa
    exchange energy per length
        = amplitude * kappa * (ln(1 + y ** 2) - 2 * y * arctan(y))
          / (2 * pi ** 2)
    exchange energy density
        = exchange energy per length * pi / (kappa * y)
        = amplitude / (2 * pi) * (ln(1 + y ** 2) / y - 2 * arctan(y))

    Dividing by y may cause numerical instability when y is close to zero.
    A small value epsilon is introduced to prevent it. When y is smaller
    than epsilon, the exchange energy density is computed by its series
    expansion at y = 0:

    exchange energy density = amplitude / (2 * pi) * (-y + y ** 3 / 6)

    Note the exchange energy density converges to the constant
    -amplitude / 2 in the high density limit.

    Args:
        density: Float numpy array with shape (num_grids,).
        amplitude: Float, parameter of exponential Coulomb interaction.
        kappa: Float, parameter of exponential Coulomb interaction.
        epsilon: Float, a constant for numerical stability.

    Returns:
        Float numpy array with shape (num_grids,).
    """
    y = jnp.pi * density / kappa
    return jnp.where(
        y > epsilon,
        amplitude / (2 * jnp.pi) * (jnp.log(1 + y ** 2) / y
                                    - 2 * jnp.arctan(y)),
        amplitude / (2 * jnp.pi) * (-y + y ** 3 / 6))
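
# A quick float64 sanity check of the two jnp.where branches above (a sketch
# with amplitude = 1): near y = 0 the closed form matches its series
# expansion, and at large y the density tends to -amplitude / 2.
import numpy as np

amp = 1.0
closed = lambda y: amp / (2 * np.pi) * (np.log1p(y**2) / y - 2 * np.arctan(y))
series = lambda y: amp / (2 * np.pi) * (-y + y**3 / 6)
assert np.isclose(closed(1e-4), series(1e-4), rtol=1e-6)
assert np.isclose(closed(1e4), -amp / 2, atol=1e-3)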
def log(self: "SO3") -> types.TangentVector:
    # Reference:
    # > https://github.com/strasdat/Sophus/blob/a0fe89a323e20c42d3cecb590937eb7a06b8343a/sophus/so3.hpp#L247
    w = self.wxyz[0]
    norm_sq = self.wxyz[1:] @ self.wxyz[1:]
    norm = jnp.sqrt(norm_sq)
    use_taylor = norm < get_epsilon(norm_sq.dtype)
    atan_factor = jnp.where(
        use_taylor,
        2.0 / w - 2.0 / 3.0 * norm_sq / (w**3),
        jnp.where(
            jnp.abs(w) < get_epsilon(w.dtype),
            jnp.where(w > 0, 1.0, -1.0) * jnp.pi / norm,
            2.0 * jnp.arctan(norm / w) / norm,
        ),
    )
    return atan_factor * self.wxyz[1:]
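
# Sanity sketch for the main (non-Taylor) branch of `log`: for a unit
# quaternion (cos(t/2), sin(t/2) * axis) with 0 < t < pi, the arctan factor
# recovers the axis-angle vector t * axis.
import numpy as np

t = 0.7
axis = np.array([0.0, 0.6, 0.8])  # unit axis
wxyz = np.concatenate([[np.cos(t / 2)], np.sin(t / 2) * axis])
w, xyz = wxyz[0], wxyz[1:]
norm = np.linalg.norm(xyz)
log_vec = 2.0 * np.arctan(norm / w) / norm * xyz
assert np.allclose(log_vec, t * axis)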
def _cdf(self, x):
    return 2.0 / np.pi * np.arctan(x)
argmax = utils.copy_docstring(
    tf.math.argmax,
    lambda input, axis=None, output_type=tf.int64, name=None: (  # pylint: disable=g-long-lambda
        np.argmax(input, axis=0 if axis is None else _astuple(axis)).astype(
            utils.numpy_dtype(output_type))))

argmin = utils.copy_docstring(
    tf.math.argmin,
    lambda input, axis=None, output_type=tf.int64, name=None: (  # pylint: disable=g-long-lambda
        np.argmin(input, axis=0 if axis is None else _astuple(axis)).astype(
            utils.numpy_dtype(output_type))))

asin = utils.copy_docstring(tf.math.asin, lambda x, name=None: np.arcsin(x))

asinh = utils.copy_docstring(tf.math.asinh, lambda x, name=None: np.arcsinh(x))

atan = utils.copy_docstring(tf.math.atan, lambda x, name=None: np.arctan(x))

atan2 = utils.copy_docstring(tf.math.atan2,
                             lambda y, x, name=None: np.arctan2(y, x))

atanh = utils.copy_docstring(tf.math.atanh, lambda x, name=None: np.arctanh(x))

bessel_i0 = utils.copy_docstring(tf.math.bessel_i0,
                                 lambda x, name=None: scipy_special.i0(x))

bessel_i0e = utils.copy_docstring(tf.math.bessel_i0e,
                                  lambda x, name=None: scipy_special.i0e(x))

bessel_i1 = utils.copy_docstring(tf.math.bessel_i1,
                                 lambda x, name=None: scipy_special.i1(x))
    # store the x grid for this segment
    if i > 0:
        xSol[i, :] = xFinal + x[:-1]
    xFinal += x[-1]

    # save this segment's solution and residual
    ySol[i, :] = y(x, xi, IC)[:-1]
    res[i, :] = np.abs(L(xi, x, IC))[:-1]

    # update initial conditions
    IC['y0'] = y(x, xi, IC)[-1]
    IC['y0p'] = yp(x, xi, IC)[-1]

## compute the error: ******************************************************************************
A = np.sqrt(y0**2 + (y0p / w)**2)
Phi = np.arctan(-y0p / w / y0)
yTrue = A * np.cos(w * xSol + Phi)
err = np.abs(ySol - yTrue)

## print status of run: ****************************************************************************
print('TFC least-squares time[s]: ' + '\t' + str(time.sum()))
print('Max residual:' + '\t' * 3 + str(res.max()))
print('Max error:' + '\t' * 3 + str(err.max()))

## plotting: ***************************************************************************************
# figure 1: solution
p1 = MakePlot(r'$x$', r'$y(t)$')
p1.ax[0].plot(xSol.flatten(), ySol.flatten())
p1.ax[0].grid(True)
p1.PartScreen(7., 6.)
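
# Quick check of the analytic solution used for the error above (a sketch
# with made-up constants, valid for y0 > 0): y = A * cos(w * x + Phi)
# satisfies the initial conditions y(0) = y0 and y'(0) = y0p.
import numpy as np

w_chk, y0_chk, y0p_chk = 2.0, 1.0, 0.5
A_chk = np.sqrt(y0_chk**2 + (y0p_chk / w_chk)**2)
Phi_chk = np.arctan(-y0p_chk / w_chk / y0_chk)
assert np.isclose(A_chk * np.cos(Phi_chk), y0_chk)            # y(0) = y0
assert np.isclose(-A_chk * w_chk * np.sin(Phi_chk), y0p_chk)  # y'(0) = y0p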
def arctan(x):
    if isinstance(x, JaxArray):
        x = x.value
    return JaxArray(jnp.arctan(x))
def _cdf(self, x):
    return 0.5 + 1.0 / np.pi * np.arctan(x)
def _pdf(self, x, a, b):
    return np.reciprocal((1 + x * x) * (np.arctan(b) - np.arctan(a)))
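
# A quick check (sketch with made-up bounds) that the truncated Cauchy
# density above integrates to 1 on [a, b]: the antiderivative of
# 1 / (1 + x^2) is arctan, so the arctan difference in the denominator is
# exactly the normalizing constant.
import numpy as np
from scipy.integrate import quad

a_chk, b_chk = -1.0, 3.0
pdf = lambda x: np.reciprocal(
    (1 + x * x) * (np.arctan(b_chk) - np.arctan(a_chk)))
total, _ = quad(pdf, a_chk, b_chk)
assert np.isclose(total, 1.0)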
def _vwn_f(s, p):
    return (0.5 * p[1] * (2.0 * np.log(s)
                          + _vwn_a(p) * np.log(_vwn_x(s, p))
                          - _vwn_b(p) * np.log(_vwn_y(s, p))
                          + _vwn_c(p) * np.arctan(_vwn_z(s, p))))
def __call__(self, x, logsnr, y, *, train):
    B, H, W, _ = x.shape  # pylint: disable=invalid-name
    assert H == W
    assert x.dtype in (jnp.float32, jnp.float64)
    assert logsnr.shape == (B,) and logsnr.dtype in (jnp.float32, jnp.float64)
    num_resolutions = len(self.ch_mult)
    ch = self.ch
    emb_ch = self.emb_ch

    # Timestep embedding
    if self.logsnr_input_type == 'linear':
        logging.info('LogSNR representation: linear')
        logsnr_input = (logsnr - self.logsnr_scale_range[0]) / (
            self.logsnr_scale_range[1] - self.logsnr_scale_range[0])
    elif self.logsnr_input_type == 'sigmoid':
        logging.info('LogSNR representation: sigmoid')
        logsnr_input = nn.sigmoid(logsnr)
    elif self.logsnr_input_type == 'inv_cos':
        logging.info('LogSNR representation: inverse cosine')
        logsnr_input = (jnp.arctan(jnp.exp(-0.5 * jnp.clip(logsnr, -20., 20.)))
                        / (0.5 * jnp.pi))
    else:
        raise NotImplementedError(self.logsnr_input_type)

    emb = get_timestep_embedding(logsnr_input, embedding_dim=ch, max_time=1.)
    emb = nn.Dense(features=emb_ch, name='dense0')(emb)
    emb = nn.Dense(features=emb_ch, name='dense1')(nonlinearity(emb))
    assert emb.shape == (B, emb_ch)

    # Class embedding
    assert self.num_classes >= 1
    if self.num_classes > 1:
        logging.info('conditional: num_classes=%d', self.num_classes)
        assert y.shape == (B,) and y.dtype == jnp.int32
        y_emb = jax.nn.one_hot(y, num_classes=self.num_classes, dtype=x.dtype)
        y_emb = nn.Dense(features=emb_ch, name='class_emb')(y_emb)
        assert y_emb.shape == emb.shape == (B, emb_ch)
        emb += y_emb
    else:
        logging.info('unconditional: num_classes=%d', self.num_classes)
        del y

    # Downsampling
    hs = [nn.Conv(
        features=ch, kernel_size=(3, 3), strides=(1, 1), name='conv_in')(x)]
    for i_level in range(num_resolutions):
        # Residual blocks for this resolution
        for i_block in range(self.num_res_blocks):
            h = ResnetBlock(
                out_ch=ch * self.ch_mult[i_level],
                dropout=self.dropout,
                name=f'down_{i_level}.block_{i_block}')(
                    hs[-1], emb=emb, deterministic=not train)
            if h.shape[1] in self.attn_resolutions:
                h = AttnBlock(
                    num_heads=self.num_heads,
                    head_dim=self.head_dim,
                    name=f'down_{i_level}.attn_{i_block}')(h)
            hs.append(h)
        # Downsample
        if i_level != num_resolutions - 1:
            hs.append(self._downsample(
                hs[-1], name=f'down_{i_level}.downsample', emb=emb,
                train=train))

    # Middle
    h = hs[-1]
    h = ResnetBlock(dropout=self.dropout, name='mid.block_1')(
        h, emb=emb, deterministic=not train)
    h = AttnBlock(
        num_heads=self.num_heads, head_dim=self.head_dim, name='mid.attn_1')(h)
    h = ResnetBlock(dropout=self.dropout, name='mid.block_2')(
        h, emb=emb, deterministic=not train)

    # Upsampling
    for i_level in reversed(range(num_resolutions)):
        # Residual blocks for this resolution
        for i_block in range(self.num_res_blocks + 1):
            h = ResnetBlock(
                out_ch=ch * self.ch_mult[i_level],
                dropout=self.dropout,
                name=f'up_{i_level}.block_{i_block}')(
                    jnp.concatenate([h, hs.pop()], axis=-1),
                    emb=emb, deterministic=not train)
            if h.shape[1] in self.attn_resolutions:
                h = AttnBlock(
                    num_heads=self.num_heads,
                    head_dim=self.head_dim,
                    name=f'up_{i_level}.attn_{i_block}')(h)
        # Upsample
        if i_level != 0:
            h = self._upsample(
                h, name=f'up_{i_level}.upsample', emb=emb, train=train)
    assert not hs

    # End
    h = nonlinearity(Normalize(name='norm_out')(h))
    h = nn.Conv(
        features=self.out_ch,
        kernel_size=(3, 3),
        strides=(1, 1),
        kernel_init=nn.initializers.zeros,
        name='conv_out')(h)
    assert h.shape == (*x.shape[:3], self.out_ch)
    return h
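
# Sketch of the 'inv_cos' logsnr representation above: assuming the standard
# cosine schedule logsnr(t) = -2 * log(tan(pi * t / 2)) (an assumption; the
# schedule itself is not shown in this snippet), the mapping inverts it and
# recovers t in (0, 1).
import jax.numpy as jnp

t = jnp.linspace(0.05, 0.95, 19)
logsnr = -2.0 * jnp.log(jnp.tan(jnp.pi * t / 2))
t_rec = (jnp.arctan(jnp.exp(-0.5 * jnp.clip(logsnr, -20., 20.)))
         / (0.5 * jnp.pi))
assert jnp.allclose(t_rec, t, atol=1e-4)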
def radial_hyperbolic_compactification(x):
    """y = (2/pi) * arctan(|x|) * x/|x|. Sends |x| = 0 to |y| = 0 and
    |x| = infinity to |y| = 1."""
    r = jnp.maximum(jnp.linalg.norm(x), 1e-4)
    return (jnp.arctan(r) * 2 / jnp.pi) * x / r
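
# Sketch check of the compactification: the direction of x is preserved and
# a far-away point lands near the unit sphere.
import jax.numpy as jnp

x_far = jnp.array([3.0, 4.0]) * 1e6
y_far = radial_hyperbolic_compactification(x_far)
assert jnp.isclose(jnp.linalg.norm(y_far), 1.0, atol=1e-3)
assert jnp.allclose(y_far / jnp.linalg.norm(y_far),
                    x_far / jnp.linalg.norm(x_far))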
def _logpdf(self, x, a, b):
    # trunc_pdf(x) = pdf(x) / (cdf(b) - cdf(a))
    #              = 1 / (1 + x^2) / (arctan(b) - arctan(a))
    normalizer = np.log(np.arctan(b) - np.arctan(a))
    return -(np.log(1 + x * x) + normalizer)
def arctan(a: Numeric):
    return jnp.arctan(a)
def _sf(self, x):
    return 0.5 - 1.0 / np.pi * np.arctan(x)
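
# The Cauchy one-liners in this section are mutually consistent (quick NumPy
# check): _sf(x) == 1 - _cdf(x) for the standard Cauchy, and the half-Cauchy
# CDF 2/pi * arctan(x) equals 2 * (cauchy_cdf(x) - 1/2) for x >= 0.
import numpy as np

xs = np.linspace(0.0, 10.0, 101)
half_cauchy_cdf = 2.0 / np.pi * np.arctan(xs)
cauchy_cdf = 0.5 + 1.0 / np.pi * np.arctan(xs)
cauchy_sf = 0.5 - 1.0 / np.pi * np.arctan(xs)
assert np.allclose(cauchy_sf, 1.0 - cauchy_cdf)
assert np.allclose(half_cauchy_cdf, 2.0 * (cauchy_cdf - 0.5))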
def arctan_(u):
    return jnp.arctan(u)
def log_prob(self, value):
    # pi / 2 is arctan of self.high when that arg is supported
    normalize_term = np.log(np.pi / 2 + np.arctan(self.base_loc))
    return -np.log1p((value - self.base_loc)**2) - normalize_term
def standard_cauchy_cdf(c):
    """Cumulative distribution function of the standard Cauchy."""
    return np.arctan(c) / np.pi + 0.5
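
# Cross-check against SciPy (an external library check, not part of the
# source above): the formula matches scipy.stats.cauchy.cdf.
import numpy as np
from scipy import stats

c = np.linspace(-5.0, 5.0, 11)
assert np.allclose(standard_cauchy_cdf(c), stats.cauchy.cdf(c))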