class LevenbergMarquardtBR(Optimizer):
    """Levenberg-Marquardt optimizer with Bayesian regularization."""

    params: LevenbergMarquardtParams = LevenbergMarquardtParams()
    alpha: Float = np.float64(0.0)
    max_iters: Int = 500
    update: Callable = lambda a, b, c, t: t
def blackman_kernel(dims, M):
    n = M - 2
    apply = jax.vmap(lambda ns: blackman(M, norm(np.float64(ns)) / 2))
    inds = np.stack(
        np.meshgrid(*(np.arange(1 - n, n, 2) for _ in range(dims))), axis=-1
    )
    kernel = apply(inds.reshape(-1, dims))
    return (kernel / kernel.sum()).reshape(*(n for _ in range(dims)))
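# Hypothetical usage sketch for blackman_kernel (not from the original source).
# It assumes `np` is jax.numpy and supplies illustrative stand-ins for the
# module's `blackman` window and `norm` helpers; the real definitions may differ.
import jax
import jax.numpy as np
from jax.numpy.linalg import norm


def blackman(M, n):
    # standard Blackman window evaluated at a (possibly fractional) offset n
    return (0.42
            - 0.5 * np.cos(2 * np.pi * n / (M - 1))
            + 0.08 * np.cos(4 * np.pi * n / (M - 1)))


k = blackman_kernel(dims=2, M=5)
assert k.shape == (3, 3)          # (M - 2) points per dimension
assert np.allclose(k.sum(), 1.0)  # normalized to unit mass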
def initialize(params, x, y):
    e = error(params, x, y)
    mu = optimizer.params.mu_0
    gamma = np.float64(params.size)
    beta = (x.size / m)**2 * (x.size - gamma) / sum_squares(e)
    beta = np.where(beta < 0, 1.0, beta)
    alpha = gamma / sum_squares(params)
    return LevenbergMarquardtBRState((x, y), params, e, np.inf, mu, alpha / beta)
def f(key):
    def body_fn(uk):
        key = uk[1]
        u = random.uniform(key, (), dtype=np.float64)
        key, _ = random.split(key)
        return u, key

    u, _ = lax.while_loop(lambda uk: uk[0] > 0.5, body_fn, (np.float64(1.), key))
    return u
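# Hypothetical usage sketch (an assumption, not from the source): the loop
# keeps redrawing a uniform until it is no longer > 0.5, so `f` always returns
# a value <= 0.5. Assumes `np` is jax.numpy, `lax` is jax.lax, `random` is
# jax.random, and 64-bit mode is enabled so the float64 carry keeps its dtype.
from jax import config
config.update("jax_enable_x64", True)

import jax.numpy as np
from jax import lax, random

u = f(random.PRNGKey(0))
assert u <= 0.5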
def test_while_loop(self, jit):
    @partial(_maybe_jit, jit)
    def count_to(N):
        return lax.while_loop(lambda x: x < N, lambda x: x + 1.0, 0.0)

    with enable_x64():
        self.assertArraysEqual(count_to(10), jnp.float64(10), check_dtypes=True)

    with disable_x64():
        self.assertArraysEqual(count_to(10), jnp.float32(10), check_dtypes=True)
def test_while_loop(self, jit):
    if jit == "cpp" and not config.omnistaging_enabled:
        self.skipTest("cpp_jit requires omnistaging")

    @partial(_maybe_jit, jit)
    def count_to(N):
        return lax.while_loop(lambda x: x < N, lambda x: x + 1.0, 0.0)

    with enable_x64():
        self.assertArraysEqual(count_to(10), jnp.float64(10), check_dtypes=True)

    with disable_x64():
        self.assertArraysEqual(count_to(10), jnp.float32(10), check_dtypes=True)
def __init__(self, *, sigma, period, Q0, dQ, f):
    self.sigma = np.float64(sigma)
    self.period = np.float64(period)
    self.Q0 = np.float64(Q0)
    self.dQ = np.float64(dQ)
    self.f = np.float64(f)

    self.amp = self.sigma**2 / (1 + self.f)

    # One term with a period of period
    Q1 = 0.5 + self.Q0 + self.dQ
    w1 = 4 * np.pi * Q1 / (self.period * np.sqrt(4 * Q1**2 - 1))
    S1 = self.amp / (w1 * Q1)

    # Another term at half the period
    Q2 = 0.5 + self.Q0
    w2 = 8 * np.pi * Q2 / (self.period * np.sqrt(4 * Q2**2 - 1))
    S2 = self.f * self.amp / (w2 * Q2)

    super().__init__(
        UnderdampedSHOTerm(S0=S1, w0=w1, Q=Q1),
        UnderdampedSHOTerm(S0=S2, w0=w2, Q=Q2),
    )
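# Illustrative worked check (an addition, not from the source) of the primary
# SHO component this constructor builds, using plain numpy in place of the
# module's `np`. The parameter values below are arbitrary examples.
import numpy

sigma, period, Q0, dQ, f = 1.0, 2 * numpy.pi, 1.0, 0.5, 0.5
amp = sigma**2 / (1 + f)                                        # ~0.6667
Q1 = 0.5 + Q0 + dQ                                              # 2.0
w1 = 4 * numpy.pi * Q1 / (period * numpy.sqrt(4 * Q1**2 - 1))   # ~1.0328
S1 = amp / (w1 * Q1)                                            # ~0.3227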
def C(n: int) -> np.ndarray:
    r"""The combinatorial matrix :math:`\mathbf C` defined in the paper's appendix.

    Args:
        n: the number of sampled haplotypes :math:`n`

    Returns:
        :math:`(n-1)\times(n-1)` matrix
    """
    W1 = np.zeros((n - 1, n - 1))
    W2 = np.zeros((n - 1, n - 1))
    b = np.arange(1, n - 1 + 1)
    # j = 2
    W1 = W1.at[:, 0].set(6 / (n + 1))
    W2 = W2.at[:, 0].set(0)
    # j = 3
    W1 = W1.at[:, 1].set(10 * (5 * n - 6 * b - 4) / (n + 2) / (n + 1))
    W2 = W2.at[:, 1].set((20 * (n - 2)) / (n + 1) / (n + 2))
    for col in range(n - 3):
        # this cast is crucial for floating point precision
        j = np.float64(col + 2)
        # procedurally generated by Zeilberger's algorithm in Mathematica
        W1 = W1.at[:, col + 2].set(-((-((-1 + j) * (1 + j)**2 * (3 + 2 * j) * (j - n) * (4 + 2 * j - 2 * b * j + j**2 - b * j**2 + 4 * n + 2 * j * n + j**2 * n) * W1[:, col]) - (-1 + 2 * j) * (3 + 2 * j) * (-4 * j - 12 * b * j - 4 * b**2 * j - 6 * j**2 - 12 * b * j**2 - 2 * b**2 * j**2 - 4 * j**3 + 4 * b**2 * j**3 - 2 * j**4 + 2 * b**2 * j**4 + 4 * n + 2 * j * n - 6 * b * j * n + j**2 * n - 9 * b * j**2 * n - 2 * j**3 * n - 6 * b * j**3 * n - j**4 * n - 3 * b * j**4 * n + 4 * n**2 + 6 * j * n**2 + 7 * j**2 * n**2 + 2 * j**3 * n**2 + j**4 * n**2) * W1[:, col + 1]) / (j**2 * (2 + j) * (-1 + 2 * j) * (1 + j + n) * (3 + b + j**2 - b * j**2 + 3 * n + j**2 * n))))  # noqa: E501
        W2 = W2.at[:, col + 2].set(((-1 + j) * (1 + j) * (2 + j) * (3 + 2 * j) * (j - n) * (1 + j - n) * (1 + j + n) * W2[:, col] + (-1 + 2 * j) * (3 + 2 * j) * (1 + j - n) * (j + n) * (2 - j - 2 * b * j - j**2 - 2 * b * j**2 + 2 * n + j * n + j**2 * n) * W2[:, col + 1]) / ((-1 + j) * j * (2 + j) * (-1 + 2 * j) * (j - n) * (j + n) * (1 + j + n)))  # noqa: E501
    return W1 - W2
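# Quick shape check (an added sketch, not part of the original module): the
# .at[...] updates above imply `np` is jax.numpy, and the result is an
# (n-1) x (n-1) matrix.
import jax.numpy as np

mat = C(10)
assert mat.shape == (9, 9)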
def testIssue758(self):
    # code from https://github.com/google/jax/issues/758
    # this is more of a scan + jacfwd/jacrev test, but it lives here to use the
    # optimizers.py code

    def harmonic_bond(conf, params):
        return np.sum(conf * params)

    opt_init, opt_update, get_params = optimizers.sgd(5e-2)

    x0 = onp.array([0.5], dtype=onp.float64)
    params = onp.array([0.3], dtype=onp.float64)

    def minimize_structure(test_params):
        energy_fn = functools.partial(harmonic_bond, params=test_params)
        grad_fn = grad(energy_fn, argnums=(0,))
        opt_state = opt_init(x0)

        def apply_carry(carry, _):
            i, x = carry
            g = grad_fn(get_params(x))[0]
            new_state = opt_update(i, g, x)
            new_carry = (i + 1, new_state)
            return new_carry, _

        carry_final, _ = lax.scan(apply_carry, (0, opt_state), np.zeros((75, 0)))
        trip, opt_final = carry_final
        assert trip == 75
        return opt_final

    initial_params = np.float64(0.5)
    minimize_structure(initial_params)

    def loss(test_params):
        opt_final = minimize_structure(test_params)
        return 1.0 - get_params(opt_final)[0]

    loss_opt_init, loss_opt_update, loss_get_params = optimizers.sgd(5e-2)

    J1 = jacrev(loss, argnums=(0,))(initial_params)
    J2 = jacfwd(loss, argnums=(0,))(initial_params)
    self.assertAllClose(J1, J2, check_dtypes=True, rtol=1e-6)
def EXP(w_raw):
    """
    Estimate free energy difference using exponential averaging

    Parameters
    ----------
    w_raw : np.ndarray, float, (N)
        work for N frames; entries that are None are skipped

    Returns
    -------
    deltaF : scalar, float
        free energy difference
    """
    # drop missing frames before averaging
    w = []
    for ww in w_raw:
        if ww is not None:
            w.append(ww)

    w = jnp.array(w)
    T = jnp.float64(jnp.size(w))
    deltaF = -(logsumexp(-w) - jnp.log(T))
    return deltaF
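# Hypothetical usage sketch: apply EXP to a handful of work values where one
# frame is missing (None) and gets dropped. Assumes `jnp` is jax.numpy and
# `logsumexp` comes from jax.scipy.special, as the function body suggests.
import jax.numpy as jnp
from jax.scipy.special import logsumexp

dF = EXP([1.2, None, 0.8, 1.5])
print(float(dF))  # single exponential-averaging free energy estimate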
def __init__(self, *, sigma, rho, eps=0.01):
    self.sigma = np.float64(sigma)
    self.rho = np.float64(rho)
    self.eps = np.float64(eps)
Narray = [10]
prec = 1E-4
Dinit = 8

for N in Narray:
    print('Length of the chain = {N}\n'.format(N=N))
    configarray = constructconfigarray(Dinit)
    for config in configarray:
        numofitarray = []
        minarray = []
        precisionarray = []
        timearray = []
        bondarray = []
        t1 = time.time()
        test_finite_DMRG_init(N, config, Dinit)
        print("{x} minutes".format(
            x=jnp.around(float(time.time() - t1) / 60, decimals=2)))
        print("Bond array: {x}".format(x=bondarray))
        print("Minimum array: {x}".format(x=jnp.float64(minarray)))
        print("Precision array (%): {x}".format(x=jnp.float64(precisionarray)))
        print("Time array (minutes): {x}".format(x=jnp.float64(timearray)))
        print("Number of iterations (at every bond): {x}".format(
            x=jnp.int32(numofitarray)))
        print(
            "*********************************************************************************\n"
        )
def __init__(self, term, delta):
    self.term = term
    self.delta = np.float64(delta)
def __init__(self, *, a, c):
    self.a = np.float64(a)
    self.c = np.float64(c)
fig.subplots_adjust(left=0.06, bottom=0.08, right=0.95, top=0.95,
                    wspace=0.07, hspace=0.25)
# plt.show()
# plt.savefig('test.pdf')
# print(g2)

# save figures
fig_save = file_save_plt()
if fig_save is not None:
    with PdfPages(fig_save) as pdf:
        pdf.savefig()

the_fits.to_csv(file_save(), sep='\t', index=False)
the_error.to_csv(file_save_err(), sep='\t', index=False)

# Saving Stark data
P = np.float64(the_fits['Peak Position'])
E = np.float64(the_error['Peak Position err'])
stark_data = '%s\t%.6f\t%.6f' % (volt, P, E)
file_name = "Stark_%s_%s_%s.txt" % (sel2, sel, sel3)
with open(file_name, 'a+') as fn:
    fn.write(stark_data + '\n')

exit()
def __init__(self, *, a, b, c, d):
    self.a = np.float64(a)
    self.b = np.float64(b)
    self.c = np.float64(c)
    self.d = np.float64(d)
def __init__(self, *, eps=1e-5, **kwargs):
    self.eps = np.float64(eps)