def fixture_measure_params(
    measure_name: str,
    input_dim: int,
    cov_diagonal: bool,
    rng: np.random.Generator,
) -> Dict:
    params = {"name": measure_name}

    if measure_name == "gauss":
        # set up mean and covariance
        if input_dim == 1:
            mean = rng.normal(0, 1)
            cov = rng.uniform(0.5, 1.5)
        else:
            mean = rng.normal(0, 1, size=(input_dim, 1))
            if cov_diagonal:
                cov = np.diag(rng.uniform(0.5, 1.5, size=(input_dim,)))
            else:
                mat = rng.normal(0, 1, size=(input_dim, input_dim))
                cov = mat @ mat.T
        params["mean"] = mean
        params["cov"] = cov
    elif measure_name == "lebesgue":
        # set up bounds
        rv = rng.uniform(0, 1, size=(input_dim, 2))
        domain = (rv[:, 0] - 1.0, rv[:, 1] + 1.0)
        params["domain"] = domain
        params["normalized"] = True

    return params

def sample_twocoin(
    w1: float,
    w2: float,
    c1: Iterator[Tuple[bool, Any]],
    c2: Iterator[Tuple[bool, Any]],
    pr_portkey: float = 0,
    prop0: Any = None,
    ome: np.random.Generator = np.random.default_rng(),
    max_props: int = int(1e5),
) -> (bool, bool, Any):
    """Sample from a coin with probability w1 * p1 / (w1 * p1 + w2 * p2) using two coins
    with probabilities p1 and p2.

    :param w1: log-weight of coin 1
    :param w2: log-weight of coin 2
    :param c1: generator yielding heads with probability p1, along with a proposal
    :param c2: generator yielding heads with probability p2, along with a proposal
    :param pr_portkey: probability of escaping with coin 2
    :param prop0: proposal to return on escape
    :param ome: random generator
    :param max_props: maximum number of proposals
    :return: coin flip with probability of heads w1 * p1 / (w1 * p1 + w2 * p2), escape
        indicator, and the accepted proposal
    :raise: BudgetConstraintError

    >>> # draw iid samples
    >>> nsamples = int(1e4)
    >>> w1, w2 = np.random.uniform(size=2)
    >>> p1, p2 = np.random.uniform(size=2)
    >>> c1 = ((np.random.uniform() < p1, None) for _ in count())
    >>> c2 = ((np.random.uniform() < p2, None) for _ in count())
    >>> sample = [sample_twocoin(np.log(w1), np.log(w2), c1, c2)[0] for _ in range(nsamples)]
    >>> # test
    >>> from scipy.stats import binom_test
    >>> alpha = 1e-2
    >>> alpha < binom_test(np.sum(sample), len(sample), w1 * p1 / (w1 * p1 + w2 * p2))
    True
    """
    v1 = w1 - np.logaddexp(w1, w2)
    for _ in range(max_props):
        # attempt proposal from coin 1
        if np.log(ome.uniform()) < v1:
            success, prop = next(c1)
            if success:
                return True, False, prop
        # attempt proposal from coin 2
        else:
            success, prop = next(c2)
            if success:
                return False, False, prop
        # attempt escape or abort pathological proposal
        if ome.uniform() < pr_portkey:
            return False, True, prop0
    # prevent infinite loop
    else:
        raise BudgetConstraintError

def test_nonbonded_interaction_group_zero_interactions(rng: np.random.Generator):
    num_atoms = 33
    num_atoms_ligand = 15
    beta = 2.0
    lamb = 0.1
    cutoff = 1.1
    box = 10.0 * np.eye(3)
    conf = rng.uniform(0, 1, size=(num_atoms, 3))
    ligand_idxs = rng.choice(num_atoms, size=(num_atoms_ligand,), replace=False).astype(np.int32)

    # shift ligand atoms in x by twice the cutoff
    conf[ligand_idxs, 0] += 2 * cutoff

    params = rng.uniform(0, 1, size=(num_atoms, 3))

    potential = NonbondedInteractionGroup(
        ligand_idxs,
        np.zeros(num_atoms, dtype=np.int32),
        np.zeros(num_atoms, dtype=np.int32),
        beta,
        cutoff,
    )

    du_dx, du_dp, du_dl, u = potential.unbound_impl(np.float64).execute(conf, params, box, lamb)

    assert (du_dx == 0).all()
    assert (du_dp == 0).all()
    assert du_dl == 0
    assert u == 0

def sample_brownbr_min(
    fin_t: float,
    init_x: float,
    fin_x: float,
    lb_min_x: Optional[float] = None,
    ub_min_x: Optional[float] = None,
    ome: np.random.Generator = np.random.default_rng(),
) -> (float, float):
    """Sample the minimum value and its associated time from a Brownian bridge process.

    :param fin_t: terminal time of the bridge
    :param init_x: initial value
    :param fin_x: terminal value
    :param lb_min_x: lower bound on the minimum value
    :param ub_min_x: upper bound on the minimum value
    :param ome: random generator
    :return: time of the minimum and the minimum value

    >>> # generate fixture
    >>> n = int(1e5)
    >>> fin_t = np.random.uniform()
    >>> init_x = np.random.normal()
    >>> fin_x = np.random.normal(init_x, np.sqrt(fin_t))
    >>> # draw iid samples
    >>> sample = np.array([sample_brownbr_min(fin_t, init_x, fin_x)[0] for _ in range(n)])
    >>> # test against discrete approximation
    >>> from scipy.stats import epps_singleton_2samp
    >>> from ctbayes.sdelib.paths import sample_brownbr
    >>> alpha = 1e-2
    >>> res = 1 / n
    >>> t = np.arange(res, fin_t, res)
    >>> new_x = np.array([sample_brownbr(t, fin_t, init_x, fin_x) for _ in range(n)])
    >>> ref_sample = t[new_x.argmin(1)]
    >>> alpha < epps_singleton_2samp(sample, ref_sample)[0]
    True
    """
    if lb_min_x is None:
        lb_min_x = -np.inf
    if ub_min_x is None:
        ub_min_x = min(init_x, fin_x)
    assert 0 < fin_t
    assert lb_min_x < ub_min_x <= min(init_x, fin_x)

    # simulate minimum value
    min_x = ppf_brownbr_min(ome.uniform(), fin_t, init_x, fin_x, lb_min_x, ub_min_x)

    # simulate hitting time of minimum value
    par1 = (fin_x - min_x) ** 2 / (2 * fin_t)
    par2 = (min_x - init_x) ** 2 / (2 * fin_t)
    par3 = np.sqrt(par1 / par2)
    if ome.uniform() < 1 / (1 + par3):
        min_t = fin_t / (1 + ome.wald(par3, 2 * par1))
    else:
        min_t = fin_t / (1 + 1 / ome.wald(1 / par3, 2 * par2))
    return min_t, min_x

def sample_parameters(
    self, random: np.random.Generator, times: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
    num_samples = times.shape[1]
    log_period = random.uniform(
        self.log_period_range[0], self.log_period_range[1], num_samples)
    log_semiamp = random.uniform(
        self.log_semiamp_range[0], self.log_semiamp_range[1], num_samples)
    phase = random.uniform(-np.pi, np.pi, num_samples)

    if self.ecc_beta_params is None:
        params = np.concatenate(
            (log_semiamp[:, None], log_period[:, None], phase[:, None]),
            axis=1,
        )
        mod = self.compute_fiducial_model(
            times,
            semiamp=np.exp(log_semiamp),
            period=np.exp(log_period),
            phase=phase,
        )
    else:
        ecc = random.beta(self.ecc_beta_params[0], self.ecc_beta_params[1], num_samples)
        omega = random.uniform(-np.pi, np.pi, num_samples)
        params = np.concatenate(
            (
                log_semiamp[:, None],
                log_period[:, None],
                phase[:, None],
                ecc[:, None],
                omega[:, None],
            ),
            axis=1,
        )
        mod = self.compute_fiducial_model(
            times,
            semiamp=np.exp(log_semiamp),
            period=np.exp(log_period),
            phase=phase,
            ecc=ecc,
            omega=omega,
        )

    return params, mod

def sample_raw_skeleton(
    fin_t: float,
    init_x: float,
    fin_x: float,
    bounds_x: (float, float),
    ome: np.random.Generator,
) -> Skeleton:
    """
    :param fin_t: terminal time of the bridge
    :param init_x: initial value
    :param fin_x: terminal value
    :param bounds_x: lower and upper bound on the path
    :param ome: random generator
    :return: sampled skeleton
    """
    lo_anchor, lo_sector, hi_sector = layers.sample_anchor(fin_t, init_x, fin_x, *bounds_x, ome=ome)
    tight_t, tight_x, hit_x, loose_x, hit_i = layers.sample_edges(
        fin_t, init_x, fin_x, lo_sector, hi_sector, ome=ome)
    skel = Skeleton(
        np.array([0, tight_t, fin_t]),
        np.array([init_x, tight_x, fin_x]),
        tight_x, loose_x, hit_x, hit_i)
    if ome.uniform() < .5:
        return skel
    return flip_skeleton(skel)

def rand_floats(
    gen: np.random.Generator,
    low: float = 0.0,
    high: float = 1.0,
    size: Optional[int] = None,
    dtype: np.dtype = np.float64,
    include_invalid: bool = False,
) -> np.ndarray:
    """
    Generate a random array of floating-point values with the specified length and dtype.

    The elements of the array are drawn from the uniform distribution over the
    range ``[low, high)``.
    """
    # Generate a random array for the given floating-point type.
    arr = gen.uniform(low=low, high=high, size=size).astype(dtype)

    # If we're including invalid values (NaN for floating-point types), draw a random integer
    # indicating how many elements we'll set to NaN; then generate a random integer array of
    # that length whose elements will be a fancy index we'll use to assign NaNs into the
    # generated floating-point array.
    # NOTE: The nancount we generate is approximate because we don't enforce that all the
    # elements of the fancy index are unique.
    if include_invalid:
        nancount = gen.integers(0, size, endpoint=True)
        nan_indices = gen.integers(0, size, size=nancount)
        arr[nan_indices] = np.nan

    return arr

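
# A minimal usage sketch for rand_floats; the seed and size below are illustrative.
# Note that include_invalid presumes a concrete integer `size` (gen.integers needs
# an upper bound), so one is passed explicitly.
_example_gen = np.random.default_rng(42)
_example_arr = rand_floats(_example_gen, low=-1.0, high=1.0, size=8, include_invalid=True)
# _example_arr: shape (8,) float64 in [-1, 1), with a random subset set to NaN
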
def sample_times(
    self,
    random: np.random.Generator,
    num_samples: int,
    max_nb_transits: int,
) -> np.ndarray:
    return random.uniform(0, 668.0, (max_nb_transits, num_samples))

def flip_hidden_precoin(
    thi: np.ndarray,
    z_nil: sde_seed.Partition,
    h_nil: types.Anchorage,
    z_prime: sde_seed.Partition,
    h_prime: types.Anchorage,
    mod: Model,
    ome: np.random.Generator,
) -> bool:
    weight_nil = eval_hidden_weight(thi, z_nil, h_nil, mod)
    weight_prime = eval_hidden_weight(thi, z_prime, h_prime, mod)
    return np.log(ome.uniform()) < weight_prime - np.logaddexp(weight_prime, weight_nil)

def mutate(
    population: np.ndarray,
    mut_prob: float,
    rand_generator: np.random.Generator = _DEFAULT_RAND_GEN,
) -> np.ndarray:
    """
    Inputs:
    population -- array of chromosomes
        -- each chromosome must be represented as a 1D numpy boolean array
    mut_prob -- positive real number -- mutation rate
        -- probability that a bit will be inverted
    rand_generator -- instance of Numpy Random Generator
        -- a Generator with the Numpy default BitGenerator (PCG64) and None as
           a seed value is used as default

    Mutation can occur independently at every bit along each chromosome with
    uniform probability.

    Returns a nested (2D) numpy boolean array: the entire population of
    chromosomes (solution candidates) with randomly altered bits.
    """
    bits_to_mutate = rand_generator.uniform(
        low=0.0, high=1.0, size=population.shape) < mut_prob
    # Change only specific bits in chromosomes, using XOR
    return population ^ bits_to_mutate

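
# A minimal usage sketch for mutate on a small random boolean population; the
# shapes and rate are illustrative.
_rng = np.random.default_rng(0)
_population = _rng.uniform(size=(4, 8)) < 0.5  # 4 chromosomes of 8 bits each
_mutated = mutate(_population, mut_prob=0.05, rand_generator=_rng)
# on average 0.05 * 4 * 8 = 1.6 bits are flipped per call
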
def gravity_model_contact_events(agents: List[Agent], positions: np.ndarray,
                                 env: simpy.Environment, rng: np.random.Generator):
    tree = KDTree(data=positions)
    close_pairs = list(tree.query_pairs(r=contact_distance_upper_bound))
    inverse_distances = np.array(
        [np.linalg.norm(positions[idx1] - positions[idx2]) ** -contact_rate_gravity_exponent
         for idx1, idx2 in close_pairs])
    inverse_distances /= inverse_distances.sum()
    while True:
        choices = rng.choice(a=close_pairs, p=inverse_distances, size=len(agents)).tolist()
        for choice in choices:
            yield env.timeout(delay=rng.exponential(scale=1 / len(agents) / contact_rate_per_individual))
            contact_agents = [agents[idx] for idx in choice
                              if not agents[idx].state.symptomatic()
                              or rng.uniform() > p_symptomatic_individual_isolates]
            if len(contact_agents) < 2:
                # Symptomatic self-isolation means this is no longer a contact event and
                # doesn't need recording. Skip to the next event.
                continue
            infected = get_infected(contact_agents, rng=rng)
            for i in infected:
                env.process(generator=infection_events(env=env, infected=i, rng=rng))

def test_nonbonded_pair_list_interpolated_correctness(
    ixn_group_size,
    precision,
    rtol,
    atol,
    cutoff,
    beta,
    lamb,
    example_nonbonded_params,
    example_conf,
    example_box,
    rng: np.random.Generator,
):
    "Compares with jax reference implementation, with parameter interpolation."

    num_atoms, _ = example_conf.shape
    params = gen_params(example_nonbonded_params, rng)

    # randomly select 2 interaction groups and construct all pairwise interactions
    atom_idxs = rng.choice(
        num_atoms,
        size=(2, ixn_group_size),
        replace=False,
    ).astype(np.int32)

    pair_idxs = np.stack(np.meshgrid(atom_idxs[0, :], atom_idxs[1, :])).reshape(2, -1).T
    num_pairs, _ = pair_idxs.shape

    scales = rng.uniform(0, 1, size=(num_pairs, 2))

    lambda_plane_idxs = rng.integers(-2, 3, size=(num_atoms,), dtype=np.int32)
    lambda_offset_idxs = rng.integers(-2, 3, size=(num_atoms,), dtype=np.int32)

    ref_potential = nonbonded.interpolated(
        make_ref_potential(pair_idxs, scales, lambda_plane_idxs, lambda_offset_idxs, beta, cutoff))
    test_potential = NonbondedPairListInterpolated(
        pair_idxs, scales, lambda_plane_idxs, lambda_offset_idxs, beta, cutoff)

    GradientTest().compare_forces(
        example_conf,
        params,
        example_box,
        lamb,
        ref_potential,
        test_potential,
        precision=precision,
        rtol=rtol,
        atol=atol,
    )

def flip_param_precoin(
    thi_nil: np.ndarray,
    thi_prime: np.ndarray,
    log_p_for: float,
    log_p_back: float,
    mod: Model,
    ome: np.random.Generator,
) -> bool:
    weight_prime = mod.eval_log_prior(thi_prime) + log_p_back
    weight_nil = mod.eval_log_prior(thi_nil) + log_p_for
    return np.log(ome.uniform()) < weight_prime - np.logaddexp(weight_prime, weight_nil)

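
# Both precoin flips above implement a Barker-style acceptance step in the log
# domain: accept with probability exp(w') / (exp(w') + exp(w)), evaluated as
# log(u) < w' - logaddexp(w', w) for numerical stability. A standalone sketch
# with made-up log-weights:
_rng = np.random.default_rng(1)
_w_nil, _w_prime = -3.2, -2.7  # hypothetical log-weights
_p_accept = np.exp(_w_prime - np.logaddexp(_w_prime, _w_nil))  # sigmoid(0.5) ~= 0.62
_accept = np.log(_rng.uniform()) < _w_prime - np.logaddexp(_w_prime, _w_nil)
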
def propose(
    self, lam: np.ndarray, t: np.ndarray, ome: np.random.Generator
) -> (mjp_skel.Skeleton, np.ndarray):
    p_cond = 1 / (1 + np.exp(self.log_prop_scale[-1]))
    t_cond = t[1:-1][ome.uniform(size=len(t) - 2) < p_cond]
    prop = mjp_skel.paste_partition(
        mjp_skel.mutate_partition(
            mjp_skel.partition_skeleton(self.state, t_cond), lam, ome))
    return prop, t_cond

def test_policy_log_policy_grad_vs_empirical(policy: policies.DiscretePolicy,
                                             rng: np.random.Generator):
    x0 = rng.uniform(-1, 1, policy.num_params())
    n = 10
    actions = rng.integers(policy.num_actions, size=n)
    states = rng.integers(policy.num_states, size=n)
    weights = rng.uniform(0, 1, n)

    def f(x):
        policy_matrix = policy.policy_matrix(x)
        return np.sum(weights * np.log(policy_matrix[states, actions]))

    def f_grad(x):
        return policy.log_policy_grad(actions=actions, states=states, weights=weights, x=x)

    assert scipy.optimize.check_grad(f, f_grad, x0) < 1e-6

def sample_batch_ppp(
    intensity: float,
    bounds: Tuple[float, ...] = (1, 1),
    batch_size: int = 2,
    ome: np.random.Generator = np.random.default_rng(),
) -> Iterator[np.ndarray]:
    """Sample from a Poisson point process in batches.

    >>> # generate fixture
    >>> gen = int(1e4)
    >>> bound = np.random.lognormal(size=2)
    >>> # draw iid samples
    >>> y = np.vstack([ppp for ppp in sample_batch_ppp(gen, tuple(bound))])
    >>> # test sampling distribution
    >>> from scipy.stats import kstest, uniform
    >>> alpha = 1e-2
    >>> sample = (y / bound).flatten()
    >>> dist = uniform()
    >>> alpha < kstest(sample, dist.cdf)[1]
    True
    """
    assert 0 < intensity
    assert 0 < min(bounds)
    assert 0 < batch_size

    n_points = ome.poisson(intensity * np.prod(bounds))
    batch_sizes = it.repeat(batch_size, int(n_points // batch_size))
    if n_points % batch_size != 0:
        batch_sizes = it.chain(batch_sizes, [n_points % batch_size])

    ub = 0
    for i in batch_sizes:
        lb, ub = ub, ub + bounds[0] * ome.beta(i, n_points + 1 - i)
        if i == 1:
            u0 = np.array([ub])
        else:
            u0 = np.hstack([np.sort(ome.uniform(lb, ub, i - 1)), ub])
        u = ome.uniform(np.zeros(len(bounds[1:])), bounds[1:], (i, len(bounds[1:])))
        yield np.vstack([u0, u.T]).T

def test_spectrum_matches_given(rng: np.random.Generator):
    """Test whether the spectrum of the test problem matches the provided spectrum."""
    dim = 10
    spectrum = np.sort(rng.uniform(0.1, 1, size=dim))
    spdmat = random_spd_matrix(rng=rng, dim=dim, spectrum=spectrum)
    eigvals = np.sort(np.linalg.eigvals(spdmat))
    np.testing.assert_allclose(
        spectrum,
        eigvals,
        err_msg="Provided spectrum doesn't match actual.",
    )

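
# random_spd_matrix's construction is not shown here; a standard way to build a
# symmetric positive-definite matrix with a prescribed spectrum (a plausible
# sketch of what that helper does, though that is an assumption) is to conjugate
# diag(spectrum) by a Haar-random orthogonal matrix:
from scipy.stats import ortho_group

def _spd_from_spectrum(spectrum: np.ndarray, rng: np.random.Generator) -> np.ndarray:
    # an orthogonal similarity transform preserves the eigenvalues in `spectrum`
    q = ortho_group.rvs(dim=len(spectrum), random_state=rng)
    return q @ np.diag(spectrum) @ q.T
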
def sample_brownbr_esc(
    fin_t: float,
    init_x: float,
    fin_x: float,
    ue_x: float,
    ub_x: Optional[float] = None,
    seed: Optional[float] = None,
    ome: np.random.Generator = np.random.default_rng(),
) -> bool:
    """
    :param fin_t: terminal time of the bridge
    :param init_x: initial value
    :param fin_x: terminal value
    :param ue_x: escape level to test against
    :param ub_x: known upper bound on the absolute value of the bridge, if any
    :param seed: uniform variate driving the decision; drawn from ome if None
    :param ome: random generator
    :return: whether the bridge escapes the interval (-ue_x, ue_x)

    >>> # generate sample, unbounded case
    >>> from ctbayes.sdelib.paths import sample_brownbr
    >>> n = int(1e5)
    >>> fin_t = np.random.uniform()
    >>> init_x = np.random.normal()
    >>> fin_x = np.random.normal(scale=np.sqrt(fin_t))
    >>> inf_x = ppf_brownbr_min(0.75, fin_t, init_x, fin_x)
    >>> ub_sup_x = max(abs(inf_x), abs(init_x + fin_x - inf_x))
    >>> sample = [sample_brownbr_esc(fin_t, init_x, fin_x, ub_sup_x) for _ in range(n)]
    >>> # test against discrete approximation
    >>> from scipy.stats import binom_test
    >>> alpha = 1e-2
    >>> gen = series_brownbr_esc(fin_t, init_x, fin_x, ub_sup_x)
    >>> for _ in range(n): p = next(gen)
    >>> alpha < binom_test(sum(sample), len(sample), p)
    True
    >>> # generate sample, bounded case
    >>> sample = [sample_brownbr_esc(fin_t, init_x, fin_x, ub_sup_x, 1.1 * ub_sup_x) for _ in range(n)]
    >>> # test against discrete approximation
    >>> gen = series_brownbr_esc(fin_t, init_x, fin_x, ub_sup_x, 1.1 * ub_sup_x)
    >>> for _ in range(n): p = next(gen)
    >>> alpha < binom_test(sum(sample), len(sample), p)
    True
    """
    if seed is None:
        seed = ome.uniform()
    assert 0 < fin_t
    assert 0 <= seed <= 1

    if ue_x <= max(abs(init_x), abs(fin_x)):
        return True
    if ub_x is not None and ub_x < ue_x:
        return False
    for i, s in enumerate(series_brownbr_esc(fin_t, init_x, fin_x, ue_x, ub_x), 1):
        if (not i % 2 and s > seed) or (i % 2 and s < seed):
            return not bool(i % 2)

def update_objective(thi: np.ndarray, zz: List[seed.Partition], mod: Model, ctrl: Controls,
                     ome: np.random.Generator, pool: Parallel) -> Callable[[np.ndarray], np.ndarray]:

    def f_obj(thi_prime: np.ndarray) -> np.ndarray:
        log_aug_trans = [est_log_aug_lik(thi_prime, zz_, mod, supp_t, ome)[0] for zz_ in new_zz]
        return np.array(log_aug_trans) + mod.eval_log_prior(thi_prime)

    supp_t = [np.sort(ome.uniform(high=dt, size=ctrl.n_disc_supports)) for dt in np.diff(mod.t)]
    new_ome = [np.random.default_rng(seed_) for seed_ in ome.bit_generator._seed_seq.spawn(len(zz))]
    _, new_zz = zip(*pool(delayed(est_log_aug_lik)(thi, zz_, mod, supp_t, ome_)
                          for zz_, ome_ in zip(zz, new_ome)))
    return f_obj

def __init__(self, n_features: int, n_dim: int, rbf_ls: float = 1.,
             rng: Optional[np.random.Generator] = None):
    if rng is None:
        rng = np.random.default_rng()
    self.__n_features = n_features
    self.__n_dim = n_dim
    self.__rbf_ls = rbf_ls
    self.__rng = rng
    self.__weight = rng.normal(size=(n_dim, n_features)) / rbf_ls
    self.__offset = rng.uniform(low=0, high=2 * np.pi, size=n_features)

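
# The weight and offset drawn above are the standard ingredients of random
# Fourier features for an RBF kernel: with W ~ N(0, 1 / rbf_ls**2) and
# b ~ U(0, 2*pi), the feature map phi(x) = sqrt(2 / n_features) * cos(x @ W + b)
# satisfies E[phi(x) @ phi(y)] ~= exp(-||x - y||**2 / (2 * rbf_ls**2)).
# A standalone sketch of the map (the function name is illustrative, not part
# of the original class):
def _rff_transform(x: np.ndarray, weight: np.ndarray, offset: np.ndarray) -> np.ndarray:
    # x: (n_points, n_dim); weight: (n_dim, n_features); offset: (n_features,)
    n_features = weight.shape[1]
    return np.sqrt(2 / n_features) * np.cos(x @ weight + offset)
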
def sample_edges(
    fin_t: float,
    init_x: float,
    fin_x: float,
    bounds_inf_x: Tuple[float, float],
    bounds_sup_x: Tuple[float, float],
    ome: np.random.Generator = np.random.default_rng(),
    max_props: int = int(1e6),
) -> (float, float, float, float, Optional[List[bool]]):
    """
    :param fin_t: terminal time of the bridge
    :param init_x: initial value
    :param fin_x: terminal value
    :param bounds_inf_x: lower and upper bound on the infimum of the path
    :param bounds_sup_x: lower and upper bound on the supremum of the path
    :param ome: random generator
    :param max_props: maximum number of proposals
    :return: time and value of the minimum, bounds on the supremum, and escape flags
    :raise: BudgetConstraintError
    """
    assert 0 < fin_t
    assert 0 < max_props

    lb_inf_x, ub_inf_x = bounds_inf_x
    lb_sup_x, ub_sup_x = bounds_sup_x
    for _ in range(max_props):
        min_t, min_x = hitting.sample_layerbr_min(fin_t, init_x, fin_x, ub_sup_x, lb_inf_x, ub_inf_x, ome)
        if min(init_x, fin_x) in (ub_inf_x, lb_sup_x):
            return min_t, min_x, lb_sup_x, ub_sup_x, None
        esc1 = hitting.sample_bessel3br_esc(
            min_t, 0, init_x - min_x, lb_sup_x - min_x, ub_sup_x - min_x, ome=ome)
        esc2 = hitting.sample_bessel3br_esc(
            fin_t - min_t, 0, fin_x - min_x, lb_sup_x - min_x, ub_sup_x - min_x, ome=ome)
        if (not esc1 and not esc2) or ome.uniform() > .5:
            return min_t, min_x, lb_sup_x, ub_sup_x, [esc1, esc2]
    else:
        raise BudgetConstraintError('None of the proposals were accepted.')

def generate(
    pop_size: int,
    chrom_length: int,
    threshold: float = 0.5,
    rand_generator: np.random.Generator = _DEFAULT_RAND_GEN,
) -> np.ndarray:
    """
    Inputs:
    pop_size -- positive integer number -- total number of chromosomes in a generation
    chrom_length -- positive integer number -- number of bits in a chromosome
        -- length of a chromosome
    threshold -- real number between 0.0 and 1.0 (default is 0.5)
        -- values (from a uniform distribution) lower than this number translate
           to True values in chromosomes
    rand_generator -- instance of Numpy Random Generator
        -- a Generator with the Numpy default BitGenerator (PCG64) and None as
           a seed value is used as default

    Each chromosome is represented as a fixed-length 1D numpy array with a random
    sequence of Boolean values. Population size must be smaller than the total
    number of unique chromosome sequences (binary patterns) that can be generated
    from a given number of bits.

    Returns a nested (2D) numpy boolean array: the entire population of
    chromosomes (solution candidates).
    """
    if pop_size >= (1 << chrom_length):
        raise ValueError(
            'Population must be smaller than overall unique chromosome sequences.')
    return rand_generator.uniform(
        low=0.0, high=1.0, size=(pop_size, chrom_length)) < threshold

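
# A minimal usage sketch chaining generate and mutate from this module; the
# sizes and rates are illustrative.
_rng = np.random.default_rng(7)
_population = generate(pop_size=10, chrom_length=16, rand_generator=_rng)
_population = mutate(_population, mut_prob=0.01, rand_generator=_rng)
# _population stays a (10, 16) boolean array throughout
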
def create_random_circuit(
    n_qubits: int,
    n_gates: int,
    rng: np.random.Generator,
) -> Circuit:
    """Generates a random circuit acting on n_qubits with n_gates for testing purposes.

    Args:
        n_qubits: The number of qubits in the circuit
        n_gates: The number of gates in the circuit
        rng: Numpy random generator

    Returns:
        Generated circuit.
    """
    # Initialize all gates in set, not including RH or ZXZ
    all_gates_lists = [
        ONE_QUBIT_NO_PARAMS_GATES,
        TWO_QUBITS_NO_PARAMS_GATES,
        ONE_QUBIT_ONE_PARAM_GATES,
        TWO_QUBITS_ONE_PARAM_GATES,
    ]

    # Loop to add gates to circuit
    circuit = Circuit()
    for _ in range(n_gates):
        # Pick gate type (index the outer list; rng.choice on a ragged list of lists fails)
        gates_list = all_gates_lists[rng.integers(len(all_gates_lists))]
        gate = rng.choice(gates_list)

        # Pick qubit to act on (control if two qubit gate)
        qubits: Tuple[int, ...]
        if gates_list in [ONE_QUBIT_NO_PARAMS_GATES, ONE_QUBIT_ONE_PARAM_GATES]:
            index = rng.choice(range(n_qubits))
            qubits = (int(index),)
        else:
            indices = rng.choice(range(n_qubits), size=2, replace=False)
            qubits = tuple(int(i) for i in indices)

        if gates_list in [ONE_QUBIT_ONE_PARAM_GATES, TWO_QUBITS_ONE_PARAM_GATES]:
            param = rng.uniform(-np.pi, np.pi, size=1)
            gate = gate(float(param))

        circuit += gate(*qubits)
    return circuit

def crop_img(img, vertices, labels, length, rand_gen: np.random.Generator):
    '''crop img patches to obtain batch and augment
    Input:
        img         : PIL Image
        vertices    : vertices of text regions <numpy.ndarray, (n,8)>
        labels      : 1->valid, 0->ignore, <numpy.ndarray, (n,)>
        length      : length of cropped image region
    Output:
        region      : cropped image region
        new_vertices: new vertices in cropped region
    '''
    h, w = img.height, img.width
    # confirm the shortest side of image >= length
    if h >= w and w < length:
        img = img.resize((length, int(h * length / w)), Image.BILINEAR)
    elif h < w and h < length:
        img = img.resize((int(w * length / h), length), Image.BILINEAR)
    ratio_w = img.width / w
    ratio_h = img.height / h
    assert (ratio_w >= 1 and ratio_h >= 1)

    new_vertices = np.zeros(vertices.shape)
    if vertices.size > 0:
        new_vertices[:, [0, 2, 4, 6]] = vertices[:, [0, 2, 4, 6]] * ratio_w
        new_vertices[:, [1, 3, 5, 7]] = vertices[:, [1, 3, 5, 7]] * ratio_h

    # find random position
    remain_h = img.height - length
    remain_w = img.width - length
    flag = True
    cnt = 0
    while flag and cnt < 1000:
        cnt += 1
        start_w = int(rand_gen.uniform() * remain_w)
        start_h = int(rand_gen.uniform() * remain_h)
        flag = is_cross_text([start_w, start_h], length, new_vertices[labels == 1, :])
    box = (start_w, start_h, start_w + length, start_h + length)
    region = img.crop(box)
    if new_vertices.size == 0:
        return region, new_vertices

    new_vertices[:, [0, 2, 4, 6]] -= start_w
    new_vertices[:, [1, 3, 5, 7]] -= start_h
    return region, new_vertices

def sample_envelope(
    x: np.ndarray,
    fx: np.ndarray,
    dfx: np.ndarray,
    y: np.ndarray,
    ome: np.random.Generator = np.random.default_rng(),
) -> float:
    """Generate a sample from the upper hull.

    :param x: hull supports
    :param fx: upper hull value at x
    :param dfx: upper hull derivative value at x
    :param y: intersection points of upper hull segments
    :param ome: random generator
    :return: sample from the upper hull

    >>> y = np.array([-2, -1, 1, 2])
    >>> y = np.array([-np.inf, *gen_upper(y, -y**2 / 2, -y), np.inf])
    >>> sample = [sample_envelope(y, -y**2 / 2, -y, y) for _ in range(int(1e3))]
    >>> # test mean CLT
    >>> from scipy.stats import ttest_1samp
    >>> alpha = 1e-2
    >>> alpha < ttest_1samp(sample, 0)[1]
    True
    """
    # prevent over/underflows
    offset = np.max(dfx * (y[1:] - x) + fx)
    vol_upper = np.exp(dfx * (y[1:] - x) + fx - offset) / dfx
    vol_lower = np.exp(dfx * (y[:-1] - x) + fx - offset) / dfx
    cdf = np.array([0, *np.cumsum(vol_upper - vol_lower)])
    norm = cdf[-1]

    # pick sector to sample from
    z = ome.uniform()
    i = np.searchsorted(cdf / norm, z) - 1
    return x[i] + (np.log((norm * z - cdf[i] + vol_lower[i]) * dfx[i]) - fx[i] + offset) / dfx[i]

def infection_events(env: simpy.Environment, infected: Agent, rng: np.random.Generator):
    print(f'@t={env.now} - {infected}->{State.INFECTED.name}')
    infected.state = State.INFECTED
    yield env.timeout(delay=rng.normal(loc=4.6, scale=0.3))

    print(f'@t={env.now} - {infected}->{State.INFECTIOUS.name}')
    infected.state = State.INFECTIOUS

    if rng.uniform() < p_asymptomatic:
        # Asymptomatic
        yield env.timeout(delay=rng.normal(loc=6.5, scale=0.4))
        print(f'@t={env.now} - {infected}->{State.REMOVED.name}')
        infected.state = State.REMOVED
    else:
        # Symptomatic
        yield env.timeout(delay=0.5)
        print(f'@t={env.now} - {infected}->{State.SYMPTOMATIC_INFECTIOUS.name}')
        infected.state = State.SYMPTOMATIC_INFECTIOUS

        yield env.timeout(delay=rng.normal(loc=6.0, scale=0.4))
        print(f'@t={env.now} - {infected}->{State.REMOVED.name}')
        infected.state = State.REMOVED

def sample_bridge(
    bounds_x: (float, float),
    norm_rsde: Callable[[np.ndarray, Optional[np.ndarray]], Tuple[np.ndarray, np.ndarray]],
    denorm_rsde: Callable[[np.ndarray, Optional[np.ndarray]], Tuple[np.ndarray, np.ndarray]],
    eval_disc: Callable[[np.ndarray], np.ndarray],
    eval_bounds_disc: Callable[[float, float], Tuple[float, float, float]],
    ome: np.random.Generator,
    max_props: int = int(1e4),
) -> Iterator[SeedSkeleton]:
    """
    :param bounds_x:
    :param norm_rsde:
    :param denorm_rsde:
    :param eval_disc:
    :param eval_bounds_disc:
    :param ome:
    :param max_props:
    :return:
    :raise: BudgetConstraintError
    """
    assert bounds_x[0] < bounds_x[1]
    assert 0 <= max_props

    while True:
        for _ in range(max_props):
            proposal = sample_raw_seed(bounds_x, norm_rsde, denorm_rsde, ome)
            success, log_weight, proposal = flip_poisson_coin(
                proposal, norm_rsde, denorm_rsde, eval_disc, eval_bounds_disc, ome)
            if success and np.log(ome.uniform()) < log_weight:
                break
        else:
            raise BudgetConstraintError('None of the proposals were accepted.')
        yield proposal

def sample_anchor(
    fin_t: float,
    init_x: float,
    fin_x: float,
    lb_x: float = -np.inf,
    ub_x: float = np.inf,
    ome: np.random.Generator = np.random.default_rng(),
) -> (bool, Tuple[float, float], Tuple[float, float]):
    """
    :param fin_t: terminal time of the bridge
    :param init_x: initial value
    :param fin_x: terminal value
    :param lb_x: lower bound on the path
    :param ub_x: upper bound on the path
    :param ome: random generator
    :return: whether the anchor lies in the lower sector, lower sector, upper sector
    """
    assert 0 < fin_t

    # sample partition
    layer_ix = sample_layer(fin_t, init_x, fin_x, lb_x, ub_x, ome)
    in_layer, ou_layer = [
        compute_edges(layer_ix - i, fin_t, init_x, fin_x, lb_x, ub_x) for i in (0, 1)]
    lo_sector, hi_sector = (in_layer[0], ou_layer[0]), (ou_layer[1], in_layer[1])

    # assess sector probabilities
    p_lo = hitting.cdf_brownbr_min(lo_sector[1], fin_t, init_x, fin_x, lo_sector[0])
    p_hi = hitting.cdf_brownbr_min(-hi_sector[0], fin_t, -fin_x, -init_x, -hi_sector[1])

    # simulate anchor sector
    return ome.uniform() < p_lo / (p_lo + p_hi), lo_sector, hi_sector

def sample_ppp(
    intensity: float,
    bound: Tuple[float, ...] = (1, 1),
    ome: np.random.Generator = np.random.default_rng(),
) -> np.ndarray:
    """Sample from a homogeneous Poisson point process on a rectangular domain.

    :param intensity: expected number of points per unit volume
    :param bound: upper corner of the domain, whose lower corner is the origin
    :param ome: random generator
    :return: sampled points, sorted by their first coordinate

    >>> # generate fixture
    >>> gen = int(1e4)
    >>> bound = np.random.lognormal(size=2)
    >>> # draw iid samples
    >>> y = sample_ppp(gen, tuple(bound))
    >>> # test sampling distribution
    >>> from scipy.stats import kstest, uniform
    >>> alpha = 1e-2
    >>> sample = (y / bound).flatten()
    >>> dist = uniform()
    >>> alpha < kstest(sample, dist.cdf)[1]
    True
    """
    assert 0 < intensity
    assert 0 < min(bound)

    n_points = ome.poisson(intensity * np.prod(bound))
    locations = ome.uniform(np.zeros(len(bound)), bound, (n_points, len(bound)))
    return locations[np.argsort(locations[:, 0])]

def warp_spectrograms(
    specs: np.ndarray,
    num_landmarks: int,
    max_warp_time: int,
    max_warp_freq: int,
    rng: np.random.Generator,
) -> np.ndarray:
    """Warp a batch of spectrograms with sparse_image_warp, using random landmarks."""
    logg = logging.getLogger(f"c.{__name__}.warp_spectrograms")
    logg.setLevel("INFO")
    logg.debug("Start warp_spectrograms")

    # extract info on data and spectrogram shapes
    num_samples = specs.shape[0]
    spec_dim = specs.shape[1:3]
    logg.debug(f"num_samples {num_samples} spec_dim {spec_dim}")

    # the shape of the landmarks for one dimension
    land_shape = num_samples, num_landmarks

    # the source point has to be at least max_warp_* from the border
    bounds_time = (max_warp_time, spec_dim[0] - max_warp_time)
    bounds_freq = (max_warp_freq, spec_dim[1] - max_warp_freq)

    # generate (num_samples, num_landmarks) time/freq positions
    source_land_t = rng.uniform(*bounds_time, size=land_shape)
    source_land_f = rng.uniform(*bounds_freq, size=land_shape)
    source_landmarks = np.dstack((source_land_t, source_land_f))
    logg.debug(f"land_t.shape: {source_land_t.shape}")
    logg.debug(f"source_landmarks.shape: {source_landmarks.shape}")

    # generate the deltas, how much to shift each point
    delta_t = rng.uniform(-max_warp_time, max_warp_time, size=land_shape)
    delta_f = rng.uniform(-max_warp_freq, max_warp_freq, size=land_shape)
    dest_land_t = source_land_t + delta_t
    dest_land_f = source_land_f + delta_f
    dest_landmarks = np.dstack((dest_land_t, dest_land_f))
    logg.debug(f"dest_landmarks.shape: {dest_landmarks.shape}")

    data_specs = tf.convert_to_tensor(specs, dtype=tf.float32)
    source_landmarks = tf.convert_to_tensor(source_landmarks, dtype=tf.float32)
    dest_landmarks = tf.convert_to_tensor(dest_landmarks, dtype=tf.float32)

    # wrap in tf.function to avoid retracing on every call
    # https://www.tensorflow.org/guide/function#controlling_retracing
    siw = tf.function(sparse_image_warp, experimental_relax_shapes=True)

    data_warped, _ = siw(data_specs, source_landmarks, dest_landmarks, num_boundary_points=2)
    logg.debug(f"data_warped.shape: {data_warped.shape}")
    return data_warped