def fixture_measure_params(
    measure_name: str,
    input_dim: int,
    cov_diagonal: bool,
    rng: np.random.Generator,
) -> Dict:
    params = {"name": measure_name}

    if measure_name == "gauss":
        # set up mean and covariance
        if input_dim == 1:
            mean = rng.normal(0, 1)
            cov = rng.uniform(0.5, 1.5)
        else:
            mean = rng.normal(0, 1, size=(input_dim, 1))
            if cov_diagonal:
                cov = np.diag(rng.uniform(0.5, 1.5, size=(input_dim,)))
            else:
                mat = rng.normal(0, 1, size=(input_dim, input_dim))
                cov = mat @ mat.T

        params["mean"] = mean
        params["cov"] = cov

    elif measure_name == "lebesgue":
        # set up bounds
        rv = rng.uniform(0, 1, size=(input_dim, 2))
        domain = (rv[:, 0] - 1.0, rv[:, 1] + 1.0)

        params["domain"] = domain
        params["normalized"] = True

    return params
def gen_params(params_initial: npt.NDArray, rng: np.random.Generator,
               dcharge=0.01, dlogsig=0.1, dlogeps=0.1):
    """Given an initial set of nonbonded parameters, generate random final parameters
    and return the concatenation of the initial and final parameters."""
    num_atoms, _ = params_initial.shape
    charge_init, sig_init, eps_init = params_initial[:].T

    charge_final = charge_init + rng.normal(0, dcharge, size=(num_atoms,))

    # perturb LJ parameters in log space to avoid negative results
    sig_final = np.where(
        sig_init,
        np.exp(np.log(sig_init) + rng.normal(0, dlogsig, size=(num_atoms,))),
        0,
    )
    eps_final = np.where(
        eps_init,
        np.exp(np.log(eps_init) + rng.normal(0, dlogeps, size=(num_atoms,))),
        0,
    )

    params_final = np.stack((charge_final, sig_final, eps_final), axis=1)

    return np.concatenate((params_initial, params_final))
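# Illustrative call to gen_params (a sketch; the (4, 3) charge/sigma/epsilon array
# below is made up for demonstration and is not taken from the original test system):
_rng = np.random.default_rng(2023)
_params_initial = np.array([
    [0.5, 0.3, 0.2],    # charge, LJ sigma, LJ epsilon for atom 0
    [-0.5, 0.0, 0.0],   # zero LJ parameters are left at exactly zero by the np.where guard
    [0.1, 0.25, 0.15],
    [-0.1, 0.35, 0.05],
])
_params = gen_params(_params_initial, _rng)
assert _params.shape == (8, 3)  # initial (4, 3) block stacked on top of the perturbed (4, 3) block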
def fixture_args0(
    request,
    random_process: randprocs.RandomProcess,
    rng: np.random.Generator,
) -> np.ndarray:
    """Input(s) to a random process."""
    return rng.normal(size=(request.param, random_process.input_dim))
def make_env_attributes_task(
    env: gym.Env,
    task_params: Union[List[str], Dict[str, Any]],
    seed: int = None,
    rng: np.random.Generator = None,
    noise_std: float = 0.2,
) -> Dict[str, Any]:
    task: Dict[str, Any] = {}
    rng: np.random.Generator = rng or np.random.default_rng(seed)

    if isinstance(task_params, list):
        task_params = {param: getattr(env.unwrapped, param) for param in task_params}

    for attribute, default_value in task_params.items():
        new_value = default_value

        # NOTE: check bool before int/float, since bool is a subclass of int and
        # would otherwise be caught by the numeric branch below.
        if isinstance(default_value, bool):
            new_value = rng.choice([True, False])
        elif isinstance(default_value, (int, float, np.ndarray)):
            new_value *= rng.normal(1.0, noise_std)
            # Clip the value to be in the [0.1*default, 10*default] range.
            new_value = max(0.1 * default_value, new_value)
            new_value = min(10 * default_value, new_value)
            if isinstance(default_value, int):
                new_value = round(new_value)
        else:
            raise NotImplementedError(
                f"TODO: Don't yet know how to sample a random value for "
                f"attribute {attribute} with default value {default_value} of type "
                f"{type(default_value)}."
            )

        task[attribute] = new_value

    return task
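# Hypothetical usage sketch (assumes a Gym installation providing "CartPole-v1" and
# that the listed attribute names exist on the unwrapped environment; the seed and
# attribute choices are illustrative only):
_env = gym.make("CartPole-v1")
_task = make_env_attributes_task(_env, ["gravity", "length"], seed=123)
print(_task)  # e.g. {'gravity': <perturbed float near 9.8>, 'length': <perturbed float near 0.5>}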
def test_pickle_frame_dyncodec(tmp_path, rng: np.random.Generator):
    file = tmp_path / 'data.bpk'
    df = pd.DataFrame({
        'key': np.arange(0, 5000, dtype='i4'),
        'count': rng.integers(0, 1000, 5000),
        'score': rng.normal(10, 2, 5000)
    })

    def codec(buf):
        obj = memoryview(buf).obj
        if isinstance(obj, np.ndarray) and obj.dtype == np.float64:
            print('compacting double array')
            return codecs.Chain([numcodecs.AsType('f4', 'f8'), codecs.Blosc('zstd', 9)])
        else:
            return codecs.Blosc('zstd', 9)

    with BinPickler.compressed(file, codec) as w:
        w.dump(df)

    with BinPickleFile(file) as bpf:
        assert not bpf.find_errors()
        df2 = bpf.load()
        print(df2)
        assert all(df2.columns == df.columns)
        assert all(df2['key'] == df['key'])
        assert all(df2['count'] == df['count'])
        assert all(df2['score'].astype('f4') == df['score'].astype('f4'))
        del df2
def fixture_x1(
    rng: np.random.Generator, x1_shape: Optional[ShapeType]
) -> Optional[np.ndarray]:
    """Random data from a standard normal distribution."""
    if x1_shape is None:
        return None

    return rng.normal(0, 1, size=x1_shape)
def levy_flight(start: np.ndarray, alpha: float, param_lambda: float,
                gen: np.random.Generator) -> np.ndarray:
    """
    Perform a levy flight step.

    Arguments:
        start {numpy.ndarray} -- The cuckoo's start position
        alpha {float} -- The step size
        param_lambda {float} -- lambda parameter of the levy distribution
        gen {Generator} -- the generator used to generate pseudo random numbers

    Returns:
        numpy.ndarray -- The new position
    """
    dividend = gamma(1 + param_lambda) * np.sin(np.pi * param_lambda / 2)
    divisor = gamma((1 + param_lambda) / 2) * param_lambda * np.power(2, (param_lambda - 1) / 2)
    sigma1 = np.power(dividend / divisor, 1 / param_lambda)
    sigma2 = 1

    u_vec = gen.normal(0, sigma1, size=2)
    v_vec = gen.normal(0, sigma2, size=2)

    step_length = u_vec / np.power(np.fabs(v_vec), 1 / param_lambda)

    return start + alpha * step_length
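# Illustrative call to levy_flight (a sketch; it assumes `gamma` above refers to
# scipy.special.gamma, and the alpha / lambda values are made up, not taken from
# the original cuckoo-search code):
from scipy.special import gamma  # assumed source of the `gamma` used above

_gen = np.random.default_rng(42)
_new_pos = levy_flight(start=np.zeros(2), alpha=0.01, param_lambda=1.5, gen=_gen)
print(_new_pos)  # 2-D position offset from the origin by one heavy-tailed Levy step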
def fixture_x0(
    rng: np.random.Generator, input_shapes: Tuple[ShapeType, Optional[ShapeType]]
) -> np.ndarray:
    """The first argument to the covariance function drawn from a standard normal
    distribution."""
    x0_shape, _ = input_shapes

    return rng.normal(0, 1, size=x0_shape)
def multivariate_normal(
    shape: ShapeLike, precompute_cov_cholesky: bool, rng: np.random.Generator
) -> randvars.Normal:
    rv = randvars.Normal(
        mean=rng.normal(size=shape),
        cov=random_spd_matrix(rng=rng, dim=shape[0]),
    )
    if precompute_cov_cholesky:
        rv.precompute_cov_cholesky()
    return rv
def test_evaluated_random_process_is_random_variable(
    random_process: randprocs.RandomProcess, rng: np.random.Generator
):
    """Test whether evaluating a random process returns a random variable."""
    n_inputs_args0 = 10
    args0 = rng.normal(size=(n_inputs_args0,) + random_process.input_shape)
    y0 = random_process(args0)

    assert isinstance(y0, randvars.RandomVariable), (
        f"Output of {repr(random_process)} is not a "
        f"random variable."
    )
def matrixvariate_normal(
    shape: ShapeLike, precompute_cov_cholesky: bool, rng: np.random.Generator
) -> randvars.Normal:
    rv = randvars.Normal(
        mean=rng.normal(size=shape),
        cov=linops.Kronecker(
            A=random_spd_matrix(dim=shape[0], rng=rng),
            B=random_spd_matrix(dim=shape[1], rng=rng),
        ),
    )
    if precompute_cov_cholesky:
        rv.precompute_cov_cholesky()
    return rv
def infection_events(env: simpy.Environment, infected: Agent, rng: np.random.Generator):
    print(f'@t={env.now} - {infected}->{State.INFECTED.name}')
    infected.state = State.INFECTED
    yield env.timeout(delay=rng.normal(loc=4.6, scale=0.3))

    print(f'@t={env.now} - {infected}->{State.INFECTIOUS.name}')
    infected.state = State.INFECTIOUS

    if rng.uniform() < p_asymptomatic:
        # Asymptomatic
        yield env.timeout(delay=rng.normal(loc=6.5, scale=0.4))
        print(f'@t={env.now} - {infected}->{State.REMOVED.name}')
        infected.state = State.REMOVED
    else:
        # Symptomatic
        yield env.timeout(delay=0.5)
        print(f'@t={env.now} - {infected}->{State.SYMPTOMATIC_INFECTIOUS.name}')
        infected.state = State.SYMPTOMATIC_INFECTIOUS

        yield env.timeout(delay=rng.normal(loc=6.0, scale=0.4))
        print(f'@t={env.now} - {infected}->{State.REMOVED.name}')
        infected.state = State.REMOVED
def fixture_x1(
    rng: np.random.Generator,
    input_shapes: Tuple[ShapeType, Optional[ShapeType]],
) -> Optional[np.ndarray]:
    """The second argument to the covariance function drawn from a standard normal
    distribution."""
    _, x1_shape = input_shapes

    if x1_shape is None:
        return None

    return rng.normal(0, 1, size=x1_shape)
def __init__(self, n_features: int, n_dim: int, rbf_ls: float = 1.,
             rng: np.random.Generator = None):
    if rng is None:
        rng = np.random.default_rng()

    self.__n_features = n_features
    self.__n_dim = n_dim
    self.__rbf_ls = rbf_ls
    self.__rng = rng

    self.__weight = rng.normal(size=(n_dim, n_features)) / rbf_ls
    self.__offset = rng.uniform(low=0, high=2 * np.pi, size=n_features)
    return
def test_induced_solution_belief(rng: np.random.Generator):
    """Test whether a consistent belief over the solution is inferred from a belief
    over the inverse."""
    n = 5
    A = randvars.Constant(random_spd_matrix(dim=n, rng=rng))
    Ainv = randvars.Normal(
        mean=linops.Scaling(factors=1 / np.diag(A.mean)),
        cov=linops.SymmetricKronecker(linops.Identity(n)),
    )
    b = randvars.Constant(rng.normal(size=(n, 1)))
    prior = LinearSystemBelief(A=A, Ainv=Ainv, x=None, b=b)

    x_infer = Ainv @ b

    np.testing.assert_allclose(prior.x.mean, x_infer.mean)
    np.testing.assert_allclose(prior.x.cov.todense(), x_infer.cov.todense())
def test_rmatvec(
    linop: pn.linops.LinearOperator,
    matrix: np.ndarray,
    rng: np.random.Generator,
):
    vec = rng.normal(size=linop.shape[0])

    linop_matvec = vec @ linop
    matrix_matvec = vec @ matrix

    assert linop_matvec.ndim == 1
    assert linop_matvec.shape == matrix_matvec.shape
    assert linop_matvec.dtype == matrix_matvec.dtype

    np.testing.assert_allclose(linop_matvec, matrix_matvec)
def test_dump_frame(tmp_path, rng: np.random.Generator):
    "Pickle a Pandas data frame"
    file = tmp_path / 'data.bpk'
    df = pd.DataFrame({
        'key': np.arange(0, 5000),
        'count': rng.integers(0, 1000, 5000),
        'score': rng.normal(10, 2, 5000)
    })

    dump(df, file)

    df2 = load(file)

    assert all(df2.columns == df.columns)
    for c in df2.columns:
        assert all(df2[c] == df[c])
def test_matmat(
    linop: pn.linops.LinearOperator,
    matrix: np.ndarray,
    rng: np.random.Generator,
    ncols: int,
    order: str,
):
    mat = np.asarray(rng.normal(size=(linop.shape[1], ncols)), order=order)

    linop_matmat = linop @ mat
    matrix_matmat = matrix @ mat

    assert linop_matmat.ndim == 2
    assert linop_matmat.shape == matrix_matmat.shape
    assert linop_matmat.dtype == matrix_matmat.dtype

    np.testing.assert_allclose(linop_matmat, matrix_matmat)
def test_raise_on_indefinite_result(
    N: int,
    dtype: np.dtype,
    L: np.ndarray,
    rng: np.random.Generator,
    method_kwargs: Dict[str, Any],
):
    """Tests whether a :class:`numpy.linalg.LinAlgError` is raised if the downdate
    results in a singular or indefinite result."""
    # The downdated matrix is positive definite if and only if p^T p < 1 for L * p = v.
    # Hence, the vector v' := a * v defines an invalid downdate if and only if
    # a >= (1 / ||p||_2).
    v = rng.normal(size=N).astype(dtype, copy=False)

    p = scipy.linalg.solve_triangular(L, v, lower=True)

    v *= (1.0 + 0.2) / np.linalg.norm(p, ord=2)

    with pytest.raises(np.linalg.LinAlgError):
        cholupdates.rank_1.downdate(L, v, **method_kwargs)
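# Numerical illustration of the positive-definiteness criterion used above, with only
# NumPy/SciPy (a self-contained sketch; the 3x3 matrix and vector are made up, and
# `np` / `scipy.linalg` are assumed to be imported as in the test):
_A = np.array([[4.0, 1.0, 0.0],
               [1.0, 3.0, 1.0],
               [0.0, 1.0, 2.0]])
_L = np.linalg.cholesky(_A)
_v = np.array([1.0, -1.0, 0.5])
_p = scipy.linalg.solve_triangular(_L, _v, lower=True)
_v_bad = _v * (1.0 + 0.2) / np.linalg.norm(_p, ord=2)  # scale past the 1 / ||p||_2 threshold
# A - v_bad v_bad^T = L (I - p' p'^T) L^T with ||p'||_2 = 1.2 > 1, so it is indefinite:
print(np.linalg.eigvalsh(_A - np.outer(_v_bad, _v_bad)))  # smallest eigenvalue is negative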
def test_rp_mean_cov_evaluated_matches_rv_mean_cov(
    random_process: randprocs.RandomProcess, rng: np.random.Generator
):
    """Check whether the evaluated mean and covariance function of a random process
    is equivalent to the mean and covariance of the evaluated random process as a
    random variable."""
    x = rng.normal(size=(10, random_process.input_dim))

    np.testing.assert_allclose(
        random_process(x).mean,
        random_process.mean(x),
        err_msg=f"Mean of evaluated {repr(random_process)} does not match the "
        f"random process mean function evaluated.",
    )

    np.testing.assert_allclose(
        random_process(x).cov,
        random_process.covmatrix(x),
        err_msg=f"Covariance of evaluated {repr(random_process)} does not match the "
        f"random process covariance function evaluated.",
    )
def test_call(
    linop: pn.linops.LinearOperator,
    matrix: np.ndarray,
    rng: np.random.Generator,
    shape: Tuple[Optional[int], ...],
):
    axis = shape.index(None) - len(shape)
    shape = tuple(
        entry if entry is not None else linop.shape[1] for entry in shape
    )

    arr = rng.normal(size=shape)

    linop_call = linop(arr, axis=axis)
    matrix_call = np.moveaxis(np.tensordot(matrix, arr, axes=(-1, axis)), 0, axis)

    assert linop_call.ndim == 4
    assert linop_call.shape == matrix_call.shape
    assert linop_call.dtype == matrix_call.dtype

    np.testing.assert_allclose(linop_call, matrix_call)
def test_pickle_frame(tmp_path, rng: np.random.Generator, writer, direct):
    "Pickle a Pandas data frame"
    file = tmp_path / 'data.bpk'
    df = pd.DataFrame({
        'key': np.arange(0, 5000),
        'count': rng.integers(0, 1000, 5000),
        'score': rng.normal(10, 2, 5000)
    })

    with writer(file) as w:
        w.dump(df)

    with BinPickleFile(file, direct=direct) as bpf:
        assert not bpf.find_errors()
        df2 = bpf.load()
        print(df2)
        assert all(df2.columns == df.columns)
        for c in df2.columns:
            assert all(df2[c] == df[c])
        del df2
def generate_random_from_kernel(a: np.ndarray, d: int,
                                rng: np.random.Generator) -> np.ndarray:
    """
    Use the random number generator `rng` to draw a random `d`-dimensional subspace
    from the kernel of matrix `a`. If `dim(ker(a)) < d`, an exception is raised.
    """
    evals, evecs = np.linalg.eigh(a.T @ a)
    # kernel is where evals are (almost) zero
    ker_dims = (evals < 1e-10).nonzero()[0]
    size_ker = len(ker_dims)
    if size_ker < d:
        raise ValueError(
            "Kernel dimension lower than requested subspace dimension")
    chosen_dims = rng.choice(ker_dims, size=d, replace=False)
    chosen_basis = evecs[:, chosen_dims]
    b = rng.normal(size=(d, d)) @ chosen_basis.T
    return b
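# Illustrative call (a sketch; the rank-1 matrix below is made up so that its kernel
# is 2-dimensional and a d=2 subspace can be drawn from it):
_rng = np.random.default_rng(7)
_a = np.outer(np.array([1.0, 2.0, 3.0]), np.array([1.0, 0.0, -1.0]))  # rank 1, so dim(ker(a)) == 2
_b = generate_random_from_kernel(_a, d=2, rng=_rng)
print(_b.shape)                                 # (2, 3)
print(np.allclose(_a @ _b.T, 0.0, atol=1e-8))   # True: the rows of b lie in ker(a)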
def _sample_truncated_integer_gaussian(rng: np.random.Generator, loc: int, scale: int,
                                       min_val: int, max_val: int) -> int:
    # Rejection-sample: redraw until the truncated Gaussian draw lies in [min_val, max_val].
    sample = None
    while sample is None or not (min_val <= sample <= max_val):
        sample = int(rng.normal(loc=loc, scale=scale))
    return sample
def test_nonbonded_interaction_group_consistency_allpairs_constant_shift(
    num_atoms,
    num_atoms_ligand,
    precision,
    rtol,
    atol,
    cutoff,
    beta,
    lamb,
    example_nonbonded_params,
    example_conf,
    example_box,
    rng: np.random.Generator,
):
    """Compares with reference nonbonded_v3 potential, which computes the sum of all
    pairwise interactions. This uses the identity

        U(x') - U(x) = U_AB(x') - U_AB(x)

    where
    - U is the all-pairs potential over all atoms
    - U_A, U_B are all-pairs potentials for interacting groups A and B, respectively
    - U_AB is the "interaction group" potential, i.e. the sum of pairwise interactions
      (a, b) where "a" is in A and "b" is in B
    - the transformation x -> x' does not affect U_A or U_B (e.g. a constant
      translation applied to each atom in one group)
    """

    conf = example_conf[:num_atoms]
    params = example_nonbonded_params[:num_atoms, :]

    lambda_plane_idxs = rng.integers(-2, 3, size=(num_atoms,), dtype=np.int32)
    lambda_offset_idxs = rng.integers(-2, 3, size=(num_atoms,), dtype=np.int32)

    def ref_allpairs(conf):
        return prepare_reference_nonbonded(
            params=params,
            exclusion_idxs=np.array([], dtype=np.int32),
            scales=np.zeros((0, 2), dtype=np.float64),
            lambda_plane_idxs=lambda_plane_idxs,
            lambda_offset_idxs=lambda_offset_idxs,
            beta=beta,
            cutoff=cutoff,
        )(conf, params, example_box, lamb)

    ligand_idxs = rng.choice(num_atoms, size=(num_atoms_ligand,), replace=False).astype(np.int32)

    def test_ixngroups(conf):
        _, _, _, u = (
            NonbondedInteractionGroup(
                ligand_idxs,
                lambda_plane_idxs,
                lambda_offset_idxs,
                beta,
                cutoff,
            )
            .unbound_impl(precision)
            .execute(conf, params, example_box, lamb)
        )
        return u

    conf_prime = np.array(conf)
    conf_prime[ligand_idxs] += rng.normal(0, 0.01, size=(3,))

    ref_delta = ref_allpairs(conf_prime) - ref_allpairs(conf)
    test_delta = test_ixngroups(conf_prime) - test_ixngroups(conf)

    np.testing.assert_allclose(ref_delta, test_delta, rtol=rtol, atol=atol)
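# Toy demonstration of the identity in the docstring above, using a made-up 1/r pair
# "potential" instead of the real nonbonded kernel (everything here is illustrative;
# only numpy is assumed):
def _toy_allpairs(x):
    n = len(x)
    return sum(1.0 / np.linalg.norm(x[i] - x[j]) for i in range(n) for j in range(i + 1, n))

def _toy_ixngroup(x, group_a):
    group_b = [i for i in range(len(x)) if i not in group_a]
    return sum(1.0 / np.linalg.norm(x[a] - x[b]) for a in group_a for b in group_b)

_rng = np.random.default_rng(0)
_x = _rng.normal(size=(6, 3))
_group_a = [0, 1]
_x_prime = np.array(_x)
_x_prime[_group_a] += np.array([0.1, 0.0, 0.0])  # constant shift of group A only

# U(x') - U(x) == U_AB(x') - U_AB(x), since U_A and U_B are unchanged by a rigid shift:
assert np.isclose(_toy_allpairs(_x_prime) - _toy_allpairs(_x),
                  _toy_ixngroup(_x_prime, _group_a) - _toy_ixngroup(_x, _group_a))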
def fixture_x0(rng: np.random.Generator, x0_shape: ShapeType) -> np.ndarray:
    """Random data from a standard normal distribution."""
    return rng.normal(0, 1, size=x0_shape)
def fixture_x(input_dim: int, num_data: int, rng: np.random.Generator) -> np.ndarray:
    """Random data from a standard normal distribution."""
    return rng.normal(0, 1, size=(num_data, input_dim))
def v(N: int, dtype: np.dtype, rng: np.random.Generator) -> np.ndarray:
    """Random vector of shape :func:`N` which defines a symmetric rank-1 update to
    :func:`A`."""
    return rng.normal(scale=10, size=N).astype(dtype, copy=False)
def constant(shape_const: ShapeLike, rng: np.random.Generator) -> randvars.Constant:
    return randvars.Constant(support=rng.normal(size=shape_const))
def sampling_fn(rng: np.random.Generator):
    return rng.normal(loc=loc, scale=scale, size=1)