def update_params(generator, ll_node, prior, n=1000000):
    """Sample n points from `generator`, refit `ll_node`'s posterior, and report both.

    Returns the (generator, ll_node) pair so a caller can compare the
    expected parameters against the recovered ones.
    """
    ll_node.scope = [0]
    ll_node.row_ids = list(range(n))
    # Fixed seeds keep the sampling and the posterior update reproducible.
    drawn = sample_parametric_node(generator, n, RandomState(1234))
    samples = drawn.reshape(-1, 1)
    update_parametric_parameters_posterior(ll_node, samples, RandomState(1234), prior)
    print("expected", generator.params, "found", ll_node.params)
    return generator, ll_node
def test_resume(self):
    """A chain run in two halves must match a fresh chain run in one go."""
    sampler = Sampler(self.nwalkers, self.ndim,
                      LogLikeGaussian(self.icov),
                      LogPriorGaussian(self.icov, cutoff=self.cutoff),
                      adaptive=True,
                      betas=make_ladder(self.ndim, self.ntemps, Tmax=self.Tmax))
    steps = 10
    thin_by = 2
    seed = 1

    def check_ratios(jumps, swaps):
        # Acceptance ratios are probabilities, so they must lie in [0, 1].
        assert (0 <= jumps).all() and (jumps <= 1).all()
        assert (0 <= swaps).all() and (swaps <= 1).all()

    # Run the first chain in two parts.
    resumed = sampler.chain(self.p0, RandomState(seed), thin_by)
    check_ratios(*resumed.run(steps))
    assert resumed.x.shape[0] == steps
    check_ratios(*resumed.run(steps))
    assert resumed.x.shape[0] == 2 * steps

    # Now do the same run afresh; with the same seed the results must be identical.
    fresh = sampler.chain(self.p0, RandomState(seed), thin_by)
    check_ratios(*fresh.run(2 * steps))
    assert fresh.x.shape[0] == 2 * steps
    assert (resumed.x == fresh.x).all(), "Chains don't match."
    assert (resumed.betas == fresh.betas).all(), "Ladders don't match."
def reset_random_seed(self, epoch=0):
    """Re-seed the RNG(s) deterministically for the given epoch.

    Offsetting the configured seed by `epoch` yields a distinct but
    reproducible stream per epoch. A separate user-level RNG is created
    only when `random_seed_for_user` is configured.
    """
    assert (self.config.random_seed is not None)
    self.rng = RandomState(self.config.random_seed + epoch)
    user_seed = self.config.random_seed_for_user
    if user_seed is not None:
        assert isinstance(user_seed, int)
        self.user_rng = RandomState(user_seed + epoch)
def test_bin_to_group_indices(size=100, bins=10):
    """Check group indices against a brute-force recomputation from the mask."""
    bin_indices = RandomState().randint(0, bins, size=size)
    mask = RandomState().randint(0, 2, size=size) > 0.5
    group_indices = bin_to_group_indices(bin_indices, mask=mask)
    # Every masked event must land in exactly one group.
    assert numpy.sum([len(group) for group in group_indices]) == numpy.sum(mask)
    collected = numpy.sort(numpy.concatenate(group_indices))
    expected = numpy.where(mask > 0.5)[0]
    assert numpy.all(collected == expected), 'group indices are computed wrongly'
def test_generate_positions(self):
    """Generated particle positions must equal Box's uniform sampler output."""
    source = ParticleSource()
    source._generator = RandomState(123)
    particles = source.generate_num_of_particles(100)
    # Same seed fed to the reference sampler -> identical draws expected.
    reference = Box().generate_uniform_random_posititons(RandomState(123), 100)
    # xp is the array backend (its testing helper works for that backend's arrays).
    particles.xp.testing.assert_array_equal(particles.positions, reference)
def test_groups_matrix(size=1000, bins=4):
    """Validate the groups matrix against per-event mask and bin labels."""
    bin_indices = RandomState().randint(0, bins, size=size)
    mask = RandomState().randint(0, 2, size=size) > 0.5
    n_signal_events = numpy.sum(mask)
    group_indices = bin_to_group_indices(bin_indices, mask=mask)
    assert numpy.sum([len(group) for group in group_indices]) == n_signal_events
    group_matrix = group_indices_to_groups_matrix(group_indices, n_events=size)
    # One nonzero entry per signal event, located at (its bin, its position).
    assert group_matrix.sum() == n_signal_events
    for event_id, (is_signal, bin_) in enumerate(zip(mask, bin_indices)):
        assert group_matrix[bin_, event_id] == is_signal
def gad_function(common_data, tasks_data=None):
    """Evaluate a batch of backdoor tasks via unit propagation.

    common_data: (instance, solver, measure, bit-encoded backdoor info)
        shared by all tasks in this batch.
    tasks_data: iterable of (task_index, task_value) pairs.
    Returns a list of (task_i, pid, value, time, status, elapsed) tuples.
    """
    inst, slv, meas, info = common_data
    results = []
    # Unpack the bit-packed backdoor description: dimension type, backdoor
    # type, a 6-bit base, a 16-bit mask length, then the mask itself.
    bits = decode_bits(info)
    [dim_type, bd_type] = bits[:2]
    bd_base = to_number(bits[2:8], 6)
    mask_len = to_number(bits[8:24], 16)
    bd_mask = bits[24:mask_len + 24]
    kwargs = {}
    if inst.cnf.has_atmosts and inst.cnf.atmosts():
        kwargs['atmosts'] = inst.cnf.atmosts()
    backdoor = inst.get_backdoor2(bd_type, bd_base, bd_mask)
    bases = backdoor.get_bases()
    extra_assumptions = []
    if inst.has_intervals():
        # Propagate one random assignment of the "supbs" variables and keep
        # the resulting output-variable literals.
        # NOTE(review): RandomState() is unseeded here, so this choice is not
        # reproducible across calls — confirm that is intentional.
        state = RandomState()
        supbs_vars = inst.supbs.variables()
        output_vars = inst.output_set.variables()
        supbs_values = state.randint(0, bd_base, size=len(supbs_vars))
        supbs_assumptions = [
            x if supbs_values[i] else -x for i, x in enumerate(supbs_vars)
        ]
        _, _, solution = slv.propagate(inst, supbs_assumptions, **kwargs)
        for lit in solution:
            if abs(lit) in output_vars:
                extra_assumptions.append(lit)
    # NOTE(review): extra_assumptions is built above but never passed to
    # solver.propagate() below — looks like it should be merged into
    # `assumptions`; confirm against the original project.
    with slv.prototype(inst, **kwargs) as solver:
        for task_data in tasks_data:
            st_timestamp = now()
            task_i, task_value = task_data
            if dim_type == NUMBERS:
                # Task value acts as a seed: draw backdoor values at random.
                state = RandomState(seed=task_value)
                values = state.randint(0, bd_base, size=len(backdoor))
                # todo: apply backdoor.get_masks() to values
            else:
                # Task value enumerates an assignment in mixed-radix `bases`.
                values = decimal_to_base(task_value, bases)
                # todo: map values using backdoor.get_mappers()
            assumptions = inst.get_assumptions(backdoor, values)
            status, stats, literals = solver.propagate(assumptions)
            time, value = stats['time'], meas.get(stats)
            # Mark "hard" when propagation finished without fixing enough literals.
            status = not (status and len(literals) < inst.max_literal())
            results.append(
                (task_i, getpid(), value, time, status, now() - st_timestamp))
    return results
def test_ks2samp_fast(size=1000):
    """Fast/weighted KS implementations must agree with the plain ks_2samp."""
    y1 = RandomState().uniform(size=size)
    # Random subsample of y1 serves as the second sample.
    y2 = y1[RandomState().uniform(size=size) > 0.5]
    reference = ks_2samp(y1, y2)[0]
    prep_data, prep_weights, prep_F = prepare_distribution(y1, numpy.ones(len(y1)))
    fast = _ks_2samp_fast(prep_data, y2, prep_weights, numpy.ones(len(y2)), cdf1=prep_F)
    fast_again = _ks_2samp_fast(prep_data, y2, prep_weights, numpy.ones(len(y2)), cdf1=prep_F)
    # Weights only need to be proportional, not normalized.
    weighted = ks_2samp_weighted(y1, y2, numpy.ones(len(y1)) / 3, numpy.ones(len(y2)) / 4)
    assert numpy.allclose(reference, fast, rtol=1e-2, atol=1e-3)
    assert numpy.allclose(fast, fast_again)
    assert numpy.allclose(fast, weighted)
    print('ks2samp is ok')
def sample():
    """Demonstrate sampling from the example SPN under two evidence patterns."""
    spn = create_SPN()
    import numpy as np
    from numpy.random.mtrand import RandomState
    from spn.algorithms.Sampling import sample_instances

    # Five rows with only the first column missing: sample it conditionally.
    partial_evidence = np.array([np.nan, 0, 0] * 5).reshape(-1, 3)
    print(sample_instances(spn, partial_evidence, RandomState(123)))

    # Five fully-missing rows: unconditional samples.
    no_evidence = np.array([np.nan, np.nan, np.nan] * 5).reshape(-1, 3)
    print(sample_instances(spn, no_evidence, RandomState(123)))
def __init__(self, n: int, k: int, seed: int = None, transform=None,
             noisiness: float = 0, noise_seed: int = None):
    """Build a noisy XOR Arbiter PUF simulation.

    n: challenge length; k: number of parallel arbiter chains.
    seed: seed for weight generation (None -> OS entropy).
    transform: input transform; defaults to LTFArray.transform_atf.
    noisiness: relative noise level fed to sigma_noise_from_random_weights.
    noise_seed: seed for the noise RNG (None -> OS entropy).
    """
    random_instance = RandomState(seed=seed) if seed is not None else RandomState()
    super().__init__(
        weight_array=self.normal_weights(n=n, k=k, random_instance=random_instance),
        transform=transform or LTFArray.transform_atf,
        combiner=LTFArray.combiner_xor,
        sigma_noise=NoisyLTFArray.sigma_noise_from_random_weights(
            n=n,
            sigma_weight=1,
            noisiness=noisiness,
        ),
        # BUG FIX: compare against None rather than truthiness — a noise_seed
        # of 0 is a valid seed and previously fell back to an unseeded RNG.
        random_instance=(RandomState(seed=noise_seed) if noise_seed is not None
                         else RandomState()),
    )
    self.n = n
    self.k = k
def __init__(
        self,
        n_stages=sys.maxsize,  # Maximum number of iterations on the training set
        latent_size=10,  # Size of the latent variable
        latent_covariance_matrix_regularizer=0,
        input_covariance_matrix_regularizer=0,
        latent_transition_matrix_regularizer=0,
        input_transition_matrix_regularizer=0,
        observation_variance=-1.,
        seed=1827,
):
    """Store hyperparameters, seed the RNG, and reset the stage counter.

    A negative `observation_variance` appears to mean "not fixed" —
    TODO(review): confirm against the training code.

    BUG FIX: the default for `n_stages` was `sys.maxint`, which does not
    exist on Python 3 (AttributeError at definition time); `sys.maxsize`
    is available on both Python 2.6+ and 3.x.
    """
    self.stage = 0
    self.n_stages = n_stages
    self.latent_size = latent_size
    self.seed = seed
    self.rng = RandomState(seed)
    self.latent_covariance_matrix_regularizer = float(
        latent_covariance_matrix_regularizer)
    self.input_covariance_matrix_regularizer = float(
        input_covariance_matrix_regularizer)
    self.latent_transition_matrix_regularizer = float(
        latent_transition_matrix_regularizer)
    self.input_transition_matrix_regularizer = float(
        input_transition_matrix_regularizer)
    self.observation_variance = float(observation_variance)
def __init__(
        self,
        n_stages=sys.maxsize,  # Maximum number of iterations on the training set
        latent_size=10,  # Size of the latent variable
        observation_variance=0.001,
        # input_covariance_matrix_regularizer = 0.,
        latent_transition_matrix_regularizer=0.,
        emission_matrix_regularizer=0.,
        gamma_prior_alpha=1.,
        gamma_prior_beta=0.0001,
        gamma_change_tolerance=0.0001,
        output_laplace_probs=True,
        max_Esteps=1,
        max_test_Esteps=25,
        seed=1827,
        verbose=False):
    """Store hyperparameters, seed the RNG, and reset the stage counter.

    max_Esteps / max_test_Esteps bound the E-step iterations during
    training and testing respectively; `last_Esteps` starts at max_Esteps.

    BUG FIX: the default for `n_stages` was `sys.maxint`, which does not
    exist on Python 3; `sys.maxsize` is available on Python 2.6+ and 3.x.
    """
    self.stage = 0
    self.n_stages = n_stages
    self.latent_size = latent_size
    self.seed = seed
    self.max_Esteps = max_Esteps
    self.max_test_Esteps = max_test_Esteps
    self.last_Esteps = max_Esteps
    self.rng = RandomState(seed)
    self.observation_variance = float(observation_variance)
    self.latent_transition_matrix_regularizer = float(
        latent_transition_matrix_regularizer)
    self.emission_matrix_regularizer = float(emission_matrix_regularizer)
    self.gamma_prior_alpha = float(gamma_prior_alpha)
    self.gamma_prior_beta = float(gamma_prior_beta)
    self.gamma_change_tolerance = float(gamma_change_tolerance)
    self.output_laplace_probs = output_laplace_probs
    self.verbose = verbose
def forget(self):
    """Reset the model to its untrained initial state.

    Re-seeds the RNG and re-draws the initial latent mean, the transition
    (A) and emission (C) matrices, and the covariance parameters, plus the
    scratch buffers used by multivariate_norm_log_pdf().
    """
    d_y = self.input_size
    d_z = self.latent_size
    self.rng = RandomState(self.seed)
    rng = self.rng
    self.stage = 0  # Model will be untrained after initialization
    # Initial latent-state distribution.
    self.mu_zero = rng.randn(d_z) / d_z
    self.V_zero = diag(ones(d_z))
    # Transition and emission matrices, scaled down by the latent size.
    self.A = rng.randn(d_z, d_z) / d_z
    self.C = rng.randn(d_y, d_z) / d_z
    if self.observation_variance >= 0:
        # Fixed observation variance requested by the caller.
        self.Sigma = diag(self.observation_variance * ones(d_y))
    else:
        # Negative variance means "learn it": start from the identity.
        self.Sigma = diag(ones(d_y))
    self.E = diag(ones(d_z))
    # Some useful temporary computation variables (mainly for multivariate_norm_log_pdf())
    self.vec_d_y = zeros((d_y))
    self.vec_d_y2 = zeros((d_y))
    self.pcov = zeros((d_y), dtype='i')
    self.Lcov = zeros((d_y, d_y))
    self.Ucov = zeros((d_y, d_y))
    # BUG FIX: modern NumPy only accepts 'C'/'F'/'A'/'K' for `order`; the
    # former order='fortran' relied on removed legacy string handling.
    self.covf = zeros((d_y, d_y), order='F')
    self.colvecf = zeros((d_y, 1), order='F')
    self.pivotscov = zeros((d_y), dtype='i')
def initialize(self, specification: Specification):
    """Store the run specification, announce start, seed the RNG, reset the call counter."""
    self.specification = specification
    seed = specification['seed']
    self.seed = seed
    self.num_calls = specification["num_calls"]
    logging.getLogger(self.get_logger_name()).info("Doing work!")
    self.rs = RandomState(seed)
    self.i = 0
def __init__(self, subject_column, *args, **kwargs):
    """Many-subject extrapolation ceiling for the Futrell2018 encoding benchmark."""
    super(Futrell2018Encoding.ManySubjectExtrapolationCeiling, self).__init__(
        subject_column, *args, **kwargs)
    # Fixed seed keeps subject subsampling reproducible across runs.
    self._rng = RandomState(0)
    self._num_subsamples = 5
    self.holdout_ceiling = Futrell2018Encoding.SplitHalfPoolCeiling(
        subject_column=subject_column)
def __init__(self, n: Union[int, ndarray, Iterable[int]],
             p: Union[float, ndarray, Iterable[float]], seed=None):
    """Store count parameter `n`, probability parameter `p`, and a seeded RandomState."""
    self.rs = RandomState(seed=seed)
    self.n = n
    self.p = p
def __init__(self, mean: Union[float, ndarray, Iterable[float]] = 0.0,
             std: Union[float, ndarray, Iterable[float]] = 1.0, seed=None):
    """Store `mean` and `std` parameters and a seeded RandomState."""
    self.rs = RandomState(seed=seed)
    self.mean = mean
    self.std = std
def __init__(self, mu: Union[float, ndarray, Iterable[float]],
             kappa: Union[float, ndarray, Iterable[float]], seed=None):
    """Store `mu` and `kappa` parameters and a seeded RandomState."""
    self.rs = RandomState(seed=seed)
    self.mu = mu
    self.kappa = kappa
def __init__(self, n: int, pvals: Union[float, ndarray, Iterable[float]], seed=None):
    """Store trial count `n`, probability vector `pvals`, and a seeded RandomState."""
    self.rs = RandomState(seed=seed)
    self.n = n
    self.pvals = pvals
def _sample(self, n=1, random_state=None):
    """Draw `n` samples from the SPN restricted to this object's condition.

    random_state: optional RandomState; None means a fresh RandomState(123)
    is created for this call. BUG FIX: the default used to be a single
    RandomState(123) instance evaluated once at definition time — a
    stateful default argument shared (and advanced) across calls, so
    repeated default calls were silently coupled.

    Returns a list of sampled rows with numeric values mapped back to names.
    """
    if random_state is None:
        random_state = RandomState(123)
    # Repeat the conditioning row n times as the evidence placeholder.
    placeholder = np.repeat(np.array(self._condition), n, axis=0)
    s = sample_instances(self._spn, placeholder, random_state)
    # Keep only this object's columns, in its own name order.
    indices = [self._initial_names_to_index[name] for name in self.names]
    result = s[:, indices]
    result = [self._numeric_to_names(l) for l in result.tolist()]
    return result
def test_init_params(self):
    """Tests that GaussianMixture params are set"""
    # Constructor arguments in declaration order; values are arbitrary but distinct.
    params = dict(
        n_components=2,
        covariance_type='diag',
        tol=1e-4,
        reg_covar=1e-5,
        max_iter=3,
        init_params='random',
        weights_init=np.array([0.4, 0.6]),
        means_init=np.array([[0, 0], [2, 3]]),
        precisions_init='todo',
        random_state=RandomState(666),
    )
    gm = GaussianMixture(**params)
    # Every constructor argument must land unchanged on the instance.
    expected = tuple(params.values())
    real = (gm.n_components, gm.covariance_type, gm.tol, gm.reg_covar,
            gm.max_iter, gm.init_params, gm.weights_init, gm.means_init,
            gm.precisions_init, gm.random_state)
    self.assertEqual(expected, real)
def __init__(self, n: int, ks: List[int], seed: int, noisiness: float = 0) -> None:
    """Build a binary tree of XOR Arbiter PUFs: ks[i] chains per node, 2**i nodes on level i."""
    self.seed = seed
    self.prng = RandomState(seed)
    self.n = n
    self.ks = ks
    self.k = ks[0]
    self.depth = len(ks) - 1
    # Aggregate size statistics over all levels (2**i PUFs on level i).
    self.chains = sum([k * (2 ** i) for i, k in enumerate(ks)])
    self.xors = sum([k * 2 ** i if k > 1 else 0 for i, k in enumerate(ks)])
    self.interposings = 2 ** (self.depth + 1) - 2
    self.noisiness = noisiness
    layers = []
    for level in range(self.depth + 1):
        # Levels below the root consume one extra (interposed) challenge bit.
        width = n + 1 if level > 0 else n
        layers.append([
            XORArbiterPUF(
                n=width,
                k=ks[level],
                seed=self.prng.randint(0, 2 ** 32),
                noisiness=noisiness,
                noise_seed=self.prng.randint(0, 2 ** 32),
            )
            for _ in range(2 ** level)
        ])
    self.layers = layers
    self.interpose_pos = n // 2
def analyze(self):
    """Assemble the experiment Result, measuring accuracy against the simulation."""
    self.progress_logger.debug('Analyzing result')
    trained = bool(self.model)
    if trained:
        # Empirical accuracy over 10^4 random challenges, seeded for reproducibility.
        accuracy = 1.0 - tools.approx_dist(
            instance1=self.simulation,
            instance2=self.model,
            num=10 ** 4,
            random_instance=RandomState(seed=self.parameters.seed),
        )
    else:
        accuracy = -1  # Sentinel: no model was learned.
    return Result(
        name=self.NAME,
        n=self.parameters.simulation.n,
        first_k=self.parameters.simulation.k,
        num_chains=self.parameters.simulation.chains,
        num_xors=self.parameters.simulation.xors,
        num_interposings=self.parameters.simulation.interposings,
        experiment_id=self.id,
        pid=getpid(),
        measured_time=self.measured_time,
        iterations=self.learner.nn.n_iter_ if trained else -1,
        accuracy=accuracy,
        accuracy_relative=accuracy / self.reliability,
        stability=self.stability,
        reliability=self.reliability,
        loss_curve=([round(loss, 3) for loss in self.learner.nn.loss_curve_]
                    if trained else [-1]),
        accuracy_curve=([round(acc, 3) for acc in self.learner.accuracy_curve]
                        if trained else [-1]),
        max_memory=self.max_memory(),
    )
def __init__(self, subject_column, *args, **kwargs):
    """Extrapolation ceiling over Pereira subjects, with a holdout-subject pool."""
    super(_PereiraBenchmark.PereiraExtrapolationCeiling, self).__init__(
        subject_column, *args, **kwargs)
    # Fixed seed keeps subject subsampling reproducible across runs.
    self._rng = RandomState(0)
    self._num_subsamples = 10
    self.holdout_ceiling = _PereiraBenchmark.PereiraHoldoutSubjectCeiling(
        subject_column=subject_column)
def get_constants_for_inits(name, seed=17):
    """Return initial Padé-activation coefficients for the named variant.

    Returns (numerator, denominator, center):
    numerator covers [1, x, x^2, x^3, x^4, x^5] (as far as provided);
    denominator covers (x, x^2, ...); center is the expansion center.
    `seed` is used only by the "pade_random" variant.

    Raises ValueError for an unknown `name` (previously it silently
    returned None, which crashed callers later with an opaque error).
    """
    if name == "pade_sigmoid_3":
        return ((1 / 2, 1 / 4, 1 / 20, 1 / 240), (0., 1 / 10), (0, ))
    elif name == "pade_sigmoid_5":
        return ((1 / 2, 1 / 4, 17 / 336, 1 / 224, 0, -1 / 40320), (0., 1 / 10),
                (0, ))
    elif name == "pade_softplus":
        return ((np.log(2), 1 / 2, (15 + 8 * np.log(2)) / 120, 1 / 30, 1 / 320),
                (0.01, 1 / 15), (0, ))
    elif name == "pade_optimized_avg":
        return [(0.15775171, 0.74704865, 0.82560348, 1.61369449, 0.6371632,
                 0.10474671),
                (0.38940287, 2.19787666, 0.30977883, 0.15976778), (0., )]
    elif name == "pade_optimized_leakyrelu":
        return [(3.35583603e-02, 5.05000375e-01, 1.65343934e+00, 2.01001052e+00,
                 9.31901999e-01, 1.52424124e-01),
                (3.30847488e-06, 3.98021568e+00, 5.12471206e-07, 3.01830109e-01),
                (0, )]
    elif name == "pade_optimized_leakyrelu2":
        return [(0.1494, 0.8779, 1.8259, 2.4658, 1.6976, 0.4414),
                (0.0878, 3.3983, 0.0055, 0.3488), (0, )]
    elif name == "pade_random":
        # Random initialization drawn from a seeded generator.
        rng = RandomState(seed)
        return (rng.standard_normal(5), rng.standard_normal(4), (0, ))
    elif name == "pade_optmized":  # (sic) historical misspelled key, kept for callers
        return [
            (0.0034586860882628158, -0.41459839329894876, 4.562452712166459,
             -16.314813244428276, 18.091669531543833, 0.23550876048241304),
            (3.0849791873233383e-28, 3.2072596311394997e-27,
             1.0781647589819156e-28, 11.493453196161223), (0, )
        ]
    else:
        raise ValueError("unknown init name: %r" % (name,))
def __init__(self, lb: Union[float, ndarray, Iterable[float]],
             ub: Union[float, ndarray, Iterable[float]], seed=None):
    """Store lower bound `lb`, upper bound `ub`, and a seeded RandomState."""
    self.rs = RandomState(seed=seed)
    self.lb = lb
    self.ub = ub
def __init__(self, mu: Union[float, ndarray, Iterable[float]],
             cov: Union[list, ndarray], seed=None):
    """Store mean vector `mu`, covariance `cov`, and a seeded RandomState."""
    self.rs = RandomState(seed=seed)
    self.mu = mu
    self.cov = cov
def __init__(self, mu: Union[float, ndarray, Iterable[float]],
             lam: Union[float, ndarray, Iterable[float]], seed=None):
    """Store `mu` and `lam` parameters and a seeded RandomState."""
    self.rs = RandomState(seed=seed)
    self.mu = mu
    self.lam = lam
def gad_function(common_data, tasks_data=None):
    """Evaluate a batch of backdoor tasks by fully solving each sub-problem.

    common_data: (instance, solver, measure, bit-encoded backdoor info)
        shared by all tasks in this batch.
    tasks_data: iterable of (task_index, task_value) pairs.
    Returns a list of (task_i, pid, value, time, status, elapsed) tuples.
    """
    inst, slv, meas, info = common_data
    results = []
    # Unpack the bit-packed backdoor description: dimension type, backdoor
    # type, a 6-bit base, a 16-bit mask length, then the mask itself.
    bits = decode_bits(info)
    [dim_type, bd_type] = bits[:2]
    bd_base = to_number(bits[2:8], 6)
    mask_len = to_number(bits[8:24], 16)
    bd_mask = bits[24:mask_len + 24]
    backdoor = inst.get_backdoor2(bd_type, bd_base, bd_mask)
    bases = backdoor.get_bases()
    # PERF/CONSISTENCY FIX: `kwargs` depends only on the instance, so build
    # it once instead of rebuilding it for every task (this also matches the
    # propagate-based gad_function variant).
    kwargs = {}
    if inst.cnf.has_atmosts and inst.cnf.atmosts():
        kwargs['atmosts'] = inst.cnf.atmosts()
    for task_data in tasks_data:
        st_timestamp = now()
        task_i, task_value = task_data
        if dim_type == NUMBERS:
            # Task value acts as a seed: draw backdoor values at random.
            state = RandomState(seed=task_value)
            values = state.randint(0, bd_base, size=len(backdoor))
            # todo: apply backdoor.get_masks() to values
        else:
            # Task value enumerates an assignment in mixed-radix `bases`.
            values = decimal_to_base(task_value, bases)
            # todo: map values using backdoor.get_mappers()
        assumptions = inst.get_assumptions(backdoor, values)
        status, stats, _ = slv.solve(inst.clauses(), assumptions, **kwargs)
        time, value = stats['time'], meas.get(stats)
        results.append(
            (task_i, getpid(), value, time, status, now() - st_timestamp))
    return results
def SplitData(X_full, Y_full, hour_full, dayofweek_full,
              train_prop=0.7, valid_prop=0.2, test_prop=0.1):
    """Shuffle-split X/Y into train / valid / test partitions.

    The shuffle uses a fixed seed (1024), so the split is reproducible.
    `hour_full`, `dayofweek_full` and `test_prop` are accepted for interface
    compatibility but unused here: the test share is simply whatever remains
    after the train and valid cuts.
    """
    n = Y_full.shape[0]
    indices = np.arange(n)
    RandomState(1024).shuffle(indices)
    # Cut points follow the original arithmetic exactly (float products).
    first_cut = int(float(n) * train_prop)
    second_cut = int(float(n) * (train_prop + valid_prop))
    print('train : valid : test = ', train_prop, valid_prop, test_prop)
    parts = (indices[:first_cut], indices[first_cut:second_cut], indices[second_cut:])
    X_train, X_valid, X_test = (X_full[p] for p in parts)
    Y_train, Y_valid, Y_test = (Y_full[p] for p in parts)
    return X_train, X_valid, X_test, \
        Y_train, Y_valid, Y_test