def test_call_within_randomstate(self):
     # Check that a custom Generator does not call into the global state
     m = Generator(MT19937())
     res = np.array([0, 8, 7, 2, 1, 9, 4, 7, 0, 3])
     for i in range(3):
         mt19937.bit_generator.seed(i)
         m.bit_generator.seed(4321)
         # If m.state is not honored, the result will change
         assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res)
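A related minimal sketch, using only public NumPy APIs, showing the same property outside a test class: draws from a local Generator leave the legacy global RandomState stream untouched.

import numpy as np
from numpy.random import Generator, MT19937

np.random.seed(0)
expected = np.random.random(3)              # legacy global stream

np.random.seed(0)
local = Generator(MT19937(4321))            # independent local generator
local.choice(10, size=10)                   # draws only from the local bit generator
assert np.allclose(np.random.random(3), expected)  # global stream is unaffected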
Example #2
 def setup_class(cls):
      # Overridden in test classes. Placeholder to silence IDE noise
     cls.bit_generator = PCG64
     cls.advance = None
     cls.seed = [12345]
     cls.rg = Generator(cls.bit_generator(*cls.seed))
     cls.initial_state = cls.rg.bit_generator.state
     cls.seed_vector_bits = 64
     cls._extra_setup()
Example #3
    def test_pickle(self):
        import pickle

        bit_generator = self.bit_generator(*self.data1["seed"])
        state = bit_generator.state
        bitgen_pkl = pickle.dumps(bit_generator)
        reloaded = pickle.loads(bitgen_pkl)
        reloaded_state = reloaded.state
        assert_array_equal(
            Generator(bit_generator).standard_normal(1000),
            Generator(reloaded).standard_normal(1000),
        )
        assert bit_generator is not reloaded
        assert_state_equal(reloaded_state, state)

        ss = SeedSequence(100)
        aa = pickle.loads(pickle.dumps(ss))
        assert_equal(ss.state, aa.state)
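As a usage sketch of the same pickling support, a whole Generator can also be round-tripped directly; the bit generator state travels with the pickle, so the clone continues the identical stream.

import pickle
from numpy.random import Generator, PCG64

rng = Generator(PCG64(100))
rng.standard_normal(10)                     # consume some values first
clone = pickle.loads(pickle.dumps(rng))     # state is preserved in the pickle
assert (rng.standard_normal(5) == clone.standard_normal(5)).all()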
Example #4
def _retry(minimizer):
    sg = SeedSequence()
    rgs = [Generator(MT19937(s)) for s in sg.spawn(minimizer.workers)]
    procs = [
        Process(target=_retry_loop, args=(pid, rgs, minimizer))
        for pid in range(minimizer.workers)
    ]
    for p in procs:
        p.start()
    return procs
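The pattern above relies on SeedSequence.spawn to hand each worker a statistically independent stream. A small self-contained sketch of the same idea, without the multiprocessing plumbing:

from numpy.random import Generator, MT19937, SeedSequence

sg = SeedSequence(12345)
streams = [Generator(MT19937(s)) for s in sg.spawn(4)]   # one generator per worker
draws = [g.random(3) for g in streams]
# each child SeedSequence yields a distinct, reproducible stream
assert len({tuple(d) for d in draws}) == len(draws)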
Example #5
 def setup_class(cls):
     cls.bit_generator = MT19937
     cls.advance = None
     cls.seed = [2**21 + 2**16 + 2**5 + 1]
     cls.rg = Generator(cls.bit_generator(*cls.seed))
     cls.initial_state = cls.rg.bit_generator.state
     cls.seed_vector_bits = 32
     cls._extra_setup()
     cls.seed_error = ValueError
Example #6
    def gen_dd_samples(self,
                       replications,
                       n_samples,
                       dimensions,
                       scramble=True):
        """
        Generate r nxd Lattice samples

        Args:
            replications (int): Number of nxd matrices to generate (sample.size()[0])
            n_samples (int): Number of observations (sample.size()[1])
            dimensions (int): Number of dimensions (sample.size()[2])
            scramble (bool): If True, apply a random shift so samples lie in the unit cube; otherwise the unshifted lattice points are returned for every replication

        Returns:
            replications x n_samples x dimensions (numpy array)
        """
        m = log2(n_samples)
        if m % 1 != 0:
            raise DistributionGenerationError("n_samples must be a power of 2")
        m = int(m)
        r = int(replications)
        d = int(dimensions)
        if not hasattr(self,
                       'lattice_rng'):  # initialize lattice rng and shifts
            self.d = d
            self.r = r
            self.rng = Generator(PCG64(self.rng_seed))
            self.lattice_rng = LatticeSeq(s=self.d)
            self.shifts = self.rng.uniform(0, 1, (self.r, self.d))
        else:
            if d != self.d or r != self.r:
                warnings.warn(
                    '''
                    Using dimensions = %d and replications = %d
                    as previously set for this generator.''' %
                    (self.d, self.r), DistributionGenerationWarnings)
        if self.n_min == 0:
            # generate first 2^m points
            x = vstack([self.lattice_rng.calc_block(i) for i in range(m + 1)])
            self.n_min = 2**m
        elif n_samples != self.n_min:
            raise DistributionGenerationError('''
                This Lattice generator has returned a total of %d samples.
                n_samples is expected to be %d
                ''' % (int(self.n_min), int(self.n_min)))
        else:
            # generate self.n_min more samples
            x = self.lattice_rng.calc_block(m + 1)
            self.n_min = 2**(m + 1)
        if scramble:
            x = array([(x + shift_r) % 1
                       for shift_r in self.shifts])  # random shift
        else:
            x = repeat(x[None, :, :], self.r,
                       axis=0)  # duplicate unshifted samples
        return x
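The scramble branch above applies a Cranley-Patterson random shift: each replication adds its own uniform offset modulo 1, so points stay in the unit cube. A hedged stand-alone sketch of that step (the "lattice" points here are placeholders):

import numpy as np
from numpy.random import Generator, PCG64

rng = Generator(PCG64(7))
points = np.linspace(0, 1, 8, endpoint=False)[:, None]    # placeholder 8 x 1 point set
shifts = rng.uniform(0, 1, (3, 1))                         # one shift per replication
shifted = np.array([(points + s) % 1 for s in shifts])     # random shift, stays in [0, 1)
assert shifted.shape == (3, 8, 1) and (shifted >= 0).all() and (shifted < 1).all()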
Example #7
    def __init__(self, H, graph_shape=(4, 4), allow_cuda=False):
        """
        :param H: A Hamiltonian designed for spacetime graphs.
        :param graph_shape: The dimensions of the spacetime graphs used in the simulator.
        :param allow_cuda: Whether computations may run on the GPU. Currently ignored (2021-06-03).
        """

        super(RejectionSimulator, self).__init__(H, graph_shape, allow_cuda)
        self.rng = Generator(PCG64())
Example #8
 def setup(self, bitgen):
     if bitgen == 'numpy':
         self.rg = np.random.RandomState()
     else:
         self.rg = Generator(getattr(np.random, bitgen)())
     self.rg.random()
     self.int32info = np.iinfo(np.int32)
     self.uint32info = np.iinfo(np.uint32)
     self.uint64info = np.iinfo(np.uint64)
Example #9
 def test_shuffle_mixed_dimension(self):
     # Test for trac ticket #2074
     for t in [[1, 2, 3, None], [(1, 1), (2, 2), (3, 3), None],
               [1, (2, 2), (3, 3), None], [(1, 1), 2, 3, None]]:
         mt19937 = Generator(MT19937(12345))
         shuffled = np.array(t, dtype=object)
         mt19937.shuffle(shuffled)
         expected = np.array([t[2], t[0], t[3], t[1]], dtype=object)
         assert_array_equal(np.array(shuffled, dtype=object), expected)
Example #10
 def test_shuffle_mixed_dimension(self):
     # Test for trac ticket #2074
     for t in [[1, 2, 3, None],
               [(1, 1), (2, 2), (3, 3), None],
               [1, (2, 2), (3, 3), None],
               [(1, 1), 2, 3, None]]:
         mt19937 = Generator(MT19937(12345))
         shuffled = list(t)
         mt19937.shuffle(shuffled)
         assert_array_equal(shuffled, [t[2], t[0], t[3], t[1]])
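A brief usage sketch of the API these two tests exercise: Generator.shuffle permutes its argument in place, including plain Python lists with mixed element types.

from numpy.random import Generator, MT19937

rng = Generator(MT19937(12345))
data = [1, (2, 2), (3, 3), None]
rng.shuffle(data)                      # in-place permutation
assert sorted(map(repr, data)) == sorted(map(repr, [1, (2, 2), (3, 3), None]))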
Example #11
def _make_irregular_tripole_grid_data(shape: Tuple[int, int], seed: int) -> np.ndarray:
    rng = Generator(PCG64(seed))
    # avoid large-amplitude variation, ensure positive values, mean of 1
    grid_data = 0.9 + 0.2 * rng.random(shape)
    assert np.all(grid_data > 0)
    # make northern edge grid data fold onto itself
    nx = shape[-1]
    half_northern_edge = grid_data[-1, : (nx // 2)]
    grid_data[-1, (nx // 2) :] = half_northern_edge[::-1]
    return grid_data
Example #12
 def __init__(self, fname):
     self.vars = Variables(fname)
     self.seed = SeedSequence()
     self.rand_gen = Generator(PCG64(self.seed))
     self.vector_initial = Normalise(self.rand_gen.uniform(-1,1,3))
     self.pos_initial = np.array([0, 0, 0])
     self.time = np.full(self.vars.sample_total, self.vars.base_time)
     self.time = np.append([0], self.time)
     self.time = np.cumsum(self.time)
     self.time = np.reshape(self.time, (self.time.size, 1))
Example #13
    def spawn_generators(n):
        """
        Spawn n random generators

        Generators are ensured to be independent as long as you spawn fewer than 2^64 of them and draw fewer than
        2^64 variates from each generator.

        :return: a list of n generators
        """
        return [Generator(SFC64(stream)) for stream in seed_seq.spawn(n)]
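spawn_generators above closes over a module-level seed_seq. A hedged, self-contained version of the same pattern (the seed value and names here are illustrative):

from numpy.random import Generator, SFC64, SeedSequence

seed_seq = SeedSequence(20200616)             # root entropy for the whole run

def spawn_generators(n):
    # children of the root SeedSequence give independent SFC64 streams
    return [Generator(SFC64(stream)) for stream in seed_seq.spawn(n)]

gens = spawn_generators(3)
assert len({g.random() for g in gens}) == 3   # distinct first draws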
Example #14
 def test_advange_large(self):
     rs = Generator(self.bit_generator(38219308213743))
     pcg = rs.bit_generator
     state = pcg.state
     initial_state = 287608843259529770491897792873167516365
     assert state["state"]["state"] == initial_state
     pcg.advance(sum(2**i for i in (96, 64, 32, 16, 8, 4, 2, 1)))
     state = pcg.state["state"]
     advanced_state = 277778083536782149546677086420637664879
     assert state["state"] == advanced_state
Example #15
 def minimize(self,
              fun,
              bounds,
              guess=None,
              sdevs=None,
              rg=Generator(MT19937()),
              store=None):
     choice = rg.integers(0, len(self.optimizers))
     opt = self.optimizers[choice]
     return opt.minimize(fun, bounds, guess, sdevs, rg, store)
Example #16
 def test_advange_large(self):
     rs = Generator(self.bit_generator(38219308213743))
     pcg = rs.bit_generator
     state = pcg.state["state"]
     initial_state = 287608843259529770491897792873167516365
     assert state["state"] == initial_state
     pcg.advance(sum(2**i for i in (96, 64, 32, 16, 8, 4, 2, 1)))
     state = pcg.state["state"]
     advanced_state = 135275564607035429730177404003164635391
     assert state["state"] == advanced_state
Example #17
 def test_seed_float_array(self):
     # GH #82
     rs = Generator(self.bit_generator(*self.data1['seed']))
     assert_raises(TypeError, rs.bit_generator.seed, np.array([np.pi]))
     assert_raises(TypeError, rs.bit_generator.seed, np.array([-np.pi]))
     assert_raises(TypeError, rs.bit_generator.seed,
                   np.array([np.pi, -np.pi]))
     assert_raises(TypeError, rs.bit_generator.seed, np.array([0, np.pi]))
     assert_raises(TypeError, rs.bit_generator.seed, [np.pi])
     assert_raises(TypeError, rs.bit_generator.seed, [0, np.pi])
Example #18
    def __init__(self, Nx, Ny, p=0.593):

        self.Nx = Nx
        self.Ny = Ny
        self.Nsites = Nx * Ny
        self.p = p
        self.trials = 0
        self.cluster_iterations = 0
        self.rng = Generator(PCG64())
        self.compute_neighbor_sites()
        self.trial()
Example #19
 def minimize(self,
              fun,
              bounds,
              guess=None,
              sdevs=None,
              rg=Generator(MT19937()),
              store=None):
     ret = shgo(fun,
                bounds=list(zip(bounds.lb, bounds.ub)),
                options={'maxfev': self.max_eval_num(store)})
     return ret.x, ret.fun, ret.nfev
Example #20
 def minimize(self,
              fun,
              bounds,
              guess=None,
              sdevs=None,
              rg=Generator(MT19937()),
              store=None):
     if guess is None:
         guess = rg.uniform(bounds.lb, bounds.ub)
     ret = minimize(fun, x0=guess, bounds=bounds)
     return ret.x, ret.fun, ret.nfev
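The fallback above draws a starting point uniformly inside the box constraints. A minimal sketch of that single step (the bound values are made up):

import numpy as np
from numpy.random import Generator, MT19937

rg = Generator(MT19937(42))
lb = np.array([-5.0, 0.0, 1.0])
ub = np.array([5.0, 2.0, 10.0])
guess = rg.uniform(lb, ub)                 # elementwise uniform on [lb, ub)
assert guess.shape == lb.shape and np.all((guess >= lb) & (guess < ub))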
Example #21
    def set_idx(self, new_idx):
        '''
           Skip the sequence to new_idx 
        '''
        if self.idx > new_idx:
            self.pcg_instance = PCG64(self.seed)
            self.generator = Generator(self.pcg_instance)
            self.idx = 0

        self.pcg_instance.advance(new_idx - self.idx)
        self.idx = new_idx
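set_idx relies on PCG64.advance to jump the underlying stream forward without drawing the skipped values. A short sketch showing that advances compose additively, which is what makes the index bookkeeping above exact:

from numpy.random import PCG64

a, b = PCG64(2021), PCG64(2021)
a.advance(500)
a.advance(500)
b.advance(1000)
assert a.state == b.state      # two skips of 500 equal one skip of 1000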
Example #22
    def _day_setup(self):
        h = self._hash.copy()
        h.update(self._skt)
        self._skt = h.finalize()

        prf = hmac.HMAC(self._skt, hashes.SHA256(), backend=default_backend())
        prf.update(b"broadcast key")
        prf_out = prf.copy().finalize()
        bit_gen = AESCounter(key=int.from_bytes(prf_out[16:], "big"))
        self._gen = Generator(bit_gen)
        self._ephid = self._gen.bytes(16)
Example #23
    def __init__(self):

        global sigma_angle, sigma_position, sigma_angleD, sigma_positionD

        SEED = int(
            (datetime.now() - datetime(1970, 1, 1)).total_seconds() * 77.0)
        self.rng_noise_adder = Generator(SFC64(SEED))

        self.noise_mode = NOISE_MODE

        self.sigma_Q = sigma_Q
Example #24
def create_pulsed_events(nevents, freq, t0=0, t1=1000, nback=0):
    from numpy.random import Generator, PCG64
    rg = Generator(PCG64())
    events = rg.normal(0.5, 0.1, nevents - nback)
    events = events - np.floor(events)

    if nback > 0:
        events = np.concatenate((events, rg.uniform(0, 1, nback)))
    pulse_no = rg.integers(0, np.rint((t1 - t0) * freq), nevents)
    events = np.sort(events + pulse_no)
    return t1 + events / freq
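A hedged usage sketch for the helper above (the parameter values are arbitrary): it returns a sorted array of nevents event times built from a pulsed component plus an optional flat background.

import numpy as np

times = create_pulsed_events(10_000, freq=0.05, t0=0, t1=1000, nback=2_000)
assert times.shape == (10_000,)
assert np.all(np.diff(times) >= 0)      # event times come out sorted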
Example #25
def mo_retry(fun, weight_bounds, ncon, y_exp, store, optimize, num_retries, value_limits, 
          workers=mp.cpu_count()):
    sg = SeedSequence()
    rgs = [Generator(MT19937(s)) for s in sg.spawn(workers)]
    proc = [Process(target=_retry_loop,
                    args=(pid, rgs, fun, weight_bounds, ncon, y_exp,
                          store, optimize, num_retries, value_limits))
            for pid in range(workers)]
    for p in proc:
        p.start()
    for p in proc:
        p.join()
    store.sort()
    store.dump()
    return store.get_xs()
Example #26
 def __init__(self, text_file):
     with Path(text_file).open('r+', encoding="utf-8") as f:
         self.text_list = [s.rstrip() for s in f.readlines()]
     with (Path(text_file).parent / 'short_replics.txt').open(
             'r+', encoding="utf-8") as f:
         self.short_text_list = [s.rstrip() for s in f.readlines()]
     self.font_files = []
     self.min_font_size = 14
     self.max_font_size = 60
     self.rg = Generator(PCG64())
     self.fancy_fonts = list((config.fonts_dir / 'fancy_fonts').iterdir())
     self.plain_fonts = list((config.fonts_dir / 'fancy_fonts').iterdir())
Example #27
def retry(fun, store, optimize, num_retries, value_limit = math.inf, 
          workers=mp.cpu_count(), stop_fitness = -math.inf):
    sg = SeedSequence()
    rgs = [Generator(MT19937(s)) for s in sg.spawn(workers)]
    proc = [Process(target=_retry_loop,
                    args=(pid, rgs, fun, store, optimize, num_retries,
                          value_limit, stop_fitness))
            for pid in range(workers)]
    for p in proc:
        p.start()
    for p in proc:
        p.join()
    store.sort()
    store.dump()
    return OptimizeResult(x=store.get_x_best(), fun=store.get_y_best(), 
                          nfev=store.get_count_evals(), success=True)
Example #28
    def test_permutation_subclass(self):
        class N(np.ndarray):
            pass

        mt19937 = Generator(MT19937(1))
        orig = np.arange(3).view(N)
        perm = mt19937.permutation(orig)
        assert_array_equal(perm, np.array([2, 0, 1]))
        assert_array_equal(orig, np.arange(3).view(N))

        class M:
            a = np.arange(5)

            def __array__(self):
                return self.a

        mt19937 = Generator(MT19937(1))
        m = M()
        perm = mt19937.permutation(m)
        assert_array_equal(perm, np.array([4, 1, 3, 0, 2]))
        assert_array_equal(m.__array__(), np.arange(5))
Example #29
 def test_seed_float_array(self):
     rs = Generator(self.bit_generator(*self.data1['seed']))
     assert_raises(self.seed_error_type, rs.bit_generator.seed,
                   np.array([np.pi]))
     assert_raises(self.seed_error_type, rs.bit_generator.seed,
                   np.array([-np.pi]))
     assert_raises(self.seed_error_type, rs.bit_generator.seed,
                   np.array([np.pi, -np.pi]))
     assert_raises(self.seed_error_type, rs.bit_generator.seed,
                   np.array([0, np.pi]))
     assert_raises(self.seed_error_type, rs.bit_generator.seed, [np.pi])
     assert_raises(self.seed_error_type, rs.bit_generator.seed, [0, np.pi])
Example #30
def underopt_edges(quantiles: Dict, method: str, model: Architecture,
                   model_init: Architecture):
    limit_val: Dict[int, torch.Tensor] = dict()
    underoptimized_edges = dict()
    for layer_idx, layer in enumerate(model.layers):

        weight_keys = [
            key for key in layer.func.state_dict() if "weight" in key
        ]

        if len(weight_keys) > 0:
            param = layer.func.state_dict()[weight_keys[0]]
            if method == ThresholdStrategy.UnderoptimizedMagnitudeIncrease:
                param_init = model_init.layers[layer_idx].func.state_dict()[
                    weight_keys[0]]
                limit_val[layer_idx] = torch.abs(param) - torch.abs(param_init)
            elif method == ThresholdStrategy.UnderoptimizedLargeFinal:
                limit_val[layer_idx] = torch.abs(param)
            elif method == ThresholdStrategy.UnderoptimizedRandom:
                n = reduce(lambda x, y: x * y, param.shape, 1)
                # Ensuring we select different edges each time
                gen = Generator(PCG64(int(time.time() + os.getpid())))
                limit_val[layer_idx] = (
                    torch.abs(param).reshape(-1)[gen.permutation(n)].reshape(
                        param.shape))
            limit_val[layer_idx] = limit_val[layer_idx].cpu()

            if layer_idx not in quantiles:
                underoptimized_edges[layer_idx] = list()
            else:
                low_quantile, up_quantile = quantiles[layer_idx]
                logger.info(
                    f"[{layer_idx}] Quantiles are {low_quantile}-{up_quantile}"
                )

                if low_quantile <= 0.0:
                    lower_bound = -np.inf
                else:
                    lower_bound = np.quantile(limit_val[layer_idx],
                                              low_quantile)

                if up_quantile >= 1.0:
                    upper_bound = np.inf
                else:
                    upper_bound = np.quantile(limit_val[layer_idx],
                                              up_quantile)

                underoptimized_edges[layer_idx] = (torch.logical_and(
                    limit_val[layer_idx] < upper_bound,
                    limit_val[layer_idx] >= lower_bound,
                ).nonzero().cpu().numpy().tolist())

    return underoptimized_edges
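For the UnderoptimizedRandom branch, the key step is shuffling the flattened weight magnitudes with Generator.permutation so a different random set of edges falls inside the quantile window on each call. A stand-alone sketch of that shuffle, using a NumPy array in place of the torch tensor for self-containment (the values are illustrative):

import numpy as np
from numpy.random import Generator, PCG64

param = np.arange(12.0).reshape(3, 4)
gen = Generator(PCG64(1234))                 # the original seeds from time.time() + os.getpid()
n = param.size
shuffled = np.abs(param).reshape(-1)[gen.permutation(n)].reshape(param.shape)
# same magnitudes, randomly reassigned to positions, so the selected edges differ per call
assert np.array_equal(np.sort(shuffled, axis=None), np.sort(np.abs(param), axis=None))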