def test_randint_range(self):
    # Test for ticket #1690: randint over the full range of a C long
    # should not raise.
    lmax = np.iinfo('l').max
    lmin = np.iinfo('l').min
    try:
        rnd.randint(lmin, lmax)
    except Exception:
        raise AssertionError
def symmetric_random_walk(size, seed=None, scale=1.0, normalize=True):
    # 0. Preparation
    if seed is not None:
        rnd.seed(seed)
    else:
        rnd.seed()
    time_steps = size[0]
    num_dims = len(size) - 1

    # 1. Generate random walk noise levels (+/-1 integer steps, accumulated)
    random_walk = 2 * rnd.randint(2, size=time_steps) - 1
    random_walk = np.cumsum(random_walk)

    # 2. Normalize random walk noise levels to the range [0, 1]
    if normalize:
        random_walk = random_walk + np.abs(random_walk.min())
        random_walk = random_walk / random_walk.max()

    # 3. Scale random walk noise levels to max_level
    # (not in-place, so the integer walk from normalize=False is upcast safely)
    random_walk = random_walk * scale

    # 4. Generate unit-variance noise: U(-a, a) has std (2a)/sqrt(12),
    # so a = sqrt(12)/2 gives std 1
    noise = rnd.uniform(low=-np.sqrt(12) / 2, high=np.sqrt(12) / 2, size=size)

    # 5. Scale noise to the desired std at each time step
    random_walk = random_walk.reshape((-1,) + (1,) * num_dims)
    noise *= random_walk

    return noise.astype(np.float32), random_walk.astype(np.float32)
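# A minimal usage sketch (illustrative, not part of the original module),
# assuming `rnd` above is numpy.random or an API-compatible generator.
def _demo_symmetric_random_walk():
    # 100 time steps of 8x8 noise; the per-step std follows the random walk,
    # normalized to [0, 1] and then scaled by 0.5.
    noise, levels = symmetric_random_walk((100, 8, 8), seed=42, scale=0.5)
    assert noise.shape == (100, 8, 8)   # one 8x8 noise field per time step
    assert levels.shape == (100, 1, 1)  # broadcastable std level per step
    return noise, levels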
def parallel_montecarlo(filename, mapper, reducer, jobs_params, n_repetitions, seed=None, n_cpu=None):
    """
    This function implements a basic map-reduce framework based on multiprocessing.Pool.

    Inputs:
        filename      - name of the output file, where the results will be saved as a pickle.
        mapper        - the function that runs the computational job, given an element of jobs_params.
        reducer       - function for aggregating the n_repetitions runs of a job.
        jobs_params   - list of job parameters.
        n_repetitions - number of times to run each job.
        seed          - random seed to be used. To have reproducible results, always specify a seed.
        n_cpu         - number of processes to use. The default is to use all available cores.

    Outputs:
        reduced_results - output of reducer on the various simulations.
        Also, the results will be saved to a pickle file.

    Example (computes the means of 3 normal random variables with different means):
        >>> parallel_montecarlo('testing', numpy.random.normal, numpy.mean, [-1, 0, +1], 1000)
        n_cpu: 4
        Saving to ./pickles/testing.pickle.gz
        Saved fields: n_repetitions, name, results, seed, xs
        [-0.9465148770830919, 0.03763575004851667, 1.056358627427924]
    """
    mkl.set_num_threads(1)
    if n_cpu is None:
        n_cpu = get_n_cpu()
    #print(f'n_cpu: {n_cpu}')

    SEED = seed if seed is not None else 0
    N_SEED_INTS = 4
    mkl_random.seed(SEED)
    # One fresh 4-integer seed per (job, repetition) pair, cycled over the job parameters.
    iteration_parameters = zip(
        mkl_random.randint(0, 2**32, size=(len(jobs_params) * n_repetitions, N_SEED_INTS)),
        itertools.cycle(jobs_params))
    wrapped_job_computation_func = functools.partial(set_random_seed_and_apply_func, mapper)

    if n_cpu == 1:
        results = list(itertools.starmap(wrapped_job_computation_func, iteration_parameters))
    else:
        with multiprocessing.Pool(processes=n_cpu) as p:
            results = list(p.starmap(wrapped_job_computation_func, iteration_parameters))

    # Regroup the interleaved results so that each job's repetitions end up together.
    results_grouped_by_params = [results[i::len(jobs_params)] for i in range(len(jobs_params))]
    reduced_results = list(map(reducer, results_grouped_by_params))

    if filename is not None:
        pickler.dump(filename, name=filename, xs=jobs_params,
                     results=np.array(reduced_results),
                     n_repetitions=n_repetitions, seed=SEED)

    return reduced_results
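# `set_random_seed_and_apply_func` is referenced above but not defined in this
# section. A plausible sketch, under the assumption that each worker re-seeds
# the generator with its own 4-integer seed before running the job, and that
# `mkl_random.seed` accepts a sequence of ints (as numpy.random.seed does):
def set_random_seed_and_apply_func(func, seed_ints, job_param):
    mkl_random.seed(list(seed_ints))  # assumed seed-sequence signature
    return func(job_param)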
def test_randint(self):
    rnd.seed(self.seed, self.brng)
    actual = rnd.randint(-99, 99, size=(3, 2))
    desired = np.array([[95, -96], [-65, 41], [3, 96]])
    np.testing.assert_array_equal(actual, desired)
def test_int_negative_interval(self):
    assert_(-5 <= rnd.randint(-5, -1) < -1)
    x = rnd.randint(-5, -1, 5)
    assert_(np.all(-5 <= x))
    assert_(np.all(x < -1))