def __init__(self, obj, delayInterval):
    self.obj = obj
    self.delayInterval = delayInterval
    self.start = delayInterval[0]
    self.range = delayInterval[1] - delayInterval[0]
    # Seed our own random generator, remember its state and restore original state
    state = random.get_state()
    random.seed()
    self.state = random.get_state()
    random.set_state(state)
def create_dataset(cifar, lld):
    """
    Create full dataset, shuffling while preserving labels
    :param cifar: the cifar sub-dataset
    :param lld: the lld sub-dataset
    :return: the training dataset images and labels
    """
    print(CREATING_DATASET)
    dataset_size = len(cifar) + len(lld)
    step = 100 / dataset_size
    images = zeros(shape=[dataset_size, img_size, img_size, num_channels], dtype=float32)
    for i, image in enumerate(cifar):
        images[i] = image
        stdout.write('\r{} {:2.2f}%'.format(loading_message('CIFAR'), step * (i + 1)))
    for j, image in enumerate(lld):
        # Place LLD images after the CIFAR block (len(cifar) + j, not i + j,
        # which would overwrite the last CIFAR image and leave the final slot empty)
        images[len(cifar) + j] = image
        stdout.write('\r{} {:2.2f}%'.format(loading_message('LLD'), step * (len(cifar) + j + 1)))
    labels = zeros(shape=[dataset_size], dtype=int)
    labels[len(cifar):dataset_size] = ones(shape=[len(lld)], dtype=int)
    del cifar, lld
    print(CREATION_SUCCESFULL)
    # Shuffle images and labels from the same RNG state so pairs stay aligned
    rng_state = get_state()
    shuffle(images)
    set_state(rng_state)
    shuffle(labels)
    return images, labels
def init_theta_la(theta, src_embsize, trg_embsize, src_word_vectors, trg_word_vectors, _seed=None):
    if _seed is not None:
        ori_state = get_state()
        seed(_seed)
    src_offset = 4 * src_embsize * src_embsize + 3 * src_embsize + \
        src_embsize * src_word_vectors._vectors.shape[1]
    src_theta = theta[0:src_offset]
    trg_theta = theta[src_offset:]
    parameters = []
    # Source side
    parameters.append(src_theta)
    # Wla n*n
    parameters.append(init_W(src_embsize, src_embsize))
    # bla n*1
    parameters.append(zeros(src_embsize))
    # Target side
    parameters.append(trg_theta)
    # Wla n*n
    parameters.append(init_W(trg_embsize, trg_embsize))
    # bla n*1
    parameters.append(zeros(trg_embsize))
    if _seed is not None:
        set_state(ori_state)
    return concatenate(parameters)
def _reseed(config, offset=0):
    global entrypoint_reseeds
    seed = config.getoption("randomly_seed") + offset
    if seed not in random_states:
        random.seed(seed)
        random_states[seed] = random.getstate()
    else:
        random.setstate(random_states[seed])

    if have_factory_boy:
        factory_set_random_state(random_states[seed])

    if have_faker:
        faker_random.setstate(random_states[seed])

    if have_numpy:
        if seed not in np_random_states:
            np_random.seed(seed)
            np_random_states[seed] = np_random.get_state()
        else:
            np_random.set_state(np_random_states[seed])

    if entrypoint_reseeds is None:
        entrypoint_reseeds = [
            e.load() for e in entry_points().get("pytest_randomly.random_seeder", [])
        ]
    for reseed in entrypoint_reseeds:
        reseed(seed)
def init_theta(embsize, _seed=None):
    if _seed is not None:
        ori_state = get_state()
        seed(_seed)
    parameters = []
    # Wi1
    parameters.append(init_W(embsize, embsize))
    # Wi2
    parameters.append(init_W(embsize, embsize))
    # bi
    parameters.append(zeros(embsize))
    # Wo1
    parameters.append(init_W(embsize, embsize))
    # Wo2
    parameters.append(init_W(embsize, embsize))
    # bo1
    parameters.append(zeros(embsize))
    # bo2
    parameters.append(zeros(embsize))
    if _seed is not None:
        set_state(ori_state)
    return concatenate(parameters)
def init_theta(embsize, num_of_domains=1, _seed=None):
    if _seed is not None:
        ori_state = get_state()
        seed(_seed)
    parameters = []
    # Wi1
    parameters.append(init_W(embsize, embsize))
    # Wi2
    parameters.append(init_W(embsize, embsize))
    # bi
    parameters.append(zeros(embsize))
    # Wo1
    parameters.append(init_W(embsize, embsize))
    # Wo2
    parameters.append(init_W(embsize, embsize))
    # bo1
    parameters.append(zeros(embsize))
    # bo2
    parameters.append(zeros(embsize))
    for i in range(0, num_of_domains):
        parameters.append(init_W(1, embsize * 2))
        parameters.append(init_W(1, embsize * 2))
        parameters.append(zeros(1))
        parameters.append(zeros(1))
    if _seed is not None:
        set_state(ori_state)
    return concatenate(parameters)
def _real_init(self, dims, values, durations):
    self.randomstate = npr.get_state()

    # Identify constraint violations
    # Note that we'll treat NaNs and Infs as these values as well
    # as an optional user defined value
    goodvals = np.nonzero(
        np.logical_and(values != self.bad_value, np.isfinite(values)))[0]

    # Input dimensionality.
    self.D = dims

    # Initial length scales.
    self.ls = np.ones(self.D)
    self.constraint_ls = np.ones(self.D)

    # Initial amplitude.
    self.amp2 = np.std(values[goodvals]) + 1e-4
    self.constraint_amp2 = 1.0

    # Initial observation noise.
    self.noise = 1e-3
    self.constraint_noise = 1e-3
    self.constraint_gain = 1

    # Initial mean.
    self.mean = np.mean(values[goodvals])
    self.constraint_mean = 0.5
def save_rng(fname='numpy_rng_state.pkl'):
    """
    Save the state of NumPy's RNG to a file in the CWD. Backup the previous
    two saved states if present.

    If the RNG state file exists (from a previous save), rename the previous
    one with a '.1' suffix. If a '.1' file exists, rename it with a '.2'
    suffix.

    After use, to reproduce the previous run, restore the RNG using the state
    file with a '.1' suffix (the state used for the last run).
    """
    state = random.get_state()
    if state[0] == MT_id:
        id, state = state[0], (state[1], state[2])  # (ID, (key, pos)) for MT
    else:
        raise RuntimeError('numpy.random using unrecognized RNG type!')
    if path.exists(fname):
        fname1 = fname + '.1'
        if path.exists(fname1):
            fname2 = fname + '.2'
            os.rename(fname1, fname2)
        os.rename(fname, fname1)
    ofile = open(fname, 'wb')
    pickle.dump((id, state), ofile)
    ofile.close()
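# A minimal companion sketch (not part of the original): reload a state file
# written by save_rng above. It assumes the same (id, (key, pos)) pickle
# layout and the same names (`random` as numpy.random, `pickle`, and MT_id
# being the 'MT19937' identifier checked in save_rng).
def restore_rng(fname='numpy_rng_state.pkl'):
    ifile = open(fname, 'rb')
    id, (key, pos) = pickle.load(ifile)
    ifile.close()
    # Rebuild the full 5-tuple MT19937 state; the last two fields are the
    # cached-gaussian slots that save_rng dropped, so reset them.
    random.set_state((id, key, pos, 0, 0.0))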
def check_grad(fun, test_x, error_tol=1e-3, delta=1e-5, verbose=False, fun_args=[]):
    if verbose:
        sys.stderr.write('Checking gradients...\n')

    state_before_checking = npr.get_state()
    fixed_seed = 5  # arbitrary
    npr.seed(fixed_seed)

    analytical_grad = fun(test_x, *fun_args)[1]
    D = test_x.shape[1] if test_x.ndim > 1 else test_x.size
    grad_check = np.zeros(analytical_grad.shape) if analytical_grad.size > 1 else np.zeros(1)
    for i in range(D):
        unit_vector = np.zeros(D)
        unit_vector[i] = delta
        npr.seed(fixed_seed)
        forward_val = fun(test_x + unit_vector, *fun_args)[0]
        npr.seed(fixed_seed)
        backward_val = fun(test_x - unit_vector, *fun_args)[0]
        grad_check_i = (forward_val - backward_val) / (2 * delta)
        if test_x.ndim > 1:
            grad_check[:, i] = grad_check_i
        else:
            grad_check[i] = grad_check_i
    grad_diff = grad_check - analytical_grad
    err = np.sqrt(np.sum(grad_diff ** 2))

    if verbose:
        sys.stderr.write('Analytical grad: %s\n' % str(analytical_grad))
        sys.stderr.write('Estimated grad:  %s\n' % str(grad_check))
        sys.stderr.write('L2-norm of gradient error = %g\n' % err)

    npr.set_state(state_before_checking)

    return err < error_tol
def permute_target_and_match_target_and_features(target, features, min_n_sample,
                                                 match_function, n_permutation,
                                                 random_seed):
    if n_permutation < 1:
        raise ValueError(
            'Not computing P-Value and FDR because n_permutation < 1.')

    print('Computing p-value and FDR with {} permutations ...'.format(n_permutation))

    feature_x_permutation = full((features.shape[0], n_permutation), nan)

    permuted_target = target.copy()
    seed(random_seed)
    for i in range(n_permutation):
        shuffle(permuted_target)
        # Save the RNG state so any RNG use inside match_function does not
        # perturb the permutation sequence
        random_state = get_state()
        feature_x_permutation[:, i] = match_target_and_features(
            permuted_target, features, min_n_sample, match_function)
        set_state(random_state)

    return feature_x_permutation
def init_theta(embsize, word_vectors, _seed=None):
    if _seed is not None:
        ori_state = get_state()
        seed(_seed)
    parameters = []
    # Wi1 n*n
    parameters.append(init_W(embsize, embsize))
    # Wi2 n*n
    parameters.append(init_W(embsize, embsize))
    # bi n*1
    parameters.append(zeros(embsize))
    # Wo1 n*n
    parameters.append(init_W(embsize, embsize))
    # Wo2 n*n
    parameters.append(init_W(embsize, embsize))
    # bo1 n*1
    parameters.append(zeros(embsize))
    # bo2 n*1
    parameters.append(zeros(embsize))
    # L
    parameters.append(word_vectors._vectors.reshape(embsize * len(word_vectors)))
    if _seed is not None:
        set_state(ori_state)
    return concatenate(parameters)
def init_theta_la(theta, source_embsize, target_embsize, _seed=None):
    if _seed is not None:
        ori_state = get_state()
        seed(_seed)
    source_offset = 4 * source_embsize * source_embsize + 3 * source_embsize
    source_theta = theta[0:source_offset]
    target_theta = theta[source_offset:]
    parameters = []
    # Source side
    parameters.append(source_theta)
    # Wla n*n
    parameters.append(init_W(source_embsize, source_embsize))
    # bla n*1
    parameters.append(zeros(source_embsize))
    # Target side
    parameters.append(target_theta)
    # Wla n*n
    parameters.append(init_W(target_embsize, target_embsize))
    # bla n*1
    parameters.append(zeros(target_embsize))
    if _seed is not None:
        set_state(ori_state)
    return concatenate(parameters)
def _real_init(self, dims, values, hyper_parameters_provider):
    self.randomstate = npr.get_state()
    state = hyper_parameters_provider.get_state()
    if state is not None:
        self.D = state['dims']
        self.ls = state['ls']
        self.amp2 = state['amp2']
        self.noise = state['noise']
        self.mean = state['mean']
        self.hyper_samples = state['hyper_samples']
        self.needs_burnin = False
    else:
        # Input dimensionality.
        self.D = dims

        # Initial length scales.
        self.ls = np.ones(self.D)

        # Initial amplitude.
        self.amp2 = np.std(values) + 1e-4

        # Initial observation noise.
        self.noise = 1e-3

        # Initial mean.
        self.mean = np.mean(values)

        # Save hyperparameter samples
        self.hyper_samples.append(
            (self.mean, self.noise, self.amp2, self.ls))
def test_sigma_clip():
    from numpy.random import randn, seed, get_state, set_state

    # Need to seed the numpy RNG to make sure we don't get some amazingly
    # flukey random number that breaks one of the tests
    randstate = get_state()
    try:
        seed(12345)  # Amazing, I've got the same combination on my luggage!

        randvar = randn(10000)

        data, mask = misc.sigma_clip(randvar, 1, 2)
        maskedarr = misc.sigma_clip(randvar, 1, 2, maout=True)

        assert sum(mask) > 0
        assert data.size < randvar.size
        assert np.all(mask == ~maskedarr.mask)

        # This is actually a silly thing to do, because it uses the standard
        # deviation as the variance, but it tests to make sure these arguments
        # are actually doing something
        data2, mask2 = misc.sigma_clip(randvar, 1, 2, varfunc=np.std)
        assert not np.all(data == data2)
        assert not np.all(mask == mask2)

        data3, mask3 = misc.sigma_clip(randvar, 1, 2, cenfunc=np.mean)
        assert not np.all(data == data3)
        assert not np.all(mask == mask3)

        # Now just make sure the iters=None method works at all.
        maskedarr = misc.sigma_clip(randvar, 3, None, maout=True)
    finally:
        set_state(randstate)
def _permute_target_and_match_target_and_data(
    target,
    data,
    random_seed,
    n_permutation,
    match_function,
    n_required_for_match_function,
    raise_for_n_less_than_required,
):
    print("Computing p-value and FDR with {} permutations ...".format(n_permutation))

    seed(random_seed)

    index_x_permutation = full((data.shape[0], n_permutation), nan)

    permuted_target = target.copy()
    for i in range(n_permutation):
        shuffle(permuted_target)
        random_state = get_state()
        index_x_permutation[:, i] = _match_target_and_data(
            permuted_target,
            data,
            match_function,
            n_required_for_match_function,
            raise_for_n_less_than_required,
        )
        set_state(random_state)

    return index_x_permutation
def _real_init(self, dims, values):
    self.randomstate = npr.get_state()
    if os.path.exists(self.state_pkl):
        # Open in binary mode: pickle files should not be read as text
        fh = open(self.state_pkl, 'rb')
        state = cPickle.load(fh)
        fh.close()

        self.D = state['dims']
        self.ls = state['ls']
        self.amp2 = state['amp2']
        self.noise = state['noise']
        self.mean = state['mean']
        self.hyper_samples = state['hyper_samples']
        self.needs_burnin = False
    else:
        # Input dimensionality.
        self.D = dims

        # Initial length scales.
        self.ls = np.ones(self.D)

        # Initial amplitude.
        self.amp2 = np.std(values) + 1e-4

        # Initial observation noise.
        self.noise = 1e-3

        # Initial mean.
        self.mean = np.mean(values)

        # Save hyperparameter samples
        self.hyper_samples.append((self.mean, self.noise, self.amp2, self.ls))
def sample_ids(self, set_, size, offset):
    if set_ == TRAIN:
        # DEBUG
        # return [self.ids[set][49], self.ids[set][24]] * (size/2)
        # return [self.ids[set][49]]*size
        # return self.rng.choice(self.ids[set_], size=size, replace=False)
        # print ids
        if self.balance:
            classes = npr.choice(range(249), size=size, replace=False)
            ids = [npr.choice(self.idsperclass[set_][c]) for c in classes]
        else:
            offset = offset % len(self.ids[set_])
            assert offset + size <= len(self.ids[set_])
            ids = self.ids[set_][offset:offset + size]
        return ids
    else:
        # Sample evaluation ids from a fixed, offset-dependent seed, then
        # restore the global RNG state
        state = npr.get_state()
        npr.seed(168465 + offset)
        ids = npr.choice(self.ids[set_], size=size, replace=False)
        # classes = npr.choice(range(249), size=size, replace=False)
        # ids = [npr.choice(self.idsperclass[set_][c]) for c in classes]
        # print ids
        npr.set_state(state)
        return ids
def __call__(self, *args):
    # Switch to our own random generator
    state = random.get_state()
    random.set_state(self.state)
    # Generate delay
    s = self.start + self.range * random.rand(1)[0]
    # Remember our state and switch back random generator
    self.state = random.get_state()
    random.set_state(state)
    # Sleep
    sleep(s)
    # Evaluate
    return self.obj(*args)
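# A self-contained sketch of how the two methods above combine (the class
# name RandomDelay is hypothetical; the original snippets do not show it).
# The wrapper keeps a private RNG stream for its delays so callers' use of
# numpy.random is never disturbed.
import math
from time import sleep
from numpy import random

class RandomDelay:
    def __init__(self, obj, delayInterval):
        self.obj = obj
        self.start = delayInterval[0]
        self.range = delayInterval[1] - delayInterval[0]
        state = random.get_state()
        random.seed()
        self.state = random.get_state()
        random.set_state(state)

    def __call__(self, *args):
        state = random.get_state()
        random.set_state(self.state)
        s = self.start + self.range * random.rand(1)[0]
        self.state = random.get_state()
        random.set_state(state)
        sleep(s)
        return self.obj(*args)

slow_sqrt = RandomDelay(math.sqrt, (0.01, 0.05))
print(slow_sqrt(2.0))  # sleeps 10-50 ms on the private stream, then 1.4142...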
def shuffle_sets(ins, outs, times):
    # Shuffle both arrays from the same saved RNG state so the identical
    # permutation is applied to each, keeping (ins, outs) pairs aligned
    state = nr.get_state()
    for i in range(0, times):
        nr.shuffle(ins)
    nr.set_state(state)
    for i in range(0, times):
        nr.shuffle(outs)
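# A quick usage sketch for shuffle_sets (assumes `nr` is numpy.random, as in
# the function body): both same-length arrays receive the identical composite
# permutation, so corresponding entries stay paired.
import numpy as np
import numpy.random as nr

ins = np.arange(5)
outs = np.arange(5) * 10
shuffle_sets(ins, outs, times=3)
assert np.all(outs == ins * 10)  # pairing preserved after shuffling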
def randomListSelectionSeed(list, seed):
    state = random.get_state()
    random.seed(seed)
    try:
        seeded_selection = randomListSelection(list)
        return seeded_selection
    finally:
        random.set_state(state)
from contextlib import contextmanager

@contextmanager
def fixed_seed(seed):
    # Temporarily fix numpy's global RNG seed; restore the prior state on exit
    state = rng.get_state()
    np.random.seed(seed)
    try:
        yield
    finally:
        np.random.set_state(state)
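# Usage sketch for fixed_seed (assumes `rng` aliases numpy.random, as the
# body suggests): draws inside the block are reproducible, and the ambient
# RNG stream continues unperturbed afterwards.
import numpy as np

with fixed_seed(0):
    a = np.random.rand(3)
with fixed_seed(0):
    b = np.random.rand(3)
assert np.allclose(a, b)  # same seed, same draws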
def randomIntList(start, end, length, seed):
    state = random.get_state()
    random.seed(seed)
    try:
        int_list = random.randint(start, end, length)
        return int_list
    finally:
        random.set_state(state)
def randomDecList(start, end, length, seed):
    state = random.get_state()
    random.seed(seed)
    try:
        dec_list = random.uniform(start, end, length)
        return dec_list
    finally:
        random.set_state(state)
def randomAmountSelectionSeed(list, num_values, seed):
    state = random.get_state()
    random.seed(seed)
    try:
        selection = randomAmountSelection(list, num_values)
        return selection
    finally:
        random.set_state(state)
def __init__(self, trace, scaffold, pnodes):
    self.trace = trace
    self.scaffold = scaffold
    # Pass and store the pnodes because their order matters, and the
    # scaffold has them as a set
    self.pnodes = pnodes
    self.pyr_state = random.getstate()
    self.numpyr_state = npr.get_state()
from contextlib import contextmanager

@contextmanager
def local_seed(seed):
    # Seed the RNG for the duration of the block, then restore the prior state
    state = random.get_state()
    try:
        random.seed(seed)
        yield
    finally:
        random.set_state(state)
def generate(self, *args, **kwargs):
    if 'seed' in kwargs:
        R.set_state(kwargs['seed'])
    kwargs = kb.format_kwargs(self.generator, kwargs)
    if 'N_timebins' not in kwargs:
        raise NameError('stimulus generator requires an N_timebins argument!')
    self.N_timebin_list += [kwargs['N_timebins']]
    self.seed_list += [R.get_state()]
    return self.generator(*args, **kwargs)
def randomIntSeed(start, end, seed):
    state = random.get_state()
    random.seed(seed)
    try:
        randIntSeeded = randomInt(start, end)
        return randIntSeeded
    finally:
        random.set_state(state)
def random_state_numpy(self):
    # numpy uses its own random state implementation.
    if not have_numpy:
        raise RuntimeError('numpy not installed')
    if not hasattr(self, '_random_state_numpy'):
        np_random.seed(self.options.seed)
        self._random_state_numpy = np_random.get_state()
    return self._random_state_numpy
def skip_random_states(random_seed, n_skips, skipper='', for_skipper=None):
    """
    Advance the global numpy RNG past n_skips executions of skipper.
    :param random_seed: int;
    :param n_skips: int; number of times to execute skipper
    :param skipper: str; statement exec'd once per skip to consume randomness
    :param for_skipper: object;
    :return: tuple; the RNG state after the final skip
    """
    seed(random_seed)
    r = get_state()
    for i in range(n_skips):
        exec(skipper)
        r = get_state()
    return r
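# Usage sketch for skip_random_states: advance the global numpy RNG past
# three executions of a skipper statement and capture the resulting state.
# The seed value and skipper string are hypothetical; the skipper carries its
# own import so it does not depend on names in the callee's namespace.
final_state = skip_random_states(
    random_seed=20121020,
    n_skips=3,
    skipper='from numpy.random import rand; rand()',
)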
def test_weighted_level_generator_with_cum_sum(self):
    for n in range(1000):
        state = random.random_integers(1, 10, 10)
        cum_sum = np.cumsum(state)
        rand_state = random.get_state()
        pos = generator.weighted_level_generator(state)
        random.set_state(rand_state)
        cs_pos = generator.weighted_level_generator(state, cum_sum)
        self.assertEqual(pos, cs_pos)
def __init__(self, p=0.5, seed=None):
    self.seed = seed
    self.p = p
    self._state = None  # 1 for flip, 0 for no flip
    self.countdown = 0
    if seed:
        random.seed(seed)
        self._random_state = random.get_state()
    else:
        self._random_state = None
from contextlib import contextmanager

@contextmanager
def push_seed(seed=None):  # pragma no cover
    """
    Set a temporary seed to the numpy random number generator, restoring it
    at the end of the context. If seed is None, then get a seed from
    /dev/urandom (or the windows analogue).
    """
    from numpy.random import get_state, set_state, seed as set_seed
    state = get_state()
    set_seed(seed)
    yield
    set_state(state)
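# Usage sketch for push_seed: seeding inside the context is temporary, so
# repeated blocks with the same seed reproduce the same draws.
import numpy as np

with push_seed(42):
    x = np.random.rand()
with push_seed(42):
    assert np.random.rand() == x  # same seed, same draw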
def _update_get_state(self):
    """ Updates the state with a new random flip every second read """
    if self.countdown == 0:
        self.countdown = 1
        if self.seed:
            random.set_state(self._random_state)
        self._state = int(random.random() < self.p)
        if self.seed:
            self._random_state = random.get_state()
    else:
        self.countdown -= 1
    return self._state
def main():
    rstate = random.get_state()
    reset_random()
    print("Running tests 1 and 2 ...")
    test_1and2(trajectories)
    print("Running test 3 ...")
    test_3(trajectories)
    print_timer()
    random.set_state(rstate)
def subset_pps5(self, nsamp):
    """
    Return a sample of nsamp distinct items from the population, sampled
    without replacement with probability proportional to size (PPS)
    according to Sampford's sampling scheme.

    5-table lookup samplers are used within Sampford's algorithm to
    accelerate the sampling for large populations.
    """
    # Copy the whole population if nsamp = npopn.
    if nsamp == self.npopn:
        return [item for item in self.items]
    set_rng_state(*random.get_state())
    if self.equiprob:
        pool = arange(self.npopn)
        indices = equalprob(nsamp, pool)
    else:
        # This part of setup has to be done before any sampling.
        if not self.did_init:
            print('Initing ppssampler...')
            self.sampler = _ppssampler(self.weights)
            self.did_init = True
        # This part has to be done before any sampling w/o replacement.
        if not self.did_Sampford_init:
            print('Initing wts...')
            self.sort_indices, self.sort_wts, self.tot_wt = \
                self.sampler.prepwts(self.weights)
            self.max_wt = self.sort_wts[0] / self.tot_wt  # Max wt, normed
            self.nsamp = 0
            self.did_Sampford_init = True
        # This part has to be done when sample size changes.
        if self.nsamp != nsamp:
            print('Initing ratios...')
            if nsamp > self.npopn:
                raise ValueError('nsamp larger than population size!')
            if nsamp * self.max_wt > 1:
                raise ValueError('Sample size too large for PPS sampling!')
            self.sampler.prepratios(nsamp, self.sort_wts, self.tot_wt)
            self.sampler.prepratiotables()
            self.did_Sampford_tables = True
            self.nsamp = nsamp
        # This may happen if subset_pps is called before subset_pps5.
        if not self.did_Sampford_tables:
            print('Initing ratio tables...')
            self.sampler.prepratiotables()
            self.did_Sampford_tables = True
        self.ntry, indices = self.sampler.samplenr5()
        # Note the 5-table version returns unsorted indices.
        # indices = [self.sort_indices[i] for i in sindices]
    result = [self.items[i] for i in indices]
    random.set_state(get_rng_state())
    return result
def test_numpy_rng_preservation(self):
    """Tests whether caching is rng-sensible"""
    @caching.cache_results(cache_path=CACHE_PATH)
    def _any_cached_function(arg, kwarg=None):
        return arg, kwarg, nprng.rand()

    arg = 1
    nprng.seed(0)
    a1, k1, r1 = _any_cached_function(arg)
    state1 = nprng.get_state()
    a2, k2, r2 = _any_cached_function(arg)
    state2 = nprng.get_state()
    self.assertEqual(a1, a2, "Return arg changed.")
    self.assertEqual(k1, k2, "Returned kwarg changed.")
    self.assertNotEqual(r1, r2, "Returned the same random.")

    nprng.seed(0)
    rep_a1, rep_k1, rep_r1 = _any_cached_function(arg)
    rep_state1 = nprng.get_state()
    rep_a2, rep_k2, rep_r2 = _any_cached_function(arg)
    rep_state2 = nprng.get_state()
    self.assertEqual(a1, rep_a1, "Return arg changed after rng reset.")
    self.assertEqual(k1, rep_k1, "Return kwarg changed after rng reset.")
    self.assertEqual(
        r1, rep_r1, "Returned different cached random after rng reset.")
    self.assertEqual(
        str(state1), str(rep_state1),
        "State of rng after first call after reset is different.")
    self.assertEqual(rep_a1, rep_a2, "Return arg2 changed after rng reset.")
    self.assertEqual(rep_k1, rep_k2, "Return kwarg2 changed after rng reset.")
    self.assertEqual(
        r2, rep_r2, "Returned different cached random2 after rng reset.")
    self.assertEqual(
        str(state2), str(rep_state2),
        "State of rng after second call after rng reset is different.")
def fixed_regen(self, values):
    # Ensure repeatability of randomness
    cur_pyr_state = random.getstate()
    cur_numpyr_state = npr.get_state()
    try:
        random.setstate(self.pyr_state)
        npr.set_state(self.numpyr_state)
        registerDeterministicLKernels(self.trace, self.scaffold, self.pnodes, values)
        answer = regenAndAttach(self.trace, self.scaffold.border[0],
                                self.scaffold, False, OmegaDB(), {})
    finally:
        random.setstate(cur_pyr_state)
        npr.set_state(cur_numpyr_state)
    return answer
def __init__(self, num_dims, **options):
    opts = OPTION_DEFAULTS.copy()
    opts.update(options)
    if hasattr(self, 'options'):
        opts.update(self.options)
    # This is a bit of a mess. Basically to make it work with the GPClassifier --
    # but yes I know the GP shouldn't have code for the sake of those who
    # inherit from it. TODO -- clean this up
    self.options = opts

    self.num_dims = num_dims
    self.noiseless = self.options['likelihood'].lower() == "noiseless"

    self._inputs = None  # Matrix of data inputs
    self._values = None  # Vector of data values
    self.pending = None  # Matrix of pending inputs
    # TODO: support meta-data

    self.params = None

    self._cache_list = []           # Cached computations for re-use.
    self._hypers_list = []          # Hyperparameter dicts for each state.
    self._fantasy_values_list = []  # Fantasy values generated from pending samples.
    self.state = None
    self._random_state = npr.get_state()
    self._samplers = []

    # If you are only doing one fantasy of pending jobs, then don't even
    # bother sampling it from the marginal gaussian posterior predictive and
    # instead just take the mean of this distribution. This only has an
    # effect if num_fantasies is 1
    self._use_mean_if_single_fantasy = True

    # Get the kernel type from the options
    try:
        self.input_kernel_class = getattr(spearmint.kernels, self.options['kernel'])
    except AttributeError:
        raise Exception("Unknown kernel: %s" % self.options['kernel'])

    self._kernel = None
    self._kernel_with_noise = None

    self.num_states = 0
    self.chain_length = 0

    self.max_cache_bytes = self.options['max_cache_mb'] * 1024 * 1024

    self._build()
def OnEventsReplace(self, event):
    data = array(self.model.GetCurrentData()[:])
    n = data.shape[0]
    inputs = {}
    dlg = ParameterDialog([('n', 'IntValidator', str(n))], inputs,
                          'Random choice of n events with replacement')
    if dlg.ShowModal() == wx.ID_OK:
        indices = randint(0, n, inputs['n'])
        name = self.model.GetCurrentGroup()._v_pathname
        self.model.FilterOnRows('SampleOnEventsReplace', indices)
        self.model.AddHistory(('SampleOnRows', [name, inputs, ('state', get_state())]))
    dlg.Destroy()
def _real_init(self, dims, values, durations):
    self.locker.lock_wait(self.state_pkl)

    self.randomstate = npr.get_state()
    if os.path.exists(self.state_pkl):
        fh = open(self.state_pkl, "rb")
        state = pickle.load(fh)
        fh.close()

        self.D = state["dims"]
        self.ls = state["ls"]
        self.amp2 = state["amp2"]
        self.noise = state["noise"]
        self.mean = state["mean"]
        self.constraint_ls = state["constraint_ls"]
        self.constraint_amp2 = state["constraint_amp2"]
        self.constraint_noise = state["constraint_noise"]
        self.constraint_mean = state["constraint_mean"]
        self.constraint_gain = state["constraint_gain"]
        self.needs_burnin = False
    else:
        # Identify constraint violations
        # Note that we'll treat NaNs and Infs as these values as well
        # as an optional user defined value
        goodvals = np.nonzero(np.logical_and(values != self.bad_value,
                                             np.isfinite(values)))[0]

        # Input dimensionality.
        self.D = dims

        # Initial length scales.
        self.ls = np.ones(self.D)
        self.constraint_ls = np.ones(self.D)

        # Initial amplitude.
        self.amp2 = np.std(values[goodvals]) + 1e-4
        self.constraint_amp2 = 1.0

        # Initial observation noise.
        self.noise = 1e-3
        self.constraint_noise = 1e-3
        self.constraint_gain = 1

        # Initial mean.
        self.mean = np.mean(values[goodvals])
        self.constraint_mean = 0.5

    self.locker.unlock(self.state_pkl)
def print_random_state(mark='', print_process=False):
    """
    Print numpy random state.
    :param mark: str;
    :param print_process: bool;
    :return: None
    """
    random_state = get_state()
    _, keys, pos, _, _ = random_state
    try:
        print_log('[{}] Seed0={}\ti={}\t@={}'.format(mark, keys[0], pos, keys[pos]),
                  print_process=print_process)
    except IndexError:
        print_log('[{}] Seed0={}\ti={}'.format(mark, keys[0], pos),
                  print_process=print_process)
def test_validate_random_state(self):
    r1 = random.get_state()
    self.assertTrue(validate_random_state(r1))

    r2 = list(r1)
    self.assertTrue(validate_random_state(r2))

    r3 = deepcopy(r2)
    r3[0] = 'xxx'
    self.assertRaises(InvalidRandomStateException, validate_random_state, r3)

    r3 = deepcopy(r2)
    r3[1] = [1]
    self.assertRaises(InvalidRandomStateException, validate_random_state, r3)

    r3 = deepcopy(r2)
    r3[2] = 1.2
    self.assertRaises(InvalidRandomStateException, validate_random_state, r3)

    r3 = deepcopy(r2)
    r3[3] = 1.2
    self.assertRaises(InvalidRandomStateException, validate_random_state, r3)

    r3 = deepcopy(r2)
    r3[4] = 'x'
    self.assertRaises(InvalidRandomStateException, validate_random_state, r3)

    with self.assertRaisesRegex(InvalidRandomStateException,
                                '^Random state must be a tuple$'):
        validate_random_state(1.)

    with self.assertRaisesRegex(InvalidRandomStateException,
                                '^Random state must have length 5$'):
        validate_random_state((1, ))

    with self.assertRaisesRegex(
            InvalidRandomStateException,
            r'^Random number generator random_state\[1\] must be an array of '
            r'length 624 of unsigned ints$'):
        validate_random_state(('MT19937', [1.] * 624, 1, 1, 1))
def init_theta(source_embsize, target_embsize, _seed=None):
    if _seed is not None:
        ori_state = get_state()
        seed(_seed)
    parameters = []
    # Source Side
    # Wi1 n*n
    parameters.append(init_W(source_embsize, source_embsize))
    # Wi2 n*n
    parameters.append(init_W(source_embsize, source_embsize))
    # bi n*1
    parameters.append(zeros(source_embsize))
    # Wo1 n*n
    parameters.append(init_W(source_embsize, source_embsize))
    # Wo2 n*n
    parameters.append(init_W(source_embsize, source_embsize))
    # bo1 n*1
    parameters.append(zeros(source_embsize))
    # bo2 n*1
    parameters.append(zeros(source_embsize))
    # Target Side
    # Wi1 n*n
    parameters.append(init_W(target_embsize, target_embsize))
    # Wi2 n*n
    parameters.append(init_W(target_embsize, target_embsize))
    # bi n*1
    parameters.append(zeros(target_embsize))
    # Wo1 n*n
    parameters.append(init_W(target_embsize, target_embsize))
    # Wo2 n*n
    parameters.append(init_W(target_embsize, target_embsize))
    # bo1 n*1
    parameters.append(zeros(target_embsize))
    # bo2 n*1
    parameters.append(zeros(target_embsize))
    if _seed is not None:
        set_state(ori_state)
    return concatenate(parameters)
def _real_init(self, dims, values):
    self.randomstate = npr.get_state()

    # Input dimensionality.
    self.D = dims

    # Initial length scales.
    self.ls = np.ones(self.D)

    # Initial amplitude.
    self.amp2 = np.std(values) + 1e-4

    # Initial observation noise.
    self.noise = 1e-3

    # Initial mean.
    self.mean = np.mean(values)

    # Save hyperparameter samples
    self.hyper_samples.append((self.mean, self.noise, self.amp2, self.ls))
def _reseed(config, offset=0):
    seed = config.getoption('randomly_seed') + offset
    if seed not in random_states:
        random.seed(seed)
        random_states[seed] = random.getstate()
    else:
        random.setstate(random_states[seed])

    if have_factory_boy:
        factory_set_random_state(random_states[seed])

    if have_faker:
        faker_random.setstate(random_states[seed])

    if have_numpy:
        if seed not in np_random_states:
            np_random.seed(seed)
            np_random_states[seed] = np_random.get_state()
        else:
            np_random.set_state(np_random_states[seed])
def __init__(self, num_dims, **options):
    self.num_dims = num_dims

    self._set_likelihood(options)

    log.debug('GP received initialization options: %s' % (options))

    self.verbose = bool(options.get("verbose", False))
    self.mcmc_diagnostics = bool(options.get("mcmc_diagnostics", False))
    self.mcmc_iters = int(options.get("mcmc_iters", DEFAULT_MCMC_ITERS))
    self.burnin = int(options.get("burnin", DEFAULT_BURNIN))
    self.thinning = int(options.get("thinning", 0))

    self._inputs = None  # Matrix of data inputs
    self._values = None  # Vector of data values
    self.pending = None  # Matrix of pending inputs
    # TODO: support meta-data

    self.params = None

    # Default to using the mean prediction for fantasies
    self.num_fantasies = options.get('num_fantasies', 1)  # TODO -- make in config

    self._caching = bool(options.get("caching", True))
    self._cache_list = []           # Cached computations for re-use.
    self._hypers_list = []          # Hyperparameter dicts for each state.
    self._fantasy_values_list = []  # Fantasy values generated from pending samples.
    self.state = None
    self._random_state = npr.get_state()
    self._samplers = []

    self._use_mean_if_single_fantasy = True

    self._kernel = None
    self._kernel_with_noise = None

    self.num_states = 0
    self.chain_length = 0

    self.max_cache_mb = 256  # TODO -- make in config
    self.max_cache_bytes = self.max_cache_mb * 1024 * 1024

    self._build()
def phaseout(objective, true_run, stimulus, V2, phase):
    iterations[0] = -1
    model = copy.deepcopy(true_run['model'])
    model['U'] = retina.ring_weights(shape=true_run['model']['U'].shape,
                                     offset_out=phase)
    phase_objective = LL_with_U(objective, model['U'], V2)
    true_objective = LL_with_U(objective, true_run['model']['U'], V2)
    params = optimize_V1(phase_objective, true_run, model['U'], V2)
    opt_params = optimize_V1(true_objective, true_run, true_run['model']['V'], V2)
    model['V'] = params['V1']
    dLopt = true_objective.LL(true_run['model']['V']) - true_objective.LL(opt_params)
    dLL = phase_objective.LL(params) - true_objective.LL(opt_params)

    # Run the true and out-of-phase models on the same RNG stream
    seed = Rand.get_state()
    true_100 = retina.run_LNLNP(true_model, stimulus=stimulus,
                                keep=frozenset(['intensity']), N_timebins=100000)
    Rand.set_state(seed)
    this_100 = retina.run_LNLNP(model, stimulus=stimulus,
                                keep=frozenset(['intensity']), N_timebins=100000)
    intensities = np.vstack([true_100['intensity'].flatten(),
                             this_100['intensity'].flatten()])
    order = np.argsort(intensities[0, :])
    intensities = intensities[:, order[::20]]

    p.close('all')
    p.figure(1, figsize=(10, 12))
    ax0 = p.subplot(3, 1, 1)
    p.plot(true_100['model']['V'].T)
    # ax0.xaxis.set_label_text('subunits')
    p.title('V filters')
    ax1 = p.subplot(3, 1, 2)
    p.plot(true_100['model']['U'].T)
    # ax1.xaxis.set_label_text('cones')
    p.title('True U filters , sigma = %.0f' % (sigma_spatial[0]))
    ax2 = p.subplot(3, 1, 3)
    p.semilogy(intensities.T)
    ax2.xaxis.set_label_text('samples reordered by intensity of true model')
    ax2.yaxis.set_label_text('intensities in spikes / bin')
    p.title('Intensities of true vs. out-of-phase model (dLL=%.1e bits/spike, dLopt=%.1e) ' %
            (dLL / np.log(2.), dLopt / np.log(2.)))
    # p.subplot(2,1,2)
    # p.plot( intensities.T )
    p.savefig('/Users/kolia/Desktop/out-of-phase_sigma%d_phase%.1f.pdf' %
              (true_run['model']['sigma_spatial'][0], phase), format='pdf')
def save_rng(fname='.numpy-rng-state'):
    """
    Save the state of NumPy's RNG to a file in the CWD ('.numpy-rng-state'
    by default). Backup the previous two saved states if present:

    If the RNG state file exists (from a previous save), rename the previous
    one with a '.1' suffix. If a '.1' file exists, rename it with a '.2'
    suffix.
    """
    state = random.get_state()
    if state[0] == MT_id:
        id, state = state[0], (state[1], state[2])  # (ID, (key, pos)) for MT
    else:
        raise RuntimeError('numpy.random using unrecognized RNG type!')
    if path.exists(fname):
        fname1 = fname + '.1'
        if path.exists(fname1):
            fname2 = fname + '.2'
            os.rename(fname1, fname2)
        os.rename(fname, fname1)
    ofile = open(fname, 'wb')
    pickle.dump((id, state), ofile)
    ofile.close()
def sample(self, nsamp):
    """
    Return a set of nsamp samples from the population, sampled with
    replacement.
    """
    # *** Implement equiprob case.
    if self.equiprob:
        raise NotImplementedError('Awaiting code...')
    if not self.did_init:
        self.sampler = _ppssampler(self.weights)
        self.did_init = True
    # Track the RNG state within the sampler, to update NumPy's RNG state.
    # Internally we only use the MT state; any extra state for cached
    # normal or other samples can just be copied.
    rng_state = random.get_state()
    mt_state, extra_state = rng_state[:3], rng_state[3:]
    set_rng_state(*mt_state)  # *** modify to handle full rng state
    indices = self.sampler.sample(nsamp)
    new_state = list(get_rng_state())
    new_state.extend(extra_state)
    random.set_state(new_state)
    return [self.items[i] for i in indices]
def unison_shuffle(a, b):
    # Restore the saved RNG state before the second shuffle so both arrays
    # receive the same permutation
    state = random.get_state()
    random.shuffle(a)
    random.set_state(state)
    random.shuffle(b)
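# Quick check of the unison_shuffle pattern (assumes `random` is
# numpy.random, as the body suggests): replaying the pre-shuffle state makes
# the second shuffle reuse the same permutation, so a[k] and b[k] stay paired.
import numpy as np
from numpy import random

a = np.arange(6)
b = np.arange(6) + 100
unison_shuffle(a, b)
assert np.all(b == a + 100)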
from contextlib import contextmanager

@contextmanager
def save_random_state():
    # Preserve the global RNG state across the body of the with-block
    state = get_state()
    try:
        yield
    finally:
        set_state(state)
def test_all_world2pix(fname=None, ext=0, tolerance=1.0e-4, origin=0,
                       random_npts=250000, mag=2, adaptive=False, maxiter=20,
                       detect_divergence=True):
    """Test all_world2pix, iterative inverse of all_pix2world"""
    from numpy import random
    from datetime import datetime
    from astropy.io import fits
    from os import path

    # Open test FITS file:
    if fname is None:
        fname = get_pkg_data_filename("data/j94f05bgq_flt.fits")
        ext = ("SCI", 1)
    if not path.isfile(fname):
        raise IOError("Input file '{:s}' to 'test_all_world2pix' not found.".format(fname))
    h = fits.open(fname)
    w = wcs.WCS(h[ext].header, h)
    h.close()
    del h

    crpix = w.wcs.crpix
    ncoord = crpix.shape[0]

    # Assume that CRPIX is at the center of the image and that the image
    # has an even number of pixels along each axis:
    naxesi = list(2 * crpix.astype(int) - origin)

    # Generate integer indices of pixels (image grid):
    img_pix = np.dstack([i.flatten() for i in np.meshgrid(*map(range, naxesi))])[0]

    # Generate random data (in image coordinates):
    startstate = random.get_state()
    random.seed(123456789)
    rnd_pix = np.random.rand(random_npts, ncoord)
    random.set_state(startstate)

    # Scale random data to cover the entire image (or more, if 'mag' > 1).
    # Assume that CRPIX is at the center of the image and that the image
    # has an even number of pixels along each axis:
    mwidth = 2 * mag * (crpix - origin)
    rnd_pix = crpix - 0.5 * mwidth + (mwidth - 1) * rnd_pix

    # Reference pixel coordinates in image coordinate system (CS):
    test_pix = np.append(img_pix, rnd_pix, axis=0)

    # Reference pixel coordinates in sky CS using forward transformation:
    all_world = w.all_pix2world(test_pix, origin)

    try:
        runtime_begin = datetime.now()
        # Apply the inverse iterative process to pixels in world coordinates
        # to recover the pixel coordinates in image space.
        all_pix = w.all_world2pix(
            all_world, origin, tolerance=tolerance, adaptive=adaptive,
            maxiter=maxiter, detect_divergence=detect_divergence)
        runtime_end = datetime.now()
    except wcs.wcs.NoConvergence as e:
        runtime_end = datetime.now()
        ndiv = 0
        if e.divergent is not None:
            ndiv = e.divergent.shape[0]
            print("There are {} diverging solutions.".format(ndiv))
            print("Indices of diverging solutions:\n{}".format(e.divergent))
            print("Diverging solutions:\n{}\n".format(e.best_solution[e.divergent]))
            print("Mean radius of the diverging solutions: {}".format(
                np.mean(np.linalg.norm(e.best_solution[e.divergent], axis=1))))
            print("Mean accuracy of the diverging solutions: {}\n".format(
                np.mean(np.linalg.norm(e.accuracy[e.divergent], axis=1))))
        else:
            print("There are no diverging solutions.")

        nslow = 0
        if e.slow_conv is not None:
            nslow = e.slow_conv.shape[0]
            print("There are {} slowly converging solutions.".format(nslow))
            print("Indices of slowly converging solutions:\n{}".format(e.slow_conv))
            print("Slowly converging solutions:\n{}\n".format(e.best_solution[e.slow_conv]))
        else:
            print("There are no slowly converging solutions.\n")

        print("There are {} converged solutions.".format(
            e.best_solution.shape[0] - ndiv - nslow))
        print("Best solutions (all points):\n{}".format(e.best_solution))
        print("Accuracy:\n{}\n".format(e.accuracy))
        print("\nFinished running 'test_all_world2pix' with errors.\n"
              "ERROR: {}\nRun time: {}\n".format(e.args[0], runtime_end - runtime_begin))
        raise e

    # Compute differences between reference pixel coordinates and
    # pixel coordinates (in image space) recovered from reference
    # pixels in world coordinates:
    errors = np.sqrt(np.sum(np.power(all_pix - test_pix, 2), axis=1))
    meanerr = np.mean(errors)
    maxerr = np.amax(errors)
    print("\nFinished running 'test_all_world2pix'.\n"
          "Mean error = {0:e}  (Max error = {1:e})\n"
          "Run time: {2}\n".format(meanerr, maxerr, runtime_end - runtime_begin))
    assert maxerr < 2.0 * tolerance
def __enter__(self):
    from numpy import random
    self.startstate = random.get_state()
    random.seed(self.seed)
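# The matching __exit__ is not shown in this snippet; a minimal sketch would
# restore the state captured in __enter__ above:
def __exit__(self, exc_type, exc_value, traceback):
    from numpy import random
    random.set_state(self.startstate)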