def _permute_target_and_match_target_and_data(
    target,
    data,
    random_seed,
    n_permutation,
    match_function,
    n_required_for_match_function,
    raise_for_n_less_than_required,
):

    print("Computing p-value and FDR with {} permutation ...".format(n_permutation))

    seed(random_seed)

    index_x_permutation = full((data.shape[0], n_permutation), nan)

    permuted_target = target.copy()

    for i in range(n_permutation):

        shuffle(permuted_target)

        random_state = get_state()

        index_x_permutation[:, i] = _match_target_and_data(
            permuted_target,
            data,
            match_function,
            n_required_for_match_function,
            raise_for_n_less_than_required,
        )

        set_state(random_state)

    return index_x_permutation
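A quick sketch (not from the original module) of why the loop above snapshots the RNG state around each match call: restoring the state afterwards means any random draws inside match_function cannot perturb the shuffle sequence, so the i-th permutation depends only on random_seed. The noisy_match stand-in below is hypothetical.

import numpy as np
from numpy.random import seed, shuffle, get_state, set_state

def noisy_match(t):
    return float(np.random.rand())  # consumes randomness, like a bootstrapping match_function

seed(0)
for _ in range(3):
    t = np.arange(5)
    shuffle(t)        # driven only by seed(0) and the iteration count
    s = get_state()
    noisy_match(t)    # whatever this draws...
    set_state(s)      # ...is undone here, keeping permutations reproducible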
def check_grad(fun, test_x, error_tol=1e-3, delta=1e-5, verbose=False, fun_args=[]):
    if verbose:
        sys.stderr.write('Checking gradients...\n')
        
    state_before_checking = npr.get_state()
    fixed_seed = 5      # arbitrary
    
    npr.seed(fixed_seed)
    analytical_grad = fun(test_x, *fun_args)[1]
    D = test_x.shape[1] if test_x.ndim > 1 else test_x.size
    grad_check = np.zeros(analytical_grad.shape) if analytical_grad.size > 1 else np.zeros(1)
    for i in range(D):
        unit_vector = np.zeros(D)
        unit_vector[i] = delta
        npr.seed(fixed_seed)
        forward_val = fun(test_x + unit_vector, *fun_args)[0]
        npr.seed(fixed_seed)
        backward_val = fun(test_x - unit_vector, *fun_args)[0]
        grad_check_i = (forward_val - backward_val)/(2*delta)
        if test_x.ndim > 1:
            grad_check[:,i] = grad_check_i
        else:
            grad_check[i] = grad_check_i
    grad_diff = grad_check - analytical_grad
    err = np.sqrt(np.sum(grad_diff**2))

    if verbose:        
        sys.stderr.write('Analytical grad: %s\n' % str(analytical_grad))
        sys.stderr.write('Estimated grad:  %s\n' % str(grad_check))
        sys.stderr.write('L2-norm of gradient error = %g\n' % err)

    npr.set_state(state_before_checking)

    return err < error_tol
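A usage sketch for check_grad above (assuming the numpy, numpy.random and sys imports the snippet relies on): the central-difference estimate is exact for a quadratic objective, so this should return True.

import numpy as np

def quad(x):
    return np.sum(x ** 2), 2 * x    # (objective value, analytical gradient)

x0 = np.array([1.0, -2.0, 3.0])
assert check_grad(quad, x0, verbose=True)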
def init_theta_la( theta, src_embsize, trg_embsize, src_word_vectors, trg_word_vectors, _seed=None ):
    if _seed is not None:
        ori_state = get_state()
        seed(_seed)
    
    src_offset = 4 * src_embsize * src_embsize + 3 * src_embsize + src_embsize * src_word_vectors._vectors.shape[1]
    src_theta = theta[0:src_offset] 
    trg_theta = theta[src_offset:]

    parameters = []

    # Source side 
    parameters.append( src_theta )
    # Wla n*n
    parameters.append( init_W( src_embsize, src_embsize ) )
    # bla n*1
    parameters.append( zeros( src_embsize ) )

    # Target side 
    parameters.append( trg_theta )
    # Wla n*n
    parameters.append( init_W( trg_embsize, trg_embsize ) )
    # bla n*1
    parameters.append( zeros( trg_embsize ) )
    
    if _seed is not None:
        set_state(ori_state)
  
    return concatenate(parameters) 
def init_theta( embsize, word_vectors, _seed = None ):
    if _seed is not None:
        ori_state = get_state()
        seed(_seed)
    
    parameters = []
    
    # Wi1 n*n
    parameters.append(init_W(embsize, embsize))
    # Wi2 n*n
    parameters.append(init_W(embsize, embsize))
    # bi n*1
    parameters.append(zeros(embsize))
  
    # Wo1 n*n
    parameters.append(init_W(embsize, embsize))
    # Wo2 n*n
    parameters.append(init_W(embsize, embsize))
    # bo1 n*1
    parameters.append(zeros(embsize))
    # bo2 n*1
    parameters.append(zeros(embsize))

    # L
    parameters.append( word_vectors._vectors.reshape( embsize * len( word_vectors ) ) )

    if _seed is not None:
        set_state(ori_state)
  
    return concatenate(parameters)   
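The init_theta* snippets here assume an init_W helper that returns a flat weight vector (otherwise the final concatenate would fail). A minimal sketch under that assumption, using a Glorot-style uniform range; the original implementation may differ:

from numpy import sqrt
from numpy.random import rand

def init_W(fan_out, fan_in):
    r = sqrt(6.0) / sqrt(fan_out + fan_in)      # assumed initialization range
    return rand(fan_out * fan_in) * 2 * r - r   # flat vector, ready for concatenate()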
Example #5
def random_search_hyperparameters(AlgoClass, algo_args, random_state, train_sets, validation_sets, hyp_intervals, tries=100, calc_train_error=False):
    """Performs a random search with integer values on specified hyperparameters.
    :param AlgoClass: Class of RandomForestAlgorithm
    :param algo_args: All necessary arguments to RandomForestAlgorithm
    :param random_state: Random state is used to determine parameter values and initialize RandomForestAlgorithm
    :param train_sets: Training data sets
    :param validation_sets: Validation data sets (cross-validation)
    :param hyp_intervals: List of tuples of (param_name, low, high)
    """
    random.set_state(random_state)
    search_results = []

    for try_i in range(tries):
        # Sample hyperparameters
        hyp_values = {}
        for param_name, low, high in hyp_intervals:
            hyp_values[param_name] = int(random.uniform(low, high))

        # Assemble Algorithm arguments
        args = dict(algo_args)
        args.update(hyp_values)
        args['random_state'] = random
        algo = AlgoClass(**args)

        test_results = get_result_statistics(test([algo], train_sets, validation_sets, cross_validation=True, print_results=False))
        if calc_train_error:
            train_results = get_result_statistics(test([algo], train_sets, train_sets, cross_validation=True, print_results=False))
            print "#%d MSE: %.2f CVE-TE: %.2f" % (try_i, test_results[algo]['mean_mse'], abs(test_results[algo]['mean_mse']-train_results[algo]['mean_mse']))
            search_results.append((try_i, hyp_values, test_results[algo], train_results[algo]))
        else:
            print "#%d MSE: %.2f" % (try_i, test_results[algo]['mean_mse'])
            search_results.append((try_i, hyp_values, test_results[algo]))
    return search_results
Example #6
def create_dataset(cifar, lld):
    """
    Create full dataset, shuffling while preserving labels
    :param cifar: the cifar sub-dataset
    :param lld: the lld sub-dataset
    :return: the training dataset images and labels
    """
    print(CREATING_DATASET)
    dataset_size = len(cifar) + len(lld)
    step = 100 / dataset_size
    images = zeros(shape=[dataset_size, img_size, img_size, num_channels],
                   dtype=float32)
    i, j = 0, 0
    for i, image in enumerate(cifar):
        images[i] = image
        stdout.write('\r{} {:2.2f}%'.format(loading_message('CIFAR'),
                                            step * (i + 1)))
    for j, image in enumerate(lld):
        images[len(cifar) + j] = image  # place lld images after the cifar block
        stdout.write('\r{} {:2.2f}%'.format(loading_message('LLD'),
                                            step * (len(cifar) + j + 1)))

    labels = zeros(shape=[dataset_size], dtype=int)
    labels[len(cifar):dataset_size] = ones(shape=[len(lld)], dtype=int)

    del cifar, lld
    print(CREATION_SUCCESFULL)
    rng_state = get_state()
    shuffle(images)
    set_state(rng_state)
    shuffle(labels)
    return images, labels
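The last four lines above are the standard shuffle-in-unison trick: rewinding the generator to the saved state makes the second shuffle apply the identical permutation. A minimal self-contained illustration:

import numpy as np
from numpy.random import get_state, set_state, shuffle

a = np.arange(5)
b = np.arange(5) * 10
s = get_state()
shuffle(a)
set_state(s)    # rewind the generator...
shuffle(b)      # ...so b is permuted exactly like a
assert np.all(b == a * 10)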
Example #7
def init_theta(embsize, _seed=None):
  if _seed is not None:
    ori_state = get_state()
    seed(_seed)
    
  parameters = []
  
  # Wi1 
  parameters.append(init_W(embsize, embsize))
  # Wi2
  parameters.append(init_W(embsize, embsize))
  # bi
  parameters.append(zeros(embsize))
  
  # Wo1 
  parameters.append(init_W(embsize, embsize))
  # Wo2
  parameters.append(init_W(embsize, embsize))
  # bo1
  parameters.append(zeros(embsize))
  # bo2
  parameters.append(zeros(embsize))

  if _seed is not None:
    set_state(ori_state)
  
  return concatenate(parameters)   
Example #8
def init_theta(embsize, num_of_domains=1, _seed=None):
    if _seed is not None:
        ori_state = get_state()
        seed(_seed)

    parameters = []

    # Wi1
    parameters.append(init_W(embsize, embsize))
    # Wi2
    parameters.append(init_W(embsize, embsize))
    # bi
    parameters.append(zeros(embsize))

    # Wo1
    parameters.append(init_W(embsize, embsize))
    # Wo2
    parameters.append(init_W(embsize, embsize))
    # bo1
    parameters.append(zeros(embsize))
    # bo2
    parameters.append(zeros(embsize))

    for i in range(0, num_of_domains):
        parameters.append(init_W(1, embsize * 2))
        parameters.append(init_W(1, embsize * 2))
        parameters.append(zeros(1))
        parameters.append(zeros(1))

    if _seed is not None:
        set_state(ori_state)

    return concatenate(parameters)
Example #9
def test_sigma_clip():
    from numpy.random import randn,seed,get_state,set_state
    
    #need to seed the numpy RNG to make sure we don't get some amazingly flukey
    #random number that breaks one of the tests
    
    randstate = get_state()
    try:
        seed(12345)  # Amazing, I've got the same combination on my luggage!
        
        randvar = randn(10000)

        data, mask = misc.sigma_clip(randvar, 1, 2)
        maskedarr = misc.sigma_clip(randvar, 1, 2, maout=True)

        assert sum(mask) > 0
        assert data.size < randvar.size
        assert np.all(mask == ~maskedarr.mask)

        #this is actually a silly thing to do, because it uses the standard
        #deviation as the variance, but it tests to make sure these arguments
        #are actually doing something
        data2, mask2 = misc.sigma_clip(randvar, 1, 2, varfunc=np.std)
        assert not np.all(data == data2)
        assert not np.all(mask == mask2)

        data3, mask3 = misc.sigma_clip(randvar, 1, 2, cenfunc=np.mean)
        assert not np.all(data == data3)
        assert not np.all(mask == mask3)

        #now just make sure the iters=None method works at all.
        maskedarr = misc.sigma_clip(randvar, 3, None, maout=True)
    finally:
        set_state(randstate)
Example #10
    def sample_ids(self, set_, size, offset):
        if set_ == TRAIN:
            # DEBUG
            # return [self.ids[set][49], self.ids[set][24]]* (size/2)
            # return [self.ids[set][49]]*size

            # return self.rng.choice(self.ids[set_], size=size, replace=False)

            # print ids

            if self.balance:
                classes = npr.choice(range(249), size=size, replace=False)
                ids = [npr.choice(self.idsperclass[set_][c]) for c in classes]
            else:
                offset = offset % len(self.ids[set_])
                assert offset + size <= len(self.ids[set_])
                ids = self.ids[set_][offset:offset + size]

            return ids
        else:

            state = npr.get_state()
            npr.seed(168465 + offset)

            ids = npr.choice(self.ids[set_], size=size, replace=False)
            # classes = npr.choice(range(249), size=size, replace=False)
            # ids = [npr.choice(self.idsperclass[set_][c]) for c in classes]
            # print ids

            npr.set_state(state)
            return ids
Example #11
def test0():
    random.set_state(state)
    (x, k, w) = initialize()
    (mu, S, priors, gamma, err) = kcluster0.gmm(x,
                                                k,
                                                weights=w,
                                                nreplicates=10)
Example #12
def permute_target_and_match_target_and_features(target, features,
                                                 min_n_sample, match_function,
                                                 n_permutation, random_seed):

    if n_permutation < 1:

        raise ValueError(
            'Not computing P-Value and FDR because n_permutation < 1.')

    print('Computing p-value and FDR with {} permutations ...'.format(
        n_permutation))

    feature_x_permutation = full((features.shape[0], n_permutation), nan)

    permuted_target = target.copy()

    seed(random_seed)

    for i in range(n_permutation):

        shuffle(permuted_target)

        random_state = get_state()

        feature_x_permutation[:, i] = match_target_and_features(
            permuted_target, features, min_n_sample, match_function)

        set_state(random_state)

    return feature_x_permutation
Example #13
def _reseed(config, offset=0):
    global entrypoint_reseeds
    seed = config.getoption("randomly_seed") + offset
    if seed not in random_states:
        random.seed(seed)
        random_states[seed] = random.getstate()
    else:
        random.setstate(random_states[seed])

    if have_factory_boy:
        factory_set_random_state(random_states[seed])

    if have_faker:
        faker_random.setstate(random_states[seed])

    if have_numpy:
        if seed not in np_random_states:
            np_random.seed(seed)
            np_random_states[seed] = np_random.get_state()
        else:
            np_random.set_state(np_random_states[seed])

    if entrypoint_reseeds is None:
        entrypoint_reseeds = [
            e.load() for e in entry_points().get("pytest_randomly.random_seeder", [])
        ]
    for reseed in entrypoint_reseeds:
        reseed(seed)
Example #14
def init_theta_la(theta, source_embsize, target_embsize, _seed=None):
    if _seed is not None:
        ori_state = get_state()
        seed(_seed)

    source_offset = 4 * source_embsize * source_embsize + 3 * source_embsize
    source_theta = theta[0:source_offset]
    target_theta = theta[source_offset:]

    parameters = []

    # Source side
    parameters.append(source_theta)
    # Wla n*n
    parameters.append(init_W(source_embsize, source_embsize))
    # bla n*1
    parameters.append(zeros(source_embsize))

    # Target side
    parameters.append(target_theta)
    # Wla n*n
    parameters.append(init_W(target_embsize, target_embsize))
    # bla n*1
    parameters.append(zeros(target_embsize))

    if _seed is not None:
        set_state(ori_state)

    return concatenate(parameters)
Example #15
 def _fantasize(self, pend):
     if self._use_mean_if_single_fantasy and self.options['num_fantasies'] == 1:
         predicted_mean, cov = self.predict(pend)
         return predicted_mean
     else:
         npr.set_state(self._random_state)
         return self.sample_from_posterior_given_hypers_and_data(pend, self.options['num_fantasies'])
Example #16
 def _fantasize(self, pend):
     if self._use_mean_if_single_fantasy and self.num_fantasies == 1:
         predicted_mean, cov = self.predict(pend)
         return predicted_mean
     else:
         npr.set_state(self._random_state)
         return self.sample_from_posterior_given_hypers_and_data(pend, self.num_fantasies)
Example #17
def init_theta(embsize, _seed=None):
    if _seed is not None:
        ori_state = get_state()
        seed(_seed)

    parameters = []

    # Wi1
    parameters.append(init_W(embsize, embsize))
    # Wi2
    parameters.append(init_W(embsize, embsize))
    # bi
    parameters.append(zeros(embsize))

    # Wo1
    parameters.append(init_W(embsize, embsize))
    # Wo2
    parameters.append(init_W(embsize, embsize))
    # bo1
    parameters.append(zeros(embsize))
    # bo2
    parameters.append(zeros(embsize))

    if _seed is not None:
        set_state(ori_state)

    return concatenate(parameters)
Example #18
 def generate( self, *args, **kwargs ):
     if 'seed' in kwargs:
         R.set_state(kwargs['seed'])
     kwargs = kb.format_kwargs(self.generator, kwargs)
     if 'N_timebins' not in kwargs:
         raise NameError('stimulus generator requires an N_timebins argument!')
     self.N_timebin_list += [kwargs['N_timebins']]
     self.seed_list      += [R.get_state()]
     return self.generator( *args, **kwargs )
Example #19
def randomIntSeed(start, end, seed):
    state = random.get_state()
    random.seed(seed)
    try:
        randIntSeeded = randomInt(start, end)
        return randIntSeeded
    finally:
        random.set_state(state)
Example #20
def shuffle_sets(ins, outs, times):

    state = nr.get_state()
    for i in range(0, times):
        nr.shuffle(ins)
    nr.set_state(state)
    for i in range(0, times):
        nr.shuffle(outs)
def randomListSelectionSeed(items, seed):
    state = random.get_state()
    random.seed(seed)
    try:
        seeded_selection = randomListSelection(items)
        return seeded_selection
    finally:
        random.set_state(state)
from contextlib import contextmanager

@contextmanager
def local_seed(seed):
    state = random.get_state()

    try:
        random.seed(seed)
        yield
    finally:
        random.set_state(state)
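A usage sketch for local_seed, with the contextmanager decorator applied as above and random bound to numpy.random:

import numpy.random as random

with local_seed(0):
    inside = random.rand(3)     # deterministic under seed 0
outside = random.rand(3)        # the outer stream resumes unaffected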
def randomIntList(start, end, length, seed):
    state = random.get_state()
    random.seed(seed)
    try:
        int_list = random.randint(start, end, length)
        return int_list
    finally:
        random.set_state(state)
def randomDecList(start, end, length, seed):
    state = random.get_state()
    random.seed(seed)
    try:
        dec_list = random.uniform(start, end, length)
        return dec_list
    finally:
        random.set_state(state)
def randomAmountSelectionSeed(items, num_values, seed):
    state = random.get_state()
    random.seed(seed)
    try:
        selection = randomAmountSelection(items, num_values)
        return selection
    finally:
        random.set_state(state)
Example #27
 def test_weighted_level_generator_with_cum_sum(self):
     for n in range(1000):
         state = random.random_integers(1, 10, 10)
         cum_sum = np.cumsum(state)
         rand_state = random.get_state()
         pos = generator.weighted_level_generator(state)
         random.set_state(rand_state)
         cs_pos = generator.weighted_level_generator(state, cum_sum)
         self.assertEqual(pos, cs_pos)
Example #28
	def __init__(self, obj, delayInterval): 
		self.obj=obj
		self.delayInterval=delayInterval
		self.start=delayInterval[0]
		self.range=delayInterval[1]-delayInterval[0]
		
		# Seed our own random generator, remember its state and restore original state
		state=random.get_state()
		random.seed()
		self.state=random.get_state()
		random.set_state(state)
Example #29
from contextlib import contextmanager

@contextmanager
def push_seed(seed=None): # pragma no cover
    """
    Set a temporary seed to the numpy random number generator, restoring it
    at the end of the context.  If seed is None, then get a seed from
    /dev/urandom (or the windows analogue).
    """
    from numpy.random import get_state, set_state, seed as set_seed
    state = get_state()
    set_seed(seed)
    yield
    set_state(state)
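A usage sketch, assuming push_seed is wrapped with contextlib.contextmanager as above:

import numpy as np

with push_seed(42):
    reproducible = np.random.rand(4)    # identical on every run
unaffected = np.random.rand(4)          # prior stream restored on exit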
Example #31
 def _update_get_state(self):
     """ Updates the state with a new random flip every second read """
     if self.countdown == 0:
         self.countdown = 1
         if self.seed:
             random.set_state(self._random_state)
         self._state = int(random.random() < self.p)
         if self.seed:
             self._random_state = random.get_state()
     else:
         self.countdown -= 1
     return self._state
Example #32
def main():
    rstate = random.get_state()
    reset_random()
    
    print ("Running tests 1 and 2 ...")
    test_1and2(trajectories)
    
    print ("Running test 3 ...")
    test_3(trajectories)
    
    print_timer()
    random.set_state(rstate)
Example #34
 def subset_pps5(self, nsamp):
     """
     Return a sample of nsamp distinct items from the population, sampled 
     without replacement with probability proportional to size (PPS)
     according to Sampford's sampling scheme.
     
     5-table lookup samplers are used within Sampford's algorithm to
     accelerate the sampling for large populations.
     """
     # Copy the whole population if nsamp = npopn.
     if nsamp == self.npopn:
         return [item for item in self.items]
     set_rng_state(*random.get_state())
     if self.equiprob:
         pool = arange(self.npopn)
         indices = equalprob(nsamp, pool)
     else:
         # This part of setup has to be done before any sampling.
         if not self.did_init:
              print('Initing ppssampler...')
             self.sampler = _ppssampler(self.weights)
             self.did_init = True
         # This part has to be done before any sampling w/o replacement.
         if not self.did_Sampford_init:
              print('Initing wts...')
             self.sort_indices, self.sort_wts, self.tot_wt = \
                 self.sampler.prepwts(self.weights)
             self.max_wt = self.sort_wts[0]/self.tot_wt  # Max wt, normed
             self.nsamp = 0
             self.did_Sampford_init = True
         # This part has to be done when sample size changes.
         if self.nsamp != nsamp:
              print('Initing ratios...')
              if nsamp > self.npopn:
                  raise ValueError('nsamp larger than population size!')
              if nsamp*self.max_wt > 1:
                  raise ValueError('Sample size too large for PPS sampling!')
             self.sampler.prepratios(nsamp, self.sort_wts, self.tot_wt)
             self.sampler.prepratiotables()
             self.did_Sampford_tables = True
             self.nsamp = nsamp
         # This may happen if subset_pps is called before subset_pps5.
         if not self.did_Sampford_tables:
              print('Initing ratio tables...')
             self.sampler.prepratiotables()
             self.did_Sampford_tables = True
         self.ntry, indices = self.sampler.samplenr5()
         # Note the 5-table version returns unsorted indices.
         # indices = [self.sort_indices[i] for i in sindices]
     result = [self.items[i] for i in indices]
     random.set_state(get_rng_state())
     return result
Example #35
 def subset_pps5(self, nsamp):
     """
     Return a sample of nsamp distinct items from the population, sampled 
     without replacement with probability proportional to size (PPS)
     according to Sampford's sampling scheme.
     
     5-table lookup samplers are used within Sampford's algorithm to
     accelerate the sampling for large populations.
     """
     # Copy the whole population if nsamp = npopn.
     if nsamp == self.npopn:
         return [item for item in self.items]
     set_rng_state(*random.get_state())
     if self.equiprob:
         pool = arange(self.npopn)
         indices = equalprob(nsamp, pool)
     else:
         # This part of setup has to be done before any sampling.
         if not self.did_init:
             print('Initing ppssampler...')
             self.sampler = _ppssampler(self.weights)
             self.did_init = True
         # This part has to be done before any sampling w/o replacement.
         if not self.did_Sampford_init:
             print('Initing wts...')
             self.sort_indices, self.sort_wts, self.tot_wt = \
                 self.sampler.prepwts(self.weights)
             self.max_wt = self.sort_wts[0] / self.tot_wt  # Max wt, normed
             self.nsamp = 0
             self.did_Sampford_init = True
         # This part has to be done when sample size changes.
         if self.nsamp != nsamp:
             print('Initing ratios...')
             if nsamp > self.npopn:
                 raise ValueError('nsamp larger than population size!')
             if nsamp * self.max_wt > 1:
                 raise ValueError('Sample size too large for PPS sampling!')
             self.sampler.prepratios(nsamp, self.sort_wts, self.tot_wt)
             self.sampler.prepratiotables()
             self.did_Sampford_tables = True
             self.nsamp = nsamp
         # This may happen if subset_pps is called before subset_pps5.
         if not self.did_Sampford_tables:
             print('Initing ratio tables...')
             self.sampler.prepratiotables()
             self.did_Sampford_tables = True
         self.ntry, indices = self.sampler.samplenr5()
         # Note the 5-table version returns unsorted indices.
         # indices = [self.sort_indices[i] for i in sindices]
     result = [self.items[i] for i in indices]
     random.set_state(get_rng_state())
     return result
Example #36
 def fixed_regen(self, values):
   # Ensure repeatability of randomness
   cur_pyr_state = random.getstate()
   cur_numpyr_state = npr.get_state()
   try:
     random.setstate(self.pyr_state)
     npr.set_state(self.numpyr_state)
     registerDeterministicLKernels(self.trace, self.scaffold, self.pnodes, values)
     answer = regenAndAttach(self.trace, self.scaffold.border[0], self.scaffold, False, OmegaDB(), {})
   finally:
     random.setstate(cur_pyr_state)
     npr.set_state(cur_numpyr_state)
   return answer
    def reset_random_seed(self):
        if not self.enabled:
            return

        if self.options.reset_seed:
            random.setstate(self.random_state)

            if have_factory_boy:
                factory_set_random_state(self.random_state)

            if have_faker:
                faker_random.setstate(self.random_state)

            if have_numpy:
                np_random.set_state(self.random_state_numpy)
Example #38
	def __call__(self, *args): 
		# Switch to our own random generator
		state=random.get_state()
		random.set_state(self.state)
		
		# Generate delay
		s=self.start+self.range*random.rand(1)[0]
		
		# Remember our state and switch back random generator
		self.state=random.get_state()
		random.set_state(state)
		
		# Sleep
		sleep(s)
		
		# Evaluate
		return self.obj(*args)
Example #39
def init_theta(source_embsize, target_embsize, _seed=None):
    if _seed is not None:
        ori_state = get_state()
        seed(_seed)

    parameters = []

    # Source Side
    # Wi1 n*n
    parameters.append(init_W(source_embsize, source_embsize))
    # Wi2 n*n
    parameters.append(init_W(source_embsize, source_embsize))
    # bi n*1
    parameters.append(zeros(source_embsize))

    # Wo1 n*n
    parameters.append(init_W(source_embsize, source_embsize))
    # Wo2 n*n
    parameters.append(init_W(source_embsize, source_embsize))
    # bo1 n*1
    parameters.append(zeros(source_embsize))
    # bo2 n*1
    parameters.append(zeros(source_embsize))

    # Target Side
    # Wi1 n*n
    parameters.append(init_W(target_embsize, target_embsize))
    # Wi2 n*n
    parameters.append(init_W(target_embsize, target_embsize))
    # bi n*1
    parameters.append(zeros(target_embsize))

    # Wo1 n*n
    parameters.append(init_W(target_embsize, target_embsize))
    # Wo2 n*n
    parameters.append(init_W(target_embsize, target_embsize))
    # bo1 n*1
    parameters.append(zeros(target_embsize))
    # bo2 n*1
    parameters.append(zeros(target_embsize))

    if _seed is not None:
        set_state(ori_state)

    return concatenate(parameters)
Example #40
def _match_randomly_sampled_target_and_features_to_compute_margin_of_errors(
    target,
    features,
    random_seed,
    n_sampling,
    match_function,
    n_required_for_match_function,
    raise_for_n_less_than_required,
):

    print("Computing MoE with {} sampling ...".format(n_sampling))

    seed(random_seed)

    feature_x_sampling = full((features.shape[0], n_sampling), nan)

    n_sample = int(ceil(0.632 * target.size))  # cast: ceil may return a float, but choice() needs an int size

    for i in range(n_sampling):

        random_indices = choice(target.size, size=n_sample, replace=True)

        sampled_target = target[random_indices]

        sampled_features = features[:, random_indices]

        random_state = get_state()

        feature_x_sampling[:, i] = _match_target_and_features(
            sampled_target,
            sampled_features,
            match_function,
            n_required_for_match_function,
            raise_for_n_less_than_required,
        )

        set_state(random_state)

    return apply_along_axis(compute_nd_array_margin_of_error,
                            1,
                            feature_x_sampling,
                            raise_for_bad=False)
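The 0.632 factor above is the classic bootstrap constant: a resample of size n drawn with replacement contains on average a fraction 1 - (1 - 1/n)**n ≈ 1 - 1/e ≈ 0.632 of the distinct original samples. A quick check:

import math

n = 1000
print(1 - (1 - 1.0 / n) ** n)   # 0.63230..., approaching the limit
print(1 - math.exp(-1))         # 0.63212...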
def _reseed(config, offset=0):
    seed = config.getoption('randomly_seed') + offset
    if seed not in random_states:
        random.seed(seed)
        random_states[seed] = random.getstate()
    else:
        random.setstate(random_states[seed])

    if have_factory_boy:
        factory_set_random_state(random_states[seed])

    if have_faker:
        faker_random.setstate(random_states[seed])

    if have_numpy:
        if seed not in np_random_states:
            np_random.seed(seed)
            np_random_states[seed] = np_random.get_state()
        else:
            np_random.set_state(np_random_states[seed])
Example #42
 def initialize_sim(self, rand_state):
     """
      Computes the attraction and satisfaction matrices (which are the same), which determine whether
      a user will click on an item
      """
     self._rand_state = rand_state
     rand.set_state(self._rand_state.get_state())
     self._user_pref = self._simulate_user_preference()
     self._item_loc = self._simulate_item_loc()
     self._distance_mat = euclidean_distances(self._user_pref,
                                              self._item_loc)
     self._null_dist = np.transpose(
         np.repeat(euclidean_distances([[0, 0]], self._item_loc),
                   self._param_container.users).reshape(
                       -1, self._param_container.users))
     # self._awareness_mat = self._get_awareness_mat()
     self._attr_mat = self._get_attractiveness_mat()
     self._satis_mat = self._get_satisfaction_mat()
     # self._satis_mat = self._attr_mat  # As in click-chain model, assume attractiveness = satisfaction prob
     self._has_init_sim = True
Example #44
from contextlib import contextmanager

@contextmanager
def deterministic_PRNG():
    """Context manager that handles random.seed without polluting global state.

    See issue #1255 and PR #1295 for details and motivation - in short,
    leaving the global pseudo-random number generator (PRNG) seeded is a very
    bad idea in principle, and breaks all kinds of independence assumptions
    in practice.
    """
    _random_state = random.getstate()
    random.seed(0)
    # These branches are covered by tests/numpy/, not tests/cover/
    if npr is not None:  # pragma: no cover
        _npr_state = npr.get_state()
        npr.seed(0)
    try:
        yield
    finally:
        random.setstate(_random_state)
        if npr is not None:  # pragma: no cover
            npr.set_state(_npr_state)
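A usage sketch (assuming the module-level random import the snippet relies on): each block starts from seed 0, and the outer PRNG state is untouched afterwards.

import random

with deterministic_PRNG():
    a = random.random()
with deterministic_PRNG():
    b = random.random()
assert a == b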
Example #45
def phaseout( objective, true_run , stimulus , V2 , phase ):
    iterations[0] = -1
    model = copy.deepcopy( true_run['model'] )
    model['U'] = retina.ring_weights( shape=true_run['model']['U'].shape, 
                                      offset_out=phase)
    phase_objective = LL_with_U( objective , model['U'] , V2 )
    true_objective  = LL_with_U( objective , true_run['model']['U'] , V2 )
    params      = optimize_V1( phase_objective , true_run , model['U'], V2 )
    opt_params  = optimize_V1( true_objective  , true_run , true_run['model']['V'], V2 )
    model['V']  = params['V1']
    dLopt = true_objective.LL(true_run['model']['V']) - true_objective.LL(opt_params)
    dLL   = phase_objective.LL(params) - true_objective.LL(opt_params)
    seed        = Rand.get_state()
    true_100    = retina.run_LNLNP( true_run['model'] , stimulus = stimulus , 
                                    keep=frozenset(['intensity']), N_timebins = 100000 )
    Rand.set_state(seed)
    this_100 = retina.run_LNLNP( model , stimulus = stimulus , 
                                 keep=frozenset(['intensity']), N_timebins = 100000 )
    intensities = np.vstack([true_100['intensity'].flatten(),this_100['intensity'].flatten()])
    order       = np.argsort( intensities[0,:] )
    intensities = intensities[:,order[::20]]
    p.close('all')
    p.figure(1, figsize=(10,12))
    ax0 = p.subplot(3,1,1)
    p.plot( true_100['model']['V'].T )
    #ax0.xaxis.set_label_text('subunits')
    p.title('V filters')
    ax1 = p.subplot(3,1,2)
    p.plot( true_100['model']['U'].T )
    #ax1.xaxis.set_label_text('cones')
    p.title('True U filters ,  sigma = %.0f' % (sigma_spatial[0]))
    ax2 = p.subplot(3,1,3)
    p.semilogy( intensities.T )
    ax2.xaxis.set_label_text('samples reordered by intensity of true model')
    ax2.yaxis.set_label_text('intensities in spikes / bin')
    p.title('Intensities of true vs. out-of-phase model  (dLL=%.1e bits/spike, dLopt=%.1e) ' % \
             (dLL/np.log(2.), dLopt/np.log(2.)) )
    #p.subplot(2,1,2)
    #p.plot( intensities.T )
    p.savefig('/Users/kolia/Desktop/out-of-phase_sigma%d_phase%.1f.pdf' % \
             (true_run['model']['sigma_spatial'][0], phase),format='pdf')
Example #46
def restore_rng(fname='.numpy-rng-state', notify=True):
    """
    Restore the state of NumPy's RNG from the contents of a file in the CWD
    if the file exists; otherwise use (and save) the default initialization.
    The default file name is '.numpy-rng-state'.
    """
    if os.access(fname, os.R_OK | os.W_OK):
        rng_file = open(fname, 'rb')  # pickle data must be read in binary mode
        id, state = pickle.load(rng_file)
        rng_file.close()
        if id == MT_id:
            # Note key is numpy.uint32 -> need repr() to see value.
            if notify:
                print('Recovered RNG state:  %s [%s %s ...] %i' %
                      (id, repr(state[0][0]), repr(state[0][1]), state[1]))
            random.set_state((id, state[0], state[1]))
        else:
            raise ValueError('Invalid ID for RNG in %s!' % fname)
    else:
        print('No accessible RNG status file; using (and saving) default initialization.')
        save_rng(fname)
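restore_rng depends on a save_rng counterpart and an MT_id tag that are not shown here. A minimal sketch consistent with the load format above (an assumption, not the original code):

import pickle
from numpy import random

MT_id = 'MT19937'  # assumed identifier tag checked on restore

def save_rng(fname='.numpy-rng-state'):
    id, keys, pos = random.get_state()[:3]
    rng_file = open(fname, 'wb')
    pickle.dump((id, (keys, pos)), rng_file)   # matches the (id, (keys, pos)) unpacking above
    rng_file.close()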
Example #47
def restore_rng(fname='.numpy-rng-state', notify=True):
    """
    Restore the state of NumPy's RNG from the contents of a file in the CWD
    if the file exists; otherwise use (and save) the default initialization.
    The default file name is '.numpy-rng-state'.
    """
    if os.access(fname, os.R_OK | os.W_OK):
        rng_file = open(fname, 'rb')  # pickle data must be read in binary mode
        id, state = pickle.load(rng_file)
        rng_file.close()
        if id == MT_id:
            # Note key is numpy.uint32 -> need repr() to see value.
            if notify:
                print('Recovered RNG state:  %s [%s %s ...] %i' %
                      (id, repr(state[0][0]), repr(state[0][1]), state[1]))
            random.set_state((id, state[0], state[1]))
        else:
            raise ValueError('Invalid ID for RNG in %s!' % fname)
    else:
        print('No accessible RNG status file; using (and saving) default initialization.')
        save_rng(fname)
Example #48
 def test_distro_v_get_integrity(self):
     """
     Distro.v_get equivalent to repeated Distro.get_val
     """
     from copy import deepcopy
     # duplicate the self.distro
     distro2 = deepcopy(self.distro)
     set_state(self.seed)
     # draw 2*self.queue_size>distro_values
     _shape_object = [0] * (2 * self.queue_size)
     distro_values = self.distro.v_get(_shape_object)
     set_state(self.seed)
     distro2_values = [
          distro2.get_val() for _ in range(len(_shape_object))
     ]
     self.assertEqual(
             distro_values,
             distro2_values,
             'Distro.get_val and Distro.v_get do not produce '\
                     'identical output'
             )
Example #49
def check_grad(fun,
               test_x,
               error_tol=1e-3,
               delta=1e-5,
               verbose=False,
               fun_args=[]):
    if verbose:
        sys.stderr.write('Checking gradients...\n')

    state_before_checking = npr.get_state()
    fixed_seed = 5  # arbitrary

    npr.seed(fixed_seed)
    analytical_grad = fun(test_x, *fun_args)[1]
    D = test_x.shape[1] if test_x.ndim > 1 else test_x.size
    grad_check = np.zeros(
        analytical_grad.shape) if analytical_grad.size > 1 else np.zeros(1)
    for i in range(D):
        unit_vector = np.zeros(D)
        unit_vector[i] = delta
        npr.seed(fixed_seed)
        forward_val = fun(test_x + unit_vector, *fun_args)[0]
        npr.seed(fixed_seed)
        backward_val = fun(test_x - unit_vector, *fun_args)[0]
        grad_check_i = (forward_val - backward_val) / (2 * delta)
        if test_x.ndim > 1:
            grad_check[:, i] = grad_check_i
        else:
            grad_check[i] = grad_check_i
    grad_diff = grad_check - analytical_grad
    err = np.sqrt(np.sum(grad_diff**2))

    if verbose:
        sys.stderr.write('Analytical grad: %s\n' % str(analytical_grad))
        sys.stderr.write('Estimated grad:  %s\n' % str(grad_check))
        sys.stderr.write('L2-norm of gradient error = %g\n' % err)

    npr.set_state(state_before_checking)

    return err < error_tol
Example #50
 def sample(self, nsamp):
     """
     Return a set of nsamp samples from the population, sampled with
     replacement.
     """
     # *** Implement equiprob case.
     if self.equiprob:
          raise NotImplementedError('Awaiting code...')
     if not self.did_init:
         self.sampler = _ppssampler(self.weights)
         self.did_init = True
     # Track the RNG state within the sampler, to update NumPy's RNG state.
     # Internally we only use the MT state; any extra state for cached
     # normal or other samples can just be copied.
     rng_state = random.get_state()
     mt_state, extra_state = rng_state[:3], rng_state[3:]
     set_rng_state(*mt_state)  # *** modify to handle full rng state
     indices = self.sampler.sample(nsamp)
     new_state = list(get_rng_state())
     new_state.extend(extra_state)
     random.set_state(new_state)
     return [self.items[i] for i in indices]
Example #51
 def sample(self, nsamp):
     """
     Return a set of nsamp samples from the population, sampled with
     replacement.
     """
     # *** Implement equiprob case.
     if self.equiprob:
         raise NotImplementedError('Awaiting code...')
     if not self.did_init:
         self.sampler = _ppssampler(self.weights)
         self.did_init = True
     # Track the RNG state within the sampler, to update NumPy's RNG state.
     # Internally we only use the MT state; any extra state for cached
     # normal or other samples can just be copied.
     rng_state = random.get_state()
     mt_state, extra_state = rng_state[:3], rng_state[3:]
     set_rng_state(*mt_state)  # *** modify to handle full rng state
     indices = self.sampler.sample(nsamp)
     new_state = list(get_rng_state())
     new_state.extend(extra_state)
     random.set_state(new_state)
     return [self.items[i] for i in indices]
Example #52
def _permute_and_score(args):
    """
    Compute: ith score = function(target, ith feature) for n_permutations times.
    :param args: list-like;
        (Series (m_samples); target,
         DataFrame (n_features, m_samples); features,
         function,
         int; n_permutations,
         array; random_state)
    :return: DataFrame; (n_features, n_permutations)
    """

    if len(args) != 5:
        raise ValueError(
            'args is not length of 5 (target, features, function, n_perms, and random_state).'
        )
    else:
        t, f, func, n_perms, random_seed = args

    scores = DataFrame(index=f.index, columns=range(n_perms))

    # Target array to be permuted during each permutation
    permuted_t = array(t)

    seed(random_seed)
    for p in range(n_perms):
        print_log('\tScoring against permuted target ({}/{}) ...'.format(
            p, n_perms),
                  print_process=True)

        shuffle(permuted_t)
        rs = get_state()

        scores.iloc[:, p] = f.apply(lambda r: func(permuted_t, r), axis=1)

        set_state(rs)

    return scores
Example #53
    def simulate(self, warm_up_frac):
        """
        Starts the simulation
        :param warm_up_frac: fraction of users used to determine the overall item
            popularity (which is used as the item order)
        :return: The result of the simulation, and a dataframe with information about the different cookies
        """
        t0 = time.time()
        print("Starting simulation")
        if not self._has_init_sim:
            raise ValueError(
                "Initialize the simulation first before running the simulation"
            )

        print("Running warm-up")
        warm_up_users = math.ceil(warm_up_frac * self._param_container.users)
        rand.set_state(self._rand_state.get_state())

        init_rel = np.repeat(1 / self._param_container.items,
                             self._param_container.items)

        warm_up_res = self._run_simulation(warm_up_users, init_rel)

        dur = round(time.time() - t0)
        print("Warm-up finished, time: " + str(dur) + " seconds")

        new_relevance = self._get_warmed_up_satisfaction(warm_up_res)

        print("Running simulation:")
        sim_result = self._run_simulation(
            self._param_container.users - warm_up_users, new_relevance)

        dur = round(time.time() - t0)
        print("Simulation finished, simulation time: " + str(dur) + " seconds")

        return sim_result
Example #54
def test_all_world2pix(
    fname=None,
    ext=0,
    tolerance=1.0e-4,
    origin=0,
    random_npts=250000,
    mag=2,
    adaptive=False,
    maxiter=20,
    detect_divergence=True,
):
    """Test all_world2pix, iterative inverse of all_pix2world"""
    from numpy import random
    from datetime import datetime
    from astropy.io import fits
    from os import path

    # Open test FITS file:
    if fname is None:
        fname = get_pkg_data_filename("data/j94f05bgq_flt.fits")
        ext = ("SCI", 1)
    if not path.isfile(fname):
        raise IOError("Input file '{:s}' to 'test_all_world2pix' not found.".format(fname))
    h = fits.open(fname)
    w = wcs.WCS(h[ext].header, h)
    h.close()
    del h

    crpix = w.wcs.crpix
    ncoord = crpix.shape[0]

    # Assume that CRPIX is at the center of the image and that the image
    # has an even number of pixels along each axis:
    naxesi = list(2 * crpix.astype(int) - origin)  # np.int was removed from NumPy; use the builtin

    # Generate integer indices of pixels (image grid):
    img_pix = np.dstack([i.flatten() for i in np.meshgrid(*map(range, naxesi))])[0]

    # Generate random data (in image coordinates):
    startstate = random.get_state()
    random.seed(123456789)
    rnd_pix = np.random.rand(random_npts, ncoord)
    random.set_state(startstate)

    # Scale random data to cover the entire image (or more, if 'mag' > 1).
    # Assume that CRPIX is at the center of the image and that the image
    # has an even number of pixels along each axis:
    mwidth = 2 * mag * (crpix - origin)
    rnd_pix = crpix - 0.5 * mwidth + (mwidth - 1) * rnd_pix

    # Reference pixel coordinates in image coordinate system (CS):
    test_pix = np.append(img_pix, rnd_pix, axis=0)
    # Reference pixel coordinates in sky CS using forward transformation:
    all_world = w.all_pix2world(test_pix, origin)

    try:
        runtime_begin = datetime.now()
        # Apply the inverse iterative process to pixels in world coordinates
        # to recover the pixel coordinates in image space.
        all_pix = w.all_world2pix(
            all_world,
            origin,
            tolerance=tolerance,
            adaptive=adaptive,
            maxiter=maxiter,
            detect_divergence=detect_divergence,
        )
        runtime_end = datetime.now()
    except wcs.wcs.NoConvergence as e:
        runtime_end = datetime.now()
        ndiv = 0
        if e.divergent is not None:
            ndiv = e.divergent.shape[0]
            print("There are {} diverging solutions.".format(ndiv))
            print("Indices of diverging solutions:\n{}".format(e.divergent))
            print("Diverging solutions:\n{}\n".format(e.best_solution[e.divergent]))
            print(
                "Mean radius of the diverging solutions: {}".format(
                    np.mean(np.linalg.norm(e.best_solution[e.divergent], axis=1))
                )
            )
            print(
                "Mean accuracy of the diverging solutions: {}\n".format(
                    np.mean(np.linalg.norm(e.accuracy[e.divergent], axis=1))
                )
            )
        else:
            print("There are no diverging solutions.")

        nslow = 0
        if e.slow_conv is not None:
            nslow = e.slow_conv.shape[0]
            print("There are {} slowly converging solutions.".format(nslow))
            print("Indices of slowly converging solutions:\n{}".format(e.slow_conv))
            print("Slowly converging solutions:\n{}\n".format(e.best_solution[e.slow_conv]))
        else:
            print("There are no slowly converging solutions.\n")

        print("There are {} converged solutions.".format(e.best_solution.shape[0] - ndiv - nslow))
        print("Best solutions (all points):\n{}".format(e.best_solution))
        print("Accuracy:\n{}\n".format(e.accuracy))
        print(
            "\nFinished running 'test_all_world2pix' with errors.\n"
            "ERROR: {}\nRun time: {}\n".format(e.args[0], runtime_end - runtime_begin)
        )
        raise e

    # Compute differences between reference pixel coordinates and
    # pixel coordinates (in image space) recovered from reference
    # pixels in world coordinates:
    errors = np.sqrt(np.sum(np.power(all_pix - test_pix, 2), axis=1))
    meanerr = np.mean(errors)
    maxerr = np.amax(errors)
    print(
        "\nFinished running 'test_all_world2pix'.\n"
        "Mean error = {0:e}  (Max error = {1:e})\n"
        "Run time: {2}\n".format(meanerr, maxerr, runtime_end - runtime_begin)
    )

    assert maxerr < 2.0 * tolerance
Example #55
def _set_seed(iseed):
    if iseed is not None:
        try:
            random.set_state(iseed)
        except Exception:  # iseed was not a full state tuple; fall back to plain seeding
            random.seed(iseed)
Example #56
 def save_random_state():
     state = get_state()
     try:
         yield
     finally:
         set_state(state)
    def compute_ei(self, comp, pend, cand, vals):
        if pend.shape[0] == 0:
            # If there are no pending, don't do anything fancy.

            # Current best.
            best = np.min(vals)

            # The primary covariances for prediction.
            comp_cov   = self.cov(comp)
            cand_cross = self.cov(comp, cand)

            # Compute the required Cholesky.
            obsv_cov  = comp_cov + self.noise*np.eye(comp.shape[0])
            obsv_chol = spla.cholesky( obsv_cov, lower=True )

            # Solve the linear systems.
            alpha  = spla.cho_solve((obsv_chol, True), vals - self.mean)
            beta   = spla.solve_triangular(obsv_chol, cand_cross, lower=True)

            # Predict the marginal means and variances at candidates.
            func_m = np.dot(cand_cross.T, alpha) + self.mean
            func_v = self.amp2*(1+1e-6) - np.sum(beta**2, axis=0)

            # Expected improvement
            func_s = np.sqrt(func_v)
            u      = (best - func_m) / func_s
            ncdf   = sps.norm.cdf(u)
            npdf   = sps.norm.pdf(u)
            ei     = func_s*( u*ncdf + npdf)

            return ei
        else:
            # If there are pending experiments, fantasize their outcomes.

            # Create a composite vector of complete and pending.
            comp_pend = np.concatenate((comp, pend))

            # Compute the covariance and Cholesky decomposition.
            comp_pend_cov  = (self.cov(comp_pend) +
                              self.noise*np.eye(comp_pend.shape[0]))
            comp_pend_chol = spla.cholesky(comp_pend_cov, lower=True)

            # Compute submatrices.
            pend_cross = self.cov(comp, pend)
            pend_kappa = self.cov(pend)

            # Use the sub-Cholesky.
            obsv_chol = comp_pend_chol[:comp.shape[0],:comp.shape[0]]

            # Solve the linear systems.
            alpha  = spla.cho_solve((obsv_chol, True), vals - self.mean)
            beta   = spla.cho_solve((obsv_chol, True), pend_cross)

            # Finding predictive means and variances.
            pend_m = np.dot(pend_cross.T, alpha) + self.mean
            pend_K = pend_kappa - np.dot(pend_cross.T, beta)

            # Take the Cholesky of the predictive covariance.
            pend_chol = spla.cholesky(pend_K, lower=True)

            # Make predictions.
            npr.set_state(self.randomstate)
            pend_fant = np.dot(pend_chol, npr.randn(pend.shape[0],self.pending_samples)) + pend_m[:,None]

            # Include the fantasies.
            fant_vals = np.concatenate(
                (np.tile(vals[:,np.newaxis],
                         (1,self.pending_samples)), pend_fant))

            # Compute bests over the fantasies.
            bests = np.min(fant_vals, axis=0)

            # Now generalize from these fantasies.
            cand_cross = self.cov(comp_pend, cand)

            # Solve the linear systems.
            alpha  = spla.cho_solve((comp_pend_chol, True),
                                    fant_vals - self.mean)
            beta   = spla.solve_triangular(comp_pend_chol, cand_cross,
                                           lower=True)

            # Predict the marginal means and variances at candidates.
            func_m = np.dot(cand_cross.T, alpha) + self.mean
            func_v = self.amp2*(1+1e-6) - np.sum(beta**2, axis=0)

            # Expected improvement
            func_s = np.sqrt(func_v[:,np.newaxis])
            u      = (bests[np.newaxis,:] - func_m) / func_s
            ncdf   = sps.norm.cdf(u)
            npdf   = sps.norm.pdf(u)
            ei     = func_s*( u*ncdf + npdf)

            return np.mean(ei, axis=1)
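A quick numerical sanity check (a sketch, not part of the original class) that the closed form func_s * (u * ncdf + npdf) used above equals the expected improvement E[max(best - y, 0)] for y ~ N(m, s**2):

import numpy as np
import scipy.stats as sps

m, s, best = 0.3, 0.8, 0.0
u = (best - m) / s
closed_form = s * (u * sps.norm.cdf(u) + sps.norm.pdf(u))
draws = np.random.normal(m, s, 1000000)
monte_carlo = np.maximum(best - draws, 0).mean()
assert abs(closed_form - monte_carlo) < 1e-2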
    def grad_optimize_ei(self, cand, comp, pend, vals, compute_grad=True):
        if pend.shape[0] == 0:
            best = np.min(vals)
            cand = np.reshape(cand, (-1, comp.shape[1]))

            # The primary covariances for prediction.
            comp_cov   = self.cov(comp)
            cand_cross = self.cov(comp, cand)

            # Compute the required Cholesky.
            obsv_cov  = comp_cov + self.noise*np.eye(comp.shape[0])
            obsv_chol = spla.cholesky(obsv_cov, lower=True)

            cov_grad_func = getattr(gp, 'grad_' + self.cov_func.__name__)
            cand_cross_grad = cov_grad_func(self.ls, comp, cand)

            # Predictive things.
            # Solve the linear systems.
            alpha  = spla.cho_solve((obsv_chol, True), vals - self.mean)
            beta   = spla.solve_triangular(obsv_chol, cand_cross, lower=True)

            # Predict the marginal means and variances at candidates.
            func_m = np.dot(cand_cross.T, alpha) + self.mean
            func_v = self.amp2*(1+1e-6) - np.sum(beta**2, axis=0)

            # Expected improvement
            func_s = np.sqrt(func_v)
            u      = (best - func_m) / func_s
            ncdf   = sps.norm.cdf(u)
            npdf   = sps.norm.pdf(u)
            ei     = func_s*( u*ncdf + npdf)

            if not compute_grad:
                return ei

            # Gradients of ei w.r.t. mean and variance
            g_ei_m = -ncdf
            g_ei_s2 = 0.5*npdf / func_s

            # Apply covariance function
            grad_cross = np.squeeze(cand_cross_grad)

            grad_xp_m = np.dot(alpha.transpose(),grad_cross)
            grad_xp_v = np.dot(-2*spla.cho_solve(
                    (obsv_chol, True),cand_cross).transpose(), grad_cross)

            grad_xp = 0.5*self.amp2*(grad_xp_m*g_ei_m + grad_xp_v*g_ei_s2)
            ei = -np.sum(ei)

            return ei, grad_xp.flatten()

        else:
            # If there are pending experiments, fantasize their outcomes.
            cand = np.reshape(cand, (-1, comp.shape[1]))

            # Create a composite vector of complete and pending.
            comp_pend = np.concatenate((comp, pend))

            # Compute the covariance and Cholesky decomposition.
            comp_pend_cov  = (self.cov(comp_pend) +
                              self.noise*np.eye(comp_pend.shape[0]))
            comp_pend_chol = spla.cholesky(comp_pend_cov, lower=True)

            # Compute submatrices.
            pend_cross = self.cov(comp, pend)
            pend_kappa = self.cov(pend)

            # Use the sub-Cholesky.
            obsv_chol = comp_pend_chol[:comp.shape[0],:comp.shape[0]]

            # Solve the linear systems.
            alpha  = spla.cho_solve((obsv_chol, True), vals - self.mean)
            beta   = spla.cho_solve((obsv_chol, True), pend_cross)

            # Finding predictive means and variances.
            pend_m = np.dot(pend_cross.T, alpha) + self.mean
            pend_K = pend_kappa - np.dot(pend_cross.T, beta)

            # Take the Cholesky of the predictive covariance.
            pend_chol = spla.cholesky(pend_K, lower=True)

            # Make predictions.
            npr.set_state(self.randomstate)
            pend_fant = np.dot(pend_chol, npr.randn(pend.shape[0],self.pending_samples)) + pend_m[:,None]

            # Include the fantasies.
            fant_vals = np.concatenate(
                (np.tile(vals[:,np.newaxis],
                         (1,self.pending_samples)), pend_fant))

            # Compute bests over the fantasies.
            bests = np.min(fant_vals, axis=0)

            # Now generalize from these fantasies.
            cand_cross = self.cov(comp_pend, cand)
            cov_grad_func = getattr(gp, 'grad_' + self.cov_func.__name__)
            cand_cross_grad = cov_grad_func(self.ls, comp_pend, cand)

            # Solve the linear systems.
            alpha  = spla.cho_solve((comp_pend_chol, True),
                                    fant_vals - self.mean)
            beta   = spla.solve_triangular(comp_pend_chol, cand_cross,
                                           lower=True)

            # Predict the marginal means and variances at candidates.
            func_m = np.dot(cand_cross.T, alpha) + self.mean
            func_v = self.amp2*(1+1e-6) - np.sum(beta**2, axis=0)

            # Expected improvement
            func_s = np.sqrt(func_v[:,np.newaxis])
            u      = (bests[np.newaxis,:] - func_m) / func_s
            ncdf   = sps.norm.cdf(u)
            npdf   = sps.norm.pdf(u)
            ei     = func_s*( u*ncdf + npdf)

            # Gradients of ei w.r.t. mean and variance
            g_ei_m = -ncdf
            g_ei_s2 = 0.5*npdf / func_s

            # Apply covariance function
            grad_cross = np.squeeze(cand_cross_grad)

            grad_xp_m = np.dot(alpha.transpose(),grad_cross)
            grad_xp_v = np.dot(-2*spla.cho_solve(
                    (comp_pend_chol, True),cand_cross).transpose(), grad_cross)

            grad_xp = 0.5*self.amp2*(grad_xp_m*np.tile(g_ei_m,(comp.shape[1],1)).T + (grad_xp_v.T*g_ei_s2).T)
            ei = -np.mean(ei, axis=1)
            grad_xp = np.mean(grad_xp,axis=0)

            return ei, grad_xp.flatten()
Example #59
def unison_shuffle(a, b):
	# save the RNG state so both shuffles apply the identical permutation
	state = random.get_state()
	random.shuffle(a)
	random.set_state(state)
	random.shuffle(b)
Example #60
    def __exit__(self, exc_type, exc_value, traceback):
        from numpy import random

        random.set_state(self.startstate)