Example #1
0
 def simulate_stationary_distribution(self, burning_time_per_estimate, samples_per_estimate, number_of_estimates=1,
                                      seed=None):
     """
     Estimate the stationary distribution of the evolutionary process by
     simulation.

     For each estimate the process is started from a random edge population,
     run for ``burning_time_per_estimate`` burn-in steps, and then sampled for
     ``samples_per_estimate`` steps; the time-average of the sampled population
     states is one estimate. The returned vector is the normalized mean over
     all estimates.

     Parameters
     ----------
     burning_time_per_estimate : int
         Number of steps discarded before sampling begins.
     samples_per_estimate : int
         Number of sampled steps per estimate.
     number_of_estimates : int, optional
         Number of independent estimates to average (default 1).
     seed : int or None, optional
         Seed forwarded to ``np.random.seed`` for reproducibility.

     Returns
     -------
     ndarray
         Normalized estimate of the stationary distribution over strategies.
     """
     np.random.seed(seed)
     ans = np.zeros(self.number_of_strategies)
     pop_size = self.population_size
     payoff_func = self.payoff_function
     intensity = self.intensity_of_selection
     nr_strategies = self.number_of_strategies
     kernel = self.mutation_kernel
     mapping = self.fitness_mapping
     # hoist attribute lookups out of the loops (no dots inside loops)
     random_edge = utils.random_edge_population
     for __ in range(0, number_of_estimates):
         single_estimate = np.zeros(self.number_of_strategies)
         population_array = random_edge(nr_strategies, pop_size)
         # burn time
         for _ in range(0, burning_time_per_estimate):
             (population_array, ___) = _detached_step(population_array, pop_size, payoff_func, intensity,
                                                      nr_strategies, kernel, mapping, mutation_step=True)
         # sample
         for _ in range(0, samples_per_estimate):
             single_estimate = single_estimate + population_array
             (population_array, ___) = _detached_step(population_array, pop_size, payoff_func, intensity,
                                                      nr_strategies, kernel, mapping, mutation_step=True)
         # BUG FIX: average and accumulate once per estimate, AFTER the
         # sampling loop. Previously these two lines ran inside the loop,
         # repeatedly rescaling earlier samples by 1/samples_per_estimate
         # and adding the partial sum to `ans` on every iteration.
         single_estimate *= (1.0 / samples_per_estimate)
         ans += single_estimate
     return utils.normalize_vector((1.0 / number_of_estimates) * ans)
Example #2
0
def stationary_distribution(transition_matrix_markov_chain):
    '''
    Computes the stationary_distribution of a markov chain. The matrix is given by rows.

    Parameters
    ----------
    transition_matrix_markov_chain: ndarray (must be a numpy array)

    Returns
    -------
    out: ndarray

    Examples
    -------
    >>>stationary_distribution(np.array([[0.1,0.9],[0.9,0.1]]))
    Out[1]: array([ 0.5,  0.5])
    >>>stationary_distribution(np.array([[0.1,0.0],[0.9,0.1]]))
    Out[1]: array([ 1.,  0.])
    >>>stationary_distribution(np.array([[0.6,0.4],[0.2,0.8]]))
    Out[1]: array([ 0.33333333,  0.66666667])
    '''
    # rows -> columns so that eigenvectors of the transpose are the
    # left eigenvectors of the original chain
    transition_matrix_markov_chain = transition_matrix_markov_chain.T
    eigenvalues, eigenvectors = np.linalg.eig(transition_matrix_markov_chain)
    # BUG FIX: np.linalg.eig may return complex eigenvalues; ordering
    # complex numbers with max()/itemgetter raises TypeError on Python 3.
    # The stationary eigenvalue is the one with the largest real part
    # (equal to 1 for a stochastic matrix), so compare real parts.
    index = int(np.argmax(np.real(eigenvalues)))
    # return the normalized eigenvector for that eigenvalue,
    # discarding any residual imaginary component
    return utils.normalize_vector(np.real(eigenvectors[:, index]))
Example #3
0
def stationary_distribution(transition_matrix_markov_chain):
    '''
    Computes the stationary_distribution of a markov chain. The matrix is given by rows.

    Parameters
    ----------
    transition_matrix_markov_chain: ndarray (must be a numpy array)

    Returns
    -------
    out: ndarray

    Examples
    -------
    >>>stationary_distribution(np.array([[0.1,0.9],[0.9,0.1]]))
    Out[1]: array([ 0.5,  0.5])
    >>>stationary_distribution(np.array([[0.1,0.0],[0.9,0.1]]))
    Out[1]: array([ 1.,  0.])
    >>>stationary_distribution(np.array([[0.6,0.4],[0.2,0.8]]))
    Out[1]: array([ 0.33333333,  0.66666667])
    '''
    # rows -> columns so that eigenvectors of the transpose are the
    # left eigenvectors of the original chain
    transition_matrix_markov_chain = transition_matrix_markov_chain.T
    eigenvalues, eigenvectors = np.linalg.eig(transition_matrix_markov_chain)
    # BUG FIX: np.linalg.eig may return complex eigenvalues; ordering
    # complex numbers with max()/itemgetter raises TypeError on Python 3.
    # The stationary eigenvalue is the one with the largest real part
    # (equal to 1 for a stochastic matrix), so compare real parts.
    index = int(np.argmax(np.real(eigenvalues)))
    # return the normalized eigenvector for that eigenvalue,
    # discarding any residual imaginary component
    return utils.normalize_vector(np.real(eigenvectors[:, index]))
Example #4
0
 def test_normalize_vector(self):
     """Any random vector must sum to one after normalization."""
     for _ in range(10):
         vec = np.random.rand(10)
         total = np.sum(utils.normalize_vector(vec))
         np.testing.assert_almost_equal(
             total,
             1,
             decimal=10,
             err_msg="Normalized vectors should sum up to be one")
Example #5
0
 def simulate_stationary_distribution(self,
                                      burning_time_per_estimate,
                                      samples_per_estimate,
                                      number_of_estimates=1,
                                      seed=None):
     """
     Estimate the stationary distribution of the evolutionary process by
     simulation.

     For each estimate the process is started from a random edge population,
     run for ``burning_time_per_estimate`` burn-in steps, and then sampled for
     ``samples_per_estimate`` steps; the time-average of the sampled population
     states is one estimate. The returned vector is the normalized mean over
     all estimates.

     Parameters
     ----------
     burning_time_per_estimate : int
         Number of steps discarded before sampling begins.
     samples_per_estimate : int
         Number of sampled steps per estimate.
     number_of_estimates : int, optional
         Number of independent estimates to average (default 1).
     seed : int or None, optional
         Seed forwarded to ``np.random.seed`` for reproducibility.

     Returns
     -------
     ndarray
         Normalized estimate of the stationary distribution over strategies.
     """
     np.random.seed(seed)
     ans = np.zeros(self.number_of_strategies)
     pop_size = self.population_size
     payoff_func = self.payoff_function
     intensity = self.intensity_of_selection
     nr_strategies = self.number_of_strategies
     kernel = self.mutation_kernel
     mapping = self.fitness_mapping
     # hoist attribute lookups out of the loops (no dots inside loops)
     random_edge = utils.random_edge_population
     for __ in range(0, number_of_estimates):
         single_estimate = np.zeros(self.number_of_strategies)
         population_array = random_edge(nr_strategies, pop_size)
         # burn time
         for _ in range(0, burning_time_per_estimate):
             (population_array, ___) = _detached_step(population_array,
                                                      pop_size,
                                                      payoff_func,
                                                      intensity,
                                                      nr_strategies,
                                                      kernel,
                                                      mapping,
                                                      mutation_step=True)
         # sample
         for _ in range(0, samples_per_estimate):
             single_estimate = single_estimate + population_array
             (population_array, ___) = _detached_step(population_array,
                                                      pop_size,
                                                      payoff_func,
                                                      intensity,
                                                      nr_strategies,
                                                      kernel,
                                                      mapping,
                                                      mutation_step=True)
         # BUG FIX: average and accumulate once per estimate, AFTER the
         # sampling loop. Previously these two lines ran inside the loop,
         # repeatedly rescaling earlier samples by 1/samples_per_estimate
         # and adding the partial sum to `ans` on every iteration.
         single_estimate *= (1.0 / samples_per_estimate)
         ans += single_estimate
     return utils.normalize_vector((1.0 / number_of_estimates) * ans)
Example #6
0
 def test_normalize_vector(self):
     """Check that 10 random vectors all sum up to 1 when normalized."""
     # BUG FIX: xrange is Python 2 only; use range for Python 3
     # (consistent with the rest of the file)
     for _ in range(0, 10):
         random_vector = np.random.rand(10)
         np.testing.assert_almost_equal(
             np.sum(utils.normalize_vector(random_vector)),
             1,
             decimal=10,
             err_msg="Normalized vectors should sum up to be one")