def __init__(self, pivot_mutation_weights=None, seed=None,
             out_file_name=None, orgs_file_name=None, **kwargs):
    """Initialize mutation distributions and optional RNG/I-O state.

    Parameters
    ----------
    pivot_mutation_weights : sequence of 3 numbers, optional
        Weights for the (connect, split, perturb) mutations of the pivot
        distribution; when None, no pivot distribution is built.
    seed : optional
        When given, seeds ``self.random``.
    out_file_name : str, optional
        When given, opened for writing and stored as ``self.out``.
    orgs_file_name : str, optional
        When given, stored on the instance for later use.
    **kwargs
        Any remaining keyword arguments are set verbatim as instance
        attributes.

    NOTE(review): the original signature was lost when this file was
    collapsed; the parameters above are reconstructed from the names the
    body reads — confirm against callers.
    """
    super().__init__()
    # Primary mutation distribution. The weight names below are presumed
    # module-level constants -- TODO confirm they exist at module scope.
    self.mutations = util.Distribution()
    self.mutations.add(connect_mutation_weight,
                       ann_mutations.ConnectMutation(6.0))
    # self.mutations.add(1, ann_mutations.DisconnectMutation())
    self.mutations.add(split_mutation_weight,
                       ann_mutations.SplitMutation(6.0))
    self.mutations.add(perturb_mutation_weight,
                       ann_mutations.PerturbMutation(3.0))
    if pivot_mutation_weights is not None:
        # Separate distribution used for "pivot" mutations, with
        # caller-supplied weights in (connect, split, perturb) order.
        self.pivot_mutations_distribution = util.Distribution()
        self.pivot_mutations_distribution.add(
            pivot_mutation_weights[0], ann_mutations.ConnectMutation(6.0))
        self.pivot_mutations_distribution.add(
            pivot_mutation_weights[1], ann_mutations.SplitMutation(6.0))
        self.pivot_mutations_distribution.add(
            pivot_mutation_weights[2], ann_mutations.PerturbMutation(3.0))
    if seed is not None:
        # assumes self.random is provided by the base class -- TODO confirm
        self.random.seed(seed)
    if out_file_name is not None:
        # NOTE(review): file handle is never closed here; the caller owns
        # its lifetime.
        self.out = open(out_file_name, "w")
    if orgs_file_name is not None:
        self.orgs_file_name = orgs_file_name
    # Allow arbitrary configuration overrides via keyword arguments.
    for k, v in kwargs.items():
        setattr(self, k, v)
def pagerank(conditional_distribution, N_samples, jumps_between_samples):
    """
    Computes the steady-state distribution by simulating running the Markov
    chain. Collects samples at regular intervals and returns the empirical
    distribution of the samples.

    Inputs
    ------
    conditional_distribution : A dictionary in which each key is an state,
                               and each value is a Distribution over other
                               states.

    N_samples : the desired number of samples for the approximate empirical
                distribution

    jumps_between_samples : how many jumps to perform between each collected
                            sample

    Returns
    -------
    An empirical Distribution over the states that should approximate the
    steady-state distribution.
    """
    # Start the chain from a uniformly chosen state.
    states = list(conditional_distribution.keys())
    state = random.choice(states)

    empirical_distribution = util.Distribution()
    collected = 0
    jumps = 0
    while collected < N_samples:
        jumps += 1
        # 10% "teleport" probability keeps the chain ergodic even when the
        # transition graph is not strongly connected (standard PageRank trick).
        if random.random() < 0.1:
            state = random.choice(states)
        else:
            state = conditional_distribution[state].sample()
        # Only record every jumps_between_samples-th state so consecutive
        # samples are approximately independent.
        if jumps % jumps_between_samples == 0:
            empirical_distribution[state] += 1
            collected += 1

    empirical_distribution.renormalize()
    return empirical_distribution
def compute_distributions(actor_to_movies, movie_to_actors):
    """
    Computes conditional distributions for transitioning between actors
    (states).

    Inputs
    ------
    actor_to_movies : a dictionary in which each key is an actor name and
                      each value is a list of movies that actor starred in

    movie_to_actors : a dictionary in which each key is a movie and each
                      value is a list of actors in that movie

    Returns
    -------
    A dictionary in which each key is an actor, and each value is a
    Distribution over other actors. The probability of transitioning
    from actor i to actor j should be proportional to the number of
    movies they starred in together.
    """
    # NOTE: the original also accumulated `counts` and `lengths` lists that
    # were never read; that dead code has been removed.
    out = {}
    for actor, movies in actor_to_movies.items():
        conditional_distribution = util.Distribution()
        for movie in movies:
            # Count every co-appearance (this includes the actor with
            # themself, matching the original behavior).
            for co_star in movie_to_actors[movie]:
                conditional_distribution[co_star] += 1
        conditional_distribution.renormalize()
        out[actor] = conditional_distribution
    return out
def approx_markov_chain_steady_state(conditional_distribution, N_samples,
                                     iterations_between_samples, debug=True):
    """
    Computes the steady-state distribution by simulating running the Markov
    chain. Collects samples at regular intervals and returns the empirical
    distribution of the samples.

    Inputs
    ------
    conditional_distribution : A dictionary in which each key is an state,
                               and each value is a Distribution over other
                               states.

    N_samples : the desired number of samples for the approximate empirical
                distribution

    iterations_between_samples : how many jumps to perform between each
                                 collected sample

    Returns
    -------
    An empirical Distribution over the states that should approximate the
    steady-state distribution.
    """
    t0 = time.time()
    empirical_distribution = util.Distribution()

    # Collect all valid states.
    states = list(conditional_distribution.keys())

    # Parallelize sampling. Here I make the assumption that each sample can
    # be generated independently from a uniformly sampled initial state.
    with Pool(processes=2 * cpu_count()) as pool:
        args = [(states, conditional_distribution,
                 iterations_between_samples, i)
                for i in range(N_samples)]
        # starmap blocks until every worker finishes. The original used
        # per-result get(timeout=0.5), which raised TimeoutError whenever a
        # single sample took longer than half a second.
        samples = pool.starmap(generate_sample, args)

    for s in samples:
        empirical_distribution[s] += 1.0

    # Normalize before returning.
    empirical_distribution.renormalize()
    if debug:
        print('Finished simulation in %f sec.' % (time.time() - t0))
    return empirical_distribution
def pagerank(conditional_distribution, N_samples, jumps_between_samples):
    """
    Computes the steady-state distribution by simulating running the Markov
    chain. Collects samples at regular intervals and returns the empirical
    distribution of the samples.

    Inputs
    ------
    conditional_distribution : A dictionary in which each key is an state,
                               and each value is a Distribution over other
                               states.

    N_samples : the desired number of samples for the approximate empirical
                distribution

    jumps_between_samples : how many jumps to perform between each collected
                            sample

    Returns
    -------
    An empirical Distribution over the states that should approximate the
    steady-state distribution.
    """
    # list() is required here: random.choice needs an indexable sequence,
    # and dict views are not indexable in Python 3 (the original
    # `conditional_distribution.keys()` raised TypeError).
    states = list(conditional_distribution.keys())
    state = random.choice(states)
    samples = []
    i = 0
    while True:
        i += 1
        # 10% random-restart ("teleport") keeps the chain ergodic even if
        # the transition graph is not strongly connected.
        if random.random() < .1:
            state = random.choice(states)
        else:
            state = conditional_distribution[state].sample()
        # Record every jumps_between_samples-th visited state.
        if (i % jumps_between_samples) == 0:
            samples.append(state)
            if len(samples) >= N_samples:
                break

    # Construct empirical distribution from samples
    empirical_distribution = util.Distribution()
    for s in samples:
        empirical_distribution[s] += 1
    empirical_distribution.renormalize()
    return empirical_distribution