# Shared imports for the Mesa model classes collected below. This preamble is
# a best-effort reconstruction: each class originally lived in its own
# project, and project-specific modules referenced by individual models
# (agent classes such as SchellingAgent or HouseholdAgent, and helpers such
# as model_params, model_functions, SIR, c_p, segregation_index, ...) are
# assumed to be provided by the surrounding project.
import random
import sys

import geojson
import numpy as np
from scipy.spatial import distance

from mesa import Model
from mesa.datacollection import DataCollector
from mesa.space import Grid, HexGrid, MultiGrid, NetworkGrid, SingleGrid
from mesa.time import RandomActivation
from mesa_geo import AgentCreator, GeoSpace


class GeneticPDModel(Model):
    description = 'A model which does nothing'

    def __init__(self, numagents=10, verbose=False, Q1_fixed_prob=False,
                 history_len2=False, mutation_switch=False,
                 crossover_switch=False):
        super().__init__()
        # Set parameters
        self.numagents = numagents
        self.verbose = verbose
        self.Q1_fixed_prob = Q1_fixed_prob
        self.history_len2 = history_len2
        self.mutation_switch = mutation_switch
        self.crossover_switch = crossover_switch

        # Build basic objects
        self.schedule = RandomActivation(self)
        self.datacollector = DataCollector(
            {"Agents": lambda m: len(m.schedule.agents)})

        # Create agents
        for i in range(self.numagents):
            newagent = GeneticPDAgent(unique_id=self.next_id(), model=self)
            self.schedule.add(newagent)
            if self.verbose:
                print("Agent " + str(newagent.unique_id) + " created")

    def step(self):
        if self.verbose:
            print("Tick number:", self.schedule.time)
        self.schedule.step()
        # collect data
        self.datacollector.collect(self)

    def run_model(self, step_count=200):
        if self.verbose:
            print('Initial number agents: ', self.schedule.get_agent_count())
        for i in range(step_count):
            self.step()
        if self.verbose:
            print('')
            print('Final number agents: ', self.schedule.get_agent_count())
class GeneticPDModel(Model):
    description = 'A model which does nothing'

    def __init__(self, numagents=10, verbose=False, rounds_per_play=1,
                 history_length=2):
        super().__init__()
        # Set parameters
        self.numagents = numagents
        self.verbose = verbose
        self.rounds_per_play = rounds_per_play
        self.history_length = history_length
        self.agents = []

        # Build basic objects
        self.schedule = RandomActivation(self)
        self.datacollector = DataCollector(
            model_reporters={"Agents": lambda m: len(m.schedule.agents)},
            agent_reporters={"wealth": lambda a: a.wealth})

        # Create agents
        for i in range(self.numagents):
            newagent = GeneticPDAgent(unique_id=self.next_id(), model=self)
            self.schedule.add(newagent)
            self.agents.append(newagent)

    def step(self):
        if self.verbose:
            print("Tick number:", self.schedule.time)
        self.schedule.step()
        # collect data
        self.datacollector.collect(self)

    def run_model(self, step_count=200):
        if self.verbose:
            print('Initial number agents: ', self.schedule.get_agent_count())
        for i in range(step_count):
            self.step()
        if self.verbose:
            print('')
            print('Final number agents: ', self.schedule.get_agent_count())

    def PDpayoff(self, my_action, your_action):
        if my_action == "C" and your_action == "C":
            return 3
        if my_action == "C" and your_action == "D":
            return 0
        if my_action == "D" and your_action == "C":
            return 4
        if my_action == "D" and your_action == "D":
            return 1
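# A minimal usage sketch for the model above (assumes GeneticPDAgent is
# defined elsewhere with a step() method and a `wealth` attribute). The
# PDpayoff method encodes T=4, R=3, P=1, S=0, which satisfies the
# Prisoner's Dilemma ordering T > R > P > S.
def run_genetic_pd_sketch():
    model = GeneticPDModel(numagents=10)
    assert (model.PDpayoff("D", "C") > model.PDpayoff("C", "C")
            > model.PDpayoff("D", "D") > model.PDpayoff("C", "D"))
    model.run_model(step_count=50)
    # Mean wealth per tick, from the agent-level reporter
    wealth = model.datacollector.get_agent_vars_dataframe()
    print(wealth.groupby(level="Step").mean().tail())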
class CrossingModel(Model):
    def __init__(self, ped_origin, ped_destination, road_length, road_width,
                 vehicle_flow, epsilon, gamma, ped_speed, lam, alpha, a_rate):
        self.schedule = RandomActivation(self)
        self.running = True
        self.nsteps = 0

        # Create two crossing alternatives, one a zebra crossing and one a
        # mid-block crossing
        zebra_location = road_length * 0.75
        zebra_type = 'zebra'
        mid_block_type = 'unmarked'
        zebra = CrossingAlternative(0, self, location=zebra_location,
                                    ctype=zebra_type, name='z1',
                                    vehicle_flow=vehicle_flow)
        unmarked = CrossingAlternative(1, self, ctype=mid_block_type,
                                       name='mid1', vehicle_flow=vehicle_flow)

        # Crossing alternatives with salience factors
        # (the spelling 'crossing_altertives' matches the Ped constructor)
        crossing_altertives = np.array([unmarked, zebra])

        i = 0
        model_type = 'sampling'
        self.ped = Ped(i, self, location=ped_origin, speed=ped_speed,
                       destination=ped_destination,
                       crossing_altertives=crossing_altertives,
                       road_length=road_length, road_width=road_width,
                       epsilon=epsilon, gamma=gamma, lam=lam, alpha=alpha,
                       a_rate=a_rate, model_type=model_type)
        self.schedule.add(self.ped)

        self.datacollector = DataCollector(
            agent_reporters={"CrossingType": "chosenCAType"})

        self.crossing_choice = None
        self.choice_step = None

    def step(self):
        self.datacollector.collect(self)
        self.schedule.step()
        if self.schedule.get_agent_count() == 0:
            self.running = False
        self.nsteps += 1
class SchellingModel(Model):
    '''
    Model class for the Schelling segregation model.
    '''

    def __init__(self, height, width, density, minority_pc, homophily):
        '''
        Create a new Schelling model with the given grid size, occupation
        density, minority fraction and homophily threshold.
        '''
        self.height = height
        self.width = width
        self.density = density
        self.minority_pc = minority_pc
        self.homophily = homophily

        self.schedule = RandomActivation(self)
        # mesa's Grid takes (width, height, torus) and is indexed [x][y]
        self.grid = Grid(width, height, torus=True)

        self.happy = 0
        self.datacollector = DataCollector(
            {"happy": lambda m: m.happy},  # Model-level count of happy agents
            # For testing purposes, agent's individual x and y
            {"x": lambda a: a.x, "y": lambda a: a.y})

        self.running = True

        # Set up agents
        for x in range(self.width):
            for y in range(self.height):
                if random.random() < self.density:
                    if random.random() < self.minority_pc:
                        agent_type = 1
                    else:
                        agent_type = 0

                    agent = SchellingAgent((x, y), x, y, agent_type)
                    self.grid[x][y] = agent
                    self.schedule.add(agent)

    def get_empty(self):
        '''
        Get a list of coordinate tuples of currently-empty cells.
        '''
        empty_cells = []
        for x in range(self.width):
            for y in range(self.height):
                if self.grid[x][y] is None:
                    empty_cells.append((x, y))
        return empty_cells

    def step(self):
        '''
        Run one step of the model. If all agents are happy, halt the model.
        '''
        self.happy = 0  # Reset counter of happy agents
        self.schedule.step()
        self.datacollector.collect(self)

        if self.happy == self.schedule.get_agent_count():
            self.running = False
class COVID_model(Model):

    def __init__(self):
        super().__init__()

        self.susceptible = 0
        self.dead = 0
        self.recovered = 0
        self.infected = 0

        interactions = model_params.parameters['interactions']
        self.population = model_params.parameters['population']

        self.SIR_instance = SIR.Infection(
            self,
            ptrans=model_params.parameters['ptrans'],
            reinfection_rate=model_params.parameters['reinfection_rate'],
            I0=model_params.parameters["I0"],
            severe=model_params.parameters["severe"],
            progression_period=model_params.parameters["progression_period"],
            progression_sd=model_params.parameters["progression_sd"],
            death_rate=model_params.parameters["death_rate"],
            recovery_days=model_params.parameters["recovery_days"],
            recovery_sd=model_params.parameters["recovery_sd"])

        G = SIR.build_network(interactions, self.population)
        self.grid = NetworkGrid(G)
        self.schedule = RandomActivation(self)
        self.dead_agents = []
        self.running = True

        for node in range(self.population):
            new_agent = agent.human(node, self)  # was self.next_id()
            self.grid.place_agent(new_agent, node)
            self.schedule.add(new_agent)

        # self.meme = 0
        self.datacollector = DataCollector(
            model_reporters={
                "infected": lambda m: c_p.compute(m, 'infected'),
                "recovered": lambda m: c_p.compute(m, 'recovered'),
                "susceptible": lambda m: c_p.compute(m, "susceptible"),
                "dead": lambda m: c_p.compute(m, "dead"),
                "R0": lambda m: c_p.compute(m, "R0"),
                "severe_cases": lambda m: c_p.compute(m, "severe")
            })

        self.datacollector.collect(self)

    def step(self):
        self.schedule.step()
        self.datacollector.collect(self)

        '''
        for a in self.schedule.agents:
            if a.alive == False:
                self.schedule.remove(a)
                self.dead_agents.append(a.unique_id)
        '''

        if self.dead == self.schedule.get_agent_count():
            self.running = False
        else:
            self.running = True
class miModelo(Model):
    def __init__(self, N, seed=None):
        self.current_id = 0
        self.running = True
        # Use a scheduler that activates the agents in random order
        self.schedule = RandomActivation(self)
        # Define a 10x10 grid with non-wrapping (non-toroidal) borders
        self.grid = MultiGrid(10, 10, False)
        for i in range(N):
            a = miAgente(self.next_id(), self, 5)
            self.schedule.add(a)
            pos_x = self.random.randint(0, 9)
            pos_y = self.random.randint(0, 9)
            # place_agent expects a position tuple
            self.grid.place_agent(a, (pos_x, pos_y))
        self.datacollector = DataCollector(model_reporters={
            "Nagentes": contarAgentes,
            "NumberTicks": getCurrentTick
        })

    def step(self):
        self.schedule.step()
        self.datacollector.collect(self)
        # Stop the simulation when fewer than two agents remain
        if self.schedule.get_agent_count() < 2:
            self.running = False
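# The two model reporters referenced above (contarAgentes, getCurrentTick)
# are not defined in this snippet; a minimal sketch of what they might look
# like (hypothetical helpers):
def contarAgentes(model):
    # Number of agents currently in the schedule
    return model.schedule.get_agent_count()


def getCurrentTick(model):
    # Number of steps executed so far
    return model.schedule.steps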
class propagation_model(Model):
    def __init__(self):
        super().__init__()

        density = model_params.parameters['density']
        nodes = model_params.parameters['network_size']
        neg_bias = model_params.parameters['neg_bias']
        meme_density = model_params.parameters['meme_density']

        self.num_agents = nodes
        self.meme = 0

        G = model_functions.build_network(density, nodes)
        self.grid = NetworkGrid(G)
        self.schedule = RandomActivation(self)
        self.running = True

        for node in range(nodes):
            new_agent = agent.tweeter(self.next_id(), node, self, neg_bias,
                                      meme_density)
            self.grid.place_agent(new_agent, node)
            self.schedule.add(new_agent)

        # self.meme = 0
        self.datacollector = DataCollector(
            model_reporters={
                "meme_density": model_functions.compute_meme_density})

        self.datacollector.collect(self)

    def step(self):
        self.schedule.step()
        self.datacollector.collect(self)

        if self.meme == self.schedule.get_agent_count():
            self.running = False
class SchellingModel(Model):
    '''
    Model class for the Schelling segregation model.
    '''

    def __init__(self, height, width, density, minority_pc, homophily):
        '''
        Create a new Schelling model with the given parameters.
        '''
        self.height = height
        self.width = width
        self.density = density
        self.minority_pc = minority_pc
        self.homophily = homophily

        self.schedule = RandomActivation(self)
        self.grid = SingleGrid(height, width, torus=True)

        self.happy = 0
        self.datacollector = DataCollector(
            {"happy": "happy"},  # Model-level count of happy agents
            # For testing purposes, agent's individual x and y
            {"x": lambda a: a.pos[0],
             "y": lambda a: a.pos[1]})

        # Set up agents
        # We use a grid iterator that returns
        # the coordinates of a cell as well as
        # its contents. (coord_iter)
        for cell in self.grid.coord_iter():
            x = cell[1]
            y = cell[2]
            if random.random() < self.density:
                if random.random() < self.minority_pc:
                    agent_type = 1
                else:
                    agent_type = 0

                agent = SchellingAgent((x, y), self, agent_type)
                self.grid.position_agent(agent, (x, y))
                self.schedule.add(agent)

        self.running = True
        self.datacollector.collect(self)

    def step(self):
        '''
        Run one step of the model. If all agents are happy, halt the model.
        '''
        self.happy = 0  # Reset counter of happy agents
        self.schedule.step()
        # collect data
        self.datacollector.collect(self)

        if self.happy == self.schedule.get_agent_count():
            self.running = False
class Schelling(Model):
    '''
    Model class for the Schelling segregation model.
    '''

    def __init__(self, height=20, width=20, density=0.8, minority_pc=0.2,
                 homophily=3):
        '''
        Create a new Schelling model with the given parameters.
        '''
        self.height = height
        self.width = width
        self.density = density
        self.minority_pc = minority_pc
        self.homophily = homophily

        self.schedule = RandomActivation(self)
        self.grid = SingleGrid(height, width, torus=True)

        self.happy = 0
        self.datacollector = DataCollector(
            {"happy": "happy"},  # Model-level count of happy agents
            # For testing purposes, agent's individual x and y
            {"x": lambda a: a.pos[0],
             "y": lambda a: a.pos[1]})

        # Set up agents
        # We use a grid iterator that returns
        # the coordinates of a cell as well as
        # its contents. (coord_iter)
        for cell in self.grid.coord_iter():
            x = cell[1]
            y = cell[2]
            if self.random.random() < self.density:
                if self.random.random() < self.minority_pc:
                    agent_type = 1
                else:
                    agent_type = 0

                agent = SchellingAgent((x, y), self, agent_type)
                self.grid.position_agent(agent, (x, y))
                self.schedule.add(agent)

        self.running = True
        self.datacollector.collect(self)

    def step(self):
        '''
        Run one step of the model. If all agents are happy, halt the model.
        '''
        self.happy = 0  # Reset counter of happy agents
        self.schedule.step()
        # collect data
        self.datacollector.collect(self)

        if self.happy == self.schedule.get_agent_count():
            self.running = False
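# A short usage sketch for the model above (assumes SchellingAgent is
# defined elsewhere): run until every agent is happy or a step cap is hit,
# then inspect the happiness trace collected each step.
def run_schelling_sketch(max_steps=200):
    model = Schelling(height=20, width=20, density=0.8,
                      minority_pc=0.2, homophily=3)
    while model.running and model.schedule.steps < max_steps:
        model.step()
    return model.datacollector.get_model_vars_dataframe()["happy"]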
class Schelling(Model):
    '''
    Model for Schelling segregation agent
    '''

    def __init__(self, height, width, density, minority_pc, homophily):
        self.height = height
        self.width = width
        self.density = density
        self.minority_pc = minority_pc
        self.homophily = homophily

        # grid
        self.grid = SingleGrid(height, width, torus=True)

        # schedule
        self.schedule = RandomActivation(self)

        # datacollector
        self.happy = 0
        self.datacollector = DataCollector(
            {"happy": "happy"},
            # position_agent sets agent.pos, so report grid coordinates
            {"x": lambda a: a.pos[0],
             "y": lambda a: a.pos[1]})

        # agent setup
        for cell in self.grid.coord_iter():
            x = cell[1]
            y = cell[2]
            if self.random.random() < self.density:
                if self.random.random() < self.minority_pc:
                    agent_type = 1
                else:
                    agent_type = 0
                agent = SchellingAgent((x, y), self, agent_type)
                self.grid.position_agent(agent, (x, y))
                self.schedule.add(agent)

        self.running = True
        self.datacollector.collect(self)

    def step(self):
        # reset at each step
        self.happy = 0
        self.schedule.step()
        # collect data
        self.datacollector.collect(self)
        # stop if all agents are happy
        if self.happy == self.schedule.get_agent_count():
            self.running = False
class SchellingModel(Model):
    '''
    Model class for the Schelling segregation model.
    '''

    def __init__(self, height, width, density,
                 type_pcs=[.2, .2, .2, .2, .2]):
        '''
        Create a new Schelling model with several agent types, mixed in the
        proportions given by type_pcs.
        '''
        self.height = height
        self.width = width
        self.density = density
        self.type_pcs = type_pcs

        self.schedule = RandomActivation(self)
        self.grid = SingleGrid(height, width, torus=False)

        self.happy = 0
        self.datacollector = DataCollector(
            {"happy": lambda m: m.happy},  # Model-level count of happy agents
            # For testing purposes, agent's individual x and y
            {"x": lambda a: a.pos[0],
             "y": lambda a: a.pos[1]})

        self.running = True

        # Set up agents: place the requested number of agents of each type
        # on randomly chosen empty cells.
        total_agents = self.height * self.width * self.density
        agents_by_type = [total_agents * val for val in self.type_pcs]

        for loc, types in enumerate(agents_by_type):
            for i in range(int(types)):
                pos = self.grid.find_empty()
                agent = SchellingAgent(pos, self, loc)
                self.grid.position_agent(agent, pos)
                self.schedule.add(agent)

    def step(self):
        '''
        Run one step of the model. If all agents are happy, halt the model.
        '''
        self.happy = 0  # Reset counter of happy agents
        self.schedule.step()
        self.datacollector.collect(self)

        if self.happy == self.schedule.get_agent_count():
            self.running = False
class SchellingModel(Model):
    '''
    Schelling model class
    '''

    def __init__(self, width=5, height=5, threshold=0.5,
                 population_density=0.8, population_breakdown=0.5):
        '''
        Initialize the model

        Args:
            width: Width of the grid containing agents.
            height: Height of the grid containing agents.
            threshold: Homophily threshold, the number, from 0-8, of nearest
                neighbours at which I am so unhappy that I move.
            population_density: Proportion of cells occupied, from 0-1.
            population_breakdown: Proportion of agents of type 1, from 0-1.
        '''
        self.running = True
        self.height = height
        self.width = width
        self.threshold = threshold
        self.population_density = population_density
        self.population_breakdown = population_breakdown
        self.no_happy_this_timestep = 0

        self.schedule = RandomActivation(self)
        self.grid = SingleGrid(width, height, torus=True)

        self.datacollector = DataCollector(
            {"happy": lambda m: m.no_happy_this_timestep},
            {"x": lambda a: a.pos[0],
             "y": lambda a: a.pos[1]})

        for cell in self.grid.coord_iter():
            x = cell[1]
            y = cell[2]
            if random.random() < self.population_density:
                if random.random() < self.population_breakdown:
                    agent_type = 1
                else:
                    agent_type = 0

                agent = Agent(self, (x, y), agent_type)
                self.grid.position_agent(agent, (x, y))
                self.schedule.add(agent)

    def step(self):
        '''
        Update model once in each time step
        '''
        self.no_happy_this_timestep = 0
        self.schedule.step()
        self.datacollector.collect(self)

        # End the simulation if all agents are happy since none will move
        if self.no_happy_this_timestep == self.schedule.get_agent_count():
            self.running = False
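# The threshold semantics documented above live on the agent side; a minimal
# sketch of that rule (SchellingAgentSketch is hypothetical, and assumes
# agents carry a `type` attribute and live on the model's SingleGrid):
from mesa import Agent as MesaAgent


class SchellingAgentSketch(MesaAgent):
    def __init__(self, unique_id, model, agent_type):
        super().__init__(unique_id, model)
        self.type = agent_type

    def step(self):
        # Count Moore neighbours of my own type
        similar = sum(
            1 for n in self.model.grid.iter_neighbors(self.pos, moore=True)
            if n.type == self.type)
        if similar < self.model.threshold:
            # Too few like neighbours: relocate to a random empty cell
            self.model.grid.move_to_empty(self)
        else:
            self.model.no_happy_this_timestep += 1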
class NewModel(Model):
    def __init__(self, width, height, num_agents):
        self.schedule = RandomActivation(self)
        self.grid = SingleGrid(width, height, torus=True)
        self.num_agents = num_agents

        # collect how many agents are happy, the average similarity of
        # neighbors, and the average length of residence
        self.datacollector = DataCollector(
            model_reporters={"Happy": lambda m: m.happy,
                             "Similar": lambda m: m.similar,
                             "Residence": lambda m: m.avg_residence},
            agent_reporters={"x": lambda a: a.pos[0],
                             "y": lambda a: a.pos[1]})
        self.avg_residence = 0
        self.happy = 0
        self.similar = 0
        self.running = True

        for i in range(self.num_agents):
            # white
            if random.random() < 0.70:
                agent_type = 1
                income = np.random.normal(54000, 41000)
            # black
            else:
                agent_type = 0
                income = np.random.normal(32000, 40000)

            # add new agents
            agent = NewAgent(i, self, agent_type, income)
            self.schedule.add(agent)

            # assign the initial coords of the agents
            x = self.random.randrange(self.grid.width)
            y = self.random.randrange(self.grid.height)
            self.grid.position_agent(agent, (x, y))

    def step(self):
        '''Advance the model by one step.'''
        self.happy = 0
        self.schedule.step()
        # get the average similarity
        self.similar /= self.num_agents
        # get the average length of residence
        self.avg_residence /= self.num_agents
        self.datacollector.collect(self)

        if self.happy == self.schedule.get_agent_count():
            self.running = False
class SchellingModel(Model):
    """Model class for the Schelling segregation model."""

    def __init__(self, density, minority_pc):
        self.density = density
        self.minority_pc = minority_pc

        self.schedule = RandomActivation(self)
        self.grid = GeoSpace(crs='epsg:4326')

        self.happy = 0
        self.datacollector = DataCollector(
            {"happy": lambda m: m.happy})  # Model-level count of happy agents

        self.running = True

        # Set up the grid with patches for every NUTS region
        regions = geojson.load(open('nuts_rg_60M_2013_lvl_2.geojson'))
        self.grid.create_agents_from_GeoJSON(regions, SchellingAgent,
                                             model=self, unique_id='NUTS_ID')

        # Set up agents
        for agent in self.grid.agents:
            if random.random() < self.density:
                if random.random() < self.minority_pc:
                    agent.atype = 1
                else:
                    agent.atype = 0
                self.schedule.add(agent)

        # Update the bounding box of the grid and create a new rtree
        self.grid.update_bbox()
        self.grid.create_rtree()

    def step(self):
        """Run one step of the model.

        If all agents are happy, halt the model.
        """
        self.happy = 0  # Reset counter of happy agents
        self.schedule.step()
        self.datacollector.collect(self)

        if self.happy == self.schedule.get_agent_count():
            self.running = False

        self.grid.create_rtree()
class COVID_model(Model):

    def __init__(self):
        super().__init__()

        self.susceptible = 0
        self.dead = 0
        self.recovered = 0
        self.infected = 0

        interactions = model_params.parameters['interactions']
        population = model_params.parameters['population']
        self.num_agents = population

        G = model_functions.build_network(interactions, population)
        self.grid = NetworkGrid(G)
        self.schedule = RandomActivation(self)
        self.running = True

        for node in range(population):
            new_agent = agent.human(self.next_id(), node, self)
            self.grid.place_agent(new_agent, node)
            self.schedule.add(new_agent)

        # self.meme = 0
        self.datacollector = DataCollector(
            model_reporters={
                "infected": model_functions.compute_infected,
                "recovered": model_functions.compute_recovered,
                "susceptible": model_functions.compute_susceptible,
                "dead": model_functions.compute_dead,
                "R0": model_functions.compute_R0,
                "severe_cases": model_functions.compute_severe})

        self.datacollector.collect(self)

    def step(self):
        self.schedule.step()
        self.datacollector.collect(self)

        if self.dead == self.schedule.get_agent_count():
            self.running = False
        else:
            self.running = True
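# Sketch: after a run, the model-level reporters above yield the S/I/R/D
# time series as a pandas DataFrame (model_params, model_functions and
# agent.human are assumed to be provided by the surrounding project).
def run_covid_sketch(max_steps=100):
    covid = COVID_model()
    for _ in range(max_steps):
        if not covid.running:
            break
        covid.step()
    df = covid.datacollector.get_model_vars_dataframe()
    return df[["susceptible", "infected", "recovered", "dead"]]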
class SchellingModel(Model):
    """Model class for the Schelling segregation model."""

    def __init__(self, density, minority_pc):
        self.density = density
        self.minority_pc = minority_pc

        self.schedule = RandomActivation(self)
        self.grid = GeoSpace()

        self.happy = 0
        self.datacollector = DataCollector({"happy": "happy"})

        self.running = True

        # Set up the grid with patches for every NUTS region
        AC = AgentCreator(SchellingAgent, {"model": self})
        agents = AC.from_file("nuts_rg_60M_2013_lvl_2.geojson")
        self.grid.add_agents(agents)

        # Set up agents
        for agent in agents:
            if random.random() < self.density:
                if random.random() < self.minority_pc:
                    agent.atype = 1
                else:
                    agent.atype = 0
                self.schedule.add(agent)

    def step(self):
        """Run one step of the model.

        If all agents are happy, halt the model.
        """
        self.happy = 0  # Reset counter of happy agents
        self.schedule.step()
        # collect data
        self.datacollector.collect(self)

        if self.happy == self.schedule.get_agent_count():
            self.running = False
class Schelling(Model):
    '''
    Model class for the SM coupled to the Schelling segregation model.
    This class has been modified from the original mesa Schelling model.
    '''

    def __init__(self, height=20, width=20, density=0.8, minority_pc=0.2,
                 homophilyType0=0.5, homophilyType1=0.5, movementQuota=0.30,
                 happyCheckRadius=5, moveCheckRadius=10, last_move_quota=5):
        '''
        Create a new Schelling model with per-type homophily preferences
        and a movement quota.
        '''
        self.height = height
        self.width = width
        self.density = density
        self.minority_pc = minority_pc
        self.homophilyType0 = homophilyType0
        self.homophilyType1 = homophilyType1
        self.movementQuota = movementQuota
        self.happyCheckRadius = happyCheckRadius
        self.moveCheckRadius = moveCheckRadius
        self.last_move_quota = last_move_quota

        self.schedule = RandomActivation(self)
        self.grid = SingleGrid(height, width, torus=True)

        self.happy = 0
        self.happytype0 = 0
        self.happytype1 = 0
        self.stepCount = 0
        self.evenness = 0
        self.empty = 0
        self.type0agents = 0
        self.type1agents = 0
        self.movement = 0
        self.movementtype0 = 0
        self.movementtype1 = 0
        self.movementQuotaCount = 0
        self.numberOfAgents = 0

        self.datacollector = DataCollector(
            # Model-level count of happy agents
            {"step": "stepCount", "happy": "happy",
             "happytype0": "happytype0", "happytype1": "happytype1",
             "movement": "movement", "movementtype0": "movementtype0",
             "movementtype1": "movementtype1", "evenness": "evenness",
             "numberOfAgents": "numberOfAgents",
             "homophilyType0": "homophilyType0",
             "homophilyType1": "homophilyType1",
             "movementQuota": "movementQuota",
             "happyCheckRadius": "happyCheckRadius",
             "last_move_quota": "last_move_quota"},
            # For testing purposes, agent's individual x and y
            {"x": lambda a: a.pos[0], "y": lambda a: a.pos[1],
             "Agent type": lambda a: a.type})
        # , "z": lambda a: a.type

        # Set up agents
        # We use a grid iterator that returns
        # the coordinates of a cell as well as
        # its contents. (coord_iter)
        for cell in self.grid.coord_iter():
            x = cell[1]
            y = cell[2]
            if self.random.random() < self.density:
                if self.random.random() < self.minority_pc:
                    agent_type = 1
                else:
                    agent_type = 0
                # randomly assign a value from 0 to 10
                last_move = round(self.random.random() * 10)
                agent = SchellingAgent((x, y), self, agent_type, last_move)
                self.grid.position_agent(agent, (x, y))
                self.schedule.add(agent)
        # print("Schedule: ", len(self.schedule.agents))

        self.running = True
        self.numberOfAgents = self.schedule.get_agent_count()
        self.datacollector.collect(self)

    def step(self, policy):
        '''
        Run one step of the model. If all agents are happy, halt the model.

        Note on the evenness parameter calculation: it cannot be performed
        in the step function of the agents, as it would then not take into
        consideration periods during which the agents are still moving,
        making the parameter calculation inaccurate.
        '''
        self.happy = 0  # Reset counter of happy agents
        self.happytype0 = 0  # Reset counter of happy type 0 agents
        self.happytype1 = 0  # Reset counter of happy type 1 agents
        self.empty = 0  # Reset counter of empty cells
        self.type0agents = 0  # Reset count of type 0 agents
        self.type1agents = 0  # Reset count of type 1 agents
        self.movementQuotaCount = 0  # Reset count of the movement quota
        self.movement = 0  # Reset counter of movement of agents
        self.movementtype0 = 0  # Reset counter of movement of type 0 agents
        self.movementtype1 = 0  # Reset counter of movement of type 1 agents

        # introduction of the selected policy in the Schelling model
        # happy check vision changes
        if policy[0] is not None and 1 < self.happyCheckRadius < 15:
            self.happyCheckRadius += policy[0]
        # movement quota changes
        if policy[1] is not None and 0.05 < self.movementQuota < 1:
            self.movementQuota += policy[1]
        # last movement threshold
        if policy[2] is not None and 0 < self.last_move_quota < 50:
            self.last_move_quota += policy[2]
        # type 0 preference
        if policy[3] is not None and 0 < self.homophilyType0 < 1:
            self.homophilyType0 += policy[3]
        # type 1 preference
        if policy[4] is not None and 0 < self.homophilyType1 < 1:
            self.homophilyType1 += policy[4]

        # run the step for the agents
        self.schedule.step()
        # print(self.movementQuotaCount, " agents moved.")
        # print(round(self.happy / self.schedule.get_agent_count() * 100, 2),
        #       "percent are happy agents.")

        # calculating empty counter
        self.empty = (self.height * self.width) - self.schedule.get_agent_count()

        # calculating type 0 and type 1 agent numbers
        for agent in self.schedule.agent_buffer(shuffled=True):
            # print(agent.type)
            if agent.type == 0:
                self.type0agents += 1
            if agent.type == 1:
                self.type1agents += 1

        # calculation of evenness (segregation parameter) using Haw (2015).
        self.evenness_calculation()

        # iterate the steps counter
        self.stepCount += 1

        # collect data
        self.datacollector.collect(self)

        # checking the datacollector
        # if self.stepCount % 2 == 0:
        #     print(self.datacollector.get_model_vars_dataframe())
        #     print(self.datacollector.get_agent_vars_dataframe())

        if self.happy == self.schedule.get_agent_count():
            self.running = False
            print("All agents are happy, the simulation ends!")

        output_KPIs = [self.evenness, self.movement, self.happy,
                       self.movementtype0, self.movementtype1,
                       self.happytype0, self.happytype1]
        return output_KPIs, self.type0agents, self.type1agents

    def evenness_calculation(self):
        '''
        To calculate the evenness parameter, one first needs to subdivide
        the grid into areas of more than one cell each. The evenness is then
        calculated based on the distribution of type 0 and type 1 agents in
        each of these areas. The division into areas needs to be done
        carefully, as it depends on the inputs of the model (width and
        height of the grid).
        '''
        # check for a square grid
        if self.height != self.width:
            self.running = False
            print("WARNING - The grid is not a square, "
                  "please insert the same width and height")

        # reset the evenness parameter
        self.evenness = 0

        # algorithm to calculate evenness
        n = 4  # number of big areas considered in width and height
        if self.height % n == 0:
            # consider all big areas
            for big_dy in range(n):
                for big_dx in range(n):
                    # looking within one big area, going through all cells
                    listAgents = []
                    for small_dy in range(int(self.height / n)):
                        for small_dx in range(int(self.height / n)):
                            for agents in self.schedule.agent_buffer(shuffled=True):
                                if agents.pos == (self.height / n * big_dx + small_dx,
                                                  self.height / n * big_dy + small_dy):
                                    listAgents.append(agents)

                    # calculating evenness for each big area
                    countType0agents = 0  # Reset counter for type 0 agents
                    countType1agents = 0  # Reset counter for type 1 agents
                    # checking the type of agents in the big area
                    for agents in listAgents:
                        if agents.type == 0:
                            countType0agents += 1
                        if agents.type == 1:
                            countType1agents += 1
                    self.evenness += 0.5 * abs(
                        (countType0agents / self.type0agents)
                        - (countType1agents / self.type1agents))
        # print("evenness :", round(self.evenness, 2))
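# The evenness computed above is the index of dissimilarity,
#     D = 0.5 * sum_a | n0_a / N0 - n1_a / N1 |,
# summed over areas a, where n0_a and n1_a are the per-area counts of each
# type and N0, N1 the totals. A vectorized sketch of the same quantity,
# assuming the per-area counts have already been tallied into two arrays:
def dissimilarity_index(counts_type0, counts_type1):
    p0 = np.asarray(counts_type0, dtype=float)
    p1 = np.asarray(counts_type1, dtype=float)
    # Per-area share of each group, then half the L1 distance between them
    return 0.5 * np.abs(p0 / p0.sum() - p1 / p1.sum()).sum()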
class OilSpread(Model):
    def __init__(self, height=20, width=20, initial_macchie=1, qnt=10,
                 qnt_prop=50, initial_barche=1, power_boat=3,
                 initial_land=20):
        self.height = height
        self.width = width
        self.initial_macchie = initial_macchie
        self.qnt = qnt
        self.qnt_prop = qnt_prop
        self.initial_barche = initial_barche
        self.power_boat = power_boat
        self.initial_land = initial_land

        self.schedule = RandomActivation(self)
        self.grid = Grid(width, height, torus=True)
        self.datacollector = DataCollector({
            "Oil": lambda m: m.schedule.get_agent_count()
            - self.initial_barche - self.initial_land - self.height
            # "Cane": lambda m: self.count_type(self, 0)
        })

        # Create the land cells
        for i in range(self.initial_land):
            x = 0
            y = i
            terra = Land((x, y), self, 0)
            self.grid.place_agent(terra, (x, y))
            self.schedule.add(terra)

        # Create the boundary cells
        for i in range(20):
            x = self.width - 1
            y = i
            limite = Bound((x, y), self)
            self.grid.place_agent(limite, (x, y))
            self.schedule.add(limite)

        # Create the oil patches
        for i in range(self.initial_macchie):
            x = self.random.randrange(self.width)
            y = self.random.randrange(self.height)
            if (x == 0):
                x += 1
            if (y == self.width):
                y -= 2
            macchia = Oil((x, y), self, qnt, qnt_prop)
            self.grid.place_agent(macchia, (x, y))
            self.schedule.add(macchia)

        # Create the clean-up boats
        for i in range(self.initial_barche):
            x = self.random.randrange(self.width)
            y = self.random.randrange(self.height)
            if (x == 0):
                x += 1
            if (y == self.width):
                y -= 2
            barca = Boat((x, y), self, power_boat)
            self.grid.place_agent(barca, (x, y))
            self.schedule.add(barca)

        self.running = True
        self.datacollector.collect(self)

    def step(self):
        self.schedule.step()
        # collect data
        self.datacollector.collect(self)
        print("Number of oil patches still in play: "
              + str(self.schedule.get_agent_count() - self.initial_barche
                    - self.initial_land - self.height))

        if self.schedule.get_agent_count() == (self.initial_barche
                                               + self.initial_land
                                               + self.height):
            self.running = False
class EvacuationModel(Model):
    def __init__(self, N=20, height=21, width=21, push_ratio=0.5):
        super().__init__()
        self.height = height
        self.width = width
        self.num_agents = N
        self.exit_x = self.width - 1
        self.exit_y = round(self.height / 2)
        self.push_probs = np.array([[0., 0.], [1., 0.5]])

        self.grid = HexGrid(self.width, self.height, torus=False)
        self.schedule = RandomActivation(self)

        # decide for each ID whether it is a pusher
        is_pusher = np.zeros(N, dtype=int)
        idx = self.random.sample([i for i in range(N)], int(push_ratio * N))
        is_pusher[idx] = 1

        # Add N pedestrians
        taken_pos = []
        for i in range(self.num_agents):
            # Add the agent to a random grid cell
            while True:
                x = self.random.randrange(1, self.grid.width - 1)
                y = self.random.randrange(1, self.grid.height - 1)
                pos = (x, y)
                if pos not in taken_pos:
                    break

            a = Pedestrian(i, self, pos, self.exit_x, self.exit_y,
                           is_pusher[i])
            self.schedule.add(a)
            self.grid.place_agent(a, pos)
            taken_pos.append(pos)
        print(len(taken_pos))

        # Place vertical walls
        for i in range(self.height):
            # Left
            x = 0
            y = i
            if x == self.exit_x and y == self.exit_y:
                e = Exit(self, (x, y))
                # self.schedule.add(e)
                self.grid.place_agent(e, (x, y))
            else:
                w = Wall(self, (x, y))
                # self.schedule.add(w)
                self.grid.place_agent(w, (x, y))

            # Right
            x = self.width - 1
            y = i
            # One exit
            if x == self.exit_x and y == self.exit_y:
                e = Exit(self, (x, y))
                # self.schedule.add(e)
                self.grid.place_agent(e, (x, y))
            else:
                w = Wall(self, (x, y))
                # self.schedule.add(w)
                self.grid.place_agent(w, (x, y))

        # Place horizontal walls
        for i in range(self.width):
            # Up
            x = i
            y = 0
            if x == self.exit_x and y == self.exit_y:
                e = Exit(self, (x, y))
                # self.schedule.add(e)
                self.grid.place_agent(e, (x, y))
            else:
                w = Wall(self, (x, y))
                # self.schedule.add(w)
                self.grid.place_agent(w, (x, y))

            # Down
            x = i
            y = self.height - 1
            # One exit
            if x == self.exit_x and y == self.exit_y:
                # e = Exit(self, (x, y))
                # self.schedule.add(e)
                # self.grid.place_agent(e, (x, y))
                continue
            else:
                w = Wall(self, (x, y))
                # self.schedule.add(w)
                self.grid.place_agent(w, (x, y))

        self.data_collector = DataCollector({
            "Evacuees": lambda m: self.count_evacuees(),
            "Evacuated": lambda m: self.count_evacuated()
        })

        # this is required for the data_collector to work
        self.running = True
        self.data_collector.collect(self)

    def count_evacuees(self):
        count = self.schedule.get_agent_count()
        print('EVACUEES COUNT')
        print(count)
        print()
        return count

    def count_evacuated(self):
        count = self.num_agents - self.schedule.get_agent_count()
        return count

    def step(self):
        print(self.schedule.get_agent_count())
        # halt the model once everyone has left, rather than killing the
        # interpreter with exit()
        if self.schedule.get_agent_count() == 0:
            self.running = False
            return
        self.schedule.step()
        print("")
        self.data_collector.collect(self)
class SchellingModel(Model):
    '''Model class for Schelling segregation model'''

    def __init__(self, height=20, width=20, density=.8, group_ratio=.66,
                 minority_ratio=.5, homophily=3):
        self.height = height
        self.width = width
        self.density = density
        self.group_ratio = group_ratio
        self.minority_ratio = minority_ratio
        self.homophily = homophily
        self.happy = 0
        self.segregated = 0

        self.schedule = RandomActivation(self)
        self.grid = SingleGrid(height, width, torus=False)

        self.place_agents()
        self.datacollector = DataCollector({
            'happy': (lambda m: m.happy),
            'segregated': (lambda m: m.segregated)})
        self.running = True

    def step(self):
        '''Run one step of model'''
        self.schedule.step()
        self.calculate_stats()
        self.datacollector.collect(self)

        if self.happy == self.schedule.get_agent_count():
            self.running = False

    def place_agents(self):
        for cell in self.grid.coord_iter():
            x, y = cell[1:3]
            if random.random() < self.density:
                if random.random() < self.group_ratio:
                    if random.random() < self.minority_ratio:
                        group = 0
                    else:
                        group = 1
                else:
                    group = 2

                agent = SchellingAgent((x, y), group)
                self.grid.position_agent(agent, (x, y))
                self.schedule.add(agent)

        for agent in self.schedule.agents:
            count = 0
            for neighbour in self.grid.iter_neighbors(agent.pos, moore=False):
                if neighbour.group == agent.group:
                    count += 1
            agent.similar = count

    def calculate_stats(self):
        happy_count = 0
        avg_seg = 0
        for agent in self.schedule.agents:
            avg_seg += agent.similar
            if agent.similar >= self.homophily:
                happy_count += 1

        self.happy = happy_count
        self.segregated = avg_seg / self.schedule.get_agent_count()
class SchoolModel(Model):
    """
    Model class for the Schelling segregation model.

    Attributes
    ----------
    height : int
        grid height
    width : int
        grid width
    num_schools : int
        number of schools
    f : float
        fraction preference of agents for like
    M : float
        utility penalty for homogeneous neighbourhood
    residential_steps : int
        number of steps for the residential model
    minority_pc : float
        minority fraction
    bounded : boolean
        if True use bounded (predefined neighbourhood) for agents'
        residential choice
    cap_max : float
        school capacity  TODO: explain
    radius : int
        neighbourhood radius for agents' calculation of residential choice
        (only used if not bounded)
    household_types : list
        labels for different ethnic types of households
    symmetric_positions : boolean
        use symmetric positions for the schools along the grid, or random
    schelling : boolean
        if True use the Schelling utility function, otherwise use the
        asymmetric one
    school_pos : list
        if supplied, place schools in the supplied positions; also update
        school_num
    extended_data : boolean
        if True collect extra data for agents (utility distribution and
        satisfaction); takes up a lot of space
    sample : int
        subsample the empty residential sites to be evaluated, to speed up
        computation
    variable_f : boolean
        draw values of the ethnic preference f from a normal distribution
    sigma : float
        the standard deviation of the normal distribution used for f
    alpha : float
        ratio of ethnic preference to distance-to-school preference for
        school utility
    temp : float
        temperature for the behavioural logit rule for agents moving
    households : list
        all household objects
    schools : list
        all school objects
    residential_moves_per_step : int
        number of agents to move residence at every step
    school_moves_per_step : int
        number of agents to move school at every step
    num_households : int
        total number of household agents
    pm : list
        [number of majority households, number of minority households]
    schedule : mesa schedule type
    grid : mesa grid type
    total_moves : int
        number of school moves made in a particular step
    res_moves : int
        number of residential site moves made in a particular step
    move : str
        type of move recipe - 'random', 'boltzmann' or 'deterministic'
    school_locations : list
        list of locations (x, y) of all schools
    household_locations : list
        list of locations (x, y) of all households
    closer_school_from_position : numpy array, shape (width x height)
        map of every grid position to the closest school
    """

    def __init__(self, height=100, width=100, density=0.9,
                 num_neighbourhoods=16, schools_per_neighbourhood=2,
                 minority_pc=0.5, homophily=3, f0=0.6, f1=0.6,
                 M0=0.8, M1=0.8, T=0.75, alpha=0.5, temp=1, cap_max=1.01,
                 move="boltzmann", symmetric_positions=True,
                 residential_steps=70, schelling=False, bounded=True,
                 residential_moves_per_step=2000, school_moves_per_step=2000,
                 radius=6, proportional=False, torus=False, fs="eq",
                 extended_data=False, school_pos=None, agents=None, sample=4,
                 variable_f=True, sigma=0.35, displacement=8):

        # Options for the model
        self.height = height
        self.width = width
        print("h x w", height, width)
        self.density = density
        # self.num_schools = num_schools
        self.f = [f0, f1]
        self.M = [M0, M1]
        self.residential_steps = residential_steps
        self.minority_pc = minority_pc
        self.bounded = bounded
        self.cap_max = cap_max
        self.T = T
        self.radius = radius
        self.household_types = [0, 1]  # majority, minority

        self.symmetric_positions = symmetric_positions
        self.schelling = schelling
        self.school_pos = school_pos
        self.extended_data = extended_data
        self.sample = sample
        self.variable_f = variable_f
        self.sigma = sigma
        self.fs = fs

        # choice parameters
        self.alpha = alpha
        self.temp = temp

        self.households = []
        self.schools = []
        self.neighbourhoods = []
        self.residential_moves_per_step = residential_moves_per_step
        self.school_moves_per_step = school_moves_per_step

        self.num_households = int(width * height * density)
        num_min_households = int(self.minority_pc * self.num_households)
        self.num_neighbourhoods = num_neighbourhoods
        self.schools_per_neigh = schools_per_neighbourhood
        self.num_schools = int(num_neighbourhoods * self.schools_per_neigh)
        self.pm = [self.num_households - num_min_households,
                   num_min_households]

        self.schedule = RandomActivation(self)
        self.grid = SingleGrid(height, width, torus=torus)

        self.total_moves = 0
        self.res_moves = 0
        self.move = move

        self.school_locations = []
        self.household_locations = []
        self.neighbourhood_locations = []

        self.closer_school_from_position = np.empty(
            [self.grid.width, self.grid.height])
        self.closer_neighbourhood_from_position = np.empty(
            [self.grid.width, self.grid.height])

        self.happy = 0
        self.res_happy = 0
        self.percent_happy = 0
        self.seg_index = 0
        self.res_seg_index = 0
        self.residential_segregation = 0
        self.collective_utility = 0

        (self.comp0, self.comp1, self.comp2, self.comp3, self.comp4,
         self.comp5, self.comp6, self.comp7, self.comp8, self.comp9,
         self.comp10, self.comp11, self.comp12, self.comp13, self.comp14,
         self.comp15) = (0,) * 16

        self.satisfaction = []
        self.pi_jm = []
        self.pi_jm_fixed = []
        self.compositions = []
        self.average_like_fixed = 0
        self.average_like_variable = 0
        self.my_collector = []

        if torus:
            self.max_dist = self.height / np.sqrt(2)
        else:
            self.max_dist = self.height * np.sqrt(2)

        # Set up agents.
        # We use a grid iterator that returns the coordinates of a cell as
        # well as its contents. (coord_iter)

        # Set up schools in symmetric positions along the grid.
        # If schools are already supplied, place them where they should be.
        # TODO: fix
        if self.school_pos:
            school_positions = self.school_pos
            self.school_locations = school_pos
            self.num_schools = len(school_pos)
            print("Option not working")
            sys.exit()
        # otherwise calculate the positions
        else:
            if self.num_neighbourhoods == 4:
                neighbourhood_positions = [
                    (width / 4, height / 4), (width * 3 / 4, height / 4),
                    (width / 4, height * 3 / 4),
                    (width * 3 / 4, height * 3 / 4)]
            elif self.num_neighbourhoods == 9:
                n = 6
                neighbourhood_positions = [
                    (width / n, height / n),
                    (width * 3 / n, height * 1 / n),
                    (width * 5 / n, height * 1 / n),
                    (width / n, height * 3 / n),
                    (width * 3 / n, height * 3 / n),
                    (width * 5 / n, height * 3 / n),
                    (width * 1 / n, height * 5 / n),
                    (width * 3 / n, height * 5 / n),
                    (width * 5 / n, height * 5 / n)]
            elif self.num_neighbourhoods in [25, 64, 16]:
                neighbourhood_positions = []
                n = int(np.sqrt(self.num_neighbourhoods) * 2)
                print(n)
                x1 = range(1, int(n + 1), 2)
                xloc = np.repeat(x1, int(n / 2))
                yloc = np.tile(x1, int(n / 2))
                for i in range(self.num_neighbourhoods):
                    neighbourhood_positions.append(
                        (xloc[i] * height / n, yloc[i] * width / n))
                print(neighbourhood_positions)

        # for i in range(self.num_schools):
        i = 0
        while len(self.neighbourhoods) < self.num_neighbourhoods:
            if self.symmetric_positions or self.school_pos:
                x = int(neighbourhood_positions[i][0])
                y = int(neighbourhood_positions[i][1])
                # print(x, y)
            else:
                x = random.randrange(start=2, stop=self.grid.width - 2)
                y = random.randrange(start=2, stop=self.grid.height - 2)

            pos = (x, y)
            pos2 = (x + 1, y + 1)
            if schools_per_neighbourhood == 2:
                pos3 = (x - displacement, y - displacement)
                pos2 = (x + displacement, y + displacement)

            do_not_use = self.school_locations + self.neighbourhood_locations
            # if (pos not in do_not_use) and (pos2 not in do_not_use) and (pos3 not in do_not_use):
            if (pos not in do_not_use) and (pos2 not in do_not_use):
                # print('pos', pos, pos2, pos3)
                self.school_locations.append(pos2)
                school = SchoolAgent(pos2, self)
                self.grid.place_agent(school, school.unique_id)
                self.schools.append(school)
                self.schedule.add(school)

                if self.schools_per_neigh == 2:
                    # Add another school
                    self.school_locations.append(pos3)
                    school = SchoolAgent(pos3, self)
                    self.grid.place_agent(school, school.unique_id)
                    self.schools.append(school)
                    self.schedule.add(school)

                self.neighbourhood_locations.append(pos)
                neighbourhood = NeighbourhoodAgent(pos, self)
                self.grid.place_agent(neighbourhood, neighbourhood.unique_id)
                self.neighbourhoods.append(neighbourhood)
                self.schedule.add(neighbourhood)
            else:
                print(pos, pos2, pos3, "is found in", do_not_use)
            i += 1

        print("num_schools", len(self.school_locations))
        print("schools completed")
        # print(self.neighbourhood_locations)
        # print("schools", self.school_locations, len(self.school_locations))

        # Set up households.
        # If agents are supplied, place them where they need to be.
        if agents:
            for cell in agents:
                [agent_type, x, y] = cell
                if agent_type in [0, 1]:
                    pos = (x, y)
                    if self.grid.is_cell_empty(pos):
                        agent = HouseholdAgent(pos, self, agent_type)
                        self.grid.place_agent(agent, agent.unique_id)
                        self.household_locations.append(pos)
                        self.households.append(agent)
                        self.schedule.add(agent)
        # otherwise produce them
        else:
            # create household locations but don't create agents yet
            while len(self.household_locations) < self.num_households:
                # Add the agent to a random grid cell
                x = random.randrange(self.grid.width)
                y = random.randrange(self.grid.height)
                pos = (x, y)
                if (pos not in (self.school_locations
                                + self.household_locations
                                + self.neighbourhood_locations)):
                    self.household_locations.append(pos)

            # print(Dij)
            for ind, pos in enumerate(self.household_locations):
                # create a school or create a household
                if ind < int(self.minority_pc * self.num_households):
                    agent_type = self.household_types[1]
                else:
                    agent_type = self.household_types[0]

                household_index = ind
                agent = HouseholdAgent(pos, self, agent_type, household_index)
                # decorator_agent = HouseholdAgent(pos, self, agent_type)
                self.grid.place_agent(agent, agent.unique_id)
                # self.grid.place_agent(decorator_agent, pos)
                self.households.append(agent)
                self.schedule.add(agent)

        self.set_positions_to_school()
        self.set_positions_to_neighbourhood()
        self.calculate_all_distances()
        self.calculate_all_distances_to_neighbourhoods()

        for agent in self.households:
            random_school_index = random.randint(0, len(self.schools) - 1)
            # print("school_index", random_school_index, agent.Dj, len(agent.Dj))
            candidate_school = self.schools[random_school_index]
            agent.allocate(candidate_school, agent.Dj[random_school_index])
            # closer_school = self.schools[np.argmin(Dj)]
            # closer_school.students.append(agent)
            # agent.allocate(closer_school, np.min(Dj))
            # print(agent.school.unique_id)

        self.pi_jm = np.zeros(shape=(len(self.school_locations),
                                     len(self.household_types)))
        self.local_compositions = np.zeros(
            shape=(len(self.school_locations), len(self.household_types)))

        self.avg_school_size = round(density * width * height
                                     / (len(self.schools)))

        if self.extended_data:
            self.datacollector = DataCollector(
                model_reporters={
                    "agent_count": lambda m: m.schedule.get_agent_count(),
                    "seg_index": "seg_index",
                    "residential_segregation": "residential_segregation",
                    "res_seg_index": "res_seg_index",
                    "fixed_res_seg_index": "fixed_res_seg_index",
                    "happy": "happy",
                    "percent_happy": "percent_happy",
                    "total_moves": "total_moves",
                    "compositions0": "compositions0",
                    "compositions1": "compositions1",
                    "comp0": "comp0", "comp1": "comp1", "comp2": "comp2",
                    "comp3": "comp3", "comp4": "comp4", "comp5": "comp5",
                    "comp6": "comp6", "comp7": "comp7",
                    "compositions": "compositions",
                    "collective_utility": "collective_utility"
                },
                agent_reporters={
                    "local_composition": "local_composition",
                    "type": lambda a: a.type,
                    "id": lambda a: a.unique_id,
                    # "fixed_local_composition": "fixed_local_composition",
                    # "variable_local_composition": "variable_local_composition",
                    "school_utilities": "school_utilities",
                    "residential_utilities": "residential_utilities",
                    "pos": "pos"
                })
        else:
            self.datacollector = DataCollector(
                model_reporters={
                    "agent_count": lambda m: m.schedule.get_agent_count(),
                    "seg_index": "seg_index",
                    "residential_segregation": "residential_segregation",
                    "res_seg_index": "res_seg_index",
                    "fixed_res_seg_index": "fixed_res_seg_index",
                    "happy": "happy",
                    "percent_happy": "percent_happy",
                    "total_moves": "total_moves",
                    "compositions0": "compositions0",
                    "compositions1": "compositions1",
                    "comp0": "comp0", "comp1": "comp1", "comp2": "comp2",
                    "comp3": "comp3", "comp4": "comp4", "comp5": "comp5",
                    "comp6": "comp6", "comp7": "comp7",
                    "compositions": "compositions",
                    "collective_utility": "collective_utility"
                },
                agent_reporters={
                    "local_composition": "local_composition",
                    "type": lambda a: a.type,
                    "id": lambda a: a.unique_id,
                    # "fixed_local_composition": "fixed_local_composition",
                    # "variable_local_composition": "variable_local_composition",
                    "pos": "pos"
                })

        # Calculate local composition
        # set size
        for school in self.schools:
            # school.get_local_school_composition()
            # cap = round(np.random.normal(loc=cap_max * self.avg_school_size,
            #                              scale=self.avg_school_size * 0.05))
            cap = self.avg_school_size * self.cap_max
            school.capacity = cap
            print("cap", self.avg_school_size, cap)

        segregation_index(self)

        print(
            "height = %d; width = %d; density = %.2f; num_schools = %d; "
            "minority_pc = %.2f; f0 = %.2f; f1 = %.2f; M0 = %.2f; M1 = %.2f; "
            "alpha = %.2f; temp = %.2f; cap_max = %.2f; move = %s; "
            "symmetric_positions = %s"
            % (height, width, density, self.num_schools, minority_pc, f0, f1,
               M0, M1, alpha, temp, cap_max, move, symmetric_positions))

        self.total_considered = 0
        self.running = True
        self.datacollector.collect(self)

    def calculate_all_distances(self):
        """
        Calculate the distance between every household and every school,
        Euclidean or GIS shortest road route.

        :return: Dij distance matrix
        """
        Dij = distance.cdist(np.array(self.household_locations),
                             np.array(self.school_locations), 'euclidean')

        for household_index, household in enumerate(self.households):
            Dj = Dij[household_index, :]
            household.Dj = Dj

            # Calculate distances of the schools - define the
            # school-neighbourhood and compare
            closer_school_index = np.argmin(household.Dj)
            household.closer_school = self.schools[closer_school_index]
            household.closer_school.neighbourhood_students.append(household)

        return Dij

    def calculate_all_distances_to_neighbourhoods(self):
        """
        Assign every household to its closest neighbourhood centre,
        Euclidean or GIS shortest road route.
        """
        for household_index, household in enumerate(self.households):
            # Calculate distances of the schools - define the
            # school-neighbourhood and compare
            household.closer_neighbourhood = \
                self.get_closer_neighbourhood_from_position(household.pos)
            household.closer_neighbourhood.neighbourhood_students_indexes.append(
                household_index)

        # just a sanity check
        # for i, neighbourhood in enumerate(self.neighbourhoods):
        #     students = neighbourhood.neighbourhood_students_indexes
        #     print("students,", i, len(students))

    def set_positions_to_school(self):
        '''
        Calculate the closest school from every position on the grid,
        Euclidean or GIS shortest road route.
        '''
        all_grid_locations = []
        for x in range(self.grid.width):
            for y in range(self.grid.height):
                all_grid_locations.append((x, y))

        Dij = distance.cdist(np.array(all_grid_locations),
                             np.array(self.school_locations), 'euclidean')

        for i, pos in enumerate(all_grid_locations):
            Dj = Dij[i, :]
            (x, y) = pos
            closer_school_index = np.argmin(Dj)
            self.closer_school_from_position[x][y] = closer_school_index
        # print("closer_school_by_position", self.closer_school_from_position)

    def set_positions_to_neighbourhood(self):
        '''
        Calculate the closest neighbourhood centre from every position on
        the grid, Euclidean or GIS shortest road route.
        '''
        all_grid_locations = []
        for x in range(self.grid.width):
            for y in range(self.grid.height):
                all_grid_locations.append((x, y))

        Dij = distance.cdist(np.array(all_grid_locations),
                             np.array(self.neighbourhood_locations),
                             'euclidean')

        for i, pos in enumerate(all_grid_locations):
            Dj = Dij[i, :]
            (x, y) = pos
            closer_neighbourhood_index = np.argmin(Dj)
            self.closer_neighbourhood_from_position[x][y] = \
                closer_neighbourhood_index
        # print("closer_school_by_position",
        #       self.closer_school_from_position)

    def get_closer_school_from_position(self, pos):
        """
        :param pos: (x, y) position
        :return school: school object closest to this position
        """
        (x, y) = pos
        school_index = self.closer_school_from_position[x][y]
        school = self.get_school_from_index(school_index)
        return school

    def get_closer_neighbourhood_from_position(self, pos):
        """
        :param pos: (x, y) position
        :return neighbourhood: neighbourhood object closest to this position
        """
        (x, y) = pos
        neighbourhood_index = self.closer_neighbourhood_from_position[x][y]
        neighbourhood = self.get_neighbourhood_from_index(neighbourhood_index)
        return neighbourhood

    def get_school_from_index(self, school_index):
        """
        Obtain the school object using its index.

        :param school_index: index into self.schools
        :return: school object
        """
        return self.schools[int(school_index)]

    def get_neighbourhood_from_index(self, neighbourhood_index):
        """
        Obtain the neighbourhood object using its index.

        :param neighbourhood_index: index into self.neighbourhoods
        :return: neighbourhood object
        """
        return self.neighbourhoods[int(neighbourhood_index)]

    def get_households_from_index(self, household_indexes):
        """
        Retrieve household objects from their indexes.

        :param household_indexes: list of indexes of households to retrieve
        :return: households: household objects
        """
        households = []
        for household_index in household_indexes:
            households.append(self.households[household_index])
        return households

    def step(self):
        '''
        Run one step of the model. If all agents are happy, halt the model.
        '''
        self.happy = 0  # Reset counter of happy agents
        self.res_happy = 0
        self.total_moves = 0
        self.total_considered = 0
        self.res_moves = 0
        self.satisfaction = []
        self.res_satisfaction = []

        self.schedule.step()
        satisfaction = 0
        res_satisfaction = 0
        print("happy", self.happy)
        print("total_considered", self.total_considered)

        # Once residential steps are done calculate school distances
        if (self.schedule.steps <= self.residential_steps
                or self.schedule.steps == 1):
            # during the residential steps keep recalculating the school
            # neighbourhood compositions; this is required for the
            # neighbourhoods metric
            # print("recalculating neighbourhoods")
            # TODO: check this, not sure if this and the recalculation
            # below is needed
            for school in self.schools:
                school.neighbourhood_students = []
            for neighbourhood in self.neighbourhoods:
                neighbourhood.neighbourhood_students_indexes = []

            # update the household locations after a move
            self.household_locations = []
            for i, household in enumerate(self.households):
                self.household_locations.append(household.pos)

            self.calculate_all_distances()
            self.calculate_all_distances_to_neighbourhoods()
            # print("all", self.calculate_all_distances()[i, :])

            # for i, household in enumerate(self.households):
            #     print(household.calculate_distances())
            #     # Calculate distances of the schools - define the
            #     # school-neighbourhood and compare
            #     closer_school_index = np.argmin(household.Dj)
            #     household.closer_school = self.schools[closer_school_index]
            #     household.closer_school.neighbourhood_students.append(household)
            #     # Initialize house allocation to school
            #     # household.move_school(closer_school_index,
            #     #                       self.schools[closer_school_index])

            self.residential_segregation = segregation_index(
                self, unit="neighbourhood")
            self.res_seg_index = segregation_index(
                self, unit="agents_neighbourhood")
            self.fixed_res_seg_index = segregation_index(
                self, unit="fixed_agents_neighbourhood", radius=1)
            res_satisfaction = np.mean(self.res_satisfaction)
            satisfaction = 0

        # calculate these after the residential model
        if self.schedule.steps > self.residential_steps:
            self.collective_utility = calculate_collective_utility(self)
            print(self.collective_utility)
            self.seg_index = segregation_index(self)
            satisfaction = np.mean(self.satisfaction)

        print("seg_index", "%.2f" % self.seg_index,
              "var_res_seg", "%.2f" % self.res_seg_index,
              "neighbourhood", "%.2f" % self.residential_segregation,
              "fixed_res_seg_index", "%.2f" % self.fixed_res_seg_index,
              "res_satisfaction %.2f" % res_satisfaction,
              "satisfaction %.2f" % satisfaction,
              "average_like_fixed %.2f" % self.average_like_fixed,
              "average_like_var %.2f" % self.average_like_variable)

        if self.happy == self.schedule.get_agent_count():
            self.running = False

        compositions = []
        # remove this?
        for school in self.schools:
            self.my_collector.append([
                self.schedule.steps, school.unique_id,
                school.get_local_school_composition()])
            self.compositions = school.get_local_school_composition()
            compositions.append(school.get_local_school_composition()[0])
            compositions.append(school.get_local_school_composition()[1])
            self.compositions1 = int(school.get_local_school_composition()[1])
            self.compositions0 = int(school.get_local_school_composition()[0])
            # print("school_students", school.neighbourhood_students)
        # print("comps", compositions, np.sum(compositions))

        [self.comp0, self.comp1, self.comp2, self.comp3, self.comp4,
         self.comp5, self.comp6, self.comp7] = compositions[0:8]

        # collect data
        # self.datacollector.collect(self)

        print("moves", self.total_moves, "res_moves", self.res_moves,
              "percent_happy", self.percent_happy)

        for i, household in enumerate(self.households):
            household.school_utilities = []
            household.residential_utilities = []
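# The model above selects moves with move="boltzmann" at temperature `temp`.
# The actual rule lives in the household agent code (not shown here); a
# minimal sketch of such a logit choice over candidate utilities
# (boltzmann_choice is a hypothetical helper):
def boltzmann_choice(utilities, temp, rng=np.random):
    u = np.asarray(utilities, dtype=float)
    # Subtract the max before exponentiating for numerical stability
    w = np.exp((u - u.max()) / temp)
    return rng.choice(len(u), p=w / w.sum())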
class Schelling(Model):
    '''
    Define the Model - the other core class.

    mesa/space.py's Grid has 3 native properties:
    - width
    - height
    - torus
    So `minority_pc` and `homophily` are customized properties here.
    '''

    def __init__(self, height=20, width=20, density=0.8, minority_pc=0.2,
                 homophily=3):
        self.height = height
        self.width = width
        self.density = density
        self.minority_pc = minority_pc
        self.homophily = homophily

        # The scheduler used is `RandomActivation`, defined in mesa/time.py.
        # It specifies the *time* of the model.
        self.schedule = RandomActivation(self)

        # `SingleGrid` is defined in mesa/space.py: a grid which strictly
        # enforces one object per cell. It specifies the *space* of the
        # model; width, height and torus are the native properties.
        self.grid = SingleGrid(width, height, torus=True)

        # Without happy agents initially
        self.happy = 0

        # DataCollector collects 3 types of data: model-level data,
        # agent-level data, and tables. A DataCollector is instantiated
        # with two dictionaries of reporter names and associated variable
        # names or functions, one for model-level data and one for
        # agent-level data; a third dictionary provides table names and
        # columns. Variable names are converted into functions which
        # retrieve attributes of that name.
        self.datacollector = DataCollector(
            # Model-level count of happy agents; only one model-level
            # reporter here
            {'happy': 'happy'},
            # For testing purposes, agent's individual x and y, via lambda
            # functions (like: lambda x, y: x ** y)
            {'x': lambda a: a.pos[0],
             'y': lambda a: a.pos[1]},
        )

        # Set up agents.
        # We use a grid iterator that returns the coordinates of a cell as
        # well as its contents; coord_iter is defined in mesa/space.py.
        for cell in self.grid.coord_iter():
            # Grid cells are indexed by [x][y] (tuple), where [0][0] is
            # assumed to be the bottom-left and [width-1][height-1] is the
            # top-right. If a grid is toroidal, the top and bottom, and
            # left and right, edges wrap to each other.
            x = cell[1]
            y = cell[2]
            if self.random.random() < self.density:
                if self.random.random() < self.minority_pc:
                    agent_type = 1
                else:
                    agent_type = 0

                agent = SchellingAgent((x, y), self, agent_type)
                # position_agent is defined in mesa/space.py. It positions
                # an agent on the grid and is used when first placing
                # agents.
                self.grid.position_agent(agent, (x, y))
                # schedule.add() is defined in mesa/time.py. It adds an
                # Agent object to the schedule; note the agent must have a
                # step() method.
                self.schedule.add(agent)

        self.running = True
        # datacollector.collect() is defined in mesa/datacollection.py.
        # When collect(...) is called, it evaluates each reporter one by
        # one and stores the results.
        self.datacollector.collect(self)

    # As a consequential method, step() executes all stages for all agents.
    def step(self):
        '''
        Run one step of the model. If all agents are happy, halt the model.
        '''
        self.happy = 0  # Reset counter of happy agents
        self.schedule.step()
        # collect data
        self.datacollector.collect(self)

        # get_agent_count() is defined in mesa/time.py. It returns the
        # current number of agents in the queue.
        if self.happy == self.schedule.get_agent_count():
            self.running = False
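# Sketch: a plain parameter sweep over the homophily threshold for the model
# above, recording how many steps it takes until every agent is happy
# (capped at 200 steps per run).
def homophily_sweep(values=(2, 3, 4, 5), max_steps=200):
    results = {}
    for homophily in values:
        model = Schelling(height=20, width=20, density=0.8,
                          minority_pc=0.2, homophily=homophily)
        while model.running and model.schedule.steps < max_steps:
            model.step()
        results[homophily] = model.schedule.steps
    return results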
class PolicyEmergenceSM(Model):
    '''
    Simplest Model for the policy emergence model.
    '''

    def __init__(self, SM_inputs, height=20, width=20):

        self.height = height
        self.width = width
        self.SM_inputs = SM_inputs

        self.stepCount = 0
        self.agenda_PC = None
        self.agenda_PF = None
        self.policy_implemented = None
        self.policy_implemented_number = None
        self.policy_formulation_run = False  # True if an agenda is found

        self.schedule = RandomActivation(self)
        self.grid = SingleGrid(height, width, torus=True)

        # creation of the datacollector vector
        self.datacollector = DataCollector(
            # Model-level variables
            model_reporters={
                "step": "stepCount",
                "AS_PF": get_problem_policy_chosen,
                "agent_attributes": get_agents_attributes},
            # Agent-level variables
            agent_reporters={
                "x": lambda a: a.pos[0],
                "y": lambda a: a.pos[1],
                "Agent type": lambda a: type(a),
                "Issuetree": lambda a: getattr(a, 'issuetree', [None])[
                    a.unique_id if isinstance(a, ActiveAgent) else 0]})

        # belief tree properties
        self.len_S, self.len_PC, self.len_DC, self.len_CR = \
            issue_tree_input(self)

        # issue tree properties
        self.policy_instruments, self.len_ins_1, self.len_ins_2, \
            self.len_ins_all, self.PF_indices = \
            policy_instrument_input(self, self.len_PC)

        # Set up active agents
        init_active_agents(self, self.len_S, self.len_PC, self.len_DC,
                           self.len_CR, self.len_PC, self.len_ins_1,
                           self.len_ins_2, self.len_ins_all, self.SM_inputs)

        # Set up passive agents
        init_electorate_agents(self, self.len_S, self.len_PC, self.len_DC,
                               self.SM_inputs)

        # Set up truth agent
        init_truth_agent(self, self.len_S, self.len_PC, self.len_DC,
                         self.len_ins_1, self.len_ins_2, self.len_ins_all)

        # The issue tree will need to be updated at a later stage with the
        # values from the system/policy context.

        self.running = True
        self.numberOfAgents = self.schedule.get_agent_count()
        self.datacollector.collect(self)

    def step(self, KPIs):
        print(" ")
        print("Step +1 - Policy emergence model")
        print("Step count: ", self.stepCount)

        '''
        Main steps of the Simplest Model for policy emergence:
        0. Module interface - Input
           Obtention of the beliefs from the system/policy context
           !! This is to be implemented at a later stage
        1. Agenda setting step
        2. Policy formulation step
        3. Module interface - Output
           Implementation of the policy instrument selected
        '''

        # saving the attributes
        self.KPIs = KPIs

        # 0.
        self.module_interface_input(self.KPIs)
        '''
        TO DO:
        - Introduce the transfer of information between the external parties
          and the truth agent related to the policy impacts
        '''

        # 1.
        self.agenda_setting()

        # 2.
        if self.policy_formulation_run:
            self.policy_formulation()
        else:
            self.policy_implemented = self.policy_instruments[-1]

        # 3.
        # self.module_interface_output()

        # end of step actions: iterate the step counter and collect data
        self.stepCount += 1
        self.datacollector.collect(self)

        print("step ends")
        print(" ")
        print(self.datacollector.get_model_vars_dataframe())

        return self.policy_implemented

    def module_interface_input(self, KPIs):
        '''
        The module interface input step consists of actions related to the
        module interface and the policy emergence model.
        Missing:
        - Electorate actions
        '''

        # selection of the Truth agent policy tree and issue tree
        for agent in self.schedule.agent_buffer(shuffled=True):
            if isinstance(agent, TruthAgent):
                truth_policytree = agent.policytree_truth
                for issue in range(self.len_DC + self.len_PC + self.len_S):
                    agent.issuetree_truth[issue] = KPIs[issue]
                truth_issuetree = agent.issuetree_truth

        # Transferring the policy impacts to the active agents
        for agent in self.schedule.agent_buffer(shuffled=True):
            if isinstance(agent, ActiveAgent):
                # replacing the policy family likelihoods
                for PFj in range(self.len_PC):
                    for PFij in range(self.len_PC):
                        agent.policytree[agent.unique_id][PFj][PFij] = \
                            truth_policytree[PFj][PFij]
                # replacing the policy instruments impacts
                for insj in range(self.len_ins_1 + self.len_ins_2 +
                                  self.len_ins_all):
                    agent.policytree[agent.unique_id][self.len_PC + insj][
                        0:self.len_S] = truth_policytree[self.len_PC + insj]
                # replacing the issue beliefs from the KPIs
                for issue in range(self.len_DC + self.len_PC + self.len_S):
                    agent.issuetree[agent.unique_id][issue][0] = \
                        truth_issuetree[issue]
                self.preference_update(agent, agent.unique_id)

    def agenda_setting(self):
        '''
        The agenda setting step is the first step in the policy process
        conceptualised in this model. The steps are given as follows:
        1. Active agents policy core issue selection
        2. Active agents policy family selection
        3. Active agents actions [to be detailed later]
        4. Active agents policy core issue selection update
        5. Active agents policy family selection update
        6. Agenda selection
        '''

        # 1. & 2.
        for agent in self.schedule.agent_buffer(shuffled=False):
            if isinstance(agent, ActiveAgent):  # considering only active agents
                agent.selection_PC()
                agent.selection_PF()

        # 3.

        # 4. & 5.
        for agent in self.schedule.agent_buffer(shuffled=False):
            if isinstance(agent, ActiveAgent):  # considering only active agents
                agent.selection_PC()
                agent.selection_PF()

        # 6. All active agents considered
        selected_PC_list = []
        selected_PF_list = []
        number_ActiveAgents = 0
        for agent in self.schedule.agent_buffer(shuffled=False):
            if isinstance(agent, ActiveAgent):
                selected_PC_list.append(agent.selected_PC)
                selected_PF_list.append(agent.selected_PF)
                number_ActiveAgents += 1

        # finding the most common policy core issue and its frequency
        d = defaultdict(int)
        for i in selected_PC_list:
            d[i] += 1
        result = max(d.items(), key=lambda x: x[1])
        agenda_PC_temp = result[0]
        agenda_PC_temp_frequency = result[1]

        # finding the most common policy family issue and its frequency
        d = defaultdict(int)
        for i in selected_PF_list:
            d[i] += 1
        result = max(d.items(), key=lambda x: x[1])
        agenda_PF_temp = result[0]
        agenda_PF_temp_frequency = result[1]

        # checking for majority
        if agenda_PC_temp_frequency > int(number_ActiveAgents / 2) and \
                agenda_PF_temp_frequency > int(number_ActiveAgents / 2):
            self.agenda_PC = agenda_PC_temp
            self.agenda_PF = agenda_PF_temp
            self.policy_formulation_run = True
            print("The agenda consists of PC", self.agenda_PC,
                  " and PF", self.agenda_PF, ".")
        else:
            self.policy_formulation_run = False
            print("No agenda was formed, moving to the next step.")

    def policy_formulation(self):
        '''
        The policy formulation step is the second step in the policy process
        conceptualised in this model. The steps are given as follows:
        0. Detailing of policy instruments that can be considered
        1. Active agents deep core issue selection
        2. Active agents policy instrument selection
        3. Active agents actions [to be detailed later]
        4. Active agents policy instrument selection update
        5. Policy instrument selection

        NOTE: THIS CODE DOESN'T CONSIDER A MAJORITY WHEN MORE THAN THREE
        POLICY MAKERS ARE INCLUDED; IT CONSIDERS THE MAXIMUM. THIS NEEDS TO
        BE ADAPTED TO CONSIDER 50% OR MORE!
        '''

        print("Policy formulation being introduced")

        # 0.
        possible_PI = self.PF_indices[self.agenda_PF]

        # 1. & 2.
        for agent in self.schedule.agent_buffer(shuffled=False):
            if isinstance(agent, ActiveAgent):  # considering only active agents
                agent.selection_S()
                agent.selection_PI()

        # 3.

        # 4. & 5.
        for agent in self.schedule.agent_buffer(shuffled=False):
            if isinstance(agent, ActiveAgent):  # considering only active agents
                agent.selection_PI()

        # 6. Only policy makers considered
        selected_PI_list = []
        number_PMs = 0
        for agent in self.schedule.agent_buffer(shuffled=False):
            # considering only policy makers
            if isinstance(agent, ActiveAgent) and \
                    agent.agent_type == 'policymaker':
                selected_PI_list.append(agent.selected_PI)
                number_PMs += 1

        # finding the most common policy instrument and its frequency
        d = defaultdict(int)
        for i in selected_PI_list:
            d[i] += 1
        result = max(d.items(), key=lambda x: x[1])
        self.policy_implemented_number = result[0]
        policy_implemented_number_frequency = result[1]

        # check for the majority and implement if satisfied
        if policy_implemented_number_frequency > int(number_PMs / 2):
            print("The policy instrument selected is policy instrument ",
                  self.policy_implemented_number, ".")
            self.policy_implemented = \
                self.policy_instruments[self.policy_implemented_number]
        else:
            print("No consensus on a policy instrument.")
            # selecting the last policy instrument, which is the
            # no-instrument policy instrument
            self.policy_implemented = self.policy_instruments[-1]

    def module_interface_output(self):
        print("Module interface output not introduced yet")

    def preference_update(self, agent, who):
        self.preference_update_DC(agent, who)
        self.preference_update_PC(agent, who)
        self.preference_update_S(agent, who)

    def preference_update_DC(self, agent, who):
        """
        The preference update function (DC)
        ===================================
        This function is used to update the preferences of the deep core
        issues of agents in their respective belief trees.

        agent - this is the owner of the belief tree
        who - this is the part of the belief tree that is considered -
        agent.unique_id should be used for this - this is done to also
        include partial knowledge preference calculation
        """

        len_DC = self.len_DC

        # 1.5.1. Preference calculation for the deep core issues
        # 1.5.1.1. Calculation of the denominator
        PC_denominator = 0
        for h in range(len_DC):
            # contingency for partial knowledge issues: skip issues whose
            # belief or goal is unknown
            if agent.issuetree[who][h][1] is not None and \
                    agent.issuetree[who][h][0] is not None:
                PC_denominator += abs(agent.issuetree[who][h][1] -
                                      agent.issuetree[who][h][0])

        # 1.5.1.2. Selection of the numerator and calculation of the preference
        for i in range(len_DC):
            # there are rare occasions where the denominator could be 0
            if PC_denominator != 0:
                agent.issuetree[who][i][2] = \
                    abs(agent.issuetree[who][i][1] -
                        agent.issuetree[who][i][0]) / PC_denominator
            else:
                agent.issuetree[who][i][2] = 0

    def preference_update_PC(self, agent, who):
        """
        The preference update function (PC)
        ===================================
        This function is used to update the preferences of the policy core
        issues of agents in their respective belief trees.

        agent - this is the owner of the belief tree
        who - this is the part of the belief tree that is considered -
        agent.unique_id should be used for this - this is done to also
        include partial knowledge preference calculation
        """

        len_DC = self.len_DC
        len_PC = self.len_PC
        len_S = self.len_S

        # 1.5.2. Preference calculation for the policy core issues
        # 1.5.2.1. Calculation of the denominator
        PC_denominator = 0
        for j in range(len_PC):
            # selecting the causal relations starting from PC
            for k in range(len_DC):
                cr = agent.issuetree[who][
                    len_DC + len_PC + len_S + j + (k * len_PC)][0]
                # contingency for partial knowledge issues; the causal
                # relation and the gap must be both positive or both negative
                if agent.issuetree[who][k][1] is not None and \
                        agent.issuetree[who][k][0] is not None and \
                        cr is not None:
                    gap = agent.issuetree[who][k][1] - \
                        agent.issuetree[who][k][0]
                    if (cr < 0 and gap < 0) or (cr > 0 and gap > 0):
                        PC_denominator += abs(cr * gap)

        # 1.5.2.2. Addition of the gaps of the associated mid-level issues
        for i in range(len_PC):
            # contingency for partial knowledge issues
            if agent.issuetree[who][len_DC + i][1] is not None and \
                    agent.issuetree[who][len_DC + i][0] is not None:
                PC_denominator += abs(agent.issuetree[who][len_DC + i][1] -
                                      agent.issuetree[who][len_DC + i][0])

        # 1.5.2.3. Calculation of the numerator and the preference,
        # selecting the PC one by one
        for j in range(len_PC):
            # 1.5.2.3.1. Calculation of the right side of the numerator
            PC_numerator = 0
            # selecting the causal relations starting from DC
            for k in range(len_DC):
                cr = agent.issuetree[who][
                    len_DC + len_PC + len_S + j + (k * len_PC)][0]
                # contingency for partial knowledge issues; the causal
                # relation and the gap must be both positive or both negative
                if agent.issuetree[who][k][1] is not None and \
                        agent.issuetree[who][k][0] is not None and \
                        cr is not None:
                    gap = agent.issuetree[who][k][1] - \
                        agent.issuetree[who][k][0]
                    if (cr < 0 and gap < 0) or (cr > 0 and gap > 0):
                        PC_numerator += abs(cr * gap)

            # 1.5.2.3.2. Addition of the gap to the numerator
            # contingency for partial knowledge issues
            if agent.issuetree[who][len_DC + j][1] is not None and \
                    agent.issuetree[who][len_DC + j][0] is not None:
                PC_numerator += abs(agent.issuetree[who][len_DC + j][1] -
                                    agent.issuetree[who][len_DC + j][0])

            # 1.5.2.3.3. Calculation of the preference
            if PC_denominator != 0:
                agent.issuetree[who][len_DC + j][2] = \
                    round(PC_numerator / PC_denominator, 3)
            else:
                agent.issuetree[who][len_DC + j][2] = 0

    def preference_update_S(self, agent, who):
        """
        The preference update function (S)
        ==================================
        This function is used to update the preferences of the secondary
        issues of the agents in their respective belief trees.

        agent - this is the owner of the belief tree
        who - this is the part of the belief tree that is considered -
        agent.unique_id should be used for this - this is done to also
        include partial knowledge preference calculation
        """

        len_DC = self.len_DC
        len_PC = self.len_PC
        len_S = self.len_S

        # 1.5.3. Preference calculation for the secondary issues
        # 1.5.3.1. Calculation of the denominator
        S_denominator = 0
        for j in range(len_S):
            # selecting the causal relations starting from S
            for k in range(len_PC):
                cr = agent.issuetree[who][
                    len_DC + len_PC + len_S + len_DC * len_PC + j +
                    (k * len_S)][0]
                # contingency for partial knowledge issues; the causal
                # relation and the gap must be both positive or both negative
                if agent.issuetree[who][len_DC + k][1] is not None and \
                        agent.issuetree[who][len_DC + k][0] is not None and \
                        cr is not None:
                    gap = agent.issuetree[who][len_DC + k][1] - \
                        agent.issuetree[who][len_DC + k][0]
                    if (cr < 0 and gap < 0) or (cr > 0 and gap > 0):
                        S_denominator += abs(cr * gap)

        # 1.5.3.2. Addition of the gaps of the associated secondary issues
        for j in range(len_S):
            # contingency for partial knowledge issues
            if agent.issuetree[who][len_DC + len_PC + j][1] is not None and \
                    agent.issuetree[who][len_DC + len_PC + j][0] is not None:
                S_denominator += \
                    abs(agent.issuetree[who][len_DC + len_PC + j][1] -
                        agent.issuetree[who][len_DC + len_PC + j][0])

        # 1.5.3.3. Calculation of the numerator and the preference,
        # selecting the S one by one
        for j in range(len_S):
            # 1.5.3.3.1. Calculation of the right side of the numerator
            S_numerator = 0
            # selecting the causal relations starting from PC
            for k in range(len_PC):
                cr = agent.issuetree[who][
                    len_DC + len_PC + len_S + len_DC * len_PC + j +
                    (k * len_S)][0]
                # contingency for partial knowledge issues; the causal
                # relation and the gap must be both positive or both negative
                if agent.issuetree[who][len_DC + k][1] is not None and \
                        agent.issuetree[who][len_DC + k][0] is not None and \
                        cr is not None:
                    gap = agent.issuetree[who][len_DC + k][1] - \
                        agent.issuetree[who][len_DC + k][0]
                    if (cr < 0 and gap < 0) or (cr > 0 and gap > 0):
                        S_numerator += abs(cr * gap)

            # 1.5.3.3.2. Addition of the gap to the numerator
            # contingency for partial knowledge issues
            if agent.issuetree[who][len_DC + len_PC + j][1] is not None and \
                    agent.issuetree[who][len_DC + len_PC + j][0] is not None:
                S_numerator += \
                    abs(agent.issuetree[who][len_DC + len_PC + j][1] -
                        agent.issuetree[who][len_DC + len_PC + j][0])

            # 1.5.3.3.3. Calculation of the preference
            if S_denominator != 0:
                agent.issuetree[who][len_DC + len_PC + j][2] = \
                    round(S_numerator / S_denominator, 3)
            else:
                agent.issuetree[who][len_DC + len_PC + j][2] = 0
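
# A standalone sketch of the calculation that preference_update_DC performs
# above: each deep core issue's preference is its own belief-goal gap,
# normalised by the sum of all known gaps. The toy numbers below are
# illustrative only.
def dc_preferences(beliefs, goals):
    gaps = [abs(g - b) for b, g in zip(beliefs, goals)
            if b is not None and g is not None]
    denominator = sum(gaps)
    if denominator == 0:
        return [0 for _ in beliefs]
    return [abs(g - b) / denominator
            if b is not None and g is not None else 0
            for b, g in zip(beliefs, goals)]

# three deep core issues with gaps of 4, 1 and 5 -> preferences 4/10, 1/10
# and 5/10
print(dc_preferences([0, 5, 0], [4, 6, 5]))  # [0.4, 0.1, 0.5]
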
class PolicyEmergenceSM(Model):
    '''
    Simplest Model for the policy emergence model.
    '''

    def __init__(self, SM_inputs, height=20, width=20):

        self.height = height  # height of the canvas
        self.width = width  # width of the canvas
        self.SM_inputs = SM_inputs  # inputs for the entire model
        self.stepCount = 0  # int - [-] - initialisation of step counter
        self.agenda_PC = None  # initialisation of agenda policy core issue tracker
        self.policy_implemented_number = None  # initialisation of policy number tracker
        self.policy_formulation_run = False  # check value for running policy formulation
        self.w_el_influence = self.SM_inputs[9]  # float - [-] - electorate influence weight constant
        # todo - consider also saving the electorate influence parameter

        self.schedule = RandomActivation(self)  # mesa random activation method
        self.grid = SingleGrid(height, width, torus=True)  # mesa grid creation method

        # creation of the datacollector vector
        self.datacollector = DataCollector(
            # Model-level variables
            model_reporters={
                "step": "stepCount",
                "AS_PF": get_problem_policy_chosen,
                "agent_attributes": get_agents_attributes,
                "electorate_attributes": get_electorate_attributes},
            # Agent-level variables
            agent_reporters={
                "x": lambda a: a.pos[0],
                "y": lambda a: a.pos[1],
                "Agent type": lambda a: type(a),
                "Issuetree": lambda a: getattr(a, 'issuetree', [None])[
                    a.unique_id if isinstance(a, ActiveAgent) else 0]})

        # setting up the belief tree
        self.len_S, self.len_PC, self.len_DC, self.len_CR = belief_tree_input()
        # setting up the policy instruments
        self.policy_instruments, self.len_ins, self.PF_indices = \
            policy_instrument_input()

        # setting up the active agents
        init_active_agents(self, self.len_S, self.len_PC, self.len_DC,
                           self.len_CR, self.len_PC, self.len_ins,
                           self.SM_inputs)
        # setting up the passive agents
        init_electorate_agents(self, self.len_S, self.len_PC, self.len_DC,
                               self.SM_inputs)
        # setting up the truth agent
        init_truth_agent(self, self.len_S, self.len_PC, self.len_DC,
                         self.len_ins)

        self.running = True
        self.numberOfAgents = self.schedule.get_agent_count()
        self.datacollector.collect(self)

    def step(self, KPIs):
        '''
        Main steps of the Simplest Model for policy emergence:
        0. Module interface - Input
        1. Agenda setting step
        2. Policy formulation step
        3. Data collection
        '''

        self.KPIs = KPIs  # saving the indicators

        # 0. initialisation
        self.module_interface_input(self.KPIs)  # communicating the beliefs (indicators)
        self.electorate_influence(self.w_el_influence)  # electorate influence actions

        # 1. agenda setting
        self.agenda_setting()

        # 2. policy formulation
        if self.policy_formulation_run:
            policy_implemented = self.policy_formulation()
        else:
            policy_implemented = self.policy_instruments[-1]

        # 3. data collection
        self.stepCount += 1  # iterate the step counter
        self.datacollector.collect(self)  # collect data

        print("Step ends", "\n")

        return policy_implemented

    def module_interface_input(self, KPIs):
        '''
        The module interface input step consists of actions related to the
        module interface and the policy emergence model.
        '''

        len_DC = self.len_DC
        len_PC = self.len_PC
        len_S = self.len_S
        len_ins = self.len_ins

        # saving the issue tree of the truth agent
        for agent in self.schedule.agent_buffer(shuffled=True):
            if isinstance(agent, TruthAgent):
                agent.issuetree_truth = KPIs
                truth_issuetree = agent.issuetree_truth
                truth_policytree = agent.policytree_truth

        # transferring the policy impacts to the active agents
        for agent in self.schedule.agent_buffer(shuffled=True):
            if isinstance(agent, ActiveAgent):  # selecting only active agents
                # for PFj in range(len_PC):  # communicating the policy family likelihoods
                #     for PFij in range(len_PC):
                #         agent.policytree[agent.unique_id][PFj][PFij] = truth_policytree[PFj][PFij]
                # communicating the policy instruments impacts
                for insj in range(len_ins):
                    agent.policytree[agent.unique_id][len_PC + insj][0:len_S] = \
                        truth_policytree[len_PC + insj]
                # communicating the issue beliefs from the KPIs
                for issue in range(len_DC + len_PC + len_S):
                    agent.issuetree[agent.unique_id][issue][0] = \
                        truth_issuetree[issue]
                # updating the preferences
                self.preference_update(agent, agent.unique_id)

    def agenda_setting(self):
        '''
        In the agenda setting step, the active agents first select their
        policy core issue of preference and then select the agenda.
        '''

        # active agent policy core selection
        for agent in self.schedule.agent_buffer(shuffled=False):
            if isinstance(agent, ActiveAgent):  # selecting only active agents
                agent.selection_PC()  # for each agent, selection of their preferred policy core issue

        selected_PC_list = []
        number_ActiveAgents = 0
        for agent in self.schedule.agent_buffer(shuffled=False):
            if isinstance(agent, ActiveAgent):
                selected_PC_list.append(agent.selected_PC)
                number_ActiveAgents += 1

        # finding the most common policy core issue and its frequency
        d = defaultdict(int)
        for i in selected_PC_list:
            d[i] += 1
        result = max(d.items(), key=lambda x: x[1])
        agenda_PC_temp = result[0]
        agenda_PC_temp_frequency = result[1]

        # checking for majority
        if agenda_PC_temp_frequency > int(number_ActiveAgents / 2):
            self.agenda_PC = agenda_PC_temp
            self.policy_formulation_run = True  # allowing for policy formulation to happen
            print("The agenda consists of PC", self.agenda_PC, ".")
        else:  # if no majority
            self.policy_formulation_run = False
            print("No agenda was formed, moving to the next step.")

        # for purposes of not changing the entire code - the policy family
        # selected is set at 0 so all policy instruments are always
        # considered in the rest of the model
        self.agenda_PF = 0

    def policy_formulation(self):
        '''
        In the policy formulation step, the policy maker agents first select
        their policy core issue of preference and then they select the
        policy that is to be implemented if there is a majority of them.
        '''

        # calculation of the policy instrument preferences
        selected_PI_list = []
        number_PMs = 0
        for agent in self.schedule.agent_buffer(shuffled=False):
            # considering only policy makers
            if isinstance(agent, ActiveAgent) and \
                    agent.agent_type == 'policymaker':
                agent.selection_S()
                agent.selection_PI()  # individual agent policy instrument selection
                # appending the policy instruments selected to a list for all PMs
                selected_PI_list.append(agent.selected_PI)
                number_PMs += 1

        # finding the most common policy instrument and its frequency
        d = defaultdict(int)
        for i in selected_PI_list:
            d[i] += 1
        result = max(d.items(), key=lambda x: x[1])
        self.policy_implemented_number = result[0]
        policy_implemented_number_frequency = result[1]

        # check for the majority and implement if satisfied
        if policy_implemented_number_frequency > int(number_PMs / 2):
            print("The policy selected is policy instrument ",
                  self.policy_implemented_number, ".")
            policy_implemented = \
                self.policy_instruments[self.policy_implemented_number]
        else:  # if no majority
            print("No consensus on a policy instrument.")
            # selecting the status quo policy instrument
            policy_implemented = self.policy_instruments[-1]

        return policy_implemented

    def preference_update(self, agent, who):
        '''
        This function is used to call the preference update functions for
        the issues of the active agents.
        '''
        self.preference_update_DC(agent, who)  # deep core issue preference update
        self.preference_update_PC(agent, who)  # policy core issue preference update
        self.preference_update_S(agent, who)  # secondary issue preference update

    def preference_update_DC(self, agent, who):
        """
        This function is used to update the preferences of the deep core
        issues of agents in their respective issue trees.

        agent - this is the owner of the issue tree
        who - this is the part of the issuetree that is considered -
        agent.unique_id should be used for this - this is done to also
        include partial knowledge preference calculation
        """

        len_DC = self.len_DC

        # calculation of the denominator
        PC_denominator = 0
        for h in range(len_DC):
            issue_belief = agent.issuetree[who][h][0]
            issue_goal = agent.issuetree[who][h][1]
            # skip issues with unknown belief or goal (partial knowledge)
            if issue_goal is not None and issue_belief is not None:
                PC_denominator += abs(issue_goal - issue_belief)

        # selection of the numerator and calculation of the preference
        for i in range(len_DC):
            issue_belief = agent.issuetree[who][i][0]
            issue_goal = agent.issuetree[who][i][1]
            if PC_denominator != 0:  # make sure the denominator is not 0
                agent.issuetree[who][i][2] = \
                    abs(issue_goal - issue_belief) / PC_denominator
            else:
                agent.issuetree[who][i][2] = 0

    def preference_update_PC(self, agent, who):
        """
        This function is used to update the preferences of the policy core
        issues of agents in their respective issue trees.

        agent - this is the owner of the issue tree
        who - this is the part of the issuetree that is considered -
        agent.unique_id should be used for this - this is done to also
        include partial knowledge preference calculation
        """

        len_DC = self.len_DC
        len_PC = self.len_PC
        len_S = self.len_S

        # calculation of the denominator
        PC_denominator = 0
        for j in range(len_PC):
            # selecting the causal relations starting from PC
            for k in range(len_DC):
                cr = agent.issuetree[who][
                    len_DC + len_PC + len_S + j + (k * len_PC)][0]
                issue_belief = agent.issuetree[who][k][0]
                issue_goal = agent.issuetree[who][k][1]
                # contingency for partial knowledge issues; the causal
                # relation and the belief-goal gap must have the same sign
                if issue_goal is not None and issue_belief is not None \
                        and cr is not None:
                    gap = issue_goal - issue_belief
                    if (cr < 0 and gap < 0) or (cr > 0 and gap > 0):
                        PC_denominator += abs(cr * gap)

        # addition of the gaps of the associated mid-level issues
        for i in range(len_PC):
            issue_belief = agent.issuetree[who][len_DC + i][0]
            issue_goal = agent.issuetree[who][len_DC + i][1]
            # contingency for partial knowledge issues
            if issue_goal is not None and issue_belief is not None:
                PC_denominator += abs(issue_goal - issue_belief)

        # calculation of the numerator and the preference,
        # selecting the PC one by one
        for j in range(len_PC):
            # calculation of the right side of the numerator
            PC_numerator = 0
            # selecting the causal relations starting from DC
            for k in range(len_DC):
                cr = agent.issuetree[who][
                    len_DC + len_PC + len_S + j + (k * len_PC)][0]
                issue_belief = agent.issuetree[who][k][0]
                issue_goal = agent.issuetree[who][k][1]
                # contingency for partial knowledge issues; the causal
                # relation and the belief-goal gap must have the same sign
                if issue_goal is not None and issue_belief is not None \
                        and cr is not None:
                    gap = issue_goal - issue_belief
                    if (cr < 0 and gap < 0) or (cr > 0 and gap > 0):
                        PC_numerator += abs(cr * gap)

            # addition of the gap to the numerator
            issue_belief = agent.issuetree[who][len_DC + j][0]
            issue_goal = agent.issuetree[who][len_DC + j][1]
            # contingency for partial knowledge issues
            if issue_goal is not None and issue_belief is not None:
                PC_numerator += abs(issue_goal - issue_belief)

            # calculation of the preferences
            if PC_denominator != 0:
                agent.issuetree[who][len_DC + j][2] = \
                    round(PC_numerator / PC_denominator, 3)
            else:
                agent.issuetree[who][len_DC + j][2] = 0

    def preference_update_S(self, agent, who):
        """
        This function is used to update the preferences of the secondary
        issues of the agents in their respective issue trees.

        agent - this is the owner of the issue tree
        who - this is the part of the issuetree that is considered -
        agent.unique_id should be used for this - this is done to also
        include partial knowledge preference calculation
        """

        len_DC = self.len_DC
        len_PC = self.len_PC
        len_S = self.len_S

        # calculation of the denominator
        S_denominator = 0
        for j in range(len_S):
            # selecting the causal relations starting from S
            for k in range(len_PC):
                cr = agent.issuetree[who][
                    len_DC + len_PC + len_S + len_DC * len_PC + j +
                    (k * len_S)][0]
                issue_belief = agent.issuetree[who][len_DC + k][0]
                issue_goal = agent.issuetree[who][len_DC + k][1]
                # contingency for partial knowledge issues; the causal
                # relation and the belief-goal gap must have the same sign
                if issue_goal is not None and issue_belief is not None \
                        and cr is not None:
                    gap = issue_goal - issue_belief
                    if (cr < 0 and gap < 0) or (cr > 0 and gap > 0):
                        S_denominator += abs(cr * gap)

        # addition of the gaps of the associated secondary issues
        for j in range(len_S):
            issue_belief = agent.issuetree[who][len_DC + len_PC + j][0]
            issue_goal = agent.issuetree[who][len_DC + len_PC + j][1]
            # contingency for partial knowledge issues
            if issue_goal is not None and issue_belief is not None:
                S_denominator += abs(issue_goal - issue_belief)

        # calculation of the numerator and the preference,
        # selecting the S one by one
        for j in range(len_S):
            # calculation of the right side of the numerator
            S_numerator = 0
            # selecting the causal relations starting from PC
            for k in range(len_PC):
                cr = agent.issuetree[who][
                    len_DC + len_PC + len_S + len_DC * len_PC + j +
                    (k * len_S)][0]
                issue_belief = agent.issuetree[who][len_DC + k][0]
                issue_goal = agent.issuetree[who][len_DC + k][1]
                # contingency for partial knowledge issues; the causal
                # relation and the belief-goal gap must have the same sign
                if issue_goal is not None and issue_belief is not None \
                        and cr is not None:
                    gap = issue_goal - issue_belief
                    if (cr < 0 and gap < 0) or (cr > 0 and gap > 0):
                        S_numerator += abs(cr * gap)

            # addition of the gap to the numerator
            issue_belief = agent.issuetree[who][len_DC + len_PC + j][0]
            issue_goal = agent.issuetree[who][len_DC + len_PC + j][1]
            # contingency for partial knowledge issues
            if issue_goal is not None and issue_belief is not None:
                S_numerator += abs(issue_goal - issue_belief)

            # calculation of the preferences
            if S_denominator != 0:
                agent.issuetree[who][len_DC + len_PC + j][2] = \
                    round(S_numerator / S_denominator, 3)
            else:
                agent.issuetree[who][len_DC + len_PC + j][2] = 0

    def electorate_influence(self, w_el_influence):
        '''
        This function calls the influence actions in the electorate agent
        class.
        '''
        for agent in self.schedule.agent_buffer(shuffled=True):
            if isinstance(agent, ElectorateAgent):
                agent.electorate_influence(w_el_influence)
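
# The agenda setting and policy formulation steps above both rely on the
# same majority-vote pattern: count each option with a defaultdict, take the
# most frequent one, and accept it only if a strict majority holds it. A
# minimal standalone sketch of that pattern, on hypothetical votes:
from collections import defaultdict

votes = [2, 0, 2, 2, 1]  # e.g. the policy core issues selected by five agents
d = defaultdict(int)
for v in votes:
    d[v] += 1
option, frequency = max(d.items(), key=lambda x: x[1])
if frequency > int(len(votes) / 2):
    print("majority for option", option)  # option 2, held by 3 of 5 agents
else:
    print("no majority")
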
class PolicyEmergenceSM(Model): ''' Simplest Model for the policy emergence model. ''' def __init__(self, PE_type, SM_inputs, AplusPL_inputs, AplusCo_inputs, AplusPK_inputs, height=20, width=20, input_LHS=False): self.height = height # height of the canvas self.width = width # width of the canvas self.SM_inputs = SM_inputs # inputs for the entire model self.PE_type = PE_type # model type (SM, A+PL, A+Co, A+PK, A+PI) self.resources_aff = SM_inputs[2] # resources per affiliation agent self.stepCount = 0 # int - [-] - initialisation of step counter self.agenda_PC = None # initialisation of agenda policy core issue tracker self.policy_implemented_number = None # initialisation of policy number tracker self.policy_formulation_run = False # check value for running policy formulation self.w_el_influence = self.SM_inputs[ 5] # float - [-] - electorate influence weight constant # batchrunner inputs self.input_LHS = input_LHS # ACF+PL parameters if 'A+PL' in self.PE_type or 'A+Co' in self.PE_type: self.conflict_level = AplusPL_inputs[0] self.resources_spend_incr_agents = AplusPL_inputs[1] # ACF+Co parameters if 'A+Co' in self.PE_type: self.PC_interest = AplusCo_inputs[0] if self.input_LHS: self.coa_creation_thresh = self.input_LHS[1] # LHS inputs self.coa_resources_share = self.input_LHS[0] # LHS inputs else: self.coa_creation_thresh = AplusCo_inputs[1] self.coa_resources_share = AplusCo_inputs[3] self.coa_coherence_thresh = AplusCo_inputs[2] self.resources_spend_incr_coal = AplusCo_inputs[4] print('res. share:', round(self.coa_resources_share, 3), ', coa. threshold:', round(self.coa_creation_thresh, 3)) self.coalition_list = [] # +PK parameters self.PK = False if '+PK' in self.PE_type: self.PK = True self.PK_catchup = AplusPK_inputs[0] self.schedule = RandomActivation(self) # mesa random activation method self.grid = SingleGrid(height, width, torus=True) # mesa grid creation method # creation of the datacollector vector if 'A+Co' in self.PE_type: self.datacollector = DataCollector( # Model-level variables model_reporters={ "step": "stepCount", "AS_PF": get_problem_policy_chosen, "agent_attributes": get_agents_attributes, "coalitions_attributes": get_coalitions_attributes, "electorate_attributes": get_electorate_attributes }, # Agent-level variables agent_reporters={ "x": lambda a: a.pos[0], "y": lambda a: a.pos[1], "Agent type": lambda a: type(a), "Issuetree": lambda a: getattr(a, 'issuetree', [None])[ a.unique_id if isinstance(a, ActiveAgent) and not isinstance( a, Coalition) else 0] }) else: self.datacollector = DataCollector( # Model-level variables model_reporters={ "step": "stepCount", "AS_PF": get_problem_policy_chosen, "agent_attributes": get_agents_attributes, "electorate_attributes": get_electorate_attributes }, # Agent-level variables agent_reporters={ "x": lambda a: a.pos[0], "y": lambda a: a.pos[1], "Agent type": lambda a: type(a), "Issuetree": lambda a: getattr(a, 'issuetree', [None])[ a.unique_id if isinstance(a, ActiveAgent) else 0] }) self.len_S, self.len_PC, self.len_DC, self.len_CR = belief_tree_input( ) # setting up belief tree self.policy_instruments, self.len_ins, self.PF_indices = policy_instrument_input( ) # setting up policy instruments init_active_agents(self, self.len_S, self.len_PC, self.len_DC, self.len_CR, self.len_PC, self.len_ins, self.SM_inputs) # setting up active agents init_electorate_agents(self, self.len_S, self.len_PC, self.len_DC, self.SM_inputs) # setting up passive agents init_truth_agent(self, self.len_S, self.len_PC, self.len_DC, self.len_ins) # setting up 
truth agent self.running = True self.numberOfAgents = self.schedule.get_agent_count() self.datacollector.collect(self) def step(self, KPIs): ''' Main steps of the Simplest Model for policy emergence: 0. Module interface - Input 1. Agenda setting step 2. Policy formulation step 3. Data collection ''' self.KPIs = KPIs # saving the indicators # 0. initialisation self.module_interface_input( self.KPIs) # communicating the beliefs (indicators) self.electorate_influence( self.w_el_influence) # electorate influence actions if 'A+Co' in self.PE_type: self.coalition_creation_algorithm() # 1. agenda setting self.agenda_setting() # 2. policy formulation if self.policy_formulation_run: policy_implemented = self.policy_formulation() else: policy_implemented = self.policy_instruments[-1] # 3. data collection self.stepCount += 1 # iterate the steps counter self.datacollector.collect(self) # collect data print("Step ends", "\n") return policy_implemented def module_interface_input(self, KPIs): ''' The module interface input step consists of actions related to the module interface and the policy emergence model ''' len_DC = self.len_DC len_PC = self.len_PC len_S = self.len_S len_ins = self.len_ins # saving the issue tree of the truth agent for agent in self.schedule.agent_buffer(shuffled=True): if isinstance(agent, TruthAgent): agent.issuetree_truth = KPIs truth_issuetree = agent.issuetree_truth truth_policytree = agent.policytree_truth # Transferring policy impact to active agents for agent in self.schedule.agent_buffer(shuffled=True): if isinstance(agent, ActiveAgent) and not isinstance( agent, Coalition): # selecting only active agents # for PFj in range(len_PC): # communicating the policy family likelihoods # for PFij in range(len_PC): # agent.policytree[agent.unique_id][PFj][PFij] = truth_policytree[PFj][PFij] for insj in range( len_ins ): # communicating the policy instruments impacts agent.policytree[agent.unique_id][ len_PC + insj][0:len_S] = truth_policytree[len_PC + insj] for issue in range( len_DC + len_PC + len_S ): # communicating the issue beliefs from the KPIs agent.issuetree[ agent.unique_id][issue][0] = truth_issuetree[issue] self.preference_update( agent, agent.unique_id) # updating the preferences def resources_distribution(self): if 'A+PL' in self.PE_type or 'A+Co' in self.PE_type: for agent in self.schedule.agent_buffer(shuffled=False): if isinstance(agent, ActiveAgent): # selecting only active agents if agent.affiliation == 0: # affiliation 0 agent.resources = 0.01 * self.number_activeagents * self.resources_aff[ 0] / 100 if agent.affiliation == 1: # affiliation 1 agent.resources = 0.01 * self.number_activeagents * self.resources_aff[ 1] / 100 agent.resources_action = agent.resources # assigning resources for the actions for both if 'A+Co' in self.PE_type: # attribution of the resources to coalitions for coalition in self.schedule.agent_buffer(shuffled=False): if isinstance(coalition, Coalition): resources = 0 for agent_mem in coalition.members: resources += agent_mem.resources * self.coa_resources_share agent_mem.resources -= self.coa_resources_share * agent_mem.resources agent.resources_action = agent.resources # assigning resources for the actions for both coalition.resources = resources coalition.resources_action = coalition.resources # assigning resources for the actions for both def agenda_setting(self): ''' In the agenda setting step, the active agents first select their policy core issue of preference and then select the agenda. 
''' # resources distribution self.resources_distribution() # active agent policy core selection for agent in self.schedule.agent_buffer(shuffled=False): if isinstance(agent, ActiveAgent): # selecting only active agents agent.selection_PC() if 'A+Co' in self.PE_type: for coalition in self.schedule.agent_buffer(shuffled=True): if isinstance(coalition, Coalition): # selecting only coalitions coalition.interactions_intra_coalition( 'AS') # intra-coalition interactions # active agent interactions (including coalitions) if 'A+PL' in self.PE_type or 'A+Co' in self.PE_type: for agent in self.schedule.agent_buffer(shuffled=True): if isinstance(agent, ActiveAgent): # selecting only active agents agent.interactions('AS', self.PK) # active agent policy core selection (after agent interactions) if 'A+PL' in self.PE_type or 'A+Co' in self.PE_type: # active agent policy core selection for agent in self.schedule.agent_buffer(shuffled=False): if isinstance(agent, ActiveAgent): # selecting only active agents agent.selection_PC() # for each agent, selection of their preferred policy core issue selected_PC_list = [] number_ActiveAgents = 0 for agent in self.schedule.agent_buffer(shuffled=False): if isinstance(agent, ActiveAgent): # considering only policy makers selected_PC_list.append(agent.selected_PC) number_ActiveAgents += 1 # finding the most common policy core issue and its frequency d = defaultdict(int) for i in selected_PC_list: d[i] += 1 result = max(d.items(), key=lambda x: x[1]) agenda_PC_temp = result[0] agenda_PC_temp_frequency = result[1] # checking for majority if agenda_PC_temp_frequency > int(number_ActiveAgents / 2): self.agenda_PC = agenda_PC_temp self.policy_formulation_run = True # allowing for policy formulation to happen print("The agenda consists of PC", self.agenda_PC, ".") else: # if no majority self.policy_formulation_run = False print("No agenda was formed, moving to the next step.") # for purposes of not changing the entire code - the policy family selected is set at 0 so all policy instruments # are always considered in the rest of the model self.agenda_PF = 0 def policy_formulation(self): ''' In the policy formulation step, the policy maker agents first select their policy core issue of preference and then they select the policy that is to be implemented if there is a majority of them. 
''' # resources distribution self.resources_distribution() # calculation of policy instruments preferences if 'A+PL' in self.PE_type or 'A+Co' in self.PE_type: for agent in self.schedule.agent_buffer(shuffled=False): if isinstance(agent, ActiveAgent): agent.selection_S() agent.selection_PI( ) # individual agent policy instrument selection if 'A+Co' in self.PE_type: for coalition in self.schedule.agent_buffer(shuffled=True): if isinstance(coalition, Coalition): # selecting only active agents # print('selected_PC', agent.selected_PC) coalition.interactions_intra_coalition('PF') # coalition.interactions('PF') # active agent interactions if 'A+PL' in self.PE_type or 'A+Co' in self.PE_type: for agent in self.schedule.agent_buffer(shuffled=True): if isinstance(agent, ActiveAgent): # selecting only active agents agent.interactions('PF', self.PK) # calculation of policy instruments preferences selected_PI_list = [] number_PMs = 0 for agent in self.schedule.agent_buffer(shuffled=False): if isinstance( agent, ActiveAgent ) and agent.agent_type == 'policymaker': # considering only policy makers agent.selection_S() agent.selection_PI( ) # individual agent policy instrument selection selected_PI_list.append( agent.selected_PI ) # appending the policy instruments selected to a list for all PMs number_PMs += 1 # finding the most common policy instrument and its frequency d = defaultdict(int) print(selected_PI_list) for i in selected_PI_list: d[i] += 1 result = max(d.items(), key=lambda x: x[1]) self.policy_implemented_number = result[0] policy_implemented_number_frequency = result[1] # check for the majority and implemented if satisfied if policy_implemented_number_frequency > int(number_PMs / 2): print("The policy selected is policy instrument ", self.policy_implemented_number, ".") policy_implemented = self.policy_instruments[ self.policy_implemented_number] else: # if no majority print("No consensus on a policy instrument.") policy_implemented = self.policy_instruments[ -1] # selecting status quo policy instrument return policy_implemented def preference_update(self, agent, who, coalition_check=False): ''' This function is used to call the preference update functions of the issues of the active agents. ''' if coalition_check: who = self.number_activeagents self.preference_update_DC(agent, who) # deep core issue preference update self.preference_update_PC(agent, who) # policy core issue preference update self.preference_update_S(agent, who) # def preference_update_DC(self, agent, who): """ This function is used to update the preferences of the deep core issues of agents in their respective issue trees. 
agent - this is the owner of the issue tree who - this is the part of the issuetree that is considered - agent.unique_id should be used for this - this is done to also include partial knowledge preference calculation """ len_DC = self.len_DC # calculation of the denominator PC_denominator = 0 for h in range(len_DC): issue_belief = agent.issuetree[who][h][0] issue_goal = agent.issuetree[who][h][1] gap = issue_goal - issue_belief if issue_goal is not None and issue_belief is not None: PC_denominator += abs(gap) # selection of the numerator and calculation of the preference for i in range(len_DC): issue_belief = agent.issuetree[who][i][0] issue_goal = agent.issuetree[who][i][1] gap = issue_goal - issue_belief if PC_denominator != 0: # make sure the denominator is not 0 agent.issuetree[who][i][2] = abs(gap) / PC_denominator else: agent.issuetree[who][i][2] = 0 def preference_update_PC(self, agent, who): """ This function is used to update the preferences of the policy core issues of agents in their respective issue trees. agent - this is the owner of the belief tree who - this is the part of the issuetree that is considered - agent.unique_id should be used for this - this is done to also include partial knowledge preference calculation """ len_DC = self.len_DC len_PC = self.len_PC len_S = self.len_S PC_denominator = 0 # calculation of the denominator for j in range( len_PC): # selecting the causal relations starting from PC for k in range(len_DC): cr = agent.issuetree[who][len_DC + len_PC + len_S + j + (k * len_PC)][0] issue_belief = agent.issuetree[who][k][0] issue_goal = agent.issuetree[who][k][1] gap = issue_goal - issue_belief if issue_goal is not None and issue_belief is not None and cr is not None \ and ((cr < 0 and gap < 0) or (cr > 0 and gap > 0)): # contingency for partial knowledge issues and check if cr and belief-goal are same sign PC_denominator = PC_denominator + abs(cr * gap) # addition of the gaps of the associated mid-level issues for i in range(len_PC): issue_belief = agent.issuetree[who][len_DC + i][0] issue_goal = agent.issuetree[who][len_DC + i][1] gap = issue_goal - issue_belief if issue_goal is not None and issue_belief is not None: # contingency for partial knowledge issues PC_denominator += abs(gap) # calculation the numerator and the preference for j in range(len_PC): # select one by one the PC # calculation of the right side of the numerator PC_numerator = 0 for k in range( len_DC): # selecting the causal relations starting from DC issue_belief = agent.issuetree[who][k][0] issue_goal = agent.issuetree[who][k][1] cr = agent.issuetree[who][len_DC + len_PC + len_S + j + (k * len_PC)][0] gap = issue_goal - issue_belief if issue_goal is not None and issue_belief is not None and cr is not None \ and ((cr < 0 and gap < 0) or (cr > 0 and gap > 0)): # contingency for partial knowledge issues and check if cr and belief-goal are same sign PC_numerator += abs(cr * gap) # addition of the gap to the numerator issue_belief = agent.issuetree[who][len_DC + j][0] issue_goal = agent.issuetree[who][len_DC + j][1] gap = issue_goal - issue_belief if issue_goal is not None and issue_belief is not None: # contingency for partial knowledge issues PC_numerator += abs(gap) # calculation of the preferences if PC_denominator != 0: agent.issuetree[who][len_DC + j][2] = round( PC_numerator / PC_denominator, 3) else: agent.issuetree[who][len_DC + j][2] = 0 def preference_update_S(self, agent, who): """ This function is used to update the preferences of secondary issues the agents in their 
respective issue trees. agent - this is the owner of the belief tree who - this is the part of the issuetree that is considered - agent.unique_id should be used for this - this is done to also include partial knowledge preference calculation """ len_DC = self.len_DC len_PC = self.len_PC len_S = self.len_S S_denominator = 0 # calculation of the denominator for j in range(len_S): for k in range( len_PC): # selecting the causal relations starting from S issue_belief = agent.issuetree[who][len_DC + k][0] issue_goal = agent.issuetree[who][len_DC + k][1] cr = agent.issuetree[who][len_DC + len_PC + len_S + len_DC * len_PC + j + (k * len_S)][0] gap = issue_goal - issue_belief if issue_goal is not None and issue_belief is not None and cr is not None \ and ((cr < 0 and gap < 0) or (cr > 0 and gap > 0)): # contingency for partial knowledge issues and check if cr and belief-goal are same sign S_denominator += abs(cr * gap) # addition of the gaps of the associated secondary issues for j in range(len_S): issue_belief = agent.issuetree[who][len_DC + len_PC + j][0] issue_goal = agent.issuetree[who][len_DC + len_PC + j][1] # print(issue_goal, type(issue_goal), type(issue_belief)) gap = issue_goal - issue_belief if issue_goal is not None and issue_belief is not None: # contingency for partial knowledge issues S_denominator += abs(gap) # calculation the numerator and the preference for j in range(len_S): # select one by one the S # calculation of the right side of the numerator S_numerator = 0 for k in range( len_PC): # selecting the causal relations starting from PC # Contingency for partial knowledge issues cr = agent.issuetree[who][len_DC + len_PC + len_S + len_DC * len_PC + j + (k * len_S)][0] issue_belief = agent.issuetree[who][len_DC + k][0] issue_goal = agent.issuetree[who][len_DC + k][1] gap = issue_goal - issue_belief if issue_goal is not None and issue_belief is not None and cr is not None \ and ((cr < 0 and gap < 0) or (cr > 0 and gap > 0)): # contingency for partial knowledge issues and check if cr and gap are same sign S_numerator += abs(cr * gap) # addition of the gap to the numerator issue_belief = agent.issuetree[who][len_DC + len_PC + j][0] issue_goal = agent.issuetree[who][len_DC + len_PC + j][1] gap = issue_goal - issue_belief if issue_goal is not None and issue_belief is not None: # contingency for partial knowledge issues S_numerator += abs(gap) # calculation of the preferences if S_denominator != 0: agent.issuetree[who][len_DC + len_PC + j][2] = round( S_numerator / S_denominator, 3) else: agent.issuetree[who][len_DC + len_PC + j][2] = 0 def electorate_influence(self, w_el_influence): ''' This function calls the influence actions in the electorate agent class. ''' for agent in self.schedule.agent_buffer(shuffled=True): if isinstance(agent, ElectorateAgent): agent.electorate_influence(w_el_influence) def coalition_creation_algorithm(self): ''' Function that is used to reset the coalitions at the beginning of each round A maximum of two coalitions are allowed. The agents have to be within a certain threshold of their goals to be assembled together. Note that the preferred states only are considered and not the actual beliefs of the actors - this could be a problem when considering the partial information case. 
:return: ''' # resetting the coalitions before the creation of new ones for coalition in self.schedule.agent_buffer(shuffled=False): if isinstance(coalition, Coalition): self.schedule.remove(coalition) # saving the agents in a list with their belief values list_agents_1 = [] # active agent list for agent in self.schedule.agent_buffer(shuffled=False): if isinstance(agent, ActiveAgent): list_agents_1.append( (agent, agent.issuetree[agent.unique_id][self.len_DC + self.PC_interest][1])) list_agents_1.sort( key=lambda x: x[1]) # sorting the list based on the goals # checking for groups for first coalition list_coalition_number = [] for i in range(len(list_agents_1)): count = 0 for j in range(len(list_agents_1)): if list_agents_1[i][ 1] - self.coa_creation_thresh <= list_agents_1[j][ 1] <= list_agents_1[i][ 1] + self.coa_creation_thresh: count += 1 list_coalition_number.append(count) index = list_coalition_number.index( max(list_coalition_number )) # finding the grouping with the most member index list_coalition_members = [] list_agents_2 = copy.copy(list_agents_1) for i in range(len(list_agents_1)): if list_agents_1[index][ 1] - self.coa_creation_thresh <= list_agents_1[i][ 1] <= list_agents_1[index][ 1] + self.coa_creation_thresh: list_coalition_members.append(list_agents_1[i][0]) list_agents_2.remove(list_agents_1[i]) self.coalition_creation( 1001, list_coalition_members ) # creating the coalition with the selected members if len(list_agents_2) > 2: #check if there are enough agents left: # checking for groups for second coalition list_coalition_number = [] for i in range(len(list_agents_2)): count = 0 for j in range(len(list_agents_2)): if list_agents_2[i][ 1] - self.coa_creation_thresh <= list_agents_2[j][ 1] <= list_agents_2[i][ 1] + self.coa_creation_thresh: count += 1 list_coalition_number.append(count) index = list_coalition_number.index( max(list_coalition_number )) # finding the grouping with the most member index list_coalition_members = [] for i in range(len(list_agents_2)): if list_agents_2[index][ 1] - self.coa_creation_thresh <= list_agents_2[i][ 1] <= list_agents_2[index][ 1] + self.coa_creation_thresh: list_coalition_members.append(list_agents_2[i][0]) self.coalition_creation( 1002, list_coalition_members ) # creating the coalition with selected members def coalition_creation(self, unique_id, members): ''' Function that is used to create the object Coalition which is a sub-agent of the ActiveAgent class :param unique_id: :param members: :return: ''' x = 0 y = 0 resources = 0 # resources are reset to 0 len_DC = self.len_DC len_PC = self.len_PC len_S = self.len_S len_CR = self.len_CR len_PF = self.len_PC len_ins = self.len_ins issuetree_coal = [None] # creation of the issue tree issuetree_coal[0] = issuetree_creation( len_DC, len_PC, len_S, len_CR) # using the newly made function for r in range( self.number_activeagents ): # last spot is where the coalition beliefs are stored issuetree_coal.append( issuetree_creation(len_DC, len_PC, len_S, len_CR)) policytree_coal = [None] # creation of the policy tree policytree_coal[0] = members[0].policytree[members[0].unique_id] for r in range(self.number_activeagents): policytree_coal.append(members[0].policytree[members[0].unique_id]) # note that the policy tree is simply copied ... 
        # this will not work in the case of partial information, where a
        # different algorithm will need to be found for this part of the model

        # creation of the coalition agent
        agent = Coalition((x, y), unique_id, self, 'coalition', resources, 'X',
                          issuetree_coal, policytree_coal, members)
        self.coalition_belief_update(agent, members)
        self.preference_update(agent, unique_id, True)  # updating the issue tree preferences
        self.grid.position_agent(agent, (x, y))
        self.schedule.add(agent)

    def coalition_belief_update(self, coalition, members):
        '''
        Function used to update the beliefs of the coalition to the average of
        the beliefs of its member agents.
        :param coalition:
        :param members:
        :return:
        '''
        len_DC = self.len_DC
        len_PC = self.len_PC
        len_S = self.len_S
        len_CR = self.len_CR

        for k in range(len_DC + len_PC + len_S):
            # updating the preferred states and actual beliefs
            belief = 0
            goal = 0
            for agent_mem in members:
                id = agent_mem.unique_id
                belief += agent_mem.issuetree[id][k][0]
                goal += agent_mem.issuetree[id][k][1]
            coalition.issuetree[self.number_activeagents][k][0] = belief / len(members)
            coalition.issuetree[self.number_activeagents][k][1] = goal / len(members)

        for k in range(len_CR):  # updating the causal relations
            CR = 0
            for agent_mem in members:
                id = agent_mem.unique_id
                CR += agent_mem.issuetree[id][len_DC + len_PC + len_S + k][0]
            coalition.issuetree[self.number_activeagents][
                len_DC + len_PC + len_S + k][0] = CR / len(members)

        if self.PK:  # for the partial knowledge case
            for agent in self.schedule.agent_buffer(shuffled=False):
                if agent not in members and isinstance(agent, ActiveAgent) \
                        and not isinstance(agent, Coalition):
                    id = agent.unique_id
                    for k in range(len_DC + len_PC + len_S):
                        # updating the preferred states
                        goal = 0
                        for agent_mem in members:
                            goal += agent_mem.issuetree[id][k][1]
                        coalition.issuetree[id][k][1] = goal / len(members)
                    for k in range(len_CR):  # updating the causal relations
                        CR = 0
                        for agent_mem in members:
                            CR += agent_mem.issuetree[id][len_DC + len_PC + len_S + k][0]
                        coalition.issuetree[id][len_DC + len_PC + len_S + k][0] = CR / len(members)
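# The flat issuetree indexing above obscures the underlying rule. As a reading
# aid, here is a minimal standalone sketch of the same secondary-issue
# preference calculation, assuming the beliefs, goals and causal relations have
# already been pulled out into plain lists (all names here are illustrative,
# not part of the model):
def secondary_preferences(pc_gaps, s_gaps, cr):
    """pc_gaps[k]: goal - belief for policy core issue k.
    s_gaps[j]: goal - belief for secondary issue j.
    cr[k][j]: causal relation from PC issue k to secondary issue j.
    Any entry may be None under partial knowledge."""
    def weighted(j):
        total = 0
        for k in range(len(pc_gaps)):
            gap, c = pc_gaps[k], cr[k][j]
            # count a causal contribution only when cr and gap share a sign
            if gap is not None and c is not None and c * gap > 0:
                total += abs(c * gap)
        if s_gaps[j] is not None:
            total += abs(s_gaps[j])
        return total

    denominator = sum(weighted(j) for j in range(len(s_gaps)))
    return [round(weighted(j) / denominator, 3) if denominator else 0
            for j in range(len(s_gaps))]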
class WolfSheep(Model):
    '''
    Wolf-Sheep Predation Model
    '''

    def __init__(self, height=20, width=20, initial_sheep=100,
                 initial_wolves=30, sheep_reproduction_chance=0.05,
                 wolf_death_chance=0.05):
        super().__init__()
        self.height = height
        self.width = width
        self.initial_sheep = initial_sheep
        self.initial_wolves = initial_wolves
        self.sheep_reproduction_chance = sheep_reproduction_chance
        self.wolf_death_chance = wolf_death_chance

        # Add a schedule for sheep and wolves separately to prevent
        # race conditions
        self.schedule_Sheep = RandomActivation(self)
        self.schedule_Wolf = RandomActivation(self)

        self.grid = MultiGrid(self.width, self.height, torus=True)
        self.datacollector = DataCollector(
            {"Sheep": lambda m: self.schedule_Sheep.get_agent_count(),
             "Wolves": lambda m: self.schedule_Wolf.get_agent_count(),
             "Mean": mean_wolf})  # module-level reporter; see the sketch below

        # Create sheep and wolves
        self.init_population(Sheep, self.initial_sheep)
        self.init_population(Wolf, self.initial_wolves)

        # This is required for the datacollector to work
        self.running = True
        self.datacollector.collect(self)

    def init_population(self, agent_type, n):
        '''
        Method that provides an easy way of making a bunch of agents at once.
        '''
        for _ in range(n):
            x = random.randrange(self.width)
            y = random.randrange(self.height)
            self.new_agent(agent_type, (x, y))

    def new_agent(self, agent_type, pos):
        '''
        Method that creates a new agent, and adds it to the correct scheduler.
        '''
        agent = agent_type(self.next_id(), self, pos)
        self.grid.place_agent(agent, pos)
        getattr(self, f'schedule_{agent_type.__name__}').add(agent)

    def remove_agent(self, agent):
        '''
        Method that removes an agent from the grid and the correct scheduler.
        '''
        self.grid.remove_agent(agent)
        getattr(self, f'schedule_{type(agent).__name__}').remove(agent)

    def step(self):
        '''
        Method that calls the step method for each of the sheep, and then for
        each of the wolves.
        '''
        self.schedule_Sheep.step()
        self.schedule_Wolf.step()

        # Save the statistics
        self.datacollector.collect(self)

    def run_model(self, step_count=200):
        '''
        Method that runs the model for a specific amount of steps.
        '''
        for _ in range(step_count):
            self.step()
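# The DataCollector above references a mean_wolf reporter that is not defined
# in this excerpt. A minimal sketch of what such a model-level reporter could
# look like, assuming Wolf agents expose a numeric `energy` attribute (the
# attribute is an assumption, not shown in the source):
def mean_wolf(model):
    """Model-level reporter: mean energy of the living wolves (0 if none)."""
    wolves = model.schedule_Wolf.agents
    if not wolves:
        return 0
    return sum(wolf.energy for wolf in wolves) / len(wolves)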
class Schelling(Model):
    """
    Model class for the Schelling segregation model.
    """

    def __init__(self, height=30, width=30, density=0.9, minority_pc=0.5,
                 homophily=3):
        """ """
        # Height and width of the grid;
        # they also define the maximum number of agents that could be in the environment
        self.height = height
        self.width = width

        # Define the population density; float between 0 and 1
        self.density = density

        # Ratio between blue and red.
        # Blue is the minority, red the majority; float between 0 and 1;
        # if > 0.5, blue becomes the majority.
        # The closer to 1, the more blue agents;
        # the closer to 0, the more red agents.
        self.minority_pc = minority_pc

        # Number of similar neighbors required for the agents to be happy;
        # takes an integer value between 0 and 8, since a cell can only be
        # surrounded by 8 neighbors
        self.homophily = homophily

        # Scheduler controls the order in which agents are activated
        self.schedule = RandomActivation(self)
        self.grid = SingleGrid(width, height, torus=True)

        self.happy = 0
        # Obtain data after each step
        self.datacollector = DataCollector(
            {"happy": "happy"},  # Model-level count of happy agents
            # For testing purposes, agent's individual x and y
            {"x": lambda a: a.pos[0], "y": lambda a: a.pos[1]},
        )

        # Set up agents
        # We use a grid iterator that returns
        # the coordinates of a cell as well as
        # its contents. (coord_iter)
        for cell in self.grid.coord_iter():
            x = cell[1]
            y = cell[2]
            if self.random.random() < self.density:
                if self.random.random() < self.minority_pc:
                    agent_type = 1
                else:
                    agent_type = 0
                agent = SchellingAgent((x, y), self, agent_type)
                self.grid.position_agent(agent, (x, y))
                self.schedule.add(agent)

        self.running = True
        self.datacollector.collect(self)

    # The class requires a step function that represents each run
    def step(self):
        """
        Run one step of the model. If all agents are happy, halt the model.
        """
        self.happy = 0  # Reset counter of happy agents
        self.schedule.step()
        # collect data
        self.datacollector.collect(self)

        # Termination is managed here
        if self.happy == self.schedule.get_agent_count():
            self.running = False
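# A minimal driver for the model above (SchellingAgent is assumed to be
# defined as referenced); the collected model dataframe exposes the happy
# count per step:
if __name__ == '__main__':
    model = Schelling(height=30, width=30, density=0.9, minority_pc=0.5,
                      homophily=3)
    while model.running and model.schedule.steps < 200:
        model.step()
    print(model.datacollector.get_model_vars_dataframe()["happy"].tail())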
class SchellingModel(Model):
    '''
    Model class for the Schelling segregation model.
    '''

    def __init__(self, height, width, density, minority_pc, homophily):
        ''' '''
        # Setting up the model
        self.height = height
        self.width = width
        self.density = density  # fraction of cells that are occupied
        self.minority_pc = minority_pc  # percentage minority in the city
        self.homophily = homophily  # number of similar-minded agents that you want around you

        # Setting up the ABM simulation
        self.schedule = RandomActivation(self)

        # Setting up the grid using the inputs of the function; torus=True
        # wraps the edges of the grid, so cells on opposite borders count as
        # neighbors
        self.grid = SingleGrid(height, width, torus=True)

        # Setting the number of happy people to zero
        self.happy = 0
        self.datacollector = DataCollector(
            {"happy": lambda m: m.happy},  # Model-level count of happy agents
            # For testing purposes, agent's individual x and y
            {"x": lambda a: a.pos[0], "y": lambda a: a.pos[1]})

        self.running = True

        # Set up agents
        # We use a grid iterator that returns
        # the coordinates of a cell as well as
        # its contents. (coord_iter)
        for cell in self.grid.coord_iter():
            # For each cell coordinate, apply the if statements
            x = cell[1]
            y = cell[2]
            # First if statement: take a random number between 0 and 1
            # (random.random) and check whether it is below the assigned density.
            # Second if statement: take a random number between 0 and 1
            # and assign the agent type based on the condition.
            if random.random() < self.density:
                if random.random() < self.minority_pc:
                    agent_type = 1
                else:
                    agent_type = 0
                # Refer to the agent class for the agent attributes
                agent = SchellingAgent((x, y), self, agent_type)
                self.grid.position_agent(agent, (x, y))
                self.schedule.add(agent)

    def step(self):
        '''
        Run one step of the model. If all agents are happy, halt the model.
        '''
        self.happy = 0  # Reset counter of happy agents
        self.schedule.step()
        self.datacollector.collect(self)

        if self.happy == self.schedule.get_agent_count():
            self.running = False
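# Unlike the previous variant, this one draws its placement randomness from
# Python's module-level random instead of Mesa's per-model self.random, so a
# reproducible run needs the global generator seeded; a small sketch:
if __name__ == '__main__':
    import random
    random.seed(42)  # seeds the module-level generator used during placement
    model = SchellingModel(20, 20, 0.8, 0.5, 3)
    while model.running and model.schedule.steps < 100:
        model.step()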
class LoveMatch(Model):
    '''
    Love-match market model: in this model, each individual walks randomly
    around the space; on finding a match (an agent of the opposite sex whose
    beauty and wealth parameters fit what it desires) it disappears from the
    model. The goal is to observe the distribution of beauty and wealth
    profiles over time, until we see who never manages to find a partner.
    '''

    def __init__(self, height=50, width=50, density=0.8, HM_pc=0.2,
                 entry_rate=1, max_agents=750):
        # Here we set the size of the grid where the model takes place,
        # along with the initial parameters.
        self.height = height
        self.width = width
        self.density = density
        self.HM_pc = HM_pc
        self.entry_rate = entry_rate
        self.schedule = RandomActivation(self)
        self.grid = MultiGrid(height, width, torus=False)
        self.max_agents = max_agents
        self.parejas = 0
        self.hombres = 0
        self.mujeres = 0
        self.unhappy = 0
        self.idcounter = 0

        # In this section, we label each agent according to its type
        for cell in self.grid.coord_iter():
            x = cell[1]
            y = cell[2]
            if self.random.random() < self.density:
                if self.random.random() < self.HM_pc:
                    gender = 1
                    self.hombres += 1
                else:
                    gender = 0
                    self.mujeres += 1

                # We create each agent and assign its ID; every time an agent
                # is created, the ID counter is incremented by one.
                # Note: the characteristics are modeled with a log-normal
                # distribution. This gives only positive values, and the
                # beauty/wealth ranking concentrates between 0 and 1.
                self.idcounter += 1
                agent = miAgente((x, y), self, gender,
                                 beauty=np.random.lognormal(0.5, 0.30),
                                 wealth=np.random.lognormal(0.5, 0.30),
                                 desired_beauty=np.random.lognormal(0.5, 0.3),
                                 desired_wealth=np.random.lognormal(0.5, 0.3),
                                 time_to_critical=random.randint(10, 30),
                                 sojourn=-1,
                                 is_critical=0,
                                 myid=self.idcounter)
                # place the agents in the model
                self.schedule.add(agent)
                self.grid.place_agent(agent, (x, y))

        # run the model
        self.running = True

        # collect the data relevant to the agents and to the model
        self.datacollector = DataCollector(
            model_reporters={
                'density': 'density',
                'parejas': 'parejas',
                'unhappy': 'unhappy',
                'hombres': 'hombres',
                'mujeres': 'mujeres'
            },
            agent_reporters={
                'myid': 'myid',
                'wealth': 'wealth',
                'gender': 'gender',
                'beauty': 'beauty',
                'desired_beauty': 'desired_beauty',
                'desired_wealth': 'desired_wealth',
                'time_to_critical': 'time_to_critical',
                'is_critical': 'is_critical',
                'sojourn': 'sojourn'
            })
        self.datacollector.collect(self)

    def update(self):
        if self.schedule.get_agent_count() < self.max_agents:
            for i in range(self.entry_rate):
                x = self.random.randrange(self.grid.width)
                y = self.random.randrange(self.grid.height)
                if self.random.random() < self.HM_pc:
                    gender = 1
                    self.hombres += 1
                else:
                    gender = 0
                    self.mujeres += 1
                self.idcounter += 1
                agent = miAgente((x, y), self, gender,
                                 beauty=random.gauss(4, 2),
                                 wealth=random.gauss(4, 3),
                                 desired_beauty=random.gauss(4, 3),
                                 desired_wealth=random.gauss(3, 2),
                                 time_to_critical=random.gauss(20, 5),
                                 sojourn=-1,
                                 is_critical=0,
                                 myid=self.idcounter)
                self.schedule.add(agent)
                self.grid.place_agent(agent, (x, y))

    def step(self):
        # This step lets the model keep running until all agents find a partner
        self.schedule.step()
        # For plotting purposes, we collect the information on the number of couples
        self.datacollector.collect(self)

        ### Save the relevant information into CSV tables.
        self.datacollector.get_agent_vars_dataframe().to_csv("test_me_a.csv")
        self.datacollector.get_model_vars_dataframe().to_csv("test_me_m.csv")

        ### Finally, the model stops when the number of agents reaches zero
        if self.schedule.get_agent_count() == 0:
            self.running = False
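# Rewriting both CSV files on every step makes the total I/O grow
# quadratically with run length. If intermediate snapshots are not needed, a
# cheaper pattern is to export once after the run (a sketch, reusing the file
# names above):
if __name__ == '__main__':
    model = LoveMatch()
    for _ in range(200):
        if not model.running:
            break
        model.step()
    # export once, after the run
    model.datacollector.get_agent_vars_dataframe().to_csv("test_me_a.csv")
    model.datacollector.get_model_vars_dataframe().to_csv("test_me_m.csv")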
class EvacuationModel(Model):
    def __init__(self, N=20, height=21, width=21, push_ratio=0.5):
        super().__init__()
        self.height = height
        self.width = width
        self.num_agents = N
        self.exit_x = round(self.width / 2)
        self.exit_y = self.height - 1
        self.push_probs = np.array([[0., 0.], [1., 0.5]])
        self.grid = MultiGrid(self.width, self.height, torus=False)
        self.schedule = RandomActivation(self)
        self.exit_times = []

        # decide for each ID whether it is a pusher
        is_pusher = np.zeros(N, dtype=int)
        idx = self.random.sample(range(N), int(push_ratio * N))
        is_pusher[idx] = 1

        # Add N pedestrians
        taken_pos = []
        for i in range(self.num_agents):
            # Add the agent to a random, still unoccupied grid cell
            while True:
                x = self.random.randrange(1, self.grid.width - 1)
                y = self.random.randrange(1, self.grid.height - 1)
                pos = (x, y)
                if pos not in taken_pos:
                    break
            a = Pedestrian(i, self, pos, self.exit_x, self.exit_y, is_pusher[i])
            self.schedule.add(a)
            self.grid.place_agent(a, pos)
            taken_pos.append(pos)

        # Place vertical walls
        for i in range(self.height):
            # Left
            x = 0
            y = i
            w = Wall(self, (x, y))
            self.grid.place_agent(w, (x, y))
            # Right
            x = self.width - 1
            y = i
            w = Wall(self, (x, y))
            self.grid.place_agent(w, (x, y))

        # Place horizontal walls
        for i in range(self.width):
            # Up
            x = i
            y = 0
            w = Wall(self, (x, y))
            self.grid.place_agent(w, (x, y))
            # Down
            x = i
            y = self.height - 1
            # One exit
            if x == self.exit_x and y == self.exit_y:
                e = Exit(self, (x, y))
                self.grid.place_agent(e, (x, y))
            else:
                w = Wall(self, (x, y))
                self.grid.place_agent(w, (x, y))

        self.data_collector = DataCollector({
            "Evacuees": lambda m: self.count_evacuees(),
            "Evacuated": lambda m: self.count_evacuated()
        })

        # this is required for the data_collector to work
        self.running = True
        self.data_collector.collect(self)

    def count_evacuees(self):
        return self.schedule.get_agent_count()

    def count_evacuated(self):
        return self.num_agents - self.schedule.get_agent_count()

    def plot(self):
        # Average exit time
        avg = sum(self.exit_times) / len(self.exit_times)

        # Exit times bins
        L = self.exit_times[-1]
        bin_size = 5
        min_edge = 0
        max_edge = math.ceil(L / bin_size) * bin_size
        N = int((max_edge - min_edge) / bin_size)
        bin_list = np.linspace(min_edge, max_edge, N + 1)

        # Exit times histogram
        plt.hist(self.exit_times, bin_list, edgecolor="k")
        plt.title("Average = " + str(avg))
        plt.xlabel("Exit time")
        plt.ylabel("Frequency")
        plt.show()

    def step(self):
        # Stop the run if all pedestrians have exited
        if self.schedule.get_agent_count() == 0:
            self.plot()
            self.running = False

        self.schedule.step()
        self.data_collector.collect(self)
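# A short driver; because step() calls plot() once the room is empty, running
# the model to completion pops up the matplotlib histogram (Pedestrian, Wall
# and Exit are assumed to be defined as referenced):
if __name__ == '__main__':
    model = EvacuationModel(N=20, height=21, width=21, push_ratio=0.5)
    while model.running and model.schedule.steps < 1000:
        model.step()
    # per-step counts collected by the DataCollector
    print(model.data_collector.get_model_vars_dataframe().tail())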
class PartyModel(Model):
    def __init__(self, height=20, width=20, number_introvert=30,
                 number_ambivert=40, number_extrovert=30):
        ''' '''
        self.height = height
        self.width = width
        self.number_attendees = 1.0 * \
            (number_introvert + number_ambivert + number_extrovert)
        self.number_introvert = number_introvert
        self.number_ambivert = number_ambivert
        self.number_extrovert = number_extrovert
        self.percent_introvert = number_introvert / self.number_attendees
        self.percent_ambivert = number_ambivert / self.number_attendees
        self.percent_extrovert = number_extrovert / self.number_attendees
        self.introvert_cutoff = self.percent_introvert
        self.ambivert_cutoff = self.percent_introvert + self.percent_ambivert

        self.schedule = RandomActivation(self)
        self.grid = SingleGrid(width, height, torus=True)

        self.happy = 0
        self.happy_introverts = 0
        self.happy_ambiverts = 0
        self.happy_extroverts = 0
        self.datacollector = DataCollector(
            {"happy": "happy"},  # Model-level count of happy agents
            # For testing purposes, agent's individual x and y
            {"x": lambda a: a.pos[0], "y": lambda a: a.pos[1]})

        count = 0
        # Set up agents
        # We use a grid iterator that returns
        # the coordinates of a cell as well as
        # its contents. (coord_iter)
        for cell in self.grid.coord_iter():
            x = cell[1]
            y = cell[2]
            if count < self.number_attendees:
                extroversion = self.random.random()
                if extroversion < self.introvert_cutoff:
                    agent_type = "introvert"
                elif extroversion < self.ambivert_cutoff:
                    agent_type = "ambivert"
                else:
                    agent_type = "extrovert"
                agent = PartyAgent((x, y), self, agent_type)
                self.grid.position_agent(agent, (x, y))
                self.schedule.add(agent)
                count = count + 1

        self.running = True
        self.datacollector.collect(self)

    def step(self):
        '''
        Run one step of the model. If more than 95% of the agents are happy,
        halt the model.
        '''
        # Reset counters of happy agents
        self.happy = 0

        self.schedule.step()
        # collect data
        self.datacollector.collect(self)

        if self.happy > self.schedule.get_agent_count() * 0.95:
            self.running = False
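# The type assignment above is the cumulative-cutoff idiom: a single uniform
# draw compared against running totals of the type shares. The same idea
# generalizes to any number of types; an illustrative helper (not part of the
# model):
def draw_type(rng, shares):
    """Pick a key from `shares` (type -> relative share) with one uniform draw."""
    total = sum(shares.values())
    r = rng.random() * total
    cumulative = 0.0
    for name, share in shares.items():
        cumulative += share
        if r < cumulative:
            return name
    return name  # guard against floating-point rounding at the top edge

# e.g. draw_type(model.random, {"introvert": 30, "ambivert": 40, "extrovert": 30})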
class Schelling(Model):
    """
    Model class for the Schelling segregation model.
    """

    # ANSWER --- cooperativeness = 10 in the init definition
    def __init__(self, height=30, width=30, density=0.9, homophily=3,
                 cooperativeness=0.0):
        """ """
        # Height and width of the grid;
        # they also define the maximum number of agents that could be in the environment
        self.height = height
        self.width = width

        # Define the population density; float between 0 and 1
        self.density = density

        # Number of similar neighbors required for the agents to be happy;
        # takes an integer value between 0 and 8, since a cell can only be
        # surrounded by 8 neighbors
        # homophily == wanted similarity
        self.homophily = homophily

        # ANSWER
        # What fraction of the agents is defined as cooperative
        self.cooperativeness = cooperativeness
        # ANSWER

        # Scheduler controls the order in which agents are activated
        self.schedule = RandomActivation(self)
        self.grid = SingleGrid(width, height, torus=True)

        self.happy = 0
        self.segregation = 0
        # Obtain data after each step
        self.datacollector = DataCollector(
            {
                "happy": "happy",
                "segregation": "segregation"
            },  # Model-level counts of happy and segregated agents
            # For testing purposes, agent's individual x and y
            {"x": lambda a: a.pos[0], "y": lambda a: a.pos[1]},
        )

        # Set up agents
        # We use a grid iterator that returns
        # the coordinates of a cell as well as
        # its contents. (coord_iter)
        for cell in self.grid.coord_iter():
            x = cell[1]
            y = cell[2]
            if self.random.random() < self.density:
                # a single draw, so the three types partition [0, 1)
                draw = self.random.random()
                if draw < 0.33:
                    agent_type = 2
                elif draw > 0.66:
                    agent_type = 1
                else:
                    agent_type = 0

                # ANSWER
                is_cooperative = False
                if self.random.random() < cooperativeness:
                    is_cooperative = True
                happiness_extent = 0
                # ANSWER

                # ANSWER --- Updated initialization to use new init definition
                agent = SchellingAgent((x, y), self, agent_type,
                                       is_cooperative, happiness_extent)
                self.grid.position_agent(agent, (x, y))
                self.schedule.add(agent)

        self.running = True
        self.datacollector.collect(self)

    # The class requires a step function that represents each run
    def step(self):
        """
        Run one step of the model. If all agents are happy, halt the model.
        """
        self.happy = 0  # Reset counter of happy agents
        self.segregation = 0  # Reset counter of segregated agents
        self.schedule.step()
        # collect data
        self.datacollector.collect(self)

        # Termination is managed here
        if self.happy == self.schedule.get_agent_count():
            self.running = False
class SchellingModel(Model):
    '''Model class for the Schelling segregation model'''

    def __init__(self, height=20, width=20, density=.8, group_ratio=.66,
                 minority_ratio=.5, homophily=3):
        self.height = height
        self.width = width
        self.density = density
        self.group_ratio = group_ratio
        self.minority_ratio = minority_ratio
        self.homophily = homophily
        self.happy = 0
        self.segregated = 0

        self.schedule = RandomActivation(self)
        self.grid = SingleGrid(height, width, torus=False)

        self.place_agents()
        self.datacollector = DataCollector(
            {'happy': (lambda m: m.happy),
             'segregated': (lambda m: m.segregated)})
        self.running = True

    def step(self):
        '''Run one step of the model'''
        self.schedule.step()
        self.calculate_stats()
        self.datacollector.collect(self)

        if self.happy == self.schedule.get_agent_count():
            self.running = False

    def place_agents(self):
        for cell in self.grid.coord_iter():
            x, y = cell[1:3]
            if random.random() < self.density:
                if random.random() < self.group_ratio:
                    if random.random() < self.minority_ratio:
                        group = 0
                    else:
                        group = 1
                else:
                    group = 2
                agent = SchellingAgent((x, y), group)
                self.grid.position_agent(agent, (x, y))
                self.schedule.add(agent)

        for agent in self.schedule.agents:
            count = 0
            for neighbour in self.grid.iter_neighbors(agent.pos, moore=False):
                if neighbour.group == agent.group:
                    count += 1
            agent.similar = count

    def calculate_stats(self):
        happy_count = 0
        avg_seg = 0
        for agent in self.schedule.agents:
            avg_seg += agent.similar
            if agent.similar >= self.homophily:
                happy_count += 1
        self.happy = happy_count
        self.segregated = avg_seg / self.schedule.get_agent_count()
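# A short run of this variant, reading back both model-level series (here
# SchellingAgent is the two-argument (pos, group) variant used in
# place_agents):
if __name__ == '__main__':
    model = SchellingModel(height=20, width=20, density=0.8, group_ratio=0.66,
                           minority_ratio=0.5, homophily=3)
    while model.running and model.schedule.steps < 100:
        model.step()
    stats = model.datacollector.get_model_vars_dataframe()
    print(stats[['happy', 'segregated']].tail())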