class WorldModel(Model):
    """Mesa model that places N PersonAgents with random ethnicities on a grid."""

    def __init__(self, N, width, height):
        # NOTE(review): SingleGrid is built as (height, width) while the
        # parameters are named (width, height) — confirm the intended order.
        self.grid = SingleGrid(height, width, True)
        self.schedule = RandomActivation(self)
        self.num_agents = N
        self.running = True  # lets Mesa's server keep stepping the model
        for i in range(self.num_agents):
            # Ethnicities is defined elsewhere in this module; presumably an
            # iterable of numeric ethnicity codes — confirm.
            ethnicity = random.choice(Ethnicities)
            a = PersonAgent(unique_id=i, model=self, ethnicity=int(ethnicity) )
            self.schedule.add(a)
            # Add the agent to a random grid cell
            self.grid.position_agent(a)
        self.datacollector = DataCollector(
            agent_reporters={
                "Nationalism": lambda a: a.nationalism,
                "X": lambda a: a.pos[0],
                "Y": lambda a: a.pos[1]
            }
        )

    def step(self):
        # Collect data first, then advance every agent once in random order.
        self.datacollector.collect(self)
        self.schedule.step()
class SchellingModel(Model):
    """Schelling segregation model: agents relocate until tolerant of neighbours.

    Agents are scattered on unique random cells; the run halts when more than
    99% of agents report themselves happy.
    """

    def __init__(self, height=20, width=20, tolerance=0.3, population=200):
        self.height = height
        self.width = width
        self.tolerance = tolerance
        self.population = population
        self.happy = 0  # re-counted by agents each step
        self.schedule = RandomActivation(self)
        self.grid = SingleGrid(height, width, torus=False)
        self.running = True
        self.setup()

    def setup(self):
        """Place `population` agents on distinct random cells."""
        # A set gives O(1) membership tests; the original list made this
        # loop O(n^2) overall. The np.random draw order is unchanged.
        occupied = set()
        for i in range(self.population):
            x = np.random.randint(0, self.width)
            y = np.random.randint(0, self.height)
            while (x, y) in occupied:
                x = np.random.randint(0, self.width)
                y = np.random.randint(0, self.height)
            occupied.add((x, y))
            agent = SchellingAgent((x, y), self)
            self.grid.position_agent(agent, (x, y))
            self.schedule.add(agent)

    def happy_ratio(self):
        """Fraction of agents currently happy; 0.0 for an empty population."""
        agents = self.schedule.agents
        if not agents:
            # Guard: population=0 previously raised ZeroDivisionError.
            return 0.0
        return self.happy / len(agents)

    def step(self):
        self.happy = 0  # Reset counter of happy agents
        self.schedule.step()
        if self.happy_ratio() > 0.99:
            self.running = False
class TurtleModel(Model):
    """Interactive model with one 'active' turtle that moves on the grid."""

    def __init__(self, width: int = 5, height: int = 5):
        super().__init__()
        # The first turtle starts active and sits in the grid's centre.
        self.active_agent = Turtle(self.next_id(), self)
        self.active_agent.active = True
        self.grid = SingleGrid(width, height, True)
        self.grid.position_agent(self.active_agent, width // 2, height // 2)
        self.schedule = BaseScheduler(self)
        self.schedule.add(self.active_agent)

    def step(self):
        """Move the active turtle one cell in a random cardinal direction."""
        heading = self.random.choice([(1, 0), (-1, 0), (0, 1), (0, -1)])
        self.active_agent.move(heading)

    def on_key(self, key):
        """Translate an arrow-key name into a move of the active turtle."""
        arrow_headings = {
            "ArrowUp": (0, 1),
            "ArrowDown": (0, -1),
            "ArrowLeft": (-1, 0),
            "ArrowRight": (1, 0),
        }
        heading = arrow_headings.get(key, "")
        if heading:
            self.active_agent.move(heading)

    def on_click(self, **kwargs):
        """Make the clicked agent (matched by unique_id) the active one."""
        self.active_agent.active = False
        clicked_id = kwargs.get("unique_id")
        for candidate in self.schedule.agents:
            if candidate.unique_id == clicked_id:
                self.active_agent = candidate
                self.active_agent.active = True
class Schelling(Model):
    """
    Model class for the Schelling segregation model.

    The scheduler class is chosen by name from mesa's `time` module, so the
    activation regime is configurable by the caller.
    """

    def __init__(self, height=20, width=20, density=0.8,
                 schedule="RandomActivation", **kwargs):
        """density: cell-occupancy probability; schedule: mesa.time class name."""
        self.height = height
        self.width = width
        self.density = density
        self.minority_pc = 0.4
        self.homophily = 3
        # `time` here is mesa's time module (imported elsewhere in this file).
        self.schedule = getattr(time, schedule)(self)
        self.grid = SingleGrid(height, width, torus=True)
        self.happy = 0

        # Set up agents
        # We use a grid iterator that returns
        # the coordinates of a cell as well as
        # its contents. (coord_iter)
        for cell in self.grid.coord_iter():
            x = cell[1]
            y = cell[2]
            if self.random.random() < self.density:
                if self.random.random() < self.minority_pc:
                    agent_type = 1
                else:
                    agent_type = 0
                agent = SchellingAgent((x, y), self, agent_type)
                self.grid.position_agent(agent, (x, y))
                self.schedule.add(agent)
        self.running = True

    def step(self):
        """
        Run one step of the model. If All agents are happy, halt the model.
        """
        self.happy = 0  # Reset counter of happy agents
        self.schedule.step()
        if self.happy == self.schedule.get_agent_count():
            self.running = False

    def on_click(self, x, y, agent_type, **kwargs):
        """Toggle the type of the clicked agent; clicks on empty cells are ignored."""
        clicked = self.grid[x][y]
        # BUG FIX: clicking an empty cell used to raise AttributeError
        # because None has no `.type`.
        if clicked is not None:
            clicked.type = 1 if agent_type == 0 else 0

    @property
    def Step(self):
        # Number of scheduler steps executed so far.
        return self.schedule.steps
class BikeShare(Model):
    """A model with some number of potential riders and docking stations."""

    # Module-level constant (kept global for any external readers); used to
    # detect day rollover in step().
    global hours_per_day
    hours_per_day = 24

    def __init__(self, N, M, width, height):
        """N: number of riders; M: number of stations; width/height: grid size."""
        self.num_agents = N
        self.num_stations = M
        # BUG FIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int() is the documented replacement.
        self.radius = int(np.sqrt(width * height))
        self.grid = MultiGrid(width, height, True)
        self.grid_stations = SingleGrid(width, height, True)
        self.schedule = RandomActivation(self)
        self.timestamp = 0  # hour within the current day
        self.datestamp = 0  # day counter
        threshold = 0.8
        # create rider agents on random (possibly shared) cells
        for i in range(self.num_agents):
            a = BikeRider(i, self, threshold)
            self.schedule.add(a)
            x = np.random.randint(self.grid.width)
            y = np.random.randint(self.grid.height)
            self.grid.place_agent(a, (x, y))
        # create stations; SingleGrid placement ensures one station per cell
        # NOTE(review): riders and stations share the 0..k id range — confirm
        # the scheduler in use tolerates duplicate unique_ids.
        for i in range(self.num_stations):
            s = BikeStation(i, self)
            self.schedule.add(s)
            self.grid_stations.position_agent(s)  # ensures one station max
            print ("Station " + str(s.unique_id) + "; " + str(s.pos))

    def step(self):
        '''Advance the model by 1 step: arbitrary unit of time.
        '''
        self.timestamp += 1
        # Roll the clock over to a new day every `hours_per_day` steps.
        if self.timestamp % hours_per_day == 0:
            print ("\n**** new day " + str(self.datestamp))
            self.datestamp += 1
            self.timestamp = 0
        self.schedule.step()
class PredatorPreyModel(Model):
    """Predators and prey scattered on distinct random cells of a SingleGrid."""

    def __init__(self, N_predator, N_prey, width, height, random_seed=None,
                 predator_probdiv=0.15, predator_probdie=0.2,
                 predator_probdiv_init=0.25):
        """Seed the module RNG, then place predators followed by prey."""
        self.num_predators = N_predator
        self.num_prey = N_prey
        self.num_ids = N_predator + N_prey
        self.grid = SingleGrid(width, height, False)
        self.schedule = RandomActivation(self)
        model_reporters = {}
        agent_reporters = {}
        taken_cells = set()
        random.seed(a=random_seed)

        def draw_free_cell():
            # Rejection-sample random cells until an unused one turns up.
            # The x-then-y draw order matches the original RNG stream.
            cx = int(random.random() * width)
            cy = int(random.random() * height)
            while (cx, cy) in taken_cells:
                cx = int(random.random() * width)
                cy = int(random.random() * height)
            taken_cells.add((cx, cy))
            return cx, cy

        for i in range(self.num_predators):
            hunter = Predator(i, self, probdiv=predator_probdiv,
                              probdie=predator_probdie,
                              probdiv_init=predator_probdiv_init)
            px, py = draw_free_cell()
            self.grid.position_agent(hunter, x=px, y=py)
            self.schedule.add(hunter)

        for i in range(self.num_prey):
            grazer = Prey(i + self.num_predators, self)
            px, py = draw_free_cell()
            self.grid.position_agent(grazer, x=px, y=py)
            self.schedule.add(grazer)

        self.datacollector = DataCollector(model_reporters=model_reporters,
                                           agent_reporters=agent_reporters)
        self.running = True
        self.datacollector.collect(self)

    def step(self):
        """Advance every agent, then record the new state."""
        self.schedule.step()
        self.datacollector.collect(self)
class CoopaModel(Model):
    """A model with some number of agents."""

    def __init__(self, N, width, height, agent_type, log_path=None):
        # N: number of worker agents; agent_type: key into AGENT_TYPES
        # (a project-level registry of agent classes).
        self.running = True
        self.num_agents = N
        self.grid = SingleGrid(width, height, torus=False)
        self.schedule = RandomActivation(self)
        self.message_dispatcher = MessageDispatcher()
        self.layout = Layout()
        self._context = Context()
        self.agent_type = AGENT_TYPES[agent_type]
        self.layout.draw(self.grid)  # draws the room layout onto the grid
        # Add drop point(s)
        self.drop_points = [DropPoint(1, self)]
        self.grid.place_agent(self.drop_points[0], (5, 5))
        # Add recharging station(s)
        # NOTE(review): fixed coordinates (5,5)/(55,5) assume a sufficiently
        # large grid — confirm against the width/height callers pass.
        self.recharge_points = [RechargePoint(1, self)]
        self.grid.place_agent(self.recharge_points[0], (55, 5))
        # Place resources tactically
        self._context.place_few_trash_in_all_rooms(self)
        # the mighty agents arrive
        for i in range(self.num_agents):
            a = self.agent_type(i, self, log_path=log_path)
            self.schedule.add(a)
            self.grid.position_agent(a)
        self.datacollector = DataCollector(
            model_reporters={
                "Trash collected": compute_dropped_trashes,
                "Average battery power": compute_average_battery_power,
                "Max battery power": compute_max_battery_power,
                "Min battery power": compute_min_battery_power
            },
            # agent_reporters={"Trash": "trash_count"}
        )
        # An agent attribute
        self.name = "CoopaModel"
        self._logger = utils.create_logger(self.name, log_path=log_path)

    @property
    def time(self):
        # Scheduler-maintained simulation clock.
        return self.schedule.time

    def step(self):
        # Collect data, advance all agents, and log the wall-clock duration.
        t = time.monotonic()
        self.datacollector.collect(self)
        self.schedule.step()
        self._log("Finished in {:.5f} seconds.".format(time.monotonic() - t),
                  logging.INFO)

    def _log(self, msg, lvl=logging.DEBUG):
        # Attach the simulation time to every log record.
        self._logger.log(lvl, msg, extra={'time': self.time})
class SchellingModel(Model):
    '''
    Model class for the Schelling segregation model: agents occupy a torus
    grid and the run halts once every agent reports itself happy.
    '''

    def __init__(self, height, width, density, minority_pc, homophily):
        '''Populate the grid cell-by-cell at the requested density.'''
        self.height = height
        self.width = width
        self.density = density
        self.minority_pc = minority_pc
        self.homophily = homophily

        self.schedule = RandomActivation(self)
        self.grid = SingleGrid(height, width, torus=True)

        self.happy = 0
        self.datacollector = DataCollector(
            {"happy": "happy"},  # Model-level count of happy agents
            # For testing purposes, agent's individual x and y
            {"x": lambda a: a.pos[0],
             "y": lambda a: a.pos[1]})

        # Visit every cell via coord_iter; occupy it with probability
        # `density`, drawing minority membership only for occupied cells so
        # the RNG stream matches the classic example.
        for _, col, row in self.grid.coord_iter():
            if random.random() < self.density:
                group = 1 if random.random() < self.minority_pc else 0
                resident = SchellingAgent((col, row), self, group)
                self.grid.position_agent(resident, (col, row))
                self.schedule.add(resident)

        self.running = True
        self.datacollector.collect(self)

    def step(self):
        '''Advance all agents once; halt when every agent is happy.'''
        self.happy = 0  # agents re-count happiness during the step
        self.schedule.step()
        # collect data
        self.datacollector.collect(self)
        if self.happy == self.schedule.get_agent_count():
            self.running = False
class Schelling(Model):
    '''
    Model class for the Schelling segregation model. Uses the model-owned
    RNG (self.random) so runs are reproducible under Mesa seeding.
    '''

    def __init__(self, height=20, width=20, density=0.8, minority_pc=0.2,
                 homophily=3):
        '''Build the torus grid and scatter agents at the given density.'''
        self.height = height
        self.width = width
        self.density = density
        self.minority_pc = minority_pc
        self.homophily = homophily

        self.schedule = RandomActivation(self)
        self.grid = SingleGrid(height, width, torus=True)

        self.happy = 0
        self.datacollector = DataCollector(
            {"happy": "happy"},  # Model-level count of happy agents
            # For testing purposes, agent's individual x and y
            {"x": lambda a: a.pos[0],
             "y": lambda a: a.pos[1]})

        # One density draw per cell; a second draw (occupied cells only)
        # decides minority membership — identical RNG call order to the
        # original nested-if version.
        for _, col, row in self.grid.coord_iter():
            if self.random.random() < self.density:
                kind = 1 if self.random.random() < self.minority_pc else 0
                resident = SchellingAgent((col, row), self, kind)
                self.grid.position_agent(resident, (col, row))
                self.schedule.add(resident)

        self.running = True
        self.datacollector.collect(self)

    def step(self):
        '''Advance every agent once; stop when all of them are happy.'''
        self.happy = 0  # agents re-count happiness during the step
        self.schedule.step()
        # collect data
        self.datacollector.collect(self)
        if self.happy == self.schedule.get_agent_count():
            self.running = False
class CovModel(Model):
    """SIR-style epidemic model; agents may wear masks (port_mask)."""

    def __init__(self, height=5, width=5, density=0.8, minority_pc=0.2,
                 d_pm=0.8, confinement=False):
        # density: fraction of cells occupied; minority_pc: fraction of
        # occupants starting infected; d_pm: probability an agent wears a mask.
        self.height = height
        self.width = width
        self.density = density
        self.minority_pc = minority_pc
        self.d_pm = d_pm
        self.schedule = RandomActivation(self)
        self.grid = SingleGrid(width, height, torus=True)
        self.confinement = confinement
        self.infecte = 0  # infected count ("infecté")
        self.sucep = 0    # susceptible count
        self.retab = 0    # recovered count ("rétabli")
        self.runs = 0     # number of executed steps
        self.datacollector = DataCollector(model_reporters={
            "infecte": "infecte",
            "suceptible": "sucep",
            "retabli": "retab"
        })
        for cell in self.grid.coord_iter():
            cell_content, x, y = cell
            if self.random.random() < self.density:
                if self.random.random() < self.minority_pc:
                    # Positional args follow CovAgent's signature defined
                    # elsewhere; the uniform draws presumably parameterise
                    # infectivity and disease duration — TODO confirm against
                    # CovAgent.__init__.
                    agent = CovAgent((x, y), self, 1, 0, None,
                                     random.uniform(0, 1),
                                     random.uniform(5, 15))
                    self.infecte += 1
                else:
                    agent = CovAgent((x, y), self, 0, 0, random.uniform(0, 1))
                    self.sucep += 1
                # NOTE(review): mixes self.random (Mesa-seedable) with the
                # module-level `random` — seeding the model alone does not
                # make runs reproducible.
                if self.random.random() < self.d_pm:
                    agent.port_mask = 1
                self.grid.position_agent(agent, (x, y))
                self.schedule.add(agent)
        self.running = True
        self.datacollector.collect(self)

    def step(self):
        # Counters are zeroed here and rebuilt by agents during the step.
        self.runs += 1
        self.infecte = 0
        self.sucep = 0
        self.retab = 0
        self.schedule.step()
        # collect data
        self.datacollector.collect(self)
        # Halt once the infection has died out.
        if self.infecte == 0:
            self.running = False
class BurglaryModel(Model):
    """Lattice burglary model: one LatticeAgent per cell, tracking running
    statistics of attractiveness and burglar counts."""

    # Model constants and running-statistic defaults.
    DELTA_T = 1 / 100   # time step
    OMEGA = 1 / 15      # decay rate
    A0 = 1 / 30         # baseline attractiveness
    eta = 0
    theta = 0
    gamma = 0
    avgA = A0
    varA = 0
    avgBD = theta * gamma / OMEGA
    avgN = (gamma * DELTA_T / (1 - math.exp(-avgA * DELTA_T)))
    id = 0

    """A model with some number of agents."""

    def __init__(self, width, height, eta, theta, gamma):
        super().__init__()
        self.grid = SingleGrid(width, height, True)
        self.schedule = RandomActivation(self)
        self.eta = eta
        self.theta = theta
        self.gamma = gamma
        self.nAgents = width * height
        self.avgA = self.A0
        self.avgBD = theta * gamma / self.OMEGA
        self.avgN = (gamma * self.DELTA_T /
                     (1 - math.exp(-self.avgA * self.DELTA_T)))
        # One LatticeAgent per cell. Ids advance by 2 per agent (kept as-is:
        # they only need to be unique).
        for i in range(width):
            for j in range(height):
                newId = self.id + 1
                newAgent = LatticeAgent(newId, self, [], (j, i))
                self.schedule.add(newAgent)
                self.grid.position_agent(newAgent, j, i)
                self.id = newId + 1
        # Seed the expected number of burglars onto random cells.
        for k in range(round(self.avgN * width * height)):
            acell = random.choice(self.schedule.agents)
            acell.burglers.append(Burgler())

    def step(self):
        """Advance all agents, then update running means and the variance."""
        self.schedule.step()
        for agent in self.schedule.agents:
            # Exponentially-weighted running means over the lattice.
            self.avgA = (self.avgA + agent.attractiveness) / 2
            self.avgBD = (self.avgBD + agent.burglerDynamic) / 2
            self.avgN = (self.avgN + len(agent.burglers)) / 2
        sumV = 0
        for agent in self.schedule.agents:
            # BUG FIX: the squared deviation was assigned (=) instead of
            # accumulated (+=), so varA reflected only the last agent visited.
            sumV += ((agent.attractiveness - self.avgA) *
                     (agent.attractiveness - self.avgA))
        # Sample variance (Bessel-corrected).
        self.varA = sumV / (len(self.schedule.agents) - 1)
class Pandemic(Model):
    """Modeling an infection spreading across a population"""

    def __init__(self, height=100, width=100, N=500, N_initial_infected=2,
                 infect_prob=0.05, min_time_disease=5, max_time_disease=15,
                 death_rate=0.05):
        self.num_agents = N
        self.schedule = RandomActivation(self)
        self.grid = SingleGrid(width, height, False)
        self.N_infected = N_initial_infected
        self.infect_prob = infect_prob
        self.min_time_disease = min_time_disease
        self.max_time_disease = max_time_disease
        self.death_rate = death_rate
        self.running = True

        # The first N_infected agents created start infected; everyone else is
        # healthy. Each agent is dropped on a random empty cell.
        for idx in range(self.num_agents):
            initial_state = 'infected' if idx < self.N_infected else 'healthy'
            person = Person(idx, self, state=initial_state)
            self.schedule.add(person)
            self.grid.position_agent(person)

        self.datacollector = DataCollector(
            model_reporters={
                "N_infected": get_N_infected,
                "N_immune": get_N_immune,
                "N_dead": get_N_dead,
                "Average r0": get_average_r0
            },
            agent_reporters={
                "State": "state",
                "R0": "r0"
            })

    def check_N_infected(self):
        """Stop the simulation once no infected agents remain."""
        if get_N_infected(self) == 0:
            self.running = False

    def step(self):
        """Record state, check the stop condition, then step the agents."""
        self.datacollector.collect(self)
        self.check_N_infected()
        self.schedule.step()
class SchellingModel(Model):
    """
    Model class for the Schelling segregation model.
    """

    def __init__(self, height, width, density, minority_pc, homophily):
        """
        density: probability a cell is occupied; minority_pc: probability an
        occupant is of type 1; homophily: like-neighbours needed to be happy.
        """
        self.height = height
        self.width = width
        self.density = density
        self.minority_pc = minority_pc
        self.homophily = homophily
        self.schedule = RandomActivation(self)
        self.grid = SingleGrid(height, width, torus=True)
        self.happy = 0
        self.total_agents = 0
        self.datacollector = DataCollector(
            # Model-level count of unhappy agents.
            {"unhappy": lambda m: m.total_agents - m.happy},
            # For testing purposes, agent's individual x and y.
            # NOTE(review): X and Y are names defined elsewhere in this file;
            # presumably the index constants 0 and 1 — confirm.
            {"x": lambda a: a.pos[X], "y": lambda a: a.pos[Y]},
        )
        self.running = True
        # Set up agents
        # We use a grid iterator that returns
        # the coordinates of a cell as well as
        # its contents. (coord_iter)
        for cell, x, y in self.grid.coord_iter():
            if random.random() < self.density:
                if random.random() < self.minority_pc:
                    agent_type = 1
                else:
                    agent_type = 0
                # NOTE(review): other Schelling variants in this file pass
                # (pos, model, type); here the agent is given
                # (unique_id, type) — confirm this SchellingAgent's signature.
                agent = SchellingAgent(self.total_agents, agent_type)
                self.grid.position_agent(agent, x, y)
                self.schedule.add(agent)
                self.total_agents += 1

    def step(self):
        """
        Run one step of the model. If All agents are happy, halt the model.
        """
        self.happy = 0  # Reset counter of happy agents
        self.schedule.step()
        self.datacollector.collect(self)
        if self.happy == self.total_agents:
            self.running = False
class Schelling(Model):
    '''
    Model for Schelling segregation agent
    '''

    def __init__(self, height, width, density, minority_pc, homophily):
        '''Set up grid, scheduler, data collection and the agent population.'''
        self.height = height
        self.width = width
        self.density = density
        self.minority_pc = minority_pc
        self.homophily = homophily

        # grid
        self.grid = SingleGrid(height, width, torus=True)
        # schedule
        self.schedule = RandomActivation(self)

        # datacollector — reporters read agent.position, maintained by agents
        self.happy = 0
        self.datacollector = DataCollector(
            {"happy": "happy"},
            {"x": lambda a: a.position[0],
             "y": lambda a: a.position[1]})

        # agent setup: occupy each cell with probability `density`; the
        # second draw (occupied cells only) decides the minority type
        for _, col, row in self.grid.coord_iter():
            if self.random.random() < self.density:
                member_type = 1 if self.random.random() < self.minority_pc else 0
                newcomer = SchellingAgent((col, row), self, member_type)
                self.grid.position_agent(newcomer, (col, row))
                self.schedule.add(newcomer)

        self.running = True
        self.datacollector.collect(self)

    def step(self):
        '''One tick: reset the happiness counter, step agents, collect data.'''
        # reset at each step
        self.happy = 0
        self.schedule.step()
        # collect data
        self.datacollector.collect(self)
        # stop if all agents are happy
        if self.happy == self.schedule.get_agent_count():
            self.running = False
class SchellingModel(Model):
    '''
    Model class for the Schelling segregation model.
    '''

    def __init__(self, height, width, density, type_pcs=[.2, .2, .2, .2, .2]):
        '''
        height/width: grid dimensions; density: fraction of cells populated.
        type_pcs: per-type population shares (should sum to 1).
        NOTE(review): mutable default argument — harmless while read-only,
        but shared across instances if ever mutated.
        '''
        self.height = height
        self.width = width
        self.density = density
        self.type_pcs = type_pcs
        self.schedule = RandomActivation(self)
        self.grid = SingleGrid(height, width, torus=False)
        self.happy = 0
        self.datacollector = DataCollector(
            {"happy": lambda m: m.happy},  # Model-level count of happy agents
            # For testing purposes, agent's individual x and y
            {
                "x": lambda a: a.pos[0],
                "y": lambda a: a.pos[1]
            })
        self.running = True
        # Set up agents: one group per entry of type_pcs, each agent placed
        # on a random empty cell (find_empty), so placement never collides.
        # We use a grid iterator that returns
        # the coordinates of a cell as well as
        # its contents. (coord_iter)
        total_agents = self.height * self.width * self.density
        agents_by_type = [total_agents * val for val in self.type_pcs]
        for loc, types in enumerate(agents_by_type):
            # int() truncates, so the realized population can be slightly
            # below height*width*density.
            for i in range(int(types)):
                pos = self.grid.find_empty()
                agent = SchellingAgent(pos, self, loc)
                self.grid.position_agent(agent, pos)
                self.schedule.add(agent)

    def step(self):
        '''
        Run one step of the model. If All agents are happy, halt the model.
        '''
        self.happy = 0  # Reset counter of happy agents
        self.schedule.step()
        self.datacollector.collect(self)
        if self.happy == self.schedule.get_agent_count():
            self.running = False
class SchellingModel(Model):
    '''
    Schelling model class
    '''

    def __init__(self, width=5, height=5, threshold=0.5,
                 population_density=0.8, population_breakdown=0.5):
        '''
        Initialize the model

        Args:
            width: Width of the grid containing agents.
            height: Height of the grid containing agents.
            threshold: Homophily threshold, the number, from 0-8, of nearest
                neighbours at which I am so unhappy that I move.
            population_density: Proportion of cells occupied, from 0-1.
            population_breakdown: Proportion of agents of type 1, from 0-1.
        '''
        self.running = True
        self.height = height
        self.width = width
        self.threshold = threshold
        self.population_density = population_density
        self.population_breakdown = population_breakdown
        self.no_happy_this_timestep = 0
        self.schedule = RandomActivation(self)
        self.grid = SingleGrid(width, height, torus=True)
        self.datacollector = DataCollector(
            {"happy": lambda m: m.no_happy_this_timestep},
            {"x": lambda a: a.pos[0], "y": lambda a: a.pos[1]})

        # Fill each cell with probability population_density; a second draw
        # (occupied cells only) picks the agent's type.
        for _, col, row in self.grid.coord_iter():
            if random.random() < self.population_density:
                kind = 1 if random.random() < self.population_breakdown else 0
                occupant = Agent(self, (col, row), kind)
                self.grid.position_agent(occupant, (col, row))
                self.schedule.add(occupant)

    def step(self):
        '''
        Update model once in each time step
        '''
        self.no_happy_this_timestep = 0
        self.schedule.step()
        self.datacollector.collect(self)
        # End the simulation if all agents are happy since none will move
        if self.no_happy_this_timestep == self.schedule.get_agent_count():
            self.running = False
class SchellingModel(Model):
    """Schelling model on a torus grid; halts once no agent moves in a step."""

    # need to specify width, height, and density of agents in the grid
    def __init__(self, width, height, density, homophily):
        self.schedule = RandomActivation(self)
        # create the grid
        self.grid = SingleGrid(width, height, torus=True)
        self.moved = 0
        self.running = True

        # Populate cells so the overall occupancy is roughly `density`.
        # NOTE: occupancy uses the model RNG while the colour uses NumPy's
        # global RNG, exactly as before.
        for _, col, row in self.grid.coord_iter():
            if self.random.random() < density:
                colour = np.random.choice(["Orange", "Blue"])
                resident = SchellingAgent(pos=(col, row),
                                          agent_type=colour,
                                          homophily=homophily,
                                          model=self)
                self.schedule.add(resident)
                self.grid.position_agent(resident, (col, row))

        # NEW: create data collector — moves per step plus per-agent state
        self.datacollector = DataCollector(
            model_reporters={"num_moved": lambda m: m.moved},
            agent_reporters={
                "x": lambda a: a.pos[0],
                "y": lambda a: a.pos[1],
                "type": lambda a: a.type
            })

    def step(self):
        """Step all agents, report how many moved, stop when none did."""
        self.moved = 0
        self.schedule.step()
        print(f"{self.moved} agents moved in this timestep")
        # NEW: call the data collector after each step
        self.datacollector.collect(self)
        self.running = self.moved != 0
class TestSingleGrid(unittest.TestCase):
    """Tests for SingleGrid placement, removal, emptiness and movement."""

    def setUp(self):
        # A 50x50 non-toroidal grid pre-populated from TEST_AGENTS_GRID.
        self.space = SingleGrid(50, 50, False)
        self.agents = []
        for i, pos in enumerate(TEST_AGENTS_GRID):
            a = MockAgent(i, None)
            self.agents.append(a)
            self.space.place_agent(a, pos)

    def test_agent_positions(self):
        """ Ensure that the agents are all placed properly. """
        for i, pos in enumerate(TEST_AGENTS_GRID):
            a = self.agents[i]
            assert a.pos == pos

    def test_remove_agent(self):
        # Removal must clear both the agent's pos and the grid cell.
        for i, pos in enumerate(TEST_AGENTS_GRID):
            a = self.agents[i]
            assert a.pos == pos
            assert self.space.grid[pos[0]][pos[1]] == a
            self.space.remove_agent(a)
            assert a.pos is None
            assert self.space.grid[pos[0]][pos[1]] is None

    def test_empty_cells(self):
        # Fill every empty cell, then confirm the grid reports fullness.
        if self.space.exists_empty_cells():
            pytest.deprecated_call(self.space.find_empty)
            for i, pos in enumerate(list(self.space.empties)):
                a = MockAgent(-i, pos)
                self.space.position_agent(a, x=pos[0], y=pos[1])
        assert self.space.find_empty() is None
        with self.assertRaises(Exception):
            self.space.move_to_empty(a)

    def test_move_agent(self):
        # BUG FIX: renamed from `move_agent` — without the `test_` prefix
        # neither pytest nor unittest ever collected or ran this test.
        agent_number = 0
        initial_pos = TEST_AGENTS_GRID[agent_number]
        final_pos = (7, 7)

        _agent = self.agents[agent_number]

        assert _agent.pos == initial_pos
        assert self.space.grid[initial_pos[0]][initial_pos[1]] == _agent
        assert self.space.grid[final_pos[0]][final_pos[1]] is None
        self.space.move_agent(_agent, final_pos)
        assert _agent.pos == final_pos
        assert self.space.grid[initial_pos[0]][initial_pos[1]] is None
        assert self.space.grid[final_pos[0]][final_pos[1]] == _agent
class SchellingModel(Model):
    '''
    Model class for the Schelling segregation model.
    '''

    def __init__(self, height, width, density, type_pcs=[.2, .2, .2, .2, .2]):
        '''Populate the grid with one agent group per entry of type_pcs.'''
        self.height = height
        self.width = width
        self.density = density
        self.type_pcs = type_pcs

        self.schedule = RandomActivation(self)
        self.grid = SingleGrid(height, width, torus=False)

        self.happy = 0
        self.datacollector = DataCollector(
            {"happy": lambda m: m.happy},  # Model-level count of happy agents
            # For testing purposes, agent's individual x and y
            {"x": lambda a: a.pos[0],
             "y": lambda a: a.pos[1]})
        self.running = True

        # Overall population, then one sub-population per type share; each
        # agent goes on a random empty cell so placement never collides.
        total_agents = self.height * self.width * self.density
        for group, share in enumerate(self.type_pcs):
            for _ in range(int(total_agents * share)):
                spot = self.grid.find_empty()
                member = SchellingAgent(spot, self, group)
                self.grid.position_agent(member, spot)
                self.schedule.add(member)

    def step(self):
        '''Advance all agents once; halt when every agent is happy.'''
        self.happy = 0  # re-counted by the agents during this step
        self.schedule.step()
        self.datacollector.collect(self)
        if self.happy == self.schedule.get_agent_count():
            self.running = False
class ConspiracyModel(Model):
    """Belief model: agents on a torus grid update priors via nearby contacts."""

    def __init__(self, n_agents: int, width: int, height: int,
                 agent_reach_radius: int, prior_sample_size: int,
                 initial_sd: float, start_p_h: float,
                 *args: Any, **kwargs: Any) -> None:
        """
        Create the model.

        :param n_agents: Number of agents to place.
        :param width: Width of the grid.
        :param height: Height of the grid.
        :param agent_reach_radius: Radius around the agent in which it can connect.
        :param prior_sample_size: Size of initial belief sample.
        :param initial_sd: Initial standard deviation of the agents' beliefs.
        :param start_p_h: Initial p|h value.
        """
        super().__init__(*args, **kwargs)
        self.n_agents = n_agents
        self.agent_range = agent_reach_radius
        self.prior_sample_size = prior_sample_size
        self.initial_sd = initial_sd
        self.start_p_h = start_p_h
        self.grid = SingleGrid(width, height, torus=True)
        self.schedule = RandomActivation(self)
        print('Placing agents.')
        for i in range(self.n_agents):
            agent = ConspiracyAgent(i, self)
            self.schedule.add(agent)
            # Random empty cell; raises if n_agents exceeds the grid size.
            self.grid.position_agent(agent)
        print('Finished placing agents.')

    def step(self) -> None:
        """Advance all agents and report the mean prior confidence."""
        self.schedule.step()
        print('Average confidence',
              statistics.mean(agent.prior_confidence for agent in self.agents))
        # Create a histogram:
        # TODO do this in the mesa webpage
        # NOTE(review): pyplot.show() blocks the run every 10 steps when a
        # GUI backend is active.
        if self.schedule.time % 10 == 0:
            beliefs = [agent.prior_value for agent in self.agents]
            pyplot.hist(beliefs, bins=30)
            pyplot.show()

    @property
    def agents(self) -> List[Union[Agent, ConspiracyAgent]]:
        # All scheduled agents, as held by the scheduler.
        return self.schedule.agents
class NewModel(Model):
    """Segregation model with income: 70% type-1 ('white') and 30% type-0
    ('black') agents with group-specific income distributions."""

    def __init__(self, width, height, num_agents):
        self.schedule = RandomActivation(self)
        self.grid = SingleGrid(width, height, torus=True)
        self.num_agents = num_agents

        # to collect info about how many agents are happy, average similarity
        # of neighbors, length of residence
        self.datacollector = DataCollector(
            model_reporters={"Happy": lambda m: m.happy,
                             "Similar": lambda m: m.similar,
                             "Residence": lambda m: m.avg_residence},
            agent_reporters={"x": lambda a: a.pos[0],
                             "y": lambda a: a.pos[1]})
        self.avg_residence = 0
        self.happy = 0
        self.similar = 0
        self.running = True

        for i in range(self.num_agents):
            # white
            if random.random() < 0.70:
                agent_type = 1
                income = np.random.normal(54000, 41000)
            # black
            else:
                agent_type = 0
                income = np.random.normal(32000, 40000)
            # add new agents
            agent = NewAgent(i, self, agent_type, income)
            self.schedule.add(agent)
            # BUG FIX: drawing random coordinates and placing explicitly
            # crashed with "Cell not empty" on SingleGrid whenever two agents
            # drew the same cell; let the grid pick a random *empty* cell.
            self.grid.position_agent(agent)

    def step(self):
        '''Advance the model by one step.'''
        self.happy = 0
        # NOTE(review): self.similar and self.avg_residence are not reset to 0
        # here — confirm the agents rebuild them from scratch during the step.
        self.schedule.step()
        # get the average similarity
        self.similar /= self.num_agents
        # get the average length of residence
        self.avg_residence /= self.num_agents
        self.datacollector.collect(self)
        if self.happy == self.schedule.get_agent_count():
            self.running = False
class SchellingModel(Model):
    '''
    Model class for the Schelling segregation model.
    '''

    def __init__(self, height=50, width=50, density=0.8, minority_pc=0.5,
                 homophily=3):
        '''Fill a torus grid at `density`, splitting types by `minority_pc`.'''
        self.height = height
        self.width = width
        self.density = density
        self.minority_pc = minority_pc
        self.homophily = homophily

        self.schedule = RandomActivation(self)
        self.grid = SingleGrid(height, width, torus=True)
        self.happy = 0

        # One density draw per cell, plus a type draw for occupied cells —
        # same RNG call order as the classic nested-if version.
        for _, col, row in self.grid.coord_iter():
            if random.random() < self.density:
                group = 1 if random.random() < self.minority_pc else 0
                occupant = SchellingAgent((col, row), self, group)
                self.grid.position_agent(occupant, (col, row))
                self.schedule.add(occupant)

    def step(self):
        '''
        Run one step of the model.
        '''
        self.happy = 0  # Reset counter of happy agents
        self.schedule.step()
class TestSingleGrid(unittest.TestCase):
    '''
    Test the SingleGrid object.

    Since it inherits from Grid, all the functionality tested above should
    work here too. Instead, this tests the enforcement.
    '''

    def setUp(self):
        '''
        Create a test non-toroidal grid and populate it with Mock Agents
        '''
        width = 3
        height = 5
        self.grid = SingleGrid(width, height, True)
        self.agents = []
        counter = 0
        for x in range(width):
            for y in range(height):
                if TEST_GRID[x][y] == 0:
                    continue
                counter += 1
                # Create and place the mock agent
                a = MockAgent(counter, None)
                self.agents.append(a)
                self.grid.place_agent(a, (x, y))

    def test_enforcement(self):
        '''
        Test the SingleGrid empty count and enforcement.
        '''
        assert len(self.grid.empties) == 9
        a = MockAgent(100, None)
        # Placing into an occupied cell must raise.
        with self.assertRaises(Exception):
            self.grid._place_agent((0, 1), a)

        # Place the agent in an empty cell
        self.grid.position_agent(a)
        # Test whether after placing, the empty cells are reduced by 1
        assert a.pos not in self.grid.empties
        assert len(self.grid.empties) == 8
        # Moving between empties must not change the empty count.
        for i in range(10):
            self.grid.move_to_empty(a)
        assert len(self.grid.empties) == 8

        # Place agents until the grid is full
        empty_cells = len(self.grid.empties)
        for i in range(empty_cells):
            a = MockAgent(101 + i, None)
            self.grid.position_agent(a)
        assert len(self.grid.empties) == 0

        a = MockAgent(110, None)
        with self.assertRaises(Exception):
            self.grid.position_agent(a)
        with self.assertRaises(Exception):
            # BUG FIX: was `self.move_to_empty(...)` — the resulting
            # AttributeError on the TestCase satisfied assertRaises without
            # exercising the grid's full-grid behavior at all.
            self.grid.move_to_empty(self.agents[0])
class GrAM(Model): ''' GrAM module main class. ''' # Set initial parameters def __init__(self, grid_width, grid_height, heights_grid, veg_type_grid, sand_grid, wall_grid, num_grazer): # Set parameters self.width = grid_width self.height = grid_height self.number_of_grazer = num_grazer self.veg_height_grid = heights_grid self.veg_type_grid = veg_type_grid self.sand_grid = sand_grid self.wall_grid = wall_grid self.passage_grid = np.zeros((Ncw, Nrw)) self.grid = SingleGrid(self.width, self.height, torus=True) self.schedule = RandomActivationByBreed(self) # Create the agent grazers for i in range(self.number_of_grazer): grazer = Grazers(i, self) self.grid.position_agent(grazer) self.schedule.add(grazer) self.running = True def step(self): # Execute model step and collect data self.schedule.step() def run(self, n): '''Execute the model GrAM for "n" step''' for i in range(n): self.step() return self.veg_height_grid, self.veg_type_grid, self.passage_grid
class CarModel(Model):
    '''
    Model class for the Nagel-Schreckenberg Car model.
    '''

    def __init__(self, height, width, dawdle_prob, car_amount):
        '''
        height/width: grid dimensions; dawdle_prob: probability a car slows
        down randomly; car_amount: number of cars placed on row 5.
        '''
        super().__init__()
        self.height = height
        self.width = width
        self.dawdle_prob = dawdle_prob
        self.car_amount = car_amount
        self.schedule = BaseScheduler(self)
        self.grid = SingleGrid(height, width, torus=True)
        self.place_agents()
        self.running = True

    def place_agents(self):
        # Drop each car on a random cell of row 5, retrying on collisions.
        # NOTE(review): the x-range is hard-coded to 0..99 — presumably the
        # road length; confirm it matches the grid dimension actually passed.
        # Also note this retries forever if row 5 cannot fit `car_amount`
        # cars (the SingleGrid placement exception is swallowed).
        for i in range(self.car_amount):
            while True:
                try:
                    r = random()
                    agent = CarAgent((int(r*100), 5), self, 10)
                    self.grid.position_agent(agent, int(r*100), 5)
                    self.schedule.add(agent)
                    break
                except Exception:
                    # Cell already occupied: draw a new random position.
                    continue

    def step(self):
        # Advance all cars once, in insertion order (BaseScheduler).
        self.schedule.step()
class TestSingleGrid(unittest.TestCase):
    '''
    Test the SingleGrid object.

    Since it inherits from Grid, all the functionality tested above should
    work here too. Instead, this tests the enforcement.
    '''

    def setUp(self):
        '''
        Create a test non-toroidal grid and populate it with Mock Agents
        '''
        self.grid = SingleGrid(3, 5, True)
        self.agents = []
        counter = 0
        for y in range(3):
            for x in range(5):
                if TEST_GRID[y][x] == 0:
                    continue
                counter += 1
                # Create and place the mock agent
                a = MockAgent(counter, None)
                self.agents.append(a)
                self.grid.place_agent(a, (x, y))

    def test_enforcement(self):
        '''
        Test the SingleGrid empty count and enforcement.
        '''
        assert len(self.grid.empties) == 10
        a = MockAgent(100, None)
        # Placing into an occupied cell must raise.
        with self.assertRaises(Exception):
            self.grid._place_agent((1, 0), a)

        # Place the agent in an empty cell
        self.grid.position_agent(a)
        assert a.pos not in self.grid.empties
        assert len(self.grid.empties) == 9
        # Moving between empties must not change the empty count.
        for i in range(10):
            self.grid.move_to_empty(a)
        assert len(self.grid.empties) == 9

        # Place agents until the grid is full
        for i in range(9):
            a = MockAgent(101 + i, None)
            self.grid.position_agent(a)
        assert len(self.grid.empties) == 0

        a = MockAgent(110, None)
        with self.assertRaises(Exception):
            self.grid.position_agent(a)
        with self.assertRaises(Exception):
            # BUG FIX: was `self.move_to_empty(...)` — the resulting
            # AttributeError on the TestCase satisfied assertRaises without
            # exercising the grid's full-grid behavior at all.
            self.grid.move_to_empty(self.agents[0])
class EconMod(Model):
    '''
    Model class for arming model.

    States are scattered over a toroidal grid at the given density; each
    gets a Pareto-drawn starting economy, a truncated-normal growth rate and
    domestic-need share, and an initial arms stock of 5% of its economy.

    Args:
        height, width: grid dimensions.
        density: probability that a cell holds a state agent.
        num_adversaries: adversary count passed to each state agent.
        pareto_scale: shape parameter for the Pareto draw of starting wealth.
        domestic: upper bound of the domestic-need distribution.
        expend: expenditure parameter passed to each state agent.
    '''
    def __init__(self, height, width, density, num_adversaries,
                 pareto_scale, domestic, expend):
        # BUGFIX: the original signature listed ``num_adversaries`` twice,
        # which is a SyntaxError ("duplicate argument in function
        # definition") -- the module could never even be imported.
        self.height = height
        self.width = width
        self.density = density
        self.expend = expend
        self.domestic = domestic  # upper bound on distribution
        self.num_adversaries = num_adversaries

        self.schedule = RandomActivation(self)  # All agents act at once
        self.grid = SingleGrid(height, width, torus=True)
        self.datacollector = DataCollector(
            # Collect data on each agent's arms levels
            agent_reporters={
                "Arms": "arms",
                "Military_Burden": "mil_burden",
                "Econ": "econ",
                "Domestic": "domestic",
                "Expend": "expend"
            })

        # Set up agents: one state per cell, with probability `density`.
        for cell in self.grid.coord_iter():
            x = cell[1]
            y = cell[2]
            if random.random() < self.density:
                # Set starting economy (heavy-tailed, floor of 11)
                econ_start = math.ceil(np.random.pareto(pareto_scale)) + 10
                # Grow around 3%
                econ_growth = 0.01 * truncnorm.rvs(1.5, 6, 1)
                # Domestic need drawn from a truncated normal on
                # [0.04, self.domestic] (a/b are in standardized units).
                lower = 0.04
                upper = self.domestic
                mu = (upper + lower) / 2
                sigma = 0.05  # based on real dist
                domestic_need = truncnorm.rvs((lower - mu) / sigma,
                                              (upper - mu) / sigma,
                                              loc=mu, scale=sigma)
                # starting percent of wealth spent on weapons
                arms_start_perc = 0.05
                arms = arms_start_perc * econ_start
                # create agent
                agent = state((x, y), self,
                              econ_start=econ_start,
                              econ_growth=econ_growth,
                              arms=arms,
                              domestic=domestic_need,
                              expend=self.expend,
                              num_adversaries=num_adversaries)
                # place agent in grid and register with the scheduler
                self.grid.position_agent(agent, (x, y))
                self.schedule.add(agent)

        self.running = True
        self.datacollector.collect(self)

    def step(self):
        '''
        Run one step of the model.
        '''
        self.schedule.step()
        # collect data
        self.datacollector.collect(self)
class SeparationBarrierModel(Model):
    '''
    Spatial model of settler/Palestinian interaction on a non-toroidal grid.

    Agents are scattered in __init__ (Palestinians everywhere at
    ``palestinian_density``; settlers only in the top band of the grid).
    When violence occurs, :meth:`set_barrier` walls off a cell near the
    victim, relocating any Palestinian occupant first.
    '''
    def __init__(self, height, width, palestinian_density, settlement_density,
                 settlers_violence_rate, settlers_growth_rate, suicide_rate,
                 greed_level, settler_vision=1, palestinian_vision=1,
                 movement=True, max_iters=1000):
        super(SeparationBarrierModel, self).__init__()
        self.height = height
        self.width = width
        self.palestinian_density = palestinian_density
        self.settler_vision = settler_vision
        self.palestinian_vision = palestinian_vision
        self.settlement_density = settlement_density
        self.movement = movement
        self.running = True
        self.max_iters = max_iters
        self.iteration = 0
        self.schedule = RandomActivation(self)
        self.settlers_violence_rate = settlers_violence_rate
        self.settlers_growth_rate = settlers_growth_rate
        self.suicide_rate = suicide_rate
        self.greed_level = greed_level
        # Running total across all steps; per-step counts are accumulated
        # into self.violence_count by the agents during schedule.step().
        self.total_violence = 0

        self.grid = SingleGrid(height, width, torus=False)

        model_reporters = {
        }
        agent_reporters = {
            # "x": lambda a: a.pos[0],
            # "y": lambda a: a.pos[1],
        }
        self.dc = DataCollector(model_reporters=model_reporters,
                                agent_reporters=agent_reporters)
        # Hand-rolled unique-id counter used for every agent created below.
        self.unique_id = 0

        # Israelis and palestinans split the region in half
        for (contents, x, y) in self.grid.coord_iter():
            if random.random() < self.palestinian_density:
                palestinian = Palestinian(self.unique_id, (x, y),
                                          vision=self.palestinian_vision,
                                          breed="Palestinian",
                                          model=self)
                self.unique_id += 1
                self.grid.position_agent(palestinian, x, y)
                self.schedule.add(palestinian)
            # Settlers only appear in the top band whose depth is set by
            # settlement_density.
            elif ((y > (self.grid.height) * (1 - self.settlement_density))
                  and random.random() < self.settlement_density):
                settler = Settler(self.unique_id, (x, y),
                                  vision=self.settler_vision, model=self,
                                  breed="Settler")
                self.unique_id += 1
                self.grid.position_agent(settler, x, y)
                self.schedule.add(settler)

    def add_settler(self, pos):
        '''Create a new settler at ``pos`` and register it.'''
        settler = Settler(self.unique_id, pos,
                          vision=self.settler_vision, model=self,
                          breed="Settler")
        self.unique_id += 1
        self.grid.position_agent(settler, pos[0], pos[1])
        self.schedule.add(settler)

    def set_barrier(self, victim_pos,
                    violent_pos):
        '''Place a Barrier on the furthest empty-or-Palestinian cell within
        greed_level+1 of the victim, relocating any Palestinian there first.

        ``violent_pos`` is currently unused (see commented-out relocation
        code below).
        '''
        # print("Set barrier - Greed level", self.greed_level)
        visible_spots = self.grid.get_neighborhood(victim_pos,
                                                   moore=True,
                                                   radius=self.greed_level + 1)
        furthest_empty = self.find_furthest_empty_or_palestinian(victim_pos, visible_spots)
        x, y = furthest_empty
        # NOTE: grid is indexed [y][x] here (row-major access).
        current = self.grid[y][x]
        # print ("Set barrier!!", pos, current)
        free = True
        if (current is not None and current.breed == "Palestinian"):
            # print ("Relocating Palestinian")
            free = self.relocate_palestinian(current, current.pos)

        if (free):
            # Barrier agents all share the sentinel unique_id -1.
            barrier = Barrier(-1, furthest_empty, model=self)
            self.grid.position_agent(barrier, x, y)

        # Relocate the violent palestinian
        # violent_x, violent_y = violent_pos
        # if violent_pos != furthest_empty:
        #     violent_palestinian = self.grid[violent_y][violent_x]
        #     self.relocate_palestinian(violent_palestinian, furthest_empty)

    def relocate_palestinian(self, palestinian, destination):
        '''Move ``palestinian`` to the nearest empty cell around
        ``destination`` (falling back to any empty cell on the grid).

        Returns False only when the whole grid is full.
        '''
        # print ("Relocating Palestinian in ", palestinian.pos, "To somehwhere near ", destination)
        visible_spots = self.grid.get_neighborhood(destination,
                                                   moore=True,
                                                   radius=palestinian.vision)
        nearest_empty = self.find_nearest_empty(destination, visible_spots)
        # print("First Nearest empty to ", palestinian.pos, " Is ", nearest_empty)
        if (nearest_empty):
            self.grid.move_agent(palestinian, nearest_empty)
        else:
            # print ("Moveing to random empty")
            if (self.grid.exists_empty_cells()):
                self.grid.move_to_empty(palestinian)
            else:
                return False
        return True

    def find_nearest_empty(self, pos, neighborhood):
        '''Return the empty cell in ``neighborhood`` closest to ``pos``,
        or None if every cell is occupied.'''
        nearest_empty = None
        sorted_spots = self.sort_neighborhood_by_distance(pos, neighborhood)
        index = 0
        while (nearest_empty is None and index < len(sorted_spots)):
            if self.grid.is_cell_empty(sorted_spots[index]):
                nearest_empty = sorted_spots[index]
            index += 1
        return nearest_empty

    def find_furthest_empty_or_palestinian(self, pos, neighborhood):
        '''Return the cell in ``neighborhood`` furthest from ``pos`` that is
        either empty or occupied by a Palestinian.'''
        furthest_empty = None
        sorted_spots = self.sort_neighborhood_by_distance(pos, neighborhood)
        sorted_spots.reverse()  # furthest first
        index = 0
        while (furthest_empty is None and index <
               len(sorted_spots)):
            spot = sorted_spots[index]
            if self.grid.is_cell_empty(spot) or self.grid[spot[1]][spot[0]].breed == "Palestinian":
                furthest_empty = sorted_spots[index]
            index += 1
        return furthest_empty

    def sort_neighborhood_by_distance(self, from_pos, neighbor_spots):
        '''Sort cells by wrap-around distance from ``from_pos`` (nearest first).'''
        from_x, from_y = from_pos
        return sorted(neighbor_spots,
                      key=lambda spot: self.eucledean_distance(
                          from_x, spot[0], from_y, spot[1],
                          self.grid.width, self.grid.height))

    def eucledean_distance(self, x1, x2, y1, y2, w, h):
        # http://stackoverflow.com/questions/2123947/calculate-distance-between-two-x-y-coordinates
        # NOTE(review): this is the *toroidal* (wrap-around) Euclidean
        # distance, although the grid is built with torus=False -- confirm
        # the wrap-around metric is intended here.
        return math.sqrt(min(abs(x1 - x2), w - abs(x1 - x2)) ** 2
                         + min(abs(y1 - y2), h - abs(y1 - y2)) ** 2)

    def step(self):
        """ Advance the model by one step and collect data. """
        # Per-step counter, incremented by agents during schedule.step().
        self.violence_count = 0
        # for i in range(100):
        self.schedule.step()
        self.total_violence += self.violence_count
        # average = self.violence_count / 100
        # print("Violence average %f " % average)
        print("Total Violence: ", self.total_violence)
class SimModel(Model):
    '''
    Classroom simulation model for one school class over a school year.

    Pupils are seated in ability-sorted (or shuffled) groups on a grid and
    accumulate maths learning tick-by-tick; school days, weekends and
    holidays are tracked against a real 2021-22 calendar.  Results are
    written out through ``output_data_writer`` when the run completes.

    Args:
        all_data: source providing ``get_class_data(class_id)``.
        model_initial_state: mutable per-step state (learning/disruptive counts).
        output_data_writer: sink with ``write_data(df, class_id, class_size)``.
        model_params: dataclass of model parameters (ModelParamType).
        class_id_and_rng: optional ``(class_id, numpy Generator)`` pair.
        class_id: optional class id (used when class_id_and_rng is not given).
        speedup: >1 coarsens the tick granularity to run faster.
        **kwargs: may override any model_params field, plus ``class_id``,
            ``write_file`` and ``summary_data``.
    '''
    def __init__(
        self,
        all_data,
        model_initial_state,
        output_data_writer,
        model_params,
        class_id_and_rng=None,
        class_id=None,
        speedup=1,
        **kwargs,
    ):
        self.data = all_data
        self.model_state = model_initial_state
        self.output_data_writer = output_data_writer
        # BUGFIX: initialise class_id so the logger line and the later
        # ``elif not self.class_id`` check cannot hit an unbound attribute
        # when neither class_id_and_rng nor class_id is supplied.
        self.class_id = None
        if class_id_and_rng:
            (self.class_id, self.rng) = class_id_and_rng
        else:
            self.rng = np.random.default_rng()
            if class_id:
                self.class_id = class_id
        logger.info("Modelling class %s", self.class_id)
        self.model_params = model_params
        self.speedup = speedup
        self.write_file = False

        # Update any parameters passed as kwargs
        param_dict = dataclasses.asdict(self.model_params)
        update_params = False
        for kw in kwargs:
            if kw in param_dict:
                param_dict[kw] = kwargs[kw]
                update_params = True
        if update_params:
            self.model_params = ModelParamType(**param_dict)
        if "class_id" in kwargs:
            self.class_id = kwargs["class_id"]
        elif not self.class_id:
            self.class_id = 489  # default demo class
        if "write_file" in kwargs:
            self.write_file = kwargs["write_file"]

        # Get summary data to display to users
        self.class_summary_data = None
        if "summary_data" in kwargs and kwargs["summary_data"] is not None:
            summary_df = kwargs["summary_data"]
            class_summary_data = summary_df[summary_df["class_id"] ==
                                            self.class_id]
            if not class_summary_data.empty:
                self.class_summary_data = class_summary_data

        self.class_data = self.data.get_class_data(self.class_id)
        self.class_size = len(self.class_data)
        self.schedule = RandomActivation(self)

        # Calculate steps per day and holidays
        self.home_learning_steps = 0
        # Defensive init: update_school_time() reads this on the first tick
        # of a 1-tick school day before any branch has assigned it.
        self.home_learning_days = 0
        # Calculate number of days from 1st September to 16th July inclusive
        self.start_date = datetime.date(2021, 9, 1)
        self.current_date = self.start_date
        self.end_date = datetime.date(2022, 7, 16)
        self.total_days = (self.end_date - self.start_date).days
        self.ticks_per_school_day = round(
            TruncatedNormalGenerator.get_single_value(
                self.model_params.maths_ticks_mean,
                self.model_params.maths_ticks_sd,
                10,
                600,
            ))
        self.ticks_per_home_day = self.model_params.ticks_per_home_day
        self.set_speedup()
        logger.debug("%s ticks per school day", self.ticks_per_school_day)
        self.holiday_week_numbers = self.calculate_holiday_weeks(
            self.start_date,
            self.end_date,
            self.model_params.number_of_holidays,
            self.model_params.weeks_per_holiday,
        )

        # Create truncnorm generators for school and home learning random
        # increments.
        # Use batch sizes as total days * class_size * ticks per day
        # (overestimate to ensure we only generate values once)
        batch_multiplier = self.total_days * self.class_size
        self.school_learning_random_gen = TruncatedNormalGenerator(
            5 / self.model_params.school_learn_mean_divisor,
            self.model_params.school_learn_sd,
            lower=0,
            batch_size=self.ticks_per_school_day * batch_multiplier,
        )
        self.home_learning_random_gen = TruncatedNormalGenerator(
            5 / 2000,
            0.08,
            lower=0,
            batch_size=self.ticks_per_home_day * batch_multiplier,
        )

        # Create TeacherVariable instances for quality and control
        self.teacher_control_variable = TeacherVariable(
            self.model_params.teacher_control_mean,
            self.model_params.teacher_control_sd,
            self.model_params.teacher_control_variation_sd,
            self.rng,
            self.total_days,
        )
        self.teacher_quality_variable = TeacherVariable(
            self.model_params.teacher_quality_mean,
            self.model_params.teacher_quality_sd,
            self.model_params.teacher_quality_variation_sd,
            self.rng,
            self.total_days,
        )

        # Create grid with torus = False - in a real class students at
        # either end of the classroom don't interact
        self.grid_params = get_grid_size(len(self.class_data),
                                         self.model_params.group_size)
        self.grid = SingleGrid(self.grid_params.width,
                               self.grid_params.height,
                               torus=False)

        if self.model_params.group_by_ability:
            sorted_pupils = self.class_data.sort_values("Ability")
        else:
            sorted_pupils = self.class_data.sample(frac=1)

        # Set up agents: fill groups left-to-right, full groups first.
        pupil_counter = 0
        for i in range(self.grid_params.n_groups):
            group_size = self.grid_params.max_group_size
            if i >= self.grid_params.n_full_groups:
                group_size -= 1
            group_pupils = sorted_pupils.iloc[pupil_counter:pupil_counter +
                                              group_size]
            group_x = math.floor(i / self.grid_params.n_group_rows)
            group_y = i % self.grid_params.n_group_rows
            for j, row in enumerate(group_pupils.iterrows()):
                index, pupil_data = row
                # Work out position on grid (the +group_x/+group_y terms
                # leave a one-cell gap between groups)
                x = (group_x * self.grid_params.group_width +
                     group_x) + math.floor(j / self.grid_params.group_height)
                y = (group_y * self.grid_params.group_height +
                     group_y) + (j % self.grid_params.group_height)
                # create agents from data
                agent = Pupil(
                    (x, y),
                    self,
                    pupil_data.student_id,
                    PupilLearningState.YELLOW,
                    pupil_data.Inattentiveness,
                    pupil_data.hyper_impulsive,
                    pupil_data.Deprivation,
                    pupil_data.start_maths,
                    pupil_data.Ability,
                    group_size,
                )
                # Place Agents on grid
                self.grid.position_agent(agent, x, y)
                self.schedule.add(agent)
            pupil_counter += group_size

        # Collecting data while running the model
        self.pupil_state_datacollector = DataCollector(
            model_reporters={
                "Learning Students": get_num_learning,
                "Passive Students": get_num_passive,
                "Disruptive Students": get_num_disruptors,
            })
        self.pupil_state_datacollector.collect(self)
        self.mean_maths = compute_ave(self)
        self.agent_datacollector = DataCollector(
            agent_reporters={
                "student_id": "student_id",
                "end_maths": "e_math",
                "start_maths": "s_math",
                "Ability": "ability",
                "Inattentiveness": "inattentiveness",
                "hyper_impulsive": "hyper_impulsive",
                "Deprivation": "deprivation",
            })
        # Monitor mean maths score
        self.maths_datacollector = DataCollector({
            "Date": get_date_for_chart,
            "Mean Score": compute_ave,
        })
        self.maths_datacollector.collect(self)
        self.running = True

    def set_speedup(self):
        '''Coarsen tick granularity by ``self.speedup``.

        Rounds the effective ticks-per-day to an integer and recomputes the
        actual achieved speedup (which may differ from the requested one).
        '''
        if self.speedup > 1:
            min_ticks = min(self.ticks_per_school_day, self.ticks_per_home_day)
            # Can't have fewer than 1 tick per school day so reduce the
            # speedup accordingly
            if self.speedup > min_ticks:
                self.speedup = min_ticks
            # Speedup should be divisible by self.ticks_per_school_day
            # e.g. if 10 ticks per day:
            #   Can't have speedup more than 10 as we need 1 tick per day
            #   If speedup is 5 then we have 2 ticks per day
            #   If speedup is 8 then we would have 10/8 = 1.25 ticks per day
            #     Round that to 1, then speedup would be 10 (=10/1) not 8
            #   If speedup is 6 then we would have 10/6 = 1.67 ticks per day
            #     Round that to 2, then speedup would be 5 (=10/2) not 6
            speedup_ticks_per_school_day = round(self.ticks_per_school_day /
                                                 self.speedup)
            self.speedup = (self.ticks_per_school_day /
                            speedup_ticks_per_school_day)
            self.ticks_per_school_day = speedup_ticks_per_school_day
            speedup_ticks_per_home_day = round(self.ticks_per_home_day /
                                               self.speedup)
            self.home_speedup = (self.ticks_per_home_day /
                                 speedup_ticks_per_home_day)
            # BUGFIX: was assigned speedup_ticks_per_school_day (copy-paste),
            # which silently replaced the home-day tick count with the
            # school-day one and left speedup_ticks_per_home_day unused.
            self.ticks_per_home_day = speedup_ticks_per_home_day
        else:
            self.home_speedup = 1

    @staticmethod
    def calculate_holiday_weeks(start_date, end_date, number_of_holidays,
                                weeks_per_holiday):
        """Calculate which weeks should be holidays given the total number
        of days from start to end of the school year, and the number and
        length of holidays

        Returns an array of week numbers which are holidays
        """
        # Get start of first week of term
        # Go back to start of week
        start_week = start_date - datetime.timedelta(days=start_date.weekday())
        if start_date.weekday() >= 5:
            # start_date is weekend so go to following Monday
            start_week += datetime.timedelta(weeks=1)
        # Get difference from following week after end day
        total_weeks = math.ceil(
            (end_date + datetime.timedelta(days=1) - start_week).days / 7)
        n_terms = number_of_holidays + 1
        n_holiday_weeks = number_of_holidays * weeks_per_holiday
        n_school_weeks = total_weeks - n_holiday_weeks
        # Spread the school weeks as evenly as possible over the terms,
        # giving the earlier terms the remainder weeks.
        min_weeks_per_term = math.floor(n_school_weeks / n_terms)
        remainder_weeks = n_school_weeks % n_terms
        weeks_per_term = []
        for i in range(n_terms):
            term_weeks = min_weeks_per_term
            if i < remainder_weeks:
                term_weeks += 1
            weeks_per_term.append(term_weeks)
        holiday_week_numbers = []
        current_week = 0
        for term_weeks in weeks_per_term[:-1]:
            start_week = current_week + term_weeks
            holiday_week_numbers.extend(
                list(range(start_week, start_week + weeks_per_holiday)))
            current_week += term_weeks + weeks_per_holiday
        return holiday_week_numbers

    def update_school_time(self):
        '''Advance the calendar bookkeeping after each tick.

        On the penultimate tick of a school day, queue up home-learning
        steps for the evening/weekend/holiday that follows; on the first
        tick of the next day, advance ``current_date`` past those days.
        '''
        time_in_day = self.schedule.steps % self.ticks_per_school_day
        if (time_in_day == self.ticks_per_school_day - 1
                or self.ticks_per_school_day == 1):
            # Have just finished the penultimate tick of school day, so add
            # home learning time ready for the next tick
            self.home_learning_days = 1
            # If it's Friday add 2 more days' home learning for the weekend
            if self.current_date.weekday() == 4:
                self.home_learning_days += 2
            # Is it a holiday?
            week_number = math.floor(
                (self.current_date - self.start_date).days / 7)
            if week_number in self.holiday_week_numbers:
                # Add holiday weeks
                self.home_learning_days += 7 * self.model_params.weeks_per_holiday
            self.home_learning_steps = (self.home_learning_days *
                                        self.ticks_per_home_day)
        else:
            self.home_learning_steps = 0

        if time_in_day == 0:
            # Update current date by self.home_learning_days now we've
            # completed the last tick of the day
            self.current_date += datetime.timedelta(
                days=self.home_learning_days)
            self.home_learning_days = 0
            # Update teacher control/teacher_quality
            self.teacher_control_variable.update_current_value()
            self.teacher_quality_variable.update_current_value()
            # Reset all pupils's states ready for the next day
            for pupil in self.schedule.agents:
                pupil.resetState()

    def step(self):
        '''Run one tick; on the final day, flush data and shut down.'''
        # Reset counter of learning and disruptive agents
        self.model_state.learning_count = 0
        self.model_state.disruptive_count = 0
        # Advance the model by one step
        self.schedule.step()
        self.update_school_time()

        # collect data
        self.maths_datacollector.collect(self)
        self.pupil_state_datacollector.collect(self)
        self.mean_maths = compute_ave(self)
        if self.current_date > self.end_date or not self.running:
            logger.debug("Finished run; collecting data")
            self.running = False
            # Remove tngs (drop the big random-value generators so the
            # finished model pickles/garbage-collects cheaply)
            self.school_learning_random_gen = None
            self.home_learning_random_gen = None
            for pupil in self.schedule.agents:
                pupil.school_learning_ability_random_gen = None
                pupil.home_learning_ability_random_gen = None
            self.agent_datacollector.collect(self)
            agent_data = self.agent_datacollector.get_agent_vars_dataframe()
            logger.debug("Got agent data")
            self.output_data_writer.write_data(agent_data, self.class_id,
                                               self.class_size)
            logger.debug("Written to output file")
            self.agent_datacollector = None
            self.maths_datacollector = None
            self.pupil_state_datacollector = None
            logger.info("Completed run for class %s", self.class_id)
class SchellingModel(Model):
    '''
    Model class for the Schelling segregation model.

    Agents of two types are scattered over a toroidal grid; the model halts
    once every agent reports itself happy.
    '''

    def __init__(self, height, width, density, minority_pc, homophily):
        '''
        Args:
            height, width: grid dimensions.
            density: chance that any given cell is occupied.
            minority_pc: share of agents belonging to the minority type.
            homophily: similar neighbours an agent needs to be happy.
        '''
        self.height = height
        self.width = width
        self.density = density          # percentage (empty houses)
        self.minority_pc = minority_pc  # percentage minority in the city
        self.homophily = homophily      # similar neighbours wanted nearby

        # Scheduler activates agents in random order each step.
        self.schedule = RandomActivation(self)
        # Toroidal grid: edges wrap around.
        self.grid = SingleGrid(height, width, torus=True)

        # Count of happy agents, refreshed every step.
        self.happy = 0
        self.datacollector = DataCollector(
            # Model-level count of happy agents
            {"happy": lambda m: m.happy},
            # For testing purposes, agent's individual x and y
            {"x": lambda a: a.pos[0],
             "y": lambda a: a.pos[1]})

        self.running = True

        # Populate the grid cell by cell: each cell is occupied with
        # probability `density`, and an occupant is minority (type 1) with
        # probability `minority_pc`.
        for cell in self.grid.coord_iter():
            x = cell[1]
            y = cell[2]
            if random.random() >= self.density:
                continue  # cell stays empty
            agent_type = 1 if random.random() < self.minority_pc else 0
            new_agent = SchellingAgent((x, y), self, agent_type)
            self.grid.position_agent(new_agent, (x, y))
            self.schedule.add(new_agent)

    def step(self):
        '''
        Run one step of the model. If All agents are happy, halt the model.
        '''
        self.happy = 0  # Reset counter of happy agents
        self.schedule.step()
        self.datacollector.collect(self)
        if self.happy == self.schedule.get_agent_count():
            self.running = False
class GentrificationModel(Model):
    '''
    Model class for the Gentrification model.

    Each grid cell holds a PropertyAgent; property conditions live in the
    numpy array ``self.conditions`` while incomes belong to the agents.
    Neighbourhood ``status`` drifts with income/condition changes plus
    noise, and the run ends either after 3000 steps or once status has hit
    bottom (0) and later recovered to 1 with healthy mean condition/income.
    '''
    def __init__(self, height, width, depreciation_rate, mobility, status,
                 stat_var, d_factor):
        # Set model parameters
        self.depreciation_rate = depreciation_rate  # per-step condition decay
        self.mobility = mobility
        self.status = status            # neighbourhood status in [0, 1]
        self.stat_var = stat_var        # sd of the per-step status noise
        self.d_factor = d_factor
        self.height = height
        # NOTE(review): ``width`` is not stored on self, only ``height`` --
        # confirm nothing downstream expects ``self.width``.

        # Global tracking variables
        self.mean_income = 0.0

        # SimultaneousActivation: agents stage changes, then apply together.
        self.schedule = SimultaneousActivation(self)
        self.grid = SingleGrid(height, width, torus=False)
        self.datacollector = DataCollector(model_reporters={
            "status": lambda m: m.status,
            "income": lambda m: m.mean_income,
            "condition": lambda m: m.mean_condition
        },
            agent_reporters={
                "x": lambda a: a.pos[0],
                "y": lambda a: a.pos[1]
            })
        self.running = True
        self.hit_bottom = False     # has status ever reached 0?
        self.last_bottom = 0        # step at which status last hit 0
        self.gent_time = None       # months from bottom to gentrification

        # Property conditions, indexed [x, y].
        self.conditions = np.zeros((width, height))

        # Set up agents
        # We use a grid iterator that returns
        # the coordinates of a cell as well as
        # its contents. (coord_iter)
        for cell in self.grid.coord_iter():
            x, y = cell[1], cell[2]
            self.conditions[x, y] = bounded_normal(0.50, 0.1, 0.0, 1.0)

            # Income initially differs little from property conditions;
            # rejection-sample until the draw lands in [0, 1].
            while True:
                income = self.conditions[x, y] + np.random.normal(0.0, 0.025)
                if income >= 0.0 and income <= 1.0:
                    self.mean_income += income
                    break

            agent = PropertyAgent((x, y), self, income)
            self.grid.position_agent(agent, (x, y))
            self.schedule.add(agent)

        self.mean_condition = np.sum(self.conditions) / self.conditions.size
        self.mean_income /= self.conditions.size

    def step(self):
        '''
        Run one step of the model.
        '''
        # For tracking change
        old_conditions = np.copy(self.conditions)

        # Initialize change tracking variables (agents add to income_change
        # during schedule.step()).
        self.income_change = 0.0

        self.schedule.step()

        # Update property conditions: uniform depreciation, clamped to [0, 1].
        self.conditions -= self.depreciation_rate
        self.conditions = np.clip(self.conditions, 0, 1)
        conditions_change = self.conditions - old_conditions

        # Update neighborhood status: mean change plus Gaussian noise,
        # clamped to [0, 1].
        self.status += ((self.income_change + np.sum(conditions_change)) /
                        (conditions_change.size))
        self.status += np.random.normal(0.0, self.stat_var)
        self.status = np.clip(self.status, 0, 1)

        # Update datacollector variables
        self.mean_income += self.income_change / self.conditions.size
        self.mean_condition = np.sum(self.conditions) / self.conditions.size
        self.datacollector.collect(self)

        # Remember the most recent time status bottomed out.
        if self.status == 0.0:
            self.hit_bottom = True
            self.last_bottom = self.schedule.steps

        # Hard stop after 3000 steps.
        if self.schedule.steps > 2999:
            # self.gent_time = None
            self.running = False

        # Gentrification detected: status fully recovered after a bottom,
        # with mean condition/income above threshold.  gent_time is in
        # months (12 steps per year assumed).
        if (self.status == 1.0 and
                0.5 * (self.mean_condition + self.mean_income) > 0.5 and
                self.hit_bottom == True):
            self.running = False
            self.gent_time = (self.schedule.steps - self.last_bottom) / 12
class Schelling(Model):
    '''
    Model class for the SM coupled to the Schelling segregation model.
    This class has been modified from the original mesa Schelling model.

    step() takes a 5-element ``policy`` vector that nudges the model's
    tunable parameters each step, and returns KPI lists for the caller
    (a policy-optimisation loop) rather than being a plain Mesa step.
    '''

    def __init__(self, height=20, width=20, density=0.8, minority_pc=0.2,
                 homophilyType0=0.5, homophilyType1=0.5, movementQuota=0.30,
                 happyCheckRadius=5, moveCheckRadius=10, last_move_quota=5):
        '''
        Args:
            height, width: grid dimensions (must be equal -- see
                evenness_calculation).
            density: chance a cell is occupied.
            minority_pc: share of type-1 agents.
            homophilyType0/1: similarity preference per agent type.
            movementQuota: cap on the share of agents moving per step.
            happyCheckRadius: neighbourhood radius for happiness checks.
            moveCheckRadius: radius searched for a better cell.
            last_move_quota: threshold on steps since an agent last moved.
        '''
        self.height = height
        self.width = width
        self.density = density
        self.minority_pc = minority_pc
        self.homophilyType0 = homophilyType0
        self.homophilyType1 = homophilyType1
        self.movementQuota = movementQuota
        self.happyCheckRadius = happyCheckRadius
        self.moveCheckRadius = moveCheckRadius
        self.last_move_quota = last_move_quota

        self.schedule = RandomActivation(self)
        self.grid = SingleGrid(height, width, torus=True)

        # Per-step counters (reset in step()) and cumulative trackers.
        self.happy = 0
        self.happytype0 = 0
        self.happytype1 = 0
        self.stepCount = 0
        self.evenness = 0
        self.empty = 0
        self.type0agents = 0
        self.type1agents = 0
        self.movement = 0
        self.movementtype0 = 0
        self.movementtype1 = 0
        self.movementQuotaCount = 0
        self.numberOfAgents = 0
        self.datacollector = DataCollector(
            # Model-level count of happy agents
            {"step": "stepCount", "happy": "happy", "happytype0": "happytype0",
             "happytype1": "happytype1", "movement": "movement",
             "movementtype0": "movementtype0", "movementtype1": "movementtype1",
             "evenness": "evenness", "numberOfAgents": "numberOfAgents",
             "homophilyType0": "homophilyType0",
             "homophilyType1": "homophilyType1",
             "movementQuota": "movementQuota",
             "happyCheckRadius": "happyCheckRadius",
             "last_move_quota": "last_move_quota"},
            # For testing purposes, agent's individual x and y
            {"x": lambda a: a.pos[0], "y": lambda a: a.pos[1],
             "Agent type": lambda a: a.type})
        # , "z": lambda a:a.type

        # Set up agents
        # We use a grid iterator that returns
        # the coordinates of a cell as well as
        # its contents. (coord_iter)
        for cell in self.grid.coord_iter():
            x = cell[1]
            y = cell[2]
            if self.random.random() < self.density:
                if self.random.random() < self.minority_pc:
                    agent_type = 1
                else:
                    agent_type = 0
                last_move = round(self.random.random() * 10)  # randomly assign a value from 0 to 10
                agent = SchellingAgent((x, y), self, agent_type, last_move)
                self.grid.position_agent(agent, (x, y))
                self.schedule.add(agent)
        # print("Schedule: ", len(self.schedule.agents))

        self.running = True
        self.numberOfAgents = self.schedule.get_agent_count()
        self.datacollector.collect(self)

    def step(self, policy):
        '''
        Run one step of the model. If All agents are happy, halt the model.
        Note on the eveness paramater calculation:
        It cannot be performed in the step function of the agents as then
        it would not take consider periods of time during which the agents
        are still moving, making the parameter calculation inaccurate.

        Args:
            policy: 5-element sequence of deltas (or None to skip each):
                [happyCheckRadius, movementQuota, last_move_quota,
                 homophilyType0, homophilyType1].

        Returns:
            (output_KPIs, type0agents, type1agents)
        '''
        self.happy = 0  # Reset counter of happy agents
        self.happytype0 = 0  # Reset counter of happy type 0 agents
        self.happytype1 = 0  # Reset counter of happy type 1 agents
        self.empty = 0  # Reset counter of empty cells
        self.type0agents = 0  # Reset count of type 0 agents
        self.type1agents = 0  # Reset count of type 1 agents
        self.movementQuotaCount = 0  # Reset count of the movement quota
        self.movement = 0  # Reset counter of movement of agents
        self.movementtype0 = 0  # Reset counter of movement of type 0 agents
        self.movementtype1 = 0  # Reset counter of movement of type 1 agents

        # introduction of the selected policy in the Schelling model
        # NOTE(review): each delta is applied only while the parameter is
        # strictly inside its bounds, so a parameter that reaches a bound
        # can never be pushed back -- confirm this freeze is intended.
        # happy check vision changes
        if policy[0] != None and self.happyCheckRadius < 15 and self.happyCheckRadius > 1:
            self.happyCheckRadius += policy[0]
        # movement quota changes
        if policy[1] != None and self.movementQuota < 1 and self.movementQuota > 0.05:
            self.movementQuota += policy[1]
        # last movement threshold
        if policy[2] != None and self.last_move_quota < 50 and self.last_move_quota > 0:
            self.last_move_quota += policy[2]
        # type 0 preference
        if policy[3] != None and self.homophilyType0 < 1 and self.homophilyType0 > 0:
            self.homophilyType0 += policy[3]
        # type 1 preference
        if policy[4] != None and self.homophilyType1 < 1 and self.homophilyType1 > 0:
            self.homophilyType1 += policy[4]

        # run the step for the agents
        self.schedule.step()
        # print(self.movementQuotaCount, " agents moved.")
        # print(round(self.happy/self.schedule.get_agent_count() * 100,2), "percent are happy agents.")

        # calculating empty counter
        self.empty = (self.height * self.width) - self.schedule.get_agent_count()

        # calculating type 0 and type 1 agent numbers
        for agent in self.schedule.agent_buffer(shuffled=True):
            # print(agent.type)
            if agent.type == 0:
                self.type0agents += 1
            if agent.type == 1:
                self.type1agents += 1

        # calculation of evenness (segregation parameter) using Haw (2015).
        self.evenness_calculation()

        # iterate the steps counter
        self.stepCount += 1

        # collect data
        self.datacollector.collect(self)
        # checking the datacollector
        # if self.stepCount % 2 == 0:
        #     print(self.datacollector.get_model_vars_dataframe())
        #     print(self.datacollector.get_agent_vars_dataframe())

        if self.happy == self.schedule.get_agent_count():
            self.running = False
            print("All agents are happy, the simulation ends!")

        output_KPIs = [self.evenness, self.movement, self.happy,
                       self.movementtype0, self.movementtype1,
                       self.happytype0, self.happytype1]
        return output_KPIs, self.type0agents, self.type1agents

    def evenness_calculation(self):
        '''
        To calculate the evenness parameter, one needs to first subdivide
        the grid into areas of more than one square each. The evenness will
        be then calculated based on the distribution of type 0 and type 1
        agents in each of these areas. The division into area needs to be
        done carefully as it depends on the inputs within the model (width
        and height of the grid).
        '''
        # check for a square grid
        if self.height != self.width:
            self.running = False
            print("WARNING - The grid is not a square, please insert the same width and height")

        # reset the evenness parameter
        self.evenness = 0

        # algorithm to calculate evenness
        n = 4  # number of big areas considered in width and height
        # NOTE(review): if height is not divisible by n, evenness silently
        # stays 0 for the whole run -- confirm that fallback is acceptable.
        if self.height % n == 0:
            # consider all big areas
            for big_dy in range(n):
                for big_dx in range(n):
                    # looking within one big area, going through all cells
                    # (O(cells * agents) scan -- slow for large grids)
                    listAgents = []
                    for small_dy in range(int(self.height / n)):
                        for small_dx in range(int(self.height / n)):
                            for agents in self.schedule.agent_buffer(shuffled=True):
                                if agents.pos == (self.height / n * big_dx + small_dx,
                                                  self.height / n * big_dy + small_dy):
                                    listAgents.append(agents)
                    # calculating evenness for each big area
                    countType0agents = 0  # Reset of the type counter for type 0 agents
                    countType1agents = 0  # Reset of the type counter for type 1 agents
                    # checking the type of agents in the big area
                    for agents in listAgents:
                        if agents.type == 0:
                            countType0agents += 1
                        if agents.type == 1:
                            countType1agents += 1
                    self.evenness += 0.5 * abs((countType0agents / self.type0agents) -
                                               (countType1agents / self.type1agents))
        # print("evenness :", round(self.evenness,2))
class Schelling(Model):
    """
    Model class for the Schelling segregation model.

    Three agent types in roughly equal shares; a fraction of agents
    (``cooperativeness``) is flagged as cooperative.  The run halts once
    every agent is happy.
    """

    # ANSWER --- cooperativeness = 10 in the init definition
    def __init__(self, height=30, width=30, density=0.9, homophily=3,
                 cooperativeness=0.0):
        """
        Args:
            height, width: grid dimensions; they also bound the maximum
                number of agents in the environment.
            density: population density, float between 0 and 1.
            homophily: similar neighbors required for happiness; integer
                0..8 since an agent has at most 8 neighbors.
            cooperativeness: probability that an agent is cooperative.
        """
        self.height = height
        self.width = width
        self.density = density
        self.homophily = homophily
        self.cooperativeness = cooperativeness

        # Scheduler controls the order in which agents are activated
        self.schedule = RandomActivation(self)
        self.grid = SingleGrid(width, height, torus=True)

        self.happy = 0
        self.segregation = 0

        # Obtain data after each step
        self.datacollector = DataCollector(
            {
                "happy": "happy",
                "segregation": "segregation"
            },  # Model-level count of happy agents
            # For testing purposes, agent's individual x and y
            {
                "x": lambda a: a.pos[0],
                "y": lambda a: a.pos[1]
            },
        )

        # Set up agents
        # We use a grid iterator that returns
        # the coordinates of a cell as well as
        # its contents. (coord_iter)
        for cell in self.grid.coord_iter():
            x = cell[1]
            y = cell[2]
            if self.random.random() < self.density:
                # BUGFIX: the original drew a *second* random number for the
                # elif branch, skewing the split to roughly 33/23/44 instead
                # of thirds.  A single draw gives ~33/34/33 for types 2/0/1.
                type_roll = self.random.random()
                if type_roll < 0.33:
                    agent_type = 2
                elif type_roll > 0.66:
                    agent_type = 1
                else:
                    agent_type = 0

                # ANSWER: an agent is cooperative with probability
                # `cooperativeness`.
                is_cooperative = self.random.random() < cooperativeness
                happiness_extent = 0

                # ANSWER --- Updated initialization to use new init definition
                agent = SchellingAgent((x, y), self, agent_type,
                                       is_cooperative, happiness_extent)
                self.grid.position_agent(agent, (x, y))
                self.schedule.add(agent)

        self.running = True
        self.datacollector.collect(self)

    # The class requires a step function that represent each run
    def step(self):
        """
        Run one step of the model. If All agents are happy, halt the model.
        """
        self.happy = 0  # Reset counter of happy agents
        self.segregation = 0  # Reset counter of segregated agents
        self.schedule.step()
        # collect data
        self.datacollector.collect(self)

        # Termination is managed here: stop once every agent is happy.
        if self.happy == self.schedule.get_agent_count():
            self.running = False
class PolicyEmergenceSM(Model):
    '''
    Simplest Model (SM) for the policy emergence model.

    Orchestrates the full policy-emergence cycle each step: module-interface
    input, electorate influence, optional coalition formation, agenda
    setting, and policy formulation.  Behaviour is switched by `PE_type`
    ('SM', 'A+PL', 'A+Co', '+PK' fragments).
    '''

    def __init__(self, PE_type, SM_inputs, AplusPL_inputs, AplusCo_inputs,
                 AplusPK_inputs, height=20, width=20, input_LHS=False):
        # Grid/canvas dimensions.
        self.height = height  # height of the canvas
        self.width = width  # width of the canvas
        self.SM_inputs = SM_inputs  # inputs for the entire model
        self.PE_type = PE_type  # model type (SM, A+PL, A+Co, A+PK, A+PI)
        self.resources_aff = SM_inputs[2]  # resources per affiliation agent
        self.stepCount = 0  # int - [-] - initialisation of step counter
        self.agenda_PC = None  # initialisation of agenda policy core issue tracker
        self.policy_implemented_number = None  # initialisation of policy number tracker
        self.policy_formulation_run = False  # check value for running policy formulation
        self.w_el_influence = self.SM_inputs[
            5]  # float - [-] - electorate influence weight constant

        # batchrunner inputs (Latin Hypercube Sampling overrides, if any)
        self.input_LHS = input_LHS

        # ACF+PL parameters
        if 'A+PL' in self.PE_type or 'A+Co' in self.PE_type:
            self.conflict_level = AplusPL_inputs[0]
            self.resources_spend_incr_agents = AplusPL_inputs[1]

        # ACF+Co parameters
        if 'A+Co' in self.PE_type:
            self.PC_interest = AplusCo_inputs[0]
            if self.input_LHS:
                # LHS inputs take precedence over the AplusCo defaults.
                self.coa_creation_thresh = self.input_LHS[1]  # LHS inputs
                self.coa_resources_share = self.input_LHS[0]  # LHS inputs
            else:
                self.coa_creation_thresh = AplusCo_inputs[1]
                self.coa_resources_share = AplusCo_inputs[3]
            self.coa_coherence_thresh = AplusCo_inputs[2]
            self.resources_spend_incr_coal = AplusCo_inputs[4]
            print('res. share:', round(self.coa_resources_share, 3),
                  ', coa. threshold:', round(self.coa_creation_thresh, 3))
            self.coalition_list = []

        # +PK (partial knowledge) parameters
        self.PK = False
        if '+PK' in self.PE_type:
            self.PK = True
            self.PK_catchup = AplusPK_inputs[0]

        self.schedule = RandomActivation(self)  # mesa random activation method
        self.grid = SingleGrid(height, width, torus=True)  # mesa grid creation method

        # creation of the datacollector vector; the A+Co variant adds
        # coalition-level attributes to the model reporters.
        if 'A+Co' in self.PE_type:
            self.datacollector = DataCollector(
                # Model-level variables
                model_reporters={
                    "step": "stepCount",
                    "AS_PF": get_problem_policy_chosen,
                    "agent_attributes": get_agents_attributes,
                    "coalitions_attributes": get_coalitions_attributes,
                    "electorate_attributes": get_electorate_attributes
                },
                # Agent-level variables
                agent_reporters={
                    "x": lambda a: a.pos[0],
                    "y": lambda a: a.pos[1],
                    "Agent type": lambda a: type(a),
                    "Issuetree": lambda a: getattr(a, 'issuetree', [None])[
                        a.unique_id if isinstance(a, ActiveAgent) and not isinstance(
                            a, Coalition) else 0]
                })
        else:
            self.datacollector = DataCollector(
                # Model-level variables
                model_reporters={
                    "step": "stepCount",
                    "AS_PF": get_problem_policy_chosen,
                    "agent_attributes": get_agents_attributes,
                    "electorate_attributes": get_electorate_attributes
                },
                # Agent-level variables
                agent_reporters={
                    "x": lambda a: a.pos[0],
                    "y": lambda a: a.pos[1],
                    "Agent type": lambda a: type(a),
                    "Issuetree": lambda a: getattr(a, 'issuetree', [None])[
                        a.unique_id if isinstance(a, ActiveAgent) else 0]
                })

        self.len_S, self.len_PC, self.len_DC, self.len_CR = belief_tree_input(
        )  # setting up belief tree
        self.policy_instruments, self.len_ins, self.PF_indices = policy_instrument_input(
        )  # setting up policy instruments
        init_active_agents(self, self.len_S, self.len_PC, self.len_DC,
                           self.len_CR, self.len_PC, self.len_ins,
                           self.SM_inputs)  # setting up active agents
        init_electorate_agents(self, self.len_S, self.len_PC, self.len_DC,
                               self.SM_inputs)  # setting up passive agents
        init_truth_agent(self, self.len_S, self.len_PC, self.len_DC,
                         self.len_ins)  # setting up truth agent

        self.running = True
        self.numberOfAgents = self.schedule.get_agent_count()
        self.datacollector.collect(self)

    def step(self, KPIs):
        '''
        Main steps of the Simplest Model for policy emergence:
        0. Module interface - Input
        1. Agenda setting step
        2. Policy formulation step
        3. Data collection

        Returns the policy instrument selected this step (or the status-quo
        instrument `policy_instruments[-1]` when no agenda was formed).
        '''
        self.KPIs = KPIs  # saving the indicators

        # 0. initialisation
        self.module_interface_input(
            self.KPIs)  # communicating the beliefs (indicators)
        self.electorate_influence(
            self.w_el_influence)  # electorate influence actions
        if 'A+Co' in self.PE_type:
            self.coalition_creation_algorithm()

        # 1. agenda setting
        self.agenda_setting()

        # 2. policy formulation
        if self.policy_formulation_run:
            policy_implemented = self.policy_formulation()
        else:
            policy_implemented = self.policy_instruments[-1]

        # 3. data collection
        self.stepCount += 1  # iterate the steps counter
        self.datacollector.collect(self)  # collect data

        print("Step ends", "\n")
        return policy_implemented

    def module_interface_input(self, KPIs):
        '''
        The module interface input step consists of actions related to the
        module interface and the policy emergence model: the truth agent's
        trees are refreshed from the KPIs and then broadcast to every
        active (non-coalition) agent.
        '''
        len_DC = self.len_DC
        len_PC = self.len_PC
        len_S = self.len_S
        len_ins = self.len_ins

        # saving the issue tree of the truth agent
        for agent in self.schedule.agent_buffer(shuffled=True):
            if isinstance(agent, TruthAgent):
                agent.issuetree_truth = KPIs
                truth_issuetree = agent.issuetree_truth
                truth_policytree = agent.policytree_truth

        # Transferring policy impact to active agents
        for agent in self.schedule.agent_buffer(shuffled=True):
            if isinstance(agent, ActiveAgent) and not isinstance(
                    agent, Coalition):  # selecting only active agents
                # for PFj in range(len_PC):  # communicating the policy family likelihoods
                #     for PFij in range(len_PC):
                #         agent.policytree[agent.unique_id][PFj][PFij] = truth_policytree[PFj][PFij]
                for insj in range(
                        len_ins
                ):  # communicating the policy instruments impacts
                    agent.policytree[agent.unique_id][
                        len_PC + insj][0:len_S] = truth_policytree[len_PC + insj]
                for issue in range(
                        len_DC + len_PC + len_S
                ):  # communicating the issue beliefs from the KPIs
                    agent.issuetree[
                        agent.unique_id][issue][0] = truth_issuetree[issue]
                self.preference_update(
                    agent, agent.unique_id)  # updating the preferences

    def resources_distribution(self):
        '''
        Distribute action resources to active agents (by affiliation) and,
        in the A+Co variant, pool a share of member resources into each
        coalition.
        '''
        if 'A+PL' in self.PE_type or 'A+Co' in self.PE_type:
            for agent in self.schedule.agent_buffer(shuffled=False):
                if isinstance(agent,
                              ActiveAgent):  # selecting only active agents
                    if agent.affiliation == 0:  # affiliation 0
                        agent.resources = 0.01 * self.number_activeagents * self.resources_aff[
                            0] / 100
                    if agent.affiliation == 1:  # affiliation 1
                        agent.resources = 0.01 * self.number_activeagents * self.resources_aff[
                            1] / 100
                    agent.resources_action = agent.resources  # assigning resources for the actions for both

        if 'A+Co' in self.PE_type:
            # attribution of the resources to coalitions
            for coalition in self.schedule.agent_buffer(shuffled=False):
                if isinstance(coalition, Coalition):
                    resources = 0
                    for agent_mem in coalition.members:
                        resources += agent_mem.resources * self.coa_resources_share
                        agent_mem.resources -= self.coa_resources_share * agent_mem.resources
                        # NOTE(review): `agent` here is a leftover binding
                        # from the loop above — this line looks like it was
                        # meant to be `agent_mem.resources_action =
                        # agent_mem.resources`; confirm before changing.
                        agent.resources_action = agent.resources  # assigning resources for the actions for both
                    coalition.resources = resources
                    coalition.resources_action = coalition.resources  # assigning resources for the actions for both

    def agenda_setting(self):
        '''
        In the agenda setting step, the active agents first select their
        policy core issue of preference and then select the agenda by
        majority vote.
        '''
        # resources distribution
        self.resources_distribution()

        # active agent policy core selection
        for agent in self.schedule.agent_buffer(shuffled=False):
            if isinstance(agent, ActiveAgent):  # selecting only active agents
                agent.selection_PC()

        if 'A+Co' in self.PE_type:
            for coalition in self.schedule.agent_buffer(shuffled=True):
                if isinstance(coalition, Coalition):  # selecting only coalitions
                    coalition.interactions_intra_coalition(
                        'AS')  # intra-coalition interactions

        # active agent interactions (including coalitions)
        if 'A+PL' in self.PE_type or 'A+Co' in self.PE_type:
            for agent in self.schedule.agent_buffer(shuffled=True):
                if isinstance(agent, ActiveAgent):  # selecting only active agents
                    agent.interactions('AS', self.PK)

        # active agent policy core selection (after agent interactions)
        if 'A+PL' in self.PE_type or 'A+Co' in self.PE_type:
            # active agent policy core selection
            for agent in self.schedule.agent_buffer(shuffled=False):
                if isinstance(agent, ActiveAgent):  # selecting only active agents
                    agent.selection_PC()

        # for each agent, selection of their preferred policy core issue
        selected_PC_list = []
        number_ActiveAgents = 0
        for agent in self.schedule.agent_buffer(shuffled=False):
            if isinstance(agent, ActiveAgent):  # considering only policy makers
                selected_PC_list.append(agent.selected_PC)
                number_ActiveAgents += 1

        # finding the most common policy core issue and its frequency
        d = defaultdict(int)
        for i in selected_PC_list:
            d[i] += 1
        result = max(d.items(), key=lambda x: x[1])
        agenda_PC_temp = result[0]
        agenda_PC_temp_frequency = result[1]

        # checking for majority (strictly more than half of the actives)
        if agenda_PC_temp_frequency > int(number_ActiveAgents / 2):
            self.agenda_PC = agenda_PC_temp
            self.policy_formulation_run = True  # allowing for policy formulation to happen
            print("The agenda consists of PC", self.agenda_PC, ".")
        else:  # if no majority
            self.policy_formulation_run = False
            print("No agenda was formed, moving to the next step.")

        # for purposes of not changing the entire code - the policy family
        # selected is set at 0 so all policy instruments are always
        # considered in the rest of the model
        self.agenda_PF = 0

    def policy_formulation(self):
        '''
        In the policy formulation step, the policy maker agents first select
        their policy core issue of preference and then they select the
        policy that is to be implemented if there is a majority of them.

        Returns the chosen policy instrument, or the status-quo instrument
        when no majority exists among policymakers.
        '''
        # resources distribution
        self.resources_distribution()

        # calculation of policy instruments preferences
        if 'A+PL' in self.PE_type or 'A+Co' in self.PE_type:
            for agent in self.schedule.agent_buffer(shuffled=False):
                if isinstance(agent, ActiveAgent):
                    agent.selection_S()
                    agent.selection_PI(
                    )  # individual agent policy instrument selection

        if 'A+Co' in self.PE_type:
            for coalition in self.schedule.agent_buffer(shuffled=True):
                if isinstance(coalition, Coalition):  # selecting only active agents
                    # print('selected_PC', agent.selected_PC)
                    coalition.interactions_intra_coalition('PF')
                    # coalition.interactions('PF')

        # active agent interactions
        if 'A+PL' in self.PE_type or 'A+Co' in self.PE_type:
            for agent in self.schedule.agent_buffer(shuffled=True):
                if isinstance(agent, ActiveAgent):  # selecting only active agents
                    agent.interactions('PF', self.PK)

        # calculation of policy instruments preferences
        selected_PI_list = []
        number_PMs = 0
        for agent in self.schedule.agent_buffer(shuffled=False):
            if isinstance(
                    agent, ActiveAgent
            ) and agent.agent_type == 'policymaker':  # considering only policy makers
                agent.selection_S()
                agent.selection_PI(
                )  # individual agent policy instrument selection
                selected_PI_list.append(
                    agent.selected_PI
                )  # appending the policy instruments selected to a list for all PMs
                number_PMs += 1

        # finding the most common policy instrument and its frequency
        d = defaultdict(int)
        print(selected_PI_list)
        for i in selected_PI_list:
            d[i] += 1
        result = max(d.items(), key=lambda x: x[1])
        self.policy_implemented_number = result[0]
        policy_implemented_number_frequency = result[1]

        # check for the majority and implemented if satisfied
        if policy_implemented_number_frequency > int(number_PMs / 2):
            print("The policy selected is policy instrument ",
                  self.policy_implemented_number, ".")
            policy_implemented = self.policy_instruments[
                self.policy_implemented_number]
        else:  # if no majority
            print("No consensus on a policy instrument.")
            policy_implemented = self.policy_instruments[
                -1]  # selecting status quo policy instrument

        return policy_implemented

    def preference_update(self, agent, who, coalition_check=False):
        '''
        This function is used to call the preference update functions of the
        issues of the active agents.

        When `coalition_check` is True, `who` is redirected to the slot in
        the issue tree reserved for coalition beliefs.
        '''
        if coalition_check:
            who = self.number_activeagents
        self.preference_update_DC(agent, who)  # deep core issue preference update
        self.preference_update_PC(agent, who)  # policy core issue preference update
        self.preference_update_S(agent, who)

    def preference_update_DC(self, agent, who):
        """
        This function is used to update the preferences of the deep core
        issues of agents in their respective issue trees.

        agent - this is the owner of the issue tree
        who - this is the part of the issuetree that is considered -
        agent.unique_id should be used for this - this is done to also
        include partial knowledge preference calculation
        """
        len_DC = self.len_DC

        # calculation of the denominator
        PC_denominator = 0
        for h in range(len_DC):
            issue_belief = agent.issuetree[who][h][0]
            issue_goal = agent.issuetree[who][h][1]
            # NOTE(review): gap is computed before the None guard below; if
            # goal/belief can actually be None here this raises TypeError —
            # confirm against how issue trees are initialised.
            gap = issue_goal - issue_belief
            if issue_goal is not None and issue_belief is not None:
                PC_denominator += abs(gap)

        # selection of the numerator and calculation of the preference
        for i in range(len_DC):
            issue_belief = agent.issuetree[who][i][0]
            issue_goal = agent.issuetree[who][i][1]
            gap = issue_goal - issue_belief
            if PC_denominator != 0:  # make sure the denominator is not 0
                agent.issuetree[who][i][2] = abs(gap) / PC_denominator
            else:
                agent.issuetree[who][i][2] = 0

    def preference_update_PC(self, agent, who):
        """
        This function is used to update the preferences of the policy core
        issues of agents in their respective issue trees.

        agent - this is the owner of the belief tree
        who - this is the part of the issuetree that is considered -
        agent.unique_id should be used for this - this is done to also
        include partial knowledge preference calculation
        """
        len_DC = self.len_DC
        len_PC = self.len_PC
        len_S = self.len_S

        PC_denominator = 0
        # calculation of the denominator
        for j in range(
                len_PC):  # selecting the causal relations starting from PC
            for k in range(len_DC):
                cr = agent.issuetree[who][len_DC + len_PC + len_S + j +
                                          (k * len_PC)][0]
                issue_belief = agent.issuetree[who][k][0]
                issue_goal = agent.issuetree[who][k][1]
                gap = issue_goal - issue_belief
                if issue_goal is not None and issue_belief is not None and cr is not None \
                        and ((cr < 0 and gap < 0) or (cr > 0 and gap > 0)):
                    # contingency for partial knowledge issues and check if
                    # cr and belief-goal are same sign
                    PC_denominator = PC_denominator + abs(cr * gap)

        # addition of the gaps of the associated mid-level issues
        for i in range(len_PC):
            issue_belief = agent.issuetree[who][len_DC + i][0]
            issue_goal = agent.issuetree[who][len_DC + i][1]
            gap = issue_goal - issue_belief
            if issue_goal is not None and issue_belief is not None:
                # contingency for partial knowledge issues
                PC_denominator += abs(gap)

        # calculation the numerator and the preference
        for j in range(len_PC):  # select one by one the PC
            # calculation of the right side of the numerator
            PC_numerator = 0
            for k in range(
                    len_DC):  # selecting the causal relations starting from DC
                issue_belief = agent.issuetree[who][k][0]
                issue_goal = agent.issuetree[who][k][1]
                cr = agent.issuetree[who][len_DC + len_PC + len_S + j +
                                          (k * len_PC)][0]
                gap = issue_goal - issue_belief
                if issue_goal is not None and issue_belief is not None and cr is not None \
                        and ((cr < 0 and gap < 0) or (cr > 0 and gap > 0)):
                    # contingency for partial knowledge issues and check if
                    # cr and belief-goal are same sign
                    PC_numerator += abs(cr * gap)

            # addition of the gap to the numerator
            issue_belief = agent.issuetree[who][len_DC + j][0]
            issue_goal = agent.issuetree[who][len_DC + j][1]
            gap = issue_goal - issue_belief
            if issue_goal is not None and issue_belief is not None:
                # contingency for partial knowledge issues
                PC_numerator += abs(gap)

            # calculation of the preferences
            if PC_denominator != 0:
                agent.issuetree[who][len_DC + j][2] = round(
                    PC_numerator / PC_denominator, 3)
            else:
                agent.issuetree[who][len_DC + j][2] = 0

    def preference_update_S(self, agent, who):
        """
        This function is used to update the preferences of secondary issues
        the agents in their respective issue trees.

        agent - this is the owner of the belief tree
        who - this is the part of the issuetree that is considered -
        agent.unique_id should be used for this - this is done to also
        include partial knowledge preference calculation
        """
        len_DC = self.len_DC
        len_PC = self.len_PC
        len_S = self.len_S

        S_denominator = 0
        # calculation of the denominator
        for j in range(len_S):
            for k in range(
                    len_PC):  # selecting the causal relations starting from S
                issue_belief = agent.issuetree[who][len_DC + k][0]
                issue_goal = agent.issuetree[who][len_DC + k][1]
                cr = agent.issuetree[who][len_DC + len_PC + len_S +
                                          len_DC * len_PC + j + (k * len_S)][0]
                gap = issue_goal - issue_belief
                if issue_goal is not None and issue_belief is not None and cr is not None \
                        and ((cr < 0 and gap < 0) or (cr > 0 and gap > 0)):
                    # contingency for partial knowledge issues and check if
                    # cr and belief-goal are same sign
                    S_denominator += abs(cr * gap)

        # addition of the gaps of the associated secondary issues
        for j in range(len_S):
            issue_belief = agent.issuetree[who][len_DC + len_PC + j][0]
            issue_goal = agent.issuetree[who][len_DC + len_PC + j][1]
            # print(issue_goal, type(issue_goal), type(issue_belief))
            gap = issue_goal - issue_belief
            if issue_goal is not None and issue_belief is not None:
                # contingency for partial knowledge issues
                S_denominator += abs(gap)

        # calculation the numerator and the preference
        for j in range(len_S):  # select one by one the S
            # calculation of the right side of the numerator
            S_numerator = 0
            for k in range(
                    len_PC):  # selecting the causal relations starting from PC
                # Contingency for partial knowledge issues
                cr = agent.issuetree[who][len_DC + len_PC + len_S +
                                          len_DC * len_PC + j + (k * len_S)][0]
                issue_belief = agent.issuetree[who][len_DC + k][0]
                issue_goal = agent.issuetree[who][len_DC + k][1]
                gap = issue_goal - issue_belief
                if issue_goal is not None and issue_belief is not None and cr is not None \
                        and ((cr < 0 and gap < 0) or (cr > 0 and gap > 0)):
                    # contingency for partial knowledge issues and check if
                    # cr and gap are same sign
                    S_numerator += abs(cr * gap)

            # addition of the gap to the numerator
            issue_belief = agent.issuetree[who][len_DC + len_PC + j][0]
            issue_goal = agent.issuetree[who][len_DC + len_PC + j][1]
            gap = issue_goal - issue_belief
            if issue_goal is not None and issue_belief is not None:
                # contingency for partial knowledge issues
                S_numerator += abs(gap)

            # calculation of the preferences
            if S_denominator != 0:
                agent.issuetree[who][len_DC + len_PC + j][2] = round(
                    S_numerator / S_denominator, 3)
            else:
                agent.issuetree[who][len_DC + len_PC + j][2] = 0

    def electorate_influence(self, w_el_influence):
        '''
        This function calls the influence actions in the electorate agent
        class.
        '''
        for agent in self.schedule.agent_buffer(shuffled=True):
            if isinstance(agent, ElectorateAgent):
                agent.electorate_influence(w_el_influence)

    def coalition_creation_algorithm(self):
        '''
        Function that is used to reset the coalitions at the beginning of
        each round.

        A maximum of two coalitions are allowed.  The agents have to be
        within a certain threshold of their goals to be assembled together.
        Note that the preferred states only are considered and not the
        actual beliefs of the actors - this could be a problem when
        considering the partial information case.
        :return:
        '''
        # resetting the coalitions before the creation of new ones
        for coalition in self.schedule.agent_buffer(shuffled=False):
            if isinstance(coalition, Coalition):
                self.schedule.remove(coalition)

        # saving the agents in a list with their belief values
        list_agents_1 = []  # active agent list
        for agent in self.schedule.agent_buffer(shuffled=False):
            if isinstance(agent, ActiveAgent):
                list_agents_1.append(
                    (agent, agent.issuetree[agent.unique_id][self.len_DC +
                                                             self.PC_interest][1]))
        list_agents_1.sort(
            key=lambda x: x[1])  # sorting the list based on the goals

        # checking for groups for first coalition: for every agent, count
        # how many agents fall within the creation threshold of its goal.
        list_coalition_number = []
        for i in range(len(list_agents_1)):
            count = 0
            for j in range(len(list_agents_1)):
                if list_agents_1[i][
                        1] - self.coa_creation_thresh <= list_agents_1[j][
                            1] <= list_agents_1[i][
                                1] + self.coa_creation_thresh:
                    count += 1
            list_coalition_number.append(count)
        index = list_coalition_number.index(
            max(list_coalition_number
                ))  # finding the grouping with the most member index

        list_coalition_members = []
        list_agents_2 = copy.copy(list_agents_1)
        for i in range(len(list_agents_1)):
            if list_agents_1[index][
                    1] - self.coa_creation_thresh <= list_agents_1[i][
                        1] <= list_agents_1[index][
                            1] + self.coa_creation_thresh:
                list_coalition_members.append(list_agents_1[i][0])
                # remaining agents are kept in list_agents_2 for coalition 2
                list_agents_2.remove(list_agents_1[i])
        self.coalition_creation(
            1001, list_coalition_members
        )  # creating the coalition with the selected members

        if len(list_agents_2) > 2:  # check if there are enough agents left
            # checking for groups for second coalition
            list_coalition_number = []
            for i in range(len(list_agents_2)):
                count = 0
                for j in range(len(list_agents_2)):
                    if list_agents_2[i][
                            1] - self.coa_creation_thresh <= list_agents_2[j][
                                1] <= list_agents_2[i][
                                    1] + self.coa_creation_thresh:
                        count += 1
                list_coalition_number.append(count)
            index = list_coalition_number.index(
                max(list_coalition_number
                    ))  # finding the grouping with the most member index
            list_coalition_members = []
            for i in range(len(list_agents_2)):
                if list_agents_2[index][
                        1] - self.coa_creation_thresh <= list_agents_2[i][
                            1] <= list_agents_2[index][
                                1] + self.coa_creation_thresh:
                    list_coalition_members.append(list_agents_2[i][0])
            self.coalition_creation(
                1002, list_coalition_members
            )  # creating the coalition with selected members

    def coalition_creation(self, unique_id, members):
        '''
        Function that is used to create the object Coalition which is a
        sub-agent of the ActiveAgent class.
        :param unique_id:
        :param members:
        :return:
        '''
        x = 0
        y = 0
        resources = 0  # resources are reset to 0
        len_DC = self.len_DC
        len_PC = self.len_PC
        len_S = self.len_S
        len_CR = self.len_CR
        len_PF = self.len_PC
        len_ins = self.len_ins

        issuetree_coal = [None]  # creation of the issue tree
        issuetree_coal[0] = issuetree_creation(
            len_DC, len_PC, len_S, len_CR)  # using the newly made function
        for r in range(
                self.number_activeagents
        ):  # last spot is where the coalition beliefs are stored
            issuetree_coal.append(
                issuetree_creation(len_DC, len_PC, len_S, len_CR))

        policytree_coal = [None]  # creation of the policy tree
        policytree_coal[0] = members[0].policytree[members[0].unique_id]
        for r in range(self.number_activeagents):
            policytree_coal.append(members[0].policytree[members[0].unique_id])
        # note that the policy tree is simply copied ... this will not work
        # in the case of partial information where a different algorithm
        # will need to be found for this part of the model

        # creation of the coalition agent
        agent = Coalition((x, y), unique_id, self, 'coalition', resources,
                          'X', issuetree_coal, policytree_coal, members)
        self.coalition_belief_update(agent, members)
        self.preference_update(agent, unique_id,
                               True)  # updating the issue tree preferences
        self.grid.position_agent(agent, (x, y))
        self.schedule.add(agent)

    def coalition_belief_update(self, coalition, members):
        '''
        Function that is used to update the beliefs of the coalition to an
        average of the agents members of this said coalition.
        :param coalition:
        :param members:
        :return:
        '''
        len_DC = self.len_DC
        len_PC = self.len_PC
        len_S = self.len_S
        len_CR = self.len_CR

        for k in range(
                len_DC + len_PC +
                len_S):  # updating the preferred states and actual beliefs
            belief = 0
            goal = 0
            for agent_mem in members:
                id = agent_mem.unique_id
                belief += agent_mem.issuetree[id][k][0]
                goal += agent_mem.issuetree[id][k][1]
            coalition.issuetree[
                self.number_activeagents][k][0] = belief / len(members)
            coalition.issuetree[
                self.number_activeagents][k][1] = goal / len(members)

        for k in range(len_CR):  # updating the causal relations
            CR = 0
            for agent_mem in members:
                id = agent_mem.unique_id
                CR += agent_mem.issuetree[id][len_DC + len_PC + len_S + k][0]
            coalition.issuetree[self.number_activeagents][
                len_DC + len_PC + len_S + k][0] = CR / len(members)

        if self.PK:  # for the partial knowledge
            for agent in self.schedule.agent_buffer(shuffled=False):
                if agent not in members and isinstance(
                        agent, ActiveAgent) and not isinstance(agent, Coalition):
                    id = agent.unique_id
                    for k in range(len_DC + len_PC +
                                   len_S):  # updating the preferred states
                        goal = 0
                        for agent_mem in members:
                            goal += agent_mem.issuetree[id][k][1]
                        coalition.issuetree[id][k][1] = goal / len(members)
                    for k in range(len_CR):  # updating the causal relations
                        CR = 0
                        for agent_mem in members:
                            CR += agent_mem.issuetree[id][len_DC + len_PC +
                                                          len_S + k][0]
                        coalition.issuetree[id][len_DC + len_PC + len_S +
                                                k][0] = CR / len(members)
class NaSchTraffic(Model):
    """
    Agent based model of traffic flow, with responsive street lighting.
    Happiness is measured by the level of lighting in cells occupied by,
    and ahead of, agents.
    """

    def __init__(self, height=1, width=200, vehicle_density=0.1,
                 general_max_speed=5, p_randomisation=0.4, debug=0,
                 seed=None):
        """
        Args:
            height, width: grid dimensions (the road is a width-cell torus).
            vehicle_density: fraction of cells initially holding a vehicle.
            general_max_speed: speed cap passed to each vehicle agent.
            p_randomisation: probability of a random slowdown.
            debug: 0 silent; >1 prints setup counts; 1 or 3 additionally
                records agent positions in the data collector.
            seed: RNG seed forwarded to the Mesa Model base class.
        """
        super().__init__(seed=seed)
        self.height = height
        self.width = width
        self.vehicle_density = vehicle_density
        self.general_max_speed = general_max_speed
        self.p_randomisation = p_randomisation
        self.debug = debug

        self.schedule = SimultaneousActivation(self)
        self.grid = SingleGrid(width, height, torus=True)

        # One street light every ~8 cells (36 m spacing over 4.5 m cells).
        self.light_range = int(floor(36 / 4.5))
        self.lighting_grid = [20] * width
        self.agent_position_log = []

        # Per-step accumulators and derived statistics.
        self.total_street_lights = 0
        self.total_speed = 0
        self.total_happy = 0
        self.total_vehicles = 0
        self.total_flow = 0
        self.average_speed = 0.0
        self.average_happy = 0.0
        self.current_density = 0.0
        self.average_lighting_level = 0.0
        self.speed_averages = []
        self.happiness_averages = []
        self.densities = []
        self.flows = []
        self.lighting_averages = []

        # Build the reporter dict once instead of duplicating the whole
        # DataCollector construction in two branches; the only difference
        # between debug and non-debug modes is the position log.
        model_reporters = {
            "Average_Speed": "average_speed",  # model-level average speed of all agents
            "Density": "current_density",
            "Flow": "total_flow",
        }
        if self.debug == 1 or self.debug == 3:
            model_reporters["Agent_Positions"] = "agent_position_log"
        self.datacollector = DataCollector(model_reporters=model_reporters)

        # Set up agents.  Street lights first as these are fixed.
        y = 0
        for light_iter in range(0, int(width / self.light_range)):
            x = light_iter * self.light_range
            agent = StreetLightAgent((x, y), self, self.light_range)
            self.schedule.add(agent)
            self.total_street_lights += 1
        if self.debug > 1:
            print("Added " + str(self.total_street_lights) + " lights")

        # Vehicles are scattered over randomly chosen distinct cells, using
        # a grid iterator that returns cell contents with coordinates.
        cells = list(self.grid.coord_iter())
        self.random.shuffle(cells)
        vehicle_quantity = int(width * self.vehicle_density)
        for vehicle_iter in range(0, vehicle_quantity):
            (content, x, y) = cells[vehicle_iter]
            agent = VehicleAgent((x, y), self, general_max_speed)
            self.grid.position_agent(agent, (x, y))
            self.schedule.add(agent)
        if self.debug > 1:
            print("Added " + str(vehicle_quantity) + " vehicles")

        self.running = True
        self.datacollector.collect(self)

    def step(self):
        """
        Run one step of the model and recompute the per-step averages.
        """
        self.total_speed = 0
        self.total_happy = 0
        self.total_vehicles = 0
        self.total_flow = 0
        self.agent_position_log = []

        # Step all agents, then advance all agents (simultaneous activation).
        self.schedule.step()

        if self.total_vehicles > 0:
            self.average_speed = self.total_speed / self.total_vehicles
            self.average_happy = self.total_happy / self.total_vehicles
            # Density is measured over the central 60% of the road.
            self.current_density = self.total_vehicles / (self.width * 0.6)
        else:
            self.average_speed = 0
            self.average_happy = 0
            self.current_density = 0

        # Average lighting over the central 60% of the road.  Guard against
        # an empty slice (very narrow grids) to avoid ZeroDivisionError.
        lighting_subset = self.lighting_grid[int(self.width * 0.2):
                                             int(self.width * 0.8)]
        if lighting_subset:
            self.average_lighting_level = (sum(lighting_subset) /
                                           len(lighting_subset))
        else:
            self.average_lighting_level = 0.0

        self.speed_averages.append(self.average_speed)
        self.densities.append(self.current_density)
        self.flows.append(float(self.total_flow))

        # collect data
        self.datacollector.collect(self)
class SchellingModel(Model):
    '''Model class for Schelling segregation model.

    Places three groups of agents on a non-toroidal grid and tracks how
    many are happy (enough same-group neighbours) plus the average
    similar-neighbour count; the run halts once every agent is happy.
    '''

    def __init__(self, height=20, width=20, density=.8, group_ratio=.66,
                 minority_ratio=.5, homophily=3):
        '''
        Args:
            height, width: grid dimensions.
            density: probability a cell starts occupied.
            group_ratio: share of agents drawn from the 0/1 split
                (remainder is group 2).
            minority_ratio: share of group 0 within that split.
            homophily: similar neighbours needed for an agent to be happy.
        '''
        self.height = height
        self.width = width
        self.density = density
        self.group_ratio = group_ratio
        self.minority_ratio = minority_ratio
        self.homophily = homophily
        self.happy = 0
        self.segregated = 0

        self.schedule = RandomActivation(self)
        self.grid = SingleGrid(height, width, torus=False)
        self.place_agents()
        self.datacollector = DataCollector(
            {'happy': (lambda m: m.happy),
             'segregated': (lambda m: m.segregated)})
        self.running = True

    def step(self):
        '''Run one step of model'''
        self.schedule.step()
        self.calculate_stats()
        self.datacollector.collect(self)
        # Halt once every scheduled agent is happy.
        if self.happy == self.schedule.get_agent_count():
            self.running = False

    def place_agents(self):
        '''Fill cells at the configured density, drawing each agent's group
        from the nested group/minority ratios, then seed similarity counts.'''
        for cell in self.grid.coord_iter():
            x, y = cell[1:3]
            if random.random() < self.density:
                if random.random() < self.group_ratio:
                    if random.random() < self.minority_ratio:
                        group = 0
                    else:
                        group = 1
                else:
                    group = 2
                agent = SchellingAgent((x, y), group)
                self.grid.position_agent(agent, (x, y))
                self.schedule.add(agent)

        # Initial similar-neighbour count (von Neumann neighbourhood).
        for agent in self.schedule.agents:
            count = 0
            for neighbour in self.grid.iter_neighbors(agent.pos, moore=False):
                if neighbour.group == agent.group:
                    count += 1
            agent.similar = count

    def calculate_stats(self):
        '''Recompute the happy count and the average similarity level.'''
        happy_count = 0
        avg_seg = 0
        for agent in self.schedule.agents:
            avg_seg += agent.similar
            if agent.similar >= self.homophily:
                happy_count += 1

        self.happy = happy_count
        # Guard against an empty schedule (e.g. density=0), which would
        # previously raise ZeroDivisionError here.
        agent_count = self.schedule.get_agent_count()
        self.segregated = avg_seg / agent_count if agent_count else 0