class VirusModel(Model):
    """A virus model with some number of agents.

    The building is a small fixed graph of rooms: a RoomAgent occupies every
    node, and HumanAgents arrive through node 0 ("Wejście"), visit rooms that
    can satisfy their goals, and are removed once satisfied.
    """

    def __init__(self):
        # A random topology was tried and abandoned in favour of a fixed plan:
        # self.G = nx.erdos_renyi_graph(n=self.num_nodes, p=prob)
        # self.G = nx.erdos_renyi_graph(n=3, p=0.5)
        self.G = nx.Graph()
        # Bug fix: the original added node 4 twice and never added node 5
        # explicitly (node 5 only existed as a side effect of add_edge(0, 5)).
        self.G.add_nodes_from([0, 1, 2, 3, 4, 5])
        self.G.add_edges_from(
            [(0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (1, 4), (4, 5)])
        self.grid = NetworkGrid(self.G)

        # Room metadata keyed by node id.  "rates" maps a goal name to the
        # speed at which the room satisfies that goal.
        self.rooms = {
            0: {"name": "Wejście", "rates": {}},
            1: {"name": "Czytelnia", "rates": {"Nauka": 2}},
            2: {"name": "Chillout", "rates": {"Relaks": 10}},
            3: {"name": "Biuro", "rates": {"Praca": 1.5}},
            4: {"name": "Toaleta", "rates": {"Toaleta": 30}},
            5: {"name": "Kawiarnia", "rates": {"Jedzenie": 12, "Kultura": 0.5}},
        }

        # One reporter per room: occupants minus the resident RoomAgent.
        # `i=i` binds the node id at lambda-definition time (avoids the
        # late-binding closure trap).
        collector_dict = {}
        for i in self.rooms:
            collector_dict[self.rooms[i]["name"]] = lambda model, i=i: len(
                model.grid.get_cell_list_contents([i])) - 1
        self.datacollector = DataCollector(collector_dict)

        self.schedule = RandomActivation(self)

        # Create one RoomAgent per node and place it on its node.
        for i, node in enumerate(self.G.nodes()):
            r = RoomAgent(i, self, self.rooms[i]["name"],
                          self.rooms[i]["rates"])
            self.schedule.add(r)
            self.grid.place_agent(r, node)

        # Per-goal [mean, std] (hours) used to sample each visitor's demands.
        self.prob_needs = {
            "Jedzenie": [4, 0.6],
            "Toaleta": [2, 0.6],
            "Relaks": [5, 1]
        }
        self.prob_studs = {
            "Nauka": [2, 1.5],
            "Praca": [0, 0.5],
            "Kultura": [0, 1.0]
        }
        self.prob_works = {
            "Nauka": [0, 0.3],
            "Praca": [6, 1.0],
            "Kultura": [0, 0.2]
        }
        self.prob_tours = {
            "Nauka": [0, 0.3],
            "Praca": [0, 0.5],
            "Kultura": [1, 1.0]
        }
        self.prob_local = {
            "Nauka": [1, 0.7],
            "Praca": [2, 0.9],
            "Kultura": [1, 1.0]
        }

        # Arrivals per hour of day (index 0-23).
        # hours:           0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
        self.rate_studs = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]
        self.rate_works = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0]
        self.rate_tours = [0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 3, 3, 3, 4, 4, 4, 6, 6, 4, 3, 2, 0, 0]
        self.rate_local = [0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 4, 4, 2, 2, 4, 5, 6, 6, 6, 3, 0, 0, 0]

        self.running = True
        self.datacollector.collect(self)
        self.tm = 0 * 60   # simulated clock, minutes since midnight
        self.count = 0     # total number of HumanAgents ever created

    def get_sample(self, probs):
        """Draw a demanded time (in minutes) for every goal in `probs`.

        :param probs: dict mapping goal name -> [mean, std] in hours
        :return: dict mapping goal name -> whole minutes, clipped to [15, 600]
        """
        ret = {}
        for k, [m, s] in probs.items():
            tm = int(np.clip(np.random.normal(m, s) * 60, 15, 600))
            ret[k] = tm
        return ret

    def step(self):
        """Advance the simulation by one minute."""
        # prepare list for the satisfied agents; filled during schedule.step()
        # (presumably by the agents themselves — see HumanAgent)
        self.satisfied = []

        # add new agents according to the arrival rate of the current hour
        hour = int(self.tm / 60)
        if (hour > 23):  # defensive clamp; should not trigger after the rollover fix below
            hour = 0
        for i in range(self.rate_studs[hour]):
            a = HumanAgent(100 + self.count, self,
                           self.get_sample(self.prob_needs),
                           self.get_sample(self.prob_studs))
            self.schedule.add(a)
            self.grid.place_agent(a, 0)
            self.count += 1
        for i in range(self.rate_works[hour]):
            a = HumanAgent(100 + self.count, self,
                           self.get_sample(self.prob_needs),
                           self.get_sample(self.prob_works))
            self.schedule.add(a)
            self.grid.place_agent(a, 0)
            self.count += 1

        # update system
        self.schedule.step()

        # collect data
        self.datacollector.collect(self)

        # make time step.  Bug fix: was `> 24 * 60`, which let the clock reach
        # minute 1440 and simulate a spurious 25th-hour step each day.
        self.tm = self.tm + 1
        if (self.tm >= 24 * 60):
            self.datacollector.get_model_vars_dataframe().to_csv("one_day.csv")
            self.tm = 0

        # remove satisfied agents from the system
        for a in self.satisfied:
            print(a.unique_id, a.goals, "is satisfied")
            self.grid.move_agent(a, 0)
            self.grid._remove_agent(a, 0)
            self.schedule.remove(a)

    def run_model(self, n):
        """Run the model for `n` one-minute steps."""
        for i in range(n):
            self.step()

    def find_best_room(self, goal):
        """Return the id of the first room whose rates cover `goal`, or -1."""
        for room in self.rooms:
            if goal in self.rooms[room]["rates"]:
                return room
        return -1
class SIRSModelModel(Model):
    """Mesa model translated from the original SIRS simulation.

    Sites are loaded into a networkx graph and agent groups are instantiated
    from JSON files produced during translation.
    """

    def __init__(self, datacollector=None):
        super().__init__()
        # run relative to the directory containing this file, so the JSON
        # data files resolve regardless of the caller's cwd
        os.chdir(os.path.dirname(os.path.realpath(__file__)))
        self.schedule = SimultaneousActivation(self)
        self.G = nx.Graph()
        self.time = 0  # plain iteration counter
        self._generate_sites()
        self.grid = NetworkGrid(self.G)
        # invert the node -> hash mapping into {hash: site} for quick
        # relation lookups while generating agents
        self.site_hashes = {}
        for site, site_hash in dict(self.G.nodes.data('hash')).items():
            self.site_hashes[site_hash] = site
        self._generate_agents()
        self.vita_groups = []
        self.datacollector = datacollector

    def step(self):
        """Advance the model one iteration: collect data, step every agent,
        adopt newly spawned agents, and purge agents flagged as void."""
        if not self.datacollector:
            warnings.warn(
                'This Model has no DataCollector! You may want to add one in the `datacollector` attribute '
                'before running the model')
        else:
            self.datacollector.collect(self)
        self.schedule.step()
        # adopt agents spawned during the step
        while self.vita_groups:
            newcomer = self.vita_groups.pop()
            newcomer.unique_id = self.next_id()
            newcomer.model = self
            self.schedule.add(newcomer)
        # purge agents that marked themselves for removal
        for agent in self.schedule.agents:
            if agent.get('__void__', False):
                self.grid._remove_agent(agent, agent.pos)
                self.schedule.remove(agent)
        self.time += 1

    # ------------------------- INITIALIZATION HELPERS -------------------------

    def _generate_agents(self):
        """
        Called once during __init__ to create appropriate groups from the
        original simulation's model and add them to the model grid.  Loads
        group data from the JSON file created during translation.
        """
        with open("SIRSModelGroups.json", 'r') as file:
            groups = json.load(file)
        for spec in groups:
            # each group entry expands into `m` identical agents
            for _ in range(spec['m']):
                member = SIRSModelAgent(self.next_id(), self,
                                        spec['attr'], spec['rel'])
                self.schedule.add(member)

    def _generate_sites(self):
        """
        Called once during __init__ to load the original simulation's sites
        into the networkx graph.  Loads site data from a JSON file created
        during translation.
        """
        with open("SIRSModelSites.json", 'r') as file:
            sites = json.load(file)
        for record in sites:
            label = str(record['name'])
            self.G.add_node(label,
                            hash=record['hash'],
                            rel_name=record['rel_name'])
            # copy the site's remaining attributes onto the node
            for key, value in record['attr'].items():
                self.G.nodes[label][key] = value

    # ------------------------- RUNTIME FUNCTIONS -------------------------

    def get_attr(self, agent_or_node, name=None):
        """
        Retrieves an attribute of a Mesa Agent or NetworkGrid node.

        :param name: A string containing the attribute to retrieve, or None
        :param agent_or_node: A Mesa Agent or a string corresponding to a node
            in the NetworkGrid
        :return: If agent_or_node is a string, returns the named attribute
            represented by it, or the node's entire attribute dictionary if
            name is None (note: this includes the special 'agent' attribute).
            If agent_or_node is an Agent, returns the named attribute of that
            Agent.
        """
        if name is not None:
            name = mpi(name)
        if isinstance(agent_or_node, Agent):
            # return getattr(agent_or_node, name, agent_or_node.namespace[name])
            return agent_or_node.get(name)
        if isinstance(agent_or_node, str):
            attrs = self.grid.G.nodes[agent_or_node]
            if name is None:
                return attrs
            return attrs.get(name)
        raise TypeError(
            f"get_attr expected a str or Agent for agent_or_node, but received {type(agent_or_node)}"
        )
class SegregationModelModel(Model):
    """Mesa model translated from the original segregation simulation.

    Sites are loaded into a networkx graph and agent groups are instantiated
    from JSON files created during translation.
    """

    def __init__(self, datacollector=None):
        super().__init__()
        # work from directory this file is in (so the JSON data files resolve)
        os.chdir(os.path.dirname(os.path.realpath(__file__)))
        self.schedule = SimultaneousActivation(self)
        self.G = nx.Graph()
        self.time = 0  # simple iteration counter
        self._generate_sites()
        self.grid = NetworkGrid(self.G)
        # make a dictionary of {hash: site} values for easy relation lookups in agent generation
        self.site_hashes = {h: s for s, h in dict(
            self.G.nodes.data('hash')).items()}
        self._generate_agents()
        # agents spawned mid-step are queued here and adopted at end of step()
        self.vita_groups = []
        self.datacollector = datacollector

    def step(self):
        """Advance the model one iteration: collect data, step every agent,
        adopt newly spawned agents, and purge agents flagged '__void__'."""
        if self.datacollector:
            self.datacollector.collect(self)
        else:
            warnings.warn('This Model has no DataCollector! You may want to add one in the `datacollector` attribute '
                          'before running the model')
        self.schedule.step()
        # adopt agents queued by other agents during the step
        while self.vita_groups:
            a = self.vita_groups.pop()
            a.unique_id = self.next_id()
            a.model = self
            self.schedule.add(a)
        # NOTE(review): removes while iterating; assumes schedule.agents
        # returns a fresh list copy — confirm for the Mesa version in use
        for a in self.schedule.agents:
            if a.get('__void__', False):
                self.grid._remove_agent(a, a.pos)
                self.schedule.remove(a)
        self.time += 1

    # ------------------------- INITIALIZATION HELPERS -------------------------

    def _generate_agents(self):
        """
        Called once during __init__ to create appropriate groups from the original simulation's
        model and add them to the model grid. Loads group data from the JSON file created during
        translation.
        """
        with open("SegregationModelGroups.json", 'r') as file:
            j = json.load(file)
        # each group entry expands into 'm' agents sharing attr/rel data
        for group in j:
            for _ in range(group['m']):
                a = SegregationModelAgent(
                    self.next_id(), self, group['attr'], group['rel'])
                self.schedule.add(a)

    def _generate_sites(self):
        """
        Called once during __init__ to load the original simulation's sites into the networkx
        graph. Loads site data from a JSON file created during translation.
        """
        with open("SegregationModelSites.json", 'r') as file:
            j = json.load(file)
        for site in j:
            self.G.add_node(
                str(site['name']), hash=site['hash'], rel_name=site['rel_name'])
            # copy the site's remaining attributes onto the node
            for k, v in site['attr'].items():
                self.G.nodes[str(site['name'])][k] = v

    # ------------------------- RUNTIME FUNCTIONS -------------------------

    def get_attr(self, agent_or_node, name=None):
        """
        Retrieves an attribute of a Mesa Agent or NetworkGrid node.

        :param name: A string containing the attribute to retrieve, or None
        :param agent_or_node: A Mesa Agent or a string corresponding to a node in the NetworkGrid
        :return: If agent_or_node is a string, returns the named attribute represented by it, or
                 the node's entire attribute dictionary if name is None (note: this includes the
                 special 'agent' attribute)
                 If agent_or_node is an Agent, returns the named attribute of that Agent
        """
        # mpi() presumably normalizes the attribute name — confirm its contract
        name = mpi(name) if name is not None else name
        if isinstance(agent_or_node, str):
            node_dict = self.grid.G.nodes[agent_or_node]
            return node_dict.get(name) if name is not None else node_dict
        elif isinstance(agent_or_node, Agent):
            # return getattr(agent_or_node, name, agent_or_node.namespace[name])
            return agent_or_node.get(name)
        else:
            raise TypeError(
                f"get_attr expected a str or Agent for agent_or_node, but received {type(agent_or_node)}")

    def get_groups(self, node_or_model, qry=None):
        """
        Returns a list of agents at the node or the entire model that satisfy the qry.

        :param node_or_model: A string corresponding to a node in the NetworkGrid, or a Mesa Model
        :param qry: a GroupQry namedtuple
        :return: a list of agents at the node satisfying the qry.
        """
        if isinstance(node_or_model, Model):
            agents = node_or_model.schedule.agents
        elif isinstance(node_or_model, str):
            agents = self.grid.get_cell_list_contents([node_or_model])
        else:
            raise TypeError(
                f"get_groups expects a str or Model for node_or_model, but received {type(node_or_model)}")

        # delegate the filtering to the agent; see matches_qry
        return [a for a in agents if a.matches_qry(qry)]

        # if not qry:
        #     return agents
        # # the code below is REALLY PAINFUL... replacing it with 'return agents` makes the code run like 20x faster
        # elif qry.full:
        #     return [a for a in agents
        #             if qry.attr.items() == {k: getattr(a, k) for k in a._attr}.items()
        #             and qry.rel.items() == {k: getattr(a, k) for k in a._rel}.items()
        #             and all([fn(a) for fn in qry.cond])]
        # else:
        #     return [a for a in agents
        #             if qry.attr.items() <= {k: getattr(a, k) for k in a._attr}.items()
        #             and qry.rel.items() <= {k: getattr(a, k) for k in a._rel}.items()
        #             and all([fn(a) for fn in qry.cond])]

    def get_mass(self, agent_node_model, qry=None):
        """
        If agent_node_model is an agent, returns the number of agents with the same attributes as
        it, including itself. This ignores unique_id (and source_name). This is probably very
        unoptimized.
        If agent_node_model is a string corresponding to a node in the NetworkGrid, returns the
        number of agents at that node with the attributes specified in qry, or all agents at that
        node if qry is None.
        If agent_node_model is a Model, returns the total number of agents in the model.
        """
        if isinstance(agent_node_model, str):
            return len(self.get_groups(agent_node_model, qry))
        elif isinstance(agent_node_model, Agent):
            # compare full attribute dicts, minus the unique identifiers
            mod_dict = {k: v for k, v in agent_node_model.__dict__.items()
                        if k not in ('unique_id', 'source_name')}  # toss unique identifiers
            # count agents whose (stripped) attribute dict compares equal
            return sum([mod_dict == {k: v for k, v in a.__dict__.items()
                                     if k not in ('unique_id', 'source_name')}
                        for a in self.schedule.agents])
        elif isinstance(agent_node_model, Model):
            return len(agent_node_model.schedule.agents)
        else:
            raise TypeError(f"get_mass expects a str, Agent, or Model for agent_node_model, but received "
                            f"{type(agent_node_model)}")
class ClimateMigrationModel(Model):
    """Agent-based model of county-to-county migration under climate change.

    Household agents live on a graph of counties; each step they may migrate,
    die, or be born, while county climate variables advance along linear
    NOAA-style projections.
    """

    def __init__(
            self, num_counties, preferences, network_type,
            climate_threshold, limited_radius=True, init_time=0):
        """
        :param num_counties: number of county nodes in the graph
        :param preferences: if truthy, agents get heterogeneous preferences
        :param network_type: which social-network initializer agents use
        :param climate_threshold: threshold passed through to agents
        :param limited_radius: whether moves are restricted by distance
        :param init_time: starting value of the global TICK counter
        """
        super().__init__()
        global TICK
        TICK = init_time
        self.num_agents = 0
        self.agent_index = 0
        self.preferences = preferences
        self.limited_radius = limited_radius
        self.upper_network_size = 3
        self.network_type = network_type
        self.climate_threshold = climate_threshold
        self.schedule = SimultaneousActivation(self)
        self.G = create_graph()
        self.num_counties = num_counties
        self.nodes = self.G.nodes()
        self.grid = NetworkGrid(self.G)
        self.county_climate_ranking = []  # county ids, best -> worst climate
        self.county_population_list = [0] * self.num_counties
        self.county_flux = [0] * self.num_counties
        self.deaths = []   # per-county death counts for the current step
        self.births = []   # per-county birth counts for the current step
        self.county_income = {}  # county -> income-decile counts
        self.datacollector = DataCollector(model_reporters={
            "County Population": lambda m: list(m.county_population_list),
            "County Influx": lambda m: list(m.county_flux),
            "Deaths": lambda m: m.deaths,
            "Births": lambda m: m.births,
            "Total Population": lambda m: m.num_agents})

    def add_agents(self):
        """
        Adds agents based on 2013 ACS population data.
        """
        cumulative_population_list = get_cumulative_population_list()
        self.county_population_list = get_population_list()
        county = 0  # keeps track of which county each agent should be placed in
        index = 0   # keeps track of each agent's unique_id
        # keep creating agents until the last county's population is reached
        while index < cumulative_population_list[county]:
            # create agent and place it in the appropriate county
            agent = Household(index, self)
            self.grid.place_agent(agent, list(self.nodes)[county])
            self.schedule.add(agent)
            # set agent's original_pos attribute, then all other attributes
            agent.original_pos = agent.pos
            agent.initialize_agent()
            # if running model with heterogeneous preferences, set preference
            if self.preferences:
                agent.initialize_preference()
            index += 1
            # if done with county and not at last county, advance the county
            if index == cumulative_population_list[county] and county < self.num_counties - 1:
                county += 1
        # after all agents are added, set model attributes
        self.num_agents = cumulative_population_list[self.num_counties - 1]
        self.agent_index = cumulative_population_list[self.num_counties - 1]

    def initialize_all_random_networks(self):
        """
        Initializes random networks for all agents in model.
        """
        for a in self.schedule.agents:
            a.initialize_random_network()

    def initialize_all_income_networks(self):
        """
        Initializes income-based networks for all agents in model.
        """
        for a in self.schedule.agents:
            a.initialize_income_network()

    def initialize_all_age_networks(self):
        """
        Initializes age-based networks for all agents in model.
        """
        for a in self.schedule.agents:
            a.initialize_age_network()

    def initialize_all_income_age_networks(self):
        """
        Initializes income and age-based networks for all agents in model.
        """
        for a in self.schedule.agents:
            a.initialize_income_age_network()

    def initialize_all_families(self):
        """
        Initializes families for all agents in model.
        """
        for a in self.schedule.agents:
            a.initialize_family()

    def update_population(self):
        """
        Updates population by adding and removing agents.
        """
        # keep track of number of deaths and births per county
        self.deaths = [0] * self.num_counties
        self.births = [0] * self.num_counties
        # remove agents (death)
        # NOTE(review): removes while iterating; assumes schedule.agents
        # returns a fresh list copy — confirm for the Mesa version in use
        for agent in self.schedule.agents:
            # source: https://www.ssa.gov/oact/STATS/table4c6.html#ss
            # approximate US death probability as a function of age
            if random.random() < 0.0001 * (math.e ** (0.075 * agent.age)):
                self.deaths[agent.pos] += 1
                self.grid._remove_agent(agent, agent.pos)
                self.schedule.remove(agent)
                self.num_agents -= 1
        # add agents (birth)
        for county in range(self.num_counties):
            # source: https://www.cdc.gov/nchs/fastats/births.htm
            current_population = self.county_population_list[county]
            to_add = current_population // 100  # birth rate
            self.num_agents += to_add
            for count in range(to_add):
                self.agent_index += 1
                agent = Household(self.agent_index, self)
                self.grid.place_agent(agent, county)
                self.schedule.add(agent)
                # agents are assumed to be 18 as that is the lower bound of
                # a householder's age
                agent.age = 18
                agent.initialize_income(random.random())  # income from age
                agent.initialize_tenure(random.random())  # tenure from income
                agent.initialize_network()  # input-specified network
                agent.initialize_family()
                if self.preferences:
                    agent.initialize_preference()
                agent.original_pos = agent.pos
                self.births[agent.pos] += 1
        # loop through counties, update population counts
        # bug fix: Graph.node was removed in networkx 2.4; .nodes works on 2.x+
        for county in self.nodes:
            self.county_population_list[county] = len(
                self.G.nodes[county]['agent'])

    def update_climate(self):
        """
        Update climate variables based on NOAA's predictions.
        Index 1 represents number of days above 90 degrees Fahrenheit.
        Index 4 represents number of days with < 1 inch of rain.
        Index 7 represents number of days without rain.
        Indexes 3, 6, and 9 are the yearly increases/decreases for these
        estimates.  The update function is a simple linear function.
        Note: More accurate climate data could be integrated by importing
        more climate explorer data.
        """
        # bug fix: Graph.node was removed in networkx 2.4; use .nodes
        for n in self.nodes:
            self.G.nodes[n]['climate'][1] += self.G.nodes[n]['climate'][3]
            self.G.nodes[n]['climate'][4] += self.G.nodes[n]['climate'][6]
            self.G.nodes[n]['climate'][7] += self.G.nodes[n]['climate'][9]

    def rank_by_climate(self):
        """
        Create an ordered list of counties, from least hot/dry climate to
        most hot/dry climate.
        """
        heat_data = []
        dry_data = []
        heat_dry_data = []
        # loop through counties in order, collecting heat/dry measures
        # (bug fix: Graph.node was removed in networkx 2.4; use .nodes)
        for county in self.nodes:
            heat_data.append(self.G.nodes[county]['climate'][1])
            dry_data.append(self.G.nodes[county]['climate'][7])
        # normalize both series by their maxima so they weigh equally
        max_heat = max(heat_data)
        max_dry = max(dry_data)
        heat_data = [(e / max_heat) for e in heat_data]
        dry_data = [(e / max_dry) for e in dry_data]
        # combine the normalized measures per county
        for county in range(self.num_counties):
            heat_dry_data.append(heat_data[county] + dry_data[county])
        heat_dry_data = np.array(heat_dry_data)
        # argsort returns county ids ordered from best to worst climate
        county_climate_rank = np.argsort(heat_dry_data)
        self.county_climate_ranking = list(county_climate_rank)

    def update_income_counts(self):
        """
        Update income distribution by county.
        """
        # reset the per-county income-decile histogram
        for county in range(self.num_counties):
            self.county_income[county] = [0] * 10
        # tally each agent into its county's histogram
        for agent in self.schedule.agents:
            self.county_income[agent.pos][agent.income - 1] += 1
        # income counts are printed at the beginning and end of run
        print(self.county_income)

    def get_preference_distribution(self):
        """
        Print the distribution of agent preferences (5 categories).
        """
        preference_list = [0] * 5
        for agent in self.schedule.agents:
            preference_list[agent.preference] += 1
        print(preference_list)

    def step(self):
        """
        Advance the model by one step.
        """
        global TICK
        # update climate ranking
        self.rank_by_climate()
        # advance all agents by one step
        self.schedule.step()
        # update population
        self.update_population()
        # update climate
        self.update_climate()
        # collect data
        self.datacollector.collect(self)
        # update step counter
        TICK += 1