Example #1
def simulate(agents, config, seed=None, max_steps=1000):
    """Simulate a run of the civil violence model.

    Parameters
    ----------
        agents: list
            List of whynot.simulators.civil_violence.Agent to populate the model
        config: whynot.simulators.civil_violence.Config
            Simulation parameters
        seed: int
            (Optional) Seed for all randomness in model setup and execution.
        max_steps: int
            Maximum number of steps to run the civil_violence model.

    Returns
    -------
        observations: pd.DataFrame
            Pandas dataframe containing the "observations" recorded for each
            agent. Observations are defined in the `agent_reporter` and include
            agent attributes along with:
                "pos" # position on the grid
                "jail_sentence" # agent's jail sentence at model end
                "condition"  # agent's condition (rebelling or acquiesent) at model end
                "arrest_probability" # agent's probability of arrest
                "arrests" # number of time agent has been arrested
                "days_active"  # how long as the agent spent in rebellion

    """
    # Ensure everything will fit on the grid
    num_cells = config.grid_height * config.grid_width
    num_cops = int(np.floor(len(agents) * config.cop_fraction))

    assert len(agents) + num_cops < num_cells

    model = CivilViolenceModel(
        height=config.grid_height,
        width=config.grid_width,
        cop_vision=config.cop_vision,
        max_jail_term=config.max_jail_term,
        prison_interaction=config.prison_interaction,
        arrest_prob_constant=config.arrest_prob_constant,
        max_steps=max_steps,
        seed=seed,
    )
    # Place agents on grid
    for i, agent in enumerate(agents):
        model.add_agent(
            i,
            model.find_empty(),
            agent.hardship,
            agent.legitimacy,
            agent.risk_aversion,
            agent.active_threshold,
            agent.vision,
        )

    for i in range(num_cops):
        model.add_cop(i + len(agents), model.find_empty())

    # Which attributes to report
    agent_reporters = {
        "pos": "pos",
        "breed": "breed",
        "jail_sentence": "jail_sentence",
        "condition": "condition",
        "arrest_probability": "arrest_probability",
        "arrests": "arrests",
        "hardship": "hardship",
        "regime_legitimacy": "regime_legitimacy",
        "days_active": "days_active",
        "risk_aversion": "risk_aversion",
        "threshold": "threshold",
        "arrest_parameter": "arrest_parameter",
        "vision": "vision",
    }

    datacollector = DataCollector(agent_reporters=agent_reporters)
    while model.running:
        model.step()
    datacollector.collect(model)
    dataframe = datacollector.get_agent_vars_dataframe()
    observations = dataframe[dataframe.breed == "citizen"].drop(
        columns="breed")
    return observations
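

# Usage sketch (illustrative, hedged): Config and Agent below are the
# whynot.simulators.civil_violence dataclasses named in the docstring; their
# default constructors are assumed here, so adjust to the installed package's
# actual import paths and signatures.
from whynot.simulators.civil_violence import Agent, Config

example_config = Config()
example_agents = [Agent() for _ in range(100)]
example_observations = simulate(example_agents, example_config, seed=42, max_steps=200)
print(example_observations.head())
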
class FluModel(Model):
    def __init__(self,
                 N,
                 width=10,
                 height=10,
                 death_rate=0.006,
                 ptrans=0.5,
                 recovery_days=24,
                 recovery_sd=6,
                 init_inf=1.5,
                 biased=False):

        self.num_agents = N
        self.schedule = RandomActivation(self)
        self.grid = MultiGrid(width, height, True)
        self.death_rate = death_rate
        self.ptrans = ptrans

        self.recovery_days = recovery_days
        self.recovery_sd = recovery_sd
        self.running = False
        self.deceased = 0
        self.init_inf = init_inf / 100
        self.biased = biased

        for i in range(self.num_agents):

            a = FluAgent(i, self)
            self.schedule.add(a)

            x = self.random.randrange(self.grid.width)
            y = self.random.randrange(self.grid.height)
            self.grid.place_agent(a, (x, y))

            infected = np.random.choice([0, 1],
                                        p=[1 - self.init_inf, self.init_inf])

            if (infected == 1):
                a.state = State.INFECTED
                a.recovery_time = self.get_recovery_time()

        # if initial infection percent is very small, pick a single agent to infect
        if (self.init_inf * self.num_agents < 1):
            a = self.random.choice(self.schedule.agents)
            a.state = State.INFECTED
            a.recovery_time = self.get_recovery_time()

        self.datacollector = DataCollector(agent_reporters={
            "State": "state",
            'p': 'p_test'
        })

    def step(self):
        self.datacollector.collect(self)
        self.schedule.step()

    def get_recovery_time(self):
        return int(
            self.random.normalvariate(self.recovery_days, self.recovery_sd))

    def sample(self, percent, i):

        # access DataCollector (states, weights, deceased agents)
        df = self.datacollector.get_agent_vars_dataframe()
        weights = df.p / sum(df.p)

        # select only current step
        df = df.iloc[df.index.get_level_values('Step') == i]

        # sample defined percent for State
        df2 = df.sample(frac=percent / 100, random_state=1,
                        weights=weights).drop(columns='p')

        return int((df2.State == 1).sum())
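

# Run sketch for FluModel (illustrative, hedged: FluAgent, State and the
# mesa/numpy imports used above are assumed to be defined in this module, and
# each agent is assumed to expose the 'p_test' weight attribute reported as
# 'p' by the DataCollector):
flu_model = FluModel(N=200, width=20, height=20, init_inf=2.0)
for _ in range(50):
    flu_model.step()
print(flu_model.sample(percent=10, i=25))  # weighted count of sampled infected at step 25
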
Example #3
class SimModel(Model):
    def __init__(
        self,
        all_data,
        model_initial_state,
        output_data_writer,
        model_params,
        class_id_and_rng=None,
        class_id=None,
        speedup=1,
        **kwargs,
    ):
        self.data = all_data
        self.model_state = model_initial_state
        self.output_data_writer = output_data_writer

        if class_id_and_rng:
            (self.class_id, self.rng) = class_id_and_rng
        else:
            self.rng = np.random.default_rng()
            # class_id may be None here; it is resolved from kwargs or a default below
            self.class_id = class_id

        logger.info("Modelling class %s", self.class_id)

        self.model_params = model_params
        self.speedup = speedup
        self.write_file = False

        # Update any parameters passed as kwargs
        param_dict = dataclasses.asdict(self.model_params)
        update_params = False
        for kw in kwargs:
            if kw in param_dict:
                param_dict[kw] = kwargs[kw]
                update_params = True

        if update_params:
            self.model_params = ModelParamType(**param_dict)

        if "class_id" in kwargs:
            self.class_id = kwargs["class_id"]
        elif not self.class_id:
            self.class_id = 489

        if "write_file" in kwargs:
            self.write_file = kwargs["write_file"]

        # Get summary data to display to users
        self.class_summary_data = None
        if "summary_data" in kwargs and kwargs["summary_data"] is not None:
            summary_df = kwargs["summary_data"]
            class_summary_data = summary_df[summary_df["class_id"] ==
                                            self.class_id]
            if not class_summary_data.empty:
                self.class_summary_data = class_summary_data

        self.class_data = self.data.get_class_data(self.class_id)
        self.class_size = len(self.class_data)

        self.schedule = RandomActivation(self)

        # Calculate steps per day and holidays
        self.home_learning_steps = 0
        self.home_learning_days = 0
        # Calculate number of days from 1st September to 16th July inclusive
        self.start_date = datetime.date(2021, 9, 1)
        self.current_date = self.start_date
        self.end_date = datetime.date(2022, 7, 16)
        self.total_days = (self.end_date - self.start_date).days

        self.ticks_per_school_day = round(
            TruncatedNormalGenerator.get_single_value(
                self.model_params.maths_ticks_mean,
                self.model_params.maths_ticks_sd,
                10,
                600,
            ))
        self.ticks_per_home_day = self.model_params.ticks_per_home_day

        self.set_speedup()
        logger.debug("%s ticks per school day", self.ticks_per_school_day)

        self.holiday_week_numbers = self.calculate_holiday_weeks(
            self.start_date,
            self.end_date,
            self.model_params.number_of_holidays,
            self.model_params.weeks_per_holiday,
        )

        # Create truncnorm generators for school and home learning random
        # increments
        # Use batch sizes as total days * class_size * ticks per day
        # (overestimate to ensure we only generate values once)
        batch_multiplier = self.total_days * self.class_size
        self.school_learning_random_gen = TruncatedNormalGenerator(
            5 / self.model_params.school_learn_mean_divisor,
            self.model_params.school_learn_sd,
            lower=0,
            batch_size=self.ticks_per_school_day * batch_multiplier,
        )
        self.home_learning_random_gen = TruncatedNormalGenerator(
            5 / 2000,
            0.08,
            lower=0,
            batch_size=self.ticks_per_home_day * batch_multiplier,
        )

        # Create TeacherVariable instances for quality and control
        self.teacher_control_variable = TeacherVariable(
            self.model_params.teacher_control_mean,
            self.model_params.teacher_control_sd,
            self.model_params.teacher_control_variation_sd,
            self.rng,
            self.total_days,
        )
        self.teacher_quality_variable = TeacherVariable(
            self.model_params.teacher_quality_mean,
            self.model_params.teacher_quality_sd,
            self.model_params.teacher_quality_variation_sd,
            self.rng,
            self.total_days,
        )

        # Create grid with torus = False - in a real class, students at either end of the classroom don't interact
        self.grid_params = get_grid_size(len(self.class_data),
                                         self.model_params.group_size)
        self.grid = SingleGrid(self.grid_params.width,
                               self.grid_params.height,
                               torus=False)

        sorted_pupils = []
        if self.model_params.group_by_ability:
            sorted_pupils = self.class_data.sort_values("Ability")
        else:
            sorted_pupils = self.class_data.sample(frac=1)

        # Set up agents
        pupil_counter = 0
        for i in range(self.grid_params.n_groups):
            group_size = self.grid_params.max_group_size
            if i >= self.grid_params.n_full_groups:
                group_size -= 1

            group_pupils = sorted_pupils.iloc[pupil_counter:pupil_counter +
                                              group_size]
            group_x = math.floor(i / self.grid_params.n_group_rows)
            group_y = i % self.grid_params.n_group_rows

            for j, row in enumerate(group_pupils.iterrows()):
                index, pupil_data = row

                # Work out position on grid
                x = (group_x * self.grid_params.group_width +
                     group_x) + math.floor(j / self.grid_params.group_height)
                y = (group_y * self.grid_params.group_height +
                     group_y) + (j % self.grid_params.group_height)

                # create agents from data
                agent = Pupil(
                    (x, y),
                    self,
                    pupil_data.student_id,
                    PupilLearningState.YELLOW,
                    pupil_data.Inattentiveness,
                    pupil_data.hyper_impulsive,
                    pupil_data.Deprivation,
                    pupil_data.start_maths,
                    pupil_data.Ability,
                    group_size,
                )
                # Place Agents on grid
                self.grid.position_agent(agent, x, y)
                self.schedule.add(agent)

            pupil_counter += group_size

        # Collecting data while running the model
        self.pupil_state_datacollector = DataCollector(
            model_reporters={
                "Learning Students": get_num_learning,
                "Passive Students": get_num_passive,
                "Disruptive Students": get_num_disruptors,
            })
        self.pupil_state_datacollector.collect(self)
        self.mean_maths = compute_ave(self)

        self.agent_datacollector = DataCollector(
            agent_reporters={
                "student_id": "student_id",
                "end_maths": "e_math",
                "start_maths": "s_math",
                "Ability": "ability",
                "Inattentiveness": "inattentiveness",
                "hyper_impulsive": "hyper_impulsive",
                "Deprivation": "deprivation",
            })

        # Monitor mean maths score
        self.maths_datacollector = DataCollector({
            "Date": get_date_for_chart,
            "Mean Score": compute_ave,
        })
        self.maths_datacollector.collect(self)
        self.running = True

    def set_speedup(self):
        if self.speedup > 1:
            min_ticks = min(self.ticks_per_school_day, self.ticks_per_home_day)
            # Can't have fewer than 1 tick per school day so reduce the speedup accordingly
            if self.speedup > min_ticks:
                self.speedup = min_ticks
            # Speedup should be divisible by self.ticks_per_school_day
            # e.g. if 10 ticks per day
            # Can't have speedup more than 10 as we need at least 1 tick per day
            # If speedup is 5 then we have 2 ticks per day
            # If speedup is 8 then we would have 10/8 = 1.25 ticks per day
            # Round that to 1, then speedup would be 10 (=10/1) not 8
            # If speedup is 6 then we would have 10/6 = 1.67 ticks per day
            # Round that to 2, then speedup would be 5 (=10/2) not 6
            speedup_ticks_per_school_day = round(self.ticks_per_school_day /
                                                 self.speedup)
            self.speedup = self.ticks_per_school_day / speedup_ticks_per_school_day
            self.ticks_per_school_day = speedup_ticks_per_school_day

            speedup_ticks_per_home_day = round(self.ticks_per_home_day /
                                               self.speedup)
            self.home_speedup = self.ticks_per_home_day / speedup_ticks_per_home_day
            self.ticks_per_home_day = speedup_ticks_per_home_day
        else:
            self.home_speedup = 1

    @staticmethod
    def calculate_holiday_weeks(start_date, end_date, number_of_holidays,
                                weeks_per_holiday):
        """Calculate which weeks should be holidays given the total number of
        days from start to end of the school year, and the number and length
        of holidays

        Returns an array of week numbers which are holidays
        """
        # Get start of first week of term
        # Go back to start of week
        start_week = start_date - datetime.timedelta(days=start_date.weekday())
        if start_date.weekday() >= 5:
            # start_date is weekend so go to following Monday
            start_week += datetime.timedelta(weeks=1)

        # Get difference from following week after end day
        total_weeks = math.ceil(
            (end_date + datetime.timedelta(days=1) - start_week).days / 7)

        n_terms = number_of_holidays + 1
        n_holiday_weeks = number_of_holidays * weeks_per_holiday
        n_school_weeks = total_weeks - n_holiday_weeks
        min_weeks_per_term = math.floor(n_school_weeks / n_terms)
        remainder_weeks = n_school_weeks % n_terms

        weeks_per_term = []
        for i in range(n_terms):
            term_weeks = min_weeks_per_term
            if i < remainder_weeks:
                term_weeks += 1
            weeks_per_term.append(term_weeks)

        holiday_week_numbers = []
        current_week = 0
        for term_weeks in weeks_per_term[:-1]:
            holiday_start_week = current_week + term_weeks
            holiday_week_numbers.extend(
                range(holiday_start_week, holiday_start_week + weeks_per_holiday))
            current_week += term_weeks + weeks_per_holiday
        return holiday_week_numbers

    def update_school_time(self):
        time_in_day = self.schedule.steps % self.ticks_per_school_day
        if (time_in_day == self.ticks_per_school_day - 1
                or self.ticks_per_school_day == 1):
            # Have just finished the penultimate tick of school day, so add
            # home learning time ready for the next tick
            self.home_learning_days = 1

            # If it's Friday add 2 more days' home learning for the weekend
            if self.current_date.weekday() == 4:
                self.home_learning_days += 2

                # Is it a holiday?
                week_number = math.floor(
                    (self.current_date - self.start_date).days / 7)
                if week_number in self.holiday_week_numbers:
                    # Add holiday weeks
                    self.home_learning_days += 7 * self.model_params.weeks_per_holiday

            self.home_learning_steps = self.home_learning_days * self.ticks_per_home_day

        else:
            self.home_learning_steps = 0

        if time_in_day == 0:
            # Update current date by self.home_learning_days now we've completed the last tick of the day
            self.current_date += datetime.timedelta(
                days=self.home_learning_days)
            self.home_learning_days = 0

            # Update teacher control/teacher_quality
            self.teacher_control_variable.update_current_value()
            self.teacher_quality_variable.update_current_value()

            # Reset all pupils' states ready for the next day
            for pupil in self.schedule.agents:
                pupil.resetState()

    def step(self):
        # Reset counter of learning and disruptive agents
        self.model_state.learning_count = 0
        self.model_state.disruptive_count = 0

        # Advance the model by one step
        self.schedule.step()

        self.update_school_time()

        # collect data
        self.maths_datacollector.collect(self)
        self.pupil_state_datacollector.collect(self)
        self.mean_maths = compute_ave(self)

        if self.current_date > self.end_date or not self.running:
            logger.debug("Finished run; collecting data")
            self.running = False

            # Remove the truncated normal generators (TNGs) from the model and its pupils
            self.school_learning_random_gen = None
            self.home_learning_random_gen = None
            for pupil in self.schedule.agents:
                pupil.school_learning_ability_random_gen = None
                pupil.home_learning_ability_random_gen = None

            self.agent_datacollector.collect(self)
            agent_data = self.agent_datacollector.get_agent_vars_dataframe()
            logger.debug("Got agent data")
            self.output_data_writer.write_data(agent_data, self.class_id,
                                               self.class_size)
            logger.debug("Written to output file")
            self.agent_datacollector = None
            self.maths_datacollector = None
            self.pupil_state_datacollector = None
            logger.info("Completed run for class %s", self.class_id)
Example #4
class ForestFire(Model):
    """
    Simple Forest Fire model.
    """
    def __init__(self,
                 height=100,
                 width=100,
                 density=0.65,
                 server=True,
                 num_steps=1000):
        """
        Create a new forest fire model.

        Args:
            height, width: The size of the grid to model
            density: What fraction of grid cells have a tree in them.
        """
        # Initialize model parameters
        self.height = height
        self.width = width
        self.density = density
        self.server = server
        # Set up model objects
        self.schedule = RandomActivation(self)
        self.grid = Grid(height, width, torus=False)
        self.num_steps = num_steps

        self.datacollector = DataCollector({
            "Fine":
            lambda m: self.count_type(m, "Fine"),
            "On Fire":
            lambda m: self.count_type(m, "On Fire"),
            "Burned Out":
            lambda m: self.count_type(m, "Burned Out")
        })

        # Place a tree in each cell with Prob = density
        for (contents, x, y) in self.grid.coord_iter():
            if self.random.random() < self.density:
                # Create a tree
                new_tree = TreeCell((x, y), self)
                # Set all trees in the first column on fire.
                if x == 0:
                    new_tree.condition = "On Fire"
                self.grid._place_agent((x, y), new_tree)
                self.schedule.add(new_tree)

        self.running = True
        self.datacollector.collect(self)

    def step(self):
        """
        Advance the model by one step.
        """
        self.schedule.step()
        # collect data
        self.datacollector.collect(self)

        # Halt if no more fire
        if self.count_type(self, "On Fire") == 0:
            self.running = False

    @staticmethod
    def count_type(model, tree_condition):
        """
        Helper method to count trees in a given condition in a given model.
        """
        count = 0
        for tree in model.schedule.agents:
            if tree.condition == tree_condition:
                count += 1
        return count

    def run_model(self,
                  n=None,
                  export_agent_data=False,
                  export_model_data=False):

        if not self.server:
            if n:
                self.num_steps = n
            for _ in range(self.num_steps):
                self.step()
            if export_agent_data and export_model_data:
                return (self.datacollector.get_model_vars_dataframe(),
                        self.datacollector.get_agent_vars_dataframe())
            if export_agent_data:
                return self.datacollector.get_agent_vars_dataframe()
            if export_model_data:
                return self.datacollector.get_model_vars_dataframe()
            return self
        else:
            from .server import server

            server.launch()
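

# Run sketch without the visualization server (illustrative, hedged: TreeCell
# and the mesa imports used above are assumed to be defined in this module):
forest = ForestFire(height=50, width=50, density=0.6, server=False, num_steps=200)
burn_history = forest.run_model(export_model_data=True)
print(burn_history.tail())  # per-step counts of Fine / On Fire / Burned Out trees
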
class BoltzmannWealthModelNetwork(Model):
    """A model with some number of agents."""
    def __init__(self,
                 b=35.98,
                 a=0.6933,
                 beta=0.95,
                 delta=0.08,
                 theta=0.8,
                 N=100):  #N- number of agents
        self.N = N
        self.b = b
        self.a = a
        self.agents = []
        self.fit_alpha = 0
        self.fit_loc = 0
        self.fit_beta = 0
        self.t = 0
        self.total = {}  # technology-switch counts, reported as 'TotalSwitch'
        self.beta = beta
        self.delta = delta
        self.theta = theta
        self.time = 1  #for sensitivity analysis
        self.G = nx.barabasi_albert_graph(n=N, m=1)
        nx.set_edge_attributes(
            self.G, 1, 'weight')  #setting all initial edges with a weight of 1
        self.nodes = np.linspace(0, N - 1, N,
                                 dtype='int')  #to keep track of the N nodes
        self.schedule = RandomActivation(self)
        self.datacollector = DataCollector(model_reporters={
            'beta': 'b',
            'a': 'a',
            'fit_alpha': 'fit_alpha',
            'fit_beta': 'fit_beta',
            'loc': 'fit_loc',
            'TotalSwitch': 'total',
            'Threshold': 't'
        },
                                           agent_reporters={
                                               "k": 'k',
                                               'lamda': 'lamda',
                                               'ability': 'alpha',
                                               'technology': 'tec'
                                           })

        for i, node in enumerate(self.G.nodes()):
            agent = MoneyAgent(i, self)
            self.schedule.add(agent)

        self.running = True
        self.datacollector.collect(self)

    def Global_Attachment(self):
        #print("Global Attachment no: {}".format(self.count))
        node1 = random.choice(self.nodes)
        node2 = random.choice(self.nodes)
        # Redraw until we get a distinct pair that is not already connected
        while node1 == node2 or self.G.has_edge(node1, node2):
            node2 = random.choice(self.nodes)
            node1 = random.choice(self.nodes)
        #adding the edge node1-node2
        for agent in self.agents:
            if (agent.unique_id == node1):
                node1_a = agent
            if (agent.unique_id == node2):
                node2_a = agent
        self.G.add_edge(node1,
                        node2,
                        weight=Edge_Weight(node1_a, node2_a, self.b, self.a))

    def step(self):
        #print(self.time)
        self.schedule.step()
        # collect data
        self.Global_Attachment()  #for sensitivity analysis
        self.datacollector.collect(self)
        agent_df = self.datacollector.get_agent_vars_dataframe()
        agent_df.reset_index(level=['Step', 'AgentID'], inplace=True)
        k = agent_df.k.to_numpy()
        self.t = np.percentile(k, q=10)
        #print("Threshold = ", self.t)
        count = 0
        trap = []
        agents = []
        for agent in self.nodes:
            df = agent_df.loc[(agent_df["AgentID"] == agent)
                              & (agent_df['k'] < self.t)].reset_index(
                                  drop=True)
            if (not df.empty):
                agents.append(agent)
                j = 0  # start at the first row of the (re-indexed) trap dataframe
                count = 0
                while (j < len(df) - 1):
                    if (int(df.loc[j + 1].Step) - int(df.loc[j].Step) == 1):
                        count += 1
                    j += 1
                    #print("i = ", i)
                trap.append(count)
                self.Count = count
        self.fit_alpha, self.fit_loc, self.fit_beta = stats.gamma.fit(trap)
        self.time += 1  #
        #counting number of switches
        switch = {'Agent': [], 'Total': []}
        for agent in self.nodes:
            df = agent_df.loc[agent_df.AgentID == agent].reset_index(drop=True)
            tech = df.technology.to_numpy()
            count = 0
            for i in range(len(tech) - 1):
                if ((tech[i] == 'L' and tech[i + 1] == 'H')
                        or (tech[i] == 'H' and tech[i + 1] == 'L')):
                    count += 1
            if (count):
                switch['Agent'].append(agent)
                switch['Total'].append(count)
        switch = pd.DataFrame(switch)
        no_switch = switch.Total.unique()
        no_switch.sort()
        # Map each distinct switch count to the number of agents with that count
        total = {n: len(switch.loc[switch.Total == n]) for n in no_switch}
        #print(total)
        self.total = total

    def run_model(self, n):
        for i in tqdm(range(n)):
            self.time = i + 1
            self.step()
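

# Short run sketch (illustrative, hedged: MoneyAgent, Edge_Weight and the
# networkx/numpy/scipy/tqdm imports used above are assumed; agent attributes
# such as 'k', 'lamda' and 'tec' are provided by MoneyAgent):
wealth_model = BoltzmannWealthModelNetwork(N=50)
wealth_model.run_model(10)
print(wealth_model.t)      # 10th-percentile wealth threshold after the run
print(wealth_model.total)  # number of agents per technology-switch count
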
Example #6
class EvacuationModel(Model):
    """A Mesa ABM model to simulate evacuation during a flood

    Args:
        hazard: Spatial table of flood hazard zones in WGS84
        output_path: Path to output files without extension
        domain: Polygon used to select OSM data, required if the graph, agents or targets are not specified
        target_types: List of OSM amenity values to use as targets, defaults to school
        network: Undirected network generated from OSM road network
        targets: Spatial table of OSM amenities
        target_capacity: The number of agents that can be evacuated to each target
        agents: Spatial table of agent starting locations
        seed: Seed value for random number generation

    Attributes:
        output_path (str): Path to output files without extension
        schedule (RandomActivation): Scheduler which activates each agent once per step,
            in random order, with the order reshuffled every step
        hazard (GeoDataFrame): Spatial table of flood hazard zones in WGS84
        G (Graph): Undirected network generated from OSM road network
        nodes (GeoDataFrame): Spatial table of nodes in G
        edges (GeoDataFrame): Spatial table edges in G
        grid (NetworkGrid): Network grid for agents to travel around based on G
        data_collector (DataCollector): Stores the model state at each time step
        target_nodes (Series): Series of nodes to evacuate to
        target_capacity (int): The number of agents that can be evacuated to each target
        igraph: Duplicate of G as an igraph object to speed up routing

    """
    def __init__(
            self,
            hazard: GeoDataFrame,
            output_path: str,
            domain: Optional[Polygon] = None,
            target_types: Iterable[str] = tuple(['school']),
            network: Optional[Graph] = None,
            targets: Optional[GeoDataFrame] = None,
            target_capacity: int = 100,
            agents: Optional[GeoDataFrame] = None,
            seed: Optional[int] = None):
        super().__init__()
        self._seed = seed
        self.output_path = output_path

        self.hazard = hazard
        self.schedule = RandomActivation(self)
        self.target_capacity = target_capacity

        if network is None:
            self.G = osmnx.graph_from_polygon(domain, simplify=False)
            self.G = self.G.to_undirected()
        else:
            self.G = network

        self.nodes: GeoDataFrame
        self.edges: GeoDataFrame
        self.nodes, self.edges = osmnx.save_load.graph_to_gdfs(self.G)

        if agents is None:
            agents = GeoDataFrame(geometry=create_footprints_gdf(domain).centroid)

        if targets is None:
            targets = osmnx.pois_from_polygon(domain, amenities=list(target_types))
            # Query can return polygons as well as points, only using the points
            targets = targets[targets.geometry.geom_type == 'Point']

        output_gpkg = output_path + '.gpkg'

        driver = 'GPKG'

        targets.crs, agents.crs = [self.nodes.crs] * 2

        nodes_tree = cKDTree(np.transpose([self.nodes.geometry.x, self.nodes.geometry.y]))

        # Prevents warning about CRS not being the same
        self.hazard.crs = self.nodes.crs
        self.hazard.to_file(output_gpkg, layer='hazard', driver=driver)

        agents_in_hazard_zone: GeoDataFrame = sjoin(agents, self.hazard)
        agents_in_hazard_zone = agents_in_hazard_zone.loc[~agents_in_hazard_zone.index.duplicated(keep='first')]
        agents_in_hazard_zone.geometry.to_file(output_gpkg, layer='agents', driver=driver)

        assert len(agents_in_hazard_zone) > 0, 'There are no agents within the hazard zone'

        targets_in_hazard_zone: GeoDataFrame = sjoin(targets, self.hazard)
        targets_in_hazard_zone = targets_in_hazard_zone.loc[~targets_in_hazard_zone.index.duplicated(keep='first')]

        targets_outside_hazard_zone = targets[~targets.index.isin(targets_in_hazard_zone.index.values)]
        targets_outside_hazard_zone.to_file(output_gpkg, layer='targets', driver=driver)

        assert len(targets_outside_hazard_zone) > 0, 'There are no targets outside the hazard zone'

        _, node_idx = nodes_tree.query(
            np.transpose([agents_in_hazard_zone.geometry.x, agents_in_hazard_zone.geometry.y]))

        _, target_node_idx = nodes_tree.query(
            np.transpose([targets_outside_hazard_zone.geometry.x, targets_outside_hazard_zone.geometry.y]))

        for (_, row), nearest_node in zip(targets_outside_hazard_zone.iterrows(), self.nodes.index[target_node_idx]):
            if not self.G.has_node(row.osmid):
                self.G.add_edge(nearest_node, row.osmid, length=0)
                self.G.nodes[row.osmid]['osmid'] = row.osmid
                self.G.nodes[row.osmid]['x'] = row.geometry.x
                self.G.nodes[row.osmid]['y'] = row.geometry.y

        self.nodes, self.edges = osmnx.save_load.graph_to_gdfs(self.G)

        self.nodes[['osmid', 'geometry']].to_file(output_gpkg, layer='nodes', driver=driver)
        self.edges[['osmid', 'geometry']].to_file(output_gpkg, layer='edges', driver=driver)

        output_gml = output_path + '.gml'
        osmnx.nx.write_gml(self.G, path=output_gml)
        self.igraph = igraph.read(output_gml)

        self.target_nodes = targets_outside_hazard_zone.osmid

        self.grid = NetworkGrid(self.G)

        # Create agents
        for i, idx in enumerate(node_idx):
            a = agent.EvacuationAgent(i, self)
            self.schedule.add(a)
            self.grid.place_agent(a, self.nodes.index[idx])
            a.update_route()
            a.update_location()

        self.data_collector = DataCollector(
            model_reporters={
                'evacuated': evacuated,
                'stranded': stranded
            },
            agent_reporters={'position': 'pos',
                             'reroute_count': 'reroute_count',
                             'lat': 'lat',
                             'lon': 'lon',
                             'highway': 'highway',
                             'status': status})

    def step(self):
        """Advances the model by one step and then stores the current state in data_collector"""
        self.schedule.step()
        self.data_collector.collect(self)

    def run(self, steps: int):
        """Runs the model for the given number of steps`

        Args:
            steps: number of steps to run the model for
        Returns:
            DataFrame: the agent vars dataframe
        """
        self.data_collector.collect(self)
        for _ in range(steps):
            self.step()
            if self.data_collector.model_vars['evacuated'][-1] + self.data_collector.model_vars['stranded'][-1] == len(
                    self.schedule.agents):
                # Continue for 5 steps after all agents evacuated or stranded
                for _ in range(5):
                    self.step()
                break
        self.data_collector.get_agent_vars_dataframe().astype({'highway': pd.Int64Dtype()}).to_csv(
            self.output_path + '.agent.csv')
        self.data_collector.get_model_vars_dataframe().to_csv(self.output_path + '.model.csv')
        return self.data_collector.get_agent_vars_dataframe()
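

# Usage sketch (illustrative, hedged: the file name and domain coordinates are
# placeholders; osmnx, igraph, geopandas and the local agent/status helpers
# used above are assumed to be importable):
from shapely.geometry import Polygon

example_hazard = GeoDataFrame.from_file('flood_hazard.gpkg')  # flood polygons in WGS84
example_domain = Polygon([(-2.80, 54.00), (-2.70, 54.00), (-2.70, 54.10), (-2.80, 54.10)])
evacuation = EvacuationModel(hazard=example_hazard,
                             output_path='outputs/run1',
                             domain=example_domain,
                             seed=1)
evacuation_agent_vars = evacuation.run(steps=500)
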
Example #7
class LoveMatch(Model):
    '''
    Love-match market model:

    In this model, each individual wanders around the space at random; upon finding a match
    (an agent of the opposite sex whose beauty and wealth parameters meet what it desires),
    it disappears from the model.
    The aim is to observe the distribution of beauty and wealth profiles over time,
    until we see who never manages to find a partner.
    '''
    def __init__(
        self,
        height=50,
        width=50,
        density=0.8,
        HM_pc=0.2,
        entry_rate=1,
        max_agents=750
    ):  # Here we set the size of the grid where the model runs, along with the initial parameters.
        self.height = height
        self.width = width
        self.density = density
        self.HM_pc = HM_pc

        self.entry_rate = entry_rate

        self.schedule = RandomActivation(self)
        self.grid = MultiGrid(height, width, torus=False)
        self.max_agents = max_agents
        self.parejas = 0
        self.hombres = 0
        self.mujeres = 0
        self.unhappy = 0
        self.idcounter = 0

        # In this section, we label each agent by type

        for cell in self.grid.coord_iter():
            x = cell[1]
            y = cell[2]
            if self.random.random() < self.density:
                if self.random.random() < self.HM_pc:
                    gender = 1
                    self.hombres += 1
                else:
                    gender = 0
                    self.mujeres += 1
                #Create each agent and assign its ID; each time an agent is created, the ID counter increases by one
                #Note: agent characteristics are modelled with a log-normal distribution. This gives only positive values, and the beauty/wealth ranking concentrates between 0 and 1
                self.idcounter += 1
                agent = miAgente((x, y),
                                 self,
                                 gender,
                                 beauty=np.random.lognormal(0.5, 0.30),
                                 wealth=np.random.lognormal(0.5, 0.30),
                                 desired_beauty=np.random.lognormal(0.5, 0.3),
                                 desired_wealth=np.random.lognormal(0.5, 0.3),
                                 time_to_critical=random.randint(10, 30),
                                 sojourn=-1,
                                 is_critical=0,
                                 myid=self.idcounter)
                #place the agents in the model
                self.schedule.add(agent)
                self.grid.place_agent(agent, (x, y))
        #Run the model
        self.running = True
        #Collect the data relevant to the agents and the model
        self.datacollector = DataCollector(model_reporters={
            'density': 'density',
            'parejas': 'parejas',
            'unhappy': 'unhappy',
            'hombres': 'hombres',
            'mujeres': 'mujeres'
        },
                                           agent_reporters={
                                               'myid': 'myid',
                                               'wealth': 'wealth',
                                               'gender': 'gender',
                                               'beauty': 'beauty',
                                               'desired_beauty':
                                               'desired_beauty',
                                               'desired_wealth':
                                               'desired_wealth',
                                               'time_to_critical':
                                               'time_to_critical',
                                               'is_critical': 'is_critical',
                                               'sojourn': 'sojourn'
                                           })
        self.datacollector.collect(self)

    def update(self):
        if self.schedule.get_agent_count() < self.max_agents:
            for i in range(self.entry_rate):
                x = self.random.randrange(self.grid.width)
                y = self.random.randrange(self.grid.height)
                if self.random.random() < self.HM_pc:
                    gender = 1
                    self.hombres += 1
                else:
                    gender = 0
                    self.mujeres += 1

                self.idcounter += 1
                agent = miAgente((x, y),
                                 self,
                                 gender,
                                 beauty=random.gauss(4, 2),
                                 wealth=random.gauss(4, 3),
                                 desired_beauty=random.gauss(4, 3),
                                 desired_wealth=random.gauss(3, 2),
                                 time_to_critical=random.gauss(20, 5),
                                 sojourn=-1,
                                 is_critical=0,
                                 myid=self.idcounter)
                self.schedule.add(agent)
                self.grid.place_agent(agent, (x, y))

    def step(self):  # This step lets the model keep running until every agent has found a partner
        self.schedule.step()
        # For plotting purposes, collect information about the number of couples
        self.datacollector.collect(self)

        ### Save the relevant information to CSV tables.
        self.datacollector.get_agent_vars_dataframe().to_csv("test_me_a.csv")
        self.datacollector.get_model_vars_dataframe().to_csv("test_me_m.csv")

        ### Finally, the model stops once the number of agents is zero
        if self.schedule.get_agent_count() == 0:
            self.running = False
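

# Run sketch (illustrative, hedged: miAgente and the mesa/numpy imports used
# above are assumed; note that step() rewrites test_me_a.csv / test_me_m.csv
# in the working directory on every step):
love_model = LoveMatch(height=50, width=50, density=0.8, HM_pc=0.5)
for _ in range(100):
    if not love_model.running:
        break
    love_model.step()
print(love_model.parejas, love_model.hombres, love_model.mujeres)
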
Example #8
class People(AgentBasedDict):
    """
    A class to hold People.
    """
    def __init__(self, map_, crs=None, *args, **kwargs):
        super().__init__(crs=crs, *args, **kwargs)
        self.intersections = AgentBasedIntersections(map_.intersections)
        self.links = AgentBasedLinks(map_.links)
        self.links.update_intersections(self.intersections)

        self.data_collector = DataCollector(
            agent_reporters={"geometry": "geometry"})

    def to_crs(self, crs):
        """ """
        raise NotImplementedError(
            "I'm not able to change the crs on People. Maybe create a GeoDataFrame and then change the crs."
        )

    def add_person(self, person):
        self[person.name] = person
        self.model.schedule.add(person)

    def create_people_from_od(self, od):
        mode_choice_model = ModeChoiceModel()
        mode_choice_model.add_mode(Driver, 0.8)
        mode_choice_model.add_mode(Cyclist, 0.1)
        mode_choice_model.add_mode(Pedestrian, 0.1)
        for _, person in tqdm(od.iterrows(), total=len(od)):
            route = self.intersections.nodes_to_links(
                person.routes[0]["legs"][0]["annotation"]["nodes"])
            mode = mode_choice_model.predict()
            person = mode(self.model, person.home_geometry, route)
            self.add_person(person)

    def post_people(self, url):
        people = self.to_geopandas()
        people = people.to_crs("EPSG:4326")  # to_crs returns a new GeoDataFrame
        return requests.post(url, data={"people": people.to_json()})

    def get_agent_vars_geodataframe(self,
                                    start_time=datetime(1970, 1, 1, 0, 0, 0)):
        gdf = geopandas.GeoDataFrame(
            self.data_collector.get_agent_vars_dataframe())
        gdf.crs = self.crs
        one_day = timedelta(1)
        index = pandas.date_range(start_time, start_time + one_day,
                                  freq="S")[0:len(gdf)]
        gdf.index = gdf.index.set_levels(index, level=0)
        return gdf

    def get_trajectories(self):
        gdf = self.get_agent_vars_geodataframe()
        gdf.reset_index(level="AgentID", inplace=True)
        return movingpandas.TrajectoryCollection(gdf, "AgentID")

    def simulate(
        self,
        number_of_rounds=10,
        post_people_url=None,
    ):
        aea = CRS.from_string("North America Albers Equal Area Conic")
        self.intersections.to_crs(aea)
        self.links.to_crs(aea)
        self.crs = self.links.crs
        self.data_collector.collect(self.model)
        if post_people_url:
            werkzeug_thread = WerkzeugThread(people_flask_app())
            werkzeug_thread.start()
            self.post_people(post_people_url)
        for round_number in range(number_of_rounds):
            logging.info("Simulating round %s" % (round_number, ))
            self.model.step()
            self.intersections.model.step()
            self.data_collector.collect(self.model)
            if post_people_url:
                self.post_people(post_people_url)
        if post_people_url:
            werkzeug_thread.stop()
class BoltzmannWealthModelNetwork(Model):
    """A model with some number of agents."""
    def __init__(self, b, a, N):  #N- number of agents

        self.N = N
        self.b = b
        self.a = a
        self.agents = []
        self.gini = 0
        self.poor_no = 0  # number of agents below the poverty line, reported as 'Agents below yp'
        self.time = 0
        self.Budget = 0
        self.G = nx.barabasi_albert_graph(n=N, m=1)
        nx.set_edge_attributes(
            self.G, 1, 'weight')  #setting all initial edges with a weight of 1
        self.nodes = np.linspace(0, N - 1, N,
                                 dtype='int')  #to keep track of the N nodes

        self.schedule = RandomActivation(self)
        self.datacollector = DataCollector(model_reporters={
            "Gini": 'gini',
            'Agents below yp': 'poor_no'
        },
                                           agent_reporters={
                                               "slope": "slope",
                                               "k_t": 'k',
                                               'income': 'income',
                                               'consumption': 'consum',
                                               'lamda': 'lamda',
                                               'alpha': 'alpha',
                                               'technology': 'tec'
                                           })
        for i, node in enumerate(self.G.nodes()):
            agent = MoneyAgent(i, self)
            self.schedule.add(agent)

        self.running = True
        self.datacollector.collect(self)

    def Global_Attachment(self):
        #print("Global Attachment no: {}".format(self.count))
        node1 = random.choice(self.nodes)
        node2 = random.choice(self.nodes)
        # Redraw until we get a distinct pair that is not already connected
        while node1 == node2 or self.G.has_edge(node1, node2):
            node2 = random.choice(self.nodes)
            node1 = random.choice(self.nodes)
        #adding the edge node1-node2
        for agent in self.agents:
            if (agent.unique_id == node1):
                node1_a = agent
            if (agent.unique_id == node2):
                node2_a = agent
        self.G.add_edge(node1,
                        node2,
                        weight=Edge_Weight(node1_a, node2_a, self.b, self.a))

    def compute_gini(self):
        agent_wealths = [agent.k for agent in self.schedule.agents]
        x = sorted(agent_wealths)
        B = sum(xi * (self.N - i)
                for i, xi in enumerate(x)) / (self.N * sum(x))
        return 1 + (1 / self.N) - 2 * B

    def step(self):
        self.schedule.step()
        # collect data
        self.datacollector.collect(self)

    def run_model(self, n):
        for i in tqdm(range(n)):
            self.time = i + 1
            self.step()
            self.count_L = 0
            self.count_H = 0
            self.Global_Attachment()
            self.gini = self.compute_gini()
            print("Step: ", self.time)
            data = self.datacollector.get_agent_vars_dataframe()
            data = data.reset_index()
            data = data.loc[data.Step == i]
            if (self.time == 1):
                # Fix the redistribution budget at the first time step as a fraction (9%) of total income
                self.Budget = 0.09 * sum(data.income)

            #calculating the total poverty shortfall (yp is the poverty-line income, defined elsewhere in this module)
            yi_data = data.loc[data.income < yp]
            income = yi_data.income.reset_index(drop=True)
            #print(income)
            S = sum(yp - (yi_data.income.reset_index(drop=True).to_numpy()))
            #print(S)
            poor_agents = yi_data.AgentID.reset_index(drop=True).to_numpy()
            self.poor_no = len(poor_agents)
            #print("Total Poor: ", len(poor_agents))
            self.count_L = 0
            self.count_H = 0
            for poor in poor_agents:
                for agent in self.agents:  #accessing the class object
                    if (poor == agent.unique_id):
                        if (agent.tec == 'L'):
                            self.count_L += 1
                        else:
                            self.count_H += 1
            #print("Low: ", self.count_L)
            #print("High :", self.count_H)
            if (self.Budget > S):
                #print("out")
                for poor in poor_agents:  #accessing the poor node
                    for agent in self.agents:  #accessing the class object
                        if (poor == agent.unique_id):
                            additional = yp - agent.income
                            agent.income += additional
            else:
                #print("in")
                for poor in poor_agents:  #accessing the poor node
                    for agent in self.agents:  #accessing the class object
                        if (poor == agent.unique_id):
                            additional = yp * (self.Budget / S)
                            agent.income += additional
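

# Standalone check of the Gini formula used in compute_gini above, on made-up
# wealths (independent of MoneyAgent and the network):
def _gini(wealths):
    x = sorted(wealths)
    n = len(x)
    b = sum(xi * (n - i) for i, xi in enumerate(x)) / (n * sum(x))
    return 1 + (1 / n) - 2 * b

assert abs(_gini([1, 2, 3, 4]) - 0.25) < 1e-12  # hand-checked: Gini of [1, 2, 3, 4] is 0.25
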
class CivilViolenceModel(Model):
    """ Civil violence model class """
    def __init__(self,
                 max_iter=200,
                 height=40,
                 width=40,
                 agent_density=0.7,
                 agent_vision=7,
                 active_agent_density=0.01,
                 cop_density=0.04,
                 cop_vision=7,
                 inf_threshold=40,
                 tackle_inf=False,
                 k=2.3,
                 graph_type=GraphType.BARABASI_ALBERT.name,
                 p=0.1,
                 p_ws=0.1,
                 directed=False,
                 max_jail_term=30,
                 active_threshold_t=0.1,
                 initial_legitimacy_l0=0.82,
                 movement=True,
                 seed=None):
        """
        Create a new civil violence model.

        :param max_iter: Maximum number of steps in the simulation.
        :param height: Grid height.
        :param width: Grid width.
        :param agent_density: Approximate percentage of cells occupied by citizen agents.
        :param agent_vision: Radius of the agent vision in every direction.
        :param active_agent_density: Enforce initial percentage of cells occupied by active agents.
        :param cop_density: Approximate percentage of cells occupied by cops.
        :param cop_vision: Radius of the cop vision in every direction.
        :param initial_legitimacy_l0: Initial legitimacy of the central authority.
        :param inf_threshold: Number of connected nodes required before an agent is considered an influencer.
        :param tackle_inf: Remove influencers when an outbreak is starting.
        :param max_jail_term: Maximal jail term.
        :param active_threshold_t: Threshold above which a citizen agent becomes active.
        :param k: Arrest term constant k.
        :param graph_type: Graph type used to build the network.
        :param p: Probability of edge creation.
        :param directed: Whether the graph is directed.
        :param movement: Whether agents can move at the end of an iteration.
        :param seed: Random seed.

        Additional attributes:
            running : is the model running
            iteration : current step of the simulation
            citizen_list : a list storing the citizen agents added to the model.
            influencer_list : a list storing the citizen agents that are influencers

            grid : A 2D cellular automata representing the real world space environment
            network : A NetworkGrid with as many nodes as (citizen) agents representing the social network.
            Agents in the NetworkGrid are deep copies of the agents in the MultiGrid, because the Mesa
            implementation assumes a single space. (Example: the NetworkGrid place_agent method changes the
            agent's "pos" attribute, meaning one agent can't be on both the MultiGrid and the NetworkGrid.)
            We maintain a dictionary of agent positions instead.

        """
        super().__init__()

        # =============================
        # === Initialize attributes ===
        # =============================

        self.seed = seed
        self.random.seed(self.seed)

        # Initialize Model grid and schedule
        self.height = height
        self.width = width
        self.grid = MultiGrid(self.width, self.height,
                              torus=True)  # Grid or MultiGrid ?
        self.schedule = RandomActivation(self)
        self.max_iter = max_iter
        self.iteration = 0  # Simulation iteration counter
        self.movement = movement

        # Set Model main attributes
        self.max_jail_term = max_jail_term
        self.active_threshold_t = active_threshold_t
        self.initial_legitimacy_l0 = initial_legitimacy_l0
        self.legitimacy = initial_legitimacy_l0
        self.k = k
        self.graph_type = graph_type

        self.agent_density = agent_density
        self.agent_vision = agent_vision
        self.active_agent_density = active_agent_density
        self.cop_density = cop_density
        self.cop_vision = cop_vision
        self.inf_threshold = inf_threshold

        self.citizen_list = []
        self.cop_list = []
        self.influencer_list = []
        self.jailings_list = [0, 0, 0, 0]
        self.outbreaks = 0
        self.outbreak_now = 0
        self.outbreak_influencer_now = False
        self.tackle_inf = tackle_inf

        date = datetime.now()
        self.path = f'output/{self.graph_type}_{date.month}_{date.day}_{date.hour}_{date.minute}_'

        # === Set Data collection ===
        self.datacollector = DataCollector(
            model_reporters=self.get_model_reporters(),
            agent_reporters=self.get_agent_reporters())

        # ==============================
        # === Initialize environment ===
        # ==============================

        # Add agents to the model
        unique_id = 0
        for (contents, x, y) in self.grid.coord_iter():
            random_x = self.random.random()
            if random_x < self.agent_density:
                # Add agents
                agent = Citizen(unique_id=unique_id,
                                model=self,
                                pos=(x, y),
                                hardship=self.random.random(),
                                susceptibility=self.random.random(),
                                influence=self.random.random(),
                                expression_intensity=self.random.random(),
                                legitimacy=self.initial_legitimacy_l0,
                                risk_aversion=self.random.random(),
                                threshold=self.active_threshold_t,
                                vision=self.agent_vision)

                unique_id += 1
                self.citizen_list.append(agent)
                self.grid.place_agent(agent,
                                      (x, y))  # Place agent in the MultiGrid
                self.schedule.add(agent)

            elif random_x < (self.agent_density + self.active_agent_density):
                # Enforce an initial proportion of active agents
                agent = Citizen(unique_id=unique_id,
                                model=self,
                                pos=(x, y),
                                hardship=self.random.random(),
                                susceptibility=self.random.random(),
                                influence=self.random.random(),
                                expression_intensity=self.random.random(),
                                legitimacy=self.initial_legitimacy_l0,
                                risk_aversion=self.random.random(),
                                threshold=0,
                                vision=self.agent_vision)

                unique_id += 1
                self.citizen_list.append(agent)
                self.grid.place_agent(agent,
                                      (x, y))  # Place agent in the MultiGrid
                self.schedule.add(agent)

            elif random_x < (self.agent_density + self.active_agent_density +
                             self.cop_density):
                # Add law enforcement officer
                agent = Cop(unique_id=unique_id,
                            model=self,
                            pos=(x, y),
                            vision=self.cop_vision)

                unique_id += 1
                self.cop_list.append(agent)
                self.grid.place_agent(agent,
                                      (x, y))  # Place agent in the MultiGrid
                self.schedule.add(agent)

        # Generate a social network composed of every civilian agents
        self.G, self.network_dict = generate_network(self.citizen_list,
                                                     graph_type, p, p_ws,
                                                     directed, seed)
        # print_network(self.G, self.network_dict)  # Uncomment to print the network.

        # With network in place, set the influencers.
        self.set_influencers(self.inf_threshold)

        # Create a graph showing the degree frequency of the nodes
        create_fig(self.G.degree, draw=False)  # Set draw=True to draw a figure

        self.running = True
        self.datacollector.collect(self)

    def step(self):
        """
        One step in agent-based model simulation
        """

        self.schedule.step()
        self.iteration += 1
        self.update_legitimacy()

        self.outbreak_score_monitoring()
        self.datacollector.collect(self)

        # Save initial values
        if self.iteration == 1:
            self.save_initial_values(save=False)

        # Stop the model after a certain amount of iterations.
        if self.iteration > self.max_iter:
            self.save_data(save=False)
            self.running = False

    def outbreak_score_monitoring(self):
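        """Track outbreaks (more than 50 active citizens) and, if tackle_inf is set, jail influencers once more than 30 citizens are active."""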
        if self.tackle_inf:
            if self.count_type_citizens(
                    "ACTIVE") > 30 and not self.outbreak_influencer_now:
                self.jail_influencer()
                self.outbreak_influencer_now = True

            if self.count_type_citizens("ACTIVE") < 30:
                self.outbreak_influencer_now = False

        # Count the number of outbreaks
        if self.count_type_citizens("ACTIVE") > 50 and self.outbreak_now == 0:
            self.outbreaks += 1  # Total number of outbreaks
            self.outbreak_now = 1  # Flag that an outbreak is currently ongoing

        if self.count_type_citizens("ACTIVE") < 50:
            self.outbreak_now = 0

    def save_data(self, save=True):
        """Write the collected agent data to a CSV file if save is True."""
        if save:
            df_end = self.datacollector.get_agent_vars_dataframe()
            name = self.path + 'run_values.csv'
            df_end.to_csv(name)

    def save_initial_values(self, save=False):
        """Write the model's initial parameter values to a JSON file if save is True."""
        if save:
            dictionary_data = {
                'agent_density': self.agent_density,
                'agent_vision': self.agent_vision,
                'active_agent_density': self.active_agent_density,
                'cop_density': self.cop_density,
                'initial_legitimacy_l0': self.initial_legitimacy_l0,
                'inf_threshold': self.inf_threshold,
                'max_iter': self.max_iter,
                'max_jail_term': self.max_jail_term,
                'active_threshold_t': self.active_threshold_t,
                'k': self.k,
                'graph_type': self.graph_type,
            }

            name = self.path + 'ini_values.json'
            with open(name, "w") as a_file:
                json.dump(dictionary_data, a_file)

    def update_legitimacy(self):
        """
        Compute legitimacy (Epstein Working Paper 2001)
        """
        self.jailings_list[3] = self.jailings_list[2]
        self.jailings_list[2] = self.jailings_list[1]
        nb_active_and_quiescent = self.count_type_citizens(
            "ACTIVE") + self.count_type_citizens("QUIESCENT")
        # + 1 avoids division by zero when every citizen is jailed
        self.jailings_list[1] = self.jailings_list[0] / (
            nb_active_and_quiescent + 1)
        self.jailings_list[0] = 0

        sum_jailed = self.jailings_list[1] - self.jailings_list[
            2]**2 - self.jailings_list[3]**3
        self.legitimacy = self.initial_legitimacy_l0 * (1 - sum_jailed)
        if self.legitimacy <= 0:
            self.legitimacy = 0
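
    # Worked example of the update above (illustrative values only): with
    # initial_legitimacy_l0 = 0.8, jailings_list[1] = 0.10, jailings_list[2] = 0.05 and
    # jailings_list[3] = 0.02, sum_jailed = 0.10 - 0.05**2 - 0.02**3 = 0.097492, so the
    # new legitimacy is 0.8 * (1 - 0.097492) ≈ 0.722; legitimacy falls as jailings accumulate.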

    def get_model_reporters(self):
        """
        Dictionary of model reporter names and attributes/funcs
        Reference to functions instead of lambda are provided to handle multiprocessing case.
        Multiprocessing pool cannot directly handle lambda.
        """
        return {
            "QUIESCENT": compute_quiescent,
            "ACTIVE": compute_active,
            "JAILED": compute_active,
            "LEGITIMACY": compute_legitimacy,
            "INFLUENCERS": compute_influencers,
            "OUTBREAKS": compute_outbreaks
        }
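
    # Note: Mesa's DataCollector accepts attribute-name strings or callables as reporters.
    # The callables above are module-level functions (e.g. compute_active) rather than
    # lambdas because the standard pickle module cannot serialize lambdas, which would
    # break these reporters in a multiprocessing.Pool batch run.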

    def get_agent_reporters(self):
        """
        Dictionary of agent reporter names and attributes/funcs
        """

        return {
            "Grievance": "grievance",
            "Hardship": "hardship",
            "State": "state",
            "Influencer": "influencer",
            "N_connections": "network_neighbors",
            "InfluencePi": "influence"
        }

    def count_type_citizens(self, state_req):
        """
        Helper method to count agents.
        Cop agents can't disappear from the map, so number of cops can be retrieved from model attributes.
        """
        count = 0
        for agent in self.citizen_list:
            if isinstance(agent, Cop):
                continue
            if agent.jail_sentence and state_req == 'JAILED':
                count += 1
            else:
                if agent.state is State.ACTIVE and state_req == 'ACTIVE':
                    count += 1
                elif agent.state == State.QUIESCENT and state_req == 'QUIESCENT':
                    count += 1
        return count

    def remove_agent_grid(self, agent):
        """
        Removes an agent from the grid.
        """
        self.grid.remove_agent(agent)

    def add_jailed(self, agent):
        """
        Un-jail an agent
        If the sentence of a jailed agent is over, place him back on a random empty cell in the grid.
        """

        if len(self.grid.empties) == 0:
            raise Exception("There are no empty cells.")

        new_pos = self.random.choice(list(self.grid.empties))
        self.grid.place_agent(agent, new_pos)

    def set_influencers(self, inf_threshold=150):
        """
        If an agent in the network is connected to a large number of nodes, this agent is
        considered an influencer and receives a corresponding tag.
        :param inf_threshold: number of connections a node needs to be considered an influencer
        """
        for agent in self.citizen_list:
            agent.set_influencer(
                len(list(self.G.neighbors(agent.network_node))), inf_threshold)
            if agent.influencer:
                self.influencer_list.append(agent)

    def remove_influencer(self):
        """
        Removes the agents carrying the influencer tag from the grid, the schedule and the network.
        Gives manual control over the model to evaluate the influence of influencers.
        """
        if self.influencer_list:
            for i in range(len(self.influencer_list)):
                to_remove = self.random.choice(self.influencer_list)
                if to_remove.pos:  # Check if influencer is jailed.
                    self.grid.remove_agent(to_remove)
                self.influencer_list.remove(to_remove)
                self.citizen_list.remove(to_remove)
                self.schedule.remove(to_remove)
                self.G.remove_node(to_remove.network_node)

    def jail_influencer(self):
        """
        Jail randomly chosen agents with the influencer tag and remove them from the grid.
        Gives manual control over the model to evaluate the influence of influencers.
        """
        if self.influencer_list:
            for i in range(len(self.influencer_list)):
                arrestee = self.random.choice(self.influencer_list)
                if arrestee.state == State.JAILED:  # Check if influencer is jailed.
                    continue
                sentence = self.random.randint(1, self.max_jail_term)  # Use the model's seeded RNG
                arrestee.jail_sentence = sentence
                arrestee.state = State.JAILED
                self.jailings_list[0] += 1
                if sentence > 0:
                    self.remove_agent_grid(arrestee)

                print(f'{arrestee.unique_id} was an influencer and has been jailed.')
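
# Typical run loop for this model (sketch only; 'model' stands for an instance of the
# class defined above, whose constructor is shown earlier in this example):
#     while model.running:
#         model.step()
#     results = model.datacollector.get_agent_vars_dataframe()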
Example #11
0
class KSUModel(Model):
    """A model simulating KSU student"""
    def __init__(self, n_students, n_active: int, width: int, height: int):
        self.running = True
        self.schedule = SimultaneousActivation(self)
        self.grid = SingleGrid(width, height, torus=False)
        self.n_students: int = n_students
        self._semester_gen = self._gen_semester_code()
        self.semester = next(self._semester_gen)
        self.ALL_GENDERS = gen_gender(self.n_students)

        # Majors
        self.F1SEQ1_MAJORS = gen_f1seq1_majors(self.n_students)
        self.major_switcher = MajorSwitch()

        # Add students to the KSU environment
        # Fraction of student agents that start out active; the rest are inactive
        per_active = n_active / 100

        for i in range(self.n_students):
            if np.random.binomial(1, per_active):
                student = Student(i, self, self.ALL_GENDERS[i])
                student.majors.append(self.F1SEQ1_MAJORS[i])
            else:
                student = Student(i, self, self.ALL_GENDERS[i], False)
                student.majors.append("N/A")

            self.schedule.add(student)
            self.grid.position_agent(student)

        self.datacollector = DataCollector(
            agent_reporters={
                "GPA": "gpa",
                "ATTEMPTED_HRS": "attempted_hrs",
                "EARNED_HRS": "earned_hrs",
                "Major": "curr_major"
            })

    def step(self):
        self.datacollector.collect(self)
        self.schedule.step()

        try:
            self.update_semester()
            self.update_credit_hrs()
            self.update_gpa()
        except StopIteration:
            agent_gpa = self.datacollector.get_agent_vars_dataframe()
            agent_gpa.to_csv("gpa.csv", index=False)
            self.running = False

    def update_semester(self) -> None:
        self.semester = next(self._semester_gen)

    def update_credit_hrs(self):
        active_students: List[Student] = [
            student for student in self.schedule.agents if student.is_active
        ]
        n_active_students = len(active_students)

        earned_hrs = [
            round(earned_hr)
            for earned_hr in gen_credit_hrs(self.semester, n_active_students)
        ]
        attempted_hrs = [
            round(attempted_hr) for attempted_hr in gen_credit_hrs(
                self.semester, n_active_students, False)
        ]

        for i, student in enumerate(active_students):
            curr_major = tlz.last(student.majors)
            new_earned_hrs = student.earned_hrs
            new_attempted_hrs = student.attempted_hrs

            # Check if earned & attempted credit hours exist for the current semester
            if earned_hrs:
                new_earned_hrs = 0 if curr_major == "E" else earned_hrs[i]
                new_attempted_hrs = 0 if curr_major == "E" else attempted_hrs[i]

            student.earnedhrs_history.append(new_earned_hrs)
            student.attemptedhrs_history.append(new_attempted_hrs)

    def update_gpa(self):
        active_students: List[Student] = [
            student for student in self.schedule.agents if student.is_active
        ]
        n_active_students = len(active_students)

        gpa_distr = [
            round(earned_hr)
            for earned_hr in gen_credit_hrs(self.semester, n_active_students)
        ]

        for i, student in enumerate(active_students):
            curr_major = tlz.last(student.majors)
            new_gpa = student.gpa

            # Check if a GPA value exists for the current semester
            if gpa_distr:
                new_gpa = 0 if curr_major == "E" else gpa_distr[i]

            student.gpa_history.append(new_gpa)

    @staticmethod
    def _gen_semester_code():
        """Yield semester codes such as 'F1SEQ1', 'F1SEQ2', 'S1SEQ1', 'S1SEQ2', 'F2SEQ1', ..."""
        semester_pos = list(range(1, 7))
        # Two fall and two spring terms per position, each split into sequences 1 and 2
        semester_season = product(semester_pos, ("F", "F", "S", "S"))
        seq = cycle([1, 2])

        for pos, season in semester_season:
            yield f"{season}{pos}SEQ{next(seq)}"