Example #1
class MoneyModel(Model):
    """A model with some number of agents."""
    def __init__(self, N, width, height):
        self.num_agents = N
        self.grid = MultiGrid(width, height, True)
        self.schedule = RandomActivation(self)

        self.cities = []

        self.running = True

        # Create agents
        for i in range(self.num_agents):
            a = MoneyAgent(i, self)
            self.schedule.add(a)
            # Add the agent to a random grid cell
            x = self.random.randrange(self.grid.width)
            y = self.random.randrange(self.grid.height)
            self.grid.place_agent(a, (x, y))

        self.datacollector = DataCollector(
            model_reporters={"Gini": compute_gini},  # `compute_gini` defined above
            agent_reporters={"Wealth": "wealth"})

    def establish_cities(self):
        for contents, x, y in self.grid.coord_iter():
            if len(contents) > 2:

                for agent in contents:
                    agent.in_city = True

    def step(self):
        self.establish_cities()
        self.datacollector.collect(self)
        self.schedule.step()
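
The `compute_gini` reporter passed to the DataCollector is not included in this excerpt; a minimal sketch, assuming the standard Mesa Boltzmann-wealth tutorial definition and that every agent exposes a `wealth` attribute:

def compute_gini(model):
    # Gini coefficient of agent wealth (0 = perfect equality, 1 = maximal inequality).
    agent_wealths = [agent.wealth for agent in model.schedule.agents]
    x = sorted(agent_wealths)
    N = model.num_agents
    if N == 0 or sum(x) == 0:
        return 0
    B = sum(xi * (N - i) for i, xi in enumerate(x)) / (N * sum(x))
    return 1 + (1 / N) - 2 * B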
Example #2
class Kvecinos(Model):
    def __init__(self, height, width, initial_population, n_clases, k):
        super().__init__()
        self.height = height
        self.width = width
        self.initial_population = initial_population
        # n_clases is the number of colors (one per class)
        self.n_clases = n_clases
        self.k = k
        self.unique_id = 0

        # Create the scheduler and the grid
        self.schedule = RandomActivation(self)
        self.grid = MultiGrid(self.width, self.height, torus=False)

        self.colors = {
            0: "red",
            1: "blue",
            2: "green0",
            3: "violet",
            4: "cyan",
            5: "orange"
        }

        self.clases = []

        self.setup()

        self.running = True

    def setup(self):
        for agent, x, y in self.grid.coord_iter():
            patch = Cell(self.unique_id, self, (x, y), "black")
            self.unique_id += 1
            self.grid.place_agent(patch, (x, y))
            self.schedule.add(patch)

        for i in range(self.initial_population):
            x = random.randint(0, self.width - 1)
            y = random.randint(0, self.height - 1)
            pos = (x, y)
            color = random.randint(0, self.n_clases - 1)

            cell = self.grid.get_cell_list_contents(pos)[0]
            self.clases.append(cell)
            self.clases[i].color = self.colors[color]

            individual = Individual(self.unique_id, self, pos,
                                    self.colors[color])
            self.unique_id += 1
            self.grid.place_agent(individual, pos)

    def step(self):
        self.schedule.step()
Example #3
class GD_Hunter(Model):

    def __init__(self, initial_population, hunter_energy_consumption, hunter_energy ):
        self.description = "An example model built on mesa_gd. It uses the Generalized Darwinism framework to show how the hunting habit of searching for prey (turkeys here) only in new locations becomes dominant, as it gives a slight advantage to hunters."
        # Height and Width of the environment grid
        self.height = 50
        self.width = 50
        self.initial_population = initial_population
        self.schedule = RandomActivationByBreed(self)
        self.grid = MultiGrid(self.height, self.width, torus=False)
        self.hunter_energy_consumption = hunter_energy_consumption
        self.hunter_energy = hunter_energy
        # Create initial resource distribution in the environment
        # Create turkeys distribution
        for _, x, y in self.grid.coord_iter(): # For each cell, ignore its contents, gets position
            max_turkey = self.random.randrange(0,5) # Maximum number of turkeys that this location will carry before it is consumed
            turkey = Turkey((x, y), self, max_turkey) # Create a turkey group with the maximum amount of turkeys defined for that location
            self.grid.place_agent(turkey, (x, y)) # Place the turkey
            self.schedule.add(turkey) # Add the turkeys to the schedule

        # Create interactor:
        for i in range(self.initial_population):
            x = self.random.randrange(self.width)
            y = self.random.randrange(self.height)
            replicators_list = ['rv_moveany_directions', 'rv_movefwd_directions']
            replicators=[]
            replicators.append (self.random.choice(replicators_list)) # Instruction to the researcher: List here the replicator variations as function names
            age = self.random.randint (0, 45) # Creates an initial population with an age distribution
            replicators_dictionary = {
                  "rv_moveany_directions": rv_moveany_directions,
                  "rv_movefwd_directions": rv_movefwd_directions
                }
            hunter = Hunter((x,y), self, self.hunter_energy, self.hunter_energy_consumption, age, replicators, replicators_dictionary) # Create a hunter with position, model, energy, energy consumption, age and replicators
            self.grid.place_agent(hunter, (x, y)) # Place the hunters
            self.schedule.add(hunter) # Add the hunter to the schedule

        self.running = True
        self.datacollector = DataCollector({"Population": lambda m: m.schedule.get_breed_count(Hunter), "Frequency of Replicator": replicator_frequency})
        self.datacollector.collect(self)

    def step(self):
        self.schedule.step()
        self.datacollector.collect(self)

    def run_model(self, step_count=50):
        for i in range(step_count):
            self.step()
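
A hedged usage sketch (the argument values below are illustrative, not taken from the source): the model is driven with `run_model` and the collected series is read back from its `DataCollector`.

# Illustrative parameters; the source does not prescribe specific values.
model = GD_Hunter(initial_population=50, hunter_energy_consumption=1, hunter_energy=20)
model.run_model(step_count=50)
results = model.datacollector.get_model_vars_dataframe()
print(results.tail())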
Example #4
class SegregationModel(Model):
    def __init__(self, number_of_agents, width, height, happiness_threshold,
                 minority_rate):
        self.num_agents = number_of_agents
        self.grid = MultiGrid(width, height, False)
        self.schedule = RandomActivation(self)

        ## Create Agents
        number_of_minority = round(number_of_agents * (minority_rate))
        for i in range(number_of_agents):
            if (i + 1) <= number_of_minority:
                ethnicity = 1
            else:
                ethnicity = 2
            a = SegregationAgent(i, ethnicity, happiness_threshold, self)
            self.schedule.add(a)

            # place agent on grid
            # x = self.random.randrange(self.grid.width)
            # y = self.random.randrange(self.grid.height)
            empty_place = self.grid.find_empty()
            self.grid.place_agent(a, empty_place)
            logger.info("Agent " + str(i) + " placed in " + str(empty_place))

        self.datacollector = DataCollector(
            agent_reporters={"Happiness": "happy"},
            model_reporters={"Overall_happiness": overall_happiness},
        )

    def plot_grid(self):
        ethnicities = np.zeros((self.grid.width, self.grid.height))

        for cell in self.grid.coord_iter():
            cell_content, x, y = cell
            agent_present = len(cell_content)
            if agent_present > 0:
                cell_content = list(cell_content)
                ethnicities[x][y] = cell_content[0].ethnicity

        plt.imshow(ethnicities, interpolation="nearest")
        plt.colorbar()
        plt.show()

    def step(self):
        self.datacollector.collect(self)
        self.schedule.step()
        self.plot_grid()
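
The `overall_happiness` model reporter is referenced but not defined in this excerpt; a hypothetical sketch, assuming each SegregationAgent maintains a boolean `happy` attribute (as the agent reporter above suggests):

def overall_happiness(model):
    # Hypothetical helper: fraction of agents that are currently happy.
    agents = model.schedule.agents
    if not agents:
        return 0.0
    return sum(1 for agent in agents if agent.happy) / len(agents)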
Example #5
class MoneyModel(Model):
    def __init__(self, number_of_agents, width, height):
        self.num_agents = number_of_agents
        self.grid = MultiGrid(width, height, True)
        self.schedule = RandomActivation(self)

        # Create agents
        for i in range(number_of_agents):
            a = MoneyAgent(i, self)
            self.schedule.add(a)

            ## place agent on grid
            x = self.random.randrange(self.grid.width)
            y = self.random.randrange(self.grid.height)
            self.grid.place_agent(a, (x, y))

        # start datacollector (created once, after all agents are placed)
        self.datacollector = DataCollector(
            model_reporters={"Gini": compute_gini},
            agent_reporters={"Wealth": "wealth"},
        )

    def plot_grid(self):
        agent_counts = np.zeros((self.grid.width, self.grid.height))

        for cell in self.grid.coord_iter():
            cell_content, x, y = cell
            agent_count = len(cell_content)
            agent_counts[x][y] = agent_count

        plt.imshow(agent_counts, interpolation="nearest")
        plt.colorbar()
        plt.show()

    def step(self):
        # Advance the model by one step
        self.datacollector.collect(self)
        self.schedule.step()
        self.plot_grid()

    def check_agent_status(self):
        for agent in self.schedule.agents:
            logger.info("Agent " + str(agent.unique_id) + " has money: " + str(agent.wealth))
Example #6
def main():
    s = Simulation(processes=1)
    grid = MultiGrid(50, 50, True)

    # build sugar and spice
    sugar_distribution = pylab.genfromtxt("sugar-map.txt")
    spice_distribution = sugar_distribution.T
    sugars = []
    spices = []
    for _, x, y in grid.coord_iter():
        max_sugar = sugar_distribution[x, y]
        max_spice = spice_distribution[x, y]
        sugar = SugarPatch((x, y), max_sugar)
        spice = SpicePatch((x, y), max_spice)
        sugars.append(sugar)
        spices.append(spice)
        grid.place_agent(sugar, (x, y))
        grid.place_agent(spice, (x, y))

    # build agents
    agents = s.build_agents(SsAgent, 'SsAgent', 100,
                            parameters={'grid': grid})

    # prices = []
    for r in range(100):
        s.advance_round(r)
        for sugar in sugars:
            sugar.step()
        for spice in spices:
            spice.step()
        agents.move()
        agents.eat()
        print(',', len(agents.trade_with_neighbors()))

        agents.trade()
        agents.agg_log(possessions=['sugar', 'spice'])

    s.finalize()
Example #7
class WolfSheepPredation(Model):
    '''
    Wolf-Sheep Predation Model
    '''

    verbose = False  # Print-monitoring

    def __init__(self, height=20, width=20,
                 initial_sheep=150, initial_wolves=10,
                 sheep_reproduce=1, wolf_reproduce=40,
                 wolf_gain_from_food=15,
                 grass_regrowth_time=20, sheep_gain_from_food=4):
        '''
        Create a new Wolf-Sheep model with the given parameters.

        Args:
            initial_sheep: Number of sheep to start with
            initial_wolves: Number of wolves to start with
            sheep_reproduce: Energy taken from the parents to create a new lamb
            wolf_reproduce: Energy taken from the parents to create a new wolf cub
            wolf_gain_from_food: Energy a wolf gains from eating a sheep
            grass_regrowth_time: How long it takes for a grass patch to regrow
                                 once it is eaten
            sheep_gain_from_food: Energy sheep gain from grass
        '''

        # Set parameters
        self.height = height
        self.width = width
        self.initial_sheep = initial_sheep
        self.initial_wolves = initial_wolves
        self.sheep_reproduce = sheep_reproduce
        self.wolf_reproduce = wolf_reproduce
        self.wolf_gain_from_food = wolf_gain_from_food
        self.grass_regrowth_time = grass_regrowth_time
        self.sheep_gain_from_food = sheep_gain_from_food

        self.schedule = RandomActivationByBreed(self)
        self.grid = MultiGrid(self.height, self.width, torus=True)
        self.datacollector = DataCollector(
            {"Wolves": lambda m: m.schedule.get_breed_count(Wolf),
             "Sheep": lambda m: m.schedule.get_breed_count(Sheep)})

        # Create sheep:
        for i in range(self.initial_sheep):
            x = random.randrange(self.width)
            y = random.randrange(self.height)
            energy = random.randrange(2 * self.sheep_gain_from_food)
            sheep = Sheep((x, y), self, True, energy)
            self.grid.place_agent(sheep, (x, y))
            self.schedule.add(sheep)

        # Create wolves
        for i in range(self.initial_wolves):
            x = random.randrange(self.width)
            y = random.randrange(self.height)
            energy = random.randrange(2 * self.wolf_gain_from_food)
            wolf = Wolf((x, y), self, True, energy)
            self.grid.place_agent(wolf, (x, y))
            self.schedule.add(wolf)

        # Create grass patches
        for agent, x, y in self.grid.coord_iter():

            fully_grown = random.choice([True, False])

            if fully_grown:
                countdown = self.grass_regrowth_time
            else:
                countdown = random.randrange(self.grass_regrowth_time)

            patch = GrassPatch((x, y), self, fully_grown, countdown)
            self.grid.place_agent(patch, (x, y))
            self.schedule.add(patch)

        self.running = True

    def step(self):
        self.schedule.step()
        self.datacollector.collect(self)
        if self.verbose:
            print([self.schedule.time,
                   self.schedule.get_breed_count(Wolf),
                   self.schedule.get_breed_count(Sheep)])

    def run_model(self, step_count=200):

        if self.verbose:
            print('Initial number wolves: ',
                  self.schedule.get_breed_count(Wolf))
            print('Initial number sheep: ',
                  self.schedule.get_breed_count(Sheep))

        for i in range(step_count):
            self.step()

        if self.verbose:
            print('')
            print('Final number wolves: ',
                  self.schedule.get_breed_count(Wolf))
            print('Final number sheep: ',
                  self.schedule.get_breed_count(Sheep))
Example #8
class ExplorationArea(Model):
    def __init__(
            self,
            nrobots,
            wifi_range,
            radar_radius=6,
            alpha=8.175,
            gamma=0.65,
            inj_pri=0,
            ninjured=None,
            ncells=None,
            obstacles_dist=None,
            load_file=None,
            dump_datas=True,  # enable data collection
            alpha_variation=False,  # record datas for alpha variation studies
            alpha_csv=alpha_csv,  # aggregate datas
            alpha_step_csv=alpha_step_csv,  # single step datas
            gamma_variation=False,  # record datas for gamma variation studies
            gamma_csv=gamma_csv,
            optimization_task=False,  # enable a small part of data collection for optimization task
            time_csv=number_of_steps_csv,
            robot_status_csv=robot_status_csv):

        # checking params consistency
        if not load_file and (not ncells or not obstacles_dist
                              or not ninjured):
            print("Invalid params")
            sys.exit(-1)

        # used in server start
        self.running = True
        self.nrobots = nrobots
        self.radar_radius = radar_radius
        self.ncells = ncells
        self.obstacles_dist = obstacles_dist
        self.wifi_range = wifi_range
        self.alpha = alpha
        self.gamma = gamma
        self.ninjured = ninjured
        self.inj_pri = inj_pri
        self.dump_datas = dump_datas
        self.optimization_task = optimization_task
        self.frontier = set()
        self.broken_beans = 0
        # Data collection tools
        if self.dump_datas:
            # it represents the sum of the difficulties of every cell
            self.total_difficulty = 0

            self.dc_robot_status = DataCollector({
                "idling":
                lambda m: self.get_number_robots_status(m, "idling"),
                "travelling":
                lambda m: self.get_number_robots_status(m, "travelling"),
                "exploring":
                lambda m: self.get_number_robots_status(m, "exploring"),
                "deploying_bean":
                lambda m: self.get_number_robots_status(m, "deploying_bean"),
                "step":
                lambda m: self.get_step(m)
            })
            self.time_csv = time_csv
            self.robot_status_csv = robot_status_csv

        if self.optimization_task:
            self.total_idling_time = 0

        self.alpha_variation = alpha_variation
        self.gamma_variation = gamma_variation
        if self.alpha_variation:
            self.costs_each_path = list()
            self.alpha_csv = alpha_csv
            self.alpha_step = dict()
            self.alpha_step_csv = alpha_step_csv
        if self.gamma_variation:
            self.gamma_df = pd.DataFrame(columns=["step", "mean", "std"])
            self.gamma_csv = gamma_csv

        self.schedule = RandomActivation(self)
        # unique counter for agents
        self.agent_counter = 1
        self.nobstacle = 0
        # graph of seen cells
        self.seen_graph = nx.DiGraph()

        rnd.seed()

        # place a Cell agent on each grid cell to store data and support visualization
        # if map is not taken from file, create it
        if load_file == None:
            self.grid = MultiGrid(ncells + 2, ncells + 2, torus=False)
            for i in self.grid.coord_iter():
                if i[1] != 0 and i[2] != 0 and i[1] != self.ncells + 1 and i[
                        2] != self.ncells + 1:
                    rand = np.random.random_sample()
                    obstacle = True if rand < self.obstacles_dist else False
                    # if obstacle
                    if obstacle:
                        self.nobstacle += 1
                        difficulty = math.inf
                        explored = -1
                        priority = 0
                        utility = -math.inf
                    # if free
                    else:
                        difficulty = np.random.randint(low=1, high=13)
                        if self.dump_datas:
                            self.total_difficulty += difficulty
                        explored = 0
                        priority = 0
                        utility = 1.0
                # if contour cell
                else:
                    difficulty = np.random.randint(low=1, high=13)
                    explored = -2
                    priority = -math.inf
                    utility = -math.inf
                # place the agent in the grid
                a = Cell(self.agent_counter, self, i[1:], difficulty, explored,
                         priority, utility)
                self.grid.place_agent(a, i[1:])
                self.agent_counter += 1

            # create injured agents
            valid_coord = []
            for i in self.grid.coord_iter():
                cell = [
                    e for e in self.grid.get_cell_list_contents(i[1:])
                    if isinstance(e, Cell)
                ][0]
                if cell.explored == 0:
                    valid_coord.append(cell.pos)
            for i in range(0, ninjured):
                inj_index = rnd.choice(valid_coord)
                a = Injured(self.agent_counter, self, inj_index)
                self.schedule.add(a)
                self.grid.place_agent(a, inj_index)
                self.agent_counter += 1
        else:
            # load map from file
            try:
                with open(load_file, 'r') as f:
                    file = f.read()
            except:
                print("file not found")
                sys.exit(-1)
            exported_map = literal_eval(file)
            self.ncells = int(math.sqrt(len(exported_map["Cell"].keys()))) - 2
            self.grid = MultiGrid(self.ncells + 2,
                                  self.ncells + 2,
                                  torus=False)
            for index in exported_map["Cell"].keys():
                cell = exported_map["Cell"][index]
                difficulty = cell[2]
                explored = cell[3]
                priority = cell[4]
                utility = cell[5]
                if difficulty == "inf":
                    difficulty = math.inf
                if priority == "-inf":
                    priority = -math.inf
                if utility == "-inf":
                    utility = -math.inf
                if explored == -1:
                    self.nobstacle += 1
                if self.dump_datas and utility == 1:
                    self.total_difficulty += difficulty
                a = Cell(self.agent_counter, self, index, difficulty, explored,
                         priority, utility)
                self.grid.place_agent(a, index)
                self.agent_counter += 1

            for index in exported_map["Injured"].keys():
                a = Injured(self.agent_counter, self, index)
                self.schedule.add(a)
                self.grid.place_agent(a, index)
                self.agent_counter += 1

        # create robotic agents
        row = 0
        starting_coord = []
        # data collection number of beans requested
        if self.dump_datas:
            self.deployed_beans_at_start = 0
        # generating the list for the starting position of robots
        for c in range(self.grid.width):
            # take the agent cell
            cell = [
                e for e in self.grid.get_cell_list_contents(tuple([c, row]))
                if isinstance(e, Cell)
            ][0]
            if cell.explored != -1:
                starting_coord.append(c)
        for i in range(0, self.nrobots):
            column = rnd.choice(starting_coord)
            a = Robot(self.agent_counter, self, tuple([column, row]),
                      self.radar_radius)
            self.schedule.add(a)
            self.grid.place_agent(a, (column, row))
            self.agent_counter += 1
            # create initial frontier: add the cells in front of the robot if they are valid and not obstacles
            cell = [
                e for e in self.grid.get_cell_list_contents(
                    tuple([column, row + 1])) if isinstance(e, Cell)
            ][0]
            if cell.explored == 0:
                self.frontier.add(tuple([column, row + 1]))
            try:
                cell = [
                    e for e in self.grid.get_cell_list_contents(
                        tuple([column + 1, row + 1])) if isinstance(e, Cell)
                ][0]
                if cell.explored == 0:
                    self.frontier.add(tuple([column + 1, row + 1]))
            except:
                pass
            try:
                cell = [
                    e for e in self.grid.get_cell_list_contents(
                        tuple([column - 1, row + 1])) if isinstance(e, Cell)
                ][0]
                if cell.explored == 0:
                    self.frontier.add(tuple([column - 1, row + 1]))
            except:
                pass

            cell = [
                e
                for e in self.grid.get_cell_list_contents(tuple([column, row]))
                if isinstance(e, Cell)
            ][0]
            # in the cell where some robots are deployed, only one bean is deployed
            if not cell.wifi_bean:
                cell.wifi_bean = True
                for index in self.grid.get_neighborhood(
                        cell.pos,
                        "moore",
                        include_center=False,
                        radius=self.wifi_range):
                    cell = [
                        e for e in self.grid.get_cell_list_contents(index)
                        if isinstance(e, Cell)
                    ][0]
                    cell.wifi_covered = True
                if self.dump_datas:
                    self.deployed_beans_at_start += 1

    # what the model does at each time step
    def step(self):

        # data collection for alpha variation
        if self.alpha_variation:
            sim_step = self.get_step(self)
            self.alpha_step[sim_step] = list()

        # call step function for all of the robots in random order
        self.schedule.step()

        if self.dump_datas:
            self.dc_robot_status.collect(self)
        if self.optimization_task:
            self.total_idling_time += self.get_number_robots_status(
                self, "idling")
        if self.gamma_variation:
            distances = self.compute_robot_distances(self)
            self.gamma_df = self.gamma_df.append(
                {
                    "step": self.get_step(self),
                    "mean": distances[0],
                    "std": distances[1]
                },
                ignore_index=True,
                sort=False)
        # if all seen cells have been explored, stop the simulation
        # unreachable cells can never be seen, so the simulation stops even if some cells stay unexplored
        stop_exploration_done = True
        for node in self.seen_graph.nodes():
            cell = [
                obj for obj in self.grid.get_cell_list_contents(node)
                if isinstance(obj, Cell)
            ][0]
            if cell.explored == 0 or cell.explored == 1:
                stop_exploration_done = False
        stop_no_robots = False
        if len([x for x in self.schedule.agents if isinstance(x, Robot)]) == 0:
            stop_no_robots = True
        stop = stop_exploration_done or stop_no_robots

        if stop:
            print("Simultation ended")
            # Data collection
            if self.dump_datas:
                df = pd.read_csv(self.time_csv)
                df = df.append(
                    {
                        "nrobots": self.nrobots,
                        "ncells": self.ncells,
                        "steps": self.schedule.steps,
                        "total_difficulty": self.total_difficulty,
                        "beans_deployed": self.get_number_bean_deployed(self)
                    },
                    ignore_index=True)
                df.to_csv(self.time_csv, index=False)

                df_robots_status = self.dc_robot_status.get_model_vars_dataframe(
                )
                df = pd.read_csv(self.robot_status_csv)
                if len(df["sim_id"]) == 0:
                    df_robots_status["sim_id"] = 0
                else:
                    df_robots_status["sim_id"] = df["sim_id"][df.index[-1]] + 1
                df = df.append(df_robots_status, ignore_index=True, sort=False)
                df.to_csv(self.robot_status_csv, index=False)

            if self.alpha_variation:
                mean = round(np.mean(self.costs_each_path), 3)
                std = round(np.std(self.costs_each_path), 3)
                df = pd.read_csv(self.alpha_csv)
                df = df.append(
                    {
                        "nrobots": self.nrobots,
                        "radar_radius": self.radar_radius,
                        "alpha": self.alpha,
                        "gamma": self.gamma,
                        "mean": mean,
                        "std": std
                    },
                    ignore_index=True)
                df.to_csv(self.alpha_csv, index=False)

                tmp_df = pd.DataFrame(columns=["step", "cost"])
                for s, costs in zip(self.alpha_step.keys(),
                                    self.alpha_step.values()):
                    if not costs:
                        tmp_df = tmp_df.append({
                            "step": s,
                            "cost": -1
                        },
                                               ignore_index=True,
                                               sort=False)
                        continue
                    for c in costs:
                        tmp_df = tmp_df.append({
                            "step": s,
                            "cost": c
                        },
                                               ignore_index=True,
                                               sort=False)
                df = pd.read_csv(self.alpha_step_csv)
                if len(df["sim_id"]) == 0:
                    tmp_df["sim_id"] = 0
                else:
                    tmp_df["sim_id"] = df["sim_id"][df.index[-1]] + 1
                tmp_df["nrobots"] = self.nrobots
                tmp_df["radar_radius"] = self.radar_radius
                tmp_df["alpha"] = self.alpha
                tmp_df["gamma"] = self.gamma
                df = df.append(tmp_df, ignore_index=True, sort=False)
                df.to_csv(self.alpha_step_csv, index=False)

            if self.gamma_variation:
                df = pd.read_csv(self.gamma_csv)
                if len(df["sim_id"]) == 0:
                    self.gamma_df["sim_id"] = 0
                else:
                    self.gamma_df["sim_id"] = df["sim_id"][df.index[-1]] + 1
                self.gamma_df["nrobots"] = self.nrobots
                self.gamma_df["radar_radius"] = self.radar_radius
                self.gamma_df["alpha"] = self.alpha
                self.gamma_df["gamma"] = self.gamma
                df = df.append(self.gamma_df, ignore_index=True, sort=False)
                df.to_csv(self.gamma_csv, index=False)

            self.running = False

    def run_model(self):
        while (True):
            # search for unexplored cells
            stop = True
            for node in self.seen_graph.nodes():
                cell = [
                    obj for obj in self.grid.get_cell_list_contents(node)
                    if isinstance(obj, Cell)
                ][0]
                if cell.explored == 0 or cell.explored == 1:
                    stop = False
            # if all seen cells have been explored, stop the simulation
            # unreachable cells can never be seen, so the simulation stops even if some cells stay unexplored
            if stop:
                self.running = False
                break
            else:
                self.step()

    # Data collection utilities
    @staticmethod
    def get_step(m):
        return m.schedule.steps

    # these two should go faster since cells are not in the scheduler anymore
    @staticmethod
    def get_number_robots_status(m, status):
        status_value = {
            "idling": 0,
            "travelling": 1,
            "exploring": 2,
            "deploying_bean": 3
        }
        return len([
            x for x in m.schedule.agents
            if isinstance(x, Robot) and x.status == status_value[status]
        ])

    @staticmethod
    def get_number_bean_deployed(m):
        return sum([
            x.number_bean_deployed
            for x in m.schedule.agents if isinstance(x, Robot)
        ]) + m.deployed_beans_at_start

    # function for gamma variation
    @staticmethod
    def compute_robot_distances(m):
        nrobots = m.nrobots
        T_up = np.full(
            (nrobots, nrobots),
            0.0)  # initialized with 0.0 so numpy stores floats rather than integers
        # numpy treats triangular matrices like ordinary matrices, so
        # I just initialize a full matrix and then use it as an upper-triangular one.
        robots = [x for x in m.schedule.agents if isinstance(x, Robot)]
        # the order of the robots can change from step to step (due to the random scheduler);
        # this shouldn't cause any problem, but to avoid index headaches later on
        # we sort them based on their unique_id
        robots.sort(key=lambda x: x.unique_id)
        # the lowest id is needed to shift the ids back so they fit the matrix coordinates
        lowest_id = robots[0].unique_id
        for r in robots:
            matrix_id_row = r.unique_id - lowest_id
            # the distance of a robot to itself is zero by definition
            y1, x1 = r.pos
            for i in range(matrix_id_row + 1, nrobots):
                r2 = robots[i]  # i can do this because they are sorted
                y2, x2 = r2.pos
                T_up[matrix_id_row][i] = distance.euclidean([x1, y1], [x2, y2])
        mean_dist_robots = list()
        # robot 0 only has values along its row
        mean_robot_zero = sum(T_up[0, 1:nrobots]) / (nrobots - 1)
        mean_dist_robots.append(mean_robot_zero)
        for i in range(1, nrobots -
                       1):  # the last row has no values; i iterates over the rows
            mean_robot = (sum(T_up[0:i, i]) +
                          sum(T_up[i, i + 1:nrobots])) / (nrobots - 1)
            mean_dist_robots.append(mean_robot)
        # the last robot only has values along its column
        mean_last_robot = sum(T_up[0:nrobots - 1, nrobots - 1]) / (nrobots - 1)
        mean_dist_robots.append(mean_last_robot)
        return tuple([
            round(np.mean(mean_dist_robots), 3),
            round(np.std(mean_dist_robots), 3)
        ])
Example #9
class CancerModel(Model):
    def xprint(self, *args):
        logger.info("CANCER MODEL:  " + " ".join(map(str, args)))

    def __init__(self, cure_agent_type, config):

        self.xprint("STARTING SIMULATION !!!")
        self.counter = 0
        self.decay_number = 0
        self.lifetime_counter = 0
        self.metastasis_score = 0
        eat_values = {CancerCell: 1, HealthyCell: -1, CancerStemCell: 5}
        assert (issubclass(cure_agent_type, CureAgent))

        agent_memory_range = config["Agent"]["memory_limit"]
        agent_memory_type = config["Agent"]["memory_type"]
        radoznalost = config["Agent"]["curiosity"]
        turn_off_modifiers = config["Tumor"]["turn_off_modifiers"]
        CC_mutation_probability = config["Tumor"]["mutation_probability"]
        is_tumor_growing = config["Tumor"]["is_growing"]
        tumor_movement_stopping_range = config["Tumor"][
            "movement_stopping_range"]
        steps_before_mutation = config["Model"]["steps_before_mutation"]
        self.SAMPLE_i = config["Model"]["sample_i"]

        self.cure_number = config["Model"]["NA_number"]
        self.probabilites_ranges = config["Agent"]["probabilities_limits"]
        self.modifier_fraction = config["Tumor"]["modifier_fraction"]
        self.mode = config["Simulation"]["mode"]
        fname = "xxx" if self.mode == "learning" else config["Simulation"][
            "fname"]

        self.MUTATION_PERCENTAGE = config["Model"]["mutation_percentage"]
        tumor_growth_probability = config["Tumor"]["growth_probability"]
        cancer_cell_number = config["Model"]["CC_number"]

        #DATA COLLECTION

        self.datacollector = DataCollector(
            model_reporters={
                "FitnessFunction": fitness_funkcija,
                "AverageSpeed": speed_avg,
                "AverageMemoryCapacity": memory_size_all_avg,
                "PopulationHeterogenity": population_heterogenity,
                "MutationAmount": mutation_amount,
                "CancerStemCell Number": CSC_number,
                "CSC Specialized Agents": CSC_specialized_agents,
                "CancerHeterogenity1": cancer_heterogenity_1,
                "CancerHeterogenity2": cancer_heterogenity_2,
                "CC_Number": CC_number,
                "HealthyCell_Number": HC_number,
                "MetastasisScore": "metastasis_score",
                "CancerSize": cancer_size,
                "TotalTumorResiliance": overall_cancer_resiliance,
                "TumorResiliance_Pi": cancer_resiliance_Pi,
                "TumorResiliance_Pd": cancer_resiliance_Pd,
                "TumorResiliance_Pa": cancer_resiliance_Pa,
                "TumorResiliance_Pk": cancer_resiliance_Pk,
                "TumorResiliance_Psd": cancer_resiliance_Psd,
                "NumberOfMutatedCells": mutated_CCs_num,
                "TumorCoverage": tumor_coverage,
                "AveragePd": average_Pd,
                "AveragePa": average_Pa,
                "AveragePi": average_Pi,
                "AveragePk": average_Pk,
                "AveragePsd": average_Psd,
                #      "PopulationClusters":cluster_counts
            },
            agent_reporters={
                "Pi": get_Pi,
                "Pa": get_Pa,
                "Pd": get_Pd,
                "speed": get_speed,
                "Psd": get_Psd,
                "Pk": get_Pk,
                "memory_size": get_memory_size,
                "type": get_agent_type
            })
        grid_size = math.ceil(math.sqrt(cancer_cell_number * 4))

        self.STEPS_BEFORE_MUTATION = steps_before_mutation
        self.grid = MultiGrid(grid_size, grid_size, False)
        self.NUM_OF_INJECTION_POINTS = config["Model"]["injection_points"]
        #        self.speeds = list(range(1,grid_size//2))
        self.speeds = [
            1
        ]  # TODO maybe do this better? not sure whether it belongs in the config file
        poss = self.generate_cancer_cell_positions(grid_size,
                                                   cancer_cell_number)
        num_CSC = math.ceil(percentage(1, cancer_cell_number))
        pos_CSC = [self.random.choice(poss) for i in range(num_CSC)]

        #ACTIVATE SIMULATION
        self.schedule = RandomActivation(self)
        self.running = True

        #PLACE CANCER CELLS

        for i in range(cancer_cell_number):
            pos = poss[i]
            has_modifiers = False if ((i < (
                (1 - self.modifier_fraction) * cancer_cell_number))
                                      or turn_off_modifiers is True
                                      ) else True  #10 % will be with modifiers
            c = CancerStemCell("CANCER_STEM_CELL-"+str(uuid.uuid4()),self,value = eat_values[CancerStemCell],has_modifiers=has_modifiers,mutation_probability=CC_mutation_probability,grows=is_tumor_growing,growth_probability=tumor_growth_probability) \
                if pos in pos_CSC else CancerCell("CANCER_CELL-"+str(uuid.uuid4()),self,value=eat_values[CancerCell],has_modifiers=has_modifiers,mutation_probability=CC_mutation_probability,grows=is_tumor_growing,growth_probability=tumor_growth_probability)
            self.grid.place_agent(c, pos)
            self.schedule.add(c)

        #PLACE HEALTHY CELLS

        for (i, (contents, x, y)) in enumerate(self.grid.coord_iter()):
            nbrs = self.grid.get_neighborhood([x, y], moore=True)
            second_nbrs = []
            for nbr in nbrs:
                second_nbrs += self.grid.get_neighborhood(nbr, moore=True)
            nbrs = nbrs + second_nbrs
            nbr_contents = self.grid.get_cell_list_contents(nbrs)
            nbr_CCs = [
                nbr for nbr in nbr_contents if isinstance(nbr, CancerCell)
            ]

            if not contents and len(nbr_CCs) == 0:
                c = HealthyCell(uuid.uuid4(), self, eat_values[HealthyCell])
                self.grid.place_agent(c, (x, y))
                self.schedule.add(c)

        if self.mode == "simulation":
            self.duplicate_mutate_or_kill = self.simulation_mode_function
            self.decay_probability = 0.04  #MAGIC NUMBER
            self.read_nanoagents_from_file(
                fname=fname,
                cure_agent_type=cure_agent_type,
                tumor_movement_stopping_range=tumor_movement_stopping_range,
                agent_memory_range=agent_memory_range,
                radoznalost=radoznalost)

        elif self.mode == "learning":
            self.decay_probability = 0
            self.make_nanoagents_from_scratch(cure_agent_type, radoznalost,
                                              agent_memory_type,
                                              agent_memory_range,
                                              tumor_movement_stopping_range,
                                              grid_size)
        else:
            assert ("False")

    def get_random_positions_on_empty_cells(self):
        """Gets the N random currently empty positions on the grid"""
        #GETTING EMPTY POSITIONS (for placing nano agents)
        empty_cells = [(x, y) for (i, (contents, x,
                                       y)) in enumerate(self.grid.coord_iter())
                       if not contents]
        positions = [
            self.random.choice(empty_cells)
            for i in range(self.NUM_OF_INJECTION_POINTS)
        ]
        self.xprint("The 5 random empty positions are %s" % positions)
        return positions

    def inject_nanoagents(self):
        """Injects the nanoagents, they will be activated slowly after"""
        from itertools import cycle

        positions = cycle(self.get_random_positions_on_empty_cells())
        for a in self.agents:
            self.grid.place_agent(a, next(positions))
        self.agents_iterator = iter(self.agents)

    def activate_next_batch_of_agents(self):
        agents_to_be_placed_at_each_steps = round(len(self.agents) /
                                                  14)  #MAGIC NUMBER
        for i in range(agents_to_be_placed_at_each_steps):
            self.schedule.add(next(self.agents_iterator))

    def read_nanoagents_from_file(self, fname, cure_agent_type, radoznalost,
                                  agent_memory_range,
                                  tumor_movement_stopping_range):
        import pandas as pd
        self.xprint("Reading nanoagents from file")
        df = pd.read_csv(fname)
        self.agents = []

        for i, row in df.iterrows():
            a = cure_agent_type(
                uuid.uuid4(),
                self,
                speeds=self.speeds,
                radoznalost=radoznalost,
                memory_type=row.memory_size,
                memory_range=agent_memory_range,
                tumor_movement_stopping_range=tumor_movement_stopping_range,
                probabilities_ranges=self.probabilites_ranges)
            a.Pi = row.Pi
            a.Pa = row.Pa
            a.Pd = row.Pd
            a.memory_size = row.memory_size
            a.memorija = FixSizeOrderedDict(max=row.memory_size)
            a.tumor_movement_stopping_rate = row.tumor_movement_stopping_rate
            self.agents.append(a)

    def make_nanoagents_from_scratch(self, cure_agent_type, radoznalost,
                                     agent_memory_type, agent_memory_range,
                                     tumor_movement_stopping_range, grid_size):
        from itertools import cycle
        self.xprint("Making nanoagents from scratch")
        positions = cycle(self.get_random_positions_on_empty_cells())
        for i in range(self.cure_number):
            pos = next(positions)
            self.xprint(pos)
            a = cure_agent_type(
                uuid.uuid4(),
                self,
                speeds=self.speeds,
                radoznalost=radoznalost,
                memory_type=agent_memory_type,
                memory_range=agent_memory_range,
                tumor_movement_stopping_range=tumor_movement_stopping_range,
                probabilities_ranges=self.probabilites_ranges)
            self.grid.place_agent(a, pos)
            self.schedule.add(a)

    def simulation_mode_function(self):
        self.xprint("In simulation mode - not duplicating or mutating")
        return

    def generate_cancer_cell_positions(self, grid_size, cancer_cells_number):
        center = grid_size // 2
        poss = [(center, center)]
        for pos in poss:
            poss += [
                n for n in self.grid.get_neighborhood(
                    pos, moore=True, include_center=False) if n not in poss
            ]
            if len(set(poss)) >= cancer_cells_number:
                break
        poss = list(set(poss))
        return poss

    def duplicate_mutate_or_kill(self):
        if self.mode == "simulation":
            assert (False)
        koliko = math.ceil(
            percentage(self.MUTATION_PERCENTAGE, self.cure_number))
        cureagents = [
            c for c in self.schedule.agents if isinstance(c, CureAgent)
        ]
        sortirani = sorted(cureagents, key=lambda x: x.points, reverse=True)
        poslednji = sortirani[-koliko:]
        prvi = sortirani[:koliko]
        sredina = len(sortirani) // 2
        pocetak_sredine = sredina - (koliko // 2)
        kraj_sredine = sredina + (koliko // 2)
        srednji = sortirani[pocetak_sredine:kraj_sredine]
        self.mutate_agents(srednji)
        assert (len(prvi) == len(poslednji))
        self.remove_agents(poslednji)
        self.duplicate_agents(prvi)

    def mutate_agents(self, agents):
        self.xprint("Mutating middle agents")
        for a in agents:
            a.mutate()

    def remove_agents(self, agents):
        for a in agents:
            self.kill_cell(a)

    def duplicate_agents(self, agents):
        for a in agents:
            a_new = a.copy()
            self.grid.place_agent(a_new, (1, 1))
            self.schedule.add(a_new)

    def kill_cell(self, cell):
        self.grid.remove_agent(cell)
        self.schedule.remove(cell)

    def detach_stem_cell(self, cell):
        self.metastasis_score += 1
        self.kill_cell(cell)

    def kill_all_agents(self):
        agents = [a for a in self.schedule.agents if isinstance(a, CureAgent)]
        for a in agents:
            a.kill_self()

    def write_population_to_file(self, i):
        df = record_model_population(self)
        df.to_csv("./Populations/Population{}-step{}.csv".format(
            self.SAMPLE_i, i))

    def step(self):
        self.datacollector.collect(self)

        if self.mode == "simulation":
            self.simulation_step()

        self.write_population_to_file(self.counter)

        self.schedule.step()

        self.counter += 1
        self.lifetime_counter += 1

        if self.counter % self.STEPS_BEFORE_MUTATION == 0:
            # placed here so that this is not run at step zero
            self.duplicate_mutate_or_kill()

    def simulation_step(self):
        LIFETIME_OF_NANOAGENTS = 80
        REINJECTION_PERIOD = 100
        if self.lifetime_counter > LIFETIME_OF_NANOAGENTS:
            self.kill_all_agents()
        # if self.counter%(LIFETIME_OF_NANOAGENTS+REINJECTION_PERIOD)==0:
        #         # they are re-injected every 180 steps

        #     self.inject_nanoagents()
        #     self.lifetime_counter = 0
        agents_at_each_step = round(self.cure_number / 14)
        for i in range(agents_at_each_step):
            try:
                self.schedule.add(next(self.agents_iterator))
            except StopIteration:
                break
        agents = [a for a in self.schedule.agents if isinstance(a, CureAgent)]
        assert (len(agents) <= self.cure_number)
Example #10
class SugarscapeCg(Model):
    '''
    Sugarscape 2 Constant Growback
    '''

    verbose = True  # Print-monitoring

    def __init__(self, height=50, width=50, initial_population=100):
        '''
        Create a new Constant Growback model with the given parameters.

        Args:
            initial_population: Number of population to start with
        '''

        # Set parameters
        self.height = height
        self.width = width
        self.initial_population = initial_population

        self.schedule = RandomActivationByBreed(self)
        self.grid = MultiGrid(self.height, self.width, torus=False)
        self.datacollector = DataCollector({
            "SsAgent":
            lambda m: m.schedule.get_breed_count(SsAgent),
        })

        # Create sugar
        import numpy as np
        sugar_distribution = np.genfromtxt("sugarscape_cg/sugar-map.txt")
        for _, x, y in self.grid.coord_iter():
            max_sugar = sugar_distribution[x, y]
            sugar = Sugar((x, y), self, max_sugar)
            self.grid.place_agent(sugar, (x, y))
            self.schedule.add(sugar)

        # Create agent:
        for i in range(self.initial_population):
            x = self.random.randrange(self.width)
            y = self.random.randrange(self.height)
            sugar = self.random.randrange(6, 25)
            metabolism = self.random.randrange(2, 4)
            vision = self.random.randrange(1, 6)
            ssa = SsAgent((x, y), self, False, sugar, metabolism, vision)
            self.grid.place_agent(ssa, (x, y))
            self.schedule.add(ssa)

        self.running = True
        self.datacollector.collect(self)

    def step(self):
        self.schedule.step()
        # collect data
        self.datacollector.collect(self)
        if self.verbose:
            print([self.schedule.time, self.schedule.get_breed_count(SsAgent)])

    def run_model(self, step_count=200):

        if self.verbose:
            print('Initial number Sugarscape Agent: ',
                  self.schedule.get_breed_count(SsAgent))

        for i in range(step_count):
            self.step()

        if self.verbose:
            print('')
            print('Final number Sugarscape Agent: ',
                  self.schedule.get_breed_count(SsAgent))
Example #11
class WolfSheep(Model):
    """
    Wolf-Sheep Predation Model
    """

    height = 20
    width = 20

    initial_sheep = 2
    initial_wolves = 5
    sheep_reproduce = 0.04
    wolf_reproduce = 0.05

    wolf_gain_from_food = 20

    grass = False
    grass_regrowth_time = 5
    sheep_gain_from_food = 4

    verbose = False  # Print-monitoring

    description = (
        "A model for simulating wolf and sheep (predator-prey) ecosystem modelling."
    )

    def __init__(self,
                 height=20,
                 width=20,
                 initial_sheep=100,
                 initial_wolves=50,
                 sheep_reproduce=0.04,
                 wolf_reproduce=0.05,
                 wolf_gain_from_food=20,
                 grass=False,
                 grass_regrowth_time=30,
                 sheep_gain_from_food=4):
        """
        Create a new Wolf-Sheep model with the given parameters.

        Args:
            initial_sheep: Number of sheep to start with
            initial_wolves: Number of wolves to start with
            sheep_reproduce: Probability of each sheep reproducing each step
            wolf_reproduce: Probability of each wolf reproducing each step
            wolf_gain_from_food: Energy a wolf gains from eating a sheep
            grass: Whether to have the sheep eat grass for energy
            grass_regrowth_time: How long it takes for a grass patch to regrow
                                 once it is eaten
            sheep_gain_from_food: Energy sheep gain from grass, if enabled.
        """
        super().__init__()
        # Set parameters
        self.height = height
        self.width = width
        self.initial_sheep = initial_sheep
        self.initial_wolves = initial_wolves
        self.sheep_reproduce = sheep_reproduce
        self.wolf_reproduce = wolf_reproduce
        self.wolf_gain_from_food = wolf_gain_from_food
        self.grass = grass
        self.grass_regrowth_time = grass_regrowth_time
        self.sheep_gain_from_food = sheep_gain_from_food
        self.schedule = RandomActivationByBreed(self)
        self.grid = MultiGrid(self.height, self.width, torus=True)
        self.datacollector = DataCollector({
            "Wolves":
            lambda m: m.schedule.get_breed_count(Wolf),
            "Sheep":
            lambda m: m.schedule.get_breed_count(Sheep),
        })
        # Create Shed
        x = self.random.randrange(self.width)
        y = self.random.randrange(self.height)
        shed = Shed(self.next_id(), (x, y), self)
        self.grid.place_agent(shed, (x, y))
        self.schedule.add(shed)

        # Create water source
        while True:
            x = self.random.randrange(self.width)
            y = self.random.randrange(self.height)
            this_cell = self.grid.get_cell_list_contents([(x, y)])
            cell = [obj for obj in this_cell if isinstance(obj, Shed)]
            if len(cell) == 0:
                break
        ws1 = WaterSource(self.next_id(), (x, y), self)
        self.grid.place_agent(ws1, (x, y))
        self.schedule.add(ws1)
        while True:
            x = self.random.randrange(self.width)
            y = self.random.randrange(self.height)
            this_cell = self.grid.get_cell_list_contents([(x, y)])
            cell = [obj for obj in this_cell if isinstance(obj, Shed)]
            if len(cell) == 0:
                break
            cell = [obj for obj in this_cell if isinstance(obj, WaterSource)]
            if len(cell) == 0:
                break
        ws2 = WaterSource(self.next_id(), (x, y), self)
        self.grid.place_agent(ws2, (x, y))
        self.schedule.add(ws2)

        # Create grass patches
        if self.grass:
            for agent, x, y in self.grid.coord_iter():
                this_cell = self.grid.get_cell_list_contents([(x, y)])
                is_water = [
                    obj for obj in this_cell if isinstance(obj, WaterSource)
                ]
                is_shed = [obj for obj in this_cell if isinstance(obj, Shed)]
                if len(is_water) > 0 or len(is_shed) > 0:
                    continue
                fully_grown = self.random.choice([True, False])
                if fully_grown:
                    countdown = self.grass_regrowth_time
                else:
                    countdown = self.random.randrange(self.grass_regrowth_time)

                patch = GrassPatch(self.next_id(), (x, y), self, fully_grown,
                                   countdown)
                self.grid.place_agent(patch, (x, y))
                self.schedule.add(patch)

        # Create sheep:
        for i in range(self.initial_sheep):
            x = self.random.randrange(self.width)
            y = self.random.randrange(self.height)
            energy = self.random.randrange(2 * self.sheep_gain_from_food)
            sheep = Sheep(self.next_id(), (x, y), self, True, energy)
            sheep.target = shed
            self.grid.place_agent(sheep, (x, y))
            self.schedule.add(sheep)

        # Create wolves
        for i in range(self.initial_wolves):
            x = self.random.randrange(self.width)
            y = self.random.randrange(self.height)
            energy = self.random.randrange(2 * self.wolf_gain_from_food)
            wolf = Wolf(self.next_id(), (x, y), self, True, energy)
            if abs(ws1.pos[0] - x) + abs(ws1.pos[1] - y) < abs(
                    ws2.pos[0] - x) + abs(ws2.pos[1] - y):
                wolf.target = ws1
            else:
                wolf.target = ws2
            self.grid.place_agent(wolf, (x, y))
            self.schedule.add(wolf)

        self.running = True
        self.datacollector.collect(self)

    def step(self):
        self.schedule.step()
        # collect data
        self.datacollector.collect(self)
        if self.verbose:
            print([
                self.schedule.time,
                self.schedule.get_breed_count(Wolf),
                self.schedule.get_breed_count(Sheep),
            ])

    def run_model(self, step_count=200):

        if self.verbose:
            print("Initial number wolves: ",
                  self.schedule.get_breed_count(Wolf))
            print("Initial number sheep: ",
                  self.schedule.get_breed_count(Sheep))

        for i in range(step_count):
            self.step()

        if self.verbose:
            print("")
            print("Final number wolves: ", self.schedule.get_breed_count(Wolf))
            print("Final number sheep: ", self.schedule.get_breed_count(Sheep))
Example #12
class LastMileModel(Model):
    """A model with some number of Rider agents."""
    def __init__(self, N_moto, N_van, N_business, width, height):
        '''
        Create a new LastMileModel with the given parameters.
        Args:
            N_moto: Number of Motos
            N_van: Number of Vans
            N_business: Number of Businesses
            width: grid width
            height: grid height
        '''

        # Number of Motos
        self.n_motos = N_moto
        # Number of Vans
        self.n_vans = N_van
        # Number of business destinations
        self.num_business = N_business
        # Grid Initializer
        self.grid = MultiGrid(width, height, False)
        # Time module, in charge of running the agents
        self.schedule = RandomActivationByType(self)
        # Render Purposes
        self.running = True

        # Where the base and business are initialized
        self.base_location = self.get_random_positions(1)[0]

        positions = self.get_list_of_points_in_grid()
        positions.remove(self.base_location)
        self.business_locations = sample(positions, self.num_business)

        # Create Base
        self.base = self.add_base()

        # Create Business
        for loc in self.business_locations:
            b = Business(loc, self)
            self.schedule.add(b)
            self.grid.place_agent(b, loc)

        # Create Agents
        for i in range(self.n_motos):
            m = Moto(i, self)
            self.schedule.add(m)

            # Add riders to grid
            self.grid.place_agent(m, self.base_location)

        # Create Agents
        for i in range(self.n_vans):
            v = Van((self.n_motos + i), self)
            self.schedule.add(v)

            # Add riders to grid
            self.grid.place_agent(v, self.base_location)

        # Add Datacollector
        self.datacollector = DataCollector({
            "Packs Moto":
            lambda m: m.schedule.get_pack_count_by_type(Moto),
            "Packs Van":
            lambda m: m.schedule.get_pack_count_by_type(Van),
            "Total Packs":
            lambda m: m.schedule.get_total_pack_count()
        })

        # Collect info at step 0
        self.datacollector.collect(self)

    def step(self):
        '''
        Perform a step on all agents.
        '''

        self.datacollector.collect(self)
        self.schedule.step()

    def add_base(self):
        '''
        Adds the delivery headquarters.
        Returns the Base agent object.
        '''
        base = Base(self.base_location, self)
        self.grid.place_agent(base, self.base_location)
        self.schedule.add(base)
        return base

    def get_list_of_points_in_grid(self):
        '''
        Returns a list of all grid coordinates.
        '''

        grid_points = []
        for agents, x, y in self.grid.coord_iter():
            grid_points.append((x, y))

        return grid_points

    def get_random_positions(self, N=1):
        '''
        Returns a list of N random coordinates in the grid.
        '''

        positions = self.get_list_of_points_in_grid()

        return sample(positions, N)
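A minimal usage sketch for LastMileModel, assuming the Moto, Van, Business and Base agent classes and a RandomActivationByType scheduler exposing get_pack_count_by_type and get_total_pack_count are defined alongside it:

model = LastMileModel(N_moto=3, N_van=2, N_business=10, width=20, height=20)
for _ in range(50):
    model.step()

# The three pack counters registered with the DataCollector come back as columns.
packs = model.datacollector.get_model_vars_dataframe()
print(packs[["Packs Moto", "Packs Van", "Total Packs"]].tail())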
Example #13
class SludgeMonsterModel(Model):
    def __init__(self,
                 num_agents,
                 width=100,
                 height=100,
                 food_growth_prob=0.0005,
                 initial_food_growth=.30,
                 collection_frequency=1):
        self.running = True
        self.width = width
        self.height = height
        self.food_growth_prob = food_growth_prob
        self.initial_food_growth = initial_food_growth
        self.food_type = SludgeFood
        self.schedule = RandomActivation(self)
        self.grid = MultiGrid(self.width, self.height, True)
        self.datacollector = DataCollector(
            model_reporters={
                "friendliness":
                lambda m: m.average_agent_val(agent_type=SludgeMonster,
                                              attrib_name="friendliness"),
                "anger":
                lambda m: m.average_agent_val(agent_type=SludgeMonster,
                                              attrib_name="anger"),
                "fertility":
                lambda m: m.average_agent_val(agent_type=SludgeMonster,
                                              attrib_name="fertility"),
                "max_attack":
                lambda m: m.average_agent_val(agent_type=SludgeMonster,
                                              attrib_name="max_attack"),
                "max_hug_benefit":
                lambda m: m.average_agent_val(agent_type=SludgeMonster,
                                              attrib_name="max_hug_benefit"),
                "_decay_mult":
                lambda m: m.average_agent_val(agent_type=SludgeMonster,
                                              attrib_name="_decay_mult"),
                "leadership":
                lambda m: m.average_agent_val(agent_type=SludgeMonster,
                                              attrib_name="leadership"),
                "follower_mult":
                lambda m: m.average_agent_val(agent_type=SludgeMonster,
                                              attrib_name="follower_mult"),
                "leader_attraction":
                lambda m: m.average_agent_val(agent_type=SludgeMonster,
                                              attrib_name="leader_attraction"),
                "food_attraction":
                lambda m: m.average_agent_val(agent_type=SludgeMonster,
                                              attrib_name="food_attraction"),
                "sight":
                lambda m: m.average_agent_val(agent_type=SludgeMonster,
                                              attrib_name="sight"),
                "movement_noise":
                lambda m: m.average_agent_val(agent_type=SludgeMonster,
                                              attrib_name="movement_noise"),
                "is_following":
                lambda m: m.average_agent_val(agent_type=SludgeMonster,
                                              func=lambda a: a.is_following)
            })

        self.collection_frequency = collection_frequency
        self.num_agents = num_agents
        for i in range(self.num_agents):
            self.add_agent()

        self.grow_food(self.initial_food_growth)

    def average_agent_val(self, agent_type, attrib_name=None, func=None):
        vals = []
        agent_count = 0
        if attrib_name:
            for agent in self.schedule.agents:
                if isinstance(agent, agent_type):
                    vals.append(getattr(agent, attrib_name))
                    agent_count += 1
        elif func:
            for agent in self.schedule.agents:
                if isinstance(agent, agent_type):
                    vals.append(func(agent))
                    agent_count += 1
        else:
            raise ValueError("average_agent_val requires either attrib_name or func")
        if agent_count > 0:
            return functools.reduce(operator.add, vals) / float(agent_count)
        else:
            return 0

    def step(self):
        if self.schedule.steps % self.collection_frequency == 0:
            self.datacollector.collect(self)
        self.schedule.step()
        self.grow_food(self.food_growth_prob)

    def grow_food(self, growth_prob):
        area = self.height * self.width
        food_growth_areas = np.random.random(
            (self.height, self.width)) < (growth_prob)
        existing_food = self.get_agent_locations(self.food_type)
        food_growth_areas = np.logical_and(food_growth_areas,
                                           np.logical_not(existing_food))
        for y in range(self.height):
            for x in range(self.width):
                if food_growth_areas[y, x]:
                    self.add_agent(agent=self.food_type(self), pos=(x, y))

    def get_agent_locations(self, agent_type):
        truth_table = np.zeros((self.height, self.width), dtype=bool)
        for contents, x, y in self.grid.coord_iter():
            if any([isinstance(agent, agent_type) for agent in contents]):
                truth_table[y, x] = True
        return truth_table

    def remove(self, agent):
        self.grid.remove_agent(agent)
        self.schedule.remove(agent)

    def add_agent(self, agent=None, pos=None):
        if not pos:
            x = random.randrange(self.width)
            y = random.randrange(self.height)
            pos = (x, y)
        if not agent:
            agent = SludgeMonster(self)
        self.schedule.add(agent)
        self.grid.place_agent(agent, pos)
        self.num_agents = len(self.schedule.agents)
        return agent

    def status_str(self):
        val = [self.__class__.__name__]
        for agent in self.schedule.agents:
            val.append(agent.status_str())
        if len(self.schedule.agents) == 0:
            val.append("No agents in model.")
        return "\n".join(val)
class WolfSheep(Model):
    """
    Wolf-Sheep Predation Model
    """

    height = 20
    width = 20

    initial_sheep = 100
    initial_wolves = 50

    sheep_reproduce = 0.04
    wolf_reproduce = 0.05

    wolf_gain_from_food = 20

    grass = False
    grass_regrowth_time = 30
    sheep_gain_from_food = 4

    verbose = False  # Print-monitoring

    description = (
        "A model for simulating a wolf and sheep (predator-prey) ecosystem."
    )

    def __init__(
        self,
        height=20,
        width=20,
        initial_sheep=100,
        initial_wolves=50,
        sheep_reproduce=0.04,
        wolf_reproduce=0.05,
        wolf_gain_from_food=20,
        grass=False,
        grass_regrowth_time=30,
        sheep_gain_from_food=4,
        trees_carrots_ratio=0.5,
        YEAR=20,
        nb_of_hunters=0,
    ):
        """
        Create a new Wolf-Sheep model with the given parameters.

        Args:
            initial_sheep: Number of sheep to start with
            initial_wolves: Number of wolves to start with
            sheep_reproduce: Probability of each sheep reproducing each step
            wolf_reproduce: Probability of each wolf reproducing each step
            wolf_gain_from_food: Energy a wolf gains from eating a sheep
            grass: Whether to have the sheep eat grass for energy
            grass_regrowth_time: How long it takes for a grass patch to regrow
                                 once it is eaten
            sheep_gain_from_food: Energy sheep gain from grass, if enabled.
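            trees_carrots_ratio: Controls the random split between trees and
                                 carrot patches (used when grass is enabled)
            YEAR: Number of steps in a model year, used to draw initial agent ages
            nb_of_hunters: Number of hunters to start with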
        """
        super().__init__()
        # Set parameters
        self.height = height
        self.width = width
        self.initial_sheep = initial_sheep
        self.initial_wolves = initial_wolves
        self.sheep_reproduce = sheep_reproduce
        self.wolf_reproduce = wolf_reproduce
        self.wolf_gain_from_food = wolf_gain_from_food
        self.grass = grass
        self.grass_regrowth_time = grass_regrowth_time
        self.sheep_gain_from_food = sheep_gain_from_food
        self.trees_carrots_ratio = trees_carrots_ratio
        self.YEAR = YEAR  #new
        self.nb_of_hunters = nb_of_hunters

        # Scheduler keeping a dictionary of agent types and the existing agents
        # of each type, with a configurable activation order
        self.schedule = RandomActivationByBreed(self)
        self.grid = MultiGrid(self.height, self.width, torus=True)
        self.datacollector = DataCollector({
            "Fox":
            lambda m: m.schedule.get_breed_count(Predator),
            "Rabbit":
            lambda m: m.schedule.get_breed_count(Prey),
        })

        # Create sheep:
        for i in range(self.initial_sheep):
            x = self.random.randrange(self.width)
            y = self.random.randrange(self.height)
            age = self.random.randrange(3 * self.YEAR)  #new
            energy = self.random.randrange(int(self.sheep_gain_from_food / 2),
                                           2 * self.sheep_gain_from_food)  #new
            sheep = Prey(self.next_id(), (x, y), self, True, energy, age)  #new
            #sheep = Prey(self.next_id(), (x, y), self)
            self.grid.place_agent(sheep, (x, y))
            self.schedule.add(sheep)

        # Create wolves
        for i in range(self.initial_wolves):
            x = self.random.randrange(self.width)
            y = self.random.randrange(self.height)
            age = self.random.randrange(4 * self.YEAR)  #new
            #print(age)
            energy = self.random.randrange(int(self.wolf_gain_from_food / 2),
                                           2 * self.wolf_gain_from_food)  #new
            wolf = Predator(self.next_id(), (x, y), self, True, energy,
                            age)  #new
            #wolf = Predator(self.next_id(), (x, y), self)
            self.grid.place_agent(wolf, (x, y))
            self.schedule.add(wolf)

        # Create grass patches
        if self.grass:
            for agent, x, y in self.grid.coord_iter():
                # Random split between trees and carrots
                if self.trees_carrots_ratio < self.random.random():
                    fully_grown = self.random.choice([True, False])
                    if fully_grown:  # carrots vs. carrot sprouts
                        countdown = self.grass_regrowth_time
                    else:
                        countdown = self.random.randrange(
                            self.grass_regrowth_time)
                    plant = Plant(self.next_id(), (x, y), self, fully_grown,
                                  countdown)
                else:
                    plant = Tree(self.next_id(), (x, y), self)
                self.grid.place_agent(plant, (x, y))
                self.schedule.add(plant)

        # create hunters
        for i in range(self.nb_of_hunters):
            x = self.random.randrange(self.width)
            y = self.random.randrange(self.height)
            hunter = Hunter(self.next_id(), (x, y), self)  #new
            self.grid.place_agent(hunter, (x, y))
            self.schedule.add(hunter)

        self.running = True
        self.datacollector.collect(self)

    def step(self):
        self.schedule.step()
        # collect data
        self.datacollector.collect(self)  # current number of foxes and rabbits
        if self.verbose:
            print([
                self.schedule.time,
                self.schedule.get_breed_count(Predator),
                self.schedule.get_breed_count(Prey),
            ])

    def run_model(self, step_count=200):

        if self.verbose:
            print("Initial number fox: ",
                  self.schedule.get_breed_count(Predator))
            print("Initial number rabbit: ",
                  self.schedule.get_breed_count(Prey))

        for i in range(step_count):
            self.step()

        if self.verbose:
            print("")
            print("Final number fox: ",
                  self.schedule.get_breed_count(Predator))
            print("Final number rabbit: ", self.schedule.get_breed_count(Prey))
Example #15
class ForestDisease(Model):
    """
    Simple forest disease model, adapted from the Forest Fire example.
    """
    def __init__(self,
                 height=100,
                 width=100,
                 density=0.65,
                 mortality=1,
                 wind='N',
                 distance='1'):
        """
        Create a new forest fire model.

        Args:
            height, width: The size of the grid to model
            density: What fraction of grid cells have a tree in them.
        """
        # Initialize model parameters
        self.height = height
        self.width = width
        self.density = density
        self.mortality = mortality
        self.wind = wind
        self.distance = distance

        # Set up model objects
        self.schedule = RandomActivation(self)
        self.grid = MultiGrid(height, width, torus=False)

        self.datacollector = DataCollector({
            "Fine":
            lambda m: self.count_type(m, "Fine"),
            "Infected":
            lambda m: self.count_type(m, "Infected"),
            "Dead":
            lambda m: self.count_type(m, "Dead")
        })

        # Place a tree in each cell with Prob = density
        for (contents, x, y) in self.grid.coord_iter():
            if self.random.random() < self.density:
                # Create a tree
                new_tree = TreeCell((x, y), self)
                self.grid._place_agent((x, y), new_tree)
                self.schedule.add(new_tree)

        center = (int(width / 2), int(height / 2))
        movingAgent = MovingAgent(center, self)
        self.grid._place_agent(center, movingAgent)
        new_tree = TreeCell(center, self)
        self.grid._place_agent(center, new_tree)
        self.schedule.add(new_tree)
        self.schedule.add(movingAgent)

        self.running = True
        self.datacollector.collect(self)

    def step(self):
        """
        Advance the model by one step.
        """
        self.schedule.step()
        # collect data
        self.datacollector.collect(self)

        # Halt if no more fire
        if self.count_type(self, "Fine") == 0:
            self.running = False

    @staticmethod
    def count_type(model, tree_condition):
        """
        Helper method to count trees in a given condition in a given model.
        """
        count = 0
        for tree in model.schedule.agents:
            if tree.condition == tree_condition:
                count += 1
        return count
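A minimal usage sketch for ForestDisease, assuming the TreeCell and MovingAgent classes are defined alongside it; the model halts itself once no "Fine" trees remain, so the loop is capped as a safeguard:

model = ForestDisease(height=50, width=50, density=0.65, mortality=1)
steps = 0
while model.running and steps < 500:
    model.step()
    steps += 1

results = model.datacollector.get_model_vars_dataframe()
print(results[["Fine", "Infected", "Dead"]].tail())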
Example #16
class Sugarscape2ConstantGrowback(Model):
    '''
    Sugarscape 2 Constant Growback
    '''

    verbose = True  # Print-monitoring

    def __init__(self, height=50, width=50,
                 initial_population=100):
        '''
        Create a new Constant Growback model with the given parameters.

        Args:
            initial_population: Number of population to start with
        '''

        # Set parameters
        self.height = height
        self.width = width
        self.initial_population = initial_population

        self.schedule = RandomActivationByBreed(self)
        self.grid = MultiGrid(self.height, self.width, torus=False)
        self.datacollector = DataCollector({"SsAgent": lambda m: m.schedule.get_breed_count(SsAgent), })

        # Create sugar
        import numpy as np
        sugar_distribution = np.genfromtxt("sugarscape/sugar-map.txt")
        for _, x, y in self.grid.coord_iter():
            max_sugar = sugar_distribution[x, y]
            sugar = Sugar((x, y), self, max_sugar)
            self.grid.place_agent(sugar, (x, y))
            self.schedule.add(sugar)

        # Create agent:
        for i in range(self.initial_population):
            x = random.randrange(self.width)
            y = random.randrange(self.height)
            sugar = random.randrange(6, 25)
            metabolism = random.randrange(2, 4)
            vision = random.randrange(1, 6)
            ssa = SsAgent((x, y), self, False, sugar, metabolism, vision)
            self.grid.place_agent(ssa, (x, y))
            self.schedule.add(ssa)

        self.running = True

    def step(self):
        self.schedule.step()
        self.datacollector.collect(self)
        if self.verbose:
            print([self.schedule.time,
                   self.schedule.get_breed_count(SsAgent)])

    def run_model(self, step_count=200):

        if self.verbose:
            print('Initial number Sugarscape Agent: ',
                  self.schedule.get_breed_count(SsAgent))

        for i in range(step_count):
            self.step()

        if self.verbose:
            print('')
            print('Final number Sugarscape Agent: ',
                  self.schedule.get_breed_count(SsAgent))
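A minimal usage sketch for Sugarscape2ConstantGrowback, assuming the SsAgent and Sugar classes are defined alongside it and that the file "sugarscape/sugar-map.txt" is reachable from the working directory:

model = Sugarscape2ConstantGrowback(height=50, width=50, initial_population=100)
model.verbose = False  # silence the per-step print
model.run_model(step_count=50)

print(model.datacollector.get_model_vars_dataframe()["SsAgent"].tail())

Example #17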
class ZoningModel(Model):
    def __init__(self, N, D, width, height, homeowner_renter_ratio,
                 homeowner_renter_newagent_ratio, committee_size,
                 regulation_impact, population_growth_rate):
        # Number of starting agents
        self.num_agents = N
        # Number of starting developed cells
        self.num_developed = D

        # Keep track of agents in committee
        self.committee_agents = []
        # Keep track of developed and occupied cells
        self.developed_cells = []
        self.occupied_cells = []

        self.grid = MultiGrid(width, height, True)
        self.schedule = RandomActivation(self)
        self.running = True
        self.rule = None

        # Keep track of permits, vacancies, occupancies, new construction, and population flows
        self.permits = 0
        self.vacancy_rate = 0
        self.vacancy_memory = []
        self.occupancy_rate = 0
        self.construction_rate = 0
        self.exit_rate = 0
        self.entry_rate = 0
        self.population_start = N
        self.population = 0

        # Keep track of regulations over time
        self.regulations = 0
        self.regulation_memory = []

        # Set initial ratio of homeowners to renters
        self.homeowner_renter_ratio = homeowner_renter_ratio
        # Set ratio of homeowners to renters of new agents
        self.homeowner_renter_newagent_ratio = homeowner_renter_newagent_ratio
        # Set committee size
        self.committee_size = committee_size

        # Initialize median and standard deviation of home prices and agent wealth
        self.home_price_median = MEDIAN_HOME_PRICE
        self.home_price_std_dev = HOME_PRICE_STD_DEV

        self.agent_wealth_median = MEDIAN_WEALTH
        self.agent_wealth_std_dev = WEALTH_STD_DEV

        self.regulation_impact = regulation_impact
        self.population_growth_rate = population_growth_rate

        # Populate grid with 'DevelopTag' agents that control attributes of cells
        for i, c in enumerate(self.grid.coord_iter()):
            d = DevelopTag(i, self)
            x, y = c[-2], c[-1]
            self.grid.place_agent(d, (x, y))

        # Initialize development of cells
        for cell in np.random.choice([cell for cell in self.grid],
                                     self.num_developed,
                                     replace=False):
            dtag = self.get_Cell_Tag(cell)
            dtag.set_Development(self)

        coords = [coords[-2:] for coords in list(self.grid.coord_iter())]
        idx = np.random.choice(len(coords), self.num_agents, replace=False)
        choices = [coords[i] for i in idx]

        # Populate cells with actors: renters and homeowners.
        for i in range(self.num_agents):
            a = Actor(i, self)
            x, y = choices[i]
            self.schedule.add(a)
            self.grid.place_agent(a, (x, y))

        self.population = len(self.schedule.agents)

        for a in self.schedule.agents:
            a.check_Cell()

        # Initialize renters and homeowners
        self.set_Initial_Agent_Types()
        # Initialize agent wealth distribution
        self.set_Agent_Wealth(MEDIAN_WEALTH, WEALTH_STD_DEV, N)
        # Initialize agent budgets for rent
        self.set_Agent_Budget()

        # Initialize home value distribution
        self.set_Home_Values(MEDIAN_HOME_PRICE, HOME_PRICE_STD_DEV, None)
        # Back out monthly rent from home value
        self.set_Rent_Values(None)
        # Initialize the Mesa DataCollector to keep track of model data.
        # It tracks construction, population, vacancies and occupancies, along
        # with average and median home values and rents, agent wealth and
        # budgets, permits and regulations, and the affordability indices and
        # payoffs of agents.
        self.datacollector = DataCollector(
            model_reporters={
                "Regulations": get_Regulations,
                "Permits": get_Permits,
                "Average Regulations": get_Average_Regulations,
                "New Construction": get_Construction_Rate,
                "Net Construction": get_Net_Construction,
                "Average Vacancy": get_Average_Vacancies,
                "Total Construction": get_Housing_Stock,
                "Total Population": get_Population,
                "Population Growth": get_Population_Growth,
                "Total Population Growth": get_Total_Population_Growth,
                "Vacancies": get_Vacancies,
                "Occupancies": get_Occupancy,
                "Homeowners": get_Homeowner_Number,
                "Renters": get_Renter_Number,
                "Homeowner Ratio": get_Homeowner_Ratio,
                "Average Home Value": get_Average_Home_Value,
                "Median Home Value": get_Median_Home_Value,
                "Average Home Rent": get_Average_Home_Rent,
                "Median Home Rent": get_Median_Home_Rent,
                "Average Agent Wealth": get_Average_Agent_Wealth,
                "Median Agent Wealth": get_Median_Agent_Wealth,
                "Average Agent Budget": get_Average_Agent_Budget,
                "Median Agent Budget": get_Median_Agent_Budget,
                "Home Affordability": get_Homeowner_Affordability,
                "Rent Affordability": get_Renter_Affordability,
                "Percent Homeowner Payoff": get_Average_Homeowner_Payoff,
                "Percent Renter Payoff": get_Average_Renter_Payoff
            },
            agent_reporters={"Wealth": "wealth"})

    # Check if a cell is developed or not
    def check_Developed(self, cell):
        dtag = self.get_Cell_Tag(cell)
        return dtag.developed

    # Check whether a cell is occupied by another actor or not
    # Note that this is different from checking if it is 'occupied' in the sense that an agent is living there
    def check_Occupied(self, cell):
        return any(type(obj) == Actor for obj in cell)

    # Decide based on votes whether to increase or decrease zoning regulations
    def compute_Rule(self):
        votes = []
        # Agents vote
        for a in self.schedule.agents:
            if a.committee == True:
                a.cast_vote()
                votes.append(a.vote)

        ffa_votes = votes.count('ffa')
        restrictive_votes = votes.count('restrictive')

        # If majority vote is for 'restrictive', add a regulation. Else, subtract one

        if ffa_votes > restrictive_votes:
            self.regulations -= 1
        else:
            self.regulations += 1

        if self.regulations < 0:
            self.regulations = 0

        # Add total regulations this step to model memory for average values later
        self.regulation_memory.append(self.regulations)

    # Check rent level of a property
    def check_Rent(self, cell):
        dtag = self.get_Cell_Tag(cell)
        return dtag.rent

    # Check property value
    def check_Value(self, cell):
        dtag = self.get_Cell_Tag(cell)
        return dtag.value

    # Check vacant properties and 'destroy' them by returning them to being undeveloped
    def destroy_Construction(self):
        self.vacancy_rate = 0
        self.occupancy_rate = 0
        vacant_cells = [
            cell for cell in self.grid if self.get_Cell_Tag(cell).vacant
        ]
        self.occupied_cells = [
            cell for cell in self.grid if self.get_Cell_Tag(cell).occupied
        ]
        # Destroy five random properties at a time
        # This staggers out destruction to smooth out large spikes of destruction
        if len(vacant_cells) > 5:
            for cell in np.random.choice(vacant_cells, 5):
                dtag = self.get_Cell_Tag(cell)
                dtag.destroy_Development(self)
                self.vacancy_rate += 1

        for cell in self.occupied_cells:
            self.occupancy_rate += 1

        # Keep track of vacancy rates
        self.vacancy_memory.append(self.vacancy_rate)

    # Retrieve and update agent wealth and budget statistics
    def get_Agent_Wealth(self):
        agent_wealth_values = [a.wealth for a in self.schedule.agents]
        agent_budgets = [a.budget for a in self.schedule.agents]

        self.agent_wealth_median = statistics.median(agent_wealth_values)
        self.agent_wealth_std_dev = statistics.stdev(agent_wealth_values)
        self.agent_wealth_avg = statistics.mean(agent_wealth_values)
        self.agent_budget_median = statistics.median(agent_budgets)
        self.agent_budget_std_dev = statistics.stdev(agent_budgets)
        self.agent_budget_avg = statistics.mean(agent_budgets)

    # Retrieve the development tag object of a cell to check or modify cell properties
    def get_Cell_Tag(self, cell):
        objs = [
            obj for obj in cell
            if type(obj) == DevelopTag or type(obj).__name__ == 'DevelopTag'
        ]
        dtag = objs[0]
        return dtag

    # Retrieve and update home value and rent statistics
    def get_Home_Prices(self):
        dtags = [self.get_Cell_Tag(cell) for cell in self.grid]
        home_prices = [dtag.value for dtag in dtags if dtag.value != None]
        home_rent_values = [dtag.rent for dtag in dtags if dtag.value != None]

        self.home_price_median = statistics.median(home_prices)
        self.home_price_std_dev = statistics.stdev(home_prices)
        self.home_price_avg = statistics.mean(home_prices)
        self.home_rent_median = statistics.median(home_rent_values)
        self.home_rent_std_dev = statistics.stdev(home_rent_values)
        self.home_rent_avg = statistics.mean(home_rent_values)

    def issue_Permits(self):
        # For potential use as additional factors in permit issuance
        self.remaining_space_ratio = 1 - (len(self.developed_cells) / (
            (self.grid.width * self.grid.height)) * 0.5)
        self.remaining_agents_ratio = len([
            a for a in self.schedule.agents if not a.location
        ]) / len(self.schedule.agents)
        # Base permit level is set as a function of total grid size minus the number of starting agents,
        # then reduced by the regulation impact factor once for each active regulation
        self.permits = round(
            (0.02 * ((self.grid.width * self.grid.height) - self.num_agents)) *
            (1.00 - (self.regulation_impact * self.regulations)))
        if self.permits < 0:
            self.permits = 0

    def new_Construction(self):
        new_developed_cells = []
        # Get list of all undeveloped cells
        undeveloped_cells = [
            step for step in self.grid if self.check_Developed(step) == False
        ]
        # Skip if there are no undeveloped cells left
        if undeveloped_cells == []:
            pass

        else:
            # Randomly develop a number of cells equal to the number of permits
            for i in range(int(self.permits)):
                cell = random.choice(undeveloped_cells)
                for obj in cell:
                    if type(obj) == DevelopTag:
                        obj.set_Development(self)
                        new_developed_cells.append(obj)

            # Set values/rent for newly constructed properties
            self.set_Home_Values(self.home_price_median,
                                 self.home_price_std_dev, new_developed_cells)
            self.set_Rent_Values(new_developed_cells)

        self.construction_rate = len(new_developed_cells)

    # Clear committee of previous agents
    def reset_Committee(self):
        self.committee_agents = []

    # Reset votes of all agents
    def reset_Votes(self):
        for a in self.schedule.agents:
            a.reset_vote()

    # Select agents to be part of the committee
    def select_Committee(self):
        for a in self.schedule.agents:
            a.committee = False
        occupying_agents = [
            a for a in self.schedule.agents if self.check_Occupied(a.pos)
        ]
        if len(occupying_agents) < self.committee_size:
            for a in np.random.choice(self.schedule.agents,
                                      self.committee_size):
                a.committee = True
                self.committee_agents.append(a)
        else:
            for a in np.random.choice(occupying_agents, self.committee_size):
                a.committee = True
                self.committee_agents.append(a)

    def set_Agent_Budget(self):
        # For now, budget is 2% of wealth, mirroring rent being set at 2% of property value
        for a in self.schedule.agents:
            a.budget = 0.02 * a.wealth

        # Set initial agent budget average and median (this only matters for visualization module)
        agent_budgets = [a.budget for a in self.schedule.agents]
        self.agent_budget_avg = statistics.mean(agent_budgets)
        self.agent_budget_median = statistics.median(agent_budgets)

    # Set wealth levels for n agents. Used in initialization as well as when adding new agents
    def set_Agent_Wealth(self, median, std_dev, n):
        # Create normal distribution around median with specified standard deviation and number of obs
        wealth_levels = np.random.normal(median, std_dev, n)
        for a in self.schedule.agents:
            rand_index, rand_value = random.choice(
                list(enumerate(wealth_levels)))
            a.wealth = rand_value
            wealth_levels = np.delete(wealth_levels, rand_index)

        # Set initial agent wealth average and median (this only matters for visualization module)
        agent_wealth_values = [a.wealth for a in self.schedule.agents]
        self.agent_wealth_avg = statistics.mean(agent_wealth_values)
        self.agent_wealth_median = statistics.median(agent_wealth_values)

    # Set property values for n cells
    def set_Home_Values(self, median, std_dev, cells):
        # Create normal distribution around median with specified standard deviation and number of obs
        if cells is None:
            develop_tags = []
            for cell in self.grid:
                if self.check_Developed(cell):
                    develop_tags.append(self.get_Cell_Tag(cell))
            num_homes = len(develop_tags)
            home_values = np.random.normal(median, std_dev, num_homes)
            for d in develop_tags:
                rand_index, rand_value = random.choice(
                    list(enumerate(home_values)))
                d.value = rand_value
                # Pop out values from normal distribution once used
                home_values = np.delete(home_values, rand_index)

        else:
            dtags = cells
            num_homes = len(dtags)
            home_values = np.random.normal(median, std_dev, num_homes)
            for d in dtags:
                rand_index, rand_value = random.choice(
                    list(enumerate(home_values)))
                d.value = rand_value
                home_values = np.delete(home_values, rand_index)

        dtags = [
            self.get_Cell_Tag(cell) for cell in self.grid
            if self.check_Developed(cell)
        ]
        home_prices = [dtag.value for dtag in dtags if dtag.value != None]
        self.home_price_avg = statistics.mean(home_prices)

    # Randomly assign agent types to match the specified homeowner ratio.
    # For instance, with a desired ratio of 70% homeowners, each agent becomes a
    # homeowner when a uniform random draw exceeds 1 - 0.7 = 0.3.
    def set_Initial_Agent_Types(self):
        for a in self.schedule.agents:
            if random.uniform(0.0, 1.0) > (1 - self.homeowner_renter_ratio):
                a.set_Homeowner()
            else:
                a.set_Renter()

    # Set rent values equal to 2% of the property value
    def set_Rent_Values(self, cells):
        if cells is None:
            develop_tags = []
            for cell in self.grid:
                #print(cell)
                if self.check_Developed(cell):
                    develop_tags.append(self.get_Cell_Tag(cell))
            #print(develop_tags)
            for d in develop_tags:
                d.rent = 0.02 * d.value

        else:
            dtags = cells
            for d in dtags:
                d.rent = 0.02 * d.value

        dtags = [
            self.get_Cell_Tag(cell) for cell in self.grid
            if self.check_Developed(cell)
        ]
        home_rent_values = [dtag.rent for dtag in dtags if dtag.value != None]
        self.home_rent_avg = statistics.mean(home_rent_values)
        self.home_rent_median = statistics.median(home_rent_values)

    # Update running homeowner renter ratio
    # This matters because the ratio may change over time as renters or homeowners exit the neighborhood
    def update_Homeowner_Renter_Ratio(self):
        renter_num = [a for a in self.schedule.agents if a.type == 'renter']
        homeowner_num = [
            a for a in self.schedule.agents if a.type == 'homeowner'
        ]
        self.homeowner_renter_ratio = len(homeowner_num) / len(renter_num)

    def update_Occupancy(self):
        # Get a list of all developed cells
        cells = [cell for cell in self.grid if self.check_Developed(cell)]
        for cell in cells:
            dtag = self.get_Cell_Tag(cell)
            # Check if they're occupied or vacant first
            if dtag.occupied:
                pass
            elif dtag.vacant:
                pass
            else:
                # If there's someone on the cell but it's not yet 'occupied', tick their occupancy timer forward
                # Agents need to be present on the same cell for two ticks to 'occupy' it
                if self.check_Occupied(cell):
                    if dtag.occupancy_timer == 2:
                        dtag.set_Occupied()
                        # Reset vacancy timer if they occupy it
                        dtag.reset_Vacancy()
                    else:
                        dtag.tick_Occupancy()
                else:
                    # Reset occupancy timer
                    dtag.reset_Occupancy()
                    # If vacancy timer runs out, set property to vacant
                    if dtag.vacancy_timer == 0:
                        dtag.set_Vacant()
                    else:
                        dtag.tick_Vacancy()

    def update_Population(self):
        self.exit_rate = 0
        self.entry_rate = 0

        ids = [a.unique_id for a in self.schedule.agents]
        last_id = max(ids)

        # Get random unoccupied coordinates
        unoccupied_coords = []
        for coords in list(self.grid.coord_iter()):
            x = coords[-2]
            y = coords[-1]
            if not self.check_Occupied(self.grid[x][y]):
                unoccupied_coords.append((x, y))

        # Get list of unoccupied and undeveloped coordinates
        undeveloped_cells = [
            coords for coords in unoccupied_coords
            if not self.check_Developed(self.grid[coords[0]][coords[1]])
        ]
        # Get list of unoccupied but developed coordinates
        unoccupied_developed = [
            coords for coords in unoccupied_coords
            if self.check_Developed(self.grid[coords[0]][coords[1]])
        ]

        # Set number of new agents equal to the population growth rate times total grid size
        num_new_agents = round(
            (self.grid.width * self.grid.height) * self.population_growth_rate)
        wealth_values = np.random.normal(MEDIAN_WEALTH, WEALTH_STD_DEV,
                                         num_new_agents)

        try:
            # Place new agents on undeveloped cells on the grid
            idx = np.random.choice(len(undeveloped_cells),
                                   num_new_agents,
                                   replace=False)
            choices = [undeveloped_cells[i] for i in idx]
            for i in range(num_new_agents):
                new_id = last_id + (i + 1)
                a = Actor(new_id, self)
                if random.uniform(
                        0.0, 1.0) > (1 - self.homeowner_renter_newagent_ratio):
                    a.set_Homeowner()
                else:
                    a.set_Renter()
                a.wealth = np.random.choice(wealth_values)
                a.budget = 0.02 * a.wealth
                x, y = choices[i]
                self.schedule.add(a)
                self.grid.place_agent(a, (x, y))

            self.entry_rate += num_new_agents

        # Check if neighborhood is full and no more agents can enter
        except ValueError:
            print('Grid is full')

        self.population = len(self.schedule.agents)

    # Update home value and rent according to restrictive rule scenario
    def update_Prices(self):
        develop_tags = []
        for cell in self.grid:
            if self.check_Developed(cell):
                develop_tags.append(self.get_Cell_Tag(cell))

        for dtag in develop_tags:
            # Need to calibrate these values
            dtag.value = dtag.value - (100 * self.permits)
            dtag.value = dtag.value + (100 * self.regulations)
            dtag.rent = 0.02 * dtag.value

    # Order sequence
    def step(self):
        self.reset_Committee()
        self.select_Committee()
        self.compute_Rule()
        self.update_Prices()
        self.schedule.step()
        self.datacollector.collect(self)
        self.update_Occupancy()
        self.update_Population()
        self.issue_Permits()
        self.destroy_Construction()
        self.new_Construction()
        self.get_Home_Prices()
        self.get_Agent_Wealth()
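A minimal usage sketch for ZoningModel, assuming the Actor and DevelopTag classes, the MEDIAN_*/STD_DEV constants and the get_* reporter functions referenced above are defined alongside it; the parameter values are illustrative only:

model = ZoningModel(N=50, D=100, width=20, height=20,
                    homeowner_renter_ratio=0.6,
                    homeowner_renter_newagent_ratio=0.6,
                    committee_size=5,
                    regulation_impact=0.05,
                    population_growth_rate=0.01)
for _ in range(100):
    model.step()

history = model.datacollector.get_model_vars_dataframe()
print(history[["Regulations", "Permits", "Total Population"]].tail())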
Example #18
class World(Model):
    height = 0
    width = 0

    initial_collector = 0
    wood = False
    wood_regrowth_time = 300
    iron = False

    verbose = False  # Print-monitoring
    log = []

    def __init__(self,
                 height=30,
                 width=30,
                 initial_collector=5,
                 initial_artisans=1,
                 initial_builders=1,
                 wood=False,
                 iron=False,
                 nmines=3,
                 wood_regrowth_time=1000):

        # Set parameters
        self.height = height
        self.width = width
        self.nmines = nmines

        self.initial_collector = initial_collector
        self.initial_artisans = initial_artisans
        self.initial_builders = initial_builders

        self.wood = wood
        self.iron = iron
        self.wood_regrowth_time = wood_regrowth_time

        self.schedule = WorldController(self)
        self.grid = MultiGrid(self.height, self.width, torus=False)
        self.log = []

        self.datacollector = DataCollector({
            "Wood":
            lambda m: m.schedule.get_breed_count(Wood),
            "Collectors":
            lambda m: m.schedule.get_breed_count(Collector),
            "Artisans":
            lambda m: m.schedule.get_breed_count(Artisan),
            "Builders":
            lambda m: m.schedule.get_breed_count(Builder),
            "Houses":
            lambda m: m.schedule.get_breed_count(House),
            "Mineral":
            lambda m: m.schedule.get_breed_count(Mine)
        })
        for agent, x, y in self.grid.coord_iter():
            terrain = Terrain((x, y), self)
            self.grid.place_agent(terrain, (x, y))

        for i in range(self.initial_collector):
            x = random.randrange(self.width)
            y = random.randrange(self.height)
            collector = Collector((x, y), self)
            self.grid.place_agent(collector, (x, y))
            self.schedule.add(collector)

        for i in range(self.initial_artisans):
            x = random.randrange(self.width)
            y = random.randrange(self.height)
            artisan = Artisan((x, y), self)
            self.grid.place_agent(artisan, (x, y))
            self.schedule.add(artisan)

        for i in range(self.initial_builders):
            x = random.randrange(self.width)
            y = random.randrange(self.height)
            builder = Builder((x, y), self)
            builder.add_objective(BuildHouseObjective(5), None)
            self.grid.place_agent(builder, (x, y))
            self.schedule.add(builder)

        # Create resources
        nminesplaced = 0
        if self.wood:

            for agent, x, y in self.grid.coord_iter():
                place_wood = random.choice([True, False, False, False, False])
                if place_wood:
                    fully_grown = random.choice([
                        True, False, False, False, False, False, False, False,
                        False, False, False
                    ])

                    if fully_grown:
                        countdown = self.wood_regrowth_time
                    else:
                        countdown = random.randrange(self.wood_regrowth_time)

                    patch = Wood((x, y), self, fully_grown, countdown)
                    self.grid.place_agent(patch, (x, y))
                    self.schedule.add(patch)

        if self.iron:
            for agent, x, y in self.grid.coord_iter():
                place_iron = random.choice([
                    True, False, False, False, False, False, False, False,
                    False, False, False
                ])
                if place_iron and nminesplaced < nmines:
                    empty = False
                    patch = Mine((x, y), self, empty)
                    self.grid.place_agent(patch, (x, y))
                    self.schedule.add(patch)
                    nminesplaced += 1

        self.running = True

    def step(self):
        self.schedule.step()
        self.datacollector.collect(self)
        if self.verbose:
            print(
                'Step: ',
                [self.schedule.time,
                 self.schedule.get_breed_count(Collector)])

    def run_model(self, step_count=200):

        if self.verbose:
            print('Initial number collector: ',
                  self.schedule.get_breed_count(Collector))

        for i in range(step_count):
            self.step()

        if self.verbose:
            print('')
            print('Final number collector: ',
                  self.schedule.get_breed_count(Collector))
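A minimal usage sketch for World, assuming the Terrain, Collector, Artisan, Builder, BuildHouseObjective, Wood, Mine and House classes and the WorldController scheduler are defined alongside it:

model = World(height=30, width=30, initial_collector=5, wood=True, iron=True)
model.run_model(step_count=100)

resources = model.datacollector.get_model_vars_dataframe()
print(resources[["Wood", "Collectors", "Houses"]].tail())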
Example #19
class Cooperate(Model):
    """
    This is a mesa implementation of the greedy cows model
    """
    # grid height
    grid_h = 20
    # grid width
    grid_w = 20

    description = 'A model for simulating greedy cows.'

    def run_fcm(self, is_greedy, concepts):
        fcmService = FCMAgent()
        if is_greedy:
            fcm_result = fcmService.getFCM('greedyCow1', concepts)
        else:
            fcm_result = fcmService.getFCM('coopCow1', concepts)
        return fcm_result

    #cooperative_probabilty should be between 0-100
    def __init__(self,
                 height=grid_h,
                 width=grid_w,
                 use_fcm=False,
                 init_cows=10,
                 stride_length=1,
                 cooperative_probabilty=1,
                 metabolism=1,
                 reproduction_cost=1,
                 reproduction_threshold=1,
                 grass_energy=1):

        self.height = height
        self.width = width
        #have to initialize the grid
        self.grid = MultiGrid(self.width, self.height, torus=True)
        self.init_cows = init_cows
        self.stride_length = stride_length
        self.cooperative_probabilty = cooperative_probabilty
        self.metabolism = metabolism
        self.reproduction_cost = reproduction_cost
        self.reproduction_threshold = reproduction_threshold
        self.grass_regrowth_time = 3
        self.grass_energy = grass_energy
        self.use_fcm = use_fcm

        #note reporter has to be aware of what type of class we're using
        if self.use_fcm:
            self.datacollector = DataCollector(
                model_reporters={
                    "Greedy":
                    lambda m: m.schedule.get_greedy_cows(FCMCow),
                    "Cooperative":
                    lambda m: m.schedule.get_cooperative_cows(FCMCow)
                })
        else:
            self.datacollector = DataCollector(
                model_reporters={
                    "Greedy": lambda m: m.schedule.get_greedy_cows(Cow),
                    "Cooperative":
                    lambda m: m.schedule.get_cooperative_cows(Cow)
                })

        #don't forget to init the super, otherwise you'll get strange errors
        super().__init__()
        #set the scheduler
        self.schedule = RandomActivationByBreed(self)

        energy = metabolism * 4
        is_greedy = False

        #Generate the cows. use FCM or procedural cows
        for i in range(self.init_cows):
            x = self.random.randrange(self.width)
            y = self.random.randrange(self.height)
            is_greedy = False
            greedy_range = self.random.randrange(0, 100)
            if (greedy_range < self.cooperative_probabilty):
                is_greedy = True

            if self.use_fcm:
                #params for coop cow
                fcm_input1 = {
                    'name': 'Food Observation',
                    'act': 'SIGMOID',
                    'output': 0.41
                }
                fcm_input2 = {
                    'name': 'Energy',
                    'act': 'SIGMOID',
                    'output': 0.75
                }
                fcm_input3 = {'name': 'Eat', 'act': 'SIGMOID', 'output': 0.76}
                body_input = [fcm_input1, fcm_input2, fcm_input3]
                concepts = {'concepts': body_input}
                if (is_greedy):
                    fcm_input1 = {
                        'name': 'Food Observation',
                        'act': 'INTERVAL',
                        'output': 1,
                        'fixedOutput': True
                    }
                    fcm_input2 = {
                        'name': 'Eat',
                        'act': 'INTERVAL',
                        'output': 1,
                        'fixedOutput': True
                    }
                    fcm_input3 = {
                        'name': 'Energy',
                        'act': 'INTERVAL',
                        'output': 1,
                        'fixedOutput': True
                    }
                    body_input = [fcm_input1, fcm_input2]
                    concepts = {'concepts': body_input}
                fcm_result = self.run_fcm(is_greedy, concepts)

                print("Model FCM: ")
                print(fcm_result)

                cow = FCMCow(self.next_id(), (x, y), self, True, fcm_result,
                             energy, is_greedy)
                self.grid.place_agent(cow, (x, y))
                self.schedule.add(cow)
            else:
                cow = Cow(self.next_id(), (x, y), self, True, energy,
                          is_greedy)
                self.grid.place_agent(cow, (x, y))
                self.schedule.add(cow)

        #Generate the grass
        for agent, x, y in self.grid.coord_iter():
            fully_grown = self.random.choice([True, False])
            if fully_grown:
                countdown = self.grass_regrowth_time
            else:
                countdown = self.random.randrange(self.grass_regrowth_time)
            patch = GrassPatch(self.next_id(), (x, y), self, fully_grown,
                               countdown)
            self.grid.place_agent(patch, (x, y))
            self.schedule.add(patch)

    def step(self):
        # tell all the agents in the model to run their step function
        self.schedule.step()
        # collect data
        self.datacollector.collect(self)

    def run_model(self, step_count=200):
        for i in range(step_count):
            self.step()
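A minimal usage sketch for Cooperate with the procedural (non-FCM) cows, assuming the Cow and GrassPatch classes and a RandomActivationByBreed scheduler exposing get_greedy_cows and get_cooperative_cows are defined alongside it:

model = Cooperate(init_cows=10, use_fcm=False, cooperative_probabilty=30)
for _ in range(100):
    model.step()

cows = model.datacollector.get_model_vars_dataframe()
print(cows[["Greedy", "Cooperative"]].tail())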
Example #20
class World(Model):
    """
    The class World which inherits from Model and is responsible for the
    intarations for the experiment.

    Attributs:
      gridsize: dimentions of the world grid
      cop_density: density of the cops placed in world
      citizen_density: density of the citizens in world
      agent_type: the alignment of agent either as cop or citizen
      l_state: the legitimacy state in world
      reduction_constant: the constant attribute which decide by what rate
      the state of l_state will reduce
    """
    def __init__(
        self,
        gridsize,
        cop_density,
        citizen_density,
        agent_type,
        legitimacy,
        l_state,
        reduction_constant,
        active_threshold,
        include_wealth,
        rich_threshold,
    ):

        # Create a new World instance.

        # Args:
        #    gridsize: the size of grid
        #    cop_density: density of cops to be placed
        #    citizen_density: density of citizens to be placed
        #    agent_type: the alignment of an agent, either cop or citizen
        #    l_state: the legitimacy state
        #    reduction_constant: the constant that decides the rate at which
        #        legitimacy decreases while l_state is enabled

        self.cop_density = cop_density
        self.citizen_density = citizen_density
        self.agent_type = agent_type

        self.legitimacy = legitimacy
        self.l_state = l_state

        self.reduction_constant = reduction_constant
        self.active_threshold = active_threshold
        self.include_wealth = include_wealth
        self.rich_threshold = rich_threshold

        self.ap_constant = 2.3

        # Agent counts (r_c: rich_count, r_a_c: rich_active_count,
        # m_c: middle_count, m_a_c: middle_active_count,
        # p_c: poor_count, p_a_c: poor_active_count)
        self.r_c = 0
        self.r_a_c = 0
        self.m_c = 0
        self.m_a_c = 0
        self.p_c = 0
        self.p_a_c = 0

        self.mean = 0
        self.kill_agents = []
        self.agents_killed = 0
        self.grid = MultiGrid(gridsize, gridsize, False)
        self.schedule = SimultaneousActivation(self)
        self.placement(gridsize)
        self.running = True

    def placement(self, gridsize):

        # Placement of agents inside the Grid

        # Arguments:
        # gridsize: Dimensions of grid

        unique_id = 1

        if self.cop_density + self.citizen_density > 1:
            print("Density ratios must not exceed 1", file=sys.stderr)

        self.bank = Bank(1, self)

        for (_, x, y) in self.grid.coord_iter():

            if self.random.random() < self.cop_density:
                a = Cop(unique_id, self)
                self.schedule.add(a)
                self.grid.place_agent(a, (x, y))
                unique_id += 1

            elif self.random.random() < (self.cop_density +
                                         self.citizen_density):
                a = Citizen(
                    unique_id,
                    self,
                    hardship=self.random.random(),
                    risk_aversion=self.random.random(),
                    bank=self.bank,
                    rich_threshold=self.rich_threshold,
                )
                self.schedule.add(a)
                self.grid.place_agent(a, (x, y))
                unique_id += 1

        self.datacollector = DataCollector(
            model_reporters={
                "Poor Grievance": lambda m: self.measure_poor_grievance(m),
                "Middle Grievance": lambda m: self.measure_middle_grievance(m),
                "Rich Grievance": lambda m: self.measure_rich_grievance(m),
                "Calm": lambda m: self.count_calm(m),
                "Revolt": lambda m: self.count_revolt(m),
                "Jail": lambda m: self.count_jailed(m),
                "Cops": lambda m: self.count_cops(m),
                "Rich": lambda m: self.count_rich(m),
                "Middle": lambda m: self.count_middle(m),
                "Poor": lambda m: self.count_poor(m),
                "Rich Wealth": lambda m: self.measure_rich_wealth(m),
                "Middle Wealth": lambda m: self.measure_middle_wealth(m),
                "Poor Wealth": lambda m: self.measure_poor_wealth(m),
                "Rich Confidence": lambda m: self.measure_rich_confidence(m),
                "Middle Confidence":
                lambda m: self.measure_middle_confidence(m),
                "Poor Confidence": lambda m: self.measure_poor_confidence(m),
                "Rich Hardship": lambda m: self.measure_rich_hardship(m),
                "Middle Hardship": lambda m: self.measure_middle_hardship(m),
                "Poor Hardship": lambda m: self.measure_poor_hardship(m),
                "Legitimacy": lambda m: self.measure_legitimacy(m),
                "WO Revolt": lambda m: self.wo_wealth_active(m),
                "WO Calm": lambda m: self.wo_wealth_calm(m),
                "WO Jail": lambda m: self.wo_wealth_jail(m),
            })

    def update_agent_count(self):

        # Updates the number of current and active agents

        self.r_c = len([
            a for a in self.schedule.agents
            if a.alignment == "Citizen" and a.status == "Rich"
        ])
        self.r_a_c = len([
            a for a in self.schedule.agents if a.alignment == "Citizen"
            and a.status == "Rich" and a.state == "Revolt"
        ])

        self.m_c = len([
            a for a in self.schedule.agents
            if a.alignment == "Citizen" and a.status == "Middle"
        ])
        self.m_a_c = len([
            a for a in self.schedule.agents if a.alignment == "Citizen"
            and a.status == "Middle" and a.state == "Revolt"
        ])

        self.p_c = len([
            a for a in self.schedule.agents
            if a.alignment == "Citizen" and a.status == "Poor"
        ])
        self.p_a_c = len([
            a for a in self.schedule.agents if a.alignment == "Citizen"
            and a.status == "Poor" and a.state == "Revolt"
        ])

    def mean_wealth(self):
        """Calculate the mean savings of all citizen agents."""
        savings = [
            agent.savings for agent in self.schedule.agents
            if agent.alignment == "Citizen"
        ]
        # Guard against an empty list so statistics.mean cannot raise
        self.mean = s.mean(savings) if savings else 0

    def update_core(self):
        if self.l_state:
            if self.legitimacy > 0.0:
                self.legitimacy -= self.reduction_constant
            else:
                self.legitimacy = 0.0

    def step(self):
        """Recalculate world attributes and advance the model by one step."""
        self.update_agent_count()
        self.datacollector.collect(self)
        self.mean_wealth()
        self.schedule.step()
        self.update_core()

        if self.kill_agents:
            self.kill_agents = list(dict.fromkeys(self.kill_agents))
            for i in self.kill_agents:
                self.grid.remove_agent(i)
                self.schedule.remove(i)
            self.kill_agents = []

        total_agents = len(
            [a for a in self.schedule.agents if a.alignment == "Citizen"])
        if total_agents < 2:
            self.running = False

    @staticmethod
    def count_calm(model):
        a = len([
            a for a in model.schedule.agents
            if a.alignment == "Citizen" and a.state == "Calm"
        ])
        return a

    @staticmethod
    def count_revolt(model):
        a = len([
            a for a in model.schedule.agents
            if a.alignment == "Citizen" and a.state == "Revolt"
        ])
        return a

    @staticmethod
    def count_jailed(model):
        a = len([
            a for a in model.schedule.agents
            if a.alignment == "Citizen" and a.state == "Jail"
        ])
        return a

    @staticmethod
    def count_cops(model):
        a = len([a for a in model.schedule.agents if a.alignment == "Cop"])
        return a

    @staticmethod
    def count_rich(model):
        a = len([
            a for a in model.schedule.agents
            if a.alignment == "Citizen" and a.status == "Rich"
        ])
        return a

    @staticmethod
    def count_middle(model):
        a = len([
            a for a in model.schedule.agents
            if a.alignment == "Citizen" and a.status == "Middle"
        ])
        return a

    @staticmethod
    def count_poor(model):
        a = len([
            a for a in model.schedule.agents
            if a.alignment == "Citizen" and a.status == "Poor"
        ])
        return a

    @staticmethod
    def measure_poor_grievance(model):
        if model.include_wealth:
            confidence = [
                a.grievance for a in model.schedule.agents
                if a.alignment == "Citizen" and a.status == "Poor"
            ]
            if confidence:
                total = sum(confidence)
                return total
            else:
                return 0
        else:
            confidence = [
                a.grievance for a in model.schedule.agents
                if a.alignment == "Citizen"
            ]
            return sum(confidence)

    @staticmethod
    def measure_middle_grievance(model):
        confidence = [
            a.grievance for a in model.schedule.agents
            if a.alignment == "Citizen" and a.status == "Middle"
        ]
        if confidence:
            total = sum(confidence)
            return total
        else:
            return 0

    @staticmethod
    def measure_rich_grievance(model):
        confidence = [
            a.grievance for a in model.schedule.agents
            if a.alignment == "Citizen" and a.status == "Rich"
        ]
        if confidence:
            total = sum(confidence)
            return total
        else:
            return 0

    @staticmethod
    def measure_rich_wealth(model):
        wealth = [
            a.wealth for a in model.schedule.agents
            if a.alignment == "Citizen" and a.status == "Rich"
        ]
        return s.mean(wealth) if wealth else 0

    @staticmethod
    def measure_middle_wealth(model):
        wealth = [
            a.wealth for a in model.schedule.agents
            if a.alignment == "Citizen" and a.status == "Middle"
        ]
        return s.mean(wealth) if wealth else 0

    @staticmethod
    def measure_poor_wealth(model):
        wealth = [
            a.wealth for a in model.schedule.agents
            if a.alignment == "Citizen" and a.status == "Poor"
        ]
        return s.mean(wealth) if wealth else 0

    @staticmethod
    def measure_poor_confidence(model):
        confidence = [
            a.confidence for a in model.schedule.agents
            if a.alignment == "Citizen" and a.status == "Poor"
        ]
        if confidence:
            total = s.mean(confidence)
            return total
        else:
            return 0

    @staticmethod
    def measure_middle_confidence(model):
        confidence = [
            a.confidence for a in model.schedule.agents
            if a.alignment == "Citizen" and a.status == "Middle"
        ]
        if confidence:
            total = s.mean(confidence)
            return total
        else:
            return 0

    @staticmethod
    def measure_rich_confidence(model):
        confidence = [
            a.confidence for a in model.schedule.agents
            if a.alignment == "Citizen" and a.status == "Rich"
        ]
        if confidence:
            total = s.mean(confidence)
            return total
        else:
            return 0

    @staticmethod
    def measure_total_reserves(model):
        return s.mean(model.bank.total_reserves)

    @staticmethod
    def measure_poor_hardship(model):

        confidence = [
            a.hardship for a in model.schedule.agents
            if a.alignment == "Citizen" and a.status == "Poor"
        ]
        if confidence:
            total = sum(confidence)
            return total
        else:
            return 0

    @staticmethod
    def measure_middle_hardship(model):
        confidence = [
            a.hardship for a in model.schedule.agents
            if a.alignment == "Citizen" and a.status == "Middle"
        ]
        if confidence:
            total = sum(confidence)
            return total
        else:
            return 0

    @staticmethod
    def measure_rich_hardship(model):
        confidence = [
            a.hardship for a in model.schedule.agents
            if a.alignment == "Citizen" and a.status == "Rich"
        ]
        if confidence:
            total = sum(confidence)
            return total
        else:
            return 0

    @staticmethod
    def measure_legitimacy(model):
        return model.legitimacy * 100

    @staticmethod
    def wo_wealth_active(model):
        if not model.include_wealth:
            active = [
                a for a in model.schedule.agents
                if a.alignment == "Citizen" and a.state == "Revolt"
            ]
            return len(active)
        else:
            return 0

    @staticmethod
    def wo_wealth_calm(model):
        if not model.include_wealth:
            active = [
                a for a in model.schedule.agents
                if a.alignment == "Citizen" and a.state == "Calm"
            ]
            return len(active)
        else:
            return 0

    @staticmethod
    def wo_wealth_jail(model):
        if not model.include_wealth:
            active = [
                a for a in model.schedule.agents
                if a.alignment == "Citizen" and a.state == "Jail"
            ]
            return len(active)
        else:
            return 0
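
The model class these reporters belong to is defined earlier in the listing, so its name is not visible here; assuming it is exposed as, say, CivilViolenceWealth (a hypothetical name), a minimal run-and-collect sketch could look like this:

# Sketch only: the class name and constructor defaults are assumptions,
# not part of the original source.
model = CivilViolenceWealth()
for _ in range(100):
    if not model.running:
        break
    model.step()
# get_model_vars_dataframe() is the standard Mesa DataCollector accessor
results = model.datacollector.get_model_vars_dataframe()
print(results[["Calm", "Revolt", "Jail"]].tail())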
Beispiel #21
0
class WolfSheep(Model):
    '''
    Wolf-Sheep Predation Model
    '''

    height = 20
    width = 20

    initial_sheep = 100
    initial_wolves = 50

    sheep_reproduce = 0.04
    wolf_reproduce = 0.05

    wolf_gain_from_food = 20

    grass = False
    grass_regrowth_time = 30
    sheep_gain_from_food = 4

    verbose = False  # Print-monitoring

    description = 'A model for simulating wolf and sheep (predator-prey) ecosystem modelling.'

    def __init__(self, height=20, width=20,
                 initial_sheep=100, initial_wolves=50,
                 sheep_reproduce=0.04, wolf_reproduce=0.05,
                 wolf_gain_from_food=20,
                 grass=False, grass_regrowth_time=30, sheep_gain_from_food=4):
        '''
        Create a new Wolf-Sheep model with the given parameters.

        Args:
            initial_sheep: Number of sheep to start with
            initial_wolves: Number of wolves to start with
            sheep_reproduce: Probability of each sheep reproducing each step
            wolf_reproduce: Probability of each wolf reproducing each step
            wolf_gain_from_food: Energy a wolf gains from eating a sheep
            grass: Whether to have the sheep eat grass for energy
            grass_regrowth_time: How long it takes for a grass patch to regrow
                                 once it is eaten
            sheep_gain_from_food: Energy sheep gain from grass, if enabled.
        '''
        super().__init__()
        # Set parameters
        self.height = height
        self.width = width
        self.initial_sheep = initial_sheep
        self.initial_wolves = initial_wolves
        self.sheep_reproduce = sheep_reproduce
        self.wolf_reproduce = wolf_reproduce
        self.wolf_gain_from_food = wolf_gain_from_food
        self.grass = grass
        self.grass_regrowth_time = grass_regrowth_time
        self.sheep_gain_from_food = sheep_gain_from_food

        self.schedule = RandomActivationByBreed(self)
        self.grid = MultiGrid(self.height, self.width, torus=True)
        self.datacollector = DataCollector(
            {"Wolves": lambda m: m.schedule.get_breed_count(Wolf),
             "Sheep": lambda m: m.schedule.get_breed_count(Sheep)})

        # Create sheep:
        for i in range(self.initial_sheep):
            x = self.random.randrange(self.width)
            y = self.random.randrange(self.height)
            energy = self.random.randrange(2 * self.sheep_gain_from_food)
            sheep = Sheep(self.next_id(), (x, y), self, True, energy)
            self.grid.place_agent(sheep, (x, y))
            self.schedule.add(sheep)

        # Create wolves
        for i in range(self.initial_wolves):
            x = self.random.randrange(self.width)
            y = self.random.randrange(self.height)
            energy = self.random.randrange(2 * self.wolf_gain_from_food)
            wolf = Wolf(self.next_id(), (x, y), self, True, energy)
            self.grid.place_agent(wolf, (x, y))
            self.schedule.add(wolf)

        # Create grass patches
        if self.grass:
            for agent, x, y in self.grid.coord_iter():

                fully_grown = self.random.choice([True, False])

                if fully_grown:
                    countdown = self.grass_regrowth_time
                else:
                    countdown = self.random.randrange(self.grass_regrowth_time)

                patch = GrassPatch(self.next_id(), (x, y), self,
                                   fully_grown, countdown)
                self.grid.place_agent(patch, (x, y))
                self.schedule.add(patch)

        self.running = True
        self.datacollector.collect(self)

    def step(self):
        self.schedule.step()
        # collect data
        self.datacollector.collect(self)
        if self.verbose:
            print([self.schedule.time,
                   self.schedule.get_breed_count(Wolf),
                   self.schedule.get_breed_count(Sheep)])

    def run_model(self, step_count=200):

        if self.verbose:
            print('Initial number wolves: ',
                  self.schedule.get_breed_count(Wolf))
            print('Initial number sheep: ',
                  self.schedule.get_breed_count(Sheep))

        for i in range(step_count):
            self.step()

        if self.verbose:
            print('')
            print('Final number wolves: ',
                  self.schedule.get_breed_count(Wolf))
            print('Final number sheep: ',
                  self.schedule.get_breed_count(Sheep))
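
A minimal way to exercise this example (a sketch, not part of the original source; parameter values are just the defaults shown above):

model = WolfSheep(grass=True)
model.run_model(step_count=100)
# get_model_vars_dataframe() is the standard Mesa DataCollector accessor
counts = model.datacollector.get_model_vars_dataframe()
print(counts[["Wolves", "Sheep"]].tail())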
Beispiel #22
0
class SamplePSO(PSO):
    def __init__(self, population: int, dimension: int,
                 attraction_best_global: float,
                 attraction_best_personal: float, lim_vel_particles: float,
                 inertia_particle: float, max_iterations: int, width: int,
                 height: int, num_max_locales: int, suavizar_espacio: int):
        super().__init__(population, dimension, attraction_best_global,
                         attraction_best_personal, lim_vel_particles,
                         inertia_particle, max_iterations)

        self.width = width
        self.height = height
        self.grid = MultiGrid(self.width, self.height, torus=False)

        # Search-space parameters
        self.num_max_locales = num_max_locales
        self.suavizar_espacio = suavizar_espacio

        # Create the search space
        self.setup_search_space()

        # Create particles
        # (this is done by the parent class)

        # Place particles
        # (the internal position has to be adapted to the search space)
        self.place_particles(True)

        # Data collection for plotting
        self.datacollector = DataCollector({
            "Best": lambda m: m.global_best_value,
            "Average": lambda m: m.average()
        })

    def average(self):
        total = 0
        for agent in self.schedule.agents:
            if isinstance(agent, Particle):
                total += agent.personal_best_value
        return total / self.population

    # The parent's step is overridden to add the data collector

    def step(self):
        super().step()
        # Collect data
        self.datacollector.collect(self)
        # Stop if the global maximum has been reached
        if self.global_best_value == 1:
            self.running = False

    def place_particles(self, initial=False):
        for particle in self.particles:
            pos = self.pos_particle_to_pos(particle)

            if initial:
                self.grid.place_agent(particle, pos)
            else:
                self.grid.move_agent(particle, pos)

    def pos_particle_to_pos(self, particle: Particle):
        # Convert particle positions in [0, 1] into 2D grid positions
        min_xcor = 0
        max_xcor = self.width - 1
        min_ycor = 0
        max_ycor = self.height - 1
        x_cor = self.convert(particle.pos_particle[0], min_xcor, max_xcor)
        y_cor = self.convert(particle.pos_particle[1], min_ycor, max_ycor)
        return (x_cor, y_cor)

    @staticmethod
    def convert(x: float, a: float, b: float) -> int:
        # Map x in [0, 1] to an integer coordinate in [a, b]
        return int(a + x * (b - a))
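
    # Worked example (added; not in the original source): with width = 50,
    # convert(0.0, 0, 49) -> 0, convert(0.5, 0, 49) -> 24 and
    # convert(1.0, 0, 49) -> 49, so particle coordinates in [0, 1] map onto
    # integer grid columns/rows in [0, width - 1] and [0, height - 1].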

    def setup_search_space(self):
        # Prepare a search space with hills and valleys

        if self.num_max_locales == 0:
            for agent, x, y in self.grid.coord_iter():
                val = random.random()
                patch = Patch(self.unique_id, self, (x, y), val)
                self.unique_id += 1
                self.grid.place_agent(patch, (x, y))
        else:
            n_elements = (self.width - 1) * (self.height - 1)
            selected_elements = random.sample(range(n_elements),
                                              self.num_max_locales)
            element = 0
            for (agentSet, x, y) in self.grid.coord_iter():
                val = 10 * random.random() if element in selected_elements else 0
                patch = Patch(self.unique_id, self, (x, y), val)
                self.unique_id += 1
                self.grid.place_agent(patch, (x, y))
                element += 1

        # Smooth the space
        for _ in range(self.suavizar_espacio):
            for (agentSet, x, y) in self.grid.coord_iter():
                for agent in agentSet:
                    if isinstance(agent, Patch):
                        agent.diffuse_val(1)

        # Normalise the space into the range [0, 0.99999]
        min_val = 0
        max_val = 0
        for (agentSet, x, y) in self.grid.coord_iter():
            for agent in agentSet:
                if isinstance(agent, Patch):
                    if agent.val < min_val:
                        min_val = agent.val
                    if agent.val > max_val:
                        max_val = agent.val
        for (agentSet, x, y) in self.grid.coord_iter():
            for agent in agentSet:
                if isinstance(agent, Patch):
                    agent.val = 0.99999 * (agent.val - min_val) / (max_val -
                                                                   min_val)

        # Mark the maximum patch with value 1
        max_val = 0
        max_patch = None
        for (agentSet, x, y) in self.grid.coord_iter():
            for agent in agentSet:
                if isinstance(agent, Patch):
                    if agent.val > max_val:
                        max_val = agent.val
                        max_patch = agent
        # The original checked isinstance(max_patch, Particle), which never
        # holds here; the maximum found must be a Patch
        if isinstance(max_patch, Patch):
            max_patch.val = 1

        # Colour the patches
        for (agentSet, x, y) in self.grid.coord_iter():
            for agent in agentSet:
                if isinstance(agent, Patch):
                    agent.set_color()

    # The evaluation and pso_external_update methods must be defined here

    def evaluation(self, particle: Particle):
        # particle.pos could be used if pso_external_update ran first,
        # but it runs afterwards

        # The first evaluation, when particles are created, still needs checking
        if self.grid is not None:
            pos = self.pos_particle_to_pos(particle)
            for patch in self.grid.get_cell_list_contents(pos):
                if isinstance(patch, Patch):
                    return patch.val
        # No grid yet (first evaluation) or no Patch in the cell: neutral value
        return 0

    def pso_external_update(self):
        self.place_particles()
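
One way to drive this example (a sketch; the PSO parent class is not shown here, so the parameter values below are illustrative assumptions rather than recommended settings):

model = SamplePSO(population=30, dimension=2,
                  attraction_best_global=1.5, attraction_best_personal=1.5,
                  lim_vel_particles=0.1, inertia_particle=0.7,
                  max_iterations=200, width=50, height=50,
                  num_max_locales=10, suavizar_espacio=20)
for _ in range(200):
    if not getattr(model, "running", True):
        break
    model.step()
print("Best value found:", model.global_best_value)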
Beispiel #23
0
class PitchModel(Model):
    def __init__(self, N, width, height):
        '''Initiate the model'''
        self.num_agents = 2 * N
        self.grid = MultiGrid(width, height, False)
        self.schedule = SimultaneousActivation(self)
        self.running = True
        self.justConceded = 0
        self.score1 = 0
        self.score2 = 0
        self.i = 0
        self.newPossession = -1

        #Initialise potential fields for each state
        self.movePotentialGK1 = np.zeros((width, height))
        self.movePotentialGK2 = np.zeros((width, height))
        self.movePotentialGKP1 = np.zeros((width, height))
        self.movePotentialGKP2 = np.zeros((width, height))
        self.movePotentialDF1 = np.zeros((width, height))
        self.movePotentialDF2 = np.zeros((width, height))
        self.movePotentialPO1 = np.zeros((width, height))
        self.movePotentialPO2 = np.zeros((width, height))
        self.movePotentialBP1 = np.zeros((width, height))
        self.movePotentialBP2 = np.zeros((width, height))

        #Set initial potential field due to goals
        self.goalPotentialGK1 = np.zeros((width, height))
        self.goalPotentialGK2 = np.zeros((width, height))
        self.goalPotentialGKP1 = np.zeros((width, height))
        self.goalPotentialGKP2 = np.zeros((width, height))
        self.goalPotentialDF1 = np.zeros((width, height))
        self.goalPotentialDF2 = np.zeros((width, height))
        self.goalPotentialPO1 = np.zeros((width, height))
        self.goalPotentialPO2 = np.zeros((width, height))
        self.goalPotentialBP1 = np.zeros((width, height))
        self.goalPotentialBP2 = np.zeros((width, height))
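
        # Note (added; interpretation assumed from the code rather than stated
        # by the author): the loops below build a static potential field around
        # each goal mouth for every player state (GK, GKP, DF, PO, BP) and
        # team; a linear (y + 1) term attracts towards one end of the pitch
        # while a -log2 term repels from the other.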

        for x in range(8):
            for y in range(height):
                widthVal = ((width / 2) - 4) + x
                self.goalPotentialGK1[int(widthVal)][
                    y] = self.goalPotentialGK1[int(widthVal)][y] + (y + 1)
                self.goalPotentialGK1[int(widthVal)][
                    height - (y + 1)] = self.goalPotentialGK1[int(widthVal)][
                        height - (y + 1)] - np.log2(height - (y + 1))

                self.goalPotentialGK2[int(
                    widthVal)][y] = self.goalPotentialGK2[int(
                        widthVal)][y] - np.log2(height - (y + 1))
                self.goalPotentialGK2[int(widthVal)][height - (
                    y + 1)] = self.goalPotentialGK2[int(widthVal)][height -
                                                                   (y + 1)] + (
                                                                       y + 1)

                self.goalPotentialGKP1[int(widthVal)][
                    y] = self.goalPotentialGKP1[int(widthVal)][y] + (y + 1)
                self.goalPotentialGKP1[int(widthVal)][
                    height - (y + 1)] = self.goalPotentialGKP1[int(widthVal)][
                        height - (y + 1)] - np.log2(height - (y + 1))

                self.goalPotentialGKP2[int(
                    widthVal)][y] = self.goalPotentialGKP2[int(
                        widthVal)][y] - np.log2(height - (y + 1))
                self.goalPotentialGKP2[int(widthVal)][height - (
                    y + 1
                )] = self.goalPotentialGKP2[int(widthVal)][height -
                                                           (y + 1)] + (y + 1)

                self.goalPotentialDF1[int(widthVal)][
                    y] = self.goalPotentialDF1[int(widthVal)][y] + (y + 1)
                self.goalPotentialDF1[int(widthVal)][
                    height - (y + 1)] = self.goalPotentialDF1[int(widthVal)][
                        height - (y + 1)] - np.log2(height - (y + 1))

                self.goalPotentialDF2[int(
                    widthVal)][y] = self.goalPotentialDF2[int(
                        widthVal)][y] - np.log2(height - (y + 1))
                self.goalPotentialDF2[int(widthVal)][height - (
                    y + 1)] = self.goalPotentialDF2[int(widthVal)][height -
                                                                   (y + 1)] + (
                                                                       y + 1)

                self.goalPotentialPO1[int(
                    widthVal)][y] = self.goalPotentialPO1[int(
                        widthVal)][y] - np.log2(height - (y + 1))
                self.goalPotentialPO1[int(widthVal)][height - (
                    y + 1)] = self.goalPotentialPO1[int(widthVal)][height -
                                                                   (y + 1)] + (
                                                                       y + 1)

                self.goalPotentialPO2[int(widthVal)][
                    y] = self.goalPotentialPO2[int(widthVal)][y] + (y + 1)
                self.goalPotentialPO2[int(widthVal)][
                    height - (y + 1)] = self.goalPotentialPO2[int(widthVal)][
                        height - (y + 1)] - np.log2(height - (y + 1))

                self.goalPotentialBP1[int(
                    widthVal)][y] = self.goalPotentialBP1[int(
                        widthVal)][y] - np.log2(height - (y + 1))
                self.goalPotentialBP1[int(widthVal)][height - (
                    y + 1)] = self.goalPotentialBP1[int(widthVal)][height -
                                                                   (y + 1)] + (
                                                                       y + 1)

                self.goalPotentialBP2[int(widthVal)][
                    y] = self.goalPotentialBP2[int(widthVal)][y] + (y + 1)
                self.goalPotentialBP2[int(widthVal)][
                    height - (y + 1)] = self.goalPotentialBP2[int(widthVal)][
                        height - (y + 1)] - np.log2(height - (y + 1))

        for x in range(int((width / 2) - 4)):
            for y in range(height):
                r1 = (((x + 1)**2) + ((y + 1)**2))**0.5
                r2 = (((x + 1)**2) + ((height - (y + 1))**2))**0.5
                goalStart = ((width / 2) - 5) - x
                goalEnd = ((width / 2) + 4) + x

                self.goalPotentialGK1[int(goalStart)][
                    y] = self.goalPotentialGK1[int(goalStart)][y] + (r1)
                self.goalPotentialGK1[int(goalEnd)][y] = self.goalPotentialGK1[
                    int(goalEnd)][y] + (r1)
                self.goalPotentialGK1[int(goalStart)][
                    height - (y + 1)] = self.goalPotentialGK1[int(goalStart)][
                        height - (y + 1)] - np.log2(r2)
                self.goalPotentialGK1[int(goalEnd)][height - (
                    y + 1
                )] = self.goalPotentialGK1[int(goalEnd)][height -
                                                         (y + 1)] - np.log2(r2)

                self.goalPotentialGK2[int(goalStart)][
                    y] = self.goalPotentialGK2[int(goalStart)][y] - np.log2(r2)
                self.goalPotentialGK2[int(goalEnd)][y] = self.goalPotentialGK2[
                    int(goalEnd)][y] - np.log2(r2)
                self.goalPotentialGK2[int(goalStart)][height - (
                    y +
                    1)] = self.goalPotentialGK2[int(goalStart)][height -
                                                                (y + 1)] + (r1)
                self.goalPotentialGK2[int(goalEnd)][height - (
                    y +
                    1)] = self.goalPotentialGK2[int(goalEnd)][height -
                                                              (y + 1)] + (r1)

                self.goalPotentialGKP1[int(goalStart)][
                    y] = self.goalPotentialGKP1[int(goalStart)][y] + (r1)
                self.goalPotentialGKP1[int(goalEnd)][
                    y] = self.goalPotentialGKP1[int(goalEnd)][y] + (r1)
                self.goalPotentialGKP1[int(goalStart)][
                    height - (y + 1)] = self.goalPotentialGKP1[int(goalStart)][
                        height - (y + 1)] - np.log2(r2)
                self.goalPotentialGKP1[int(goalEnd)][
                    height - (y + 1)] = self.goalPotentialGKP1[int(goalEnd)][
                        height - (y + 1)] - np.log2(r2)

                self.goalPotentialGKP2[int(
                    goalStart
                )][y] = self.goalPotentialGKP2[int(goalStart)][y] - np.log2(r2)
                self.goalPotentialGKP2[int(goalEnd)][
                    y] = self.goalPotentialGKP2[int(goalEnd)][y] - np.log2(r2)
                self.goalPotentialGKP2[int(goalStart)][height - (
                    y + 1
                )] = self.goalPotentialGKP2[int(goalStart)][height -
                                                            (y + 1)] + (r1)
                self.goalPotentialGKP2[int(goalEnd)][height - (
                    y +
                    1)] = self.goalPotentialGKP2[int(goalEnd)][height -
                                                               (y + 1)] + (r1)

                self.goalPotentialDF1[int(goalStart)][
                    y] = self.goalPotentialDF1[int(goalStart)][y] + (r1)
                self.goalPotentialDF1[int(goalEnd)][y] = self.goalPotentialDF1[
                    int(goalEnd)][y] + (r1)
                self.goalPotentialDF1[int(goalStart)][
                    height - (y + 1)] = self.goalPotentialDF1[int(goalStart)][
                        height - (y + 1)] - np.log2(r2)
                self.goalPotentialDF1[int(goalEnd)][height - (
                    y + 1
                )] = self.goalPotentialDF1[int(goalEnd)][height -
                                                         (y + 1)] - np.log2(r2)

                self.goalPotentialDF2[int(goalStart)][
                    y] = self.goalPotentialDF2[int(goalStart)][y] - np.log2(r2)
                self.goalPotentialDF2[int(goalEnd)][y] = self.goalPotentialDF2[
                    int(goalEnd)][y] - np.log2(r2)
                self.goalPotentialDF2[int(goalStart)][height - (
                    y +
                    1)] = self.goalPotentialDF2[int(goalStart)][height -
                                                                (y + 1)] + (r1)
                self.goalPotentialDF2[int(goalEnd)][height - (
                    y +
                    1)] = self.goalPotentialDF2[int(goalEnd)][height -
                                                              (y + 1)] + (r1)

                self.goalPotentialPO1[int(goalStart)][
                    y] = self.goalPotentialPO1[int(goalStart)][y] - np.log2(r2)
                self.goalPotentialPO1[int(goalEnd)][y] = self.goalPotentialPO1[
                    int(goalEnd)][y] - np.log2(r2)
                self.goalPotentialPO1[int(goalStart)][height - (
                    y +
                    1)] = self.goalPotentialPO1[int(goalStart)][height -
                                                                (y + 1)] + (r1)
                self.goalPotentialPO1[int(goalEnd)][height - (
                    y +
                    1)] = self.goalPotentialPO1[int(goalEnd)][height -
                                                              (y + 1)] + (r1)

                self.goalPotentialPO2[int(goalStart)][
                    y] = self.goalPotentialPO2[int(goalStart)][y] + (r1)
                self.goalPotentialPO2[int(goalEnd)][y] = self.goalPotentialPO2[
                    int(goalEnd)][y] + (r1)
                self.goalPotentialPO2[int(goalStart)][
                    height - (y + 1)] = self.goalPotentialPO2[int(goalStart)][
                        height - (y + 1)] - np.log2(r2)
                self.goalPotentialPO2[int(goalEnd)][height - (
                    y + 1
                )] = self.goalPotentialPO2[int(goalEnd)][height -
                                                         (y + 1)] - np.log2(r2)

                self.goalPotentialBP1[int(goalStart)][
                    y] = self.goalPotentialBP1[int(goalStart)][y] - np.log2(r2)
                self.goalPotentialBP1[int(goalEnd)][y] = self.goalPotentialBP1[
                    int(goalEnd)][y] - np.log2(r2)
                self.goalPotentialBP1[int(goalStart)][height - (
                    y +
                    1)] = self.goalPotentialBP1[int(goalStart)][height -
                                                                (y + 1)] + (r1)
                self.goalPotentialBP1[int(goalEnd)][height - (
                    y +
                    1)] = self.goalPotentialBP1[int(goalEnd)][height -
                                                              (y + 1)] + (r1)

                self.goalPotentialBP2[int(goalStart)][
                    y] = self.goalPotentialBP2[int(goalStart)][y] + (r1)
                self.goalPotentialBP2[int(goalEnd)][y] = self.goalPotentialBP2[
                    int(goalEnd)][y] + (r1)
                self.goalPotentialBP2[int(goalStart)][
                    height - (y + 1)] = self.goalPotentialBP2[int(goalStart)][
                        height - (y + 1)] - np.log2(r2)
                self.goalPotentialBP2[int(goalEnd)][height - (
                    y + 1
                )] = self.goalPotentialBP2[int(goalEnd)][height -
                                                         (y + 1)] - np.log2(r2)

        #Create Agents
        for i in range(self.num_agents):
            if i > ((2 * N) - 3):
                a = PlayerAgent(i, self, True)
            else:
                a = PlayerAgent(i, self)
            self.schedule.add(a)

            x = self.random.randrange(self.grid.width)
            y = self.random.randrange(self.grid.height)
            self.grid.place_agent(a, (x, y))

        #Set Up Kickoff
        self.kickoff()

        #Set Up DataCollector
        self.datacollector = DataCollector(model_reporters={
            "Score 1": "score1",
            "Score 2": "score2"
        },
                                           agent_reporters={
                                               "Avg. Displacement": "avgDisp",
                                               "Max Displacement": "maxDisp"
                                           })

    def kickoff(self):
        posBoy = -1
        newPositions = {}
        if self.justConceded == 0:
            posTeam = self.random.randint(1, 2)
        else:
            posTeam = self.justConceded
        for cellContents, x, y in self.grid.coord_iter():
            if len(cellContents) == 0:
                pass
            else:
                for i in cellContents:
                    if posBoy == -1:
                        if i.teamID == posTeam:
                            if i.goalkeeper:
                                pass
                            else:
                                i.possession = True
                                posBoy = i.unique_id
                                newPositions[i] = ((self.grid.width / 2) - 1,
                                                   (self.grid.height / 2) - 1)
                    if i.unique_id != posBoy:
                        if i.teamID == 1:
                            if i.goalkeeper:
                                # Integer division keeps randint arguments ints
                                x = self.random.randint(
                                    (self.grid.width // 2) - 5,
                                    (self.grid.width // 2) + 3)
                                y = self.random.randint(0, 17)
                                newPositions[i] = (x, y)
                            else:
                                x = self.random.randrange(self.grid.width)
                                y = self.random.randint(
                                    0, (self.grid.height // 2) - 1)
                                newPositions[i] = (x, y)
                        else:
                            if i.goalkeeper:
                                x = self.random.randint(
                                    (self.grid.width // 2) - 5,
                                    (self.grid.width // 2) + 3)
                                y = self.random.randint(
                                    self.grid.height - 18,
                                    self.grid.height - 1)
                                newPositions[i] = (x, y)
                            else:
                                x = self.random.randrange(self.grid.width)
                                y = self.random.randint(
                                    (self.grid.height // 2) - 1,
                                    self.grid.height - 1)
                                newPositions[i] = (x, y)
        for key in newPositions.keys():
            (x, y) = newPositions[key]
            x = int(x)
            y = int(y)
            self.grid.move_agent(key, (x, y))
        self.justConceded = 0

    def calcPotential(self):
        for x in range(self.grid.width):
            for y in range(self.grid.height):
                self.movePotentialGK1[x][y] = self.goalPotentialGK1[x][y]
                self.movePotentialGK2[x][y] = self.goalPotentialGK2[x][y]

                self.movePotentialGKP1[x][y] = self.goalPotentialGKP1[x][y]
                self.movePotentialGKP2[x][y] = self.goalPotentialGKP2[x][y]

                self.movePotentialDF1[x][y] = self.goalPotentialDF1[x][y]
                self.movePotentialDF2[x][y] = self.goalPotentialDF2[x][y]

                self.movePotentialPO1[x][y] = self.goalPotentialPO1[x][y]
                self.movePotentialPO2[x][y] = self.goalPotentialPO2[x][y]

                self.movePotentialBP1[x][y] = self.goalPotentialBP1[x][y]
                self.movePotentialBP2[x][y] = self.goalPotentialBP2[x][y]
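
        # Note (added; interpretation assumed from the code): each player then
        # adds to every cell either a soft 20 / r**2 repulsion or a
        # Lennard-Jones-style 40 * ((sigma / r)**12 - (sigma / r)**6) term,
        # depending on the player's state, on top of the static goal potentials
        # copied above.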

        playerPos = {}
        for agent, x, y in self.grid.coord_iter():
            if len(agent) == 0:
                pass
            else:
                for i in agent:
                    playerPos[i.unique_id] = {"x": x, "y": y, "state": i.state}
                    if i.state == "":
                        i.checkState()
                        playerPos[i.unique_id]['state'] = i.state
        for key in playerPos.keys():
            agent = playerPos[key]

            for i in range(self.grid.width):
                for j in range(self.grid.height):
                    r = ((agent['x'] - i)**2 + (agent['y'] - j)**2)**(0.5)
                    if r != 0:
                        if agent['state'] == "GK":
                            self.movePotentialGK1[i][
                                j] = self.movePotentialGK1[i][j] + (20 /
                                                                    (r**2))
                            self.movePotentialGK2[i][
                                j] = self.movePotentialGK2[i][j] + (20 /
                                                                    (r**2))

                            self.movePotentialGKP1[i][
                                j] = self.movePotentialGKP1[i][j] + (20 /
                                                                     (r**2))
                            self.movePotentialGKP2[i][
                                j] = self.movePotentialGKP2[i][j] + (20 /
                                                                     (r**2))

                            self.movePotentialDF1[i][
                                j] = self.movePotentialDF1[i][j] + 40 * (
                                    (5 * (2**(-1 / 6)) / r)**12 -
                                    (5 * (2**(-1 / 6)) / r)**6)
                            self.movePotentialDF2[i][
                                j] = self.movePotentialDF2[i][j] + 40 * (
                                    (5 * (2**(-1 / 6)) / r)**12 -
                                    (5 * (2**(-1 / 6)) / r)**6)

                            self.movePotentialPO1[i][
                                j] = self.movePotentialPO1[i][j] + (20 /
                                                                    (r**2))
                            self.movePotentialPO2[i][
                                j] = self.movePotentialPO2[i][j] + (20 /
                                                                    (r**2))

                            self.movePotentialBP1[i][
                                j] = self.movePotentialBP1[i][j] + (20 /
                                                                    (r**2))
                            self.movePotentialBP2[i][
                                j] = self.movePotentialBP2[i][j] + (20 /
                                                                    (r**2))

                        elif agent['state'] == "GKP":
                            self.movePotentialGK1[i][
                                j] = self.movePotentialGK1[i][j] + (20 /
                                                                    (r**2))
                            self.movePotentialGK2[i][
                                j] = self.movePotentialGK2[i][j] + (20 /
                                                                    (r**2))

                            self.movePotentialGKP1[i][
                                j] = self.movePotentialGKP1[i][j]
                            self.movePotentialGKP2[i][
                                j] = self.movePotentialGKP2[i][j]

                            self.movePotentialDF1[i][
                                j] = self.movePotentialDF1[i][j] + (20 /
                                                                    (r**2))
                            self.movePotentialDF2[i][
                                j] = self.movePotentialDF2[i][j] + (20 /
                                                                    (r**2))

                            self.movePotentialPO1[i][
                                j] = self.movePotentialPO1[i][j] + (20 /
                                                                    (r**2))
                            self.movePotentialPO2[i][
                                j] = self.movePotentialPO2[i][j] + (20 /
                                                                    (r**2))

                            self.movePotentialBP1[i][
                                j] = self.movePotentialBP1[i][j]
                            self.movePotentialBP2[i][
                                j] = self.movePotentialBP2[i][j]

                        elif agent['state'] == "DF":
                            self.movePotentialGK1[i][
                                j] = self.movePotentialGK1[i][j] + (20 /
                                                                    (r**2))
                            self.movePotentialGK2[i][
                                j] = self.movePotentialGK2[i][j] + (20 /
                                                                    (r**2))

                            self.movePotentialGKP1[i][
                                j] = self.movePotentialGKP1[i][j] + (20 /
                                                                     (r**2))
                            self.movePotentialGKP2[i][
                                j] = self.movePotentialGKP2[i][j] + (20 /
                                                                     (r**2))

                            self.movePotentialDF1[i][
                                j] = self.movePotentialDF1[i][j] + (20 /
                                                                    (r**2))
                            self.movePotentialDF2[i][
                                j] = self.movePotentialDF2[i][j] + (20 /
                                                                    (r**2))

                            self.movePotentialPO1[i][
                                j] = self.movePotentialPO1[i][j] + (20 /
                                                                    (r**2))
                            self.movePotentialPO2[i][
                                j] = self.movePotentialPO2[i][j] + (20 /
                                                                    (r**2))

                            self.movePotentialBP1[i][
                                j] = self.movePotentialBP1[i][j] + (20 /
                                                                    (r**2))
                            self.movePotentialBP2[i][
                                j] = self.movePotentialBP2[i][j] + (20 /
                                                                    (r**2))

                        elif agent['state'] == "PO":
                            self.movePotentialGK1[i][
                                j] = self.movePotentialGK1[i][j] - (20 /
                                                                    (r**2))
                            self.movePotentialGK2[i][
                                j] = self.movePotentialGK2[i][j] - (20 /
                                                                    (r**2))

                            self.movePotentialGKP1[i][
                                j] = self.movePotentialGKP1[i][j] - (20 /
                                                                     (r**2))
                            self.movePotentialGKP2[i][
                                j] = self.movePotentialGKP2[i][j] - (20 /
                                                                     (r**2))

                            self.movePotentialDF1[i][
                                j] = self.movePotentialDF1[i][j] + 40 * (
                                    (2.5 * (2**(-1 / 6)) / r)**12 -
                                    (2.5 * (2**(-1 / 6)) / r)**6)
                            self.movePotentialDF2[i][
                                j] = self.movePotentialDF2[i][j] + 40 * (
                                    (2.5 * (2**(-1 / 6)) / r)**12 -
                                    (2.5 * (2**(-1 / 6)) / r)**6)

                            self.movePotentialPO1[i][
                                j] = self.movePotentialPO1[i][j] + (20 /
                                                                    (r**2))
                            self.movePotentialPO2[i][
                                j] = self.movePotentialPO2[i][j] + (20 /
                                                                    (r**2))

                            self.movePotentialBP1[i][
                                j] = self.movePotentialBP1[i][j] + (20 /
                                                                    (r**2))
                            self.movePotentialBP2[i][
                                j] = self.movePotentialBP2[i][j] + (20 /
                                                                    (r**2))

                        elif agent['state'] == "BP":
                            self.movePotentialGK1[i][
                                j] = self.movePotentialGK1[i][j] - (20 /
                                                                    (r**2))
                            self.movePotentialGK2[i][
                                j] = self.movePotentialGK2[i][j] - (20 /
                                                                    (r**2))

                            self.movePotentialGKP1[i][
                                j] = self.movePotentialGKP1[i][j]
                            self.movePotentialGKP2[i][
                                j] = self.movePotentialGKP2[i][j]

                            self.movePotentialDF1[i][
                                j] = self.movePotentialDF1[i][j] - (20 /
                                                                    (r**2))
                            self.movePotentialDF2[i][
                                j] = self.movePotentialDF2[i][j] - (20 /
                                                                    (r**2))

                            self.movePotentialPO1[i][
                                j] = self.movePotentialPO1[i][j] + 40 * (
                                    (5 * (2**(-1 / 6)) / r)**12 -
                                    (5 * (2**(-1 / 6)) / r)**6)
                            self.movePotentialPO2[i][
                                j] = self.movePotentialPO2[i][j] + 40 * (
                                    (5 * (2**(-1 / 6)) / r)**12 -
                                    (5 * (2**(-1 / 6)) / r)**6)

                            self.movePotentialBP1[i][
                                j] = self.movePotentialBP1[i][j]
                            self.movePotentialBP2[i][
                                j] = self.movePotentialBP2[i][j]

                        else:
                            print(
                                "Error in CalcPotential: Player has no state")

    def scoreCheck(self):
        '''Checks if any player agent has successfully scored and increments the team's score by 1'''
        if self.justConceded == 0:
            pass
        else:
            if self.justConceded == 1:
                self.score2 = self.score2 + 1
                self.kickoff()
            else:
                self.score1 = self.score1 + 1
                self.kickoff()

    def gridVisual(self):
        grid = np.zeros((self.grid.width, self.grid.height))
        for agent, x, y in self.grid.coord_iter():
            if len(agent) != 0:
                for k in agent:
                    grid[x][y] = k.teamID
        name = "Visualisation\Latest Test\Figure_" + str(self.i) + ".jpg"
        plt.imsave(name, grid)

    def bugTest(self):
        self.calcPotential()
        plt.figure(1)
        plt.clf()
        plt.imshow(self.movePotentialBP1, interpolation="nearest")

        plt.figure(2)
        plt.clf()
        plt.imshow(self.movePotentialBP2, interpolation="nearest")

        plt.figure(3)
        plt.clf()
        plt.imshow(self.movePotentialDF1, interpolation="nearest")

        plt.figure(4)
        plt.clf()
        plt.imshow(self.movePotentialDF2, interpolation="nearest")

        plt.figure(5)
        plt.clf()
        plt.imshow(self.movePotentialGK1, interpolation="nearest")

        plt.figure(6)
        plt.clf()
        plt.imshow(self.movePotentialGK2, interpolation="nearest")

        plt.figure(7)
        plt.clf()
        plt.imshow(self.movePotentialGKP1, interpolation="nearest")

        plt.figure(8)
        plt.clf()
        plt.imshow(self.movePotentialGKP2, interpolation="nearest")

        plt.figure(9)
        plt.clf()
        plt.imshow(self.movePotentialPO1, interpolation="nearest")

        plt.figure(10)
        plt.clf()
        plt.imshow(self.movePotentialPO2, interpolation="nearest")

    def step(self):
        '''Advance the model by one step.'''
        self.calcPotential()
        self.scoreCheck()
        self.datacollector.collect(self)
        self.schedule.step()
        self.i = self.i + 1
        self.gridVisual()

        print("Step: " + str(self.i))
        print(str(self.score1) + " - " + str(self.score2))
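
Running this example end-to-end (a sketch only; the pitch dimensions and squad size are illustrative assumptions, and gridVisual() writes a Figure_<step>.jpg into Visualisation\Latest Test each step, so that folder must already exist):

model = PitchModel(N=11, width=68, height=105)
for _ in range(50):
    model.step()
print("Final score:", model.score1, "-", model.score2)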
Beispiel #24
0
class NetScape(Model):
    
    def __init__(self, height = 50, width = 50, initial_population =200, \
                 Moore = False, torus = True, regrow = 1, seed = 42):
        
        '''
        Args:
            height - y axis of grid size
            width - x axis of grid size
            initial_population - number of agents starting
            Moore - type of neighborhood
            torus - whether or not the world wraps
            regrow - amount each resource grows back each step
            process - number of additional processes run by agents
                      0 = Movement/Survive; 1 = +trade, 2 = +

        Initial Parameters:
            MultiGrid
            ActivationByBreed (see schedule)
            num_agents counter to account for each agent number
            timekeeper - dictionary to keep track of time for each section
            start_time - records the initial time
            datacollector to collect agent data of the model
        '''
        
        self.step_num = 0
        self.height = height
        self.width = width
        self.initial_population = initial_population
        self.num_agents = 0
        #Mesa Agent Scheduler
        #self.schedule = schedule.RandomActivationByBreed(self)
        self.ml = mlm.MultiLevel_Mesa(self, group_to_net = True)
        self.grid = MultiGrid(self.height, self.width, torus=torus)  # honour the torus argument
        self.regrow = regrow
        self.running = True
        self.price_record = defaultdict(list)
        self.meta_type = {}
        warnings.filterwarnings("ignore")
                      
        
        '''
        Recorders
          Start datacollector
          Start time recorder
        '''
        self.start_time = time.time()
        
        self.datacollector = DataCollector(\
                             model_reporters = {"MetaAgent": recorder.survivors}, \
                             tables ={"Health":["Agent", "Step", "Sugar_Level", \
                                                "Spice_Level"], \
                             "Time":["Time Per Step"]})
        
        
        '''
        Creates the landscape:
            Four mounds (2 sugar, 2 spice), one located in each quadrant
            imports the landscape module to account for various landscape sizes
        '''
        self.resource_dict = {}
        
        landscape = Landscape.create_landscape(height, width)
        #places resources from landscape on grid
        for k,v in landscape.items(): 
            resource =  R.resource(k, self, v, self.regrow)
            self.grid.place_agent(resource, (resource.pos[0], resource.pos[1]))
            #POINT
            self.ml.add(resource)
            
        
               
        #fills in empty grids with null value resource agent        
        #Deviation from GrAS -- in empty cells has random resource from 0 to 4
        for a,x,y in self.grid.coord_iter():
            if a == set():
                resource = R.resource((x,y), self, \
                                      (self.random.randrange(0,2), \
                                       self.random.randrange(0,2)),self.regrow)
                self.grid.place_agent(resource, (resource.pos[0], resource.pos[1]))
                #POINT
                self.ml.add(resource)
                        
           
        '''
        Creates the agents:
            
        '''
        pos_array = list(self.ml.agents_by_type[R.resource].keys())
        self.random.shuffle(pos_array)
        vision_array = np.random.randint(1,6,self.initial_population)
        spice_array = np.random.randint(1,6,self.initial_population)
        sugar_array = np.random.randint(1,6,self.initial_population)
        for n in range(self.initial_population):
            #x = 0
            #y = 0
            #print ("position: ", (n, x,y))
            #GrAS p. 108
            sugar = self.random.randrange(25,50)
            spice = self.random.randrange(25,50)
            #GrAS p.108
            sug_bolism = sugar_array[n]
            spice_bolism = spice_array[n]
            #GrAS p. 108
            vision = vision_array[n]
            neighbors = Moore
            a = N.NetAgent(n, pos_array[n], self, \
                                 {"sug_bolism": sug_bolism, \
                                 "spice_bolism": spice_bolism}, \
                                 {1 : sugar, 2: spice}, {"vision": vision}, \
                                 neighbors)
            #POINT
            self.ml.add(a)
            self.grid.place_agent(a,pos_array[n])
            #self.ml.add_link([(a, self.ml._agents[pos_array[n]], {"places": a.pos})])
        
     
    ######################################################################
    #
    #
    #       Step function
    #
    ########################################################################    
        
        
    def step(self):
        
        time_step0 = time.time()
        self.step_num += 1
        print ("STEP ", self.step_num)
        self.ml.net_group(link_type = "trades", link_value = 10, policy = organization.rules)
        #self.ml.net_schedule(link_type = "trades", policy = organization.rules, group_net = True)
        #print (len(self.ml.groups), len(self.ml.schedule), len(self.ml.agents_by_type[N.NetAgent]))
        # can either just not put in the by type or can put in and skip it
        self.ml.step()
        time_step1 = time.time() - time_step0
        self.datacollector.collect(self)
        recorder.get_agent_health(self)
        recorder.get_time(self,time_step1)
        recorder.get_meta_details(self)
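
Driving this example (a sketch; it assumes the project's Landscape, resource, NetAgent, recorder and MultiLevel_Mesa modules are importable, since the model cannot run without them):

model = NetScape(height=50, width=50, initial_population=200)
for _ in range(20):
    model.step()
print(model.datacollector.get_model_vars_dataframe().tail())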
Beispiel #25
0
class WolfSheep(Model):
    """
    Wolf-Sheep Predation Model
    """

    height = 25
    width = 25

    initial_sheep = 60
    initial_wolves = 40

    sheep_reproduce = 0.2
    wolf_reproduce = 0.1

    wolf_gain_from_food = 13

    grass = True
    grass_regrowth_time = 20
    sheep_gain_from_food = 5

    description = (
        "A model for simulating wolf and sheep (predator-prey) ecosystem modelling."
    )

    def __init__(
        self,
        height=25,
        width=25,
        initial_sheep=60,
        initial_wolves=40,
        sheep_reproduce=0.2,
        wolf_reproduce=0.1,
        wolf_gain_from_food=13,
        grass=True,
        grass_regrowth_time=20,
        sheep_gain_from_food=5,
    ):
        """
        Create a new Wolf-Sheep model with the given parameters.

        Args:
            initial_sheep: Number of sheep to start with
            initial_wolves: Number of wolves to start with
            sheep_reproduce: Probability of each sheep reproducing each step
            wolf_reproduce: Probability of each wolf reproducing each step
            wolf_gain_from_food: Energy a wolf gains from eating a sheep
            grass: Whether to have the sheep eat grass for energy
            grass_regrowth_time: How long it takes for a grass patch to regrow
                                 once it is eaten
            sheep_gain_from_food: Energy sheep gain from grass, if enabled.
        """
        super().__init__()
        # Set parameters
        self.height = height
        self.width = width
        self.initial_sheep = initial_sheep
        self.initial_wolves = initial_wolves
        self.sheep_reproduce = sheep_reproduce
        self.wolf_reproduce = wolf_reproduce
        self.wolf_gain_from_food = wolf_gain_from_food
        self.grass = grass
        self.grass_regrowth_time = grass_regrowth_time
        self.sheep_gain_from_food = sheep_gain_from_food

        self.schedule = RandomActivationByBreed(self)
        self.grid = MultiGrid(self.height, self.width, torus=True)

        # Create sheep:
        for i in range(self.initial_sheep):
            x = self.random.randrange(self.width)
            y = self.random.randrange(self.height)
            energy = self.random.randrange(2 * self.sheep_gain_from_food)
            sheep = Sheep(self.next_id(), (x, y), self, True, energy)
            self.grid.place_agent(sheep, (x, y))
            self.schedule.add(sheep)

        # Create wolves
        for i in range(self.initial_wolves):
            x = self.random.randrange(self.width)
            y = self.random.randrange(self.height)
            energy = self.random.randrange(2 * self.wolf_gain_from_food)
            wolf = Wolf(self.next_id(), (x, y), self, True, energy)
            self.grid.place_agent(wolf, (x, y))
            self.schedule.add(wolf)

        # Create grass patches
        if self.grass:
            for agent, x, y in self.grid.coord_iter():

                fully_grown = self.random.choice([True, False])

                if fully_grown:
                    countdown = self.grass_regrowth_time
                else:
                    countdown = self.random.randrange(self.grass_regrowth_time)

                patch = GrassPatch(self.next_id(), (x, y), self, fully_grown, countdown)
                self.grid.place_agent(patch, (x, y))
                self.schedule.add(patch)

    def step(self):
        self.schedule.step()
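# Minimal usage sketch (an addition, not part of the original example); it assumes
# the Sheep, Wolf, and GrassPatch agent classes used above are importable.
if __name__ == "__main__":
    model = WolfSheep(initial_sheep=60, initial_wolves=40, grass=True)
    for _ in range(100):
        model.step()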
Beispiel #26
0
class ForestFire(Model):
    """
    Simple Forest Fire model.
    """
    def __init__(self, height, width, density, catch_chance, burnout_chance,
                 bird_density):
        """
        Create a new forest fire model.

        Args:
            height, width: The size of the grid to model
            density: What fraction of grid cells have a tree in them.
        """
        # Initialize model parameters
        self.height = height
        self.width = width
        self.density = density
        self.catch_chance = catch_chance
        self.burnout_chance = burnout_chance

        # Set up model objects
        self.schedule = RandomActivation(self)
        self.grid = MultiGrid(height, width, torus=False)

        self.datacollector = DataCollector({
            "Fine":
            lambda m: self.count_type(m, "Fine"),
            "On Fire":
            lambda m: self.count_type(m, "On Fire"),
            "Burned Out":
            lambda m: self.count_type(m, "Burned Out")
        })

        # Place a tree in each cell with Prob = density
        for (contents, x, y) in self.grid.coord_iter():
            if random.random() < self.density:
                # Create a tree
                new_tree = TreeCell((x, y), self, self.catch_chance,
                                    self.burnout_chance)
                # Set all trees in the first column on fire.
                if x == 0:
                    new_tree.condition = "On Fire"
                self.grid._place_agent((x, y), new_tree)
                self.schedule.add(new_tree)

        for (contents, x, y) in self.grid.coord_iter():
            if random.random() < (bird_density):
                new_bird = Bird((x, y), self)
                self.grid._place_agent((x, y), new_bird)
                self.schedule.add(new_bird)

        for (contents, x, y) in self.grid.coord_iter():
            new_empty = Empty((x, y), self)
            self.grid._place_agent((x, y), new_empty)
            self.schedule.add(new_empty)

        self.running = True

    def step(self):
        """
        Advance the model by one step.
        """
        self.schedule.step()
        self.datacollector.collect(self)

        # Halt if no more fire
        if self.count_type(self, "On Fire") == 0:
            self.running = False

    @staticmethod
    def count_type(model, tree_condition):
        """
        Helper method to count trees in a given condition in a given model.
        """
        count = 0
        for tree in model.schedule.agents:
            if tree.condition == tree_condition:
                count += 1
        return count
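# Minimal usage sketch (added for illustration, not in the original listing); it
# assumes the TreeCell, Bird, and Empty agent classes referenced above exist.
if __name__ == "__main__":
    fire = ForestFire(height=50, width=50, density=0.65,
                      catch_chance=0.8, burnout_chance=0.2, bird_density=0.05)
    while fire.running:
        fire.step()
    print(fire.datacollector.get_model_vars_dataframe().tail())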
Beispiel #27
0
class NetScape(Model):

    def __init__(self, height = 50, width = 50, initial_population =200, \
                 Moore = False, torus = True, regrow = 1, seed = None):
        '''
        Args:
            height - y axis of the grid size
            width - x axis of the grid size
            initial_population - number of agents at the start
            Moore - type of neighborhood (Moore vs. Von Neumann)
            torus - whether or not the world wraps around
            regrow - amount each resource grows back each step
            process - number of additional processes run by agents
            0 = Movement/Survive; 1 = +trade, 2 = +

        Initial parameters:
            MultiGrid
            RandomActivationByBreed (see schedule)
            num_agents counter to account for each agent number
            timekeeper - dictionary to keep track of time for each section
            start_time - initial wall-clock time
            datacollector to collect agent data of the model
        '''

        self.step_num = 0
        self.height = height
        self.width = width
        self.initial_population = initial_population
        self.num_agents = 0
        #Mesa Agent Scheduler
        #self.schedule = schedule.RandomActivationByBreed(self)
        self.schedule = RandomActivationByBreed(self)
        self.grid = MultiGrid(self.height, self.width, torus=True)
        self.regrow = regrow
        self.running = True
        self.price_record = defaultdict(list)
        '''
        Recorders
          Start datacollector
          Start time recorder
        '''
        self.start_time = time.time()

        self.datacollector = DataCollector(
            tables={"Time": ["Time Per Step"]})
        '''
        Creates the landscape:
            Four mounds (2 sugar, 2 spice), one located in each quadrant
            imports the landscape module to account for various landscape sizes
        '''
        self.resource_dict = {}

        landscape = Landscape.create_landscape(height, width)
        #places resources from landscape on grid
        for k, v in landscape.items():
            resource = R.resource(k, self, v, self.regrow)
            self.grid.place_agent(resource, (resource.pos[0], resource.pos[1]))
            #POINT
            self.schedule.add(resource)

        #fills in empty grids with null value resource agent
        #Deviation from GrAS -- empty cells get a random resource amount (0 or 1 of each)
        for a, x, y in self.grid.coord_iter():
            if a == set():
                resource = R.resource((x,y), self, \
                                      (self.random.randrange(0,2), \
                                       self.random.randrange(0,2)),self.regrow)
                self.grid.place_agent(resource,
                                      (resource.pos[0], resource.pos[1]))
                #POINT
                self.schedule.add(resource)
        '''
        Creates the agents.
        '''
        pos_array = list(self.schedule.agents_by_breed['resource'].keys())
        self.random.shuffle(pos_array)
        vision_array = np.random.randint(4, 6, self.initial_population)
        spice_array = np.random.randint(1, 6, self.initial_population)
        sugar_array = np.random.randint(1, 6, self.initial_population)
        for n in range(self.initial_population):
            #x = 0
            #y = 0
            #print ("position: ", (n, x,y))
            #GrAS p. 108
            sugar = self.random.randrange(25, 50)
            spice = self.random.randrange(25, 50)
            #GrAS p.108
            sug_bolism = sugar_array[n]
            spice_bolism = spice_array[n]
            #GrAS p. 108
            vision = vision_array[n]
            neighbors = Moore
            a = N.NetAgent(n, pos_array[n], self, \
                                 {"sug_bolism": sug_bolism, \
                                 "spice_bolism": spice_bolism}, \
                                 {1 : sugar, 2: spice}, {"vision": vision}, \
                                 neighbors)
            #POINT
            self.schedule.add(a)
            self.grid.place_agent(a, pos_array[n])

    ######################################################################
    #
    #
    #       Step function
    #
    ########################################################################

    def step(self):

        time_step0 = time.time()
        self.step_num += 1
        #self.schedule.step_breed(N.NetAgent)
        #self.schedule.step_breed(R.resource)
        self.schedule.step()
        #print (recorder.survivors(self))
        time_step1 = time.time() - time_step0
        self.datacollector.collect(self)
        #recorder.get_agent_health(self)
        recorder.get_time(self, time_step1)
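# Minimal usage sketch (added, not in the original); it assumes the Landscape,
# resource (R), NetAgent (N), and recorder helpers imported by this example exist.
if __name__ == "__main__":
    scape = NetScape(height=50, width=50, initial_population=200)
    for _ in range(20):
        scape.step()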
Beispiel #28
0
class BarrioTortuga(Model):
    '''
    A neighborhood where turtles go out of their homes, walk around at random
    and meet other turtles.
    '''

    def __init__(self,
                 map_file="barrio-tortuga-map-dense.txt",
                 turtles=250,
                 social_affinity = 0.,
                 nd=2,
                 prtl=PrtLvl.Detailed):
        '''
        Create a new Barrio Tortuga.

        The barrio is created from a map. It is a toroidal object, so moore is always True.
        The barrio is filled with turtles that can exit the buildings through a set of doors.
        There are many doors in each building; this is meant to represent the temporal spread
        of the turtles coming in and out of the buildings.

        In a real building, people go out by the same door at different times. In Barrio
        Tortuga, turtles leave the buildings through a set of doors, and each turtle chooses
        a door at random, which is equivalent to introducing randomness in the exit time.

        Args:
            map_file: name of the file with the barrio map
            turtles: number of turtles in the barrio
            social_affinity: a parameter that sets the social affinity of turtles. It takes
                values between -1 and 1. For positive values (0, 1), turtles seek contact with
                other turtles; for negative values (-1, 0), turtles avoid contact with others.
                A social_affinity of 1 means that a turtle that finds another turtle nearby
                always moves to its cell; a social affinity of -1 means that a turtle always
                tries to avoid any turtle nearby.
            nd: a parameter that decides the number of doors (largest for nd=1)
        '''

        # read the map
        self.map_bt                 = np.genfromtxt(map_file)
        self.social_affinity        = social_affinity
        self.avoid_awareness        = -social_affinity

        if print_level(prtl, PrtLvl.Concise):
            print(f'loaded barrio tortuga map with dimensions ->{ self.map_bt.shape}')
            if self.social_affinity >= 0:
                print(f'social affinity ->{ self.social_affinity}')
            else:
                print(f'avoid awareness ->{ self.avoid_awareness}')


        self.height, self.width     = self.map_bt.shape
        self.grid                   = MultiGrid(self.height, self.width, torus=True)
        self.moore                  = True
        self.turtles                = turtles
        self.schedule               = RandomActivation(self)
        self.datacollector          = DataCollector(
            model_reporters = {"NumberOfEncounters": number_of_encounters}
        )

        # create the patches representing houses and avenues
        id = 0
        for _, x, y in self.grid.coord_iter():
            patch_kind = self.map_bt[x, y]               # patch kind labels buildings or streets
            patch = Patch(id, (x, y), self, patch_kind)
            id += 1
            self.grid.place_agent(patch, (x, y))         # patches are placed in the grid but not
                                                         # in the schedule

        # Create turtles distributed randomly in the doors
        doors = self.get_doors(nd)
        if print_level(prtl, PrtLvl.Detailed):
            print(f'doors = {doors}')

        n_doors = len(doors)
        if print_level(prtl, PrtLvl.Concise):
            print(f'number of doors = {n_doors}')

        for i in range(int(self.turtles)):
            n = self.random.randrange(n_doors)  # choose the door
            d = doors[n]
            x = d[0]
            y = d[1]                    # position of the door
            if print_level(prtl, PrtLvl.Detailed):
                print(f'starting turtle {i} at door number {n}, x,y ={x,y}')

            a = Turtle(i, (x, y), self, True)  # create Turtle

            self.schedule.add(a)               # add to scheduler
            self.grid.place_agent(a, (x, y))   # place Turtle in the grid
        self.running = True

        # activate data collector
        self.datacollector.collect(self)

    def step(self):
        self.schedule.step()
        self.datacollector.collect(self)


    def get_doors(self, nd):
        l,w = self.map_bt.shape
        D = []
        if nd == 1:
            return [(x,y) for x in range(l) for y in range(w) if is_house(self.map_bt, x, y) == False and is_house(self.map_bt, x, y-1) == True]
        else:
            return [(x,y) for x in range(l) for y in range(w) if is_house(self.map_bt, x, y) == False and is_house(self.map_bt, x-1, y-1) == True]
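# Minimal usage sketch (added); it assumes the Turtle and Patch classes, the
# number_of_encounters reporter, the PrtLvl/print_level helpers and the default
# map file are available.
if __name__ == "__main__":
    barrio = BarrioTortuga(turtles=100, social_affinity=0.2)
    for _ in range(50):
        barrio.step()
    print(barrio.datacollector.get_model_vars_dataframe().tail())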
Beispiel #29
0
class HabitatModel(Model):
    def __init__(self, height, width, N, method, outdir, filename=None):
        if method == "fromFile" and not filename:
            raise Exception("you must include a filename to create a world from a file")

        self.running = True

        self.height=height
        self.width=width
        self.N=N
        self.method=method
        self.filename=filename
        self.outdir = outdir

        self.grid = MultiGrid(self.height, self.width, False)
        self.schedule = RandomActivation(self)

        # Create patches
        self.createPatches()
        self.createAgents()
        self.datacollector = MyDataCollector(
            agent_reporters={
                             "x": lambda a: a.pos[0],
                             "y": lambda a: a.pos[1],
                             "alive": lambda a: a.alive,
                             "habitat_preference": lambda a: a.habitat_pref
                             }
                         )
    def cleanUp(self):
        outfile="fossil-record_output.csv"
        self.datacollector.get_agent_vars_dataframe().to_csv(os.path.join(self.outdir, outfile))
        
    def createPatches(self):
        if not self.method in ["random", "rectangle", "fromFile"]:
            raise Exception("unrecognized method for creating patches")
        if self.method=="random":
            for cell, index in zip(self.grid.coord_iter(), range(self.height*self.width)):
                patch = HabitatPatch(self,"patch"+str(index),random.choice(habitat_CHOICES))
                cell_content, x, y = cell
                self.grid.place_agent(patch, (x, y))
        elif self.method=="rectangle": #nonrandom habitat patches
            for cell, index in zip(self.grid.coord_iter(), range(self.height*self.width)):
                cell_content, x, y = cell
                if x < self.width // 2:
                    patch = HabitatPatch(self,"patch"+str(index),habitat_CHOICES[0])
                else:
                    patch = HabitatPatch(self,"patch"+str(index),habitat_CHOICES[1])
                self.grid.place_agent(patch, (x, y))
        elif self.method=="fromFile":
            df = pandas.read_csv(os.path.join(self.outdir, self.filename))
            fileWorldSize = int(df.shape[0]**(0.5)) #take square root of row count to get size of square
            if not fileWorldSize == self.grid.width or not fileWorldSize==self.grid.height:
                raise Exception("The size of the world in the file doesn't match the height and width of the HabitatModel object. Note: world must be square. ")
            for tup in df.itertuples():
                row = [int(each) for each in tup[1].split(' ')]
                index = tup[0]
                patch = HabitatPatch(self, "patch"+str(index), habitat_CHOICES[row[2]-1]) #subtract 1 to work with python zero-indexing
                self.grid.place_agent(patch,(row[0]-1,row[1]-1))


    def createAgents(self):
        for index in range(self.N):
            agent = Organism(self, "Organism" + str(index).zfill(6), random.choice(habitat_CHOICES))
            self.schedule.add(agent)
            x = random.randrange(self.grid.width)
            y = random.randrange(self.grid.height)
            self.grid.place_agent(agent, (x, y))
    
    def allDead(self):
        responses = []
        for agent in self.schedule.agents:
            responses.append(agent.alive==False)
        return all(responses)
        
    def markReported(self): 
        #method to mark a dead agent as having been reported. Must be run AFTER data collection to ensure desired behavior
        for agent in self.schedule.agents:
            if agent.alive == False:
                agent.reported = True
        
    def step(self):
        self.schedule.step()
        self.datacollector.collect(self)
        self.markReported()
        if self.allDead():
            self.cleanUp()
            self.running=False
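# Minimal usage sketch (added); it assumes the Organism and HabitatPatch classes,
# the habitat_CHOICES list, and the MyDataCollector helper referenced above exist,
# and that the output directory is writable.
if __name__ == "__main__":
    habitat = HabitatModel(height=20, width=20, N=50, method="random", outdir=".")
    while habitat.running:
        habitat.step()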
class UnivModel(Model):
    """A model with some number of agents."""
    def __init__(self,
                 N,
                 width,
                 height,
                 class_periods=3,
                 class_size=3,
                 majors=False):
        # majors is false if not implemented
        #otherwise majors should be the number of persons per major

        if not majors:

            self.num_agents = N
            self.grid = MultiGrid(5, N, False)
            self.schedule = RandomActivation(self)
            self.contactjournal = np.zeros((N, N), dtype=int)
            self.contactrep = np.zeros((N, N), dtype=int)  # type 2 contact
            self.class_periods = class_periods
            self.class_size = class_size
            self.majorsize = majors

            # random.shuffle returns None, so use a permutation instead
            self.shuffled = np.random.permutation(N)

            classdet = class_assign_funcs.class_assign(N, class_periods,
                                                       class_size)
            self.classes = classdet[0]
        else:

            self.num_agents = 500
            self.grid = MultiGrid(5, 500, False)
            self.schedule = RandomActivation(self)
            self.contactjournal = np.zeros((500, 500), dtype=int)
            self.contactrep = np.zeros((500, 500), dtype=int)  # type 2 contact
            self.class_periods = class_periods
            self.class_size = class_size
            self.majorsize = majors

            # random.shuffle returns None, so use a permutation instead
            self.shuffled = np.random.permutation(500)

            classdet = class_assign_funcs.class_assign_majors(
                500, class_periods, class_size)
            self.classes = classdet[0]

        #numberclasses = classdet[1]
        #print("the number of classes is: ", numberclasses)

        self.tick = 0

        # Create agents
        for i in range(self.num_agents):
            a = Person(i, "student", self)
            self.schedule.add(a)

            # Add the agent to a random grid cell
            x = self.random.randrange(self.grid.width)
            y = self.random.randrange(self.grid.height)
            self.grid.place_agent(a, (x, y))

    def step(self, toremove=[], toadd=[]):
        '''Advance the model by one step,
        then update the contact matrix.
        Removes agents in the toremove list and
        adds agents in the toadd list.'''

        for ag in toremove:
            del self.schedule._agents[ag]

        for ag in toadd:
            self.schedule._agents[ag] = Person(ag, "student", self)

        self.schedule.step()
        self.tick += 1
        # need to iterate (hopefully not) through the gridpoints
        # then add up all the contacts

        for cell in self.grid.coord_iter():
            agents, x, y = cell
            ids = [a.unique_id for a in agents]
            if len(agents) > 1:
                combos = list(itertools.combinations(agents, 2))

                for pair in combos:

                    self.contactjournal[pair[0].unique_id,
                                        pair[1].unique_id] += 1
                    self.contactjournal[pair[1].unique_id,
                                        pair[0].unique_id] += 1

                    # tracks type 2 contacts
                    if pair[0].unique_id not in pair[1].metlast:
                        self.contactrep[pair[0].unique_id,
                                        pair[1].unique_id] += 1
                        self.contactrep[pair[1].unique_id,
                                        pair[0].unique_id] += 1

            # assign met last
            for agent in agents:
                agent.metlast = ids
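# Minimal usage sketch (added); it assumes the Person class and the
# class_assign_funcs helpers imported above are available.
if __name__ == "__main__":
    univ = UnivModel(N=30, width=5, height=30, class_periods=3, class_size=3)
    for _ in range(10):
        univ.step()
    print("total contacts recorded:", univ.contactjournal.sum())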
Beispiel #31
0
class SlimeModel(Model):
    def __init__(self, height, width, color, numAgents, gDense, kRate, dcDiffu,
                 dhRes, dtRes, secRate):
        # number of agents per tile
        self.n = numAgents
        # grid density
        self.gD = gDense
        # rate of cAMP decay
        self.k = kRate
        # diffusion constant of cAMP
        self.Dc = dcDiffu
        # spatial resolution for cAMP simulation
        self.Dh = dhRes
        # time resolution for cAMP simulation
        self.Dt = dtRes
        # rate of cAMP secretion by an agent
        self.f = secRate
        # number of rows/columns in spatial array
        self.w = masterHeight
        # agent color
        self.color = color

        # height of grid
        self.height = masterHeight
        # width of grid
        self.width = masterWidth

        # Counter for generating sequential unique id's
        self.j = 0
        # Counter for DataVis agents' unique id's
        self.dv = 0
        # Counter for NumDataVis agents' unique id's
        self.ndv = 0

        # Create randomly ordered scheduler
        self.schedule = SimultaneousActivation(self)
        # Create grid (of type MultiGrid to support multiple agents per cell
        self.grid = MultiGrid(self.width, self.height, torus=False)

        # Initialize list of cAMP molecules
        self.cAMPs = list()

        # Initialize dict for datacollector with total datacollector
        dc = {"Total Amount of cAMP": self.getAmts}

        # Initialize for iterating through columns (x) and rows (y)
        self.x = 0
        self.y = 0

        # Loops to fill datacollector dictionary with an entry for each column and each row
        for x in range(masterWidth):
            dc.update({("x: " + str(x)): self.getColAmts})
        for y in range(masterHeight):
            dc.update({("y: " + str(y)): self.getRowAmts})

        # Create datacollector to retrieve total amounts of cAMP from dc dict created above
        self.datacollector = DataCollector(dc)

        # Variable for storing random numbers
        r = 0

        # Initial loop to create agents and fill agents list with them
        for (contents, x, y) in self.grid.coord_iter():
            # Create object of type cAMP
            cell = cAMP([x, y], self, self.j, 0)
            # Add random amount of cAMP to cell (<1)
            cell.add(random.random())
            # Place cAMP onto grid at coordinates x, y
            self.grid._place_agent((x, y), cell)
            # Add cAMP molecule to list
            self.cAMPs.append(cell)

            # print("x:", x, " y:", y)

            if x == 50:
                # Create DataVis agent
                ag = DataVis([x, y], self, self.dv)
                # Place DataVis agent
                self.grid.place_agent(ag, tuple([x, y]))

                # Increment unique id counter
                self.dv += 1
            elif x > 50:
                # Create NumDataVis agent with appropriate slice num
                ag = NumDataVis([x, y], self, self.ndv)
                # Place NumDataVis agent
                self.grid.place_agent(ag, tuple([x, y]))

                # Increment unique id counter
                self.ndv += 1
            else:
                # Loop to create SlimeAgents
                if self.gD % 1 != 0:
                    r = random.random()
                    if r <= self.gD:
                        for i in range(self.n):
                            # Create object of type SlimeAgent
                            ag = SlimeAgent([x, y], self, self.j, self.color)
                            # Place agent onto grid at coordinates x, y
                            self.grid.place_agent(ag, tuple([x, y]))
                            # Add agent to schedule
                            self.schedule.add(ag)
                            # Increment j (unique_id variable)
                            self.j += 1
                else:
                    for i in range(self.n):
                        # Create object of type SlimeAgent
                        ag = SlimeAgent([x, y], self, self.j, self.color)
                        # Place agent onto grid at coordinates x, y
                        self.grid.place_agent(ag, tuple([x, y]))
                        # Add agent to schedule
                        self.schedule.add(ag)
                        # Increment j (unique_id variable)
                        self.j += 1

        # Print out number of agents
        print("# of agents:", self.j)

        self.running = True

    # Method for getting total cAMP amount
    def getAmts(self):
        # Initialize empty total variable
        total = 0
        # Loop to get total amount of cAMP from cAMPs list
        for molecule in self.cAMPs:
            total += molecule.getAmt()

        return total

    def getRowAmts(self):
        total = 0
        for x in range(masterWidth):
            try:
                total += self.grid.get_cell_list_contents(
                    (x, self.y))[0].getAmt()
            except IndexError:
                continue

        if self.y == 49:
            self.y = 0
        else:
            self.y += 1

        return total

    def getRowAmt(self, y):
        total = 0
        for x in range(masterWidth - 1):
            try:
                total += self.grid.get_cell_list_contents((x, y))[0].getAmt()
            except IndexError:
                continue

        if self.y == 49:
            self.y = 0
        else:
            self.y += 1

        return total

    def getColAmts(self):
        total = 0
        for y in range(masterHeight):
            try:
                total += self.grid.get_cell_list_contents(
                    (self.x, y))[0].getAmt()
            except IndexError:
                continue

        if self.x == 49:
            self.x = 0
        else:
            self.x += 1

        return total

    def sweepForClusters(self):
        blacklist = list()
        neighbors = list()
        for (contents, x, y) in self.grid.coord_iter():
            for agent in contents[1::]:
                if type(agent) == SlimeAgent and agent not in blacklist:
                    neighbors = agent.getNeighbors()
                    for neighbor in neighbors:
                        blacklist.append(neighbor)
                '''
                density = blacklist.length / density_coefficent_based_on_area
                clusteredAgents = 
                '''

    # Step method
    def step(self):
        cNeighbors = list()
        neighbors = list()
        lap = 0
        amtSelf = 0
        cAMPobj = cAMP
        newDiag = 0
        oldDiag = 0
        nAgents = 0
        layer = 1
        secRate = 0
        ''' Perform cAMP decay and diffusion actions '''
        for (contents, x, y) in self.grid.coord_iter():

            # This block is a bit messy but it works for now
            cont = True
            for content in contents:
                # Set row amounts if an object is DataVis
                if type(content) is DataVis or type(content) is NumDataVis:
                    content.setRowAmt(self.getRowAmt(y))
                    cont = False

            if cont:
                # Initialize number of agents for layer coloring
                nAgents = len(contents) - 1
                # Reset lap to 0
                lap = 0

                # Set cAMPobj to current tile's cAMP agent
                cAMPobj = contents[0]
                # Set neighbors to cAMPobj's neighbors (Von Neumann)
                neighbors = cAMPobj.getNeighbors()
                # Add cAMP objects form neighbors to cNeighbors
                for neighbor in neighbors:
                    if type(neighbor) is cAMP:
                        cNeighbors.append(neighbor)

                # Add sum of neighbors to lap
                for mol in cNeighbors:
                    lap += mol.getAmt()

                amtSelf = cAMPobj.getAmt()
                # Reassign lap to the laplacian (using previous neighbor sum value)
                lap = (lap - 4 * amtSelf) / (self.Dh**2)
                # Add decay to current cAMP object
                cAMPobj.add(
                    (-cAMPobj.getDecayRate() * amtSelf + self.Dc * lap) *
                    self.Dt)

                # Wipe cNeighbors
                cNeighbors.clear()

                # Iterate through all contents of a grid cell
                for agent in contents[1::]:
                    # Get all neighbors (excuding self)
                    neighbors = agent.getNeighbors()
                    # Examine each neighbor
                    for neighbor in neighbors:
                        # Add cAMP neighbors to list
                        if type(neighbor) is cAMP:
                            cNeighbors.append(neighbor)

                    # Add cAMP secretion to the cell that the agent shares with a cAMP object
                    cAMPobj.add(agent.getSecRate() * self.Dt)
                    # Decide whether or not to move
                    newx = (x + random.randint(-1, 2)) % self.w
                    newy = (y + random.randint(-1, 2)) % self.w

                    # Calculate the cAMP difference between the candidate cell and the current cell
                    newDiag = ((self.grid[newx - 1][newy - 1])[0]).getAmt()
                    oldDiag = ((self.grid[x - 1][y - 1])[0]).getAmt()
                    diff = newDiag - oldDiag

                    # Clamp extreme values of diff
                    if diff > 10:
                        diff = 10
                    elif diff < -10:
                        diff = -10

                    # Decide to move
                    if random.random() < np.exp(diff) / (1 + np.exp(diff)):
                        agent.move(tuple([newx, newy]))

                    # Layers for coloring agents based on density
                    agent.addLayer()
                    layer = agent.getLayer()
                    # Only change color of agent that is on top of a stack
                    if layer >= nAgents:
                        self.pickColor(agent, nAgents)

                    # Wipe cNeighbors
                    cNeighbors.clear()

        # Add step to schedule
        self.schedule.step()
        # Collect new data
        self.datacollector.collect(self)

    # Method to select a color based on the topmost agent
    def pickColor(self, topAgent, nAgents):
        shade = topAgent.getShades()
        if nAgents <= 2:
            topAgent.setShade(shade[0])
        elif nAgents == 3:
            topAgent.setShade(shade[1])
        elif nAgents == 4:
            topAgent.setShade(shade[2])
        elif nAgents == 5:
            topAgent.setShade(shade[3])
        elif nAgents == 6:
            topAgent.setShade(shade[4])
        elif nAgents == 7:
            topAgent.setShade(shade[5])
        elif nAgents == 8:
            topAgent.setShade(shade[6])
        elif nAgents == 9:
            topAgent.setShade(shade[7])
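# Minimal usage sketch (added); it assumes the cAMP, SlimeAgent, DataVis and
# NumDataVis classes plus the masterHeight/masterWidth globals defined elsewhere
# in this example are available.
if __name__ == "__main__":
    slime = SlimeModel(height=masterHeight, width=masterWidth, color="Green",
                       numAgents=2, gDense=0.5, kRate=0.01, dcDiffu=0.03,
                       dhRes=0.01, dtRes=0.5, secRate=1)
    for _ in range(10):
        slime.step()
    print("total cAMP:", slime.getAmts())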
Beispiel #32
0
class WolfSheepPredation(Model):
    '''
    Wolf-Sheep Predation Model
    '''

    height = 20
    width = 20

    initial_sheep = 100
    initial_wolves = 50

    sheep_reproduce = 0.04
    wolf_reproduce = 0.05

    wolf_gain_from_food = 20

    grass = False
    grass_regrowth_time = 30
    sheep_gain_from_food = 4

    verbose = False  # Print-monitoring

    def __init__(self,
                 height=20,
                 width=20,
                 initial_sheep=100,
                 initial_wolves=50,
                 sheep_reproduce=0.04,
                 wolf_reproduce=0.05,
                 wolf_gain_from_food=20,
                 grass=False,
                 grass_regrowth_time=30,
                 sheep_gain_from_food=4):
        '''
        Create a new Wolf-Sheep model with the given parameters.

        Args:
            initial_sheep: Number of sheep to start with
            initial_wolves: Number of wolves to start with
            sheep_reproduce: Probability of each sheep reproducing each step
            wolf_reproduce: Probability of each wolf reproducing each step
            wolf_gain_from_food: Energy a wolf gains from eating a sheep
            grass: Whether to have the sheep eat grass for energy
            grass_regrowth_time: How long it takes for a grass patch to regrow
                                 once it is eaten
            sheep_gain_from_food: Energy sheep gain from grass, if enabled.
        '''

        # Set parameters
        self.height = height
        self.width = width
        self.initial_sheep = initial_sheep
        self.initial_wolves = initial_wolves
        self.sheep_reproduce = sheep_reproduce
        self.wolf_reproduce = wolf_reproduce
        self.wolf_gain_from_food = wolf_gain_from_food
        self.grass = grass
        self.grass_regrowth_time = grass_regrowth_time
        self.sheep_gain_from_food = sheep_gain_from_food

        self.schedule = RandomActivationByBreed(self)
        self.grid = MultiGrid(self.height, self.width, torus=True)
        self.datacollector = DataCollector({
            "Wolves":
            lambda m: m.schedule.get_breed_count(Wolf),
            "Sheep":
            lambda m: m.schedule.get_breed_count(Sheep)
        })

        # Create sheep:
        for i in range(self.initial_sheep):
            x = random.randrange(self.width)
            y = random.randrange(self.height)
            energy = random.randrange(2 * self.sheep_gain_from_food)
            sheep = Sheep(self.grid, (x, y), True, energy)
            self.grid.place_agent(sheep, (x, y))
            self.schedule.add(sheep)

        # Create wolves
        for i in range(self.initial_wolves):
            x = random.randrange(self.width)
            y = random.randrange(self.height)
            energy = random.randrange(2 * self.wolf_gain_from_food)
            wolf = Wolf(self.grid, (x, y), True, energy)
            self.grid.place_agent(wolf, (x, y))
            self.schedule.add(wolf)

        # Create grass patches
        if self.grass:
            for agent, x, y in self.grid.coord_iter():

                fully_grown = random.choice([True, False])

                if fully_grown:
                    countdown = self.grass_regrowth_time
                else:
                    countdown = random.randrange(self.grass_regrowth_time)

                patch = GrassPatch(fully_grown, countdown)
                self.grid.place_agent(patch, (x, y))
                self.schedule.add(patch)

        self.running = True

    def step(self):
        self.schedule.step()
        self.datacollector.collect(self)
        if self.verbose:
            print([
                self.schedule.time,
                self.schedule.get_breed_count(Wolf),
                self.schedule.get_breed_count(Sheep)
            ])

    def run_model(self, step_count=200):

        if self.verbose:
            print('Initial number wolves: ',
                  self.schedule.get_breed_count(Wolf))
            print('Initial number sheep: ',
                  self.schedule.get_breed_count(Sheep))

        for i in range(step_count):
            self.step()

        if self.verbose:
            print('')
            print('Final number wolves: ', self.schedule.get_breed_count(Wolf))
            print('Final number sheep: ', self.schedule.get_breed_count(Sheep))
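# Minimal usage sketch (added); it assumes the Sheep, Wolf, and GrassPatch agent
# classes used by this variant are available.
if __name__ == "__main__":
    predation = WolfSheepPredation(grass=True)
    predation.verbose = True
    predation.run_model(step_count=100)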
Beispiel #33
0
class SugarscapeModel(Model):
    def __init__(self, height=50, width=50, init_agents=500, max_metabolism=3, max_vision=10, max_init_sugar=5, min_age=30, max_age=60, init_poll=3, ex_ratio=2, ex_mod=1, poll_growth_rule=True, inheritance_rule=True):
        self.height = height
        self.width = width
        self.init_agents = init_agents
        self.init_poll = init_poll
        self.max_metabolism = max_metabolism
        self.max_vision = max_vision
        self.max_init_sugar = max_init_sugar
        self.min_age = min_age
        self.max_age = max_age
        self.ex_ratio = ex_ratio
        self.ex_mod = ex_mod

        self.replacement_rule = True
        self.pollution_rule = False
        self.diffusion_rule = False
        self.push_rule = False
        self.poll_growth_rule = poll_growth_rule
        self.expend_rule = True
        self.inheritance_rule = inheritance_rule

        self.map = self.import_map()
        self.grid = MultiGrid(height, width, torus=True)
        self.schedule = RandomActivationByType(self)
        self.datacollector = DataCollector({'Pollution': (lambda m: m.total_pollution),
                                            'Wealth': (lambda m: m.total_wealth/m.init_agents),
                                            'Agents': (lambda m: len(m.schedule.agents_by_type[ScapeAgent]))},
                                           {'Wealth': self.collect_wealth,
                                            'Metabolism': self.collect_metabolism,
                                            'Vision': self.collect_vision})

        self.total_wealth = 0
        self.total_pollution = 0

        self.populate_sugar()
        self.populate_agents()


    def step(self):
        ''' Step method run by the visualization module'''
        self.schedule.step([ScapeAgent, SugarPatch])
        self.datacollector.collect(self)

        # if self.schedule.time == 20:
        #     self.pollution_rule = True
        if self.schedule.time == 30:
            self.push_rule = True

        self.total_wealth = 0
        self.total_pollution = 0
        for agent in self.schedule.agents_by_type[ScapeAgent]:
            self.total_wealth += agent.wealth
        for patch in self.schedule.agents_by_type[SugarPatch]:
            self.total_pollution += patch.pollution

    def import_map(self):
        ''' Imports a text file into an array to be used when generating and
            placing the sugar Agents into the grid
        '''

        map_list = []
        with open('Maps/sugar_map.txt', 'r') as f:
            for line in f:
                num_list = line.split(' ')
                for num in num_list:
                    map_list.append(int(num[0]))

        return map_list

    def new_agent(self, uid, inheritance):
        ''' Place a new agent on the sugarscape in order to replace a death'''
        free = False
        while not free:
            location = random.choice([cell for cell in self.grid.coord_iter()])
            if len(location[0]) == 1:
                free = True

        pos = (location[1], location[2])
        patch = self.grid.get_cell_list_contents([pos])[0]

        if self.inheritance_rule:
            if inheritance == 'rand':
                wealth = random.randint(1, self.max_init_sugar)
            else:
                wealth = inheritance
        else:
            wealth = random.randint(1, self.max_init_sugar)

        agent = ScapeAgent(uid, pos, wealth, random.randint(1,self.max_metabolism), random.randint(1,self.max_vision), random.randint(self.min_age, self.max_age), patch, self.ex_ratio, self.ex_mod)

        self.grid.place_agent(agent, agent.pos)
        self.schedule.add(agent)

    def populate_agents(self):
        ''' Place ScapeAgents in random unoccupied locations on the grid with randomized
            sets of parameters
        '''

        cells = [(cell[1], cell[2]) for cell in self.grid.coord_iter()]
        for i in range(self.init_agents):
            uid = 'a' + str(i)
            location = random.choice(cells)
            cells.remove(location)
            patch = self.grid.get_cell_list_contents([location])[0]
            agent = ScapeAgent(uid, location, random.randint(1,self.max_init_sugar), random.randint(1,self.max_metabolism), random.randint(1,self.max_vision), random.randint(self.min_age, self.max_age), patch, self.ex_ratio, self.ex_mod)
            self.grid.place_agent(agent, location)
            self.schedule.add(agent)

    def populate_sugar(self):
        ''' Place SugarPatch's on every cell with maximum sugar content
            according to the imported 'sugar_map.txt' file
        '''

        map_i = 0
        for cell in self.grid.coord_iter():
            x = cell[1]
            y = cell[2]
            uid = 's'+str(y)+str(x)
            # patch = SugarPatch(uid, (x,y), 3)
            patch = SugarPatch(uid, (x,y), self.map[map_i], self.init_poll)
            self.grid.place_agent(patch, (x,y))
            self.schedule.add(patch)
            map_i += 1

    def collect_wealth(self, agent):
        '''Method for datacollector'''
        if isinstance(agent, ScapeAgent):
            return agent.wealth

    def collect_metabolism(self, agent):
        '''Method for datacollector'''
        if isinstance(agent, ScapeAgent):
            return agent.metabolism

    def collect_vision(self, agent):
        '''Method for datacollector'''
        if isinstance(agent, ScapeAgent):
            return agent.vision

    def calc_gini(self, wealths):
        '''Returns gini coefficient'''
        sort_wealths = sorted(wealths)
        num_agents = len(sort_wealths)
        gini,count = 0,0
        for wealth in sort_wealths:
            gini += wealth * (num_agents - count)
            count += 1
        gini /= (num_agents * sum(sort_wealths))
        return num_agents**(-1) - 2*gini + 1
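# Minimal usage sketch (added); it assumes the ScapeAgent and SugarPatch classes
# and the 'Maps/sugar_map.txt' file referenced above are present.
if __name__ == "__main__":
    scape = SugarscapeModel(init_agents=100)
    for _ in range(50):
        scape.step()
    wealths = [a.wealth for a in scape.schedule.agents_by_type[ScapeAgent]]
    print("gini:", scape.calc_gini(wealths))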