Example #1
class ShapeExample(Model):
    def __init__(self, N=2, width=20, height=10):
        self.N = N  # num of agents
        self.headings = ((1, 0), (0, 1), (-1, 0), (0, -1))  # tuples are fast
        self.grid = SingleGrid(width, height, torus=False)
        self.schedule = RandomActivation(self)
        self.make_walker_agents()
        self.running = True

    def make_walker_agents(self):
        unique_id = 0
        while unique_id < self.N:
            x = self.random.randrange(self.grid.width)
            y = self.random.randrange(self.grid.height)
            pos = (x, y)
            heading = self.random.choice(self.headings)
            # heading = (1, 0)
            if self.grid.is_cell_empty(pos):
                print("Creating agent {2} at ({0}, {1})".format(
                    x, y, unique_id))
                a = Walker(unique_id, self, pos, heading)
                self.schedule.add(a)
                self.grid.place_agent(a, pos)
                unique_id += 1

    def step(self):
        self.schedule.step()
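A minimal driver sketch for this model, assuming mesa is installed and the Walker agent class used above is defined alongside it:

model = ShapeExample(N=2, width=20, height=10)
for _ in range(10):
    model.step()  # RandomActivation shuffles agent order every step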
Example #2
class ShapesModel(Model):
    def __init__(self, N, width=20, height=10):
        self.running = True
        self.N = N    # num of agents
        self.headings = ((1, 0), (0, 1), (-1, 0), (0, -1))  # tuples are fast
        self.grid = SingleGrid(width, height, torus=False)
        self.schedule = RandomActivation(self)
        self.make_walker_agents()

    def make_walker_agents(self):
        unique_id = 0
        while unique_id < self.N:
            x = random.randrange(self.grid.width)
            y = random.randrange(self.grid.height)
            pos = (x, y)
            heading = random.choice(self.headings)
            # heading = (1, 0)
            if self.grid.is_cell_empty(pos):
                print("Creating agent {2} at ({0}, {1})"
                      .format(x, y, unique_id))
                a = Walker(unique_id, self, pos, heading)
                self.schedule.add(a)
                self.grid.place_agent(a, pos)
                unique_id += 1

    def step(self):
        self.schedule.step()
Example #3
class TestSingleGrid(unittest.TestCase):
    '''
    Test the SingleGrid object.

    Since it inherits from Grid, all the functionality tested above should
    work here too. Instead, this tests the enforcement.
    '''

    def setUp(self):
        '''
        Create a test toroidal grid and populate it with Mock Agents
        '''
        width = 3
        height = 5
        self.grid = SingleGrid(width, height, True)
        self.agents = []
        counter = 0
        for x in range(width):
            for y in range(height):
                if TEST_GRID[x][y] == 0:
                    continue
                counter += 1
                # Create and place the mock agent
                a = MockAgent(counter, None)
                self.agents.append(a)
                self.grid.place_agent(a, (x, y))

    def test_enforcement(self):
        '''
        Test the SingleGrid empty count and enforcement.
        '''

        assert len(self.grid.empties) == 9
        a = MockAgent(100, None)
        with self.assertRaises(Exception):
            self.grid._place_agent((0, 1), a)

        # Place the agent in an empty cell
        self.grid.position_agent(a)
        # Test whether after placing, the empty cells are reduced by 1
        assert a.pos not in self.grid.empties
        assert len(self.grid.empties) == 8
        for i in range(10):
            self.grid.move_to_empty(a)
        assert len(self.grid.empties) == 8

        # Place agents until the grid is full
        empty_cells = len(self.grid.empties)
        for i in range(empty_cells):
            a = MockAgent(101 + i, None)
            self.grid.position_agent(a)
        assert len(self.grid.empties) == 0

        a = MockAgent(110, None)
        with self.assertRaises(Exception):
            self.grid.position_agent(a)
        with self.assertRaises(Exception):
            self.grid.move_to_empty(self.agents[0])
Example #5
class PdGrid(Model):
    ''' Model class for iterated, spatial prisoner's dilemma model. '''

    schedule_types = {
        "Sequential": BaseScheduler,
        "Random": RandomActivation,
        "Simultaneous": SimultaneousActivation
    }

    # This dictionary holds the payoff for this agent,
    # keyed on: (my_move, other_move)

    payoff = {("C", "C"): 1, ("C", "D"): 0, ("D", "C"): 1.6, ("D", "D"): 0}

    def __init__(self,
                 height=50,
                 width=50,
                 schedule_type="Random",
                 payoffs=None,
                 seed=None):
        '''
        Create a new Spatial Prisoners' Dilemma Model.

        Args:
            height, width: Grid size. There will be one agent per grid cell.
            schedule_type: Can be "Sequential", "Random", or "Simultaneous".
                           Determines the agent activation regime.
            payoffs: (optional) Dictionary of (move, neighbor_move) payoffs.
        '''
        self.grid = SingleGrid(width, height, torus=True)
        self.schedule_type = schedule_type
        self.schedule = self.schedule_types[self.schedule_type](self)

        # Create agents
        for x in range(width):
            for y in range(height):
                agent = PDAgent((x, y), self)
                self.grid.place_agent(agent, (x, y))
                self.schedule.add(agent)

        self.datacollector = DataCollector({
            "Cooperating_Agents":
            lambda m: len([a for a in m.schedule.agents if a.move == "C"])
        })

        self.running = True
        self.datacollector.collect(self)

    def step(self):
        self.schedule.step()
        # collect data
        self.datacollector.collect(self)

    def run(self, n):
        ''' Run the model for n steps. '''
        for _ in range(n):
            self.step()
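Because the model wires up a DataCollector, the cooperation trajectory can be pulled out as a pandas DataFrame after a run; a sketch assuming the PdGrid class above and its PDAgent dependency:

model = PdGrid(height=50, width=50, schedule_type="Random")
model.run(20)
# one row per collected step, one column per model reporter
df = model.datacollector.get_model_vars_dataframe()
print(df["Cooperating_Agents"].tail())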
Example #6
class Foraging(Model):
    
    number_of_bean = 0
    number_of_corn = 0
    number_of_soy = 0
    
    def __init__(self, width=50, height=50, torus=True, num_bug=50, seed=42, strategy=None):
        super().__init__(seed=seed)
        self.number_of_bug = num_bug
        if strategy not in ("stick", "switch"):
            raise ValueError("'strategy' must be one of {stick, switch}")
        self.strategy = strategy
        
        self.grid = SingleGrid(width, height, torus)
        self.schedule = RandomActivation(self)
        data = {"Bean": lambda m: m.number_of_bean,
                "Corn": lambda m: m.number_of_corn,
                "Soy": lambda m: m.number_of_soy,
                "Bug": lambda m: m.number_of_bug,
                }
        self.datacollector = DataCollector(data)
        
        # create foods
        self._populate(Bean)
        self._populate(Corn)
        self._populate(Soy)
        
        # create bugs
        for i in range(self.number_of_bug):
            pos = self.grid.find_empty()
            bug = Bug(i, self)
            bug.strategy = self.strategy
            self.grid.place_agent(bug, pos)
            self.schedule.add(bug)
    
    def step(self):
        self.schedule.step()
        self.datacollector.collect(self)
        
        if not(self.grid.exists_empty_cells()):
            self.running = False
    
    def _populate(self, food_type):
        prefix = "number_of_{}"
        
        counter = 0
        while counter < food_type.density * (self.grid.width * self.grid.height):
            pos = self.grid.find_empty()
            food = food_type(counter, self)
            self.grid.place_agent(food, pos)
            self.schedule.add(food)
            food_name = food_type.__name__.lower()
            attr_name = prefix.format(food_name)
            setattr(self, attr_name, getattr(self, attr_name) + 1)
            counter += 1
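_populate keeps one counter per food type by building the attribute name at runtime with getattr/setattr; a standalone sketch of that idiom (the Tally class here is illustrative, not from the source):

class Tally:
    number_of_bean = 0

    def bump(self, food_name):
        # build "number_of_bean" etc. and increment it on the instance
        attr_name = "number_of_{}".format(food_name)
        setattr(self, attr_name, getattr(self, attr_name) + 1)

t = Tally()
t.bump("bean")
assert t.number_of_bean == 1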
Example #7
class CoopaModel(Model):
    """A model with some number of agents."""
    def __init__(self, N, width, height, agent_type, log_path=None):
        self.running = True
        self.num_agents = N
        self.grid = SingleGrid(width, height, torus=False)
        self.schedule = RandomActivation(self)
        self.message_dispatcher = MessageDispatcher()
        self.layout = Layout()
        self._context = Context()
        self.agent_type = AGENT_TYPES[agent_type]

        self.layout.draw(self.grid)

        # Add drop point(s)
        self.drop_points = [DropPoint(1, self)]
        self.grid.place_agent(self.drop_points[0], (5, 5))

        # Add recharging station(s)
        self.recharge_points = [RechargePoint(1, self)]
        self.grid.place_agent(self.recharge_points[0], (55, 5))

        # Place resources tactically
        self._context.place_few_trash_in_all_rooms(self)

        # the mighty agents arrive
        for i in range(self.num_agents):
            a = self.agent_type(i, self, log_path=log_path)
            self.schedule.add(a)
            self.grid.position_agent(a)

        self.datacollector = DataCollector(
            model_reporters={
                "Trash collected": compute_dropped_trashes,
                "Average battery power": compute_average_battery_power,
                "Max battery power": compute_max_battery_power,
                "Min battery power": compute_min_battery_power
            },
            # agent_reporters={"Trash": "trash_count"}  # an agent attribute
        )

        self.name = "CoopaModel"
        self._logger = utils.create_logger(self.name, log_path=log_path)

    @property
    def time(self):
        return self.schedule.time

    def step(self):
        t = time.monotonic()
        self.datacollector.collect(self)
        self.schedule.step()
        self._log("Finished in {:.5f} seconds.".format(time.monotonic() - t),
                  logging.INFO)

    def _log(self, msg, lvl=logging.DEBUG):
        self._logger.log(lvl, msg, extra={'time': self.time})
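_log routes the model clock through logging's extra mechanism, so a formatter can print the simulation time with every record; a self-contained sketch of that pattern (logger name and format are illustrative):

import logging

logger = logging.getLogger("CoopaModel")
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("[t=%(time)s] %(message)s"))
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)

logger.log(logging.INFO, "step finished", extra={"time": 42})  # prints: [t=42] step finished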
Example #8
class PD_Model(Model):
    '''
    Model class for iterated, spatial prisoner's dilemma model.
    '''

    schedule_types = {"Sequential": BaseScheduler,
                      "Random": RandomActivation,
                      "Simultaneous": SimultaneousActivation}

    # This dictionary holds the payoff for this agent,
    # keyed on: (my_move, other_move)

    payoff = {("C", "C"): 1,
              ("C", "D"): 0,
              ("D", "C"): 1.6,
              ("D", "D"): 0}

    def __init__(self, height, width, schedule_type, payoffs=None):
        '''
        Create a new Spatial Prisoners' Dilemma Model.

        Args:
            height, width: Grid size. There will be one agent per grid cell.
            schedule_type: Can be "Sequential", "Random", or "Simultaneous".
                           Determines the agent activation regime.
            payoffs: (optional) Dictionary of (move, neighbor_move) payoffs.
        '''
        self.running = True
        self.grid = SingleGrid(width, height, torus=True)
        self.schedule_type = schedule_type
        self.schedule = self.schedule_types[self.schedule_type](self)

        # Create agents
        for x in range(width):
            for y in range(height):
                agent = PD_Agent((x, y), self)
                self.grid.place_agent(agent, (x, y))
                self.schedule.add(agent)

        self.datacollector = DataCollector({
            "Cooperating_Agents":
            lambda m: len([a for a in m.schedule.agents if a.move == "C"])
        })

    def step(self):
        self.datacollector.collect(self)
        self.schedule.step()

    def run(self, n):
        '''
        Run the model for a certain number of steps.
        '''
        for _ in range(n):
            self.step()
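Both PD models select the activation regime by name from schedule_types; the three mesa schedulers differ only in ordering. A driving sketch, assuming the PD_Agent class above:

# BaseScheduler steps agents in the order they were added,
# RandomActivation shuffles the order every step, and
# SimultaneousActivation calls step() on every agent before
# calling advance() on every agent.
model = PD_Model(height=10, width=10, schedule_type="Simultaneous")
model.run(5)
df = model.datacollector.get_model_vars_dataframe()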
Example #9
class TestSingleGrid(unittest.TestCase):
    '''
    Test the SingleGrid object.

    Since it inherits from Grid, all the functionality tested above should
    work here too. Instead, this tests the enforcement.
    '''

    def setUp(self):
        '''
        Create a test toroidal grid and populate it with Mock Agents
        '''
        self.grid = SingleGrid(3, 5, True)
        self.agents = []
        counter = 0
        for y in range(3):
            for x in range(5):
                if TEST_GRID[y][x] == 0:
                    continue
                counter += 1
                # Create and place the mock agent
                a = MockAgent(counter, None)
                self.agents.append(a)
                self.grid.place_agent(a, (x, y))

    def test_enforcement(self):
        '''
        Test the SingleGrid empty count and enforcement.
        '''

        assert len(self.grid.empties) == 10
        a = MockAgent(100, None)
        with self.assertRaises(Exception):
            self.grid._place_agent((1, 0), a)

        # Place the agent in an empty cell
        self.grid.position_agent(a)
        assert a.pos not in self.grid.empties
        assert len(self.grid.empties) == 9
        for i in range(10):
            self.grid.move_to_empty(a)
        assert len(self.grid.empties) == 9

        # Place agents until the grid is full
        for i in range(9):
            a = MockAgent(101 + i, None)
            self.grid.position_agent(a)
        assert len(self.grid.empties) == 0

        a = MockAgent(110, None)
        with self.assertRaises(Exception):
            self.grid.position_agent(a)
        with self.assertRaises(Exception):
            self.grid.move_to_empty(self.agents[0])
Example #10
class FoodModel(Model):  # a distinct class name avoids shadowing the imported mesa Model

    # initialiser for the model
    def __init__(self, height, width, a_density=0.1, r_=0.1, k_=0.1):

        self.height = height
        self.width = width
        self.a_density = a_density
        self.r_ = r_
        self.k_ = k_
        self.happy = 0  # model-level happy count, read by the DataCollector below

        self.schedule = RandomActivation(self)
        self.grid = SingleGrid(width, height, torus=False)
        self.datacollector = DataCollector(
            {"happy": "happy"},  # Model-level count of happy agents
            # For testing purposes, agent's individual x and y
            {
                "x": lambda a: a.pos[0],
                "y": lambda a: a.pos[1]
            },
        )

        # Set up agents
        # We use a grid iterator that returns
        # the coordinates of a cell as well as
        # its contents. (coord_iter)

        # fixed seed so agents always start in the same places
        random.seed(9001)

        # create the food matrix; every cell starts at 100
        n = self.height
        m = self.width
        self.food_matrix = [[100] * m for i in range(n)]

        # place agents at random with probability a_density per cell
        for cell in self.grid.coord_iter():
            x = cell[1]
            y = cell[2]
            a = random.random()
            if a < self.a_density:
                agent = Agent((x, y), self)
                self.grid.place_agent(agent, (x, y))
                self.schedule.add(agent)

        self.running = True
        self.datacollector.collect(self)

    def step(self):

        self.schedule.step()
        # collect data
        self.datacollector.collect(self)
Example #12
class PDModel(Model):

    schedule_types = {"Sequential": BaseScheduler,
                      "Random": RandomActivation,
                      "Simultaneous": SimultaneousActivation}

    def __init__(self, height=8, width=8,
                 number_of_agents=2,
                 schedule_type="Simultaneous",
                 rounds=1,):


        # Model Parameters
        self.height = height
        self.width = width
        self.number_of_agents = number_of_agents
        self.step_count = 0
        self.schedule_type = schedule_type
        self.payoffs = {("C", "C"): 3,
                        ("C", "D"): 0,
                        ("D", "C"): 5,
                        ("D", "D"): 2}


        # Model Functions
        self.schedule = self.schedule_types[self.schedule_type](self)
        self.grid = SingleGrid(self.width, self.height, torus=True)

        # All grid coordinates; agents are placed from the front of this list
        self.coordinates = [(x, y) for x in range(self.width) for y in range(self.height)]

        self.agentIDs = list(range(1, (number_of_agents + 1)))

        self.make_agents()
        self.running = True

    def make_agents(self):
        for i in range(self.number_of_agents):
            x, y = self.coordinates.pop(0)
            # print("x, y:", x, y)
            # x, y = self.grid.find_empty()
            pdagent = PDAgent((x, y), self, True)
            self.grid.place_agent(pdagent, (x, y))
            self.schedule.add(pdagent)

    def step(self):
        self.schedule.step()
        self.step_count += 1

    def run_model(self, rounds=200):
        for i in range(rounds):
            self.step()
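A short driving sketch, assuming the PDAgent class this model depends on is defined; note that rounds passed to the constructor is unused, so the run length comes from run_model:

model = PDModel(number_of_agents=4)
model.run_model(rounds=10)
print(model.step_count)  # 10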
Example #13
class TestSingleGrid(unittest.TestCase):
    def setUp(self):
        self.space = SingleGrid(50, 50, False)
        self.agents = []
        for i, pos in enumerate(TEST_AGENTS_GRID):
            a = MockAgent(i, None)
            self.agents.append(a)
            self.space.place_agent(a, pos)

    def test_agent_positions(self):
        """
        Ensure that the agents are all placed properly.
        """
        for i, pos in enumerate(TEST_AGENTS_GRID):
            a = self.agents[i]
            assert a.pos == pos

    def test_remove_agent(self):
        for i, pos in enumerate(TEST_AGENTS_GRID):
            a = self.agents[i]
            assert a.pos == pos
            assert self.space.grid[pos[0]][pos[1]] == a
            self.space.remove_agent(a)
            assert a.pos is None
            assert self.space.grid[pos[0]][pos[1]] is None

    def test_empty_cells(self):
        if self.space.exists_empty_cells():
            pytest.deprecated_call(self.space.find_empty)
            for i, pos in enumerate(list(self.space.empties)):
                a = MockAgent(-i, pos)
                self.space.position_agent(a, x=pos[0], y=pos[1])
        assert self.space.find_empty() is None
        with self.assertRaises(Exception):
            self.space.move_to_empty(a)

    def test_move_agent(self):
        agent_number = 0
        initial_pos = TEST_AGENTS_GRID[agent_number]
        final_pos = (7, 7)

        _agent = self.agents[agent_number]

        assert _agent.pos == initial_pos
        assert self.space.grid[initial_pos[0]][initial_pos[1]] == _agent
        assert self.space.grid[final_pos[0]][final_pos[1]] is None
        self.space.move_agent(_agent, final_pos)
        assert _agent.pos == final_pos
        assert self.space.grid[initial_pos[0]][initial_pos[1]] is None
        assert self.space.grid[final_pos[0]][final_pos[1]] == _agent
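test_empty_cells above passes the callable straight to pytest.deprecated_call; a standalone sketch of both supported call forms:

import warnings
import pytest

def old_api():
    warnings.warn("use new_api instead", DeprecationWarning)
    return 42

assert pytest.deprecated_call(old_api) == 42  # callable form, as in the test
with pytest.deprecated_call():                # context-manager form
    old_api()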
Example #14
class ConwayGameOfLifeModel(Model):
    """
    Class describing Conway's Game of Life using the mesa agent-based modeling framework.

    The model contains a grid. In each cell of the grid there is an agent. The agent can be dead or
    alive. At each step of the simulation, the state of the agent can change.

    The model is responsible for creating the grid and handling the iteration of the simulation.
    """
    def __init__(self, grid_height, grid_width, percentage_of_cell_alive):
        """
        Constructor
        """

        self.grid = SingleGrid(grid_width, grid_height, False)
        self.scheduler = SimultaneousActivation(self)
        self.number_of_agent = grid_width * grid_height

        # Creation of all agent
        for i in range(self.number_of_agent):

            # Randomly choose the initial state of the agent (0 is dead and 1 is alive,
            # matching the weights below). We use choices from the random module because
            # it allows us to specify a distribution (i.e. a probability for each state).
            # choices returns a list with one element, which is our state.
            probability_alive = percentage_of_cell_alive / 100
            probability_dead = 1 - probability_alive
            state = choices([0, 1], [probability_dead, probability_alive])[0]

            # Creating the agent and adding it to the scheduler
            agent = CellAgent(i, state, self)
            self.scheduler.add(agent)

            # Adding the new agent to the grid
            agent_coordinates = self.grid.find_empty()
            self.grid.place_agent(agent, agent_coordinates)

        # Define if the simulation is running or not
        self.running = True

    def step(self):
        """
        Method to advance the model by one step. It will call the step method of each agent.
        We use a simultaneous scheduler, which means we iterate through all the agents at
        once to determine their next state and only then apply the new states.
        """
        self.scheduler.step()
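The weighted draw in the constructor is plain random.choices; a self-contained sketch showing the call in isolation:

from random import choices

percentage_of_cell_alive = 30
probability_alive = percentage_of_cell_alive / 100
probability_dead = 1 - probability_alive
# one draw: 1 (alive) with probability 0.3, 0 (dead) with probability 0.7
state = choices([0, 1], [probability_dead, probability_alive])[0]
print(state)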
Example #15
def getGridStateAtStep(self, step=0):
    plan_agent_keys = [uid for uid, a in self.planAgents.items()]
    perception_agent_keys = [uid for uid, a in self.perceptionAgents.items()]
    # snapshot grid; SingleGrid expects (width, height, torus)
    navGridAtStep = SingleGrid(self.navigationGrid.width,
                               self.navigationGrid.height, False)
    for key in perception_agent_keys:
        navGridAtStep.place_agent(self.perceptionAgents[key],
                                  self.perceptionAgents[key].pos)
    for key in plan_agent_keys:
        for agent in self.planAgents[key]:
            if agent.steps_left == step and navGridAtStep.is_cell_empty(agent.pos):
                navGridAtStep.place_agent(agent, agent.pos)
    return navGridAtStep
Example #16
class PetriDish(Model):
    """
    Main model instance. It assignes one cell in each grid location, selecting
    its type randomly at the time of assignment; it assigns a single activated
    Producer cell in the middle of the grid.
    """
    def __init__(self,
                 width=50,
                 height=50,
                 proportion_producers=0.3,
                 proportion_consumers=0.3):
        self.running = True
        self.schedule = BaseScheduler(self)
        self.grid = SingleGrid(width, height, torus=False)

        initial_activator = Producer("Initial activator", self, activated=True)
        center_coords = (math.floor(width / 2), math.floor(height / 2))

        ## Rolled into the placement of other cells
        # self.schedule.add(initial_activator)
        # self.grid.place_agent(initial_activator, center_coords)

        # roll a die and place Producer, Consumer or undifferentiated cell
        for x in range(width):
            for y in range(height):
                roll = r.random()
                coords = (x, y)

                if coords == center_coords:
                    agent = initial_activator
                elif roll <= proportion_producers:
                    agent = Producer(coords, self)
                elif roll <= proportion_producers + proportion_consumers:
                    agent = Consumer(coords, self)
                else:
                    agent = Cell(coords, self)

                self.schedule.add(agent)
                self.grid.place_agent(agent, coords)

    def step(self):
        self.schedule.step()  # goes through agents in the order of addition
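The roll thresholds partition [0, 1) so the three cell types appear in the stated proportions; a quick standalone check (not from the source):

import random

proportion_producers, proportion_consumers = 0.3, 0.3
counts = {"Producer": 0, "Consumer": 0, "Cell": 0}
for _ in range(10000):
    roll = random.random()
    if roll <= proportion_producers:
        counts["Producer"] += 1
    elif roll <= proportion_producers + proportion_consumers:
        counts["Consumer"] += 1
    else:
        counts["Cell"] += 1
print(counts)  # roughly 3000 / 3000 / 4000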
Example #17
class TestSingleGrid(unittest.TestCase):
    def setUp(self):
        self.space = SingleGrid(50, 50, False)
        self.agents = []
        for i, pos in enumerate(TEST_AGENTS_GRID):
            a = MockAgent(i, None)
            self.agents.append(a)
            self.space.place_agent(a, pos)

    def test_agent_positions(self):
        '''
        Ensure that the agents are all placed properly.
        '''
        for i, pos in enumerate(TEST_AGENTS_GRID):
            a = self.agents[i]
            assert a.pos == pos

    def test_remove_agent(self):
        for i, pos in enumerate(TEST_AGENTS_GRID):
            a = self.agents[i]
            assert a.pos == pos
            assert self.space.grid[pos[0]][pos[1]] == a
            self.space.remove_agent(a)
            assert a.pos is None
            assert self.space.grid[pos[0]][pos[1]] is None

    def test_move_agent(self):
        agent_number = 0
        initial_pos = TEST_AGENTS_GRID[agent_number]
        final_pos = (7, 7)

        _agent = self.agents[agent_number]

        assert _agent.pos == initial_pos
        assert self.space.grid[initial_pos[0]][initial_pos[1]] == _agent
        assert self.space.grid[final_pos[0]][final_pos[1]] is None
        self.space.move_agent(_agent, final_pos)
        assert _agent.pos == final_pos
        assert self.space.grid[initial_pos[0]][initial_pos[1]] is None
        assert self.space.grid[final_pos[0]][final_pos[1]] == _agent
Example #19
class Antcluster(Model):
    """A model with some number of agents."""
    def __init__(self, N):
        self.num_agents = N
        self.i = 1
        self.grid = SingleGrid(120, 120, True)
        self.grid1 = SingleGrid(120, 120, True)
        self.schedule = RandomActivation(self)
        self.schedule_dados = BaseScheduler(self)
        # Create agents
        for i in range(self.num_agents):
            a = Ant(i, self)
            self.schedule.add(a)
            x = self.random.randrange(self.grid.width)
            y = self.random.randrange(self.grid.height)
            #z = np.asarray([x,y])
            #print(z)
            plt.axis([-10, 125, -10, 125])
            #plt.scatter(x,y)
            self.grid.place_agent(a, (x, y))
        # create data items
        for i in range(150):
            b = dado(i, self)
            self.schedule_dados.add(b)
            x = self.random.randrange(self.grid1.width)
            y = self.random.randrange(self.grid1.height)
            #print(x,y)
            z = np.asarray([x, y])
            plt.axis([-10, 125, -10, 125])
            plt.scatter(x, y)
            self.grid1.place_agent(b, (x, y))

    def step(self):
        '''Advance the model by one step.'''
        self.schedule_dados.step()
        self.schedule.step()
Example #20
class ShapesModel(Model):
    def __init__(self, N, width=20, height=10):
        self.running = True
        self.N = N  # num of agents
        self.headings = ((1, 0), (0, 1), (-1, 0), (0, -1))  # tuples are fast
        self.grid = SingleGrid(width, height, torus=False)
        self.schedule = RandomActivation(self)
        load_scene('shape_model/crossing.txt', self.grid, self)
        """
        self.grid.place_agent(
            Walker(1911, self, (4, 4), type="wall"),
            (4, 4)
        )
        self.make_walls()
        self.make_walker_agents()
        """

    def make_walls(self):
        for i in range(0, 50):
            self.grid.place_agent(Walker(1911, self, (i, 5), type="wall"),
                                  (i, 5))

    def make_walker_agents(self):
        unique_id = 0
        while unique_id < self.N:
            x = random.randrange(self.grid.width)
            y = random.randrange(self.grid.height)
            pos = (x, y)
            heading = random.choice(self.headings)
            # heading = (1, 0)
            if self.grid.is_cell_empty(pos):
                print("Creating agent {2} at ({0}, {1})".format(
                    x, y, unique_id))
                a = Walker(unique_id, self, pos, heading)
                self.schedule.add(a)
                # occupy a 2x2 block with the same agent; note x + 1 or y + 1
                # can fall outside a non-torus grid at the far edges
                self.grid.place_agent(a, (x, y))
                self.grid.place_agent(a, (x + 1, y))
                self.grid.place_agent(a, (x, y + 1))
                self.grid.place_agent(a, (x + 1, y + 1))
                unique_id += 1

    def step(self):
        self.schedule.step()
Example #21
class MondoModel(Model):
    """Questo è il mondo fatto a griglia"""
    def __init__(self, popolazione, width, height):
        super().__init__()
        self.popolazione = popolazione
        self.grid = SingleGrid(width, height, True)
        self.schedule = RandomActivation(self)
        self.points = []
        # Create agents
        for i in range(self.popolazione):
            a = PersonAgent(i, self)
            self.schedule.add(a)
            emptyspace = self.grid.find_empty()
            if emptyspace is not None:
                self.grid.place_agent(a, emptyspace)

        paziente_zero = self.schedule.agents[0]
        paziente_zero.virus = Virus(mortalita=20,
                                    tempo_incubazione=3,
                                    infettivita=70)
        paziente_zero.ttl = paziente_zero.virus.tempo_incubazione

    def step(self):
        '''Advance the model by one step.'''
        self.schedule.step()
        suscettibili = 0
        infetti = 0
        morti = 0
        immuni = 0
        for persona in self.schedule.agents:
            if persona.isAlive is False:
                morti += 1
            elif persona.isImmune is True:
                immuni += 1
            elif persona.virus is not None:
                infetti += 1
            else:
                suscettibili += 1
        self.points.append([suscettibili, infetti, morti, immuni])

    def crea_grafico(self):
        global_health_status = np.zeros((self.grid.width, self.grid.height))
        for persona, x, y in self.grid.coord_iter():

            if persona is None:
                global_health_status[x][y] = StatoCella.vuoto
            elif persona.isAlive is False:
                global_health_status[x][y] = StatoCella.morto
            elif persona.isImmune is True:
                global_health_status[x][y] = StatoCella.guarito
            elif persona.virus is not None:
                global_health_status[x][y] = StatoCella.infetto
            else:
                global_health_status[x][y] = StatoCella.suscettibile

        cmap = matplotlib.colors.ListedColormap([(0, 0, 0), (1, 0, 0),
                                                 (1, 1, 0), (0, 0, 1),
                                                 (0, 1, 0)])
        img = plt.imshow(global_health_status,
                         interpolation='nearest',
                         vmin=0,
                         vmax=4,
                         cmap=cmap)
        plt.colorbar(img, ticks=[0, 1, 2, 3, 4])
        plt.show()

    def crea_grafico_2(self):
        matplotlib.pyplot.plot(self.points)
        matplotlib.pyplot.show()
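points accumulates one [susceptible, infected, dead, immune] row per step, which crea_grafico_2 plots directly; a driving sketch, assuming the PersonAgent and Virus classes this model depends on:

model = MondoModel(popolazione=50, width=20, height=20)
for _ in range(30):
    model.step()
model.crea_grafico_2()  # one line per compartment over time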
Example #22
class modelSim(Model):
    """
    Details of the world.

    introduce_time is when animal agents first get introduced into the world
    disp_rate is the dispersal rate for experiment 3
    dist is the perceptual strength of animals, if fixed
    det is the decision determinacy of animals, if fixed
    cog_fixed determines whether animal cognition is fixed to particular values or allowed to evolve
    if skip_300 is True, patchiness values are not calculated for the first 300 steps -- this makes the model run faster
    collect_cog_dist creates a separate dataframe of all agents' cognition values at every timestep
    if evolve_disp is True, the dispersal rate of plants is free to evolve
    """

    def __init__(self, introduce_time, disp_rate, dist, det, cog_fixed = False, \
                 skip_300 = True, collect_cog_dist = False, evolve_disp = False):

        self.skip_300 = skip_300
        self.cog_fixed = cog_fixed
        self.evolve_disp = evolve_disp
        self.collect_cog_dist = collect_cog_dist
        self.dist = dist
        self.det = det
        self.disp_rate = disp_rate
        self.intro_time = introduce_time
        (self.a1num, self.a2num) = (20, 20)
        self.schedule = RandomActivation(
            self)  # agents take a step in random order
        self.grid = SingleGrid(
            200, 200,
            True)  # the world is a grid with specified height and width

        self.initialize_perception()

        disp = np.power(self.disp_rate, range(0, 100))
        self.disp = disp / sum(disp)
        self.grid_ind = np.indices((200, 200))
        positions = np.maximum(abs(100 - self.grid_ind[0]),
                               abs(100 - self.grid_ind[1]))
        self.positions = np.minimum(positions, 200 - positions)

        self.agentgrid = np.zeros(
            (self.grid.width, self.grid.height
             ))  # allows for calculation of patchiness of both agents
        self.coggrid = np.full(
            (self.nCogPar, self.grid.width, self.grid.height), 101.0)
        self.dispgrid = np.full((2, self.grid.width, self.grid.height), 101.0)
        self.age = []
        (self.nstep, self.unique_id, self.reprod, self.food, self.death,
         self.combat) = (0, 0, 0, 0, 0, 0)

        self.cmap = colors.ListedColormap([
            'midnightblue', 'mediumseagreen', 'white', 'white', 'white',
            'white', 'white'
        ])  #'yellow', 'orange', 'red', 'brown'])
        bounds = [0, 1, 2, 3, 4, 5, 6, 7]
        self.norm = colors.BoundaryNorm(bounds, self.cmap.N)

        self.expect_NN = []
        self.NN = [5, 10]
        for i in self.NN:
            self.expect_NN.append(
                (math.factorial(2 * i) * i) / (2**i * math.factorial(i))**2)

        grid_ind_food = np.indices((21, 21))
        positions_food = np.maximum(abs(10 - grid_ind_food[0]),
                                    abs(10 - grid_ind_food[1]))
        self.positions_food = np.minimum(positions_food, 21 - positions_food)
        if self.collect_cog_dist:
            self.cog_dist_dist = pd.DataFrame(columns=[])
            self.cog_dist_det = pd.DataFrame(columns=[])

        for i in range(self.a1num):  # initiate a1 agents at random locations
            self.introduce_agents("A1")
        self.nA1 = self.a1num
        self.nA2 = 0
#     self.agent_steps = {}

    def initialize_perception(self):
        self.history = pd.DataFrame(columns=[
            "nA1", "nA2", "age", "LIP5", "LIP10", "LIPanim5", "LIPanim10",
            "Morsita5", "Morsita10", "Morsitaanim5", "Morsitaanim10", "NN5",
            "NN10", "NNanim5", "NNanim10", "reprod", "food", "death", "combat",
            "dist", "det", "dist_lower", "det_lower", "dist_upper",
            "det_upper", "dist_ci", "det_ci"
        ])
        self.nCogPar = 2
        (self.start_energy, self.eat_energy, self.tire_energy, self.reproduction_energy, self.cognition_energy) \
        = (10, 5, 3, 20, 1)

    def introduce_agents(self, which_agent):
        x = random.randrange(self.grid.width)
        y = random.randrange(self.grid.height)

        if which_agent == "A1":
            if self.grid.is_cell_empty((x, y)):
                a = A1(self.unique_id, self, self.start_energy, disp_rate=0)
                self.unique_id += 1
                self.grid.position_agent(a, x, y)
                self.schedule.add(a)
                self.agentgrid[x][y] = 1
            else:
                self.introduce_agents(which_agent)
        elif which_agent == "A2":
            if self.cog_fixed:
                c = (self.dist, self.det)
            else:
                c = tuple([0] * self.nCogPar)
            a = A2(self.unique_id,
                   self,
                   self.start_energy,
                   cognition=c,
                   disp_rate=0)
            self.unique_id += 1
            if self.agentgrid[x][y] == 1:
                die = self.grid.get_cell_list_contents([(x, y)])[0]
                die.dead = True
                self.grid.remove_agent(die)
                self.schedule.remove(die)
                self.grid.place_agent(a, (x, y))
                self.schedule.add(a)
                self.agentgrid[x][y] = 2
                self.coggrid[:, x, y] = c
            elif self.agentgrid[x][y] == 0:
                self.grid.place_agent(a, (x, y))
                self.schedule.add(a)
                self.agentgrid[x][y] = 2
                self.coggrid[:, x, y] = c

    def flatten_(self, n, grid, full_grid=False, mean=True, range_=False):
        if full_grid:
            return (grid[n].flatten())
        i = grid[n].flatten()
        if mean:
            i = np.delete(i, np.where(i == 101))
            if len(i) == 0:
                # if range_:
                return ([0] * 4)
            #else:
            #    return(0)
            if range_:
                if self.cog_fixed:
                    return ([np.mean(i)] * 4)
                return (np.concatenate(
                    ([np.mean(i)], np.percentile(i, [2.5, 97.5]),
                     self.calculate_ci(i))))
            return ([np.mean(i), 0, 0, 0])
        else:
            return (i)

    def calculate_ci(self, data):
        if np.min(data) == np.max(data):
            return ([0.0])
        return ([
            np.mean(data) - st.t.interval(
                0.95, len(data) - 1, loc=np.mean(data), scale=st.sem(data))[0]
        ])

    def return_zero(self, num, denom):
        if self.nstep == 1:
            return (0)
        if denom == "old_nA2":
            denom = self.history["nA2"][self.nstep - 2]
        if denom == 0.0:
            return 0
        return (num / denom)

    def nearest_neighbor(self, agent):  # fix this later
        if agent == "a1":
            x = np.argwhere(self.agentgrid == 1)
            if len(x) <= 10:
                return ([-1] * len(self.NN))
            elif len(x) > 39990:
                return ([0.97, 0.99])
        #  if self.nstep<300 and self.skip_300:
        #      return([-1,-1] )
        else:
            x = np.argwhere(self.agentgrid == 2)
            if len(x) <= 10:
                return ([-1] * len(self.NN))
        density = len(x) / (self.grid.width)**2
        expect_NN_ = self.expect_NN
        expect_dist = np.array(expect_NN_) / (density**0.5)
        distances = [0, 0]
        for i in x:
            distx = abs(x[:, 0] - i[0])
            distx[distx > 100] = 200 - distx[distx > 100]
            disty = abs(x[:, 1] - i[1])
            disty[disty > 100] = 200 - disty[disty > 100]
            dist = (distx**2 + disty**2)**0.5
            distances[0] += (np.partition(dist, 5)[5])
            distances[1] += (np.partition(dist, 10)[10])
        mean_dist = np.array(distances) / len(x)
        out = mean_dist / expect_dist
        return (out)

    def quadrant_patch(
        self, agent
    ):  # function to calculate the patchiness index of agents at every step
        if agent == "a1":
            x = self.agentgrid == 1
        else:
            x = self.agentgrid == 2
        gsize = np.array([5, 10])
        gnum = 200 / gsize
        qcs = []
        for i in range(2):
            x_ = x.reshape(int(gnum[i]), gsize[i], int(gnum[i]),
                           gsize[i]).sum(1).sum(2)
            mean = np.mean(x_)
            var = np.var(x_)
            if mean == 0.0:
                return ([-1] * 4)
            lip = 1 + (var - mean) / (mean**2)
            morsita = np.sum(x) * ((np.sum(np.power(x_, 2)) - np.sum(x_)) /
                                   (np.sum(x_)**2 - np.sum(x_)))
            qcs += [lip, morsita]
        return (qcs)

    def l_function(self, agent):
        if agent == "a1":
            x = np.argwhere(self.agentgrid == 1)
        else:
            x = np.argwhere(self.agentgrid == 2)
            if len(x) == 0:
                return (-1)
        distances = np.array([])
        for i in x:
            distx = abs(x[:, 0] - i[0])
            distx[distx > 100] = 200 - distx[distx > 100]
            disty = abs(x[:, 1] - i[1])
            disty[disty > 100] = 200 - disty[disty > 100]
            dist = (distx**2 + disty**2)**0.5
            distances = np.concatenate((distances, dist[dist != 0]))
        l = np.array([])
        for i in np.arange(5, 51, 5):
            l = np.append(l, sum(distances < i))
        k = (l * 200**2) / (len(x)**2)
        l = (k / math.pi)**0.5
        return (abs(l - np.arange(5, 51, 5)))

    def collect_hist(self):
        if self.nstep < 300 and self.skip_300:
            NNcalc = [-1, -1]  #self.nearest_neighbor("a1")
            NNanimcalc = [-1, -1]  #self.nearest_neighbor("a2")
        else:
            NNcalc = self.nearest_neighbor("a1")
            NNanimcalc = self.nearest_neighbor("a2")
        quadrantcalc = self.quadrant_patch("a1")
        quadrantanimcalc = self.quadrant_patch("a2")
        dist_values = self.flatten_(0,
                                    grid=self.coggrid,
                                    mean=True,
                                    range_=False)
        det_values = self.flatten_(1,
                                   grid=self.coggrid,
                                   mean=True,
                                   range_=False)
        # l_f = 0#self.l_function("a1")
        dat = {
            "nA1": self.nA1,
            "nA2": self.nA2,
            "age": self.return_zero(sum(self.age), self.nA2),
            "LIP5": quadrantcalc[0],
            "LIP10": quadrantcalc[2],
            "LIPanim5": quadrantanimcalc[0],
            "LIPanim10": quadrantanimcalc[2],
            "Morsita5": quadrantcalc[1],
            "Morsita10": quadrantcalc[3],
            "Morsitaanim5": quadrantanimcalc[1],
            "Morsitaanim10": quadrantanimcalc[3],
            "NN5": NNcalc[0],
            "NN10": NNcalc[1],
            "NNanim5": NNanimcalc[0],
            "NNanim10":
            NNanimcalc[1],  #"l_ripley" : l_f,# self.nearest_neighbor("a2"),  
            "reprod": self.return_zero(self.reprod, "old_nA2"),
            "food": self.return_zero(self.food, self.nA2),
            "death": self.return_zero(self.death, "old_nA2"),
            "combat": self.return_zero(self.combat, "old_nA2"),
            "dist": dist_values[0],
            "det": det_values[0],
            "dist_lower": dist_values[1],
            "det_lower": det_values[1],
            "dist_upper": dist_values[2],
            "det_upper": det_values[2],
            "dist_ci": dist_values[3],
            "det_ci": det_values[3],
            "disp_a1": self.flatten_(0, grid=self.dispgrid)[0],
            "disp_a2": self.flatten_(1, grid=self.dispgrid)[0]
        }
        self.history = self.history.append(dat, ignore_index=True)
        self.age = []
        (self.reprod, self.food, self.death, self.combat) = (0, 0, 0, 0)
        if self.collect_cog_dist:
            if (self.nstep % 10) == 0:
                self.cog_dist_dist[str(self.nstep - 1)] = self.flatten_(
                    0, grid=self.coggrid, full_grid=True, mean=False)
                self.cog_dist_det[str(self.nstep - 1)] = self.flatten_(
                    1, grid=self.coggrid, full_grid=True, mean=False)

    def step(self):
        self.nstep += 1  # step counter
        if self.nstep == self.intro_time:
            for i in range(self.a2num):
                self.introduce_agents("A2")
        self.schedule.step()
        self.nA1 = np.sum(self.agentgrid == 1)
        self.nA2 = np.sum(self.agentgrid == 2)
        self.collect_hist()
        if self.nstep % 10 == 0:
            sys.stdout.write((str(self.nstep) + " " + str(self.nA1) + " " +
                              str(self.nA2) + "\n"))

    def visualize(self):
        f, ax = plt.subplots(1)
        self.agentgrid = self.agentgrid.astype(int)
        ax.imshow(self.agentgrid,
                  interpolation='nearest',
                  cmap=self.cmap,
                  norm=self.norm)
        # plt.axis("off")
        return (f)
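nearest_neighbor uses np.partition to grab the k-th smallest pairwise distance without sorting the whole array; a standalone sketch:

import numpy as np

dist = np.array([9.0, 0.0, 4.0, 1.0, 7.0, 3.0, 8.0, 2.0])
# after a partial sort, index 5 holds the 6th-smallest value; with the
# zero self-distance at index 0, that is the 5th-nearest neighbour
fifth_nn = np.partition(dist, 5)[5]
print(fifth_nn)  # 7.0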
Example #23
class Anthill(Model):
    def __init__(self):

        self.grid = SingleGrid(WIDTH, HEIGHT, False)
        self.schedule = RandomActivation(self)
        self.running = True
        self.internalrate = 0.2
        self.ant_id = 1
        self.tau = np.zeros((WIDTH, HEIGHT))
        self.datacollector = DataCollector({
            "Total number of Ants":
            lambda m: self.get_total_ants_number(),
            "mean tau":
            lambda m: self.evaluation1(),
            "sigma":
            lambda m: self.evaluation2(),
            "sigma*":
            lambda m: self.evaluation3(),
        })

        # List containing all coordinates of the boundary, initial ants location and brood location
        self.bound_vals = []
        self.neigh_bound = []
        self.datacollector.collect(self)

        for i in range(WIDTH):
            for j in range(HEIGHT):
                if i == 0 or j == 0 or i == WIDTH - 1 or j == HEIGHT - 1:
                    self.bound_vals.append((i, j))
                if i == 1 or i == WIDTH - 2 or j == 1 or j == HEIGHT - 2:
                    self.neigh_bound.append((i, j))

        # Make a Fence boundary
        b = 0
        for h in self.bound_vals:
            br = Fence(b, self)

            self.grid.place_agent(br, (h[0], h[1]))
            b += 1

    def step(self):
        '''Advance the model by one step.'''
        # Add new ants on the inner boundary of the internal area

        for xy in self.neigh_bound:

            # Add with probability internal rate and if the cell is empty
            if self.random.uniform(
                    0, 1) < self.internalrate and self.grid.is_cell_empty(
                        xy) == True:

                a = Ant(self.ant_id, self)

                self.schedule.add(a)
                self.grid.place_agent(a, xy)

                self.ant_id += 1

        # Move the ants
        self.schedule.step()
        self.datacollector.collect(self)

        # Remove all ants on the boundary

        for (agents, i, j) in self.grid.coord_iter():
            if (i, j) in self.neigh_bound and type(agents) is Ant:

                self.grid.remove_agent(agents)
                self.schedule.remove(agents)

        data_tau.append(self.mean_tau_ant)
        data_sigma.append(np.sqrt(self.sigma))
        data_sigmastar.append(self.sigmastar)

        if len(data_sigmastar) > 20:
            if abs(data_sigmastar[-2] - data_sigmastar[-1]) < 0.0000001 or len(
                    data_sigmastar) == 2000:
                try:
                    # TAU
                    with open("results/m1_tau_5.pkl", 'rb') as f:
                        tau_old = pickle.load(f)
                    tau_old[int(len(tau_old) + 1)] = data_tau
                    pickle.dump(tau_old, open("results/m1_tau_5.pkl", 'wb'))

                except Exception:
                    pickle.dump({1: data_tau},
                                open("results/m1_tau_5.pkl", 'wb'))

                try:
                    # SIGMA
                    with open("results/m1_sigma_5.pkl", 'rb') as f:
                        sigma_old = pickle.load(f)
                    sigma_old[int(len(sigma_old) + 1)] = data_sigma
                    pickle.dump(sigma_old, open("results/m1_sigma_5.pkl",
                                                'wb'))

                except Exception:
                    pickle.dump({1: data_sigma},
                                open("results/m1_sigma_5.pkl", 'wb'))

                try:
                    # SIGMASTAR
                    with open("results/m1_sigmastar_5.pkl", 'rb') as f:
                        sigmastar_old = pickle.load(f)
                    sigmastar_old[int(len(sigmastar_old) + 1)] = data_sigmastar
                    pickle.dump(sigmastar_old,
                                open("results/m1_sigmastar_5.pkl", 'wb'))

                except Exception:
                    pickle.dump({1: data_sigmastar},
                                open("results/m1_sigmastar_5.pkl", 'wb'))

                try:
                    # MATRIX
                    with open("results/m1_matrix_5.pkl", 'rb') as f:
                        matrix_old = pickle.load(f)
                    matrix_old[int(len(matrix_old) + 1)] = self.tau
                    pickle.dump(matrix_old,
                                open("results/m1_matrix_5.pkl", 'wb'))

                except Exception:
                    pickle.dump({1: self.tau},
                                open("results/m1_matrix_5.pkl", 'wb'))
                print(
                    "_______________________________________________________________________"
                )
                print("DONE")
                self.running = False

        # with open("tau2_new.txt", "a") as myfile:
        #     myfile.write(str(self.mean_tau_ant) + '\n')
        # with open("sigma2_new.txt", "a") as myfile:
        #     myfile.write(str(np.sqrt(self.sigma)) + '\n')
        # with open("datasigmastar2_new.txt","a") as myfile:
        #     myfile.write(str(self.sigmastar) + "\n")

    def get_total_ants_number(self):
        total_ants = 0
        for (agents, _, _) in self.grid.coord_iter():
            if type(agents) is Ant:
                total_ants += 1
        return total_ants

    def evaluation1(self):

        ## create an empty grid to store the current occupancy
        total_ants = np.zeros((WIDTH, HEIGHT))

        ## mark the cells currently occupied by ants
        for (agents, i, j) in self.grid.coord_iter():

            if type(agents) is Ant:
                total_ants[i][j] = 1
            else:
                total_ants[i][j] = 0

        ## update tau with the current occupancy
        self.tau = self.tau + total_ants

        ## calculate the mean tau over the interior
        self.mean_tau_ant = self.tau.sum() / ((WIDTH - 2)**2)

        return self.mean_tau_ant

    def evaluation2(self):

        ## subtracting the mean tau should give zero on the boundary, so set the
        ## boundary sites to mean_tau_ant; then (tau - mean_tau_ant) vanishes there
        for site in self.bound_vals:
            self.tau[site[0]][site[1]] = self.mean_tau_ant

        ## calculate sigma
        self.sigma = ((self.tau - self.mean_tau_ant)**2).sum() / (
            (WIDTH - 2)**2)

        ## reset the boundary to zero
        for site in self.bound_vals:
            self.tau[site[0]][site[1]] = 0

        return np.sqrt(self.sigma)

    def evaluation3(self):
        ## calculate the sigmastar
        self.sigmastar = np.sqrt(self.sigma) / self.mean_tau_ant

        return self.sigmastar
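evaluation2 computes a population variance over the interior cells after forcing the boundary's deviation to zero; the same arithmetic on a toy numpy matrix (toy values, not from the source):

import numpy as np

WIDTH = HEIGHT = 4  # toy size; the interior is (WIDTH - 2) ** 2 cells
tau = np.zeros((WIDTH, HEIGHT))
tau[1:-1, 1:-1] = [[1, 2], [3, 4]]  # visit counts on the interior

mean_tau = tau.sum() / ((WIDTH - 2) ** 2)   # 2.5
dev = tau - mean_tau
dev[0, :] = dev[-1, :] = 0                  # zero the boundary, as evaluation2 does
dev[:, 0] = dev[:, -1] = 0
sigma = (dev ** 2).sum() / ((WIDTH - 2) ** 2)
print(mean_tau, np.sqrt(sigma))             # 2.5 1.118...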
Example #24
class SchoolModel(Model):
    """
    Model class for the Schelling segregation model.

    ...

    Attributes
    ----------

    height: int
        grid height
    width: int
        grid width
    num_schools:  int
        number of schools
    f : float
        fraction preference of agents for like
    M : float
        utility penalty for homogeneous neighbourhood
    residential_steps :
        number of steps for the residential model
    minority_pc :
        minority fraction
    bounded : boolean
        If True use bounded (predefined neighbourhood) for agents residential choice
    cap_max : float
        school capacity TODO: explain
    radius : int
        neighbourhood radius for agents calculation of residential choice (only used if not bounded)
    household_types :
        labels for different ethnic types of households
    symmetric_positions :
        use symmetric positions for the schools along the grid, or random
    schelling :
        if True use the Schelling utility function, otherwise use the asymmetric one
    school_pos :
        if supplied place schools in the supplied positions - also update school_num
    extended_data :
        if True collect extra data for agents (utility distribution and satisfaction)
        takes up a lot of space
    sample : int
        subsample the empty residential sites to be evaluated to speed up computation
    variable_f : boolean
        if True, draw each agent's ethnic preference f from a normal distribution
    sigma : float
        The standard deviation of the normal distribution used for f
    alpha : float
        ratio of ethnic to distance to school preference for school utility
    temp : float
        temperature for the behavioural logit rule for agents moving
    households : list
        all household objects
    schools : list
        all school objects
    residential_moves_per_step : int
        number of agents to move residence at every step
    school_moves_per_step : int
        number of agents to move school at every step
    num_households : int
        total number of household agents
    pm : list [ , ]
        number of majority households, number of minority households
    schedule : mesa schedule type
    grid : mesa grid type
    total_moves : int
        number of school moves made in a particular step
    res_moves : int
        number of residential site moves made in a particular step
    move : string
        type of move recipe - 'random', 'boltzmann' or 'deterministic'
    school_locations : list
        list of locations of all schools (x,y)
    household_locations : list
        list of locations of all households (x,y)
    closer_school_from_position : numpy array shape : (width x height)
        map of every grid position to the closest school

    """


    def __init__(self, height=100, width=100, density=0.9, num_neighbourhoods=16,
                 schools_per_neighbourhood=2, minority_pc=0.5, homophily=3,
                 f0=0.6, f1=0.6, M0=0.8, M1=0.8, T=0.75,
                 alpha=0.5, temp=1, cap_max=1.01, move="boltzmann", symmetric_positions=True,
                 residential_steps=70, schelling=False, bounded=True,
                 residential_moves_per_step=2000, school_moves_per_step=2000, radius=6,
                 proportional=False, torus=False, fs="eq", extended_data=False,
                 school_pos=None, agents=None, sample=4, variable_f=True, sigma=0.35,
                 displacement=8):

        # Options  for the model
        self.height = height
        self.width = width
        print("h x w", height, width)
        self.density = density
        #self.num_schools= num_schools
        self.f = [f0, f1]
        self.M = [M0, M1]
        self.residential_steps = residential_steps
        self.minority_pc = minority_pc
        self.bounded = bounded
        self.cap_max = cap_max
        self.T = T
        self.radius = radius
        self.household_types = [0, 1]  # majority, minority !!
        self.symmetric_positions = symmetric_positions
        self.schelling = schelling
        self.school_pos = school_pos
        self.extended_data = extended_data
        self.sample = sample
        self.variable_f = variable_f
        self.sigma = sigma
        self.fs = fs

        # choice parameters
        self.alpha = alpha
        self.temp = temp

        self.households = []
        self.schools = []
        self.neighbourhoods = []
        self.residential_moves_per_step = residential_moves_per_step
        self.school_moves_per_step = school_moves_per_step

        self.num_households = int(width * height * density)
        num_min_households = int(self.minority_pc * self.num_households)
        self.num_neighbourhoods = num_neighbourhoods
        self.schools_per_neigh = schools_per_neighbourhood
        self.num_schools = int(num_neighbourhoods * self.schools_per_neigh)
        self.pm = [
            self.num_households - num_min_households, num_min_households
        ]

        self.schedule = RandomActivation(self)
        self.grid = SingleGrid(height, width, torus=torus)
        self.total_moves = 0
        self.res_moves = 0

        self.move = move

        self.school_locations = []
        self.household_locations = []
        self.neighbourhood_locations = []
        self.closer_school_from_position = np.empty(
            [self.grid.width, self.grid.height])
        self.closer_neighbourhood_from_position = np.empty(
            [self.grid.width, self.grid.height])

        self.happy = 0
        self.res_happy = 0
        self.percent_happy = 0
        self.seg_index = 0
        self.res_seg_index = 0
        self.residential_segregation = 0
        self.collective_utility = 0
        self.comp0,self.comp1,self.comp2,self.comp3,self.comp4,self.comp5,self.comp6,self.comp7, \
        self.comp8, self.comp9, self.comp10, self.comp11, self.comp12, self.comp13, self.comp14, self.comp15 = 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
        self.satisfaction = []
        self.pi_jm = []
        self.pi_jm_fixed = []
        self.compositions = []
        self.average_like_fixed = 0
        self.average_like_variable = 0

        self.my_collector = []
        if torus:
            self.max_dist = self.height / np.sqrt(2)
        else:
            self.max_dist = self.height * np.sqrt(2)

        # Set up agents
        # We use a grid iterator that returns
        # the coordinates of a cell as well as
        # its contents. (coord_iter)
        # Set up schools in symmetric positions along the grid

        # if schools already supplied place them where they should be
        # TODO: fix
        if self.school_pos:
            school_positions = self.school_pos
            self.school_locations = school_pos
            self.num_schools = len(school_pos)
            print("Option not working")
            sys.exit()

        # otherwise calculate the positions
        else:
            if self.num_neighbourhoods == 4:
                neighbourhood_positions = [(width / 4, height / 4),
                                           (width * 3 / 4, height / 4),
                                           (width / 4, height * 3 / 4),
                                           (width * 3 / 4, height * 3 / 4)]
            elif self.num_neighbourhoods == 9:
                n = 6
                neighbourhood_positions = [
                    (width / n, height / n), (width * 3 / n, height / n),
                    (width * 5 / n, height / n), (width / n, height * 3 / n),
                    (width * 3 / n, height * 3 / n), (width * 5 / n, height * 3 / n),
                    (width / n, height * 5 / n), (width * 3 / n, height * 5 / n),
                    (width * 5 / n, height * 5 / n)]

            elif self.num_neighbourhoods in [25, 64, 16]:
                neighbourhood_positions = []
                n = int(np.sqrt(self.num_neighbourhoods) * 2)
                print(n)
                x1 = range(1, int(n + 1), 2)

                xloc = np.repeat(x1, int(n / 2))
                yloc = np.tile(x1, int(n / 2))

                for i in range(self.num_neighbourhoods):
                    neighbourhood_positions.append(
                        (xloc[i] * height / n, yloc[i] * width / n))

        print(neighbourhood_positions)
        #for i in range(self.num_schools):
        i = 0
        while len(self.neighbourhoods) < self.num_neighbourhoods:

            if self.symmetric_positions or self.school_pos:
                x = int(neighbourhood_positions[i][0])
                y = int(neighbourhood_positions[i][1])

                #print(x,y)

            else:
                x = random.randrange(start=2, stop=self.grid.width - 2)
                y = random.randrange(start=2, stop=self.grid.height - 2)

            pos = (x, y)
            pos2 = (x + 1, y + 1)
            pos3 = (x - 1, y - 1)  # default; overwritten when there are two schools per neighbourhood
            if schools_per_neighbourhood == 2:
                pos3 = (x - displacement, y - displacement)
                pos2 = (x + displacement, y + displacement)

            do_not_use = self.school_locations + self.neighbourhood_locations
            #if (pos not in do_not_use) and (pos2 not in do_not_use ) and (pos3 not in do_not_use ):
            if (pos not in do_not_use) and (pos2 not in do_not_use):

                #print('pos',pos,pos2,pos3)
                self.school_locations.append(pos2)
                school = SchoolAgent(pos2, self)
                self.grid.place_agent(school, school.unique_id)
                self.schools.append(school)
                self.schedule.add(school)

                if self.schools_per_neigh == 2:
                    # Add another school
                    self.school_locations.append(pos3)
                    school = SchoolAgent(pos3, self)
                    self.grid.place_agent(school, school.unique_id)
                    self.schools.append(school)
                    self.schedule.add(school)

                self.neighbourhood_locations.append(pos)
                neighbourhood = NeighbourhoodAgent(pos, self)
                self.grid.place_agent(neighbourhood, neighbourhood.unique_id)
                self.neighbourhoods.append(neighbourhood)
                self.schedule.add(neighbourhood)

            else:
                print(pos, pos2, pos3, "is found in", do_not_use)
            i += 1
        print("num_schools", len(self.school_locations))

        print("schools completed")

        #print(self.neighbourhood_locations)
        #print("schools",self.school_locations, len(self.school_locations))
        # Set up households

        # If agents are supplied place them where they need to be
        if agents:

            for cell in agents:
                [agent_type, x, y] = cell
                if agent_type in [0, 1]:

                    pos = (x, y)
                    if self.grid.is_cell_empty(pos):
                        agent = HouseholdAgent(pos, self, agent_type)
                        self.grid.place_agent(agent, agent.unique_id)

                        self.household_locations.append(pos)
                        self.households.append(agent)
                        self.schedule.add(agent)

        # otherwise produce them
        else:

            # create household locations but don't create the agents yet

            while len(self.household_locations) < self.num_households:

                #Add the agent to a random grid cell
                x = random.randrange(self.grid.width)
                y = random.randrange(self.grid.height)
                pos = (x, y)

                if (pos not in (self.school_locations +
                                self.household_locations +
                                self.neighbourhood_locations)):
                    self.household_locations.append(pos)

            #print(Dij)

            for ind, pos in enumerate(self.household_locations):

                # assign minority/majority type by index and create the household

                if ind < int(self.minority_pc * self.num_households):
                    agent_type = self.household_types[1]
                else:
                    agent_type = self.household_types[0]

                household_index = ind
                agent = HouseholdAgent(pos, self, agent_type, household_index)
                #decorator_agent = HouseholdAgent(pos, self, agent_type)

                self.grid.place_agent(agent, agent.unique_id)

                #self.grid.place_agent(decorator_agent, pos)

                self.households.append(agent)
                self.schedule.add(agent)

        self.set_positions_to_school()
        self.set_positions_to_neighbourhood()
        self.calculate_all_distances()
        self.calculate_all_distances_to_neighbourhoods()

        for agent in self.households:

            random_school_index = random.randint(0, len(self.schools) - 1)
            #print("school_index", random_school_index, agent.Dj, len(agent.Dj))

            candidate_school = self.schools[random_school_index]
            agent.allocate(candidate_school, agent.Dj[random_school_index])

            #closer_school = self.schools[p.argmin(Dj)]
            #closer_school.students.append(agent)
        # agent.allocate(closer_school, np.min(Dj))
        #print(agent.school.unique_id)

        self.pi_jm = np.zeros(shape=(len(self.school_locations),
                                     len(self.household_types)))
        self.local_compositions = np.zeros(shape=(len(self.school_locations),
                                                  len(self.household_types)))
        self.avg_school_size = round(density * width * height /
                                     (len(self.schools)))

        # The two DataCollector variants differ only in the extra agent
        # reporters collected when extended_data is set.
        model_reporters = {
            "agent_count": lambda m: m.schedule.get_agent_count(),
            "seg_index": "seg_index",
            "residential_segregation": "residential_segregation",
            "res_seg_index": "res_seg_index",
            "fixed_res_seg_index": "fixed_res_seg_index",
            "happy": "happy",
            "percent_happy": "percent_happy",
            "total_moves": "total_moves",
            "compositions0": "compositions0",
            "compositions1": "compositions1",
            "comp0": "comp0",
            "comp1": "comp1",
            "comp2": "comp2",
            "comp3": "comp3",
            "comp4": "comp4",
            "comp5": "comp5",
            "comp6": "comp6",
            "comp7": "comp7",
            "compositions": "compositions",
            "collective_utility": "collective_utility"
        }
        agent_reporters = {
            "local_composition": "local_composition",
            "type": lambda a: a.type,
            "id": lambda a: a.unique_id,
            # "fixed_local_composition": "fixed_local_composition",
            # "variable_local_composition": "variable_local_composition",
            "pos": "pos"
        }
        if self.extended_data:
            # utility distributions per agent take up a lot of space
            agent_reporters["school_utilities"] = "school_utilities"
            agent_reporters["residential_utilities"] = "residential_utilities"

        self.datacollector = DataCollector(model_reporters=model_reporters,
                                           agent_reporters=agent_reporters)

        # Calculate local composition
        # set size
        for school in self.schools:
            #school.get_local_school_composition()
            #cap = round(np.random.normal(loc=cap_max * self.avg_school_size, scale=self.avg_school_size * 0.05))
            cap = self.avg_school_size * self.cap_max
            school.capacity = cap
            print("cap", self.avg_school_size, cap)
            segregation_index(self)

        print(
            "height = %d; width = %d; density = %.2f; num_schools = %d; minority_pc =  %.2f; "
            "f0 =  %.2f; f1 =  %.2f; M0 =  %.2f; M1 =  %.2f;\
        alpha =  %.2f; temp =  %.2f; cap_max =  %.2f; move = %s; symmetric_positions = %s"
            % (height, width, density, self.num_schools, minority_pc, f0, f1,
               M0, M1, alpha, temp, cap_max, move, symmetric_positions))

        self.total_considered = 0
        self.running = True
        self.datacollector.collect(self)

    def calculate_all_distances(self):
        """
        Calculate the distance between every household and school
        (Euclidean, or a GIS shortest road route).
        :return: Dij, the household x school distance matrix
        """

        Dij = distance.cdist(np.array(self.household_locations),
                             np.array(self.school_locations), 'euclidean')

        for household_index, household in enumerate(self.households):
            Dj = Dij[household_index, :]
            household.Dj = Dj

            # Calculate distances of the schools - define the school-neighbourhood and compare
            # closer_school = household.schools[np.argmin(household.)]
            closer_school_index = np.argmin(household.Dj)
            household.closer_school = self.schools[closer_school_index]
            household.closer_school.neighbourhood_students.append(household)

        return (Dij)

    def calculate_all_distances_to_neighbourhoods(self):
        """
        Assign each household to its closest neighbourhood
        (Euclidean, or a GIS shortest road route).
        """
        for household_index, household in enumerate(self.households):

            # Calculate distances of the schools - define the school-neighbourhood and compare
            # closer_school = household.schools[np.argmin(household.)]
            household.closer_neighbourhood = self.get_closer_neighbourhood_from_position(
                household.pos)
            household.closer_neighbourhood.neighbourhood_students_indexes.append(
                household_index)

        # just sanity check
        # for i, neighbourhood in enumerate(self.neighbourhoods):
        #     students = neighbourhood.neighbourhood_students_indexes
        #     print("students,",i, len(students))

    def set_positions_to_school(self):
        '''
        calculate the closest school from every position on the grid
        Euclidean or gis shortest road route
        '''
        # enumerate every grid location
        all_grid_locations = []

        for x in range(self.grid.width):
            for y in range(self.grid.height):
                all_grid_locations.append((x, y))

        Dij = distance.cdist(np.array(all_grid_locations),
                             np.array(self.school_locations), 'euclidean')

        for i, pos in enumerate(all_grid_locations):
            Dj = Dij[i, :]
            (x, y) = pos
            # Calculate distances of the schools - define the school-neighbourhood and compare
            # closer_school = household.schools[np.argmin(household.)]
            closer_school_index = np.argmin(Dj)
            self.closer_school_from_position[x][y] = closer_school_index

        #print("closer_school_by_position",self.closer_school_from_position)

    def set_positions_to_neighbourhood(self):
        '''
        calculate the closest neighbourhood centre from every position on the grid
        Euclidean or gis shortest road route
        '''
        # enumerate every grid location
        all_grid_locations = []

        for x in range(self.grid.width):
            for y in range(self.grid.height):
                all_grid_locations.append((x, y))

        Dij = distance.cdist(np.array(all_grid_locations),
                             np.array(self.neighbourhood_locations),
                             'euclidean')

        for i, pos in enumerate(all_grid_locations):
            Dj = Dij[i, :]
            (x, y) = pos
            # Calculate distances of the schools - define the school-neighbourhood and compare
            # closer_school = household.schools[np.argmin(household.)]
            closer_neighbourhood_index = np.argmin(Dj)
            self.closer_neighbourhood_from_position[x][
                y] = closer_neighbourhood_index

        #print("closer_school_by_position", self.closer_school_from_position)

    def get_closer_school_from_position(self, pos):
        """
        :param pos: (x,y) position
        :return school: school object closest to this position
        """
        (x, y) = pos
        school_index = self.closer_school_from_position[x][y]
        school = self.get_school_from_index(school_index)

        return (school)

    def get_closer_neighbourhood_from_position(self, pos):
        """
        :param pos: (x,y) position
        :return school: school object closest to this position
        """
        (x, y) = pos
        neighbourhood_index = self.closer_neighbourhood_from_position[x][y]
        neighbourhood = self.get_neighbourhood_from_index(neighbourhood_index)

        return (neighbourhood)

    def get_school_from_index(self, school_index):
        """
        :param self: obtain the school object using the index
        :param school_index:
        :return: school object
        """

        return (self.schools[int(school_index)])

    def get_neighbourhood_from_index(self, neighbourhood_index):
        """
        :param self: obtain the school object using the index
        :param school_index:
        :return: school object
        """

        return (self.neighbourhoods[int(neighbourhood_index)])

    def get_households_from_index(self, household_indexes):
        """
        Retrieve household objects from their indexes
        :param household_indexes: list of indexes to retrieve household objects
        :return: households: household objects
        """
        households = []
        for household_index in household_indexes:
            households.append(self.households[household_index])
        return (households)

    def step(self):
        '''
        Run one step of the model. If all agents are happy, halt the model.
        '''
        self.happy = 0  # Reset counter of happy agents
        self.res_happy = 0
        self.total_moves = 0
        self.total_considered = 0
        self.res_moves = 0
        self.satisfaction = []
        self.res_satisfaction = []

        self.schedule.step()

        satisfaction = 0
        res_satisfaction = 0
        print("happy", self.happy)
        print("total_considered", self.total_considered)

        # While residential steps are running, keep school and neighbourhood assignments up to date

        if self.schedule.steps <= self.residential_steps or self.schedule.steps == 1:
            # during the residential steps keep recalculating the school neighbourhood compositions
            # this is required for the neighbourhoods metric

            #print("recalculating neighbourhoods")
            # TODO: check this, not sure if this and the recalculation below is needed
            for school in self.schools:
                school.neighbourhood_students = []
            for neighbourhood in self.neighbourhoods:
                neighbourhood.neighbourhood_students_indexes = []

            # update the household locations after a move
            self.household_locations = []
            for i, household in enumerate(self.households):
                self.household_locations.append(household.pos)

            self.calculate_all_distances()
            self.calculate_all_distances_to_neighbourhoods()
            #print("all", self.calculate_all_distances()[i, :])

            # for i, household in enumerate(self.households):
            #     print(household.calculate_distances())
            #     # Calculate distances of the schools - define the school-neighbourhood and compare
            #     # closer_school = household.schools[np.argmin(household.)]
            #     closer_school_index = np.argmin(household.Dj)
            #     household.closer_school = self.schools[closer_school_index]
            #     household.closer_school.neighbourhood_students.append(household)
            #
            #     # Initialize house allocation to school
            #     #household.move_school(closer_school_index, self.schools[closer_school_index])
            #

            self.residential_segregation = segregation_index(
                self, unit="neighbourhood")
            self.res_seg_index = segregation_index(self,
                                                   unit="agents_neighbourhood")
            self.fixed_res_seg_index = segregation_index(
                self, unit="fixed_agents_neighbourhood", radius=1)
            res_satisfaction = np.mean(self.res_satisfaction)

        satisfaction = 0
        # calculate these after residential_model
        if self.schedule.steps > self.residential_steps:
            self.collective_utility = calculate_collective_utility(self)
            print(self.collective_utility)
            self.seg_index = segregation_index(self)
            satisfaction = np.mean(self.satisfaction)



        print("seg_index", "%.2f"%(self.seg_index), "var_res_seg", "%.2f"%(self.res_seg_index), "neighbourhood",
              "%.2f"%(self.residential_segregation), "fixed_res_seg_index","%.2f"%(self.fixed_res_seg_index), \
              "res_satisfaction %.2f" %res_satisfaction,"satisfaction %.2f" %satisfaction,\
              "average_like_fixed %.2f"%self.average_like_fixed,"average_like_var %.2f"%self.average_like_variable  )

        if self.happy == self.schedule.get_agent_count():
            self.running = False

        compositions = []

        # remove this?
        for school in self.schools:
            self.my_collector.append([
                self.schedule.steps, school.unique_id,
                school.get_local_school_composition()
            ])
            self.compositions = school.get_local_school_composition()
            compositions.append(school.get_local_school_composition()[0])
            compositions.append(school.get_local_school_composition()[1])

            self.compositions1 = int(school.get_local_school_composition()[1])
            self.compositions0 = int(school.get_local_school_composition()[0])
            #print("school_students",school.neighbourhood_students)

        #print("comps",compositions,np.sum(compositions) )
        [
            self.comp0, self.comp1, self.comp2, self.comp3, self.comp4,
            self.comp5, self.comp6, self.comp7
        ] = compositions[0:8]
        # collect data
        #
        self.datacollector.collect(self)
        print("moves", self.total_moves, "res_moves", self.res_moves,
              "percent_happy", self.percent_happy)

        for i, household in enumerate(self.households):
            household.school_utilities = []
            household.residential_utilities = []
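calculate_all_distances above relies on scipy's cdist to build the full household-by-school distance matrix and on argmin to pick each household's nearest school. A minimal self-contained sketch of that pattern, with made-up coordinates:

import numpy as np
from scipy.spatial import distance

households = np.array([(0, 0), (9, 9), (9, 0)])   # hypothetical household positions
schools = np.array([(2, 2), (8, 8)])              # hypothetical school positions
Dij = distance.cdist(households, schools, 'euclidean')   # shape (3, 2)
closest = Dij.argmin(axis=1)    # index of the nearest school for each household
print(Dij.round(2))             # [[ 2.83 11.31] [ 9.9   1.41] [ 7.28  8.06]]
print(closest)                  # [0 1 0]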
Example #25
class DiseaseModel(Model):
    """
    A model with some number of agents.
    highS: Number of agents with high sociability.
    middleS: Number of agents with middle sociability.
    lowS: Number of agents with low sociability.
    width: Width of the grid.
    height: Height of the grid.
    edu_setting: If true, agents will follow a schedule and sit in classrooms,
    else they will move freely through an open grid.
    cureProb: Probability of agent getting better.
    cureProbFac: Factor of cureProb getting higher.
    mutateProb: Probability of a disease mutating.
    diseaseRate: Rate at which the disease spreads.
    """
    def __init__(self, highS, middleS, lowS, width, height, edu_setting=True,
                 cureProb=0.1, cureProbFac=2/1440, mutateProb=0.0050,
                 diseaseRate=0.38):
        super().__init__()
        self.num_agents = highS + middleS + lowS
        self.lowS = lowS
        self.middleS = middleS
        self.highS = highS
        self.initialCureProb = cureProb
        self.cureProbFac = cureProbFac
        self.mutateProb = mutateProb
        self.diseaseRate = diseaseRate
        self.edu_setting = edu_setting
        self.maxDisease = 0  # amount of mutations
        self.counter = 540  # keeps track of timesteps
        self.removed = []
        self.exit = (width - 1, floor(height / 2))
        # Check if agents fit within grid
        if self.num_agents > width * height:
            raise ValueError("Number of agents exceeds grid capacity.")

        # Create grid with random activation
        self.grid = SingleGrid(width, height, True)
        self.schedule = RandomActivation(self)

        if edu_setting:
            # Create walls
            numberRooms = 3
            self.add_walls(numberRooms, width, height)

            self.midWidthRoom = floor(width / numberRooms / 2)
            self.midHeightRoom = floor(height / numberRooms / 2)
            self.widthRoom = floor(width / numberRooms)
            self.heightRoom = floor(height / numberRooms)
            numberRows = floor((self.heightRoom) / 2)
            widthRows = self.widthRoom - 4
            location = [[] for _ in range(numberRooms * 2)]
            for i in range(numberRooms):
                for j in range(0, numberRows, 2):
                    startWidth = 2 + (i % 3) * self.widthRoom
                    for currentWidth in range(widthRows):
                        location[i] += [(startWidth + currentWidth, j)]
            for i in range(3, numberRooms * 2):
                for j in range(0, numberRows, 2):
                    startWidth = 2 + (i % 3) * self.widthRoom
                    for currentWidth in range(widthRows):
                        location[i] += [(startWidth + currentWidth,
                                         height - 1 - j)]

            # Set 3 goals per roster
            self.roster = [[location[0], location[3], location[1]],
                           [location[5], location[2], location[0]],
                           [location[4], location[1], location[5]]]

        # Create agents
        self.addAgents(lowS, 0, 0)
        self.addAgents(middleS, lowS, 1)
        self.addAgents(highS, lowS + highS, 2)

        # set up data collecter
        self.datacollector = DataCollector(
            model_reporters={"diseasepercentage": disease_collector},
            agent_reporters={"disease": "disease"})

    def heuristic(self, start, goal):
        """
        Returns manhattan distance.
        start: current location (x,y)
        goal: goal location (x,y)
        """
        dx = abs(start[0] - goal[0])
        dy = abs(start[1] - goal[1])
        return dx + dy

    def get_vertex_neighbors(self, pos):
        """
        Returns all neighbors.
        pos: current position
        """
        n = self.grid.get_neighborhood(pos, moore=False)
        neighbors = []
        for item in n:
            if not abs(item[0] - pos[0]) > 1 and not abs(item[1] - pos[1]) > 1:
                neighbors += [item]
        return neighbors

    def move_cost(self, location):
        """
        Return the cost of a location.
        """
        if self.grid.is_cell_empty(location):
            return 1  # Normal movement cost
        else:
            return 100  # Very difficult to go through walls

    def add_walls(self, n, widthGrid, heightGrid):
        """
        Add walls in grid.
        n: number of rooms horizontally
        widthGrid: width of the grid
        heightGrid: height of the grid
        """
        widthRooms = floor(widthGrid / n)
        heightRooms = floor(heightGrid / n)
        heightHall = heightGrid - 2 * heightRooms
        # Add horizontal walls
        for i in range(n - 1):
            for y in range(heightRooms):
                brick = wall(self.num_agents, self)
                self.grid.place_agent(brick, ((i + 1) * widthRooms, y))
                self.grid.place_agent(brick, ((i + 1) * widthRooms, y +
                                      heightRooms + heightHall))
        doorWidth = 2
        # Add vertical walls
        for x in range(widthGrid):
            if (x % widthRooms) < (widthRooms - doorWidth):
                brick = wall(self.num_agents, self)
                self.grid.place_agent(brick, (x, heightRooms))
                self.grid.place_agent(brick, (x, heightRooms + heightHall - 1))

    def addAgents(self, n, startID, sociability):
        """
        Add agents with a sociability.
        n: number of agents
        startID: ID of the first added agent
        sociability: sociability of the agents
        """
        disease_list = np.random.randint(0, 2, n)
        for i in range(n):
            # Set schedule for every agent if educational setting
            if self.edu_setting:
                a_roster = []
                rosterNumber = self.random.randrange(len(self.roster))
                rooms = self.roster[rosterNumber]
                for roomNumber in range(len(rooms)):
                    loc = self.random.choice(rooms[roomNumber])
                    a_roster += [loc]
                    (self.roster[rosterNumber][roomNumber]).remove(loc)
            else:
                a_roster = []

            a = DiseaseAgent(i + startID, sociability, self, disease_list[i],
                             a_roster)
            self.schedule.add(a)
            # Set agent outside grid, ready to enter, if edu setting
            # else randomly place on empty spot on grid
            if self.edu_setting:
                self.removed += [a]
                a.pos = None
            else:
                self.grid.place_agent(a, self.grid.find_empty())

    def step(self):
        """
        Continue one step in simulation.
        """
        self.counter += 1
        self.datacollector.collect(self)
        self.schedule.step()
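heuristic, get_vertex_neighbors and move_cost are the standard ingredients of grid A* pathfinding; the model itself does not show the search loop, so the a_star function below is an illustrative sketch rather than this repository's implementation:

import heapq

def a_star(start, goal, neighbors, cost, heuristic):
    """Textbook A* search over a graph given by a neighbors() function."""
    frontier = [(heuristic(start, goal), 0, start, [start])]
    best_g = {start: 0}
    while frontier:
        _, g, pos, path = heapq.heappop(frontier)
        if pos == goal:
            return path
        for nxt in neighbors(pos):
            g2 = g + cost(nxt)
            if nxt not in best_g or g2 < best_g[nxt]:
                best_g[nxt] = g2
                heapq.heappush(frontier,
                               (g2 + heuristic(nxt, goal), g2, nxt, path + [nxt]))
    return None

# 4-connected 5x5 grid, uniform movement cost, Manhattan heuristic
manhattan = lambda a, b: abs(a[0] - b[0]) + abs(a[1] - b[1])
neighbors = lambda p: [(p[0] + dx, p[1] + dy)
                       for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1))
                       if 0 <= p[0] + dx < 5 and 0 <= p[1] + dy < 5]
print(a_star((0, 0), (4, 4), neighbors, lambda loc: 1, manhattan))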
Example #26
class MoneyModel(Model):
    """A model with some number of agents."""
    def __init__(self,
                 N,
                 width,
                 height,
                 init_price,
                 init_ei,
                 grow_ei,
                 fixed,
                 rnd,
                 p_q,
                 p_ei,
                 min_neighbor,
                 sub,
                 subsell,
                 burn,
                 seed=None):
        self.num_agents = (width * height)
        self.grid = SingleGrid(width, height, True)
        self.schedule = RandomActivation(self)
        self.running = True
        self.ext_inc = init_ei
        self.grow_ei = grow_ei
        self.min_neighbor = min_neighbor
        self.p_q = p_q
        self.p_ei = p_ei
        self.last_price = 0
        self.price = self.init_price = init_price
        self.tick = 0
        self.true_supply = 0
        self.stock = 0
        self.subtrue = False
        self.sub = sub
        self.subsell = subsell
        self.burn = burn
        self.burned = 0
        self.tax = 0

        print(f'{p_q}, {p_ei}, {min_neighbor}')

        # Create agents
        for i in range(self.num_agents):
            a = MoneyAgent(i, self, N, fixed, rnd)
            self.schedule.add(a)
            self.grid.place_agent(a, (0, 0))
            if i < self.num_agents - 1:
                self.grid.move_to_empty(a)
        self.supply = [k.color for k in self.schedule.agents].count('yellow')

        self.datacollector = DataCollector(
            model_reporters={
                "Supply": compute_supply,
                "Price": compute_price,
                "Stock": compute_stock,
                "Burned": compute_burned
            }
            #agent_reporters={"Wealth": "wealth"}
        )

        #print(f'start time: {time.time() - start_time}')

    def market(self):
        self.tick += 1
        print(self.tick)
        self.last_price = self.price
        self.ext_inc = self.ext_inc * (1 + self.grow_ei)
        self.supply = [k.color for k in self.schedule.agents].count('yellow')
        self.price = max(0, (self.init_price + (self.p_ei * self.ext_inc) -
                             (self.p_q * self.supply)))

        if self.sub == True:
            if self.price < 0.9 * self.last_price:
                self.true_supply = (50 *
                                    ((self.init_price) + (0.2 * self.ext_inc) -
                                     (0.9 * self.last_price)))
                self.stock = (self.stock + (self.supply - self.true_supply))
                self.price = (0.9 * self.last_price)
                self.subtrue = True
            if self.tick < 10:
                self.stock = 0

        if self.subsell == True:
            if self.stock > 0:
                if self.price > self.last_price:
                    if self.subtrue == True:
                        self.supprice = self.init_price + (
                            0.2 * self.ext_inc) - (0.01 *
                                                   (self.supply + self.stock))
                        if self.supprice >= self.last_price:
                            self.price = self.supprice
                            self.stock = 0
                        else:
                            self.price = self.last_price
                            self.true_supply = (50 * ((self.init_price) +
                                                      (0.2 * self.ext_inc) -
                                                      (0.9 * self.last_price)))
                            self.stock = self.stock - (self.true_supply -
                                                       self.supply)

        if self.burn == True:
            if self.stock >= 0:
                self.tax = 0.5 * (self.stock / (self.supply + 0.1))
                self.burned = (self.burned + self.stock)
                self.stock = 0

    def step(self):
        start_time = time.time()
        self.schedule.step()
        print(f'schedule time: {time.time() - start_time}')
        start_time = time.time()
        self.market()
        print(f'market time: {time.time() - start_time}')
        start_time = time.time()
        self.datacollector.collect(self)
        print(f'datacollector time: {time.time() - start_time}')
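market() is essentially a linear inverse-demand rule, price = max(0, init_price + p_ei * ext_inc - p_q * supply), with external income compounding by grow_ei each tick. A toy rerun of just that price path, using made-up parameter values:

init_price, p_ei, p_q, grow_ei = 10.0, 0.2, 0.05, 0.01   # made-up parameters
ext_inc, supply = 50.0, 120
for tick in range(5):
    ext_inc *= 1 + grow_ei           # external income compounds every tick
    price = max(0, init_price + p_ei * ext_inc - p_q * supply)
    print(tick, round(ext_inc, 2), round(price, 2))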
Example #27
class CHModel(Model):
    """A model with some number of agents."""
    def __init__(self, width, height, random_n = 0, cow_n = 0, plan_n = 0, mc_n = 0, td_n = 0, episode_number = 0, t_mc_n = 0, old_Q_values = None):
        self.running = True 
        #self.num_agents = N
        self.grid = SingleGrid(width, height, True)
        self.schedule = RandomActivation(self)
        
        
        self.id_count = 0 #to assign each agent a unique ID
        #self.max_timesteps = 500 #max timesteps for each episode
        
        # To keep score
        self.total_cow_count = 0.0
        self.current_cow_count = 0.0
        self.score = 0.0
        self.previous_cow_count = 0.0
        
        # Save model for agent use
        self.wallLocations = [(1,5), (1,6), (1,7), (2,7), (3,7), (4,7), (5,7), (6,7), (6,6), (6,5)]
        self.goalState = [(2,5), (3,5), (4,5), (5,5), (2,6), (3,6), (4,6), (5,6)]
        self.goalTarget = (3,5) #corral "entrance" that plan agents herd towards
        self.state = None # encode state at each timestep
        
        self.number_random_agents = random_n
        self.number_cow_agents = cow_n
        self.number_plan_agents = plan_n
        self.number_monte_carlo_agents = mc_n
        self.number_td_agents = td_n
        self.number_trained_mc_agents = t_mc_n
        
        
        # load pre-trained data to add to or make new Q tables for MC Agents
        # set to false to make a new Q table
        #loadpretrained = True
        #if (loadpretrained and (not old_Q_values)):
        #    print("loading pkl file")
        #    with open('mc_q_save.pkl', 'rb') as file:
        #        self.Q_values = dill.load(file)
        
        
        # Monte Carlo Agent model save
        self.Q_table_sharing = True ## If true, agents share a Q table
        self.vision_range = 2 # How far the MC agents can see
        
        if old_Q_values: #load previous Q tables if they exist
            self.Q_values = old_Q_values
        else:
            self.Q_values = [] #no previous Q tables, so make new ones
            if (self.Q_table_sharing):
                # Just one Q table  
                self.Q_values.append(defaultdict(lambda: np.zeros(len(rl_methods.action_space))))
            else:
                #every agent gets its own Q table
                for agent in range(self.number_monte_carlo_agents):
                    self.Q_values.append(defaultdict(lambda: np.zeros(len(rl_methods.action_space))))
        self.mc_agents = []
        
        self.episode = episode_number
        # calculate epsilon based on the episode number
        # epsilon = 1 / i_episode
        ####### tweak epsilon to get better results #######
        self.epsilon = 1.0/((episode_number/800) + 1)
        #self.epsilon = 1.0/((episode_number/8000)+1)

        
        # Place wall agents
        for i in range(len(self.wallLocations)):
            a = WallAgent(self.id_count, self)
            self.id_count += 1
            self.schedule.add(a)
            #print("placing ", a, " at ", self.corralLocations[i])
            self.grid.place_agent(a, self.wallLocations[i])
            
        # Place random agents
        for i in range(self.number_random_agents):
            a = RandomAgent(self.id_count, self)
            self.id_count += 1
            self.schedule.add(a)
            cell_location = self.grid.find_empty()
            self.grid.place_agent(a, cell_location)
            
        # Place cow agents
        for i in range(self.number_cow_agents):
            c = CowAgent(self.id_count, self)
            self.id_count += 1
            self.schedule.add(c)
            #self.cow_agent_list.append(c) #make a list of cows
            cell_location = self.grid.find_empty()
            self.grid.place_agent(c, cell_location)

        # Place plan agents
        for i in range(self.number_plan_agents):
            p = PlanAgent(self.id_count, self)
            self.id_count += 1
            self.schedule.add(p)
            cell_location = self.grid.find_empty()
            self.grid.place_agent(p, cell_location)
            
        # Place monte carlo agents
        for i in range(self.number_monte_carlo_agents):
            Q_table_to_use = None
            if (self.Q_table_sharing): # If sharing Q tables, everyone gets a copy of the same Q table 
                Q_table_to_use = self.Q_values[0]
            else:
                Q_table_to_use = self.Q_values[i] # If not sharing, everyone gets a different Q table
            m = MonteCarloAgent(self.id_count, self, Q_table_to_use, self.epsilon, vision = self.vision_range) # init MC agents with previous Q tables
            self.mc_agents.append(m) # save MC agents to retrieve Q values
            self.id_count += 1
            self.schedule.add(m)
            cell_location = self.grid.find_empty()
            self.grid.place_agent(m, cell_location)
        
        # Place trained monte carlo agents
        # open/load trained Q table
        if (self.number_trained_mc_agents > 0):
            loaded_Q = None
            with open('mc_q_save.pkl', 'rb') as file:
                loaded_Q = dill.load(file)
            if loaded_Q:
                for i in range(self.number_trained_mc_agents):
                    tm = TrainedMonteCarloAgent(self.id_count, self, loaded_Q, vision = self.vision_range)
                    self.id_count += 1
                    self.schedule.add(tm)
                    cell_location = self.grid.find_empty()
                    self.grid.place_agent(tm, cell_location)
            else:
                print("Can't load Q table for trained MC Agents")
            
        # Place TD agents
        for i in range(self.number_td_agents):
            t = TDAgent(self.id_count, self)
            self.id_count += 1
            self.schedule.add(t)
            cell_location = self.grid.find_empty()
            self.grid.place_agent(t, cell_location)
            

            
    def step(self):
        self.state = rl_methods.encode_state(self.grid)
        self.schedule.step()
        self.update_score()
        
        #print(np.matrix(self.state))
        #print("the current score is ", self.score)
        
        # Update rewards of Monte Carlo agents
        
        # rewards_type selects the reward signal:
        #   1: the actual current score
        #   2: number of cows in the goal
        #   3: cows in the goal, with a penalty if a cow leaves it
        rewards_type = 3
        
        # how penalized do you want the agents to be for letting cow escape?
        penalty_modifier = 0.0
        # how much of a bonus for getting cows to go in the goal?
        bonus_modifier = 100.0
        # bonus for keeping cows in goal
        bonus_cows = 5.0
        
        for mcagent in self.mc_agents:
            if (rewards_type == 1):
                mcagent.update_rewards(self.score)
            elif (rewards_type == 2):
                mcagent.update_rewards(self.current_cow_count)
            elif (rewards_type == 3):
                penalty = 0.0
                bonus = 0.0
                no_cow_penalty = -1.0
                if (self.current_cow_count < self.previous_cow_count):
                    print("calculating penalty ESCAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPE")
                    cows_escaped = (float(self.previous_cow_count) - float(self.current_cow_count))
                    #print("this many escaped: ", cows_escaped, ", modifier: ", penalty_modifier)
                    penalty = penalty_modifier * cows_escaped
                    #print("prev cows ", self.previous_cow_count,  ", cows ", self.current_cow_count,  ", penalty ", penalty)
                if (self.current_cow_count > self.previous_cow_count):
                    print("calculating penalty COWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWW")
                    cows_gained = (float(self.current_cow_count) - float(self.previous_cow_count))
                    #print("this many escaped: ", cows_escaped, ", modifier: ", penalty_modifier)
                    bonus = bonus_modifier * cows_gained
                if (self.current_cow_count < self.number_cow_agents):
                    penalty = penalty - (no_cow_penalty * (float(self.number_cow_agents) - float(self.current_cow_count)))
                mcagent.update_rewards((self.current_cow_count * bonus_cows) - penalty + bonus)
                print("current cow count: ", self.current_cow_count, ", penalty: ", penalty, ", bonus: ", bonus, ", no cow ")
                print("total reward: ", (self.current_cow_count * bonus_cows) - penalty + bonus)
            else:
                printing("using default reward")
                mcagent.update_rewards(self.score)

    def update_score(self):
        self.previous_cow_count = self.current_cow_count
        self.current_cow_count = cow_methods.cows_in_goal(self, self.goalState)
        self.total_cow_count += self.current_cow_count
        print(self.total_cow_count, self.current_cow_count, self.schedule.time, " Episode: ", self.episode)
        self.score = self.total_cow_count / self.schedule.time
        
    def get_new_Q_values(self):
        """ Update model Q values at the end of the episode, called by run after each episode """
        new_Q = []
        
        if(self.Q_table_sharing): #If all agents are sharing Q table data
            updated_Q = None
            for agent in self.mc_agents:
                # Update the Q table then pass it on to the next agent on the team to update
                updated_Q = agent.Q_table_update(shared_Q_table = updated_Q) 
            new_Q.append(copy.deepcopy(updated_Q))
        else:
            # If all agents have their own Q tables, update and save for next episode
            for agent in self.mc_agents:
                updated_Q = agent.Q_table_update()
                new_Q.append(copy.deepcopy(updated_Q))
        return new_Q
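The Q tables above are defaultdicts mapping an encoded state to an array of per-action values, and epsilon decays as 1 / (episode/800 + 1). A minimal epsilon-greedy action selection against such a table; the 4-action space is an assumption standing in for rl_methods.action_space:

import random
from collections import defaultdict
import numpy as np

n_actions = 4                                 # assumption: stand-in for len(rl_methods.action_space)
Q = defaultdict(lambda: np.zeros(n_actions))  # unseen states default to all-zero action values

def epsilon_greedy(state, episode):
    epsilon = 1.0 / (episode / 800 + 1)       # same decay schedule as the model
    if random.random() < epsilon:
        return random.randrange(n_actions)    # explore
    return int(np.argmax(Q[state]))           # exploit

print(epsilon_greedy("some-encoded-state", episode=1600))   # epsilon = 1/3 here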
Example #28
class ReactionDiffusionModel(Model):
    """A model with some number of agents."""

    #Initialize a model that includes a grid of side-length N and one agent for each grid
    def __init__(self, N):
        #Number of agents
        self.num_agents = N * N

        #The two grids can have just one agent per cell, it is dimensions NxN, and it is toroidal
        self.oldActivatorGrid = SingleGrid(N, N, True)
        self.oldInhibitorGrid = SingleGrid(N, N, True)
        self.currentActivatorGrid = SingleGrid(N, N, True)
        self.currentInhibitorGrid = SingleGrid(N, N, True)

        #Determine how our model will pick agent to interact with
        self.schedule = RandomActivation(self)

        # Create agents
        for i in range(self.num_agents):
            #Initialize a cell with uniqueID = i
            a = Cell(i, self)

            #Add our agent to our scheduler
            self.schedule.add(a)

            #Choose a random, unoccupied cell in our grid and add our agent to it
            #position_agent stores the x and y value for each of our agents
            locationTuple = self.oldActivatorGrid.find_empty()
            if (locationTuple) == (N / 2, N / 2):
                a.act = 2 * a.act
            self.oldActivatorGrid.place_agent(a, locationTuple)
            self.oldInhibitorGrid.place_agent(a, locationTuple)
            self.currentActivatorGrid.place_agent(a, locationTuple)
            self.currentInhibitorGrid.place_agent(a, locationTuple)

    #Method to get activator values in our current activator grid
    def getActivatorGrid(self):
        activator_Grid = np.zeros((self.currentActivatorGrid.width,
                                   self.currentActivatorGrid.height))
        for cell in self.currentActivatorGrid.coord_iter():
            cell_content, x, y = cell
            activator_Grid[x][y] = cell_content.act
        return activator_Grid

    #Method to get inhibitor values in our current inhibitor grid
    def getInhibitorGrid(self):
        inhibitor_Grid = np.zeros((self.currentInhibitorGrid.width,
                                   self.currentInhibitorGrid.height))
        for cell in self.currentInhibitorGrid.coord_iter():
            cell_content, x, y = cell
            inhibitor_Grid[x][y] = cell_content.inh
        return inhibitor_Grid

    def step(self):
        #Save the current activator and inhibitor distributions as the old ones
        self.oldActivatorGrid = self.currentActivatorGrid
        self.oldInhibitorGrid = self.currentInhibitorGrid

        #Perform a step of the model, where we calculate all of the new concentrations
        self.schedule.step()

        #Determine the new activator and inhibitor distributions
        currentActivatorGrid = self.getActivatorGrid()
        currentInhibitorGrid = self.getInhibitorGrid()
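The four grids implement a double-buffering scheme: each step reads concentrations from the old grids and writes updated values into the current ones. The same idea on plain numpy arrays, with simple diffusion standing in for the model's (unshown) reaction rule:

import numpy as np

N = 8
act = np.random.rand(N, N)               # current activator concentrations

def laplacian(a):
    # toroidal 5-point Laplacian, matching the model's wrap-around grids
    return (np.roll(a, 1, 0) + np.roll(a, -1, 0) +
            np.roll(a, 1, 1) + np.roll(a, -1, 1) - 4 * a)

for _ in range(10):
    old = act                            # read buffer
    act = old + 0.1 * laplacian(old)     # write buffer: pure diffusion here
print(act.round(3))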
Example #29
class AgentKnowledgeMap():
    '''
    *** Constructor:
        Inputs:
               - height and width of the grid used by the AgSimulator

       Actions:
               - Construct navigationGrid
               - Construct planGrid
               - Create agent dictionaries
    '''
    def __init__(self, height, width, model):
        self.navigationGrid = SingleGrid(height, width, False)
        self.planGrid = MultiGrid(height, width, False)
        self.planAgents = defaultdict(list)
        self.perceptionAgents = {}
        self.model = model
        agent = FarmAgent(0, self.model.farmPos, self)
        self.navigationGrid.place_agent(agent, self.model.farmPos)
        self.attendancePoints = list()

    '''
    *** update function is used by each ActiveAgent to update ActiveAgentKnowledgeMap
        Input:
              - ActiveAgentPlanning objects are placed on planGrid
              - PassiveAgentPerception objects are placed on navigationGrid
    '''

    def update(self, agent):
        if (isinstance(agent, ActiveAgentPlanning)):
            self.planGrid.place_agent(agent, agent.pos)
            self.planAgents.setdefault(agent.unique_id, [])
            self.planAgents[agent.unique_id].append(agent)
        elif (isinstance(agent, PassiveAgentPerception)):
            if self.navigationGrid.is_cell_empty(agent.pos):
                self.navigationGrid.place_agent(agent, agent.pos)
                self.perceptionAgents[agent.unique_id] = agent
            else:
                existing_agent = self.navigationGrid.get_cell_list_contents(
                    agent.pos)[0]
                existing_agent.update(agent.state, agent.time_at_current_state)

    # This function is used for removing a step from the KnowledgeMap
    def removeOneStep(self, agentID):
        if self.planAgents[agentID]:
            self.planGrid.remove_agent(self.planAgents[agentID].pop(0))

    # This function is used for canceling the entire plan in case a collision is detected
    def cancelPlan(self, agentID):
        while len(self.planAgents[agentID]) > 0:
            self.planGrid.remove_agent(self.planAgents[agentID].pop(0))

    '''
    *** getGridStateAtStep returns a SingleGrid object with anticipated state of the grid at specified steps
        Input:
              - step for which the SingleGrid should be generated
        Output:
              - SingleGrid object with PassiveAgentPerception objects and ActiveAgentPlanning objects corresponding to chosen step
    '''

    def getGridStateAtStep(self, step=0):
        plan_agent_keys = [uid for uid, a in self.planAgents.items()]
        perception_agent_keys = [
            uid for uid, a in self.perceptionAgents.items()
        ]
        navGridAtStep = SingleGrid(self.navigationGrid.height,
                                   self.navigationGrid.width, False)
        for key in perception_agent_keys:
            navGridAtStep.place_agent(self.perceptionAgents[key],
                                      self.perceptionAgents[key].pos)
        for key in plan_agent_keys:
            for agent in self.planAgents[key]:
                if agent.steps_left == step and navGridAtStep.is_cell_empty(
                        agent.pos):
                    navGridAtStep.place_agent(agent, agent.pos)
        return navGridAtStep

    # This function is used to get a numpy array containing 0 and 1;
    # 0 for empty blocks at step X
    # 1 for any kind of agent at step X
    def getGridAtStepAsNumpyArray(self, step=0):
        plan_agent_keys = [uid for uid, a in self.planAgents.items()]
        perception_agent_keys = [
            uid for uid, a in self.perceptionAgents.items()
        ]
        return_numpy_array = numpy.zeros(
            (self.navigationGrid.width, self.navigationGrid.height),
            dtype='int8')
        for key in perception_agent_keys:
            return_numpy_array[self.perceptionAgents[key].pos[1],
                               self.perceptionAgents[key].pos[0]] = 1
        for agent_key in self.planAgents:
            agent_plans = self.planAgents[agent_key]
            if len(agent_plans) > 0 and len(agent_plans) >= step:
                for plan in agent_plans:
                    if plan.steps_left == step:
                        return_numpy_array[plan.pos[1], plan.pos[0]] = 1
            elif len(agent_plans) == 0:
                active_agent = self.model.schedule.getPassiveAgent(agent_key)
                return_numpy_array[active_agent.pos[1],
                                   active_agent.pos[0]] = 1
            else:
                return_numpy_array[agent_plans[-1].pos[1],
                                   agent_plans[-1].pos[0]] = 1
        return_numpy_array[self.model.farmPos[1], self.model.farmPos[0]] = 1
        return return_numpy_array
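getGridAtStepAsNumpyArray returns a 0/1 occupancy matrix per future step (note the [y, x] index order), which reduces collision checking to array indexing. A sketch of such a check, with made-up occupancy maps and plan:

import numpy as np

occupancy_at_step = {                      # step -> 0/1 grid, as the method returns
    1: np.zeros((4, 4), dtype='int8'),
    2: np.zeros((4, 4), dtype='int8'),
}
occupancy_at_step[2][1, 2] = 1             # another agent claims (x=2, y=1) at step 2

plan = {1: (2, 1), 2: (2, 1)}              # this agent's planned (x, y) per step
collision = any(occupancy_at_step[s][y, x] for s, (x, y) in plan.items())
print(collision)                           # True: the step-2 cell is already claimed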
Example #30
class DiseaseModel(Model):
	"""
	A model with some number of agents.
	highS: Number of agents with high sociability.
	middleS: Number of agents with middle sociability.
	lowS: Number of agents with low sociability.
	width: Width of the grid.
	height: Height of the grid.
	edu_setting: Classrooms and set schedule if true, else random free movement.
	cureProb: Probability of agent getting better.
	cureProbFac: Factor of cureProb getting higher.
	mutateProb: Probability of a disease mutating.
	diseaseRate: Rate at which the disease spreads.
	"""
	def __init__(self, highS, middleS, lowS, width, height, edu_setting=True, cureProb=0.1, cureProbFac=2/1440, mutateProb=0.0050, diseaseRate=0.38):
		super().__init__()
		self.num_agents = highS + middleS + lowS
		self.lowS = lowS
		self.middleS = middleS
		self.highS = highS
		self.initialCureProb = cureProb
		self.cureProbFac = cureProbFac
		self.mutateProb = mutateProb
		self.diseaseRate = diseaseRate
		self.edu_setting = edu_setting
		self.maxDisease = 0  # amount of mutations
		self.counter = 540 # keeps track of timesteps
		self.removed = []
		self.exit = (width-1,floor(height/2))
		# Check if agents fit within grid
		if self.num_agents > width * height:
			raise ValueError("Number of agents exceeds grid capacity.")

		# Create grid with random activation
		self.grid = SingleGrid(width, height, True)
		self.schedule = RandomActivation(self)

		if edu_setting:
			# Create walls
			numberRooms = 3
			self.add_walls(numberRooms, width, height)
			self.midWidthRoom = floor(width / numberRooms / 2)
			self.midHeightRoom = floor(height / numberRooms / 2)

			# Calculate the centers of the 6 rooms
			roomLeftDown = (5 * self.midWidthRoom, self.midHeightRoom)
			roomLeftMid = (3 * self.midWidthRoom, self.midHeightRoom)
			roomLeftUp = (self.midWidthRoom, self.midHeightRoom)
			roomRightDown = (5 * self.midWidthRoom, 5 * self.midHeightRoom)
			roomRightMid = (3 * self.midWidthRoom, 5 * self.midHeightRoom)
			roomRightUp = (self.midWidthRoom, 5 * self.midHeightRoom)

			# Set 3 goals per roster
			self.roster = [[roomLeftDown, roomLeftUp, roomRightMid],
					[roomRightMid, roomLeftDown, roomRightDown],
					[roomRightUp, roomRightDown, roomLeftUp]]

		# Create agents; IDs are assigned consecutively per sociability group
		self.addAgents(lowS, 0, 0)
		self.addAgents(middleS, lowS, 1)
		self.addAgents(highS, lowS + middleS, 2)

		self.datacollector = DataCollector(
			model_reporters={"diseasepercentage": disease_collector},
			agent_reporters={"disease": "disease"})

	def heuristic(self, start, goal):
		"""
		Returns the Manhattan distance.
		start: current location (x,y)
		goal: goal location (x,y)
		"""
		dx = abs(start[0] - goal[0])
		dy = abs(start[1] - goal[1])
		return dx + dy

	def get_vertex_neighbors(self, pos):
		"""
		Returns all neighbors.
		pos: current position
		"""
		n = self.grid.get_neighborhood(pos, moore=False)
		neighbors = []
		for item in n:
			if abs(item[0] - pos[0]) <= 1 and abs(item[1] - pos[1]) <= 1:
				neighbors += [item]
		return neighbors

	def move_cost(self, location):
		"""
		Return the cost of a location.
		"""
		if self.grid.is_cell_empty(location):
			return 1  # normal movement cost
		else:
			return 100  # occupied cells are heavily penalised

	def add_walls(self, n, widthGrid, heightGrid):
		"""
		Add walls in grid.
		n: number of rooms horizontally
		widthGrid: width of the grid
		heightGrid: height of the grid
		"""
		widthRooms = floor(widthGrid/n)
		heightRooms = floor(heightGrid/n)
		widthHall = widthGrid - 2 * widthRooms
		heightHall = heightGrid - 2 * heightRooms
		# Add vertical wall segments between rooms.
		# Each cell gets its own wall agent: placing a single agent in a
		# second cell would overwrite its pos attribute.
		for i in range(n - 1):
			for y in range(heightRooms):
				self.grid.place_agent(wall(self.num_agents, self),
						((i + 1) * widthRooms, y))
				self.grid.place_agent(wall(self.num_agents, self),
						((i + 1) * widthRooms, y + heightRooms + heightHall))
		doorWidth = 2
		# Add horizontal wall segments, leaving a door opening per room
		for x in range(widthGrid):
			if (x % widthRooms) < (widthRooms - doorWidth):
				self.grid.place_agent(wall(self.num_agents, self), (x, heightRooms))
				self.grid.place_agent(wall(self.num_agents, self),
						(x, heightRooms + heightHall - 1))

	def addAgents(self, n, startID, sociability):
		"""
		Add agents with a sociability.
		n: number of agents
		startID: ID of the first added agent
		sociability: sociability of the agents
		"""
		disease_list = np.random.randint(0, 2, n)
		for i in range(n):
			a = DiseaseAgent(i + startID, sociability, self, disease_list[i])
			self.schedule.add(a)
			# Add the agent to a random empty grid cell
			location = self.grid.find_empty()
			self.grid.place_agent(a, location)

	def step(self):
		"""
		Continue one step in simulation.
		"""
		self.counter += 1
		self.datacollector.collect(self)
		self.schedule.step()
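
A minimal run sketch for DiseaseModel (not part of the original snippet); it assumes DiseaseAgent, wall, and disease_collector are importable from the surrounding project, and the agent counts and grid size are illustrative.

# Usage sketch with illustrative values; edu_setting=False skips the walls/roster.
model = DiseaseModel(highS=10, middleS=10, lowS=10, width=20, height=20,
                     edu_setting=False)
for _ in range(100):
    model.step()
df = model.datacollector.get_model_vars_dataframe()  # "diseasepercentage" per step
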
Example #31
class DaisyModel(Model):
    """ "Daisys" grow, when the temperature is right. But they influence temperature themselves via their ability to block a certain amount of sunlight (albedo, indicated by color). They spread and they mutate (changing albedo) and thus adapt to different conditions."""
    def __init__(self, 
                 N, 
                 width, 
                 height, 
                 luminosity, 
                 heat_radius, 
                 mutation_range, 
                 surface_albedo, 
                 daisy_lifespan, 
                 daisy_tmin, 
                 daisy_tmax,
                 lum_model,
                 lum_increase):
        # Setup parameters
        self.dimensions = (width, height)
        self.running = True  # never stop!
        self.num_agents = min([N, (width * height)])  # never more agents than cells
        self.grid = SingleGrid(width, height, torus=True)
        self.schedule = RandomActivation(self)
        # Model parameters
        self.mutation_range = mutation_range  # default: 0.05
        self.luminosity = luminosity  # default: 1.35
        self.heat_radius = heat_radius
        self.surface_albedo = surface_albedo  # default: 0.4
        self.lum_model = lum_model
        self.lum_increase = lum_increase  # tried 0.001
        # Daisy parameters
        self.daisy_lifespan = daisy_lifespan
        self.daisy_tmin = daisy_tmin
        self.daisy_tmax = daisy_tmax

        # To avoid using the same position twice, draw positions from an urn
        position_list = []
        for i in range(width):  # put positions in the urn
            for j in range(height):
                position_list.append((i, j))
        for i in range(self.num_agents):  # draw from the urn
            a = DaisyAgent(i, self,
                           random.uniform(0.1, 0.9),  # random starting albedo
                           self.daisy_lifespan, self.daisy_tmin, self.daisy_tmax)
            self.schedule.add(a)
            pos = random.choice(position_list)
            self.grid.place_agent(a, pos)
            position_list.remove(pos)

        # Data collectors
        self.datacollector = DataCollector(
            model_reporters={"Solar irradiance": get_irradiance,
                             "Population": get_population,
                             "Mean albedo": get_mean_albedo,
                             "Population: North - South": get_north_south_population})

    def step(self):
        # Luminosity may change over time, depending on the chosen model
        if self.lum_model == 'linear increase':
            self.luminosity = linear_increase(self)

        self.datacollector.collect(self)
        self.schedule.step()

    def get_lat(self, pos):
        """ The grid is meant to be a sphere. This gets the latitude: 0.0 and 1.0 are the poles, 0.5 is the equator. """
        return pos[1] / self.dimensions[1]

    def get_GNI(self, pos):
        """ Gives solar irradiance, depending on latitude. """
        return self.luminosity * math.sin(self.get_lat(pos) * math.pi)

    def expand_positionlist(self, pos_list):
        """ expands a list of positions, adding neighboring positions  """
        expanded_list = []
        for i in pos_list:
            expanded_list += self.grid.get_neighborhood(i, moore=True, include_center=False)
        return list(set(expanded_list))

    def get_local_heat(self, pos):
        """ Local heat around pos: mean absorbed irradiance over the neighborhood, from pole (lower border) to pole (upper border). The model is a torus! """
        neighborhood = self.grid.get_neighborhood(pos, moore=True, include_center=True)

        if self.heat_radius > 1:  # for larger heat radii, expand the position list
            for i in range(self.heat_radius):
                neighborhood = self.expand_positionlist(neighborhood)

        heat = []
        for i in neighborhood:
            if self.grid.is_cell_empty(i):  # empty cell: surface albedo
                heat.append(self.get_GNI(pos) * (1 - self.surface_albedo))
            else:
                inhabitant = self.grid.get_cell_list_contents(i)[0]
                heat.append(self.get_GNI(pos) * (1 - inhabitant.albedo))  # cell with a daisy
        return sum(heat) / len(neighborhood)
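
A minimal run sketch for DaisyModel (not part of the original snippet); DaisyAgent and the reporter functions are assumed from the surrounding module, and the daisy_lifespan/daisy_tmin/daisy_tmax values are illustrative guesses rather than documented defaults.

# Usage sketch; luminosity and albedo use the defaults noted in the comments above.
model = DaisyModel(N=100, width=20, height=20, luminosity=1.35,
                   heat_radius=1, mutation_range=0.05, surface_albedo=0.4,
                   daisy_lifespan=25, daisy_tmin=5, daisy_tmax=40,
                   lum_model='constant', lum_increase=0.001)
for _ in range(50):
    model.step()
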
Example #32
class Foraging(Model):

    number_of_bean = 0
    number_of_corn = 0
    number_of_soy = 0

    def __init__(self,
                 width=50,
                 height=50,
                 torus=True,
                 num_bug=50,
                 seed=42,
                 strategy=None):
        super().__init__(seed=seed)
        self.number_of_bug = num_bug
        if strategy not in ["stick", "switch"]:
            raise TypeError("'strategy' must be one of {stick, switch}")
        self.strategy = strategy

        self.grid = SingleGrid(width, height, torus)
        self.schedule = RandomActivation(self)
        data = {
            "Bean": lambda m: m.number_of_bean,
            "Corn": lambda m: m.number_of_corn,
            "Soy": lambda m: m.number_of_soy,
            "Bug": lambda m: m.number_of_bug,
        }
        self.datacollector = DataCollector(data)

        # create foods
        self._populate(Bean)
        self._populate(Corn)
        self._populate(Soy)

        # create bugs
        for i in range(self.number_of_bug):
            pos = self.grid.find_empty()
            bug = Bug(i, self)
            bug.strategy = self.strategy
            self.grid.place_agent(bug, pos)
            self.schedule.add(bug)

    def step(self):
        self.schedule.step()
        self.datacollector.collect(self)

        if not self.grid.exists_empty_cells():
            self.running = False

    def _populate(self, food_type):
        prefix = "number_of_{}"

        counter = 0
        while counter < food_type.density * (self.grid.width *
                                             self.grid.height):
            pos = self.grid.find_empty()
            food = food_type(counter, self)
            self.grid.place_agent(food, pos)
            self.schedule.add(food)
            food_name = food_type.__name__.lower()
            attr_name = prefix.format(food_name)
            val = getattr(self, attr_name)
            val += 1
            setattr(self, attr_name, val)
            counter += 1
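
A minimal run sketch for Foraging (not part of the original snippet); Bean, Corn, Soy, and Bug are assumed from the surrounding module. Because the model passes seed to the Model base class, runs are reproducible; the base class also sets running = True.

# Usage sketch: run until the grid fills up (the model then sets running=False).
model = Foraging(num_bug=50, strategy="stick", seed=42)
for _ in range(200):
    if not model.running:
        break
    model.step()
results = model.datacollector.get_model_vars_dataframe()
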
Example #33
class Factory(Model):
    """The Factory model that maintains the state of the whole factory."""

    def __init__(self, grid_w, grid_h, n_robots):
        """Initialize factory."""
        # Initialize.
        self.orders = 0
        self.n_robots = n_robots
        self.scheduler = RandomActivation(self)
        self.grid = SingleGrid(grid_w, grid_h, torus=False)
        self.init_astar()
        # Initialize departments.
        self.machine = Machine("machine", self, self.grid.find_empty())
        self.store = Store("store", self, self.grid.find_empty())
        self.packaging = Packaging("packaging", self, self.grid.find_empty())
        self.dept_positions = [self.machine.pos, self.store.pos, self.packaging.pos]
        # Initialize robots.
        for i in range(self.n_robots):
            # Create robot.
            r = Robot(i, self)
            # Initialize random location.
            pos = self.grid.find_empty()
            self.grid.place_agent(r, pos)
            # Register with scheduler.
            self.scheduler.add(r)
        # Initialize visualization.
        plt.ion()

    def add_order(self):
        """Increment the number of orders to the factory."""
        self.orders += 1

    def step(self):
        """Advance the factory by one step."""
        # Step through factory. Check for orders.
        if self.orders > 0:
            self.store.orders += 1
            self.orders -= 1
        # Step through departments.
        self.store.step()
        self.machine.step()
        self.packaging.step()
        # Step through robots.
        self.scheduler.step()
        # Visualize.
        self.visualize()

    def init_astar(self):
        """Initialize A* resources once so they don't have to be recalculated for each robot.

        Initialized in such a way that:
            * Diagonal paths are allowed.
            * The calculated path takes into account all obstacles in the grid.
        """
        def get_empty_neighborhood(pos):
            """A sub function to calculate empty neighbors of a point for a-star."""
            neighbors = self.grid.get_neighborhood(pos=pos, moore=True)
            return [n for n in neighbors if self.grid.is_cell_empty(n)]
        # Initialize a path finder object once for the entire factory.
        self.path_finder = astar.pathfinder(neighbors=get_empty_neighborhood,
                                            distance=astar.absolute_distance,
                                            cost=astar.fixed_cost(1))

    def find_nearest_aimless_robot(self, pos):
        """Find the nearest aimless robot to a given position in the factory."""
        def is_aimless(robot):
            """Check if the robot satisfies the aimless condition (no destination)."""
            return robot.destination is None

        aimless_robots = [robot for robot in self.scheduler.agents if is_aimless(robot)]
        if len(aimless_robots) != 0:
            robot_distances = [astar.absolute_distance(pos, robot.pos) for robot in aimless_robots]
            nearest_index = np.argmin(robot_distances)
            return aimless_robots[nearest_index]
        else:
            return None

    def find_robot_at_position(self, pos):
        """Find the robot at a given location in the factory, if any."""
        for robot in self.scheduler.agents:
            if robot.pos == pos:
                return robot
        return None

    def find_next_position_towards_destination(self, curr_pos, dest_pos):
        """Find the next empty position to move in the direction of the destination."""
        n_steps, path = self.path_finder(curr_pos, dest_pos)  # Handles non-empty locations.
        # NOTE: We cannot find a valid path to the destination when:
        #   1) The destination has an another robot located inside it, which also occurs when curr_pos and
        #       dest_pos are the same.
        #   2) The path is entirely blocked.
        #   In these cases we return the next position to be the curr_pos, in order to wait until things
        #   clear up.
        if n_steps is None or n_steps <= 0:  # No valid path to destination
            next_pos = curr_pos
            print("[MOVE] Warning: No path to destination from {} --> {}".format(curr_pos, dest_pos))
        # This means there's a valid path to the destination.
        else:
            # index 0, is the curr_pos, index 1 is the next position.
            next_pos = path[1]
        return next_pos

    def find_next_position_for_random_walk(self, curr_pos):
        """Find a valid location for a robot to just randomly walk into."""
        def is_pos_empty(pos):
            """A sub function checking whether a cell is free for random walking."""
            return self.grid.is_cell_empty(pos) and pos not in self.dept_positions
        neighborhood = self.grid.get_neighborhood(curr_pos, moore=True)
        empty_neighborhood = [n for n in neighborhood if is_pos_empty(n)]
        if len(empty_neighborhood) > 0:
            next_index = np.random.randint(len(empty_neighborhood))
            next_pos = empty_neighborhood[next_index]
        else:
            next_pos = curr_pos
        return next_pos

    def visualize(self):
        """A chess board type visualization."""
        def heatmap(a):
            cMap = ListedColormap(['grey', 'black', 'green', 'orange', 'red', 'blue'])
            sns.heatmap(a, vmin=0, vmax=6, cmap=cMap, linewidths=1)
            plt.pause(0.15)
            plt.clf()

        g = np.zeros((self.grid.height, self.grid.width), dtype=int)
        # Grid positions are (x, y); numpy indexing is [row, col] = [y, x]
        g[self.store.pos[1], self.store.pos[0]] = 3
        g[self.machine.pos[1], self.machine.pos[0]] = 4
        g[self.packaging.pos[1], self.packaging.pos[0]] = 5
        for robot in self.scheduler.agents:
            if robot.destination is None:
                g[robot.pos[1], robot.pos[0]] = 1
            else:
                g[robot.pos[1], robot.pos[0]] = 2

        heatmap(g)
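
A minimal driver sketch for Factory (not part of the original snippet); Machine, Store, Packaging, Robot, and the astar helpers are assumed from the surrounding project. Note that __init__ enables matplotlib interactive mode, so every step redraws the heatmap.

# Usage sketch: three robots, one order pushed through the pipeline.
factory = Factory(grid_w=10, grid_h=10, n_robots=3)
factory.add_order()
for _ in range(20):
    factory.step()  # each step also redraws the seaborn heatmap
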
Example #34
class EvacuationModel(Model):
    """
    This is a simulation of a crowd evacuation from a building.
    Several variables are taken into account: the knowledge of the emergency exits, the age and weight of the agents
    and the presence of stewards that can guide agents toward the emergency exits.
    Agents have different strategies to escape the building such as taking the shortest path to an exit or a random one.

    The goal is to study which combinations of agent types are more likely to escape the building and save themselves,
    and how the number of casualties varies with respect to the different variables.
    """
    def __init__(self,
                 N=10,
                 K=0,
                 width=50,
                 height=50,
                 fire_x=1,
                 fire_y=1,
                 civil_info_exchange=True):
        self.num_civilians = N
        self.num_stewards = K
        self.civil_info_exchange = civil_info_exchange
        self.fire_initial_pos = (fire_x, fire_y)
        self.warning_UI = ""
        self.agents_alive = N + K  # Agents alive and inside the building
        self.agents_saved = []  # Agents that managed to get out
        self.agents_killed = []  # Agents that perished during the evacuation
        self.grid = SingleGrid(width, height, False)  # SingleGrid expects (width, height)
        self.graph = None  # General graph representing walkable terrain
        self.schedule = RandomActivation(self)  # Every tick, agents move in a different random order
        # Create exits
        self.pos_exits = [(0, 5), (0, 25), (0, 45)]
        for i in range(3):
            self.pos_exits.append((self.grid.width - 1, 14 + i))

        self.draw_environment(self.pos_exits)
        self.graph = path_finding.create_graph(self)
        # Define data collector
        model_collector = {
            "Agents killed": lambda killed: len(self.agents_killed),
            "Agents saved": lambda saved: len(self.agents_saved)
        }
        for exit_pos in self.pos_exits:
            title = "Exit {}".format(exit_pos)
            model_collector[title] = partial(count_agents_saved, exit_pos)
        self.datacollector = DataCollector(model_reporters=model_collector)
        # Create fire
        # for pos in self.fire_initial_pos:  # Only 1 source of fire since we are setting it from UI
        x, y = self.fire_initial_pos
        if not self.is_inside_square((x, y), (0, 29),
                                     (25, 39)) and not self.is_inside_square(
                                         (x, y), (0, 10), (25, 20)):
            pos = self.fire_initial_pos
        else:
            pos = (1, 1)
            self.warning_UI = "<b>WARNING:</b> Sorry but the position of the fire is outside of the building, " \
                              "change the setting and click reset simulation."
        fire_agent = FireAgent(pos, self)
        self.schedule.add(fire_agent)
        self.grid.place_agent(fire_agent, pos)
        # Create civilian agents
        for i in range(self.num_civilians):

            # a civilian agent will know at least the main entrance to the building
            known_exits = self.pos_exits[-3:]
            a = CivilianAgent(i, self, known_exits)

            self.schedule.add(a)
            # Add the agent to a random grid cell

            while True:
                # pick the random coordinate
                x = self.random.randrange(1, self.grid.width - 1)
                y = self.random.randrange(1, self.grid.height - 1)
                # check if the point is empty and inside of the building
                if self.grid.is_cell_empty((x, y)) and not self.is_inside_square((x, y), (0, 29), (25, 39)) \
                        and not self.is_inside_square((x, y), (0, 10), (25, 20)):
                    break

            self.grid.place_agent(a, (x, y))

        # Create steward agents
        for i in range(self.num_civilians,
                       self.num_civilians + self.num_stewards):

            # a steward agent will know all exits.
            known_exits = self.pos_exits
            a = StewardAgent(i, self, known_exits)

            self.schedule.add(a)
            # Add the agent to a random grid cell

            while True:
                # pick the random coordinate
                x = self.random.randrange(1, self.grid.width - 1)
                y = self.random.randrange(1, self.grid.height - 1)
                # check if the point is empty and inside of the building
                if self.grid.is_cell_empty((x, y)) and not self.is_inside_square((x, y), (0, 29), (25, 39)) \
                        and not self.is_inside_square((x, y), (0, 10), (25, 20)):
                    break

            self.grid.place_agent(a, (x, y))

        self.running = True  # Set this to false when we want to finish simulation (e.g. all agents are out of building)
        self.datacollector.collect(self)

    @staticmethod
    def is_inside_square(point, bottom_left, top_right):
        return (bottom_left[0] <= point[0] <= top_right[0]
                and bottom_left[1] <= point[1] <= top_right[1])

    def step(self):
        self.schedule.step()
        # collect data
        self.datacollector.collect(self)

        # Halt if no more agents in the building
        if self.count_agents(self) == 0:
            self.running = False

    def remove_agent(self, agent, reason, **kwargs):
        """
        Removes an agent from the simulation, recording it as saved or killed
        depending on the reason.
        Args:
            agent (Agent): the agent to remove
            reason (Reasons): why the agent is removed

        Returns:
            None
        """
        if reason == Reasons.SAVED:
            self.agents_saved.append(agent)
        elif reason == Reasons.KILLED_BY_FIRE:
            self.agents_killed.append(agent)

        self.agents_alive -= 1
        self.schedule.remove(agent)
        self.grid.remove_agent(agent)

    def draw_environment(self, exits=None):
        length_E = int(self.grid.height / 5)  # length of the vertical segments of the E
        depth_E = int(self.grid.width / 2)  # length of the horizontal segments of the E
        for i in range(3):
            start = max(0, 2 * i * length_E)
            self.draw_wall((0, start), (0, start + length_E - 1))
        for i in range(2):
            start = 2 * i * length_E + length_E
            self.draw_wall((depth_E, start), (depth_E, start + length_E - 1))
        # Horizontal lines of the E (BB)
        aux_y_coord = [
            length_E, 2 * length_E, 3 * length_E - 1, 4 * length_E - 1
        ]
        for y in aux_y_coord:
            self.draw_wall((0, y), (depth_E, y))
        top_left_corner = (0, self.grid.height - 1)
        top_right_corner = (self.grid.width - 1, self.grid.height - 1)
        bottom_right_corner = (self.grid.width - 1, 0)
        # Draw long contour lines E
        self.draw_wall((0, 0), bottom_right_corner)
        self.draw_wall(top_left_corner, top_right_corner)
        self.draw_wall(bottom_right_corner, top_right_corner)

        # Draw exits
        self.draw_exits(exits)

    def draw_wall(self, start, end):
        """
        Draws a line that goes from start point to end point.

        Args:
            start (tuple): Coordinates of the line's starting point
            end (tuple): Coordinates of the line's end point

        Returns:
            None
        """
        diff_x, diff_y = np.subtract(end, start)
        wall_coordinates = np.asarray(start)

        if self.grid.is_cell_empty(wall_coordinates.tolist()):
            w = WallAgent(wall_coordinates.tolist(), self)
            self.grid.place_agent(w, wall_coordinates.tolist())

        while diff_x != 0 or diff_y != 0:
            if abs(diff_x) == abs(diff_y):
                # diagonal wall
                wall_coordinates[0] += np.sign(diff_x)
                wall_coordinates[1] += np.sign(diff_y)
                diff_x -= np.sign(diff_x)
                diff_y -= np.sign(diff_y)
            elif abs(diff_x) < abs(diff_y):
                # wall built in y dimension
                wall_coordinates[1] += np.sign(diff_y)
                diff_y -= np.sign(diff_y)
            else:
                # wall built in x dimension
                wall_coordinates[0] += np.sign(diff_x)
                diff_x -= np.sign(diff_x)
            if self.grid.is_cell_empty(wall_coordinates.tolist()):
                w = WallAgent(wall_coordinates.tolist(), self)
                self.grid.place_agent(w, wall_coordinates.tolist())

    def draw_exits(self, exits_list):
        for ext in exits_list:
            e = ExitAgent(ext, self)
            if not self.grid.is_cell_empty(ext):
                # Only walls should exist in the grid at this time, so no need to remove it from scheduler
                agent = self.grid.get_cell_list_contents(ext)
                self.grid.remove_agent(agent[0])
            # Place exit
            self.schedule.add(e)
            self.grid.place_agent(e, ext)

    def spread_fire(self, fire_agent):
        fire_neighbors = self.grid.get_neighborhood(fire_agent.pos,
                                                    moore=True,
                                                    include_center=False)
        for grid_space in fire_neighbors:
            if self.grid.is_cell_empty(grid_space):
                # Create new fire agent and add it to grid and scheduler
                new_fire_agent = FireAgent(grid_space, self)
                self.schedule.add(new_fire_agent)
                self.grid.place_agent(new_fire_agent, grid_space)
            else:
                # If human agents, eliminate them and spread anyway
                agent = self.grid.get_cell_list_contents(grid_space)[0]
                if isinstance(agent, (CivilianAgent, StewardAgent)):
                    new_fire_agent = FireAgent(grid_space, self)
                    self.remove_agent(agent, Reasons.KILLED_BY_FIRE)
                    self.schedule.add(new_fire_agent)
                    self.grid.place_agent(new_fire_agent, grid_space)

    @staticmethod
    def count_agents(model):
        """
        Helper method to count agents alive and still in the building.
        """
        count = 0
        for agent in model.schedule.agents:
            if type(agent) in (CivilianAgent, StewardAgent):
                count += 1
        return count
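
A minimal run sketch for EvacuationModel (not part of the original snippet); CivilianAgent, StewardAgent, FireAgent, WallAgent, ExitAgent, Reasons, and path_finding are assumed from the surrounding project.

# Usage sketch: 20 civilians, 2 stewards, fire starting near the corner.
model = EvacuationModel(N=20, K=2, fire_x=1, fire_y=1)
for _ in range(500):  # capped, in case the simulation does not converge
    if not model.running:
        break
    model.step()
print(len(model.agents_saved), "saved,", len(model.agents_killed), "killed")
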
Example #35
class PDModel(Model):

    schedule_types = {"Sequential": BaseScheduler,
                      "Random": RandomActivation,
                      "Simultaneous": SimultaneousActivation}

    def __init__(self, height=5, width=5,    # even numbers are checkerboard fair
                 number_of_agents=25,
                 schedule_type="Simultaneous",
                 rounds=2500,
                 collect_data=True,
                 agent_printing=False,
                 randspawn=False,

                 kNN_spawn=False,
                 kNN_training=False,
                 kNN_testing=False,
                 DD=1,
                 CC=1.5,
                 CD=-2,
                 DC=2,
                 simplified_payoffs=False,
                 b=0,
                 c=0,
                 batch_iterations=2,  # wait what is this doing again
                 learning_rate=1,
                 theta=0.015,
                 init_ppD = 0.5,
                 k=11,
                 msize=1,  # the n of obj in short memory, e.g. 2 =[('C', 'C')] or [('C', 'C'), ('C', 'D')] if paired
                 memoryPaired=False,  # set to True for states/memory items as paired outcomes, e.g. ('C', 'D')
                 learnFrom="them",  # options being 'me', 'them', 'us', for my own history, opponent history and paired
                 chosenOne=7,

                 sarsa_spawn=True,  # should mean checkerboard
                 sarsa_training=True,          #TODO: THESE VARIABLES HAVE BEEN TURNED OFF FOR MOODY SARSA TESTING
                 sarsa_testing=True,
                 sarsa_distro=0,
                 sarsa_oppo="LEARN",
                 epsilon=0.99,
                 alpha=0.1,
                 gamma=0.95,
                 export_q=True,
                 alpha_floor=0.01,
                 epsilon_floor=0.05,

                 moody_sarsa_spawn=False,  # should mean checkerboard
                 moody_sarsa_training=False,
                 moody_sarsa_testing=False,
                 moody_sarsa_distro=0,
                 moody_sarsa_oppo="TFT",
                 moody_epsilon=0.9,
                 moody_alpha=0.1,
                 moody_gamma=0.95,
                 moody_export_q=True,
                 moody_alpha_floor=0.01,
                 moody_epsilon_floor=0.01,
                 moody_msize=20,  # the n of obj in short memory, e.g. 2 =[('C', 'C')] or [('C', 'C'), ('C', 'D')] if paired
                 moody_memoryPaired=False,  # set to True for states/memory items as paired outcomes, e.g. ('C', 'D')
                 moody_learnFrom="them",  # options being 'me', 'them', 'us', for my own history, opponent history and paired
                 moody_chosenOne=6,
                 moody_statemode='stateless',
                 moody_MA=1,
                 moody_opponents=True,
                 moody_startmood=50,
                 startingBehav='C',

                 sensitivity=0,
                 sensitive_agents=[],
                 ):

        # ---------- Model Parameters --------
        self.height = height
        self.width = width
        self.number_of_agents = number_of_agents
        self.step_count = 0
        self.DD = DD
        self.CC = CC
        self.CD = CD
        self.DC = DC
        self.b = b
        self.c = c
        self.batch_iterations = batch_iterations
        self.theta = theta
        self.init_ppD = init_ppD
        self.learning_rate = learning_rate
        self.simplified_payoffs = simplified_payoffs
        self.rounds = rounds
        self.randspawn = randspawn
        self.iteration_n = 0
        self.new_filenumber = 0
        self.kNN_spawn = kNN_spawn
        self.kNN_testing = kNN_testing
        self.kNN_training = kNN_training
        self.kNN_accuracy = 0
        self.k = k
        self.msize = msize
        self.memoryPaired = memoryPaired

        self.sarsa_spawn = sarsa_spawn
        self.sarsa_training = sarsa_training
        self.sarsa_testing = sarsa_testing
        self.sarsa_distro = sarsa_distro
        self.sarsa_oppo = sarsa_oppo
        self.alpha = alpha
        self.gamma = gamma
        self.epsilon = epsilon
        self.export_q = export_q
        self.learnFrom = learnFrom
        self.chosenOne = chosenOne
        self.alpha_floor = alpha_floor
        self.epsilon_floor = epsilon_floor

        self.moody_msize = moody_msize
        self.moody_memoryPaired = moody_memoryPaired

        self.moody_sarsa_spawn = moody_sarsa_spawn
        self.moody_sarsa_training = moody_sarsa_training
        self.moody_sarsa_testing = moody_sarsa_testing
        self.moody_sarsa_distro = moody_sarsa_distro
        self.moody_sarsa_oppo = moody_sarsa_oppo
        self.moody_alpha = moody_alpha
        self.moody_gamma = moody_gamma
        self.moody_epsilon = moody_epsilon
        self.moody_export_q = moody_export_q
        self.moody_learnFrom = moody_learnFrom
        self.moody_chosenOne = moody_chosenOne
        self.moody_alpha_floor = moody_alpha_floor
        self.moody_epsilon_floor = moody_epsilon_floor
        self.moody_statemode = moody_statemode
        self.moody_MA = moody_MA
        self.moody_opponents = moody_opponents
        self.moody_startmood = moody_startmood

        self.startingBehav = startingBehav
        self.sensitivity = sensitivity
        self.sensitive_agents = sensitive_agents

        self.sensitive_agents = [(0,)]  # NOTE: hard-coded override of the sensitive_agents argument
        self.coop_index = (self.CC - self.DD) / (self.DC - self.CD)

        """ This section here changes the spawn locations if the number of agents is changed"""
        if self.number_of_agents == 2:
            self.height = 2
            self.width = 2
        if self.number_of_agents == 4:
            self.height = 2
            self.width = 2
        if self.number_of_agents == 9:
            self.height = 3
            self.width = 3
        if self.number_of_agents == 16:
            self.height = 4
            self.width = 4
        if self.number_of_agents == 25:
            self.height = 5
            self.width = 5
        if self.number_of_agents == 36:
            self.height = 6
            self.width = 6
        if self.number_of_agents == 49:
            self.height = 7
            self.width = 7
        if self.number_of_agents == 64:
            self.height = 8
            self.width = 8

        if self.memoryPaired:
            self.learnFrom = 'us'
            if self.msize > 4:
                self.msize = 4

        # TODO: Add opponents to the oppoList for if opponent 'MIXED' is used
        self.oppoList = ["TFT",
                         "LEARN",
                         "MOODYLEARN",
                         "ANGEL",
                         "DEVIL",
                         "VPP",
                         "RANDOM",
                         "WSLS",
                         # "iWSLS",
                         ]

        # kNN Spawning
        if self.kNN_training:
            self.kNN_strategies = {1: "DEVIL", 3: "DEVIL", 5: "DEVIL", 6: "DEVIL", 16: "DEVIL", 18: "DEVIL",
                                   20: "DEVIL", 29: "DEVIL", 31: "DEVIL", 33: "DEVIL", 34: "DEVIL", 44: "DEVIL",
                                   46: "DEVIL", 2: "ANGEL", 4: "ANGEL", 14: "ANGEL", 15: "ANGEL", 17: "ANGEL",
                                   19: "ANGEL", 28: "ANGEL", 30: "ANGEL", 32: "ANGEL", 42: "ANGEL", 43: "ANGEL",
                                   45: "ANGEL", 47: "ANGEL", 8: "VPP", 11: "VPP", 23: "VPP", 26: "VPP", 35: "VPP",
                                   38: "VPP", 41: "VPP", 7: "WSLS", 10: "WSLS", 13: "WSLS", 22: "WSLS", 25: "WSLS",
                                   37: "WSLS", 40: "WSLS", 9: "TFT", 12: "TFT", 21: "TFT", 24: "TFT", 27: "TFT",
                                   36: "TFT", 39: "TFT"}
        elif self.kNN_testing:
            self.kNN_strategies = {2: "DEVIL", 4: "DEVIL", 14: "DEVIL", 15: "DEVIL", 17: "DEVIL", 19: "DEVIL",
                                   28: "DEVIL", 30: "DEVIL", 32: "DEVIL", 42: "DEVIL", 43: "DEVIL", 45: "DEVIL",
                                   47: "DEVIL", 1: "ANGEL", 3: "ANGEL", 5: "ANGEL", 6: "ANGEL", 16: "ANGEL",
                                   18: "ANGEL", 20: "ANGEL", 29: "ANGEL", 31: "ANGEL", 33: "ANGEL", 34: "ANGEL",
                                   44: "ANGEL", 46: "ANGEL", 7: "TFT", 10: "TFT", 13: "TFT", 23: "TFT", 26: "TFT",
                                   36: "TFT", 39: "TFT", 8: "VPP", 11: "VPP", 21: "VPP", 24: "VPP", 27: "VPP",
                                   37: "VPP", 40: "VPP", 9: "WSLS", 12: "WSLS", 22: "WSLS", 25: "WSLS",
                                   35: "WSLS", 38: "WSLS", 41: "WSLS"}

        with open('filename_number.csv', 'r') as f:
            reader = csv.reader(f)  # pass the file to our csv reader
            rows = []
            for row in reader:
                rows.append(row)

            filenumber = rows[0]
            filenumber = filenumber[0]
            # filenumber = filenumber[3:]
            filenumber = int(filenumber)
            self.iteration_n = filenumber
            self.new_filenumber = [filenumber + 1]

        with open('filename_number.csv', 'w') as f:
            # Overwrite the old file with the modified rows
            writer = csv.writer(f)
            writer.writerow(self.new_filenumber)

        # self.iteration_n needs to be pulled from a csv file and then deleted from said csv file
        if self.sarsa_spawn:
            concatenator = ('wave3_neutralpayoff_%s_%s_%s_sarsa_no_%s' % (self.msize, self.learnFrom, self.sarsa_oppo, self.iteration_n), "a")
        elif self.moody_sarsa_spawn:
            if type(self.moody_sarsa_oppo) == list:
                concatenator = ('csvfix_mood%s_DC_%s_%sx%s_mA_%s_%s_%s_msarsa_no_%s' % (
                    self.moody_startmood, self.DC, self.width, self.width, self.moody_MA,
                    self.moody_statemode, "mixedOppo", self.iteration_n), "a")
            else:
                concatenator = ('startwith%s_mood%s_eps_%s_%sx%s_mA_%s_%s_%s_msarsa_no_%s' % (
                    self.startingBehav, self.moody_startmood, self.moody_epsilon,
                    self.width, self.width, self.moody_MA, self.moody_statemode,
                    self.moody_sarsa_oppo, self.iteration_n), "a")
        else:
            concatenator = ('xxx_nosarsa_no_%s' % (self.iteration_n), "a")
        self.exp_n = concatenator[0]

        # Base name for output files; extensions like '.csv' are appended where files are written
        self.filename = '%s model output' % self.exp_n

        self.schedule_type = schedule_type
        if not self.simplified_payoffs:
            self.payoffs = {("C", "C"): self.CC,
                            ("C", "D"): self.CD,
                            ("D", "C"): self.DC,
                            ("D", "D"): self.DD}
        elif self.simplified_payoffs:
            self.payoffs = {("C", "C"): self.b - abs(self.c),
                            ("C", "D"): - abs(self.c),
                            ("D", "C"): self.c,
                            ("D", "D"): 0}

        self.collect_data = collect_data
        self.agent_printing = agent_printing
        self.agent_list = []

        # Model Functions
        self.schedule = self.schedule_types[self.schedule_type](self)
        self.grid = SingleGrid(self.width, self.height, torus=True)  # SingleGrid expects (width, height)

        # Find list of empty cells
        self.coordinates = [(x, y) for x in range(self.width) for y in range(self.height)]
        # print(self.coordinates)
        self.experimental_coordinates = [
            (3, 8), (4, 8), (5, 8), (6, 8), (7, 8),
            (1, 7), (2, 7), (3, 7), (4, 7), (5, 7), (6, 7), (7, 7), (8, 7), (9, 7),
            (3, 6), (4, 6), (5, 6), (6, 6), (7, 6),
            (1, 5), (2, 5), (3, 5), (4, 5), (5, 5), (6, 5), (7, 5), (8, 5), (9, 5),
            (3, 4), (4, 4), (5, 4), (6, 4), (7, 4),
            (1, 3), (2, 3), (3, 3), (4, 3), (5, 3), (6, 3), (7, 3), (8, 3), (9, 3),
            (3, 2), (4, 2), (5, 2), (6, 2), (7, 2)]

        self.agentIDs = list(range(1, (number_of_agents + 1)))

        # ----- Storage -----
        self.agents_cooperating = 0
        self.agents_defecting = 0
        self.number_of_defects = 0
        self.number_of_coops = 0
        self.coops_utility = 0
        self.defects_utility = 0
        self.highest_score = 0

        self.datacollector = DataCollector(model_reporters={
            "Cooperations": get_num_coop_agents,
            "Defections": get_num_defect_agents,
            "Percentage Cooperations": get_per_coop_agents,
            "Percentage Defections": get_per_defect_agents,
            "Average Mood": get_av_mood,
            "Cooperators": get_cooperators,
            "Defectors": get_defectors,
            "TFT Performance": get_tft_performance,
            "TFT Cooperations": get_tft_cooperations,
            "VPP Performance": get_vpp_performance,
            "VPP Cooperations": get_vpp_cooperations,
            "WSLS Performance": get_wsls_performance,
            "WSLS Cooperations": get_wsls_cooperations,
            "iWSLS Performance": get_iwsls_performance,
            "iWSLS Cooperations": get_iwsls_cooperations,
            "LEARN Performance": get_learn_performance,
            "MutualCooperations": get_learn_mutC,
            "LEARN Cooperations": get_learn_cooperations,
            "moodyLEARN Performance": get_moodylearn_performance,
            "moodyMutualCooperations": get_moodylearn_mutC,
            "moodyLEARN Cooperations": get_moodylearn_cooperations,
            "Model Params": track_params,
        },
            agent_reporters={
                "Cooperations": lambda x: x.number_of_c,
                "Defections": lambda x: x.number_of_d
            })

        self.memory_states = statemaker.get_memory_states([0, 'C', 'D'], self.msize, self.memoryPaired)
        self.moody_memory_states = statemaker_moody.get_memory_states([0, 'C', 'D'], self.moody_statemode, self.number_of_agents)
        # self.training_data = []
        self.training_data = pickle.load(open("training_data_5.p", "rb"))
        self.state_values = self.state_evaluation(self.memory_states)
        self.moody_state_values = self.moody_state_evaluation(self.moody_memory_states)

        self.firstgame = self.first_game_check()
        self.agent_ppds = {}
        self.set_ppds()
        self.agent_ppds = pickle.load(open("agent_ppds.p", "rb"))
        # Reload the larger training set, replacing the one loaded above
        self.training_data = pickle.load(open("training_data_50.p", "rb"))

        if not kNN_spawn:
            self.make_agents()
        elif kNN_spawn:
            self.make_set_agents()
        self.running = True
        self.datacollector.collect(self)

    def first_game_check(self):
        try:
            success = pickle.load(open("firstgame.p", "rb"))
            if success == 1:
                return False
        except (OSError, IOError):
            # First run: create the marker file and report that this is the first game
            pickle.dump(1, open("firstgame.p", "wb"))
            return True

    def output_data(self, steptime):
        with open('{}.csv'.format(self.filename), 'a', newline='') as csvfile:
            fieldnames = ['n agents', 'stepcount', 'steptime', 'cooperating', 'defecting', 'coop total', 'defect total',]

            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)

            if self.step_count == 1:
                writer.writeheader()
            writer.writerow({'n agents': self.number_of_agents, 'stepcount': self.step_count,
                             'steptime': steptime, 'cooperating': self.agents_cooperating,
                             'defecting': self.agents_defecting,
                             'coop total': self.number_of_coops,
                             'defect total': self.number_of_defects,
                             })
        if self.kNN_testing:
            with open('{}_kNN.csv'.format(self.filename), 'a', newline='') as csvfile:
                fieldnames = ['k', 'accuracy',]

                writer = csv.DictWriter(csvfile, fieldnames=fieldnames)

                kNN_accuracy_percent = ((self.kNN_accuracy / 24) * 100)

                if self.step_count == 1:
                    writer.writeheader()
                writer.writerow({'k': self.k, 'accuracy': kNN_accuracy_percent,
                                 })

                self.kNN_accuracy = 0  # Hopefully resetting this value here is fine

        # with open('{} agent strategies.csv'.format(self.filename), 'a', newline='') as csvfile:
        #     fieldnames = ['stepcount', 'agent_strategy']
        #
        #     writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        #
        #     if self.step_count == 1:
        #         writer.writeheader()
        #     writer.writerow({'stepcount': self.step_count, 'agent_strategy': self.agent_list})

    # def get_memory_states(self, behaviours):
    #     """ Get a list of all possible states given n behaviour options and
    #         r spaces in the agent's memory - CURRENTLY: 7  """
    #     options = behaviours
    #     permutations = []
    #     for i1 in options:
    #         for i2 in options:
    #             for i3 in options:
    #                 for i4 in options:
    #                     for i5 in options:
    #                         for i6 in options:
    #                             for i7 in options:
    #                                 permutations.append([i1, i2, i3, i4, i5, i6, i7])
    #
    #     # to generate the < step 7 states
    #     permutations.append([0, 0, 0, 0, 0, 0, 0])
    #     initial_state1 = [0, 0, 0, 0, 0, 0]
    #     initial_state2 = [0, 0, 0, 0, 0]
    #     initial_state3 = [0, 0, 0, 0]
    #     initial_state4 = [0, 0, 0]
    #     initial_state5 = [0, 0]
    #     initial_state6 = [0]
    #
    #     for ii1 in options:
    #         new = initial_state1 + [ii1]
    #         permutations.append(new)
    #     for ii2 in options:
    #         for iii2 in options:
    #             new = initial_state2 + [ii2] + [iii2]
    #             permutations.append(new)
    #     for ii3 in options:
    #         for iii3 in options:
    #             for iiii3 in options:
    #                 new = initial_state3 + [ii3] + [iii3] + [iiii3]
    #                 permutations.append(new)
    #     for ii4 in options:
    #         for iii4 in options:
    #             for iiii4 in options:
    #                 for iiiii4 in options:
    #                     new = initial_state4 + [ii4] + [iii4] + [iiii4] + [iiiii4]
    #                     permutations.append(new)
    #     for ii5 in options:
    #         for iii5 in options:
    #             for iiii5 in options:
    #                 for iiiii5 in options:
    #                     for iiiiii5 in options:
    #                         new = initial_state5 + [ii5] + [iii5] + [iiii5] + [iiiii5] + [iiiiii5]
    #                         permutations.append(new)
    #     for ii6 in options:
    #         for iii6 in options:
    #             for iiii6 in options:
    #                 for iiiii6 in options:
    #                     for iiiiii6 in options:
    #                         for iiiiiii6 in options:
    #                             new = initial_state6 + [ii6] + [iii6] + [iiii6] + [iiiii6] + [iiiiii6] + [iiiiiii6]
    #                             permutations.append(new)
    #     return permutations

    def set_ppds(self):
        """ Below: need to remove this part of the function as it will reset ppds to be whatever the br_params specifies,
                    when actually we want it to start at 0.5 and then go unaltered by this method for the subsequent games"""
        initialised = {}
        n_of_a = 0
        if self.kNN_spawn:
            n_of_a = 47
        else:
            n_of_a = self.number_of_agents

        if self.firstgame:
            for i in range(n_of_a):
                initialised[i + 1] = [self.init_ppD, self.init_ppD, self.init_ppD, self.init_ppD]
            # Write once, after the dict is fully built
            with open("agent_ppds.p", "wb") as f:
                pickle.dump(initialised, f)

            # This is used for setting ppD to a model-specified value. For agents
            # to alter their own ppDs, they must use the kNN system and extract
            # from a pickle file [INCOMPLETE] the classification of the partner
            # etc. from the previous game.

    def state_evaluation(self, state_list):
        # if self.stepCount == 1:

        state_value = []
        if not self.memoryPaired:

            for i in state_list:
                current_value = 0
                for j in range(len(i)):
                    item = i[j]
                    # print("Array", i, "Index", j, "Item", item)
                    if item == 'C':
                        current_value = current_value + (1 * j)  # Slight bias towards cooperation
                    if item == 'D':
                        current_value = current_value - (1 * j)
                    if item == 0:
                        current_value = current_value
                state_value.append(current_value)

        elif self.memoryPaired:
            for i in state_list:
                counter = 0
                i = list(i)
                # print(i)
                current_value = 0
                for j in i:
                    # item = i[1]  # should hopefully index the opponent's move in each of the pairs
                    # TODO: I don't think state_evaluation currently affects anything but we will see
                    item = j
                    # print("Array", i, "Index", j, "Item", item)
                    if item == 'C':
                        current_value = current_value + (1 * counter)  # Should there be a slight bias towards C?
                    if item == 'D':
                        current_value = current_value - (1 * counter)
                    if item == 0:
                        current_value = current_value
                    counter += 1
                state_value.append(current_value)

        return state_value

    def moody_state_evaluation(self, state_list):
        # if self.stepCount == 1:

        state_value = []
        if not self.moody_memoryPaired:

            for i in state_list:
                current_value = 0
                for j in range(len(i)):
                    item = i[j]
                    # print("Array", i, "Index", j, "Item", item)
                    if item == 'C':
                        current_value = current_value + (1 * j)  # Slight bias towards cooperation
                    if item == 'D':
                        current_value = current_value - (1 * j)
                    if item == 0:
                        current_value = current_value
                state_value.append(current_value)

        elif self.moody_memoryPaired:
            for i in state_list:
                counter = 0
                i = list(i)
                # print(i)
                current_value = 0
                for j in i:
                    # item = i[1]  # should hopefully index the opponent's move in each of the pairs
                    # TODO: I don't think state_evaluation currently affects anything but we will see
                    item = j
                    # print("Array", i, "Index", j, "Item", item)
                    if item == 'C':
                        current_value = current_value + (1 * counter)  # Should there be a slight bias towards C?
                    if item == 'D':
                        current_value = current_value - (1 * counter)
                    if item == 0:
                        current_value = current_value
                    counter += 1
                state_value.append(current_value)

        return state_value

    def get_highest_score(self):
        scores = [a.score for a in self.schedule.agents]
        self.highest_score = max(scores)

    def reset_values(self):
        # self.agents_defecting = 0
        # self.agents_cooperating = 0
        # self.number_of_defects = 0
        self.number_of_NULL = 0  # placeholder; should presumably reset number_of_coops

    def training_data_collector(self):
        if self.kNN_training:
            if not os.path.isfile('training_data.p'):
                training_data = []
                with open("training_data.p", "wb") as f:
                    pickle.dump(training_data, f)

            agent_training_data = [a.training_data for a in self.schedule.agents]
            training_data = []

            for i in agent_training_data:
                # print("agent has:", i)
                if len(i) != 0:
                    for j in range(len(i)):
                        jj = i[j]
                        # print("data to append", jj)
                        training_data.append(jj)

            # print("save data", save_data)
            with open("training_data.p", "rb") as f:
                training_update = pickle.load(f)

            print("Training Data Size Pre-Update:", len(training_update))
            for i in training_data:
                training_update.append(i)
            print("Training Data Size Post-Update:", len(training_update))
            # print(training_update)
            with open("training_data.p", "wb") as f:
                pickle.dump(training_update, f)
        else:
            return


    def make_agents(self):
        with open("agent_ppds.p", "rb") as f:
            self.agent_ppds = pickle.load(f)

        if not self.randspawn:
            for i in range(self.number_of_agents):
                # print(self.number_of_agents)
                # print(self.coordinates)
                """This is for adding agents in sequentially."""
                x, y = self.coordinates.pop(0)
                # print("x, y:", x, y)
                # x, y = self.grid.find_empty()
                pdagent = PDAgent((x, y), self, True)
                self.grid.place_agent(pdagent, (x, y))
                self.schedule.add(pdagent)

        elif self.randspawn:
            """ This is for adding in agents randomly """
            for i in range(self.number_of_agents):
                x, y = self.coordinates.pop(random.randrange(len(self.coordinates)))
                # print("x, y:", x, y)
                # x, y = self.grid.find_empty()
                pdagent = PDAgent((x, y), self, True)
                self.grid.place_agent(pdagent, (x, y))
                self.schedule.add(pdagent)

    def export_q_tables(self, init):      # TODO: Does this need a moody counterpart? =============================
        qs = [a.qtable for a in self.schedule.agents]
        # we need to print/save a list of the keys
        # then print/save each
        print('qs', qs)
        qvals = []
        for i in qs:
            if i:  # 'is not []' was always True; only process non-empty q-tables
                # take each agent's qtable
                # print('i', i)
                for j in i:
                    # take each item in that table
                    temp_qs = []
                    # print('j', j)
                    item = i[j]
                    for k in item:
                        temp_qs.append(k)
                        # append all the qvalues into one big list
                    qvals.append(temp_qs)

        print('qvals', qvals)
        if init:
            with open('{} qinit.csv'.format(self.filename), 'a', newline='') as csvfile:
                fieldnames = ['q']
                writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
                writer.writeheader()
                writer.writerow({'q': qvals})
        else:
            with open('{} qend.csv'.format(self.filename), 'a', newline='') as csvfile:
                fieldnames = ['q']
                writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
                writer.writeheader()
                writer.writerow({'q': qvals})

    def update_agent_ppds(self, ppds):
        with open("agent_ppds.p", "wb") as f:
            pickle.dump(ppds, f)

    def make_set_agents(self):
        # generate current experiment ppD pickle if one does not exist?
        # if not os.path.isfile('agent_ppds.p'):
        #     initialised = {}
        #     for i in range(self.number_of_agents):
        #         initialised[i + 1] = [self.init_ppD, self.init_ppD, self.init_ppD, self.init_ppD]
        #         pickle.dump(initialised, open("agent_ppds.p", "wb"))

        for i in range(47):
            """This is for adding agents in sequentially."""
            # x, y = self.experimental_coordinates.pop(0)
            # print(i)
            x, y = self.experimental_coordinates[i]
            # print("x, y:", x, y)
            # x, y = self.grid.find_empty()
            pdagent = PDAgent((x, y), self, True)
            self.grid.place_agent(pdagent, (x, y))
            self.schedule.add(pdagent)

    def step(self):
        start = time.time()
        self.schedule.step()
        if self.step_count == self.rounds - 1:
            self.update_agent_ppds(self.agent_ppds)
            self.training_data_collector()
        self.step_count += 1
        # print("Step:", self.step_count)
        end = time.time()
        steptime = end - start
        if self.collect_data:
            self.output_data(steptime)
        self.datacollector.collect(self)
        self.get_highest_score()
        self.reset_values()

        # Export initial q tables:
        # if self.export_q:
        #     if self.step_count == 1:
        #         self.export_q_tables(True)
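        # NOTE: step_count was incremented above, mid-step, so the check
        # below fires on a different scheduler step than the identical
        # check at the top of this method.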
        if self.step_count == self.rounds - 1:
            if self.export_q:
                with open('{} states_agent37.csv'.format(self.filename), 'a', newline='') as csvfile:
                    writer = csv.DictWriter(csvfile, fieldnames=['state'])
                    for j in self.memory_states:
                        for _ in range(2):  # each state is written twice
                            writer.writerow({'state': j})

            if self.moody_export_q:
                with open('{} states_agent36.csv'.format(self.filename), 'a', newline='') as csvfile:
                    writer = csv.DictWriter(csvfile, fieldnames=['state'])
                    for j in self.moody_memory_states:
                        for _ in range(2):  # each state is written twice
                            writer.writerow({'state': j})

    def run_model(self):
        # The per-step export logic keys off self.rounds, so the model
        # always runs for the configured number of rounds.
        for _ in range(self.rounds):
            self.step()
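For reference, a minimal sketch of the flattening that export_q_tables performs (assuming, as the loops above imply, that each agent's qtable maps a state key to an iterable of per-action Q-values; the keys and values here are purely illustrative):

qtable = {('C', 'C'): [0.5, -0.2],
          ('C', 'D'): [0.1, 0.3]}
qvals = []
for state in qtable:
    # one row of Q-values per state, matching the export above
    qvals.append([q for q in qtable[state]])
print(qvals)  # [[0.5, -0.2], [0.1, 0.3]]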
Example #36
class SDGrid(Model):
    ''' Model class for an iterated, spatial social dilemma. '''

    schedule_types = {"Sequential": BaseScheduler,
                      "Random": RandomActivation,
                      "Simultaneous": SimultaneousActivation}

    # The payoff dictionary holds the payoff for each interaction,
    # keyed on (my_move, other_move).

    # NOTE: custom payoffs must be supplied by the user as a dict in the
    # format shown in __init__ below.
    
    def __init__(self, height=0, width=0, schedule_type="Random",
                 payoffs=None, seed=2514, p=0.1, implement="Epstein",
                 num_RL=500, ep_length=1):
        '''
        Create a new Spatial Prisoners' Dilemma Model.

        Args:
            height, width: Grid size. There will be one agent per grid cell.
            schedule_type: Can be "Sequential", "Random", or "Simultaneous".
                           Determines the agent activation regime.
            payoffs: Dictionary of (move, neighbor_move) payoffs; a default
                     payoff matrix is used if none is supplied.
            p: Under the "Epstein" implementation, the probability that a
               cell is populated with an agent.
            num_RL: Number of learning (RL) agents.
            ep_length: Episode length in steps.
        '''
        # Set default grid size if none is provided by the user.
        h = height if height else 50
        w = width if width else 50
        
        assert h > 0 and w > 0, "Grid height and width must be positive numbers."

        if payoffs:
            self.payoff = payoffs
        else:
            self.payoff = {(C, C): 5,
                           (C, D): -5,
                           (D, C): 6,
                           (D, D): -6}

        self.grid = SingleGrid(w, h, torus=True)  # SingleGrid expects (width, height)
        self.schedule_type = schedule_type
        self.schedule = self.schedule_types[self.schedule_type](self)
        self.implement = implement
        self.ep_length = ep_length
        self.num_RL = num_RL
        # FIXME: this is a band-aid fix for Mesa's loop bug (see Trello for SD ABM):

        self.kill_list = []
        self.fertile_agents = []

        if self.implement == "Epstein":
            # Each cell is populated with probability p (False) and left
            # empty with probability 1 - p (True).
            leave_empty = np.random.choice(a=[False, True], size=(w, h),
                                           p=[p, 1 - p])

        # Create agents: automatically populates both schedule and grid.
        # Note: loop over w and h (not the raw height/width arguments),
        # otherwise the default size of 0 would create no agents.
        count = 0
        for x in range(w):
            for y in range(h):
                if self.implement == "Epstein" and leave_empty[x, y]:
                    continue
                agent = SDAgent(count, (x, y), self)
                count += 1
                self.grid.place_agent(agent, (x, y))
                self.schedule.add(agent)
        # FIXME: may need to give agents a unique id to do this correctly.
        # FIXME: this will have to be generalized later for when multiple
        #        batches of episodes are being run.
        # learners = np.random.choice(self.schedule._agents, self.num_RL)
        # # switch them to learn mode
        # for agent in learners:
        #     agent.learn_mode = True

        # TODO: Make data collection easier for user; need to do same in BWT / Cartel

        self.datacollector = DataCollector(model_reporters={
            "Learning Cooperating_Agents":
                lambda m: len([a for a in m.schedule.agents
                               if a.move == C and a.unique_id == 1]),
            "Learning Defecting_Agents":
                lambda m: len([a for a in m.schedule.agents
                               if a.move == D and a.unique_id == 1]),
        })

        self.running = True
        self.datacollector.collect(self)

    def step(self):

        self.schedule.step()
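        # Agent 1 is the designated learning agent: flag it and update its
        # policy after every scheduler step.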
        for agent in self.schedule.agents:
            if agent.unique_id == 1:
                agent.learn = True
                #print('agent 1 has learn set to {}'.format(agent.learn))
                agent.update_policy()
        # collect data
        self.datacollector.collect(self)

        # self.purge()
        # self.replicate_agents()

        #if (self.schedule.time % self.ep_length == 0) and (self.schedule.time > 0):
            #print(self.schedule.time)
            
            # learners = random.sample(self.schedule.agents, self.num_RL) 
            # for agent in learners:
            #     agent.learn = True
        
            # for agent in learners:
            #     agent.update_policy()
            #     agent.learn = False
            #     if agent == learners[-1]: 
            #         print("################################# Update finished #################################")
            

        
    def replicate_agents(self):
        # fertile_agents is initialised as a list, so iterating it directly
        # is safe; an empty list simply does nothing.
        for agent in self.fertile_agents:
            if agent.pos is not None:
                try:
                    agent.replicate()
                except ValueError:
                    # print("Caught a bad egg, boss!")
                    continue


    def purge(self):
        # Remove every queued agent from the grid and the scheduler,
        # then clear the queue.
        for agent in self.kill_list:
            self.grid.remove_agent(agent)
            self.schedule.remove(agent)
        self.kill_list = []


    def run(self, n):
        ''' Run the model for n steps. '''
        for _ in range(n):
            self.step()
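For reference, a minimal usage sketch (assuming C, D, and SDAgent are defined as in the example, and a Mesa version whose DataCollector exposes get_model_vars_dataframe(); the grid size and round count here are illustrative):

model = SDGrid(height=10, width=10, schedule_type="Random")
model.run(100)
results = model.datacollector.get_model_vars_dataframe()
print(results.tail())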