Example #1
0
class MesaAxelrod(Model):
    """
    Mesa model that scatters Axelrod-strategy players at random positions
    on a toroidal grid and steps them with a removal-tolerant scheduler.
    """

    # Default strategy pool. A tuple (immutable) is used so the default
    # cannot be accidentally mutated and shared across model instances
    # (the original used a mutable list default argument).
    DEFAULT_STRATEGIES = (axl.Random, axl.TitForTat, axl.Grudger)

    def __init__(self, height=20, width=20, players=100, strategies=None):
        """
        :param height: grid height in cells
        :param width: grid width in cells
        :param players: number of Player agents to create
        :param strategies: iterable of Axelrod strategy classes to draw
            from; defaults to ``DEFAULT_STRATEGIES``
        """
        super().__init__()

        self.height = height
        self.width = width

        if strategies is None:
            strategies = self.DEFAULT_STRATEGIES

        # add a scheduler
        self.scheduler = RandomActivation(self)

        # add a grid (torus so every cell has a full neighbourhood)
        self.grid = MultiGrid(self.width, self.height, torus=True)

        # create players at random positions with random strategies
        for _ in range(players):
            x = random.randrange(self.width)
            y = random.randrange(self.height)

            strategy = random.choice(strategies)

            self.new_agent((x, y), strategy)

    def new_agent(self, pos, strategy):
        """
        Method that creates a new agent, and adds it to the correct scheduler.
        """
        agent = Player(self.next_id(), self, pos, strategy)

        self.grid.place_agent(agent, pos)
        self.scheduler.add(agent)

    def remove_agent(self, agent):
        """
        Method that removes an agent from the grid and the correct scheduler.
        """
        self.grid.remove_agent(agent)
        self.scheduler.remove(agent)

    def step(self):
        """
        step through all agents and step()
        self-implemented scheduler that allows removal of agents during stepping
        """
        # NOTE: reaches into the scheduler's private _agents dict so that
        # agents removed mid-step are simply skipped instead of crashing.
        agents = self.scheduler._agents

        agent_keys = list(agents.keys())
        for key in agent_keys:
            if key in agents:  # skip agents removed during this step
                agents[key].step()

    def run_model(self, step_count=200):
        """
        Method that runs the model for a specific amount of steps.
        """
        for _ in range(step_count):
            self.step()
Example #2
0
class PolicyEmergenceSM(Model):
    """
    Simplest Model (SM) for the policy emergence model.

    Depending on the ``PE_type`` string, the base model is extended with
    policy learning ('A+PL'), coalition formation ('A+Co') and/or partial
    knowledge ('+PK').
    """

    def __init__(self,
                 PE_type,
                 SM_inputs,
                 AplusPL_inputs,
                 AplusCo_inputs,
                 AplusPK_inputs,
                 height=20,
                 width=20,
                 input_LHS=False):
        """
        Set up the grid, the scheduler, the data collector and the agents.

        :param PE_type: model type string (SM, A+PL, A+Co, A+PK, A+PI)
        :param SM_inputs: inputs for the entire model
        :param AplusPL_inputs: parameters for the policy learning extension
        :param AplusCo_inputs: parameters for the coalition extension
        :param AplusPK_inputs: parameters for the partial knowledge extension
        :param height: height of the canvas
        :param width: width of the canvas
        :param input_LHS: Latin hypercube sample overriding some A+Co
            parameters (batchrunner input), or False to use the
            ``AplusCo_inputs`` defaults
        """
        self.height = height  # height of the canvas
        self.width = width  # width of the canvas

        self.SM_inputs = SM_inputs  # inputs for the entire model
        self.PE_type = PE_type  # model type (SM, A+PL, A+Co, A+PK, A+PI)

        self.resources_aff = SM_inputs[2]  # resources per affiliation agent

        self.stepCount = 0  # int - [-] - initialisation of step counter
        self.agenda_PC = None  # agenda policy core issue tracker
        self.policy_implemented_number = None  # policy number tracker
        self.policy_formulation_run = False  # gate for policy formulation

        # float - [-] - electorate influence weight constant
        self.w_el_influence = self.SM_inputs[5]

        # batchrunner inputs
        self.input_LHS = input_LHS

        # ACF+PL parameters (also required by the coalition extension)
        if 'A+PL' in self.PE_type or 'A+Co' in self.PE_type:
            self.conflict_level = AplusPL_inputs[0]
            self.resources_spend_incr_agents = AplusPL_inputs[1]

        # ACF+Co parameters
        if 'A+Co' in self.PE_type:
            self.PC_interest = AplusCo_inputs[0]
            if self.input_LHS:
                # LHS inputs override the AplusCo defaults
                self.coa_creation_thresh = self.input_LHS[1]
                self.coa_resources_share = self.input_LHS[0]
            else:
                self.coa_creation_thresh = AplusCo_inputs[1]
                self.coa_resources_share = AplusCo_inputs[3]
            self.coa_coherence_thresh = AplusCo_inputs[2]
            self.resources_spend_incr_coal = AplusCo_inputs[4]
            print('res. share:', round(self.coa_resources_share, 3),
                  ', coa. threshold:', round(self.coa_creation_thresh, 3))

            self.coalition_list = []

        # +PK parameters
        self.PK = '+PK' in self.PE_type
        self.PK_catchup = AplusPK_inputs[0]

        self.schedule = RandomActivation(self)  # mesa random activation
        self.grid = SingleGrid(height, width, torus=True)  # mesa grid

        # creation of the data collector; the reporters are assembled once
        # and the key insertion order is kept identical to the previous
        # duplicated definitions so collected columns keep their order
        model_reporters = {
            "step": "stepCount",
            "AS_PF": get_problem_policy_chosen,
            "agent_attributes": get_agents_attributes,
        }
        if 'A+Co' in self.PE_type:
            model_reporters["coalitions_attributes"] = \
                get_coalitions_attributes

            def issuetree_reporter(a):
                # coalitions keep their own beliefs at index 0 of the tree
                idx = (a.unique_id if isinstance(a, ActiveAgent)
                       and not isinstance(a, Coalition) else 0)
                return getattr(a, 'issuetree', [None])[idx]
        else:

            def issuetree_reporter(a):
                idx = a.unique_id if isinstance(a, ActiveAgent) else 0
                return getattr(a, 'issuetree', [None])[idx]

        model_reporters["electorate_attributes"] = get_electorate_attributes

        self.datacollector = DataCollector(
            # Model-level variables
            model_reporters=model_reporters,
            # Agent-level variables
            agent_reporters={
                "x": lambda a: a.pos[0],
                "y": lambda a: a.pos[1],
                "Agent type": lambda a: type(a),
                "Issuetree": issuetree_reporter,
            })

        # setting up the belief tree and the policy instruments
        self.len_S, self.len_PC, self.len_DC, self.len_CR = \
            belief_tree_input()
        self.policy_instruments, self.len_ins, self.PF_indices = \
            policy_instrument_input()

        # setting up active, passive (electorate) and truth agents
        # NOTE(review): init_active_agents is expected to also set
        # self.number_activeagents (used throughout) - confirm in its
        # implementation.
        init_active_agents(self, self.len_S, self.len_PC, self.len_DC,
                           self.len_CR, self.len_PC, self.len_ins,
                           self.SM_inputs)
        init_electorate_agents(self, self.len_S, self.len_PC, self.len_DC,
                               self.SM_inputs)
        init_truth_agent(self, self.len_S, self.len_PC, self.len_DC,
                         self.len_ins)

        self.running = True
        self.numberOfAgents = self.schedule.get_agent_count()
        self.datacollector.collect(self)

    def step(self, KPIs):
        """
        Main steps of the Simplest Model for policy emergence:
        0. Module interface - Input
        1. Agenda setting step
        2. Policy formulation step
        3. Data collection

        :param KPIs: indicator values communicated to the agents
        :return: the policy instrument implemented this round (the status
            quo instrument when no policy formulation takes place)
        """
        self.KPIs = KPIs  # saving the indicators

        # 0. initialisation
        # communicating the beliefs (indicators)
        self.module_interface_input(self.KPIs)
        # electorate influence actions
        self.electorate_influence(self.w_el_influence)
        if 'A+Co' in self.PE_type:
            self.coalition_creation_algorithm()

        # 1. agenda setting
        self.agenda_setting()

        # 2. policy formulation
        if self.policy_formulation_run:
            policy_implemented = self.policy_formulation()
        else:
            policy_implemented = self.policy_instruments[-1]

        # 3. data collection
        self.stepCount += 1  # iterate the steps counter
        self.datacollector.collect(self)  # collect data

        print("Step ends", "\n")

        return policy_implemented

    def module_interface_input(self, KPIs):
        """
        The module interface input step consists of actions related to the
        module interface and the policy emergence model: the truth agent's
        issue tree is refreshed from the KPIs, then the truth content is
        communicated to every active (non-coalition) agent.
        """
        len_DC = self.len_DC
        len_PC = self.len_PC
        len_S = self.len_S
        len_ins = self.len_ins

        # saving the issue tree of the truth agent
        # NOTE(review): assumes at least one TruthAgent is scheduled;
        # otherwise truth_issuetree/truth_policytree are unbound below.
        for agent in self.schedule.agent_buffer(shuffled=True):
            if isinstance(agent, TruthAgent):
                agent.issuetree_truth = KPIs
                truth_issuetree = agent.issuetree_truth
                truth_policytree = agent.policytree_truth

        # transferring policy impact to active agents
        for agent in self.schedule.agent_buffer(shuffled=True):
            # selecting only active agents (coalitions excluded)
            if isinstance(agent, ActiveAgent) and \
                    not isinstance(agent, Coalition):
                # communicating the policy instruments impacts
                for insj in range(len_ins):
                    agent.policytree[agent.unique_id][len_PC + insj][
                        0:len_S] = truth_policytree[len_PC + insj]

                # communicating the issue beliefs from the KPIs
                for issue in range(len_DC + len_PC + len_S):
                    agent.issuetree[agent.unique_id][issue][0] = \
                        truth_issuetree[issue]

                # updating the preferences
                self.preference_update(agent, agent.unique_id)

    def resources_distribution(self):
        """
        Distribute action resources to the active agents based on their
        affiliation and, under the coalition extension, transfer a share
        of each member's resources to its coalition.
        """
        if 'A+PL' in self.PE_type or 'A+Co' in self.PE_type:
            for agent in self.schedule.agent_buffer(shuffled=False):
                if isinstance(agent, ActiveAgent):
                    if agent.affiliation == 0:  # affiliation 0
                        agent.resources = 0.01 * self.number_activeagents \
                            * self.resources_aff[0] / 100
                    if agent.affiliation == 1:  # affiliation 1
                        agent.resources = 0.01 * self.number_activeagents \
                            * self.resources_aff[1] / 100
                    # assigning resources for the actions
                    agent.resources_action = agent.resources

        if 'A+Co' in self.PE_type:  # resources attribution to coalitions
            for coalition in self.schedule.agent_buffer(shuffled=False):
                if isinstance(coalition, Coalition):
                    resources = 0
                    for agent_mem in coalition.members:
                        resources += \
                            agent_mem.resources * self.coa_resources_share
                        agent_mem.resources -= \
                            self.coa_resources_share * agent_mem.resources
                        # BUGFIX: refresh the *member's* action resources
                        # after the coalition share is deducted; the
                        # original assigned to the leftover loop variable
                        # `agent` from the affiliation loop above.
                        agent_mem.resources_action = agent_mem.resources
                    coalition.resources = resources
                    # assigning resources for the coalition's actions
                    coalition.resources_action = coalition.resources

    def agenda_setting(self):
        """
        In the agenda setting step, the active agents first select their
        policy core issue of preference and then select the agenda.
        """
        # resources distribution
        self.resources_distribution()

        # active agent policy core selection
        for agent in self.schedule.agent_buffer(shuffled=False):
            if isinstance(agent, ActiveAgent):
                agent.selection_PC()

        # intra-coalition interactions
        if 'A+Co' in self.PE_type:
            for coalition in self.schedule.agent_buffer(shuffled=True):
                if isinstance(coalition, Coalition):
                    coalition.interactions_intra_coalition('AS')

        if 'A+PL' in self.PE_type or 'A+Co' in self.PE_type:
            # active agent interactions (including coalitions)
            for agent in self.schedule.agent_buffer(shuffled=True):
                if isinstance(agent, ActiveAgent):
                    agent.interactions('AS', self.PK)

            # policy core selection again, after the agent interactions
            for agent in self.schedule.agent_buffer(shuffled=False):
                if isinstance(agent, ActiveAgent):
                    agent.selection_PC()

        # collecting each active agent's preferred policy core issue
        selected_PC_list = []
        number_ActiveAgents = 0
        for agent in self.schedule.agent_buffer(shuffled=False):
            if isinstance(agent, ActiveAgent):
                selected_PC_list.append(agent.selected_PC)
                number_ActiveAgents += 1

        # finding the most common policy core issue and its frequency
        tally = defaultdict(int)
        for issue in selected_PC_list:
            tally[issue] += 1
        agenda_PC_temp, agenda_PC_temp_frequency = \
            max(tally.items(), key=lambda item: item[1])

        # checking for majority
        if agenda_PC_temp_frequency > int(number_ActiveAgents / 2):
            self.agenda_PC = agenda_PC_temp
            # allowing for policy formulation to happen
            self.policy_formulation_run = True
            print("The agenda consists of PC", self.agenda_PC, ".")
        else:  # if no majority
            self.policy_formulation_run = False
            print("No agenda was formed, moving to the next step.")

        # for purposes of not changing the entire code - the policy family
        # selected is set at 0 so all policy instruments are always
        # considered in the rest of the model
        self.agenda_PF = 0

    def policy_formulation(self):
        """
        In the policy formulation step, the policy maker agents first
        select their policy core issue of preference and then they select
        the policy that is to be implemented if there is a majority of
        them.

        :return: the policy instrument implemented (the status quo
            instrument when no majority is reached)
        """
        # resources distribution
        self.resources_distribution()

        # calculation of policy instruments preferences
        if 'A+PL' in self.PE_type or 'A+Co' in self.PE_type:
            for agent in self.schedule.agent_buffer(shuffled=False):
                if isinstance(agent, ActiveAgent):
                    agent.selection_S()
                    # individual agent policy instrument selection
                    agent.selection_PI()

        # intra-coalition interactions
        if 'A+Co' in self.PE_type:
            for coalition in self.schedule.agent_buffer(shuffled=True):
                if isinstance(coalition, Coalition):
                    coalition.interactions_intra_coalition('PF')

        # active agent interactions
        if 'A+PL' in self.PE_type or 'A+Co' in self.PE_type:
            for agent in self.schedule.agent_buffer(shuffled=True):
                if isinstance(agent, ActiveAgent):
                    agent.interactions('PF', self.PK)

        # policy instrument selection, considering only policy makers
        selected_PI_list = []
        number_PMs = 0
        for agent in self.schedule.agent_buffer(shuffled=False):
            if isinstance(agent, ActiveAgent) and \
                    agent.agent_type == 'policymaker':
                agent.selection_S()
                # individual agent policy instrument selection
                agent.selection_PI()
                # appending the selected policy instrument for all PMs
                selected_PI_list.append(agent.selected_PI)
                number_PMs += 1

        # finding the most common policy instrument and its frequency
        tally = defaultdict(int)
        print(selected_PI_list)
        for instrument in selected_PI_list:
            tally[instrument] += 1
        result = max(tally.items(), key=lambda item: item[1])
        self.policy_implemented_number = result[0]
        policy_implemented_number_frequency = result[1]

        # check for the majority and implement if satisfied
        if policy_implemented_number_frequency > int(number_PMs / 2):
            print("The policy selected is policy instrument ",
                  self.policy_implemented_number, ".")
            policy_implemented = \
                self.policy_instruments[self.policy_implemented_number]
        else:  # if no majority, select the status quo policy instrument
            print("No consensus on a policy instrument.")
            policy_implemented = self.policy_instruments[-1]

        return policy_implemented

    def preference_update(self, agent, who, coalition_check=False):
        """
        Call the preference update functions for the deep core, policy
        core and secondary issues of an active agent.

        :param agent: the owner of the issue tree
        :param who: index of the issue tree part that is considered
        :param coalition_check: when True, update the coalition's own part
            of the tree (stored at index ``number_activeagents``)
        """
        if coalition_check:
            who = self.number_activeagents

        self.preference_update_DC(agent, who)  # deep core issues
        self.preference_update_PC(agent, who)  # policy core issues
        self.preference_update_S(agent, who)  # secondary issues

    def preference_update_DC(self, agent, who):
        """
        This function is used to update the preferences of the deep core
        issues of agents in their respective issue trees.

        agent - this is the owner of the issue tree
        who - this is the part of the issuetree that is considered -
        agent.unique_id should be used for this - this is done to also
        include partial knowledge preference calculation
        """
        len_DC = self.len_DC

        # calculation of the denominator
        PC_denominator = 0
        for h in range(len_DC):
            issue_belief = agent.issuetree[who][h][0]
            issue_goal = agent.issuetree[who][h][1]
            # contingency for partial knowledge issues: the gap is only
            # computed once both values are known (the original computed
            # it before the None check, defeating the check's purpose)
            if issue_goal is not None and issue_belief is not None:
                PC_denominator += abs(issue_goal - issue_belief)

        # selection of the numerator and calculation of the preference
        for i in range(len_DC):
            issue_belief = agent.issuetree[who][i][0]
            issue_goal = agent.issuetree[who][i][1]
            if PC_denominator != 0:  # make sure the denominator is not 0
                agent.issuetree[who][i][2] = \
                    abs(issue_goal - issue_belief) / PC_denominator
            else:
                agent.issuetree[who][i][2] = 0

    def preference_update_PC(self, agent, who):
        """
        This function is used to update the preferences of the policy core
        issues of agents in their respective issue trees.

        agent - this is the owner of the belief tree
        who - this is the part of the issuetree that is considered -
        agent.unique_id should be used for this - this is done to also
        include partial knowledge preference calculation
        """
        len_DC = self.len_DC
        len_PC = self.len_PC
        len_S = self.len_S

        PC_denominator = 0
        # calculation of the denominator,
        # selecting the causal relations starting from PC
        for j in range(len_PC):
            for k in range(len_DC):
                cr = agent.issuetree[who][len_DC + len_PC + len_S + j +
                                          (k * len_PC)][0]
                issue_belief = agent.issuetree[who][k][0]
                issue_goal = agent.issuetree[who][k][1]
                # contingency for partial knowledge issues; the gap is
                # computed only when both values are known, then checked
                # for sharing a sign with the causal relation
                if issue_goal is not None and issue_belief is not None \
                        and cr is not None:
                    gap = issue_goal - issue_belief
                    if (cr < 0 and gap < 0) or (cr > 0 and gap > 0):
                        PC_denominator += abs(cr * gap)

        # addition of the gaps of the associated mid-level issues
        for i in range(len_PC):
            issue_belief = agent.issuetree[who][len_DC + i][0]
            issue_goal = agent.issuetree[who][len_DC + i][1]
            # contingency for partial knowledge issues
            if issue_goal is not None and issue_belief is not None:
                PC_denominator += abs(issue_goal - issue_belief)

        # calculation of the numerator and the preference
        for j in range(len_PC):  # select one by one the PC

            # right side of the numerator,
            # selecting the causal relations starting from DC
            PC_numerator = 0
            for k in range(len_DC):
                issue_belief = agent.issuetree[who][k][0]
                issue_goal = agent.issuetree[who][k][1]
                cr = agent.issuetree[who][len_DC + len_PC + len_S + j +
                                          (k * len_PC)][0]
                # contingency for partial knowledge issues and same-sign
                # check between the causal relation and the gap
                if issue_goal is not None and issue_belief is not None \
                        and cr is not None:
                    gap = issue_goal - issue_belief
                    if (cr < 0 and gap < 0) or (cr > 0 and gap > 0):
                        PC_numerator += abs(cr * gap)

            # addition of the gap to the numerator
            issue_belief = agent.issuetree[who][len_DC + j][0]
            issue_goal = agent.issuetree[who][len_DC + j][1]
            # contingency for partial knowledge issues
            if issue_goal is not None and issue_belief is not None:
                PC_numerator += abs(issue_goal - issue_belief)

            # calculation of the preferences
            if PC_denominator != 0:
                agent.issuetree[who][len_DC + j][2] = round(
                    PC_numerator / PC_denominator, 3)
            else:
                agent.issuetree[who][len_DC + j][2] = 0

    def preference_update_S(self, agent, who):
        """
        This function is used to update the preferences of secondary
        issues of the agents in their respective issue trees.

        agent - this is the owner of the belief tree
        who - this is the part of the issuetree that is considered -
        agent.unique_id should be used for this - this is done to also
        include partial knowledge preference calculation
        """
        len_DC = self.len_DC
        len_PC = self.len_PC
        len_S = self.len_S

        S_denominator = 0
        # calculation of the denominator
        for j in range(len_S):
            # selecting the causal relations starting from PC
            for k in range(len_PC):
                issue_belief = agent.issuetree[who][len_DC + k][0]
                issue_goal = agent.issuetree[who][len_DC + k][1]
                cr = agent.issuetree[who][len_DC + len_PC + len_S +
                                          len_DC * len_PC + j +
                                          (k * len_S)][0]
                # contingency for partial knowledge issues and same-sign
                # check between the causal relation and the gap
                if issue_goal is not None and issue_belief is not None \
                        and cr is not None:
                    gap = issue_goal - issue_belief
                    if (cr < 0 and gap < 0) or (cr > 0 and gap > 0):
                        S_denominator += abs(cr * gap)

        # addition of the gaps of the associated secondary issues
        for j in range(len_S):
            issue_belief = agent.issuetree[who][len_DC + len_PC + j][0]
            issue_goal = agent.issuetree[who][len_DC + len_PC + j][1]
            # contingency for partial knowledge issues
            if issue_goal is not None and issue_belief is not None:
                S_denominator += abs(issue_goal - issue_belief)

        # calculation of the numerator and the preference
        for j in range(len_S):  # select one by one the S

            # right side of the numerator,
            # selecting the causal relations starting from PC
            S_numerator = 0
            for k in range(len_PC):
                cr = agent.issuetree[who][len_DC + len_PC + len_S +
                                          len_DC * len_PC + j +
                                          (k * len_S)][0]
                issue_belief = agent.issuetree[who][len_DC + k][0]
                issue_goal = agent.issuetree[who][len_DC + k][1]
                # contingency for partial knowledge issues and same-sign
                # check between the causal relation and the gap
                if issue_goal is not None and issue_belief is not None \
                        and cr is not None:
                    gap = issue_goal - issue_belief
                    if (cr < 0 and gap < 0) or (cr > 0 and gap > 0):
                        S_numerator += abs(cr * gap)

            # addition of the gap to the numerator
            issue_belief = agent.issuetree[who][len_DC + len_PC + j][0]
            issue_goal = agent.issuetree[who][len_DC + len_PC + j][1]
            # contingency for partial knowledge issues
            if issue_goal is not None and issue_belief is not None:
                S_numerator += abs(issue_goal - issue_belief)

            # calculation of the preferences
            if S_denominator != 0:
                agent.issuetree[who][len_DC + len_PC + j][2] = round(
                    S_numerator / S_denominator, 3)
            else:
                agent.issuetree[who][len_DC + len_PC + j][2] = 0

    def electorate_influence(self, w_el_influence):
        """
        This function calls the influence actions in the electorate agent
        class.

        :param w_el_influence: electorate influence weight constant
        """
        for agent in self.schedule.agent_buffer(shuffled=True):
            if isinstance(agent, ElectorateAgent):
                agent.electorate_influence(w_el_influence)

    def coalition_creation_algorithm(self):
        """
        Function that is used to reset the coalitions at the beginning of
        each round.

        A maximum of two coalitions are allowed. The agents have to be
        within a certain threshold of their goals to be assembled
        together. Note that the preferred states only are considered and
        not the actual beliefs of the actors - this could be a problem
        when considering the partial information case.
        """
        # resetting the coalitions before the creation of new ones
        # NOTE(review): coalitions are removed from the scheduler only,
        # not from the grid - confirm the grid tolerates re-placement.
        for coalition in self.schedule.agent_buffer(shuffled=False):
            if isinstance(coalition, Coalition):
                self.schedule.remove(coalition)

        # saving the agents in a list with their goal values for the
        # policy core issue of interest
        list_agents_1 = []  # active agent list
        for agent in self.schedule.agent_buffer(shuffled=False):
            if isinstance(agent, ActiveAgent):
                goal = agent.issuetree[agent.unique_id][
                    self.len_DC + self.PC_interest][1]
                list_agents_1.append((agent, goal))
        # sorting the list based on the goals
        list_agents_1.sort(key=lambda entry: entry[1])

        # checking for groups for the first coalition: for every agent,
        # count how many agents fall within the creation threshold
        list_coalition_number = []
        for i in range(len(list_agents_1)):
            count = 0
            lower = list_agents_1[i][1] - self.coa_creation_thresh
            upper = list_agents_1[i][1] + self.coa_creation_thresh
            for j in range(len(list_agents_1)):
                if lower <= list_agents_1[j][1] <= upper:
                    count += 1
            list_coalition_number.append(count)

        # finding the grouping with the most members
        index = list_coalition_number.index(max(list_coalition_number))

        # members of the first coalition; agents left over go to list 2
        list_coalition_members = []
        list_agents_2 = copy.copy(list_agents_1)
        lower = list_agents_1[index][1] - self.coa_creation_thresh
        upper = list_agents_1[index][1] + self.coa_creation_thresh
        for entry in list_agents_1:
            if lower <= entry[1] <= upper:
                list_coalition_members.append(entry[0])
                list_agents_2.remove(entry)

        # creating the coalition with the selected members
        self.coalition_creation(1001, list_coalition_members)

        if len(list_agents_2) > 2:  # check if there are enough agents left

            # checking for groups for the second coalition
            list_coalition_number = []
            for i in range(len(list_agents_2)):
                count = 0
                lower = list_agents_2[i][1] - self.coa_creation_thresh
                upper = list_agents_2[i][1] + self.coa_creation_thresh
                for j in range(len(list_agents_2)):
                    if lower <= list_agents_2[j][1] <= upper:
                        count += 1
                list_coalition_number.append(count)
            # finding the grouping with the most members
            index = list_coalition_number.index(max(list_coalition_number))

            list_coalition_members = []
            lower = list_agents_2[index][1] - self.coa_creation_thresh
            upper = list_agents_2[index][1] + self.coa_creation_thresh
            for entry in list_agents_2:
                if lower <= entry[1] <= upper:
                    list_coalition_members.append(entry[0])

            # creating the coalition with the selected members
            self.coalition_creation(1002, list_coalition_members)

    def coalition_creation(self, unique_id, members):
        """
        Function that is used to create the object Coalition which is a
        sub-agent of the ActiveAgent class.

        :param unique_id: unique id given to the coalition (1001/1002)
        :param members: the active agents forming the coalition
        """
        x = 0
        y = 0
        resources = 0  # resources are reset to 0
        len_DC = self.len_DC
        len_PC = self.len_PC
        len_S = self.len_S
        len_CR = self.len_CR

        # creation of the issue tree; the last spot is where the coalition
        # beliefs are stored
        issuetree_coal = [issuetree_creation(len_DC, len_PC, len_S, len_CR)]
        for _ in range(self.number_activeagents):
            issuetree_coal.append(
                issuetree_creation(len_DC, len_PC, len_S, len_CR))

        # creation of the policy tree
        # note that the policy tree is simply copied ... this will not
        # work in the case of partial information where a different
        # algorithm will need to be found for this part of the model
        policytree_coal = [members[0].policytree[members[0].unique_id]]
        for _ in range(self.number_activeagents):
            policytree_coal.append(
                members[0].policytree[members[0].unique_id])

        # creation of the coalition agent
        agent = Coalition((x, y), unique_id, self, 'coalition', resources,
                          'X', issuetree_coal, policytree_coal, members)
        self.coalition_belief_update(agent, members)
        # updating the issue tree preferences of the coalition's own part
        self.preference_update(agent, unique_id, True)
        self.grid.position_agent(agent, (x, y))
        self.schedule.add(agent)

    def coalition_belief_update(self, coalition, members):
        """
        Function that is used to update the beliefs of the coalition to an
        average of the beliefs of the agents that are members of this said
        coalition.

        :param coalition: the coalition whose issue tree is updated
        :param members: the member active agents
        """
        len_DC = self.len_DC
        len_PC = self.len_PC
        len_S = self.len_S
        len_CR = self.len_CR
        n_members = len(members)

        # updating the preferred states and actual beliefs
        for k in range(len_DC + len_PC + len_S):
            belief = 0
            goal = 0
            for agent_mem in members:
                mem_id = agent_mem.unique_id  # avoids shadowing builtin id
                belief += agent_mem.issuetree[mem_id][k][0]
                goal += agent_mem.issuetree[mem_id][k][1]
            coalition.issuetree[self.number_activeagents][k][0] = \
                belief / n_members
            coalition.issuetree[self.number_activeagents][k][1] = \
                goal / n_members

        # updating the causal relations
        for k in range(len_CR):
            CR = 0
            for agent_mem in members:
                mem_id = agent_mem.unique_id
                CR += agent_mem.issuetree[mem_id][len_DC + len_PC +
                                                  len_S + k][0]
            coalition.issuetree[self.number_activeagents][
                len_DC + len_PC + len_S + k][0] = CR / n_members

        if self.PK:  # for the partial knowledge
            for agent in self.schedule.agent_buffer(shuffled=False):
                if agent not in members and \
                        isinstance(agent, ActiveAgent) and \
                        not isinstance(agent, Coalition):
                    other_id = agent.unique_id
                    # updating the preferred states
                    for k in range(len_DC + len_PC + len_S):
                        goal = 0
                        for agent_mem in members:
                            goal += agent_mem.issuetree[other_id][k][1]
                        coalition.issuetree[other_id][k][1] = \
                            goal / n_members

                    # updating the causal relations
                    for k in range(len_CR):
                        CR = 0
                        for agent_mem in members:
                            CR += agent_mem.issuetree[other_id][
                                len_DC + len_PC + len_S + k][0]
                        coalition.issuetree[other_id][
                            len_DC + len_PC + len_S + k][0] = \
                            CR / n_members
Example #3
0
class LabourMarket_quotas(Model):
    """Labour-market model with gender quotas.

    Each step, workers that meet an exit condition (too many failed
    trials, retirement age, or exhausted tenure) are replaced by fresh
    entrants, a new batch of job offers is generated, and the perceived
    gender gap in job levels is updated.
    """

    def __init__(self, N, width, height, M=0.4):
        """Create the model.

        N      -- initial number of worker agents
        width  -- grid width
        height -- grid height
        M      -- ratio of job offers to workers per step (default 0.4)
        """
        super().__init__()  # initialise Mesa Model internals (seeded RNG)
        self.num_agents = N
        self.total_agents_count = N  # monotonic counter used for unique ids
        self.gap_perceived = 0
        self.alpha = 0.6
        # Initial distribution of workers over the four job levels.
        self.p_initial = [0.1, 0.73, 0.12, 0.05]
        self.num_jobs = int(N * M)
        self.grid = MultiGrid(width, height, True)
        self.schedule = RandomActivation(self)
        self.running = True  # required by the Mesa visualisation server

        # Create agents
        for i in range(self.num_agents):
            level = np.random.choice([0, 1, 2, 3], p=self.p_initial)
            education = max(0, np.random.randn(1)[0] + level + 1)
            age = max(23, np.random.randn(1)[0] * 12 + 44)
            total_tenure = max(0, np.random.randn(1)[0] * 10 + 25)

            a = Worker_quotas(i, self, level, age, education, total_tenure)
            self.schedule.add(a)

            # Re-draw the job level from a gender-specific distribution.
            if a.gender == 'M':
                a.level = np.random.choice([0, 1, 2, 3],
                                           p=[0.09, 0.71, 0.14, 0.06])
            elif a.gender == 'F':
                a.level = np.random.choice([0, 1, 2, 3],
                                           p=[0.12, 0.76, 0.10, 0.02])

            x = self.random.randrange(self.grid.width)
            y = self.random.randrange(self.grid.height)
            self.grid.place_agent(a, (x, y))

        self.datacollector = DataCollector(
            model_reporters={
                'aspiration_M': average_aspiration_M,
                'aspiration_F': average_aspiration_F,
                'level_M': average_level_M,
                'level_F': average_level_F,
                'agents_M': average_agents_M,
                'agents_F': average_agents_F,
                'tenure_M': average_tenure_M,
                'tenure_F': average_tenure_F,
                'age_M': average_age_M,
                'age_F': average_age_F,
                'skills_M': average_skills_M,
                'skills_F': average_skills_F,
                'average_value': average_value,
                'average_value_M': average_value_M,
                'average_value_F': average_value_F,
                'male_level_distribution': male_level_distribution,
                'female_level_distribution': female_level_distribution,
                'male_skill_distribution': male_skill_distribution,
                'female_skill_distribution': female_skill_distribution,
                'male_value_distribution': male_value_distribution,
                'female_value_distribution': female_value_distribution
            })

    def update_gap_perceived(self):
        """Refresh the perceived gender gap (male minus female avg level)."""
        self.gap_perceived = average_level_M(self) - average_level_F(self)

    def _exiting_agents(self):
        """Return the workers that should leave the market this step:
        too many failed trials, retirement age, or exhausted tenure."""
        return [agent for agent in self.schedule.agents
                if agent.trials >= 4
                or (agent.total_tenure + agent.age) >= 100
                or agent.age > 60
                or agent.total_tenure > 40]

    def add_agents(self):
        """Add one fresh entrant for every worker about to exit."""
        new_entry = len(self._exiting_agents())

        for _ in range(new_entry):
            self.total_agents_count += 1
            # NOTE(review): relies on Worker_quotas defaults for
            # level/age/education/tenure of new entrants — confirm.
            a = Worker_quotas(self.total_agents_count, self)
            self.schedule.add(a)

            x = self.random.randrange(self.grid.width)
            y = self.random.randrange(self.grid.height)
            self.grid.place_agent(a, (x, y))
            self.num_agents += 1

    def remove_agents(self):
        """Remove every worker that meets an exit condition."""
        for agent in self._exiting_agents():
            # Remove from the grid as well; previously removed agents
            # stayed on the grid forever (leak).
            self.grid.remove_agent(agent)
            self.schedule.remove(agent)
            self.num_agents -= 1

    def step(self):
        """Advance the model: turnover, fresh job offers, then activation."""
        self.add_agents()
        self.remove_agents()
        # A fresh batch of job offers is generated every step.
        self.jobschedule = RandomActivation(self)
        for i in range(self.num_jobs):
            j = JobOffer_quotas(i, self)
            self.jobschedule.add(j)
        self.update_gap_perceived()
        self.schedule.step()
        self.jobschedule.step()
        self.datacollector.collect(self)
Example #4
0
class GTModel(Model):
    """Spatial evolutionary game-theory model.

    Agents on a toroidal single-occupancy grid play a cooperate/defect
    game, pay a population-dependent survival cost (``alpha``), reproduce
    when their energy reaches ``p`` (children may mutate), and die from
    old age or energy depletion.
    """

    def __init__(self, debug, size, i_n_agents, i_strategy, i_energy,
                 child_location, movement, k, T, M, p, d, strategies_to_count,
                 count_tolerance, mutation_type, death_threshold, n_groups):
        super().__init__()  # initialise Mesa Model internals (seeded RNG)
        self.grid = SingleGrid(size, size, torus=True)
        self.schedule = RandomActivation(self)
        self.running = True
        self.debug = debug
        self.size = size
        self.agent_idx = 0
        self.i_energy = i_energy

        # Payoff matrix in the form (my_move, op_move) : my_reward
        self.payoff = {
            ('C', 'C'): 2,
            ('C', 'D'): -3,
            ('D', 'C'): 3,
            ('D', 'D'): -1,
        }
        # Constant for max population control (cost of surviving)
        self.k = k
        # Constant for controlling dying of old age
        self.M = M
        # Minimum lifespan
        self.T = T
        # Minimum energy level to reproduce
        self.p = p
        # Mutation "amplitude"
        self.d = d
        # Whether to spawn children near parents or randomly
        self.child_location = child_location
        # Specify the type of movement allowed for the agents
        self.movement = movement
        # Specify how the agents mutate
        self.mutation_type = mutation_type
        # The minimum total_energy needed for an agent to survive
        self.death_threshold = death_threshold

        # Vars regarding which strategies to look for
        self.strategies_to_count = strategies_to_count
        self.count_tolerance = count_tolerance

        # Add agents (one agent per cell)
        all_coords = [(x, y) for x in range(size) for y in range(size)]
        agent_coords = self.random.sample(all_coords, i_n_agents)

        for _ in range(i_n_agents):
            group_idx = (None if n_groups is None else self.random.choice(
                range(n_groups)))
            agent = GTAgent(self.agent_idx, group_idx, self, i_strategy.copy(),
                            i_energy)
            self.agent_idx += 1
            self.schedule.add(agent)
            self.grid.place_agent(agent, agent_coords.pop())

        # Collect data
        self.datacollector = DataCollector(
            model_reporters={
                **{
                    'strategies': get_strategies,
                    'n_agents': total_n_agents,
                    'avg_agent_age': avg_agent_age,
                    'n_friendlier': n_friendlier,
                    'n_aggressive': n_aggressive,
                    'perc_cooperative_actions': perc_cooperative_actions,
                    'n_neighbors': n_neighbor_measure,
                    'avg_delta_energy': avg_delta_energy,
                    'perc_CC': perc_CC_interactions,
                    'lin_fit_NC': coop_per_neig,
                    'lin_fit_NC_intc': coop_per_neig_intc,
                },
                **{
                    label: strategy_counter_factory(strategy, count_tolerance)
                    for label, strategy in strategies_to_count.items()
                }
            })

    def alpha(self):
        """Return the population-dependent cost of surviving one step."""
        DC = self.payoff[('D', 'C')]
        CC = self.payoff[('C', 'C')]
        N = len(self.schedule.agents)

        return self.k + 4 * (DC + CC) * N / (self.size * self.size)

    def time_to_die(self, agent):
        """True if *agent* dies this step.

        Death is certain when total energy drops below the death
        threshold, otherwise it happens with probability (age - T) / M.
        """
        return (agent.total_energy < self.death_threshold
                or self.random.random() < (agent.age - self.T) / self.M)

    def get_child_location(self, agent):
        """Return an empty cell for *agent*'s child: any empty cell for
        'global' placement, or the nearest empty cell for 'local'."""
        if self.child_location == 'global':
            # sorted() makes the draw deterministic under a fixed seed.
            return self.random.choice(sorted(self.grid.empties))

        elif self.child_location == 'local':
            # Iterate over the radius, starting at 1 to find empty cells
            for rad in range(1, int(self.size / 2)):
                possible_steps = [
                    cell for cell in self.grid.get_neighborhood(
                        agent.pos,
                        moore=False,
                        include_center=False,
                        radius=rad,
                    ) if self.grid.is_cell_empty(cell)
                ]

                if possible_steps:
                    return self.random.choice(possible_steps)

            # If no free cells in radius size/2 pick a random empty cell
            return self.random.choice(sorted(self.grid.empties))

        # Previously an unknown value silently returned None and crashed
        # later inside place_agent; fail fast with a clear message.
        raise ValueError(f'unknown child_location: {self.child_location!r}')

    def maybe_mutate(self, agent):
        """Return a (possibly mutated) copy of *agent*'s strategy,
        according to the configured mutation type.

        Raises ValueError for an unknown mutation type (previously this
        fell through to an UnboundLocalError on return).
        """
        # Mutate by adding a random d to individual Pi's
        if self.mutation_type == 'stochastic':
            # Copy the list so the parent's strategy is untouched.
            new_strategy = agent.strategy.copy()
            # There is a 20% chance of mutation
            if self.random.random() < 0.2:
                # Each Pi is mutated uniformly by [-d, d]
                for i in range(4):
                    mutation = self.random.uniform(-self.d, self.d)
                    new_val = new_strategy[i] + mutation
                    # Keep probabilities in [0, 1]
                    new_val = (0 if new_val < 0 else
                               1 if new_val > 1 else new_val)
                    new_strategy[i] = new_val

        # Mutate by choosing a random strategy from the list set
        elif self.mutation_type == 'fixed':
            # Use the model's seeded RNG; the module-level ``random`` used
            # before ignored the model seed and broke reproducibility.
            new_strategy = self.random.choice(
                list(self.strategies_to_count.values()))

        elif self.mutation_type == 'gaussian_sentimental':
            new_strategy = agent.strategy.copy()
            # There is a 20% chance of mutation
            if self.random.random() < 0.2:
                # Each Pi is mutated by a value drawn from a gaussian
                # with mean=delta_energy
                for i in range(4):
                    mutation = self.random.normalvariate(
                        (agent.delta_energy + self.alpha()) / 14, self.d)
                    new_val = new_strategy[i] + mutation
                    # Keep probabilities in [0, 1]
                    new_val = (0 if new_val < 0 else
                               1 if new_val > 1 else new_val)
                    new_strategy[i] = new_val

        else:
            raise ValueError(f'unknown mutation_type: {self.mutation_type!r}')

        return new_strategy

    def maybe_reproduce(self, agent):
        """If *agent* has at least ``p`` energy, create a (possibly
        mutated) child; parent and child each continue with energy p/2."""
        if agent.total_energy >= self.p:
            # Create the child
            new_strategy = self.maybe_mutate(agent)
            child = GTAgent(self.agent_idx, agent.group_id, self, new_strategy,
                            self.i_energy)
            self.agent_idx += 1

            # Set parent and child energy levels to p/2
            child.total_energy = self.p / 2
            agent.total_energy = self.p / 2

            # Place child (Remove agent argument for global child placement)
            self.schedule.add(child)
            self.grid.place_agent(child, self.get_child_location(agent))

    def step(self):
        """One model step: collect data, handle deaths and births, then
        activate every agent."""
        if self.debug:
            print('\n\n==================================================')
            print('==================================================')
            print('==================================================')
            pprint(vars(self))

        # First collect data
        self.datacollector.collect(self)

        # Then check for dead agents and for new agents
        for agent in self.schedule.agent_buffer(shuffled=True):
            # First check if dead
            if self.time_to_die(agent):
                self.grid.remove_agent(agent)
                self.schedule.remove(agent)

            # Otherwise check if can reproduce
            else:
                self.maybe_reproduce(agent)

        # Finally, step each agent
        self.schedule.step()

    def check_strategy(self, agent):
        """Return the names of all counted strategies that *agent*'s
        strategy matches within ``count_tolerance`` per component."""
        def is_same(strategy, a_strategy):
            tol = self.count_tolerance
            return all(strategy[i] - tol < a_strategy[i] < strategy[i] + tol
                       for i in range(4))

        return [
            name for name, strat in self.strategies_to_count.items()
            if is_same(strat, agent.strategy)
        ]
Example #5
0
class Model(Model):
    """Agent-based model of an autoimmune response: neurons wrapped in
    myelin sheath cells are attacked by B and T lymphocytes, moderated by
    regulatory T cells and a cytokine level.

    NOTE(review): the class name shadows its Mesa base class ``Model``;
    kept unchanged for backward compatibility with existing callers.
    """

    def __init__(self, N, B, T, Treg, width, height):
        """N: neurons, B: B lymphocytes, T: T lymphocytes,
        Treg: regulatory T lymphocytes, width/height: grid size."""
        super().__init__()  # initialise Mesa Model internals (seeded RNG)
        self.num_myelin = N * 8  # eight myelin sheath cells per neuron
        self.num_agents = N + B + T + Treg + self.num_myelin
        self.num_neurons = N
        # NOTE(review): overwrites the N * 8 value set above — looks like a
        # bug, but kept because other code may rely on the value 4; confirm.
        self.num_myelin = 4
        self.num_limfocytB = B
        self.num_active_B = 0
        self.num_infected_B = 0
        self.num_limfocytT = T
        self.num_activeT = 0
        self.num_limfocytTreg = Treg
        self.grid = MultiGrid(width, height, True)
        self.schedule = RandomActivation(self)
        self.available_ids = set()  # ids freed by dead agents, reused first
        self.dead_agents = set()    # agents queued for removal this step
        self.new_agents = set()     # agents queued for addition this step
        self.max_id = 0
        self.step_count = 1
        self.cytokina = 0       # current cytokine level
        self.cytokina_prev = 0  # cytokine level from the previous step
        self.sum = 0
        self.B = 0
        self.a = 0.80    # cytokine decay factor
        self.Ymax = 100  # cytokine saturation level
        # Truncate the added/removed-agents log from any previous run.
        open('new_and_dead.txt', 'w').close()
        # Create agents at fixed positions.
        neuron_positions = [[3, 3], [3, 10], [3, 20], [3, 27], [10, 3],
                            [10, 10], [10, 20], [10, 27], [19, 3], [19, 10],
                            [19, 20], [19, 27], [26, 3], [26, 10], [26, 20],
                            [26, 27], [14, 15]]
        for i in range(self.num_neurons):
            a = Neuron(i, self, "Neuron")
            self.schedule.add(a)
            pos = neuron_positions[i]
            self.grid.place_agent(a, (pos[0], pos[1]))
            # Surround each neuron with myelin sheath cells, numbering them
            # downwards from the top of the id range.
            cells = self.grid.get_neighborhood(a.pos, True, False, 1)
            id = self.num_agents - i * 8
            for cell in cells:
                m = Myelin(id, self, "Myelin")
                self.schedule.add(m)
                self.grid.place_agent(m, cell)
                id -= 1

        # Add the lymphocyte types according to the counts given at model
        # initialisation, each at a random grid cell.
        for i in range(self.num_limfocytB):
            a = LimfocytB(i + self.num_neurons, self, "LimfocytB")
            self.schedule.add(a)
            x = self.random.randrange(self.grid.width)
            y = self.random.randrange(self.grid.height)
            self.grid.place_agent(a, (x, y))
        for i in range(self.num_limfocytT):
            a = LimfocytT(i + N + B, self, "LimfocytT")
            self.schedule.add(a)
            x = self.random.randrange(self.grid.width)
            y = self.random.randrange(self.grid.height)
            self.grid.place_agent(a, (x, y))
        for i in range(self.num_limfocytTreg):
            a = LimfocytTreg(i + N + B + T, self, "LimfocytTreg")
            self.schedule.add(a)
            x = self.random.randrange(self.grid.width)
            y = self.random.randrange(self.grid.height)
            self.grid.place_agent(a, (x, y))

        self.max_id = self.num_agents - 1

        self.datacollector_population = DataCollector(
            model_reporters={"Populacja": compute_population})
        self.datacollector_T_population = DataCollector(
            model_reporters={"Populacja Limfocytów T": T_population})
        self.datacollector_B_population = DataCollector(
            model_reporters={"Populacja Limfocytów B": B_population})
        self.datacollector_Treg_population = DataCollector(
            model_reporters={"Populacja Limfocytów Treg": Treg_population})
        self.datacollector_B_active_population = DataCollector(
            model_reporters={
                "Populacja Aktywnych Limfocytów B": B_activated_population
            })
        self.datacollector_T_active_population = DataCollector(
            model_reporters={
                "Populacja Aktywnych Limfocytów T": T_activated_population
            })

        self.datacollector_B_infected_population = DataCollector(
            model_reporters={
                "Populacja Zainfekowanych Limfocytów B": B_infected_population
            })

        self.datacollector_myelin_population = DataCollector(
            model_reporters={
                "Populacja osłonek mielinowych": myelin_population
            })

        self.datacollector_myelin_healths = DataCollector(
            model_reporters={
                "Suma punktów życia osłonek mielinowych": myelin_healths
            })

    def step(self):
        """One model step: activate agents, collect and dump statistics,
        apply queued additions/removals, update the cytokine level."""
        self.schedule.step()
        self.datacollector_population.collect(self)
        self.datacollector_T_population.collect(self)
        self.datacollector_B_population.collect(self)
        self.datacollector_Treg_population.collect(self)
        self.datacollector_B_active_population.collect(self)
        self.datacollector_T_active_population.collect(self)
        self.datacollector_B_infected_population.collect(self)
        self.datacollector_myelin_population.collect(self)
        self.datacollector_myelin_healths.collect(self)
        self.adding_removing()

        # Dump every collected series to disk, overwriting each step.
        self.datacollector_myelin_healths.get_model_vars_dataframe().to_csv(
            r'Data/myelin_healths25.txt', sep=' ', mode='w')
        self.datacollector_T_population.get_model_vars_dataframe().to_csv(
            r'Data/T_population25.txt', sep=' ', mode='w')
        self.datacollector_B_population.get_model_vars_dataframe().to_csv(
            r'Data/B_population25.txt', sep=' ', mode='w')
        self.datacollector_Treg_population.get_model_vars_dataframe().to_csv(
            r'Data/Treg_population25.txt', sep=' ', mode='w')
        self.datacollector_B_active_population.get_model_vars_dataframe(
        ).to_csv(r'Data/B_active_population25.txt', sep=' ', mode='w')
        self.datacollector_T_active_population.get_model_vars_dataframe(
        ).to_csv(r'Data/T_active_population25.txt', sep=' ', mode='w')
        self.datacollector_B_infected_population.get_model_vars_dataframe(
        ).to_csv(r'Data/B_infected_population25.txt', sep=' ', mode='w')

        print("Liczba agentów: " + str(self.num_agents))
        print("MaxID: " + str(self.max_id))

        # New cytokine level: previous level plus this step's B stimulus,
        # saturated at Ymax and decayed by factor ``a``.
        self.cytokina = max(
            min((self.B + self.cytokina_prev), self.Ymax) * self.a, 0)

        print("Cytokina " + str(self.cytokina))
        print("Cytokina_prev " + str(self.cytokina_prev))

        # Append a per-step snapshot of every agent to the log file.
        with open("agents.txt", 'a') as f:
            f.write("======Step : " + str(self.step_count) + "\n")
            for agent in self.schedule.agents:
                f.write("Agent: " + str(agent.type) + " " +
                        str(agent.unique_id) + str(agent.pos) + "\n")

        self.cytokina_prev = self.cytokina
        self.B = 0

    def running(self):
        """Advance the model one step (legacy entry point)."""
        self.step()

    def adding_removing(self):
        """Apply queued agent additions and removals, log them, refresh
        ``max_id`` and the T-lymphocyte count, and respawn extinct
        populations."""
        with open("new_and_dead.txt", 'a') as f:
            f.write("Step " + str(self.step_count) + "\n")
            f.write("======Dead agents======: " + "\n")
            for d in self.dead_agents:
                try:
                    self.schedule.remove(d)
                    self.num_agents -= 1
                    self.available_ids.add(d.unique_id)
                except KeyError:
                    # Already removed from the schedule; skip it entirely.
                    continue
                try:
                    self.grid._remove_agent(d.pos, d)
                except KeyError:
                    continue
                f.write(str(d.unique_id) + " " + d.type + "\n")
            self.dead_agents.clear()
            f.write("======New Agents=====: " + "\n")
            for n in self.new_agents:
                self.schedule.add(n)
                self.num_agents += 1
                self.grid.place_agent(n, n.pos)
                if n.unique_id in self.available_ids:
                    self.available_ids.remove(n.unique_id)
                f.write(str(n.unique_id) + " " + n.type + "\n")
            self.new_agents.clear()
            # Recompute the highest id in use and count T lymphocytes
            # (plain and activated).
            m = 1
            n = 0
            for agent in self.schedule.agents:
                if agent.unique_id > m:
                    m = agent.unique_id
                if (agent.type == "LimfocytT") or (agent.type
                                                   == "AktywowanyLimfocytT"):
                    n += 1
            self.max_id = m
            # Store the computed count (was ``= 0``, which discarded it).
            self.num_limfocytT = n
            self.deficiencies()

    def _next_id(self):
        """Return a fresh unique id, reusing the smallest freed id first."""
        if len(self.available_ids) == 0:
            self.max_id += 1
            return self.max_id
        id = min(self.available_ids)
        self.available_ids.remove(id)
        return id

    def _spawn_lymphocyte(self, agent_cls, type_name):
        """Create one lymphocyte of *agent_cls* at a random grid cell."""
        agent = agent_cls(self._next_id(), self, type_name)
        self.schedule.add(agent)
        x = self.random.randrange(self.grid.width)
        y = self.random.randrange(self.grid.height)
        self.grid.place_agent(agent, (x, y))
        self.num_agents += 1

    def deficiencies(self):
        """Keep minimal populations alive: whenever a lymphocyte
        population dies out, respawn it (1 B, 10 T, 1 Treg).

        Fixed: T and Treg replacements were created as ``LimfocytB``
        instances (copy-paste bug); they now use the matching classes.
        """
        if B_population(self) == 0:
            self._spawn_lymphocyte(LimfocytB, "LimfocytB")
        if T_population(self) == 0:
            for i in range(10):
                self._spawn_lymphocyte(LimfocytT, "LimfocytT")
        if Treg_population(self) == 0:
            self._spawn_lymphocyte(LimfocytTreg, "LimfocytTreg")
Example #6
0
class PJIAModel(Model):
    """A model with some number of agents."""
    def __init__(
            self,
            # Define canvas
            width=1000,
            height=1000 * 480. / 1100,
            # Define agents
            n_offloaders=1,
            offloaders_speed=5,
            coordinator_memory=2,
            equipment_speed=10,
            # Define actions of agents
            arrival_window=30,
            interaction_aircraft_offloader=10,
            interaction_coordinator_offloader=5,
            interaction_aircraft_coordinator=5,
            # Define positions [% of canvas]
            offloaders_position=[0.1, 0.2],
            coordinator_position=[0.05, 0.5],
            equipment_position=[0.2, 0.1],
            terminal_building_pos=[0.25, 0.21]):
        """Set up the airport offloading model: canvas, agent parameters,
        taxi-route geometry and all agents, then start the run.

        NOTE(review): the list-valued defaults are shared mutable
        defaults; kept for interface compatibility — callers must not
        mutate them.
        """
        super().__init__()  # initialise Mesa Model internals

        # Define canvas
        self.space = ContinuousSpace(width, height, False)
        self.airport_coordinates = airport_coordinates

        # Define aircraft agents (schedule read from the excel file)
        self.aircraft_schedule = acSchema
        self.n_aircraft = len(self.aircraft_schedule.Arrival)
        self.total_amount_cargo = sum(self.aircraft_schedule.Cargo)

        # Define coordinator agents
        self.coordinator_position = coordinator_position
        self.coordinator_memory = coordinator_memory

        # Define the offloaders agents
        self.n_offloaders = n_offloaders
        self.offloaders_position = offloaders_position
        self.offloaders_speed = offloaders_speed

        # Define Equipment objects:
        self.equipment_position = equipment_position
        self.equipment_speed = equipment_speed

        # Define Cargo objects:
        self.cargo_number = 0  # will be used for later agents/objects
        # Terminal position scaled from canvas percentage to coordinates.
        self.terminal_building_pos = [
            terminal_building_pos[0] * self.space.x_max,
            terminal_building_pos[1] * self.space.y_max
        ]

        # Define interactions:
        self.interaction_aircraft_offloader = interaction_aircraft_offloader
        self.interaction_coordinator_offloader = interaction_coordinator_offloader
        self.interaction_aircraft_coordinator = interaction_aircraft_coordinator

        # =============================================================================
        #         taxiing geometry
        # =============================================================================
        # Copy the coordinate table so the percentage->pixel scaling below
        # does not modify the module-level DataFrame.
        self.taxi_coordinates_excel = pd.DataFrame.copy(airport_coordinates,
                                                        deep=True)
        # Multiply the coordinates in excel (which are in percentage) by the width and height of the grid
        self.taxi_coordinates_excel['X_pos'] *= self.space.x_max
        self.taxi_coordinates_excel['Y_pos'] *= self.space.y_max

        # (n, 2) array with only the coordinates, built in one vectorized
        # step (replaces the original quadratic np.append loop).
        self.taxi_coordinates = self.taxi_coordinates_excel[
            ['X_pos', 'Y_pos']].to_numpy()

        # make array with only taxi nodes coordinates:
        self.CP_coordinates = self.taxi_coordinates[13:]

        # Civilian Parking spots, name and occupation
        self.CP_spots_occupation = [
            'Free', 'Free', 'Occupied', 'Free', 'Occupied', 'Free', 'Free',
            'Free', 'Free'
        ]
        self.CP_spots_name = [
            'CP1', 'CP2', 'CP3', 'CP4', 'CP5', 'CP6', 'CP7', 'CP8', 'CP9'
        ]

        # Determine all the routes from start point to parking
        self.route_S1_CP14 = np.array([self.taxi_coordinates[0]])
        self.route_S2_CP14 = np.array(
            [self.taxi_coordinates[1], self.taxi_coordinates[0]])

        self.route_S1_CP5 = np.array(
            [self.taxi_coordinates[0], self.taxi_coordinates[1]])
        self.route_S2_CP5 = np.array([self.taxi_coordinates[1]])

        self.route_S1_CP6 = np.array([
            self.taxi_coordinates[0], self.taxi_coordinates[1],
            self.taxi_coordinates[2]
        ])
        self.route_S2_CP6 = np.array(
            [self.taxi_coordinates[1], self.taxi_coordinates[2]])

        self.route_S1_CP7 = np.array([
            self.taxi_coordinates[0], self.taxi_coordinates[1],
            self.taxi_coordinates[2], self.taxi_coordinates[3]
        ])
        self.route_S2_CP7 = np.array([
            self.taxi_coordinates[1], self.taxi_coordinates[2],
            self.taxi_coordinates[3]
        ])

        self.route_S1_CP89 = np.array([
            self.taxi_coordinates[0], self.taxi_coordinates[1],
            self.taxi_coordinates[2], self.taxi_coordinates[5]
        ])
        self.route_S2_CP89 = np.array([
            self.taxi_coordinates[1], self.taxi_coordinates[2],
            self.taxi_coordinates[5]
        ])

        # Determine routes from parking to exit
        self.route_CP13_E1 = np.array([
            self.taxi_coordinates[4], self.taxi_coordinates[7],
            self.taxi_coordinates[6], self.taxi_coordinates[11]
        ])

        self.route_CP4_E2 = np.array([
            self.taxi_coordinates[1], self.taxi_coordinates[2],
            self.taxi_coordinates[3], self.taxi_coordinates[12]
        ])

        self.route_CP5_E2 = np.array([
            self.taxi_coordinates[2], self.taxi_coordinates[3],
            self.taxi_coordinates[12]
        ])

        self.route_CP67_E2 = np.array(
            [self.taxi_coordinates[3], self.taxi_coordinates[12]])

        self.route_CP89_E2 = np.array([
            self.taxi_coordinates[5], self.taxi_coordinates[2],
            self.taxi_coordinates[3], self.taxi_coordinates[12]
        ])
        # =============================================================================

        # Define the running
        # Stop when all aircraft have exited
        self.exited_aircraft = 0
        self.schedule = RandomActivation(self)
        # Make all agents
        self.make_coordinator()
        self.make_offloader()
        self.make_aircraft()
        self.make_equipment()
        self.make_cargo()

        # Start running
        self.running = True
        self.start_time = 0
        self.exit_step = 1000

    # =========================================================================
    #  Create all aircraft, the aircraft are not all initialized at the same time,
    #  but within an arrival window.
    # =========================================================================

    def make_aircraft(self):
        """Create all aircraft agents listed in the arrival schedule and
        place them at their scheduled starting node.

        Raises ValueError when a schedule entry names a start node that is
        not in the airport coordinate table (previously this fell through
        to a NameError on unbound Start_X/Start_Y).
        """
        for i in range(self.n_aircraft):
            # Resolve the starting node name to airport coordinates.
            start_x = start_y = None
            for x in range(len(self.airport_coordinates)):
                if self.aircraft_schedule.Start_Name[
                        i] == self.airport_coordinates.Name[x]:
                    start_x = self.airport_coordinates.X_pos[x]
                    start_y = self.airport_coordinates.Y_pos[x]
                    break  # first match wins; no need to keep scanning
            if start_x is None:
                raise ValueError('unknown start node: ' +
                                 str(self.aircraft_schedule.Start_Name[i]))

            ## Get the aircraft data and schedule from the excel file 'aircraft_schedule'
            # Starting pos, scaled from canvas percentage to coordinates
            pos = np.array(
                (start_x * self.space.x_max, start_y * self.space.y_max))
            # Time the aircraft 'lands'
            arrival_time = self.aircraft_schedule.Arrival[i]
            # Speed of the aircraft
            speed = self.aircraft_schedule.Speed[i]
            # Amount of cargo in the aircraft
            n_cargo = self.aircraft_schedule.Cargo[i]
            # The position/ID in the schedule
            schedule_ID = self.aircraft_schedule.Aircraft_ID[i]

            print('I am aircraft', schedule_ID, 'my starting pos is:', pos)
            aircraft = Aircraft(
                i,
                self,
                pos,
                arrival_time,
                speed,
                n_cargo,
                schedule_ID)

            self.space.place_agent(aircraft, pos)
            self.schedule.add(aircraft)

    # =========================================================================
    #  Create all offloaders
    # =========================================================================

    def make_offloader(self):
        """Create all offloading agents, parked at the shared waiting spot."""
        for idx in range(self.n_offloaders):
            # Each agent gets its own position array (agents may move it).
            waiting_pos = np.array(
                (self.offloaders_position[0] * self.space.x_max,
                 self.offloaders_position[1] * self.space.y_max))

            print('I am an offloader and my starting pos is:', waiting_pos)
            # IDs are offset by the aircraft count so they stay unique.
            offloader = OffloadingAgent(self.n_aircraft + idx, self,
                                        waiting_pos, waiting_pos,
                                        self.offloaders_speed)

            self.space.place_agent(offloader, waiting_pos)
            self.schedule.add(offloader)

    # =========================================================================
    #  Create coordinator
    # =========================================================================

    def make_coordinator(self):
        """Create the single coordinating agent at its waiting position."""
        # Scale the fractional coordinator position to space coordinates.
        start = np.array(
            (self.coordinator_position[0] * self.space.x_max,
             self.coordinator_position[1] * self.space.y_max))
        # NOTE(review): the coordinator moves at the offloader speed —
        # confirm that this reuse is intended.
        agent_speed = self.offloaders_speed

        print('I am a coordinator and my starting pos is:', start)
        # The unique ID follows all aircraft and offloader IDs.
        coordinator = CoordinatingAgent(
            1 + self.n_aircraft + self.n_offloaders,
            self,
            start,
            start,
            agent_speed,
            coordinator_memory=self.coordinator_memory)

        self.space.place_agent(coordinator, start)
        self.schedule.add(coordinator)

    # =========================================================================
    #  Create equipment for offloading
    # =========================================================================

    def make_equipment(self):
        """Create the offloading-equipment agent at its parking position."""
        park = np.array((self.equipment_position[0] * self.space.x_max,
                         self.equipment_position[1] * self.space.y_max))

        # Unique ID comes after all aircraft, offloaders and the coordinator.
        equipment = Equipment(self.n_aircraft + self.n_offloaders + 2,
                              self, park, park, self.equipment_speed)

        self.space.place_agent(equipment, park)
        self.schedule.add(equipment)

    # =========================================================================
    #  Create cargo
    # =========================================================================

    def make_cargo(self):
        """Create all cargo agents, stacked at each aircraft's starting point.

        Every aircraft in the schedule gets as many ``Cargo`` agents as its
        ``Cargo`` column specifies; the running total is stored in
        ``self.cargo_number``.
        """
        cargo_number = 0

        for i in range(self.n_aircraft):
            n_cargo = self.aircraft_schedule.Cargo[i]

            # Look for the correct position of the starting point.
            # Bug fix: read from self.airport_coordinates (the original used
            # the bare global ``airport_coordinates``), consistent with the
            # name comparison on the line above.
            start_x = start_y = None
            for x in range(len(self.airport_coordinates)):
                if self.aircraft_schedule.Start_Name[
                        i] == self.airport_coordinates.Name[x]:
                    start_x = self.airport_coordinates.X_pos[x]
                    start_y = self.airport_coordinates.Y_pos[x]
                    break

            if start_x is None:
                # No matching start point: skip this aircraft's cargo instead
                # of crashing on (or silently reusing) an unbound position.
                continue

            for j in range(n_cargo):
                # Starting position scaled to the continuous space.
                pos = np.array(
                    (start_x * self.space.x_max, start_y * self.space.y_max))
                # The aircraft this cargo belongs to.
                schedule_ID = self.aircraft_schedule.Aircraft_ID[i]
                # Cargo_number feeds the unique agent ID.
                cargo_number += 1
                cargo = Cargo(
                    # Offset past aircraft, offloaders, 1x coordinator and
                    # 1x equipment so IDs stay unique.
                    cargo_number + self.n_aircraft + self.n_offloaders + 2,
                    self,
                    pos,
                    schedule_ID)

                self.space.place_agent(cargo, pos)
                self.schedule.add(cargo)
        self.cargo_number = cargo_number
        print('cargo number', cargo_number)

# =============================================================================
#
# # =============================================================================
# # dummy
# # =============================================================================
#     def make_dummy(self):
#         pos = np.array((0.03*self.space.x_max,0.03*self.space.y_max))
#         speed = 5
#         destinations = np.array([[self.space.x_max-0.0001,0.03*self.space.y_max], [self.space.x_max*0.03,self.space.y_max-0.0001]])
#         print('destinations dummies', destinations)
#         for i in range(2):
#             destination = destinations[i]
#             dummy_name = i+1
#             print('I am dummy:', dummy_name)
#             print('my destination is:', destination)
#             print('I am agent number:', dummy_name + self.cargo_number + self.n_aircraft + self.n_offloaders + 1 + 1)
#             dummy = Dummy(
#                         dummy_name + self.cargo_number + self.n_aircraft + self.n_offloaders + 1 + 1, # 1xCoordinator and 1x Equipment
#                         self,
#                         pos,
#                         speed,
#                         destination,
#                         dummy_name
#                         )
#
#             self.space.place_agent(dummy, pos)
#             self.schedule.add(dummy)
# =============================================================================
# =========================================================================
# Define what happens in the model in each step.
# =========================================================================

    def step(self):
        """Advance the model one step and clean up departed aircraft."""
        # Start the wall-clock timer on the very first step.
        if self.schedule.steps == 0:
            self.start_time = time.time()

        if self.schedule.steps == self.exit_step:
            self.running = False
        elif (self.exited_aircraft == self.n_aircraft
              and self.exit_step == 1000):
            # All aircraft have left: schedule shutdown 50 steps from now.
            # 1000 is the sentinel meaning "exit step not set yet".
            self.exit_step = self.schedule.steps + 50
            print("--- % s seconds ---" %
                  round(time.time() - self.start_time, 2))

        self.schedule.step()

        # Remove aircraft that have left the airport from space and schedule.
        for agent in self.schedule.agents:
            if type(agent) == Aircraft and agent.aircraft_state == 'Gone':
                self.space.remove_agent(agent)
                self.schedule.remove(agent)
Example #7
0
class VirusModel(Model):
    """A virus model with some number of agents.

    Rooms are the nodes of a small fixed graph; human agents enter at
    node 0 (the entrance) and move between rooms to satisfy timed goals.
    Room occupancy is collected every step and dumped to ``one_day.csv``
    after a full simulated day.
    """
    def __init__(self):
        super().__init__()  # initialize the Mesa base model

        # Fixed building layout: six rooms, all reachable from the entrance.
        # self.G = nx.erdos_renyi_graph(n=self.num_nodes, p=prob)
        # self.G = nx.erdos_renyi_graph(n=3, p=0.5)
        self.G = nx.Graph()
        self.G.add_node(0)
        self.G.add_node(1)
        self.G.add_node(2)
        self.G.add_node(3)
        self.G.add_node(4)
        # Bug fix: node 5 was missing (add_node(4) appeared twice); it was
        # previously only created implicitly by add_edge(0, 5) below.
        self.G.add_node(5)
        self.G.add_edge(0, 1)
        self.G.add_edge(0, 2)
        self.G.add_edge(0, 3)
        self.G.add_edge(0, 4)
        self.G.add_edge(0, 5)
        self.G.add_edge(1, 4)
        self.G.add_edge(4, 5)
        self.grid = NetworkGrid(self.G)

        # Room metadata: display name plus per-goal satisfaction rates.
        self.rooms = {}
        self.rooms[0] = {"name": "Wejście", "rates": {}}
        self.rooms[1] = {"name": "Czytelnia", "rates": {"Nauka": 2}}
        self.rooms[2] = {"name": "Chillout", "rates": {"Relaks": 10}}
        self.rooms[3] = {"name": "Biuro", "rates": {"Praca": 1.5}}
        self.rooms[4] = {"name": "Toaleta", "rates": {"Toaleta": 30}}
        self.rooms[5] = {
            "name": "Kawiarnia",
            "rates": {
                "Jedzenie": 12,
                "Kultura": 0.5
            }
        }

        # One occupancy reporter per room; "- 1" excludes the RoomAgent
        # itself. ``i=i`` binds the loop variable at definition time
        # (avoids the late-binding closure pitfall).
        collector_dict = {}
        for i, room in enumerate(self.rooms):
            collector_dict[self.rooms[i]["name"]] = lambda model, i=i: len(
                model.grid.get_cell_list_contents([i])) - 1
        self.datacollector = DataCollector(collector_dict)

        self.schedule = RandomActivation(self)

        # Create one RoomAgent per graph node.
        for i, node in enumerate(self.G.nodes()):
            r = RoomAgent(i, self, self.rooms[i]["name"],
                          self.rooms[i]["rates"])
            self.schedule.add(r)

            # Add the agent to the node
            self.grid.place_agent(r, node)

        # Per-profile goal parameters: {goal: [mean hours, std dev hours]}.
        self.prob_needs = {
            "Jedzenie": [4, 0.6],
            "Toaleta": [2, 0.6],
            "Relaks": [5, 1]
        }
        self.prob_studs = {
            "Nauka": [2, 1.5],
            "Praca": [0, 0.5],
            "Kultura": [0, 1.0]
        }
        self.prob_works = {
            "Nauka": [0, 0.3],
            "Praca": [6, 1.0],
            "Kultura": [0, 0.2]
        }
        self.prob_tours = {
            "Nauka": [0, 0.3],
            "Praca": [0, 0.5],
            "Kultura": [1, 1.0]
        }
        self.prob_local = {
            "Nauka": [1, 0.7],
            "Praca": [2, 0.9],
            "Kultura": [1, 1.0]
        }

        # Hourly arrival rates per profile (index = hour of day, 0-23).
        # godziny        0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
        self.rate_studs = [
            0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,
            0, 0
        ]
        self.rate_works = [
            0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0,
            0, 0
        ]
        self.rate_tours = [
            0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 3, 3, 3, 4, 4, 4, 6, 6, 4, 3, 2,
            0, 0
        ]
        self.rate_local = [
            0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 4, 4, 2, 2, 4, 5, 6, 6, 6, 3, 0,
            0, 0
        ]

        self.running = True
        self.datacollector.collect(self)

        # Simulation clock in minutes since midnight, and total agents made.
        self.tm = 0 * 60
        self.count = 0

    def get_sample(self, probs):
        """Draw a {goal: minutes} dict from ``{goal: [mean_h, std_h]}`` specs.

        Each duration is normally distributed (in hours), converted to
        minutes and clipped to the range [15, 600].
        """
        ret = {}
        for k, [m, s] in probs.items():
            tm = int(np.clip(np.random.normal(m, s) * 60, 15, 600))
            ret[k] = tm
        return ret

    def step(self):
        """Spawn this hour's arrivals, step agents, then prune satisfied ones."""
        # prepare list for the satisfied agents
        self.satisfied = []

        # add new agents according to the current hour's arrival rates
        hour = int(self.tm / 60)
        if (hour > 23):
            hour = 0

        for i in range(self.rate_studs[hour]):
            a = HumanAgent(100 + self.count, self,
                           self.get_sample(self.prob_needs),
                           self.get_sample(self.prob_studs))
            self.schedule.add(a)
            self.grid.place_agent(a, 0)
            self.count += 1

        for i in range(self.rate_works[hour]):
            a = HumanAgent(100 + self.count, self,
                           self.get_sample(self.prob_needs),
                           self.get_sample(self.prob_works))
            self.schedule.add(a)
            self.grid.place_agent(a, 0)
            self.count += 1

        # update system
        self.schedule.step()

        # collect data
        self.datacollector.collect(self)

        # make time step; dump a day's data and wrap the clock at midnight
        self.tm = self.tm + 1
        if (self.tm > 24 * 60):
            self.datacollector.get_model_vars_dataframe().to_csv("one_day.csv")
            self.tm = 0

        # remove satisfied agents from the system (agents register
        # themselves in self.satisfied during schedule.step())
        for a in self.satisfied:
            print(a.unique_id, a.goals, "is satisfied")
            self.grid.move_agent(a, 0)
            self.grid._remove_agent(a, 0)  # NOTE: private NetworkGrid API
            self.schedule.remove(a)

    def run_model(self, n):
        """Run the model for ``n`` steps."""
        for i in range(n):
            self.step()

    def find_best_room(self, goal):
        """Return the first room offering a rate for ``goal``, or -1 if none."""
        #print("Looking for room for", goal)
        for i, room in enumerate(self.rooms):
            #print("Room", room, self.rooms[room]["rates"])
            if goal in self.rooms[room]["rates"]:
                return room
        return -1
class Enterprise(Model):
    """Workforce model of an enterprise: locations, units and employees.

    Data is loaded from CSV files (locations, orgs, TDA data); agents are
    stepped daily, and removed agents are parked in ``self.deadpool``.
    """
    def __init__(self, basedate):
        """Create the enterprise model starting at ``basedate``."""
        super().__init__(1)
        self.date = basedate
        self.num_baseagents = 0
        self.num_locations = 0
        self.locations = {}
        self.schedule = RandomActivation(self)
        self.paytable = PayTable("2018-general-schedule-pay-rates.csv")
        self.units = {}
        # Removed agents are retained here rather than discarded.
        self.deadpool = []
        #
        #self.jobboard = JobBoard(0,self)
        #self.schedule.add(self.jobboard)

        self.agt_network = nx.Graph()      # employee social network
        self.unit_network = nx.DiGraph()   # billet/unit chain of command
        self.unit_displaypos = None

    def LoadData(self):
        """Load locations, units and TDA personnel data from CSV files.

        NOTE(review): ``pd.DataFrame().from_csv`` was deprecated in pandas
        0.21 and removed in 1.0 — migrate to
        ``pd.read_csv(path, index_col=0)`` when upgrading pandas.
        """

        #Read in locations data
        #--> LOC, GLC, LMS, OPP, OCN, ACT, OPP
        locs = pd.DataFrame().from_csv("locations.csv").reset_index()
        loc_params = {}

        #Establish locations
        for l in locs.index:
            loc_params = {"GLC":locs.loc[l]["GLC"],"OPP":locs.loc[l]["OPP"],"OCN":locs.loc[l]["OCN"],
                          "LMS":locs.loc[l]["LMS"],"ACT":locs.loc[l]["ACT"]}
            loc = Location(locs.loc[l]["LOC"],self,**loc_params)
            self.locations[locs.loc[l]["LOC"]] = loc
            self.num_locations+=1

        #Read in unit data
        #--> Node ID, UIC, NAM, LOCID, CMD
        Units = pd.DataFrame().from_csv("orgs.csv")

        #Read in network specific chain of command
        self.unit_network, self.unit_displaypos = ReadNetLayout("command.net")

        #Read in TDA data
        #--> UIC,UPN,LOCID,PLN,GRD,SER,STP,CMD,FND,OCN,EID,LNM,DERS,FMSZ,DWL,SKLZ,EXP,SCD
        TDAData = pd.DataFrame().from_csv("tdadata.csv").groupby("UIC")

        #Establish units
        unit_params = {}
        i=1
        for uic in Units.index:
            #Load Unit Location Data
            unit = Units.loc[uic]

            #Set up unit parameters UIC, name, and command
            unit_params = {"UIC":uic, "NAM":unit["NAM"], "CMD":unit["CMD"], "NID":unit["NID"]}

            #Instantiate Unit Agent
            newunit = Unit(i,self,**unit_params)

            #Load Unit Personnel Data//Build out TDA
            myo = TDAData.get_group(uic).reset_index()

            #keep track of Agents IDs for network instantiation
            netw = []
            for uid in myo.index:
                newagt = None
                if myo.loc[uid]["EID"] != "VACANT":
                    newagt = BaseAgent(myo.loc[uid]["EID"],self)

                    # Place Billet and Employee
                    s = self.paytable.GetSalVal(myo.loc[uid]["LOC"],myo.loc[uid]["GRD"],myo.loc[uid]["STP"])
                    emp_dict = {"SAL":s, "UNT": newunit}

                    #build required parameter string
                    for d in ["OCN", "TYP", "GRD", "SER", "STP", "LOC", "LNM", "DWL", "SCD", "FMS", "AGE","TIG"]:
                        emp_dict[d] = myo.loc[uid][d]
                    emp_dict["FEX"] = myo.loc[uid]["FEX"].split("|")
                    emp_dict["GEX"] = myo.loc[uid]["GEX"].split("|")

                    newagt.NewPosition(**emp_dict)

                    #Unit aggregate experience
                    newunit.agg_funcexp.add(newagt.getfuncexp())
                    newunit.agg_geoexp.add(newagt.getgeoexp())

                    #Forceset the dwell on initialization
                    newagt.setdwell(myo.loc[uid]["DWL"])

                    #Record the agent and their respective dwell
                    netw.append([newagt.getUPI(),newagt.getdwell()])

                    #add node to the network, UPI is the Node ID
                    self.agt_network.add_node(newagt.getUPI(),object=newagt)

                    #record number of base agents
                    self.num_baseagents += 1

                    #Add Employee agent to scheduler
                    self.schedule.add(newagt)

                # Place billet and occupant into TDA
                #build required parameter string
                tda_dict = {"OCC":newagt,"KEY":False}
                for d in ["UPN", "AMS", "AGD", "SER", "LOC", "PLN"]:
                    tda_dict[d] = myo.loc[uid][d]
                newunit.InitTDA(**tda_dict)

                #print("Adding node: ",myo.loc[uid]["UPN"])
                self.unit_network.add_node(myo.loc[uid]["UPN"])
                #print("Adding Edge from: ",myo.loc[uid]["UPN"]," to: ", Units.loc[uic]["NID"])
                self.unit_network.add_edge(myo.loc[uid]["UPN"], Units.loc[uic]["NID"])

            # Fully connect the unit's members, weighting each edge by the
            # ratio of the two members' dwell times.
            # Bug fix: the guard used to be (n_j != j_w), comparing an agent
            # ID against a dwell value; compare the two agent IDs instead,
            # and skip zero dwell to avoid division by zero.
            for (n_i,i_w) in netw:
                for (n_j,j_w) in netw:
                    if n_i != n_j and j_w != 0:
                        dwellweight = i_w / j_w
                        self.agt_network.add_edge(n_i,n_j,weight=dwellweight)

            #Add location to Schedule
            #print("Adding unit:", newunit.getname())
            newunit.RecordCivPay()
            newunit.RecordFillRate()
            #self.schedule.add(newunit)
            self.units[newunit.uic] = newunit

            #Increase locid number
            i+=1

        # NOTE(review): this overwrites the location count (incremented
        # above) with the unit counter — confirm whether a separate
        # ``num_units`` attribute was intended.
        self.num_locations = i

        #Load TDAs into Locations

    def PrintLocations(self):
        """Print every scheduled agent (debug helper)."""
        for a in self.schedule.agents:
            print(a)

    def RemoveAgent(self,agt):
        """Remove ``agt`` from the schedule, retaining it in the deadpool."""
        self.deadpool.append(agt)
        self.schedule.remove(agt)

    def step(self):
        """Advance the model one day: step all agents, then all units."""
        print("Model step ",self.date)
        self.date = self.date + dt.timedelta(days=1)
        #Step Through Agents
        self.schedule.step()

        #Step through units for clean-up
        #ul = np.random.shuffle(list(self.units.keys()))
        for u in self.units.keys():
            self.units[u].step()
Example #9
0
class Covid(Model):
    """Continuous-space SIR-style covid model.

    One asymptomatic infected agent plus ``population - 1`` susceptible
    agents are placed in a toroidal continuous space. Each step,
    susceptible agents within ``social_distance`` of an infected agent
    become infected; the run stops once no infected agents remain.
    """
    def __init__(self,
                 population=100,
                 width=100,
                 height=100,
                 mobility=6,
                 social_distance=2,
                 asymptomatic_percentage=50.0,
                 imperial=True):
        """Create a new Covid model.

        Args:
            population: number of people (density); one starts infected.
            width, height: dimensions of the toroidal continuous space.
            mobility: maximum distance an agent can travel.
            social_distance: radius within which susceptible neighbors of
                an infected agent can become infected.
            asymptomatic_percentage: share of infections that are
                asymptomatic (these transmit for 42 steps versus 15).
            imperial: rotate agents between home, work and community as in
                the Imperial College model; otherwise agents diffuse
                randomly from their current position.
        """
        self.current_id = 0
        self.population = population
        self.mobility = mobility
        self.social_distance = social_distance
        self.asymptomatic_percentage = asymptomatic_percentage
        self.imperial = imperial
        # "home" kicks off the home -> work -> community rotation;
        # "diffusion" never changes (see step()).
        self.state = "home" if imperial else "diffusion"
        self.schedule = RandomActivation(self)
        self.space = ContinuousSpace(width, height, True)
        self.make_agents()
        self.running = True

        self.datacollector = DataCollector({
            "Susceptible": lambda m: m.count("Susceptible"),
            "Infected": lambda m: m.count("Infected"),
            "Recovered": lambda m: m.count("Recovered"),
        })

    def make_agents(self):
        """Populate the space: one infected agent, the rest susceptible."""
        def _random_pos():
            return np.array((self.random.random() * self.space.x_max,
                             self.random.random() * self.space.y_max))

        # Patient zero is always asymptomatic.
        for _ in range(0, 1):
            pos = _random_pos()
            patient_zero = Infected(self.next_id(), self, pos, True)
            self.space.place_agent(patient_zero, pos)
            self.schedule.add(patient_zero)

        for _ in range(self.population - 1):
            pos = _random_pos()
            person = Susceptible(self.next_id(), self, pos)
            self.space.place_agent(person, pos)
            self.schedule.add(person)

    def step(self):
        """Run infection, advance the location rotation, step all agents."""
        self.infect()

        # home -> work -> community -> home; "diffusion" stays put.
        transitions = {"home": "work",
                       "work": "community",
                       "community": "home"}
        self.state = transitions.get(self.state, self.state)

        self.schedule.step()

        # Collect data and stop once the infection has died out.
        self.datacollector.collect(self)
        if self.count("Infected") == 0:
            self.running = False

    def infect(self):
        """Replace susceptible agents near an infected one with Infected."""
        registry = self.schedule._agents
        susceptible_keys = [key for key in list(registry.keys())
                            if registry[key].name == "Susceptible"]

        for key in susceptible_keys:
            agent = registry[key]
            neighbors = self.space.get_neighbors(agent.pos,
                                                 self.social_distance)
            for neighbor in neighbors:
                if neighbor.name != "Infected":
                    continue
                # Draw whether the new infection is asymptomatic.
                asymptomatic = (100.0 * self.random.random() <
                                self.asymptomatic_percentage)
                replacement = Infected(self.next_id(), self, agent.pos,
                                       asymptomatic)
                if self.imperial:
                    replacement.set_imperial(agent.home, agent.work,
                                             agent.travel)
                # Swap the susceptible agent out for the infected one.
                self.space.remove_agent(agent)
                self.schedule.remove(agent)
                self.space.place_agent(replacement, replacement.pos)
                self.schedule.add(replacement)
                break

    def count(self, type):
        """Return how many scheduled agents have ``name`` equal to *type*."""
        return sum(1 for a in list(self.schedule._agents.values())
                   if a.name == type)
Example #10
0
class MarketModel(Model):
    """Supermarket simulation: customers, shelves and checkouts on a grid.

    Customer inflow follows a Gaussian cycle; checkouts are opened or
    closed dynamically based on the average queue length.
    """
    def __init__(self,
                 buff,
                 plot_buff,
                 max_agents_number,
                 market,
                 checkout_slider,
                 width=50,
                 height=50,
                 steps=3000):
        """Create the market model.

        Args:
            buff: shared list that collects the live customer agents.
            plot_buff: shared buffer the GUI reads agent counts from.
            max_agents_number: peak number of concurrent customers.
            market: market layout (articles, regals, cash registers).
            checkout_slider: controls how many checkouts open initially.
            width, height: grid dimensions.
            steps: nominal number of simulation steps.
        """
        self.steps = steps
        self.plot_buff = plot_buff
        self.running = True
        self.market = market
        self.checkout_slider = checkout_slider
        self.num_agents = max_agents_number
        self.schedule = RandomActivation(self)
        self.grid = MultiGrid(width, height, True)
        self.grid_mutex = Lock()
        # Monotonic counter used to assign unique agent IDs.
        self.agents_number = 1
        self.regal_agents = []
        self.checkout_agents = []
        self.opened_checkouts = []
        self.closed_checkouts = []
        # 1 marks cells blocked by checkouts/shelves (for pathfinding).
        self.space_graph = np.zeros((width, height))
        self.place_checkouts()
        self.place_regals()
        self.thread_pool = ThreadPool(20)
        self.customer_list = buff
        self.total_income = 0.0
        # Per-cell visit counts, shared with the plotting side.
        self.agent_counts = [[0 for x in range(self.grid.width)]
                             for y in range(self.grid.height)]
        self.plot_buff.append(self.agent_counts)
        self.plot_buff.append(self.grid.width)
        self.plot_buff.append(self.grid.height)
        self.income_data_collector = DataCollector(
            model_reporters={"total_income": get_income})

        self.queue_length_data_collector = DataCollector(
            model_reporters={
                "average_queue_length": compute_average_queue_size
            })

        self.open_checkouts()

    def add_agent(self):
        """Create a new customer (50/50 common or lazy) and place it."""
        i = random.randint(0, 1)
        if i == 0:
            a = CommonCustomer(self.agents_number, self, self.market.articles)
        else:
            a = LazyCustomer(self.agents_number, self, self.market.articles)
        self.agents_number += 1
        self.schedule.add(a)
        self.grid.place_agent(a, (a.x, a.y))
        self.customer_list.append(a)

    def place_checkouts(self):
        """Create a Checkout agent on every cash-register cell (all closed)."""
        for checkout_location in self.market.cashRegisters:
            checkout_agent = Checkout(self.agents_number, self,
                                      checkout_location)
            self.agents_number += 1
            self.grid.place_agent(checkout_agent, checkout_location)
            self.checkout_agents.append(checkout_agent)
            self.space_graph[checkout_location[0], checkout_location[1]] = 1
        self.closed_checkouts = self.checkout_agents

    def place_regals(self):
        """Place every regal (shelf rack) of the market layout."""
        for regal in self.market.regals:
            self.place_regal(regal)

    def place_regal(self, regal):
        """Place each shelf of a single regal."""
        for shelf in regal.shelf_list:
            self.place_shelf(shelf)

    def place_shelf(self, shelf):
        """Create a ShelfAgent for ``shelf`` and mark its cell as blocked."""
        shelf_agent = ShelfAgent(self.agents_number, self, shelf)
        pos = shelf_agent.get_location()
        self.agents_number += 1
        self.grid.place_agent(shelf_agent, pos)
        self.regal_agents.append(shelf_agent)
        self.space_graph[pos[0], pos[1]] = 1

    def open_checkouts(self):
        """Open an initial subset of checkouts sized by the slider."""
        # Bug fix: max(1, ...) guards against a zero range step (ValueError)
        # when the slider exceeds the number of checkouts.
        stride = max(1, len(self.checkout_agents) // self.checkout_slider)
        for i in range(0, len(self.checkout_agents), stride):
            checkout = self.closed_checkouts.pop(
                random.randint(0,
                               len(self.closed_checkouts) - 1))
            checkout.open()
            self.opened_checkouts.append(checkout)
            self.schedule.add(checkout)

    def open_random_checkout(self):
        """Open one randomly chosen closed checkout, if any exist."""
        if len(self.closed_checkouts) != 0:
            checkout = self.closed_checkouts.pop(
                random.randint(0,
                               len(self.closed_checkouts) - 1))
            checkout.open()
            self.opened_checkouts.append(checkout)
            self.schedule.add(checkout)

    def close_random_checkout(self):
        """Close one randomly chosen opened checkout, keeping at least one."""
        if len(self.opened_checkouts) > 1:
            checkout = self.opened_checkouts.pop(
                random.randint(0,
                               len(self.opened_checkouts) - 1))
            checkout.close()
            self.closed_checkouts.append(checkout)
            # self.schedule.add(checkout)

    def find_nearest_checkouts(self, location, n):
        """Return up to *n* opened checkouts ordered by distance to *location*.

        Bug fix: the previous version sorted the checkouts but returned
        ``ordered_list[0:]`` (every checkout), silently ignoring *n*.
        """
        ordered_list = sorted(self.opened_checkouts.copy(),
                              key=(lambda x:
                                   ((x.location[0] - location[0])**2) + (
                                       (x.location[1] - location[1])**2)))
        return ordered_list[:n]

    def generate_random_starting_pos(self):
        """Pick one of the three entrances (the middle one with jitter)."""
        pos_list = [(self.grid.width // 2 + 1, 0), (self.grid.width - 1, 1),
                    (self.grid.width - 1, self.grid.height - 2)]
        i = random.randint(0, len(pos_list) - 1)
        pos = pos_list[i]

        if i == 0:
            pos = (pos_list[0][0] + random.randint(-2, 2), pos_list[0][1])

        return pos

    def step(self):
        """Collect data, balance checkouts, spawn customers, step agents."""
        print(self.checkout_slider)
        self.income_data_collector.collect(self)
        self.queue_length_data_collector.collect(self)

        # Open/close checkouts around an average queue length of 3.
        if compute_average_queue_size(self) > 3:
            self.open_random_checkout()

        if compute_average_queue_size(self) < 3:
            self.close_random_checkout()

        # Target customer count follows a Gaussian over each 1500-step cycle.
        sigma = 1
        cycle_steps = 1500
        n = self.schedule.steps // cycle_steps + 1
        gauss = gaussian(
            self.schedule.steps * (6 * sigma / cycle_steps) - 3 * n * sigma, 0,
            sigma) * self.num_agents
        print(gauss)
        while len(self.schedule.agents) - len(
                self.checkout_agents) < np.ceil(gauss):
            self.add_agent()

        self.schedule.step()

    def move_agent(self, agent, new_pos):
        """Move ``agent`` on the grid and record the visit for plotting."""
        # self.grid_mutex.acquire()
        self.agent_counts[new_pos[0]][new_pos[1]] += 1
        self.plot_buff[0] = self.agent_counts
        self.grid.move_agent(agent, new_pos)
        # self.grid_mutex.release()

    def remove_agent(self, agent):
        """Remove ``agent`` from grid, schedule and (if common) customer list."""
        # self.grid_mutex.acquire()
        self.grid.remove_agent(agent)
        self.schedule.remove(agent)
        # NOTE(review): only CommonCustomer is removed from customer_list,
        # yet LazyCustomer is appended in add_agent() — confirm whether
        # lazy customers should be removed here as well.
        if type(agent) is CommonCustomer:
            self.customer_list.remove(agent)
        # self.grid_mutex.release()

    def get_customers(self):
        """Return the shared list of live customer agents."""
        return self.customer_list
class Community(Model):
    """An online community whose members read and post topic-tagged messages.

    Members accrue an information benefit (``InfoB``) from messages that
    match their interests; members whose benefit drops below 1 leave in
    post(), and a new member joins while the group has fewer than 25 agents.
    Relies on a module-level ``unique_id`` counter for new member IDs.
    """
    def __init__(self, N, M, topics):
        # Module-level counter shared with post(); tracks the highest
        # agent id handed out so far.
        global unique_id
        self.schedule = RandomActivation(self)

        # Group was initialized with n members
        self.num_agents = N
        # Group is initialized with m messages
        self.totalMessages = M
        self.messages = []
        self.member_joined = 0
        self.member_left = 0

        # Seed the message pool with randomly-topical messages.
        for x in range(self.totalMessages):
            # self.messages.append(Message("A"))
            self.messages.append(Message(random.choice(topics)))

        self.topics = topics
        # Create agents
        for i in range(self.num_agents):
            unique_id = i
            a = GroupMember(i, self)
            self.schedule.add(a)

        self.datacollector = DataCollector(
            model_reporters={
                "Average_Info_Benefit": compute_benefit,
                "Count_of_Members_Joined": compute_member_joined,
                "Count_of_Members_Left": compute_member_left

                # "Messge Topics": compute_topics
            })

    def step(self):
        """Update every member's information benefit from this round's messages."""
        # Look over all the agents
        for a in self.schedule.agents:

            # Calculate the cost to read messages
            if len(self.messages) > 0:
                count_of_interest = 0
                for x in self.messages:
                    if x.topic in a.topic_interests:
                        count_of_interest = count_of_interest + 1

                # Calculate reading benefit: 1 while < 40 | 1/x while above 40
                if count_of_interest < 40:
                    a.InfoB = a.InfoB + count_of_interest
                else:
                    a.InfoB = a.InfoB + 40
                    # NOTE(review): ``% 40`` wraps every 40 matches, so e.g.
                    # 80 interesting messages add no extra benefit —
                    # presumably ``count_of_interest - 40`` was intended;
                    # confirm before changing.
                    for j in range(1, count_of_interest % 40):
                        a.InfoB = a.InfoB + 1 / j

                # The cost is the proportion of irrelevant messages
                cost = 1 - (count_of_interest / len(self.messages))
                a.InfoB = a.InfoB - (a.InfoB * cost)

                # not_interesting = len(self.messages) - count_of_interest
                #
                # if not_interesting == 0:
                #     signal_to_noise = 1
                # else:
                #     signal_to_noise = count_of_interest / not_interesting
                #
                #
                # # If signal to noise is 0, all of the messages were not interesting
                # if signal_to_noise == 0:
                #     # If nothing was interesting remove half of your benefit
                #     a.InfoB = a.InfoB / 2
                # else:
                #     cost = len(self.messages) / signal_to_noise
                #     print(len(self.messages))
                #     a.InfoB = a.InfoB - cost

    def post(self):
        """Regenerate the message pool, then handle member churn and collect data."""
        global unique_id
        self.messages = []
        # Members with benefit >= 1 post one message on one of their interests.
        for a in self.schedule.agents:
            if a.InfoB >= 1:
                new_message_topic = random.choice(a.topic_interests)
                new_message = Message(new_message_topic)
                self.messages.append(new_message)
                # a.InfoB = a.InfoB - .1 * a.InfoB

        # While the community is below 25 members, one new member joins.
        if len(self.schedule.agents) < 25:
            a = GroupMember(unique_id + 1, self)
            unique_id = unique_id + 1
            self.schedule.add(a)
            self.member_joined = self.member_joined + 1

        # Members whose benefit fell below 1 leave the community.
        for a in self.schedule.agents:
            if a.InfoB < 1:
                self.schedule.remove(a)
                self.member_left = self.member_left + 1

        Agents = []
        for a in self.schedule.agents:
            Agents.append(a.topic_interests)
        print(Agents)

        self.datacollector.collect(self)
        self.schedule.step()
Example #12
0
class HumanitarianLogistics(Model):
    """Model of newcomer (asylum applicant) flow through a reception system.

    Cities host AZC reception centers managed by COA agencies, with IND
    agents handling asylum decisions. Newcomers arrive at a base rate and in
    periodic "shocks" during which the arrival rate ramps up.

    :param shock_period: number of steps between the start of shocks
    :param shock_duration: number of steps each shock lasts
    :param shock_rate: per-step increase of arrivals while a shock is active
    :param N_cities: number of cities in the simulation
    :param N_a: number of AZCs per city
    :param nc_rate: per-step probability of a batch of baseline arrivals
    :param width: grid width
    :param height: grid height
    """

    def __init__(self, shock_period, shock_duration, shock_rate, N_cities, N_a,
                 nc_rate, width, height):
        # Initialize Mesa Model internals (unique-id counter, RNG, running
        # flag). The original omitted this required call.
        super().__init__()

        # canvas info
        self.width = width
        self.height = height

        # sim boilerplate
        self.grid = MultiGrid(width, height, True)
        self.schedule = RandomActivation(self)
        self.running = True

        self.num_nc = 0  # counts number of applicants
        self.num_azc = N_a  # number of AZC in sim
        self.nc_rate = nc_rate  # rate of inflow of newcomers
        self.num_cities = N_cities  # number of cities in sim
        self.num_buildings = 3

        self.num_activity_centers = 2
        self.num_activities_per_center = 2
        self.num_per_step = 10  # newcomers added per baseline arrival batch

        # initialize shock values
        self.shock_period = shock_period  # how often does shock occur
        self.shock_duration = shock_duration  # how long does shock last
        self._shock_duration = shock_duration  # current position in shock
        self.shock_rate = shock_rate  # amt of increase during shock
        self.shock = False  # shock flag
        self.shock_counter = 0  # arrivals added during the current shock
        self.number_added = 1  # base rate of influx
        self.number_shocks = 4
        self.shock_growth = 2  # each shock's rate doubles the previous one's

        # dict of probabilities of first/second decision success rates by country
        self.specs = {}
        # list of multinomial probabilities for countries
        self.country_multinomial = []
        # list of shock distributions for countries related to adding newcomers
        self.country_shock_dist = []
        # list of countries
        self.country_list = []
        with open("country-input.csv") as csvfile:
            reader = csv.DictReader(csvfile)
            for row in reader:
                self.specs[row['Country']] = [
                    float(row['DecisionA']),
                    float(row['DecisionB'])
                ]
                # Cast to float: np.random.multinomial requires numeric
                # probabilities; the raw csv values are strings.
                self.country_shock_dist.append(float(row['ShockDist']))
                self.country_list.append(row['Country'])
                self.country_multinomial.append(float(row['Multinomial']))

        # keeps track of how many applicants arrived from each country
        self.country_count = np.zeros(len(self.country_list))
        self.country_success = np.zeros(len(self.country_list))
        self.current_country_index = -1

        # records capacity of each AZC type
        self.datacollector = DataCollector(model_reporters={
            'Cap - Extended-AS': calc_extended_as,
            'Cap - AS': calc_as
        })

        # records success rates of each country of origin using
        # current_country_index, which is manipulated in sr_country
        sr_functions = {}
        for i in range(0, len(self.country_list)):
            self.current_country_index = i
            sr_functions[self.country_list[
                self.current_country_index]] = sr_country

        self.sr = DataCollector(model_reporters=sr_functions)

        self.capacity_dc = DataCollector(model_reporters={
            'Current Capacity': coa_occ,
            'Projected Capacity': coa_proj
        })

        # Ter Apel: the central intake location every newcomer passes through.
        ta_pos = (int(self.width / 2), int(self.height / 6 * 5))
        ta_id = self.num_cities + 1
        ter_apel = City(ta_id, self, ta_pos)
        ta_coa = COA(ta_id, self, ter_apel)
        ta_coa.ta = True
        self.schedule.add(ta_coa)
        ta_ind = IND(ta_id, self, ter_apel)
        self.schedule.add(ta_ind)
        ta_azc = AZC(ta_id, self, 'edp', ta_pos, ta_coa)
        ta_azc.ta = True
        ta_coa.azcs.add(ta_azc)
        ta_coa.capacities[ta_azc] = ta_azc.occupancy
        self.schedule.add(ta_azc)
        self.grid.place_agent(ta_azc, ta_pos)
        self.ter_apel = ta_azc

        # add activities
        self.test_activity = Football(0, self, 5)
        self.schedule.add(self.test_activity)

        # generate cities
        for city in range(self.num_cities):
            space_per_city = int(self.width / self.num_cities)

            # center point for city
            orientation_x = int(space_per_city / 2 + city * space_per_city +
                                int(space_per_city / self.num_azc / 2))
            pos = (orientation_x, int(self.height / 2))  # placeholder position
            # ~30% of cities are flagged as big
            city_size = np.random.uniform(low=0, high=1)
            city_is_big = city_size > 0.70
            current_city = City(city, self, pos, city_is_big)

            # add COA and IND for this city, wired to each other
            current_coa = COA(city, self, current_city)
            current_city.coa = current_coa
            self.schedule.add(current_coa)
            self.grid.place_agent(current_coa,
                                  (pos[0], int(self.height / 3 * 2)))
            current_ind = IND(city, self, current_city)
            self.schedule.add(current_ind)
            current_coa.IND = current_ind
            current_ind.coa = current_coa

            # adds city to schedule and grid
            self.schedule.add(current_city)
            self.grid.place_agent(current_city, current_city.pos)

            # azc location essentials
            space_per_azc = int(space_per_city / self.num_azc)
            azc_starting_point = orientation_x - (.5 * space_per_city)

            # Create AZCs: the last two get special roles, the rest are
            # standard asylum-seeker centers.
            for i in range(self.num_azc):
                if i < self.num_azc - 2:
                    occupant_type = 'as'  # standard AZC
                elif i == self.num_azc - 2:
                    occupant_type = 'as_ext'  # extended procedure AZC
                else:
                    occupant_type = 'tr'  # housing for those with status

                # place evenly
                x = int(azc_starting_point + i * space_per_azc)
                y = int(self.height * .5)

                a = AZC(i, self, occupant_type, (x, y), current_coa)
                self.schedule.add(a)  # add in time
                self.grid.place_agent(a, (x, y))  # add in space
                current_city.buildings.add(a)
                if a.occupant_type != 'tr':
                    current_coa.azcs.add(a)
                    current_coa.capacities[a] = a.occupancy
                else:
                    current_city.social_housing = a

                # add viz
                v = AZC_Viz(self, a)
                self.schedule.add(v)
                self.grid.place_agent(v, v.pos)

            # create civilian buildings
            y = int(self.height / 5)
            space_per_building = space_per_city / self.num_buildings
            row_size = 15

            # The first city always gets one hotel and one empty building.
            if city == 0:
                x = int(azc_starting_point + .5 * space_per_building)
                current = Hotel(self.num_buildings + 1, self, (x, y), 1000)
                current_city.buildings.add(current)
                current.city = current_city
                self.grid.place_agent(current, (x, y))
                self.schedule.add(current)

                empty = Empty(self.num_buildings + 1, self,
                              (int(x + space_per_building), y), 100)
                current_city.buildings.add(empty)
                empty.city = current_city
                self.grid.place_agent(empty, (int(x + space_per_building), y))
                self.schedule.add(empty)

            # Later cities get progressively more buildings (city index *
            # num_buildings): one hotel, the rest empty, laid out in rows of 3.
            for bdg in range(city * self.num_buildings):
                x = int(azc_starting_point + (bdg % 3) * space_per_building)
                if bdg == 0:
                    current = Hotel(bdg, self, (x, y), 1000)
                    current_city.buildings.add(current)
                    current.city = current_city
                    self.grid.place_agent(current, (x, y))
                    self.schedule.add(current)
                else:
                    empty = Empty(bdg, self, (x, y - row_size * int(bdg / 3)),
                                  100 * bdg)
                    current_city.buildings.add(empty)
                    empty.city = current_city
                    self.grid.place_agent(empty,
                                          (x, y - row_size * int(bdg / 3)))
                    self.schedule.add(empty)

    def house(self, newcomer):
        """Move *newcomer* into a building matching their legal status.

        Picks the first AZC whose occupant_type equals the newcomer's legal
        status, moves the agent there (with positional jitter so agents don't
        overlap on the canvas) and updates both buildings' occupancy.
        """
        # find building for newcomer's legal status
        eligible_buildings = [
            x for x in self.schedule.agents
            if type(x) is AZC and x.occupant_type == newcomer.ls
        ]

        # take first one; in future, evaluate buildings on some criteria
        destination = eligible_buildings[0]
        house_loc = destination.pos  # where is it

        # 'edp' newcomers come straight from intake and have no prior building
        if newcomer.ls != 'edp':
            newcomer.loc.occupancy -= 1  # reduce occupancy of prev building

        # add noise so agents don't overlap
        x = house_loc[0] + np.random.randint(-20, 20)
        y = house_loc[1] + np.random.randint(-20, 20)

        self.grid.move_agent(newcomer, (x, y))  # place

        destination.occupants.add(newcomer)  # add agent to building roster
        newcomer.loc = destination  # update agent location

        destination.occupancy += 1  # update occupancy

    def Remove(self, agent):
        """Remove *agent* from its building, the schedule and the grid."""
        agent.loc.occupancy -= 1  # reduce occupancy of building
        agent.loc.occupants.remove(agent)

        # remove from time and space
        self.schedule.remove(agent)
        self.grid.remove_agent(agent)

    def shock_distribution(self):
        """Draw a country of origin from the shock multinomial distribution."""
        # draws a random discrete number from multinomial distribution
        country = np.random.multinomial(1, self.country_shock_dist, size=1)

        # turns that distribution into an index
        country = np.where(country == 1)[1][0]

        # maps that index to a country name
        country_of_origin = self.country_list[country]
        return country_of_origin

    def country_distribution(self):
        """Draw a country of origin from the baseline multinomial
        distribution, updating the per-country arrival count."""
        # draws a random discrete number from multinomial distribution
        country = np.random.multinomial(1, self.country_multinomial, size=1)

        # turns that distribution into an index
        country = np.where(country == 1)[1][0]

        # updates country count
        self.country_count[country] += 1

        # maps that index to a country name
        country_of_origin = self.country_list[country]
        return country_of_origin

    def addNewcomer(self, shock, country_of_origin):
        """Create one newcomer and route them through intake.

        :param shock: True when the arrival is part of a shock; then
            *country_of_origin* is used as-is, otherwise a country is drawn
            from the baseline distribution and the argument is ignored.
        :param country_of_origin: country name for shock arrivals (may be
            None for baseline arrivals).
        """
        # increase count
        self.num_nc += 1

        if not shock:
            country_of_origin = self.country_distribution()
        else:
            self.country_count[self.country_list.index(country_of_origin)] += 1

        x = np.random.randint(0, 10, dtype='int')
        y = np.random.randint(0, 10, dtype='int')
        # define newcomer
        r = Newcomer(self.num_nc, self, country_of_origin, (x, y))
        self.schedule.add(r)
        self.grid.place_agent(r, r.pos)

        # find coa (the first one is Ter Apel's)
        coa = [x for x in self.schedule.agents if type(x) is COA][0]

        coa.intake(r)  # place in Ter Apel
        coa.newcomers.add(r)  # adds NC to coa's list of residents
        r.coa = coa  # likewise for the newcomer

    def step(self):
        """Advance the model one tick: step agents, collect data and manage
        the shock/arrival schedule."""
        self.schedule.step()
        self.datacollector.collect(self)  # collects occupancy data
        self.sr.collect(self)  # collects success rate data
        self.capacity_dc.collect(self)

        # start a shock every shock_period steps
        if self.schedule.steps % self.shock_period == 0:
            self.shock = True
            self.shock_counter = 0

        if self.shock:
            # arrivals accelerate for the duration of the shock
            self.number_added += self.shock_rate
            for i in range(int(self.number_added)):
                shock_country = self.shock_distribution()
                self.addNewcomer(True, shock_country)
                self.shock_counter += 1
            self._shock_duration -= 1

            if self._shock_duration == 0:
                # shock over: reset state and make the next shock stronger
                self.shock = False
                self._shock_duration = self.shock_duration
                self.number_added = 1
                self.shock_counter = 0
                self.shock_rate = self.shock_rate * self.shock_growth
        else:
            # adds newcomers to simulation at a given baseline rate
            if uniform(0, 1) < self.nc_rate:
                for i in range(self.num_per_step):
                    self.addNewcomer(False, None)
Example #13
0
class ProtestModel(Model):
    """Agent-based model of a protest.

    Citizens (hardcore / hanger-on / observer subtypes), cops and media
    agents move on a grid containing flags and obstacles. Citizens activate
    based on grievance vs. perceived risk; cops arrest; media take pictures.
    Each model step represents 2 hours (12 steps per day); daily updates
    adjust perceived legitimacy from arrests, cop counts and pictures.
    """

    def __init__(self, initial_num_citizens, initial_num_media,
                 hardcore_density, hanger_on_density, observer_density,
                 agent_vision_radius, agent_move_falibility,
                 default_hardcore_move_vector, default_hanger_on_move_vector,
                 default_observer_move_vector, default_cop_move_vector,
                 default_media_move_vector, citizen_jailed_sensitivity,
                 citizen_pictures_sensitivity, citizen_cops_sensitivity,
                 max_days, height, width, agent_regions, obstacle_regions,
                 flag_regions, cop_regions, arrest_delay, jail_time):

        super().__init__()
        self.steps_per_day = 12

        # Population initialisation
        self.initial_num_cops = len(co_ords_for_area(cop_regions))
        self.initial_num_citizens = initial_num_citizens
        self.initial_num_media = initial_num_media
        self.hardcore_density = hardcore_density
        self.hanger_on_density = hanger_on_density
        # BUG FIX: this line previously read
        # `self.hanger_on_density = observer_density`, clobbering the
        # hanger-on density and skewing citizen subtype sampling.
        self.observer_density = observer_density

        # Agent init

        # Agent movement factors
        self.agent_vision_radius = agent_vision_radius
        self.agent_move_falibility = agent_move_falibility

        # vector order:
        # [violent, active, quiet, cop, media, flag, obstacle]
        self.default_hardcore_move_vector = default_hardcore_move_vector
        self.default_hanger_on_move_vector = default_hanger_on_move_vector
        self.default_observer_move_vector = default_observer_move_vector
        self.default_cop_move_vector = default_cop_move_vector
        self.default_media_move_vector = default_media_move_vector

        # Citizen legitimacy update factors
        self.citizen_jailed_sensitivity = citizen_jailed_sensitivity
        self.citizen_pictures_sensitivity = citizen_pictures_sensitivity
        self.citizen_cops_sensitivity = citizen_cops_sensitivity

        # Core model code
        # Model step represents 2 hours
        self.max_iters = max_days * self.steps_per_day
        self.iterations = 0
        self.schedule = RandomActivation(self)
        self.grid = Grid(width, height, torus=False)
        self.height = height
        self.width = width
        self.running = True

        self.previous_day_jailed_count = 0
        self.previous_day_pictures_count = 0
        self.previous_day_cops_count = self.initial_num_cops
        self.jailed_count = 0
        self.pictures_count = 0
        self.cops_count = self.initial_num_cops

        # Set such that when cops/agents are 2:1, the perceived arrest chance is 0.9
        self.arrest_delay = arrest_delay
        self.arrest_constant = 1.15
        self.jail = []  # stores jailed agents
        self.jail_time = jail_time  # Represents "harshness" of current regime.

        if not (hardcore_density + hanger_on_density + observer_density == 1):
            raise ConfigError("Protestor densities must add up to 1")

        if self.initial_num_cops + initial_num_citizens + initial_num_media > (
                height * width):
            raise ConfigError("Too many humans for the given grid")

        self.total_fights = 0
        self.total_jailed = 0
        self.hours_without_protest = 0
        self.hours_without_conflict = 0

        self.datacollector = DataCollector(
            model_reporters={
                "Quiet":
                lambda model: model.num_in_state("quiet"),
                "Active":
                lambda model: model.num_in_state("active"),
                "Violent":
                lambda model: model.num_in_state("violent"),
                "Fighting":
                lambda model: model.num_in_state("fighting"),
                "Protesting":
                lambda model: model.num_protesting(),
                "Jailed":
                lambda model: model.num_jailed(),
                "Frustrated":
                lambda model: model.num_frustrated(),
                "Average legitimacy":
                lambda model: model.average_legitimacy(),
                "Average grievance":
                lambda model: model.average_grievance(),
                "Average ripeness":
                lambda model: model.average_grievance() * model.num_in_state(
                    "quiet") / float(model.average_risk_aversion()),
                "Cop count":
                lambda model: model.num_cops(),
                "Num pictures":
                lambda model: model.num_pictures(),
                "Total fights":
                lambda model: model.total_fights,
                "Total jailed":
                lambda model: model.total_jailed,
                "Protest waiting time":
                lambda model: model.hours_without_protest,
                "Conflict waiting time":
                lambda model: model.hours_without_conflict,
            },
            agent_reporters={
                "perceived_gain":
                lambda agent: agent.perceived_gain()
                if isinstance(agent, Citizen) else 0,
                "net_risk_active":
                lambda agent: agent.net_risk("active")
                if isinstance(agent, Citizen) else 0,
                "net_risk_violent":
                lambda agent: agent.perceived_gain() - agent.net_risk(
                    "violent") if isinstance(agent, Citizen) else 0,
                "act_utils":
                lambda agent: agent.net_risk("violent")
                if isinstance(agent, Citizen) else 0,
                "threshold":
                lambda agent: agent.threshold
                if isinstance(agent, Citizen) else 0,
            })

        self.agent_regions = agent_regions
        self.cop_regions = cop_regions
        self.flag_regions = flag_regions
        self.obstacle_regions = obstacle_regions

        # Place static objects directly on the grid.
        for position in co_ords_for_area(obstacle_regions):
            self.grid[position[0]][position[1]] = Object("obstacle", position)

        for position in co_ords_for_area(flag_regions):
            self.grid[position[0]][position[1]] = Object("flag", position)

        unique_id = 1

        # One cop per cell of each cop region; frozen cops do not move.
        for cop_region in cop_regions:
            frozen = cop_region["frozen"]

            for position in co_ords_for_area([cop_region]):
                self.add_cop(unique_id, frozen, position[0], position[1])
                unique_id += 1

        # Scatter citizens and media over empty cells of the agent regions,
        # choosing the agent type proportionally to the requested mix.
        placed_media = 0
        placed_citizens = 0
        population = initial_num_media + initial_num_citizens
        while (placed_media + placed_citizens) < population:
            (x, y) = random.choice(co_ords_for_area(agent_regions))

            if self.grid.is_cell_empty((x, y)):
                seed = random.random()

                # Optimised for adding citizens
                if seed > (float(initial_num_media) / population):
                    if placed_citizens < initial_num_citizens:
                        self.add_citizen(unique_id, x, y)
                        placed_citizens += 1
                else:
                    if placed_media < initial_num_media:
                        placed_media += 1
                        self.add_media(unique_id, x, y)

                unique_id += 1

    def add_cop(self, id, frozen, x, y):
        """Create a Cop at (x, y) using the default cop movement vector."""
        vector = self.default_cop_move_vector
        cop = Cop(
            id,  #unique_id,
            self,  #model,
            (x, y),  #position,
            self.agent_vision_radius,  #agent_vision_radius,
            vector[0],  #violent_affinity,
            vector[1],  #active_affinity,
            vector[2],  #quiet_affinity,
            vector[3],  #cop_affinity,
            vector[4],  #media_affinity,
            vector[5],  #flag_affinity,
            vector[6],  #obstacle_affinity,
            frozen)
        self.add_agent(cop, x, y)

    def add_citizen(self, id, x, y):
        """Create a Citizen at (x, y), drawing a random subtype.

        The subtype (hardcore / hanger_on / observer) is sampled from the
        configured densities and determines the movement vector and the
        risk-tolerance bracket.
        """
        seed = random.random()

        # Choose a random subtype and setup parameter brackets and movement vector.
        if seed < self.hardcore_density:
            agent_type = "hardcore"
            vector = self.default_hardcore_move_vector
            risk_lower = 0.8
            risk_upper = 0.95
        elif seed < self.hardcore_density + self.hanger_on_density:
            agent_type = "hanger_on"
            vector = self.default_hanger_on_move_vector
            risk_lower = 0.4
            risk_upper = 0.6
        else:
            agent_type = "observer"
            vector = self.default_observer_move_vector
            risk_lower = 0
            risk_upper = 0.32

        citizen = Citizen(
            id,  #unique_id,
            self,  #model,
            (x, y),  #position,
            self.agent_vision_radius,  #agent_vision_radius,
            vector[0],  #violent_affinity,
            vector[1],  #active_affinity,
            vector[2],  #quiet_affinity,
            vector[3],  #cop_affinity,
            vector[4],  #media_affinity,
            vector[5],  #flag_affinity,
            vector[6],  #obstacle_affinity,
            agent_type,  #citizen_type,
            "quiet",  #state: all citizens start quiet
            random.uniform(
                0, 0.2
            ),  #hardship: uniform distribution, type independent.
            random.uniform(
                0.7, 0.9
            ),  #perceived_legitimacy: uniform distribution, type independent.
            random.uniform(risk_lower,
                           risk_upper),  #risk_tolerance: type dependent
            1 - random.uniform(
                risk_lower, risk_upper
            )  #threshold: type dependent, but reversed from risk profile
        )
        self.add_agent(citizen, x, y)

    def add_media(self, id, x, y):
        """Create a Media agent at (x, y) using the default media vector."""
        vector = self.default_media_move_vector
        media = Media(
            id,  #unique_id,
            self,  #model,
            (x, y),  #position,
            self.agent_vision_radius,  #agent_vision_radius,
            vector[0],  #violent_affinity,
            vector[1],  #active_affinity,
            vector[2],  #quiet_affinity,
            vector[3],  #cop_affinity,
            vector[4],  #media_affinity,
            vector[5],  #flag_affinity,
            vector[6]  #obstacle_affinity,
        )
        self.add_agent(media, x, y)

    def add_agent(self, agent, x, y):
        """Place *agent* on the grid at (x, y) and add it to the schedule."""
        self.grid[x][y] = agent
        self.schedule.add(agent)

    def num_jailed(self):
        """Number of citizens currently under arrest."""
        return len([
            agent for agent in self.schedule.agents
            if type(agent) is Citizen and agent.arrested
        ])

    def agents_in_state(self, state):
        """All citizens whose state equals *state*."""
        return [
            agent for agent in self.schedule.agents
            if type(agent) is Citizen and agent.state == state
        ]

    def num_in_state(self, state):
        """Number of citizens whose state equals *state*."""
        return len(self.agents_in_state(state))

    def num_protesting(self):
        """Citizens in any protesting state (fighting, violent or active)."""
        return self.num_in_state("fighting") + self.num_in_state(
            "violent") + self.num_in_state("active")

    # Frustrated means that perceived gain is sufficient
    # to trigger activation but net risk is preventing
    # said activation, leaving the agent frustrated.
    def num_frustrated(self):
        """Citizens whose gain exceeds their threshold but who stay inactive."""
        return len([
            agent for agent in self.schedule.agents
            if type(agent) is Citizen
            and agent.perceived_gain() > agent.threshold
            and agent.state not in ["violent", "active", "fighting"]
        ])

    def average_legitimacy(self):
        """Mean perceived legitimacy across citizens, scaled to 0-100.

        Returns 0.0 when no citizens remain (avoids ZeroDivisionError).
        """
        values = [
            agent.perceived_legitimacy for agent in self.schedule.agents
            if type(agent) is Citizen
        ]
        if not values:
            return 0.0
        return sum(values) / float(len(values)) * 100

    def average_grievance(self):
        """Mean perceived gain across citizens, scaled to 0-100.

        Returns 0.0 when no citizens remain (avoids ZeroDivisionError).
        """
        values = [
            agent.perceived_gain() for agent in self.schedule.agents
            if type(agent) is Citizen
        ]
        if not values:
            return 0.0
        return sum(values) / float(len(values)) * 100

    def average_risk_aversion(self):
        """Mean risk aversion (1 - risk_tolerance) across citizens, 0-100.

        Returns 0.0 when no citizens remain (avoids ZeroDivisionError).
        """
        values = [
            1 - agent.risk_tolerance for agent in self.schedule.agents
            if type(agent) is Citizen
        ]
        if not values:
            return 0.0
        return sum(values) / float(len(values)) * 100

    def num_pictures(self):
        """Total pictures taken by all media agents so far today."""
        return sum(agent.picture_count for agent in self.schedule.agents
                   if type(agent) is Media)

    def free_agent_from_jail(self, agent):
        """Place a released *agent* on a random empty cell of the agent
        regions and return the chosen position."""
        while True:
            position = random.choice(co_ords_for_area(self.agent_regions))
            if self.grid.is_cell_empty(position):
                self.grid[position[0]][position[1]] = agent
                return position

    def jail_agent(self, agent):
        """Take *agent* off the grid (jailed agents have no position)."""
        self.grid[agent.position[0]][agent.position[1]] = None
        agent.position = None
        agent.planned_position = None
        self.total_jailed += 1

    # This updates citizen perceived legitimacy based on
    # model level variables
    # and resets pictures taken by media agents.
    def daily_update(self):
        """Roll daily counters, update citizen legitimacy, reset pictures."""
        self.previous_day_jailed_count = self.jailed_count
        self.previous_day_pictures_count = self.pictures_count
        self.previous_day_cops_count = self.cops_count
        self.jailed_count = self.num_jailed()
        self.pictures_count = self.num_pictures()
        self.cops_count = self.num_cops()

        # Adjust perceived legitimacy of all agents based on arrests, cops
        # and pictures of fights.
        for agent in self.schedule.agents:
            if type(agent) is Citizen:
                agent.update_legitimacy()

        # Reset number of pictures taken by reporters
        for agent in self.schedule.agents:
            if type(agent) is Media:
                agent.picture_count = 0

    def experimental_changes(self):
        """Apply scripted experimental events at fixed iterations.

        At `initial_spark_iteration` every citizen's hardship rises and
        perceived legitimacy falls (the "spark"). At
        `protest_response_iteration` the regime may add or remove cops,
        controlled by the `protest_response` constant.
        """
        initial_spark_iteration = 10

        spark_hardship_increase = 0.25
        spark_legitimacy_decrease = 0.4

        protest_response_iteration = 30
        protest_response = "none"  # "cops", "remove_cops" or "none"
        cop_modifier = 150

        if self.iterations == initial_spark_iteration:
            for agent in self.schedule.agents:
                if type(agent) is Citizen:
                    agent.hardship += spark_hardship_increase
                    agent.perceived_legitimacy -= spark_legitimacy_decrease

        if self.iterations == protest_response_iteration:
            if protest_response == "cops":
                # Add cop_modifier new cops on empty cells, continuing the
                # unique-id sequence from the current maximum.
                max_id = max(agent.unique_id
                             for agent in self.schedule.agents)
                unique_id = max_id + 1

                placed = 0
                while placed < cop_modifier:
                    position = random.choice(
                        co_ords_for_area(self.agent_regions))
                    if self.grid.is_cell_empty(position):
                        self.add_cop(unique_id, False, position[0],
                                     position[1])
                        unique_id += 1
                        placed += 1

            elif protest_response == "remove_cops":
                # Remove up to cop_modifier randomly chosen cops.
                removed = 0
                while removed < cop_modifier and self.num_cops() > 0:
                    cop = random.choice([
                        agent for agent in self.schedule.agents
                        if type(agent) is Cop
                    ])
                    self.schedule.remove(cop)
                    self.grid[cop.position[0]][cop.position[1]] = None
                    removed += 1

    # Advance the model a single iteration.
    def step(self):
        """Advance one iteration: record waiting times, collect data, step
        agents, run daily updates and scripted experimental changes."""
        # Collect waiting time information.
        if self.num_protesting() > (0.25 * self.initial_num_citizens):
            self.hours_without_protest = 0
        else:
            self.hours_without_protest += 1

        if self.num_in_state("fighting") + self.num_in_state("violent") > (
                0.15 * self.initial_num_citizens):
            self.hours_without_conflict = 0
        else:
            self.hours_without_conflict += 1

        # Run data collector.
        self.datacollector.collect(self)

        # Step the model
        self.schedule.step()
        self.iterations += 1
        if self.iterations > self.max_iters:
            self.running = False

        # Perform updates that occur once per 'day', ie: on a non-iteration basis.
        if self.iterations % self.steps_per_day == 0:
            self.daily_update()

        # Check for experimental changes once per iteration.
        self.experimental_changes()
class CivilViolenceModel(Model):
    """ Civil violence model class """
    def __init__(self,
                 max_iter=200,
                 height=40,
                 width=40,
                 agent_density=0.7,
                 agent_vision=7,
                 active_agent_density=0.01,
                 cop_density=0.04,
                 cop_vision=7,
                 inf_threshold=40,
                 tackle_inf=False,
                 k=2.3,
                 graph_type=GraphType.BARABASI_ALBERT.name,
                 p=0.1,
                 p_ws=0.1,
                 directed=False,
                 max_jail_term=30,
                 active_threshold_t=0.1,
                 initial_legitimacy_l0=0.82,
                 movement=True,
                 seed=None):
        """
        Create a new civil violence model.

        :param max_iter: Maximum number of steps in the simulation.
        :param height: Grid height.
        :param width: Grid width.
        :param agent_density: Approximate percentage of cells occupied by citizen agents.
        :param agent_vision: Radius of the agent vision in every direction.
        :param active_agent_density: Enforce initial percentage of cells occupied by active agents.
        :param cop_density: Approximate percentage of cells occupied by cops.
        :param cop_vision: Radius of the cop vision in every direction.
        :param initial_legitimacy_l0: Initial legitimacy of the central authority.
        :param inf_threshold: Amount of nodes that need to be connected before an agent is considered an influencer.
        :param tackle_inf: Remove influencer when outbreaks starting
        :param max_jail_term: Maximal jail term.
        :param active_threshold_t: Threshold where citizen agent became active.
        :param k: Arrest term constant k.
        :param graph_type: Graph used to build network
        :param p: Probability for edge creation
        :param p_ws: Rewiring probability (used by Watts-Strogatz style graphs).
        :param directed: Is graph directed
        :param movement: Can agent move at end of an iteration
        :param seed: random seed

        Additional attributes:
            running : is the model running
            iteration : current step of the simulation
            citizen_list : a list storing the citizen agents added to the model.
            influencer_list : a list storing the citizen agents that are influencers

            grid : A 2D cellular automata representing the real world space environment
            network : A NetworkGrid with as many nodes as (citizen) agents representing the social network.
            Agents in the NetworkGrid are deep copies of agents in the MultiGrid as the Mesa implementation
            is based on the usage of a single space. (Example: NetworkGrid place_agent method will change
            the "pos" attribute of an agent, meaning one agent can't be on both MultiGrid and NetworkGrid.)
            We maintain a dictionary of agent positions instead.
        """
        super().__init__()

        # =============================
        # === Initialize attributes ===
        # =============================

        # Seed the model-level RNG so runs are reproducible.
        self.seed = seed
        self.random.seed(self.seed)

        # Initialize Model grid and schedule
        self.height = height
        self.width = width
        self.grid = MultiGrid(self.width, self.height,
                              torus=True)  # Grid or MultiGrid ?
        self.schedule = RandomActivation(self)
        self.max_iter = max_iter
        self.iteration = 0  # Simulation iteration counter
        self.movement = movement

        # Set Model main attributes
        self.max_jail_term = max_jail_term
        self.active_threshold_t = active_threshold_t
        self.initial_legitimacy_l0 = initial_legitimacy_l0
        self.legitimacy = initial_legitimacy_l0
        self.k = k
        self.graph_type = graph_type

        self.agent_density = agent_density
        self.agent_vision = agent_vision
        self.active_agent_density = active_agent_density
        self.cop_density = cop_density
        self.cop_vision = cop_vision
        self.inf_threshold = inf_threshold

        self.citizen_list = []
        self.cop_list = []
        self.influencer_list = []
        # Rolling jail counts used by update_legitimacy (Epstein 2001):
        # [0] = jailings this step, [1..3] = normalized counts of past steps.
        self.jailings_list = [0, 0, 0, 0]
        self.outbreaks = 0
        self.outbreak_now = 0
        self.outbreak_influencer_now = False
        self.tackle_inf = tackle_inf

        # Output path is timestamped so successive runs don't overwrite.
        date = datetime.now()
        self.path = f'output/{self.graph_type}_{date.month}_{date.day}_{date.hour}_{date.minute}_'

        # === Set Data collection ===
        self.datacollector = DataCollector(
            model_reporters=self.get_model_reporters(),
            agent_reporters=self.get_agent_reporters())

        # ==============================
        # === Initialize environment ===
        # ==============================

        # Add agents to the model. One uniform draw per cell decides which
        # (if any) agent type occupies it.
        unique_id = 0
        for (contents, x, y) in self.grid.coord_iter():
            random_x = self.random.random()
            if random_x < self.agent_density:
                # Quiescent citizen with the configured activation threshold.
                unique_id = self._add_citizen(unique_id, (x, y),
                                              self.active_threshold_t)

            elif random_x < (self.agent_density + self.active_agent_density):
                # Enforce an initial proportion of active agents:
                # threshold 0 makes the citizen active immediately.
                unique_id = self._add_citizen(unique_id, (x, y), 0)

            elif random_x < (self.agent_density + self.active_agent_density +
                             self.cop_density):
                # Add law enforcement officer
                agent = Cop(unique_id=unique_id,
                            model=self,
                            pos=(x, y),
                            vision=self.cop_vision)

                unique_id += 1
                self.cop_list.append(agent)
                self.grid.place_agent(agent,
                                      (x, y))  # Place agent in the MultiGrid
                self.schedule.add(agent)

        # Generate a social network composed of every civilian agent.
        self.G, self.network_dict = generate_network(self.citizen_list,
                                                     graph_type, p, p_ws,
                                                     directed, seed)
        # print_network(self.G, self.network_dict)  # Uncomment to print the network.

        # With network in place, set the influencers.
        self.set_influencers(self.inf_threshold)

        # Create the graph showing the frequency of degrees for the nodes.
        create_fig(self.G.degree, draw=False)  # Set draw=True to draw a figure

        self.running = True
        self.datacollector.collect(self)

    def _add_citizen(self, unique_id, pos, threshold):
        """
        Create one Citizen agent at *pos* with the given activation
        *threshold*, register it on the grid and schedule, and return the
        next unique id. Randomized traits are drawn from the seeded model RNG
        in a fixed order so runs stay reproducible.
        """
        agent = Citizen(unique_id=unique_id,
                        model=self,
                        pos=pos,
                        hardship=self.random.random(),
                        susceptibility=self.random.random(),
                        influence=self.random.random(),
                        expression_intensity=self.random.random(),
                        legitimacy=self.initial_legitimacy_l0,
                        risk_aversion=self.random.random(),
                        threshold=threshold,
                        vision=self.agent_vision)

        self.citizen_list.append(agent)
        self.grid.place_agent(agent, pos)  # Place agent in the MultiGrid
        self.schedule.add(agent)
        return unique_id + 1

    def step(self):
        """
        One step in agent-based model simulation
        """

        self.schedule.step()
        self.iteration += 1
        self.update_legitimacy()

        self.outbreak_score_monitoring()
        self.datacollector.collect(self)

        # Save initial values
        if self.iteration == 1:
            self.save_initial_values(save=False)

        # Stop the model after a certain amount of iterations.
        if self.iteration > self.max_iter:
            self.save_data(save=False)
            self.running = False

    def outbreak_score_monitoring(self):
        """
        Track outbreaks of citizen activity.

        When ``tackle_inf`` is set, influencers are jailed as soon as the
        number of active citizens crosses 30; the flag is reset when
        activity drops back below that level. An outbreak is counted each
        time active citizens exceed 50 (rearm threshold: below 50).
        """
        if self.tackle_inf:
            # Jail influencers once per outbreak episode.
            if self.count_type_citizens(
                    "ACTIVE") > 30 and not self.outbreak_influencer_now:
                self.jail_influencer()
                self.outbreak_influencer_now = True

            if self.count_type_citizens("ACTIVE") < 30:
                self.outbreak_influencer_now = False

        # Count amount of outbreaks
        if self.count_type_citizens("ACTIVE") > 50 and self.outbreak_now == 0:
            self.outbreaks += 1  # Total number of outbreak
            self.outbreak_now = 1  # Indicate if outbreak now

        if self.count_type_citizens("ACTIVE") < 50:
            self.outbreak_now = 0

    def save_data(self, save=True):
        """
        Write the collected per-agent data to ``<path>run_values.csv``.

        :param save: skip the write when exactly False (explicit kill switch).
        """
        if save is False:
            return

        df_end = self.datacollector.get_agent_vars_dataframe()
        name = self.path + 'run_values.csv'
        df_end.to_csv(name)

    def save_initial_values(self, save=False):
        """
        Dump the model's initial parameters to ``<path>ini_values.json``.

        :param save: skip the write when exactly False (explicit kill switch).
        """
        if save is False:
            return

        dictionary_data = {
            'agent_density': self.agent_density,
            'agent_vision': self.agent_vision,
            'active_agent_density': self.active_agent_density,
            'cop_density': self.cop_density,
            'initial_legitimacy_l0': self.initial_legitimacy_l0,
            'inf_threshold': self.inf_threshold,
            'max_iter': self.max_iter,
            'max_jail_term': self.max_jail_term,
            'active_threshold_t': self.active_threshold_t,
            'k': self.k,
            'graph_type': self.graph_type,
        }

        name = self.path + 'ini_values.json'
        # Context manager guarantees the handle is closed even on error.
        with open(name, "w") as a_file:
            json.dump(dictionary_data, a_file)

    def update_legitimacy(self):
        """
        Compute legitimacy (Epstein Working Paper 2001)
        """
        # Age the rolling window of jailing counts.
        self.jailings_list[3] = self.jailings_list[2]
        self.jailings_list[2] = self.jailings_list[1]
        nb_active_and_quiescent = self.count_type_citizens(
            "ACTIVE") + self.count_type_citizens("QUIESCENT")
        # + 1 avoids division by zero when every citizen is jailed.
        self.jailings_list[1] = self.jailings_list[0] / (
            nb_active_and_quiescent + 1)
        self.jailings_list[0] = 0

        sum_jailed = self.jailings_list[1] - self.jailings_list[
            2]**2 - self.jailings_list[3]**3
        self.legitimacy = self.initial_legitimacy_l0 * (1 - sum_jailed)
        # Legitimacy is clamped at zero; it can't go negative.
        if self.legitimacy <= 0:
            self.legitimacy = 0

    def get_model_reporters(self):
        """
        Dictionary of model reporter names and attributes/funcs
        Reference to functions instead of lambda are provided to handle multiprocessing case.
        Multiprocessing pool cannot directly handle lambda.
        """
        return {
            "QUIESCENT": compute_quiescent,
            "ACTIVE": compute_active,
            # NOTE(review): "JAILED" duplicates the ACTIVE reporter — likely
            # meant to be a compute_jailed function; confirm before relying
            # on the JAILED column in collected data.
            "JAILED": compute_active,
            "LEGITIMACY": compute_legitimacy,
            "INFLUENCERS": compute_influencers,
            "OUTBREAKS": compute_outbreaks
        }

    def get_agent_reporters(self):
        """
        Dictionary of agent reporter names and attributes/funcs
        """

        return {
            "Grievance": "grievance",
            "Hardship": "hardship",
            "State": "state",
            "Influencer": "influencer",
            "N_connections": "network_neighbors",
            "InfluencePi": "influence"
        }

    def count_type_citizens(self, state_req):
        """
        Helper method to count agents.
        Cop agents can't disappear from the map, so number of cops can be retrieved from model attributes.

        :param state_req: one of 'JAILED', 'ACTIVE', 'QUIESCENT'.
        :return: number of citizens matching the requested state.
        """
        count = 0
        for agent in self.citizen_list:
            if type(agent).__name__.upper() == 'COP':
                continue
            # A jailed citizen is counted only as JAILED, never as
            # active/quiescent.
            if agent.jail_sentence and state_req == 'JAILED':
                count += 1
            else:
                if agent.state is State.ACTIVE and state_req == 'ACTIVE':
                    count += 1
                elif agent.state is State.QUIESCENT and state_req == 'QUIESCENT':
                    count += 1
        return count

    def remove_agent_grid(self, agent):
        """
        Removes an agent from the grid.
        """
        self.grid.remove_agent(agent)

    def add_jailed(self, agent):
        """
        Un-jail an agent
        If the sentence of a jailed agent is over, place him back on a random empty cell in the grid.

        :raises Exception: when the grid has no empty cell left.
        """
        if not self.grid.empties:
            raise Exception("There are no empty cells.")

        new_pos = self.random.choice(list(self.grid.empties))
        self.grid.place_agent(agent, new_pos)

    def set_influencers(self, inf_threshold=150):
        """
        If an agent in the network is connected to a large amount of nodes, this agent can
        be considered an influencer and receives a corresponding tag.
        :param inf_threshold: determine how many connections a node needs to be considered an influencer
        """
        for agent in self.citizen_list:
            agent.set_influencer(
                len(list(self.G.neighbors(agent.network_node))), inf_threshold)
            if agent.influencer:
                self.influencer_list.append(agent)

    def remove_influencer(self):
        """
        Removes a random agent with the influencer tag from the grid.
        Gives manual control over the model to evaluate the influence of influencers.
        """
        if self.influencer_list:
            # range() is evaluated once, so this removes every influencer
            # present when the loop starts.
            for i in range(len(self.influencer_list)):
                to_remove = self.random.choice(self.influencer_list)
                if to_remove.pos:  # Check if influencer is jailed.
                    self.grid.remove_agent(to_remove)
                self.influencer_list.remove(to_remove)
                self.citizen_list.remove(to_remove)
                self.schedule.remove(to_remove)
                self.G.remove_node(to_remove.network_node)

    def jail_influencer(self):
        """
        Jail a random agent with the influencer tag from the grid.
        Gives manual control over the model to evaluate the influence of influencers.
        """
        if self.influencer_list:
            for i in range(len(self.influencer_list)):
                arrestee = self.random.choice(self.influencer_list)
                if arrestee.state == State.JAILED:  # Check if influencer is jailed.
                    continue
                # Use the seeded model RNG (not the module-level one) so the
                # sentence draw stays reproducible under a fixed seed.
                sentence = self.random.randint(1, self.max_jail_term)
                arrestee.jail_sentence = sentence
                arrestee.state = State.JAILED
                self.jailings_list[0] += 1
                if sentence > 0:
                    self.remove_agent_grid(arrestee)

                print(arrestee.unique_id,
                      ' was an influencer and has been jailed.')
Example #15
0
class EvacuationModel(Model):
    """
    This is a simulation of a crowd evacuation from a building.
    Several variables are taken into account: the knowledge of the emergency exits, the age and weight of the agents
    and the presence of stewards that can guide agents toward the emergency exits.
    Agents have different strategies to escape the building such as taking the shortest path to an exit or a random one.

    The goal is to study which combinations of agent types are more likely to escape the building and save themselves and
    how the amount of casualties varies with respect to the different variables.
    """
    def __init__(self,
                 N=10,
                 K=0,
                 width=50,
                 height=50,
                 fire_x=1,
                 fire_y=1,
                 civil_info_exchange=True):
        """
        Build the environment, the fire source, and the agent population.

        :param N: number of civilian agents.
        :param K: number of steward agents.
        :param width: grid width.
        :param height: grid height.
        :param fire_x: x coordinate of the initial fire position.
        :param fire_y: y coordinate of the initial fire position.
        :param civil_info_exchange: whether civilians share exit knowledge.
        """
        self.num_civilians = N
        self.num_stewards = K
        self.civil_info_exchange = civil_info_exchange
        self.fire_initial_pos = (fire_x, fire_y)
        self.warning_UI = ""
        self.agents_alive = N + K  # Agents alive and inside the building
        self.agents_saved = []  # Agents that managed to get out
        self.agents_killed = []  # Agents that perished during the evacuation
        self.grid = SingleGrid(height, width, False)
        self.graph = None  # General graph representing walkable terrain
        self.schedule = RandomActivation(
            self)  # Every tick, agents move in a different random order

        # Create exits: three on the left wall plus three on the right wall.
        self.pos_exits = [(0, 5), (0, 25), (0, 45)]
        for i in range(3):
            self.pos_exits.append((self.grid.width - 1, 14 + i))

        self.draw_environment(self.pos_exits)
        self.graph = path_finding.create_graph(self)

        # Define data collector. The reporter argument is the model instance
        # supplied by DataCollector; the closures read this model directly.
        model_collector = {
            "Agents killed": lambda _model: len(self.agents_killed),
            "Agents saved": lambda _model: len(self.agents_saved)
        }
        for exit_pos in self.pos_exits:
            title = "Exit {}".format(exit_pos)
            model_collector[title] = partial(count_agents_saved, exit_pos)
        self.datacollector = DataCollector(model_reporters=model_collector)

        # Create fire (only 1 source of fire since we are setting it from UI).
        # If the requested position falls outside the E-shaped building, fall
        # back to (1, 1) and warn the user.
        x, y = self.fire_initial_pos
        if not self.is_inside_square((x, y), (0, 29),
                                     (25, 39)) and not self.is_inside_square(
                                         (x, y), (0, 10), (25, 20)):
            pos = self.fire_initial_pos
        else:
            pos = (1, 1)
            self.warning_UI = "<b>WARNING:</b> Sorry but the position of the fire is outside of the building, " \
                              "change the setting and click reset simulation."
        fire_agent = FireAgent(pos, self)
        self.schedule.add(fire_agent)
        self.grid.place_agent(fire_agent, pos)

        # Create civilian agents: each knows at least the main entrance
        # (the last three exits in the list).
        for i in range(self.num_civilians):
            known_exits = self.pos_exits[-3:]
            a = CivilianAgent(i, self, known_exits)
            self.schedule.add(a)
            self._place_randomly_in_building(a)

        # Create steward agents: a steward knows all exits.
        for i in range(self.num_civilians,
                       self.num_civilians + self.num_stewards):
            known_exits = self.pos_exits
            a = StewardAgent(i, self, known_exits)
            self.schedule.add(a)
            self._place_randomly_in_building(a)

        self.running = True  # Set this to false when we want to finish simulation (e.g. all agents are out of building)
        self.datacollector.collect(self)

    def _place_randomly_in_building(self, agent):
        """
        Place *agent* on a random empty grid cell strictly inside the
        building (i.e. outside the two cut-out regions of the E shape).
        """
        while True:
            # pick the random coordinate
            x = self.random.randrange(1, self.grid.width - 1)
            y = self.random.randrange(1, self.grid.height - 1)
            # check if the point is empty and inside of the building
            if self.grid.is_cell_empty((x, y)) and not self.is_inside_square((x, y), (0, 29), (25, 39)) \
                    and not self.is_inside_square((x, y), (0, 10), (25, 20)):
                break

        self.grid.place_agent(agent, (x, y))

    @staticmethod
    def is_inside_square(point, bottom_left, top_right):
        """Return True when *point* lies within the axis-aligned rectangle
        spanned by *bottom_left* and *top_right* (inclusive bounds)."""
        return bottom_left[0] <= point[0] <= top_right[0] and bottom_left[
            1] <= point[1] <= top_right[1]

    def step(self):
        """Advance the simulation one tick and collect data."""
        self.schedule.step()
        # collect data
        self.datacollector.collect(self)

        # Halt if no more agents in the building
        if self.count_agents(self) == 0:
            self.running = False

    def remove_agent(self, agent, reason, **kwargs):
        """
        Removes an agent from the simulation. Depending on the reason it is
        recorded as saved or as killed by the fire.
        Args:
            agent (Agent):
            reason (Reasons):

        Returns:
            None
        """
        if reason == Reasons.SAVED:
            self.agents_saved.append(agent)
        elif reason == Reasons.KILLED_BY_FIRE:
            self.agents_killed.append(agent)

        self.agents_alive -= 1
        self.schedule.remove(agent)
        self.grid.remove_agent(agent)

    def draw_environment(self, exits=None):
        """
        Draw the E-shaped building: vertical segments, horizontal bars,
        the outer contour, and finally the exits.
        """
        length_E = int(self.grid.height /
                       5)  # length of the vertical segments of the E
        depth_E = int(self.grid.width /
                      2)  # length of the horizontal segments of the E
        # Vertical segments on the spine (left side) of the E.
        for i in range(3):
            start = max(0, 2 * i * length_E)
            self.draw_wall((0, start), (0, start + length_E - 1))
        # Vertical segments at the tips of the E's arms.
        for i in range(2):
            start = 2 * i * length_E + length_E
            self.draw_wall((depth_E, start), (depth_E, start + length_E - 1))
        # Horizontal lines of the E (BB)
        aux_y_coord = [
            length_E, 2 * length_E, 3 * length_E - 1, 4 * length_E - 1
        ]
        for y in aux_y_coord:
            self.draw_wall((0, y), (depth_E, y))
        top_left_corner = (0, self.grid.height - 1)
        top_right_corner = (self.grid.width - 1, self.grid.height - 1)
        bottom_right_corner = (self.grid.width - 1, 0)
        # Draw long contour lines E
        self.draw_wall((0, 0), bottom_right_corner)
        self.draw_wall(top_left_corner, top_right_corner)
        self.draw_wall(bottom_right_corner, top_right_corner)

        # Draw exits
        self.draw_exits(exits)

    def draw_wall(self, start, end):
        """
        Draws a line that goes from start point to end point.

        Args:
            start (List): Coordinates of line's starting point
            end (List): Coordinates of line's end point

        Returns:
            None
        """
        diff_x, diff_y = np.subtract(end, start)
        wall_coordinates = np.asarray(start)

        if self.grid.is_cell_empty(wall_coordinates.tolist()):
            w = WallAgent(wall_coordinates.tolist(), self)
            self.grid.place_agent(w, wall_coordinates.tolist())

        while diff_x != 0 or diff_y != 0:
            # BUGFIX: decrement by the step's sign rather than a literal 1,
            # otherwise a wall drawn in a negative direction (end < start)
            # never converges and this loop spins forever.
            if abs(diff_x) == abs(diff_y):
                # diagonal wall
                wall_coordinates[0] += np.sign(diff_x)
                wall_coordinates[1] += np.sign(diff_y)
                diff_x -= np.sign(diff_x)
                diff_y -= np.sign(diff_y)
            elif abs(diff_x) < abs(diff_y):
                # wall built in y dimension
                wall_coordinates[1] += np.sign(diff_y)
                diff_y -= np.sign(diff_y)
            else:
                # wall built in x dimension
                wall_coordinates[0] += np.sign(diff_x)
                diff_x -= np.sign(diff_x)
            if self.grid.is_cell_empty(wall_coordinates.tolist()):
                w = WallAgent(wall_coordinates.tolist(), self)
                self.grid.place_agent(w, wall_coordinates.tolist())

    def draw_exits(self, exits_list):
        """Place an ExitAgent on every position in *exits_list*, replacing
        any wall currently occupying that cell."""
        for ext in exits_list:
            e = ExitAgent(ext, self)
            if not self.grid.is_cell_empty(ext):
                # Only walls should exist in the grid at this time, so no need to remove it from scheduler
                agent = self.grid.get_cell_list_contents(ext)
                self.grid.remove_agent(agent[0])
            # Place exit
            self.schedule.add(e)
            self.grid.place_agent(e, ext)

    def spread_fire(self, fire_agent):
        """Spread fire to every neighboring cell of *fire_agent*; human
        agents caught by the spread are killed and replaced by fire."""
        fire_neighbors = self.grid.get_neighborhood(fire_agent.pos,
                                                    moore=True,
                                                    include_center=False)
        for grid_space in fire_neighbors:
            if self.grid.is_cell_empty(grid_space):
                # Create new fire agent and add it to grid and scheduler
                new_fire_agent = FireAgent(grid_space, self)
                self.schedule.add(new_fire_agent)
                self.grid.place_agent(new_fire_agent, grid_space)
            else:
                # If human agents, eliminate them and spread anyway
                agent = self.grid.get_cell_list_contents(grid_space)[0]
                if isinstance(agent, (CivilianAgent, StewardAgent)):
                    new_fire_agent = FireAgent(grid_space, self)
                    self.remove_agent(agent, Reasons.KILLED_BY_FIRE)
                    self.schedule.add(new_fire_agent)
                    self.grid.place_agent(new_fire_agent, grid_space)

    @staticmethod
    def count_agents(model):
        """
        Helper method to count agents alive and still in the building.
        """
        # Exact type comparison (not isinstance) preserved on purpose:
        # only plain civilians/stewards count, matching original behavior.
        return sum(1 for agent in model.schedule.agents
                   if type(agent) in (CivilianAgent, StewardAgent))
class modelSim(Model):
    """
    Spatial eco-evolutionary model of plants (A1) and animals (A2) living
    on a 200x200 toroidal grid.

    introduce_time is the step at which animal agents first get introduced
    into the world.
    disp_rate is the dispersal rate for experiment 3.
    dist is the perceptual strength of animals if fixed.
    det is the decision determinacy of animals if fixed.
    cog_fixed determines whether the cognition of animals is fixed to
    particular values or is allowed to evolve.
    If skip_300 is True, patchiness values are not calculated for the first
    300 steps -- this makes the model run faster.
    collect_cog_dist creates a separate dataframe of all cognition values
    for agents every 10th timestep.
    If evolve_disp is True, the dispersal rate of plants is free to evolve.
    """

    def __init__(self, introduce_time, disp_rate, dist, det, cog_fixed = False, \
                 skip_300 = True, collect_cog_dist = False, evolve_disp = False):

        # run configuration flags (see class docstring)
        self.skip_300 = skip_300
        self.cog_fixed = cog_fixed
        self.evolve_disp = evolve_disp
        self.collect_cog_dist = collect_cog_dist
        self.dist = dist
        self.det = det
        self.disp_rate = disp_rate
        self.intro_time = introduce_time
        # initial number of plant (a1) and animal (a2) agents
        (self.a1num, self.a2num) = (20, 20)
        self.schedule = RandomActivation(
            self)  # agents take a step in random order
        self.grid = SingleGrid(
            200, 200,
            True)  # the world is a grid with specified height and width

        self.initialize_perception()

        # geometric dispersal kernel over distances 0..99, normalized to sum to 1
        disp = np.power(self.disp_rate, range(0, 100))
        self.disp = disp / sum(disp)
        # wrapped (toroidal) Chebyshev distance of every cell from the grid center
        self.grid_ind = np.indices((200, 200))
        positions = np.maximum(abs(100 - self.grid_ind[0]),
                               abs(100 - self.grid_ind[1]))
        self.positions = np.minimum(positions, 200 - positions)

        self.agentgrid = np.zeros(
            (self.grid.width, self.grid.height
             ))  # allows for calculation of patchiness of both agents
        # per-cell cognition values; 101.0 is a sentinel meaning "no animal here"
        self.coggrid = np.full(
            (self.nCogPar, self.grid.width, self.grid.height), 101.0)
        self.dispgrid = np.full((2, self.grid.width, self.grid.height), 101.0)
        self.age = []
        # step counter, next agent id, and per-step event tallies
        (self.nstep, self.unique_id, self.reprod, self.food, self.death,
         self.combat) = (0, 0, 0, 0, 0, 0)

        self.cmap = colors.ListedColormap([
            'midnightblue', 'mediumseagreen', 'white', 'white', 'white',
            'white', 'white'
        ])  #'yellow', 'orange', 'red', 'brown'])
        bounds = [0, 1, 2, 3, 4, 5, 6, 7]
        self.norm = colors.BoundaryNorm(bounds, self.cmap.N)

        # expected mean-distance factors to the 5th and 10th nearest neighbour
        # under complete spatial randomness (used by nearest_neighbor)
        self.expect_NN = []
        self.NN = [5, 10]
        for i in self.NN:
            self.expect_NN.append(
                (math.factorial(2 * i) * i) / (2**i * math.factorial(i))**2)

        # wrapped Chebyshev distances within a local 21x21 window
        # (presumably the food-search neighbourhood used by agents)
        grid_ind_food = np.indices((21, 21))
        positions_food = np.maximum(abs(10 - grid_ind_food[0]),
                                    abs(10 - grid_ind_food[1]))
        self.positions_food = np.minimum(positions_food, 21 - positions_food)
        if self.collect_cog_dist:
            self.cog_dist_dist = pd.DataFrame(columns=[])
            self.cog_dist_det = pd.DataFrame(columns=[])

        for i in range(self.a1num):  # initiate a1 agents at random locations
            self.introduce_agents("A1")
        self.nA1 = self.a1num
        self.nA2 = 0
#     self.agent_steps = {}

    def initialize_perception(self):
        """Create the empty per-step history dataframe and set the number of
        cognition parameters and the fixed energy constants."""
        self.history = pd.DataFrame(columns=[
            "nA1", "nA2", "age", "LIP5", "LIP10", "LIPanim5", "LIPanim10",
            "Morsita5", "Morsita10", "Morsitaanim5", "Morsitaanim10", "NN5",
            "NN10", "NNanim5", "NNanim10", "reprod", "food", "death", "combat",
            "dist", "det", "dist_lower", "det_lower", "dist_upper",
            "det_upper", "dist_ci", "det_ci"
        ])
        self.nCogPar = 2
        (self.start_energy, self.eat_energy, self.tire_energy, self.reproduction_energy, self.cognition_energy) \
        = (10, 5, 3, 20, 1)

    def introduce_agents(self, which_agent):
        """Place one new agent of type ``which_agent`` ("A1" plant or "A2"
        animal) at a random grid cell.

        A1 agents only take empty cells; if the chosen cell is occupied the
        method retries recursively with a fresh random cell.  A2 agents
        replace a resident plant (which is killed) or occupy an empty cell;
        cells already holding an animal are left unchanged.
        """
        x = random.randrange(self.grid.width)
        y = random.randrange(self.grid.height)

        if which_agent == "A1":
            if self.grid.is_cell_empty((x, y)):
                a = A1(self.unique_id, self, self.start_energy, disp_rate=0)
                self.unique_id += 1
                self.grid.position_agent(a, x, y)
                self.schedule.add(a)
                self.agentgrid[x][y] = 1
            else:
                # occupied cell: retry at another random location
                # NOTE(review): recursion depth grows as the grid fills up
                self.introduce_agents(which_agent)
        elif which_agent == "A2":
            if self.cog_fixed:
                c = (self.dist, self.det)
            else:
                c = tuple([0] * self.nCogPar)
            a = A2(self.unique_id,
                   self,
                   self.start_energy,
                   cognition=c,
                   disp_rate=0)
            self.unique_id += 1
            if self.agentgrid[x][y] == 1:
                # cell holds a plant: kill and replace it with the animal
                die = self.grid.get_cell_list_contents([(x, y)])[0]
                die.dead = True
                self.grid.remove_agent(die)
                self.schedule.remove(die)
                self.grid.place_agent(a, (x, y))
                self.schedule.add(a)
                self.agentgrid[x][y] = 2
                self.coggrid[:, x, y] = c
            elif self.agentgrid[x][y] == 0:
                self.grid.place_agent(a, (x, y))
                self.schedule.add(a)
                self.agentgrid[x][y] = 2
                self.coggrid[:, x, y] = c

    def flatten_(self, n, grid, full_grid=False, mean=True, range_=False):
        """Flatten layer ``n`` of a (layers, width, height) grid.

        With ``full_grid`` the raw flattened layer is returned.  Otherwise
        101-valued sentinel entries (empty cells) are dropped; with ``mean``
        the result is a 4-element list [mean, lower, upper, ci] whose last
        three entries are only filled in when ``range_`` is requested.
        """
        if full_grid:
            return (grid[n].flatten())
        i = grid[n].flatten()
        if mean:
            i = np.delete(i, np.where(i == 101))
            if len(i) == 0:
                # if range_:
                return ([0] * 4)
            #else:
            #    return(0)
            if range_:
                if self.cog_fixed:
                    return ([np.mean(i)] * 4)
                return (np.concatenate(
                    ([np.mean(i)], np.percentile(i, [2.5, 97.5]),
                     self.calculate_ci(i))))
            return ([np.mean(i), 0, 0, 0])
        else:
            return (i)

    def calculate_ci(self, data):
        """Return the half-width of the 95% Student-t confidence interval of
        the mean of ``data`` as a one-element list (0.0 if ``data`` is
        constant)."""
        if np.min(data) == np.max(data):
            return ([0.0])
        return ([
            np.mean(data) - st.t.interval(
                0.95, len(data) - 1, loc=np.mean(data), scale=st.sem(data))[0]
        ])

    def return_zero(self, num, denom):
        """Divide ``num`` by ``denom``, returning 0 on the first step or when
        the denominator is zero.  The string sentinel "old_nA2" selects the
        previous step's animal count from the history as denominator."""
        if self.nstep == 1:
            #     print("whaaat")
            return (0)
        if denom == "old_nA2":
            denom = self.history["nA2"][self.nstep - 2]
        if denom == 0.0:
            return 0
        return (num / denom)

    def nearest_neighbor(self, agent):  # fix this later
        """Nearest-neighbour aggregation index for agent type ``agent``
        ("a1" for plants, anything else for animals): observed mean distance
        to the 5th and 10th nearest neighbour divided by the expectation
        under complete spatial randomness.  Returns -1 markers when there
        are too few agents to estimate."""
        if agent == "a1":
            x = np.argwhere(self.agentgrid == 1)
            if len(x) <= 10:
                return ([-1] * len(self.NN))
            elif len(x) > 39990:
                # near-saturated grid: skip the expensive loop and return
                # fixed values
                return ([0.97, 0.99])
        #  if self.nstep<300 and self.skip_300:
        #      return([-1,-1] )
        else:
            x = np.argwhere(self.agentgrid == 2)
            if len(x) <= 10:
                return ([-1] * len(self.NN))
        density = len(x) / (self.grid.width)**2
        expect_NN_ = self.expect_NN
        expect_dist = np.array(expect_NN_) / (density**0.5)
        distances = [0, 0]
        for i in x:
            # wrapped (toroidal) distances from agent i to all agents
            distx = abs(x[:, 0] - i[0])
            distx[distx > 100] = 200 - distx[distx > 100]
            disty = abs(x[:, 1] - i[1])
            disty[disty > 100] = 200 - disty[disty > 100]
            dist = (distx**2 + disty**2)**0.5
            distances[0] += (np.partition(dist, 5)[5])
            distances[1] += (np.partition(dist, 10)[10])
        mean_dist = np.array(distances) / len(x)
        out = mean_dist / expect_dist
        return (out)

    def quadrant_patch(
        self, agent
    ):  # function to calculate the patchiness index of agents at every step
        """Return [LIP5, Morisita5, LIP10, Morisita10]: patchiness and
        Morisita indices over 5x5 and 10x10 quadrats, or [-1]*4 when no
        agent of the requested type is present."""
        if agent == "a1":
            x = self.agentgrid == 1
        else:
            x = self.agentgrid == 2
        gsize = np.array([5, 10])
        gnum = 200 / gsize
        qcs = []
        for i in range(2):
            # occupancy count within each gsize x gsize quadrat
            x_ = x.reshape(int(gnum[i]), gsize[i], int(gnum[i]),
                           gsize[i]).sum(1).sum(2)
            mean = np.mean(x_)
            var = np.var(x_)
            if mean == 0.0:
                return ([-1] * 4)
            lip = 1 + (var - mean) / (mean**2)
            morsita = np.sum(x) * ((np.sum(np.power(x_, 2)) - np.sum(x_)) /
                                   (np.sum(x_)**2 - np.sum(x_)))
            qcs += [lip, morsita]
        return (qcs)

    def l_function(self, agent):
        """Absolute deviation of a Ripley's-L-style statistic from its
        expectation at radii 5..50 (step 5) using wrapped distances;
        returns -1 when no animals exist."""
        if agent == "a1":
            x = np.argwhere(self.agentgrid == 1)
        else:
            x = np.argwhere(self.agentgrid == 2)
            if len(x) == 0:
                return (-1)
        distances = np.array([])
        for i in x:
            distx = abs(x[:, 0] - i[0])
            distx[distx > 100] = 200 - distx[distx > 100]
            disty = abs(x[:, 1] - i[1])
            disty[disty > 100] = 200 - disty[disty > 100]
            dist = (distx**2 + disty**2)**0.5
            distances = np.concatenate((distances, dist[dist != 0]))
        l = np.array([])
        for i in np.arange(5, 51, 5):
            l = np.append(l, sum(distances < i))
        k = (l * 200**2) / (len(x)**2)
        l = (k / math.pi)**0.5
        return (abs(l - np.arange(5, 51, 5)))

    def collect_hist(self):
        """Append one row of summary statistics for the current step to
        ``self.history`` and reset the per-step event counters.

        NOTE(review): DataFrame.append was removed in pandas 2.0, so this
        requires pandas < 2 (pd.concat is the modern replacement).  The
        "disp_a1"/"disp_a2" keys are not among the columns declared in
        initialize_perception and are added on first append.
        """
        if self.nstep < 300 and self.skip_300:
            NNcalc = [-1, -1]  #self.nearest_neighbor("a1")
            NNanimcalc = [-1, -1]  #self.nearest_neighbor("a2")
        else:
            NNcalc = self.nearest_neighbor("a1")
            NNanimcalc = self.nearest_neighbor("a2")
        quadrantcalc = self.quadrant_patch("a1")
        quadrantanimcalc = self.quadrant_patch("a2")
        dist_values = self.flatten_(0,
                                    grid=self.coggrid,
                                    mean=True,
                                    range_=False)
        det_values = self.flatten_(1,
                                   grid=self.coggrid,
                                   mean=True,
                                   range_=False)
        # l_f = 0#self.l_function("a1")
        dat = {
            "nA1": self.nA1,
            "nA2": self.nA2,
            "age": self.return_zero(sum(self.age), self.nA2),
            "LIP5": quadrantcalc[0],
            "LIP10": quadrantcalc[2],
            "LIPanim5": quadrantanimcalc[0],
            "LIPanim10": quadrantanimcalc[2],
            "Morsita5": quadrantcalc[1],
            "Morsita10": quadrantcalc[3],
            "Morsitaanim5": quadrantanimcalc[1],
            "Morsitaanim10": quadrantanimcalc[3],
            "NN5": NNcalc[0],
            "NN10": NNcalc[1],
            "NNanim5": NNanimcalc[0],
            "NNanim10":
            NNanimcalc[1],  #"l_ripley" : l_f,# self.nearest_neighbor("a2"),  
            "reprod": self.return_zero(self.reprod, "old_nA2"),
            "food": self.return_zero(self.food, self.nA2),
            "death": self.return_zero(self.death, "old_nA2"),
            "combat": self.return_zero(self.combat, "old_nA2"),
            "dist": dist_values[0],
            "det": det_values[0],
            "dist_lower": dist_values[1],
            "det_lower": det_values[1],
            "dist_upper": dist_values[2],
            "det_upper": det_values[2],
            "dist_ci": dist_values[3],
            "det_ci": det_values[3],
            "disp_a1": self.flatten_(0, grid=self.dispgrid)[0],
            "disp_a2": self.flatten_(1, grid=self.dispgrid)[0]
        }
        self.history = self.history.append(dat, ignore_index=True)
        self.age = []
        (self.reprod, self.food, self.death, self.combat) = (0, 0, 0, 0)
        if self.collect_cog_dist:
            if (self.nstep % 10) == 0:
                self.cog_dist_dist[str(self.nstep - 1)] = self.flatten_(
                    0, grid=self.coggrid, full_grid=True, mean=False)
                self.cog_dist_det[str(self.nstep - 1)] = self.flatten_(
                    1, grid=self.coggrid, full_grid=True, mean=False)

    def step(self):
        """Advance the model by one step: introduce animals at the
        configured time, step all scheduled agents, refresh the population
        counts and record history; progress is printed every 10 steps."""
        self.nstep += 1  # step counter
        if self.nstep == self.intro_time:
            for i in range(self.a2num):
                self.introduce_agents("A2")
        self.schedule.step()
        self.nA1 = np.sum(self.agentgrid == 1)
        self.nA2 = np.sum(self.agentgrid == 2)
        self.collect_hist()
        if self.nstep % 10 == 0:
            sys.stdout.write((str(self.nstep) + " " + str(self.nA1) + " " +
                              str(self.nA2) + "\n"))

    def visualize(self):
        """Render the current agent grid with matplotlib and return the
        resulting figure."""
        f, ax = plt.subplots(1)
        self.agentgrid = self.agentgrid.astype(int)
        ax.imshow(self.agentgrid,
                  interpolation='nearest',
                  cmap=self.cmap,
                  norm=self.norm)
        # plt.axis("off")
        return (f)
class SeqRosModel(Model):
    """Mesa model that replays embryo development from per-timepoint nuclei
    data files, interpolating cell agents between recorded stages, handling
    cell divisions, and rendering the cells on a canvas.  Exposes a
    gym-like reset()/step()/render() interface centred on one AI-controlled
    cell (``self.ai_cell``)."""

    def __init__(self):
        # pre-trained movement model, loaded onto CPU
        self.speed_model_plus = Net(4096)
        self.speed_model_plus.load_state_dict(
            torch.load('./trained_models/TMM.pkl',
                       map_location=lambda storage, loc: storage))
        # filename template for per-timepoint nuclei files, e.g. "t045-nuclei"
        self.file_path = NUCLEI_DATA_PATH + 't%03d-nuclei'

        print('Parsing the Embryo...')
        self.embryo = Embryo(NUCLEI_DATA_PATH)
        self.embryo.read_data()
        self.embryo.get_embryo_visual_params()

        # hard-coded total embryo volume, used by get_radius
        self.embryo.volume = 2500578

        self.ai_cell = AI_CELL

        self.start_point = START_POINT
        self.end_point = END_POINT

        self.ticks = 0
        self.tick_resolution = TICK_RESOLUTION
        # total number of simulation ticks between start and end stage
        self.end_tick = (self.end_point -
                         self.start_point) * self.tick_resolution
        self.stage_destination_point = self.start_point

        self.plane_resolution = PLANE_RESOLUTION

        self.current_cell_list = []
        self.dividing_cell_overall = []
        self.next_stage_destination_list = {}
        self.state_value_dict = {}
        self.schedule = RandomActivation(self)

        self.init_env()
        self.update_stage_destination()

        self.plane = DrawPlane(width=self.embryo.width,
                               height=self.embryo.height,
                               w_offset=self.embryo.wid_offset,
                               h_offset=self.embryo.hei_offset,
                               scale_factor=CANVAS_DISPLAY_SCALE_FACTOR)
        self.canvas = self.plane.canvas
        self.plane_draw = PLANE_DRAW
        self.draw(self.plane_draw)

    def draw(self, n_plane):
        """Redraw all cells on the canvas around plane ``n_plane``.

        If the AI cell is present, its own plane overrides ``n_plane`` and
        the AI cell is drawn after its plane's other cells so it stays on
        top.  Cells on planes further from the focal plane are drawn with a
        smaller projected radius.

        NOTE(review): ``center_plane`` (and ``ai_location`` below) are only
        bound when the AI cell exists in the schedule; without it this
        method raises NameError -- confirm the AI cell is always scheduled.
        """
        self.canvas.delete("all")
        for cell in self.schedule.agents:
            if cell.cell_name == self.ai_cell:
                center_plane = round(cell.location[2] / 5.0)
                n_plane = center_plane

        draw_range = np.arange(center_plane - PLANE_THRESHOLD,
                               center_plane + PLANE_THRESHOLD + 1, 1)
        draw_range = draw_range.tolist()
        draw_range.reverse()

        for n in draw_range:
            # shrink factor grows with distance from the focal plane
            angle = np.pi * 0.5 / (PLANE_THRESHOLD + 1) * np.abs(n - n_plane)
            level = None
            for cell in self.schedule.agents:
                if cell.cell_name == self.ai_cell:
                    # remember AI-cell geometry; it is drawn separately below
                    type = 'AI'
                    ai_location = cell.location
                    ai_center = cell.location[0:2]
                    ai_radius = cell.diameter / 2.0 * np.cos(angle)
                    continue
                else:
                    type = 'NUMB'
                if round(cell.location[2] / 5.0) == n:
                    self.plane.draw_cell(center=cell.location[0:2],
                                         radius=cell.diameter / 2.0 *
                                         np.cos(angle),
                                         type=type,
                                         level=level)
            if round(ai_location[2] / 5.0) == n:
                self.plane.draw_cell(center=ai_center,
                                     radius=ai_radius,
                                     type='AI',
                                     level=None)

        self.canvas.pack()
        self.canvas.update()
        time.sleep(FRESH_TIME)

    def radis_ratio(self, cn):
        """Return the cube root of the lineage-specific volume fraction for
        cell name ``cn``; unknown names fall back to a tiny positive value."""
        r = -1
        if cn[0:2] == "AB":
            r = 0.55 * (0.5**(len(cn) - 2))
        elif cn == "P1":
            r = 0.45
        elif cn == "EMS":
            r = 0.45 * 0.54
        elif cn == "P2":
            r = 0.45 * 0.46
        elif cn[0:2] == "MS":
            r = 0.45 * 0.54 * 0.5 * (0.5**(len(cn) - 2))
        elif cn == "E":
            r = 0.45 * 0.54 * 0.5
        elif cn[0] == "E" and len(cn) >= 2 and cn[1] != "M":
            r = 0.45 * 0.54 * 0.5 * (0.5**(len(cn) - 1))
        elif cn[0] == "C":
            r = 0.45 * 0.46 * 0.53 * (0.5**(len(cn) - 1))
        elif cn == "P3":
            r = 0.45 * 0.46 * 0.47
        elif cn[0] == "D":
            r = 0.45 * 0.46 * 0.47 * 0.52 * (0.5**(len(cn) - 1))
        elif cn == "P4":
            r = 0.45 * 0.46 * 0.47 * 0.48
        if r == -1:
            # unknown lineage name: return a negligible radius ratio
            return 0.00000001
        return r**(1.0 / 3)

    def get_radius(self, cell_name):
        """Return the radius of ``cell_name``, derived from its lineage
        volume fraction and the total embryo volume via the sphere volume
        formula; unknown names get an average (ABarppppa-sized) fallback."""
        if cell_name[0:2] == "AB":
            v = 0.55 * (0.5**(len(cell_name) - 2))
        elif cell_name == "P1":
            v = 0.45
        elif cell_name == "EMS":
            v = 0.45 * 0.54
        elif cell_name == "P2":
            v = 0.45 * 0.46
        elif cell_name[0:2] == "MS":
            v = 0.45 * 0.54 * 0.5 * (0.5**(len(cell_name) - 2))
        elif cell_name == "E":
            v = 0.45 * 0.54 * 0.5
        elif cell_name[0] == "E" and len(
                cell_name) >= 2 and cell_name[1] != "M":
            v = 0.45 * 0.54 * 0.5 * (0.5**(len(cell_name) - 1))
        elif cell_name[0] == "C":
            v = 0.45 * 0.46 * 0.53 * (0.5**(len(cell_name) - 1))
        elif cell_name == "P3":
            v = 0.45 * 0.46 * 0.47
        elif cell_name[0] == "D":
            v = 0.45 * 0.46 * 0.47 * 0.52 * (0.5**(len(cell_name) - 1))
        elif cell_name == "P4":
            v = 0.45 * 0.46 * 0.47 * 0.48
        elif cell_name in ['Z2', 'Z3']:
            v = 0.45 * 0.46 * 0.47 * 0.48 * 0.5
        else:
            print('ERROR!!!!! CELL NOT FOUND IN CALCULATING HER RADIUS!!!!',
                  cell_name)
            print('Use an average value.')
            # NOTE(review): "v = v =" is a harmless duplicated assignment
            v = v = 0.55 * (0.5**(9 - 2))  #ABarppppa

        radius = pow(self.embryo.volume * v / (4 / 3.0 * np.pi), 1 / 3.0)
        # NOTE(review): self-assignment below is a no-op, kept as-is
        radius = radius

        return radius

    def get_cell_daughter(self, cell_name, cell_dict):
        """Return the two daughter names of ``cell_name``: hard-coded for
        the early P/EMS lineage, otherwise found in ``cell_dict`` by the
        naming convention of appending one letter; ['', ''] when no
        daughters exist."""
        daughter = []
        if cell_name == 'P0':
            daughter = ['AB', 'P1']
        elif cell_name == 'P1':
            daughter = ['EMS', 'P2']
        elif cell_name == 'P2':
            daughter = ['C', 'P3']
        elif cell_name == 'P3':
            daughter = ['D', 'P4']
        elif cell_name == 'P4':
            daughter = ['Z2', 'Z3']
        elif cell_name == 'EMS':
            daughter = ['MS', 'E']
        ## standard name ###
        else:
            for cell in cell_dict.keys():
                if cell.startswith(cell_name) and len(
                        cell) == len(cell_name) + 1:
                    daughter.append(cell)
            daughter = sorted(daughter)
        if daughter == []:
            daughter = ['', '']
        return daughter

    def init_env(self):
        """Populate the schedule with CellAgents parsed from the nuclei file
        at ``start_point``.  Small Gaussian noise is added to the x/y
        coordinates only; placeholder 'Nuc*' entries and unnamed rows are
        skipped."""
        with open(self.file_path % self.start_point) as file:
            for line in file:
                # strip the trailing newline
                line = line[:len(line) - 1]
                vec = line.split(', ')
                id = int(vec[0])
                location = np.array(
                    (float(vec[5]), float(vec[6]), float(vec[7])))
                ########### add noise to initial location##################
                location_noise = np.random.normal(0, 0.1, 2)
                location[0:2] = location[0:2] + location_noise
                ########### add noise to initial location##################
                diameter = float(vec[8])
                cell_name = vec[9]
                if cell_name[0:3] == 'Nuc':
                    continue
                if cell_name != '':
                    self.current_cell_list.append(cell_name)
                    a = CellAgent(id, self, cell_name, location, diameter)
                    self.schedule.add(a)

    def set_cell_next_location(self):
        """Set each cell's ``next_location`` by linear interpolation toward
        its recorded position at the next stage.

        Cells absent from the next stage are dividing or dying: the first
        daughter reuses the existing agent (renamed in place), the second
        daughter becomes a new agent starting at the mother's location, and
        cells with no daughters are removed from the schedule.
        """
        for cell in self.schedule.agents:
            if cell.cell_name in self.next_stage_destination_list:
                # move a proportional share of the remaining distance per tick
                cell.next_location = (self.next_stage_destination_list[cell.cell_name][0:3] - cell.location) \
                    / (self.tick_resolution - self.ticks % self.tick_resolution) + cell.location
                cell.diameter = self.next_stage_destination_list[
                    cell.cell_name][3]
            else:
                ### new cell born ###
                mother = cell.cell_name
                daughter = self.get_cell_daughter(
                    cell.cell_name, self.next_stage_destination_list)
                if daughter[0] == '':
                    # no daughters recorded: the cell disappears
                    self.schedule.remove(cell)
                    continue
                cell.cell_name = daughter[0]
                cell.diameter = self.next_stage_destination_list[
                    daughter[0]][3]
                cell.next_location = (self.next_stage_destination_list[daughter[0]][0:3] - cell.location) \
                    / (self.tick_resolution - self.ticks % self.tick_resolution) + cell.location
                new_id = len(self.schedule.agents) + 1
                new_diameter = self.next_stage_destination_list[daughter[1]][3]
                a = CellAgent(new_id, self, daughter[1], cell.location,
                              new_diameter)
                self.schedule.add(a)
                a.next_location = (self.next_stage_destination_list[daughter[1]][0:3] - a.location) \
                    / (self.tick_resolution - self.ticks % self.tick_resolution) + a.location

                self.dividing_cell_overall.append(mother)

    def update_stage_destination(self):
        """Reload ``next_stage_destination_list`` (cell name -> x,y,z,
        diameter) from the nuclei file of the next stage point; no-op when
        the stage point has not advanced."""
        current_stage_destination_point = self.start_point + 1 + int(
            self.ticks / self.tick_resolution)
        if self.stage_destination_point == current_stage_destination_point:
            return
        else:
            self.stage_destination_point = current_stage_destination_point
            self.next_stage_destination_list.clear()
            with open(self.file_path % self.stage_destination_point) as file:
                for line in file:
                    line = line[:len(line) - 1]
                    vec = line.split(', ')
                    id = int(vec[0])
                    loc_and_dia = np.array((float(vec[5]), float(vec[6]),
                                            float(vec[7]), float(vec[8])))
                    cell_name = vec[9]
                    if cell_name != '':
                        self.next_stage_destination_list[
                            cell_name] = loc_and_dia

    def render(self):
        """Redraw the canvas every FRESH_PERIOD ticks."""
        if self.ticks % FRESH_PERIOD == 0:
            self.draw(self.plane_draw)

    def reset(self):
        """Reset the episode to the start stage, rebuild the agents from the
        data files, and return the initial state image stack.

        NOTE(review): ``del self.schedule.agents[:]`` clears the scheduler's
        agent list directly instead of via schedule.remove(); this relies on
        RandomActivation exposing a mutable list -- verify against the Mesa
        version in use.
        """
        self.ticks = 0

        self.start_point = START_POINT
        self.end_point = END_POINT

        self.end_tick = (self.end_point -
                         self.start_point) * self.tick_resolution
        self.stage_destination_point = self.start_point
        self.current_cell_list = []
        self.dividing_cell_overall = []
        self.next_stage_destination_list = {}
        self.state_value_dict = {}

        del self.schedule.agents[:]
        self.init_env()
        self.update_stage_destination()
        s = self.get_state()

        return s

    def get_state(self):
        """Return the current state: RGB renderings (resized to 128x128,
        channels-first, values in [0, 1]) of the planes around PLANE_DRAW,
        concatenated along the channel axis.  The AI cell is drawn red, all
        other cells green."""
        s = []
        low_plane = PLANE_DRAW - INPUT_PLANE_RANGE
        if low_plane <= 1:
            low_plane = 1
        high_plane = PLANE_DRAW + INPUT_PLANE_RANGE + 1
        for p in range(low_plane, high_plane):
            image = Image.new('RGB',
                              (self.embryo.width - self.embryo.wid_offset,
                               self.embryo.height - self.embryo.hei_offset))
            draw = ImageDraw.Draw(image)
            for cell in self.schedule.agents:
                if cell.cell_name == self.ai_cell:
                    fill_color = 'red'

                else:
                    fill_color = 'green'
                cell_loc = np.array((cell.location[0], cell.location[1], \
                    cell.location[2]))
                radius = self.get_radius(cell.cell_name)
                z_diff = cell_loc[2] - p * self.plane_resolution
                if abs(z_diff) < radius:
                    # cell intersects this plane: draw its circular cross-section
                    radius *= 0.5
                    z_diff *= 0.5
                    radius_projection = (radius**2 - z_diff**2)**0.5
                    draw.ellipse((cell_loc[0] - radius_projection -
                                  self.embryo.wid_offset, cell_loc[1] -
                                  radius_projection - self.embryo.hei_offset,
                                  cell_loc[0] + radius_projection -
                                  self.embryo.wid_offset, cell_loc[1] +
                                  radius_projection - self.embryo.hei_offset),
                                 fill=fill_color,
                                 outline='black')

            image = image.resize((128, 128))
            image_np = np.array(image).astype(
                np.float32) / 255  #widthxheightx3
            image_np = np.rollaxis(image_np, 2)  #3x2widthxheight

            if len(s) == 0:
                s = image_np
            else:
                s = np.concatenate((s, image_np), axis=0)

        return s

    def step(self):
        """Advance one tick: refresh the stage targets when a stage boundary
        is crossed, interpolate cell positions, step all agents, and return
        (next_state, reward, subgoal_done, done).  Reward and subgoal_done
        are fixed at 0/False in this implementation; done is True at
        ``end_tick``."""
        r = 0
        done = False
        sg_done = False
        if self.ticks > 0 and self.ticks % self.tick_resolution == 0:
            self.update_stage_destination()

        self.set_cell_next_location()
        self.schedule.step()
        self.ticks += 1

        s_ = self.get_state()
        ai_location = np.zeros(3)

        for cell in self.schedule.agents:
            if cell.cell_name == self.ai_cell:
                ai_location = np.array((cell.location[0], cell.location[1], \
                    cell.location[2]))

        if self.ticks == self.end_tick:
            done = True

        return s_, r, sg_done, done
Example #18
0
class EconomyModel(Model):
    """A model with some number of agents (firms) competing in an economy.

    Firms invest in green / quality / efficiency innovation projects; the
    model aggregates their investments into GDP dynamics and tracks
    market-share-weighted metrics.

    NOTE(review): this class reads several module-level globals defined
    elsewhere in the file: gdp, ζ, δ_long, δ_short, L_xs, minAge, τ, τ_2 —
    confirm they are in scope before running.
    """
    
    # initialization of the model
    def __init__(self ,seed, steps, dyn_firms, scenario, shock, con_pref, firm_scen,imit_scen,model_params):
        """Set up the economy.

        Parameters (as used below):
            seed         -- numpy RNG seed for reproducibility
            steps        -- number of model steps (stored as num_steps)
            dyn_firms    -- if truthy, firms may exit and imitators may enter
            scenario     -- dict with λ_e/λ_b/λ_g consumer weights plus pp,
                            λ_start, λ_dur, λ_off, P_r_l_g, σ_g
            shock        -- dict describing a recession
                            (shock_start / shock_duration / shock_drop)
            con_pref     -- dict describing a consumer-preference shift
                            (con_start / con_duration / increase)
            firm_scen    -- list of dicts, each with a 'num' of firms to create
            imit_scen    -- scenario passed to entering (imitating) firms
            model_params -- dict of structural parameters (ω_*, G_min/G_max,
                            M_e, γ_e, p_hat, α, μ, ψ, Φ, δ, Pr_new, η, Ω, f_a)
        """
        
        np.random.seed(seed)
        
        # here you define the attribute number of agents
        self.num_agents = sum([a['num'] for a in firm_scen])
        self.num_steps = steps
        self.total_I_f = 0 ; self.total_ms_f = 0
        # GDP state starts from the module-level global `gdp`
        self.YT = gdp; self.Y = gdp; self.MA_y_L = gdp; self.MA_y_S = gdp
        self.GT = 1
        self.Θ_g = 0; self.Θ_b = 0; self.Θ_c = 0; self.Θ_growth = 0
        self.current_growth = 0 # remove it from here
        self.total_diff_firms = 0 # remove it from here
        self.dyn_firms = dyn_firms
        self.λ_e = scenario['λ_e'] ; self.λ_b = scenario['λ_b'] ; self.λ_g = scenario['λ_g']
        self.shock = shock # comment the parts referring to the shock
        self.pp = scenario["pp"]
        # NOTE(review): extra green preference weight implied by a
        # pp-percentage-point subsidy, renormalized over all weights —
        # verify derivation against the paper/spec.
        self.λ_g_b = (self.λ_g + self.pp/100)/(self.λ_e+self.λ_b+self.λ_g+self.pp/100)-self.λ_g
        self.λ_start = scenario["λ_start"]
        self.λ_dur = scenario["λ_dur"]
        self.λ_off = scenario["λ_off"]
        
        self.ω_g = model_params['ω_g'] ;self.ω_b = model_params['ω_b'] 
        self.ω_c = model_params['ω_c']
        self.G_min = model_params['G_min']; self.G_max = model_params['G_max'] 
        self.M_e = model_params['M_e']; self.γ_e = model_params['γ_e']
        self.p_hat = model_params['p_hat']
        self.α = model_params['α'] ; self.μ = model_params['μ']
        self.ψ = model_params['ψ'] ; self.Φ = model_params['Φ']
        self.δ = model_params['δ'] 
        self.Pr_new = model_params['Pr_new'] 
        self.η = model_params['η'] ; self.Ω = model_params['Ω']
        self.f_a = model_params['f_a'] 

        self.running = True

        # Metrics
        self.MSWA_g = 0; self.MSWA_b = 0; self.MSWA_e = 0
        
        # consumer preferences

        self.con = con_pref

        # Self.imit_scen is not used somewhere yet,
        # it could be used to define the characteristics of the entering firm 
        # without making them imitators
        self.imit_scen = imit_scen

        # Probability that commercial bank would finance a green loan
        self.P_r_l = {"green":scenario['P_r_l_g'], "quality":1, "efficiency":1}

        # Influence of SIB on probability of investments 
        self.σ = {"green":scenario['σ_g'], "quality":0, "efficiency":0}

        # Dictionary containing the lists of firms 
        # investing at each characteristic at every moment  
        self.invest = {'green':[],'efficiency':[],'quality':[]}

        self.schedule = RandomActivation(self)
        # the collection takes place after the step function 
        self.dc = DataCollector(model_reporters = {"agents": lambda m: m.schedule.get_agent_count(),
                                                   "Total_I_fs": lambda m: m.total_I_f,
                                                   "PotentialGDP": lambda m: m.YT,
                                                   "LongTermGDP": lambda m: m.MA_y_L,
                                                   "shortTermGDP": lambda m: m.MA_y_S,
                                                   "ActualGDP": lambda m: m.Y,
                                                   "GDPGrowth": lambda m: m.GT,
                                                   "InvestmentGrowth": lambda m: m.Θ_growth, 
                                                   "λ_e" : lambda m: m.λ_e,
                                                   "λ_b" : lambda m: m.λ_b,
                                                   "λ_g" : lambda m: m.λ_g,
                                                   "index I" : lambda m: m.I,
                                                   "MSWA_g"  : lambda m: m.MSWA_g,
                                                   "MSWA_b"  : lambda m: m.MSWA_b,
                                                   "MSWA_e"  : lambda m: m.MSWA_e,
                                                   "HHI"     : lambda m: m.HHI

                                                    },
                                agent_reporters = {"name": lambda a: a.unique_id, 
                                                   "e_fs": lambda a: a.e_f, 
                                                   "p_fs": lambda a: a.p_f,
                                                   "b_fs": lambda a: a.b_f, 
                                                   "g_fs": lambda a: a.g_f,
                                                   "I_fs": lambda a: a.I_f,
                                                   "ms_fs": lambda a: a.ms_f,
                                                   "r_fs": lambda a: a.r_f,
                                                   "c_fs": lambda a: a.c_f,
                                                   "f_c_fs": lambda a: a.f_c_f,
                                                   "π_fs": lambda a: a.π_f,
                                                   "s_fs": lambda a: a.s_f,
                                                   "step": lambda a: a.step_2,
                                                   "proj": lambda a: a.act_proj,
                                                   "p_success": lambda a: a.p_success,
                                                   "s_to_r": lambda a: a.s_f/a.r_f,
                                                   "green": lambda a: a.p_i_g,
                                                   "efficiency": lambda a: a.p_i_e,
                                                   "quality": lambda a: a.p_i_b,
                                                   "p_r_imits": lambda a: a.p_r_imit,
                                                   "age" : lambda a: a.age,
                                                   'innov': lambda a: a.innov,
                                                   "sum_of_innov": lambda a:
                                                   a.p_i_e + a.p_i_b + a.p_i_g})

        # Create firms
        temp_num = 0
        for el in firm_scen:
            for i in range(el['num']):
                a = Firm(temp_num, self, el, False)
                temp_num += 1
                self.schedule.add(a)    

    def agg_income(self):
        """Aggregate firm investments into GDP dynamics.

        Updates the Θ_* investment shares, the growth factor GT (including
        the consumer-preference shift, the recession shock and the λ subsidy
        window), potential GDP (YT), actual GDP (Y), its moving averages and
        the business-cycle index I = MA_y_S / MA_y_L.
        """

        # Modult for calculating growth in investments
        # self.Θ_growth = 0 if self.step == 1 else self.Θ_growth
        self.Θ_b = 0; self.Θ_g = 0; self.Θ_c = 0
        for a in self.schedule.agents:
            if a.unique_id in self.invest['green']:
                self.Θ_g = self.Θ_g + a.ms_f
            if a.unique_id in self.invest['efficiency']:
                self.Θ_c = self.Θ_c + a.ms_f
            if a.unique_id in self.invest['quality']:
                self.Θ_b = self.Θ_b + a.ms_f
        
        self.Θ_growth = 0 if self.schedule.time == 1 else self.Θ_growth
        self.Θ_growth = (self.Θ_g + self.Θ_b +self.Θ_c) - self.current_growth
        self.current_growth = 0 if self.schedule.time == 1 else (self.Θ_g + self.Θ_b +self.Θ_c)

        # Module for calculating GDPs

        # Growth factor: weighted investment balance rescaled into [G_min, G_max]
        self.GT = (self.ω_g * self.Θ_g + self.ω_b * self.Θ_b - self.ω_c * self.Θ_c) / (self.ω_b)* (self.G_max-self.G_min)/2 + (self.G_max+self.G_min)/2

        # self.GT = (self.ω_g * self.Θ_g + self.ω_b * self.Θ_b - self.ω_c * self.Θ_c) * (self.G_max - self.G_min)/2 + self.G_min
        
        # Consumer-preference shift: gradually move weight from e/b to g
        if self.schedule.time >= self.con['con_start'] and self.schedule.time <= self.con['con_start'] + self.con['con_duration']: 
            # self.GT += self.shock['shock_drop']/12
            self.λ_g += self.con['increase']/(self.con['con_duration'])
            self.λ_e -= self.con['increase']/(self.con['con_duration']*2)
            self.λ_b -= self.con['increase']/(self.con['con_duration']*2)

        # module for simulating the recession
        if self.schedule.time >= self.shock['shock_start'] and self.schedule.time <= self.shock['shock_start'] + self.shock['shock_duration']: 
            self.GT += self.shock['shock_drop']/12
        
        # if (self.schedule.time > self.shock['shock_start'] + self.shock['shock_duration']) and (self.schedule.time <= self.shock['shock_start'] + self.shock['shock_duration'] + 12):
        #     self.λ_e += self.shock['shock_drop']/12/3 * self.shock['shock_duration']/12
        #     self.λ_b -= self.shock['shock_drop']/24/3 * self.shock['shock_duration']/12
        #     self.λ_g -= self.shock['shock_drop']/24/3 * self.shock['shock_duration']/12

        # λ subsidy window: shift weight to green at λ_start, then phase it
        # back out over λ_off steps after λ_dur; the new weights are pushed
        # down into every firm.
        if self.λ_dur > 0:
            if self.schedule.time == self.λ_start: 
                self.λ_e -= self.λ_g_b/2
                self.λ_b -= self.λ_g_b/2
                self.λ_g += self.λ_g_b
                for a in self.schedule.agents:
                    a.λ_e = self.λ_e
                    a.λ_b = self.λ_b
                    a.λ_g = self.λ_g                   
            if self.schedule.time >= self.λ_start + self.λ_dur and self.schedule.time < self.λ_start + self.λ_dur + self.λ_off :
                self.λ_e += self.λ_g_b/2/self.λ_off
                self.λ_b += self.λ_g_b/2/self.λ_off
                self.λ_g -= self.λ_g_b/self.λ_off
                for a in self.schedule.agents:
                    a.λ_e = self.λ_e
                    a.λ_b = self.λ_b
                    a.λ_g = self.λ_g                   

        self.YT = self.YT * self.GT
        if self.λ_dur > 0:
            if self.schedule.time == self.λ_start :
                self.YT = (self.pp/100+1) * self.YT
            if self.schedule.time >= self.λ_start + self.λ_dur and self.schedule.time < self.λ_start + self.λ_dur + self.λ_off:
                self.YT = (1-self.pp/100/self.λ_off) * self.YT
        # Actual GDP is an exponential smoothing of potential GDP
        # (ζ, δ_long, δ_short are module-level globals).
        self.Y = ζ * self.Y + (1-ζ) * self.YT
        self.MA_y_L = self.MA_y_L * δ_long + self.Y * (1 - δ_long) 
        self.MA_y_S = self.MA_y_S * δ_short + self.Y * (1 - δ_short)
        self.I = self.MA_y_S / self.MA_y_L
        
    def step(self):
        """One model step: trade, update firm accounts, collect data, run
        innovation projects, and (if dyn_firms) handle firm exit/entry."""
        # Phase 1: Trading, assigning sales to firms and updating their accounts:
        self.schedule.step()
        
        self.agg_income()

        total_I_f = sum(np.array([a.I_f for a in self.schedule.agents])**self.α)
        for a in self.schedule.agents:
            a.update_agent_a(total_I_f, self.Y,self.α,self.μ,self.Φ,self.ψ,self.δ)
        
        total_ms_f = sum(np.array([a.ms_f for a in self.schedule.agents])**self.η)
        for a in self.schedule.agents:
            a.update_agent_b(total_ms_f,self.η)

        # create metrics inside the agents
        # MSWA: market-share-weighted averages; HHI: Herfindahl index
        self.MSWA_g = sum(np.array([a.ms_f*a.g_f for a in self.schedule.agents]))
        self.MSWA_b = sum(np.array([a.ms_f*a.b_f for a in self.schedule.agents]))
        self.MSWA_e = sum(np.array([a.ms_f*a.e_f for a in self.schedule.agents]))
        self.HHI = sum(np.array([a.ms_f**2 for a in self.schedule.agents]))


        self.dc.collect(self) # data collection after the accounts of firms have been updated

        # Phase 2: Each firm attempts or continuous an innovation project, whose successful           
        for a in self.schedule.agents:
                            
            if a.act_proj != False:
                # Project still running: count down the loan duration
                # (L_xs is a module-level dict of loan lengths per project).
                if a.loan_dur < L_xs[a.act_proj]:
                    a.loan_dur += 1
                    # if a.unique_id in self.invest[a.act_proj]:
                    #     self.invest[a.act_proj].remove(a.unique_id)
                else:
                    # Project finished: resolve success and update the firm's
                    # characteristics (0.2 step, capped below 39.5).
                    a.loan_dur = 0 
                    self.invest[a.act_proj].remove(a.unique_id)
                    if np.random.binomial(1,a.P_r_x_succ[a.act_proj]):
                        if a.act_proj == "quality":
                            a.b_f = a.b_f + 0.2 if a.b_f < 39.5 else a.b_f
                            # a.g_f = a.g_f - 0.15 if a.g_f > 0 else a.g_f
                            # a.e_f = a.e_f - 0.15 if a.e_f > 0 else a.e_f
                            a.p_f = self.p_hat + np.log(self.M_e/a.e_f - 1)/self.γ_e # (1)
                        if a.act_proj == "green":
                            a.g_f = a.g_f + 0.2 if a.g_f < 39.5 else a.g_f
                            # a.b_f = a.b_f - 0.15 if a.b_f > 0 else a.b_f
                            # a.e_f = a.e_f - 0.15 if a.e_f > 0 else a.e_f
                            a.p_f = self.p_hat + np.log(self.M_e/a.e_f - 1)/self.γ_e # (1)
                        if a.act_proj == "efficiency":
                            a.e_f = a.e_f + 0.2 if a.e_f < 39.5 else a.e_f
                            # a.g_f = a.g_f - 0.15 if a.g_f > 0 else a.g_f
                            # a.b_f = a.b_f - 0.15 if a.b_f > 0 else a.b_f
                            a.p_f = self.p_hat + np.log(self.M_e/a.e_f - 1)/self.γ_e # (1) 
                    a.act_proj = False

            if a.act_proj == False:
                # No running project: pick a candidate project and attempt it
                # with probability p_success (bank financing × SIB influence
                # × business cycle index).
                temp_choice = np.random.choice( ['efficiency', 'green', 'quality'], 1,
                p = [a.p_i_e, a.p_i_g, a.p_i_b])
                
                if a.s_f > 0: 
                    # this is a rather bad formula
                    a.p_success = self.f_a * a.s_f/a.r_f * (self.P_r_l[temp_choice[0]] + self.σ[temp_choice[0]]) * self.I
                else:
                    a.p_success = 0
                
                a.p_success = 1 if a.p_success > 1 else a.p_success

                if np.random.binomial(1,a.p_success):
                    a.act_proj = temp_choice[0]
              
                    a.loan_dur += 1    
                    self.invest[temp_choice[0]].append(a.unique_id)
            
        if self.dyn_firms:
            # Firm exit (minAge, τ, τ_2 are module-level thresholds).
            # NOTE(review): the `removed` flag allows at most one τ_2-based
            # exit per step (and none after a τ-based exit) — confirm this is
            # intended.  Removal during iteration relies on schedule.agents
            # returning a fresh list each access (true for Mesa's
            # RandomActivation).
            removed = False
            for a in self.schedule.agents:
                if a.age > minAge:
                    if a.s_f < 0:
                        if a.ms_f < τ:
                            self.schedule.remove(a)
                            removed = True
                    if a.ms_f < τ_2 and removed == False:
                        self.schedule.remove(a)
                        removed = True 
            # Firm entry: each incumbent may spawn an imitating entrant.
            num_of_entered_firms = 0
            for a in self.schedule.agents:
                if np.random.binomial(1,a.p_r_imit*self.Pr_new): 

                    a2 = Firm(self.num_agents + self.total_diff_firms,
                    self, self.imit_scen, True)
                    self.total_diff_firms += 1 
                    temp_random = np.random.uniform()
                    # here it decides what type of innovator the firm entering 
                    # firm will be
                    if temp_random < 1/3:
                        a2.e_f = a.e_f*self.Ω ; a2.b_f = a.b_f*self.Ω ; a2.g_f = a.g_f/self.Ω
                        a2.innov = 'Green'
                    elif temp_random < 2/3:
                        a2.e_f = a.e_f*self.Ω ; a2.b_f = a.b_f/self.Ω ; a2.g_f = a.g_f*self.Ω
                        a2.innov = 'Qual'
                    else:
                        a2.e_f = a.e_f/self.Ω ; a2.b_f = a.b_f*self.Ω ; a2.g_f = a.g_f*self.Ω
                        a2.innov = 'Effic'

                    a2.p_f = self.p_hat + np.log(self.M_e/a2.e_f - 1)/self.γ_e # (1)
                    a2.I_f = (a2.e_f**self.λ_e)*(a2.b_f**self.λ_b)*(a2.g_f**self.λ_g)
                    a2.step_2 = a.step_2
                    a2.loan_dur = 0
                    a2.age = -1

                    self.schedule.add(a2)
                    num_of_entered_firms += 1
class ProcessModel(Model):
    """Continuous-space model of selfish / cooperative tumor cells and
    T-killer cells.

    Cells are placed uniformly at random in a toroidal ContinuousSpace and
    activated in random order. Births and deaths requested by agents during
    a step are queued (``cells2add`` / ``cells2delete``) and flushed by
    ``add_new_cells`` / ``delete_dead_cells``.

    Cell type encoding: 0 = selfish, 1 = cooperative, 2 = T-killer.
    """

    def __init__(self, initial_densities, width, height, density_radius=(1, 1, 1), frequency_radius=(1, 1, 1),
                 dispersal_radius=(1, 1, 1), max_cells_per_unit=10, deterministic_death=True, age_limit=20,
                 death_ratio=0.2, death_period_limit=0, birth_rates=(0.2, 0.2, 0.2), k=25, l=20, a=1):
        """Create the model.

        initial_densities is a (selfish, cooperative, tkiller) triple of
        initial counts.  The per-type radius tuples and the constants
        k, l, a parameterize the CellAgent behavior (semantics are defined
        by CellAgent, not visible here).
        """
        super().__init__()

        self.num_selfish, self.num_cooperative, self.num_tkiller = initial_densities
        self.num_tumor_cells = self.num_selfish + self.num_cooperative
        self.num_total = self.num_tumor_cells + self.num_tkiller

        self.space = ContinuousSpace(width, height, True)  # torus=True
        self.schedule = RandomActivation(self)

        self.density_radius = density_radius
        self.frequency_radius = frequency_radius
        self.dispersal_radius = dispersal_radius

        self.max_cells_per_unit = max_cells_per_unit
        self.deterministic_death = deterministic_death
        self.age_limit = age_limit
        self.death_ratio = death_ratio
        self.death_period_limit = death_period_limit
        self.birth_rates = birth_rates
        self.R = min(width, height)/2.0
        self.k, self.l, self.a = k, l, a

        # Queues flushed between steps: (cell, pos) pairs to add, cells to delete.
        self.cells2add = []
        self.cells2delete = []
        # Per-type [xs, ys] position lists used for plotting.
        self.all_cell_pos = [[], []]
        self.pos_selfish_cells = [[], []]
        self.pos_cooperative_cells = [[], []]
        self.pos_tkiller_cells = [[], []]
        self.pos_dead_cells = [[], []]

        added_selfish, added_cooperative, added_tkiller = 0, 0, 0
        random.seed(100)  # fixed seed -> reproducible initial placement

        def n_cells():
            return added_selfish + added_cooperative + added_tkiller

        mortality_rates = [random.uniform(0, 0.2), random.uniform(0, 0.2), random.uniform(0, 0.2)]
        while n_cells() < self.num_total:

            # Draw a type at random; retry if that type's quota is full.
            # (Loop variable renamed from `a`, which shadowed the constructor
            # parameter `a` stored in self.a above.)
            cell_type = random.randrange(3)

            if cell_type == 0 and added_selfish < self.num_selfish:
                agent = CellAgent(n_cells(), self, cell_type)
                agent.set_gamma(0.1)
                agent.set_epsilon(0.7)
                agent.set_d(mortality_rates[0])
                added_selfish += 1

            elif cell_type == 1 and added_cooperative < self.num_cooperative:
                agent = CellAgent(n_cells(), self, cell_type)
                agent.set_gamma(0.1)
                agent.set_epsilon(0.7)
                agent.set_d(mortality_rates[1])
                added_cooperative += 1

            elif cell_type == 2 and added_tkiller < self.num_tkiller:
                agent = CellAgent(n_cells(), self, cell_type)
                agent.set_gamma(0.1)
                agent.set_delta(0.001)
                agent.set_d(mortality_rates[2])
                added_tkiller += 1

            else:
                continue

            self.schedule.add(agent)

            # Place the agent uniformly at random; the bounds span the full
            # space (center +/- half width/height).
            x = random.uniform(self.space.center[0] - (0.5 * self.space.width),
                               self.space.center[0] + (0.5 * self.space.width))
            y = random.uniform(self.space.center[1] - (0.5 * self.space.height),
                               self.space.center[1] + (0.5 * self.space.height))
            self.space.place_agent(agent, (x, y))

        self.counter = 0
        self.density = []
        # [[sum, count], ...] accumulators filled in by the agents each step.
        self.average_birthrate = [[0, 0], [0, 0]]
        self.average_deathrate = [[0, 0], [0, 0]]

    @property
    def g(self):
        """Fraction of cooperative cells among tumor cells (0 if none)."""
        density = self.get_density()
        if density[0] + density[1] == 0:
            return 0
        return density[1]/(density[0] + density[1])

    def get_density(self, agents=None):
        """Return [selfish, cooperative, tkiller] counts over `agents`
        (defaults to all scheduled agents)."""
        density = [0, 0, 0]
        if agents is None:
            agents = self.schedule.agents
        for cell in agents:
            density[cell.type] += 1
        return density

    def new_cell2add(self, cell):
        """Queue a (cell, pos) pair for insertion, ignoring duplicates."""
        if cell not in self.cells2add:
            self.cells2add.append(cell)

    def new_cell2delete(self, cell):
        """Queue a cell for removal, ignoring duplicates."""
        if cell not in self.cells2delete:
            self.cells2delete.append(cell)

    def add_cell_pos(self, pos, cell_type):
        """Record `pos` in the position list for `cell_type` (no-op for
        unknown types)."""
        if cell_type == 0:
            pos_list = self.pos_selfish_cells
        elif cell_type == 1:
            pos_list = self.pos_cooperative_cells
        elif cell_type == 2:
            pos_list = self.pos_tkiller_cells
        else:
            return
        pos_list[0].append(pos[0])
        pos_list[1].append(pos[1])

    def clear_all_cell_pos(self):
        """Reset all per-type position lists."""
        self.pos_selfish_cells = [[], []]
        self.pos_cooperative_cells = [[], []]
        self.pos_tkiller_cells = [[], []]

    def step(self):
        """Advance one step and report the average birth/death rates
        accumulated by the agents during the step."""
        self.density = self.get_density()
        self.average_birthrate = [[0, 0], [0, 0]]
        self.average_deathrate = [[0, 0], [0, 0]]

        self.schedule.step()
        self.counter = 0

        # Guard against division by zero when no agent contributed a sample
        # this step (the original crashed here).
        if self.average_birthrate[0][1]:
            print("Average Birthrate: ", self.average_birthrate[0][0] / self.average_birthrate[0][1])
        if self.average_deathrate[0][1]:
            print("Average Deathrate: ", self.average_deathrate[0][0] / self.average_deathrate[0][1])

    def add_new_cells(self):
        """Flush the pending-add queue into the schedule and space.

        Returns the number of cells added per type.

        BUG FIX: the original removed entries from ``cells2add`` while
        iterating over it, which skips every other entry and leaves them
        queued; we now drain a snapshot instead.
        """
        added_types = [0, 0, 0]
        pending = self.cells2add
        self.cells2add = []
        for c, p in pending:
            added_types[c.type] += 1
            self.schedule.add(c)
            self.space.place_agent(c, p)
            self.add_cell_pos(p, c.type)
        return added_types

    def delete_dead_cells(self):
        """Flush the pending-delete queue, removing each cell from the
        schedule and its recorded plotting positions.

        Returns the number of dead cells per type.

        BUG FIX: same iterate-while-mutating defect as add_new_cells; also
        drops the dead `removed_x`/`removed_y` locals the original set but
        never read.
        """
        dead_types = [0, 0, 0]
        pos_lists = {
            0: self.pos_selfish_cells,
            1: self.pos_cooperative_cells,
            2: self.pos_tkiller_cells,
        }
        pending = self.cells2delete
        self.cells2delete = []
        for c in pending:
            dead_types[c.type] += 1
            pos_list = pos_lists.get(c.type)
            if pos_list is not None:
                # Position lists store coordinates by value, so remove the
                # first matching x and y (mirrors the original behavior).
                if c.pos[0] in pos_list[0]:
                    pos_list[0].remove(c.pos[0])
                if c.pos[1] in pos_list[1]:
                    pos_list[1].remove(c.pos[1])
            self.schedule.remove(c)
        return dead_types
Example #20
0
class Anthill(Model):
    """Ant aggregation model on a fenced grid.

    Ants enter stochastically along the ring just inside the fence, wander
    via the scheduler, and per-cell occupancy time ``tau`` is accumulated to
    compute convergence statistics (mean tau, sigma, sigma*). Once sigma*
    converges (or 2000 samples are collected) the run's series are appended
    to pickle files under ``results/`` and the model stops.

    NOTE(review): relies on module-level globals WIDTH, HEIGHT, data_tau,
    data_sigma, data_sigmastar, Ant and Fence defined elsewhere in the file.
    Model.__init__ is never called; ``self.random`` presumably comes from
    Mesa's Model.__new__ — confirm for the Mesa version in use.
    """
    def __init__(self):

        self.grid = SingleGrid(WIDTH, HEIGHT, False)
        self.schedule = RandomActivation(self)
        self.running = True
        self.internalrate = 0.2  # per-cell probability of injecting an ant each step
        self.ant_id = 1          # next unique id handed to a new Ant
        self.tau = np.zeros((WIDTH, HEIGHT))  # accumulated occupancy per cell
        self.datacollector = DataCollector({
            "Total number of Ants":
            lambda m: self.get_total_ants_number(),
            "mean tau":
            lambda m: self.evaluation1(),
            "sigma":
            lambda m: self.evaluation2(),
            "sigma*":
            lambda m: self.evaluation3(),
        })

        # Coordinates of the outer boundary (fence) and of the ring just
        # inside it, where ants are injected and removed.
        self.bound_vals = []
        self.neigh_bound = []
        self.datacollector.collect(self)

        for i in range(WIDTH):
            for j in range(HEIGHT):
                if i == 0 or j == 0 or i == WIDTH - 1 or j == HEIGHT - 1:
                    self.bound_vals.append((i, j))
                if i == 1 or i == WIDTH - 2 or j == 1 or j == HEIGHT - 2:
                    self.neigh_bound.append((i, j))

        # Build the fence along the outer boundary.
        for fence_id, pos in enumerate(self.bound_vals):
            self.grid.place_agent(Fence(fence_id, self), pos)

    def _append_result(self, path, value):
        """Append `value` under the next integer key of the dict pickled at
        `path`, creating the file with {1: value} if missing or unreadable.

        Replaces four copy-pasted blocks that used bare ``except:`` and
        leaked file handles via ``pickle.dump(x, open(path, 'wb'))``.
        """
        try:
            with open(path, 'rb') as f:
                store = pickle.load(f)
            store[len(store) + 1] = value
        except (OSError, EOFError, pickle.UnpicklingError):
            store = {1: value}
        with open(path, 'wb') as f:
            pickle.dump(store, f)

    def step(self):
        '''Advance the model by one step.'''
        # Inject new ants on the inner boundary ring with probability
        # internalrate, only into empty cells.
        for xy in self.neigh_bound:
            if (self.random.uniform(0, 1) < self.internalrate
                    and self.grid.is_cell_empty(xy)):
                ant = Ant(self.ant_id, self)
                self.schedule.add(ant)
                self.grid.place_agent(ant, xy)
                self.ant_id += 1

        # Move the ants.
        self.schedule.step()
        self.datacollector.collect(self)

        # Remove any ant sitting on the inner boundary ring.
        for (content, i, j) in self.grid.coord_iter():
            if (i, j) in self.neigh_bound and type(content) is Ant:
                self.grid.remove_agent(content)
                self.schedule.remove(content)

        data_tau.append(self.mean_tau_ant)
        data_sigma.append(np.sqrt(self.sigma))
        data_sigmastar.append(self.sigmastar)

        # Stop once sigma* has converged (successive values within 1e-7),
        # or after 2000 samples, and persist this run's series.
        if len(data_sigmastar) > 20:
            converged = abs(data_sigmastar[-2] - data_sigmastar[-1]) < 0.0000001
            if converged or len(data_sigmastar) == 2000:
                self._append_result("results/m1_tau_5.pkl", data_tau)
                self._append_result("results/m1_sigma_5.pkl", data_sigma)
                self._append_result("results/m1_sigmastar_5.pkl", data_sigmastar)
                self._append_result("results/m1_matrix_5.pkl", self.tau)
                print(
                    "_______________________________________________________________________"
                )
                print("DONE")
                self.running = False

    def get_total_ants_number(self):
        """Count the Ant agents currently on the grid."""
        total_ants = 0
        for (content, _, _) in self.grid.coord_iter():
            if type(content) is Ant:
                total_ants += 1
        return total_ants

    def evaluation1(self):
        """Accumulate current occupancy into tau and return the mean tau
        over the (WIDTH - 2)^2 interior cells."""
        # Snapshot of current occupancy: 1 where an Ant sits, else 0.
        occupancy = np.zeros((WIDTH, HEIGHT))
        for (content, i, j) in self.grid.coord_iter():
            occupancy[i][j] = 1 if type(content) is Ant else 0

        self.tau = self.tau + occupancy
        self.mean_tau_ant = self.tau.sum() / ((WIDTH - 2)**2)
        return self.mean_tau_ant

    def evaluation2(self):
        """Return the standard deviation sigma of tau over interior cells.

        The boundary entries of tau are temporarily set to mean_tau_ant so
        that (tau - mean) vanishes there, then reset to 0.
        """
        for site in self.bound_vals:
            self.tau[site[0]][site[1]] = self.mean_tau_ant

        self.sigma = ((self.tau - self.mean_tau_ant)**2).sum() / (
            (WIDTH - 2)**2)

        for site in self.bound_vals:
            self.tau[site[0]][site[1]] = 0

        return np.sqrt(self.sigma)

    def evaluation3(self):
        """Return sigma* = sqrt(sigma) / mean tau (coefficient of variation)."""
        self.sigmastar = np.sqrt(self.sigma) / self.mean_tau_ant
        return self.sigmastar
Example #21
0
class SchoolModel(Model):
    """A model to simulate family-school choice.

    Outline:
    1. Initialize `num_schools` schools with given parameters.
    2. Each step (a year) generates `num_families` families; each family
       has a set of priorities and makes a school choice.
    3. After choosing, school finances and future family decision
       priorities evolve.
    """
    def __init__(self, num_families, num_schools, family_priority_weights):
        self.num_families = num_families
        self.num_schools = num_schools
        # Individual family priorities are random, but this global weighting
        # captures macro market preferences (e.g. growing cost
        # consciousness). TO DO: tie this to a general macro-economy that
        # also lowers the school endowment growth rate.
        self.family_priority_weights = family_priority_weights

        # Agents are activated in random order each step.
        self.schedule = RandomActivation(self)

        # Create the schools.
        for school_id in range(self.num_schools):
            # Parameterize the school.
            # TO DO: implement in a separate function?
            endowment = scipy.stats.expon().rvs(1)[0]  #random.randint(0,100)
            prestige = np.abs(np.random.normal(0, 1, 1)[0])
            tuition = prestige
            annual_fund = random.randint(0, 100)
            efficacy = np.abs(np.random.normal(0, 1, 1)[0])
            priorities = helpers.make_random_school_priorities()
            # Endowment draw scales from 0 (school cares only about
            # prestige) up to MAX_ENDOWMENT_DRAW (cares only about equity
            # and efficacy). Theory: equity/efficacy-minded schools spend on
            # current students, whereas, per Gladwell, hoarding endowment
            # buys reputation at the cost of today's students.
            endowment_draw = MAX_ENDOWMENT_DRAW * (priorities['Equity'] +
                                                   priorities['Efficacy'])
            # Schools near 0 are "in the city", far values are remote; the
            # normal draw clusters schools around the 1D center.
            location = np.random.normal(0, 100, 1)[0]

            school = agents.SchoolAgent(school_id, self, endowment, prestige,
                                        tuition, annual_fund, efficacy,
                                        endowment_draw, priorities, location)
            self.schedule.add(school)

    def step(self, i):
        """Run one model year: spawn families, let agents act, then retire
        this year's families."""
        # Unique ids for this year's cohort of families.
        first_id = self.num_schools + self.num_families * i
        for family_id in range(first_id, first_id + self.num_families):

            # Randomly initialized wealth. Per
            # https://arxiv.org/pdf/cond-mat/0103544, most Americans' wealth
            # follows an exponential distribution; rough estimate with no
            # values below 50. TO DO: think about financial aid.
            wealth = scipy.stats.expon(50, 200).rvs(1)[0]

            # Top priority. TO DO: make priorities a weighted average so
            # families care about everything with individual weightings.
            priorities = helpers.make_random_family_priorities(self)

            # Residence: most families live near the median location
            # (simulating city versus rural living). TO DO: have locations
            # chosen via a more intelligent (Schelling) game-theoretic model.
            location = np.random.normal(0, 100, 1)[0]

            self.schedule.add(
                agents.FamilyAgent(family_id, self, wealth, priorities,
                                   location))

        self.schedule.step()

        # Retire this generation of families.
        this_years_families = [
            agent for agent in self.schedule._agents.values()
            if isinstance(agent, agents.FamilyAgent)
        ]
        for family in this_years_families:
            self.schedule.remove(family)
        print('Global family priority weights at step {}: {}'.format(
            i, self.family_priority_weights))
Example #22
0
class SludgeMonsterModel(Model):
    """Grid world in which SludgeMonster agents roam and food grows at random.

    Args:
        num_agents: Number of monsters created at start-up.
        width, height: Grid dimensions.
        food_growth_prob: Per-cell probability of food sprouting each step.
        initial_food_growth: Per-cell probability used for the initial seeding.
        collection_frequency: Collect model data every N steps.
    """

    # SludgeMonster attributes whose population average is reported at every
    # collection step.
    _REPORTED_ATTRIBS = (
        "friendliness",
        "anger",
        "fertility",
        "max_attack",
        "max_hug_benefit",
        "_decay_mult",
        "leadership",
        "follower_mult",
        "leader_attraction",
        "food_attraction",
        "sight",
        "movement_noise",
    )

    def __init__(self,
                 num_agents,
                 width=100,
                 height=100,
                 food_growth_prob=0.0005,
                 initial_food_growth=.30,
                 collection_frequency=1):
        # mesa.Model.__init__ initializes model bookkeeping (RNG, current_id);
        # the original omitted the call.
        super().__init__()
        self.running = True
        self.width = width
        self.height = height
        self.food_growth_prob = food_growth_prob
        self.initial_food_growth = initial_food_growth
        self.food_type = SludgeFood
        self.schedule = RandomActivation(self)
        self.grid = MultiGrid(self.width, self.height, True)

        # Build the reporter table programmatically instead of repeating one
        # lambda per attribute.  The ``a=attrib`` default binds the loop
        # variable early; a plain closure would make every reporter use the
        # last attribute (late-binding pitfall).
        model_reporters = {
            attrib: (lambda m, a=attrib: m.average_agent_val(
                agent_type=SludgeMonster, attrib_name=a))
            for attrib in self._REPORTED_ATTRIBS
        }
        model_reporters["is_following"] = lambda m: m.average_agent_val(
            agent_type=SludgeMonster, func=lambda a: a.is_following)
        self.datacollector = DataCollector(model_reporters=model_reporters)

        self.collection_frequency = collection_frequency
        self.num_agents = num_agents
        for _ in range(self.num_agents):
            self.add_agent()

        self.grow_food(self.initial_food_growth)

    def average_agent_val(self, agent_type, attrib_name=None, func=None):
        """Average ``attrib_name`` (or ``func(agent)``) over all scheduled
        agents of ``agent_type``.

        Returns 0 when no such agents exist.

        Raises:
            ValueError: if neither ``attrib_name`` nor ``func`` is given.
        """
        if attrib_name:
            def extract(agent):
                return getattr(agent, attrib_name)
        elif func:
            extract = func
        else:
            raise ValueError("either attrib_name or func must be provided")
        vals = [extract(agent) for agent in self.schedule.agents
                if isinstance(agent, agent_type)]
        return sum(vals) / float(len(vals)) if vals else 0

    def step(self):
        """Advance one tick: collect data (every ``collection_frequency``
        steps), step all agents, then grow new food."""
        if self.schedule.steps % self.collection_frequency == 0:
            self.datacollector.collect(self)
        self.schedule.step()
        self.grow_food(self.food_growth_prob)

    def grow_food(self, growth_prob):
        """Sprout food with probability ``growth_prob`` on every cell that
        does not already contain food."""
        food_growth_areas = np.random.random(
            (self.height, self.width)) < growth_prob
        existing_food = self.get_agent_locations(self.food_type)
        food_growth_areas = np.logical_and(food_growth_areas,
                                           np.logical_not(existing_food))
        for y in range(self.height):
            for x in range(self.width):
                if food_growth_areas[y, x]:
                    self.add_agent(agent=self.food_type(self), pos=(x, y))

    def get_agent_locations(self, agent_type):
        """Return a (height, width) boolean array marking cells holding at
        least one agent of ``agent_type``."""
        # np.bool was removed in NumPy 1.24; the builtin ``bool`` is the
        # documented replacement.
        truth_table = np.zeros((self.height, self.width), bool)
        for contents, x, y in self.grid.coord_iter():
            if any(isinstance(agent, agent_type) for agent in contents):
                truth_table[y, x] = True
        return truth_table

    def remove(self, agent):
        """Remove ``agent`` from both the grid and the scheduler."""
        self.grid.remove_agent(agent)
        self.schedule.remove(agent)

    def add_agent(self, agent=None, pos=None):
        """Place ``agent`` (a new SludgeMonster by default) at ``pos``
        (a random cell by default), register it with the scheduler, refresh
        ``num_agents`` and return the agent."""
        # Explicit None checks so a falsy-but-valid argument never triggers
        # the defaults.
        if pos is None:
            x = random.randrange(self.width)
            y = random.randrange(self.height)
            pos = (x, y)
        if agent is None:
            agent = SludgeMonster(self)
        self.schedule.add(agent)
        self.grid.place_agent(agent, pos)
        self.num_agents = len(self.schedule.agents)
        return agent

    def status_str(self):
        """Human-readable dump of the model and every scheduled agent."""
        val = [self.__class__.__name__]
        for agent in self.schedule.agents:
            val.append(agent.status_str())
        if not self.schedule.agents:
            val.append("No agents in model.")
        return "\n".join(val)
Example #23
0
class Covid(Model):
    '''
    Covid model class. Handles agent creation, placement and scheduling.
    '''

    # New Mexico counties, in the order used for the per-county tallies.
    COUNTY_NAMES = (
        'San Juan', 'Rio Arriba', 'Taos', 'Colfax', 'Union', 'Los Alamos',
        'Mora', 'Harding', 'McKinley', 'Sandoval', 'Santa Fe', 'San Miguel',
        'Quay', 'Cibola', 'Valencia', 'Bernalillo', 'Torrance', 'Guadalupe',
        'Curry', 'Catron', 'Socorro', 'Lincoln', 'De Baca', 'Roosevelt',
        'Sierra', 'Chaves', 'Hidalgo', 'Grant', 'Luna', 'Doña Ana', 'Otero',
        'Eddy', 'Lea',
    )

    # RGB colour of each county region in nmcounties.jpg.
    _PIXEL_TO_COUNTY = {
        (87, 127, 77): 'San Juan',
        (168, 144, 178): 'Rio Arriba',
        (131, 141, 91): 'Taos',
        (189, 204, 119): 'Colfax',
        (197, 112, 58): 'Union',
        (211, 165, 80): 'Los Alamos',
        (186, 81, 52): 'Mora',
        (106, 97, 126): 'Harding',
        (91, 124, 143): 'McKinley',
        (92, 59, 40): 'Sandoval',
        (75, 113, 116): 'Santa Fe',
        (109, 103, 53): 'San Miguel',
        (49, 73, 73): 'Quay',
        (178, 62, 49): 'Cibola',
        (138, 99, 84): 'Valencia',
        (137, 184, 214): 'Bernalillo',
        (106, 106, 104): 'Torrance',
        (146, 117, 87): 'Guadalupe',
        (156, 150, 88): 'Curry',
        (67, 94, 149): 'Catron',
        (55, 80, 50): 'Socorro',
        (145, 186, 178): 'Lincoln',
        (82, 33, 37): 'De Baca',
        (195, 189, 189): 'Roosevelt',
        (238, 219, 99): 'Sierra',
        (243, 234, 129): 'Chaves',
        (41, 30, 60): 'Hidalgo',
        (116, 140, 106): 'Grant',
        (11, 10, 8): 'Luna',
        (157, 56, 74): 'Doña Ana',
        (52, 53, 48): 'Otero',
        (207, 144, 135): 'Eddy',
        (138, 171, 80): 'Lea',
    }

    def __init__(self,
                 population=100,
                 width=100,
                 height=100,
                 mobility=6,
                 social_distance=2,
                 asymptomatic_percentage=50.0):
        '''
        Create a new Covid model.

        Args:
            population: Number of people (density) with one asymptomatic infected person.
            asymptomatic_percentage: Percentage of infected people that are asymptomatic.  Asymptomatic people transmit the virus for 42 time steps versus 15 time steps for those that are symptomatic.
            social_distance: Distance at which neighboring susceptible agents can become infected.
            mobility: The maximum distance that an agent can travel.
        '''

        self.current_id = 0
        self.population = population
        self.mobility = mobility
        self.social_distance = social_distance
        self.asymptomatic_percentage = asymptomatic_percentage
        self.state = "home"
        self.schedule = RandomActivation(self)
        self.space = ContinuousSpace(width, height, False)
        self.image = Image.open(r"nmcounties.jpg")
        # Per-county tallies: agents placed so far vs. target population.
        self.num = dict.fromkeys(self.COUNTY_NAMES, 0)
        self.pop = dict.fromkeys(self.COUNTY_NAMES, 0)

        # Real county populations from disk (values are digit strings here;
        # they are converted to scaled integers just below).
        with open('counties.csv') as csvfile:
            reader = csv.reader(csvfile, delimiter=',', quotechar='"')
            for row in reader:
                county = row[2].replace(' County', '')
                if county != '' and county != 'County':
                    self.pop[county] = row[3].replace(',', '')

        # Scale the real populations down so they sum to the model population.
        total = sum(int(value) for value in self.pop.values())
        for county, value in self.pop.items():
            self.pop[county] = round(
                int(self.population) * int(value) / int(total))
        # Rounding may not preserve the total; absorb the difference in the
        # most populous county.
        total = sum(int(value) for value in self.pop.values())
        if self.population != total:
            self.pop['Bernalillo'] += self.population - total

        self.make_agents()
        self.running = True

        self.datacollector = DataCollector({
            "Susceptible": lambda m: self.count("Susceptible"),
            "Infected": lambda m: self.count("Infected"),
            "Recovered": lambda m: self.count("Recovered"),
        })

    def counties(self, pixel):
        """Map an RGB pixel from the county image to a county name.

        Returns '' when the pixel belongs to no county.
        """
        return self._PIXEL_TO_COUNTY.get(pixel, '')

    def make_agents(self):
        '''
        Create self.population agents, with random positions and starting headings.
        '''
        for _ in range(self.population):
            pos = self.inside()
            person = Susceptible(self.next_id(), self, pos)
            self.space.place_agent(person, pos)
            self.schedule.add(person)
        # Patient zero: replace one randomly chosen person with an
        # asymptomatic Infected.  Picking from the actual scheduler keys is
        # correct for any id scheme; the original randint(0, population - 1)
        # raises KeyError when ids start at 1 and 0 is drawn.
        agent_key = random.choice(list(self.schedule._agents.keys()))
        agent = self.schedule._agents[agent_key]
        person = Infected(self.next_id(), self, agent.pos, True)
        person.set_imperial(agent.home, agent.work, agent.travel)
        self.space.remove_agent(agent)
        self.schedule.remove(agent)
        self.space.place_agent(person, person.pos)
        self.schedule.add(person)

    def inside(self):
        """Rejection-sample a random position that lies inside some county
        (a non-white pixel more than 10 units from every border) whose
        population quota is not yet full.

        Returns the accepted position as a length-2 numpy array and
        increments that county's placement tally.
        """
        while True:
            x = self.random.random() * self.space.x_max
            y = self.random.random() * self.space.y_max
            if (10.0 < x < self.space.x_max - 10.0
                    and 10.0 < y < self.space.y_max - 10.0):
                pixel = self.image.getpixel((x, y))
                # White pixels are outside every county.
                if pixel != (255, 255, 255):
                    county = self.counties(pixel)
                    if county and self.num[county] < self.pop[county]:
                        self.num[county] += 1
                        return np.array((x, y))

    def step(self):
        """Advance the model one tick: spread infection, rotate the shared
        location state (home -> work -> community -> home), step all agents
        and collect data.  Stops the run once no Infected agents remain."""
        self.infect()

        # Everyone cycles through the same daily location sequence; an
        # unrecognized state is left unchanged (matches the original chain).
        transitions = {"home": "work", "work": "community", "community": "home"}
        self.state = transitions.get(self.state, self.state)

        self.schedule.step()

        # collect data
        self.datacollector.collect(self)
        if self.count("Infected") == 0:
            self.running = False

    def infect(self):
        """Convert each Susceptible agent with at least one Infected
        neighbour (within ``social_distance``) into a new Infected agent."""
        susceptible = [
            key for key, agent in self.schedule._agents.items()
            if agent.name == "Susceptible"
        ]
        for agent_key in susceptible:
            agent = self.schedule._agents[agent_key]
            neighbors = self.space.get_neighbors(agent.pos,
                                                 self.social_distance)
            for neighbor in neighbors:
                if neighbor.name == "Infected":
                    # New infections are asymptomatic with probability
                    # asymptomatic_percentage / 100.
                    asymptomatic = (100.0 * self.random.random() <
                                    self.asymptomatic_percentage)
                    person = Infected(self.next_id(), self, agent.pos,
                                      asymptomatic)
                    person.set_imperial(agent.home, agent.work, agent.travel)
                    self.space.remove_agent(agent)
                    self.schedule.remove(agent)
                    self.space.place_agent(person, person.pos)
                    self.schedule.add(person)
                    break

    def count(self, type):
        """Return how many scheduled agents have ``name`` equal to ``type``.

        The parameter keeps its original (builtin-shadowing) name so keyword
        callers remain compatible.
        """
        return sum(1 for agent in self.schedule._agents.values()
                   if agent.name == type)
Example #24
0
class BoidFlockers(Model):
    """
    Flocker model class. Handles agent creation, placement and scheduling.
    """

    def __init__(
            self,
            population=100,
            width=100,
            height=100,
            speed=1,
            vision=10,
            separation=2,
            rate=10,
            size_factor=2,
            sim_length=1200,
            angle_min=-180,
            angle_max=180):
        """
        Create a new Flockers model.

        Args:
            width, height: Size of the space.
            speed: How fast should the Boids move.
            vision: How far around should each Boid look for its neighbors.
            separation: What's the minimum distance each Boid will attempt to
                keep from any other.
            rate: Mean number of new agents injected per 60 time steps.
            size_factor: Shrinks the central region in which origins and
                destinations are drawn (1 = whole space).
            sim_length: Number of steps during which new agents are created.
            angle_min, angle_max: Accepted heading range (degrees) for new
                origin/destination pairs.
        """
        self.population = population
        self.unique_id = 1
        self.vision = vision
        self.speed = speed
        self.separation = separation
        self.schedule = RandomActivation(self)
        self.space = ContinuousSpace(width, height, False)
        self.size_factor = size_factor

        self.angle_min = angle_min
        self.angle_max = angle_max

        self.running = True
        self.rate = rate
        # Agents that reached their destination this step (filled by agents).
        self.kill_agents = []
        # Agents waiting for their origin to be conflict-free.
        self.queue = []
        self.input_rate = 0
        self.num_agents = 0

        self.arrival = 0
        self.departure = 0

        self.n_confs = 0
        self.n_intrusion = 0

        self.dep_del = 0
        self.enroute_del = 0
        # NOTE(review): tot_del is computed once here and never refreshed, so
        # the 'Total Delay' reporter always reports this initial value —
        # confirm whether that is intended.
        self.tot_del = self.dep_del + self.enroute_del

        self.sim_length = sim_length

        self.datacollector = DataCollector(
            model_reporters={"Occupancy": compute_N,
                             "Total flow": compute_flow,
                             "Effective flow": compute_eff_flow,
                             "Inpute rate": compute_inp,
                             "Output rate": compute_out,
                             'Effective speed': compute_eff_speed,
                             'Speed': compute_speed,
                             'Queue length': compute_queue_len,
                             'size': 'size_factor',
                             'inp_rate': 'rate',
                             'vision': 'vision',
                             'def_speed': 'speed',
                             'sep': 'separation',
                             'Departure Delay': 'dep_del',
                             'Enroute Delay': 'enroute_del',
                             'Total Delay': 'tot_del',
                             'N Conflicts': 'n_confs',
                             'N Intrusions': 'n_intrusion',
                             'N Arrivals': 'arrival',
                             'N Departures': 'departure'
                             },
            agent_reporters={'id': 'unique_id',
                             'prev_dist': 'previos_distance',
                             'cur_dist': 'current_distance',
                             'phys_speed': 'physic_speed',
                             'vision': 'vision',
                             'def_speed': 'speed',
                             'sep': 'separation',
                             'x': lambda x: x.pos[0],
                             'y': lambda x: x.pos[1]}
            )

    def make_od(self):
        """Draw one random origin/destination pair, each uniform inside the
        central region of the space (scaled down by ``size_factor``)."""
        half_x = self.space.x_max / 2
        half_y = self.space.y_max / 2
        x = half_x + np.random.uniform(-1, 1) * half_x / self.size_factor
        y = half_y + np.random.uniform(-1, 1) * half_y / self.size_factor
        pos = np.array((x, y))
        x_dest = half_x + np.random.uniform(-1, 1) * half_x / self.size_factor
        y_dest = half_y + np.random.uniform(-1, 1) * half_y / self.size_factor
        dest = np.array((x_dest, y_dest))
        return pos, dest

    def make_od2(self):
        """Like make_od, but rejection-sample until the origin->destination
        heading lies within [angle_min, angle_max] degrees.

        Delegates the sampling to make_od instead of duplicating it.
        """
        while True:
            pos, dest = self.make_od()
            vector = (dest - pos) / np.linalg.norm(dest - pos)
            angle = np.arctan2(vector[0], vector[1]) * 180 / np.pi
            if self.angle_min <= angle <= self.angle_max:
                return pos, dest

    def make_agents(self, od, init_time):
        """Build (but do not place) a Boid for the given (origin, destination)
        pair and creation time."""
        pos, dest = od
        velocity = np.random.random(2) * 2 - 1  # random initial heading

        boid = Boid(
            unique_id=self.unique_id,
            model=self,
            pos=pos,
            speed=self.speed,
            velocity=velocity,
            destination=dest,
            vision=self.vision,
            separation=self.separation,
            init_time=init_time
        )
        return boid

    def place_boid(self, boid):
        """Insert ``boid`` into the space and the scheduler."""
        self.space.place_agent(boid, boid.pos)
        self.schedule.add(boid)

    def _admit(self, agent, time):
        """Place ``agent`` into the space and update arrival bookkeeping."""
        agent.entry_time = time
        self.place_boid(agent)
        self.arrival += 1
        self.num_agents += 1

    def agent_maker(self):
        """Create this step's batch of new agents (``rate`` per 60 steps on
        average); admit each one whose origin is free of neighbours within
        ``separation``, otherwise append it to the waiting queue."""
        per_step = self.rate / 60
        fractional = per_step % 1
        integer = int(per_step - round(fractional))
        # Bernoulli draw realizes the fractional part of the per-step rate.
        num_frac = np.random.binomial(size=1, n=1, p=fractional)
        self.input_rate = int(integer + num_frac)

        for _ in range(self.input_rate):
            od = self.make_od2()
            agent = self.make_agents(od, init_time=self.schedule.time)
            agent.od_dist = agent.distance()
            self.unique_id += 1

            if self.num_agents == 0:
                # Empty airspace: always admit immediately.
                self._admit(agent, self.schedule.time)
            else:
                neighbors = self.space.get_neighbors(od[0], self.separation,
                                                     False)
                if not neighbors:
                    self._admit(agent, self.schedule.time)
                else:
                    self.queue.append(agent)

    def queue_clearer(self, time):
        """Admit every queued agent whose origin is now conflict-free.

        Fixes two defects of the original: (1) entries were deleted from
        ``self.queue`` while iterating it with ``enumerate``, which skips the
        element following every removal; (2) ``get_neighbors`` was called
        with ``agent.pos[0]`` — a scalar x-coordinate — instead of the
        position itself.
        """
        still_waiting = []
        for agent in self.queue:
            neighbors = self.space.get_neighbors(agent.pos, self.separation,
                                                 False)
            if not neighbors:
                # Departure delay: time spent waiting in the queue.
                self.dep_del += time - agent.init_time
                self._admit(agent, time)
            else:
                still_waiting.append(agent)
        self.queue = still_waiting

    def step(self):
        """
        Advance the simulation one step: inject and admit agents, step the
        scheduler, then retire agents that reached their destination.
        """
        # Per-step conflict/intrusion counters (incremented by the agents).
        self.n_confs = 0
        self.n_intrusion = 0

        # Inject new traffic only during the first ``sim_length`` steps.
        if self.schedule.time < self.sim_length:
            self.agent_maker()
        self.queue_clearer(time=self.schedule.time)

        self.kill_agents = []
        self.schedule.step()

        # Remove agents that arrived at their destinations.
        self.departure += len(self.kill_agents)
        for agent in self.kill_agents:
            self.enroute_del += agent.enroute_del
            self.schedule.remove(agent)
            self.num_agents -= 1
            self.space.remove_agent(agent)
        try:
            self.datacollector.collect(self)
        except Exception:
            # Best-effort collection (narrowed from a bare ``except`` so
            # KeyboardInterrupt/SystemExit still propagate); reporters can
            # fail for agents created mid-step.
            pass
Example #25
0
class CancerModel(Model):
    def xprint(self, *args):
        logger.info("CANCER MODEL:  " + " ".join(map(str, args)))

    def __init__(self, cure_agent_type, config):

        self.xprint("STARTING SIMULATION !!!")
        self.counter = 0
        self.decay_number = 0
        self.lifetime_counter = 0
        self.metastasis_score = 0
        eat_values = {CancerCell: 1, HealthyCell: -1, CancerStemCell: 5}
        assert (issubclass(cure_agent_type, CureAgent))

        agent_memory_range = config["Agent"]["memory_limit"]
        agent_memory_type = config["Agent"]["memory_type"]
        radoznalost = config["Agent"]["curiosity"]
        turn_off_modifiers = config["Tumor"]["turn_off_modifiers"]
        CC_mutation_probability = config["Tumor"]["mutation_probability"]
        is_tumor_growing = config["Tumor"]["is_growing"]
        tumor_movement_stopping_range = config["Tumor"][
            "movement_stopping_range"]
        steps_before_mutation = config["Model"]["steps_before_mutation"]
        self.SAMPLE_i = config["Model"]["sample_i"]

        self.cure_number = config["Model"]["NA_number"]
        self.probabilites_ranges = config["Agent"]["probabilities_limits"]
        self.modifier_fraction = config["Tumor"]["modifier_fraction"]
        self.mode = config["Simulation"]["mode"]
        fname = "xxx" if self.mode == "learning" else config["Simulation"][
            "fname"]

        self.MUTATION_PERCENTAGE = config["Model"]["mutation_percentage"]
        tumor_growth_probability = config["Tumor"]["growth_probability"]
        cancer_cell_number = config["Model"]["CC_number"]

        #DATA COLLECTION

        self.datacollector = DataCollector(
            model_reporters={
                "FitnessFunction": fitness_funkcija,
                "AverageSpeed": speed_avg,
                "AverageMemoryCapacity": memory_size_all_avg,
                "PopulationHeterogenity": population_heterogenity,
                "MutationAmount": mutation_amount,
                "CancerStemCell Number": CSC_number,
                "CSC Specialized Agents": CSC_specialized_agents,
                "CancerHeterogenity1": cancer_heterogenity_1,
                "CancerHeterogenity2": cancer_heterogenity_2,
                "CC_Number": CC_number,
                "HealthyCell_Number": HC_number,
                "MetastasisScore": "metastasis_score",
                "CancerSize": cancer_size,
                "TotalTumorResiliance": overall_cancer_resiliance,
                "TumorResiliance_Pi": cancer_resiliance_Pi,
                "TumorResiliance_Pd": cancer_resiliance_Pd,
                "TumorResiliance_Pa": cancer_resiliance_Pa,
                "TumorResiliance_Pk": cancer_resiliance_Pk,
                "TumorResiliance_Psd": cancer_resiliance_Psd,
                "NumberOfMutatedCells": mutated_CCs_num,
                "TumorCoverage": tumor_coverage,
                "AveragePd": average_Pd,
                "AveragePa": average_Pa,
                "AveragePi": average_Pi,
                "AveragePk": average_Pk,
                "AveragePsd": average_Psd,
                #      "PopulationClusters":cluster_counts
            },
            agent_reporters={
                "Pi": get_Pi,
                "Pa": get_Pa,
                "Pd": get_Pd,
                "speed": get_speed,
                "Psd": get_Psd,
                "Pk": get_Pk,
                "memory_size": get_memory_size,
                "type": get_agent_type
            })
        grid_size = math.ceil(math.sqrt(cancer_cell_number * 4))

        self.STEPS_BEFORE_MUTATION = steps_before_mutation
        self.grid = MultiGrid(grid_size, grid_size, False)
        self.NUM_OF_INJECTION_POINTS = config["Model"]["injection_points"]
        #        self.speeds = list(range(1,grid_size//2))
        self.speeds = [
            1
        ]  #TODO ovo mozda bolje? ne znam da li treba u config fajlu?
        poss = self.generate_cancer_cell_positions(grid_size,
                                                   cancer_cell_number)
        num_CSC = math.ceil(percentage(1, cancer_cell_number))
        pos_CSC = [self.random.choice(poss) for i in range(num_CSC)]

        #ACTIVATE SIMULATION
        self.schedule = RandomActivation(self)
        self.running = True

        #PLACE CANCER CELLS

        for i in range(cancer_cell_number):
            pos = poss[i]
            has_modifiers = False if ((i < (
                (1 - self.modifier_fraction) * cancer_cell_number))
                                      or turn_off_modifiers is True
                                      ) else True  #10 % will be with modifiers
            c = CancerStemCell("CANCER_STEM_CELL-"+str(uuid.uuid4()),self,value = eat_values[CancerStemCell],has_modifiers=has_modifiers,mutation_probability=CC_mutation_probability,grows=is_tumor_growing,growth_probability=tumor_growth_probability) \
                if pos in pos_CSC else CancerCell("CANCER_CELL-"+str(uuid.uuid4()),self,value=eat_values[CancerCell],has_modifiers=has_modifiers,mutation_probability=CC_mutation_probability,grows=is_tumor_growing,growth_probability=tumor_growth_probability)
            self.grid.place_agent(c, pos)
            self.schedule.add(c)

        #PLACE HEALTHY CELLS

        for (i, (contents, x, y)) in enumerate(self.grid.coord_iter()):
            nbrs = self.grid.get_neighborhood([x, y], moore=True)
            second_nbrs = []
            for nbr in nbrs:
                second_nbrs += self.grid.get_neighborhood(nbr, moore=True)
            nbrs = nbrs + second_nbrs
            nbr_contents = self.grid.get_cell_list_contents(nbrs)
            nbr_CCs = [
                nbr for nbr in nbr_contents if isinstance(nbr, CancerCell)
            ]

            if not contents and len(nbr_CCs) == 0:
                c = HealthyCell(uuid.uuid4(), self, eat_values[HealthyCell])
                self.grid.place_agent(c, (x, y))
                self.schedule.add(c)

        if self.mode == "simulation":
            self.duplicate_mutate_or_kill = self.simulation_mode_function
            self.decay_probability = 0.04  #MAGIC NUMBER
            self.read_nanoagents_from_file(
                fname=fname,
                cure_agent_type=cure_agent_type,
                tumor_movement_stopping_range=tumor_movement_stopping_range,
                agent_memory_range=agent_memory_range,
                radoznalost=radoznalost)

        elif self.mode == "learning":
            self.decay_probability = 0
            self.make_nanoagents_from_scratch(cure_agent_type, radoznalost,
                                              agent_memory_type,
                                              agent_memory_range,
                                              tumor_movement_stopping_range,
                                              grid_size)
        else:
            assert ("False")

    def get_random_positions_on_empty_cells(self):
        """Gets the N random currently empty positions on the grid"""
        #GETTING EMPTY POSITIONS (for placing nano agents)
        empty_cells = [(x, y) for (i, (contents, x,
                                       y)) in enumerate(self.grid.coord_iter())
                       if not contents]
        positions = [
            self.random.choice(empty_cells)
            for i in range(self.NUM_OF_INJECTION_POINTS)
        ]
        self.xprint("The 5 random empty positions are %s" % positions)
        return positions

    def inject_nanoagents(self):
        """Injects the nanoagents, they will be activated slowly after"""
        from itertools import cycle

        positions = cycle(self.get_random_positions_on_empty_cells())
        for a in self.agents:
            self.grid.place_agent(a, next(positions))
        self.agents_iterator = iter(self.agents)

    def activate_next_batch_of_agents(self):
        """Add the next batch of injected nano-agents to the scheduler.

        The population is activated in 14 batches.  Once the agent iterator
        is exhausted there is nothing left to activate, so stop early —
        consistent with the StopIteration handling in simulation_step.
        """
        batch_size = round(len(self.agents) / 14)  # MAGIC NUMBER: 14 batches
        for _ in range(batch_size):
            try:
                self.schedule.add(next(self.agents_iterator))
            except StopIteration:
                # All agents already activated.
                break

    def read_nanoagents_from_file(self, fname, cure_agent_type, radoznalost,
                                  agent_memory_range,
                                  tumor_movement_stopping_range):
        """Rebuild the nano-agent population from a CSV produced by a
        previous (learning) run.

        Agents are only constructed and collected into ``self.agents`` here;
        they are placed on the grid and scheduler later by
        ``inject_nanoagents``.

        fname -- path to the CSV file, one row per agent
        cure_agent_type -- class used to instantiate each agent
        radoznalost -- curiosity parameter passed to every agent
        agent_memory_range, tumor_movement_stopping_range -- parameter
            ranges forwarded unchanged to the agent constructor
        """
        import pandas as pd
        self.xprint("Reading nanoagents from file")
        df = pd.read_csv(fname)
        self.agents = []

        for i, row in df.iterrows():
            # NOTE(review): memory_type is fed from row.memory_size here,
            # while make_nanoagents_from_scratch passes a separate
            # agent_memory_type value — confirm this is intentional.
            a = cure_agent_type(
                uuid.uuid4(),
                self,
                speeds=self.speeds,
                radoznalost=radoznalost,
                memory_type=row.memory_size,
                memory_range=agent_memory_range,
                tumor_movement_stopping_range=tumor_movement_stopping_range,
                probabilities_ranges=self.probabilites_ranges)
            # Overwrite the evolved per-agent values recorded in the CSV.
            a.Pi = row.Pi
            a.Pa = row.Pa
            a.Pd = row.Pd
            a.memory_size = row.memory_size
            a.memorija = FixSizeOrderedDict(max=row.memory_size)
            a.tumor_movement_stopping_rate = row.tumor_movement_stopping_rate
            self.agents.append(a)

    def make_nanoagents_from_scratch(self, cure_agent_type, radoznalost,
                                     agent_memory_type, agent_memory_range,
                                     tumor_movement_stopping_range, grid_size):
        """Create ``self.cure_number`` fresh nano-agents, placing them
        round-robin on the random injection points and registering each
        with both the grid and the scheduler."""
        from itertools import cycle
        self.xprint("Making nanoagents from scratch")
        injection_points = cycle(self.get_random_positions_on_empty_cells())
        for _ in range(self.cure_number):
            position = next(injection_points)
            self.xprint(position)
            agent = cure_agent_type(
                uuid.uuid4(),
                self,
                speeds=self.speeds,
                radoznalost=radoznalost,
                memory_type=agent_memory_type,
                memory_range=agent_memory_range,
                tumor_movement_stopping_range=tumor_movement_stopping_range,
                probabilities_ranges=self.probabilites_ranges)
            self.grid.place_agent(agent, position)
            self.schedule.add(agent)

    def simulation_mode_function(self):
        """No-op stand-in for duplicate_mutate_or_kill in simulation mode:
        the evolved population must not change while it is being evaluated."""
        self.xprint("In simulation mode - not duplicating or mutating")

    def generate_cancer_cell_positions(self, grid_size, cancer_cells_number):
        """Return positions forming a compact blob of at least
        ``cancer_cells_number`` cells around the grid center.

        Works as a breadth-first flood fill: ``poss`` is deliberately
        extended while it is being iterated, so each appended frontier cell
        later contributes its own neighbors.  A whole neighborhood ring is
        added at a time, so the result may slightly exceed the requested
        count; order of the returned list is not guaranteed.
        """
        center = grid_size // 2
        poss = [(center, center)]
        # Iterating a list that grows inside the loop is load-bearing here.
        for pos in poss:
            poss += [
                n for n in self.grid.get_neighborhood(
                    pos, moore=True, include_center=False) if n not in poss
            ]
            if len(set(poss)) >= cancer_cells_number:
                break
        poss = list(set(poss))  # deduplicate
        return poss

    def duplicate_mutate_or_kill(self):
        """Evolutionary step over the cure agents.

        Ranks agents by points, then duplicates the top
        MUTATION_PERCENTAGE, removes the equally-sized bottom group, and
        mutates the middle group in place.

        Raises RuntimeError in simulation mode, where the population must
        stay fixed (simulation_mode_function is swapped in instead).
        """
        if self.mode == "simulation":
            # Was `assert (False)`: a real exception also fires under
            # `python -O`, where asserts are stripped.
            raise RuntimeError(
                "duplicate_mutate_or_kill must not run in simulation mode")
        koliko = math.ceil(
            percentage(self.MUTATION_PERCENTAGE, self.cure_number))
        cureagents = [
            c for c in self.schedule.agents if isinstance(c, CureAgent)
        ]
        sortirani = sorted(cureagents, key=lambda x: x.points, reverse=True)
        poslednji = sortirani[-koliko:]  # worst performers -> removed
        prvi = sortirani[:koliko]        # best performers  -> duplicated
        sredina = len(sortirani) // 2
        pocetak_sredine = sredina - (koliko // 2)
        kraj_sredine = sredina + (koliko // 2)
        srednji = sortirani[pocetak_sredine:kraj_sredine]
        self.mutate_agents(srednji)
        assert (len(prvi) == len(poslednji))
        self.remove_agents(poslednji)
        self.duplicate_agents(prvi)

    def mutate_agents(self, agents):
        """Call mutate() on every agent in *agents* (the mid-ranked group)."""
        self.xprint("Mutating middle agents")
        for agent in agents:
            agent.mutate()

    def remove_agents(self, agents):
        """Remove every agent in *agents* from both the grid and scheduler."""
        for doomed in agents:
            self.kill_cell(doomed)

    def duplicate_agents(self, agents):
        """Clone each agent and register the copy with grid and scheduler.

        NOTE(review): every clone is dropped at (1, 1) — presumably an
        arbitrary spawn point; confirm this is intended.
        """
        for parent in agents:
            clone = parent.copy()
            self.grid.place_agent(clone, (1, 1))
            self.schedule.add(clone)

    def kill_cell(self, cell):
        """Take *cell* out of the simulation: off the grid, out of the
        scheduler."""
        self.grid.remove_agent(cell)
        self.schedule.remove(cell)

    def detach_stem_cell(self, cell):
        """Remove a stem cell that detached from the tumor; each detachment
        counts toward the metastasis score."""
        self.metastasis_score += 1
        self.kill_cell(cell)

    def kill_all_agents(self):
        """Make every cure agent remove itself (end of nano-agent lifetime).

        A snapshot of the scheduler's agent list is taken first, since
        kill_self mutates the schedule while we iterate.
        """
        for agent in list(self.schedule.agents):
            if isinstance(agent, CureAgent):
                agent.kill_self()

    def write_population_to_file(self, i):
        """Snapshot the current agent population (sample SAMPLE_i, step i)
        to a CSV file under ./Populations/."""
        population = record_model_population(self)
        filename = "./Populations/Population{}-step{}.csv".format(
            self.SAMPLE_i, i)
        population.to_csv(filename)

    def step(self):
        """Advance the model by one tick.

        Order matters: data is collected and the population snapshot written
        with the *pre-increment* counter, then all agents act, then the
        counters advance and the periodic evolutionary step may fire.
        """
        self.datacollector.collect(self)

        if self.mode == "simulation":
            self.simulation_step()

        self.write_population_to_file(self.counter)

        self.schedule.step()

        self.counter += 1
        self.lifetime_counter += 1

        if self.counter % self.STEPS_BEFORE_MUTATION == 0:
            # checked after the increment so this never fires on step zero
            self.duplicate_mutate_or_kill()

    def simulation_step(self):
        """Per-step bookkeeping that runs only in simulation mode.

        Kills the whole nano-agent population once its lifetime expires,
        then activates the next batch of injected agents (the population is
        activated in 14 batches, same ratio as
        activate_next_batch_of_agents).
        """
        LIFETIME_OF_NANOAGENTS = 80  # MAGIC NUMBER: steps before agents die
        if self.lifetime_counter > LIFETIME_OF_NANOAGENTS:
            self.kill_all_agents()
        # (Removed: unused REINJECTION_PERIOD constant and the dead,
        # commented-out re-injection logic that referenced it.)
        agents_at_each_step = round(self.cure_number / 14)
        for i in range(agents_at_each_step):
            try:
                self.schedule.add(next(self.agents_iterator))
            except StopIteration:
                break
        agents = [a for a in self.schedule.agents if isinstance(a, CureAgent)]
        assert (len(agents) <= self.cure_number)
Example #26
0
class CancerModel(Model):
    """Agent-based model of a tumor attacked by a population of cure agents.

    The grid is filled with a central blob of cancer cells (a small fraction
    of which are stem cells), a population of cure agents injected at (0, 0),
    and healthy cells everywhere else.

    cancer_cells_number -- size of the initial tumor blob
    cure_number -- number of cure agents; first half plain CureAgents,
        second half SmartCureAgents
    eat_values -- mapping from cell *class* to that cell's nutrition value
    verovatnoca_mutacije -- mutation probability (currently unused; kept for
        interface compatibility)
    """

    def __init__(self, cancer_cells_number, cure_number, eat_values,
                 verovatnoca_mutacije):
        super().__init__()
        self.counter = 0
        self.cure_number = cure_number
        radoznalosti = list(np.arange(0.01, KILLING_PROBABILITY, 0.01))
        print(radoznalosti)
        self.datacollector = DataCollector(
            model_reporters={"FitnessFunction": fitness_funkcija,
                             "SpeedSum": overall_speed,
                             "SmartMedicine": num_of_smart_medicine,
                             "RadoznalostSum": radoznalost_sum})
        # Grid sized so the tumor initially occupies ~a quarter of the area.
        grid_size = math.ceil(math.sqrt(cancer_cells_number * 4))
        self.grid = MultiGrid(grid_size, grid_size, False)
        speeds = list(range(grid_size // 2))  # TODO: fix the colors
        print(speeds)

        poss = self.generate_cancer_cell_positions(grid_size,
                                                   cancer_cells_number)
        # ~1% of the tumor cells are cancer stem cells.
        num_CSC = math.ceil(percentage(1, cancer_cells_number))
        pos_CSC = [self.random.choice(poss) for _ in range(num_CSC)]
        self.schedule = RandomActivation(self)
        self.running = True
        for i in range(cancer_cells_number):
            pos = poss[i]
            # BUG FIX: eat_values is keyed by the cell class; the old code
            # indexed with `X.__class__` (i.e. the metaclass `type`), which
            # can never match class keys.  Also use uuid ids consistently.
            if pos in pos_CSC:
                c = CancerStemCell(uuid.uuid4(), self,
                                   value=eat_values[CancerStemCell])
            else:
                c = CancerCell(uuid.uuid4(), self,
                               value=eat_values[CancerCell])
            self.grid.place_agent(c, pos)
            self.schedule.add(c)
        for i in range(cure_number):
            pos = (0, 0)
            radoznalost = self.random.choice(radoznalosti)
            speed = self.random.choice(speeds)
            # First half plain CureAgents, second half SmartCureAgents.
            agent_cls = CureAgent if i < cure_number // 2 else SmartCureAgent
            a = agent_cls(uuid.uuid4(), self, speed=speed,
                          radoznalost=radoznalost)
            self.grid.place_agent(a, pos)
            self.schedule.add(a)

        # Fill every remaining empty cell with healthy tissue.
        for (contents, x, y) in self.grid.coord_iter():
            if not contents:
                c = HealthyCell(uuid.uuid4(), self, eat_values[HealthyCell])
                self.grid.place_agent(c, (x, y))
                self.schedule.add(c)

    def generate_cancer_cell_positions(self, grid_size, cancer_cells_number):
        """Flood-fill a compact blob of at least ``cancer_cells_number``
        positions around the grid center (may overshoot by one neighborhood
        ring; result order is not guaranteed)."""
        center = grid_size // 2
        poss = [(center, center)]
        # ``poss`` deliberately grows while being iterated (BFS frontier).
        for pos in poss:
            poss += [n for n in self.grid.get_neighborhood(
                pos, moore=True, include_center=False) if n not in poss]
            if len(set(poss)) >= cancer_cells_number:
                break
        return list(set(poss))

    def duplicate_or_kill(self):
        """Duplicate the top 5% of cure agents (by points) and remove the
        bottom 5%."""
        # TODO: how the percentage is chosen is still an open question
        koliko = math.ceil(percentage(5, self.cure_number))
        cureagents = [c for c in self.schedule.agents
                      if isinstance(c, CureAgent)]
        sortirani = sorted(cureagents, key=lambda x: x.points, reverse=True)
        poslednji = sortirani[-koliko:]  # worst -> removed
        prvi = sortirani[:koliko]        # best  -> duplicated
        assert (len(prvi) == len(poslednji))
        self.remove_agents(poslednji)
        self.duplicate_agents(prvi)

    def remove_agents(self, agents):
        """Remove each agent from the scheduler and the grid."""
        for a in agents:
            self.schedule.remove(a)
            self.grid.remove_agent(a)

    def duplicate_agents(self, agents):
        """Spawn a fresh copy of each agent at (1, 1)."""
        for a in agents:
            # TODO: carry over the remaining parameters as well
            a_new = a.__class__(uuid.uuid4(), model=self, speed=a.speed,
                                radoznalost=a.radoznalost)
            self.grid.place_agent(a_new, (1, 1))
            self.schedule.add(a_new)

    def step(self):
        """Collect data, advance all agents, and apply selection every
        10th step."""
        self.datacollector.collect(self)
        self.counter += 1
        self.schedule.step()
        if self.counter % 10 == 0:  # TODO: make the period a parameter
            self.duplicate_or_kill()