Example #1
class BoidFlockers(Model):
    '''
    A Mesa implementation of flocking agents inspired by https://doi.org/10.1109/IROS.2014.6943105.
    '''
    def __init__(self, formation, population, width, height, vision, min_dist,
                 flock_vel, accel_time, equi_dist, repulse_max, repulse_spring,
                 align_frict, align_slope, align_min, wall_decay, wall_frict,
                 form_shape, form_track, form_decay, wp_tolerance):
        '''
        Create a new Flockers model.

        Args:
            population: Number of agents.
            width, height: Size of the space.
            vision: How far around each agent should look for its neighbors.
            min_dist: Minimum allowed distance between agents before a collision occurs. This is only used for statistics.
        '''
        # set parameters
        self.population = population
        self.space = ContinuousSpace(width, height, torus=False)
        self.vision = vision
        self.min_dist = min_dist
        self.params = dict(formation=formation,
                           population=population,
                           flock_vel=flock_vel,
                           accel_time=accel_time,
                           equi_dist=equi_dist,
                           repulse_max=repulse_max,
                           repulse_spring=repulse_spring,
                           align_frict=align_frict,
                           align_slope=align_slope,
                           align_min=align_min,
                           wall_decay=wall_decay,
                           wall_frict=wall_frict,
                           form_shape=form_shape,
                           form_track=form_track,
                           form_decay=form_decay,
                           wp_tolerance=wp_tolerance)

        # data collection for plots
        self.datacollector = DataCollector(
            model_reporters={
                "Minimum Distance": minimum_distance,
                "Maximum Distance": maximum_distance,
                "Average Distance": average_distance,
                "Collisions": collisions,
                "Messages Distributed": messages_distributed,
                "Messages Centralized": messages_centralized
            })

        # execute agents sequentially in a random order
        self.schedule = RandomActivation(self)

        # place agents
        self.make_agents()

        # pairwise distances
        self.agent_distances()

        # run model
        self.running = True

        # collect initial data sample
        self.datacollector.collect(self)

    def agent_distances(self):
        '''
        Compute the pairwise distances between all agents.
        '''
        self.density = [
            self.space.get_distance(pair[0].pos, pair[1].pos)
            for pair in it.combinations(self.schedule.agent_buffer(), 2)
        ]

    def make_agents(self):
        '''
        Create self.population agents and place them at the center of the environment.
        '''
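        # arrange the agents on a rough square lattice around the centre of the
        # space, with neighbouring agents spaced 2 * min_dist apart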
        s = np.floor(np.sqrt(self.population))
        for i in range(self.population):
            x = self.space.center[0] - self.min_dist * s + 2 * self.min_dist * (i % s)
            y = self.space.center[1] - self.min_dist * np.floor(
                self.population / s) + 2 * self.min_dist * np.floor(i / s)
            pos = np.array((x, y))
            velocity = np.array([0, 1.0])
            boid = Boid(i, self, pos, velocity, self.vision, **self.params)
            self.space.place_agent(boid, pos)
            self.schedule.add(boid)

    def step(self):
        '''
        Execute one step of the simulation.
        '''
        self.schedule.step()
        self.agent_distances()
        self.datacollector.collect(self)
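
A minimal usage sketch (the parameter values below are purely illustrative, and the Boid class and the reporter functions referenced above are assumed to be defined elsewhere in the same module):

model = BoidFlockers(formation="grid", population=16, width=100, height=100,
                     vision=10, min_dist=1.0, flock_vel=0.5, accel_time=1.0,
                     equi_dist=2.0, repulse_max=5.0, repulse_spring=0.1,
                     align_frict=0.5, align_slope=1.0, align_min=0.1,
                     wall_decay=1.0, wall_frict=0.5, form_shape="grid",
                     form_track=0.1, form_decay=1.0, wp_tolerance=0.5)
for _ in range(100):  # run 100 steps and retrieve the collected statistics
    model.step()
stats = model.datacollector.get_model_vars_dataframe()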
Example #2
class PolicyEmergenceSM(Model):
    '''
	Simplest Model for the policy emergence model.
	'''
    def __init__(self, SM_inputs, height=20, width=20):

        self.height = height  # height of the canvas
        self.width = width  # width of the canvas

        self.SM_inputs = SM_inputs  # inputs for the entire model

        self.stepCount = 0  # int - [-] - initialisation of step counter
        self.agenda_PC = None  # initialisation of agenda policy core issue tracker
        self.policy_implemented_number = None  # initialisation of policy number tracker
        self.policy_formulation_run = False  # check value for running policy formulation

        self.w_el_influence = self.SM_inputs[
            9]  # float - [-] - electorate influence weight constant
        # todo - consider also saving the electorate influence parameter

        self.schedule = RandomActivation(self)  # mesa random activation method
        self.grid = SingleGrid(height, width,
                               torus=True)  # mesa grid creation method

        # creation of the datacollector vector
        self.datacollector = DataCollector(
            # Model-level variables
            model_reporters={
                "step": "stepCount",
                "AS_PF": get_problem_policy_chosen,
                "agent_attributes": get_agents_attributes,
                "electorate_attributes": get_electorate_attributes
            },
            # Agent-level variables
            agent_reporters={
                "x": lambda a: a.pos[0],
                "y": lambda a: a.pos[1],
                "Agent type": lambda a: type(a),
                "Issuetree": lambda a: getattr(a, 'issuetree', [None])[
                    a.unique_id if isinstance(a, ActiveAgent) else 0]
            })

        self.len_S, self.len_PC, self.len_DC, self.len_CR = belief_tree_input(
        )  # setting up belief tree
        self.policy_instruments, self.len_ins, self.PF_indices = policy_instrument_input(
        )  # setting up policy instruments
        init_active_agents(self, self.len_S, self.len_PC, self.len_DC,
                           self.len_CR, self.len_PC, self.len_ins,
                           self.SM_inputs)  # setting up active agents
        init_electorate_agents(self, self.len_S, self.len_PC, self.len_DC,
                               self.SM_inputs)  # setting up passive agents
        init_truth_agent(self, self.len_S, self.len_PC, self.len_DC,
                         self.len_ins)  # setting up truth agent

        self.running = True
        self.numberOfAgents = self.schedule.get_agent_count()
        self.datacollector.collect(self)

    def step(self, KPIs):
        '''
		Main steps of the Simplest Model for policy emergence:
		0. Module interface - Input
		1. Agenda setting step
		2. Policy formulation step
		3. Data collection
		'''

        self.KPIs = KPIs  # saving the indicators

        # 0. initialisation
        self.module_interface_input(
            self.KPIs)  # communicating the beliefs (indicators)
        self.electorate_influence(
            self.w_el_influence)  # electorate influence actions

        # 1. agenda setting
        self.agenda_setting()

        # 2. policy formulation
        if self.policy_formulation_run:
            policy_implemented = self.policy_formulation()
        else:
            policy_implemented = self.policy_instruments[-1]

        # 3. data collection
        self.stepCount += 1  # iterate the steps counter
        self.datacollector.collect(self)  # collect data

        print("Step ends", "\n")

        return policy_implemented

    def module_interface_input(self, KPIs):
        '''
		The module interface input step passes the KPIs to the truth agent and copies the truth agent's beliefs and policy instrument impacts to the active agents, after which their preferences are updated.
		'''

        len_DC = self.len_DC
        len_PC = self.len_PC
        len_S = self.len_S
        len_ins = self.len_ins

        # saving the issue tree of the truth agent
        for agent in self.schedule.agent_buffer(shuffled=True):
            if isinstance(agent, TruthAgent):
                agent.issuetree_truth = KPIs
                truth_issuetree = agent.issuetree_truth
                truth_policytree = agent.policytree_truth

        # Transferring policy impact to active agents
        for agent in self.schedule.agent_buffer(shuffled=True):
            if isinstance(agent, ActiveAgent):  # selecting only active agents
                # for PFj in range(len_PC): # communicating the policy family likelihoods
                # 	for PFij in range(len_PC):
                # 		agent.policytree[agent.unique_id][PFj][PFij] = truth_policytree[PFj][PFij]

                for insj in range(
                        len_ins
                ):  # communicating the policy instruments impacts
                    agent.policytree[agent.unique_id][
                        len_PC + insj][0:len_S] = truth_policytree[len_PC +
                                                                   insj]

                for issue in range(
                        len_DC + len_PC + len_S
                ):  # communicating the issue beliefs from the KPIs
                    agent.issuetree[
                        agent.unique_id][issue][0] = truth_issuetree[issue]
                self.preference_update(
                    agent, agent.unique_id)  # updating the preferences

    def agenda_setting(self):
        '''
		In the agenda setting step, the active agents first select their policy core issue of preference and then select
		the agenda.
		'''

        # active agent policy core selection
        for agent in self.schedule.agent_buffer(shuffled=False):
            if isinstance(agent, ActiveAgent):  # selecting only active agents
                agent.selection_PC()

        # for each agent, selection of their preferred policy core issue
        selected_PC_list = []
        number_ActiveAgents = 0
        for agent in self.schedule.agent_buffer(shuffled=False):
            if isinstance(agent, ActiveAgent):  # considering only active agents
                selected_PC_list.append(agent.selected_PC)
                number_ActiveAgents += 1

        # finding the most common policy core issue and its frequency
        d = defaultdict(int)
        for i in selected_PC_list:
            d[i] += 1
        result = max(d.items(), key=lambda x: x[1])
        agenda_PC_temp = result[0]
        agenda_PC_temp_frequency = result[1]

        # checking for majority
        if agenda_PC_temp_frequency > int(number_ActiveAgents / 2):
            self.agenda_PC = agenda_PC_temp
            self.policy_formulation_run = True  # allowing for policy formulation to happen
            print("The agenda consists of PC", self.agenda_PC, ".")
        else:  # if no majority
            self.policy_formulation_run = False
            print("No agenda was formed, moving to the next step.")
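        # e.g. with 9 active agents, a policy core issue must be selected by at least
        # 5 of them (strictly more than int(9 / 2) = 4) to become the agenda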

        # for purposes of not changing the entire code - the policy family selected is set at 0 so all policy instruments
        # are always considered in the rest of the model
        self.agenda_PF = 0

    def policy_formulation(self):
        '''
		In the policy formulation step, the policy maker agents first select their policy core issue of preference and then
		they select the policy that is to be implemented if there is a majority of them.
		'''

        # calculation of policy instruments preferences
        selected_PI_list = []
        number_PMs = 0
        for agent in self.schedule.agent_buffer(shuffled=False):
            if isinstance(
                    agent, ActiveAgent
            ) and agent.agent_type == 'policymaker':  # considering only policy makers
                agent.selection_S()
                agent.selection_PI(
                )  # individual agent policy instrument selection
                selected_PI_list.append(
                    agent.selected_PI
                )  # appending the policy instruments selected to a list for all PMs
                number_PMs += 1

        # finding the most common policy instrument and its frequency
        d = defaultdict(int)
        for i in selected_PI_list:
            d[i] += 1
        result = max(d.items(), key=lambda x: x[1])
        self.policy_implemented_number = result[0]
        policy_implemented_number_frequency = result[1]

        # check for the majority and implement the policy if satisfied
        if policy_implemented_number_frequency > int(number_PMs / 2):
            print("The policy selected is policy instrument ",
                  self.policy_implemented_number, ".")
            policy_implemented = self.policy_instruments[
                self.policy_implemented_number]
        else:  # if no majority
            print("No consensus on a policy instrument.")
            policy_implemented = self.policy_instruments[
                -1]  # selecting status quo policy instrument

        return policy_implemented

    def preference_update(self, agent, who):
        '''
		This function is used to call the preference update functions of the issues of the active agents.
		'''

        self.preference_update_DC(agent,
                                  who)  # deep core issue preference update
        self.preference_update_PC(agent,
                                  who)  # policy core issue preference update
        self.preference_update_S(agent, who)  # secondary issue preference update

    def preference_update_DC(self, agent, who):
        """
		This function is used to update the preferences of the deep core issues of agents in their
		respective issue trees.

		agent - this is the owner of the issue tree
		who - this is the part of the issuetree that is considered - agent.unique_id should be used for this -
		this is done to also include partial knowledge preference calculation
		"""

        len_DC = self.len_DC

        # calculation of the denominator
        PC_denominator = 0
        for h in range(len_DC):
            issue_belief = agent.issuetree[who][h][0]
            issue_goal = agent.issuetree[who][h][1]
            if issue_goal is not None and issue_belief is not None:  # contingency for partial knowledge issues
                gap = issue_goal - issue_belief
                PC_denominator += abs(gap)

        # selection of the numerator and calculation of the preference
        for i in range(len_DC):
            issue_belief = agent.issuetree[who][i][0]
            issue_goal = agent.issuetree[who][i][1]
            if (issue_goal is not None and issue_belief is not None
                    and PC_denominator != 0):  # known issue and non-zero denominator
                gap = issue_goal - issue_belief
                agent.issuetree[who][i][2] = abs(gap) / PC_denominator
            else:
                agent.issuetree[who][i][2] = 0

    def preference_update_PC(self, agent, who):
        """
		This function is used to update the preferences of the policy core issues of agents in their
		respective issue trees.

		agent - this is the owner of the belief tree
		who - this is the part of the issuetree that is considered - agent.unique_id should be used for this -
		this is done to also include partial knowledge preference calculation
		"""

        len_DC = self.len_DC
        len_PC = self.len_PC
        len_S = self.len_S

        PC_denominator = 0
        # calculation of the denominator
        for j in range(
                len_PC):  # selecting the causal relations starting from PC

            for k in range(len_DC):
                cr = agent.issuetree[who][len_DC + len_PC + len_S + j +
                                          (k * len_PC)][0]
                issue_belief = agent.issuetree[who][k][0]
                issue_goal = agent.issuetree[who][k][1]
                if issue_goal is not None and issue_belief is not None and cr is not None:
                    # contingency for partial knowledge issues
                    gap = issue_goal - issue_belief
                    if (cr < 0 and gap < 0) or (cr > 0 and gap > 0):
                        # the causal relation and the belief-goal gap must have the same sign
                        PC_denominator += abs(cr * gap)

        # addition of the gaps of the associated mid-level issues
        for i in range(len_PC):
            issue_belief = agent.issuetree[who][len_DC + i][0]
            issue_goal = agent.issuetree[who][len_DC + i][1]
            if issue_goal is not None and issue_belief is not None:  # contingency for partial knowledge issues
                gap = issue_goal - issue_belief
                PC_denominator += abs(gap)

        # calculation of the numerator and the preference
        for j in range(len_PC):  # select one by one the PC

            # calculation of the right side of the numerator
            PC_numerator = 0
            for k in range(
                    len_DC):  # selecting the causal relations starting from DC
                issue_belief = agent.issuetree[who][k][0]
                issue_goal = agent.issuetree[who][k][1]
                cr = agent.issuetree[who][len_DC + len_PC + len_S + j +
                                          (k * len_PC)][0]
                if issue_goal is not None and issue_belief is not None and cr is not None:
                    # contingency for partial knowledge issues
                    gap = issue_goal - issue_belief
                    if (cr < 0 and gap < 0) or (cr > 0 and gap > 0):
                        # the causal relation and the belief-goal gap must have the same sign
                        PC_numerator += abs(cr * gap)

            # addition of the gap to the numerator
            issue_belief = agent.issuetree[who][len_DC + j][0]
            issue_goal = agent.issuetree[who][len_DC + j][1]
            if issue_goal is not None and issue_belief is not None:  # contingency for partial knowledge issues
                gap = issue_goal - issue_belief
                PC_numerator += abs(gap)

            # calculation of the preferences
            if PC_denominator != 0:
                agent.issuetree[who][len_DC + j][2] = round(
                    PC_numerator / PC_denominator, 3)
            else:
                agent.issuetree[who][len_DC + j][2] = 0

    def preference_update_S(self, agent, who):
        """
		This function is used to update the preferences of the secondary issues of agents in their
		respective issue trees.

		agent - this is the owner of the belief tree
		who - this is the part of the issuetree that is considered - agent.unique_id should be used for this -
		this is done to also include partial knowledge preference calculation
		"""

        len_DC = self.len_DC
        len_PC = self.len_PC
        len_S = self.len_S

        S_denominator = 0
        # calculation of the denominator
        for j in range(len_S):

            for k in range(
                    len_PC):  # selecting the causal relations starting from S
                issue_belief = agent.issuetree[who][len_DC + k][0]
                issue_goal = agent.issuetree[who][len_DC + k][1]
                cr = agent.issuetree[who][len_DC + len_PC + len_S +
                                          len_DC * len_PC + j + (k * len_S)][0]
                if issue_goal is not None and issue_belief is not None and cr is not None:
                    # contingency for partial knowledge issues
                    gap = issue_goal - issue_belief
                    if (cr < 0 and gap < 0) or (cr > 0 and gap > 0):
                        # the causal relation and the belief-goal gap must have the same sign
                        S_denominator += abs(cr * gap)

        # addition of the gaps of the associated secondary issues
        for j in range(len_S):
            issue_belief = agent.issuetree[who][len_DC + len_PC + j][0]
            issue_goal = agent.issuetree[who][len_DC + len_PC + j][1]
            if issue_goal is not None and issue_belief is not None:  # contingency for partial knowledge issues
                gap = issue_goal - issue_belief
                S_denominator += abs(gap)

        # calculation of the numerator and the preference
        for j in range(len_S):  # select one by one the S

            # calculation of the right side of the numerator
            S_numerator = 0
            for k in range(
                    len_PC):  # selecting the causal relations starting from PC
                # Contingency for partial knowledge issues
                cr = agent.issuetree[who][len_DC + len_PC + len_S +
                                          len_DC * len_PC + j + (k * len_S)][0]
                issue_belief = agent.issuetree[who][len_DC + k][0]
                issue_goal = agent.issuetree[who][len_DC + k][1]
                if issue_goal is not None and issue_belief is not None and cr is not None:
                    # contingency for partial knowledge issues
                    gap = issue_goal - issue_belief
                    if (cr < 0 and gap < 0) or (cr > 0 and gap > 0):
                        # the causal relation and the gap must have the same sign
                        S_numerator += abs(cr * gap)

            # addition of the gap to the numerator
            issue_belief = agent.issuetree[who][len_DC + len_PC + j][0]
            issue_goal = agent.issuetree[who][len_DC + len_PC + j][1]
            if issue_goal is not None and issue_belief is not None:  # contingency for partial knowledge issues
                gap = issue_goal - issue_belief
                S_numerator += abs(gap)

            # calculation of the preferences
            if S_denominator != 0:
                agent.issuetree[who][len_DC + len_PC + j][2] = round(
                    S_numerator / S_denominator, 3)
            else:
                agent.issuetree[who][len_DC + len_PC + j][2] = 0

    def electorate_influence(self, w_el_influence):
        '''
		This function calls the influence actions in the electorate agent class.
		'''

        for agent in self.schedule.agent_buffer(shuffled=True):
            if isinstance(agent, ElectorateAgent):
                agent.electorate_influence(w_el_influence)
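
The deep core preference update above normalises each belief-goal gap by the sum of all the gaps. A minimal standalone sketch of that calculation (the function name and data are illustrative, not part of the model):

def deep_core_preferences(issues):
    """issues: list of (belief, goal) pairs; returns one preference value per issue."""
    # denominator: sum of the absolute gaps of the issues that are fully known
    denominator = sum(abs(goal - belief) for belief, goal in issues
                      if belief is not None and goal is not None)
    preferences = []
    for belief, goal in issues:
        if denominator != 0 and belief is not None and goal is not None:
            preferences.append(abs(goal - belief) / denominator)  # normalised gap
        else:
            preferences.append(0)
    return preferences

print(deep_core_preferences([(0.2, 0.8), (0.5, 0.5), (0.1, 0.4)]))
# -> [0.666..., 0.0, 0.333...]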
Example #3
class PolicyEmergenceSM(Model):
    '''
	Policy emergence model; depending on PE_type it runs as the Simplest Model (SM) or one of the ACF variants (A+PL, A+Co, A+PK).
	'''
    def __init__(self,
                 PE_type,
                 SM_inputs,
                 AplusPL_inputs,
                 AplusCo_inputs,
                 AplusPK_inputs,
                 height=20,
                 width=20,
                 input_LHS=False):

        self.height = height  # height of the canvas
        self.width = width  # width of the canvas

        self.SM_inputs = SM_inputs  # inputs for the entire model
        self.PE_type = PE_type  # model type (SM, A+PL, A+Co, A+PK, A+PI)

        self.resources_aff = SM_inputs[2]  # resources per affiliation agent

        self.stepCount = 0  # int - [-] - initialisation of step counter
        self.agenda_PC = None  # initialisation of agenda policy core issue tracker
        self.policy_implemented_number = None  # initialisation of policy number tracker
        self.policy_formulation_run = False  # check value for running policy formulation

        self.w_el_influence = self.SM_inputs[
            5]  # float - [-] - electorate influence weight constant

        # batchrunner inputs
        self.input_LHS = input_LHS

        # ACF+PL parameters
        if 'A+PL' in self.PE_type or 'A+Co' in self.PE_type:
            self.conflict_level = AplusPL_inputs[0]
            self.resources_spend_incr_agents = AplusPL_inputs[1]

        # ACF+Co parameters
        if 'A+Co' in self.PE_type:
            self.PC_interest = AplusCo_inputs[0]
            if self.input_LHS:
                self.coa_creation_thresh = self.input_LHS[1]  # LHS inputs
                self.coa_resources_share = self.input_LHS[0]  # LHS inputs
            else:
                self.coa_creation_thresh = AplusCo_inputs[1]
                self.coa_resources_share = AplusCo_inputs[3]
            self.coa_coherence_thresh = AplusCo_inputs[2]
            self.resources_spend_incr_coal = AplusCo_inputs[4]
            print('res. share:', round(self.coa_resources_share, 3),
                  ', coa. threshold:', round(self.coa_creation_thresh, 3))

            self.coalition_list = []

        # +PK parameters
        self.PK = False
        if '+PK' in self.PE_type:
            self.PK = True
        self.PK_catchup = AplusPK_inputs[0]

        self.schedule = RandomActivation(self)  # mesa random activation method
        self.grid = SingleGrid(height, width,
                               torus=True)  # mesa grid creation method

        # creation of the datacollector vector

        if 'A+Co' in self.PE_type:
            self.datacollector = DataCollector(
                # Model-level variables
                model_reporters={
                    "step": "stepCount",
                    "AS_PF": get_problem_policy_chosen,
                    "agent_attributes": get_agents_attributes,
                    "coalitions_attributes": get_coalitions_attributes,
                    "electorate_attributes": get_electorate_attributes
                },
                # Agent-level variables
                agent_reporters={
                    "x": lambda a: a.pos[0],
                    "y": lambda a: a.pos[1],
                    "Agent type": lambda a: type(a),
                    "Issuetree": lambda a: getattr(a, 'issuetree', [None])[
                        a.unique_id if isinstance(a, ActiveAgent)
                        and not isinstance(a, Coalition) else 0]
                })
        else:
            self.datacollector = DataCollector(
                # Model-level variables
                model_reporters={
                    "step": "stepCount",
                    "AS_PF": get_problem_policy_chosen,
                    "agent_attributes": get_agents_attributes,
                    "electorate_attributes": get_electorate_attributes
                },
                # Agent-level variables
                agent_reporters={
                    "x": lambda a: a.pos[0],
                    "y": lambda a: a.pos[1],
                    "Agent type": lambda a: type(a),
                    "Issuetree": lambda a: getattr(a, 'issuetree', [None])[
                        a.unique_id if isinstance(a, ActiveAgent) else 0]
                })

        self.len_S, self.len_PC, self.len_DC, self.len_CR = belief_tree_input(
        )  # setting up belief tree
        self.policy_instruments, self.len_ins, self.PF_indices = policy_instrument_input(
        )  # setting up policy instruments
        init_active_agents(self, self.len_S, self.len_PC, self.len_DC,
                           self.len_CR, self.len_PC, self.len_ins,
                           self.SM_inputs)  # setting up active agents
        init_electorate_agents(self, self.len_S, self.len_PC, self.len_DC,
                               self.SM_inputs)  # setting up passive agents
        init_truth_agent(self, self.len_S, self.len_PC, self.len_DC,
                         self.len_ins)  # setting up truth agent

        self.running = True
        self.numberOfAgents = self.schedule.get_agent_count()
        self.datacollector.collect(self)

    def step(self, KPIs):
        '''
		Main steps of the Simplest Model for policy emergence:
		0. Module interface - Input
		1. Agenda setting step
		2. Policy formulation step
		3. Data collection
		'''

        self.KPIs = KPIs  # saving the indicators

        # 0. initialisation
        self.module_interface_input(
            self.KPIs)  # communicating the beliefs (indicators)
        self.electorate_influence(
            self.w_el_influence)  # electorate influence actions
        if 'A+Co' in self.PE_type:
            self.coalition_creation_algorithm()

        # 1. agenda setting
        self.agenda_setting()

        # 2. policy formulation
        if self.policy_formulation_run:
            policy_implemented = self.policy_formulation()
        else:
            policy_implemented = self.policy_instruments[-1]

        # 3. data collection
        self.stepCount += 1  # iterate the steps counter
        self.datacollector.collect(self)  # collect data

        print("Step ends", "\n")

        return policy_implemented

    def module_interface_input(self, KPIs):
        '''
		The module interface input step passes the KPIs to the truth agent and copies the truth agent's beliefs and policy instrument impacts to the active agents, after which their preferences are updated.
		'''

        len_DC = self.len_DC
        len_PC = self.len_PC
        len_S = self.len_S
        len_ins = self.len_ins

        # saving the issue tree of the truth agent
        for agent in self.schedule.agent_buffer(shuffled=True):
            if isinstance(agent, TruthAgent):
                agent.issuetree_truth = KPIs
                truth_issuetree = agent.issuetree_truth
                truth_policytree = agent.policytree_truth

        # Transferring policy impact to active agents
        for agent in self.schedule.agent_buffer(shuffled=True):
            if isinstance(agent, ActiveAgent) and not isinstance(
                    agent, Coalition):  # selecting only active agents
                # for PFj in range(len_PC): # communicating the policy family likelihoods
                # 	for PFij in range(len_PC):
                # 		agent.policytree[agent.unique_id][PFj][PFij] = truth_policytree[PFj][PFij]

                for insj in range(
                        len_ins
                ):  # communicating the policy instruments impacts
                    agent.policytree[agent.unique_id][
                        len_PC + insj][0:len_S] = truth_policytree[len_PC +
                                                                   insj]

                for issue in range(
                        len_DC + len_PC + len_S
                ):  # communicating the issue beliefs from the KPIs
                    agent.issuetree[
                        agent.unique_id][issue][0] = truth_issuetree[issue]
                self.preference_update(
                    agent, agent.unique_id)  # updating the preferences

    def resources_distribution(self):
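        '''
		Distributes action resources to the active agents according to their affiliation and, in the A+Co variant,
		pools a share (coa_resources_share) of the members' resources into their coalition.
		'''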

        if 'A+PL' in self.PE_type or 'A+Co' in self.PE_type:
            for agent in self.schedule.agent_buffer(shuffled=False):
                if isinstance(agent,
                              ActiveAgent):  # selecting only active agents
                    if agent.affiliation == 0:  # affiliation 0
                        agent.resources = 0.01 * self.number_activeagents * self.resources_aff[
                            0] / 100
                    if agent.affiliation == 1:  # affiliation 1
                        agent.resources = 0.01 * self.number_activeagents * self.resources_aff[
                            1] / 100
                    agent.resources_action = agent.resources  # assigning resources for the actions for both
        if 'A+Co' in self.PE_type:  # attribution of the resources to coalitions
            for coalition in self.schedule.agent_buffer(shuffled=False):
                if isinstance(coalition, Coalition):
                    resources = 0
                    for agent_mem in coalition.members:
                        resources += agent_mem.resources * self.coa_resources_share
                        agent_mem.resources -= self.coa_resources_share * agent_mem.resources
                        agent_mem.resources_action = agent_mem.resources  # assigning the member's remaining resources for its actions
                    coalition.resources = resources
                    coalition.resources_action = coalition.resources  # assigning resources for the actions for both

    def agenda_setting(self):
        '''
		In the agenda setting step, the active agents first select their policy core issue of preference and then select
		the agenda.
		'''

        # resources distribution
        self.resources_distribution()

        # active agent policy core selection
        for agent in self.schedule.agent_buffer(shuffled=False):
            if isinstance(agent, ActiveAgent):  # selecting only active agents
                agent.selection_PC()

        if 'A+Co' in self.PE_type:
            for coalition in self.schedule.agent_buffer(shuffled=True):
                if isinstance(coalition,
                              Coalition):  # selecting only coalitions
                    coalition.interactions_intra_coalition(
                        'AS')  # intra-coalition interactions

        # active agent interactions (including coalitions)
        if 'A+PL' in self.PE_type or 'A+Co' in self.PE_type:
            for agent in self.schedule.agent_buffer(shuffled=True):
                if isinstance(agent,
                              ActiveAgent):  # selecting only active agents
                    agent.interactions('AS', self.PK)

        # active agent policy core selection (after agent interactions)
        if 'A+PL' in self.PE_type or 'A+Co' in self.PE_type:
            # active agent policy core selection
            for agent in self.schedule.agent_buffer(shuffled=False):
                if isinstance(agent,
                              ActiveAgent):  # selecting only active agents
                    agent.selection_PC()

        # for each agent, selection of their preferred policy core issue
        selected_PC_list = []
        number_ActiveAgents = 0
        for agent in self.schedule.agent_buffer(shuffled=False):
            if isinstance(agent, ActiveAgent):  # considering only active agents
                selected_PC_list.append(agent.selected_PC)
                number_ActiveAgents += 1

        # finding the most common policy core issue and its frequency
        d = defaultdict(int)
        for i in selected_PC_list:
            d[i] += 1
        result = max(d.items(), key=lambda x: x[1])
        agenda_PC_temp = result[0]
        agenda_PC_temp_frequency = result[1]

        # checking for majority
        if agenda_PC_temp_frequency > int(number_ActiveAgents / 2):
            self.agenda_PC = agenda_PC_temp
            self.policy_formulation_run = True  # allowing for policy formulation to happen
            print("The agenda consists of PC", self.agenda_PC, ".")
        else:  # if no majority
            self.policy_formulation_run = False
            print("No agenda was formed, moving to the next step.")

        # for purposes of not changing the entire code - the policy family selected is set at 0 so all policy instruments
        # are always considered in the rest of the model
        self.agenda_PF = 0

    def policy_formulation(self):
        '''
		In the policy formulation step, the policy maker agents first select their policy core issue of preference and then
		they select the policy that is to be implemented if there is a majority of them.
		'''

        # resources distribution
        self.resources_distribution()

        # calculation of policy instruments preferences
        if 'A+PL' in self.PE_type or 'A+Co' in self.PE_type:
            for agent in self.schedule.agent_buffer(shuffled=False):
                if isinstance(agent, ActiveAgent):
                    agent.selection_S()
                    agent.selection_PI(
                    )  # individual agent policy instrument selection

        if 'A+Co' in self.PE_type:
            for coalition in self.schedule.agent_buffer(shuffled=True):
                if isinstance(coalition, Coalition):  # selecting only coalitions
                    # print('selected_PC', agent.selected_PC)
                    coalition.interactions_intra_coalition('PF')
                    # coalition.interactions('PF')

        # active agent interactions
        if 'A+PL' in self.PE_type or 'A+Co' in self.PE_type:
            for agent in self.schedule.agent_buffer(shuffled=True):
                if isinstance(agent,
                              ActiveAgent):  # selecting only active agents
                    agent.interactions('PF', self.PK)

        # calculation of policy instruments preferences
        selected_PI_list = []
        number_PMs = 0
        for agent in self.schedule.agent_buffer(shuffled=False):
            if isinstance(
                    agent, ActiveAgent
            ) and agent.agent_type == 'policymaker':  # considering only policy makers
                agent.selection_S()
                agent.selection_PI(
                )  # individual agent policy instrument selection
                selected_PI_list.append(
                    agent.selected_PI
                )  # appending the policy instruments selected to a list for all PMs
                number_PMs += 1

        # finding the most common policy instrument and its frequency
        d = defaultdict(int)
        print(selected_PI_list)
        for i in selected_PI_list:
            d[i] += 1
        result = max(d.items(), key=lambda x: x[1])
        self.policy_implemented_number = result[0]
        policy_implemented_number_frequency = result[1]

        # check for the majority and implement the policy if satisfied
        if policy_implemented_number_frequency > int(number_PMs / 2):
            print("The policy selected is policy instrument ",
                  self.policy_implemented_number, ".")
            policy_implemented = self.policy_instruments[
                self.policy_implemented_number]
        else:  # if no majority
            print("No consensus on a policy instrument.")
            policy_implemented = self.policy_instruments[
                -1]  # selecting status quo policy instrument

        return policy_implemented

    def preference_update(self, agent, who, coalition_check=False):
        '''
		This function is used to call the preference update functions of the issues of the active agents.
		'''

        if coalition_check:
            who = self.number_activeagents

        self.preference_update_DC(agent,
                                  who)  # deep core issue preference update
        self.preference_update_PC(agent,
                                  who)  # policy core issue preference update
        self.preference_update_S(agent, who)  # secondary issue preference update

    def preference_update_DC(self, agent, who):
        """
		This function is used to update the preferences of the deep core issues of agents in their
		respective issue trees.

		agent - this is the owner of the issue tree
		who - this is the part of the issuetree that is considered - agent.unique_id should be used for this -
		this is done to also include partial knowledge preference calculation
		"""

        len_DC = self.len_DC

        # calculation of the denominator
        PC_denominator = 0
        for h in range(len_DC):
            issue_belief = agent.issuetree[who][h][0]
            issue_goal = agent.issuetree[who][h][1]
            if issue_goal is not None and issue_belief is not None:  # contingency for partial knowledge issues
                gap = issue_goal - issue_belief
                PC_denominator += abs(gap)

        # selection of the numerator and calculation of the preference
        for i in range(len_DC):
            issue_belief = agent.issuetree[who][i][0]
            issue_goal = agent.issuetree[who][i][1]
            if (issue_goal is not None and issue_belief is not None
                    and PC_denominator != 0):  # known issue and non-zero denominator
                gap = issue_goal - issue_belief
                agent.issuetree[who][i][2] = abs(gap) / PC_denominator
            else:
                agent.issuetree[who][i][2] = 0

    def preference_update_PC(self, agent, who):
        """
		This function is used to update the preferences of the policy core issues of agents in their
		respective issue trees.

		agent - this is the owner of the belief tree
		who - this is the part of the issuetree that is considered - agent.unique_id should be used for this -
		this is done to also include partial knowledge preference calculation
		"""

        len_DC = self.len_DC
        len_PC = self.len_PC
        len_S = self.len_S

        PC_denominator = 0
        # calculation of the denominator
        for j in range(
                len_PC):  # selecting the causal relations starting from PC

            for k in range(len_DC):
                cr = agent.issuetree[who][len_DC + len_PC + len_S + j +
                                          (k * len_PC)][0]
                issue_belief = agent.issuetree[who][k][0]
                issue_goal = agent.issuetree[who][k][1]
                if issue_goal is not None and issue_belief is not None and cr is not None:
                    # contingency for partial knowledge issues
                    gap = issue_goal - issue_belief
                    if (cr < 0 and gap < 0) or (cr > 0 and gap > 0):
                        # the causal relation and the belief-goal gap must have the same sign
                        PC_denominator += abs(cr * gap)

        # addition of the gaps of the associated mid-level issues
        for i in range(len_PC):
            issue_belief = agent.issuetree[who][len_DC + i][0]
            issue_goal = agent.issuetree[who][len_DC + i][1]
            if issue_goal is not None and issue_belief is not None:  # contingency for partial knowledge issues
                gap = issue_goal - issue_belief
                PC_denominator += abs(gap)

        # calculation of the numerator and the preference
        for j in range(len_PC):  # select one by one the PC

            # calculation of the right side of the numerator
            PC_numerator = 0
            for k in range(
                    len_DC):  # selecting the causal relations starting from DC
                issue_belief = agent.issuetree[who][k][0]
                issue_goal = agent.issuetree[who][k][1]
                cr = agent.issuetree[who][len_DC + len_PC + len_S + j +
                                          (k * len_PC)][0]
                if issue_goal is not None and issue_belief is not None and cr is not None:
                    # contingency for partial knowledge issues
                    gap = issue_goal - issue_belief
                    if (cr < 0 and gap < 0) or (cr > 0 and gap > 0):
                        # the causal relation and the belief-goal gap must have the same sign
                        PC_numerator += abs(cr * gap)

            # addition of the gap to the numerator
            issue_belief = agent.issuetree[who][len_DC + j][0]
            issue_goal = agent.issuetree[who][len_DC + j][1]
            if issue_goal is not None and issue_belief is not None:  # contingency for partial knowledge issues
                gap = issue_goal - issue_belief
                PC_numerator += abs(gap)

            # calculation of the preferences
            if PC_denominator != 0:
                agent.issuetree[who][len_DC + j][2] = round(
                    PC_numerator / PC_denominator, 3)
            else:
                agent.issuetree[who][len_DC + j][2] = 0

    def preference_update_S(self, agent, who):
        """
		This function is used to update the preferences of the secondary issues of agents in their
		respective issue trees.

		agent - this is the owner of the belief tree
		who - this is the part of the issuetree that is considered - agent.unique_id should be used for this -
		this is done to also include partial knowledge preference calculation
		"""

        len_DC = self.len_DC
        len_PC = self.len_PC
        len_S = self.len_S

        S_denominator = 0
        # calculation of the denominator
        for j in range(len_S):

            for k in range(
                    len_PC):  # selecting the causal relations starting from S
                issue_belief = agent.issuetree[who][len_DC + k][0]
                issue_goal = agent.issuetree[who][len_DC + k][1]
                cr = agent.issuetree[who][len_DC + len_PC + len_S +
                                          len_DC * len_PC + j + (k * len_S)][0]
                if issue_goal is not None and issue_belief is not None and cr is not None:
                    # contingency for partial knowledge issues
                    gap = issue_goal - issue_belief
                    if (cr < 0 and gap < 0) or (cr > 0 and gap > 0):
                        # the causal relation and the belief-goal gap must have the same sign
                        S_denominator += abs(cr * gap)

        # addition of the gaps of the associated secondary issues
        for j in range(len_S):
            issue_belief = agent.issuetree[who][len_DC + len_PC + j][0]
            issue_goal = agent.issuetree[who][len_DC + len_PC + j][1]
            # print(issue_goal, type(issue_goal), type(issue_belief))
            if issue_goal is not None and issue_belief is not None:  # contingency for partial knowledge issues
                gap = issue_goal - issue_belief
                S_denominator += abs(gap)

        # calculation of the numerator and the preference
        for j in range(len_S):  # select one by one the S

            # calculation of the right side of the numerator
            S_numerator = 0
            for k in range(
                    len_PC):  # selecting the causal relations starting from PC
                # Contingency for partial knowledge issues
                cr = agent.issuetree[who][len_DC + len_PC + len_S +
                                          len_DC * len_PC + j + (k * len_S)][0]
                issue_belief = agent.issuetree[who][len_DC + k][0]
                issue_goal = agent.issuetree[who][len_DC + k][1]
                if issue_goal is not None and issue_belief is not None and cr is not None:
                    # contingency for partial knowledge issues
                    gap = issue_goal - issue_belief
                    if (cr < 0 and gap < 0) or (cr > 0 and gap > 0):
                        # the causal relation and the gap must have the same sign
                        S_numerator += abs(cr * gap)

            # addition of the gap to the numerator
            issue_belief = agent.issuetree[who][len_DC + len_PC + j][0]
            issue_goal = agent.issuetree[who][len_DC + len_PC + j][1]
            if issue_goal is not None and issue_belief is not None:  # contingency for partial knowledge issues
                gap = issue_goal - issue_belief
                S_numerator += abs(gap)

            # calculation of the preferences
            if S_denominator != 0:
                agent.issuetree[who][len_DC + len_PC + j][2] = round(
                    S_numerator / S_denominator, 3)
            else:
                agent.issuetree[who][len_DC + len_PC + j][2] = 0

    def electorate_influence(self, w_el_influence):
        '''
		This function calls the influence actions in the electorate agent class.
		'''

        for agent in self.schedule.agent_buffer(shuffled=True):
            if isinstance(agent, ElectorateAgent):
                agent.electorate_influence(w_el_influence)

    def coalition_creation_algorithm(self):
        '''
		Function used to reset the coalitions at the beginning of each round and create new ones.
		A maximum of two coalitions is allowed. Agents are grouped together when their goals lie within a certain
		threshold of one another.
		Note that only the preferred states are considered and not the actual beliefs of the actors - this could be a
		problem when considering the partial information case.

		:return:
		'''

        # resetting the coalitions before the creation of new ones
        for coalition in self.schedule.agent_buffer(shuffled=False):
            if isinstance(coalition, Coalition):
                self.schedule.remove(coalition)

        # saving the agents in a list with their belief values
        list_agents_1 = []  # active agent list
        for agent in self.schedule.agent_buffer(shuffled=False):
            if isinstance(agent, ActiveAgent):
                list_agents_1.append(
                    (agent,
                     agent.issuetree[agent.unique_id][self.len_DC +
                                                      self.PC_interest][1]))
        list_agents_1.sort(
            key=lambda x: x[1])  # sorting the list based on the goals

        # checking for groups for first coalition
        list_coalition_number = []
        for i in range(len(list_agents_1)):
            count = 0
            for j in range(len(list_agents_1)):
                if list_agents_1[i][
                        1] - self.coa_creation_thresh <= list_agents_1[j][
                            1] <= list_agents_1[i][
                                1] + self.coa_creation_thresh:
                    count += 1
            list_coalition_number.append(count)

        index = list_coalition_number.index(
            max(list_coalition_number
                ))  # finding the grouping with the most member index

        list_coalition_members = []
        list_agents_2 = copy.copy(list_agents_1)
        for i in range(len(list_agents_1)):
            if list_agents_1[index][
                    1] - self.coa_creation_thresh <= list_agents_1[i][
                        1] <= list_agents_1[index][
                            1] + self.coa_creation_thresh:
                list_coalition_members.append(list_agents_1[i][0])
                list_agents_2.remove(list_agents_1[i])

        self.coalition_creation(
            1001, list_coalition_members
        )  # creating the coalition with the selected members

        if len(list_agents_2) > 2:  # check if there are enough agents left

            # checking for groups for second coalition
            list_coalition_number = []
            for i in range(len(list_agents_2)):
                count = 0
                for j in range(len(list_agents_2)):
                    if list_agents_2[i][
                            1] - self.coa_creation_thresh <= list_agents_2[j][
                                1] <= list_agents_2[i][
                                    1] + self.coa_creation_thresh:
                        count += 1
                list_coalition_number.append(count)
            index = list_coalition_number.index(
                max(list_coalition_number
                    ))  # finding the grouping with the most member index

            list_coalition_members = []
            for i in range(len(list_agents_2)):
                if list_agents_2[index][
                        1] - self.coa_creation_thresh <= list_agents_2[i][
                            1] <= list_agents_2[index][
                                1] + self.coa_creation_thresh:
                    list_coalition_members.append(list_agents_2[i][0])

            self.coalition_creation(
                1002, list_coalition_members
            )  # creating the coalition with selected members

    def coalition_creation(self, unique_id, members):
        '''
		Function used to create a Coalition object, a subclass of the ActiveAgent class.
		:param unique_id:
		:param members:
		:return:
		'''

        x = 0
        y = 0
        resources = 0  # resources are reset to 0
        len_DC = self.len_DC
        len_PC = self.len_PC
        len_S = self.len_S
        len_CR = self.len_CR
        len_PF = self.len_PC
        len_ins = self.len_ins

        issuetree_coal = [None]  # creation of the issue tree
        issuetree_coal[0] = issuetree_creation(
            len_DC, len_PC, len_S, len_CR)  # using the newly made function
        for r in range(
                self.number_activeagents
        ):  # last spot is where the coalition beliefs are stored
            issuetree_coal.append(
                issuetree_creation(len_DC, len_PC, len_S, len_CR))

        policytree_coal = [None]  # creation of the policy tree
        policytree_coal[0] = members[0].policytree[members[0].unique_id]
        for r in range(self.number_activeagents):
            policytree_coal.append(members[0].policytree[members[0].unique_id])
        # note that the policy tree is simply copied ... this will not work in the case of partial information where a different
        # algorithm will need to be found for this part of the model

        # creation of the coalition agent
        agent = Coalition((x, y), unique_id, self, 'coalition', resources, 'X',
                          issuetree_coal, policytree_coal, members)
        self.coalition_belief_update(agent, members)
        self.preference_update(agent, unique_id,
                               True)  # updating the issue tree preferences
        self.grid.position_agent(agent, (x, y))
        self.schedule.add(agent)

    def coalition_belief_update(self, coalition, members):
        '''
		Function used to update the beliefs of the coalition to the average of the beliefs of its member agents.
		:param coalition:
		:param members:
		:return:
		'''

        len_DC = self.len_DC
        len_PC = self.len_PC
        len_S = self.len_S
        len_CR = self.len_CR

        for k in range(
                len_DC + len_PC +
                len_S):  # updating the preferred states and actual beliefs
            belief = 0
            goal = 0
            for agent_mem in members:
                id = agent_mem.unique_id
                belief += agent_mem.issuetree[id][k][0]
                goal += agent_mem.issuetree[id][k][1]
            coalition.issuetree[
                self.number_activeagents][k][0] = belief / len(members)
            coalition.issuetree[
                self.number_activeagents][k][1] = goal / len(members)

        for k in range(len_CR):  # updating the causal relations
            CR = 0
            for agent_mem in members:
                id = agent_mem.unique_id
                CR += agent_mem.issuetree[id][len_DC + len_PC + len_S + k][0]
            coalition.issuetree[self.number_activeagents][
                len_DC + len_PC + len_S + k][0] = CR / len(members)

        if self.PK:  # for the partial knowledge
            for agent in self.schedule.agent_buffer(shuffled=False):
                if agent not in members and isinstance(
                        agent,
                        ActiveAgent) and not isinstance(agent, Coalition):
                    id = agent.unique_id
                    for k in range(len_DC + len_PC +
                                   len_S):  # updating the preferred states
                        goal = 0
                        for agent_mem in members:
                            goal += agent_mem.issuetree[id][k][1]
                        coalition.issuetree[id][k][1] = goal / len(members)

                    for k in range(len_CR):  # updating the causal relations
                        CR = 0
                        for agent_mem in members:
                            CR += agent_mem.issuetree[id][len_DC + len_PC +
                                                          len_S + k][0]
                        coalition.issuetree[id][len_DC + len_PC + len_S +
                                                k][0] = CR / len(members)
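
The coalition creation algorithm above looks for the largest group of agents whose goals on the policy core issue of interest lie within +/- coa_creation_thresh of one member's goal, forms a coalition from it, and repeats once on the remaining agents. A standalone sketch of that grouping step (the function name and data are illustrative, not part of the model):

def largest_group_within_threshold(goals, threshold):
    """Return the indices of the largest set of goals within +/- threshold of one anchor goal."""
    best_members = []
    for anchor in goals:
        members = [i for i, g in enumerate(goals)
                   if anchor - threshold <= g <= anchor + threshold]
        if len(members) > len(best_members):
            best_members = members
    return best_members

print(largest_group_within_threshold([0.1, 0.15, 0.2, 0.8, 0.85, 0.5], 0.1))
# -> [0, 1, 2]: the agents with goals 0.1, 0.15 and 0.2 would form the first coalition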
Example #4
class LenExtended(Model):
    def __init__(self, num_hh, num_cmp, household_parameters, company_parameters, network_density):

        self.num_hh = num_hh
        self.num_cmp = num_cmp
        self.current_day = 0
        self.hh_schedule = RandomActivation(self)
        self.cmp_schedule = RandomActivation(self)
        # NB: networkx's barabasi_albert_graph(n, m) attaches m edges from each new node,
        # so "network_density" here is that attachment count rather than a density value
        self.social_network = nx.barabasi_albert_graph(num_hh, network_density)
        for i in range(self.num_cmp):
            c = Company(i, self, company_parameters)
            self.cmp_schedule.add(c)

        for i in range(self.num_hh):
            h = Householder(i, self, household_parameters)
            self.hh_schedule.add(h)

        # Datacollector
        # NOTE: reporters written as "lambda m: h.<attr>" or "lambda m: c.<attr>" close over
        # the loop variables above, so they report only the last household / company created
        self.datacollector = DataCollector(
            # Household parameters
            {"hh_wealth": lambda m: [x.wealth for x in self.hh_schedule.agent_buffer()],
             "hh_wage": lambda m: [x.wage for x in self.hh_schedule.agent_buffer()],
             "consumption": lambda m: h.consumption,
             "companies": lambda m: h.companies,
             "company": lambda m: [x.company for x in self.hh_schedule.agent_buffer()],
             # "wage_decreasing_coefficient": lambda m: h.wage_decreasing_coefficient,
             # "critical_price_ratio": lambda m: h.critical_price_ratio,
             # "consumption_power": lambda m: h.consumption_power,
             # "unemployed_attempts": lambda m: h.unemployed_attempts,
             # "search_job_chance": lambda m: h.search_job_chance,
             # "prob_search_price": lambda m: h.prob_search_price,
             # "prob_search_prod": lambda m: h.prob_search_prod,
             # "a_connections_number": lambda m: h.a_connections_number,

             # Company parameters
             "C_wealth": lambda m: c.wealth,
             "C_wage": lambda m: c.wage,
             "price": lambda m: [x.price for x in self.cmp_schedule.agent_buffer()],
             "looking_for_worker": lambda m: c.looking_for_worker,
             # "full_workplaces": lambda m: c.full_workplaces,
             # "workers_in_previous_month": lambda m: c.workers_in_previous_month,
             "demand": lambda m: c.demand,
             # "demand_min_coefficient": lambda m: c.demand_min_coefficient,
             # "demand_max_coefficient": lambda m: c.demand_max_coefficient,
             "inventory": lambda m: [x.inventory for x in self.cmp_schedule.agent_buffer()],
             # "sigma": lambda m: c.sigma,
             "gamma": lambda m: c.gamma,
             # "phi_min": lambda m: c.phi_min,
             # "phi_max": lambda m: c.phi_max,
             # "tau": lambda m: c.tau,
             # "upsilon": lambda m: c.upsilon,
             "lambda_coefficient": lambda m: c.lambda_coefficient,
             # "money_buffer_coefficient": lambda m: c.money_buffer_coefficient,
             "households": lambda m: [len(x.households) for x in self.cmp_schedule.agent_buffer()],
             "marketing_investments": lambda m: [x.marketing_investments for x in self.cmp_schedule.agent_buffer()],
             "marketing_boost": lambda m: c.marketing_boost})

    def step(self):
        self.cmp_schedule.step()
        self.hh_schedule.step()
        if self.current_day % 10 == 0:
            self.datacollector.collect(self)

        self.current_day += 1
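
A minimal, hypothetical usage sketch for LenExtended; the household and company parameter dictionaries are placeholders, since their expected keys are not visible in this snippet:

# Hedged usage sketch; hh_params and cmp_params are hypothetical placeholders.
hh_params = {}
cmp_params = {}
model = LenExtended(num_hh=100, num_cmp=10,
                    household_parameters=hh_params,
                    company_parameters=cmp_params,
                    network_density=2)
for _ in range(100):  # data is collected every 10th day
    model.step()
df = model.datacollector.get_model_vars_dataframe()
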
Example No. 5
class Schelling(Model):
    '''
    Model class for the SM coupled to the Schelling segregation model.
    This class has been modified from the original mesa Schelling model.
    '''

    def __init__(self, height=20, width=20, density=0.8, minority_pc=0.2, homophilyType0=0.5, homophilyType1=0.5, movementQuota=0.30, happyCheckRadius=5, moveCheckRadius=10, last_move_quota=5):
        '''
        Create a new Schelling model with the given grid size, densities,
        homophily thresholds and movement/policy parameters.
        '''

        self.height = height
        self.width = width
        self.density = density
        self.minority_pc = minority_pc
        self.homophilyType0 = homophilyType0
        self.homophilyType1 = homophilyType1
        self.movementQuota = movementQuota
        self.happyCheckRadius = happyCheckRadius
        self.moveCheckRadius = moveCheckRadius
        self.last_move_quota = last_move_quota

        self.schedule = RandomActivation(self)
        self.grid = SingleGrid(height, width, torus=True)

        self.happy = 0
        self.happytype0 = 0
        self.happytype1 = 0
        self.stepCount = 0
        self.evenness = 0
        self.empty = 0
        self.type0agents = 0
        self.type1agents = 0
        self.movement = 0
        self.movementtype0 = 0
        self.movementtype1 = 0
        self.movementQuotaCount = 0
        self.numberOfAgents = 0
        self.datacollector = DataCollector(
            # Model-level reporters
            {"step": "stepCount", "happy": "happy", "happytype0": "happytype0", "happytype1": "happytype1",
             "movement": "movement", "movementtype0": "movementtype0", "movementtype1": "movementtype1",
             "evenness": "evenness", "numberOfAgents": "numberOfAgents",
             "homophilyType0": "homophilyType0", "homophilyType1": "homophilyType1",
             "movementQuota": "movementQuota", "happyCheckRadius": "happyCheckRadius",
             "last_move_quota": "last_move_quota"},
            # For testing purposes, each agent's individual x, y and type
            {"x": lambda a: a.pos[0], "y": lambda a: a.pos[1], "Agent type": lambda a: a.type})

        # , "z": lambda a:a.type

        # Set up agents
        # We use a grid iterator that returns
        # the coordinates of a cell as well as
        # its contents. (coord_iter)
        for cell in self.grid.coord_iter():
            x = cell[1]
            y = cell[2]
            if self.random.random() < self.density:
                if self.random.random() < self.minority_pc:
                    agent_type = 1
                else:
                    agent_type = 0

                last_move = round(self.random.random()*10)  # randomly assign a value from 0 to 10
                agent = SchellingAgent((x, y), self, agent_type, last_move)
                self.grid.position_agent(agent, (x, y))
                self.schedule.add(agent)
        # print("Schedule: ", len(self.schedule.agents))

        self.running = True
        self.numberOfAgents = self.schedule.get_agent_count()
        self.datacollector.collect(self)


    def step(self, policy):
        '''
        Run one step of the model. If all agents are happy, halt the model.
        Note on the evenness parameter calculation:
            It cannot be performed in the agents' step function, as that would ignore the periods during which agents are still moving and make the calculation inaccurate.
        '''
        self.happy = 0  # Reset counter of happy agents
        self.happytype0 = 0  # Reset counter of happy type 0 agents
        self.happytype1 = 0  # Reset counter of happy type 1 agents
        self.empty = 0  # Reset counter of empty cells
        self.type0agents = 0  # Reset count of type 0 agents
        self.type1agents = 0  # Reset count of type 1 agents
        self.movementQuotaCount = 0  # Reset count of the movement quota
        self.movement = 0  # Reset counter of movement of agents
        self.movementtype0 = 0  # Reset counter of movement of type 0 agents
        self.movementtype1 = 0  # Reset counter of movement of type 1 agents

        # introduction of the selected policy in the Schelling model
        # happy check vision changes
        if policy[0] is not None and 1 < self.happyCheckRadius < 15:
            self.happyCheckRadius += policy[0]
        # movement quota changes
        if policy[1] is not None and 0.05 < self.movementQuota < 1:
            self.movementQuota += policy[1]
        # last movement threshold
        if policy[2] is not None and 0 < self.last_move_quota < 50:
            self.last_move_quota += policy[2]
        # type 0 preference
        if policy[3] is not None and 0 < self.homophilyType0 < 1:
            self.homophilyType0 += policy[3]
        # type 1 preference
        if policy[4] is not None and 0 < self.homophilyType1 < 1:
            self.homophilyType1 += policy[4]

        # run the step for the agents
        self.schedule.step()
        # print(self.movementQuotaCount, " agents moved.")
        # print(round(self.happy/self.schedule.get_agent_count() * 100,2), "percent are happy agents.")

        # calculating empty counter
        self.empty = (self.height*self.width) - self.schedule.get_agent_count()
        # calculating type 0 and type 1 agent numbers
        for agent in self.schedule.agent_buffer(shuffled=True):
            # print(agent.type)
            if agent.type == 0:
                self.type0agents += 1
            if agent.type == 1:
                self.type1agents += 1

        # calculation of evenness (segregation parameter) using Haw (2015).
        self.evenness_calculation()

        # iterate the steps counter
        self.stepCount += 1

        # collect data
        self.datacollector.collect(self)
        

        # checking the datacollector
        # if self.stepCount % 2 == 0:
        #     print(self.datacollector.get_model_vars_dataframe())
        #     print(self.datacollector.get_agent_vars_dataframe())

        if self.happy == self.schedule.get_agent_count():
            self.running = False
            print("All agents are happy, the simulation ends!")

        output_KPIs = [self.evenness, self.movement, self.happy, self.movementtype0, self.movementtype1, self.happytype0, self.happytype1]
        return output_KPIs, self.type0agents, self.type1agents

    def evenness_calculation(self):

        '''
        To calculate the evenness parameter, the grid is first subdivided into areas of more than one cell each. Evenness is then calculated from the distribution of type 0 and type 1 agents across these areas.
        The division into areas needs to be done carefully, as it depends on the model inputs (width and height of the grid).
        '''

        # check for a square grid
        if self.height != self.width:
            self.running = False
            print("WARNING - The grid is not a square, please insert the same width and height")

        # reset the evenness parameter
        self.evenness = 0

        # algorithm to calculate evenness
        n = 4  # number of big areas considered in width and height
        if self.height % n == 0:
            # consider all big areas
            for big_dy in range(n):
                for big_dx in range(n):
                    # looking within one big areas, going through all cells
                    listAgents = []
                    for small_dy in range(int(self.height/n)):
                        for small_dx in range(int(self.height/n)):
                            for agents in self.schedule.agent_buffer(shuffled=True):
                                if agents.pos == (self.height/n * big_dx + small_dx, self.height/n * big_dy + small_dy):
                                    listAgents.append(agents)
                    # calculating evenness for each big area
                    countType0agents = 0  # Reset of the type counter for type 0 agents
                    countType1agents = 0  # Reset of the type counter for type 1 agents
                    # checking the type of agents in the big area
                    for agents in listAgents:
                        if agents.type == 0:
                            countType0agents += 1
                        if agents.type == 1:
                            countType1agents += 1
                    self.evenness += 0.5 * abs((countType0agents/self.type0agents) - (countType1agents/self.type1agents))
        # print("evenness :", round(self.evenness,2))
Example No. 6
class BoltzmannWealthModel(Model):
    """A simple model of an economy where agents exchange currency at random.

    All the agents begin with one unit of currency, and each time step can give
    a unit of currency to another agent. Note how, over time, this produces a
    highly skewed distribution of wealth.
    """
    def __init__(self, N, width, height):
        self.num_agents = N
        self.width = width
        self.height = height
        self.grid = MultiGrid(height, width, False)  #non toroidal grid
        self.schedule = RandomActivation(self)
        self.datacollector = DataCollector(
            model_reporters={"Coverage": compute_coverage},
            agent_reporters={"Wealth": "wealth"})
        # Create agents
        self.coveredArea = []
        self.interactionCount = 0
        self.interactionRateAverage = 0
        self.coveragePercentage = 0
        self.coveragePercentageAverage = 0

        areaNum = ceil(sqrt(self.num_agents))
        areaDistx = self.width / (sqrt(self.num_agents))
        areaDistx = floor(areaDistx)
        areaDisty = self.height / (sqrt(self.num_agents))
        areaDisty = floor(areaDisty)

        self.dtx = areaDistx
        self.dty = areaDisty

        for i in range(self.num_agents):

            xlow = (i % areaNum) * areaDistx
            xup = xlow + areaDistx - 1

            ylow = floor(i / areaNum) * areaDisty
            yup = ylow + areaDisty - 1

            x = floor((xlow + xup) / 2) + 1
            y = floor((ylow + yup) / 2) + 1

            xlow = x - 1
            xup = x + 1
            ylow = y - 1
            yup = y + 1

            a = MoneyAgent(i, self, xup, xlow, yup, ylow)
            self.schedule.add(a)

            #place agent at the center of its limit coor
            self.grid.place_agent(a, (x, y))
            # Add the agent to a random grid cell

        self.running = True
        self.datacollector.collect(self)

    def step(self):
        print(self.schedule.steps)
        self.interactionCount = 0
        self.schedule.step()
        # collect data
        self.datacollector.collect(self)

        with open('reportRandomStride.csv', 'a') as reportFile:
            coverage = compute_coverage(self)
            percentage = ceil(
                10000 * coverage / self.width / self.height) / 100

            interactionRate = self.interactionCount / self.num_agents / (
                self.num_agents - 1)
            #number of interaction/possible interaction /2 for double counting
            interactionRate = ceil(10000 * interactionRate) / 100

            self.interactionRateAverage = (
                self.interactionRateAverage * (self.schedule.steps - 1) +
                interactionRate) / self.schedule.steps
            self.interactionRateAverage = ceil(
                100 * self.interactionRateAverage) / 100

            self.coveragePercentageAverage = (self.coveragePercentageAverage *
                                              (self.schedule.steps - 1) +
                                              percentage) / self.schedule.steps
            self.coveragePercentageAverage = ceil(
                100 * self.coveragePercentageAverage) / 100

            rewriter = csv.writer(reportFile,
                                  delimiter=' ',
                                  quotechar='"',
                                  quoting=csv.QUOTE_MINIMAL)
            rewriter.writerow([
                "step:", self.schedule.steps, "InteractionRate:",
                interactionRate, '%', "AvgInteractionRate",
                self.interactionRateAverage, '%', "Coverage:", coverage,
                percentage, '%', self.coveragePercentageAverage, '%'
            ])

    def run_model(self, n):
        for i in range(n):
            self.step()


#            print(self.schedule.steps)

    def initPos(self):
        for agent in self.schedule.agent_buffer():
            for i in self.schedule.agent_buffer():
                if i != agent:
                    #                    print ("Heldddlo")
                    #                    divisor = ((i.pos[0]-agent.pos[0])**2+(i.pos[1]-agent.pos[1])**2)
                    divisor = ((i.pos[0] - agent.pos[0]) +
                               (i.pos[1] - agent.pos[1]))
                    agent.forcex += (agent.pos[0] - i.pos[0]) / divisor
                    agent.forcey += (agent.pos[1] - i.pos[1]) / divisor

            #virtual force from boundary
            lx = self.dtx / 2 - agent.pos[0]
            ly = self.dty / 2 - agent.pos[1]
            ux = self.dtx / 2 - (self.width - agent.pos[0])
            uy = self.dty / 2 - (self.height - agent.pos[1])
            # =============================================================================
            #             agent.forcex += (np.heaviside(lx,0)/(lx**2) - np.heaviside(ux,0)/(ux**2))
            #
            #             agent.forcey += (np.heaviside(ly,0)/(ly**2) - np.heaviside(uy,0)/(uy**2))
            # =============================================================================

            agent.forcex += (np.heaviside(lx, 0) / (lx) - np.heaviside(ux, 0) /
                             (ux))

            agent.forcey += (np.heaviside(ly, 0) / (ly) - np.heaviside(uy, 0) /
                             (uy))
        for agent in self.schedule.agent_buffer():
            vx = agent.forcex / np.linalg.norm(agent.force)
            vy = agent.forcey / np.linalg.norm(agent.force)

            apx = agent.pos[0]
            apy = agent.pos[1]
            if abs(vx) > 0.5:
                apx += vx // abs(vx)
            if abs(vy) > 0.5:
                apy += vy // abs(vy)
            self.grid.place_agent(agent, (apx, apy))
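
The step() method above keeps running averages of the interaction rate and coverage incrementally instead of storing per-step histories. A minimal sketch of that update rule, assuming n counts steps starting at 1:

# Hedged sketch of the incremental running-average update used in step().
def update_running_average(previous_average, new_value, n):
    """Average of the first n values, given the average of the first n - 1."""
    return (previous_average * (n - 1) + new_value) / n

avg = 0.0
for n, value in enumerate([10.0, 20.0, 30.0], start=1):
    avg = update_running_average(avg, value, n)
print(avg)  # 20.0
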
Example No. 7
class PolicyEmergenceSM(Model):

	'''
	Simplest Model for the policy emergence model.
	'''

	def __init__(self, SM_inputs, height=20, width=20):

		self.height = height
		self.width = width

		self.SM_inputs = SM_inputs

		self.stepCount = 0
		self.agenda_PC = None
		self.agenda_PF = None
		self.policy_implemented = None
		self.policy_implemented_number = None
		self.policy_formulation_run = False  # True if an agenda is found

		self.schedule = RandomActivation(self)
		self.grid = SingleGrid(height, width, torus=True)

		# creation of the datacollector vector
		self.datacollector = DataCollector(
			# Model-level variables
			model_reporters =  {
				"step": "stepCount",
				"AS_PF": get_problem_policy_chosen,
				"agent_attributes": get_agents_attributes},
			# Agent-level variables
			agent_reporters = {
				"x": lambda a: a.pos[0],
				"y": lambda a: a.pos[1],
				"Agent type": lambda a:type(a), 
				"Issuetree": lambda a: getattr(a, 'issuetree', [None])[a.unique_id if isinstance(a, ActiveAgent) else 0]}
			)

		# , "agenda_PC":"agenda_PC", "agenda_PF":"agenda_PF", "policy_implemented": "policy_implemented"

		# "x": lambda a: a.pos[0], "y": lambda a: a.pos[1]
		# "z": lambda a:a.issuetree

		# belief tree properties
		self.len_S, self.len_PC, self.len_DC, self.len_CR = issue_tree_input(self)
		# print(self.len_S, self.len_PC, self.len_DC, self.len_CR)

		# issue tree properties
		self.policy_instruments, self.len_ins_1, self.len_ins_2, self.len_ins_all, self.PF_indices = policy_instrument_input(self, self.len_PC)

		# Set up active agents
		init_active_agents(self, self.len_S, self.len_PC, self.len_DC, self.len_CR, self.len_PC, self.len_ins_1, self.len_ins_2, self.len_ins_all, self.SM_inputs)

		# Set up passive agents
		init_electorate_agents(self, self.len_S, self.len_PC, self.len_DC, self.SM_inputs)

		# Set up truth agent
		init_truth_agent(self, self.len_S, self.len_PC, self.len_DC, self.len_ins_1, self.len_ins_2, self.len_ins_all)
		# the issue tree will need to be updated at a later stage with the values from the system/policy context

		# print("Schedule has : ", len(self.schedule.agents), " agents.")
		# print(self.schedule.agents)
		# print(" ")

		# for agent in self.schedule.agent_buffer(shuffled=False):
		# 	print(' ')
		# 	print(agent)
		# 	print(type(agent))
		# 	if isinstance(agent, ActiveAgent):
		# 		print(agent.unique_id, " ", agent.pos, " ", agent.agent_type, " ", agent.resources, " ", agent.affiliation, " ", agent.issuetree[agent.unique_id], " ", agent.policytree[agent.unique_id][0])
		# 	if isinstance(agent, ElectorateAgent):
		# 		print(agent.unique_id, " ", agent.pos, " ", agent.affiliation, " ", agent.issuetree)
		# 	if isinstance(agent, TruthAgent):
		# 		print(agent.pos, " ", agent.issuetree)

		self.running = True
		self.numberOfAgents = self.schedule.get_agent_count()
		self.datacollector.collect(self)

	def step(self, KPIs):
		print(" ")
		print("Step +1 - Policy emergence model")
		print("Step count: ", self.stepCount)

		'''
		Main steps of the Simplest Model for policy emergence:
		0. Module interface - Input
			Obtaining the beliefs from the system/policy context
			!! This is to be implemented at a later stage
		1. Agenda setting step
		2. Policy formulation step
		3. Module interface - Output
			Implementation of the policy instrument selected
		'''

		# saving the attributes
		self.KPIs = KPIs

		# 0.
		self.module_interface_input(self.KPIs)

		'''
		TO DO:
		- Introduce the transfer of information between the external parties and the truth agent related to the policy impacts
		'''

		# 1.
		self.agenda_setting()

		# 2.
		if self.policy_formulation_run:
			self.policy_formulation()
		else:
			self.policy_implemented = self.policy_instruments[-1]

		# 3.
		# self.module_interface_output()

		# end of step actions:
		# iterate the steps counter
		self.stepCount += 1

		# collect data
		self.datacollector.collect(self)

		print("step ends")
		print(" ")

		# print(self.datacollector.get_agent_vars_dataframe())
		print(self.datacollector.get_model_vars_dataframe())

		return self.policy_implemented

	def module_interface_input(self, KPIs):

		'''
		The module interface input step consists of actions related to the module interface and the policy emergence model

		Missing:
		- Electorate actions
		'''

		# selection of the Truth agent policy tree and issue tree
		for agent in self.schedule.agent_buffer(shuffled=True):
			if isinstance(agent, TruthAgent):
				truth_policytree = agent.policytree_truth
				for issue in range(self.len_DC+self.len_PC+self.len_S):
					agent.issuetree_truth[issue] = KPIs[issue]
				truth_issuetree = agent.issuetree_truth

		# Transferring policy impact to active agents
		for agent in self.schedule.agent_buffer(shuffled=True):
			if isinstance(agent, ActiveAgent):
				# replacing the policy family likelihoods
				for PFj in range(self.len_PC):
					for PFij in range(self.len_PC):
						agent.policytree[agent.unique_id][PFj][PFij] = truth_policytree[PFj][PFij]

				# replacing the policy instruments impacts
				for insj in range(self.len_ins_1 + self.len_ins_2 + self.len_ins_all):
					agent.policytree[agent.unique_id][self.len_PC+insj][0:self.len_S] = truth_policytree[self.len_PC+insj]

				# replacing the issue beliefs from the KPIs
				for issue in range(self.len_DC+self.len_PC+self.len_S):
					agent.issuetree[agent.unique_id][issue][0] = truth_issuetree[issue]
				self.preference_update(agent, agent.unique_id)

	def agenda_setting(self):

		'''
		The agenda setting step is the first step in the policy process conceptualised in this model. The steps are given as follows:
		1. Active agents policy core issue selection
		2. Active agents policy family selection
		3. Active agents actions [to be detailed later]
		4. Active agents policy core issue selection update
		5. Active agents policy family selection update
		6. Agenda selection
		'''

		# 1. & 2.
		for agent in self.schedule.agent_buffer(shuffled=False):
			if isinstance(agent, ActiveAgent):  # considering only active agents
				agent.selection_PC()
				agent.selection_PF()
				# print("PC and PF selected for  agent", agent.unique_id, ": ", agent.selected_PC, agent.selected_PF)

		# 3.

		# 4. & 5.
		for agent in self.schedule.agent_buffer(shuffled=False):
			if isinstance(agent, ActiveAgent):  # considering only active agents
				agent.selection_PC()
				agent.selection_PF()

		# 6. 
		# All active agents considered
		selected_PC_list = []
		selected_PF_list = []
		number_ActiveAgents = 0
		for agent in self.schedule.agent_buffer(shuffled=False):
			if isinstance(agent, ActiveAgent):  # considering only active agents
				selected_PC_list.append(agent.selected_PC)
				selected_PF_list.append(agent.selected_PF)
				number_ActiveAgents += 1

		# finding the most common policy core issue and its frequency
		d = defaultdict(int)
		for i in selected_PC_list:
			d[i] += 1
		result = max(d.items(), key=lambda x: x[1])
		agenda_PC_temp = result[0]
		agenda_PC_temp_frequency = result[1]

		# finding the most common policy family issue and its frequency
		d = defaultdict(int)
		for i in selected_PF_list:
			d[i] += 1
		result = max(d.items(), key=lambda x: x[1])
		agenda_PF_temp = result[0]
		agenda_PF_temp_frequency = result[1]

		# checking for majority
		if agenda_PC_temp_frequency > int(number_ActiveAgents/2) and agenda_PF_temp_frequency > int(number_ActiveAgents/2):
			self.agenda_PC = agenda_PC_temp
			self.agenda_PF = agenda_PF_temp
			self.policy_formulation_run = True
			print("The agenda consists of PC", self.agenda_PC, " and PF", self.agenda_PF, ".")
		else:
			self.policy_formulation_run = False
			print("No agenda was formed, moving to the next step.")

	def policy_formulation(self):

		'''
		The policy formulation step is the second step in the policy process conceptualised in this model. The steps are given as follows:
		0. Detailing of policy instruments that can be considered
		1. Active agents deep core issue selection
		2. Active agents policy instrument selection
		3. Active agents actions [to be detailed later]
		4. Active agents policy instrument selection update
		5. Policy instrument selection

		NOTE: THIS CODE DOESN'T CONSIDER MAJORITY WHEN MORE THAN THREE POLICY MAKERS ARE INCLUDED; IT CONSIDERS THE MAXIMUM. THIS NEEDS TO BE ADAPTED TO CONSIDER 50% OR MORE!
		'''

		print("Policy formulation being introduced")

		# 0.
		possible_PI = self.PF_indices[self.agenda_PF]

		# 1. & 2.
		for agent in self.schedule.agent_buffer(shuffled=False):
			if isinstance(agent, ActiveAgent):  # considering only active agents
				agent.selection_S()
				agent.selection_PI()

		# 3.

		# 4. & 5.
		for agent in self.schedule.agent_buffer(shuffled=False):
			if isinstance(agent, ActiveAgent):  # considering only active agents
				agent.selection_PI()

		# 5.
		# Only policy makers considered
		selected_PI_list = []
		number_PMs = 0
		for agent in self.schedule.agent_buffer(shuffled=False):
			if isinstance(agent, ActiveAgent) and agent.agent_type == 'policymaker':  # considering only policy makers
				selected_PI_list.append(agent.selected_PI)
				number_PMs += 1

		# finding the most common policy instrument and its frequency
		d = defaultdict(int)
		for i in selected_PI_list:
			d[i] += 1
		result = max(d.items(), key=lambda x: x[1])
		self.policy_implemented_number = result[0]
		policy_implemented_number_frequency = result[1]

		# check for the majority and implemented if satisfied
		if policy_implemented_number_frequency > int(number_PMs/2):
			print("The policy instrument selected is policy instrument ", self.policy_implemented_number, ".")
			self.policy_implemented = self.policy_instruments[self.policy_implemented_number]
		else:
			print("No consensus on a policy instrument.")
			self.policy_implemented = self.policy_instruments[-1]  # the last policy instrument corresponds to implementing no instrument

	def module_interface_output(self):

		print("Module interface output not introduced yet")

	def preference_update(self, agent, who):

		self.preference_update_DC(agent, who)

		self.preference_update_PC(agent, who)

		self.preference_update_S(agent, who)

	def preference_update_DC(self, agent, who):

		"""
		The preference update function (DC)
		===========================

		This function is used to update the preferences of the deep core issues of agents in their
		respective belief trees.

		agent - the owner of the belief tree
		who - the part of the belief tree that is considered; agent.unique_id should be used here, so that partial-knowledge preference calculations are also covered

		"""	

		len_DC = self.len_DC
		len_PC = self.len_PC
		len_S = self.len_S

		#####
		# 1.5.1. Preference calculation for the deep core issues

		# 1.5.1.1. Calculation of the denominator
		PC_denominator = 0
		for h in range(len_DC):
			if agent.issuetree[who][h][1] == None or agent.issuetree[who][h][0] == None:
				PC_denominator = 0
			else:
				PC_denominator = PC_denominator + abs(agent.issuetree[who][h][1] - agent.issuetree[who][h][0])
		# print('The denominator is given by: ' + str(PC_denominator))

		# 1.5.1.2. Selection of the numerator and calculation of the preference
		for i in range(len_DC):
			# There are rare occasions where the denominator could be 0
			if PC_denominator != 0:
				agent.issuetree[who][i][2] = abs(agent.issuetree[who][i][1] - agent.issuetree[who][i][0]) / PC_denominator
			else:
				agent.issuetree[who][i][2] = 0

	def preference_update_PC(self, agent, who):

		"""
		The preference update function (PC)
		===========================

		This function is used to update the preferences of the policy core issues of agents in their
		respective belief trees.

		agent - the owner of the belief tree
		who - the part of the belief tree that is considered; agent.unique_id should be used here, so that partial-knowledge preference calculations are also covered

		"""	

		len_DC = self.len_DC
		len_PC = self.len_PC
		len_S = self.len_S

		#####	
		# 1.5.2 Preference calculation for the policy core issues
		PC_denominator = 0
		# 1.5.2.1. Calculation of the denominator
		for j in range(len_PC):
			# print('Selection PC' + str(j+1))
			# print('State of the PC' + str(j+1) + ': ' + str(agent.issuetree[0][len_DC + j][0])) # the state printed
			# Selecting the causal relations starting from PC
			for k in range(len_DC):
				# Contingency for partial knowledge issues
				if agent.issuetree[who][k][1] == None or agent.issuetree[who][k][0] == None or agent.issuetree[who][len_DC+len_PC+len_S+j+(k*len_PC)][0] == None:
					PC_denominator += 0
				else:
					# print('Causal Relation PC' + str(j+1) + ' - PC' + str(k+1) + ': ' + str(agent.issuetree[0][len_DC+len_PC+len_S+j+(k*len_PC)][1]))
					# print('Gap of PC' + str(k+1) + ': ' + str((agent.issuetree[0][k][1] - agent.issuetree[0][k][0])))
					# Check if causal relation and gap are both positive or both negative
					# print('agent.issuetree[' + str(who) + '][' + str(len_DC+len_PC+len_S+j+(k*len_PC)) + '][0]: ' + str(agent.issuetree[who][len_DC+len_PC+len_S+j+(k*len_PC)][0]))
					if (agent.issuetree[who][len_DC+len_PC+len_S+j+(k*len_PC)][0] < 0 and (agent.issuetree[who][k][1] - agent.issuetree[who][k][0]) < 0) or (agent.issuetree[who][len_DC+len_PC+len_S+j+(k*len_PC)][0] > 0 and (agent.issuetree[who][k][1] - agent.issuetree[who][k][0]) > 0):
						PC_denominator = PC_denominator + abs(agent.issuetree[who][len_DC+len_PC+len_S+j+(k*len_PC)][0]*(agent.issuetree[who][k][1] - agent.issuetree[who][k][0]))
						# print('This is the PC numerator: ' + str(PC_denominator))
					else:
						PC_denominator = PC_denominator	

		# 1.5.2.2. Addition of the gaps of the associated mid-level issues
		for i in range(len_PC):
			# Contingency for partial knowledge issues
			if agent.issuetree[who][len_DC + i][1] == None or agent.issuetree[who][len_DC + i][0] == None:
				PC_denominator = PC_denominator
			else:
				# print('This is the gap for the PC' + str(i+1) + ': ' + str(agent.issuetree[0][len_DC + i][1] - agent.issuetree[0][len_DC + i][0]))
				PC_denominator += abs(agent.issuetree[who][len_DC + i][1] - agent.issuetree[who][len_DC + i][0])
		# print('This is the S denominator: ' + str(PC_denominator))
		
		# 1.5.2.3 Calculation the numerator and the preference
		# Select one by one the PC
		for j in range(len_PC):

			# 1.5.2.3.1. Calculation of the right side of the numerator
			PC_numerator = 0
			# print('Selection PC' + str(j+1))
			# print('State of the PC' + str(j+1) + ': ' + str(agent.issuetree[0][len_DC + j][0])) # the state printed
			# Selecting the causal relations starting from DC
			for k in range(len_DC):
				# Contingency for partial knowledge issues
				if agent.issuetree[who][k][1] == None or agent.issuetree[who][k][0] == None or agent.issuetree[who][len_DC+len_PC+len_S+j+(k*len_PC)][0] == None:
					PC_numerator += 0
				else:
					# print('Causal Relation PC' + str(j+1) + ' - DC' + str(k+1) + ': ' + str(agent.issuetree[0][len_DC+len_PC+len_S+j+(k*len_PC)][1]))
					# print('Gap of DC' + str(k+1) + ': ' + str((agent.issuetree[0][k][1] - agent.issuetree[0][k][0])))
					# Check if causal relation and gap are both positive or both negative
					if (agent.issuetree[who][len_DC+len_PC+len_S+j+(k*len_PC)][0] < 0 and (agent.issuetree[who][k][1] - agent.issuetree[who][k][0]) < 0) or (agent.issuetree[who][len_DC+len_PC+len_S+j+(k*len_PC)][0] > 0 and (agent.issuetree[who][k][1] - agent.issuetree[who][k][0]) > 0):
						PC_numerator = PC_numerator + abs(agent.issuetree[who][len_DC+len_PC+len_S+j+(k*len_PC)][0]*(agent.issuetree[who][k][1] - agent.issuetree[who][k][0]))
						# print('This is the PC numerator: ' + str(PC_numerator))
					else:
						PC_numerator = PC_numerator	

			# 1.5.2.3.2. Addition of the gap to the numerator
			# Contingency for partial knowledge issues
			if agent.issuetree[who][len_DC + j][1] == None or agent.issuetree[who][len_DC + j][0] == None:
				PC_numerator += 0
			else:
				# print('This is the gap for the PC' + str(j+1) + ': ' + str(agent.issuetree[0][len_DC + j][1] - agent.issuetree[0][len_DC + j][0]))
				PC_numerator += abs(agent.issuetree[who][len_DC + j][1] - agent.issuetree[who][len_DC + j][0])
			# print('The numerator is equal to: ' + str(PC_numerator))
			# print('The denominator is equal to: ' + str(PC_denominator))

			# 1.5.2.3.3. Calculation of the preference
			if PC_denominator != 0:
				agent.issuetree[who][len_DC+j][2] = round(PC_numerator/PC_denominator,3)
				# print('The new preference of the policy core PC' + str(j+1) + ' is: ' + str(agent.issuetree[0][len_DC+j][2]))
			else:
				agent.issuetree[who][len_DC+j][2] = 0

	def preference_update_S(self, agent, who):

		"""
		The preference update function (S)
		===========================

		This function is used to update the preferences of the secondary issues of agents in their
		respective belief trees.

		agent - the owner of the belief tree
		who - the part of the belief tree that is considered; agent.unique_id should be used here, so that partial-knowledge preference calculations are also covered

		"""	

		len_DC = self.len_DC
		len_PC = self.len_PC
		len_S = self.len_S

		#####	
		# 1.5.3 Preference calculation for the secondary issues
		S_denominator = 0
		# 1.5.2.1. Calculation of the denominator
		for j in range(len_S):
			# print('Selection S' + str(j+1))
			# print('State of the S' + str(j+1) + ': ' + str(agent.issuetree[0][len_DC + len_PC + j][0])) # the state printed
			# Selecting the causal relations starting from S
			for k in range(len_PC):
				# Contingency for partial knowledge issues
				if agent.issuetree[who][len_DC + k][1] == None or agent.issuetree[who][len_DC + k][0] == None or agent.issuetree[who][len_DC+len_PC+len_S+len_DC*len_PC+j+(k*len_S)][0] == None:
					S_denominator += 0
				else:
					# print('Causal Relation S' + str(j+1) + ' - PC' + str(k+1) + ': ' + str(agent.issuetree[who][len_DC+len_PC+len_S+len_DC*len_PC+j+(k*len_S)][0]))
					# print('Gap of PC' + str(k+1) + ': ' + str((agent.issuetree[who][len_DC+k][1] - agent.issuetree[who][len_DC+k][0])))
					# Check if causal relation and gap are both positive or both negative
					# print('agent.issuetree[' + str(who) + '][' + str(len_DC+len_PC+len_S+len_DC*len_PC+j+(k*len_S)) + '][0]: ' + str(agent.issuetree[who][len_DC+len_PC+len_S+len_DC*len_PC+j+(k*len_S)][0]))
					if (agent.issuetree[who][len_DC+len_PC+len_S+len_DC*len_PC+j+(k*len_S)][0] < 0 and (agent.issuetree[who][len_DC+k][1] - agent.issuetree[who][len_DC+k][0]) < 0) or (agent.issuetree[who][len_DC+len_PC+len_S+len_DC*len_PC+j+(k*len_S)][0] > 0 and (agent.issuetree[who][len_DC+k][1] - agent.issuetree[who][len_DC+k][0]) > 0):
						S_denominator += abs(agent.issuetree[who][len_DC+len_PC+len_S+len_DC*len_PC+j+(k*len_S)][0]*(agent.issuetree[who][len_DC+k][1] - agent.issuetree[who][len_DC+k][0]))
						# print('This is the PC numerator: ' + str(S_denominator))
					else:
						S_denominator = S_denominator	

		# 1.5.2.2. Addition of the gaps of the associated secondary issues
		for j in range(len_S):
			# Contingency for partial knowledge issues
			if agent.issuetree[who][len_DC+len_PC+j][1] == None or agent.issuetree[who][len_DC+len_PC+j][0] == None:
				S_denominator = S_denominator
			else:
				# print('This is the gap for the PC' + str(i+1) + ': ' + str(agent.issuetree[0][len_DC + len_PC + i][1] - agent.issuetree[0][len_DC + len_PC + i][0]))
				S_denominator += abs(agent.issuetree[who][len_DC+len_PC+j][1] - agent.issuetree[who][len_DC+len_PC+j][0])
		# print('This is the PC denominator: ' + str(S_denominator))
		
		# 1.5.2.3 Calculation the numerator and the preference
		# Select one by one the S
		for j in range(len_S):

			# 1.5.2.3.1. Calculation of the right side of the numerator
			S_numerator = 0
			# print('Selection S' + str(j+1))
			# print('State of the S' + str(j+1) + ': ' + str(agent.issuetree[who][len_DC + len_PC + j][0])) # the state printed
			# Selecting the causal relations starting from PC
			for k in range(len_PC):
				# Contingency for partial knowledge issues
				if agent.issuetree[who][len_DC + k][1] == None or agent.issuetree[who][len_DC + k][0] == None or agent.issuetree[who][len_DC+len_PC+len_S+len_DC*len_PC+j+(k*len_S)][0] == None:
					S_numerator = 0
				else:
					# print('Causal Relation S' + str(j+1) + ' - PC' + str(k+1) + ': ' + str(agent.issuetree[who][len_DC+len_PC+len_S+len_DC*len_PC+j+(k*len_S)][0]))
					# print('Gap of PC' + str(k+1) + ': ' + str((agent.issuetree[who][len_DC + k][1] - agent.issuetree[who][len_DC + k][0])))
					# Check if causal relation and gap are both positive or both negative
					if (agent.issuetree[who][len_DC+len_PC+len_S+len_DC*len_PC+j+(k*len_S)][0] < 0 and (agent.issuetree[who][len_DC+k][1] - agent.issuetree[who][len_DC+k][0]) < 0) or (agent.issuetree[who][len_DC+len_PC+len_S+len_DC*len_PC+j+(k*len_S)][0] > 0 and (agent.issuetree[who][len_DC+k][1] - agent.issuetree[who][len_DC+k][0]) > 0):
						S_numerator += abs(agent.issuetree[who][len_DC+len_PC+len_S+len_DC*len_PC+j+(k*len_S)][0]*(agent.issuetree[who][len_DC+k][1] - agent.issuetree[who][len_DC+k][0]))
						# print('This is the PC numerator: ' + str(S_numerator))
					else:
						S_numerator = S_numerator

			# 1.5.2.3.2. Addition of the gap to the numerator
			# Contingency for partial knowledge issues
			if agent.issuetree[who][len_DC+len_PC+j][1] == None or agent.issuetree[who][len_DC+len_PC+j][0] == None:
				S_numerator += 0
			else:
				# print('This is the gap for the PC' + str(j+1) + ': ' + str(agent.issuetree[who][len_DC+len_PC+j][1] - agent.issuetree[who][len_DC+len_PC+j][0]))
				S_numerator += abs(agent.issuetree[who][len_DC+len_PC+j][1] - agent.issuetree[who][len_DC+len_PC+j][0])
			# print('The numerator is equal to: ' + str(S_numerator))
			# print('The denominator is equal to: ' + str(S_denominator))

			# 1.5.2.3.3. Calculation of the preference
			if S_denominator != 0:
				agent.issuetree[who][len_DC+len_PC+j][2] = round(S_numerator/S_denominator,3)
				# print('The new preference of the secondary issue S' + str(j+1) + ' is: ' + str(agent.issuetree[0][len_DC+len_PC+j][2]))
			else:
				agent.issuetree[who][len_DC+len_PC+j][2] = 0
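
All three preference_update_* methods apply the same normalisation: an issue's preference is its own (causally weighted) goal-state gap divided by the sum of such gaps across the layer. A minimal sketch of the deep-core case, where no causal weighting applies; the [state, goal] pair layout is an assumption taken from the indexing above:

# Hedged sketch of the deep-core preference normalisation (no causal weights).
def deep_core_preferences(issues):
    """issues: list of [state, goal] pairs; returns one preference per issue."""
    gaps = [abs(goal - state) for state, goal in issues]
    denominator = sum(gaps)
    if denominator == 0:
        return [0 for _ in gaps]
    return [round(gap / denominator, 3) for gap in gaps]

# Illustrative: gaps of 0.2 and 0.6 give preferences 0.25 and 0.75.
print(deep_core_preferences([[0.5, 0.7], [0.1, 0.7]]))
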
Example No. 8
class BattleModel(Model):
    """A model with some number of agents."""
    def __init__(self, red_col, red_row, red_squad, blue_col, blue_row,
                 blue_squad, blue_agents_elite_squad, red_movement,
                 blue_movement, width, height):
        self.running = True
        self.space = ContinuousSpace(width, height, False)
        self.schedule = RandomActivation(self)
        self.next_agent_id = 1

        self.RED_MOVEMENT_SPEED = red_movement
        self.BLUE_MOVEMENT_SPEED = blue_movement

        separation_y = 1.5
        # Find center
        red_first_y = ((height / 2 -
                        (red_squad * red_row / 2 * separation_y)) +
                       separation_y / 2) - ((red_squad - 1) * separation_y * 2)
        blue_first_y = (
            (height / 2 - (blue_squad * blue_row / 2 * separation_y)) +
            separation_y / 2) - ((blue_squad - 1) * separation_y * 2)

        # Create agents
        self.spawner(15.0, red_first_y, 1.5, separation_y, red_col, red_row,
                     red_squad, 0, 'red')
        self.spawner(width - 15.0, blue_first_y, -1.5, separation_y, blue_col,
                     blue_row, blue_squad, blue_agents_elite_squad, 'blue')

    def spawner(self, first_x, first_y, separation_x, separation_y, cols, rows,
                squad, elite_squad, type):
        for i in range(cols):
            for j in range(rows * squad):
                x = first_x + (separation_x * i)
                y = first_y + (separation_y * j)

                # Squad separator
                y = y + (4 * separation_y) * (floor(j / rows))

                casual = squad - elite_squad
                if casual < 0:
                    casual = 0

                elite = True
                if casual > 0:
                    if squad - 1 == floor(j / rows):
                        elite = False
                if casual > 1:
                    if floor(j / rows) == 0:
                        elite = False
                if casual > 2:
                    if squad - 2 == floor(j / rows):
                        elite = False
                if casual > 3:
                    if floor(j / rows) == 1:
                        elite = False
                if casual == 5:
                    elite = False

                self.spawn(x, y, type, elite)

    def spawn(self, x, y, type, elite):
        if (type == 'red'):
            a = warrior_agent.RedWarrior(self.next_agent_id, self)
        elif (elite == True):
            a = warrior_agent.BlueEliteWarrior(self.next_agent_id, self)
        else:
            a = warrior_agent.BlueCommonWarrior(self.next_agent_id, self)
        pos = np.array((x, y))
        self.schedule.add(a)
        self.space.place_agent(a, pos)

        self.next_agent_id += 1

    def step(self):
        self.schedule.step()

        agents_and_allies_morale = []
        for agent in self.schedule.agent_buffer(
                False):  #type: warrior_agent.WarriorAgent
            agents_and_allies_morale.append(
                (agent,
                 agent.get_average_morale_of_allies_in_flocking_radius()))

        for (
                agent, allies_morale
        ) in agents_and_allies_morale:  #type: (warrior_agent.WarriorAgent, float)
            agent.update_morale(agent.calculate_new_morale(allies_morale))

        print("Żywych agentów: " + str(len(self.schedule.agents)))
Example No. 9
class GTModel(Model):
    def __init__(self, debug, size, i_n_agents, i_strategy, i_energy,
                 child_location, movement, k, T, M, p, d, strategies_to_count,
                 count_tolerance, mutation_type, death_threshold, n_groups):
        self.grid = SingleGrid(size, size, torus=True)
        self.schedule = RandomActivation(self)
        self.running = True
        self.debug = debug
        self.size = size
        self.agent_idx = 0
        self.i_energy = i_energy

        # Payoff matrix in the form (my_move, op_move) : my_reward
        self.payoff = {
            ('C', 'C'): 2,
            ('C', 'D'): -3,
            ('D', 'C'): 3,
            ('D', 'D'): -1,
        }
        # Constant for max population control (cost of surviving)
        self.k = k
        # Constant for controlling dying of old age
        self.M = M
        # Minimum lifespan
        self.T = T
        # Minimum energy level to reproduce
        self.p = p
        # Mutation "amplitude"
        self.d = d
        # Whether to spawn children near parents or randomly
        self.child_location = child_location
        # Specify the type of movement allowed for the agents
        self.movement = movement
        # Specify how the agents mutate
        self.mutation_type = mutation_type
        # The minimum total_energy needed for an agent to survive
        self.death_threshold = death_threshold

        # Vars regarding which strategies to look for
        self.strategies_to_count = strategies_to_count
        self.count_tolerance = count_tolerance

        # Add agents (one agent per cell)
        all_coords = [(x, y) for x in range(size) for y in range(size)]
        agent_coords = self.random.sample(all_coords, i_n_agents)

        for _ in range(i_n_agents):
            group_idx = (None if n_groups is None else self.random.choice(
                range(n_groups)))
            agent = GTAgent(self.agent_idx, group_idx, self, i_strategy.copy(),
                            i_energy)
            self.agent_idx += 1
            self.schedule.add(agent)
            self.grid.place_agent(agent, agent_coords.pop())

        # Collect data
        self.datacollector = DataCollector(
            model_reporters={
                **{
                    'strategies': get_strategies,
                    'n_agents': total_n_agents,
                    'avg_agent_age': avg_agent_age,
                    'n_friendlier': n_friendlier,
                    'n_aggressive': n_aggressive,
                    'perc_cooperative_actions': perc_cooperative_actions,
                    'n_neighbors': n_neighbor_measure,
                    'avg_delta_energy': avg_delta_energy,
                    'perc_CC': perc_CC_interactions,
                    'lin_fit_NC': coop_per_neig,
                    'lin_fit_NC_intc': coop_per_neig_intc,
                },
                **{
                    label: strategy_counter_factory(strategy, count_tolerance)
                    for label, strategy in strategies_to_count.items()
                }
            })

    def alpha(self):
        # Return the cost of surviving, alpha
        DC = self.payoff[('D', 'C')]
        CC = self.payoff[('C', 'C')]
        N = len(self.schedule.agents)

        return self.k + 4 * (DC + CC) * N / (self.size * self.size)

    def time_to_die(self, agent):
        # There is a chance every iteration to die of old age: (A - T) / M
        # The agent is certain to die if its total energy falls below the death threshold
        return (agent.total_energy < self.death_threshold
                or self.random.random() < (agent.age - self.T) / self.M)

    def get_child_location(self, agent):
        if self.child_location == 'global':
            return self.random.choice(sorted(self.grid.empties))

        elif self.child_location == 'local':
            # Iterate over the radius, starting at 1 to find empty cells
            for rad in range(1, int(self.size / 2)):
                possible_steps = [
                    cell for cell in self.grid.get_neighborhood(
                        agent.pos,
                        moore=False,
                        include_center=False,
                        radius=rad,
                    ) if self.grid.is_cell_empty(cell)
                ]

                if possible_steps:
                    return self.random.choice(possible_steps)

            # If no free cells in radius size/2 pick a random empty cell
            return self.random.choice(sorted(self.grid.empties))

    def maybe_mutate(self, agent):
        # Mutate by adding a random d to individual Pi's
        if self.mutation_type == 'stochastic':
            # Copy the strategy list so the parent's strategy is not mutated
            new_strategy = agent.strategy.copy()
            # There is a 20% chance of mutation
            if self.random.random() < 0.2:
                # Each Pi is mutated uniformly by [-d, d]
                for i in range(4):
                    mutation = self.random.uniform(-self.d, self.d)
                    new_val = new_strategy[i] + mutation
                    # Keep probabilities in [0, 1]
                    new_val = (0 if new_val < 0 else
                               1 if new_val > 1 else new_val)
                    new_strategy[i] = new_val

        # Mutate by choosing a random strategy from the list set
        elif self.mutation_type == 'fixed':
            new_strategy = self.random.choice(
                list(self.strategies_to_count.values()))

        elif self.mutation_type == 'gaussian_sentimental':
            # Copy the strategy list so the parent's strategy is not mutated
            new_strategy = agent.strategy.copy()
            # There is a 20% chance of mutation
            if self.random.random() < 0.2:
                # Each Pi is mutated by a value drawn from a gaussian
                # with mean=delta_energy
                for i in range(4):
                    mutation = self.random.normalvariate(
                        (agent.delta_energy + self.alpha()) / 14, self.d)
                    new_val = new_strategy[i] + mutation
                    # Keep probabilities in [0, 1]
                    new_val = (0 if new_val < 0 else
                               1 if new_val > 1 else new_val)
                    new_strategy[i] = new_val

        return new_strategy

    def maybe_reproduce(self, agent):
        # If we have the energy to reproduce, do so
        if agent.total_energy >= self.p:
            # Create the child
            new_strategy = self.maybe_mutate(agent)
            child = GTAgent(self.agent_idx, agent.group_id, self, new_strategy,
                            self.i_energy)
            self.agent_idx += 1

            # Set parent and child energy levels to p/2
            child.total_energy = self.p / 2
            agent.total_energy = self.p / 2

            # Place child (Remove agent argument for global child placement)
            self.schedule.add(child)
            self.grid.place_agent(child, self.get_child_location(agent))

    def step(self):
        if self.debug:
            print('\n\n==================================================')
            print('==================================================')
            print('==================================================')
            pprint(vars(self))

        # First collect data
        self.datacollector.collect(self)

        # Then check for dead agents and for new agents
        for agent in self.schedule.agent_buffer(shuffled=True):
            # First check if dead
            if self.time_to_die(agent):
                self.grid.remove_agent(agent)
                self.schedule.remove(agent)

            # Otherwise check if can reproduce
            else:
                self.maybe_reproduce(agent)

        # Finally, step each agent
        self.schedule.step()

    def check_strategy(self, agent):
        # Helper function to check which strategy an agent would count as
        def is_same(strategy, a_strategy):
            tol = self.count_tolerance
            return all(strategy[i] - tol < a_strategy[i] < strategy[i] + tol
                       for i in range(4))

        return [
            name for name, strat in self.strategies_to_count.items()
            if is_same(strat, agent.strategy)
        ]
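
alpha() and time_to_die() above define the per-step survival cost and the old-age death chance. A small sketch reproducing those formulas outside the model, with illustrative (uncalibrated) numbers:

# Hedged sketch of GTModel's survival cost and old-age death chance.
def survival_cost(k, payoff, n_agents, size):
    dc, cc = payoff[('D', 'C')], payoff[('C', 'C')]
    return k + 4 * (dc + cc) * n_agents / (size * size)

def old_age_death_chance(age, T, M):
    # Values below 0 mean the agent cannot yet die of old age; above 1, it always does.
    return (age - T) / M

payoff = {('C', 'C'): 2, ('C', 'D'): -3, ('D', 'C'): 3, ('D', 'D'): -1}
print(survival_cost(k=1, payoff=payoff, n_agents=50, size=10))  # 11.0
print(old_age_death_chance(age=120, T=100, M=100))              # 0.2
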