Example #1
0
    def control(self, num_steps):
        """One simulation step: update target residence, then move with a
        Levy-modulated correlated random walk inside the bounded arena."""

        # --- target residence bookkeeping ---------------------------------
        target = self.current_target
        if target is not None:
            # Already on a target: leave it once we move past its radius.
            separation = (target.position - self.position).get_length()
            if separation > target.size:
                self.on_target = False
                self.current_target = None
                self.residence_times.append(self.steps_on_target)
            else:
                self.steps_on_target += 1
        else:
            # Not on a target: adopt the first one whose radius contains us.
            self.current_target = None
            for candidate in self.arena.targets:
                if (candidate.position - self.position).get_length() < candidate.size:
                    self.current_target = candidate
                    self.steps_on_target = 0
                    break

        # --- basic movement: keep going straight at constant speed --------
        heading = self.velocity.get_angle()
        self.apply_velocity = pysage.Vec2d(CRWLEVYAgent.linear_speed, 0)
        self.apply_velocity.rotate(heading)

        # --- random walk: redraw step length and turning angle ------------
        self.count_motion_steps -= 1
        if self.count_motion_steps <= 0:
            # New step length: |Levy(std, exponent)| truncated to an integer.
            step_draw = distribution_functions.levy(
                CRWLEVYAgent.std_motion_steps, CRWLEVYAgent.levy_exponent)
            self.count_motion_steps = int(math.fabs(step_draw))
            # Turning angle: uniform when there is no directional
            # correlation, wrapped-Cauchy otherwise.
            if CRWLEVYAgent.CRW_exponent == 0:
                turn = distribution_functions.uniform_distribution(
                    0, (2 * math.pi))
            else:
                turn = distribution_functions.wrapped_cauchy_ppf(
                    CRWLEVYAgent.CRW_exponent)
            self.apply_velocity.rotate(turn)

        # --- arena border avoidance ---------------------------------------
        if self.position.get_length() >= self.arena.size_radius - CRWLEVYAgent.size:
            # Bounce back toward the interior with a random perturbation of
            # up to +/- pi/2 around the inward direction.
            self.apply_velocity = pysage.Vec2d(CRWLEVYAgent.linear_speed, 0)
            bounce = (math.pi + self.position.get_angle() +
                      random.uniform(-math.pi / 2, math.pi / 2))
            self.apply_velocity.rotate(bounce)
Example #2
0
    def init_experiment(self):
        """Reset per-experiment agent state for collective decision making."""
        pysage.Agent.init_experiment(self)

        # Initial step budget drawn from a Levy distribution: the draw is
        # made positive with fabs and truncated to an integer step count.
        levy_draw = distribution_functions.levy(
            CRWLEVYAgent.std_motion_steps, CRWLEVYAgent.levy_exponent)
        self.count_motion_steps = int(math.fabs(levy_draw))

        # Collective-decision bookkeeping.
        self.step_neighbours = []          # neighbours seen since the last decision
        self.step_target = None            # target seen since the last decision
        self.target_committed = None       # id of the committed target, if any
        self.target_value = 0              # quality of the committed target
        self.target_color = "black"        # display colour of the commitment
        self.next_committed_state = None
        self.decision_made_Neigh = False   # whether a decision was made via a neighbour
Example #3
0
    def init_experiment(self):
        """Clear all per-run logs and reset the target/central-place flags."""
        pysage.Agent.init_experiment(self)

        # Initial step budget: |Levy(std, exponent)| truncated to an integer.
        self.count_motion_steps = int(math.fabs(distribution_functions.levy(
            CRWLEVYAgent.std_motion_steps, CRWLEVYAgent.levy_exponent)))

        # Empty the logs in place (slice deletion) so any external references
        # to these lists remain valid.
        for log in (self.inventory, self.received_targets,
                    self.visited_target_id, self.step_on_target_time,
                    self.distance_from_centre):
            del log[:]
        # del self.step_on_central_place_time[:]

        self.on_target = False
        self.on_central_place = False
Example #4
0
    def control(self, num_steps):
        """One simulation step: update target/neighbour bookkeeping, run the
        commitment decision every ``self.arena.decision_step`` steps
        (discovery / recruitment when uncommitted, abandonment /
        cross-inhibition when committed), then apply the Levy-modulated
        correlated random walk.

        ``num_steps`` is the global step counter used to schedule decisions.
        NOTE(review): Python 2 syntax (``print`` statements below) — confirm
        target runtime before porting.
        """

        # first check if the agent is passing over any target
        if self.on_target and self.current_target is not None:
            # Leave the current target once we move past its radius.
            if (self.current_target.position -
                    self.position).get_length() > self.current_target.size:
                self.on_target = False
                self.current_target = None
        else:
            # Not on a target: scan for the first one whose radius contains us.
            self.on_target = False
            self.current_target = None
            for t in self.arena.targets:
                if (t.position - self.position).get_length() < t.size:
                    self.on_target = True
                    self.current_target = t
                    # check during each time step if the agent is at a target.
                    # If yes, save target information to be used during N step
                    # for decision when agent might not be under a target anymore
                    self.step_target = t
                    break

        # check all neighbors every time step to save their info for later processing
        # in case no neighbors are located at N step when decision is made
        step_neighbours = self.arena.get_neighbour_agents(
            self, CRWLEVYAgent.interaction_range)
        # if any neighbors available, save to the agent data
        # (only overwrite the cache when the current step found someone)
        if step_neighbours:
            self.step_neighbours = step_neighbours

        # make a decision every N time steps
        if num_steps % self.arena.decision_step == 0:
            # calculate time scaling from robot to macro
            # (rounded to 7 decimals; scales per-step rates to the decision period)
            TimeScaling = round(
                (self.arena.timestep_length * self.arena.time_scale *
                 self.arena.decision_step), 7)

            # decision making
            if self.target_committed is None:
                # Discovery: commit with probability proportional to the
                # value of the target found since the last decision.
                discovery_value = 0
                discovery_target = None
                discovery_color = "black"
                if self.on_target or self.step_target:
                    # if there is no current target, but has one target previously
                    # visited then use this as the current target
                    if self.step_target:
                        self.current_target = self.step_target
                    discovery_value = self.current_target.value * TimeScaling
                    discovery_target = self.current_target.id
                    discovery_color = self.current_target.color

                # Recruitment: a random neighbour (current, or cached from a
                # previous step) may recruit with probability proportional to
                # its own commitment value.
                recruitment_value = 0
                recruitment_target = None
                recruitment_color = "black"
                neighbours = self.arena.get_neighbour_agents(
                    self, CRWLEVYAgent.interaction_range)
                if neighbours or self.step_neighbours:
                    if neighbours:
                        selected_neighbour = random.choice(neighbours)
                    # if neighbor info was saved previously, also recruit
                    elif self.step_neighbours:
                        selected_neighbour = random.choice(
                            self.step_neighbours)
                    recruitment_value = selected_neighbour.target_value * TimeScaling
                    recruitment_target = selected_neighbour.target_committed
                    recruitment_color = selected_neighbour.target_color

                # The two transition probabilities must fit in [0, 1].
                if discovery_value + recruitment_value > 1:
                    print "[ERROR] Probabilities go over maximimum value"
                    print repr(discovery_value + recruitment_value)
                    sys.exit(2)

                # self.decision_made_Neigh = False
                # self.decision_made_Targ = False
                # Single uniform draw partitioned into
                # [0, discovery) | [discovery, discovery+recruitment) | rest.
                rand_number = random.uniform(0, 1)
                if rand_number < discovery_value:
                    # Commit via discovery; store the unscaled per-step value.
                    self.target_committed = discovery_target
                    self.target_value = discovery_value / TimeScaling
                    self.target_color = discovery_color
                    self.arena.update_target_commitment(discovery_target, 1)
                    # self.decision_made_Targ = True

                elif rand_number < discovery_value + recruitment_value:
                    # Commit via recruitment to the neighbour's target.
                    self.target_committed = recruitment_target
                    self.target_value = recruitment_value / TimeScaling
                    self.target_color = recruitment_color
                    self.step_neighbours = None  # reset list of neighbors after decision
                    self.arena.update_target_commitment(recruitment_target, 1)
                #     self.decision_made_Neigh = True
                #
                # else: self.decision_made_Neigh = False

            else:
                # Abandonment: rate inversely proportional to the committed
                # target's value (better targets are abandoned less often).
                prob_abandonment = 1 / self.target_value * TimeScaling  #0.5; #1.0 - self.current_target.value

                # Cross-Inhibition: a neighbour committed to a DIFFERENT
                # target can knock this agent back to the uncommitted state.
                neighbours = self.arena.get_neighbour_agents(
                    self, CRWLEVYAgent.interaction_range)
                prob_crossinhibition = 0
                if neighbours or self.step_neighbours:
                    if neighbours:
                        selected_neighbour = random.choice(neighbours)
                    # if neighbor info was saved previously, also do cross-inhibition
                    elif self.step_neighbours:
                        selected_neighbour = random.choice(
                            self.step_neighbours)

                    if selected_neighbour.target_committed != self.target_committed:
                        prob_crossinhibition = selected_neighbour.target_value * TimeScaling  #selected_neighbour.target_value
                        # prob_crossinhibition = 0.3 * TimeScaling #selected_neighbour.target_value

                if prob_abandonment + prob_crossinhibition > 1.0:
                    print "[ERROR] Probabilities go over maximimum value"
                    sys.exit(2)

                # self.decision_made_Neigh = False
                if random.uniform(0,
                                  1) < prob_abandonment + prob_crossinhibition:
                    # Drop the commitment and notify the arena tally.
                    self.arena.update_target_commitment(
                        self.target_committed, -1)
                    self.target_committed = None
                    self.target_value = 0
                    self.target_color = "black"
                    self.step_neighbours = None  # reset list of neighbors when decision made
                    # self.decision_made_Neigh = True

            self.step_target = None  # reset info
            self.step_neighbours = None  # reset info

        # agent basic movement: go straight
        self.apply_velocity = pysage.Vec2d(CRWLEVYAgent.linear_speed, 0)
        self.apply_velocity.rotate(self.velocity.get_angle())

        # agent random walk: decide step length and turning angle
        self.count_motion_steps -= 1
        if self.count_motion_steps <= 0:
            # step length: |Levy(std, exponent)| truncated to an integer
            self.count_motion_steps = int(
                math.fabs(
                    distribution_functions.levy(CRWLEVYAgent.std_motion_steps,
                                                CRWLEVYAgent.levy_exponent)))
            # turning angle: uniform when uncorrelated, wrapped-Cauchy otherwise
            crw_angle = 0
            if CRWLEVYAgent.CRW_exponent == 0:
                crw_angle = random.uniform(0, (2 * math.pi))
            else:
                crw_angle = wrapcauchy.rvs(CRWLEVYAgent.CRW_exponent)

            self.apply_velocity.rotate(crw_angle)
Example #5
0
    def control(self):
        """One simulation step: record target visits, periodically log the
        distance from the central place (unbounded arenas), then apply the
        Levy-modulated correlated random walk.

        Bug fix: the previous version set ``self.on_target = False`` inside
        the target loop for every target that did NOT contain the agent, so
        unless the agent sat on the LAST target in ``self.arena.targets`` the
        flag ended each step False and the entry bookkeeping re-fired every
        step (duplicating ``visited_target_id``/``step_on_target_time``
        entries and zeroing ``count_motion_steps``).  The containment test is
        now separated from the state update, matching the ``break`` pattern
        used by the other ``control`` implementations in this file.
        """
        # Find the first target whose radius contains the agent, if any.
        target_here = None
        for t in self.arena.targets:
            if (t.position - self.position).get_length() < t.size:
                target_here = t
                break

        if target_here is not None:
            if not self.on_target:
                # Entering a target: log the visit exactly once.
                self.on_target = True
                self.count_motion_steps = 0  # force a fresh walk draw on arrival
                if target_here.id not in self.inventory:
                    self.inventory.append(target_here.id)
                self.visited_target_id.append(target_here.id)
                if len(self.step_on_target_time) > 0:
                    # Store the inter-arrival interval, not the absolute step.
                    self.step_on_target_time.append(
                        self.arena.num_steps - self.step_on_target_time[-1])
                else:
                    self.step_on_target_time.append(self.arena.num_steps)
        else:
            self.on_target = False

        # In unbounded arenas, sample the distance from the central place
        # every 5000 steps (skipping step 0).
        if self.arena.arena_type == "unbounded" and (self.arena.num_steps +
                                                     1) % 5000 == 0:
            if self.arena.num_steps > 0:
                self.distance_from_centre.append(
                    (self.arena.central_place.position -
                     self.position).get_length())

        # NOTE(review): central-place residence tracking
        # (step_on_central_place_time) was commented out in the original and
        # has been removed here; restore from history if it is needed again.

        # agent basic movement: go straight
        self.apply_velocity = pysage.Vec2d(CRWLEVYAgent.linear_speed, 0)
        self.apply_velocity.rotate(self.velocity.get_angle())

        # agent random walk: decide step length and turning angle
        self.count_motion_steps -= 1
        if self.count_motion_steps <= 0:
            # step length: |Levy(std, exponent)| truncated to an integer
            self.count_motion_steps = int(
                math.fabs(
                    distribution_functions.levy(CRWLEVYAgent.std_motion_steps,
                                                CRWLEVYAgent.levy_exponent)))
            # turning angle: in unbounded arenas, with probability
            # bias_probability, turn straight toward the central place;
            # otherwise uniform (CRW_exponent == 0) or wrapped-Cauchy
            crw_angle = 0
            if self.arena.arena_type == "unbounded" and (
                    scipy.random.random(1) <= CRWLEVYAgent.bias_probability):
                crw_angle = (self.arena.central_place.position - self.position
                             ).get_angle() - self.velocity.get_angle()
            elif CRWLEVYAgent.CRW_exponent == 0:
                crw_angle = random.uniform(0, (2 * math.pi))
            else:
                crw_angle = wrapcauchy.rvs(CRWLEVYAgent.CRW_exponent)

            self.apply_velocity.rotate(crw_angle)