Code Example #1
File: forces.py  Project: ahumanita/PySocialForce
    def _get_force(self):
        lambda_importance = self.config("lambda_importance", 2.0)
        gamma = self.config("gamma", 0.35)
        n = self.config("n", 2)
        n_prime = self.config("n_prime", 3)

        pos_diff = stateutils.each_diff(self.peds.pos())  # n*(n-1)x2 other - self
        diff_direction, diff_length = stateutils.normalize(pos_diff)
        vel_diff = -1.0 * stateutils.each_diff(self.peds.vel())  # n*(n-1)x2 self - other

        # compute interaction direction t_ij
        interaction_vec = lambda_importance * vel_diff + diff_direction
        interaction_direction, interaction_length = stateutils.normalize(interaction_vec)

        # compute angle theta (between interaction and position difference vector)
        theta = stateutils.vector_angles(interaction_direction) - stateutils.vector_angles(
            diff_direction
        )
        # compute model parameter B = gamma * ||D||
        B = gamma * interaction_length

        force_velocity_amount = np.exp(-1.0 * diff_length / B - np.square(n_prime * B * theta))
        force_angle_amount = -np.sign(theta) * np.exp(
            -1.0 * diff_length / B - np.square(n * B * theta)
        )
        force_velocity = force_velocity_amount.reshape(-1, 1) * interaction_direction
        force_angle = force_angle_amount.reshape(-1, 1) * stateutils.left_normal(
            interaction_direction
        )

        force = force_velocity + force_angle  # n*(n-1) x 2
        force = np.sum(force.reshape((self.peds.size(), -1, 2)), axis=1)
        return force * self.factor
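
The examples on this page call several helpers from stateutils that are not shown here. The sketch below is inferred purely from the call sites (argument shapes, return values, and the inline comments above); it is an assumption for readability, not the project's actual stateutils module:

import numpy as np

# Sketch only: reconstructed from how the helpers are called above,
# not copied from the project.

def normalize(vecs):
    """Unit vectors and their lengths for an (m, 2) array."""
    norms = np.linalg.norm(vecs, axis=-1)
    return vecs / np.maximum(norms, 1e-8)[:, np.newaxis], norms

def each_diff(vecs):
    """Pairwise differences other - self, flattened to an (n*(n-1), 2) array."""
    n = len(vecs)
    diff = vecs[np.newaxis, :, :] - vecs[:, np.newaxis, :]  # diff[i, j] = vecs[j] - vecs[i]
    return diff[~np.eye(n, dtype=bool)]

def vector_angles(vecs):
    """Orientation of each 2D vector, in radians."""
    return np.arctan2(vecs[:, 1], vecs[:, 0])

def left_normal(vecs):
    """Each 2D vector rotated 90 degrees counter-clockwise."""
    return np.stack([-vecs[:, 1], vecs[:, 0]], axis=-1)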
Code Example #2
    def _get_force(self):
        relaxation_time = self.config("relaxation_time", 0.5)
        goal_threshold = self.config("goal_threshold", 0.1)
        pos = self.peds.pos()
        vel = self.peds.vel()
        goal = self.peds.goal()

        # Intermediate waypoint at the origin that agents head toward first.
        goal0 = np.zeros((len(self.peds.pos()), 2))
        first_vectors = goal0 - pos
        direction, dist = stateutils.normalize(first_vectors)

        destination_vectors = goal - pos
        directions_i, dist_i = stateutils.normalize(destination_vectors)
        # Switch to the real goal once an agent comes within distance 6 of the
        # waypoint, or has already been registered in met_point.
        for i, _ in enumerate(direction):
            if dist[i] < 6 or i in self.peds.met_point:
                direction[i] = directions_i[i]
                dist[i] = dist_i[i]
                if i not in self.peds.met_point:
                    self.peds.met_point.append(i)

        # direction, dist = stateutils.normalize(goal - pos)
        force = np.zeros((self.peds.size(), 2))
        force[dist > goal_threshold] = (
            direction * self.peds.max_speeds.reshape((-1, 1)) - vel.reshape(
                (-1, 2)))[dist > goal_threshold, :]
        force[dist <= goal_threshold] = -1.0 * vel[dist <= goal_threshold]
        force /= relaxation_time
        return force * self.factor
Code Example #3
File: forces.py  Project: ahumanita/PySocialForce
    def _get_force(self):
        forces = np.zeros((self.peds.size(), 2))
        directions, dist = stateutils.desired_directions(self.peds.state)
        if self.peds.has_group():
            for group in self.peds.groups:
                group_size = len(group)
                # 1-agent groups don't need to compute this
                if group_size <= 1:
                    continue
                member_pos = self.peds.pos()[group, :]
                member_directions = directions[group, :]
                member_dist = dist[group]
                # use center of mass without the current agent
                relative_com = np.array(
                    [
                        stateutils.center_of_mass(member_pos[np.arange(group_size) != i, :2])
                        - member_pos[i, :]
                        for i in range(group_size)
                    ]
                )

                com_directions, com_dist = stateutils.normalize(relative_com)
                # angle between walking direction and center of mass
                element_prod = np.array(
                    [np.dot(d, c) for d, c in zip(member_directions, com_directions)]
                )
                force = (
                    com_dist.reshape(-1, 1)
                    * element_prod.reshape(-1, 1)
                    / member_dist.reshape(-1, 1)
                    * member_directions
                )
                forces[group, :] += force

        return forces * self.factor
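
Code Examples #3 to #5 also use stateutils.center_of_mass and stateutils.desired_directions. Again inferred from the call sites only, and assuming a state layout with positions in columns 0:2 and goals in columns 4:6 (a guess, not confirmed by this page):

import numpy as np

def center_of_mass(points):
    """Mean position of an (m, 2) array of points."""
    return np.mean(points, axis=0)

def desired_directions(state):
    """Unit vectors and distances from current positions toward goals.
    Assumes state columns 0:2 are positions and 4:6 are goals (a guess)."""
    vecs = state[:, 4:6] - state[:, 0:2]
    norms = np.linalg.norm(vecs, axis=-1)
    return vecs / np.maximum(norms, 1e-8)[:, np.newaxis], norms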
Code Example #4
File: forces.py  Project: ahumanita/PySocialForce
    def _get_force(self):
        forces = np.zeros((self.peds.size(), 2))
        vision_angle = self.config("fov_phi", 100.0)  # in degrees, compared against com_angles below
        directions, _ = stateutils.desired_directions(self.peds.state)
        if self.peds.has_group():
            for group in self.peds.groups:
                group_size = len(group)
                # 1-agent groups don't need to compute this
                if group_size <= 1:
                    continue
                member_pos = self.peds.pos()[group, :]
                member_directions = directions[group, :]
                # use center of mass without the current agent
                relative_com = np.array(
                    [
                        stateutils.center_of_mass(member_pos[np.arange(group_size) != i, :2])
                        - member_pos[i, :]
                        for i in range(group_size)
                    ]
                )

                com_directions, _ = stateutils.normalize(relative_com)
                # angle between walking direction and center of mass
                element_prod = np.array(
                    [np.dot(d, c) for d, c in zip(member_directions, com_directions)]
                )
                com_angles = np.degrees(np.arccos(element_prod))
                rotation = np.radians(
                    [a - vision_angle if a > vision_angle else 0.0 for a in com_angles]
                )
                force = -rotation.reshape(-1, 1) * member_directions
                forces[group, :] += force

        return forces * self.factor
Code Example #5
File: forces.py  Project: ahumanita/PySocialForce
    def _get_force(self):
        forces = np.zeros((self.peds.size(), 2))
        if self.peds.has_group():
            for group in self.peds.groups:
                threshold = (len(group) - 1) / 2
                member_pos = self.peds.pos()[group, :]
                com = stateutils.center_of_mass(member_pos)
                force_vec = com - member_pos
                vectors, norms = stateutils.normalize(force_vec)
                vectors[norms < threshold] = [0, 0]
                forces[group, :] += vectors
        return forces * self.factor
Code Example #6
File: forces.py  Project: ahumanita/PySocialForce
    def _get_force(self):
        relaxation_time = self.config("relaxation_time", 0.5)
        goal_threshold = self.config("goal_threshold", 0.1)
        pos = self.peds.pos()
        vel = self.peds.vel()
        goal = self.peds.goal()
        direction, dist = stateutils.normalize(goal - pos)
        force = np.zeros((self.peds.size(), 2))
        force[dist > goal_threshold] = (
            direction * self.peds.max_speeds.reshape((-1, 1)) - vel.reshape((-1, 2))
        )[dist > goal_threshold, :]
        force[dist <= goal_threshold] = -1.0 * vel[dist <= goal_threshold]
        force /= relaxation_time
        return force * self.factor
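
As a quick sanity check of the goal-attraction term used in Code Examples #2 and #6: with desired direction (1, 0), maximum speed 1.3, current velocity (0, 0) and relaxation_time = 0.5, the force is (1.3 - 0) / 0.5 = 2.6 along x. A standalone reproduction with purely illustrative numbers, not values from the project's configuration:

import numpy as np

direction = np.array([[1.0, 0.0]])   # unit direction toward the goal
max_speeds = np.array([1.3])         # illustrative maximum speed
vel = np.array([[0.0, 0.0]])         # current velocity
relaxation_time = 0.5

force = (direction * max_speeds.reshape((-1, 1)) - vel) / relaxation_time
print(force)  # [[2.6 0. ]]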
Code Example #7
File: forces.py  Project: ahumanita/PySocialForce
    def _get_force(self):
        threshold = self.config("threshold", 0.5)
        forces = np.zeros((self.peds.size(), 2))
        if self.peds.has_group():
            for group in self.peds.groups:
                size = len(group)
                member_pos = self.peds.pos()[group, :]
                diff = stateutils.each_diff(member_pos)  # others - self
                _, norms = stateutils.normalize(diff)
                diff[norms > threshold, :] = 0
                # forces[group, :] += np.sum(diff, axis=0)
                forces[group, :] += np.sum(diff.reshape((size, -1, 2)), axis=1)

        return forces * self.factor
Code Example #8
File: forces.py  Project: ahumanita/PySocialForce
    def _get_force(self):
        sigma = self.config("sigma", 0.2)
        threshold = self.config("threshold", 0.2) + self.peds.agent_radius
        force = np.zeros((self.peds.size(), 2))
        fires = self.scene.get_fires()
        if fires is None:
            return force
        fires = np.vstack(fires)
        pos = self.peds.pos()

        for i, p in enumerate(pos):
            diff = p - fires
            directions, dist = stateutils.normalize(diff)
            dist = dist - self.peds.agent_radius
            if np.all(dist >= threshold):
                continue
            dist_mask = dist < threshold
            directions[dist_mask] *= np.exp(-dist[dist_mask].reshape(-1, 1) / sigma)
            force[i] = np.sum(directions[dist_mask], axis=0)

        return force * self.factor
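
Each _get_force above returns an (n, 2) array of per-pedestrian contributions, already scaled by self.factor. Below is a minimal sketch of how such force terms might be combined into one simulation step; the step_once name, the dt value, and the forward-Euler update are assumptions for illustration, not this project's Simulator API:

import numpy as np

def step_once(peds, force_terms, dt=0.4):
    """Hypothetical update: sum all force terms, then take one Euler step."""
    total = np.sum([f._get_force() for f in force_terms], axis=0)  # (n, 2)
    vel = peds.vel() + dt * total
    pos = peds.pos() + dt * vel
    return pos, vel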