def _get_force(self):
    """Gaze force keeping group members oriented toward their group.

    For each member whose angle between walking direction and the center
    of mass of the *other* members exceeds the field-of-view angle
    ``fov_phi`` (degrees), a force opposing the walking direction is
    applied, proportional to the excess angle (in radians).

    Returns:
        (n, 2) array of forces for all pedestrians, scaled by ``self.factor``.
    """
    forces = np.zeros((self.peds.size(), 2))
    vision_angle = self.config("fov_phi", 100.0)  # field-of-view angle in degrees
    directions, _ = stateutils.desired_directions(self.peds.state)
    if self.peds.has_group():
        for group in self.peds.groups:
            group_size = len(group)
            # 1-agent groups don't need to compute this
            if group_size <= 1:
                continue
            member_pos = self.peds.pos()[group, :]
            member_directions = directions[group, :]
            # use center of mass without the current agent
            relative_com = np.array(
                [
                    stateutils.center_of_mass(member_pos[np.arange(group_size) != i, :2])
                    - member_pos[i, :]
                    for i in range(group_size)
                ]
            )
            com_directions, _ = stateutils.normalize(relative_com)
            # angle between walking direction and center of mass
            element_prod = np.array(
                [np.dot(d, c) for d, c in zip(member_directions, com_directions)]
            )
            # Floating-point error can push a unit-vector dot product slightly
            # outside [-1, 1], which would make arccos return NaN — clip first.
            element_prod = np.clip(element_prod, -1.0, 1.0)
            com_angles = np.degrees(np.arccos(element_prod))
            # only members looking more than fov_phi away from the group
            # center feel a corrective rotation
            rotation = np.radians(
                [a - vision_angle if a > vision_angle else 0.0 for a in com_angles]
            )
            force = -rotation.reshape(-1, 1) * member_directions
            forces[group, :] += force
    return forces * self.factor
def _get_force(self):
    """Group cohesion force pulling members toward the group center of mass.

    The attraction toward the (other members') center of mass is weighted
    by its distance, projected onto the walking direction, and normalized
    by the member's distance to its goal.

    Returns:
        (n, 2) array of forces for all pedestrians, scaled by ``self.factor``.
    """
    forces = np.zeros((self.peds.size(), 2))
    directions, dist = stateutils.desired_directions(self.peds.state)
    if self.peds.has_group():
        for group in self.peds.groups:
            group_size = len(group)
            # 1-agent groups don't need to compute this
            if group_size <= 1:
                continue
            member_pos = self.peds.pos()[group, :]
            member_directions = directions[group, :]
            member_dist = dist[group]
            # use center of mass without the current agent
            relative_com = np.array(
                [
                    stateutils.center_of_mass(member_pos[np.arange(group_size) != i, :2])
                    - member_pos[i, :]
                    for i in range(group_size)
                ]
            )
            com_directions, com_dist = stateutils.normalize(relative_com)
            # angle between walking direction and center of mass
            element_prod = np.array(
                [np.dot(d, c) for d, c in zip(member_directions, com_directions)]
            )
            # Guard against division by zero: a member standing exactly at its
            # goal has member_dist == 0, which would yield inf/NaN forces.
            safe_dist = np.where(member_dist > 0.0, member_dist, 1.0)
            force = (
                com_dist.reshape(-1, 1)
                * element_prod.reshape(-1, 1)
                / safe_dist.reshape(-1, 1)
                * member_directions
            )
            forces[group, :] += force
    return forces * self.factor
def step(self, force, groups=None):
    """Advance all pedestrians by one simulation step under ``force``."""
    # integrate the force into the desired velocity, then cap at max speed
    velocity = self.capped_velocity(
        self.vel() + self.step_width * force, self.max_speeds
    )
    # pedestrians closer than 0.5 to their goal stop moving
    _, goal_dist = stateutils.desired_directions(self.state)
    velocity[goal_dist < 0.5] = [0, 0]
    # advance positions and record the new velocities in the state
    next_state = self.state
    next_state[:, 0:2] += velocity * self.step_width
    next_state[:, 2:4] = velocity
    self.update(next_state, self.groups if groups is None else groups)
def grad_r_ab(self, state, delta=1e-3):
    """Gradient of the potential w.r.t. r_ab via forward finite differences.

    r_ab is perturbed by ``delta`` along x and y separately; the difference
    quotient against the unperturbed value gives each partial derivative.
    Diagonal (self-interaction) entries are zeroed out.
    """
    r_ab = self.r_ab(state)
    speeds = stateutils.speeds(state)
    desired_directions = stateutils.desired_directions(state)
    base = self.value_r_ab(r_ab, speeds, desired_directions)
    partials = []
    for offset in (np.array([[[delta, 0.0]]]), np.array([[[0.0, delta]]])):
        shifted = self.value_r_ab(r_ab + offset, speeds, desired_directions)
        diff = (shifted - base) / delta
        # remove gradients from self-interactions
        np.fill_diagonal(diff, 0.0)
        partials.append(diff)
    return np.stack(partials, axis=-1)
def desired_directions(self):
    """Unit direction vectors from each pedestrian toward its goal."""
    directions, _ = stateutils.desired_directions(self.state)
    return directions
def step(self, force, groups=None):
    """Move peds according to forces, with fire-avoidance, smoke and panic.

    Extends the basic step with: steering away from the fire, random
    heading noise driven by smoke/panic, border-escape detection, exit
    chaining, follower behavior, and smoke/panic evolution over time.

    State columns used here (assumed layout — confirm against the state
    definition): 0:2 position, 2:4 velocity, 7 escaped flag, 8 target exit
    id, 9 smoke exposure, 10 speed/health factor, 11 panic level.
    """
    next_state = self.state
    # Force is deliberately NOT added here; the heading is computed from the
    # raw desired velocity first, and the force is applied afterwards.
    desired_velocity = self.vel()
    # Avoid fire
    if self.simulator.get_fires() is not None:
        # Get position opposite to fire
        f = self.simulator.get_fires()[0]
        fire_center = np.array([
            f[:, 0][0] + (f[:, 0][-1] - f[:, 0][0]) / 2,
            f[:, 1][0] + (f[:, 1][-1] - f[:, 1][0]) / 2
        ])
        opp_fire_dir = -(fire_center - next_state[:, :2])
        target_dir = desired_velocity
        # Get angles (normalized to [0, 2*pi))
        target_angle = np.mod(
            np.arctan2(target_dir[:, 1], target_dir[:, 0]), 2 * np.pi)
        fire_angle = np.mod(
            np.arctan2(opp_fire_dir[:, 1], opp_fire_dir[:, 0]), 2 * np.pi)
        # Average angles and give weight of smoke impact; esc_fir weights the
        # fire-escape direction against the original target direction, with
        # smoke exposure (col 9) shifting weight further toward escape.
        theta = np.zeros(len(target_angle))
        index1 = np.where(np.abs(target_angle - fire_angle) >= np.pi)
        index2 = np.where(np.abs(target_angle - fire_angle) < np.pi)
        esc_fir = self.esc_fir
        # index1: angles wrap around 2*pi, so the average is shifted
        theta[index1] = ((1 - esc_fir - (1 - esc_fir) * next_state[index1, 9]) *
                         target_angle[index1] +
                         (esc_fir + (1 - esc_fir) * next_state[index1, 9]) *
                         fire_angle[index1]) - 2 * np.pi * (1 - esc_fir)
        theta[index2] = (
            (1 - esc_fir - (1 - esc_fir) * next_state[index2, 9]) *
            target_angle[index2] +
            (esc_fir + (1 - esc_fir) * next_state[index2, 9]) *
            fire_angle[index2])
    else:
        target_dir = desired_velocity
        theta = np.mod(np.arctan2(target_dir[:, 1], target_dir[:, 0]),
                       2 * np.pi)
    # Add random angle due to smoke (col 9) and panic (col 11)
    theta += (next_state[:, 9] + next_state[:, 11]) * np.random.uniform(
        -np.pi, np.pi, len(desired_velocity))
    # Rebuild the velocity vector from the new heading, keeping the magnitude
    L_vel = np.sqrt(desired_velocity[:, 0]**2 + desired_velocity[:, 1]**2)
    desired_velocity[:, 0] = L_vel * np.cos(theta)
    desired_velocity[:, 1] = L_vel * np.sin(theta)
    # desired velocity: apply the social force and cap at max speed
    desired_velocity = desired_velocity + self.step_width * force
    desired_velocity = self.capped_velocity(desired_velocity, self.max_speeds)
    # stop when arrived
    desired_velocity[
        stateutils.desired_directions(self.state)[1] < 0.5] = [0, 0]
    # update state; movement is scaled by health (col 10) and sped up by
    # panic (col 11)
    next_state[:, 0] += desired_velocity[:, 0] * self.step_width * \
        next_state[:, 10] * (1 + next_state[:, 11])
    next_state[:, 1] += desired_velocity[:, 1] * self.step_width * \
        next_state[:, 10] * (1 + next_state[:, 11])
    next_state[:, 2:4] = desired_velocity
    next_groups = self.groups
    # mark agents that crossed the simulation border as escaped
    if self.border is not None:
        escaped_rows = np.where((next_state[:, 0] <= self.border[0]) |
                                (next_state[:, 0] >= self.border[1]) |
                                (next_state[:, 1] <= self.border[2]) |
                                (next_state[:, 1] >= self.border[3]))[0]
        if escaped_rows.size > 0:
            next_state[escaped_rows, 7] = 1
    # TODO: efficient way to do this?
    # Update target to next exit when agent is close enough
    if self.simulator.get_exits() is not None:
        exits = self.simulator.get_exits()
        exit_as_target = np.where(next_state[:, 8] >= 0)[0]
        if exit_as_target.size > 0:
            for p in np.nditer(exit_as_target):
                if np.linalg.norm(next_state[p, :2] - exits[int(next_state[
                        p, 8]), :2]) < 2 * self.agent_radius:
                    next_exit_id = exits[int(next_state[p, 8]), 3]
                    if next_exit_id is not None:
                        self.set_exit_goal(p, next_exit_id)
    # Update the directions of those that don't follow an exit yet
    self.update_target(next_state)
    # Qiu, 2009: Following people get as direction the average direction
    # of the other group members
    self.follower_model(next_state)
    # Smoke and panic over time
    if self.simulator.get_fires() is not None:
        f = self.simulator.get_fires()[0]
        # BUGFIX: the original code did `~np.where(...)[0]`, which applies
        # bitwise NOT to the *integer indices* of escaped agents (~i == -(i+1))
        # and therefore increased panic for the wrong rows. Use a boolean mask
        # so panic grows only for agents that have NOT escaped.
        not_escaped = next_state[:, 7] != 1.0
        next_state[not_escaped, 11] += self.panic_change_t
        next_state, new_smoke_radius = stateutils.smoke(
            next_state, self.simulator.get_smoke_radius(), f,
            self.smoke_change, self.health_change, self.panic_change_s)
        self.simulator.set_smoke_radius(new_smoke_radius)
        self.smoke_radii.append(new_smoke_radius)
    if groups is not None:
        next_groups = groups
    self.update(next_state, next_groups)
def __call__(self, state):
    """Evaluate the pairwise potential for the given pedestrian state."""
    r_ab = self.r_ab(state)
    speeds = stateutils.speeds(state)
    # NOTE(review): stateutils.desired_directions returns a tuple elsewhere
    # in this file; the full return value is forwarded here, matching
    # grad_r_ab — confirm value_r_ab expects it in this form.
    directions = stateutils.desired_directions(state)
    return self.value_r_ab(r_ab, speeds, directions)