Example #1
    def read_agents_from_file(self, filename):

        wb = xlrd.open_workbook(filename)
        sheet = wb.sheet_by_index(0)

        self.coordinates = []
        self.colors = []
        self.max_speeds = []
        for i in range(1, sheet.nrows):
            # agent_id = sheet.cell_value(i, 0)
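            # spreadsheet layout: col 0 = agent id, col 1 = "x,y" position, col 2 = "r,g,b" color, col 3 = max speed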
            pos = sheet.cell_value(i, 1).split(',')
            color = sheet.cell_value(i, 2).split(',')
            max_speed = float(sheet.cell_value(i, 3))

            pos_x, pos_y = float(pos[0]), float(pos[1])
            pos = Vector2(pos_x, pos_y)
            color = torch.tensor([int(color[0]), int(color[1]), int(color[2])])
            self.add_agent(pos)
            self.agents_[i-1].max_speed_ = max_speed
            self.agents_[i-1].color_ = np.array(color)
            self.coordinates.append(pos_x)
            self.coordinates.append(pos_y)
            self.colors.append(color)
            self.max_speeds.append(max_speed)
            dists = self.dist_to_goals(pos, self.goals[i-1])
            self.d_prev.append(dists)
            self.v_prev.append(Vector2(0., 0.))
        print('load done')
Example #2
    def reset_room(self):
        index = 0
        while index < self.num_agents:
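            # delete_agent() is assumed to shrink num_agents; otherwise this loop would not terminate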
            self.delete_agent(index)

        if self.open_time == 0:
            self.goals = [[self.exit1] for _ in range(glo_num_agents)]
        else:
            self.goals = [[self.exit1, self.exit2]
                          for _ in range(glo_num_agents)]
        self.global_time_ = 0
        self.kd_tree_.agents_ = None
        self.kd_tree_.agentTree_ = None
        self.d_prev = []
        self.v_prev = []
        # self.arrived = [False for _ in range(glo_num_agents)]
        # self.random_agents(glo_num_agents, glo_center, glo_border, glo_wall_width)
        for i in range(glo_num_agents):
            pos = Vector2(self.coordinates[i*2], self.coordinates[i*2+1])
            self.add_agent(pos)
            self.agents_[i].max_speed_ = Max_Speed  # self.max_speeds[i]
            self.agents_[i].color_ = self.colors[i]
            self.arrived.append(False)
            dists = self.dist_to_goals(pos, self.goals[i])
            self.d_prev.append(dists)
            self.v_prev.append(Vector2(0., 0.))
Example #3
 def random_agents(self, num_agents, center, border, w_w, ratio=[1, 1]):
     self.coordinates = []
     self.max_speeds = np.random.random(num_agents) * 3. + Max_Speed
     exit_1 = int(ratio[0] * num_agents / sum(ratio))
     exit_2 = num_agents - exit_1  # i.e. int(ratio[1] * num_agents / sum(ratio)), up to rounding
     exits_count = [exit_1, exit_2]
     ratio_count = [0, 0]
     i = 0
     while i < (exit_1+exit_2):
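         # sample a position uniformly inside the room, keeping a clearance of three wall widths from the border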
         coord = center[0]-border[0]+w_w*3 + \
             np.random.random(2) * (border[0]*2-w_w*6)
         pos = Vector2(coord[0], coord[1])
         dists = self.dist_to_goals(pos, self.goals[i])
         dists = np.array(dists)
         min_index = dists.argmin()
         if ratio_count[min_index] < exits_count[min_index]:
             ratio_count[min_index] += 1
             self.add_agent(pos)
             self.colors.append(np.random.randint(0, 255, 3))
             self.agents_[i].max_speed_ = self.max_speeds[i]
             self.agents_[i].color_ = self.colors[i]
             self.d_prev.append(dists)
             self.v_prev.append(Vector2(0., 0.))
             self.coordinates.append(coord[0])
             self.coordinates.append(coord[1])
             i += 1
     self.save_agents('agents_info.xls')
Example #4
    def setup_scenario(self):
        # Specify the global time step of the simulation.
        self.simulator_.set_time_step(0.25)

        # Specify the default parameters for agents that are subsequently added.
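        # In order: neighbor_dist, max_neighbors, time_horizon, time_horizon_obst, radius, max_speed, velocity.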
        self.simulator_.set_agent_defaults(15.0, 10, 10.0, 10.0, 1.5, 2.0, Vector2(0.0, 0.0))

        # Add agents, specifying their start position, and store their goals on the opposite side of the environment.
        for i in range(250):
            self.simulator_.add_agent(200.0 *
                Vector2(math.cos(i * 2.0 * math.pi / 250.0), math.sin(i * 2.0 * math.pi / 250.0)))
            self.goals_.append(-self.simulator_.agents_[i].position_)
Example #5
 def action_to_vector_discrete(pos, target, direction, prev):
     # go straight if near to the goal
     # min_dist = np.array(prev).min()
     # if min_dist < Radius * 8:
     #     min_index = np.array(prev).argmax()
     #     directV = rvo_math.normalize(target[min_index]-pos)
     #     return directV * Max_Speed
     # rvo_math.normalize(target[0]-pos)  # Vector2(0., 1.)  #
     directV = Vector2(0., 1.)
     # Each discrete action rotates the reference heading directV by a fixed
     # angle (radians, counterclockwise); unknown actions give a zero velocity.
     thetas = {
         Direction.Forward.value: 0.,
         Direction.Backward.value: math.pi,
         Direction.FR.value: -math.pi / 4.,
         Direction.FL.value: math.pi / 4.,
         Direction.BR.value: -3. * math.pi / 4.,
         Direction.BL.value: 3. * math.pi / 4.,
         Direction.Right.value: -math.pi / 2.,
         Direction.Left.value: math.pi / 2.,
     }
     if direction not in thetas:
         return Vector2(0., 0.)
     theta = thetas[direction]
     x1, y1 = directV.x, directV.y
     x2 = math.cos(theta) * x1 - math.sin(theta) * y1
     y2 = math.sin(theta) * x1 + math.cos(theta) * y1
     return Vector2(x2, y2) * Max_Speed
Example #6
 def action_to_vector(pos, target, action, max_speed, ORCA=False):
     angle, speed = action[0], max_speed
     heading = rvo_math.normalize(target-pos)
     # if ORCA:
     #     heading = rvo_math.normalize(target-pos)
     # else:
     #     heading = Vector2(1., 0.)  # rvo_math.normalize(target-pos)
     # angle = -math.pi + random.random() * 2 * math.pi
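     # rotate the unit heading by `angle` radians (standard 2-D rotation matrix)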
     x1, y1 = heading.x, heading.y
     x2 = math.cos(angle) * x1 - math.sin(angle) * y1
     y2 = math.sin(angle) * x1 + math.cos(angle) * y1
     return speed * Vector2(x2, y2)
Example #7
    def set_preferred_velocities(self):
        # Set the preferred velocity to be a vector of unit magnitude (speed) in the direction of the goal.
        for i in range(self.simulator_.num_agents):
            goal_vector = self.goals_[i] - self.simulator_.agents_[i].position_

            if rvo_math.abs_sq(goal_vector) > 1.0:
                goal_vector = rvo_math.normalize(goal_vector)

            self.simulator_.set_agent_pref_velocity(i, goal_vector)

            # Perturb a little to avoid deadlocks due to perfect symmetry.
            angle = random.random() * 2.0 * math.pi
            dist = random.random() * 0.0001

            self.simulator_.set_agent_pref_velocity(
                i, self.simulator_.agents_[i].pref_velocity_ +
                dist * Vector2(math.cos(angle), math.sin(angle)))
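
A scenario class like this is usually driven by a loop that alternates set_preferred_velocities() with one simulation step until the agents reach their goals. The sketch below is only an illustration under stated assumptions: the step method is called step() here (the C++ RVO2 library names it doStep()), and reached_goals() is a hypothetical helper, not part of the code above.

    def run(self, max_steps=2000):
        # Hypothetical driver loop: step() and reached_goals() are assumed
        # names, not confirmed API of the simulator used in these examples.
        step = 0
        while step < max_steps and not self.reached_goals():
            self.set_preferred_velocities()
            self.simulator_.step()
            step += 1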
Example #8
    def setup_scenario(self, num_agents, load_agents=False, read_agents=False):
        self.set_time_step(0.5)
        self.set_agent_defaults(Radius*5, 5, 5.0, 5.0, Radius,
                                Max_Speed, Vector2(0., 0.))
        extent = (-100., -100., 100., 100.)
        if self.scenario == Scenario.One_Exit:
            extent = self.init_single_exit_room(
                num_agents, load_agents=load_agents, read_agents=read_agents)
        elif self.scenario == Scenario.Two_Exits:
            extent = self.init_two_exit_room(
                num_agents, load_agents=load_agents, read_agents=read_agents)

        # view
        self.view = Map_Screen(
            extent[0], extent[2], extent[1], extent[3], width, height)

        # draw only after converting the coordinates to screen space
        img = np.zeros((width, height, 3), np.uint8)
        img = self.draw_polygon(img)
        cv2.imwrite('screenshots/background.png', img)
Example #9
    def setup_scenario(self):
        # Specify the global time step of the simulation.
        self.simulator_.set_time_step(0.25)

        # Specify the default parameters for agents that are subsequently added.
        self.simulator_.set_agent_defaults(15.0, 10, 5.0, 5.0, 2.0, 2.0,
                                           Vector2(0.0, 0.0))

        # Add agents, specifying their start position, and store their goals on the opposite side of the environment.
        for i in range(5):
            for j in range(5):
                self.simulator_.add_agent(
                    Vector2(55.0 + i * 10.0, 55.0 + j * 10.0))
                self.goals_.append(Vector2(-75.0, -75.0))

                self.simulator_.add_agent(
                    Vector2(-55.0 - i * 10.0, 55.0 + j * 10.0))
                self.goals_.append(Vector2(75.0, -75.0))

                self.simulator_.add_agent(
                    Vector2(55.0 + i * 10.0, -55.0 - j * 10.0))
                self.goals_.append(Vector2(-75.0, 75.0))

                self.simulator_.add_agent(
                    Vector2(-55.0 - i * 10.0, -55.0 - j * 10.0))
                self.goals_.append(Vector2(75.0, 75.0))

        # Add (polygonal) obstacles, specifying their vertices in counterclockwise order.
        obstacle1 = []
        obstacle1.append(Vector2(-10.0, 40.0))
        obstacle1.append(Vector2(-40.0, 40.0))
        obstacle1.append(Vector2(-40.0, 10.0))
        obstacle1.append(Vector2(-10.0, 10.0))
        self.simulator_.add_obstacle(obstacle1)
        self.obstacles_.append(obstacle1)

        obstacle2 = []
        obstacle2.append(Vector2(10.0, 40.0))
        obstacle2.append(Vector2(10.0, 10.0))
        obstacle2.append(Vector2(40.0, 10.0))
        obstacle2.append(Vector2(40.0, 40.0))
        self.simulator_.add_obstacle(obstacle2)
        self.obstacles_.append(obstacle2)

        obstacle3 = []
        obstacle3.append(Vector2(10.0, -40.0))
        obstacle3.append(Vector2(40.0, -40.0))
        obstacle3.append(Vector2(40.0, -10.0))
        obstacle3.append(Vector2(10.0, -10.0))
        self.simulator_.add_obstacle(obstacle3)
        self.obstacles_.append(obstacle3)

        obstacle4 = []
        obstacle4.append(Vector2(-10.0, -40.0))
        obstacle4.append(Vector2(-10.0, -10.0))
        obstacle4.append(Vector2(-40.0, -10.0))
        obstacle4.append(Vector2(-40.0, -40.0))
        self.simulator_.add_obstacle(obstacle4)
        self.obstacles_.append(obstacle4)

        # Process the obstacles so that they are accounted for in the simulation.
        self.simulator_.process_obstacles()
Example #10
 def screen_to_map(self, screen):
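     # screen y grows downward, so flip it with win_y before scaling back to map units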
     map_x = self.scale_x*screen[0] + self.map_minX
     map_y = self.scale_y*(self.win_y-screen[1]) + self.map_minY
     return Vector2(map_x, map_y)
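
The reverse transform (map coordinates back to screen pixels) is not shown; inverting the two formulas above gives the following sketch, where map_to_screen is a hypothetical companion helper using the same attributes:

 def map_to_screen(self, map_point):
     # hypothetical inverse of screen_to_map; the win_y flip mirrors the one above
     screen_x = (map_point.x - self.map_minX) / self.scale_x
     screen_y = self.win_y - (map_point.y - self.map_minY) / self.scale_y
     return (screen_x, screen_y)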
Example #11
    def init_two_exit_room(self, num_agents, width_=100, height_=100, load_agents=True, read_agents=False):

        inner_width = width_ * 0.8
        inner_height = height_ * 0.8
        wall_width = Radius * 1.5
        origin_exit_width = (2.0 * Radius) * 2.0
        # 1.0 is a hyperparameter that controls how much the second exit's width differs from the first
        another_exit_width = origin_exit_width * 1.0
        half_inner_width, half_inner_height = inner_width / 2., inner_height / 2.

        self.walls = []
        wall1 = []  # left top
        wall2 = []  # left bottom
        wall3 = []  # bottom left
        wall4 = []  # bottom right
        wall5 = []  # top
        wall6 = []  # right

        half_left_exit_width = another_exit_width / 2.0
        wall1.append(Vector2(-half_inner_width -
                             wall_width, half_left_exit_width))
        wall1.append(Vector2(-half_inner_width, half_left_exit_width))
        wall1.append(Vector2(-half_inner_width, half_inner_height))
        wall1.append(Vector2(-half_inner_width-wall_width, half_inner_height))
        self.walls.append(wall1)

        wall2.append(Vector2(-half_inner_width-wall_width, -half_inner_height))
        wall2.append(Vector2(-half_inner_width, -half_inner_height))
        wall2.append(Vector2(-half_inner_width, -half_left_exit_width))
        wall2.append(Vector2(-half_inner_width -
                             wall_width, -half_left_exit_width))
        self.walls.append(wall2)

        half_bottom_exit_width = origin_exit_width / 2.0
        wall3.append(Vector2(-half_inner_width-wall_width, -
                             half_inner_height-wall_width))
        wall3.append(Vector2(-half_bottom_exit_width, -
                             half_inner_height-wall_width))
        wall3.append(Vector2(-half_bottom_exit_width, -half_inner_height))
        wall3.append(Vector2(-half_inner_width-wall_width, -half_inner_height))
        self.walls.append(wall3)

        wall4.append(Vector2(half_bottom_exit_width, -
                             half_inner_height-wall_width))
        wall4.append(Vector2(half_inner_width+wall_width, -
                             half_inner_height-wall_width))
        wall4.append(Vector2(half_inner_width+wall_width, -half_inner_height))
        wall4.append(Vector2(half_bottom_exit_width, -half_inner_height))
        self.walls.append(wall4)

        wall5.append(Vector2(-half_inner_width-wall_width, half_inner_height))
        wall5.append(Vector2(half_inner_width+wall_width, half_inner_height))
        wall5.append(Vector2(half_inner_width+wall_width,
                             half_inner_height+wall_width))
        wall5.append(Vector2(-half_inner_width-wall_width,
                             half_inner_height+wall_width))
        self.walls.append(wall5)

        wall6.append(Vector2(half_inner_width, -half_inner_height))
        wall6.append(Vector2(half_inner_width+wall_width, -half_inner_height))
        wall6.append(Vector2(half_inner_width+wall_width, half_inner_height))
        wall6.append(Vector2(half_inner_width, half_inner_height))
        self.walls.append(wall6)

        self.add_obstacle(wall1)
        self.add_obstacle(wall2)
        self.add_obstacle(wall3)
        self.add_obstacle(wall4)
        self.add_obstacle(wall5)
        self.add_obstacle(wall6)

        self.process_obstacles()

        self.exit1 = Vector2(0., -half_inner_height-wall_width-5.0)
        self.exit2 = Vector2(-half_inner_width-wall_width-5.0, 0.)
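        # exit1 and exit2 sit 5 units outside the bottom and left wall openings; every agent may head for either one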
        self.goals = [[self.exit1, self.exit2] for _ in range(num_agents)]

        if load_agents:
            if read_agents:
                self.read_agents_from_file('agents_info.xls')
            else:
                global glo_center, glo_border, glo_wall_width
                glo_center = [0., 0.]
                glo_border = [half_inner_width, half_inner_height]
                glo_wall_width = wall_width
                self.random_agents(num_agents, glo_center,
                                   glo_border, wall_width)

        map_min_x, map_min_y = -width_/2., -height_/2.
        map_max_x, map_max_y = width_/2., height_/2.
        return (map_min_x, map_min_y, map_max_x, map_max_y)