Example #1
    def get_human_times(self):
        """
        Run the whole simulation to the end and compute the average time for human to reach goal.
        Once an agent reaches the goal, it stops moving and becomes an obstacle
        (doesn't need to take half responsibility to avoid collision).

        :return:
        """
        # centralized orca simulator for all humans
        if not self.robot.reached_destination():
            raise ValueError('Episode is not done yet')
        params = (10, 10, 5, 5)
        sim = rvo2.PyRVOSimulator(self.time_step, *params, 0.3, 1)
        sim.addAgent(self.robot.get_position(), *params, self.robot.radius,
                     self.robot.v_pref, self.robot.get_velocity())
        for human in self.humans:
            sim.addAgent(human.get_position(), *params, human.radius,
                         human.v_pref, human.get_velocity())

        max_time = 1000
        while not all(self.human_times):
            for i, agent in enumerate([self.robot] + self.humans):
                vel_pref = np.array(agent.get_goal_position()) - np.array(
                    agent.get_position())
                if norm(vel_pref) > 1:
                    vel_pref /= norm(vel_pref)
                sim.setAgentPrefVelocity(i, tuple(vel_pref))
            sim.doStep()
            self.global_time += self.time_step
            if self.global_time > max_time:
                logging.warning('Simulation cannot terminate!')
                break
            for i, human in enumerate(self.humans):
                if self.human_times[i] == 0 and human.reached_destination():
                    self.human_times[i] = self.global_time

            # for visualization
            self.robot.set_position(sim.getAgentPosition(0))
            for i, human in enumerate(self.humans):
                human.set_position(sim.getAgentPosition(i + 1))
            self.states.append([
                self.robot.get_full_state(),
                [human.get_full_state() for human in self.humans]
            ])

        del sim
        return self.human_times
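Note: a minimal, self-contained sketch of the preferred-velocity convention the examples on this page share: aim at the goal but cap the preferred speed at unit magnitude, so agents only decelerate inside the last unit of distance. The constructor arguments mirror Example #1; the rest is illustrative.

import numpy as np
import rvo2

def unit_capped_pref_vel(position, goal):
    # vector to the goal, capped at unit magnitude
    v = np.asarray(goal, dtype=float) - np.asarray(position, dtype=float)
    n = np.linalg.norm(v)
    return tuple(v / n) if n > 1 else tuple(v)

# timeStep, neighborDist, maxNeighbors, timeHorizon, timeHorizonObst, radius, maxSpeed
sim = rvo2.PyRVOSimulator(0.25, 10, 10, 5, 5, 0.3, 1)
a = sim.addAgent((0.0, 0.0))
goal = (4.0, 0.0)
for _ in range(40):
    sim.setAgentPrefVelocity(a, unit_capped_pref_vel(sim.getAgentPosition(a), goal))
    sim.doStep()
print(sim.getAgentPosition(a))  # near the goal after 40 steps of 0.25 s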
Example #2
    def predict(self, state, action_space=None, members=None):
        """ Centralized planning for all agents """
        params = self.neighbor_dist, self.max_neighbors, self.time_horizon, self.time_horizon_obst
        if self.sim is not None and self.sim.getNumAgents() != len(state):
            del self.sim
            self.sim = None

        if self.sim is None:
            # the environment's maximum speed is 1.2
            self.sim = rvo2.PyRVOSimulator(self.time_step, *params,
                                           self.radius + self.safety_space,
                                           self.max_speed)
            for i, agent_state in enumerate(state):
                if i < 3:  # set the group members' maximum speed to 1.2
                    self.sim.addAgent(agent_state.position, *params,
                                      agent_state.radius, self.max_speed,
                                      agent_state.velocity)
                else:  # set the other agents' maximum speed to 1
                    self.sim.addAgent(agent_state.position, *params,
                                      agent_state.radius, 1,
                                      agent_state.velocity)
        else:
            for i, agent_state in enumerate(state):
                self.sim.setAgentPosition(i, agent_state.position)
                self.sim.setAgentVelocity(i, agent_state.velocity)

        # Set the preferred velocity to be a vector of unit magnitude (speed) in the direction of the goal.
        for i, agent_state in enumerate(state):
            if i < 3:
                # subtracting positions gives the displacement to cover in 0.25 s; multiplying by 4 turns it into a velocity. This speed difference makes the group members chase one another
                velocity = np.array((4 * (agent_state.gx - agent_state.px),
                                     4 * (agent_state.gy - agent_state.py)))
                self.sim.setAgentPrefVelocity(i, (velocity[0], velocity[1]))
            else:
                velocity = np.array((agent_state.gx - agent_state.px,
                                     agent_state.gy - agent_state.py))
                speed = np.linalg.norm(velocity)
                pref_vel = velocity / speed if speed > 1 else velocity  # a threshold of 1 starts the deceleration about four steps early; 1 is a reasonable setting
                self.sim.setAgentPrefVelocity(i, (pref_vel[0], pref_vel[1]))

        self.sim.doStep()
        actions = [
            ActionXY(*self.sim.getAgentVelocity(i)) for i in range(len(state))
        ]

        return actions
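Note: the rebuild-or-refresh logic above recurs in most of the predict() examples on this page. A hedged sketch of just that pattern; the class and method names (CentralizedORCA, _sync) are illustrative, not from the original code.

import rvo2

class CentralizedORCA:
    """Minimal sketch of the sim-reuse pattern above."""
    def __init__(self, time_step=0.25, params=(10, 10, 5, 5), radius=0.3, max_speed=1.0):
        self.time_step, self.params = time_step, params
        self.radius, self.max_speed = radius, max_speed
        self.sim = None

    def _sync(self, states):  # states: list of (position, velocity) tuples
        if self.sim is not None and self.sim.getNumAgents() != len(states):
            del self.sim          # agent count changed: rebuild from scratch
            self.sim = None
        if self.sim is None:
            self.sim = rvo2.PyRVOSimulator(self.time_step, *self.params,
                                           self.radius, self.max_speed)
            for pos, vel in states:
                self.sim.addAgent(pos, *self.params, self.radius,
                                  self.max_speed, vel)
        else:                     # same count: just refresh positions/velocities
            for i, (pos, vel) in enumerate(states):
                self.sim.setAgentPosition(i, pos)
                self.sim.setAgentVelocity(i, vel)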
Example #3
    def predict(self, state, action_space=None, members=None):
        self_state = state.self_state
        agents_states = state.agents_states
        # params is a tuple of (neighbor_dist, max_neighbors, time_horizon, time_horizon_obst)
        params = self.neighbor_dist, self.max_neighbors, self.time_horizon, self.time_horizon_obst
        # rebuild the sim whenever the number of agents changes
        if self.sim is not None and self.sim.getNumAgents() != len(
                state.agents_states) + 1:
            del self.sim
            self.sim = None
        if self.sim is None:
            self.sim = rvo2.PyRVOSimulator(self.time_step, *params,
                                           self.radius, self.max_speed)
            # position, *params, radius, v_pref (preferred speed), velocity
            self.sim.addAgent(self_state.position, *params,
                              self_state.radius + self.safety_space,
                              self_state.v_pref, self_state.velocity)
            for agent_state in state.agents_states:
                self.sim.addAgent(agent_state.position, *params,
                                  agent_state.radius + self.safety_space, 1,
                                  agent_state.velocity)  # each agent's preferred speed is its maximum speed
        else:  # only refresh a few attributes of the existing sim
            self.sim.setAgentPosition(0, self_state.position)
            self.sim.setAgentVelocity(0, self_state.velocity)
            for i, agent_state in enumerate(state.agents_states):
                self.sim.setAgentPosition(i + 1, agent_state.position)
                self.sim.setAgentVelocity(i + 1, agent_state.velocity)

        velocity = np.array(
            (self_state.gx - self_state.px, self_state.gy - self_state.py))
        speed = np.linalg.norm(velocity)  # sqrt(a^2 + b^2)
        pref_vel = velocity / speed if speed > 0.1 else velocity  # start decelerating only once the distance to the goal drops below 0.1
        self.sim.setAgentPrefVelocity(0, tuple(pref_vel))  # set the robot's preferred velocity

        # set the other agents' preferred velocities to zero, since their goals cannot be observed
        for i, agent_state in enumerate(state.agents_states):
            self.sim.setAgentPrefVelocity(i + 1, (0, 0))  # every other agent's preferred velocity is zero

        self.sim.doStep()
        action = ActionXY(*self.sim.getAgentVelocity(0))

        return action
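Note: a condensed sketch of the decentralized step above, assuming sim already holds the robot as agent 0 plus its observed neighbors, and pref_vel was computed as in the example.

sim.setAgentPrefVelocity(0, tuple(pref_vel))   # robot steers toward its goal
for i in range(1, sim.getNumAgents()):
    sim.setAgentPrefVelocity(i, (0, 0))        # neighbors' goals are unknown
sim.doStep()
vx, vy = sim.getAgentVelocity(0)               # the collision-free action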
Example #4
    def predict(self, state):
        """ Centralized planning for all agents """
        params = (
            self.neighbor_dist,
            self.max_neighbors,
            self.time_horizon,
            self.time_horizon_obst,
        )
        if self.sim is not None and self.sim.getNumAgents() != len(state):
            del self.sim
            self.sim = None

        if self.sim is None:
            self.sim = rvo2.PyRVOSimulator(self.time_step, *params,
                                           self.radius, self.max_speed)
            for agent_state in state:
                self.sim.addAgent(
                    agent_state.position, *params,
                    agent_state.radius + 0.01 + self.safety_space,
                    self.max_speed, agent_state.velocity)
        else:
            for i, agent_state in enumerate(state):
                self.sim.setAgentPosition(i, agent_state.position)
                self.sim.setAgentVelocity(i, agent_state.velocity)

        # Set the preferred velocity to be a vector of unit magnitude (speed) in the direction of the goal.
        for i, agent_state in enumerate(state):
            velocity = np.array((agent_state.gx - agent_state.px,
                                 agent_state.gy - agent_state.py))
            speed = np.linalg.norm(velocity)
            pref_vel = velocity / speed if speed > 1 else velocity
            self.sim.setAgentPrefVelocity(i, (pref_vel[0], pref_vel[1]))

        self.sim.doStep()
        actions = [
            ActionXY(*self.sim.getAgentVelocity(i)) for i in range(len(state))
        ]

        return actions
Example #5
def generate_orca_trajectory(num_ped, min_dist=3, react_time=1.5, end_range=1.0):
    sim = rvo2.PyRVOSimulator(1 / 2.5, min_dist, 10, react_time, 2, 0.4, 2)

    ## Initialize a scene
    trajectories, positions, goals, speed = overfit_initialize(num_ped, sim)
    done = False
    reaching_goal_by_ped = [False] * num_ped
    count = 0

    ##Simulate a scene
    while not done and count < 150:
        sim.doStep()
        reaching_goal = []
        for i in range(num_ped):
            if count == 0:
                trajectories[i].pop(0)
            position = sim.getAgentPosition(i)

            ## Append only if Goal not reached
            if not reaching_goal_by_ped[i]:
                trajectories[i].append(position)

            # check if this agent reaches the goal
            if np.linalg.norm(np.array(position) - np.array(goals[i])) < end_range:
                reaching_goal.append(True)
                sim.setAgentPrefVelocity(i, (0, 0))
                reaching_goal_by_ped[i] = True
            else:
                reaching_goal.append(False)
                velocity = np.array((goals[i][0] - position[0], goals[i][1] - position[1]))
                speed = np.linalg.norm(velocity)
                pref_vel = velocity / speed if speed > 1 else velocity
                sim.setAgentPrefVelocity(i, tuple(pref_vel.tolist()))
        count += 1
        done = all(reaching_goal)

    return trajectories
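Note: overfit_initialize is external to this snippet. A hypothetical stand-in, so the function above can run on its own: agents start on a circle with diametrically opposite goals.

import numpy as np

def overfit_initialize(num_ped, sim):
    # hypothetical stand-in for the external initializer
    positions, goals = [], []
    for k in range(num_ped):
        ang = 2 * np.pi * k / num_ped
        pos = (4 * np.cos(ang), 4 * np.sin(ang))
        sim.addAgent(pos)
        positions.append(pos)
        goals.append((-pos[0], -pos[1]))
    trajectories = [[p] for p in positions]
    speed = [1.0] * num_ped
    return trajectories, positions, goals, speed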
Example #6
    def __init__(self):
        InternalPolicy.__init__(self, str="RVO")

        self.dt = Config.DT
        neighbor_dist = Config.SENSING_HORIZON
        max_neighbors = Config.MAX_NUM_AGENTS_IN_ENVIRONMENT

        self.has_fixed_speed = False
        self.heading_noise = False

        self.max_delta_heading = np.pi/6
        
        # TODO share this parameter with environment
        time_horizon = Config.RVO_TIME_HORIZON # NOTE: bjorn used 1.0 in training for corl19
        # Initialize RVO simulator
        self.sim = rvo2.PyRVOSimulator(timeStep=self.dt, neighborDist=neighbor_dist, 
            maxNeighbors=max_neighbors, timeHorizon=time_horizon, 
            timeHorizonObst=time_horizon, radius=0.0, 
            maxSpeed=0.0)

        self.is_init = False

        self.use_non_coop_policy = True
    def init_simulator(self):
        # Initialize the RVO2 simulator and add agents to self._ped_list
        self._ped_list = []
        timeStep = 1.
        neighborDist = self._ped_radius  # safe-radius to observe states
        maxNeighbors = 8
        timeHorizon = 2.0
        timeHorizonObst = timeHorizon
        radius = 0.05  # size of the agent
        maxSpeed = 0.2

        sim = rvo2.PyRVOSimulator(timeStep, neighborDist, maxNeighbors,
                                  timeHorizon, timeHorizonObst, radius,
                                  maxSpeed)
        for i in range(self._num_ped):
            ai = sim.addAgent(
                (self._default_ped_states[i, 0], self._default_ped_states[i,
                                                                          1]))
            self._ped_list.append(ai)
            vx = self._ped_speed[i] * np.cos(self._ped_direc[i])
            vy = self._ped_speed[i] * np.sin(self._ped_direc[i])
            sim.setAgentPrefVelocity(ai, (vx, vy))
        return sim
Example #8
    def __init__(self, aamks_vars):
        self.evacuees = Evacuees
        self.max_speed = 0
        self.current_time = 0
        self.positions = []
        self.velocities = []
        self.velocity_vector = []
        self.speed_vec = []
        self.fed = []
        self.fed_vec = []
        self.finished = []
        self.finished_vec = []
        self.trajectory = []
        self.focus = []
        self.smoke_query = None
        self.rset = 0
        self.per_9 = 0
        self.floor = 0
        self.nav = None
        self.room_list = OrderedDict()
        self.rooms_in_smoke = []

        with open('{}/{}/config.json'.format(os.environ['AAMKS_PATH'],
                                             'evac')) as f:
            self.config = json.load(f)

        self.general = aamks_vars

        self.sim = rvo2.PyRVOSimulator(self.config['TIME_STEP'],
                                       self.config['NEIGHBOR_DISTANCE'],
                                       self.config['MAX_NEIGHBOR'],
                                       self.config['TIME_HORIZON'],
                                       self.config['TIME_HORIZON_OBSTACLE'],
                                       self.config['RADIUS'], self.max_speed)
        self.elog = self.general['logger']
        self.elog.info('ORCA on {} floor initiated'.format(self.floor))
Example #9
#!/usr/bin/env python

import rvo2

timeStep = 0.1
neighborDist = 1.0
maxNeighbors = 5
timeHorizon = 1.0
timeHorizonObst = timeHorizon
radius = 0.4
maxSpeed = 2
sim = rvo2.PyRVOSimulator(timeStep, neighborDist, maxNeighbors, timeHorizon,
                          timeHorizonObst, radius, maxSpeed)

# Pass either just the position (the other parameters then use
# the default values passed to the PyRVOSimulator constructor),
# or pass all available parameters.
a0 = sim.addAgent((0, 0))
a1 = sim.addAgent((1, 0))
a2 = sim.addAgent((1, 1))
a3 = sim.addAgent((0, 1), 1.5, 5, 1.5, 2, 0.4, 2, (0, 0))

# Obstacles are also supported.
# o1 = sim.addObstacle([(0.1, 0.1), (-0.1, 0.1), (-0.1, -0.1)])
# sim.processObstacles()

sim.setAgentPrefVelocity(a0, (1, 1))
sim.setAgentPrefVelocity(a1, (-1, 1))
sim.setAgentPrefVelocity(a2, (-1, -1))
sim.setAgentPrefVelocity(a3, (1, -1))
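Note: the script sets preferred velocities but never steps the simulation. The usual continuation, mirroring the upstream Python-RVO2 example:

print('Simulation has %i agents and %i obstacle vertices in it.' %
      (sim.getNumAgents(), sim.getNumObstacleVertices()))

for step in range(20):
    sim.doStep()
    positions = ['(%5.3f, %5.3f)' % sim.getAgentPosition(agent_no)
                 for agent_no in (a0, a1, a2, a3)]
    print('step=%2i  t=%.3f  %s' % (step, sim.getGlobalTime(), '  '.join(positions)))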
Example #10
def generate_orca_trajectory(sim_scene,
                             num_ped,
                             min_dist=3,
                             react_time=1.5,
                             end_range=1.0,
                             mode=None):
    """ Simulating Scenario using ORCA """
    ## Default: (1 / 60., 1.5, 5, 1.5, 2, 0.4, 2)
    sampling_rate = 1

    ## Circle Crossing
    if sim_scene == 'circle_crossing':
        fps = 100
        sampling_rate = fps / 2.5
        sim = rvo2.PyRVOSimulator(1 / fps, 10, 10, 5, 5, 0.3, 1)
        if mode == 'trajnet':
            sim = rvo2.PyRVOSimulator(1 / fps, 4, 10, 4, 5, 0.6,
                                      1.5)  ## (TrajNet++)
        trajectories, _, goals, speed = generate_circle_crossing(num_ped,
                                                                 sim,
                                                                 mode=mode)
    else:
        raise NotImplementedError

    # run
    done = False
    reaching_goal_by_ped = [False] * num_ped
    count = 0
    valid = True
    ##Simulate a scene
    while not done and count < 6000:
        count += 1
        sim.doStep()
        reaching_goal = []
        for i in range(num_ped):
            if count == 1:
                trajectories[i].pop(0)
            position = sim.getAgentPosition(i)

            ## Append only if Goal not reached
            if not reaching_goal_by_ped[i]:
                if count % sampling_rate == 0:
                    trajectories[i].append(position)

            # check if this agent reaches the goal
            if np.linalg.norm(np.array(position) -
                              np.array(goals[i])) < end_range:
                reaching_goal.append(True)
                sim.setAgentPrefVelocity(i, (0, 0))
                reaching_goal_by_ped[i] = True
            else:
                reaching_goal.append(False)
                velocity = np.array(
                    (goals[i][0] - position[0], goals[i][1] - position[1]))
                speed = np.linalg.norm(velocity)
                pref_vel = 1 * velocity / speed if speed > 1 else velocity
                sim.setAgentPrefVelocity(i, tuple(pref_vel.tolist()))
        done = all(reaching_goal)

    if not done or not are_smoothes(trajectories):
        valid = False

    return trajectories, valid, goals
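Note: are_smoothes is defined elsewhere; a hypothetical stand-in that rejects trajectories with abrupt heading changes, so the validity check above is runnable in isolation.

import numpy as np

def are_smoothes(trajectories, max_turn=np.pi / 2):
    # hypothetical stand-in: reject any trajectory whose heading changes
    # more sharply than max_turn between consecutive segments
    for traj in trajectories:
        xy = np.asarray(traj)
        if len(xy) < 3:
            continue
        seg = np.diff(xy, axis=0)
        ang = np.arctan2(seg[:, 1], seg[:, 0])
        turn = np.abs((np.diff(ang) + np.pi) % (2 * np.pi) - np.pi)
        if np.any(turn > max_turn):
            return False
    return True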
Example #11
    def predict(self, state):
        """
        Create a rvo2 simulation at each time step and run one step
        Python-RVO2 API: https://github.com/sybrenstuvel/Python-RVO2/blob/master/src/rvo2.pyx
        How simulation is done in RVO2: https://github.com/sybrenstuvel/Python-RVO2/blob/master/src/Agent.cpp

        Agent doesn't stop moving after it reaches the goal, because once it stops moving, the reciprocal rule is broken

        :param state:
        :return:
        """
        self_state = state.self_state
        params = self.neighbor_dist, self.max_neighbors, self.time_horizon, self.time_horizon_obst
        if self.sim is not None and self.sim.getNumAgents() != len(
                state.human_states) + 1:
            del self.sim
            self.sim = None
        if self.sim is None:
            self.sim = rvo2.PyRVOSimulator(self.time_step, *params,
                                           self.radius, self.max_speed)
            self.sim.addAgent(self_state.position, *params,
                              self_state.radius + 0.01 + self.safety_space,
                              self_state.v_pref, self_state.velocity)
            for human_state in state.human_states:
                self.sim.addAgent(
                    human_state.position, *params,
                    human_state.radius + 0.01 + self.safety_space,
                    self.max_speed, human_state.velocity)
        else:
            # time.sleep((1))
            # self.sim.setAgentPosition(0, self_state.position)
            # print('------------------------------------------------------------------------------------')
            # print('robot ',state.self_state.position)

            self.sim.setAgentVelocity(0, self_state.velocity)
            for i, human_state in enumerate(state.human_states):
                self.sim.setAgentPosition(i + 1, human_state.position)
                self.sim.setAgentVelocity(i + 1, human_state.velocity)
                #print("human"+ str(i)+": " , state.human_states[i].position)
                #time.sleep(1)

        # Set the preferred velocity to be a vector of unit magnitude (speed) in the direction of the goal.
        velocity = np.array(
            (self_state.gx - self_state.px, self_state.gy - self_state.py))
        speed = np.linalg.norm(velocity)
        pref_vel = velocity / speed if speed > 1 else velocity

        # Perturb a little to avoid deadlocks due to perfect symmetry.
        # perturb_angle = np.random.random() * 2 * np.pi
        # perturb_dist = np.random.random() * 0.01
        # perturb_vel = np.array((np.cos(perturb_angle), np.sin(perturb_angle))) * perturb_dist
        # pref_vel += perturb_vel

        self.sim.setAgentPrefVelocity(0, tuple(pref_vel))
        for i, human_state in enumerate(state.human_states):
            # unknown goal position of other humans
            self.sim.setAgentPrefVelocity(i + 1, (0, 0))

        self.sim.doStep()
        action = ActionXY(*self.sim.getAgentVelocity(0))
        self.last_state = state

        return action
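Note: the commented-out symmetry-breaking perturbation above is occasionally useful against head-on deadlocks. A hedged helper version of it (the name perturb is illustrative):

import numpy as np

def perturb(pref_vel, max_dist=0.01):
    # add a tiny random offset so perfectly symmetric agents do not deadlock
    angle = np.random.random() * 2 * np.pi
    dist = np.random.random() * max_dist
    return (pref_vel[0] + np.cos(angle) * dist,
            pref_vel[1] + np.sin(angle) * dist)

# usage: self.sim.setAgentPrefVelocity(0, perturb(tuple(pref_vel)))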
Example #12
# spawn walls and other obstacles
BOXSIZE = 800
WALLWIDTH = 50
obstacles = dispUtils.createWalls(windowSurface, BOXSIZE, WALLWIDTH)

# box
boxX = int(random.uniform(200, 800))
boxY = int(random.uniform(200, 800))

########################################################################################################################################################################

#  timeStep, neighborDist, maxNeighbors, timeHorizon, timeHorizonObst, radius, maxSpeed, velocity (vector2)
numberOfAgents = 5

sim = rvo2.PyRVOSimulator(1 / 60., 1.5, numberOfAgents, 1.5, 2, 0.4, 2)
# Pass either just the position (the other parameters then use
# the default values passed to the PyRVOSimulator constructor),
# or pass all available parameters.
a0 = sim.addAgent((0, 0))
a1 = sim.addAgent((1, 0))
a2 = sim.addAgent((1, 1))
a3 = sim.addAgent((0, 1), 1.5, 5, 1.5, 2, 0.4, 2, (0, 0))

# Obstacles are also supported.
o1 = sim.addObstacle([(0.1, 0.1), (-0.1, 0.1), (-0.1, -0.1)])
sim.processObstacles()

sim.setAgentPrefVelocity(a0, (1, 1))
sim.setAgentPrefVelocity(a1, (-1, 1))
sim.setAgentPrefVelocity(a2, (-1, -1))
Example #13
def predict_all(input_paths, goals, n_predict=12):

    pred_length = n_predict

    fps = 100
    sampling_rate = fps / 2.5

    sim = rvo2.PyRVOSimulator(1 / fps, 4, 10, 4, 5, 0.6, 1.5)  ## (TrajNet++)
    trajectories = [[input_paths[i][-1]] for i, _ in enumerate(input_paths)]
    [sim.addAgent((p[-1][0], p[-1][1])) for p in input_paths]

    num_ped = len(trajectories)
    reaching_goal_by_ped = [False] * num_ped
    count = 0
    end_range = 1.0
    done = False

    for i in range(num_ped):
        velocity = np.array((input_paths[i][-1][0] - input_paths[i][-3][0],
                             input_paths[i][-1][1] - input_paths[i][-3][1]))
        velocity = velocity / 0.8  # displacement over two frames at 0.4 s per frame
        sim.setAgentVelocity(i, tuple(velocity.tolist()))

        velocity = np.array((goals[i][0] - input_paths[i][-1][0],
                             goals[i][1] - input_paths[i][-1][1]))
        speed = np.linalg.norm(velocity)
        pref_vel = 1 * velocity / speed if speed > 1 else velocity
        sim.setAgentPrefVelocity(i, tuple(pref_vel.tolist()))

    ##Simulate a scene
    while (not done) and count < sampling_rate * pred_length + 1:
        # print("Count: ", count)
        count += 1
        sim.doStep()
        reaching_goal = []
        for i in range(num_ped):
            if count == 1:
                trajectories[i].pop(0)
            position = sim.getAgentPosition(i)

            ## Append only if Goal not reached
            if not reaching_goal_by_ped[i]:
                if count % sampling_rate == 0:
                    trajectories[i].append(position)

            # check if this agent reaches the goal
            if np.linalg.norm(np.array(position) -
                              np.array(goals[i])) < end_range:
                reaching_goal.append(True)
                sim.setAgentPrefVelocity(i, (0, 0))
                reaching_goal_by_ped[i] = True
            else:
                reaching_goal.append(False)
                velocity = np.array(
                    (goals[i][0] - position[0], goals[i][1] - position[1]))
                speed = np.linalg.norm(velocity)
                pref_vel = 1 * velocity / speed if speed > 1 else velocity
                sim.setAgentPrefVelocity(i, tuple(pref_vel.tolist()))

        done = all(reaching_goal)

    return trajectories
Example #14
File: 5robot.py Project: princeward/orcapy
import numpy as np
import rvo2


def get_des_vel(cur_pos, des_pos):  # hypothetical name; the original def line is truncated in this excerpt
    '''
    get the desired velocity (normalized)
    '''
    vel = [0.0, 0.0]
    vel[0] = des_pos[0] - cur_pos[0]
    vel[1] = des_pos[1] - cur_pos[1]
    norm = np.sqrt(vel[0]**2 + vel[1]**2)
    if norm > 1:
        vel[0] /= norm
        vel[1] /= norm
    return (vel[0], vel[1])


sim = rvo2.PyRVOSimulator(timeStep=0.5,
                          neighborDist=20,
                          maxNeighbors=5,
                          timeHorizon=1.5,
                          timeHorizonObst=2,
                          radius=5,
                          maxSpeed=20)

N_Agent = 5
init_pos = np.array([[50.0, 80.0, 50.0, 20.0, 10.0],
                     [10.0, 50.0, 90.0, 75.0, 30.0]])
goal_pos = np.array([[50.0, 20.0, 10.0, 50.0, 80.0],
                     [90.0, 70.0, 30.0, 10.0, 50.0]])
assert (N_Agent == init_pos.shape[1])

agents = [
    sim.addAgent((init_pos[0][i], init_pos[1][i])) for i in range(N_Agent)
]
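Note: the excerpt stops after adding the agents. A hedged sketch of the stepping loop such a demo typically runs, reusing get_des_vel from above:

for _ in range(200):
    for i, a in enumerate(agents):
        cur = sim.getAgentPosition(a)
        sim.setAgentPrefVelocity(a, get_des_vel(cur, (goal_pos[0][i], goal_pos[1][i])))
    sim.doStep()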
Example #15
def main():
    initPosMainRobot = [[0, 0], [5, 0], [5, 5], [0, 5], [0, 2.5], [2.5, 0],
                        [5, 2.5], [2.5, 5], [5, 3.75], [5, 1.25], [0, 1.25],
                        [0, 3.75]]
    initPosObstRobot = [[1.5, 1.5], [3.5, 3.5], [3.5, 1.5], [1.5, 3.5], [4, 2],
                        [3, 4], [1, 3], [2, 1]]
    goalPos = [[5, 5], [0, 5], [0, 0], [5, 0], [5, 2.5], [2.5, 5], [0, 2.5],
               [2.5, 0], [0, 1.25], [0, 3.75], [5, 1.25], [5, 3.75]]

    allRobots = []

    for i in range(0, mainRobotNumber):
        allRobots = allRobots + [initPosMainRobot[i]]
    for i in range(0, obsNumber):
        allRobots = allRobots + [initPosObstRobot[i]]
    if mode == 0:
        tmpStr = "Const"
    elif mode == 1:
        tmpStr = "Linear"
    elif mode == 2:
        tmpStr = "Quad"
    rList = []
    f = open(
        "/home/howoongjun/catkin_ws/src/simple_create/src/DataSave/log/log" +
        str(mainRobotNumber) + "robots" + str(obsNumber) + "obstacles_ORCA_" +
        tmpStr + "_" + datetime.datetime.now().strftime('%y%m%d') + ".txt",
        'w')

    sim = rvo2.PyRVOSimulator(1 / 60., agentRadius * 20, 12, 1.5, 2,
                              agentRadius * 1.55, 2.0)
    SimAgent = []
    for i in range(0, mainRobotNumber):
        SimAgent = SimAgent + [
            sim.addAgent((initPosMainRobot[i][0], initPosMainRobot[i][1]))
        ]
    for i in range(0, obsNumber):
        SimAgent = SimAgent + [
            sim.addAgent((initPosObstRobot[i][0], initPosObstRobot[i][1]))
        ]

    rospy.init_node('circler', anonymous=True)
    rate = rospy.Rate(50)  #hz

    posMainRobot_pub = []
    posMainRobot_msg = []
    posObstRobot_pub = []
    posObstRobot_msg = []

    twistMainRobot_pub = []
    twistMainRobot_msg = []
    twistObstRobot_pub = []
    twistObstRobot_msg = []

    rospy.logwarn("Loading !!!")

    for i in range(0, mainRobotNumber):
        posMainRobot_pub = posMainRobot_pub + [
            rospy.Publisher(
                'gazebo/set_model_state', ModelState, queue_size=10)
        ]
        twistMainRobot_pub = twistMainRobot_pub + [
            rospy.Publisher(
                'mainRobot' + str(i) + '/cmd_vel', Twist, queue_size=10)
        ]
        posMainRobot_msg.append(ModelState())
        twistMainRobot_msg.append(Twist())
        posMainRobot_msg[i].model_name = "mainRobot" + str(i)

        [
            posMainRobot_msg[i].pose.position.x,
            posMainRobot_msg[i].pose.position.y
        ] = initPosMainRobot[i]
        posMainRobot_msg[i].pose.position.z = 0
        posMainRobot_pub[i].publish(posMainRobot_msg[i])
        twistMainRobot_msg[i].linear.x = 0
        twistMainRobot_msg[i].linear.y = 0
        twistMainRobot_msg[i].linear.z = 0
        twistMainRobot_msg[i].angular.x = 0
        twistMainRobot_msg[i].angular.y = 0
        twistMainRobot_msg[i].angular.z = 0
    for i in range(0, obsNumber):
        posObstRobot_pub = posObstRobot_pub + [
            rospy.Publisher(
                'gazebo/set_model_state', ModelState, queue_size=10)
        ]
        twistObstRobot_pub = twistObstRobot_pub + [
            rospy.Publisher(
                'obsRobot' + str(i) + '/cmd_vel', Twist, queue_size=10)
        ]
        posObstRobot_msg.append(ModelState())
        twistObstRobot_msg.append(Twist())

        posObstRobot_msg[i].model_name = "obsRobot" + str(i)
        [
            posObstRobot_msg[i].pose.position.x,
            posObstRobot_msg[i].pose.position.y
        ] = initPosObstRobot[i]
        posObstRobot_msg[i].pose.position.z = 0
        posObstRobot_pub[i].publish(posObstRobot_msg[i])
        twistObstRobot_msg[i].linear.x = 0
        twistObstRobot_msg[i].linear.y = 0
        twistObstRobot_msg[i].linear.z = 0
        twistObstRobot_msg[i].angular.x = 0
        twistObstRobot_msg[i].angular.y = 0
        twistObstRobot_msg[i].angular.z = 0

    time.sleep(10)
    fps = 0
    elapsed = 0
    for e in range(num_episodes):
        start = time.time()
        done = False
        rospy.logwarn("Episode %d Starts!", e)
        rospy.logwarn(datetime.datetime.now().strftime('%H:%M:%S'))
        for i in range(0, mainRobotNumber):
            [
                posMainRobot_msg[i].pose.position.x,
                posMainRobot_msg[i].pose.position.y
            ] = initPosMainRobot[i]
            posMainRobot_msg[i].pose.position.z = 0
            posMainRobot_msg[0].pose.orientation.z = 0
            posMainRobot_msg[1].pose.orientation.z = 1
            posMainRobot_msg[2].pose.orientation.z = 1
            posMainRobot_msg[3].pose.orientation.z = 0
            # posMainRobot_msg[6].pose.orientation.z = 1
            # posMainRobot_msg[9].pose.orientation.z = 1
            # posMainRobot_msg[8].pose.orientation.z = 1
            posMainRobot_pub[i].publish(posMainRobot_msg[i])
            twistMainRobot_msg[i].linear.x = 0
            twistMainRobot_msg[i].angular.z = 0
            twistMainRobot_pub[i].publish(twistMainRobot_msg[i])
        for i in range(0, obsNumber):
            [
                posObstRobot_msg[i].pose.position.x,
                posObstRobot_msg[i].pose.position.y
            ] = initPosObstRobot[i]
            posObstRobot_msg[i].pose.position.z = 0
            posObstRobot_msg[0].pose.orientation.z = 0
            posObstRobot_msg[1].pose.orientation.z = 1
            # posObstRobot_msg[2].pose.orientation.z = 0
            # posObstRobot_msg[3].pose.orientation.z = 1
            posObstRobot_pub[i].publish(posObstRobot_msg[i])
        # Initialize goalReached flag
        goalReached = []

        for i in range(0, mainRobotNumber):
            goalReached = goalReached + [False]

        frame = 0
        ckTime = 0
        while not done:
            start = time.time()
            frame += 1
            object_coordinates = []
            obst_coordinates = []
            prefVel = []

            # Move obstacles
            for obsRobNo in range(0, obsNumber):
                twistObstRobot_msg[obsRobNo].linear.x = random.randrange(
                    0, 2)  #linearX
                twistObstRobot_msg[obsRobNo].angular.z = random.randrange(
                    -4, 5)  #angularZ
                twistObstRobot_pub[obsRobNo].publish(
                    twistObstRobot_msg[obsRobNo])

            for i in range(0, mainRobotNumber):
                model_coordinates = rospy.ServiceProxy(
                    'gazebo/get_model_state', GetModelState)
                object_coordinates = object_coordinates + [
                    model_coordinates("mainRobot" + str(i), "")
                ]

            for i in range(0, obsNumber):
                model_coordinates = rospy.ServiceProxy(
                    'gazebo/get_model_state', GetModelState)
                obst_coordinates = obst_coordinates + [
                    model_coordinates("obsRobot" + str(i), "")
                ]
                sim.setAgentPosition(mainRobotNumber + i,
                                     (obst_coordinates[i].pose.position.x,
                                      obst_coordinates[i].pose.position.y))

            # Move Main robots
            for curRobNo in range(0, mainRobotNumber):
                quaternion = (object_coordinates[curRobNo].pose.orientation.x,
                              object_coordinates[curRobNo].pose.orientation.y,
                              object_coordinates[curRobNo].pose.orientation.z,
                              object_coordinates[curRobNo].pose.orientation.w)
                euler = euler_from_quaternion(quaternion)
                yaw = euler[2]
                linearX = 0
                angularZ = 0
                sim.setAgentPosition(
                    curRobNo, (object_coordinates[curRobNo].pose.position.x,
                               object_coordinates[curRobNo].pose.position.y))
                tmpPref = [
                    goalPos[curRobNo][0] -
                    object_coordinates[curRobNo].pose.position.x,
                    goalPos[curRobNo][1] -
                    object_coordinates[curRobNo].pose.position.y
                ]
                prefVel = [
                    2.0 * tmpPref[0] /
                    math.sqrt(tmpPref[0]**2 + tmpPref[1]**2),
                    2.0 * tmpPref[1] / math.sqrt(tmpPref[0]**2 + tmpPref[1]**2)
                ]
                sim.setAgentPrefVelocity(curRobNo, (prefVel[0], prefVel[1]))
                sim.doStep()
                [linearX,
                 angularZ] = takeAction(sim.getAgentVelocity(curRobNo), yaw)
                twistMainRobot_msg[curRobNo].linear.x = linearX
                twistMainRobot_msg[curRobNo].angular.z = angularZ  # * 0.5
                twistMainRobot_pub[curRobNo].publish(
                    twistMainRobot_msg[curRobNo])

                collisionFlag = 0

                if (math.sqrt((object_coordinates[curRobNo].pose.position.x -
                               goalPos[curRobNo][0])**2 +
                              (object_coordinates[curRobNo].pose.position.y -
                               goalPos[curRobNo][1])**2) <= 2 * agentRadius):
                    if not goalReached[curRobNo]:
                        rospy.logwarn(
                            str(curRobNo) + " Robot has reached the goal!")
                    goalReached[curRobNo] = True
                for i in range(0, mainRobotNumber):
                    if i != curRobNo:
                        if math.sqrt(
                            (object_coordinates[curRobNo].pose.position.x -
                             object_coordinates[i].pose.position.x)**2 +
                            (object_coordinates[curRobNo].pose.position.y -
                             object_coordinates[i].pose.position.y)**2
                        ) < 2 * agentRadius:
                            rospy.logerr("Collision with a main robot")
                            collisionFlag = -1
                            done = True
                for i in range(0, obsNumber):
                    if math.sqrt(
                        (obst_coordinates[i].pose.position.x -
                         object_coordinates[curRobNo].pose.position.x)**2 +
                        (obst_coordinates[i].pose.position.y -
                         object_coordinates[curRobNo].pose.position.y)**2
                    ) < 2 * agentRadius:
                        rospy.logerr("Collision with an Obstacle")
                        collisionFlag = -1
                        done = True
            # rospy.logwarn("================================")

            if all(goalReached):
                done = True
                rList.append(1)

            if done:
                if collisionFlag == -1:
                    rList.append(0)
                initPosMainRobot = [[0, 0], [5, 0], [5, 5], [0, 5], [0, 2.5],
                                    [2.5, 0], [5, 2.5], [2.5, 5], [5, 3.75],
                                    [5, 1.25], [0, 1.25], [0, 3.75]]
                for i in range(0, obsNumber):
                    [
                        posObstRobot_msg[i].pose.position.x,
                        posObstRobot_msg[i].pose.position.y
                    ] = initPosObstRobot[i]
                    posObstRobot_msg[i].pose.position.z = 0
                    posObstRobot_pub[i].publish(posObstRobot_msg[i])
            rate.sleep()
            final = time.time()
            ckTime = (ckTime * (frame - 1) + final - start) / frame

        # fps = (fps * e + frame / (final - start)) / (e + 1)
        if e != 0:
            if collisionFlag != -1 and sum(rList) != 0:
                elapsed = (elapsed *
                           (sum(rList) - 1) + final - start) / sum(rList)
            rospy.logwarn("Percent of successful episodes: %f %%",
                          100.0 * sum(rList) / (e + 1))
        rospy.logwarn("Average Processing Time: %f", ckTime)
        data = "Episode_%d_%f \n" % (e, ckTime)
        f.write(data)
        # rospy.logwarn("Elapsed Time: %f s", final - start)
        # rospy.logwarn("Frame per Second: %f fps", frame / (final - start))
        # rospy.logwarn("Average Time: %f s", elapsed)
        # rospy.logwarn("Average FPS: %f fps", fps)
        rospy.logwarn(
            "====================================================================="
        )
    f.close()
Example #16
import random
import rospy
import time
import threading
import math
import sys
import rvo2
from utils import *
from nav_msgs.msg import Odometry

import A2easyGo as easyGo

rospy.init_node('A2_mvs', anonymous=False)

SIMUL_HZ = 10.0

sim = rvo2.PyRVOSimulator(1 / SIMUL_HZ, 15.0, 10, 5.0, 2.0, 0.15, 3.0)

COL = 10.0
ROW = 10.0
voxel_size = 0.5
size = voxel_size / 2

#ROBOT MOVE
SPEED = 20  # 14
ROTATE_SPEED = 25  # 25
ORCA_THRES = 2

global Target
Target = [-2.5, 0]

Example #17
    def planner_routine(self, event=None):
        nowstamp = rospy.Time.now()
        if self.tf_wpt_in_fix is None:
            rospy.logwarn_throttle(10., "Global path not available yet")
            return
        if self.tf_rob_in_fix is None:
            rospy.logwarn_throttle(10., "tf_rob_in_fix not available yet")
            return
        if self.odom is None:
            rospy.logwarn_throttle(10., "odom not available yet")
            return
        # remove old obstacles from buffer
        k2s = 2.
        with self.lock:
            tic = timer()
            dynamic_obstacles = []
            self.obstacles_buffer = [
                obst_msg for obst_msg in self.obstacles_buffer
                if nowstamp - obst_msg.header.stamp < rospy.Duration(k2s)
            ]
            # DEBUG remove
            #             for obst_msg in self.obstacles_buffer:
            for obst_msg in self.obstacles_buffer[-1:]:
                dynamic_obstacles.extend(obst_msg.obstacles)
            # take into account static obstacles if not old
            static_obstacles = []
            if self.static_obstacles_buffer is not None:
                if (nowstamp - self.static_obstacles_buffer.header.stamp
                    ) < rospy.Duration(k2s):
                    static_obstacles = self.static_obstacles_buffer.obstacles
            # vel to goal in fix
            goal_in_fix = np.array(Pose2D(self.tf_wpt_in_fix)[:2])
            toc = timer()
            print("Lock preproc {:.1f} ms".format((toc - tic) * 1000.))
            # robot position
            tf_rob_in_fix = self.tf_rob_in_fix

        tic = timer()

        # RVO set up
        positions = []
        radii = []
        velocities = []
        obstacles = []
        metadata = []  # agent metadata

        # create agents for RVO
        # first agent is the robot
        positions.append(Pose2D(tf_rob_in_fix)[:2])
        radii.append(self.kRadius)
        velocities.append(
            [self.odom.twist.twist.linear.x, self.odom.twist.twist.linear.y])
        metadata.append({'source': 'robot'})
        # create agent for every dynamic obstacle
        for obst in dynamic_obstacles:
            positions.append(
                [obst.polygon.points[0].x, obst.polygon.points[0].y])
            radii.append(obst.radius)
            velocities.append([
                obst.velocities.twist.linear.x, obst.velocities.twist.linear.y
            ])
            metadata.append({'source': 'dynamic'})
        # create agents for local static obstacles (table leg, chair, etc)
        for obst in static_obstacles:
            if obst.radius > 0.5:
                continue  # TODO big walls make big circles : but dangerous to do this!
            positions.append(
                [obst.polygon.points[0].x, obst.polygon.points[0].y])
            radii.append(obst.radius)
            velocities.append([
                obst.velocities.twist.linear.x, obst.velocities.twist.linear.y
            ])
            metadata.append({'source': 'nonleg'})

        # create static obstacle vertices from refmap if available (walls, etc)
        if self.refmap_manager.tf_frame_in_refmap is not None:
            kMapObstMaxDist = 3.
            obstacles_in_ref = self.refmap_manager.map_as_closed_obstacles
            pose2d_ref_in_fix = inverse_pose2d(
                Pose2D(self.refmap_manager.tf_frame_in_refmap))
            near_obstacles = [
                apply_tf(vertices, pose2d_ref_in_fix)
                for vertices in obstacles_in_ref if len(vertices) > 1
            ]
            near_obstacles = [
                vertices for vertices in near_obstacles if np.mean(
                    np.linalg.norm(vertices - np.array(positions[0]), axis=1))
                < kMapObstMaxDist
            ]
            obstacles = near_obstacles

        toc = timer()
        print("Pre-RVO {:.1f} ms".format((toc - tic) * 1000.))
        tic = timer()

        # RVO ----------------
        import rvo2
        t_horizon = 10.
        DT = 1 / 10.
        #RVOSimulator(timeStep, neighborDist, maxNeighbors, timeHorizon, timeHorizonObst, radius, maxSpeed, velocity = [0,0]);
        sim = rvo2.PyRVOSimulator(DT, 1.5, 5, 1.5, 2, 0.4, 2)

        # Pass either just the position (the other parameters then use
        # the default values passed to the PyRVOSimulator constructor),
        # or pass all available parameters.
        agents = []
        for p, v, r, m in zip(positions, velocities, radii, metadata):
            #addAgent(posxy, neighborDist, maxNeighbors, timeHorizon, timeHorizonObst, radius, maxSpeed, velocity = [0,0]);
            a = sim.addAgent(tuple(p), radius=r, velocity=tuple(v))
            agents.append(a)
            # static agents can't move
            if m['source'] != 'robot' and np.linalg.norm(v) == 0:
                sim.setAgentMaxSpeed(a, 0)

        # Obstacle(list of vertices), anticlockwise (clockwise for bounding obstacle)
        for vert in obstacles:
            o1 = sim.addObstacle(list(vert))
        sim.processObstacles()

        toc = timer()
        print("RVO init {:.1f} ms".format((toc - tic) * 1000.))
        tic = timer()

        n_steps = int(t_horizon / DT)
        pred_tracks = [[] for a in agents]
        pred_vels = [[] for a in agents]
        pred_t = []
        for step in range(n_steps):
            for i in range(len(agents)):
                # TODO: early stop if agent 0 reaches goal
                a = agents[i]
                pref_vel = velocities[
                    i]  # assume agents want to keep initial vel
                if i == 0:
                    vector_to_goal = goal_in_fix - np.array(
                        sim.getAgentPosition(a))
                    pref_vel = self.kIdealVelocity * vector_to_goal / np.linalg.norm(
                        vector_to_goal)
                sim.setAgentPrefVelocity(a, tuple(pref_vel))
                pred_tracks[i].append(sim.getAgentPosition(a))
                pred_vels[i].append(sim.getAgentVelocity(a))
            pred_t.append(1. * step * DT)
            sim.doStep()
        # -------------------------
        toc = timer()
        print("RVO steps {} - {} agents - {} obstacles".format(
            step, len(agents), len(obstacles)))
        print("RVO sim {:.1f} ms".format((toc - tic) * 1000.))

        # PLOT ---------------------------
        #         self.transport_data = (sim, agents, metadata, radii, obstacles, pred_tracks)
        # -----------------------------

        # publish predicted tracks as paths
        rob_track = pred_tracks[0]
        pub = rospy.Publisher("/rvo_robot_plan", Path, queue_size=1)
        path_msg = Path()
        path_msg.header.stamp = nowstamp
        path_msg.header.frame_id = self.kFixedFrame
        for pose, t in zip(rob_track, pred_t):
            pose_msg = PoseStamped()
            pose_msg.header.stamp = nowstamp + rospy.Duration(t)
            pose_msg.header.frame_id = path_msg.header.frame_id
            pose_msg.pose.position.x = pose[0]
            pose_msg.pose.position.y = pose[1]
            path_msg.poses.append(pose_msg)
        pub.publish(path_msg)

        # publish tracks
        pub = rospy.Publisher("/rvo_simulated_tracks",
                              MarkerArray,
                              queue_size=1)
        ma = MarkerArray()
        id_ = 0
        end_x = [sim.getAgentPosition(agent_no)[0] for agent_no in agents]
        end_y = [sim.getAgentPosition(agent_no)[1] for agent_no in agents]
        end_t = pred_t[-1]
        for i in range(len(agents)):
            color = (0, 0, 0, 1)  # black
            color = (1, .8, 0,
                     1) if metadata[i]['source'] == 'robot' else color  # yellow
            color = (1, 0, 0,
                     1) if metadata[i]['source'] == 'dynamic' else color  # red
            color = (0, 0, 1,
                     1) if metadata[i]['source'] == 'nonleg' else color  # blue
            # track line
            mk = Marker()
            mk.lifetime = rospy.Duration(0.1)
            mk.header.frame_id = self.kFixedFrame
            mk.ns = "tracks"
            mk.id = i
            mk.type = 4  # LINE_STRIP
            mk.action = 0
            mk.scale.x = 0.02
            mk.color.r = color[0]
            mk.color.g = color[1]
            mk.color.b = color[2]
            mk.color.a = color[3]
            mk.frame_locked = True
            xx = np.array(pred_tracks[i])[:, 0]
            yy = np.array(pred_tracks[i])[:, 1]
            for x, y, t in zip(xx, yy, pred_t):
                pt = Point()
                pt.x = x
                pt.y = y
                pt.z = t / t_horizon
                mk.points.append(pt)
            ma.markers.append(mk)
            # endpoint
            r = radii[i]
            mk = Marker()
            mk.lifetime = rospy.Duration(0.1)
            mk.header.frame_id = self.kFixedFrame
            mk.ns = "endpoints"
            mk.id = i
            mk.type = 3  # CYLINDER
            mk.action = 0
            mk.scale.x = r * 2.
            mk.scale.y = r * 2.
            mk.scale.z = 0.1
            mk.color.r = color[0]
            mk.color.g = color[1]
            mk.color.b = color[2]
            mk.color.a = color[3]
            mk.frame_locked = True
            mk.pose.position.x = end_x[i]
            mk.pose.position.y = end_y[i]
            mk.pose.position.z = end_t / t_horizon
            ma.markers.append(mk)
        pub.publish(ma)

        pred_rob_vel_in_fix = np.array(
            pred_vels[0][1])  # index 1 is a cheat because index 0 is usually the current speed
        pred_end_in_fix = np.array([end_x[0], end_y[0]])
        # in robot frame
        pose2d_fix_in_rob = inverse_pose2d(Pose2D(tf_rob_in_fix))
        pred_rob_vel_in_rob = apply_tf_to_vel(
            np.array(list(pred_rob_vel_in_fix) + [0]), pose2d_fix_in_rob)[:2]
        pred_end_in_rob = apply_tf(pred_end_in_fix, pose2d_fix_in_rob)
        # resulting command vel, and path
        best_u, best_v = pred_rob_vel_in_rob
        # check if goal is reached
        if np.linalg.norm(pred_end_in_rob) < 0.1:
            best_u, best_v = (0, 0)

        # Slow turn towards goal
        # TODO
        best_w = 0
        WMAX = 0.5
        gx, gy = pred_end_in_rob
        angle_to_goal = np.arctan2(gy, gx)  # [-pi, pi]
        if np.sqrt(gx * gx + gy * gy) > 0.5:  # turn only if goal is far away
            if np.abs(angle_to_goal) > (np.pi / 4 / 10):  # deadzone
                best_w = np.clip(angle_to_goal, -WMAX, WMAX)  # linear ramp

        cmd_vel_pub = rospy.Publisher("/cmd_vel", Twist, queue_size=1)
        if not self.STOP:
            # publish cmd_vel
            cmd_vel_msg = Twist()
            cmd_vel_msg.linear.x = best_u
            cmd_vel_msg.linear.y = best_v
            cmd_vel_msg.angular.z = best_w
            cmd_vel_pub.publish(cmd_vel_msg)
Example #18
def predict(input_paths, dest_dict=None, dest_type='interp', orca_params=[1.5, 1.5, 0.4],
            predict_all=True, n_predict=12, obs_length=9):
    pred_length = n_predict

    def init_states(input_paths, sim, start_frame, dest_dict, dest_type):
        positions, goals, speed = [], [], []
        for i, _ in enumerate(input_paths):
            path = input_paths[i]
            ped_id = path[0].pedestrian
            past_path = [t for t in path if t.frame <= start_frame]
            future_path = [t for t in path if t.frame > start_frame]
            past_frames = [t.frame for t in path if t.frame <= start_frame]
            len_path = len(past_path)

            ## To consider agent or not consider.
            if start_frame in past_frames:
                curr = past_path[-1]

                ## Velocity
                if len_path >= 4:
                    stride = 3
                    prev = past_path[-4]
                else:
                    stride = len_path - 1
                    prev = past_path[-len_path]
                curr_vel, curr_speed = vel_state(prev, curr, stride)
                max_speed = MAX_SPEED_MULTIPLIER * curr_speed

                ## Destination
                if dest_type == 'true':
                    if dest_dict is not None:
                        [d_x, d_y] = dest_dict[ped_id]
                    else:
                        raise ValueError
                elif dest_type == 'interp':
                    [d_x, d_y] = dest_state(past_path, len_path)
                elif dest_type == 'pred_end':
                    [d_x, d_y] = [future_path[-1].x, future_path[-1].y]
                else:
                    raise NotImplementedError

                positions.append((curr.x, curr.y))
                speed.append((curr_speed))
                goals.append((d_x, d_y))

                sim.addAgent((curr.x, curr.y), maxSpeed=max_speed, velocity=tuple(curr_vel))

        trajectories = [[positions[i]] for i in range(len(positions))]
        return trajectories, positions, goals, speed

    def vel_state(prev, curr, stride):
        if stride == 0:
            return [0, 0], 0
        diff = np.array([curr.x - prev.x, curr.y - prev.y])
        theta = np.arctan2(diff[1], diff[0])
        speed = np.linalg.norm(diff) / (stride * 0.4)
        return [speed*np.cos(theta), speed*np.sin(theta)], speed

    def dest_state(path, length):
        if length == 1:
            return [path[-1].x, path[-1].y]
        x = [t.x for t in path]
        y = [t.y for t in path]
        time = list(range(length))
        f = interp1d(x=time, y=[x, y], fill_value='extrapolate')
        return f(time[-1] + pred_length)

    multimodal_outputs = {}
    primary = input_paths[0]
    neighbours_tracks = []
    frame_diff = primary[1].frame - primary[0].frame
    start_frame = primary[obs_length-1].frame
    first_frame = primary[obs_length-1].frame + frame_diff

    fps = 20
    sampling_rate = fps / 2.5

    ## orca_params = [nDist, nReact, radius]

    ## Parameters              freq          nD        obD       nR        oR     rad       max.spd
    sim = rvo2.PyRVOSimulator(1 / fps, orca_params[0], 10, orca_params[1], 5, orca_params[2], 1.5)

    # initialize
    trajectories, _, goals, speed = init_states(input_paths, sim, start_frame, dest_dict, dest_type)

    num_ped = len(speed)
    count = 0
    end_range = 0.05
    ##Simulate a scene
    while count < sampling_rate * pred_length + 1:
        count += 1
        sim.doStep()
        for i in range(num_ped):
            if count == 1:
                trajectories[i].pop(0)
            position = sim.getAgentPosition(i)

            if count % sampling_rate == 0:
                trajectories[i].append(position)

            # check if this agent reaches the goal
            if np.linalg.norm(np.array(position) - np.array(goals[i])) < end_range:
                sim.setAgentPrefVelocity(i, (0, 0))

            else:
                # Move towards goal
                velocity = np.array((goals[i][0] - position[0], goals[i][1] - position[1]))
                curr_speed = np.linalg.norm(velocity)
                pref_vel = speed[i] * velocity / curr_speed if curr_speed > speed[i] else velocity
                sim.setAgentPrefVelocity(i, tuple(pref_vel.tolist()))


    states = np.array(trajectories).transpose(1, 0, 2)

    # predictions
    primary_track = states[:, 0, 0:2]
    neighbours_tracks = states[:, 1:, 0:2]

    ## Primary Prediction Only
    if not predict_all:
        neighbours_tracks = []

    # Unimodal Prediction
    multimodal_outputs[0] = primary_track, neighbours_tracks
    return multimodal_outputs
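Note: dest_state above extrapolates a destination linearly with scipy's interp1d. A self-contained sketch of that step on hypothetical data:

import numpy as np
from scipy.interpolate import interp1d

# hypothetical observed track: x and y per frame
xs = [0.0, 0.4, 0.8, 1.2]
ys = [0.0, 0.1, 0.2, 0.3]
time = list(range(len(xs)))

f = interp1d(x=time, y=[xs, ys], fill_value='extrapolate')
d_x, d_y = f(time[-1] + 12)  # linearly extrapolate 12 frames ahead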
Example #19
import rvo2
import numpy as np
import matplotlib.pyplot as plt

sim = rvo2.PyRVOSimulator(1 / 100., 1, 5, 1.5, 1.5, 0.5, 5)

# Pass either just the position (the other parameters then use
# the default values passed to the PyRVOSimulator constructor),
# or pass all available parameters.
a0 = sim.addAgent((0, 0))
a1 = sim.addAgent((1, 0))
a2 = sim.addAgent((1, 1))
a3 = sim.addAgent((0, 1))

# Obstacles are also supported.
o1 = sim.addObstacle([(0.1, 0.1), (-0.1, 0.1), (-0.1, -0.1), (0.5, 0.5),
                      (0.3, 0.3), (0.2, 0.2), (0.2, 0.5)])
sim.processObstacles()

sim.setAgentPrefVelocity(a0, (5, 5))
sim.setAgentPrefVelocity(a1, (-5, 5))
sim.setAgentPrefVelocity(a2, (-5, -5))
sim.setAgentPrefVelocity(a3, (5, -5))

print('Simulation has %i agents and %i obstacle vertices in it.' %
      (sim.getNumAgents(), sim.getNumObstacleVertices()))

print('Running simulation')

for step in range(80):
    sim.doStep()
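Note: the loop above runs 80 steps without reading anything back; a small follow-up to inspect the final agent states:

for a in (a0, a1, a2, a3):
    print('agent %d: position %s, velocity %s' %
          (a, sim.getAgentPosition(a), sim.getAgentVelocity(a)))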
Example #20
			counter += 1

	rect = 0.0, 100.0, 400.0, 150.0
	gameDisplay.fill(white)
	pygame.draw.line(gameDisplay, (0,0,255), center_origin((200.0, 31.0)), center_origin((-200.0, 31.0)), 1)
	pygame.draw.line(gameDisplay, (0,0,255), center_origin((200.0, 27.4)), center_origin((-200.0, 27.4)), 1)
	agent(poslist, green)

	pygame.display.update()
	clock.tick()

def center_origin(p):
    return (p[0] + display_width // 2, p[1] + display_height // 2)

#timestep, neighborDist, maxNeighbors, timeHorizon, timeHorizonObst, radius, maxSpeed
sim = rvo2.PyRVOSimulator(time_step, NEIGHBOR_DIST, max_neighbors, time_horizon_obst, time_horizon_obst, radius, max_speed)  # NOTE: time_horizon_obst fills both horizon slots; the agent time horizon was likely intended for the fourth argument

# Pass either just the position (the other parameters then use
# the default values passed to the PyRVOSimulator constructor),
# or pass all available parameters.

#Initialize initial parameters

for i in range(num_agents):
	sim.setAgentDefaults(neighbor_distance, num_neighbors, time_horizon_ORCA , time_horizon_obst_ORCA , radius_ORCA , max_speed_ORCA)

#Network input: own_positon, own_velocity, relative_neighbor_positions, relative_neighbor_velocities, relative_obstacle_position, distance_to_goal, done
#Network output: Velocity

#NETWORK
class critic(tf.keras.Model):
Example #21
    def __init__(self,
                 numAgents=50,
                 scenario="crowd",
                 online_actions=None,
                 visualize=True):

        # ORCA config
        self.timeStep = 1 / 60.
        self.neighborDist = 5
        self.maxNeighbors = 10
        self.timeHorizon = 1.5
        self.radius = 0.5
        self.maxSpeed = 1

        self.sim = rvo2.PyRVOSimulator(timeStep=self.timeStep,
                                       neighborDist=self.neighborDist,
                                       maxNeighbors=self.maxNeighbors,
                                       timeHorizon=self.timeHorizon,
                                       timeHorizonObst=self.timeHorizon,
                                       radius=self.radius,
                                       maxSpeed=self.maxSpeed)

        # ALAN config
        self.default_online_actions = [(1, 0), (0.70711, 0.70711), (0, 1),
                                       (-0.70711, 0.70711), (-1, 0),
                                       (-0.70711, -0.70711), (0, -1),
                                       (0.70711, -0.70711)]

        # Use the provided online actions if any, otherwise the defaults
        if online_actions is None:
            self.online_actions = self.default_online_actions
        else:
            self.online_actions = online_actions

        self.gamma = 0.6
        self.timewindow = 2
        self.online_temp = 0.2

        # world config
        self.numAgents = numAgents
        self.scenario = scenario

        self.pixelsize = 1000
        self.envsize = self.radius * numAgents

        self.step_count = 0
        self.max_step = int((10 / self.timeStep) * self.numAgents)

        self.agents_done = [0] * self.numAgents
        self.agents_time = [self.max_step * self.timeStep] * self.numAgents

        self._init_world()
        self.visualize = visualize
        if visualize:
            self._init_visworld()
            self.visualize_action_space(self.online_actions)

        # Data collection
        self.min_TTime = 0
        self.TTime = 0

        # Start timer (used for simulation video)
        self.t_start = time.time()
        self.play_speed = 4
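Note: the eight online_actions above are unit vectors at 45-degree spacing; in ALAN-style selection the chosen action becomes an agent's preferred velocity for the next step. A hedged sketch, with random choice standing in for the actual online scoring:

import random

def apply_online_actions(sim, online_actions):
    # one chosen action (a unit direction) per agent becomes its preferred velocity
    for i in range(sim.getNumAgents()):
        sim.setAgentPrefVelocity(i, random.choice(online_actions))
    sim.doStep()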
Example #22
    def _init_rvo(self, neighborDist=1.5, maxNeighbors=5, timeHorizon=1.5, timeHorizonObst=2):
        self.sim = rvo2.PyRVOSimulator(self.dt, neighborDist, maxNeighbors,
                                       timeHorizon, timeHorizonObst, self.r, self.vmax)
Example #23
    def react(self, robot, sensor_suite, visualize=False):

        range_scan = sensor_suite.range_scan
        #puck_scan = sensor_suite.puck_scan

        # We seem to have to create a new simulator object each time because
        # otherwise it would contain the obstacles from the last time step.
        # If there was a 'removeObstacle' method it would be a bit nicer.
        sim = rvo2.PyRVOSimulator(
            1 / 60.,  # Time step
            1.5,  # neighborDist
            5,  # maxNeighbors
            1.5,  # timeHorizon (other agents)
            1.5,  # timeHorizon (obstacles), previously 2
            robot.radius,  # agent radius
            MAX_LINEAR_SPEED)  # agent max speed
        agent = sim.addAgent((0, 0))

        # Add range scan points as obstacles for the RVO simulator
        n = len(range_scan.ranges)
        points = []
        for i in range(0, n):
            rho = range_scan.INNER_RADIUS + range_scan.ranges[i]
            #if not (rho == float('inf') or isnan(rho)):
            theta = range_scan.angles[i]
            points.append((rho * cos(theta), rho * sin(theta)))

        # Add pucks from the puck scan
        #for puck in puck_scan.pucks:
        #    rho = puck.distance
        #    theta = puck.angle
        #    points.append((rho*cos(theta), rho*sin(theta)))

        # Add fake points behind the robot to make it think twice about going
        # backwards.
        #n_fake = 0
        #start_angle = range_scan.ANGLE_MAX
        #stop_angle = range_scan.ANGLE_MIN + 2*pi
        #angle_delta = (stop_angle - start_angle) / (n_fake - 1)
        #for i in range(n_fake):
        #    theta = start_angle + i * angle_delta
        #    rho = 2 * robot.radius
        #    points.append((rho*cos(theta), rho*sin(theta)))
        #    if visualize:
        #        vx,vy = rho*cos(theta), rho*sin(theta)
        #        self.draw_line_from_robot(robot, vx, vy, 0, 0, 255, 1)

        # The scan points will be treated together as a single "negative"
        # obstacle, with vertices specified in CW order.  This requires the
        # following sort.
        points.sort(key=lambda p: -atan2(p[1], p[0]))
        sim.addObstacle(points)
        sim.processObstacles()

        # Get the velocity in the robot reference frame with the clockwise
        # rotation matrix
        cos_theta = cos(robot.body.angle)
        sin_theta = sin(robot.body.angle)
        cur_vx = robot.body.velocity.x * cos_theta + \
                 robot.body.velocity.y * sin_theta
        cur_vy = -robot.body.velocity.x * sin_theta + \
                  robot.body.velocity.y * cos_theta

        # To prevent oscillation we will generally just test the preferred
        # velocities in the immediate neighbourhood (within the pref_vels list)
        # of the preferred velocity chosen last time.
        if self.last_mag < 20:
            # Last time the magnitude of the chosen velocity was very low.
            # Do a full search over the preferred velocities.
            start_index = 0
            stop_index = self.NUMBER_PREF_VELS - 1
        elif self.last_index == 0:
            start_index = 0
            stop_index = 1
        elif self.last_index == len(self.pref_vels) - 1:
            start_index = self.NUMBER_PREF_VELS - 2
            stop_index = self.NUMBER_PREF_VELS - 1
        else:
            # This is the general case.
            start_index = self.last_index - 1
            stop_index = self.last_index + 1

        highest_mag = 0
        chosen_vel = None
        chosen_index = None
        for i in range(start_index, stop_index + 1):
            pref_vel = self.pref_vels[i]

            # Initializing from scratch each time
            sim.setAgentPosition(agent, (0, 0))
            sim.setAgentVelocity(agent, (cur_vx, cur_vy))
            sim.setAgentPrefVelocity(agent, pref_vel)

            for j in range(self.SIM_STEPS):
                sim.doStep()

            (vx, vy) = sim.getAgentVelocity(agent)
            #print "vel: {}, {}".format(vx, vy)
            if visualize:
                self.draw_line_from_robot(robot, vx, vy, 255, 255, 255, 3)

            mag = sqrt(vx * vx + vy * vy)
            if mag > highest_mag:
                highest_mag = mag
                chosen_vel = (vx, vy)
                chosen_index = i

        self.last_index = chosen_index
        self.last_mag = highest_mag
        #print "highest_mag: {}".format(highest_mag)

        #chosen_vel = (avg_vx / len(self.pref_vels),
        #              avg_vy / len(self.pref_vels))

        if visualize and chosen_vel is not None:
            self.draw_line_from_robot(robot, chosen_vel[0], chosen_vel[1], 255,
                                      0, 127, 5)

        #print "MAX_LINEAR_SPEED: {}".format(MAX_LINEAR_SPEED)
        #print "current_vel: {}, {}".format(cur_vx, cur_vy)
        #print "MAG OF current_vel: {}".format(sqrt(cur_vx**2+ cur_vy**2))
        #print "chosen_vel: {}, {}".format(chosen_vel[0], chosen_vel[1])
        #print "MAG OF chosen_vel: {}".format(sqrt(chosen_vel[0]**2+ chosen_vel[1]**2))

        # Now treat (vx, vy) as the goal and apply the simple control law
        twist = Twist()
        if chosen_vel is not None:
            twist.linear = 0.1 * chosen_vel[0]
            twist.angular = 0.02 * chosen_vel[1]
        else:
            print("NO AVAILABLE VELOCITY!")
            #for r in range_scan.ranges:
            #    print(r)
        return twist
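
# A standalone sketch (not part of the original file) of the "negative
# obstacle" trick used above: surround an agent with a ring of scan points,
# sort them clockwise, and register them as a single concave obstacle.
import rvo2
from math import atan2, cos, sin, pi

sim = rvo2.PyRVOSimulator(1 / 60., 1.5, 5, 1.5, 1.5, 0.2, 1.0)
agent = sim.addAgent((0, 0))
ring = [(2.0 * cos(2 * pi * k / 32), 2.0 * sin(2 * pi * k / 32))
        for k in range(32)]
ring.sort(key=lambda p: -atan2(p[1], p[0]))  # CW order marks the inside as free space
sim.addObstacle(ring)
sim.processObstacles()
sim.setAgentPrefVelocity(agent, (1.0, 0.0))
for _ in range(120):
    sim.doStep()
print(sim.getAgentPosition(agent))  # the agent is held inside the ring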
예제 #24
0
import numpy as np
import rvo2
import matplotlib.pyplot as plt
import json
import matplotlib.animation as anim
from matplotlib import patches as mpaths

sim = rvo2.PyRVOSimulator(0.05, 0.8, 5, 1, 0.05, 0.3, 2)

# orderedDict
# if it doesn't see the goal yet, it walks a bit further


class Agent:
    def __init__(self, goals, pretime, speed, position):
        self.agent = sim.addAgent(position)
        self.speed = speed
        sim.setAgentMaxSpeed(self.agent, speed)
        self.goals = goals
        self.current_goal = 0
        self.pretime = pretime
        self.velocity = [(1, 1)]

    def get_position(self):
        return sim.getAgentPosition(self.agent)

    def set_velocity(self, goal):
        velocity_real = np.array(goal) - np.array(
            sim.getAgentPosition(self.agent))
        len_velocity_real = np.linalg.norm(velocity_real)
        # (reconstructed continuation, an assumption - the original example is
        # truncated here: steer at full speed toward the goal, or stop when close)
        if len_velocity_real < 1e-6:
            velocity = (0.0, 0.0)
        else:
            velocity = tuple(velocity_real / len_velocity_real * self.speed)
        sim.setAgentPrefVelocity(self.agent, velocity)
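
# A minimal usage sketch (an assumption; the Agent class's goal-switching
# logic is truncated above): drive two agents toward each other's start.
if __name__ == '__main__':
    agents = [Agent(goals=[(5, 5)], pretime=0, speed=1.5, position=(0, 0)),
              Agent(goals=[(0, 0)], pretime=0, speed=1.5, position=(5, 5))]
    for _ in range(200):
        for a in agents:
            a.set_velocity(a.goals[a.current_goal])
        sim.doStep()
    print([a.get_position() for a in agents])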
예제 #25
0
#!/usr/bin/env python3

from __future__ import print_function
import rvo2

sim = rvo2.PyRVOSimulator(1 / 60., 1.5, 5, 1.5, 0.4, 2)
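# Note (an editorial observation): the 6-argument constructor above, with no
# separate timeHorizonObst, together with the 3-tuple positions below, matches
# the RVO2-3D variant of these bindings rather than the 2D simulator.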

# Pass either just the position (the other parameters then use
# the default values passed to the PyRVOSimulator constructor),
# or pass all available parameters.
a0 = sim.addAgent((0, 0, 0))
a1 = sim.addAgent((1, 0, 0))
a2 = sim.addAgent((1, 1, 1))
a3 = sim.addAgent((0, 1, 2), 1.5, 5, 1.5, 0.4, 2, (0, 0, 0))

sim.setAgentPrefVelocity(a0, (1, 1, 1))
sim.setAgentPrefVelocity(a1, (-1, 1, 1))
sim.setAgentPrefVelocity(a2, (-1, -1, 1))
sim.setAgentPrefVelocity(a3, (1, -1, 1))

print('Simulation has %i agents' % (sim.getNumAgents()))

print('Running simulation')

for step in range(20):
    sim.doStep()

    positions = [
        '(%5.3f, %5.3f, %5.3f)' % sim.getAgentPosition(agent_no)
        for agent_no in (a0, a1, a2, a3)
    ]
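    # (assumed continuation - the example is cut off here; the upstream
    # Python-RVO2 example prints the agent positions at every step)
    print('step=%2i %s' % (step, ' '.join(positions)))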
예제 #26
0
#!/usr/bin/env python

import rvo2
import numpy as np
import matplotlib.pyplot as plt

sim = rvo2.PyRVOSimulator(1 / 30., 0.1, 5, 1, 1, 0.1, 3)

# Pass either just the position (the other parameters then use
# the default values passed to the PyRVOSimulator constructor),
# or pass all available parameters.
a0 = sim.addAgent((1, 1))
a1 = sim.addAgent((-1, -1))
a2 = sim.addAgent((-1, 1))
a3 = sim.addAgent((1, -1))

# Obstacles are also supported.
#o1 = sim.addObstacle([(-0.1, -0.1),(0.1, 1),(-0.1, 1),(0.2, -1),(-0.5, -0.5),(-0.3, -0.1)])
sim.processObstacles()

sim.setAgentPrefVelocity(a0, (-1, -1))
sim.setAgentPrefVelocity(a1, (1, 1))
sim.setAgentPrefVelocity(a2, (1, -1))
sim.setAgentPrefVelocity(a3, (-1, 1))

plt.ion()

print('Simulation has %i agents and %i obstacle vertices in it.' %
      (sim.getNumAgents(), sim.getNumObstacleVertices()))

print('Running simulation')
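
# A minimal continuation sketch (an assumption - the example is cut off here):
# step the simulator and redraw the agent positions interactively.
for step in range(200):
    sim.doStep()
    xs, ys = zip(*[sim.getAgentPosition(a) for a in (a0, a1, a2, a3)])
    plt.cla()
    plt.scatter(xs, ys)
    plt.xlim(-1.5, 1.5)
    plt.ylim(-1.5, 1.5)
    plt.pause(0.01)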
예제 #27
0
"""robot0.robotShapeID = canvas.create_oval(0,0,0,0, state = 'hidden', fill = "#df9f3c")
robot1.robotShapeID = canvas.create_oval(0, 0, 0,0, state = 'hidden', fill = "#df7c18")
robot2.robotShapeID = canvas.create_oval(0, 0, 0,0, state = 'hidden', fill = "#c18080")
robot3.robotShapeID = canvas.create_oval(0, 0, 0,0, state = 'hidden', fill = "#e8bb76")"""
robots = [robot0, robot1, robot2,
          robot3]  # the num refers to the ID of the circle created
global robot_pos
afterID = -1
#turn on robot publishers
robot0.publisher = rospy.Publisher("robot0_kiwi", Kiwi, queue_size=10)
robot1.publisher = rospy.Publisher("robot1_kiwi", Kiwi, queue_size=10)
robot2.publisher = rospy.Publisher("robot2_kiwi", Kiwi, queue_size=10)
robot3.publisher = rospy.Publisher("robot3_kiwi", Kiwi, queue_size=10)

#ORCA
sim = rvo2.PyRVOSimulator(1 / 60., 110, 2, 1.5, 50, 100, 100)

#o1 = sim.addObstacle([(0, 0), (800, 0), (800, -5), (0, -5)])
#o2 = sim.addObstacle([(0, 0), (-5, 0), (-5, 600), (0, 600)])
#sim.processObstacles()

#####END OF GUI SETUP#####
#####
#note2: need to fix the initialization process to consistently run using OpenCV because of the error
#note3: should create a class or struct called robot that contains the simulation location,
#the real location, and whether it has reached its goal
#######


#publish to real world
def step(goalx, goaly):
예제 #28
0
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.patches import Ellipse
import numpy as np
import rvo2

sim = rvo2.PyRVOSimulator(0.07, 1.5, 5, 1, 0.5, 0.55, 2)
#time step - how many seconds per simulation step [0.05/0.1] s
#neighborDist - how far away the agent starts reacting to others to change course [1/1.5] m
#maxNeighbors - the number of agents it plans to pass [5]
#timeHorizon - bounded visibility cone for agent-agent avoidance (the course already changes) [1] s
#timeHorizonObst - agent-obstacle horizon [0.5] s (keep it as small as possible so the agent can pass close to walls), roughly one time step
#ORCA gives local movement only (it will not get around a wall; a global planner has to be added)
#radius - the size of the agent (a circle)
#maxSpeed - the maximum length of the velocity vector
#velocity - the initial velocity [tuple], a unit vector

#queryVisibility - the agent can switch to another waypoint once it becomes visible

#where the agent is and where its goal point is: normalize the difference and pass it to the library; the norm is the max speed

a0 = sim.addAgent((-5, 0))
a1 = sim.addAgent((2, 0))
a2 = sim.addAgent((0, 5))
a3 = sim.addAgent((0, -5))
a4 = sim.addAgent((-1, 3))

o1 = sim.addObstacle([(0.1, -8),(0.1,8)])
o2 = sim.addObstacle([(-0.5,2),(2,2)])
sim.processObstacles()
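
# A short sketch of the preferred-velocity rule described above (a sketch,
# not part of the original snippet; the goals below are arbitrary, and the
# agent handles a0..a4 are the indices 0..4):
goals = [(5, 0), (-5, 0), (0, -5), (0, 5), (1, -3)]  # one goal per agent
for step in range(300):
    for i, goal in enumerate(goals):
        to_goal = np.array(goal) - np.array(sim.getAgentPosition(i))
        dist = np.linalg.norm(to_goal)
        # normalize and scale to the max speed (2); stop once the goal is reached
        pref = (0.0, 0.0) if dist < 0.1 else tuple(to_goal / dist * 2.0)
        sim.setAgentPrefVelocity(i, pref)
    sim.doStep()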
예제 #29
0
import rvo2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as anim
from matplotlib import patches as mpaths

sim = rvo2.PyRVOSimulator(0.1, 1.5, 5, 1.5, 2, .4, 2)

# Pass either just the position (the other parameters then use
# the default values passed to the PyRVOSimulator constructor),
# or pass all available parameters.
a0_start = (0, 0)
a0_goal = (10, 0)
a1_start = (10, 0)
a1_goal = (-10, 0)
a0 = sim.addAgent(a0_start)
a1 = sim.addAgent(a1_start)
#a2 = sim.addAgent((1, 1))
#a3 = sim.addAgent((0, 1), 1.5, 5, 1.5, 2, 0.4, 2, (0, 0))

# Obstacles are also supported.
#o1 = sim.addObstacle([(0.5, 0.025), (0.5, -0.025), (0.65, 0.025), (0.65, -0.025)])
#sim.processObstacles()

sim.setAgentPrefVelocity(a0, a0_goal)
sim.setAgentPrefVelocity(a1, a1_goal)
#sim.setAgentPrefVelocity(a2, (-1, -1))
#sim.setAgentPrefVelocity(a3, (1, -1))

print('Simulation has %i agents and %i obstacle vertices in it.' %
      (sim.getNumAgents(), sim.getNumObstacleVertices()))
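
# A minimal continuation sketch (an assumption - the example is cut off here):
# run the simulation and plot the two trajectories.
# (note: passing the goal coordinates as preferred velocities above only points
# the right way because of the symmetric start positions on the x-axis)
paths = {a0: [], a1: []}
for step in range(400):
    sim.doStep()
    for a in (a0, a1):
        paths[a].append(sim.getAgentPosition(a))
plt.plot(*zip(*paths[a0]), label='a0')
plt.plot(*zip(*paths[a1]), label='a1')
plt.legend()
plt.show()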
예제 #30
0
def generate_orca_trajectory(sim_scene,
                             num_ped,
                             min_dist=3,
                             react_time=1.5,
                             end_range=1.0):
    """ Simulating Scenario using ORCA """
    ## Default: (1 / 60., 1.5, 5, 1.5, 2, 0.4, 2)
    sampling_rate = 1

    ##Initialize simulators & scenes
    if sim_scene == 'two_ped':
        sim = rvo2.PyRVOSimulator(1 / 2.5, min_dist, 10, react_time, 2, 0.4, 2)
        trajectories, _, goals, speed = overfit_initialize(num_ped, sim)

    ## Circle Overfit
    elif sim_scene == 'circle_overfit':
        sim = rvo2.PyRVOSimulator(1 / 2.5, 2, 10, 2, 2, 0.4, 1.2)
        trajectories, _, goals, speed = overfit_initialize_circle(num_ped, sim)

    ## Circle Crossing
    elif sim_scene == 'circle_crossing':
        fps = 20
        sampling_rate = fps / 2.5
        sim = rvo2.PyRVOSimulator(1 / fps, 10, 10, 5, 5, 0.3, 1)
        # sim = rvo2.PyRVOSimulator(1/fps, 4, 10, 4, 5, 0.6, 1.5) ## (TrajNet++)
        trajectories, _, goals, speed = generate_circle_crossing(num_ped, sim)

    ## Square Crossing
    elif sim_scene == 'square_crossing':
        fps = 5
        sampling_rate = fps / 2.5
        sim = rvo2.PyRVOSimulator(1 / fps, 10, 10, 5, 5, 0.3, 1)
        trajectories, _, goals, speed = generate_square_crossing(num_ped, sim)

    else:
        raise NotImplementedError

    # run
    done = False
    reaching_goal_by_ped = [False] * num_ped
    count = 0

    ##Simulate a scene
    while not done and count < 6000:
        sim.doStep()
        reaching_goal = []
        for i in range(num_ped):
            if count == 0:
                trajectories[i].pop(0)
            position = sim.getAgentPosition(i)

            ## Append only if Goal not reached
            if not reaching_goal_by_ped[i]:
                if count % sampling_rate == 0:
                    trajectories[i].append(position)

            # check if this agent reaches the goal
            if np.linalg.norm(np.array(position) -
                              np.array(goals[i])) < end_range:
                reaching_goal.append(True)
                sim.setAgentPrefVelocity(i, (0, 0))
                reaching_goal_by_ped[i] = True
            else:
                reaching_goal.append(False)
                velocity = np.array(
                    (goals[i][0] - position[0], goals[i][1] - position[1]))
                # use a distinct name so the `speed` returned by the
                # initializers above is not shadowed
                dist = np.linalg.norm(velocity)
                pref_vel = velocity / dist if dist > 1 else velocity
                sim.setAgentPrefVelocity(i, tuple(pref_vel.tolist()))
        count += 1
        done = all(reaching_goal)

    return trajectories, count
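
# A hypothetical usage sketch (an assumption; the overfit_initialize*/generate_*
# helpers and the imports live elsewhere in this project):
if __name__ == '__main__':
    trajectories, steps = generate_orca_trajectory('circle_crossing', num_ped=5)
    print('finished after %d simulation steps' % steps)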