Example 1
    def test_exception_handling(self):
        logger = setup_logging(level=logging.DEBUG)

        # Degenerate input: a straight three-point path with infinite velocities.
        velocity = np.array([np.inf, np.inf, np.inf])
        path = np.array([[0, 0], [0, 1], [0, 2]])
        trajectory = VelocityTrajectory(path, velocity)

        smoother = VelocitySmoother()
        smoother.load_trajectory(trajectory)
        sol, X, V = smoother.smooth_velocity(trajectory.pathlength,
                                             trajectory.velocity)

        # When smoothing fails on such input, the original velocities
        # should be handed back unchanged.
        assert all(trajectory.velocity == V)
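The test above is an excerpt from a larger test module, so its imports are not shown. A minimal sketch of the imports it appears to rely on, with module paths inferred from the other examples in this listing (verify against your igp2 version):

import logging

import numpy as np

# setup_logging and VelocityTrajectory are exposed at the package top level
# (Example 3 refers to them as ip.setup_logging / ip.VelocityTrajectory);
# the VelocitySmoother path is taken from Example 2's imports.
from igp2 import setup_logging, VelocityTrajectory
from igp2.velocitysmoother import VelocitySmoother

With these in place, the method checks that a trajectory whose velocities are all infinite is returned unchanged by the smoother.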
Example 2
import logging

import numpy as np

from igp2 import setup_logging
from igp2.data.data_loaders import InDDataLoader  # module path may differ between igp2 versions
from igp2.goal import PointGoal
from igp2.opendrive.map import Map
from igp2.opendrive.plot_map import plot_map
from igp2.planlibrary.maneuver import Maneuver
from igp2.recognition.astar import AStar
from igp2.recognition.goalprobabilities import GoalsProbabilities
from igp2.recognition.goalrecognition import GoalRecognition
from igp2.velocitysmoother import VelocitySmoother

# This script showcases how to run goal recognition on the InD Dataset

SCENARIO = "round"
PLOT = True

if __name__ == '__main__':
    setup_logging()
    logger = logging.getLogger(__name__)
    colors = "brgyk"
    # Parse the OpenDRIVE road layout and load the matching InD episode data.
    scenario_map = Map.parse_from_opendrive(f"scenarios/maps/{SCENARIO}.xodr")
    data_loader = InDDataLoader(f"scenarios/configs/{SCENARIO}.json",
                                ["valid"])
    data_loader.load()

    # Wrap each goal location from the scenario config in a PointGoal
    # with a 1.5 m threshold around its centre.
    goals = np.array(data_loader.scenario.config.goals)
    goals = [PointGoal(center, 1.5) for center in goals]

    cost_factors = {
        "time": 0.001,
        "velocity": 0.0,
        "acceleration": 0.0,
        "jerk": 0.,
        "heading": 10,
Example 3
                               n=10,
                               amax_m_s2=5,
                               lambda_acc=10)
goal_recognition = ip.GoalRecognition(astar=astar,
                                      smoother=smoother,
                                      scenario_map=scenario_map,
                                      cost=cost,
                                      reward_as_difference=True,
                                      n_trajectories=2)
mcts = ip.MCTS(scenario_map,
               n_simulations=5,
               max_depth=7,
               store_results='final')

if __name__ == '__main__':
    logger = ip.setup_logging()  # keep the returned logger; it is used below
    # Fix the RNG seeds so repeated runs produce identical rollouts.
    seed = 0
    np.random.seed(seed)
    random.seed(seed)
    try:
        # Reuse cached goal predictions from a previous run if available.
        goal_probabilities = pickle.load(open("preds.p", "rb"))
    except (FileNotFoundError, EOFError):
        # No cache: run goal recognition for every agent in the current frame.
        # goal_probabilities is assumed to already hold one GoalsProbabilities
        # object per agent id (see the initialisation sketch after Example 4).
        for agent_id in frame:
            logger.info(f"Running prediction for Agent {agent_id}")
            goal_recognition.update_goals_probabilities(
                goal_probabilities[agent_id],
                ip.VelocityTrajectory.from_agent_state(frame[agent_id]),
                agent_id, frame, frame, None)
        pickle.dump(goal_probabilities, open("preds.p", "wb"))

    mcts.search(ego_id, goals[ego_goal_id], frame,
Example 4
cost = Cost(factors=cost_factors)
smoother = VelocitySmoother(vmin_m_s=1,
                            vmax_m_s=10,
                            n=10,
                            amax_m_s2=5,
                            lambda_acc=10)
goal_recognition = GoalRecognition(astar=astar,
                                   smoother=smoother,
                                   scenario_map=scenario_map,
                                   cost=cost,
                                   reward_as_difference=True,
                                   n_trajectories=2)
#mcts = MCTS(scenario_map)

if __name__ == '__main__':
    logger = setup_logging(level=logging.INFO)

    try:
        # Reuse cached goal predictions from a previous run if available.
        goal_probabilities = pickle.load(open("preds.py", "rb"))
    except (FileNotFoundError, EOFError):
        # No cache: run goal recognition for every agent in the current frame.
        # goal_probabilities is assumed to already hold one GoalsProbabilities
        # object per agent id (see the initialisation sketch below).
        for agent_id in frame:
            logger.info(f"Running prediction for Agent {agent_id}")
            goal_recognition.update_goals_probabilities(
                goal_probabilities[agent_id],
                VelocityTrajectory.from_agent_state(frame[agent_id]),
                agent_id, frame, frame, None)
        pickle.dump(goal_probabilities, open("preds.py", "wb"))

    ego_id = 2
    simulator = Simulator(ego_id,
                          frame,
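Example 3 and Example 4 both update goal_probabilities[agent_id] inside their except branches, but neither excerpt shows where that mapping comes from. A minimal sketch of one way to build it, assuming GoalsProbabilities can be constructed from the list of candidate goals (its constructor arguments may differ between igp2 versions):

# Hypothetical initialisation: one GoalsProbabilities object per agent in the
# current frame, each tracking the same list of candidate goals.
goal_probabilities = {agent_id: GoalsProbabilities(goals)
                      for agent_id in frame}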