Example #1
        sumo_headless=True,
        seed=seed,
        # envision_record_data_replay_path="./data_replay",
    )

    for episode in episodes(n=num_episodes):
        agent = agent_spec.build_agent()
        observations = env.reset()
        episode.record_scenario(env.scenario_log)

        dones = {"__all__": False}
        while not dones["__all__"]:
            agent_obs = observations[AGENT_ID]
            agent_action = agent.act(agent_obs)
            observations, rewards, dones, infos = env.step({AGENT_ID: agent_action})
            episode.record_step(observations, rewards, dones, infos)

    env.close()


if __name__ == "__main__":
    parser = default_argument_parser("single-agent-example")
    args = parser.parse_args()

    main(
        scenarios=args.scenarios,
        headless=args.headless,
        num_episodes=args.episodes,
        seed=args.seed,
    )
Example #2
        seed=seed,
    )

    for episode in episodes(n=num_episodes):
        agent = agent_spec.build_agent()
        observations = env.reset()
        episode.record_scenario(env.scenario_log)

        dones = {"__all__": False}
        while not dones["__all__"]:
            agent_obs = observations[AGENT_ID]
            agent_action = agent.act(agent_obs)
            observations, rewards, dones, infos = env.step(
                {AGENT_ID: agent_action})
            episode.record_step(observations, rewards, dones, infos)

    env.close()


if __name__ == "__main__":
    parser = default_argument_parser("human-in-the-loop-example")
    args = parser.parse_args()

    main(
        scenarios=args.scenarios,
        sim_name=args.sim_name,
        headless=args.headless,
        num_episodes=args.episodes,
        seed=args.seed,
    )
Example #3
    for episode in episodes(n=num_episodes):
        agent = open_agent_spec.build_agent()

        observations = env.reset()
        episode.record_scenario(env.scenario_log)

        dones = {"__all__": False}
        while not dones["__all__"]:
            agent_obs = observations[AGENT_ID]
            agent_action = agent.act(agent_obs)
            observations, rewards, dones, infos = env.step(
                {AGENT_ID: agent_action})
            episode.record_step(observations, rewards, dones, infos)

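        # Explicitly drop the agent so per-episode resources are released
        # before the next build_agent() call.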
        del agent

    env.close()


if __name__ == "__main__":
    parser = default_argument_parser("OpEn-trajectory-optimizer-example")
    args = parser.parse_args()

    main(
        scenarios=args.scenarios,
        sim_name=args.sim_name,
        headless=args.headless,
        num_episodes=args.episodes,
        seed=args.seed,
    )
Example #4

            # required: get traffic_history_provider and set time offset
            traffic_history_provider = smarts.get_provider_by_type(
                TrafficHistoryProvider)
            assert traffic_history_provider
            traffic_history_provider.set_start_time(mission.start_time)

            modified_mission = replace(mission, start_time=0.0)
            scenario.set_ego_missions({agent_id: modified_mission})
            observations = smarts.reset(scenario)

            dones = {agent_id: False}
            while not dones[agent_id]:
                agent_obs = observations[agent_id]
                agent_action = agent.act(agent_obs)

                observations, rewards, dones, infos = smarts.step(
                    {agent_id: agent_action})

    smarts.destroy()


if __name__ == "__main__":
    parser = default_argument_parser("history-vehicles-replacement-example")
    args = parser.parse_args()

    main(
        scenarios=args.scenarios,
        headless=args.headless,
        seed=args.seed,
    )
Example #5

    smarts = SMARTS(
        agent_interfaces={},
        traffic_sim=SumoTrafficSimulation(headless=True, auto_start=True),
        envision=Envision(),
    )
    scenarios_iterator = Scenario.scenario_variations(
        scenarios,
        [],  # no ego agents to brief; we only observe social vehicles
    )

    smarts.reset(next(scenarios_iterator))

    for _ in range(5000):
        smarts.step({})
        smarts.attach_sensors_to_vehicles(
            agent_spec, smarts.vehicle_index.social_vehicle_ids())
        obs, _, _, _ = smarts.observe_from(
            smarts.vehicle_index.social_vehicle_ids())
        # TODO: save observations for imitation learning


if __name__ == "__main__":
    parser = default_argument_parser("observation-collection-example")
    args = parser.parse_args()

    main(
        scenarios=args.scenarios,
        headless=args.headless,
        seed=args.seed,
    )
Example #6
        sumo_headless=True,
        visdom=False,
        seed=seed,
        timestep_sec=0.1,
    )

    if max_episode_steps is None:
        max_episode_steps = 1000

    for episode in episodes(n=num_episodes):
        env.reset()
        episode.record_scenario(env.scenario_log)

        for _ in range(max_episode_steps):
            env.step({})
            episode.record_step({}, {}, {}, {})

    env.close()


if __name__ == "__main__":
    parser = default_argument_parser("egoless-example")
    args = parser.parse_args()

    main(
        scenarios=args.scenarios,
        headless=args.headless,
        num_episodes=args.episodes,
        seed=args.seed,
    )
Example #7
        agents = {
            agent_id: agent_spec.build_agent()
            for agent_id, agent_spec in agent_specs.items()
        }
        observations = env.reset()
        episode.record_scenario(env.scenario_log)

        dones = {"__all__": False}
        while not dones["__all__"]:
            actions = {
                agent_id: agents[agent_id].act(agent_obs)
                for agent_id, agent_obs in observations.items()
            }

            observations, rewards, dones, infos = env.step(actions)
            episode.record_step(observations, rewards, dones, infos)

    env.close()


if __name__ == "__main__":
    parser = default_argument_parser("multi-agent-example")
    args = parser.parse_args()

    main(
        scenarios=args.scenarios,
        headless=args.headless,
        num_episodes=args.episodes,
        seed=args.seed,
    )
Example #8
    seed,
):
    ray.init()
    ray.wait([
        train.remote(
            training_scenarios,
            evaluation_scenarios,
            headless,
            num_episodes,
            seed,
        )
    ])


if __name__ == "__main__":
    parser = default_argument_parser("pytorch-example")
    parser.add_argument(
        "--evaluation-scenario",
        default="scenarios/loop",
        help="The scenario to use for evaluation.",
        type=str,
    )
    args = parser.parse_args()

    main(
        training_scenarios=args.scenarios,
        evaluation_scenarios=[args.evaluation_scenario],
        headless=args.headless,
        num_episodes=args.episodes,
        seed=args.seed,
    )
Example #9
        # envision_record_data_replay_path="./data_replay",
    )

    for episode in episodes(n=num_episodes):
        agent = agent_spec.build_agent()
        observations = env.reset()
        episode.record_scenario(env.scenario_log)

        dones = {"__all__": False}
        while not dones["__all__"]:
            agent_obs = observations[AGENT_ID]
            agent_action = agent.act(agent_obs)
            observations, rewards, dones, infos = env.step(
                {AGENT_ID: agent_action})
            episode.record_step(observations, rewards, dones, infos)

    env.close()


if __name__ == "__main__":
    parser = default_argument_parser("trajectory-tracking-agent-example")
    args = parser.parse_args()

    main(
        scenarios=args.scenarios,
        sim_name=args.sim_name,
        headless=args.headless,
        num_episodes=args.episodes,
        seed=args.seed,
    )
Example #10
    # )
    parser.add_argument("--headless",
                        default=True,
                        action="store_true",
                        help="Turn on headless mode")
    parser.add_argument("--seed",
                        type=int,
                        default=1234,
                        help="Seed for scenario generation")
    # parser.add_argument(
    #     "--log_dir",
    #     default="./log/results",
    #     type=str,
    #     help="Path to store RLlib log and checkpoints, default is ./log/results",
    # )
    # parser.add_argument("--config_file", "-f", type=str, required=True)
    # parser.add_argument("--restore_path", type=str, default=None)
    # parser.add_argument("--num_workers", type=int, default=1, help="RLlib num workers")
    # parser.add_argument("--cluster", action="store_true")
    # parser.add_argument(
    #     "--num_episodes", type=int, default=1000, help="num of episode"
    # )

    return parser.parse_args()


if __name__ == "__main__":
    parser = default_argument_parser("Benchmark server")
    args = parser.parse_args()

    main(scenarios=args.scenarios, headless=args.headless, seed=args.seed)
Example #11

                if episode.sim_time - episode_sim_time_frame_with_visible_object > 0.5:
                    break
            else:
                episode_sim_time_frame_with_visible_object = episode.sim_time

        # remove scenes shorter than ~10 seconds of sim time
        if episode.sim_time - episode_sim_time_epoch < 9.99:
            rm(f"{OUTPUT_DIR}/frames/scene-{scene_idx:04d}/")
            rm(f"{OUTPUT_DIR}/annotations/scene-{scene_idx:04d}_instances_ann.csv")
            rm(f"{OUTPUT_DIR}/ego_poses/scene-{scene_idx:04d}_ego_pose.csv")

        time.sleep(2)
        if scene_idx >= end_scene_idx:
            break

    env.close()


if __name__ == "__main__":
    parser = default_argument_parser("data-collector-agent")
    args = parser.parse_args()

    main(
        scenarios=args.scenarios,
        sim_name=args.sim_name,
        headless=args.headless,
        num_episodes=args.episodes,
        seed=args.seed,
    )