def render_video(self, config_idx, folder):
    """Render a single benchmark configuration and export it as a video into *folder*."""
    # Fixed-center viewer: world bounds keep the whole scene visible while
    # enforce_x_length pins the horizontal extent to 100 m around x=375.
    scene_viewer = MPViewer(
        params=ParameterServer(),
        center=[375, 0],
        enforce_x_length=True,
        x_length=100.0,
        use_world_bounds=True)
    exporter = VideoRenderer(renderer=scene_viewer, world_step_time=0.2)
    # Visualize only the requested configuration, sped up 10x.
    super().visualize(
        viewer=exporter,
        configs_idx_list=[config_idx],
        real_time_factor=10,
        fontsize=6)
    exporter.export_video(
        filename=os.path.join(folder, "video"), remove_image_dir=True)
def test_python_behavior_model(self):
    """Step a world containing one python and one C++ behavior model side by side."""
    # -- world definition ---------------------------------------------------
    scenario_param_file = "macro_actions_test.json"  # must be within examples params folder
    params = ParameterServer(filename=os.path.join(
        os.path.dirname(__file__), "params/", scenario_param_file))
    world = World(params)

    # Two behavior models: one implemented in python, one standard C++ model.
    behavior_model = PythonDistanceBehavior(params)
    execution_model = ExecutionModelInterpolate(params)
    dynamic_model = SingleTrackModel(params)

    behavior_model2 = BehaviorConstantAcceleration(params)
    execution_model2 = ExecutionModelInterpolate(params)
    dynamic_model2 = SingleTrackModel(params)

    # Map interface loaded with a simple one-road, two-lane testing map.
    map_interface = MapInterface()
    xodr_map = MakeXodrMapOneRoadTwoLanes()
    map_interface.SetOpenDriveMap(xodr_map)
    world.SetMap(map_interface)

    # Agent shape and initial state.
    agent_2d_shape = CarRectangle()
    init_state = np.array([0, 3, -5.25, 0, 20])

    # Goal definition: stay within frenet limits around the lane center line.
    center_line = Line2d()
    center_line.AddPoint(Point2d(0.0, -1.75))
    center_line.AddPoint(Point2d(100.0, -1.75))
    max_lateral_dist = (0.4, 0.5)
    max_orientation_diff = (0.08, 0.1)
    velocity_range = (5.0, 20.0)
    goal_definition = GoalDefinitionStateLimitsFrenet(
        center_line, max_lateral_dist, max_orientation_diff, velocity_range)

    # Two agents sharing shape, params child and goal, but with the
    # different behavior models defined above.
    agent_params = params.AddChild("agent1")
    agent = Agent(init_state, behavior_model, dynamic_model, execution_model,
                  agent_2d_shape, agent_params, goal_definition, map_interface)
    world.AddAgent(agent)

    init_state2 = np.array([0, 25, -5.25, 0, 15])
    agent2 = Agent(init_state2, behavior_model2, dynamic_model2,
                   execution_model2, agent_2d_shape, agent_params,
                   goal_definition, map_interface)
    world.AddAgent(agent2)

    # -- world simulation ---------------------------------------------------
    viewer = MPViewer(params=params, use_world_bounds=True)
    sim_step_time = params["simulation"]["step_time",
                                         "Step-time in simulation", 0.2]
    sim_real_time_factor = params["simulation"][
        "real_time_factor", "execution in real-time or faster", 1]

    # Step the world and record every frame into the video renderer.
    video_renderer = VideoRenderer(renderer=viewer,
                                   world_step_time=sim_step_time)
    for _ in range(0, 20):
        world.Step(sim_step_time)
        viewer.clear()
        video_renderer.drawWorld(world)
        video_renderer.drawGoalDefinition(goal_definition, "red", 0.5, "red")
        time.sleep(sim_step_time / sim_real_time_factor)
    video_renderer.export_video(filename="./test_video_intermediate",
                                remove_image_dir=True)
def main():
    """Run a non-agent belief-observer evaluation over a benchmark database.

    Loads the experiment parameter files from ``args.output_dir``, builds a
    belief-observer environment over a serialized scenario database, steps it
    with a fixed action, and writes the raw and post-processed belief traces
    (pickled DataFrames) plus a rendered video to the beliefs_data directory.
    """
    print("Experiment server at :", os.getcwd())
    args = configure_args()

    # -- load experiment params ---------------------------------------------
    exp_dir = args.output_dir
    params_filename = glob.glob(os.path.join(exp_dir, "params_[!behavior]*"))
    params = ParameterServer(filename=params_filename[0])
    params.load(fn=params_filename[0])
    params["ML"]["BaseAgent"]["SummaryPath"] = os.path.join(
        exp_dir, "agent/summaries")
    params["ML"]["BaseAgent"]["CheckpointPath"] = os.path.join(
        exp_dir, "agent/checkpoints")
    splits = 8

    # Behavior params: prefer the experiment's own file, else the default.
    behavior_params_filename = glob.glob(
        os.path.join(exp_dir, "behavior_params*"))
    if behavior_params_filename:
        params_behavior = ParameterServer(filename=behavior_params_filename[0])
    else:
        params_behavior = ParameterServer(
            filename="configuration/params/1D_desired_gap_no_prior.json")
    behavior_space = configure_behavior_space(params_behavior)

    hypothesis_set, hypothesis_params = \
        behavior_space.create_hypothesis_set_fixed_split(split=splits)
    observer = BeliefObserver(params, hypothesis_set, splits=splits)
    behavior = BehaviorDiscreteMacroActionsML(params_behavior)
    evaluator = GoalReached(params)
    viewer = MPViewer(params=params, x_range=[-35, 35], y_range=[-35, 35],
                      follow_agent_id=True)

    # -- database creation --------------------------------------------------
    dir_prefix = ""
    dbs = DatabaseSerializer(test_scenarios=2, test_world_steps=2,
                             num_serialize_scenarios=10)
    dbs.process(os.path.join(dir_prefix, "configuration/database"),
                filter_sets="**/**/interaction_merging_light_dense_1D.json")
    local_release_filename = dbs.release(version="test")
    db = BenchmarkDatabase(database_root=local_release_filename)
    scenario_generator, _, _ = db.get_scenario_generator(0)

    video_renderer = VideoRenderer(renderer=viewer, world_step_time=0.2)
    env = HyDiscreteHighway(params=params,
                            scenario_generation=scenario_generator,
                            behavior=behavior,
                            evaluator=evaluator,
                            observer=observer,
                            viewer=video_renderer,
                            render=is_local)

    # -- non-agent evaluation mode ------------------------------------------
    num_steps = 100
    num_samples = params_behavior["BehaviorSpace"]["Hypothesis"][
        "BehaviorHypothesisIDM"]["NumSamples"]
    print("Steps, samples, splits", num_steps, num_samples, splits)

    step = 1
    env.reset()
    threshold = observer.is_enabled_threshold
    discretize = observer.is_discretize

    columns = ["Step", "Action", "Agent", "Beliefs", "HyNum"]
    # Collect rows in plain lists and build each DataFrame once at the end:
    # DataFrame.append was O(n^2) (full copy per call) and was removed in
    # pandas 2.0.
    belief_rows = []
    belief_orig_rows = []
    while step <= num_steps:
        action = 5  # np.random.randint(0, behavior.action_space.n)
        next_state, reward, done, info = env.step(action)
        for agent, beliefs in observer.beliefs.items():
            beliefs = np.asarray(beliefs)
            # Record the untouched beliefs before discretization/thresholding.
            oring = deepcopy(beliefs)
            for i, belief in enumerate(oring):
                belief_orig_rows.append({"Step": step, "Action": action,
                                         "Agent": agent, "Beliefs": belief,
                                         "HyNum": i})
            if discretize:
                beliefs = observer.discretize_beliefs(beliefs)
            if threshold:
                beliefs = observer.threshold_beliefs(beliefs)
            for i, belief in enumerate(beliefs):
                belief_rows.append({"Step": step, "Action": action,
                                    "Agent": agent, "Beliefs": belief,
                                    "HyNum": i})
        step += 1
    beliefs_df = pd.DataFrame(belief_rows, columns=columns)
    beliefs_orig_df = pd.DataFrame(belief_orig_rows, columns=columns)

    # Suffix encodes which post-processing steps were active.
    suffix = "switch"
    if threshold:
        suffix += "_threshold"
    if discretize:
        suffix += "_discretize"

    out_dir = os.path.join(str(Path.home()),
                           "master_thesis/code/hythe-src/beliefs_data/")

    beliefs_data_filename = "beliefs_{}_{}_{}".format(
        splits, num_samples, num_steps)
    beliefs_data_filename += suffix
    print(beliefs_data_filename)
    beliefs_df.to_pickle(os.path.join(out_dir, beliefs_data_filename))

    beliefs_data_filename = "orig_beliefs_{}_{}_{}".format(
        splits, num_samples, num_steps)
    beliefs_data_filename += suffix
    print(beliefs_data_filename)
    beliefs_orig_df.to_pickle(os.path.join(out_dir, beliefs_data_filename))

    video_filename = os.path.join(out_dir, "video_{}".format(num_samples))
    print(video_filename)
    video_filename += suffix
    print(video_filename)
    video_renderer.export_video(filename=video_filename)
    return
def test_uct_single_agent(self):
    """Run the UCT single-agent macro-action planner against a standing agent.

    Skips (with a hint) when the optional planner target was not built.
    """
    # The planner is an optional build target. Catch only ImportError: the
    # original bare `except:` also swallowed SystemExit/KeyboardInterrupt and
    # any unrelated error raised while importing the module.
    try:
        from bark.core.models.behavior import BehaviorUCTSingleAgentMacroActions
    except ImportError:
        print("Rerun with --define planner_uct=true")
        return

    # -- world definition ---------------------------------------------------
    scenario_param_file = "macro_actions_test.json"  # must be within examples params folder
    params = ParameterServer(filename=os.path.join(
        os.path.dirname(__file__), "params/", scenario_param_file))
    world = World(params)

    # Model definitions: planner under test plus a constant-acceleration
    # C++ reference model for the second agent.
    behavior_model = BehaviorUCTSingleAgentMacroActions(params)
    execution_model = ExecutionModelInterpolate(params)
    dynamic_model = SingleTrackModel(params)

    behavior_model2 = BehaviorConstantAcceleration(params)
    execution_model2 = ExecutionModelInterpolate(params)
    dynamic_model2 = SingleTrackModel(params)

    # Map definition: simple one-road, two-lane testing map.
    map_interface = MapInterface()
    xodr_map = MakeXodrMapOneRoadTwoLanes()
    map_interface.SetOpenDriveMap(xodr_map)
    world.SetMap(map_interface)

    agent_2d_shape = CarRectangle()
    init_state = np.array([0, 3, -5.25, 0, 20])
    agent_params = params.AddChild("agent1")

    # Goal definition: stay within frenet limits around the lane center line.
    center_line = Line2d()
    center_line.AddPoint(Point2d(0.0, -1.75))
    center_line.AddPoint(Point2d(100.0, -1.75))
    max_lateral_dist = (0.4, 0.5)
    max_orientation_diff = (0.08, 0.1)
    velocity_range = (5.0, 20.0)
    goal_definition = GoalDefinitionStateLimitsFrenet(
        center_line, max_lateral_dist, max_orientation_diff, velocity_range)

    agent = Agent(init_state, behavior_model, dynamic_model, execution_model,
                  agent_2d_shape, agent_params, goal_definition, map_interface)
    world.AddAgent(agent)

    # Second agent starts 22 m ahead with zero velocity.
    init_state2 = np.array([0, 25, -5.25, 0, 0])
    agent2 = Agent(init_state2, behavior_model2, dynamic_model2,
                   execution_model2, agent_2d_shape, agent_params,
                   goal_definition, map_interface)
    world.AddAgent(agent2)

    # -- world simulation ---------------------------------------------------
    viewer = MPViewer(params=params, use_world_bounds=True)
    sim_step_time = params["simulation"]["step_time",
                                         "Step-time in simulation", 0.2]
    sim_real_time_factor = params["simulation"][
        "real_time_factor", "execution in real-time or faster", 1]

    video_renderer = VideoRenderer(renderer=viewer,
                                   world_step_time=sim_step_time)
    for _ in range(0, 5):
        world.Step(sim_step_time)
        viewer.clear()
        video_renderer.drawWorld(world)
        video_renderer.drawGoalDefinition(goal_definition)
        time.sleep(sim_step_time / sim_real_time_factor)
    video_renderer.export_video(filename="./test_video_intermediate",
                                remove_image_dir=True)