Example 1
0
def run_configuration(argv):
    """Train or visualize a SAC agent on the continuous merging blueprint."""
    # params = ParameterServer(filename="examples/example_params/tfa_params.json")
    params = ParameterServer()

    # NOTE: Modify these paths in order to save the checkpoints and summaries
    checkpoint_path = "/Users/hart/Development/bark-ml/checkpoints/"
    summary_path = "/Users/hart/Development/bark-ml/summaries/"
    params["ML"]["BehaviorTFAAgents"]["CheckpointPath"] = checkpoint_path
    params["ML"]["TFARunner"]["SummaryPath"] = summary_path
    params["World"]["remove_agents_out_of_map"] = True

    # Merging scenario; no rendering while training.
    # NOTE(review): "number_of_senarios" presumably mirrors the blueprint's
    # (misspelled) keyword in this bark-ml version; other examples in this
    # file spell it "num_scenarios" -- confirm against the installed API.
    bp = ContinuousMergingBlueprint(
        params, number_of_senarios=2500, random_seed=0)
    env = SingleAgentRuntime(blueprint=bp, render=False)

    # A PPO agent could be swapped in instead:
    # ppo_agent = BehaviorPPOAgent(environment=env, params=params)
    # env.ml_behavior = ppo_agent
    # runner = PPORunner(params=params, environment=env, agent=ppo_agent)

    # SAC agent controls the ego vehicle.
    sac_agent = BehaviorSACAgent(environment=env, params=params)
    env.ml_behavior = sac_agent
    runner = SACRunner(params=params, environment=env, agent=sac_agent)

    if FLAGS.mode == "train":
        runner.SetupSummaryWriter()
        runner.Train()
    elif FLAGS.mode == "visualize":
        runner.Visualize(5)
Example 2
0
def prepare_agent(agent, params, env):
  """Attach *agent* to *env*, collect one driver run of experience, and
  apply a single training update on a sampled batch."""
  env.ml_behavior = agent
  runner = SACRunner(params=params, environment=env, agent=agent)

  # Create the dataset iterator before collection, then pull one batch.
  batch_iter = iter(agent._dataset)
  runner._collection_driver.run()
  experience, _ = next(batch_iter)
  agent._agent.train(experience)
Example 3
0
def run_rl_example(env, agent, params, mode="visualize"):
  """Run *agent* in *env* via a SACRunner.

  mode: "train" (sets up the summary writer first), "visualize"
  (5 episodes) or "evaluate"; any other value does nothing.
  """
  env.ml_behavior = agent
  runner = SACRunner(params=params, environment=env, agent=agent)

  if mode == "train":
    runner.SetupSummaryWriter()
    runner.Train()
    return
  if mode == "visualize":
    runner.Visualize(5)
  elif mode == "evaluate":
    runner.Evaluate()
Example 4
0
def run_rl_example(env, agent, params, mode="visualize"):
  """Run *agent* in *env* via a SACRunner.

  mode: "train" trains with summaries; "visualize" and "evaluate" both
  execute 10 episodes and differ only in rendering.
  """
  env.ml_behavior = agent
  runner = SACRunner(params=params, environment=env, agent=agent)

  if mode == "train":
    runner.SetupSummaryWriter()
    runner.Train()
  elif mode in ("visualize", "evaluate"):
    # Render only in visualize mode.
    runner.Run(num_episodes=10, render=(mode == "visualize"))
    
Example 5
0
def run_configuration(argv):
    """Train, visualize or evaluate a graph-observer SAC agent on the
    continuous highway blueprint (spektral GNN parameters)."""
    # Pick the parameter file matching the GNN backend you want to use.
    # File with standard parameters for tf2_gnn use:
    # param_filename = "examples/example_params/tfa_sac_gnn_tf2_gnn_default.json"
    # File with standard parameters for spektral use (active):
    param_filename = "examples/example_params/tfa_sac_gnn_spektral_default.json"
    params = ParameterServer(filename=param_filename)

    # NOTE: Modify these paths to specify your preferred path for checkpoints
    # and summaries:
    # params["ML"]["BehaviorTFAAgents"]["CheckpointPath"] = "YOUR_PATH"
    # params["ML"]["TFARunner"]["SummaryPath"] = "YOUR_PATH"

    # Optional live rendering / video export:
    # viewer = MPViewer(params=params, x_range=[-35, 35], y_range=[-35, 35],
    #                   follow_agent_id=True)
    # viewer = VideoRenderer(renderer=viewer, world_step_time=0.2,
    #                        fig_path="/your_path_here/training/video/")

    # Highway scenario observed as a graph.
    # NOTE(review): "number_of_senarios" presumably mirrors the blueprint's
    # (misspelled) keyword in this bark-ml version; other examples in this
    # file spell it "num_scenarios" -- confirm against the installed API.
    bp = ContinuousHighwayBlueprint(
        params, number_of_senarios=2500, random_seed=0)
    observer = GraphObserver(params=params)
    env = SingleAgentRuntime(blueprint=bp, observer=observer, render=False)

    # GNN-based SAC agent controls the ego vehicle.
    sac_agent = BehaviorGraphSACAgent(
        environment=env, observer=observer, params=params)
    env.ml_behavior = sac_agent
    runner = SACRunner(params=params, environment=env, agent=sac_agent)

    if FLAGS.mode == "train":
        runner.SetupSummaryWriter()
        runner.Train()
    elif FLAGS.mode == "visualize":
        runner.Visualize(5)
    elif FLAGS.mode == "evaluate":
        runner.Evaluate()
Example 6
0
def run_configuration(argv):
    """Train, visualize or evaluate a SAC agent on the merging scenario.

    Mode is selected via the command-line flag FLAGS.mode:
    "train" (with TF summaries), "visualize" (50 rendered episodes) or
    "evaluate" (100 episodes without rendering).
    """
    params = ParameterServer(
        filename="examples/example_params/tfa_params.json")
    # params = ParameterServer()
    # NOTE: Modify these paths in order to save the checkpoints and summaries
    # params["ML"]["BehaviorTFAAgents"]["CheckpointPath"] = "/Users/hart/Development/bark-ml/checkpoints_merging_nn/"
    # params["ML"]["TFARunner"]["SummaryPath"] = "/Users/hart/Development/bark-ml/checkpoints_merging_nn/"

    # Semi-transparent rendering for both other and controlled agents.
    # (The original set "Controlled" twice in a row -- duplicate removed.)
    params["Visualization"]["Agents"]["Alpha"]["Other"] = 0.2
    params["Visualization"]["Agents"]["Alpha"]["Controlled"] = 0.2
    params["ML"]["VisualizeCfWorlds"] = False
    params["ML"]["VisualizeCfHeatmap"] = True
    params["World"]["remove_agents_out_of_map"] = False

    viewer = MPViewer(params=params,
                      x_range=[-35, 35],
                      y_range=[-35, 35],
                      follow_agent_id=True)

    # create environment
    bp = ContinuousMergingBlueprint(params, num_scenarios=10000, random_seed=0)
    env = SingleAgentRuntime(blueprint=bp, render=False, viewer=viewer)

    # A PPO agent could be swapped in instead:
    # ppo_agent = BehaviorPPOAgent(environment=env, params=params)
    # env.ml_behavior = ppo_agent
    # runner = PPORunner(params=params, environment=env, agent=ppo_agent)

    # SAC agent controls the ego vehicle.
    sac_agent = BehaviorSACAgent(environment=env, params=params)
    env.ml_behavior = sac_agent
    runner = SACRunner(params=params, environment=env, agent=sac_agent)
    if FLAGS.mode == "train":
        runner.SetupSummaryWriter()
        runner.Train()
    elif FLAGS.mode == "visualize":
        runner.Run(num_episodes=50, render=True)
    elif FLAGS.mode == "evaluate":
        runner.Run(num_episodes=100, render=False)
Example 7
0
def run_configuration(argv):
    """Train, visualize or evaluate an interaction-network SAC agent on
    the continuous merging scenario."""
    params = ParameterServer()

    # NOTE: Modify these paths to specify your preferred path for checkpoints
    # and summaries:
    # params["ML"]["BehaviorTFAAgents"]["CheckpointPath"] = "/Users/hart/Development/bark-ml/checkpoints_merge_spektral_att2/"
    # params["ML"]["TFARunner"]["SummaryPath"] = "/Users/hart/Development/bark-ml/checkpoints_merge_spektral_att2/"

    # Optional live rendering / video export:
    # viewer = MPViewer(params=params, x_range=[-35, 35], y_range=[-35, 35],
    #                   follow_agent_id=True)
    # viewer = VideoRenderer(renderer=viewer, world_step_time=0.2,
    #                        fig_path="/your_path_here/training/video/")

    # Merging scenario observed as a graph.
    bp = ContinuousMergingBlueprint(params, num_scenarios=2500, random_seed=0)
    observer = GraphObserver(params=params)
    env = SingleAgentRuntime(blueprint=bp, observer=observer, render=False)

    # GNN-based SAC agent initialized with an interaction network.
    sac_agent = BehaviorGraphSACAgent(environment=env,
                                      observer=observer,
                                      params=params,
                                      init_gnn='init_interaction_network')
    env.ml_behavior = sac_agent
    runner = SACRunner(params=params, environment=env, agent=sac_agent)

    if FLAGS.mode == "train":
        runner.SetupSummaryWriter()
        runner.Train()
    elif FLAGS.mode == "visualize":
        runner.Run(num_episodes=10, render=True)
    elif FLAGS.mode == "evaluate":
        runner.Run(num_episodes=250, render=False)
Example 8
0
from barkscape.server.base_server import BaseServer
from barkscape.server.base_handler import BaseHandler
from barkscape.server.runners.bark_ml_runner_runner import BARKMLRunnerRunner


def load_exp_runner(file_name):
  """Build an ExperimentRunner from the experiment config at *file_name*
  (no mode, fixed random seed 0)."""
  return ExperimentRunner(json_file=file_name, mode=None, random_seed=0)


if __name__ == "__main__":
  # Load the GNN merging experiment and grab its runtime.
  exp_runner_gnn = load_exp_runner(
    "/Users/hart/Development/bark-ml/experiments/configs/phd/01_hyperparams/gnns/merging_large_embedding.json")
  runtime = exp_runner_gnn._experiment._runtime

  # Replace the runtime's viewer with a buffered one.
  runtime._viewer = BufferedViewer()

  # Wrap the experiment's agent and runtime in a SAC runner.
  runner = SACRunner(params=exp_runner_gnn._params,
                     environment=runtime,
                     agent=exp_runner_gnn._experiment._agent)

  # Hand the runner to the barkscape server and start it.
  bark_server = BaseServer(runner=BARKMLRunnerRunner,
                           runnable_object=runner,
                           logger=logging.getLogger())
  bark_server.Start()
Example 9
0
def run_configuration(argv):
  """Train, visualize or evaluate a graph SAC agent inside a counterfactual
  runtime on the continuous merging scenario.

  Modes (FLAGS.mode):
    "train"     -- train with TF summaries.
    "visualize" -- one rendered episode with the runtime's max collision
                   rate forced to zero.
    "evaluate"  -- sweep the max collision rate over [0, 1) in 0.1 steps
                   (250 episodes each) and pickle the collected traces.
  """
  params = ParameterServer()
  # NOTE: Modify these paths to specify your preferred path for checkpoints and summaries
  # params["ML"]["BehaviorTFAAgents"]["CheckpointPath"] = "/Users/hart/Development/bark-ml/checkpoints/"
  # params["ML"]["TFARunner"]["SummaryPath"] = "/Users/hart/Development/bark-ml/checkpoints/"
  # Semi-transparent rendering for both other and controlled agents.
  params["Visualization"]["Agents"]["Alpha"]["Other"] = 0.2
  params["Visualization"]["Agents"]["Alpha"]["Controlled"] = 0.2
  # NOTE(review): duplicate of the previous line -- likely a paste error.
  params["Visualization"]["Agents"]["Alpha"]["Controlled"] = 0.2
  params["ML"]["VisualizeCfWorlds"] = False
  params["ML"]["VisualizeCfHeatmap"] = False
  # NOTE(review): "evaluate" mode reads params["ML"]["ResultsFolder"] below,
  # but the assignment is commented out -- presumably the ParameterServer
  # supplies a default; verify before running an evaluation.
  # params["ML"]["ResultsFolder"] = "/Users/hart/Development/bark-ml/results/data/"

  # viewer = MPViewer(
  #   params=params,
  #   x_range=[-35, 35],
  #   y_range=[-35, 35],
  #   follow_agent_id=True)


  # create environment
  bp = ContinuousMergingBlueprint(params,
                                  num_scenarios=2500,
                                  random_seed=0)

  observer = GraphObserver(params=params)

  # Pool of constant-acceleration behaviors (a = -5, 0, 5), each configured
  # through its own parameter-server child so the settings don't collide.
  behavior_model_pool = []
  for count, a in enumerate([-5., 0., 5.]):
    local_params = params.AddChild("local_"+str(count))
    local_params["BehaviorConstantAcceleration"]["ConstAcceleration"] = a
    behavior = BehaviorConstantAcceleration(local_params)
    behavior_model_pool.append(behavior)

  # Counterfactual runtime rolls out the behavior pool alongside the agent.
  env = CounterfactualRuntime(
    blueprint=bp,
    observer=observer,
    render=False,
    params=params,
    behavior_model_pool=behavior_model_pool)
  sac_agent = BehaviorGraphSACAgent(environment=env,
                                    observer=observer,
                                    params=params)
  env.ml_behavior = sac_agent
  runner = SACRunner(params=params,
                     environment=env,
                     agent=sac_agent)

  if FLAGS.mode == "train":
    runner.SetupSummaryWriter()
    runner.Train()
  elif FLAGS.mode == "visualize":
    # Force a zero max collision rate for the rendered episode.
    runner._environment._max_col_rate = 0.
    runner.Run(num_episodes=1, render=True)
  elif FLAGS.mode == "evaluate":
    # Sweep the runtime's max collision rate and evaluate at each setting.
    for cr in np.arange(0, 1, 0.1):
      runner._environment._max_col_rate = cr
      runner.Run(num_episodes=250, render=False, max_col_rate=cr)
    # Persist both the runtime's and the runner's trace data.
    runner._environment._tracer.Save(
      params["ML"]["ResultsFolder"] + "evaluation_results_runtime.pckl")
    goal_reached = runner._tracer.success_rate
    runner._tracer.Save(
      params["ML"]["ResultsFolder"] + "evaluation_results_runner.pckl")