import os

# Import path below is an assumption about the cexp package layout.
from cexp.agents.NPCAgent import NPCAgent

JSONFILE = 'database/sample_benchmark2.json'

environments_dict_base = [
    'WetSunset_route00024',
    'SoftRainSunset_route00000',
    'WetNoon_route00024'
]

params = {
    'save_dataset': True,
    'docker_name': 'carlalatest:latest',
    'gpu': 5,
    'batch_size': 1,
    'remove_wrong_data': False,
    'non_rendering_mode': False,
    'carla_recording': False  # TODO testing
}

agent = NPCAgent()
AGENT_NAME = 'NPCAgent'


# The episodes to be checked must always be sequential.
def check_folder(env_name, number_episodes):
    """
    Check if the folder contains the expected number of episodes
    and if they are complete.
    """
    path = os.path.join(os.environ["SRL_DATASET_PATH"],
                        'sample_benchmark2', env_name)
    # List the folders and check if the count matches the expected one.
    environments_count = 0
    for filename in os.listdir(path):
        # The original fragment is truncated here; counting episode
        # folders and comparing against the expected number is an
        # assumed minimal completion.
        if os.path.isdir(os.path.join(path, filename)):
            environments_count += 1
    return environments_count == number_episodes
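
# Hedged usage sketch: check_folder can decide which environments to skip
# on a re-run. The expected episode count (3) and the variable name
# 'eliminated_environments' are illustrative assumptions, chosen to mirror
# the collect_data signature further below.
eliminated_environments = [env_name for env_name in environments_dict_base
                           if check_folder(env_name, number_episodes=3)]
print("Skipping already complete environments: ", eliminated_environments)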
params = {
    'save_dataset': True,
    'save_sensors': True,
    'save_trajectories': True,
    'save_opponents': True,
    'save_opp_trajectories': False,
    'docker_name': 'carlalatest:latest',
    'gpu': 0,
    'batch_size': 1,
    'remove_wrong_data': False,
    'non_rendering_mode': False,
    'carla_recording': False
}

# The idea is that the agent class should be completely independent.
agent = NPCAgent(sensors_dict=[])

# this could be joined
env_batch = CEXP(json, params=params, execute_all=True,
                 ignore_previous_execution=True, port=arguments.port)
# The experience is built; the files necessary to load CARLA and the
# scenarios are made. Here some docker was set.
env_batch.start()

for env in env_batch:
    try:
        # The policy selected to run this experience vector (the class,
        # basically). This policy can also learn, just by taking the
        # output from the experience.
        # (Loop body completed to mirror the collect_data snippet below.)
        states, rewards = agent.unroll(env)
        agent.reinforce(rewards)
    except KeyboardInterrupt:
        env.stop()
        break
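
# 'arguments.port' is read above but never defined in these fragments; a
# minimal argparse setup along these lines presumably produces it. The
# flag name and the default CARLA port (2000) are assumptions.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--port', type=int, default=2000,
                    help='port where the CARLA docker is exposed')
arguments = parser.parse_args()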
}

# The idea is that the agent class should be completely independent.
agent = NPCAgent(sensors_dict=[{'type': 'sensor.camera.rgb',
                                'x': 2.0, 'y': 0.0, 'z': 1.40,
                                'roll': 0.0, 'pitch': -15.0, 'yaw': 0.0,
                                'width': 800, 'height': 600, 'fov': 100,
                                'id': 'rgb'},
                               {'type': 'sensor.camera.rgb',
                                'x': 2.0, 'y': 0.0, 'z': 15.40,
                                'roll': 0.0, 'pitch': -30.0, 'yaw': 0.0,
                                'width': 800, 'height': 600, 'fov': 120,
                                'id': 'rgb_view'}],
                 noise=True)

# this could be joined (the call is cut off in the fragment; the remaining
# CEXP arguments are assumed to match the other collectors)
env_batch = CEXP(json, params=params, execute_all=True, port=arguments.port)
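
# The sensor dictionaries above and below repeat the same pose and
# resolution fields; a small helper keeps them consistent. This helper is
# an illustrative assumption, not part of the cexp API.
def camera(sensor_type, sensor_id, yaw=0.0, pitch=-15.0, z=1.40,
           width=800, height=600, fov=100):
    return {'type': sensor_type, 'x': 2.0, 'y': 0.0, 'z': z,
            'roll': 0.0, 'pitch': pitch, 'yaw': yaw,
            'width': width, 'height': height, 'fov': fov,
            'id': sensor_id}

# e.g. camera('sensor.camera.rgb', 'rgb_left', yaw=-30.0) reproduces one
# of the entries in the three-camera rig below.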
import logging
import traceback

# Import paths below are an assumption about the cexp package layout.
from cexp.agents.NPCAgent import NPCAgent
from cexp.cexp import CEXP


def collect_data(json_file, params, eliminated_environments, collector_id):
    # The idea is that the agent class should be completely independent.
    # TODO this has to go to a separate file and be merged with the package
    agent = NPCAgent(
        sensors_dict=[{'type': 'sensor.camera.rgb',
                       'x': 2.0, 'y': 0.0, 'z': 1.40,
                       'roll': 0.0, 'pitch': -15.0, 'yaw': 0.0,
                       'width': 800, 'height': 600, 'fov': 100,
                       'id': 'rgb_central'},
                      {'type': 'sensor.camera.semantic_segmentation',
                       'x': 2.0, 'y': 0.0, 'z': 1.40,
                       'roll': 0.0, 'pitch': -15.0, 'yaw': 0.0,
                       'width': 800, 'height': 600, 'fov': 100,
                       'id': 'labels_central'},
                      {'type': 'sensor.camera.rgb',
                       'x': 2.0, 'y': 0.0, 'z': 1.40,
                       'roll': 0.0, 'pitch': -15.0, 'yaw': -30.0,
                       'width': 800, 'height': 600, 'fov': 100,
                       'id': 'rgb_left'},
                      {'type': 'sensor.camera.semantic_segmentation',
                       'x': 2.0, 'y': 0.0, 'z': 1.40,
                       'roll': 0.0, 'pitch': -15.0, 'yaw': -30.0,
                       'width': 800, 'height': 600, 'fov': 100,
                       'id': 'labels_left'},
                      {'type': 'sensor.camera.rgb',
                       'x': 2.0, 'y': 0.0, 'z': 1.40,
                       'roll': 0.0, 'pitch': -15.0, 'yaw': 30.0,
                       'width': 800, 'height': 600, 'fov': 100,
                       'id': 'rgb_right'},
                      {'type': 'sensor.camera.semantic_segmentation',
                       'x': 2.0, 'y': 0.0, 'z': 1.40,
                       'roll': 0.0, 'pitch': -15.0, 'yaw': 30.0,
                       'width': 800, 'height': 600, 'fov': 100,
                       'id': 'labels_right'},
                      {'type': 'sensor.can_bus',
                       'reading_frequency': 25,
                       'id': 'can_bus'},
                      {'type': 'sensor.other.gnss',
                       'x': 0.7, 'y': -0.4, 'z': 1.60,
                       'id': 'GPS'}])
    # this could be joined
    env_batch = CEXP(json_file, params=params, execute_all=True,
                     eliminated_environments=eliminated_environments)
    # The experience is built; the files necessary to load CARLA and the
    # scenarios are made. Here some docker was set.
    NPCAgent._name = 'Multi'
    env_batch.start(agent_name=NPCAgent._name)

    for env in env_batch:
        try:
            # The policy selected to run this experience vector (the
            # class, basically). This policy can also learn, just by
            # taking the output from the experience.
            # I need a mechanism to test the rewards so I can test the
            # policy gradient strategy.
            print(" Collector ", collector_id, " Collecting for ", env)
            states, rewards = agent.unroll(env)
            agent.reinforce(rewards)
        except KeyboardInterrupt:
            env.stop()
            break
        except Exception:
            traceback.print_exc()
            # Just try again.
            agent.reset()
            env.stop()
            print(" ENVIRONMENT BROKE trying again.")

    env_batch.cleanup()


logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)
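
# Hedged sketch of a launcher for several collectors in parallel, one
# process per GPU. collect_data is the function above; the GPU list, the
# JSON file, and the empty elimination lists are illustrative assumptions,
# not the repository's actual multi-GPU entry point.
import multiprocessing

if __name__ == '__main__':
    base_params = {'save_dataset': True,
                   'docker_name': 'carlalatest:latest',
                   'gpu': 0, 'batch_size': 1,
                   'remove_wrong_data': False,
                   'non_rendering_mode': False,
                   'carla_recording': False}
    processes = []
    for collector_id, gpu in enumerate([0, 1]):
        p = multiprocessing.Process(
            target=collect_data,
            args=('database/sample_benchmark2.json',
                  dict(base_params, gpu=gpu),
                  [],  # eliminated_environments
                  collector_id))
        p.start()
        processes.append(p)
    for p in processes:
        p.join()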
# A single loop being made.
json_file = 'database/sample_benchmark.json'

# Dictionary with the necessary params, related to the execution and not
# to the model itself.
params = {
    'save_dataset': False,
    'docker_name': 'carlalatest:latest',
    'gpu': 0,
    'batch_size': 1,
    'remove_wrong_data': False,
    'non_rendering_mode': False,
    'carla_recording': True
}

# TODO for now batch size is one
number_of_iterations = 10

# The idea is that the agent class should be completely independent.
agent = NPCAgent()
env_batch = CEXP(json_file, params, number_of_iterations,
                 params['batch_size'], sequential=True, debug=True)
# The experience is built; the files necessary to load CARLA and the
# scenarios are made. Here some docker was set.
env_batch.start()

for env in env_batch:
    states, rewards = agent.unroll(env)
    # If the agent is already unrolled we can reinforce it with the
    # collected rewards, as in the other collectors.
    agent.reinforce(rewards)
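
# Hedged sketch of the interface the loop above assumes: something with
# unroll(env) -> (states, rewards) and reinforce(rewards). The stub below
# is an assumption for illustration, not NPCAgent's implementation.
class DummyAgent(object):

    def unroll(self, env):
        # Would run this agent's policy on the environment and return the
        # visited states plus the rewards collected along the way.
        return [], []

    def reinforce(self, rewards):
        # A non-learning baseline can make this a no-op.
        pass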
    'batch_size': 1,
    'remove_wrong_data': False,
    'non_rendering_mode': True,
    'carla_recording': False
}

# TODO for now batch size is one
number_of_iterations = 400

# The idea is that the agent class should be completely independent.
agent = NPCAgent(sensors_dict=[{'type': 'sensor.camera.rgb',
                                'x': 2.0, 'y': 0.0, 'z': 1.40,
                                'roll': 0.0, 'pitch': -15.0, 'yaw': 0.0,
                                'width': 1800, 'height': 1200, 'fov': 100,
                                'id': 'rgb_central'}])

# this could be joined
env_batch = CEXP(json, params=params, execute_all=True, port=arguments.port)
# The experience is built; the files necessary to load CARLA and the
# scenarios are made. Here some docker was set.
env_batch.start()

for env in env_batch:
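    # A guarded body, mirroring the collect_data snippet above, keeps a
    # long 400-iteration run going when a single environment breaks.
    try:
        states, rewards = agent.unroll(env)
        agent.reinforce(rewards)
    except KeyboardInterrupt:
        env.stop()
        break
    except Exception:
        traceback.print_exc()
        agent.reset()
        env.stop()
        print(" ENVIRONMENT BROKE trying again.")
env_batch.cleanup()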