def drive(host, port, gpu_number, path, show_screen, resolution, noise_type,
          config_path, type_of_driver, experiment_name, city_name, game,
          drivers_name):
    """Run the main data-collection driving loop against a Carla server.

    Builds the driver (human joystick or trained machine), attaches a
    Noiser and a Recorder, then loops: read sensor data, compute an
    action, optionally inject noise, record the frame, draw the UI, and
    send the (possibly noisy) action back to the simulator.

    NOTE(review): `gpu_number` and `drivers_name` are accepted but unused
    here — kept for interface compatibility with callers.
    """
    print("port:", port)
    use_planner = False
    screen_manager = ScreenManager()

    if game == "Carla":
        from carla_recorder import Recorder
        if type_of_driver == "Human":
            from carla_human import CarlaHuman
            driver = CarlaHuman(
                use_planner,
                'drive_interfaces/carla_interface/' + city_name + '.txt',
                'drive_interfaces/carla_interface/' + city_name + '.png',
                augment_left_right=False)
        else:
            from carla_machine import CarlaMachine
            driver = CarlaMachine(
                "0", experiment_name, use_planner,
                'drive_interfaces/carla_interface/' + city_name + '.txt',
                'drive_interfaces/carla_interface/' + city_name + '.png',
                augment_left_right=False)

    noiser = Noiser(noise_type)
    print(host)
    print(port)
    driver.start(host, port, config_path, resolution)
    if show_screen:
        screen_manager.start_screen(resolution, 3, 2)

    # Output folder: <day>_Carla_<driver type>_<experiment>_<run number>.
    folder_name = (str(datetime.datetime.today().day) + '_' + 'Carla_' +
                   type_of_driver + '_' + experiment_name)
    folder_name += '_' + str(get_latest_file_number(path, folder_name))
    # Note: capture resolution is 400x300, but the network input is 200x88
    # obtained by cropping.
    recorder = Recorder(path + folder_name + '/', 88, 200)

    direction = 2  # -1 is the sentinel that stops the loop
    old_speed = 0  # the speed we start the car with
    iteration = 0
    try:
        while direction != -1:
            capture_time = time.time()
            rewards, image = driver.get_sensor_data()

            # FIX: the original set an unused `done` flag on pygame.QUIT,
            # so closing the window never stopped the loop; now it does.
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    direction = -1
            if direction == -1:
                break

            recording = driver.get_recording()
            # Passing rewards so that finally carla speed == computed speed.
            # Depending on the driver being human or machine, new_speed is
            # the one given by the driver or by the network respectively.
            action, new_speed, human_intervention = driver.compute_action(
                old_speed, rewards, image)
            action_noisy, drifting_time, will_drift = noiser.compute_noise(
                action)
            if recording:
                recorder.record(image, rewards, action, action_noisy,
                                human_intervention)

            if show_screen:
                if game == "Carla":
                    screen_manager.plot_driving_interface(
                        capture_time, np.copy(image), action, action_noisy,
                        recording and (drifting_time == 0.0 or will_drift),
                        drifting_time, will_drift, rewards.speed, new_speed,
                        0, 0, 0, type_of_driver, driver.continous_steer,
                        human_intervention)
                else:
                    dist_to_goal = math.sqrt(
                        (rewards.goal[0] - rewards.position[0]) *
                        (rewards.goal[0] - rewards.position[0]) +
                        (rewards.goal[1] - rewards.position[1]) *
                        (rewards.goal[1] - rewards.position[1]))
                    # presumably a BGR->RGB flip for display — TODO confirm
                    image = image[:, :, ::-1]
                    screen_manager.plot_driving_interface(
                        capture_time, np.copy(image), action, action_noisy,
                        rewards.direction,
                        recording and (drifting_time == 0.0 or will_drift),
                        drifting_time, will_drift, rewards.speed, 0, 0, None,
                        rewards.reseted, driver.get_number_completions(),
                        dist_to_goal, 0)

            iteration += 1
            old_speed = new_speed
            driver.act(action_noisy)
    except KeyboardInterrupt:
        pass  # Ctrl+C is the expected way to stop driving
    except Exception:
        traceback.print_exc()
    finally:
        # driver.write_performance_file(path, folder_name, iteration)
        pygame.quit()
        if type_of_driver == "Machine":
            print("Machine:", driver.machine_driving_count)
            print("Human:", driver.human_driving_count)
            total = driver.machine_driving_count + driver.human_driving_count
            # FIX: guard against ZeroDivisionError when nothing was driven.
            if total > 0:
                autonomy = (float(driver.machine_driving_count) /
                            float(total)) * 100
                print("Autonomy: {0:.2f}%".format(autonomy))
            print("Machine checkpoint score:", driver.checkpoint_score)
            driver.tester.plot_map()
# NOTE(review): orphaned top-level script fragment that was collapsed onto one
# physical line by an extraction/join step. Kept byte-identical because the
# body of the trailing `for i in range(0, 200):` loop is missing from this
# chunk and cannot be reconstructed without guessing. What the visible code
# does: selects HDF5 sequence files 7..14 under a hard-coded SeqTrain path,
# starts a 200x88 ScreenManager, allocates per-frame buffers, then opens each
# data_XXXXX.h5 file read-only and begins iterating its 200 frames.
positions_to_test = range(7, 15) #total hdf5 files path = '../Desktop/VirtualElektraData2_W1/SeqTrain/' screen = ScreenManager() image_queue = deque() speed_list = [] steer_list = [] noisy_steer_list = [] actions_queue = deque() screen.start_screen([200, 88], 1, 2) images = np.array([200, 88, 3]) actions = Control() for h_num in positions_to_test: print " SEQUENCE NUMBER ", h_num '''if h_num == 50: continue''' data = h5py.File(path + 'data_' + str(h_num).zfill(5) + '.h5', "r") # skip to highway for i in range(0, 200): #every hdf5 files containg data for 200 images
# NOTE(review): orphaned script fragment collapsed onto one physical line;
# kept byte-identical — it references names (`path`, `resolution`, `sensors`)
# defined outside this view and is truncated, so a safe rewrite is not
# possible. What the visible code does: builds positions_to_test from the
# number of files in `path`, starts a screen sized to `resolution`, and
# pre-allocates zeroed RGB/label/depth image buffers plus Control() action
# lists and steering ground-truth/prediction accumulators.
# positions_to_test += range(i-1,i+2) positions_to_test = range(0, len( [name for name in os.listdir(path) if os.path.isfile(os.path.join(path, name))])) #positions_to_test = range(10 * 3, 33 * 3) screen = ScreenManager() image_queue = deque() actions_queue = deque() # Start a screen to show everything. The way we work is that we do IMAGES x Sensor. # But maybe a more arbitrary configuration may be useful screen.start_screen([resolution[0], resolution[1]], [1, 1], 2) ts = [] images = [np.zeros([resolution[1], resolution[0], 3])] * sensors['RGB'] labels = [np.zeros([resolution[1], resolution[0], 1])] * sensors['labels'] depths = [np.zeros([resolution[1], resolution[0], 3])] * sensors['depth'] steer_gt_order = [0] * 3 steer_pred1_order = [0] * 3 steer_pred2_order = [0] * 3 steer_pred1_vec = [] steer_pred2_vec = [] steer_gt_vec = [] actions = [Control()] * sensors['RGB'] actions_noise = [Control()] * sensors['RGB']
def drive_elektra(
        experiment_name,
        drive_config,
        input_method,
        name=None,
        memory_use=1.0,
):
    """Run the driving loop for the Elektra platform.

    Obtains a driver and recorder from get_instance, wraps actions with a
    Noiser, then loops: read sensor images, poll joystick/recording state,
    compute an action, add noise, optionally record, and act. Cleans up the
    driver and pygame on exit.
    """
    driver, recorder = get_instance(drive_config, experiment_name, name,
                                    memory_use, input_method)
    noiser = Noiser(drive_config.noise)
    driver.start()
    if drive_config.show_screen:
        screen_manager = ScreenManager()
        screen_manager.start_screen(drive_config.resolution,
                                    drive_config.number_screens,
                                    drive_config.scale_factor)
    driver.use_planner = False

    old_speed = 0  # the speed we start the car with
    direction = 2  # -1 is the sentinel that stops the loop
    iteration = 0
    try:
        while direction != -1:
            capture_time = time.time()
            images = driver.get_sensor_data()

            # FIX: the original set an unused `done` flag on pygame.QUIT,
            # so closing the window never stopped the loop; now it does.
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    direction = -1
            if direction == -1:
                break

            recording = driver.get_recording()  # boolean, from joystick
            driver.get_reset()  # boolean, from joystick
            action, new_speed, human_intervention = driver.compute_action(
                images, old_speed)
            action_noisy, drifting_time, will_drift = noiser.compute_noise(
                action)
            if recording:
                print("RECORDING")
                recorder.record(images, action.speed, action.steer,
                                action_noisy.steer, human_intervention)

            iteration += 1
            old_speed = new_speed
            driver.act(action_noisy)
    except KeyboardInterrupt:
        pass  # Ctrl+C is the expected way to stop driving
    except Exception:
        traceback.print_exc()
    finally:
        driver.end()
        # driver.write_performance_file(path, folder_name, iteration)
        pygame.quit()
# NOTE(review): orphaned script fragment collapsed onto one physical line;
# kept byte-identical because the for-loop body is truncated at this chunk
# boundary. What the visible code does: points `path` at a hard-coded SegData
# directory, starts an 800x600 screen, allocates three image buffers and
# Control() action triples, then opens each data_XXXXX.h5 sequence read-only.
# positions_to_test = range(0,660) # positions_to_test = [617,618,619,620,622,623,624,636,637,638,639] # positions_to_test = [637,638] # positions_to_test = [55,108,109,353,410,411,426,441,442] # positions_to_test = [656,657,675,676,854,855,859,860,861,902] path = '/media/matthias/7E0CF8640CF818BB/Github/Desktop/2_SegData_1/' screen = ScreenManager() image_queue = deque() speed_list = [] actions_queue = deque() speed_list_noise = [] just_noise = [] screen.start_screen([800, 600], 1, 1) ts = [] images = [np.array([800, 600, 3]), np.array([800, 600, 3]), np.array([800, 600, 3])] # 200x88 actions = [Control(), Control(), Control()] actions_noise = [Control(), Control(), Control()] for h_num in positions_to_test: print(" SEQUENCE NUMBER ", h_num) data = h5py.File(path + 'data_' + str(h_num).zfill(5) + '.h5', "r") # redata = h5py.File('/media/adas/012B4138528FF294/NewGTA/redata_'+ str(h_num).zfill(5) +'.h5', "r") # print log.keys()
# NOTE(review): orphaned script fragment collapsed onto one physical line;
# kept byte-identical — it references names (`h5start`, `h5end`, `path`,
# `resolution`, `sensors`) defined outside this view and the loop body is
# truncated. What the visible code does: builds positions_to_test from an
# inclusive h5start..h5end range, starts a screen laid out as
# IMAGES x Sensor at double resolution, pre-allocates zeroed RGB/label/depth
# buffers and Control() lists, then opens each data_XXXXX.h5 file read-only,
# printing (not re-raising) any exception from h5py.File.
# positions_to_test = [] # for i in initial_positions: # positions_to_test += range(i-1,i+2) positions_to_test = list(range(h5start, h5end + 1)) screen = ScreenManager() image_queue = deque() actions_queue = deque() # Start a screen to show everything. The way we work is that we do IMAGES x Sensor. # But maybe a more arbitrary configuration may be useful screen.start_screen([resolution[0] * 2, resolution[1] * 2], [sensors['RGB'], 2], 1) ts = [] images = [np.zeros([resolution[1], resolution[0], 3])] * sensors['RGB'] labels = [np.zeros([resolution[1], resolution[0], 1])] * sensors['labels'] depths = [np.zeros([resolution[1], resolution[0], 3])] * sensors['depth'] actions = [Control()] * sensors['RGB'] actions_noise = [Control()] * sensors['RGB'] for h_num in positions_to_test: print(" SEQUENCE NUMBER ", h_num) try: data = h5py.File(path + 'data_' + str(h_num).zfill(5) + '.h5', "r") except Exception as e: print(e)