def preprocess_data(rgb, gt, seg, w=256, h=160, crop_w=0, crop_h=0, resize_only_rgb=False):
    # Centered crop bounds (computed but not applied in this snippet).
    # Cast to int rather than np.uint8 so offsets above 255 do not overflow.
    crop_top = int(np.floor((rgb.shape[0] - crop_h) / 2))
    crop_bottom = rgb.shape[0] - int(np.floor((rgb.shape[0] - crop_h) / 2))
    crop_left = int(np.floor((rgb.shape[1] - crop_w) / 2))
    crop_right = rgb.shape[1] - int(np.floor((rgb.shape[1] - crop_w) / 2))
    rgb = np.asarray(rgb, dtype=np.float32) / 255.
    # cv2.resize takes interpolation as a keyword argument; passed
    # positionally it would fill the dst parameter instead.
    rgb = cv2.resize(rgb, (w, h), interpolation=cv2.INTER_LINEAR)
    rgb = np.expand_dims(rgb, 0)
    gt = np.asarray(gt, dtype=np.float32)
    if not resize_only_rgb:
        gt = cv2.resize(gt, (w, h), interpolation=cv2.INTER_NEAREST)
    gt = EvaluationUtils.depth_to_meters_airsim(gt)
    if not resize_only_rgb:
        seg = cv2.resize(seg, (w, h), interpolation=cv2.INTER_NEAREST)
    return rgb, gt, seg
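# A minimal smoke test for preprocess_data, left commented out as a sketch:
# the 480x640 input shapes are arbitrary, and depth_to_meters_airsim is
# assumed to accept a float32 array of the resized size.
# rgb_in = np.random.randint(0, 256, size=(480, 640, 3)).astype(np.uint8)
# gt_in = np.random.randint(0, 256, size=(480, 640)).astype(np.uint8)
# seg_in = np.random.randint(0, 2, size=(480, 640)).astype(np.uint8)
# rgb_p, gt_p, seg_p = preprocess_data(rgb_in, gt_in, seg_in, w=256, h=160)
# assert rgb_p.shape == (1, 160, 256, 3)  # batch dim prepended by expand_dims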
# Edit config.py as required.
config, unparsed = get_config()

# Edit model_name to choose a model between ['jmod2', 'cadena', 'detector', 'depth', 'eigen'].
model_name = 'jmod2'

model, detector_only = EvaluationUtils.load_model(model_name, config)

showImages = False

dataset_main_dir = config.data_set_dir
test_dirs = config.data_test_dirs

# compute_depth_branch_stats_on_obs is set to False when evaluating detector-only models.
jmod2_stats = JMOD2Stats(model_name, compute_depth_branch_stats_on_obs=not detector_only)

for test_dir in test_dirs:
    depth_gt_paths = sorted(glob(os.path.join(dataset_main_dir, test_dir, 'depth', '*.png')))
    rgb_paths = sorted(glob(os.path.join(dataset_main_dir, test_dir, 'rgb', '*.png')))
    seg_paths = sorted(glob(os.path.join(dataset_main_dir, test_dir, 'segmentation', '*.png')))
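    # Sanity-check sketch (assumption: filenames pair one-to-one across the
    # depth/rgb/segmentation folders, so the three sorted listings align):
    # assert len(depth_gt_paths) == len(rgb_paths) == len(seg_paths), \
    #     "unpaired frames in %s" % test_dir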
    obstacles_label[int(parsed_obs[1]), int(parsed_obs[0]), 6] = parsed_obs[6] * 0.1  # m
    obstacles_label[int(parsed_obs[1]), int(parsed_obs[0]), 7] = parsed_obs[7] * 0.1  # v
    labels = np.reshape(obstacles_label, (40, 8))
    return labels


# Edit config.py as required.
config, unparsed = get_config()

# Edit model_name to choose a model between ['jmod2', 'cadena', 'detector', 'depth', 'eigen'].
model_name = 'odl'
number_classes = config.number_classes

model, detector_only = EvaluationUtils.load_model(model_name, config, number_classes)

showImages = True

dataset_main_dir = config.data_set_dir
test_dirs = config.data_test_dirs

# compute_depth_branch_stats_on_obs is set to False when evaluating detector-only models.
jmod2_stats = JMOD2Stats(model_name, compute_depth_branch_stats_on_obs=not detector_only)

i = 0
confMatrix = False
true_obs = []
pred_obs = []
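# Label read-back sketch, commented out: only fields 6 and 7 of the packed
# (40, 8) labels are documented above (both stored scaled by 0.1); the
# remaining fields are assumed to hold the detection's geometry/confidence.
# for row in labels:
#     if np.any(row):
#         print("depth mean term: %.2f, variance term: %.2f" % (row[6], row[7]))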
import numpy as np
import lib.EvaluationUtils as EvaluationUtils
from config import get_config
from lib.Evaluators import JMOD2Stats

config, unparsed = get_config()

# Edit model_name to choose a model between ['jmod2', 'cadena', 'detector', 'depth', 'eigen'].
model_name = 'jmod2'

model, detector_only = EvaluationUtils.load_model(model_name, config)

# Download the file from
dataset_file_path = "data/zurich_test_set/zurich_forest_dataset_with_obs_label.npy"
# allow_pickle is required on NumPy >= 1.16.3 to load object arrays.
dataset = np.load(dataset_file_path, allow_pickle=True).item()

len_data = len(dataset['images'])

showImages = True

# compute_depth_branch_stats_on_obs is set to False when evaluating detector-only models.
stats = JMOD2Stats(model_name, compute_depth_branch_stats_on_obs=not detector_only)

for i in range(len_data):
    rgb = dataset['images'][i]
    gt = dataset['depth'][i]
    list_obstacles = dataset['obstacles'][i]

    results = model.run(rgb)

    corr_depth = None
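    # Unpacking sketch: run() implementations in this repo are assumed to
    # return [predicted_depth, predicted_obstacles, corrected_depth], as the
    # Cadena-style run() below does.
    # pred_depth, pred_obstacles, corr_depth = results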
import lib.EvaluationUtils as EvaluationUtils
import h5py
import numpy as np
from lib.Evaluators import JMOD2Stats
from config import get_config

config, unparsed = get_config()

# Edit model_name to choose a model between ['jmod2', 'cadena', 'detector', 'depth', 'eigen'].
model_name = 'jmod2'

model, detector_only = EvaluationUtils.load_model(model_name, config)

# Download and unzip the dataset from
main_data_dir = "data/zurich_test_set"
hdf5_file = main_data_dir + "/seq_%02d.h5"

showImages = True

# compute_depth_branch_stats_on_obs is set to False when evaluating detector-only models.
stats = JMOD2Stats(model_name, compute_depth_branch_stats_on_obs=not detector_only)

for k in range(0, 4):
    zurich_data = h5py.File(hdf5_file % k, 'r')
    # The HDF5 data is stored channel-first; transpose to (N, H, W, C).
    images_t = np.transpose(np.asarray(zurich_data['data'], dtype=np.float32), [0, 2, 3, 1])
    images = np.zeros(shape=(images_t.shape[0], images_t.shape[1], images_t.shape[2], 3))
    # Grayscale to RGB: replicate the single channel on all three channels.
    images[:, :, :, 0] = images_t[:, :, :, 0]
    images[:, :, :, 1] = images_t[:, :, :, 0]
    images[:, :, :, 2] = images_t[:, :, :, 0]
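    # Equivalent one-liner: repeating the single grayscale channel along the
    # last axis gives the same (N, H, W, 3) result as the three copies above.
    # images = np.repeat(images_t[:, :, :, :1], 3, axis=3)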
def run(self, input, resize=False):
    if len(input.shape) == 4:
        img = cv2.resize(input[0, :, :, :], (self.config.input_width, self.config.input_height),
                         interpolation=cv2.INTER_LINEAR)
        input = np.zeros(shape=(1, img.shape[0], img.shape[1], img.shape[2]))
        input[0, :, :, :] = img
        r_chan = input[:, :, :, 2]
        g_chan = input[:, :, :, 1]
        b_chan = input[:, :, :, 0]
    if len(input.shape) == 3:
        img = cv2.resize(input, (self.config.input_width, self.config.input_height),
                         interpolation=cv2.INTER_LINEAR)
        input = np.zeros(shape=(1, img.shape[0], img.shape[1], img.shape[2]))
        input[0, :, :, :] = img
        r_chan = input[:, :, :, 2]
        g_chan = input[:, :, :, 1]
        b_chan = input[:, :, :, 0]
    elif len(input.shape) == 2:
        # Case 3: single-channel input, replicated across R, G, B.
        input = cv2.resize(input, (self.config.input_width, self.config.input_height),
                           interpolation=cv2.INTER_LINEAR)
        input = np.expand_dims(input, 0)
        r_chan = input
        g_chan = input
        b_chan = input

    # The depth and segmentation inputs are fed as zero placeholders at test time.
    depth = np.zeros_like(r_chan)
    seg = np.zeros_like(r_chan)

    r_chan = np.expand_dims(r_chan, axis=3).astype(np.float32)
    g_chan = np.expand_dims(g_chan, axis=3).astype(np.float32)
    b_chan = np.expand_dims(b_chan, axis=3).astype(np.float32)
    depth = np.expand_dims(depth, axis=3).astype(np.float32)
    seg = np.expand_dims(seg, axis=3).astype(np.float32)

    net_input = [depth, r_chan, g_chan, b_chan, seg]

    t0 = time.time()

    net_output = self.model.predict(net_input, batch_size=1)

    pred_depth = net_output[0] * 39.75
    segm = net_output[4]

    if self.config.cadena_resize_out:
        resized_depth = np.zeros(shape=(1, 160, 256, 1))
        resized_segm = np.zeros(shape=(1, 160, 256, 1))
        resized_depth[0, :, :, 0] = cv2.resize(pred_depth[0, :, :, 0], (256, 160),
                                               interpolation=cv2.INTER_NEAREST)
        resized_segm[0, :, :, 0] = cv2.resize(segm[0, :, :, 0], (256, 160),
                                              interpolation=cv2.INTER_NEAREST)
        pred_depth = resized_depth
        segm = resized_segm

    pred_obs = EvaluationUtils.get_obstacles_from_seg_and_depth(pred_depth, segm, segm_thr=-1, is_gt=True)

    # correction_factor = self.compute_correction_factor(pred_depth, pred_obs)
    # corrected_depth = np.array(pred_depth) * correction_factor

    print("Elapsed time: {}".format(time.time() - t0))

    return [pred_depth, pred_obs, None]
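# Usage sketch for run(), commented out; `model` is a hypothetical instance
# of the class defining run() above, "frame.png" is a placeholder path, and
# any input normalization is assumed to happen upstream of run().
# frame = cv2.imread("frame.png")  # H x W x 3, handled by the 3-dim branch
# pred_depth, pred_obstacles, _ = model.run(frame)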
    plt.show()
    plt.clf()


if __name__ == "__main__":
    import lib.EvaluationUtils as EvaluationUtils
    from config import get_config

    # Edit config.py as required.
    config, unparsed = get_config()

    # Edit model_name to choose a model between ['jmod2', 'cadena', 'detector', 'depth', 'eigen'].
    model_name = 'odl'
    number_classes = config.number_classes

    model, detector_only = EvaluationUtils.load_model(model_name, config, number_classes)

    showImages = True

    vrep_connection = VrepConnection("127.0.0.1", 25000, force_finish_comm=False)
    image_getter = ImageGetter(vrep_connection, "NAO_vision1")
    depth_getter = ImageGetter(vrep_connection, "NAO_vision3")
    pose_getter = PoseGetter(vrep_connection, "NAO")
    obstacle_interpreter = ObstacleInterpreter(number_classes=number_classes, correct_depth=False)
    particle_map = ParticleMap(objects=np.array([[900, 430, 2, -1],
                                                 [900, 170, 2, 1],
                                                 [900, 300, 2, 0],
                                                 [900, 250, 1, 0]]),