def d8n_read_all_images(filename, t0=None, t1=None):
    """
    Read all images from the image topic of a bag file.

    Returns a numpy structured array:

        data = d8n_read_all_images(bag)
        print data.shape  # (928,)
        print data.dtype  # [('timestamp', '<f8'), ('rgb', 'u1', (480, 640, 3))]

    t0, t1: optional bounds, in seconds relative to the first image
    message; images whose relative time falls outside [t0, t1] are skipped.

    Raises ValueError if the file does not exist or no data could be read.
    """
    import rosbag  # @UnresolvedImport
    filename = expand_environment(filename)
    if not os.path.exists(filename):
        msg = 'File does not exist: %r' % filename
        raise ValueError(msg)
    data = []
    first_timestamp = None
    # Open the bag exactly once; the original opened an extra handle just
    # for get_image_topic() and never closed it.
    with rosbag.Bag(filename, 'r') as bag:
        that_topic = get_image_topic(bag)
        for topic, msg, t in bag.read_messages():
            if topic != that_topic:
                continue
            float_time = t.to_sec()
            if first_timestamp is None:
                first_timestamp = float_time
            rel_time = float_time - first_timestamp
            if t0 is not None and rel_time < t0:
                continue
            if t1 is not None and rel_time > t1:
                continue
            rgb = numpy_from_ros_compressed(msg)
            data.append({'timestamp': float_time, 'rgb': rgb})
            # Progress indicator: count images actually kept, not the raw
            # message index over all topics (the original used j % 10 with
            # j enumerating every message in the bag).
            if len(data) % 10 == 0:
                print('Read %d images from topic %s' % (len(data), topic))
    print('Returned %d images' % len(data))
    if not data:
        raise ValueError('no data found')
    # rgb is the last decoded image; all frames are assumed to share its
    # shape -- TODO confirm for bags with changing resolution.
    H, W, _ = rgb.shape  # e.g. (480, 640, 3)
    print('Detected image shape: %s x %s' % (W, H))
    n = len(data)
    dtype = [
        ('timestamp', 'float'),
        ('rgb', 'uint8', (H, W, 3)),
    ]
    x = np.zeros((n,), dtype=dtype)
    for i, v in enumerate(data):
        x[i]['timestamp'] = v['timestamp']
        x[i]['rgb'][:] = v['rgb']
    return x
def d8n_read_all_images(filename, t0=None, t1=None):
    """
    Read all images from the image topic of a bag file.

    Returns a numpy structured array:

        data = d8n_read_all_images(bag)
        print data.shape  # (928,)
        print data.dtype  # [('timestamp', '<f8'), ('rgb', 'u1', (480, 640, 3))]

    t0, t1: optional bounds in seconds, relative to the first image
    message; images outside [t0, t1] are skipped.

    Raises ValueError if the file does not exist or no data could be read.
    """
    import rosbag  # @UnresolvedImport
    filename = expand_environment(filename)
    if not os.path.exists(filename):
        msg = 'File does not exist: %r' % filename
        raise ValueError(msg)
    data = []
    first_timestamp = None
    # Fix: the original created a throwaway rosbag.Bag just to call
    # get_image_topic() and never closed it; use a single managed handle.
    with rosbag.Bag(filename, 'r') as bag:
        that_topic = get_image_topic(bag)
        for topic, msg, t in bag.read_messages():
            if topic != that_topic:
                continue
            float_time = t.to_sec()
            if first_timestamp is None:
                first_timestamp = float_time
            rel_time = float_time - first_timestamp
            if t0 is not None and rel_time < t0:
                continue
            if t1 is not None and rel_time > t1:
                continue
            rgb = numpy_from_ros_compressed(msg)
            data.append({'timestamp': float_time, 'rgb': rgb})
            # Fix: report the number of images kept; the original printed
            # the enumerate() index over every message on every topic.
            if len(data) % 10 == 0:
                print('Read %d images from topic %s' % (len(data), topic))
    print('Returned %d images' % len(data))
    if not data:
        raise ValueError('no data found')
    # Shape taken from the last decoded frame; assumes constant resolution
    # across the bag -- TODO confirm.
    H, W, _ = rgb.shape  # e.g. (480, 640, 3)
    print('Detected image shape: %s x %s' % (W, H))
    n = len(data)
    dtype = [
        ('timestamp', 'float'),
        ('rgb', 'uint8', (H, W, 3)),
    ]
    x = np.zeros((n,), dtype=dtype)
    for i, v in enumerate(data):
        x[i]['timestamp'] = v['timestamp']
        x[i]['rgb'][:] = v['rgb']
    return x
def read_file(filename):
    """Load an image from disk with OpenCV.

    Environment variables in *filename* are expanded before reading.
    Raises ValueError when OpenCV cannot decode the file.
    """
    import cv2
    from duckietown_utils.expand_variables import expand_environment

    path = expand_environment(filename)
    image = cv2.imread(path)
    if image is None:
        raise ValueError('Cannot read filename %r.' % path)
    return image
def check_environment_variables():
    """Validate the Duckietown environment variables.

    Checks (depending on where we run):
    - DUCKIETOWN_ROOT must always be set.
    - VEHICLE_NAME is required only on the robot.
    - DUCKIETOWN_DATA is checked only off-robot: it must be "n/a" or an
      existing path containing a 'logs' subdirectory.

    Raises Exception listing every problem found (joined by '---').
    """
    vs = {
        "DUCKIETOWN_ROOT": """
    DUCKIETOWN_ROOT should be set.
    """,
        "DUCKIETOWN_DATA": """
    The environment variable DUCKIETOWN_DATA must either:
    1) be set to "n/a"
    2) point to an existing path corresponding to Dropbox/duckietown-data.
       (containing a subdirectory 'logs')
    """,
        "VEHICLE_NAME": "The environment variable VEHICLE_NAME must be the name of your robot \n"
                        " (if you are on the robot). Please add this line to ~/.bashrc: \n"
                        " \n"
                        " export VEHICLE_NAME=<your vehicle name>\n",
    }

    # The two deletions are mutually exclusive: VEHICLE_NAME only matters
    # on the robot, DUCKIETOWN_DATA is only checked off-robot.
    on_robot = on_duckiebot()
    if not on_robot:
        del vs["VEHICLE_NAME"]
    else:
        del vs["DUCKIETOWN_DATA"]

    errors = []
    for name, explanation in vs.items():
        if name not in os.environ:
            e = "Environment variable %r not defined." % name
            errors.append(e + "\n" + explanation)

    if not on_robot:
        if "DUCKIETOWN_DATA" in os.environ:
            path = os.environ["DUCKIETOWN_DATA"]
            # "n/a" is an explicit opt-out; anything else must be a real
            # dataset directory with a 'logs' subdirectory.
            if path != "n/a":
                f = expand_environment(path)
                logs = os.path.join(f, "logs")
                if not os.path.exists(f) or not os.path.exists(logs):
                    e = vs["DUCKIETOWN_DATA"]
                    errors.append(e)

    if errors:
        raise Exception("\n---\n".join(errors))
def anti_instagram_annotations_test():
    """Run the anti-instagram annotation tests over every *.iids1 dataset.

    Locates dataset directories under ${DUCKIETOWN_DATA}, examines each
    one into a per-user output directory, persists per-directory and
    merged results in a 'tests_results' shelve database, and prints
    summary statistics per region.

    Raises ValueError if no dataset directories are found.
    """
    base = expand_environment("${DUCKIETOWN_DATA}/phase3-misc-files/so1/")
    dirs = locate_files(base, "*.iids1", alsodirs=True)
    if not dirs:
        raise ValueError("No IIDS1 directories")

    import getpass
    uname = getpass.getuser()

    directory_results = {}
    overall_results = []
    for d in dirs:
        # Results go into a sibling directory namespaced by user name.
        out = os.path.join(os.path.dirname(d), uname, os.path.basename(d) + ".v")
        if not os.path.exists(out):
            os.makedirs(out)
        results = examine_dataset(d, out)
        overall_results = merge_comparison_results(results, overall_results)
        directory_results[d] = results

    # Persist results; flag='n' always creates a fresh database.
    db = shelve.open("tests_results", flag="n")
    db["directory_results"] = directory_results
    db["overall_results"] = overall_results
    db.close()

    avg_error = overall_results["total_error"] / overall_results["total_pixels"]
    print("overall average error: %f" % avg_error)
    print("overall regions checked: %f" % (overall_results["total_regions"]))
    for t in overall_results["v_vals"].keys():
        stats = (
            t,
            np.mean(overall_results["r_vals"][t]),
            np.mean(overall_results["g_vals"][t]),
            np.mean(overall_results["b_vals"][t]),
            np.mean(overall_results["h_vals"][t]),
            np.mean(overall_results["s_vals"][t]),
            np.mean(overall_results["v_vals"][t]),
        )
        print("region %f: RGB %f,%f,%f, HSV %f,%f,%f" % stats)
    ipython_if_guy()
def anti_instagram_annotations_test():
    """Evaluate the anti-instagram annotations on all *.iids1 datasets.

    Scans ${DUCKIETOWN_DATA}/phase3-misc-files/so1/ for dataset
    directories, runs examine_dataset() on each, merges the results,
    saves everything into a fresh 'tests_results' shelve file and prints
    a per-region RGB/HSV summary.

    Raises ValueError when no dataset directory is found.
    """
    base = "${DUCKIETOWN_DATA}/phase3-misc-files/so1/"
    base = expand_environment(base)
    dirs = locate_files(base, '*.iids1', alsodirs=True)
    if not dirs:
        raise ValueError('No IIDS1 directories')

    import getpass
    user = getpass.getuser()

    directory_results = {}
    overall_results = []
    for dataset_dir in dirs:
        parent = os.path.dirname(dataset_dir)
        leaf = os.path.basename(dataset_dir) + '.v'
        out = os.path.join(parent, user, leaf)
        if not os.path.exists(out):
            os.makedirs(out)
        results = examine_dataset(dataset_dir, out)
        overall_results = merge_comparison_results(results, overall_results)
        directory_results[dataset_dir] = results

    # flag='n' creates a brand-new database every run.
    db = shelve.open('tests_results', flag='n')
    db['directory_results'] = directory_results
    db['overall_results'] = overall_results
    db.close()

    print("overall average error: %f" %
          (overall_results['total_error'] / overall_results['total_pixels']))
    print("overall regions checked: %f" % (overall_results['total_regions']))
    for t in overall_results['v_vals'].keys():
        means = [np.mean(overall_results[k][t])
                 for k in ('r_vals', 'g_vals', 'b_vals',
                           'h_vals', 's_vals', 'v_vals')]
        print("region %f: RGB %f,%f,%f, HSV %f,%f,%f" % tuple([t] + means))
    ipython_if_guy()
def examine_dataset(dirname, out):
    """Run every line-detector configuration over one dataset directory.

    dirname: directory containing *.jpg frames and optional *.mat
        ground-truth files (environment variables are expanded).
    out: output directory for the learned transform and result images.

    Returns the comparison results merged over all *.mat files.
    Raises ValueError when there are no jpgs or a configuration file
    lacks a valid 'detector' entry.
    """
    logger.info(dirname)
    dirname = expand_environment(dirname)
    jpgs = locate_files(dirname, "*.jpg")
    mats = locate_files(dirname, "*.mat")
    logger.debug("I found %d jpgs and %d mats" % (len(jpgs), len(mats)))
    if len(jpgs) == 0:
        msg = "Not enough jpgs."
        raise ValueError(msg)
    # if len(mats) == 0:
    #    msg = 'Not enough mats.'
    #    raise ValueError(msg)

    # Learn the color transform from the alphabetically-first frame.
    first_jpg = sorted(jpgs)[0]
    logger.debug("Using jpg %r to learn transformation." % first_jpg)
    first_jpg_image = image_cv_from_jpg_fn(first_jpg)
    success, health, parameters = calculate_transform(first_jpg_image)
    s = ""
    s += "success: %s\n" % str(success)
    s += "health: %s\n" % str(health)
    s += "parameters: %s\n" % str(parameters)
    w = os.path.join(out, "learned_transform.txt")
    with open(w, "w") as f:
        f.write(s)
    logger.info(s)

    transform = ScaleAndShift(**parameters)

    config_dir = "${DUCKIETOWN_ROOT}/catkin_ws/src/duckietown/config/baseline/line_detector/line_detector_node/"
    config_dir = expand_environment(config_dir)
    configurations = locate_files(config_dir, "*.yaml")
    # logger.info('configurations: %r' % configurations)

    for j in jpgs:
        summaries = []
        shape = (200, 160)
        interpolation = cv2.INTER_NEAREST
        bn = os.path.splitext(os.path.basename(j))[0]
        fn = os.path.join(out, "%s.all.png" % (bn))
        if os.path.exists(fn):
            logger.debug("Skipping because file exists: %r" % fn)
        else:
            for c in configurations:
                logger.info("Trying %r" % c)
                name = os.path.splitext(os.path.basename(c))[0]
                if name in ["oreo", "myrtle", "bad_lighting", "226-night"]:
                    continue
                with open(c) as f:
                    # NOTE(review): yaml.load on repo-local config files;
                    # prefer yaml.safe_load if the PyYAML version allows.
                    stuff = yaml.load(f)
                if "detector" not in stuff:
                    msg = 'Cannot find "detector" section in %r' % c
                    raise ValueError(msg)
                detector = stuff["detector"]
                logger.info(detector)
                # BUG FIX: the original condition
                #     not isinstance(detector, list) and len(detector) == 2
                # raised only for *non-lists* of length 2; the intent is to
                # require a [class, params] pair.
                if not (isinstance(detector, list) and len(detector) == 2):
                    raise ValueError(detector)

                from duckietown_utils.instantiate_utils import instantiate

                # Bind the current spec as a default so the factory does not
                # late-bind to the loop variable 'detector'.
                def LineDetectorClass(d=detector):
                    return instantiate(d[0], d[1])

                s = run_detection(transform, j, out, shape=shape,
                                  interpolation=interpolation, name=name,
                                  LineDetectorClass=LineDetectorClass)
                summaries.append(s)

            together = make_images_grid(summaries, cols=1, pad=10,
                                        bgcolor=[0.5, 0.5, 0.5])
            cv2.imwrite(fn, zoom_image(together, 4))

    # ipython_if_guy()
    overall_results = []
    comparison_results = {}
    for m in mats:
        logger.debug(m)
        jpg = os.path.splitext(m)[0] + ".jpg"
        if not os.path.exists(jpg):
            msg = "JPG %r for mat %r does not exist" % (jpg, m)
            logger.error(msg)
        else:
            frame_results = test_pair(transform, jpg, m, out)
            comparison_results[m] = frame_results
            overall_results = merge_comparison_results(comparison_results[m],
                                                       overall_results)
            # print normalized to function form for consistency with the
            # rest of the file (works on both Python 2 and 3).
            print("comparison_results[m]=frame_results")
    # ipython_if_guy()
    print("finished mats: " + dirname)
    return overall_results
def examine_dataset(dirname, out):
    """Run all line-detector configurations against one dataset directory.

    dirname: dataset directory with *.jpg frames and optional *.mat
        ground truth (environment variables are expanded).
    out: directory receiving the learned transform and rendered results.

    Returns comparison results merged over all *.mat files.
    Raises ValueError when no jpgs exist or a configuration's 'detector'
    entry is missing or not a [class, params] list.
    """
    logger.info(dirname)
    dirname = expand_environment(dirname)
    jpgs = locate_files(dirname, '*.jpg')
    mats = locate_files(dirname, '*.mat')
    logger.debug('I found %d jpgs and %d mats' % (len(jpgs), len(mats)))
    if len(jpgs) == 0:
        msg = 'Not enough jpgs.'
        raise ValueError(msg)
    # if len(mats) == 0:
    #    msg = 'Not enough mats.'
    #    raise ValueError(msg)

    # Learn the color transform from the first frame (sorted order).
    first_jpg = sorted(jpgs)[0]
    logger.debug('Using jpg %r to learn transformation.' % first_jpg)
    first_jpg_image = image_cv_from_jpg_fn(first_jpg)
    success, health, parameters = calculate_transform(first_jpg_image)
    s = ""
    s += 'success: %s\n' % str(success)
    s += 'health: %s\n' % str(health)
    s += 'parameters: %s\n' % str(parameters)
    w = os.path.join(out, 'learned_transform.txt')
    with open(w, 'w') as f:
        f.write(s)
    logger.info(s)

    transform = ScaleAndShift(**parameters)

    config_dir = '${DUCKIETOWN_ROOT}/catkin_ws/src/duckietown/config/baseline/line_detector/line_detector_node/'
    config_dir = expand_environment(config_dir)
    configurations = locate_files(config_dir, '*.yaml')
    # logger.info('configurations: %r' % configurations)

    for j in jpgs:
        summaries = []
        shape = (200, 160)
        interpolation = cv2.INTER_NEAREST
        bn = os.path.splitext(os.path.basename(j))[0]
        fn = os.path.join(out, '%s.all.png' % (bn))
        if os.path.exists(fn):
            logger.debug('Skipping because file exists: %r' % fn)
        else:
            for c in configurations:
                logger.info('Trying %r' % c)
                name = os.path.splitext(os.path.basename(c))[0]
                if name in ['oreo', 'myrtle', 'bad_lighting', '226-night']:
                    continue
                with open(c) as f:
                    # NOTE(review): repo-local config, but yaml.safe_load
                    # would still be preferable if available.
                    stuff = yaml.load(f)
                if 'detector' not in stuff:
                    msg = 'Cannot find "detector" section in %r' % c
                    raise ValueError(msg)
                detector = stuff['detector']
                logger.info(detector)
                # BUG FIX: original wrote
                #     not isinstance(detector, list) and len(detector) == 2
                # which never rejects malformed lists; the check must cover
                # "not a list OR wrong length".
                if not (isinstance(detector, list) and len(detector) == 2):
                    raise ValueError(detector)

                from duckietown_utils.instantiate_utils import instantiate

                # Default-arg binding avoids the late-binding-closure trap
                # on the loop variable.
                def LineDetectorClass(d=detector):
                    return instantiate(d[0], d[1])

                s = run_detection(transform, j, out, shape=shape,
                                  interpolation=interpolation, name=name,
                                  LineDetectorClass=LineDetectorClass)
                summaries.append(s)

            together = make_images_grid(summaries, cols=1, pad=10,
                                        bgcolor=[.5, .5, .5])
            cv2.imwrite(fn, zoom_image(together, 4))

    # ipython_if_guy()
    overall_results = []
    comparison_results = {}
    for m in mats:
        logger.debug(m)
        jpg = os.path.splitext(m)[0] + '.jpg'
        if not os.path.exists(jpg):
            msg = 'JPG %r for mat %r does not exist' % (jpg, m)
            logger.error(msg)
        else:
            frame_results = test_pair(transform, jpg, m, out)
            comparison_results[m] = frame_results
            overall_results = merge_comparison_results(comparison_results[m],
                                                       overall_results)
            # Normalized from a Python-2 print statement to print() for
            # consistency with the rest of the file.
            print("comparison_results[m]=frame_results")
    # ipython_if_guy()
    print("finished mats: " + dirname)
    return overall_results