def write_similarity_matrix(A, out, more=False, images=None):
    """
    Write a similarity matrix to disk.

    Always writes ``out + '.pickle'`` (the raw matrix) and ``out + '.jpg'``
    (a scaled, zoomed rendering). If ``more`` is True, also writes
    ``out + '.html'``: a report with one plot per row, optionally embedding
    the corresponding images.

    :param A: square (n, n) numpy array of pairwise similarities/distances
              (lower = more similar, since the best match uses argmin)
    :param out: output basename (no extension)
    :param more: whether to generate the per-row HTML report
    :param images: optional sequence of n RGB images to embed in the report
    """
    dtu.safe_pickle_dump(A, out + '.pickle')
    # rgb = rgb_zoom(scale(A), 8)
    rgb = scale(A)
    rgb = dtu.d8_image_zoom_linear(rgb, 8)
    dtu.write_image_as_jpg(rgb, out + '.jpg')
    if more:
        r = Report()
        n = A.shape[0]
        for i in range(n):
            f = r.figure()
            # Mask a window of temporal neighbors around i so the "best
            # match" is not simply an adjacent frame.
            ignore = 10
            Ai = A[i, :].copy()
            # Fixed off-by-one: the window is now symmetric; previously
            # range(i - ignore, i + ignore) excluded i-10..i+9 but left
            # i+10 eligible while i-10 was masked.
            for i2 in range(i - ignore, i + ignore + 1):
                if 0 <= i2 < n:
                    Ai[i2] = np.inf
            jbest = np.argmin(Ai)
            with f.plot('i-%d' % i) as pylab:
                pylab.plot(A[i, :], '-*')
            if images:
                f.data_rgb(str(i), dtu.bgr_from_rgb(images[i]))
                f.data_rgb(str(jbest), dtu.bgr_from_rgb(images[jbest]))
        r.to_html(out + '.html')
def write_images(bag_filename, topic, basename):
    """
    Extract all images for ``topic`` from a bag file and write them as PNGs.

    A single image is written as ``basename + '.png'``; multiple images as
    ``basename + '-NN.png'``.

    :returns: the name of the first created file
    :raises ValueError: if the bag contains no messages for the topic
    """
    dtu.logger.info('reading topic %r from %r' % (topic, bag_filename))
    bag = rosbag.Bag(bag_filename)
    try:
        nfound = bag.get_message_count(topic)
        if nfound == 0:
            msg = 'Found 0 messages for topic %s' % topic
            msg += '\n\n' + dtu.indent(get_summary_of_bag_messages(bag), ' ')
            raise ValueError(msg)
        res = dtu.d8n_read_all_images_from_bag(bag, topic)
    finally:
        # Close the bag on the error path too (it was leaked when the
        # topic had zero messages).
        bag.close()
    n = len(res)
    filenames = []
    for i in range(n):
        rgb = res[i]['rgb']
        data = dtu.png_from_bgr(dtu.bgr_from_rgb(rgb))
        if n == 1:
            fn = basename + '.png'
        else:
            fn = basename + '-%02d' % i + '.png'
        filenames.append(fn)
        dtu.write_data_to_file(data, fn)
    return filenames[0]
def look_at(log, output, anti_instagram, line_detector, image_prep,
            lane_filter, all_details):
    """
    Run the full image pipeline on the first image of a log and write the
    resulting images as jpgs into ``output``.

    :param log: log descriptor accepted by ``get_local_bag_file``
    :param output: output directory for the jpgs
    :param anti_instagram: name of the color-correction algorithm
    :param line_detector: name of the line-detector algorithm
    :param image_prep: name of the image-preparation step
    :param lane_filter: name of the lane filter
    :param all_details: whether the pipeline should produce all debug images
    """
    filename = get_local_bag_file(log)
    bag = rosbag.Bag(filename)
    try:
        vehicle_name = dtu.which_robot(bag)
        dtu.logger.info('Vehicle name: %s' % vehicle_name)
        gp = GroundProjection(vehicle_name)
        topic = dtu.get_image_topic(bag)
        res = dtu.d8n_read_all_images_from_bag(bag, topic, max_images=1)
    finally:
        # The bag was never closed before; release it as soon as the
        # single image has been read.
        bag.close()
    image_cv = res[0]['rgb']
    # dtu.logger.debug(dtu.describe_value(image_cv))
    image_cv_bgr = dtu.bgr_from_rgb(image_cv)
    dtu.DuckietownConstants.show_timeit_benchmarks = True
    res, _stats = run_pipeline(image_cv_bgr,
                               gp=gp,
                               anti_instagram_name=anti_instagram,
                               line_detector_name=line_detector,
                               image_prep_name=image_prep,
                               lane_filter_name=lane_filter,
                               all_details=all_details)
    res = dtu.resize_small_images(res)
    dtu.write_bgr_images_as_jpgs(res, output)
def go(self):
    """
    Run the pipeline once on a single image: either a file/URL given via
    ``--image``, or one frame captured live from the robot's camera topic.
    Writes the resulting images as jpgs to the output directory.
    """
    vehicle_name = dtu.get_current_robot_name() if dtu.on_duckiebot() else "default"
    output = self.options.output
    if output is None:
        output = 'out-pipeline'  # + dtu.get_md5(self.options.image)[:6]
        self.info('No --output given, using %s' % output)
    if self.options.image is not None:
        image_filename = self.options.image
        if image_filename.startswith('http'):
            image_filename = dtu.get_file_from_url(image_filename)
        bgr = dtu.bgr_from_jpg_fn(image_filename)
    else:
        print("Validating using the ROS image stream...")
        import rospy
        from sensor_msgs.msg import CompressedImage
        topic_name = os.path.join('/', vehicle_name, 'camera_node/image/compressed')
        print('Let\'s wait for an image. Say cheese!')
        # Dummy to get ROS message
        rospy.init_node('single_image')
        img_msg = None
        try:
            img_msg = rospy.wait_for_message(topic_name, CompressedImage, timeout=10)
            print('Image captured!')
        except rospy.ROSException as e:
            print(
                '\n\n\nDidn\'t get any message!: %s\n MAKE SURE YOU USE DT SHELL COMMANDS OF VERSION 4.1.9 OR HIGHER!\n\n\n'
                % (e, ))
        if img_msg is None:
            # Fail explicitly here; previously execution continued and
            # crashed inside rgb_from_ros(None) with a confusing error.
            raise ValueError('No image received on topic %s' % topic_name)
        bgr = dtu.bgr_from_rgb(dtu.rgb_from_ros(img_msg))
        self.info('Picture taken: %s ' % str(bgr.shape))
    gp = GroundProjection(vehicle_name)
    dtu.DuckietownConstants.show_timeit_benchmarks = True
    res, _stats = run_pipeline(
        bgr,
        gp,
        line_detector_name=self.options.line_detector,
        image_prep_name=self.options.image_prep,
        anti_instagram_name=self.options.anti_instagram,
        lane_filter_name=self.options.lane_filter)
    self.info('Resizing images..')
    res = dtu.resize_small_images(res)
    self.info('Writing images..')
    dtu.write_bgr_images_as_jpgs(res, output)
def go(self):
    """
    Calibrate the extrinsics (homography) of the robot's camera.

    The calibration image comes either from ``--input`` or from one frame
    captured live from the camera topic. The image is rectified, the
    homography is estimated from it, and the result is saved for the robot.
    All intermediate images are written as jpgs to the output directory.
    """
    robot_name = dtu.get_current_robot_name()
    output = self.options.output
    if output is None:
        output = 'out-calibrate-extrinsics'  # + dtu.get_md5(self.options.image)[:6]
        self.info('No --output given, using %s' % output)
    if self.options.input is None:
        print("{}\nCalibrating using the ROS image stream...\n".format("*" * 20))
        import rospy
        from sensor_msgs.msg import CompressedImage
        topic_name = os.path.join('/', robot_name, 'camera_node/image/compressed')
        print('Topic to listen to is: %s' % topic_name)
        print('Let\'s wait for an image. Say cheese!')
        # Dummy for getting a ROS message
        rospy.init_node('calibrate_extrinsics')
        img_msg = None
        try:
            img_msg = rospy.wait_for_message(topic_name, CompressedImage, timeout=10)
            print('Image captured!')
        except rospy.ROSException as e:
            print(
                '\n\n\n'
                'Didn\'t get any message!: %s\n '
                'MAKE SURE YOU USE DT SHELL COMMANDS OF VERSION 4.1.9 OR HIGHER!'
                '\n\n\n' % (e, ))
        if img_msg is None:
            # Fail explicitly; previously execution continued and crashed
            # inside rgb_from_ros(None) with a confusing error.
            raise ValueError('No image received on topic %s' % topic_name)
        bgr = dtu.bgr_from_rgb(dtu.rgb_from_ros(img_msg))
        print('Picture taken: %s ' % str(bgr.shape))
    else:
        print('Loading input image %s' % self.options.input)
        bgr = dtu.bgr_from_jpg_fn(self.options.input)
    if bgr.shape[1] != 640:
        interpolation = cv2.INTER_CUBIC
        bgr = dtu.d8_image_resize_fit(bgr, 640, interpolation)
        print('Resized to: %s ' % str(bgr.shape))
    # Disable the old calibration file
    print("Disabling old homography")
    disable_old_homography(robot_name)
    print("Obtaining camera info")
    try:
        camera_info = get_camera_info_for_robot(robot_name)
    except Exception as E:
        print("Error on obtaining camera info!")
        print(E)
        # Re-raise: previously execution continued and hit a NameError
        # on the undefined camera_info below.
        raise
    print("Get default homography")
    try:
        homography_dummy = get_homography_default()
    except Exception as E:
        print("Error on getting homography")
        print(E)
        raise
    print("Rectify image")
    try:
        rect = Rectify(camera_info)
    except Exception as E:
        print("Error rectifying image!")
        print(E)
        raise
    print("Calculate GPG")
    try:
        gpg = GroundProjectionGeometry(camera_info.width, camera_info.height,
                                       homography_dummy.reshape((3, 3)))
    except Exception as E:
        print("Error calculating GPG!")
        print(E)
        raise
    print("Ordered Dict")
    res = OrderedDict()
    try:
        bgr_rectified = rect.rectify(bgr, interpolation=cv2.INTER_CUBIC)
        res['bgr'] = bgr
        res['bgr_rectified'] = bgr_rectified
        _new_matrix, res['rectified_full_ratio_auto'] = rect.rectify_full(bgr, ratio=1.65)
        (result_gpg, status) = gpg.estimate_homography(bgr_rectified)
        if status is not None:
            raise Exception(status)
        save_homography(result_gpg.H, robot_name)
        msg = '''

To check that this worked well, place the robot on the road, and run:

    rosrun complete_image_pipeline single_image

Look at the produced jpgs.

'''
        print(msg)
    except Exception as E:
        print(E)
    finally:
        # Always write out whatever intermediate images were produced.
        dtu.write_bgr_images_as_jpgs(res, output)
def go(self):
    """
    Calibrate the extrinsics (homography) of the robot's camera.

    The calibration image comes either from ``--input`` or from one frame
    captured live from the camera topic. The image is rectified, the
    homography is estimated via ``estimate_homography``, and the result is
    saved for the robot. Intermediate images are written as jpgs.
    """
    robot_name = dtu.get_current_robot_name()
    output = self.options.output
    if output is None:
        output = 'out-calibrate-extrinsics'  # + dtu.get_md5(self.options.image)[:6]
        self.info('No --output given, using %s' % output)
    if self.options.input is None:
        print("{}\nCalibrating using the ROS image stream...\n".format("*" * 20))
        import rospy
        from sensor_msgs.msg import CompressedImage
        topic_name = os.path.join('/', robot_name, 'camera_node/image/compressed')
        print('Topic to listen to is: %s' % topic_name)
        print('Let\'s wait for an image. Say cheese!')
        # Dummy for getting a ROS message
        rospy.init_node('calibrate_extrinsics')
        img_msg = None
        try:
            img_msg = rospy.wait_for_message(topic_name, CompressedImage, timeout=10)
            print('Image captured!')
        except rospy.ROSException as e:
            print(
                '\n\n\nDidn\'t get any message!: %s\n MAKE SURE YOU USE DT SHELL COMMANDS OF VERSION 4.1.9 OR HIGHER!\n\n\n'
                % (e, ))
        if img_msg is None:
            # Fail explicitly; previously execution continued and crashed
            # inside rgb_from_ros(None) with a confusing error.
            raise ValueError('No image received on topic %s' % topic_name)
        bgr = dtu.bgr_from_rgb(dtu.rgb_from_ros(img_msg))
        self.info('Picture taken: %s ' % str(bgr.shape))
    else:
        self.info('Loading input image %s' % self.options.input)
        bgr = dtu.bgr_from_jpg_fn(self.options.input)
    if bgr.shape[1] != 640:
        interpolation = cv2.INTER_CUBIC
        bgr = dtu.d8_image_resize_fit(bgr, 640, interpolation)
        self.info('Resized to: %s ' % str(bgr.shape))
    # Disable the old calibration file
    disable_old_homography(robot_name)
    camera_info = get_camera_info_for_robot(robot_name)
    homography_dummy = get_homography_default()
    gpg = GroundProjectionGeometry(camera_info, homography_dummy)
    res = OrderedDict()
    try:
        bgr_rectified = gpg.rectify(bgr, interpolation=cv2.INTER_CUBIC)
        res['bgr'] = bgr
        res['bgr_rectified'] = bgr_rectified
        _new_matrix, res['rectified_full_ratio_auto'] = gpg.rectify_full(bgr, ratio=1.65)
        result = estimate_homography(bgr_rectified)
        dtu.check_isinstance(result, HomographyEstimationResult)
        if result.bgr_detected_refined is not None:
            res['bgr_detected_refined'] = result.bgr_detected_refined
        if not result.success:
            raise Exception(result.error)
        save_homography(result.H, robot_name)
        msg = '''

To check that this worked well, place the robot on the road, and run:

    rosrun complete_image_pipeline single_image

Look at the produced jpgs.

'''
        self.info(msg)
    finally:
        # Always write out whatever intermediate images were produced.
        dtu.write_bgr_images_as_jpgs(res, output)
def work(log, outd, max_images, only_camera, write_frames):
    """
    For every image topic in a log's bag, build a labeled thumbnail grid
    (and optionally write the individual frames) into ``outd``.

    :param log: log descriptor with ``t0``, ``t1``, and ``log_name``
    :param outd: output directory
    :param max_images: maximum number of images to read per topic
    :param only_camera: if True, process only the main camera topic
    :param write_frames: if True, also write each frame as a jpg
    """
    filename = get_local_bag_file(log)
    t0 = log.t0
    t1 = log.t1
    # Frames smaller than this are zoomed up so the grid stays readable.
    MIN_HEIGHT = 480
    # noinspection PyUnboundLocalVariable
    bag = rosbag.Bag(filename)
    main = dtu.get_image_topic(bag)
    topics = [_ for _, __ in dtu.d8n_get_all_images_topic_bag(bag)]
    bag.close()
    dtu.logger.debug('%s - topics: %s' % (filename, topics))
    for topic in topics:
        if only_camera and topic != main:
            continue
        try:
            bag = rosbag.Bag(filename)
        except Exception:
            # Narrowed from a bare except; the error is logged and
            # re-raised either way.
            msg = 'Cannot read Bag file %s' % filename
            dtu.logger.error(msg)
            raise
        bag_proxy = dtu.BagReadProxy(bag, t0, t1)
        res = dtu.d8n_read_all_images_from_bag(bag_proxy, topic,
                                               max_images=max_images,
                                               use_relative_time=True)
        bag.close()
        d = topic.replace('/', '_')
        if d.startswith('_'):
            d = d[1:]
        d0 = os.path.join(outd, d)
        images_with_label = []
        for i in range(len(res)):
            rgb = res[i]['rgb']
            H, _W = rgb.shape[:2]
            if H < MIN_HEIGHT:
                zoom = int(np.ceil(MIN_HEIGHT / H))
                rgb = dtu.zoom_image(rgb, zoom)
            timestamp = res[i]['timestamp']
            s = 't = %.2f' % (timestamp - t0)
            with_label = dtu.add_header_to_rgb(rgb, s)
            images_with_label.append(with_label)
        if write_frames:
            # NOTE(review): this writes the raw frames (res[i]['rgb']),
            # not the labeled/zoomed ones — behavior preserved, but
            # possibly the labeled images were intended; verify.
            for i in range(len(res)):
                rgb = res[i]['rgb']
                fn = os.path.join(d0, ('image-%05d' % i) + '.jpg')
                dtu.write_bgr_as_jpg(dtu.bgr_from_rgb(rgb), fn)
        grid = dtu.make_images_grid(
            images_with_label,
            pad=4,
            bgcolor=dtu.ColorConstants.RGB_DUCKIETOWN_YELLOW)
        s = log.log_name
        grid = dtu.add_header_to_rgb(grid, s, max_height=32)
        if (topic != main) or len(topics) > 1:
            fn = d0 + '.jpg'
            dtu.write_rgb_as_jpg(grid, fn)
        if topic == main:
            fn = outd + '.thumbnails.jpg'
            dtu.write_rgb_as_jpg(grid, fn)
def processImage_(self, image_msg):
    """
    Process one compressed camera image: decode, resize/crop, color-correct,
    detect white/yellow/red lines, and publish a normalized SegmentList.
    When ``self.verbose`` is set, also publish debug visualizations.

    :param image_msg: sensor_msgs/CompressedImage from the camera
    """
    self.stats.processed()
    if self.intermittent_log_now():
        self.intermittent_log(self.stats.info())
        self.stats.reset()
    tk = TimeKeeper(image_msg)
    self.intermittent_counter += 1
    # Decode from compressed image with OpenCV.
    # (Removed a dead `if False:` branch that used dtu.bgr_from_jpg.)
    try:
        image_cv_rgb = dtu.rgb_from_jpg_by_JPEG_library(image_msg.data)
        image_cv = dtu.bgr_from_rgb(image_cv_rgb)
    except ValueError as e:
        self.loginfo('Could not decode image: %s' % e)
        return
    tk.completed('decoded')
    # Resize and crop image
    hei_original, wid_original = image_cv.shape[0:2]
    if self.image_size[0] != hei_original or self.image_size[1] != wid_original:
        # image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
        image_cv = cv2.resize(image_cv,
                              (self.image_size[1], self.image_size[0]),
                              interpolation=cv2.INTER_NEAREST)
    image_cv = image_cv[self.top_cutoff:, :, :]
    tk.completed('resized')
    # apply color correction
    image_cv_corr = self.ai.applyTransform(image_cv)
    # image_cv_corr = cv2.convertScaleAbs(image_cv_corr)
    tk.completed('corrected')
    # Set the image to be detected
    self.detector.setImage(image_cv_corr)
    # Detect lines and normals
    white = self.detector.detectLines('white')
    yellow = self.detector.detectLines('yellow')
    red = self.detector.detectLines('red')
    tk.completed('detected')
    # SegmentList constructor
    segmentList = SegmentList()
    segmentList.header.stamp = image_msg.header.stamp
    # Convert to normalized pixel coordinates, and add segments to segmentList.
    # arr_cutoff undoes the top crop; arr_ratio normalizes to [0, 1].
    arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
    arr_ratio = np.array(
        (1. / self.image_size[1], 1. / self.image_size[0],
         1. / self.image_size[1], 1. / self.image_size[0]))
    if len(white.lines) > 0:
        lines_normalized_white = ((white.lines + arr_cutoff) * arr_ratio)
        segmentList.segments.extend(
            self.toSegmentMsg(lines_normalized_white, white.normals,
                              Segment.WHITE))
    if len(yellow.lines) > 0:
        lines_normalized_yellow = ((yellow.lines + arr_cutoff) * arr_ratio)
        segmentList.segments.extend(
            self.toSegmentMsg(lines_normalized_yellow, yellow.normals,
                              Segment.YELLOW))
    if len(red.lines) > 0:
        lines_normalized_red = ((red.lines + arr_cutoff) * arr_ratio)
        segmentList.segments.extend(
            self.toSegmentMsg(lines_normalized_red, red.normals,
                              Segment.RED))
    self.intermittent_log(
        '# segments: white %3d yellow %3d red %3d' %
        (len(white.lines), len(yellow.lines), len(red.lines)))
    tk.completed('prepared')
    # Publish segmentList
    self.pub_lines.publish(segmentList)
    tk.completed('--pub_lines--')
    # VISUALIZATION only below
    if self.verbose:
        # Draw lines and normals
        image_with_lines = np.copy(image_cv_corr)
        drawLines(image_with_lines, white.lines, (0, 0, 0))
        drawLines(image_with_lines, yellow.lines, (255, 0, 0))
        drawLines(image_with_lines, red.lines, (0, 255, 0))
        tk.completed('drawn')
        # Publish the frame with lines
        image_msg_out = self.bridge.cv2_to_imgmsg(image_with_lines, "bgr8")
        image_msg_out.header.stamp = image_msg.header.stamp
        self.pub_image.publish(image_msg_out)
        tk.completed('pub_image')
        # if self.verbose:
        colorSegment = color_segment(white.area, red.area, yellow.area)
        edge_msg_out = self.bridge.cv2_to_imgmsg(self.detector.edges, "mono8")
        colorSegment_msg_out = self.bridge.cv2_to_imgmsg(colorSegment, "bgr8")
        self.pub_edge.publish(edge_msg_out)
        self.pub_colorSegment.publish(colorSegment_msg_out)
        tk.completed('pub_edge/pub_segment')
    self.intermittent_log(tk.getall())
def process_log(self, bag_in, prefix_in, bag_out, prefix_out, utils):  #@UnusedVariable
    """
    Run the image pipeline on every camera frame of an input bag and write
    the annotated composite image (plus per-frame stats) to the output bag.

    :param bag_in: input bag (read via read_messages_plus)
    :param prefix_in: unused (kept for interface compatibility)
    :param bag_out: output bag to write annotated images into
    :param prefix_out: topic prefix for the output bag
    :param utils: helper giving access to the log and stat writing
    """
    log_name = utils.get_log().log_name
    vehicle_name = dtu.which_robot(bag_in)
    dtu.logger.info('Vehicle name: %s' % vehicle_name)
    gp = GroundProjection(vehicle_name)
    topic = dtu.get_image_topic(bag_in)
    bgcolor = dtu.ColorConstants.BGR_DUCKIETOWN_YELLOW
    sequence = bag_in.read_messages_plus(topics=[topic])
    for _i, mp in enumerate(sequence):
        bgr = dtu.bgr_from_rgb(dtu.rgb_from_ros(mp.msg))
        res, stats = run_pipeline(bgr,
                                  gp=gp,
                                  line_detector_name=self.line_detector,
                                  image_prep_name=self.image_prep,
                                  lane_filter_name=self.lane_filter,
                                  anti_instagram_name=self.anti_instagram,
                                  all_details=self.all_details)
        # Smaller tiles when all debug images are requested.
        rect = (480, 640) if not self.all_details else (240, 320)
        res = dtu.resize_images_to_fit_in_rect(res, rect, bgcolor=bgcolor)
        # Fixed typo in the log message ('fron log' -> 'from log').
        print('abs: %s window: %s from log: %s' %
              (mp.time_absolute, mp.time_window,
               mp.time_from_physical_log_start))
        headers = [
            "Robot: %s log: %s time: %.2f s" %
            (vehicle_name, log_name, mp.time_from_physical_log_start),
            "Algorithms | color correction: %s | preparation: %s | detector: %s | filter: %s"
            % (
                self.anti_instagram,
                self.image_prep,
                self.line_detector,
                self.lane_filter,
            )
        ]
        res = dtu.write_bgr_images_as_jpgs(res, dirname=None, bgcolor=bgcolor)
        cv_image = res['all']
        for head in reversed(headers):
            max_height = 35
            cv_image = dtu.add_header_to_bgr(cv_image, head,
                                             max_height=max_height)
        otopic = "all"
        omsg = dtu.d8_compressed_image_from_cv_image(
            cv_image, same_timestamp_as=mp.msg)
        t = rospy.Time.from_sec(mp.time_absolute)  # @UndefinedVariable
        print('written %r at t = %s' % (otopic, t.to_sec()))
        bag_out.write(prefix_out + '/' + otopic, omsg, t=t)
        for name, value in stats.items():
            utils.write_stat(prefix_out + '/' + name, value, t=t)