def do_comparer_plot(bag_in, prefix_in, bag_out, prefix_out, signals, plot_name):
    topic2messages = read_data_for_signals(bag_in, prefix_in, signals)

    n = len(topic2messages)
    if n < 2:
        msg = 'Not enough topics'
        raise ValueError(msg)

    for i, j in itertools.product(range(n), range(n)):
        if i == j:
            continue
        s1 = signals[i]
        d1 = topic2messages[prefix_in + s1.topic]
        s2 = signals[j]
        d2 = topic2messages[prefix_in + s2.topic]

        bgr = _do_plot(s1, d1, s2, d2)

        t_inf = rospy.Time.from_sec(bag_in.get_end_time())  # @UndefinedVariable
        omsg = dtu.d8_compressed_image_from_cv_image(bgr, timestamp=t_inf)
        otopic = prefix_out + '/' + plot_name + '_%s_%s' % (i, j)
        bag_out.write(otopic, omsg, t=t_inf)
def process_image(self, image):
    # YOUR CODE GOES HERE
    # height, width, depth = image.shape  # height=480, width=640
    # RECTIFY THE IMAGE FIRST!
    jpg_data = image[:, :, ::-1]
    # before sending: re-encode the data as a CompressedImage
    return d8_compressed_image_from_cv_image(jpg_data)
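The stub above notes that the image should be rectified first but never does so. A minimal sketch of what that could look like, assuming the rectify(image, intrinsics) helper and the self.intrinsics attribute used by the obstacle-detection snippets later in this section:

def process_image(self, image):
    # Sketch only: rectify() and self.intrinsics are assumptions taken from the
    # other process_image/callback excerpts in this section.
    rectified = rectify(image, self.intrinsics)  # undistort with the camera intrinsics
    bgr = rectified[:, :, ::-1]                  # RGB -> BGR before encoding
    return d8_compressed_image_from_cv_image(bgr)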
def processMsg(self, msg_in):
    # Get the image from the message
    im_rgb = du.rgb_from_ros(msg_in)
    im_bgr = cv2.cvtColor(im_rgb, cv2.COLOR_RGB2BGR)

    # Apply filters
    im_mod_bgr = self.applyFilters(im_bgr, self.filters)

    # Convert to message
    msg_out = msg_in
    msg_out.data = du.d8_compressed_image_from_cv_image(im_mod_bgr, msg_in).data
    return msg_out
def cbFilter(self, msg):
    filter = self.setupParameter("~filter")
    image = dt.rgb_from_ros(msg)
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    if filter == 'flip-vertical':
        image = np.fliplr(image)
    if filter == 'flip-horizontal':
        image = np.flipud(image)
    if filter == 'greyscale':
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    if filter == 'sepia':
        image_copy = np.empty(image.shape, 'float')
        image = cv2.transform(image, M_Sepia, image_copy)
    new_msg = dt.d8_compressed_image_from_cv_image(image)
    self.pub_topic_a.publish(new_msg)
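M_Sepia is not defined in this excerpt. A plausible definition, labeled as an assumption, reusing the same coefficients as the sepia kernel in the bag-filtering script later in this section:

# Assumed definition of M_Sepia (not part of the excerpt above); the coefficients
# mirror the sepia_kernel used in the instagram() bag-filtering script below.
M_Sepia = np.array([[.272, .534, .131],
                    [.349, .686, .168],
                    [.393, .769, .189]])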
def visualize_image(self, image, obst_list):
    size = len(obst_list.poses)
    for i in range(size):
        top = obst_list.poses[i].orientation.x
        bottom = obst_list.poses[i].orientation.y
        left = obst_list.poses[i].orientation.z
        right = obst_list.poses[i].orientation.w
        points = np.float32([[left, left, right, right], [top, bottom, bottom, top]])
        pts = self.detector.bird_view_pixel2real_pic_pixel(points)
        if obst_list.poses[i].position.z < 0:
            cv2.polylines(image, np.int32([np.transpose(pts)]), True, (0, 255, 0), 3)
        else:
            cv2.polylines(image, np.int32([np.transpose(pts)]), True, (255, 0, 0), 3)
    return d8_compressed_image_from_cv_image(image[:, :, ::-1])
def visualize_bird_perspective(self, image, obst_list):
    # hsv = cv2.cvtColor(image[self.crop:, :, :], cv2.COLOR_RGB2HSV)
    bird_img = cv2.warpPerspective(image, self.detector.M,
                                   (self.detector.img_width, self.detector.img_height))
    size = len(obst_list.poses)
    for i in range(size):
        top = obst_list.poses[i].orientation.x
        bottom = obst_list.poses[i].orientation.y
        left = obst_list.poses[i].orientation.z
        right = obst_list.poses[i].orientation.w
        points = np.float32([[left, left, right, right], [top, bottom, bottom, top]])
        if obst_list.poses[i].position.z < 0:
            cv2.polylines(bird_img, np.int32([np.transpose(points)]), True, (0, 255, 0), 3)
        else:
            cv2.polylines(bird_img, np.int32([np.transpose(points)]), True, (255, 0, 0), 3)
    return d8_compressed_image_from_cv_image(bird_img[:, :, ::-1])
def callback(self, image):
    if not self.active:
        return
    if not self.thread_lock.acquire(False):
        return
    # start = time.time()

    obst_list = PoseArray()
    marker_list = MarkerArray()

    # pass the RECTIFIED image to the detector module
    # 1. EXTRACT OBSTACLES and return the pose array
    obst_list = self.detector.process_image(
        rectify(rgb_from_ros(image), self.intrinsics))
    obst_list.header.stamp = image.header.stamp  # for synchronization
    # interesting to check whether there is a traffic jam or not
    # print image.header.stamp.to_sec()
    self.publisher_arr.publish(obst_list)
    # EXPLANATION: (x, y) are the obstacle's world coordinates, z is its radius
    # the quaternion has no meaning!

    # 3. VISUALIZE THE POSE ARRAY IN TF
    if self.show_marker:
        marker_list = self.visualizer.visualize_marker(obst_list)
        self.publisher_marker.publish(marker_list)

    # 4. OPTIONALLY DISPLAY THE (CROPPED!) IMAGE
    if self.show_image:
        obst_image = CompressedImage()
        obst_image.header.stamp = image.header.stamp
        obst_image.format = "jpeg"
        obst_image.data = self.visualizer.visualize_image(
            rectify(rgb_from_ros(image), self.intrinsics), obst_list)
        # re-encode only the cropped part of the visualization
        rgb_image = rgb_from_ros(obst_image.data)
        obst_image.data = d8_compressed_image_from_cv_image(
            rgb_image[self.detector.crop:, :, ::-1])
        # this part only visualizes the cropped version -> somewhat inefficient,
        # but it keeps visualizer.py modular
        self.publisher_img.publish(obst_image.data)

    if self.show_bird_perspective:
        bird_perspective_image = CompressedImage()
        bird_perspective_image.header.stamp = image.header.stamp
        bird_perspective_image.format = "jpeg"
        bird_perspective_image.data = self.visualizer.visualize_bird_perspective(
            rectify(rgb_from_ros(image), self.intrinsics), obst_list)
        rgb_image = rgb_from_ros(bird_perspective_image.data)
        bird_perspective_image.data = d8_compressed_image_from_cv_image(rgb_image)
        self.publisher_img_bird_perspective.publish(bird_perspective_image.data)

    # end = time.time()
    # print "OBST DETECTION TOOK: s"
    # print(end - start)
    self.r.sleep()
    self.thread_lock.release()
def callback(msg):
    img = rgb_from_ros(msg)
    gen = instagram(img, filter_lst)
    newmsg = d8_compressed_image_from_cv_image(gen)
    publisher.publish(newmsg)
            gen = cv2.cvtColor(gen, cv2.COLOR_BGR2GRAY).reshape(
                (gen.shape[0], gen.shape[1], 1))
            gen = np.concatenate([gen, gen, gen], axis=2)
        elif f == "sepia":
            sepia_kernel = np.array([[.272, .534, .131],
                                     [.349, .686, .168],
                                     [.393, .769, .189]])
            gen = cv2.transform(gen, sepia_kernel)
        else:
            raise Exception('filter not found')
    return gen


bag_file = sys.argv[1]
topic_sel = sys.argv[2]
filters = sys.argv[3].split(":")
bag_out = rosbag.Bag(sys.argv[4], mode="w")
bag = rosbag.Bag(bag_file)

for topic, msg, t in bag.read_messages():
    if topic == topic_sel:
        img = rgb_from_ros(msg)
        img = instagram(img, filters)
        msg = d8_compressed_image_from_cv_image(img)
    bag_out.write(topic=topic, msg=msg, t=t)

bag.close()
bag_out.close()
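The first lines above are the tail of an instagram() filter helper whose beginning is not included in this excerpt. A self-contained sketch of what the full helper might look like: only the grayscale body, the "sepia" branch, and the "filter not found" error come from the fragment; the flip filter names and their fliplr/flipud mapping (borrowed from cbFilter above) are assumptions.

# Sketch of the full instagram() helper; everything outside the grayscale/sepia
# bodies and the final error is an assumption based on cbFilter() above.
import cv2
import numpy as np


def instagram(img, filter_lst):
    gen = img.copy()
    for f in filter_lst:
        if f == "flip-vertical":
            gen = np.fliplr(gen)  # same mapping as cbFilter above
        elif f == "flip-horizontal":
            gen = np.flipud(gen)
        elif f == "grayscale":
            gen = cv2.cvtColor(gen, cv2.COLOR_BGR2GRAY).reshape(
                (gen.shape[0], gen.shape[1], 1))
            gen = np.concatenate([gen, gen, gen], axis=2)
        elif f == "sepia":
            sepia_kernel = np.array([[.272, .534, .131],
                                     [.349, .686, .168],
                                     [.393, .769, .189]])
            gen = cv2.transform(gen, sepia_kernel)
        else:
            raise Exception('filter not found')
    return gen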
def do_plot(bag_in, prefix_in, bag_out, prefix_out, signals, plot_name):
    topic2messages = defaultdict(lambda: dict(timestamp=[], data=[]))

    topics = []
    for signal_spec in signals:
        dtu.check_isinstance(signal_spec, PlotSignalSpec)
        topic_fqn = prefix_in + signal_spec.topic
        topics.append(topic_fqn)

    for _j, mp in enumerate(bag_in.read_messages_plus(topics=topics)):
        data = interpret_ros(mp.msg)
        topic2messages[mp.topic]['data'].append(data)
        topic2messages[mp.topic]['timestamp'].append(mp.time_from_physical_log_start)

    for signal_spec in signals:
        topic_fqn = prefix_in + signal_spec.topic
        if topic_fqn not in topic2messages:
            msg = 'Could not find any value for topic %r.' % topic_fqn
            raise ValueError(msg)

    bgcolor = dtu.ColorConstants.RGB_DUCKIETOWN_YELLOW
    dpi = 100
    figure_args = dict(facecolor=dtu.matplotlib_01_from_rgb(bgcolor))
    a = dtu.CreateImageFromPylab(dpi=dpi, figure_args=figure_args)

    use_legend = len(signals) >= 3  # todo: check same units

    with a as pylab:
        axes = []
        _fig, ax0 = pylab.subplots()
        ax0.set_xlabel('time (s)')
        axes.append(ax0)

        if use_legend:
            for i in range(len(signals) - 1):
                axes.append(ax0)
        else:
            for i in range(len(signals) - 1):
                axes.append(ax0.twinx())

        for i, signal_spec in enumerate(signals):
            ax = axes[i]
            topic_fqn = prefix_in + signal_spec.topic
            recorded = topic2messages[topic_fqn]
            data = np.array(recorded['data'])
            t = np.array(recorded['timestamp'])
            color = signal_spec.color
            markersize = 5

            data_converted = convert_unit(data, signal_spec.units, signal_spec.units_display)
            ax.plot(t, data_converted, 'o',
                    color=color,
                    label=signal_spec.label,
                    markersize=markersize,
                    clip_on=False)

            if not use_legend:
                label = '%s [%s]' % (signal_spec.label, signal_spec.units_display)
                ax.set_ylabel(label, color=signal_spec.color)
                ax.tick_params('y', colors=color)

            ax.set_ylim(signal_spec.min, signal_spec.max)

            outward_offset = 20
            ax.xaxis.set_tick_params(direction='out')
            ax.yaxis.set_tick_params(direction='out')
            ax.spines['left'].set_position(('outward', outward_offset))
            ax.spines['top'].set_color('none')  # don't draw spine
            ax.spines['bottom'].set_position(('outward', outward_offset))
            ax.spines['right'].set_position(('outward', outward_offset))

            pos = {0: 'left', 1: 'right', 2: 'right'}[i]
            ax.spines[pos].set_color(color)

            ax.xaxis.set_ticks_position('bottom')

        if use_legend:
            label = '[%s]' % (signal_spec.units_display)
            ax.set_ylabel(label)
            pylab.legend()

    bgr = a.get_bgr()
    plot_name = plot_name.replace('/', '')
    # output_filename = os.path.join('tmp', plot_name + '.png')
    # dtu.write_bgr_as_jpg(bgr, output_filename)
    t_inf = rospy.Time.from_sec(bag_in.get_end_time())  # @UndefinedVariable
    omsg = dtu.d8_compressed_image_from_cv_image(bgr, timestamp=t_inf)
    otopic = prefix_out + '/' + plot_name
    bag_out.write(otopic, omsg, t=t_inf)
    # Convert to message
    msg_out = msg_in
    msg_out.data = du.d8_compressed_image_from_cv_image(im_mod_bgr, msg_in).data
    return msg_out


### --- Testing --- #
if __name__ == "__main__":
    # Load
    im = cv2.imread("sunset.jpg", cv2.IMREAD_COLOR)
    msg_in = CompressedImage()

    # Not manipulated
    msg_in.data = du.d8_compressed_image_from_cv_image(im, msg_in).data
    inst = Instagram("")
    msg_out = inst.processMsg(msg_in)
    im_out_rgb = du.rgb_from_ros(msg_out)
    im_out_bgr = cv2.cvtColor(im_out_rgb, cv2.COLOR_RGB2BGR)
    cv2.imwrite("untouched.jpg", im_out_bgr)

    # Flip vertically
    msg_in.data = du.d8_compressed_image_from_cv_image(im, msg_in).data
    inst = Instagram("flip-vertical")
    msg_out = inst.processMsg(msg_in)
    im_out_rgb = du.rgb_from_ros(msg_out)
    im_out_bgr = cv2.cvtColor(im_out_rgb, cv2.COLOR_RGB2BGR)
    cv2.imwrite("vflip.jpg", im_out_bgr)

    # Flip horizontally
def process_image(self, image):
    return d8_compressed_image_from_cv_image(
        rectify(image[..., ::-1], self.intrinsics)).data
def process_log(self, bag_in, prefix_in, bag_out, prefix_out, utils):  # @UnusedVariable
    log_name = utils.get_log().log_name

    vehicle_name = dtu.which_robot(bag_in)
    dtu.logger.info('Vehicle name: %s' % vehicle_name)

    gp = GroundProjection(vehicle_name)

    topic = dtu.get_image_topic(bag_in)

    bgcolor = dtu.ColorConstants.BGR_DUCKIETOWN_YELLOW

    sequence = bag_in.read_messages_plus(topics=[topic])
    for _i, mp in enumerate(sequence):
        bgr = dtu.bgr_from_rgb(dtu.rgb_from_ros(mp.msg))

        res, stats = run_pipeline(bgr,
                                  gp=gp,
                                  line_detector_name=self.line_detector,
                                  image_prep_name=self.image_prep,
                                  lane_filter_name=self.lane_filter,
                                  anti_instagram_name=self.anti_instagram,
                                  all_details=self.all_details)

        rect = (480, 640) if not self.all_details else (240, 320)
        res = dtu.resize_images_to_fit_in_rect(res, rect, bgcolor=bgcolor)

        print('abs: %s window: %s from log: %s' %
              (mp.time_absolute, mp.time_window, mp.time_from_physical_log_start))

        headers = [
            "Robot: %s log: %s time: %.2f s" % (vehicle_name, log_name,
                                                mp.time_from_physical_log_start),
            "Algorithms | color correction: %s | preparation: %s | detector: %s | filter: %s" % (
                self.anti_instagram,
                self.image_prep,
                self.line_detector,
                self.lane_filter,
            ),
        ]

        res = dtu.write_bgr_images_as_jpgs(res, dirname=None, bgcolor=bgcolor)

        cv_image = res['all']

        for head in reversed(headers):
            max_height = 35
            cv_image = dtu.add_header_to_bgr(cv_image, head, max_height=max_height)

        otopic = "all"

        omsg = dtu.d8_compressed_image_from_cv_image(cv_image, same_timestamp_as=mp.msg)
        t = rospy.Time.from_sec(mp.time_absolute)  # @UndefinedVariable
        print('written %r at t = %s' % (otopic, t.to_sec()))
        bag_out.write(prefix_out + '/' + otopic, omsg, t=t)

        for name, value in stats.items():
            utils.write_stat(prefix_out + '/' + name, value, t=t)