def callback(self, rgb):
    # convert ROS imgmsg to OpenCV image
    rgb = self.cv_bridge.imgmsg_to_cv2(rgb, desired_encoding='bgr8')

    # send image to the DNN client
    rospy.loginfo_once("send image to client")
    su.sendall_image(self.sock, rgb)

    # receive inference results from the client
    rospy.loginfo_once("receive inference results from client")
    probs = su.recvall_pickle(self.sock)
    labels = su.recvall_pickle(self.sock)
    boxes = su.recvall_pickle(self.sock)
    masks = su.recvall_pickle(self.sock)

    # draw the class name and bounding box of each detection
    for i in range(len(labels)):
        prob = probs[i]
        label = labels[i]
        x1, y1, x2, y2 = boxes[i].astype(int)
        mask = masks[i]
        cv2.putText(rgb, COCO_INSTANCE_CATEGORY_NAMES[int(label)], (x1 + 10, y1 - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 2)
        cv2.rectangle(rgb, (x1, y1), (x2, y2), (0, 255, 0), 3)

    # convert OpenCV image back to ROS imgmsg and publish it as a topic
    img_msg = self.cv_bridge.cv2_to_imgmsg(rgb, encoding='bgr8')
    self.result_pub.publish(img_msg)
    rospy.loginfo_once("Published the result as topic. check /detection_result")
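For context, here is a minimal sketch of the ROS node that could host this detection callback. The topic names, TCP address/port, and the `socket_utils` module name (imported as `su`) are assumptions; only `initialize_client` and the publish/subscribe pattern are taken from the surrounding code.

#!/usr/bin/env python
# Hypothetical node wiring for the callback above; topic names and the
# TCP address/port are placeholders, not the repo's actual values.
import rospy
import socket_utils as su  # assumed name of the repo's socket helper module
from cv_bridge import CvBridge
from sensor_msgs.msg import Image

class DetectionNode(object):
    def __init__(self):
        self.cv_bridge = CvBridge()
        # connect to the inference server (address/port are placeholders)
        self.sock = su.initialize_client("localhost", 9999)
        self.result_pub = rospy.Publisher("/detection_result", Image, queue_size=1)
        rospy.Subscriber("/camera/color/image_raw", Image, self.callback, queue_size=1)

    # callback(self, rgb) as defined above

if __name__ == "__main__":
    rospy.init_node("detection_node")
    DetectionNode()
    rospy.spin()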
def identify_bracket(sock, rgb_crop, vis_img, mask, cntr, p1, p2):
    # bracket: compare the pixel counts of the two mask halves for a
    # stable binary classification
    mask1, mask2, cntr, p1, p2 = divide_mask(mask, cntr, p1, p2)
    n_pix_1 = np.sum(mask1)
    n_pix_2 = np.sum(mask2)
    # flip the orientation point toward the larger half
    if n_pix_1 < n_pix_2:
        p1 = cntr + 2 * (cntr - p1)
    su.sendall_image(sock, rgb_crop)
    pred_result = su.recvall_pickle(sock)
    angle = get_2d_orientation(cntr, p1)
    return angle, p1, vis_img, bool(pred_result)
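For reference, a minimal sketch of what `get_2d_orientation` might compute, assuming it returns the angle of the vector from the mask center `cntr` to the orientation point `p1`; the repo's actual implementation is not shown in this excerpt.

import numpy as np

def get_2d_orientation(cntr, p1):
    # assumed behavior: angle (radians) of the cntr -> p1 vector,
    # measured from the image x-axis
    dx = p1[0] - cntr[0]
    dy = p1[1] - cntr[1]
    return np.arctan2(dy, dx)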
def identify_side(sock, rgb_img, w, h, offset, vis_img, mask, cntr, p1, p2):
    # split the mask into two halves and fit a rotated rectangle to each
    mask1, mask2, cntr, p1, p2 = divide_mask(mask, cntr, p1, p2)
    contours1, _ = cv2.findContours(mask1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contours2, _ = cv2.findContours(mask2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    rect1 = cv2.minAreaRect(max(contours1, key=cv2.contourArea))
    rect2 = cv2.minAreaRect(max(contours2, key=cv2.contourArea))
    w1 = min(rect1[1][0], rect1[1][1])
    w2 = min(rect2[1][0], rect2[1][1])
    box1 = np.int0(cv2.boxPoints(rect1))
    box2 = np.int0(cv2.boxPoints(rect2))

    # crop the first half and ask the classifier whether it shows the bolt side
    offset = 0  # note: overrides the offset argument
    x1, x2, y1, y2 = get_xy2box(box1, offset, w, h)
    rgb1 = np.uint8(rgb_img[y1:y2, x1:x2].copy())
    su.sendall_image(sock, rgb1)
    is_bolt = su.recvall_pickle(sock)
    if is_bolt:
        cv2.drawContours(vis_img, [box1], 0, (255, 0, 0), 2)
    else:
        # the bolt is in the other half, so flip the orientation point
        p1 = cntr + 2 * (cntr - p1)
        cv2.drawContours(vis_img, [box2], 0, (255, 0, 0), 2)
    # (an earlier variant also classified the second crop and kept the more
    # confident of the two predictions; it is left out here)
    angle = get_2d_orientation(cntr, p1)
    return angle, p1, vis_img
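The helpers `divide_mask` and `get_xy2box` are not shown in this excerpt. A plausible sketch, assuming `divide_mask` splits the mask by the sign of each pixel's projection onto the principal axis through the pixel centroid, and `get_xy2box` returns an axis-aligned crop of a rotated box clamped to the image bounds:

import numpy as np

def divide_mask(mask, cntr, p1, p2):
    # assumed behavior: split the mask into two halves by the sign of the
    # projection of each pixel onto the principal (major) axis
    ys, xs = np.nonzero(mask)
    pts = np.stack([xs, ys], axis=1).astype(np.float64)
    centered = pts - pts.mean(axis=0)
    eigvals, eigvecs = np.linalg.eigh(np.cov(centered.T))
    major = eigvecs[:, np.argmax(eigvals)]
    proj = centered @ major
    mask1 = np.zeros_like(mask)
    mask2 = np.zeros_like(mask)
    half = proj >= 0
    mask1[ys[half], xs[half]] = 1
    mask2[ys[~half], xs[~half]] = 1
    return mask1, mask2, cntr, p1, p2

def get_xy2box(box, offset, w, h):
    # assumed behavior: axis-aligned crop around the rotated box corners,
    # padded by `offset` and clamped to the w x h image
    x1 = max(int(box[:, 0].min()) - offset, 0)
    x2 = min(int(box[:, 0].max()) + offset, w)
    y1 = max(int(box[:, 1].min()) - offset, 0)
    y2 = min(int(box[:, 1].max()) + offset, h)
    return x1, x2, y1, y2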
def callback(self, rgb):
    # convert ROS imgmsg to OpenCV image
    rgb = self.cv_bridge.imgmsg_to_cv2(rgb, desired_encoding='bgr8')

    # crop the central region and resize it to the network input size
    H, W, C = rgb.shape
    rgb_crop = rgb[int(H/4):int(3*H/4), int(W/4):int(3*W/4), :]
    rgb_crop = cv2.resize(rgb_crop, (224, 224))

    # send image to the DNN client
    rospy.loginfo_once("send image to client")
    su.sendall_image(self.sock, rgb_crop)

    # receive inference results from the client
    rospy.loginfo_once("receive inference results from client")
    class_name = su.recvall_pickle(self.sock)

    # draw the class name and the crop region
    cv2.putText(rgb, class_name, (int(0.4 * W), int(0.9 * H)),
                cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 2)
    cv2.rectangle(rgb, (int(W/4), int(H/4)), (int(3*W/4), int(3*H/4)), (0, 255, 0), 3)

    # convert OpenCV image back to ROS imgmsg and publish it as a topic
    img_msg = self.cv_bridge.cv2_to_imgmsg(rgb, encoding='bgr8')
    self.result_pub.publish(img_msg)
    rospy.loginfo_once("Published the result as topic. check /classification_result")
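On the other end of this socket sits a classification server. A minimal sketch of that loop, mirroring the CenterMask server below; the model, class-name list, address, and port are placeholders, not the repo's actual values:

import torch
import torchvision
import socket_utils as su  # assumed name of the repo's socket helper module

sock = su.initialize_client("localhost", 9998)  # placeholder address/port
class_names = ["bolt", "bracket", "side"]       # placeholder label set
model = torchvision.models.resnet18(num_classes=len(class_names)).eval()  # placeholder model

while True:
    img = su.recvall_image(sock)  # the 224x224 BGR crop sent by the node
    # BGR -> RGB, HWC -> CHW, scale to [0, 1]
    x = torch.from_numpy(img[:, :, ::-1].copy()).permute(2, 0, 1).float() / 255.0
    with torch.no_grad():
        logits = model(x.unsqueeze(0))
    su.sendall_pickle(sock, class_names[int(logits.argmax())])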
import os

import cv2
import torch
import yaml

import socket_utils as su  # assumed name of the repo's socket helper module
# setup_cfg, VisualizationDemo, and yaml_path are assumed to be defined
# above, as in the CenterMask demo script.

with open(yaml_path) as f:
    params = yaml.load(f, Loader=yaml.FullLoader)
cfg = setup_cfg(params)
os.environ["CUDA_VISIBLE_DEVICES"] = params["is_gpu_id"]
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("==> Loading CenterMask on", device, params["is_gpu_id"])
demo = VisualizationDemo(cfg)

sock = su.initialize_client(params["tcp_ip"], params["centermask_tcp_port"])
try:
    while True:
        img = su.recvall_image(sock)
        predictions, vis_output = demo.run_on_image(img)
        instances = predictions["instances"]
        pred_masks = instances.pred_masks.cpu().detach().numpy()         # (N, H, W)
        pred_boxes = instances.pred_boxes.tensor.cpu().detach().numpy()  # (x1, y1, x2, y2)
        pred_scores = instances.scores.cpu().detach().numpy()            # N confidence scores
        pred_classes = instances.pred_classes.cpu().detach().numpy()     # in [0, num_categories)
        vis_img = cv2.resize(vis_output.get_image()[:, :, ::-1],
                             (params["width"], params["height"]))
        su.sendall_image(sock, vis_img)
        su.sendall_pickle(sock, pred_masks)
        su.sendall_pickle(sock, pred_boxes)
        su.sendall_pickle(sock, pred_classes)
        su.sendall_pickle(sock, pred_scores)
finally:
    sock.close()
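Note the send order: the visualization image first, then masks, boxes, classes, and scores as pickles. The node on the other side must receive in exactly this order, e.g.:

vis_img = su.recvall_image(sock)
pred_masks = su.recvall_pickle(sock)
pred_boxes = su.recvall_pickle(sock)
pred_classes = su.recvall_pickle(sock)
pred_scores = su.recvall_pickle(sock)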