Example #1
 def split(self, node, w, h):
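     # Mark this free node as used and split its remaining space into a block below ('down') and a block to the right ('right').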
     node.used = True
     node.down = Block(
         Rect(x=node.rect.x,
              y=node.rect.y + h,
              width=w,
              height=node.rect.height - h))
     node.right = Block(
         Rect(x=node.rect.x + w,
              y=node.rect.y,
              width=node.rect.width - w,
              height=h))
     return node
Example #2
 def _draw_polygon(self, imgmsg, polygon_msg):
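     # Use the first two polygon points as opposite corners (top-left and bottom-right) of the rectangle.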
     rect_msg = Rect()
     x1 = polygon_msg.polygon.points[0].x
     y1 = polygon_msg.polygon.points[0].y
     x2 = polygon_msg.polygon.points[1].x
     y2 = polygon_msg.polygon.points[1].y
     rect_msg.x = int(x1)
     rect_msg.y = int(y1)
     rect_msg.width = int(x2 - x1)
     rect_msg.height = int(y2 - y1)
     rects = [rect_msg]
     rects_msg = RectArray(header=polygon_msg.header, rects=rects)
     self._draw(imgmsg, rects_msg)
Example #3
    def image_cb(self, msg):
        img = self.bridge.imgmsg_to_cv2(msg, desired_encoding='rgb8')
        # RGB -> BGR
        rospy.logdebug('start readtext')
        results = self.reader.readtext(img[:, :, ::-1])
        rospy.logdebug('end readtext')

        bboxes = []
        scores = []
        texts = []
        rect_msg = RectArray(header=msg.header)
        for result in results:
            bb = result[0]
            text = result[1]
            score = result[2]
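            # Each readtext result is (corner points, text, confidence); the first and third corners are used as the top-left and bottom-right of the box.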
            x_min = int(np.round(bb[0][0]))
            y_min = int(np.round(bb[0][1]))
            x_max = int(np.round(bb[2][0]))
            y_max = int(np.round(bb[2][1]))
            bboxes.append([y_min, x_min, y_max, x_max])
            texts.append(text)
            scores.append(score)
            rect = Rect(
                x=x_min, y=y_min,
                width=x_max - x_min, height=y_max - y_min)
            rect_msg.rects.append(rect)
        bboxes = np.array(bboxes)
        scores = np.array(scores)

        cls_msg = ClassificationResult(
            header=msg.header,
            classifier=self.classifier_name,
            target_names=texts,
            labels=np.arange(len(texts)),
            label_names=texts,
            label_proba=scores)

        self.pub_rects.publish(rect_msg)
        self.pub_class.publish(cls_msg)

        if self.visualize:
            fig = plt.figure(
                tight_layout={'pad': 0})
            ax = plt.Axes(fig, [0., 0., 1., 1.])
            ax.axis('off')
            fig.add_axes(ax)
            vis_bbox(
                img.transpose((2, 0, 1)),
                bboxes, np.arange(len(texts)), scores,
                label_names=texts, ax=ax)
            fig.canvas.draw()
            w, h = fig.canvas.get_width_height()
            vis_img = np.fromstring(
                fig.canvas.tostring_rgb(), dtype=np.uint8)
            vis_img.shape = (h, w, 3)
            fig.clf()
            plt.close()
            vis_msg = self.bridge.cv2_to_imgmsg(vis_img, 'rgb8')
            vis_msg.header = msg.header
            self.pub_image.publish(vis_msg)
Example #4
    def image_cb(self, msg):
        img = self.bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
        H, W = img.shape[:2]
        objs = self.engine.DetectWithImage(PIL.Image.fromarray(img),
                                           threshold=self.score_thresh,
                                           keep_aspect_ratio=True,
                                           relative_coord=True,
                                           top_k=self.top_k)
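        # With relative_coord=True the detector returns normalized boxes; they are scaled back to pixel coordinates below.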

        bboxes = []
        scores = []
        labels = []
        rect_msg = RectArray(header=msg.header)
        for obj in objs:
            x_min, y_min, x_max, y_max = obj.bounding_box.flatten().tolist()
            x_min = int(np.round(x_min * W))
            y_min = int(np.round(y_min * H))
            x_max = int(np.round(x_max * W))
            y_max = int(np.round(y_max * H))
            bboxes.append([y_min, x_min, y_max, x_max])
            scores.append(obj.score)
            labels.append(self.label_ids.index(int(obj.label_id)))
            rect = Rect(x=x_min,
                        y=y_min,
                        width=x_max - x_min,
                        height=y_max - y_min)
            rect_msg.rects.append(rect)
        bboxes = np.array(bboxes)
        scores = np.array(scores)
        labels = np.array(labels)

        cls_msg = ClassificationResult(
            header=msg.header,
            classifier=self.classifier_name,
            target_names=self.label_names,
            labels=labels,
            label_names=[self.label_names[l] for l in labels],
            label_proba=scores)

        self.pub_rects.publish(rect_msg)
        self.pub_class.publish(cls_msg)

        if self.visualize:
            vis_img = img[:, :, ::-1].transpose(2, 0, 1)
            vis_bbox(vis_img,
                     bboxes,
                     labels,
                     scores,
                     label_names=self.label_names)
            fig = plt.gcf()
            fig.canvas.draw()
            w, h = fig.canvas.get_width_height()
            vis_img = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8)
            fig.clf()
            vis_img.shape = (h, w, 3)
            plt.close()
            vis_msg = self.bridge.cv2_to_imgmsg(vis_img, 'rgb8')
            vis_msg.header = msg.header
            self.pub_image.publish(vis_msg)
Example #5
    def _detect(self, imgmsg, rects_msg):
        bridge = cv_bridge.CvBridge()
        im_orig = bridge.imgmsg_to_cv2(imgmsg, desired_encoding='bgr8')
        im, im_scale = img_preprocessing(im_orig, self.pixel_means)
        rects_orig = jsk_recognition_utils.rects_msg_to_ndarray(rects_msg)
        if len(rects_orig) == 0:
            return
        rects = rects_orig * im_scale
        scores, bbox_pred = self._im_detect(im, rects)

        rects = RectArray(header=imgmsg.header)
        labels = []
        label_proba = []
        for cls_id in range(1, len(self.target_names)):
            _cls = scores[:, cls_id][:, np.newaxis]
            _bbx = bbox_pred[:, cls_id * 4:(cls_id + 1) * 4]
            dets = np.hstack((_bbx, _cls))
            keep = nms(dets, self.nms_thresh)
            dets = dets[keep, :]
            orig_rects = cuda.cupy.asnumpy(rects_orig)[keep, :]

            inds = np.where(dets[:, -1] >= self.conf_thresh)[0]

            for i in inds:
                _bbox = dets[i, :4]
                x1, y1, x2, y2 = orig_rects[i]
                width = x2 - x1
                height = y2 - y1
                center_x = x1 + 0.5 * width
                center_y = y1 + 0.5 * height

                dx, dy, dw, dh = _bbox
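                # R-CNN-style box decoding: (dx, dy) shift the box center, (dw, dh) scale its width and height exponentially.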
                _center_x = dx * width + center_x
                _center_y = dy * height + center_y
                _width = np.exp(dw) * width
                _height = np.exp(dh) * height

                x1 = _center_x - 0.5 * _width
                y1 = _center_y - 0.5 * _height
                x2 = _center_x + 0.5 * _width
                y2 = _center_y + 0.5 * _height
                rect = Rect(x=x1, y=y1, width=x2 - x1, height=y2 - y1)
                rects.rects.append(rect)
                labels.append(cls_id)
                label_proba.append(dets[:, -1][i])

        # publish classification result
        clss = ClassificationResult(
            header=imgmsg.header,
            classifier=self.classifier_name,
            target_names=self.target_names,
            labels=labels,
            label_names=[self.target_names[l] for l in labels],
            label_proba=label_proba,
        )
        self._pub_rects.publish(rects)
        self._pub_class.publish(clss)
Example #6
 def grow_down(self, w, h):
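     # Grow the packing area downward by h: the old root becomes the 'right' child, the new space below becomes 'down', then try to place the block.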
     prev_root = self.root
     self.root = Block(
         Rect(x=0,
              y=0,
              width=self.root.rect.width,
              height=self.root.rect.height + h))
     self.root.right = prev_root
     self.root.down = Block(
         Rect(x=0,
              y=prev_root.rect.height,
              width=prev_root.rect.width,
              height=h))
     self.root.used = True
     node = self.find_node(self.root, w, h)
     if node:
         return self.split(node, w, h)
     else:
         return None
Example #7
def non_max_suppression_handler(req):
    pick = non_max_suppression_equator(req.rect, req.threshold)
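    # Each picked box is (x1, y1, x2, y2); convert it to an (x, y, width, height) Rect for the response.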
    bbox = []
    i = 0
    while i < len(pick):
        bx = pick[i][0]
        by = pick[i][1]
        bw = pick[i][2] - bx
        bh = pick[i][3] - by
        bbox.append(Rect(bx, by, bw, bh))
        #print Rect(bx, by, bw, bh)
        i += 1
    return NonMaximumSuppressionResponse(bbox, len(pick))
Example #8
 def non_max_suppression_handler(self, rects, threshold):
     pick = self.non_max_suppression_equator(rects, threshold)
     bbox = []
     i = 0
     while i < len(pick):
         bx = pick[i][0]
         by = pick[i][1]
         bw = pick[i][2] - bx
         bh = pick[i][3] - by
         bbox.append(Rect(bx, by, bw, bh))
         #print Rect(bx, by, bw, bh)
         i += 1
     return bbox, len(pick)
Example #9
    def image_cb(self, msg):
        if not hasattr(self, 'engine'):
            return
        if self.transport_hint == 'compressed':
            np_arr = np.fromstring(msg.data, np.uint8)
            img = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
            img = img[:, :, ::-1]
        else:
            img = self.bridge.imgmsg_to_cv2(msg, desired_encoding='rgb8')

        points, key_names, visibles, bboxes, labels, scores \
            = self._estimate(img)

        poses_msg = PeoplePoseArray(header=msg.header)
        rects_msg = RectArray(header=msg.header)
        for point, key_name, visible, bbox, label, score in zip(
                points, key_names, visibles, bboxes, labels, scores):
            pose_msg = PeoplePose()
            for pt, key_nm, vs, sc in zip(point, key_name, visible, score):
                if vs:
                    key_y, key_x = pt
                    pose_msg.limb_names.append(key_nm)
                    pose_msg.scores.append(sc)
                    pose_msg.poses.append(
                        Pose(position=Point(x=key_x, y=key_y)))
            poses_msg.poses.append(pose_msg)
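            # bbox is (y_min, x_min, y_max, x_max); convert it to an x/y/width/height Rect.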
            y_min, x_min, y_max, x_max = bbox
            rect = Rect(x=x_min,
                        y=y_min,
                        width=x_max - x_min,
                        height=y_max - y_min)
            rects_msg.rects.append(rect)

        cls_msg = ClassificationResult(
            header=msg.header,
            classifier=self.classifier_name,
            target_names=self.label_names,
            labels=labels,
            label_names=[self.label_names[lbl] for lbl in labels],
            label_proba=[np.average(score) for score in scores])

        self.pub_pose.publish(poses_msg)
        self.pub_rects.publish(rects_msg)
        self.pub_class.publish(cls_msg)

        if self.enable_visualization:
            with self.lock:
                self.img = img
                self.header = msg.header
                self.points = points
                self.visibles = visibles
Example #10
 def _apply(self, img_msg):
     bridge = cv_bridge.CvBridge()
     img_bgr = bridge.imgmsg_to_cv2(img_msg, desired_encoding='bgr8')
     img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
     rects = []
     dlib.find_candidate_object_locations(img_rgb, rects,
                                          min_size=self.min_size)
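      # dlib fills rects in place with candidate object proposals; boxes larger than max_size are skipped below.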
     ros_rect_array = RectArray()
     ros_rect_array.header = img_msg.header
     for d in rects:
         if (d.right() - d.left()) * (d.bottom() - d.top()) > self.max_size:
             continue
         cv2.rectangle(img_bgr, (d.left(), d.top()), (d.right(), d.bottom()), (255, 0, 0), 3)
         ros_rect_array.rects.append(Rect(x=d.left(), y=d.top(), width=d.right() - d.left(), height=d.bottom() - d.top()))
     imgmsg = bridge.cv2_to_imgmsg(img_bgr, encoding='bgr8')
     self.debug_pub_.publish(imgmsg)
     self.pub_.publish(ros_rect_array)
Example #11
 def fit(self, blocks):
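      # Size the root node to the first block, then place each block in turn, growing the packer when no free node fits.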
     if len(blocks) == 0:
         return
     self.root = Block(
         Rect(x=0,
              y=0,
              width=blocks[0].rect.width,
              height=blocks[0].rect.height))
     for block in blocks:
         node = self.find_node(self.root, block.rect.width,
                               block.rect.height)
         if node:
             block.fit_location = self.split(node, block.rect.width,
                                             block.rect.height)
         else:
             block.fit_location = self.grow_node(block.rect.width,
                                                 block.rect.height)
Example #12
 def _draw_polygon(self, imgmsg, polygon_msg):
     rect_msg = Rect()
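      # Only fill in the rect when exactly two polygon points (opposite corners) are given; otherwise an empty Rect is published.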
     if len(polygon_msg.polygon.points) == 2:
         x1 = polygon_msg.polygon.points[0].x
         y1 = polygon_msg.polygon.points[0].y
         x2 = polygon_msg.polygon.points[1].x
         y2 = polygon_msg.polygon.points[1].y
         rect_msg.x = int(x1)
         rect_msg.y = int(y1)
         rect_msg.width = int(x2 - x1)
         rect_msg.height = int(y2 - y1)
     rects = [rect_msg]
     rects_msg = RectArray(header=polygon_msg.header, rects=rects)
     self._draw(imgmsg, rects_msg)
Example #13
    def image_cb(self, msg):
        if not hasattr(self, 'engine'):
            return
        if self.transport_hint == 'compressed':
            np_arr = np.fromstring(msg.data, np.uint8)
            img = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
            img = img[:, :, ::-1]
        else:
            img = self.bridge.imgmsg_to_cv2(msg, desired_encoding='rgb8')

        bboxes, labels, scores = self._detect(img)
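        # _detect returns boxes as (y_min, x_min, y_max, x_max); repack them as Rect messages below.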

        rect_msg = RectArray(header=msg.header)
        for bbox in bboxes:
            y_min, x_min, y_max, x_max = bbox
            rect = Rect(x=x_min,
                        y=y_min,
                        width=x_max - x_min,
                        height=y_max - y_min)
            rect_msg.rects.append(rect)

        cls_msg = ClassificationResult(
            header=msg.header,
            classifier=self.classifier_name,
            target_names=self.label_names,
            labels=labels,
            label_names=[self.label_names[lbl] for lbl in labels],
            label_proba=scores)

        self.pub_rects.publish(rect_msg)
        self.pub_class.publish(cls_msg)

        if self.enable_visualization:
            with self.lock:
                self.img = img
                self.header = msg.header
                self.bboxes = bboxes
                self.labels = labels
                self.scores = scores
Example #14
    def callback(self, imgmsg):
        bridge = cv_bridge.CvBridge()
        img = bridge.imgmsg_to_cv2(imgmsg, desired_encoding='rgb8')
        img_chw = img.transpose(2, 0, 1)  # C, H, W

        bboxes, masks, labels, scores = self.model.predict([img_chw])
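        # predict() takes a batch of images; unpack the results for the single input image.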
        bboxes = bboxes[0]
        masks = masks[0]
        labels = labels[0]
        scores = scores[0]

        msg_indices = ClusterPointIndices(header=imgmsg.header)
        msg_labels = LabelArray(header=imgmsg.header)
        # -1: label for background
        lbl_cls = -np.ones(img.shape[:2], dtype=np.int32)
        lbl_ins = -np.ones(img.shape[:2], dtype=np.int32)
        for ins_id, (mask, label) in enumerate(zip(masks, labels)):
            indices = np.where(mask.flatten())[0]
            indices_msg = PointIndices(header=imgmsg.header, indices=indices)
            msg_indices.cluster_indices.append(indices_msg)
            class_name = self.fg_class_names[label]
            msg_labels.labels.append(Label(id=label, name=class_name))
            lbl_cls[mask] = label
            lbl_ins[mask] = ins_id  # instance_id
        self.pub_indices.publish(msg_indices)
        self.pub_labels.publish(msg_labels)

        msg_lbl_cls = bridge.cv2_to_imgmsg(lbl_cls)
        msg_lbl_ins = bridge.cv2_to_imgmsg(lbl_ins)
        msg_lbl_cls.header = msg_lbl_ins.header = imgmsg.header
        self.pub_lbl_cls.publish(msg_lbl_cls)
        self.pub_lbl_ins.publish(msg_lbl_ins)

        cls_msg = ClassificationResult(
            header=imgmsg.header,
            classifier=self.classifier_name,
            target_names=self.fg_class_names,
            labels=labels,
            label_names=[self.fg_class_names[l] for l in labels],
            label_proba=scores,
        )

        rects_msg = RectArray(header=imgmsg.header)
        for bbox in bboxes:
            rect = Rect(x=bbox[1],
                        y=bbox[0],
                        width=bbox[3] - bbox[1],
                        height=bbox[2] - bbox[0])
            rects_msg.rects.append(rect)
        self.pub_rects.publish(rects_msg)
        self.pub_class.publish(cls_msg)

        if self.pub_viz.get_num_connections() > 0:
            n_fg_class = len(self.fg_class_names)
            captions = [
                '{:d}: {:s}'.format(l, self.fg_class_names[l]) for l in labels
            ]
            viz = chainer_mask_rcnn.utils.draw_instance_bboxes(
                img,
                bboxes,
                labels + 1,
                n_class=n_fg_class + 1,
                masks=masks,
                captions=captions)
            msg_viz = bridge.cv2_to_imgmsg(viz, encoding='rgb8')
            msg_viz.header = imgmsg.header
            self.pub_viz.publish(msg_viz)
Example #15
    def callback(self, img, pose2d, pose3d):
        header = img.header
        try:
            img = self.cv_bridge.imgmsg_to_cv2(img, "bgr8")
        except cv_bridge.CvBridgeError as e:
            rospy.logerr("Failed to convert image: %s" % str(e))
            rospy.logerr(traceback.format_exc())
            return

        faces = []
        rects = []
        face_origins = []
        face_2d_origins = []
        for p2d, p3d in zip(pose2d.poses, pose3d.poses):
            try:
                score = p2d.scores[p2d.limb_names.index("Nose")]
                if score < self.face_threshold:
                    continue
                nose = p2d.poses[p2d.limb_names.index("Nose")]
                neck = p2d.poses[p2d.limb_names.index("Neck")]
                width_half = np.sqrt((neck.position.x - nose.position.x) ** 2 +
                                     (neck.position.y - nose.position.y) ** 2)
                width_half *= 1.0 + self.face_padding
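                # Use the padded nose-to-neck distance as half the side of a square crop centered on the nose.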
                rect = Rect(x=int(nose.position.x-width_half),
                            y=int(nose.position.y-width_half),
                            width=int(width_half*2),
                            height=int(width_half*2))
                face_origin = p3d.poses[p3d.limb_names.index("Nose")]
                face_2d_origin = nose
            except ValueError:
                continue

            try:
                face = img[rect.y:rect.y+rect.height, rect.x:rect.x+rect.width]
                if face.size <= 0:
                    continue
            except Exception as e:
                rospy.logerr("Failed to crop image: %s" % str(e))
                rospy.logerr(traceback.format_exc())
                continue
            rects.append(rect)
            face_origins.append(face_origin)
            face_2d_origins.append(face_2d_origin)
            faces.append(face)

        if not faces:
            rospy.logdebug("No face found")
            return

        try:
            results = self.predictor(faces)
        except OverflowError:
            rospy.logfatal(traceback.format_exc())
            rospy.signal_shutdown("shutdown")
        except Exception as e:
            rospy.logerr("Failed to predict: %s" % str(e))
            rospy.logerr(traceback.format_exc())
            return

        for i in range(len(results)):
            results[i].update({
                "face_origin": face_origins[i],
                "face_2d_origin": face_2d_origins[i],
                "rect": rects[i],
            })

        self.publish_face_rects(header, results)
        self.publish_gender(header, results)
        self.publish_pose(header, results, img)
Example #16
    def image_cb(self, msg):
        img = self.bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
        H, W = img.shape[:2]
        objs = self.engine.DetectWithImage(
            PIL.Image.fromarray(img), threshold=self.score_thresh,
            keep_aspect_ratio=True, relative_coord=True,
            top_k=self.top_k)

        bboxes = []
        scores = []
        labels = []
        rect_msg = RectArray(header=msg.header)
        for obj in objs:
            x_min, y_min, x_max, y_max = obj.bounding_box.flatten().tolist()
            x_min = int(np.round(x_min * W))
            y_min = int(np.round(y_min * H))
            x_max = int(np.round(x_max * W))
            y_max = int(np.round(y_max * H))
            bboxes.append([y_min, x_min, y_max, x_max])
            scores.append(obj.score)
            labels.append(self.label_ids.index(int(obj.label_id)))
            rect = Rect(
                x=x_min, y=y_min,
                width=x_max-x_min, height=y_max-y_min)
            rect_msg.rects.append(rect)
        bboxes = np.array(bboxes)
        scores = np.array(scores)
        labels = np.array(labels)

        cls_msg = ClassificationResult(
            header=msg.header,
            classifier=self.classifier_name,
            target_names=self.label_names,
            labels=labels,
            label_names=[self.label_names[l] for l in labels],
            label_proba=scores)

        self.pub_rects.publish(rect_msg)
        self.pub_class.publish(cls_msg)

        if self.visualize:
            fig = plt.figure(
                tight_layout={'pad': 0})
            ax = plt.Axes(fig, [0., 0., 1., 1.])
            ax.axis('off')
            fig.add_axes(ax)
            vis_bbox(
                img[:, :, ::-1].transpose((2, 0, 1)),
                bboxes, labels, scores,
                label_names=self.label_names, ax=ax)
            fig.canvas.draw()
            w, h = fig.canvas.get_width_height()
            vis_img = np.fromstring(
                fig.canvas.tostring_rgb(), dtype=np.uint8)
            vis_img.shape = (h, w, 3)
            fig.clf()
            plt.close()
            vis_msg = self.bridge.cv2_to_imgmsg(vis_img, 'rgb8')
            # BUG: https://answers.ros.org/question/316362/sensor_msgsimage-generates-float-instead-of-int-with-python3/  # NOQA
            vis_msg.step = int(vis_msg.step)
            vis_msg.header = msg.header
            self.pub_image.publish(vis_msg)
Example #17
    def image_cb(self, msg):
        if self.profiling:
            rospy.loginfo(
                "callback start: incomming msg is %s msec behind" %
                ((rospy.Time.now() - msg.header.stamp).to_sec() * 1000.0))
        tprev = time.time()
        try:
            # transform image to RGB, float, CHW
            img = self.cv_bridge.imgmsg_to_cv2(msg, desired_encoding="rgb8")
            img = np.asarray(img, dtype=np.float32)
            img = img.transpose(2, 0, 1)  # (H, W, C) -> (C, H, W)
        except Exception as e:
            rospy.logerr("Failed to convert image: %s" % str(e))
            return
        if self.profiling:
            tcur = time.time()
            rospy.loginfo("%s: elapsed %f msec" % ("convert",
                                                   (tcur - tprev) * 1000))
            tprev = tcur

        bboxes, labels, scores = self.model.predict([img])
        bboxes, labels, scores = bboxes[0], labels[0], scores[0]

        if self.profiling:
            tcur = time.time()
            rospy.loginfo("%s: elapsed %f msec" % ("predict",
                                                   (tcur - tprev) * 1000))
            tprev = tcur

        rect_msg = RectArray(header=msg.header)
        for bbox in bboxes:
            rect = Rect(x=bbox[1],
                        y=bbox[0],
                        width=bbox[3] - bbox[1],
                        height=bbox[2] - bbox[0])
            rect_msg.rects.append(rect)

        if self.profiling:
            tcur = time.time()
            rospy.loginfo("%s: elapsed %f msec" % ("make rect msg",
                                                   (tcur - tprev) * 1000))
            tprev = tcur

        cls_msg = ClassificationResult(
            header=msg.header,
            classifier=self.classifier_name,
            target_names=self.label_names,
            labels=labels,
            label_names=[self.label_names[l] for l in labels],
            label_proba=scores,
        )

        if self.profiling:
            tcur = time.time()
            rospy.loginfo("%s: elapsed %f msec" % ("make cls msg",
                                                   (tcur - tprev) * 1000))
            tprev = tcur

        self.pub_rects.publish(rect_msg)
        self.pub_class.publish(cls_msg)

        if self.profiling:
            tcur = time.time()
            rospy.loginfo("%s: elapsed %f msec" % ("publish msg",
                                                   (tcur - tprev) * 1000))
            tprev = tcur

        if self.visualize:
            self.publish_bbox_image(img, bboxes, labels, scores)

        if self.profiling:
            tcur = time.time()
            rospy.loginfo("%s: elapsed %f msec" % ("callback end",
                                                   (tcur - tprev) * 1000))
            tprev = tcur
Example #18
    def image_cb(self, msg):
        if self.profiling:
            rospy.loginfo(
                "callback start: incomming msg is %s msec behind" %
                ((rospy.Time.now() - msg.header.stamp).to_sec() * 1000.0))
        tprev = time.time()
        try:
            # transform image to RGB, float, CHW
            img = self.cv_bridge.imgmsg_to_cv2(msg, desired_encoding="rgb8")
            img = np.asarray(img, dtype=np.float32)
            img = img.transpose(2, 0, 1)  # (H, W, C) -> (C, H, W)
        except Exception as e:
            rospy.logerr("Failed to convert image: %s" % str(e))
            return
        if self.profiling:
            tcur = time.time()
            rospy.loginfo("%s: elapsed %f msec" % ("convert",
                                                   (tcur - tprev) * 1000))
            tprev = tcur

        if self.gpu >= 0:
            chainer.cuda.get_device_from_id(self.gpu).use()
        bboxes, labels, scores = self.model.predict([img])
        bboxes, labels, scores = bboxes[0], labels[0], scores[0]

        if self.profiling:
            tcur = time.time()
            rospy.loginfo("%s: elapsed %f msec" % ("predict",
                                                   (tcur - tprev) * 1000))
            tprev = tcur

        labels_msg = LabelArray(header=msg.header)
        for l in labels:
            l_name = self.label_names[l]
            labels_msg.labels.append(Label(id=l, name=l_name))

        if self.profiling:
            tcur = time.time()
            rospy.loginfo("%s: elapsed %f msec" % ("make labels msg",
                                                   (tcur - tprev) * 1000))

        cluster_indices_msg = ClusterPointIndices(header=msg.header)
        H = img.shape[1]
        W = img.shape[2]
        for bbox in bboxes:
            ymin = max(0, int(np.floor(bbox[0])))
            xmin = max(0, int(np.floor(bbox[1])))
            ymax = min(H, int(np.ceil(bbox[2])))
            xmax = min(W, int(np.ceil(bbox[3])))
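            # Collect the flattened (row-major) pixel indices covered by this box for the PointIndices message.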
            indices = [
                range(W * y + xmin, W * y + xmax) for y in range(ymin, ymax)
            ]
            indices = np.array(indices, dtype=np.int32).flatten()
            indices_msg = PointIndices(header=msg.header, indices=indices)
            cluster_indices_msg.cluster_indices.append(indices_msg)

        if self.profiling:
            tcur = time.time()
            rospy.loginfo("%s: elapsed %f msec" % ("make cluster_indices msg",
                                                   (tcur - tprev) * 1000))
            tprev = tcur

        rect_msg = RectArray(header=msg.header)
        for bbox in bboxes:
            rect = Rect(x=bbox[1],
                        y=bbox[0],
                        width=bbox[3] - bbox[1],
                        height=bbox[2] - bbox[0])
            rect_msg.rects.append(rect)

        if self.profiling:
            tcur = time.time()
            rospy.loginfo("%s: elapsed %f msec" % ("make rect msg",
                                                   (tcur - tprev) * 1000))
            tprev = tcur

        cls_msg = ClassificationResult(
            header=msg.header,
            classifier=self.classifier_name,
            target_names=self.label_names,
            labels=labels,
            label_names=[self.label_names[l] for l in labels],
            label_proba=scores,
        )

        if self.profiling:
            tcur = time.time()
            rospy.loginfo("%s: elapsed %f msec" % ("make cls msg",
                                                   (tcur - tprev) * 1000))
            tprev = tcur

        self.pub_labels.publish(labels_msg)
        self.pub_indices.publish(cluster_indices_msg)
        self.pub_rects.publish(rect_msg)
        self.pub_class.publish(cls_msg)

        if self.profiling:
            tcur = time.time()
            rospy.loginfo("%s: elapsed %f msec" % ("publish msg",
                                                   (tcur - tprev) * 1000))
            tprev = tcur

        if self.visualize:
            self.publish_bbox_image(img, bboxes, labels, scores, msg.header)

        if self.profiling:
            tcur = time.time()
            rospy.loginfo("%s: elapsed %f msec" % ("callback end",
                                                   (tcur - tprev) * 1000))
            tprev = tcur
Example #19
    def process_image(self, image, debug=False):
        # resize
        orgHeight, orgWidth = image.shape[:2]
        image = cv2.resize(
            image, (orgWidth // self.reduction, orgHeight // self.reduction))

        # hsv filter
        if (self.h_min < self.h_max):
            hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
            lower_red = np.array([self.h_min, self.s_min, self.v_min])
            upper_red = np.array([self.h_max, self.s_max, self.v_max])
            mask = cv2.inRange(hsv, lower_red, upper_red)

        else:
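            # OpenCV hue wraps around at 180, so when h_min > h_max combine two ranges (e.g. for red).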
            hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
            lower_red1 = np.array([self.h_min, self.s_min, self.v_min])
            upper_red1 = np.array([180, self.s_max, self.v_max])
            mask1 = cv2.inRange(hsv, lower_red1, upper_red1)
            lower_red2 = np.array([0, self.s_min, self.v_min])
            upper_red2 = np.array([self.h_max, self.s_max, self.v_max])
            mask2 = cv2.inRange(hsv, lower_red2, upper_red2)
            mask = mask1 + mask2

        if debug:
            display = cv2.bitwise_and(image, image, mask=mask)
            cv2.imshow("hsv filter", display)

        # morphology processing
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (13, 13))
        mask = cv2.dilate(mask, kernel, iterations=1)
        mask = cv2.erode(mask, kernel, iterations=1)

        if debug:
            display = cv2.bitwise_and(image, image, mask=mask)
            cv2.imshow("morphology processing", display)

        # make contour
        _, contours, _ = cv2.findContours(mask, cv2.RETR_TREE,
                                          cv2.CHAIN_APPROX_SIMPLE)

        if debug:
            display = np.zeros(mask.shape, dtype=np.uint8)
            for c in contours:
                for elem in c:
                    display[elem[0, 1], elem[0, 0]] = 255
            cv2.imshow("make contours", display)

        # make region
        rects = []
        for contour in contours:
            approx = cv2.convexHull(contour)
            rect = cv2.boundingRect(approx)
            rects.append(rect)

        rects_msg = RectArray()
        for rect in rects:
            cv2.rectangle(image, (rect[0], rect[1]),
                          (rect[0] + rect[2], rect[1] + rect[3]), (0, 255, 0),
                          thickness=1)
            rect_msg = Rect()
            rect_msg.x = rect[0] * self.reduction
            rect_msg.y = rect[1] * self.reduction
            rect_msg.width = rect[2] * self.reduction
            rect_msg.height = rect[3] * self.reduction
            rects_msg.rects.append(rect_msg)

        if debug:
            cv2.imshow("make region", image)

        if debug:
            cv2.waitKey(1)

        return rects_msg
Example #20
    def callback(self, imgmsg):
        raw_image = None
        self.image_width = imgmsg.width
        self.image_height = imgmsg.height
        try:
            raw_image = self.bridge.imgmsg_to_cv2(imgmsg,
                                                  desired_encoding='bgr8')
        except:
            rospy.logerr('failed transform image')
            return

        dataset = ImageDataset(pkg_dir,
                               raw_image,
                               self.templates_dir,
                               thresh=self.thresh,
                               image_name=str(imgmsg.header.stamp))

        scores, w_array, h_array, label_list = calculate_scores(
            self.model, dataset)
        boxes, indices = nms(scores, w_array, h_array, dataset.thresh,
                             label_list)
        output_image = plot_results(dataset.image_raw,
                                    boxes,
                                    label_list,
                                    indices,
                                    show=False,
                                    save_name=None)

        labels_msg = LabelArray()
        rects_msg = RectArray()
        original_size_rects_msg = RectArray()
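        # Rects are published both at the processed resolution and scaled back to the original image size via resize_scale.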
        for i in range(len(indices)):
            rect_msg = Rect()
            label_msg = Label()
            original_size_rect_msg = Rect()

            box = boxes[i][None, :, :][0]

            x, y, width, height = self.check_out_of_image(box)
            rect_msg.x = x
            rect_msg.y = y
            rect_msg.width = width
            rect_msg.height = height

            original_size_rect_msg.x = x * 1 / self.resize_scale
            original_size_rect_msg.y = y * 1 / self.resize_scale
            original_size_rect_msg.width = width * 1 / self.resize_scale
            original_size_rect_msg.height = height * 1 / self.resize_scale

            # rect_msg.x = box[0][0]
            # rect_msg.y = box[0][1]
            # rect_msg.width = box[1][0] - box[0][0]
            # rect_msg.height = box[1][1] - box[0][1]

            label_msg.name = label_list[indices[i]]
            rects_msg.rects.append(rect_msg)
            labels_msg.labels.append(label_msg)
            original_size_rects_msg.rects.append(original_size_rect_msg)

        rects_msg.header = imgmsg.header
        labels_msg.header = imgmsg.header
        original_size_rects_msg.header = imgmsg.header

        self.labels_pub.publish(labels_msg)
        self.rects_pub.publish(rects_msg)
        self.original_size_rects_pub.publish(original_size_rects_msg)

        msg_viz = cv_bridge.CvBridge().cv2_to_imgmsg(output_image,
                                                     encoding='bgr8')
        msg_viz.header = imgmsg.header
        self.pub_viz.publish(msg_viz)