示例#1
0
    def select(self, one=0.0, two=None):
        """
        Select detected objects by confidence threshold, optionally
        restricted to a single class.

        :param one: Confidence threshold; detections at or above this value
            are returned.
        :param two: Optional class name. When None, selection is by
            threshold alone; otherwise only detections of this class are
            considered.
        :return: List of DetectedObject instances.
        """
        c_output = ctypes.POINTER(DetectedObject.c_ptr_type())()
        length = ctypes.c_size_t()

        if two is None:
            dos_st = self.VITAL_LIB.vital_detected_object_set_select_threshold
            dos_st.argtypes = [
                self.C_TYPE_PTR, ctypes.c_double,
                ctypes.POINTER(ctypes.POINTER(DetectedObject.c_ptr_type())),
                ctypes.POINTER(ctypes.c_size_t)
            ]
            dos_st(self, one, ctypes.byref(c_output), ctypes.byref(length))
        else:
            dos_sct = self.VITAL_LIB.vital_detected_object_set_select_class_threshold
            dos_sct.argtypes = [
                self.C_TYPE_PTR, ctypes.c_char_p, ctypes.c_double,
                ctypes.POINTER(ctypes.POINTER(DetectedObject.c_ptr_type())),
                ctypes.POINTER(ctypes.c_size_t)
            ]
            # Per argtypes the C function takes (set, class_name, threshold,
            # ...); the original call passed the float threshold where the
            # class name belongs.  c_char_p also requires bytes, so encode
            # a str class name before the call.
            class_name = two.encode() if isinstance(two, str) else two
            dos_sct(self, class_name, one, ctypes.byref(c_output),
                    ctypes.byref(length))

        output = []
        for i in range(length.value):
            cptr = DetectedObject.c_ptr_type()(c_output[i].contents)
            output.append(DetectedObject(from_cptr=cptr))

        free_void_ptr(c_output)
        return output
示例#2
0
 def detection(self):
     """Return the DetectedObject held by this track state, or None."""
     ptr = self._call_cfunc("vital_object_track_state_detection",
                            [self.C_TYPE_PTR], [self],
                            DetectedObject.c_ptr_type())
     # The returned C pointer may be null when no detection is attached.
     return DetectedObject(from_cptr=ptr) if ptr else None
示例#3
0
    def detect( self, image_data ):
        """
        Produce a DetectedObjectSet for one input image.

        :param image_data: Image container providing an ``asarray()`` view.
        :return: DetectedObjectSet holding the converted detections.
        """
        # Convert image to 8-bit numpy
        input_image = image_data.asarray().astype( 'uint8' )

        # TODO: do something with numpy image producing detections
        bboxes = []
        labels = []

        # Convert detections to kwiver format
        output = DetectedObjectSet()

        for bbox, label in zip( bboxes, labels ):

            bbox_int = bbox.astype( np.int32 )

            bounding_box = BoundingBox( bbox_int[0], bbox_int[1],
                                        bbox_int[2], bbox_int[3] )

            # Placeholder detector produces no per-detection scores, so use
            # full confidence.  The original referenced an undefined
            # ``class_confidence`` name here, raising NameError as soon as
            # any detection existed.
            confidence = 1.0

            detected_object_type = DetectedObjectType( label, confidence )

            detected_object = DetectedObject( bounding_box,
                                              confidence,
                                              detected_object_type )

            output.add( detected_object )

        return output
示例#4
0
    def _step(self):
        """Grab a frame, run Haar-cascade face detection, push the results."""
        print("[DEBUG] ----- start step")

        # NOTE(review): loading the cascade on every step is wasteful;
        # consider moving this to configuration/initialization.
        face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')
        # grab image container from port using traits
        frame_c = self.grab_input_using_trait('image')
        # Get image from container
        frame_in = frame_c.image()
        # convert generic image to PIL
        pil_image = get_pil_image(frame_in)
        # convert to matrix
        frame = np.array(pil_image)

        detected_set = DetectedObjectSet()
        # PIL yields RGB channel order, so convert RGB (not BGR) to gray;
        # the original used COLOR_BGR2GRAY, which weights the channels for
        # the wrong ordering.
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        gray_frame = cv2.equalizeHist(gray_frame)
        faces = face_cascade.detectMultiScale(gray_frame, 1.3, 5)
        for (x, y, w, h) in faces:
            bbox = BoundingBox(x, y, x + w, y + h)
            dot = DetectedObjectType("face", 1.0)
            detected_set.add(DetectedObject(bbox, 1.0, dot))
        # push object to output port
        self.push_to_port_using_trait('detected_object_set', detected_set)

        self._base_step()
示例#5
0
    def _step(self):
        """One pipeline step: normalize the frame, detect, publish results."""
        print("[DEBUG] ----- start step")
        # grab image container from port using traits
        img_container = self.grab_input_using_trait("image")

        height = img_container.height()
        width = img_container.width()

        if (self.normImageType):
            print("Normalize image")

            frame = img_container.image().asarray().astype("uint16")

            lo, hi = self.get_scaling_values(self.normImageType, height)
            frame = self.lin_normalize_image(frame, lo, hi)

            # replicate the single channel to obtain a 3-channel image
            frame = np.tile(frame, (1, 1, 3))
        else:
            frame = np.array(get_pil_image(img_container.image()).convert("RGB"))

        self.push_to_port_using_trait("image_norm",
                                      ImageContainer(Image(frame)))

        t0 = time.time()
        boxes, scores, classes = self.generate_detection(
            self.detection_graph, frame)
        elapsed = time.time() - t0
        print("Done running detector in {}".format(
            humanfriendly.format_timespan(elapsed)))

        kept = []
        detections = DetectedObjectSet()

        for idx, score in enumerate(scores):
            if score < self.confidenceThresh:
                continue
            box = boxes[idx]
            kept.append(box)

            # boxes are relative [top, left, bottom, right]; scale to pixels
            y0, x0, y1, x1 = box[0], box[1], box[2], box[3]

            detections.add(
                DetectedObject(BoundingBox(x0 * width, y0 * height,
                                           x1 * width, y1 * height),
                               score))

        print("Detected {}".format(len(kept)))

        self.push_to_port_using_trait("detected_object_set", detections)

        self._base_step()
示例#6
0
  def detect( self, image_data ):
    """
    Run the mmdet detector on one frame.

    :param image_data: Image container providing an ``asarray()`` view.
    :return: DetectedObjectSet (currently always empty -- converting the raw
             detections into kwiver objects remains a TODO).
    """
    input_image = image_data.asarray().astype( 'uint8' )

    from mmdet.apis import inference_detector

    gpu_string = 'cuda:' + str( self._gpu_index )
    detections = inference_detector( self._model, input_image, self._cfg, device=gpu_string )

    # Effectively disables score filtering (matches the value the display
    # call below used).  The original referenced an undefined ``score_thr``
    # name in the mask loop, raising NameError whenever segmentation masks
    # were present.
    score_thr = -100.0

    class_names = [ 'fish' ] * 10000

    if isinstance( detections, tuple ):
      bbox_result, segm_result = detections
    else:
      bbox_result, segm_result = detections, None

    if np.size( bbox_result ) > 0:
      bboxes = np.vstack( bbox_result )
    else:
      bboxes = []

    sys.stdout.write( "Detected " + str( len( bbox_result ) ) + " objects" )
    sys.stdout.flush()

    # convert segmentation masks
    masks = []
    if segm_result is not None:
      segms = mmcv.concat_list( segm_result )
      inds = np.where( bboxes[:, -1] > score_thr )[0]
      for i in inds:
        # np.bool was removed in NumPy 1.24; use the builtin bool dtype
        masks.append( maskUtils.decode( segms[i] ).astype( bool ) )

    # collect labels
    labels = [
      np.full( bbox.shape[0], i, dtype=np.int32 )
      for i, bbox in enumerate( bbox_result )
    ]

    if np.size( labels ) > 0:
      labels = np.concatenate( labels )
    else:
      labels = []

    # TODO: convert bboxes/labels/masks to kwiver DetectedObjects with a
    # real threshold (the original iterated an empty literal list here,
    # which could never produce output)
    output = []

    if np.size( labels ) > 0:
      mmcv.imshow_det_bboxes(
        input_image,
        bboxes,
        labels,
        class_names=class_names,
        score_thr=score_thr,
        show=True)

    return DetectedObjectSet( output )
示例#7
0
 def _new(self, frame, detection):
     """
     Create the underlying C object-track-state instance.

     :param frame: Frame number this state is associated with (passed to C
         as an int64).
     :type frame: int
     :param detection: Optional DetectedObject instance associated with this state.
     :type detection: vital.types.DetectedObject
     :return: New C opaque pointer handle for the track state.
     """
     return self._call_cfunc(
         "vital_object_track_state_new",
         [ctypes.c_int64, DetectedObject.c_ptr_type()], [frame, detection],
         self.C_TYPE_PTR)
 def _create_detected_object(self):
     """
     Build the canonical test detection for a track state.

     :return: Detected object with bounding box coordinates of
              (10, 10, 20, 20), confidence of 0.4 and "test" label
     """
     box = BoundingBox(10, 10, 20, 20)
     kind = DetectedObjectType("test", 0.4)
     return DetectedObject(box, 0.4, kind)
 def detect(self, image_data):
     """Return one synthetic moving detection and advance the frame count."""
     # Box center drifts by (m_dx, m_dy) per frame from (m_center_x, m_center_y).
     cx = self.m_center_x + self.frame_ct * self.m_dx
     cy = self.m_center_y + self.frame_ct * self.m_dy
     half_w = self.m_width / 2.0
     half_h = self.m_height / 2.0
     box = BoundingBox(cx - half_w, cy - half_h,
                       cx + half_w, cy + half_h)
     result = DetectedObjectSet([DetectedObject(box)])
     self.frame_ct += 1
     return result
示例#10
0
    def detect(self, in_img_c):
        """Run the detector on one image container and return a DetectedObjectSet."""
        image_height = in_img_c.height()
        image_width = in_img_c.width()

        if (self.norm_image_type and self.norm_image_type != "none"):
            print("Normalizing input image")

            frame = in_img_c.image().asarray().astype("uint16")

            lo, hi = self.get_scaling_values(self.norm_image_type, frame,
                                             image_height)
            frame = self.lin_normalize_image(frame, lo, hi)

            # replicate the single channel to obtain a 3-channel image
            frame = np.tile(frame, (1, 1, 3))
        else:
            frame = np.array(get_pil_image(in_img_c.image()).convert("RGB"))

        t0 = time.time()
        boxes, scores, classes = self.generate_detection(
            self.detection_graph, frame)
        elapsed = time.time() - t0
        print("Done running detector in {}".format(
            humanfriendly.format_timespan(elapsed)))

        kept = []
        detections = DetectedObjectSet()

        for idx, score in enumerate(scores):
            if score < self.confidence_thresh:
                continue
            box = boxes[idx]
            kept.append(box)

            # boxes are relative [top, left, bottom, right]; scale to pixels
            y0, x0, y1, x1 = box[0], box[1], box[2], box[3]

            dot = DetectedObjectType(self.category_name, score)
            detections.add(
                DetectedObject(BoundingBox(x0 * image_width, y0 * image_height,
                                           x1 * image_width, y1 * image_height),
                               score, dot))

        print("Detected {}".format(len(kept)))
        return detections
示例#11
0
    def detect(self, image_c):
        """Detect objects in an image with an OpenCV Haar cascade classifier."""
        classifier = cv2.CascadeClassifier(self.classifier_file)
        frame = image_c.image().asarray().astype(np.uint8)
        # asarray() returns an RGB representation, hence RGB -> gray here
        gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        gray = cv2.equalizeHist(gray)

        detected_object_set = DetectedObjectSet()
        for (x, y, w, h) in classifier.detectMultiScale(gray,
                                                        self.scale_factor,
                                                        self.min_neighbor):
            detected_object_set.add(
                DetectedObject(BoundingBox(x, y, x + w, y + h), 1.0,
                               DetectedObjectType(self.classifier_name, 1.0)))
        return detected_object_set
示例#12
0
    def detect(self, image_data):
        """
        Run the mmdet detector on one frame and convert to kwiver format.

        :param image_data: Image container providing an ``asarray()`` view.
        :return: DetectedObjectSet holding detections scoring at or above
                 ``self._thresh``.
        """
        input_image = image_data.asarray().astype('uint8')

        from mmdet.apis import inference_detector
        detections = inference_detector(self._model, input_image)

        if isinstance(detections, tuple):
            bbox_result, segm_result = detections
        else:
            bbox_result, segm_result = detections, None

        if np.size(bbox_result) > 0:
            bboxes = np.vstack(bbox_result)
        else:
            bboxes = []

        # convert segmentation masks
        masks = []
        if segm_result is not None:
            segms = mmcv.concat_list(segm_result)
            # The original referenced an undefined ``score_thr`` name here
            # (NameError whenever masks were present); filter with the
            # configured threshold, matching the bbox loop below.
            inds = np.where(bboxes[:, -1] > self._thresh)[0]
            for i in inds:
                # np.bool was removed in NumPy 1.24; use the builtin bool
                masks.append(maskUtils.decode(segms[i]).astype(bool))

        # collect labels
        labels = [
            np.full(bbox.shape[0], i, dtype=np.int32)
            for i, bbox in enumerate(bbox_result)
        ]

        if np.size(labels) > 0:
            labels = np.concatenate(labels)
        else:
            labels = []

        # convert to kwiver format, apply threshold
        output = DetectedObjectSet()

        for bbox, label in zip(bboxes, labels):
            class_confidence = float(bbox[-1])
            if class_confidence < self._thresh:
                continue

            bbox_int = bbox.astype(np.int32)
            bounding_box = BoundingBox(bbox_int[0], bbox_int[1], bbox_int[2],
                                       bbox_int[3])

            class_name = self._labels[label]
            detected_object_type = DetectedObjectType(class_name,
                                                      class_confidence)

            # class_confidence is already a scalar float; the original
            # wrapped it in np.max(), a no-op that merely produced a numpy
            # scalar instead of a float.
            detected_object = DetectedObject(bounding_box,
                                             class_confidence,
                                             detected_object_type)
            output.add(detected_object)

        if np.size(labels) > 0 and self._display_detections:
            mmcv.imshow_det_bboxes(input_image,
                                   bboxes,
                                   labels,
                                   class_names=self._labels,
                                   score_thr=self._thresh,
                                   show=True)

        return output