def _classification_to_kwiver_detections(classification, w, h):
    """
    Convert a kwarray classification into a kwiver detected object set.

    Args:
        classification (bioharn.clf_predict.Classification)
        w (int): width of image
        h (int): height of image

    Returns:
        kwiver.vital.types.DetectedObjectSet
    """
    if classification.data.get('prob', None) is not None:
        # Per-class probabilities are available; attach the full vector.
        obj_type = DetectedObjectType(list(classification.classes),
                                      classification.prob)
    else:
        # Only the predicted class and its score are known.
        obj_type = DetectedObjectType(
            classification.classes[classification.cidx],
            classification.conf)
    # The classification applies to the entire image frame.
    full_frame_box = BoundingBox(0, 0, w, h)
    result = DetectedObjectSet()
    result.add(DetectedObject(full_frame_box, classification.conf, obj_type))
    return result
def _kwimage_to_kwiver_detections(detections):
    """
    Convert kwimage detections to a kwiver detected object set.

    Args:
        detections (kwimage.Detections)

    Returns:
        kwiver.vital.types.DetectedObjectSet
    """
    # Segmentation mask conversion is not supported yet.
    if 'segmentations' in detections.data:
        print("Warning: segmentations not implemented")

    tlbr_boxes = detections.boxes.to_tlbr()

    # Translate each (box, score, class index) triple into kwiver form.
    converted = DetectedObjectSet()
    for tlbr, score, cidx in zip(tlbr_boxes.data,
                                 detections.scores,
                                 detections.class_idxs):
        x1, y1, x2, y2 = np.round(tlbr).astype(np.int32)
        obj_type = DetectedObjectType(detections.classes[cidx], score)
        converted.add(
            DetectedObject(BoundingBox(x1, y1, x2, y2), score, obj_type))
    return converted
def _create_object_track_set(self):
    """Return an ObjectTrackSet holding one 10-state track of a fixed detection."""
    detection = DetectedObject(
        BoundingBox(10, 10, 20, 20), 0.4, DetectedObjectType("test", 0.4))
    track = Track()
    # Same detection on every frame; frame id and time both run over [0, 10).
    for frame in range(10):
        track.append(ObjectTrackState(frame, frame, detection))
    return ObjectTrackSet([track])
def _create_detected_object_set():
    """Return a DetectedObjectSet with three detections sharing one box."""
    from kwiver.vital.types import DetectedObject, DetectedObjectSet, BoundingBox
    shared_box = BoundingBox(0, 10, 100, 50)
    object_set = DetectedObjectSet()
    # Same box, three different confidences.
    for confidence in (0.2, 0.5, 0.4):
        object_set.add(DetectedObject(shared_box, confidence))
    return object_set
def _create_detected_object(self):
    """
    Helper function to generate a detected object for the track state.

    :return: Detected object with bounding box coordinates of
        (10, 10, 20, 20), confidence of 0.4 and "test" label
    """
    label = ClassMap("test", 0.4)
    return DetectedObject(BoundingBox(10, 10, 20, 20), 0.4, label)
def test_get_set_bbox(self):
    """Exercise the bounding_box property getter and setter."""
    do = DetectedObject(self.bbox)

    # Check default.
    # BUG FIX: the original called nt.ok_(do.bounding_box, self.bbox),
    # which only asserts truthiness of the first argument (the second is
    # the failure *message*), so the equality comparison never ran and
    # the check was vacuous.
    nt.ok_(do.bounding_box == self.bbox)

    # Setting to different value
    new_bbox = BoundingBox(20, 20, 40, 40)
    do.bounding_box = new_bbox
    nt.ok_(do.bounding_box == new_bbox)
def detect(self, image_data):
    """Return one synthetic detection whose box drifts by (m_dx, m_dy) per frame."""
    # Box center for the current frame.
    cx = self.m_center_x + self.frame_ct * self.m_dx
    cy = self.m_center_y + self.frame_ct * self.m_dy
    half_w = self.m_width / 2.0
    half_h = self.m_height / 2.0
    box = BoundingBox(cx - half_w, cy - half_h, cx + half_w, cy + half_h)
    detections = DetectedObjectSet([DetectedObject(box)])
    self.frame_ct += 1
    return detections
def __getitem__(self, f_id):
    """
    Return the bounding boxes for frame ``f_id``.

    Args:
        f_id: frame id used as a key into the frame/track mapping.

    Returns:
        list: BoundingBox objects in (x1, y1, x2, y2) form, converted from
        the stored (x, y, w, h) entries (item[0] is skipped — presumably a
        track id; verify against the writer of _frame_track_dict).

    Raises:
        KeyError: if ``f_id`` is not a known frame id.
    """
    try:
        bb_info = self._frame_track_dict[f_id]
    except KeyError:
        # BUG FIX: the original printed a message and called exit(0),
        # terminating the whole program with a *success* status on a bad
        # lookup. Raising KeyError keeps the standard mapping contract so
        # callers can handle (or see) the failure.
        raise KeyError('frame id: {} does not exist!'.format(f_id))
    bb_list = []
    for item in bb_info:
        x, y, w, h = map(float, item[1:])
        bb_list.append(BoundingBox(x, y, x + w, y + h))
    return bb_list
def _create_track(self):
    """
    Helper function to create a track.

    :return: Track with 10 object track states. Every state holds the same
        detected object, while the frame number and time vary over [0, 10).
    """
    detection = DetectedObject(
        BoundingBox(10, 10, 20, 20), 0.4, ClassMap("test", 0.4))
    track = Track()
    for frame in range(10):
        track.append(ObjectTrackState(frame, frame, detection))
    return track
def setUp(self):
    """Build a 10-state track, two timestamps, and the Activity under test."""
    # One detection reused across all track states.
    detection = DetectedObject(
        BoundingBox(10, 10, 20, 20), 0.4, DetectedObjectType("test", 0.4))
    track = Track()
    for frame in range(10):
        track.append(ObjectTrackState(frame, frame, detection))
    self.track_ = track

    # Activity start/end timestamps.
    self.time_1 = Timestamp()
    self.time_1.set_time_seconds(1234)
    self.time_2 = Timestamp()
    self.time_2.set_time_seconds(4321)

    self.obj_ts = ObjectTrackSet([self.track_])
    self.act_type = ActivityType("self_act", 0.87)
    self.act = Activity(1, "self_act", 0.87, self.act_type,
                        self.time_1, self.time_2, self.obj_ts)
def _step(self):
    """Read one frame and push a fixed-size detection centered on it."""
    image_container = self.grab_input_using_trait("image")
    # These inputs are consumed so the ports advance, but their values
    # are unused by this process.
    self.grab_input_using_trait("timestamp")
    self.grab_input_using_trait("file_name")

    frame = image_container.asarray()
    height, width, _ = frame.shape
    center_x = width // 2
    center_y = height // 2

    # Configured box extents, halved once up front.
    half_bw = int(self.config_value("bbox_width")) // 2
    half_bh = int(self.config_value("bbox_height")) // 2
    bbox = BoundingBox(center_x - half_bw, center_y - half_bh,
                       center_x + half_bw, center_y + half_bh)

    detections = DetectedObjectSet()
    detections.add(DetectedObject(bbox, 1.0, DetectedObjectType("Test", 1.0)))
    self.push_to_port_using_trait("detected_object_set", detections)
def setUp(self):
    """Prepare fixture values for DetectedObject construction and mutation."""
    # Two (lon, lat) locations — presumably WGS84 degrees given the SRID
    # used below; loc2 feeds the keypoint value.
    self.loc1 = np.array([-73.759291, 42.849631])
    self.loc2 = np.array([-149.484444, -17.619482])
    # Constructor arguments for the detected object under test.
    self.bbox = BoundingBox(10, 10, 20, 20)
    self.conf = 0.5
    self.dot = DetectedObjectType("example_class", 0.4)
    self.mask = ImageContainer(Image(1080, 720))
    # Values to set outside of constructor
    self.geo_point = GeoPoint(self.loc1, geodesy.SRID.lat_lon_WGS84)
    self.index = 5
    self.detector_name = "example_detector_name"
    # 5-element descriptor filled with the value 10.
    self.descriptor = descriptor.new_descriptor(5)
    self.descriptor[:] = 10
    self.note_to_add = "example_note"
    self.keypoint_to_add = Point2d()
    self.keypoint_to_add.value = self.loc2
    self.keypoint_id = "example_keypoint_id"
def test_id(self):
    """Exercise getters and setters of every Activity property in sequence."""
    a = self.act
    # id
    self.assertEqual(a.id, 1)
    a.id = 10
    self.assertEqual(a.id, 10)
    # label
    self.assertEqual(a.label, "self_act")
    a.label = "second_act"
    self.assertEqual(a.label, "second_act")
    # activity_type: check original score, then replace with a default one.
    self.assertEqual(a.activity_type.score("self_act"), 0.87)
    a.activity_type = ActivityType()
    # confidence
    self.assertEqual(a.confidence, 0.87)
    a.confidence = 1
    self.assertEqual(a.confidence, 1)
    # start_time
    self.assertEqual(a.start_time.get_time_seconds(), 1234)
    # NOTE(review): this relies on set_time_seconds returning the Timestamp
    # (fluent style); the end_time case below uses two statements instead —
    # confirm both forms are equivalent in the binding.
    tmp_time = Timestamp().set_time_seconds(1237)
    a.start_time = tmp_time
    self.assertEqual(a.start_time.get_time_seconds(), 1237)
    # end_time
    self.assertEqual(a.end_time.get_time_seconds(), 4321)
    tmp_time = Timestamp()
    tmp_time.set_time_seconds(4322)
    a.end_time = tmp_time
    self.assertEqual(a.end_time.get_time_seconds(), 4322)
    # participants: originally 10 frames; replace with a 5-state track set.
    self.assertEqual(a.participants.all_frame_ids(), set(range(10)))
    bbox = BoundingBox(10, 10, 20, 20)
    dot = DetectedObjectType("test", 0.4)
    do = DetectedObject(bbox, 0.4, dot)
    track = Track()
    for i in range(5):
        track.append(ObjectTrackState(i, i, do))
    new_t = track
    new_ots = ObjectTrackSet([new_t])
    a.participants = new_ots
    self.assertEqual(a.participants.all_frame_ids(), set(range(5)))
    # duration reflects the (possibly updated) start/end timestamps.
    self.assertEqual(a.duration[0].get_time_seconds(),
                     a.start_time.get_time_seconds())
    self.assertEqual(a.duration[1].get_time_seconds(),
                     a.end_time.get_time_seconds())
def create_bounding_box():
    """Return a fixed BoundingBox spanning (1.0, 2.0) to (3.0, 4.0)."""
    corners = (1.0, 2.0, 3.0, 4.0)
    return BoundingBox(*corners)