def test_total_overlap(self):
    """Check a fully-contained box, an enclosing box and a disjoint box."""
    cases = [
        (det.Detection("dummy", 150, 150, 250, 250, 0), 0.25),  # inside detA
        (det.Detection("dummy", 0, 0, 400, 400, 0), 0.25),      # encloses detA
        (det.Detection("dummy", 300, 300, 500, 500, 0), 0.0),   # disjoint
    ]
    for box, expected in cases:
        self.assertEqual(tb.calculate_IOU(self.detA, box), expected)
    # IOU must be symmetric in its arguments
    for box, expected in cases:
        self.assertEqual(tb.calculate_IOU(box, self.detA), expected)
def test_bound_case(self):
    """Check that overlap across each of the four side boundaries is
    calculated correctly.

    Original bug: det2, det3 and det4 were constructed but every assertion
    re-tested det1, so three of the four boundary cases were never checked.
    """
    det1 = det.Detection("dummy", 200, 150, 400, 250, 0)  # crosses right edge
    det2 = det.Detection("dummy", 0, 150, 200, 250, 0)    # crosses left edge
    det3 = det.Detection("dummy", 150, 200, 250, 400, 0)  # crosses bottom edge
    det4 = det.Detection("dummy", 150, 0, 250, 200, 0)    # crosses top edge
    # intersection 100x100, union 40000 + 20000 - 10000 -> IOU = 0.2
    gt = 0.2
    for other in (det1, det2, det3, det4):
        self.assertEqual(tb.calculate_IOU(self.detA, other), gt)
    # check the reverse case (IOU must be symmetric)
    for other in (det1, det2, det3, det4):
        self.assertEqual(tb.calculate_IOU(other, self.detA), gt)
def test_corner_case(self):
    """Check that overlap over each of the four corners is correctly
    calculated.

    Original bug: det2, det3 and det4 were constructed but every assertion
    re-tested det1, so three of the four corner cases were never checked.
    """
    det1 = det.Detection("dummy", 200, 200, 400, 400, 0)  # bottom-right corner
    det2 = det.Detection("dummy", 0, 200, 200, 400, 0)    # bottom-left corner
    det3 = det.Detection("dummy", 0, 0, 200, 200, 0)      # top-left corner
    det4 = det.Detection("dummy", 200, 0, 400, 200, 0)    # top-right corner
    # intersection 100x100, union 40000 + 40000 - 10000 -> IOU = 1/7
    gt = 0.14285714285714285
    for other in (det1, det2, det3, det4):
        self.assertEqual(tb.calculate_IOU(self.detA, other), gt)
    # check the reverse case (IOU must be symmetric)
    for other in (det1, det2, det3, det4):
        self.assertEqual(tb.calculate_IOU(other, self.detA), gt)
def extrapolate(self, current_frame_number):
    """Extrapolate a bounding box into the current frame from the two
    previous consecutive frames (current - 1 and current - 2).

    The newest box is shifted by the per-frame drift of the box centre.
    Does nothing when a detection already exists for this frame, fewer than
    two detections are stored, the two previous frames are not consecutive,
    or both previous boxes are themselves extrapolated (limits drift
    accumulation to two extrapolation steps).

    current_frame_number: number of the current frame in the running
        detection.

    Fixed bugs: ``dy`` was computed as ``my_2 - my_2`` (always 0); the y
    coordinates were taken from ``d_2`` while x used ``d_1``; the drift was
    oriented backwards in time (``m2 - m1`` instead of ``m1 - m2``).
    """
    # A detection was already added this frame -> nothing to predict.
    if self.get_last_frame() == current_frame_number:
        return
    if len(self.detection_list) < 2:
        return
    # Extrapolation uses the two newest detections (frames curr-1, curr-2).
    d_1 = self.detection_list[-1]
    d_2 = self.detection_list[-2]
    # Require two consecutive frames immediately before the current one.
    if d_1.frame_number != current_frame_number - 1:
        return
    if d_2.frame_number != current_frame_number - 2:
        return
    # Stop after two consecutive extrapolations.
    if d_1.interpolated and d_2.interpolated:
        return
    # Box centre points.
    mx_1 = (d_1.x2 - d_1.x1) / 2 + d_1.x1
    my_1 = (d_1.y2 - d_1.y1) / 2 + d_1.y1
    mx_2 = (d_2.x2 - d_2.x1) / 2 + d_2.x1
    my_2 = (d_2.y2 - d_2.y1) / 2 + d_2.y1
    # Per-frame drift, oriented forward in time (d_2 -> d_1).
    dx = mx_1 - mx_2
    dy = my_1 - my_2
    # Shift the newest box one frame forward.
    xi1 = d_1.x1 + dx
    xi2 = d_1.x2 + dx
    yi1 = d_1.y1 + dy
    yi2 = d_1.y2 + dy
    di = det.Detection(d_1.label, xi1, yi1, xi2, yi2,
                       current_frame_number, interpolated=True)
    self.detection_list.append(di)
def interpolate(self, current_frame_number):
    """Linearly interpolate bounding boxes for frames that are missing
    between the last two stored detections.

    current_frame_number: number of the current frame in the running
        detection.
    """
    # Only meaningful right after a detection was added for this frame.
    if self.get_last_frame() != current_frame_number:
        return
    if len(self.detection_list) < 2:
        return
    ds = self.detection_list[-2]  # start of the gap
    de = self.detection_list[-1]  # end of the gap
    start_frame_number = ds.frame_number
    end_frame_number = de.frame_number
    # Consecutive frames -> no gap to fill.
    if start_frame_number + 1 == end_frame_number:
        return
    gap = end_frame_number - start_frame_number - 1
    span = gap + 1
    # Per-frame step for each coordinate (linear interpolation).
    step_x1 = (de.x1 - ds.x1) / span
    step_x2 = (de.x2 - ds.x2) / span
    step_y1 = (de.y1 - ds.y1) / span
    step_y2 = (de.y2 - ds.y2) / span
    for offset in range(1, gap + 1):
        nx1 = int(ds.x1 + step_x1 * offset)
        nx2 = int(ds.x2 + step_x2 * offset)
        ny1 = int(ds.y1 + step_y1 * offset)
        ny2 = int(ds.y2 + step_y2 * offset)
        filler = det.Detection(ds.label, nx1, ny1, nx2, ny2,
                               start_frame_number + offset,
                               interpolated=True)
        # Keep the newest real detection at the end of the list.
        self.detection_list.insert(len(self.detection_list) - 1, filler)
def load(self, path):
    """Load a TubeGenerator from a collection of ``*.tube`` files in *path*.

    Each line of a tube file is expected to hold
    ``frame,label,x1,y1,x2,y2,interpolated``.

    Fixed bugs: ``bool(args[6])`` was True for any non-empty string, so
    ``"False"`` loaded as True; ``path + fl`` broke without a trailing
    separator; an empty tube file appended a stale (or undefined) tube.
    """
    tube_files = [f for f in os.listdir(path) if f.split('.')[-1] == "tube"]
    for fl in tube_files:
        # Tube id is the file name without its extension.
        tube_id = fl.split('.')[0]
        tube = None
        with open(os.path.join(path, fl), 'r') as f:
            for i, line in enumerate(f):
                args = line.strip().split(',')
                # bool("False") is True -> compare the text explicitly.
                interpolated = args[6].strip() == "True"
                dt = det.Detection(args[1],
                                   int(float(args[2])), int(float(args[3])),
                                   int(float(args[4])), int(float(args[5])),
                                   int(float(args[0])),
                                   interpolated)
                if i == 0:
                    tube = Tube(dt, tube_id)
                else:
                    tube.detection_list.append(dt)
        # Skip empty files instead of appending a stale/undefined tube.
        if tube is not None:
            self.active_tube_list.append(tube)
# List the frame images in deterministic playback order.
sequence_images = sorted(os.listdir(image_sequence_folder))
# Ground-truth file lives under the configured image path.
ground_truth_path = os.path.join(settings["path"]["images"],
                                 settings["path"]["ground_truth_name"])
# Parse ground truth: one comma-separated line of floats per frame.
with open(ground_truth_path, 'r') as f:
    gt_list = [[float(n) for n in gt.split(',')] for gt in [l.strip() for l in f]]
# Init object detector.
YDetect = det.YOLO_Detector("settings.json")
lg.info("=======start detection=========")
VIS = det.Visualizer("settings.json")
frame_number = 0
# 0th frame is init: a ground-truth row holds 8 values (4 corner points);
# only the 1st and 3rd points are used -- assumes an axis-aligned box. TODO confirm
x1,y1,_,_,x2,y2,_,_ = gt_list[0]
init_det_list = [det.Detection("car", int(x1), int(y1), int(x2), int(y2), frame_number)]
# Tube generator seeded with the initial ground-truth detection.
TG = tb.TubeGenerator("settings.json", init_det_list)
# Reload previously written tubes and emit them again -- presumably a
# round-trip check of the tube file format; confirm intent.
TG_loaded = tb.TubeGenerator("settings.json")
TG_loaded.load(settings["path"]["output"])
TG_loaded.output()
past_dett = None
for frame_number, img_name in enumerate(sequence_images):
    # Frames are 1-based (frame 0 was the init frame).
    frame_number += 1
    # Construct image path and read in the frame image.
    img_path = os.path.join(image_sequence_folder, img_name)
################################################################################ track_label = "person" start_frame = 0 ################################################################################ frame_number = 0 #0th frame is init x1, y1, x2, y2, x3, y3, x4, y4 = gt_list[0] #calculate rectangular ground truth xx1 = min(x1, x2, x3, x4) xx2 = max(x1, x2, x3, x4) yy1 = min(y1, y2, y3, y4) yy2 = max(y1, y2, y3, y4) init_det_list = [ det.Detection(track_label, int(xx1), int(yy1), int(xx2), int(yy2), frame_number) ] accuracry_list = [] label_list = [] number_no_detection = 0 #init tube generator using the initial ground truth TG = tb.TubeGenerator("settings.json", init_det_list) TG_loaded = tb.TubeGenerator("settings.json") TG_loaded.load(settings["path"]["output"]) TG_loaded.output() frame_number = start_frame
#init object detector YDetect = det.YOLO_Detector("settings.json") lg.info("=======start detection=========") frame_number = 0 #0th frame is init x1, y1, x2, y2, x3, y3, x4, y4 = gt_list[0] #calculate rectangular ground truth xx1 = min(x1, x2, x3, x4) xx2 = max(x1, x2, x3, x4) yy1 = min(y1, y2, y3, y4) yy2 = max(y1, y2, y3, y4) init_det_list = [ det.Detection("car", int(xx1), int(yy1), int(xx2), int(yy2), frame_number) ] accuracry_list = [] label_list = [] number_no_detection = 0 for frame_number, img_name in tqdm(enumerate(sequence_images)): frame_number += 1 #construct image path and read in img img_path = os.path.join(image_sequence_folder, img_name) img = cv2.imread(img_path) lg.info("Process frame {}".format(frame_number))
def setUp(self):
    """Build the shared 200x200 reference box used by every IOU test."""
    reference_box = det.Detection("dummy", 100, 100, 300, 300, 0)
    self.detA = reference_box