def pipeline(img):
    '''
    Pipeline function for detection and tracking.

    Runs the detector only on the first 2 of every 10 frames
    (frame_count % 10 < 2) and re-seeds one tracker per detection; on the
    remaining frames it updates the existing trackers with the cached
    `z_box` measurements through their Kalman filters.

    img: current video frame (numpy image array).
    Returns the frame with the tracked boxes drawn on it.
    '''
    global frame_count
    global tracked_count  # declared global but not used in this function
    global tracker_list
    global max_age
    global min_hits
    global track_id_list
    global debug
    global z_box  # cached detections, reused on non-detection frames
    img_dim = (img.shape[1], img.shape[0])  # (width, height); currently unused
    if frame_count % 10 < 2:
        # Detection frame: run the detector and rebuild the tracker list.
        z_box = det.get_localization(img)  # measurement
        if len(z_box) > 0:
            for i in range(len(z_box)):
                box = z_box[i]
                tmp_trk = Tracker()  # Create a new tracker
                # State is [y1, 0, x1, 0, y2, 0, x2, 0]^T — positions
                # interleaved with zeroed velocities; matches the
                # xx[0]/xx[2]/xx[4]/xx[6] extraction below.
                x = np.array([[box[0], 0, box[1], 0, box[2], 0, box[3], 0]]).T
                tmp_trk.x_state = x
                tmp_trk.predict_only()
                # Replace the i-th tracker in place when one exists,
                # otherwise grow the list.
                if len(tracker_list) != 0 and i < len(tracker_list):
                    tracker_list[i] = tmp_trk
                else:
                    tracker_list.append(tmp_trk)
                print(tracker_list)
                img = helpers.draw_box_label(img, box)
    else:
        # Tracking frame: update each tracker with the cached measurements.
        if len(z_box) > 0:
            print(tracker_list)
            nb = []
            for i in range(len(z_box)):
                z = z_box[i]
                z = np.expand_dims(z, axis=0).T
                # NOTE(review): assumes len(tracker_list) >= len(z_box);
                # raises IndexError otherwise — confirm the detection branch
                # always ran first with at least as many trackers.
                tmp_trk = tracker_list[i]
                tmp_trk.kalman_filter(z)
                # Keep only the positional entries of the state vector.
                xx = tmp_trk.x_state.T[0].tolist()
                xx = [xx[0], xx[2], xx[4], xx[6]]
                nb.append(xx)
                img = helpers.draw_box_label(img, xx)
            for i in range(len(nb)):
                print(nb[i])
                print(z_box[i])
            # Carry the filtered boxes forward as next frame's measurements.
            z_box = nb
    frame_count += 1
    if debug:
        print('Frame:', frame_count)
    return img
def pipeline(img):
    '''
    Pipeline function for detection and tracking.

    Detects objects in `img`, associates detections with existing trackers
    by IoU, corrects matched trackers with their Kalman filters, spawns
    trackers for unmatched detections, ages unmatched trackers, and draws
    the boxes of confirmed (hits >= min_hits, not too stale) tracks.

    img: current video frame (numpy image array).
    Returns the annotated frame.

    Fix vs. previous revision: removed the unused locals
    `start = time.time()` and `img_dim` (dead code — neither was read).
    '''
    global frame_count
    global tracker_list
    global max_age
    global min_hits
    global track_id_list
    global debug

    frame_count += 1
    z_box = det.get_localization(img)  # measurement
    if debug:
        print('Frame:', frame_count)
    x_box = []
    if debug:
        # Show each raw detection before association.
        for i in range(len(z_box)):
            img1 = helpers.draw_box_label(img, z_box[i], box_color=(255, 0, 0))
            plt.imshow(img1)
            plt.show()

    if len(tracker_list) > 0:
        for trk in tracker_list:
            x_box.append(trk.box)

    matched, unmatched_dets, unmatched_trks \
        = assign_detections_to_trackers(x_box, z_box, iou_thrd=0.3)
    if debug:
        print('Detection: ', z_box)
        print('x_box: ', x_box)
        print('matched:', matched)
        print('unmatched_det:', unmatched_dets)
        print('unmatched_trks:', unmatched_trks)

    # Deal with matched detections: correct each tracker with its measurement.
    if matched.size > 0:
        for trk_idx, det_idx in matched:
            z = z_box[det_idx]
            z = np.expand_dims(z, axis=0).T
            tmp_trk = tracker_list[trk_idx]
            tmp_trk.kalman_filter(z)
            # Keep only the positional entries of the 8-d state.
            xx = tmp_trk.x_state.T[0].tolist()
            xx = [xx[0], xx[2], xx[4], xx[6]]
            x_box[trk_idx] = xx
            tmp_trk.box = xx
            tmp_trk.hits += 1
            tmp_trk.no_losses = 0  # reset the age on a successful match

    # Deal with unmatched detections: start a new tracker for each.
    if len(unmatched_dets) > 0:
        for idx in unmatched_dets:
            z = z_box[idx]
            z = np.expand_dims(z, axis=0).T
            tmp_trk = tracker.Tracker()  # Create a new tracker
            x = np.array([[z[0], 0, z[1], 0, z[2], 0, z[3], 0]]).T
            tmp_trk.x_state = x
            tmp_trk.predict_only()
            xx = tmp_trk.x_state
            xx = xx.T[0].tolist()
            xx = [xx[0], xx[2], xx[4], xx[6]]
            tmp_trk.box = xx
            tmp_trk.id = track_id_list.popleft()  # assign an ID for the tracker
            tracker_list.append(tmp_trk)
            x_box.append(xx)

    # Deal with unmatched tracks: age them and coast on the prediction.
    if len(unmatched_trks) > 0:
        for trk_idx in unmatched_trks:
            tmp_trk = tracker_list[trk_idx]
            tmp_trk.no_losses += 1
            tmp_trk.predict_only()
            xx = tmp_trk.x_state
            xx = xx.T[0].tolist()
            xx = [xx[0], xx[2], xx[4], xx[6]]
            tmp_trk.box = xx
            x_box[trk_idx] = xx

    # The list of tracks to be annotated: confirmed and not too stale.
    good_tracker_list = []
    for trk in tracker_list:
        if ((trk.hits >= min_hits) and (trk.no_losses <= max_age)):
            good_tracker_list.append(trk)
            x_cv2 = trk.box
            if debug:
                print('updated box: ', x_cv2)
                print()
            # Draw the bounding boxes on the images.
            img = helpers.draw_box_label(
                img, x_cv2, helpers.trk_id_to_color(trk.id))

    # Book keeping: recycle the IDs of expired tracks, then drop them.
    deleted_tracks = filter(lambda x: x.no_losses > max_age, tracker_list)
    for trk in deleted_tracks:
        track_id_list.append(trk.id)
    tracker_list = [x for x in tracker_list if x.no_losses <= max_age]

    if debug:
        print('Ending tracker_list: ', len(tracker_list))
        print('Ending good tracker_list: ', len(good_tracker_list))
    return img
def pipeline(img):
    """
    Pipeline function for detection and tracking.

    Same detect/associate/update cycle as the other pipeline variants, but
    additionally accumulates the foot point ("legs") of every confirmed
    track into the global `points` list and draws the resulting trail.

    img: current video frame (numpy image array).
    Returns the annotated frame (also shown via cv2.imshow).
    """
    global frame_count
    global tracker_list
    global max_age
    global min_hits
    global track_id_list
    global debug
    global points  # trail of foot points accumulated across frames

    frame_count += 1
    img_dim = (img.shape[1], img.shape[0])  # (width, height); currently unused
    z_box = det.get_localization(img)  # measurement
    if debug:
        print('Frame:', frame_count)
    x_box = []
    if debug:
        # Draw all raw detections on a copy so the working frame stays clean.
        img1 = img.copy()
        for i in range(len(z_box)):
            img1 = helpers.draw_box_label(i, img1, z_box[i],
                                          box_color=(255, 0, 0))
        plt.imshow(img1)
        plt.show()
    if len(tracker_list) > 0:
        for trk in tracker_list:
            x_box.append(trk.box)

    matched, unmatched_dets, unmatched_trks \
        = assign_detections_to_trackers(x_box, z_box, iou_thrd=0.3)
    if debug:
        print('Detection: ', z_box)
        print('x_box: ', x_box)
        print('matched:', matched)
        print('unmatched_det:', unmatched_dets)
        print('unmatched_trks:', unmatched_trks)

    # Deal with matched detections: Kalman-correct the paired tracker.
    if matched.size > 0:
        for trk_idx, det_idx in matched:
            z = z_box[det_idx]
            z = np.expand_dims(z, axis=0).T
            tmp_trk = tracker_list[trk_idx]
            tmp_trk.kalman_filter(z)
            # Positional entries of the 8-d state only.
            xx = tmp_trk.x_state.T[0].tolist()
            xx = [xx[0], xx[2], xx[4], xx[6]]
            x_box[trk_idx] = xx
            tmp_trk.box = xx
            tmp_trk.hits += 1
            # NOTE(review): no_losses is not reset here, unlike sibling
            # variants — confirm whether matched tracks should keep aging.

    # Deal with unmatched detections: spawn a new tracker for each.
    if len(unmatched_dets) > 0:
        for idx in unmatched_dets:
            z = z_box[idx]
            z = np.expand_dims(z, axis=0).T
            tmp_trk = tracker.Tracker()  # Create a new tracker
            x = np.array([[z[0], 0, z[1], 0, z[2], 0, z[3], 0]]).T
            tmp_trk.x_state = x
            tmp_trk.predict_only()
            xx = tmp_trk.x_state
            xx = xx.T[0].tolist()
            xx = [xx[0], xx[2], xx[4], xx[6]]
            tmp_trk.box = xx
            tmp_trk.id = track_id_list.popleft()  # assign an ID for the tracker
            print(tmp_trk.id)
            tracker_list.append(tmp_trk)
            x_box.append(xx)

    # Deal with unmatched tracks: age them and coast on the prediction.
    if len(unmatched_trks) > 0:
        for trk_idx in unmatched_trks:
            tmp_trk = tracker_list[trk_idx]
            tmp_trk.no_losses += 1
            tmp_trk.predict_only()
            xx = tmp_trk.x_state
            xx = xx.T[0].tolist()
            xx = [xx[0], xx[2], xx[4], xx[6]]
            tmp_trk.box = xx
            x_box[trk_idx] = xx

    # The list of tracks to be annotated.
    good_tracker_list = []
    if (len(tracker_list) == 0):
        # All tracks expired: also restart the foot-point trail.
        print('list should be cleared now')
        points = []
    for trk in tracker_list:
        if ((trk.hits >= min_hits) and (trk.no_losses <= max_age)):
            good_tracker_list.append(trk)
            x_cv2 = trk.box
            if debug:
                print('updated box: ', x_cv2)
                print()
    for good_track in good_tracker_list:
        box = good_track.box
        if (len(z_box) != 0):
            y_up, x_left, y_down, x_right = box
            #center = (int((x_left + x_right) / 2), int((y_up + y_down) / 2))
            # Bottom-center of the box ≈ where the feet touch the ground.
            legs = (int((x_left + x_right) / 2), y_down)
            points.append(legs)
            # NOTE(review): y_down comes from the float Kalman state and
            # cv2.circle needs integer coordinates — confirm trk.box is int.
            img = cv2.circle(img, legs, 20, (255, 0, 0), thickness=-1)
            # Draw the bounding boxes on the images.
            img = helpers.draw_box_label(good_track.id, img, box)
    # Connect consecutive foot points into a trail.
    # NOTE(review): `points` mixes points of all tracks and all frames, so
    # this polyline can jump between different targets — confirm intent.
    if (len(points) > 1):
        for i in range(len(points) - 1):
            cv2.line(img, points[i], points[i + 1], (255, 0, 0), 2)

    # Book keeping: recycle IDs of expired tracks, then drop them.
    deleted_tracks = filter(lambda x: x.no_losses > max_age, tracker_list)
    for trk in deleted_tracks:
        track_id_list.append(trk.id)
    tracker_list = [x for x in tracker_list if x.no_losses <= max_age]
    if debug:
        print('Ending tracker_list: ', len(tracker_list))
        print('Ending good tracker_list: ', len(good_tracker_list))
    cv2.imshow("frame", img)
    return img
# Updated state x_update =trk.x_state x_updated_box = [x_update[0], x_update[2], x_update[4], x_update[6]] print('The initial state is: ', x_init) print('The measurement is: ', z) print('The update state is: ', x_update) # Visualize the Kalman filter process and the # impact of measurement nosie convariance matrix images = [plt.imread(file) for file in glob.glob('./test_images/*.jpg')] img=images[3] plt.figure(figsize=(10, 14)) helpers.draw_box_label(img, x_init_box, box_color=(0, 255, 0)) ax = plt.subplot(3, 1, 1) plt.imshow(img) plt.title('Initial: '+str(x_init_box)) helpers.draw_box_label(img, z, box_color=(255, 0, 0)) ax = plt.subplot(3, 1, 2) plt.imshow(img) plt.title('Measurement: '+str(z)) helpers.draw_box_label(img, x_updated_box) ax = plt.subplot(3, 1, 3) plt.imshow(img) plt.title('Updated: '+str(x_updated_box)) plt.show()
def pipeline(img):
    '''
    Pipeline function for detection and tracking (with logo localization).

    Predicts every tracker forward, associates detections by IoU, corrects
    matched trackers, spawns trackers for unmatched detections, ages the
    rest, and — for each detection — runs a secondary logo detector inside
    the person crop.  Center and logo points are accumulated per track;
    confirmed tracks are drawn as boxes or dot trails per args['dots'].

    img: current video frame (numpy image array).
    Returns the annotated frame.

    Fixes vs. previous revision:
      * both logo-drawing branches used `trk`, a leftover loop variable
        that is undefined on the first frame (NameError) and otherwise
        points at an unrelated tracker — they now use `tmp_trk`;
      * circle centers are computed from the already-unpacked integer crop
        coordinates (same values as the old z[0]/z[1] column-vector math),
        so cv2.circle receives plain ints instead of floats/arrays.
    '''
    global frame_count
    global tracker_list
    global dead_tracker_list
    global track_id_start_value
    global max_age
    global min_hits
    global track_id_list
    global debug
    global args

    frame_count += 1
    img_dim = (img.shape[1], img.shape[0])
    z_box = det.get_localization(img)  # measurement
    img_raw = np.copy(img)  # clean copy for crops (img gets drawn on)
    if debug:
        print('Frame:', frame_count)
    x_box = []
    if debug:
        for i in range(len(z_box)):
            if not args['dots']:
                img1 = helpers.draw_box_label(img, z_box[i],
                                              box_color=(255, 0, 0))
            #plt.imshow(img1)
            plt.show()

    # Predict every tracker forward before association so IoU is computed
    # against the predicted (not stale) positions.
    if len(tracker_list) > 0:
        for trk in tracker_list:
            trk.predict_only()
            xx = trk.x_state
            xx = xx.T[0].tolist()
            xx = [xx[0], xx[2], xx[4], xx[6]]
            trk.box = xx
            x_box.append(trk.box)

    matched, unmatched_dets, unmatched_trks \
        = assign_detections_to_trackers(x_box, z_box, iou_thrd=0.1)
    if debug:
        print('Detection: ', z_box)
        print('x_box: ', x_box)
        print('matched:', matched)
        print('unmatched_det:', unmatched_dets)
        print('unmatched_trks:', unmatched_trks)

    # On-screen diagnostics: detection count, per-match IoU, frame number.
    font = cv2.FONT_HERSHEY_SIMPLEX
    font_size = 1
    font_color = (255, 255, 255)
    cv2.putText(img, str(len(z_box)), (100, 100), font, font_size,
                font_color, 1, cv2.LINE_AA)
    pos = 30
    for trk_idx, det_idx in matched:
        iou = helpers.box_iou2(tracker_list[trk_idx].box, z_box[det_idx])
        cv2.putText(img, tracker_list[trk_idx].id + " " + str(iou),
                    (100, 100 + pos), font, font_size, font_color, 1,
                    cv2.LINE_AA)
        pos += 30
    cv2.putText(img, str(frame_count), (100, 100 + pos), font, font_size,
                font_color, 1, cv2.LINE_AA)

    # Deal with matched detections
    if matched.size > 0:
        for trk_idx, det_idx in matched:
            z = z_box[det_idx]
            ymin, xmin, ymax, xmax = z_box[det_idx]
            z = np.expand_dims(z, axis=0).T
            tmp_trk = tracker_list[trk_idx]
            # Look for the logo inside the (clean) person crop.
            person_im = img_raw[ymin:ymax, xmin:xmax]
            logo_boxes = logo_det.get_localization(person_im)
            if (len(logo_boxes) > 0):
                l_ymin, l_xmin, l_ymax, l_xmax = logo_boxes[0]
                # Logo box translated back to full-frame coordinates.
                img = helpers.draw_box_label(
                    img,
                    [l_ymin + ymin, l_xmin + xmin, l_ymax + ymin,
                     l_xmax + xmin],
                    id="logo", box_color=(255, 0, 0))
                logo_x = int((l_xmax - l_xmin) / 2 + l_xmin + xmin)
                logo_y = int((l_ymax - l_ymin) / 2 + l_ymin + ymin)
                img = cv2.circle(img, (logo_x, logo_y), 5, tmp_trk.color, 2)
                tmp_trk.logo_x_coords.append(logo_x)
                tmp_trk.logo_y_coords.append(logo_y)
            if not args['dots']:
                img = helpers.draw_box_label(img, tmp_trk.box,
                                             id=tmp_trk.id,
                                             box_color=(0, 255, 0))
            #tmp_trk.kalman_filter(z)
            tmp_trk.update_only(z)
            xx = tmp_trk.x_state.T[0].tolist()
            xx = [xx[0], xx[2], xx[4], xx[6]]
            x_box[trk_idx] = xx
            tmp_trk.box = xx
            # Track the box center for the dot-trail display.
            tmp_trk.y_coords.append(int((xx[2] - xx[0]) / 2 + xx[0]))
            tmp_trk.x_coords.append(int((xx[3] - xx[1]) / 2 + xx[1]))
            tmp_trk.hits += 1
            tmp_trk.no_losses = 0

    # Deal with unmatched detections
    if len(unmatched_dets) > 0:
        for idx in unmatched_dets:
            z = z_box[idx]
            ymin, xmin, ymax, xmax = z_box[idx]
            z = np.expand_dims(z, axis=0).T
            tmp_trk = tracker.Tracker()  # Create a new tracker
            x = np.array([[z[0], 0, z[1], 0, z[2], 0, z[3], 0]]).T
            tmp_trk.x_state = x
            person_im = img_raw[ymin:ymax, xmin:xmax]
            logo_boxes = logo_det.get_localization(person_im)
            if (len(logo_boxes) > 0):
                l_ymin, l_xmin, l_ymax, l_xmax = logo_boxes[0]
                img = helpers.draw_box_label(
                    img,
                    [l_ymin + ymin, l_xmin + xmin, l_ymax + ymin,
                     l_xmax + xmin],
                    id="logo", box_color=(255, 0, 0))
                logo_x = int((l_xmax - l_xmin) / 2 + l_xmin + xmin)
                logo_y = int((l_ymax - l_ymin) / 2 + l_ymin + ymin)
                img = cv2.circle(img, (logo_x, logo_y), 5, tmp_trk.color, 2)
                tmp_trk.logo_x_coords.append(logo_x)
                tmp_trk.logo_y_coords.append(logo_y)
            tmp_trk.predict_only()
            xx = tmp_trk.x_state
            xx = xx.T[0].tolist()
            xx = [xx[0], xx[2], xx[4], xx[6]]
            tmp_trk.box = xx
            tmp_trk.y_coords.append(int((xx[2] - xx[0]) / 2 + xx[0]))
            tmp_trk.x_coords.append(int((xx[3] - xx[1]) / 2 + xx[1]))
            tmp_trk.id = str(
                track_id_start_value)  # assign an ID for the tracker
            track_id_start_value += 1
            tracker_list.append(tmp_trk)
            x_box.append(xx)

    # Deal with unmatched tracks
    if len(unmatched_trks) > 0:
        for trk_idx in unmatched_trks:
            tmp_trk = tracker_list[trk_idx]
            if not args['dots']:
                img = helpers.draw_box_label(img, tmp_trk.box,
                                             id=tmp_trk.id,
                                             box_color=(255, 255, 0))
            tmp_trk.no_losses += 1
            # Prediction already happened at the top of the frame.
            #tmp_trk.predict_only()
            #xx = tmp_trk.x_state
            #xx = xx.T[0].tolist()
            #xx =[xx[0], xx[2], xx[4], xx[6]]
            #tmp_trk.box =xx
            #x_box[trk_idx] = xx

    # The list of tracks to be annotated
    good_tracker_list = []
    for trk in tracker_list:
        print(trk.id)
        if ((trk.hits >= min_hits) and (trk.no_losses <= max_age)):
            good_tracker_list.append(trk)
            x_cv2 = trk.box
            if debug:
                print('updated box: ', x_cv2)
                print()
            if args['dots']:
                print("COLOR IS: {}".format(trk.color))
                for i in zip(trk.x_coords, trk.y_coords):
                    img = cv2.circle(img, i, 10, trk.color, 5)
            if not args['dots']:
                # Draw the bounding boxes on the images
                img = helpers.draw_box_label(img, x_cv2, id=trk.id)

    # Book keeping: keep expired tracks for post-processing.
    deleted_tracks = filter(lambda x: x.no_losses > max_age, tracker_list)
    for trk in deleted_tracks:
        dead_tracker_list.append(trk)
    tracker_list = [x for x in tracker_list if x.no_losses <= max_age]
    if debug:
        print('Ending tracker_list: ', len(tracker_list))
        print('Ending good tracker_list: ', len(good_tracker_list))
    return img
def pipeline(img_in):
    """
    Detection + tracking pipeline that also archives per-track crops.

    Runs the detector, associates detections to trackers by IoU, updates /
    spawns / ages trackers, draws confirmed tracks, appends each drawn
    frame + crop + box to the module-level `final_dict` keyed by track id,
    and pickles the dict to "final_dict_pickle" every frame.

    img_in: current video frame (numpy image array).
    Returns the annotated frame (also shown via cv2.imshow).

    Fixes vs. previous revision: dict membership test via `.keys()` plus a
    manual get/append/update dance replaced by `setdefault` (same
    behavior); the pickle file handle is now closed via `with` even if
    pickling raises; the builtin-shadowing local `l` is gone.
    """
    global frame_count
    global tracker_list
    global max_age
    global min_hits
    global track_id_list
    global debug
    global track_id

    frame_count += 1
    z_box = det.get_localization(img_in)  # measurement
    if debug:
        print('Frame:', frame_count)
    x_box = []
    if len(tracker_list) > 0:
        for trk in tracker_list:
            x_box.append(trk.box)

    matched, unmatched_dets, unmatched_trks = assign_detections_to_trackers(
        x_box, z_box, iou_thresh=0.3)
    if debug:
        print('Detection: ', z_box)
        print('x_box: ', x_box)
        print('matched:', matched)
        print('unmatched_det:', unmatched_dets)
        print('unmatched_trks:', unmatched_trks)

    # Deal with matched detections: Kalman-correct the paired tracker.
    if matched.size > 0:
        for trk_idx, det_idx in matched:
            z = z_box[det_idx]
            z = np.expand_dims(z, axis=0).T
            tmp_trk = tracker_list[trk_idx]
            tmp_trk.kalman_filter(z)
            # Positional entries of the 8-d state only.
            xx = tmp_trk.x_state.T[0].tolist()
            xx = [xx[0], xx[2], xx[4], xx[6]]
            x_box[trk_idx] = xx
            tmp_trk.box = xx
            tmp_trk.hits += 1

    # Deal with unmatched detections: spawn a new tracker each.
    if len(unmatched_dets) > 0:
        for idx in unmatched_dets:
            z = z_box[idx]
            z = np.expand_dims(z, axis=0).T
            tmp_trk = tracker.Tracker()  # Create a new tracker
            x = np.array([[z[0], 0, z[1], 0, z[2], 0, z[3], 0]]).T
            tmp_trk.x_state = x
            tmp_trk.predict_only()
            xx = tmp_trk.x_state
            xx = xx.T[0].tolist()
            xx = [xx[0], xx[2], xx[4], xx[6]]
            tmp_trk.box = xx
            track_id += 1
            tmp_trk.id = track_id  # assign an ID for the tracker
            print(tmp_trk.id)
            tracker_list.append(tmp_trk)
            x_box.append(xx)

    # Deal with unmatched tracks: age them and coast on the prediction.
    if len(unmatched_trks) > 0:
        for trk_idx in unmatched_trks:
            tmp_trk = tracker_list[trk_idx]
            tmp_trk.no_losses += 1
            tmp_trk.predict_only()
            xx = tmp_trk.x_state
            xx = xx.T[0].tolist()
            xx = [xx[0], xx[2], xx[4], xx[6]]
            tmp_trk.box = xx
            x_box[trk_idx] = xx

    # The list of tracks to be annotated.
    good_tracker_list = []
    for trk in tracker_list:
        if (trk.hits >= min_hits) and (trk.no_losses <= max_age):
            good_tracker_list.append(trk)
            x_cv2 = trk.box
            if debug:
                print('updated box: ', x_cv2)
            img_in, img_crop = helpers.draw_box_label(trk.id, img_in, x_cv2,
                                                      frame_count)
            if img_crop is not None:
                # Archive (frame, crop, box) under this track's id.
                final_dict.setdefault(trk.id, []).append(
                    [img_in, img_crop, x_cv2])

    # Book keeping: recycle IDs of expired tracks, then drop them.
    deleted_tracks = filter(lambda x: x.no_losses > max_age, tracker_list)
    for trk in deleted_tracks:
        track_id_list.append(trk.id)
    tracker_list = [x for x in tracker_list if x.no_losses <= max_age]

    # Persist the archive; `with` guarantees the handle is closed even if
    # pickling fails mid-write.
    with open("final_dict_pickle", 'wb') as dict_file:
        pickle.dump(final_dict, dict_file)

    if debug:
        print('Ending tracker_list: ', len(tracker_list))
        print('Ending good tracker_list: ', len(good_tracker_list))
    cv2.imshow("frame", img_in)
    return img_in
def face_pipeline(img, det):
    """
    Track faces across frames with the detector `det`.

    Detects faces in `img`, matches them to the live trackers by IoU,
    corrects matched trackers with their Kalman filters, creates trackers
    for new detections, ages lost ones, and draws the boxes of confirmed
    tracks back onto the frame.

    img: current video frame (numpy image array).
    det: face detector passed through to detect_faces.
    Returns the annotated frame.
    """
    global frame_count
    global tracker_list
    global min_hits
    global track_id_list
    global max_age

    frame_count += 1
    detections = detect_faces(img, det)

    # Current box of every live tracker, indexed like tracker_list.
    x_box = [trk.box for trk in tracker_list]

    matched, unmatched_dets, unmatched_trks = assign_detections_to_trackers(
        x_box, detections, iou_thrd=.3)

    # Matched pairs: correct each tracker with its measurement.
    if matched.size > 0:
        for trk_idx, det_idx in matched:
            measurement = np.expand_dims(detections[det_idx], axis=0).T
            matched_trk = tracker_list[trk_idx]
            matched_trk.kalman_filter(measurement)
            state = matched_trk.x_state.T[0].tolist()
            new_box = [state[0], state[2], state[4], state[6]]
            x_box[trk_idx] = new_box
            matched_trk.box = new_box
            matched_trk.hits += 1

    # Brand-new detections: spin up a tracker for each one.
    for det_idx in unmatched_dets:
        measurement = np.expand_dims(detections[det_idx], axis=0).T
        new_trk = tracker.Tracker()  # Create a new tracker
        new_trk.x_state = np.array([[measurement[0], 0, measurement[1], 0,
                                     measurement[2], 0, measurement[3],
                                     0]]).T
        new_trk.predict_only()
        state = new_trk.x_state.T[0].tolist()
        new_box = [state[0], state[2], state[4], state[6]]
        new_trk.box = new_box
        new_trk.id = track_id_list.popleft()  # assign an ID for the tracker
        print(new_trk.id)
        tracker_list.append(new_trk)
        x_box.append(new_box)

    # Trackers that found no detection: age them and coast on prediction.
    for trk_idx in unmatched_trks:
        lost_trk = tracker_list[trk_idx]
        lost_trk.no_losses += 1
        lost_trk.predict_only()
        state = lost_trk.x_state.T[0].tolist()
        new_box = [state[0], state[2], state[4], state[6]]
        lost_trk.box = new_box
        x_box[trk_idx] = new_box

    # Annotate only confirmed, recently-seen tracks.
    good_tracker_list = []
    for trk in tracker_list:
        if trk.hits >= min_hits and trk.no_losses <= max_age:
            good_tracker_list.append(trk)
            # Draw the bounding boxes on the images.
            img = helpers.draw_box_label(trk.id, img, trk.box)

    # Book keeping: recycle the IDs of expired tracks, then drop them.
    for trk in tracker_list:
        if trk.no_losses > max_age:
            track_id_list.append(trk.id)
    tracker_list = [trk for trk in tracker_list if trk.no_losses <= max_age]
    # cv2.imshow("frame",img)
    return img
# cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 2)
# cv2.imwrite('r1.jpg', cv2.cvtColor(image, cv2.COLOR_RGB2BGR))
# detector.close()

# Smoke-test the person detector on every image in test_images/, timing the
# localization call and showing each detection box.
matplotlib.use("TkAgg")
det = PersonDetector()
os.chdir(cwd)
TEST_IMAGE_PATHS = glob(os.path.join('test_images/', '*.jpg'))
# print(len(TEST_IMAGE_PATHS))
# Fix vs. previous revision: the outer `enumerate` index `i` was unused and
# was shadowed by the inner detection-loop variable of the same name.
for image_path in TEST_IMAGE_PATHS:
    print('')
    print('*************************************************')
    img_full = Image.open(image_path)
    img_full_np = det.load_image_into_numpy_array(img_full)
    start = time.time()
    b = det.get_localization(img_full_np, visual=False)
    end = time.time()
    print('Localization time: ', end - start)
    for box_idx in range(len(b)):
        img1 = helpers.draw_box_label(box_idx, img_full_np, b[box_idx],
                                      box_color=(255, 0, 0))
        plt.imshow(img1)
        plt.show()
#
def pipeline(img):
    '''
    Pipeline function for detection and tracking.

    Besides the usual detect/associate/update cycle this variant collects
    the warning strings produced by helpers.draw_box_label and returns them
    alongside the annotated frame.

    img: current video frame (numpy image array).
    Returns (annotated frame, str1, stryao), where str1 accumulates the
    per-box label text and stryao holds the "vehicle ahead" warning string
    (empty when not triggered).

    Fix vs. previous revision: `stryao` was assigned only inside the
    `if debug:` branch, so the final `return` raised NameError whenever
    debug was False; it is now initialized up front.
    '''
    global frame_count
    global tracker_list
    global max_age
    global min_hits
    global track_id_list
    global debug
    global str1
    global str555

    frame_count += 1
    img_dim = (img.shape[1], img.shape[0])
    z_box = det.get_localization(img)  # measurement
    if debug:
        print('Frame:', frame_count)
    x_box = []
    stryao = ""  # FIX: initialize so the return below can never NameError
    if debug:
        str4 = ""
        str5552 = ""
        for i in range(len(z_box)):
            img1, str3, str5551 = helpers.draw_box_label(
                img, z_box[i], box_color=(255, 0, 0))
            str4 = str4 + "\n" + str3
            if (str5551 == "注意前方车辆!"):
                # "Watch out for the vehicle ahead!" warning was raised.
                stryao = "注意前方车辆!"
            plt.imshow(img1)
        print("AAAAaaaaa" + str4)
        str1 = str4
        # NOTE(review): str5552 is never updated, so str555 is always reset
        # to "" here — confirm whether str5551 was intended instead.
        str555 = str5552
        print("WWWWWaaaaa" + str555)
        plt.show()
    if (z_box == []):
        # No detections: blank out the shared label strings.
        str1 = " "
        str555 = " "
    if len(tracker_list) > 0:
        for trk in tracker_list:
            x_box.append(trk.box)

    # When a vehicle is first detected, this returns empty matched / a
    # one-element unmatched_dets / empty unmatched_trks; on later frames
    # the matched list fills in instead.
    matched, unmatched_dets, unmatched_trks \
        = assign_detections_to_trackers(x_box, z_box, iou_thrd=0.3)
    if debug:
        print('Detection: ', z_box)
        print('x_box: ', x_box)
        print('matched:', matched)
        print('unmatched_det:', unmatched_dets)
        print('unmatched_trks:', unmatched_trks)

    # Handle matched detections: Kalman-correct the paired tracker.
    if matched.size > 0:
        for trk_idx, det_idx in matched:
            z = z_box[det_idx]
            z = np.expand_dims(z, axis=0).T
            tmp_trk = tracker_list[trk_idx]
            tmp_trk.kalman_filter(z)
            xx = tmp_trk.x_state.T[0].tolist()
            xx = [xx[0], xx[2], xx[4], xx[6]]
            x_box[trk_idx] = xx
            tmp_trk.box = xx
            tmp_trk.hits += 1
            tmp_trk.no_losses = 0

    # Handle unmatched detections: spawn a new tracker for each.
    if len(unmatched_dets) > 0:
        for idx in unmatched_dets:
            z = z_box[idx]
            z = np.expand_dims(z, axis=0).T
            tmp_trk = tracker.Tracker()  # create a new tracker
            x = np.array([[z[0], 0, z[1], 0, z[2], 0, z[3], 0]]).T
            tmp_trk.x_state = x
            tmp_trk.predict_only()
            xx = tmp_trk.x_state
            xx = xx.T[0].tolist()
            xx = [xx[0], xx[2], xx[4], xx[6]]
            tmp_trk.box = xx
            try:
                tmp_trk.id = track_id_list.popleft()  # assign an ID for the tracker
                tracker_list.append(tmp_trk)
                x_box.append(xx)
            except IndexError:
                # ID pool exhausted: keep the tracker without a fresh ID.
                tracker_list.append(tmp_trk)
                x_box.append(xx)

    # Handle unmatched tracks: age them and coast on the prediction.
    if len(unmatched_trks) > 0:
        for trk_idx in unmatched_trks:
            tmp_trk = tracker_list[trk_idx]
            tmp_trk.no_losses += 1
            tmp_trk.predict_only()
            xx = tmp_trk.x_state
            xx = xx.T[0].tolist()
            xx = [xx[0], xx[2], xx[4], xx[6]]
            tmp_trk.box = xx
            x_box[trk_idx] = xx

    # The list of tracks to be annotated.
    good_tracker_list = []
    for trk in tracker_list:
        if ((trk.hits >= min_hits) and (trk.no_losses <= max_age)):
            good_tracker_list.append(trk)
            x_cv2 = trk.box
            if debug:
                print('updated box: ', x_cv2)
                print()
            # Draw the bounding boxes on the image.
            img, str2, str6 = helpers.draw_box_label(img, x_cv2)

    # Book keeping: recycle IDs of expired tracks, then drop them.
    deleted_tracks = filter(lambda x: x.no_losses > max_age, tracker_list)
    for trk in deleted_tracks:
        track_id_list.append(trk.id)
    tracker_list = [x for x in tracker_list if x.no_losses <= max_age]
    if debug:
        print('Ending tracker_list: ', len(tracker_list))
        print('Ending good tracker_list: ', len(good_tracker_list))
    return img, str1, stryao
# NOTE(review): this fragment contains an UNRESOLVED git merge conflict
# (markers `<<<<<<< HEAD`, `=======`, `>>>>>>> tanyinghui/master` below).
# The file cannot be parsed until one side is chosen; the enclosing
# function's `def` line lies outside this chunk, so only the fragment is
# annotated here.
            tmp_trk.predict_only()
            xx = tmp_trk.x_state
            xx = xx.T[0].tolist()
            xx =[xx[0], xx[2], xx[4], xx[6]]
            tmp_trk.box = xx
            x_box[trk_idx] = xx

    # The list of tracks to be annotated
    good_tracker_list = []
    for trk in tracker_list:
        if ((trk.hits >= min_hits) and (trk.no_losses <= max_age)):
            good_tracker_list.append(trk)
            x_cv2 = trk.box
<<<<<<< HEAD
            img = helpers.draw_box_label(img, x_cv2)  # Draw the bounding boxes on the
=======
            left, top, right, bottom = x_cv2[1], x_cv2[0], x_cv2[3], x_cv2[2]
            car = img[top:bottom, left:right]
            img = helpers.draw_box_label(img, trk.id, x_cv2)  # Draw the bounding boxes on the
>>>>>>> tanyinghui/master
            # images

    # Book keeping
    deleted_tracks = filter(lambda x: x.no_losses > max_age, tracker_list)
<<<<<<< HEAD
    for trk in deleted_tracks:
        track_id_list.append(trk.id)
=======
>>>>>>> tanyinghui/master
def pipeline(boxes, dims):
    '''
    Pipeline function for detection and tracking.

    Unlike the image-based variants, this one receives pre-computed
    detection boxes plus the frame dimensions, converts them to pixel
    coordinates, runs the usual associate/update/spawn/age cycle (keeping a
    per-track location_history), and draws confirmed tracks onto the global
    `next_image`.

    boxes: detection boxes to convert via helpers.box_array_to_pixels.
    dims: frame dimensions used for that conversion.
    Returns None; annotated output is written to the global `next_image`.

    Fix vs. previous revision: the debug branch drew on an undefined name
    `img` (this variant has no image parameter), raising NameError whenever
    debug was enabled — it now draws on `next_image`.
    '''
    global frame_count
    global tracker_list
    global max_age
    global min_hits
    global track_id_list
    global debug
    global next_image

    frame_count += 1
    #img_dim = (img.shape[1], img.shape[0])
    pixel_boxes = helpers.box_array_to_pixels(boxes, dims)
    z_box = pixel_boxes  #det.get_localization(img) # measurement
    if debug:
        print('Frame:', frame_count)
    x_box = []
    if debug:
        for i in range(len(z_box)):
            # FIX: was helpers.draw_box_label(img, ...) with `img` undefined.
            img1 = helpers.draw_box_label(next_image, z_box[i],
                                          box_color=(255, 0, 0))
            plt.imshow(img1)
            plt.show()
    if len(tracker_list) > 0:
        for trk in tracker_list:
            x_box.append(trk.box)

    matched, unmatched_dets, unmatched_trks \
        = assign_detections_to_trackers(x_box, z_box, iou_thrd=0.3)
    if debug:
        print('Detection: ', z_box)
        print('x_box: ', x_box)
        print('matched:', matched)
        print('unmatched_det:', unmatched_dets)
        print('unmatched_trks:', unmatched_trks)

    # Deal with matched detections: correct the tracker, log its history.
    if matched.size > 0:
        for trk_idx, det_idx in matched:
            z = z_box[det_idx]
            z = np.expand_dims(z, axis=0).T
            tmp_trk = tracker_list[trk_idx]
            tmp_trk.kalman_filter(z)
            xx = tmp_trk.x_state.T[0].tolist()
            xx = [xx[0], xx[2], xx[4], xx[6]]
            x_box[trk_idx] = xx
            print(xx)
            tmp_trk.box = xx
            tmp_trk.hits += 1
            print("NUM HITS:")
            print(tmp_trk.hits)
            #print(tmp_trk.location_history)
            #print(np.asarray([np.array(xx)]))
            #print("Now to concatenate")
            # Append this frame's box as a new row of the track's history.
            tmp_trk.location_history = np.concatenate(
                (tmp_trk.location_history, np.asarray([np.array(xx)])))
            #print(np.array(xx))
            print(tmp_trk.location_history)
            tmp_trk.no_losses = 0

    # Deal with unmatched detections: spawn a new tracker for each.
    if len(unmatched_dets) > 0:
        for idx in unmatched_dets:
            z = z_box[idx]
            print("UNMATCHED DETECTINOS")
            #print(z)
            z = np.expand_dims(z, axis=0).T
            #print(z)
            tmp_trk = tracker.Tracker()  # Create a new tracker
            x = np.array([[z[0], 0, z[1], 0, z[2], 0, z[3], 0]]).T
            #print("trouble x")
            #print(x)
            #print("end trouble x")
            tmp_trk.x_state = x
            tmp_trk.predict_only()
            xx = tmp_trk.x_state
            xx = xx.T[0].tolist()
            xx = [xx[0], xx[2], xx[4], xx[6]]
            tmp_trk.box = xx
            print(track_id_list)
            tmp_trk.id = track_id_list.popleft()  # assign an ID for the tracker
            tracker_list.append(tmp_trk)
            x_box.append(xx)
        print("END UNMATCHED DETECTIONS")

    # Deal with unmatched tracks: age them and coast on the prediction.
    if len(unmatched_trks) > 0:
        for trk_idx in unmatched_trks:
            tmp_trk = tracker_list[trk_idx]
            tmp_trk.no_losses += 1
            tmp_trk.predict_only()
            xx = tmp_trk.x_state
            xx = xx.T[0].tolist()
            xx = [xx[0], xx[2], xx[4], xx[6]]
            tmp_trk.box = xx
            x_box[trk_idx] = xx

    # The list of tracks to be annotated.
    good_tracker_list = []
    for trk in tracker_list:
        if ((trk.hits >= min_hits) and (trk.no_losses <= max_age)):
            good_tracker_list.append(trk)
            x_cv2 = trk.box
            if debug:
                print('updated box: ', x_cv2)
                print()
            #img = helpers.draw_box_label(img, x_cv2) # Draw the bounding boxes on the
            # Draw the confirmed boxes onto the shared output frame.
            next_image = helpers.draw_box_label(next_image, x_cv2)

    # Book keeping: recycle IDs of expired tracks, then drop them.
    deleted_tracks = filter(lambda x: x.no_losses > max_age, tracker_list)
    for trk in deleted_tracks:
        track_id_list.append(trk.id)
    tracker_list = [x for x in tracker_list if x.no_losses <= max_age]
    if debug:
        print('Ending tracker_list: ', len(tracker_list))
        print('Ending good tracker_list: ', len(good_tracker_list))
def pipeline(img):
    '''
    Pipeline function for detection and tracking (YOLO + license plates).

    Detects vehicles with YOLO, associates them to trackers by IoU, then
    tries to rescue each unmatched detection by appearance-feature matching
    against the unmatched trackers before spawning a new tracker. Confirmed
    tracks are NMS-filtered and drawn; every 5th frame a license-plate
    detector + OCR runs on each tracked vehicle crop and the results are
    drawn and written to disk. An FPS overlay is added.

    img: current video frame (numpy image array).
    Returns the annotated copy of the frame (img_vis).

    Fix vs. previous revision: in the feature-match rescue branch,
    `x_box[trk_idx] = xx` indexed with the leftover inner-loop variable
    instead of the selected tracker `tmp_idx`, corrupting the wrong entry
    of x_box — it now writes `x_box[tmp_idx]`.
    '''
    global frame_count
    global tracker_list
    global max_age
    global min_hits
    global track_id_list
    global debug
    global avg_fps

    frame_count += 1
    #print("")
    #print(frame_count)
    #print("")
    start = time.time()
    img_dim = (img.shape[1], img.shape[0])

    # YOLO detection for vehicle
    yolo_start = time.time()
    z_box = yolo_det.get_detected_boxes(img)
    #z_box_cpy= z_box
    yolo_end = time.time()
    # Lpd
    #print("Time taken for yolo detection is", yolo_end-yolo_start)
    track_start = time.time()
    if debug:
        print('Frame:', frame_count)
    x_box = []
    if debug:
        # Interactive preview of raw detections; 'e' aborts the run.
        for i in range(len(z_box)):
            img1 = helpers.draw_box_label(img, z_box[i],
                                          box_color=(255, 0, 0))
            cv2.imshow("frame", img1)
            k = cv2.waitKey(10)
            if k == ord('e'):
                cv2.destroyAllWindows()
                sys.exit(-1)
            # plt.show()
    if len(tracker_list) > 0:
        for trk in tracker_list:
            x_box.append(trk.box)

    matched, unmatched_dets, unmatched_trks \
        = assign_detections_to_trackers(x_box, z_box, iou_thrd=0.3)
    if debug:
        print('Detection: ', z_box)
        print('x_box: ', x_box)
        print('matched:', matched)
        print('unmatched_det:', unmatched_dets)
        print('unmatched_trks:', unmatched_trks)

    # Deal with matched detections: refresh appearance features, then
    # Kalman-correct the tracker.
    if matched.size > 0:
        for trk_idx, det_idx in matched:
            z = z_box[det_idx]
            tmp_trk = tracker_list[trk_idx]
            tmp_trk.features.append(extract_feature(img, z))
            z = np.expand_dims(z, axis=0).T
            tmp_trk.kalman_filter(z)
            xx = tmp_trk.x_state.T[0].tolist()
            xx = [xx[0], xx[2], xx[4], xx[6]]
            x_box[trk_idx] = xx
            tmp_trk.box = xx
            tmp_trk.hits += 1
            tmp_trk.no_losses = 0

    # Deal with unmatched detections: first try to re-attach them to an
    # unmatched tracker by appearance features, else spawn a new tracker.
    if len(unmatched_dets) > 0:
        for idx in unmatched_dets:
            z = z_box[idx]
            if len(unmatched_trks) > 0:
                min_score = 10000000
                tmp_idx = -1
                for trk_idx in unmatched_trks:
                    trk = tracker_list[trk_idx]
                    #print(len(trk.features))
                    if len(trk.features) == 0:
                        continue
                    score = trk.feature_match(
                        extract_feature(img, z))  ## find closest feature match
                    if score < min_score:
                        min_score = score
                        tmp_idx = trk_idx
                if min_score < feature_thresh and tmp_idx != -1:
                    z = np.expand_dims(z, axis=0).T
                    tmp_trk = tracker_list[tmp_idx]
                    tmp_trk.kalman_filter(z)
                    xx = tmp_trk.x_state.T[0].tolist()
                    xx = [xx[0], xx[2], xx[4], xx[6]]
                    # FIX: was x_box[trk_idx] (stale inner-loop index).
                    x_box[tmp_idx] = xx
                    tmp_trk.box = xx
                    tmp_trk.hits += 1
                    tmp_trk.no_losses = 0
                    continue
            #new_boxes.append(z)
            z = np.expand_dims(z, axis=0).T
            tmp_trk = tr.Tracker()  # Create a new tracker
            x = np.array([[z[0], 0, z[1], 0, z[2], 0, z[3], 0]]).T
            tmp_trk.x_state = x
            tmp_trk.predict_only()
            xx = tmp_trk.x_state
            xx = xx.T[0].tolist()
            xx = [xx[0], xx[2], xx[4], xx[6]]
            tmp_trk.box = xx
            tmp_trk.id = track_id_list.popleft()  # assign an ID for the tracker
            tracker_list.append(tmp_trk)
            x_box.append(xx)

    # Deal with unmatched tracks: age them and coast on the prediction.
    if len(unmatched_trks) > 0:
        for trk_idx in unmatched_trks:
            tmp_trk = tracker_list[trk_idx]
            tmp_trk.no_losses += 1
            tmp_trk.predict_only()
            xx = tmp_trk.x_state
            xx = xx.T[0].tolist()
            xx = [xx[0], xx[2], xx[4], xx[6]]
            tmp_trk.box = xx
            x_box[trk_idx] = xx

    # The list of tracks to be annotated; drawing happens on a copy so the
    # clean frame stays available for the plate-detector crops.
    img_vis = img.copy()
    good_tracker_list = []
    #print(img_dim)
    good_boxes = []
    for trk in tracker_list:
        if ((trk.hits >= min_hits) and (trk.no_losses <= max_age)):
            good_tracker_list.append(trk)
            good_boxes.append(trk.box)
    #for trk in good_tracker_list:
    # Suppress overlapping confirmed boxes before drawing.
    selected_ids = nms(torch.FloatTensor(np.array(good_boxes)),
                       torch.FloatTensor([1.0] * len(good_boxes)), 0.45)
    for idx in selected_ids:
        trk = good_tracker_list[idx]
        x_cv2 = trk.box
        idx = trk.id
        if debug:
            print('updated box: ', x_cv2)
            print()
        # Draw the bounding boxes on the images.
        img_vis = helpers.draw_box_label(img_vis, x_cv2, idx)
        # License-plate detection + OCR, only every 5th frame and only on
        # plausibly-sized, in-frame crops.
        if frame_count % 5 == 0:
            y1_temp, x1_temp, y2_temp, x2_temp = x_cv2
            w_temp = x2_temp - x1_temp
            h_temp = y2_temp - y1_temp
            if w_temp * h_temp < 400 or w_temp <= 0 or h_temp <= 0 or min(
                    x_cv2) < 0:
                continue
            plates = []
            #print(x_cv2)
            dt_start = time.time()
            Ivehicle = img[y1_temp:y2_temp, x1_temp:x2_temp]
            ratio = float(max(Ivehicle.shape[:2])) / min(Ivehicle.shape[:2])
            side = int(ratio * 288.)
            bname = 'frame{}_{}.png'.format(frame_count, idx)
            bound_dim = min(side + (side % (2**4)), size)
            # print "\t\tBound dim: %d, ratio: %f" % (bound_dim,ratio)
            #dt_plates_start = time.time()
            Llp, LlpImgs, _ = detect_lp(wpod_net, im2single(Ivehicle),
                                        bound_dim, 2**4, (240, 80),
                                        lp_threshold)
            if len(LlpImgs):
                plates = [Llp[0].pts]
                cv2.imwrite("%s/%s" % (detected_plates_dir, bname),
                            LlpImgs[0] * 255.)
                plate_string = _lpr.plates_ocr(LlpImgs[0] * 255.)
            for plate in plates:
                # Plate corners come back normalized to the crop; scale and
                # shift them into full-frame pixel coordinates.
                x1 = (plate[0][0] * w_temp + x1_temp).astype('int')
                y1 = (plate[1][0] * h_temp + y1_temp).astype('int')
                x2 = (plate[0][1] * w_temp + x1_temp).astype('int')
                y2 = (plate[1][1] * h_temp + y1_temp).astype('int')
                x3 = (plate[0][2] * w_temp + x1_temp).astype('int')
                y3 = (plate[1][2] * h_temp + y1_temp).astype('int')
                x4 = (plate[0][3] * w_temp + x1_temp).astype('int')
                y4 = (plate[1][3] * h_temp + y1_temp).astype('int')
                plate = np.array(
                    [[x1, y1], [x2, y2], [x3, y3], [x4, y4]], np.int32)
                plate = plate.reshape((-1, 1, 2))
                cv2.polylines(img_vis, [plate], True, (255, 0, 0), 4)
                cv2.putText(img_vis, plate_string, (x1, y1),
                            cv2.FONT_HERSHEY_SIMPLEX, 1.1, (0, 0, 255), 2)
            cv2.imwrite("%s/%s" % (detected_cars_dir, bname),
                        img_vis[y1_temp:y2_temp, x1_temp:x2_temp])

    track_end = time.time()
    # images
    # dt_start = time.time()
    print("Time taken to track the boxes is", track_end - track_start)
    end = time.time()
    fps = 1.0 / (end - start)
    #dt_fps = 1.0/(dt_dr+yolo_end-yolo_start)
    avg_fps += fps
    cv2.putText(img_vis, "FPS: {:.4f}".format(fps),
                (int(0.8 * img_dim[0]), 100), cv2.FONT_HERSHEY_SIMPLEX, 1.1,
                (255, 255, 0), 4)
    #cv2.putText(img_vis, "Detect FPS: {:.4f}".format(
    #    dt_fps), (10, 100), cv2.FONT_HERSHEY_SIMPLEX, 1.1, (255,255, 0), 4)

    # Book keeping: recycle IDs of tracks lost longer than feature_tp.
    deleted_tracks = filter(lambda x: x.no_losses > feature_tp, tracker_list)
    for trk in deleted_tracks:
        track_id_list.append(trk.id)
    tracker_list = [x for x in tracker_list if x.no_losses <= feature_tp]
    if debug:
        print('Ending tracker_list: ', len(tracker_list))
        print('Ending good tracker_list: ', len(good_tracker_list))
    return img_vis