def bgsub(vidfile_basename, threshold, quiet=False, drawBoxes=True):
    """Run background subtraction over a video and score it against ground truth.

    Args:
        vidfile_basename: video file name, resolved relative to VIDEO_DIR.
        threshold: variance threshold handed to BackgroundSubtractor.
        quiet: when True, suppress all display windows.
        drawBoxes: when True (and not quiet), run blob detection for display.

    Returns:
        (precision, recall) pixel-level scores accumulated over every frame
        that has a ground-truth segmentation image; each defaults to 1 when
        its denominator is empty.
    """
    operator = BackgroundSubtractor(2000, threshold, True)
    # Learn the background model before scoring any frames.
    operator.model_bg2(VIDEO_DIR + vidfile_basename)

    tp_t = fp_t = fn_t = p_t = n_t = 0
    video = cv2.VideoCapture(VIDEO_DIR + vidfile_basename)
    ret, frame = video.read()
    frame_num = 0
    while ret:
        mask = operator.apply(frame)
        mask = tools.morph_openclose(mask)
        # Keep only definite-foreground pixels (value 255) as a 0/1 mask.
        mask_binary = (mask == 255).astype(np.uint8)
        gt_filename = "{0}/{1}/{2}.jpg.seg.bmp".format(GT_IMG_DIR, vidfile_basename, frame_num)
        if os.path.exists(gt_filename):
            if not quiet:
                cv2.imshow("Ground truth", cv2.imread(gt_filename) * 255)
            tp, fp, fn = compare_response_to_truth(mask_binary, gt_filename)
            pos_detected, neg_detected = class_counter.count_posneg(mask_binary)
            tp_t += tp
            fp_t += fp
            fn_t += fn
            p_t += pos_detected
            n_t += neg_detected
        if not quiet:
            mask = ((mask == 255) * 255).astype(np.uint8)
            cv2.imshow("Mask", mask)
            if drawBoxes:
                blob_detect(mask, frame)
            else:
                cv2.imshow("Frame", frame)
        ret, frame = video.read()
        frame_num += 1
        # BUG FIX: was `handle_keys() is 1` -- identity comparison with an
        # int literal relies on CPython small-int interning; use equality.
        if handle_keys() == 1:
            break
    # BUG FIX: release the capture handle instead of leaking it.
    video.release()

    # 0/0 yields NaN under errstate(invalid='ignore'); an empty denominator
    # is treated as a perfect score below.
    with np.errstate(invalid='ignore'):
        precision = np.float64(tp_t) / (tp_t + fp_t)
        recall = np.float64(tp_t) / (tp_t + fn_t)
    if np.isinf(precision) or np.isnan(precision):
        precision = 1
    if np.isinf(recall) or np.isnan(recall):
        recall = 1
    return precision, recall
def run(self, as_script=True):
    """Track detected blobs across the video using the track-management pipeline.

    Reads frames from self.cam until the stream ends; each frame is
    segmented, blobs are detected from contours, and the track bookkeeping
    methods (predict/assign/update/delete/create) are invoked. The capture
    is released when the video is exhausted.

    Args:
        as_script: kept for interface compatibility; when True the loop
            simply continues to the next frame (no per-frame yielding).
    """
    if self.invisible:
        cv2.namedWindow("Control")
    prev_gray = None
    prev_points = []
    self.nextTrackID = 0
    while True:
        # Get frame
        ret, frame = self.cam.read()
        if not ret:
            break
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Segment
        fg_mask = self.operator.apply(frame)
        fg_mask = ((fg_mask == 255) * 255).astype(np.uint8)
        fg_mask = morph_openclose(fg_mask)

        # Detect blobs. BUG FIX: the old check `"3.0." in cv2.__version__`
        # missed OpenCV 3.1+, where findContours still returns 3 values and
        # the 2-tuple unpack below would crash; compare the major version
        # instead (consistent with the other run() variant in this file).
        major_version = int(cv2.__version__.split(".")[0])
        if major_version == 3:
            _, contours, _ = cv2.findContours(fg_mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1)
        else:
            contours, _ = cv2.findContours(fg_mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1)
        areas, detections = drawing.draw_min_ellipse(contours, frame, MIN_AREA, MAX_AREA, draw=False)
        self.areas += areas

        # Track
        self.predictNewLocations(frame)
        assignments, unmatchedTracks, unmatchedDetections = self.assignTracks(detections, frame)
        self.updateMatchedTracks(assignments, detections)
        self.updateUnmatchedTracks(unmatchedTracks)
        self.deleteLostTracks()
        self.createNewTracks(detections, unmatchedDetections)
        self.showTracks(frame)
        self.checkTrackCrosses()

        # Store frame and go to next
        prev_gray = frame_gray
        prev_points = detections
        self.frame_idx += 1

        if not self.invisible:
            self.draw_overlays(frame, fg_mask)
            cv2.imshow('Tracking', frame)
            cv2.imshow("Mask", fg_mask)
            delay = FRAME_DELAY
            if handle_keys(delay) == 1:
                break

        # Should we continue running or yield some information about the current frame
        if as_script:
            continue
        else:
            pass
    self.cam.release()
def run(self, as_script=True):
    """Count ROI arrivals/departures by tracking foreground points with optical flow.

    Each frame: segment the foreground, clean it up morphologically, then
    run Lucas-Kanade optical flow from the previous frame's ellipse centers
    and test every motion vector against the ROI to count crossings.

    Args:
        as_script: kept for interface compatibility; when True the loop
            simply continues to the next frame (no per-frame yielding).

    Returns:
        self.areas, the blob areas accumulated over the whole video.
    """
    if self.invisible:
        cv2.namedWindow("Control")
    prev_gray = None
    prev_points = None
    while True:
        ret, frame = self.cam.read()
        if not ret:
            break
        fg_mask = self.operator.apply(frame)
        fg_mask = ((fg_mask == 255) * 255).astype(np.uint8)
        fg_mask = morph_openclose(fg_mask)
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        if prev_gray is not None and prev_points is not None:
            p0 = np.float32([point for point in prev_points]).reshape(-1, 1, 2)
            if drawing.draw_prev_points(frame, prev_points):
                # Blank background pixels so flow only follows foreground.
                frame_gray[fg_mask == 0] = 255
                p1, st, err = cv2.calcOpticalFlowPyrLK(
                    prev_gray, frame_gray, p0, None, **lk_params)
                for p_i, p_f in zip(p0.reshape(-1, 2), p1.reshape(-1, 2)):
                    result = cross(ROI, ROI_W, ROI_H, p_i, p_f)
                    # BUG FIX: was `result is 1` / `result is -1` -- identity
                    # comparison with int literals relies on CPython interning
                    # and fails outright for numpy ints; use equality.
                    if result == 1:
                        self.arrivals += 1
                        if not self.quiet:
                            print("Arrival")
                    elif result == -1:
                        self.departures += 1
                        if not self.quiet:
                            print("Departure")
                    if self.drawTracks:
                        drawing.draw_line(frame, tuple(p_i), tuple(p_f))
        prev_gray = frame_gray
        contours, hier = drawing.draw_contours(frame, fg_mask)
        areas, prev_points = drawing.draw_min_ellipse(
            contours, frame, MIN_AREA, MAX_AREA)
        self.areas += areas
        self.frame_idx += 1
        if not self.invisible:
            self.draw_overlays(frame, fg_mask)
            cv2.imshow("Fas", frame_gray)
            cv2.imshow('Tracking', frame)
            cv2.imshow("Mask", fg_mask)
            delay = 33
        else:
            delay = 1
        if handle_keys(delay) == 1:
            break
        # Should we continue running or yield some information about the current frame
        if as_script:
            continue
        else:
            pass
    return self.areas
def run(self, as_script=True):
    """Count ROI arrivals/departures by tracking foreground points with optical flow.

    Per frame: segment the foreground, apply morphological open/close, then
    run Lucas-Kanade optical flow from the previous frame's ellipse centers
    and classify each motion vector against the ROI as an arrival or
    departure.

    Args:
        as_script: kept for interface compatibility; when True the loop
            simply continues to the next frame (no per-frame yielding).

    Returns:
        self.areas, the blob areas accumulated over the whole video.
    """
    if self.invisible:
        cv2.namedWindow("Control")
    prev_gray = None
    prev_points = None
    while True:
        ret, frame = self.cam.read()
        if not ret:
            break
        fg_mask = self.operator.apply(frame)
        fg_mask = ((fg_mask == 255) * 255).astype(np.uint8)
        fg_mask = morph_openclose(fg_mask)
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        if prev_gray is not None and prev_points is not None:
            p0 = np.float32([point for point in prev_points]).reshape(-1, 1, 2)
            if drawing.draw_prev_points(frame, prev_points):
                # Blank background pixels so flow only follows foreground.
                frame_gray[fg_mask == 0] = 255
                p1, st, err = cv2.calcOpticalFlowPyrLK(prev_gray, frame_gray, p0, None, **lk_params)
                for p_i, p_f in zip(p0.reshape(-1, 2), p1.reshape(-1, 2)):
                    result = cross(ROI, ROI_W, ROI_H, p_i, p_f)
                    # BUG FIX: replaced `result is 1` / `result is -1` with
                    # equality -- `is` on int literals depends on CPython's
                    # small-int cache and is never true for numpy ints.
                    if result == 1:
                        self.arrivals += 1
                        if not self.quiet:
                            print("Arrival")
                    elif result == -1:
                        self.departures += 1
                        if not self.quiet:
                            print("Departure")
                    if self.drawTracks:
                        drawing.draw_line(frame, tuple(p_i), tuple(p_f))
        prev_gray = frame_gray
        contours, hier = drawing.draw_contours(frame, fg_mask)
        areas, prev_points = drawing.draw_min_ellipse(contours, frame, MIN_AREA, MAX_AREA)
        self.areas += areas
        self.frame_idx += 1
        if not self.invisible:
            self.draw_overlays(frame, fg_mask)
            cv2.imshow("Fas", frame_gray)
            cv2.imshow('Tracking', frame)
            cv2.imshow("Mask", fg_mask)
            delay = 33
        else:
            delay = 1
        if handle_keys(delay) == 1:
            break
        # Should we continue running or yield some information about the current frame
        if as_script:
            continue
        else:
            pass
    return self.areas
def run(self, as_script=True):
    """Process every frame of the video: segment, detect blobs, and track them.

    Frames are pulled from self.cam until the stream ends. For each frame
    the foreground is segmented and cleaned up, blob detections are taken
    from the contours, and the per-frame track bookkeeping methods are
    invoked. The capture is released once the stream is exhausted.

    Args:
        as_script: when True, the loop simply advances to the next frame.
    """
    if self.invisible:
        cv2.namedWindow("Control")

    prev_gray = None
    prev_points = []
    self.nextTrackID = 0

    while True:
        grabbed, frame = self.cam.read()
        if not grabbed:
            break

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Foreground segmentation + morphological cleanup.
        mask = self.operator.apply(frame)
        mask = ((mask == 255) * 255).astype(np.uint8)
        mask = morph_openclose(mask)

        # OpenCV 3.x findContours returns (image, contours, hierarchy);
        # other major versions return (contours, hierarchy).
        opencv_major = int(re.findall(r'\d+', cv2.__version__)[0])
        found = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1)
        contours = found[1] if opencv_major == 3 else found[0]

        frame_areas, detections = drawing.draw_min_ellipse(contours, frame, MIN_AREA, MAX_AREA, draw=False)
        self.areas += frame_areas

        # Per-frame track bookkeeping.
        self.predictNewLocations(frame)
        matched, lost_tracks, new_detections = self.assignTracks(detections, frame)
        self.updateMatchedTracks(matched, detections)
        self.updateUnmatchedTracks(lost_tracks)
        self.deleteLostTracks()
        self.createNewTracks(detections, new_detections)
        self.showTracks(frame)
        self.checkTrackCrosses()

        # Remember this frame's data and advance.
        prev_gray = gray
        prev_points = detections
        self.frame_idx += 1

        if not self.invisible:
            self.draw_overlays(frame, mask)
            cv2.imshow('Tracking', frame)
            cv2.imshow("Mask", mask)
            if handle_keys(FRAME_DELAY) == 1:
                break

        # Per-frame yield point; as a script we just keep looping.
        if not as_script:
            pass

    self.cam.release()