Example #1
    def __init__(self):
        self.birdDetector = BirdDetector(
            frameFilter=FrameFilter(),
            backgroundSubtractor=BackgroundSubtractor(),
            blobDetector=BlobDetector())
        self.activeStageIndex = -1  # By default show last stage
        self.profiler = FPSProfiler()
Example #2
    def __init__(self,
                 video_src,
                 quiet=True,
                 invisible=False,
                 draw_contours=True,
                 bgsub_thresh=64):
        self.quiet = quiet
        self.invisible = invisible
        self.drawContours = draw_contours
        self.threshold = bgsub_thresh
        self.drawTracks = True
        self.drawFrameNum = False

        self.areas = []

        # Learn the bg
        self.operator = BackgroundSubtractor(2000, self.threshold, True)
        self.operator.model_bg2(video_src)

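        # Tracking parameters and per-frame state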
        self.track_len = 10
        self.detect_interval = 5
        self.tracks = []
        self.cam = cv2.VideoCapture(video_src)
        self.frame_idx = 0
        self.arrivals = self.departures = 0
Example #3
    def __init__(self, video_src="", quiet=False, invisible=False, draw_contours=False,
                 bgsub_thresh=64, drawTracks=False, drawFrameNum=False, drawBoundary=False):
        self.quiet        = quiet
        self.invisible    = invisible
        self.drawContours = draw_contours
        self.threshold    = bgsub_thresh
        self.drawTracks   = drawTracks
        self.drawFrameNum = drawFrameNum
        self.drawBoundary = drawBoundary

        self.areas = []

        # Learn the bg
        self.operator = BackgroundSubtractor(2000, self.threshold, True)
        self.operator.model_bg2(video_src)

        self.cam = cv2.VideoCapture(video_src)

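        # Track-management thresholds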
        self.maxTimeInvisible  = 0
        self.trackAgeThreshold = 4

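        # Active/lost track lists, frame counter, and arrival/departure counters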
        self.tracks     = []
        self.lostTracks = []
        self.frame_idx  = 0
        self.arrivals   = 0
        self.departures = 0
Example #4
def bgsub(vidfile_basename, threshold, quiet=False, drawBoxes=True):
    operator = BackgroundSubtractor(2000, threshold, True)
    # Learn the bg
    operator.model_bg2(VIDEO_DIR + vidfile_basename)

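    # Running totals: true positives, false positives, false negatives,
    # and detected foreground/background pixel counts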
    tp_t = fp_t = fn_t = p_t = n_t = 0

    video = cv2.VideoCapture(VIDEO_DIR + vidfile_basename)
    ret, frame = video.read()
    frame_num = 0
    while ret:
        mask = operator.apply(frame)
        mask = tools.morph_openclose(mask)
        mask_binary = (mask == 255).astype(np.uint8)

        gt_filename = "{0}/{1}/{2}.jpg.seg.bmp".format(GT_IMG_DIR, vidfile_basename, frame_num)
        if os.path.exists(gt_filename):
            if not quiet:
                cv2.imshow("Ground truth", cv2.imread(gt_filename) * 255)
            tp, fp, fn = compare_response_to_truth(mask_binary, gt_filename)
            # print("True Pos: {0}\nFalse Pos: {1}".format(tp, fp))
            pos_detected, neg_detected = class_counter.count_posneg(mask_binary)
            tp_t += tp
            fp_t += fp
            fn_t += fn
            p_t += pos_detected
            n_t += neg_detected
            # print("Foreground pixels: {0}\nBackground pixels: {1}".format(pos_detected, neg_detected))

        if not quiet:
            mask = ((mask == 255) * 255).astype(np.uint8)
            cv2.imshow("Mask", mask)
            if drawBoxes:
                blob_detect(mask, frame)
            else:
                cv2.imshow("Frame", frame)
                
        ret, frame = video.read()
        frame_num += 1
        if handle_keys() == 1: break

    with np.errstate(invalid='ignore'):
        precision = np.float64(tp_t) / (tp_t + fp_t)
        recall = np.float64(tp_t) / (tp_t + fn_t)
    if np.isinf(precision) or np.isnan(precision):
        precision = 1
    if np.isinf(recall) or np.isnan(recall):
        recall = 1
    return precision, recall
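
# Usage sketch (an assumption, not part of the original source): sweep the
# background-subtraction threshold and collect precision/recall pairs from
# bgsub(). The video basename and threshold values below are placeholders.
def sweep_thresholds(vidfile_basename="example_video.avi"):
    results = []
    for threshold in (16, 32, 64, 128):
        precision, recall = bgsub(vidfile_basename, threshold, quiet=True, drawBoxes=False)
        results.append((threshold, precision, recall))
    return results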
Example #5
    def __init__(self, args, main_out_vid_name="", with_video_output=True):
        super().__init__(args, main_out_vid_name, with_video_output)
        args.datapath = self.datapath
        self.background_subtractor = BackgroundSubtractor(args)