Example #1
    def __init__(self,
                 video_src,
                 quiet=True,
                 invisible=False,
                 draw_contours=True,
                 bgsub_thresh=64):
        self.quiet = quiet
        self.invisible = invisible
        self.drawContours = draw_contours
        self.threshold = bgsub_thresh
        self.drawTracks = True
        self.drawFrameNum = False

        self.areas = []

        # Learn the bg
        self.operator = BackgroundSubtractor(2000, self.threshold, True)
        self.operator.model_bg2(video_src)

        self.track_len = 10
        self.detect_interval = 5
        self.tracks = []
        self.cam = cv2.VideoCapture(video_src)
        self.frame_idx = 0
        self.arrivals = self.departures = 0
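
This constructor (from an `App`-style tracker, as the later examples show in full) pre-trains a background model with `model_bg2` before opening the capture for the tracking pass. A minimal usage sketch, assuming the enclosing class is named `App` and that the file name is a valid video path:

# Hypothetical usage; "hive_entrance.mp4" is an illustrative path.
app = App("hive_entrance.mp4", quiet=False, bgsub_thresh=64)
app.run()
print("Arrivals:", app.arrivals, "Departures:", app.departures)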
Example #2
    def __init__(self, video_src="", quiet=False, invisible=False, draw_contours=False,
                 bgsub_thresh=64, drawTracks=False, drawFrameNum=False, drawBoundary=False):
        self.quiet        = quiet
        self.invisible    = invisible
        self.drawContours = draw_contours
        self.threshold    = bgsub_thresh
        self.drawTracks   = drawTracks
        self.drawFrameNum = drawFrameNum
        self.drawBoundary = drawBoundary

        self.areas = []

        # Learn the bg
        self.operator = BackgroundSubtractor(2000, self.threshold, True)
        self.operator.model_bg2(video_src)

        self.cam = cv2.VideoCapture(video_src)

        self.maxTimeInvisible  = 0
        self.trackAgeThreshold = 4

        self.tracks     = []
        self.lostTracks = []
        self.frame_idx  = 0
        self.arrivals   = 0
        self.departures = 0
Example #3
class BaseVideoBackgroundSubtractor(VideoProcessor):
    def __init__(self, args, main_out_vid_name="", with_video_output=True):
        super().__init__(args, main_out_vid_name, with_video_output)
        args.datapath = self.datapath
        self.background_subtractor = BackgroundSubtractor(args)

    def initialize(self, verbose=True):
        start = self.sampling_interval_start_frame
        if start < 0:
            return
        end = self.sampling_interval_end_frame
        last_frame = self.get_last_frame()

        if end <= start:
            raise ValueError("Sampling interval end frame (currently set to {:d}) should be greater than the " +
                             "sampling interval start frame (currently set to {:d}).".format(end, start))

        if start > last_frame or end > last_frame:
            raise ValueError("The sampling interval start & end frame (currently set to {:d} and {:d}, " +
                             "respectively) should be within [0,{:d}] as dictated by length of video {:s} " +
                             "(and global offset, if present)."
                             .format(start, end, last_frame, self.in_video))

        max_sampling_duration_frames = int(self.sampling_interval * (self.num_samples - 1) / 1000 * self.fps) + 1

        max_end = start + max_sampling_duration_frames - 1
        if end > max_end:
            print(("Notice: sampling_interval_end_frame is set to {0:d}, which is beyond the limit imposed by " +
                   "sampling interval ({1:f}), fps {2:.2f}, and number of samples ({3:d}). " +
                   "Changing it to {4:d} to save time.")
                  .format(self.sampling_interval_end_frame, self.sampling_interval, self.fps, self.num_samples, max_end))
            end = max_end

        if verbose:
            print("Initializing from frame {:d} to frame {:d}...".format(start, end))

        self.go_to_frame(start)
        start_time = time.time()
        total_frames = end - start + 1
        fc = 1
        for i_frame in range(start, end + 1):
            frame = self.cap.read()[1]
            # apply preliminary mask if at all present
            # build up the background model
            self.background_subtractor.pretrain(frame)
            if not self.no_progress_bar:
                VideoProcessor.update_progress(fc / total_frames, start_time)
            fc += 1
        sys.stdout.write("\n")  # terminate progress bar

        self.reload_video()
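
The `max_sampling_duration_frames` formula above converts the sampling interval (milliseconds between samples) and the sample count into an upper bound on frames that initialization can usefully consume. A quick worked sketch with illustrative values:

# Worked example of the frame-budget formula (all values illustrative).
sampling_interval = 500.0  # ms between samples
num_samples = 10
fps = 30.0
# 500 ms at 30 fps is 15 frames per sample; 9 gaps * 15 frames + 1 = 136
max_sampling_duration_frames = int(sampling_interval * (num_samples - 1) / 1000 * fps) + 1
print(max_sampling_duration_frames)  # 136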
Example #4
def bgsub(vidfile_basename, threshold, quiet=False, drawBoxes=True):
    operator = BackgroundSubtractor(2000, threshold, True)
    # Learn the bg
    operator.model_bg2(VIDEO_DIR + vidfile_basename)

    tp_t = fp_t = fn_t = p_t = n_t = 0

    video = cv2.VideoCapture(VIDEO_DIR + vidfile_basename)
    ret, frame = video.read()
    frame_num = 0
    while ret:
        mask = operator.apply(frame)
        mask = tools.morph_openclose(mask)
        mask_binary = (mask == 255).astype(np.uint8)

        gt_filename = "{0}/{1}/{2}.jpg.seg.bmp".format(GT_IMG_DIR, vidfile_basename, frame_num)
        if os.path.exists(gt_filename):
            if not quiet:
                cv2.imshow("Ground truth", cv2.imread(gt_filename) * 255)
            tp, fp, fn = compare_response_to_truth(mask_binary, gt_filename)
            # print("True Pos: {0}\nFalse Pos: {1}".format(tp, fp))
            pos_detected, neg_detected = class_counter.count_posneg(mask_binary)
            tp_t += tp
            fp_t += fp
            fn_t += fn
            p_t += pos_detected
            n_t += neg_detected
            # print("Foreground pixels: {0}\nBackground pixels: {1}".format(pos_detected, neg_detected))

        if not quiet:
            mask = ((mask == 255) * 255).astype(np.uint8)
            cv2.imshow("Mask", mask)
            if drawBoxes:
                blob_detect(mask, frame)
            else:
                cv2.imshow("Frame", frame)
                
        ret, frame = video.read()
        frame_num += 1
        if handle_keys() == 1: break

    with np.errstate(invalid='ignore'):
        precision = np.float64(tp_t) / (tp_t + fp_t)
        recall = np.float64(tp_t) / (tp_t + fn_t)
    if np.isinf(precision) or np.isnan(precision):
        precision = 1
    if np.isinf(recall) or np.isnan(recall):
        recall = 1
    return precision, recall
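
Since `bgsub` returns per-video precision and recall against the ground-truth segmentations, a caller can fold them into an F1 score. A hypothetical driver (the basename is illustrative):

# Hypothetical driver; assumes "hive1.avi" exists under VIDEO_DIR.
precision, recall = bgsub("hive1.avi", threshold=64, quiet=True)
f1 = 2 * precision * recall / (precision + recall)
print("P={:.3f}  R={:.3f}  F1={:.3f}".format(precision, recall, f1))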
Example #5
    def __init__(self, video_src="", quiet=False, invisible=False, draw_contours=False, 
                 bgsub_thresh=64, drawTracks=False, drawFrameNum=False, drawBoundary=False):
        self.quiet = quiet
        self.invisible = invisible
        self.drawContours = draw_contours
        self.threshold = bgsub_thresh
        self.drawTracks = drawTracks
        self.drawFrameNum = drawFrameNum
        self.drawBoundary = drawBoundary

        self.areas = []

        # Learn the bg
        self.operator = BackgroundSubtractor(2000, self.threshold, True)
        self.operator.model_bg2(video_src)

        self.cam = cv2.VideoCapture(video_src)
        
        self.maxTimeInvisible = 0
        self.trackAgeThreshold = 4

        self.tracks = []
        self.lostTracks = []
        self.frame_idx = 0
        self.arrivals = self.departures = 0
Example #6
    def __init__(self):
        self.birdDetector = BirdDetector(
            frameFilter=FrameFilter(),
            backgroundSubtractor=BackgroundSubtractor(),
            blobDetector=BlobDetector())
        self.activeStageIndex = -1  # By default show last stage
        self.profiler = FPSProfiler()
Example #7
    def make_parser(help_string):
        parser = VideoProcessor.make_parser(help_string, with_output=False)
        BackgroundSubtractor.prep_parser(parser)
        parser.add_argument("-cpu", "--caffe_cpu", action="store_true", help="Use Caffe in CPU mode.", default=False)
        parser.add_argument("-od", "--output_datafile", default=None)
        parser.add_argument("-bc", "--boundary_check", action="store_true",
                            help="Whether to mark a frame as 'subject out-of-view' when the" +
                                 " subject's bounding box intersects with the frame's bounding box.")
        parser.add_argument("-v", "--vgg_model_path", type=str, default=None,
                            help="Path to the VGG model directory.")
        parser.add_argument("-vm", "--vgg_model_filename", type=str, default="VGG_ILSVRC_16_layers_deploy.prototxt",
                            help="Filename of the VGG deploy prototxt.")
        parser.add_argument("-vp", "--vgg_pretrained_filename", type=str, default="VGG_ILSVRC_16_layers.caffemodel",
                            help="Filename of the pretrained VGG weights (caffemodel).")
        parser.add_argument("-aug", "--augment_file", action="store_true",
                            help="Augment existing file instead of overwriting " +
                                 "(useful when not all features are collected)", default=False)
        parser.add_argument("-nv", "--no_vgg", action="store_true",
                            help="Skip actual VGG feature extraction.", default=False)
        return parser
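
Assuming `make_parser` is exposed as a static method returning a standard `argparse.ArgumentParser` (as `VideoProcessor.make_parser` suggests), it would be driven roughly like this; `FeatureExtractor` is a hypothetical name for the enclosing class:

# Sketch only; class name and argument values are illustrative.
parser = FeatureExtractor.make_parser("Extract VGG features after background subtraction.")
args = parser.parse_args(["-cpu", "-v", "/models/vgg16"])
print(args.caffe_cpu, args.vgg_model_path)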
Example #8
    def __init__(self, video_src, quiet=True, invisible=False, draw_contours=True, 
                 bgsub_thresh=64):
        self.quiet = quiet
        self.invisible = invisible
        self.drawContours = draw_contours
        self.threshold = bgsub_thresh
        self.drawTracks = True
        self.drawFrameNum = False

        self.areas = []

        # Learn the bg
        self.operator = BackgroundSubtractor(2000, self.threshold, True)
        self.operator.model_bg2(video_src)

        self.track_len = 10
        self.detect_interval = 5
        self.tracks = []
        self.cam = cv2.VideoCapture(video_src)
        self.frame_idx = 0
        self.arrivals = self.departures = 0
Example #9
    def make_parser(help_string):
        parser = VideoProcessor.make_parser(help_string)
        parser.add_argument("-mo", "--mask_output_video", default="")
        BackgroundSubtractor.prep_parser(parser)
        return parser
Example #10
    def __init__(self, args, main_out_vid_name="", with_video_output=True):
        super().__init__(args, main_out_vid_name, with_video_output)
        args.datapath = self.datapath
        self.background_subtractor = BackgroundSubtractor(args)
Example #11
class App:
    def __init__(self, video_src="", quiet=False, invisible=False, draw_contours=False, 
                 bgsub_thresh=64, drawTracks=False, drawFrameNum=False, drawBoundary=False):
        self.quiet = quiet
        self.invisible = invisible
        self.drawContours = draw_contours
        self.threshold = bgsub_thresh
        self.drawTracks = drawTracks
        self.drawFrameNum = drawFrameNum
        self.drawBoundary = drawBoundary

        self.areas = []

        # Learn the bg
        self.operator = BackgroundSubtractor(2000, self.threshold, True)
        self.operator.model_bg2(video_src)

        self.cam = cv2.VideoCapture(video_src)
        
        self.maxTimeInvisible = 0
        self.trackAgeThreshold = 4

        self.tracks = []
        self.lostTracks = []
        self.frame_idx = 0
        self.arrivals = self.departures = 0

    def run(self, as_script=True):
        if self.invisible:
            cv2.namedWindow("Control")

        prev_gray = None
        prev_points = []
        self.nextTrackID = 0
        
        while True:
            # Get frame
            ret, frame = self.cam.read()
            if not ret:
                break
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            
            # Segment
            fg_mask = self.operator.apply(frame)
            fg_mask = ((fg_mask == 255) * 255).astype(np.uint8)
            fg_mask = morph_openclose(fg_mask)
            
            # Detect blobs
            if "3.0." in cv2.__version__:
                _, contours, _ = cv2.findContours((fg_mask.copy()), cv2.RETR_EXTERNAL, 
                    cv2.CHAIN_APPROX_TC89_L1)
            else:
                contours, _ = cv2.findContours((fg_mask.copy()), cv2.RETR_EXTERNAL, 
                    cv2.CHAIN_APPROX_TC89_L1)
            areas, detections = drawing.draw_min_ellipse(contours, frame, MIN_AREA, MAX_AREA, draw=False)
            self.areas += areas

            # Track
            self.predictNewLocations(frame)
            assignments, unmatchedTracks, unmatchedDetections = self.assignTracks(detections, frame)
            self.updateMatchedTracks(assignments, detections)
            self.updateUnmatchedTracks(unmatchedTracks)
            self.deleteLostTracks()
            self.createNewTracks(detections, unmatchedDetections)
            self.showTracks(frame)
            # self.showLostTracks(frame)
            self.checkTrackCrosses()

            # Store frame and go to next
            prev_gray = frame_gray
            prev_points = detections
            self.frame_idx += 1
            if not self.invisible:
                self.draw_overlays(frame, fg_mask)
                cv2.imshow('Tracking', frame)
                cv2.imshow("Mask", fg_mask)
                delay = FRAME_DELAY
                if handle_keys(delay) == 1:
                    break
            # else:
            #     if handle_keys(delay) == 1:
            #         break

            # Should we continue running or yield some information about the current frame
            if as_script: continue
            else: pass
        # After the video, examine tracks
        # self.checkLostTrackCrosses()
        self.cam.release()

    def deleteLostTracks(self):
        newTracks = []
        tracksLost = 0
        for track in self.tracks:
            # Fraction of the track's age during which it was visible
            visibility = float(track.totalVisibleCount) / track.age

            # Determine lost tracks
            if not ((track.age < self.trackAgeThreshold and visibility < .6) or
                    (track.timeInvisible > self.maxTimeInvisible)):
                newTracks.append(track)
            else:
                self.lostTracks.append(track)
                tracksLost += 1
        # print("Tracks lost", tracksLost)        
        self.tracks = newTracks

    def createNewTracks(self, detections, unmatchedDetections):
        for detectionIndex in unmatchedDetections:
            detection = detections[detectionIndex]
            array_detection = np.array(detection, np.float32)
            # Create a Kalman filter for this detection (4 state vars, 2 measured)
            kf = cv2.KalmanFilter(4, 2)
            kf.measurementMatrix = MEASUREMENT_MATRIX
            kf.transitionMatrix = TRANSITION_MATRIX
            # kf.processNoiseCov = PROCESS_NOISE_COV

            # Create the new track
            newTrack = Track(self.nextTrackID, kf)
            newTrack.update(array_detection)
            newTrack.locationHistory.append(detection)
            self.tracks.append(newTrack)
            self.nextTrackID += 1

    def updateMatchedTracks(self, assignments, detections):
        for assignment in assignments:
            trackIndex = assignment.trackIndex
            detectionIndex = assignment.detectionIndex
            detection = detections[detectionIndex]
            array_detection = np.array(detection, np.float32)
            track = self.tracks[trackIndex]

            track.update(array_detection)

            # Update track
            track.age += 1
            track.totalVisibleCount += 1
            track.timeInvisible = 0
            track.locationHistory.append(detection)

    def updateUnmatchedTracks(self, unmatchedTracks):
        for trackIndex in unmatchedTracks:
            tr = self.tracks[trackIndex]
            tr.age += 1
            tr.timeInvisible += 1

    def assignTracks(self, detections, frame):
        """ Returns assignments, unmatchedTracks, unmatchedDetections """
        if len(self.tracks) == 0:
            # There are no tracks, all detections are unmatched
            unmatchedDetections = range(len(detections))
            return [], [], unmatchedDetections
        elif len(detections) == 0:
            # There are no detections, all tracks are unmatched
            unmatchedTracks = range(len(self.tracks))
            return [], unmatchedTracks, []
        else:
            costMatrix = np.zeros((len(self.tracks), len(detections)))
            for i, track in enumerate(self.tracks):
                x1, y1 = track.getPredictedXY()
                for j, (x2, y2) in enumerate(detections):
                    # cv2.line(frame, (x1, y1), (x2, y2), (255, 0, 0))
                    costMatrix[i, j] = np.sqrt( (x1 - x2)**2 + (y1 - y2)**2 )
            return tools.assignment(costMatrix)

    def predictNewLocations(self, frame):
        for track in self.tracks:
            track.predict(frame)

    def showTracks(self, frame):
        if self.drawTracks:
            for track in self.tracks:
                track.drawTrack(frame)

    def showLostTracks(self, frame):
        for track in self.lostTracks:
            loc = track.locationHistory[-1]
            cv2.circle(frame, loc, 2, color=(0,0,255), thickness=-1)

    def checkTrackCrosses(self):
        for track in self.tracks:
            result = track.checkCrossLastTwo(ROI, ROI_W, ROI_H)
            if result == 1:
                self.arrivals += 1
                # print("Arrival")
            elif result == -1:
                self.departures += 1
                # print("Departure")

    def checkLostTrackCrosses(self):
        self.lostTracks += self.tracks
        for track in self.lostTracks:
            result = track.checkCross()
            if result == 1:
                self.arrivals += 1
                # print("Arrival")
            elif result == -1:
                self.departures += 1
                # print("Departure")

    def draw_overlays(self, frame, fg_mask):
        if self.drawBoundary:
            drawing.draw_rectangle(frame, ROI, (ROI[0]+ROI_W, ROI[1]+ROI_H))
        if self.drawFrameNum:
            drawing.draw_frame_num(frame, self.frame_idx)
        if self.drawContours:
            pass
            # drawing.draw_contours(frame, fg_mask)

    def openNewVideo(self, video_src):
        self.cam.release()
        self.cam = cv2.VideoCapture(video_src)
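
`tools.assignment` is not shown in these examples; its contract here is to turn the Euclidean cost matrix into (assignments, unmatchedTracks, unmatchedDetections). One conventional way to implement that contract is the Hungarian algorithm via SciPy. A sketch under that assumption (the gating distance and the tuple-based assignment records are illustrative; the original code reads .trackIndex/.detectionIndex attributes):

import numpy as np
from scipy.optimize import linear_sum_assignment

def assignment(cost_matrix, max_cost=50.0):
    # Solve the optimal track/detection matching, then reject pairs
    # whose cost exceeds the gating distance.
    rows, cols = linear_sum_assignment(cost_matrix)
    matches = [(r, c) for r, c in zip(rows, cols) if cost_matrix[r, c] <= max_cost]
    matched_tracks = {r for r, _ in matches}
    matched_detections = {c for _, c in matches}
    unmatched_tracks = [i for i in range(cost_matrix.shape[0]) if i not in matched_tracks]
    unmatched_detections = [j for j in range(cost_matrix.shape[1]) if j not in matched_detections]
    return matches, unmatched_tracks, unmatched_detections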
Example #12
class App:
    def __init__(self, video_src, quiet=True, invisible=False, draw_contours=True, 
                 bgsub_thresh=64):
        self.quiet = quiet
        self.invisible = invisible
        self.drawContours = draw_contours
        self.threshold = bgsub_thresh
        self.drawTracks = True
        self.drawFrameNum = False

        self.areas = []

        # Learn the bg
        self.operator = BackgroundSubtractor(2000, self.threshold, True)
        self.operator.model_bg2(video_src)

        self.track_len = 10
        self.detect_interval = 5
        self.tracks = []
        self.cam = cv2.VideoCapture(video_src)
        self.frame_idx = 0
        self.arrivals = self.departures = 0

    def run(self, as_script=True):
        if self.invisible:
            cv2.namedWindow("Control")

        prev_gray = None
        prev_points = None
        
        while True:
            ret, frame = self.cam.read()
            if not ret:
                break

            fg_mask = self.operator.apply(frame)
            fg_mask = ((fg_mask == 255) * 255).astype(np.uint8)
            fg_mask = morph_openclose(fg_mask)

            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            if prev_gray is not None and prev_points is not None:
                p0 = np.float32([point for point in prev_points]).reshape(-1, 1, 2)
                if drawing.draw_prev_points(frame, prev_points):
                    # p1, st, err = cv2.calcOpticalFlowPyrLK(prev_gray, frame_gray, p0, None, **lk_params)
                    frame_gray[fg_mask == 0] = 255
                    p1, st, err = cv2.calcOpticalFlowPyrLK(prev_gray, frame_gray, p0, None, **lk_params)
                    for p_i, p_f in zip(p0.reshape(-1, 2), p1.reshape(-1, 2)):
                        result = cross(ROI, ROI_W, ROI_H, p_i, p_f)
                        if result == 1:
                            self.arrivals += 1
                            if not self.quiet:
                                print("Arrival")
                        elif result == -1:
                            self.departures += 1
                            if not self.quiet:
                                print("Departure")

                        if self.drawTracks:
                            drawing.draw_line(frame, tuple(p_i), tuple(p_f))

            prev_gray = frame_gray
            contours, hier = drawing.draw_contours(frame, fg_mask)
            
            areas, prev_points = drawing.draw_min_ellipse(contours, frame, MIN_AREA, MAX_AREA)
            self.areas += areas

            self.frame_idx += 1
            if not self.invisible:
                self.draw_overlays(frame, fg_mask)
                cv2.imshow("Fas", frame_gray)
                cv2.imshow('Tracking', frame)
                cv2.imshow("Mask", fg_mask)
                delay = 33
            else:
                delay = 1
            if handle_keys(delay) == 1:
                break
            
            # Should we continue running or yield some information about the current frame
            if as_script: continue
            else: pass

        return self.areas


    def draw_overlays(self, frame, fg_mask):
        drawing.draw_rectangle(frame, ROI, (ROI[0]+ROI_W, ROI[1]+ROI_H))
        if self.drawFrameNum:
            drawing.draw_frame_num(frame, self.frame_idx)
        if self.drawContours:
            pass
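
`lk_params` is defined outside these snippets. For `cv2.calcOpticalFlowPyrLK` it is conventionally a dict like the following (illustrative values, not the project's actual constants):

# Illustrative Lucas-Kanade parameters; the originals are defined elsewhere.
lk_params = dict(winSize=(15, 15),   # search window per pyramid level
                 maxLevel=2,         # number of pyramid levels
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))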
Example #13
class App:
    def __init__(self,
                 video_src,
                 quiet=True,
                 invisible=False,
                 draw_contours=True,
                 bgsub_thresh=64):
        self.quiet = quiet
        self.invisible = invisible
        self.drawContours = draw_contours
        self.threshold = bgsub_thresh
        self.drawTracks = True
        self.drawFrameNum = False

        self.areas = []

        # Learn the bg
        self.operator = BackgroundSubtractor(2000, self.threshold, True)
        self.operator.model_bg2(video_src)

        self.track_len = 10
        self.detect_interval = 5
        self.tracks = []
        self.cam = cv2.VideoCapture(video_src)
        self.frame_idx = 0
        self.arrivals = self.departures = 0

    def run(self, as_script=True):
        if self.invisible:
            cv2.namedWindow("Control")

        prev_gray = None
        prev_points = None

        while True:
            ret, frame = self.cam.read()
            if not ret:
                break

            fg_mask = self.operator.apply(frame)
            fg_mask = ((fg_mask == 255) * 255).astype(np.uint8)
            fg_mask = morph_openclose(fg_mask)

            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            if prev_gray is not None and prev_points is not None:
                p0 = np.float32([point
                                 for point in prev_points]).reshape(-1, 1, 2)
                if drawing.draw_prev_points(frame, prev_points):
                    # p1, st, err = cv2.calcOpticalFlowPyrLK(prev_gray, frame_gray, p0, None, **lk_params)
                    frame_gray[fg_mask == 0] = 255
                    p1, st, err = cv2.calcOpticalFlowPyrLK(
                        prev_gray, frame_gray, p0, None, **lk_params)
                    for p_i, p_f in zip(p0.reshape(-1, 2), p1.reshape(-1, 2)):
                        result = cross(ROI, ROI_W, ROI_H, p_i, p_f)
                        if result == 1:
                            self.arrivals += 1
                            if not self.quiet:
                                print("Arrival")
                        elif result == -1:
                            self.departures += 1
                            if not self.quiet:
                                print("Departure")

                        if self.drawTracks:
                            drawing.draw_line(frame, tuple(p_i), tuple(p_f))

            prev_gray = frame_gray
            contours, hier = drawing.draw_contours(frame, fg_mask)

            areas, prev_points = drawing.draw_min_ellipse(
                contours, frame, MIN_AREA, MAX_AREA)
            self.areas += areas

            self.frame_idx += 1
            if not self.invisible:
                self.draw_overlays(frame, fg_mask)
                cv2.imshow("Fas", frame_gray)
                cv2.imshow('Tracking', frame)
                cv2.imshow("Mask", fg_mask)
                delay = 33
            else:
                delay = 1
            if handle_keys(delay) == 1:
                break

            # Should we continue running or yield some information about the current frame
            if as_script: continue
            else: pass

        return self.areas

    def draw_overlays(self, frame, fg_mask):
        drawing.draw_rectangle(frame, ROI, (ROI[0] + ROI_W, ROI[1] + ROI_H))
        if self.drawFrameNum:
            drawing.draw_frame_num(frame, self.frame_idx)
        if self.drawContours:
            pass
Example #14
class App:
    def __init__(self, video_src="", quiet=False, invisible=False, draw_contours=False,
                 bgsub_thresh=64, drawTracks=False, drawFrameNum=False, drawBoundary=False):
        self.quiet        = quiet
        self.invisible    = invisible
        self.drawContours = draw_contours
        self.threshold    = bgsub_thresh
        self.drawTracks   = drawTracks
        self.drawFrameNum = drawFrameNum
        self.drawBoundary = drawBoundary

        self.areas = []

        # Learn the bg
        self.operator = BackgroundSubtractor(2000, self.threshold, True)
        self.operator.model_bg2(video_src)

        self.cam = cv2.VideoCapture(video_src)

        self.maxTimeInvisible  = 0
        self.trackAgeThreshold = 4

        self.tracks     = []
        self.lostTracks = []
        self.frame_idx  = 0
        self.arrivals   = 0
        self.departures = 0

    def run(self, as_script=True):
        if self.invisible:
            cv2.namedWindow("Control")

        prev_gray        = None
        prev_points      = []
        self.nextTrackID = 0

        while True:
            # Get frame
            ret, frame = self.cam.read()
            if not ret:
                break
            # Convert frame to grayscale
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            # Segment
            fg_mask = self.operator.apply(frame)
            fg_mask = ((fg_mask == 255) * 255).astype(np.uint8)
            fg_mask = morph_openclose(fg_mask)

            # Detect blobs
            version = int(re.findall(r'\d+', cv2.__version__)[0])
            if version == 3:
                _, contours, _ = cv2.findContours((fg_mask.copy()), cv2.RETR_EXTERNAL,
                    cv2.CHAIN_APPROX_TC89_L1)
            else:
                # Get contours for detected bees using the foreground mask
                contours, _ = cv2.findContours((fg_mask.copy()), cv2.RETR_EXTERNAL,
                    cv2.CHAIN_APPROX_TC89_L1)
            areas, detections = drawing.draw_min_ellipse(contours, frame, MIN_AREA, MAX_AREA, draw=False)
            self.areas += areas

            # Track
            self.predictNewLocations(frame)
            assignments, unmatchedTracks, unmatchedDetections = self.assignTracks(detections, frame)
            self.updateMatchedTracks(assignments, detections)
            self.updateUnmatchedTracks(unmatchedTracks)
            self.deleteLostTracks()
            self.createNewTracks(detections, unmatchedDetections)
            self.showTracks(frame)
            # self.showLostTracks(frame)
            self.checkTrackCrosses()

            # Store frame and go to next
            prev_gray = frame_gray
            prev_points = detections
            self.frame_idx += 1
            if not self.invisible:
                self.draw_overlays(frame, fg_mask)
                cv2.imshow('Tracking', frame)
                cv2.imshow("Mask", fg_mask)
                delay = FRAME_DELAY
                if handle_keys(delay) == 1:
                    break
            # else:
            #     if handle_keys(delay) == 1:
            #         break

            # Should we continue running or yield some information about the current frame
            if as_script: continue
            else: pass
        # After the video, examine tracks
        # self.checkLostTrackCrosses()
        self.cam.release()

    def deleteLostTracks(self):
        newTracks = []
        tracksLost = 0
        for track in self.tracks:
            # Fraction of the track's age during which it was visible
            visibility = float(track.totalVisibleCount) / track.age

            # Determine lost tracks
            if not ((track.age < self.trackAgeThreshold and visibility < .6) or
                    (track.timeInvisible > self.maxTimeInvisible)):  # track still valid
                newTracks.append(track)
            else:  # track lost
                self.lostTracks.append(track)
                tracksLost += 1
        # print("Tracks lost", tracksLost)
        self.tracks = newTracks

    def createNewTracks(self, detections, unmatchedDetections):
        for detectionIndex in unmatchedDetections:
            detection = detections[detectionIndex]
            array_detection = np.array(detection, np.float32)
            # Create a Kalman filter for this detection (4 state vars, 2 measured)
            kf = cv2.KalmanFilter(4, 2)
            kf.measurementMatrix = MEASUREMENT_MATRIX
            kf.transitionMatrix = TRANSITION_MATRIX
            # kf.processNoiseCov = PROCESS_NOISE_COV

            # Create the new track
            newTrack = Track(self.nextTrackID, kf)
            newTrack.update(array_detection)
            newTrack.locationHistory.append(detection)
            self.tracks.append(newTrack)
            self.nextTrackID += 1

    def updateMatchedTracks(self, assignments, detections):
        for assignment in assignments:
            trackIndex = assignment.trackIndex
            detectionIndex = assignment.detectionIndex
            detection = detections[detectionIndex]
            array_detection = np.array(detection, np.float32)
            track = self.tracks[trackIndex]

            track.update(array_detection)

            # Update track
            track.age += 1
            track.totalVisibleCount += 1
            track.timeInvisible = 0
            track.locationHistory.append(detection)

    def updateUnmatchedTracks(self, unmatchedTracks):
        for trackIndex in unmatchedTracks:
            track = self.tracks[trackIndex]
            track.age += 1
            track.timeInvisible += 1

    def assignTracks(self, detections, frame):
        """ Returns assignments, unmatchedTracks, unmatchedDetections """
        if len(self.tracks) == 0:
            # There are no tracks, all detections are unmatched
            unmatchedDetections = range(len(detections))
            return [], [], unmatchedDetections
        elif len(detections) == 0:
            # There are no detections, all tracks are unmatched
            unmatchedTracks = range(len(self.tracks))
            return [], unmatchedTracks, []
        else:
            costMatrix = np.zeros((len(self.tracks), len(detections)))
            for i, track in enumerate(self.tracks):
                x1, y1 = track.getPredictedXY()
                for j, (x2, y2) in enumerate(detections):
                    # cv2.line(frame, (x1, y1), (x2, y2), (255, 0, 0))
                    costMatrix[i, j] = np.sqrt( (x1 - x2)**2 + (y1 - y2)**2 )
            return tools.assignment(costMatrix)

    def predictNewLocations(self, frame):
        for track in self.tracks:
            track.predict(frame)

    def showTracks(self, frame):
        if self.drawTracks:
            for track in self.tracks:
                track.drawTrack(frame)

    def showLostTracks(self, frame):
        for track in self.lostTracks:
            loc = track.locationHistory[-1]
            cv2.circle(frame, loc, 2, color=(0,0,255), thickness=-1)

    def checkTrackCrosses(self):
        for track in self.tracks:
            result = track.checkCrossLastTwo(ROI, ROI_W, ROI_H)
            if result == 1:
                self.arrivals += 1
                # print("Arrival")
            elif result == -1:
                self.departures += 1
                # print("Departure")

    def checkLostTrackCrosses(self):
        self.lostTracks += self.tracks
        for track in self.lostTracks:
            result = track.checkCross()
            if result == 1:
                self.arrivals += 1
                # print("Arrival")
            elif result == -1:
                self.departures += 1
                # print("Departure")

    def draw_overlays(self, frame, fg_mask):
        if self.drawBoundary:
            drawing.draw_rectangle(frame, ROI, (ROI[0]+ROI_W, ROI[1]+ROI_H))
        if self.drawFrameNum:
            drawing.draw_frame_num(frame, self.frame_idx)
        if self.drawContours:
            pass
            # drawing.draw_contours(frame, fg_mask)

    def openNewVideo(self, video_src):
        self.cam.release()
        self.cam = cv2.VideoCapture(video_src)
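
`MEASUREMENT_MATRIX` and `TRANSITION_MATRIX` are module-level constants that never appear in these examples. For `cv2.KalmanFilter(4, 2)` with a constant-velocity state (x, y, vx, vy) measured by position only, plausible definitions would be:

import numpy as np

# Assumed constant-velocity model; the project's actual constants are not shown.
MEASUREMENT_MATRIX = np.array([[1, 0, 0, 0],
                               [0, 1, 0, 0]], np.float32)   # observe x, y
TRANSITION_MATRIX = np.array([[1, 0, 1, 0],                 # x += vx
                              [0, 1, 0, 1],                 # y += vy
                              [0, 0, 1, 0],
                              [0, 0, 0, 1]], np.float32)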