Example #1
    def __init__( self, instance_name, **kwargs ):
        
        super().__init__( instance_name, **kwargs )

        self.logger = logging.getLogger( 'detector.motion.{}'.format( instance_name ) )

        self.logger.debug( 'setting up motion detector...' )

        # Grab configuration params.
        self.min_w = int( kwargs.get( 'minw', 0 ) )
        self.min_h = int( kwargs.get( 'minh', 0 ) )
        self.ignore_edges = 'true' == kwargs.get( 'ignoreedges', 'false' )
        self.logger.debug( 'minimum movement size: %dx%d, ignore edges: %d',
            self.min_w, self.min_h, self.ignore_edges )
        self.wait_max = int( kwargs.get( 'waitmax', 5 ) )
        self.running = True
        self.blur = int( kwargs.get( 'blur', 5 ) )
        self.threshold = int( kwargs.get( 'threshold', 127 ) )

        # Set up OpenCV objects.
        self.back_sub = cv2.createBackgroundSubtractorMOG2(
            history=int( kwargs.get( 'history', 150 ) ),
            varThreshold=int( kwargs.get( 'varthreshold', 25 ) ),
            detectShadows=True )
        self.kernel = numpy.ones( (20, 20), numpy.uint8 )

        self.logger.debug( 'threshold: %d', self.threshold )
        self.logger.debug( 'blur: %d', self.blur )
Example #2
    def __init__(self,
                 out_vid_name='',
                 mov_detected_pixels_threshold=30,
                 kernel_size=7,
                 lot_of_noise_det=False,
                 history=10,
                 min_area=700):
        self.video_capture = cv2.VideoCapture(0)
        self.mog2 = cv2.createBackgroundSubtractorMOG2(history=history,
                                                       varThreshold=50,
                                                       detectShadows=True)
        self.knn = cv2.createBackgroundSubtractorKNN(history=10)
        self.current_frame = None
        self.previous_frame = None  # initially there is no previous frame
        self.kernel_size = kernel_size
        self.min_area = min_area
        self.lot_of_noise_det = lot_of_noise_det
        self.cap_frame_width = int(
            self.video_capture.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.cap_frame_height = int(
            self.video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
        codec = cv2.VideoWriter_fourcc(*'XVID')
        self.video_writer = cv2.VideoWriter(
            out_vid_name + '.avi', codec, 20.0,
            (self.cap_frame_width, self.cap_frame_height))
        # WHITE_PIXEL_VALUE is assumed to be a module-level constant
        # (typically 255 for a binary mask).
        self.mov_detected_pixels_threshold = \
            mov_detected_pixels_threshold * WHITE_PIXEL_VALUE
Example #3
def open_camera():
    camera = cv2.VideoCapture(0)
    bgModel = None

    while camera.isOpened():
        _, frame = camera.read()
        frame = cv2.flip(frame, 1)
        cv2.rectangle(frame, (int(0.5 * frame.shape[1]), 0),
                      (frame.shape[1], int(0.75 * frame.shape[0])),
                      (255, 0, 0), 2)
        cv2.imshow('original', frame)

        if bgModel:
            removed = removeBG(frame, bgModel)
            cropped = removed[0:int(0.75 * frame.shape[0]),
                              int(0.5 * frame.shape[1]):frame.shape[1]]
            cv2.imshow('removed', cropped)

            thres = image_process(cropped)
            contours, _ = cv2.findContours(thres, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)
            count = find_max_contour(contours, cropped)

            # count = contours(thres, cropped)

        k = cv2.waitKey(10)
        if k == 27:
            camera.release()
            cv2.destroyAllWindows()
            print("Exiting...")
            break
        elif k == ord('b'):
            bgModel = cv2.createBackgroundSubtractorMOG2(0, 50)
            print("Background captured")
        elif k == ord('s') and bgModel:
            move = ""
            if count == 0:
                move = "Rock"
            elif count == 1:
                move = "Scissors"
            elif count == 4:
                move = "Paper"
            game.play(move)
        elif k == ord("l"):
            game.scoreLog()
Example #4
# end of env Variables 

# Need to check for the existence of these files before trying to delete them;
# see the sketch below.
#os.remove("workingfiles/timestamps.txt")
#os.remove("workingfiles/ExtractClips.bat")
#os.remove("workingfiles/fileList.txt")
# set output files
output1 = open(sourceDir + "/workingfiles/timestamps.txt","a")
output2 = open(sourceDir + "/workingfiles/ExtractClips.bat","a")
output3 = open(sourceDir + "/workingfiles/fileList.txt","a")


# history, varThreshold, detectShadows
# fgbg = cv2.createBackgroundSubtractorMOG2(50, 200, True)
fgbg = cv2.createBackgroundSubtractorMOG2(history=300, varThreshold=400,
                                          detectShadows=True)

# Keeps track of which frame we're on
frameCount = 0
# Keeps track of which second we're on
secCount = 0
# Last second that triggered output; used to decide whether to write to file
lastSecCount = 0
# Filename appender
fileCount = 0
# print('@echo off \necho start file processing \n')
output2.write('@echo off \necho start file processing \n')
skipCount = 0

while True:
    # Return value and the current frame
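    # (The listing is truncated here.) A minimal sketch of the standard
    # read/apply pattern the full loop builds on; `capture` is an assumed
    # VideoCapture object opened earlier in the full script:
    #     ret, frame = capture.read()
    #     if not ret:
    #         break
    #     fgmask = fgbg.apply(frame)
    #     frameCount += 1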
Example #5
import cv2
import numpy as np

cap = cv2.VideoCapture("./sources/car.mp4")
subtractor = cv2.createBackgroundSubtractorMOG2(history=100,
                                                varThreshold=50,
                                                detectShadows=True)
while True:

    ret, frame = cap.read()
    if not ret:
        break
    frame = cv2.resize(frame, (640, 480))
    mask = subtractor.apply(frame)
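    # With detectShadows=True, MOG2 marks shadow pixels as gray (127) in the
    # mask; a further threshold keeps only the solid foreground. A minimal
    # sketch (not part of the original listing):
    #     _, fg_only = cv2.threshold(mask, 200, 255, cv2.THRESH_BINARY)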

    cv2.imshow("frame", frame)
    cv2.imshow("mask", mask)

    if cv2.waitKey(20) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
Example #6
import cv2
import numpy as np
import pyautogui as pag
import time
import math

# Set to True to see the background mask and the camera image
Debug = False
# pag.FAILSAFE = False

# Global Variables - VideoCam, BackgroundSubtractor
cam = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2()


trigger = False
camMarginX = 10
camMarginY = 10
scale = 10
kern = np.ones((3,3), np.uint8)


# Function for creating a suitable mask for the video cam
def Masking(camCropped, fgbg):
    # LB and UB (HSV lower/upper bounds) are assumed to be defined elsewhere
    hsv = cv2.cvtColor(camCropped, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, LB, UB)
    mask = cv2.dilate(mask, kern, iterations=1)
    mask = cv2.erode(mask, kern, iterations=1)
    mask = cv2.GaussianBlur(mask, (5, 5), 100)
    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kern)
    return mask
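# A minimal usage sketch for Masking (an assumption, not part of the original
# listing); LB/UB must be defined first:
#     ret, frame = cam.read()
#     cropped = frame[camMarginY:-camMarginY, camMarginX:-camMarginX]
#     mask = Masking(cropped, fgbg)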
Example #7
def opencamera():
    camera = cv2.VideoCapture(0)  # creating camera object
    camera.set(10, 200)  # set camera property 10 (CAP_PROP_BRIGHTNESS)
    bgModel = None
    bgCaptured = 0
    cnt = 0  # finger count; initialized so the 'p' branch cannot hit an undefined name
    while camera.isOpened():
        ret, frame = camera.read()  # reading the frames
        frame = cv2.bilateralFilter(frame, 5, 50, 100)  # smoothing filter
        frame = cv2.flip(frame, 1)  # flip the frame horizontally
        cv2.rectangle(
            frame,
            (int(cap_region_x_begin * frame.shape[1]),
             0),  # draw the ROI rectangle
            (frame.shape[1], int(cap_region_y_end * frame.shape[0])),
            (255, 0, 0),
            2)
        cv2.imshow('input', frame)  # show the frame

        if bgCaptured == 1:
            removed = removeBG(bgModel, frame)
            ROI = removed[0:int(cap_region_y_end * frame.shape[0]),
                          int(cap_region_x_begin *
                              frame.shape[1]):frame.shape[1]]  # clip the ROI
            cv2.imshow('ROI', ROI)  # show the ROI

            gray = cv2.cvtColor(
                ROI, cv2.COLOR_BGR2GRAY)  # convert the image to grayscale
            blur = cv2.GaussianBlur(
                gray, (blurValue, blurValue),
                0)  # cancel the noise and smooth the whole image
            cv2.imshow('blur', blur)  # show the blur result
            ret, thresh = cv2.threshold(
                blur, threshold, 255,
                cv2.THRESH_BINARY)  # get the black and white image
            cv2.imshow('threshold', thresh)  # show the threshold result

            # get the contours
            thresh1 = copy.deepcopy(thresh)  # deep copy so findContours cannot modify the original
            contours, hierarchy = cv2.findContours(
                thresh1, cv2.RETR_TREE,
                cv2.CHAIN_APPROX_SIMPLE)  # find the contour
            length = len(contours)
            maxArea = -1
            if length > 0:
                # find the biggest contour (according to area)
                for i in range(length):
                    temp = contours[i]
                    area = cv2.contourArea(temp)
                    if area > maxArea:
                        maxArea = area
                        ci = i
                res = contours[ci]
                hull = cv2.convexHull(res)  # find the contour's convex hull
                drawing = np.zeros(ROI.shape, np.uint8)
                cv2.drawContours(drawing, [res], 0, (0, 255, 0), 2)
                cv2.drawContours(drawing, [hull], 0, (0, 0, 255), 3)
                isFinishCal, cnt = calculateFingers(res, drawing)
                # only show the output when a contour (and thus `drawing`) exists
                cv2.imshow('output', drawing)

        # keyboard
        k = cv2.waitKey(10)
        if k == 27:  # esc
            camera.release()
            cv2.destroyAllWindows()
            break
        elif k == ord('b'):  # press 'b' to capture the background
            bgModel = cv2.createBackgroundSubtractorMOG2(
                0, bgSubThreshold)  # Creates MOG2 Background Subtractor
            bgCaptured = 1
            print('!!!Background Captured!!!')
        elif k == ord('p'):  # play a game
            print('feature value of the gesture is ', cnt)
            move = ""
            if cnt == 0:
                move = "Rock"
            if cnt == 1:
                move = "Scissors"
            if cnt == 4:
                move = "Paper"
            game.play(move)
            game.score()
Example #8
    def play(self, video, q, k):
        font = cv2.FONT_HERSHEY_COMPLEX
        parser = argparse.ArgumentParser(
            description='This program shows how to use background subtraction '
                        'methods provided by OpenCV. You can process both '
                        'videos and images.')
        parser.add_argument('--input',
                            type=str,
                            help='Path to a video or a sequence of image.',
                            default='vtest.avi')
        parser.add_argument('--algo',
                            type=str,
                            help='Background subtraction method (KNN, MOG2).',
                            default='MOG2')
        args = parser.parse_args()

        if args.algo == 'MOG2':
            backSub = cv2.createBackgroundSubtractorMOG2()
        else:
            backSub = cv2.createBackgroundSubtractorKNN()
        cap = cv2.VideoCapture(video)

        # Initialize frame counter
        cnt = 0

        # Some characteristics from the original video
        w_frame, h_frame = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(
            cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps, frames = cap.get(cv2.CAP_PROP_FPS), cap.get(
            cv2.CAP_PROP_FRAME_COUNT)

        # Here you can define your cropping values
        x, y, h, w = 0, 0, 300, 500

        # output
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter('result.avi', fourcc, fps, (w, h))

        if not cap.isOpened():
            print("Error opening video file")

        while cap.isOpened():

            ret, frame = cap.read()
            # stop cleanly at the end of the stream; apply() on a None
            # frame would raise an error
            if not ret:
                break
            fgMask = backSub.apply(frame)

            cnt += 1
            if ret:
                crop_frame = frame[y:y + h, x:x + w]

                # Percentage
                xx = cnt * 100 / frames
                print(int(xx), '%')

                # Saving from the desired frames
                #if 15 <= cnt <= 90:
                #    out.write(crop_frame)

                # write every frame of the cropped video
                out.write(crop_frame)

                if int(cv2.__version__[0]) > 3:
                    # Opencv 4.x.x
                    contours, _ = cv2.findContours(fgMask, cv2.RETR_TREE,
                                                   cv2.CHAIN_APPROX_SIMPLE)
                else:
                    # Opencv 3.x.x
                    _, contours, _ = cv2.findContours(fgMask, cv2.RETR_TREE,
                                                      cv2.CHAIN_APPROX_SIMPLE)

                # use names that do not clobber the crop origin (x, y) or
                # the frame counter (cnt)
                for contour in contours:
                    area = cv2.contourArea(contour)
                    approx = cv2.approxPolyDP(
                        contour, 0.02 * cv2.arcLength(contour, True), True)
                    px = approx.ravel()[0]
                    py = approx.ravel()[1]

                    if area > 400 and area < 800:
                        cv2.drawContours(frame, [approx], 0, (0, 0, 0), 5)

                        if len(approx) == 4:
                            cv2.putText(frame, "Rectangle", (px, py), font, 1,
                                        (0, 0, 0))

                cv2.imshow("video", frame)
                cv2.imshow('FG Mask', fgMask)
                cv2.imshow('croped', crop_frame)

                cv2.rectangle(frame, (10, 2), (100, 20), (255, 255, 255), -1)
                cv2.putText(frame, str(cap.get(cv2.CAP_PROP_POS_FRAMES)),
                            (15, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))

                # press the `k` key to play the next frame
                key = cv2.waitKey(0)
                while key not in [ord(q), ord(k)]:
                    key = cv2.waitKey(0)
                # Quit when the `q` key is pressed
                if key == ord(q):
                    break

        cap.release()
        out.release()
        cv2.destroyAllWindows()
Example #9
import numpy as np
import cv2

cap = cv2.VideoCapture('resource/video_test_p22.mp4')

fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows=True)

kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))

while True:
    ret, frame = cap.read()
    if not ret:
        break
    fgmask = fgbg.apply(frame)
    fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)

    fgmask = cv2.cvtColor(fgmask, cv2.COLOR_GRAY2BGR)
    fgmask = cv2.bitwise_and(fgmask, frame)

    cv2.imshow('frame', fgmask)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break

cv2.waitKey(0)
cap.release()
cv2.destroyAllWindows()
Example #10
    # Resize the image
    frame = cv.resize(frame,
                      None,
                      fx=scaling_factor,
                      fy=scaling_factor,
                      interpolation=cv.INTER_AREA)

    return frame


if __name__ == '__main__':
    # Define the video capture object
    cap = cv.VideoCapture(1)

    # Define the background subtractor object
    bg_subtractor = cv.createBackgroundSubtractorMOG2()

    # Define the number of previous frames to use to learn.
    # This factor controls the learning rate of the algorithm.
    # The learning rate refers to the rate at which your model
    # will learn about the background. A higher value for
    # 'history' indicates a slower learning rate. You can
    # play with this parameter to see how it affects the output.
    history = 100

    # Define the learning rate
    learning_rate = 1.0 / history

    # Keep reading the frames from the webcam
    # until the user hits the 'Esc' key
    while True:
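        # (The listing is truncated here.) A minimal sketch of the loop body,
        # assuming only the objects defined above; each frame is fed to the
        # subtractor with the explicit learning rate, and 'Esc' exits:
        ret, frame = cap.read()
        if not ret:
            break

        # Apply the background subtractor with the chosen learning rate
        mask = bg_subtractor.apply(frame, learningRate=learning_rate)

        cv.imshow('Input', frame)
        cv.imshow('Moving Objects', mask)

        if cv.waitKey(10) == 27:  # 'Esc' key
            break

    cap.release()
    cv.destroyAllWindows()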
Example #11
def main():
    """
    Main function of the program.
    """
    # Capture the video from the default camera
    defaultCamera = 0
    capturedVideo = cv2.VideoCapture(defaultCamera)

    # Initialization of the background subtractor
    backgroundSubtractor = cv2.createBackgroundSubtractorMOG2(
        detectShadows=True)

    if not capturedVideo.isOpened():
        print('Error: Unable to open file')
        exit(0)

    # Top-left and bottom-right points of the region of interest rectangle:

    # Low resolution webcam
    regionOfInterestPoint1 = (330, 10)
    regionOfInterestPoint2 = (630, 310)

    # Medium resolution webcam
    # regionOfInterestPoint1 = (800, 30)
    # regionOfInterestPoint2 = (1250, 530)

    # Constant tuple with the two learning rates for the background
    #  subtractor
    learningRates = (0.3, 0)

    # Initialization of the current learning rate
    currentLearningRate = learningRates[0]

    # Boolean that stores if the user wants to count the raised fingers or not
    countHandFingers = True

    # Boolean that stores if the user wants to detect hand gestures or not
    detectHandGestures = True

    # Boolean that stores if the user wants to draw with the index finger or not
    indexFingerDrawing = False

    # List that stores the trace of the current stroke of the drawing
    currentStroke = list()

    # List that stores the entire drawing
    currentDrawing = list()

    # Boolean that stores if the user wants to see the help info or not
    showHelp = True

    while True:
        # Read the data from the captured video
        returnErrorValue, capturedFrame = capturedVideo.read()
        if not returnErrorValue:
            print('Error: Unable to get data')
            exit(0)

        # Window showing the mirrored captured video and the region of interest
        #  marked with a blue rectangle
        capturedFrame = cv2.flip(capturedFrame, 1)
        cv2.rectangle(capturedFrame,
                      regionOfInterestPoint1,
                      regionOfInterestPoint2,
                      color=(255, 0, 0))
        cv2.imshow('WebCam', capturedFrame)

        # Window showing the region of interest only
        regionOfInterest = capturedFrame[
            regionOfInterestPoint1[1]:regionOfInterestPoint2[1],
            regionOfInterestPoint1[0]:regionOfInterestPoint2[0], :].copy()
        # cv2.imshow('Region of Interest', regionOfInterest)

        # Window showing the background subtraction applied
        foregroundMask = backgroundSubtractor.apply(regionOfInterest, None,
                                                    currentLearningRate)
        # cv2.imshow('Foreground Mask', foregroundMask)

        # Window showing the gray threshold applied
        returnErrorValue, blackAndWhite = cv2.threshold(
            foregroundMask, 200, 255, cv2.THRESH_BINARY)
        cv2.imshow('Black and White', blackAndWhite)

        # Window showing the hand contour
        contours = cv2.findContours(blackAndWhite, cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)[0]
        contourWindow = regionOfInterest.copy()

        if len(contours) > 0 and currentLearningRate != learningRates[0]:
            handContour = getLargerContour(contours)
            cv2.drawContours(contourWindow,
                             handContour,
                             contourIdx=-1,
                             color=(0, 255, 0),
                             thickness=3)
        else:
            handContour = None

        cv2.imshow('Contour', contourWindow)

        # Window showing the hand's convex hull
        convexHullWindow = regionOfInterest.copy()

        if handContour is not None:
            handConvexHull = cv2.convexHull(handContour)
            cv2.drawContours(convexHullWindow, [handConvexHull],
                             contourIdx=0,
                             color=(255, 0, 0),
                             thickness=3)
        else:
            handConvexHull = None

        cv2.imshow('Convex Hull', convexHullWindow)

        # Window showing the fingers' convexity defects
        convexityDefectsWindow = regionOfInterest.copy()

        if handContour is not None:
            handConvexHull = cv2.convexHull(handContour,
                                            clockwise=False,
                                            returnPoints=False)
            tempPythonList = list(handConvexHull)
            tempPythonList.sort(reverse=True, key=lambda element: element[0])
            handConvexHull = numpy.array(tempPythonList)
            handConvexityDefects = cv2.convexityDefects(
                handContour, handConvexHull)
            fingerConvexityDefects = list()

            if handConvexityDefects is not None:
                for currentConvexityDefect in range(len(handConvexityDefects)):
                    startIndex, endIndex, farIndex, distanceToConvexHull \
                        = handConvexityDefects[currentConvexityDefect][0]

                    startPoint = tuple(handContour[startIndex][0])
                    endPoint = tuple(handContour[endIndex][0])
                    farPoint = tuple(handContour[farIndex][0])

                    # convexityDefects reports fixed-point distances (value * 256)
                    depth = distanceToConvexHull / 256.0

                    if depth > 80.0:
                        # angleOfCurrentConvexityDefect = angle(
                        #     startPoint, endPoint, farPoint)
                        cv2.line(convexityDefectsWindow,
                                 startPoint,
                                 endPoint,
                                 color=(255, 0, 0),
                                 thickness=2)
                        cv2.circle(convexityDefectsWindow,
                                   farPoint,
                                   radius=5,
                                   color=(0, 0, 255),
                                   thickness=-1)
                        fingerConvexityDefects.append(
                            (startPoint, endPoint, farPoint))

        else:
            handConvexHull = None
            handConvexityDefects = None

        cv2.imshow('Convexity Defects', convexityDefectsWindow)

        # Window showing the hand's bounding rectangle
        boundingRectangleWindow = regionOfInterest.copy()

        if handContour is not None:
            handBoundingRectangle = cv2.boundingRect(handContour)

            boundingRectanglePoint1 = (handBoundingRectangle[0],
                                       handBoundingRectangle[1])
            boundingRectanglePoint2 = (handBoundingRectangle[0] +
                                       handBoundingRectangle[2],
                                       handBoundingRectangle[1] +
                                       handBoundingRectangle[3])

            cv2.rectangle(boundingRectangleWindow,
                          boundingRectanglePoint1,
                          boundingRectanglePoint2,
                          color=(0, 0, 255),
                          thickness=3)
        else:
            handBoundingRectangle = None
            boundingRectanglePoint1 = None
            boundingRectanglePoint2 = None

        cv2.imshow('Bounding Rectangle', boundingRectangleWindow)

        # Window showing the user side functionalities
        mainWindow = regionOfInterest.copy()

        if handContour is not None:
            numberOfFingers = countFingers(fingerConvexityDefects,
                                           handBoundingRectangle)

            if countHandFingers:
                mainWindow = printFingers(numberOfFingers, mainWindow)

            if detectHandGestures:
                handGesture = detectHandGesture(numberOfFingers,
                                                fingerConvexityDefects,
                                                handBoundingRectangle)
                mainWindow = printGestures(handGesture, mainWindow)

            if indexFingerDrawing:
                currentStroke = fingerDrawing(currentStroke, handContour,
                                              mainWindow)

            mainWindow = printStroke(currentStroke, mainWindow)

            for stroke in currentDrawing:
                mainWindow = printStroke(stroke, mainWindow)

        if showHelp:
            mainWindow = printHelp(mainWindow)

        if currentLearningRate == learningRates[0]:
            mainWindow = printLearning(mainWindow)

        cv2.imshow('Main Window', mainWindow)

        keyboard = cv2.waitKey(1)

        # Key used for swapping between the two learning rates
        if keyboard & 0xFF == ord('s'):
            currentLearningRate = swapLearningRate(currentLearningRate,
                                                   learningRates)

        # Key used for toggling whether the raised fingers are counted
        if keyboard & 0xFF == ord('f'):
            countHandFingers = not countHandFingers

        # Key used for toggling whether hand gestures are detected
        if keyboard & 0xFF == ord('g'):
            detectHandGestures = not detectHandGestures

        # Key used for toggling drawing with the index finger
        if keyboard & 0xFF == ord('d'):
            indexFingerDrawing = not indexFingerDrawing

            if not indexFingerDrawing:
                currentDrawing.append(currentStroke[:])
                currentStroke.clear()

        # Key used for deleting the last stroke
        if keyboard & 0xFF == ord('c'):
            if currentDrawing:
                currentDrawing.pop()

        # Key used for cleaning the entire drawing
        if keyboard & 0xFF == ord('x'):
            currentDrawing.clear()

        # Key used for toggling the help info
        if keyboard & 0xFF == ord('h'):
            showHelp = not showHelp

        # Key used for finishing the program execution
        if keyboard & 0xFF == ord('q'):
            break

    capturedVideo.release()
    cv2.destroyAllWindows()
Example #12
    def normhist(x):
        return x / np.sum(x)

    def h(rgb):
        # REDU and imCropMask are assumed to be defined in the enclosing scope
        return cv2.calcHist([rgb], [0, 1, 2], imCropMask,
                            [256 // REDU, 256 // REDU, 256 // REDU],
                            [0, 256] + [0, 256] + [0, 256])

    return normhist(sum(map(h, xs)))


def smooth(s, x):
    return gaussian_filter(x, s, mode='constant')


cap = cv2.VideoCapture("video.mp4")
fgbg = cv2.createBackgroundSubtractorMOG2(500, 60, True)
kernel = np.ones((3, 3), np.uint8)
crop = False
camshift = False

termination = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

font = cv2.FONT_HERSHEY_SIMPLEX
pause = False
degree = np.pi / 180
a = np.array([0, 900])

fps = 120
dt = 1 / fps
t = np.arange(0, 2.01, dt)
noise = 3
Example #13
class ImageManipulator:

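    # Class-level attributes: the subtractor and kernel are shared by all
    # instances, so the background model persists across per-frame objects.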
    fgbg = cv2.createBackgroundSubtractorMOG2(history=200, detectShadows=False)
    bounding_box_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                    (30, 30))

    # Initialize default values, makes copy of current frame.
    def __init__(self, frame):
        self.source = frame
        self.detection_frame = self.source.copy()
        self.gray = self.convert_gray_filtered(self.source)
        self.foreground = self.fgbg.apply(self.gray, learningRate=0.02)

        self.bounding_box = self.focus_movement(self.source)
        self.movement_detected = self.bounding_box is not None

        self.close_kernel = self.get_dynamic_kernel_size()

    def check_movement_detected(self):
        return self.movement_detected

    def get_bounding_box(self):
        return self.bounding_box

    def set_bounding_box(self, boundingBox):
        self.bounding_box = boundingBox

    # Adjusts the kernel size dynamically based on a distance calculation.
    def get_dynamic_kernel_size(self):
        kernel_size = 30

        if self.movement_detected:

            width = self.bounding_box.get_width()

            # Find distance by subject's width relative to camera.
            if width >= 250:
                kernel_size = 30
            elif width >= 120:
                kernel_size = 25
            elif width >= 100:
                kernel_size = 20
            elif width >= 60:
                kernel_size = 15
            elif width >= 40:
                kernel_size = 10
            elif width >= 20:
                kernel_size = 5
            else:
                kernel_size = 10

        return cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                         (kernel_size, kernel_size))

    def convert_gray_filtered(self, source):
        # Converting the image to grayscale.
        gray = cv2.cvtColor(source, cv2.COLOR_BGR2GRAY)
        # Smoothing without removing edges.
        gray_filtered = cv2.bilateralFilter(gray, 7, 75, 75)
        return gray_filtered

    # Splices the foreground image and returns it.
    def extract_foreground(self):
        if self.bounding_box is not None and \
                self.bounding_box.get_area() > 1000:
            y1, y2 = self.bounding_box.get_y_coordinates()
            x1, x2 = self.bounding_box.get_x_coordinates()
            foreground = cv2.morphologyEx(self.foreground, cv2.MORPH_CLOSE,
                                          self.close_kernel)
            extracted_foreground = np.copy(foreground[y1:y2, x1:x2])
            extracted_foreground = cv2.resize(extracted_foreground,
                                              dsize=(50, 75),
                                              interpolation=cv2.INTER_CUBIC)
            return extracted_foreground
        else:
            return None

    # Splices the edge image and returns it.
    def extract_edges(self):
        if self.bounding_box is not None and \
                self.bounding_box.get_area() > 1000:
            y1, y2 = self.bounding_box.get_y_coordinates()
            x1, x2 = self.bounding_box.get_x_coordinates()

            # Performs Canny edge detection on filtered frame.
            edges_filtered = cv2.Canny(self.gray, 60, 120)

            # Crop off the edges out of the moving area
            cropped_edges = (self.foreground // 255) * edges_filtered

            extracted_edges = np.copy(cropped_edges[y1:y2, x1:x2])
            extracted_edges = cv2.resize(extracted_edges,
                                         dsize=(50, 75),
                                         interpolation=cv2.INTER_CUBIC)
            return extracted_edges
        else:
            return None

    # Creates the bounding boxes and finds the best one.
    def focus_movement(self, source):

        bounding_box = None

        foreground = cv2.morphologyEx(self.foreground, cv2.MORPH_CLOSE,
                                      self.bounding_box_kernel)
        bounding_ret, bounding_thresh = cv2.threshold(foreground, 91, 255,
                                                      cv2.THRESH_BINARY)
        contours = cv2.findContours(bounding_thresh, cv2.RETR_TREE,
                                    cv2.CHAIN_APPROX_SIMPLE)[-2]

        if len(contours) != 0:
            contour = max(contours, key=cv2.contourArea)
            x_pos, y_pos, width, height = cv2.boundingRect(contour)
            bounding_rect = np.array(
                [[x_pos, y_pos, x_pos + width, y_pos + height]])
            optimal_pick = non_max_suppression(bounding_rect,
                                               probs=None,
                                               overlapThresh=0.65)
            for (x1, y1, x2, y2) in optimal_pick:
                bounding_box = BoundingBox(x1=x1,
                                           x2=x2,
                                           y1=y1,
                                           y2=y2,
                                           width=x2 - x1,
                                           height=y2 - y1)

        return bounding_box

    # Draws the best bounding box to the video feed.
    def draw_bounding_box(self, source, minArea=500, bufferSpace=40):

        width = self.bounding_box.get_width()
        height = self.bounding_box.get_height()
        x_coordinates = self.bounding_box.get_x_coordinates()
        y_coordinates = self.bounding_box.get_y_coordinates()

        box_area = width * height

        if (abs(box_area) > minArea):

            if (width + bufferSpace > height):

                cv2.rectangle(source, (x_coordinates[0], y_coordinates[0]),
                              (x_coordinates[1], y_coordinates[1]),
                              (0, 0, 255), 2)

            else:

                cv2.rectangle(source, (x_coordinates[0], y_coordinates[0]),
                              (x_coordinates[1], y_coordinates[1]),
                              (0, 255, 0), 2)

    # Display the live video feed and/or foreground frame.
    def display_cv(self, showBox=True):
        if self.bounding_box is not None and showBox:
            self.draw_bounding_box(self.detection_frame)

        cv2.imshow('Detection Frame', self.detection_frame)
        cv2.imshow('Foreground Frame', self.foreground)