def __init__(self):
        """Open the video source and the output recorder and build the feature helper objects.

        NOTE(review): Python 2 / OpenCV 2.x era code (``cv2.cv.CV_FOURCC``); the
        enclosing class header is outside this chunk. Helper classes (Filters,
        MotionDetection, Cascading, DetectBlur, WindowDestruction) are
        project-local — their exact contracts are not visible here.
        """
        # If video source is USB device for testing, we will use the vs and cap variables below.
        #self.vs = WebcamVideoStream(src=0).start()       # so we want to read video in as a stream now so we can
        self.capture = cv2.VideoCapture(0)
        # CHANGE MILES
        # 0 for drone,
        # 1 for webcam in the case of Miles computer

        # If video source is drone, we will use the code below.
        self.fourcc = cv2.cv.CV_FOURCC(*'XVID')
        self.out = cv2.VideoWriter('output.mov', self.fourcc, 20.0, (640, 480))
        # So we want to open application for video driver first, and then run file.
        # Currently the webcam video stream class does not work for video capture, therefore we
        # need to stick to cv2.VideoCapture() until WebcamVideoStream can be optimized for working.

        # Instantiate objects
        self.filters = Filters()                             # Filters for filtering the file.
        self.motionDetection = MotionDetection()             # MotionDetection for grabbing motion.
        self.cascadeDetection = Cascading()                  # Cascading for feature recognition.
        self.blurDetection = DetectBlur(150)                 # 100 would be the value to be used for fine tuning.
        self.destroyWindows = WindowDestruction()

        time.sleep(0.25)                                # Allow camera a few miliseconds for booting up.
        self.firstFrame = None                               # First frame is a variable to be used for motion tracking, firstFrame is the frame being compared for motion change.

        # Initiate toggles.
        self.motionTime = False
        self.cascadeTime = False
        self.blurDetectionTime = False

        # Initialize external variables.
        self.numFrames = 0
        self.ts = time.time()
# Example #2 (scrape separator — the snippet below duplicates Example #1, auto-formatted)
    def __init__(self):
        """Open the video source and the output recorder and build the feature helper objects.

        NOTE(review): auto-formatted duplicate of the previous ``__init__``;
        Python 2 / OpenCV 2.x era code (``cv2.cv.CV_FOURCC``).
        """
        # If video source is USB device for testing, we will use the vs and cap variables below.
        #self.vs = WebcamVideoStream(src=0).start()       # so we want to read video in as a stream now so we can
        self.capture = cv2.VideoCapture(0)
        # CHANGE MILES
        # 0 for drone,
        # 1 for webcam in the case of Miles computer

        # If video source is drone, we will use the code below.
        self.fourcc = cv2.cv.CV_FOURCC(*'XVID')
        self.out = cv2.VideoWriter('output.mov', self.fourcc, 20.0, (640, 480))
        # So we want to open application for video driver first, and then run file.
        # Currently the webcam video stream class does not work for video capture, therefore we
        # need to stick to cv2.VideoCapture() until WebcamVideoStream can be optimized for working.

        # Instantiate objects
        self.filters = Filters()  # Filters for filtering the file.
        self.motionDetection = MotionDetection(
        )  # MotionDetection for grabbing motion.
        self.cascadeDetection = Cascading(
        )  # Cascading for feature recognition.
        self.blurDetection = DetectBlur(
            150)  # 100 would be the value to be used for fine tuning.
        self.destroyWindows = WindowDestruction()

        time.sleep(0.25)  # Allow camera a few miliseconds for booting up.
        self.firstFrame = None  # First frame is a variable to be used for motion tracking, firstFrame is the frame being compared for motion change.

        # Initiate toggles.
        self.motionTime = False
        self.cascadeTime = False
        self.blurDetectionTime = False

        # Initialize external variables.
        self.numFrames = 0
        self.ts = time.time()
# Example #3 (scrape separator)
# import the necessary packages
import datetime
import time
import cv2
from WindowDestruction import WindowDestruction
from WebcamVideoStream import WebcamVideoStream
import numpy as np
from MotionDetection import MotionDetection
from Cascading import Cascading

# Threaded-capture demo: stream frames from the webcam and resize them for
# display/processing.  NOTE(review): this snippet is truncated by the scrape
# — the while-loop body continues past the visible lines.
motion = MotionDetection()
destroyWindows = WindowDestruction()
cascades = Cascading()
# camera = cv2.VideoCapture(0)
camera = WebcamVideoStream(src=0).start() 
time.sleep(0.25)
# initialize the first frame in the video stream
# WE WILL WANT TO UPDATE THIS VARIABLE TO OFTEN CHANGE THE FIRST FRAME
# BASED ON MOVEMENT OF MOTION...WILL BE TRICKY.

cascadeTime = False

# loop over the frames of the video
while True:
    # grab the current frame and initialize the occupied/unoccupied
    # text
    
    frame = camera.read()
    #saveFrame = frame                       # For storing a copy for encoding later on.
    frame = cv2.resize(frame, (500, 500))   # Normalize frame size for downstream processing.
    #(grabbed, frame) = camera.read()
# Example #4 (scrape separator)
import cv2
import numpy as np
import time
import datetime
from Cascading import Cascading
from Detect_Blur import DetectBlur

# Video-file demo: read a recorded drone flight and prepare a background
# subtractor plus (commented-out) re-encoding machinery.  NOTE(review): this
# snippet is truncated by the scrape — the while-loop body continues past the
# visible lines.
blurDetection = DetectBlur(140)
cascade = Cascading()
#capture = cv2.VideoCapture(0)   # Load video feed, we can also load a video from the directory if we wanted too, all we would do is place the path to the directory in here instead of the 0.
capture = cv2.VideoCapture('testFlight.MP4')   # Loading a actual video.
# Will need to change directory for future testing, unless files are in same directory path.
fgbg = cv2.BackgroundSubtractorMOG2()         # The foreground for objects we want, we will subtract those from the actual background.


# Define size of video for reading in (queried from the opened file).
size = (int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)),
        int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)))
# Define FPS
fps = 20

#fourcc = cv2.cv.CV_FOURCC(*'XVID')
fourcc = cv2.cv.CV_FOURCC('m', 'p', '4', 'v')
#vout = cv2.VideoWriter()
#success = vout.open('output.mov', fourcc, fps, size, True)

#out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640, 480))
ts = time.time()
counter = 0
while True:
    ret, frame = capture.read()     # Load the file into our boolean, then asssign it to a frame object.
# Example #5 (scrape separator)
import cv2
import numpy as np
from Cascading import Cascading

# Live-camera demo: run background subtraction and face cascades on each
# frame, record the annotated feed, and exit on ESC.
cascade = Cascading()
#capture = cv2.VideoCapture(0)   # Load video feed, we can also load a video from the directory if we wanted too, all we would do is place the path to the directory in here instead of the 0.
capture = cv2.VideoCapture(0)  # Loading a actual video.
# Will need to change directory for future testing, unless files are in same directory path.
fgbg = cv2.BackgroundSubtractorMOG2(
)  # The foreground for objects we want, we will subtract those from the actual background.
fourcc = cv2.cv.CV_FOURCC(*'XVID')
out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640, 480))

while True:
    ret, frame = capture.read(
    )  # Load the file into our boolean, then asssign it to a frame object.

    # Perform a check to make sure we are getting video returned before performing
    # operations or outputting to screen.
    # BUGFIX: the original applied the background subtractor to `frame`
    # *before* checking `ret`, which crashes on a dropped/end-of-stream
    # frame (frame is None).  Only touch the frame once we know it exists.
    if ret:
        fgmask = fgbg.apply(
            frame
        )  # Lets apply the foregroundbackground to our frame, and assign that to a mask object.
        cascade.faceCascadeDetectionOfImage(frame)
        out.write(frame)  # Write video file.
        cv2.imshow('original', frame)
        cv2.imshow('foreground', fgmask)
    if cv2.waitKey(1) == 27:
        break

    # May need to perform a frame = cv2.flip(frame, 0) for writing video because it will be flipped otherwise...POSSIBLY.

# Release the camera and writer so the output file is finalized.
capture.release()
out.release()
cv2.destroyAllWindows()
# Example #6 (scrape separator)
class CameraSession():
    """Interactive camera session with toggleable vision featurettes.

    Reads frames from a local capture device, records every displayed frame
    to 'output.mov', and lets the operator toggle motion tracking ('m'),
    haar-cascade detection ('c') and blur detection ('b'), or tune their
    thresholds ('r'/'i'/'d'/'q'/'w'), from the keyboard.  ESC exits.

    NOTE(review): Python 2 / OpenCV 2.x era code (``cv2.cv.CV_FOURCC``).
    Helper classes (Filters, MotionDetection, Cascading, DetectBlur,
    WindowDestruction) are project-local.
    """

    def __init__(self):
        """Open the video source and recorder and instantiate the helper objects."""
        # If the video source is a USB device for testing, a threaded
        # WebcamVideoStream could be used instead of cv2.VideoCapture:
        #self.vs = WebcamVideoStream(src=0).start()
        # Device index: 0 for the drone feed, 1 for a local webcam (CHANGE MILES).
        self.capture = cv2.VideoCapture(0)

        # Recorder for the session (legacy OpenCV 2.x fourcc API).
        self.fourcc = cv2.cv.CV_FOURCC(*'XVID')
        self.out = cv2.VideoWriter('output.mov', self.fourcc, 20.0, (640, 480))

        # Project-local helper objects.
        self.filters = Filters()                  # Frame filtering.
        self.motionDetection = MotionDetection()  # Contour-based motion tracking.
        self.cascadeDetection = Cascading()       # Haar-cascade recognition.
        self.blurDetection = DetectBlur(150)      # 150 = blur threshold; fine-tune as needed.
        self.destroyWindows = WindowDestruction()

        time.sleep(0.25)        # Let the camera warm up before grabbing frames.
        self.firstFrame = None  # Reference frame used for motion comparison.

        # Featurette toggles.
        self.motionTime = False
        self.cascadeTime = False
        self.blurDetectionTime = False

        # Misc session state.
        self.numFrames = 0
        self.ts = time.time()

    def main(self):
        """Run the capture/annotate/record loop until ESC is pressed."""
        counter = 0  # Snapshots saved by blur detection (used in filenames).
        while True:
            # CHANGE MILES: threaded alternative -> frame = self.vs.read()
            ret, frame = self.capture.read()
            if not ret:
                continue  # No frame grabbed this iteration; try again.

            grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # Grayscale for analysis.
            # Gaussian blur (21x21) suppresses sensor noise before motion diffing.
            grayGausFrame = cv2.GaussianBlur(grayFrame, (21, 21), 0)

            # BUGFIX: the original chained several separate cv2.waitKey()
            # calls in one if/elif ladder.  Each call blocks again and
            # consumes a *different* keyboard event, so toggles fired
            # unreliably.  Read the key exactly once per frame instead.
            key = cv2.waitKey(3) & 0xFF

            if key == ord('m'):  # Toggle motion tracking.
                print("You initiated the motion tracking featurette")
                self.motionTime = not self.motionTime
                if not self.motionTime:
                    print("Motion tracking is turned off.")
            elif key == ord('c'):  # Toggle cascade detection.
                print("You initiated the cascade image recogntion feature")
                self.cascadeTime = not self.cascadeTime
                if not self.cascadeTime:
                    print("Cascading is turned off.")
            elif key == ord('b'):  # Toggle blur detection.
                print("You initiated the blur detection featurette.")
                self.blurDetectionTime = not self.blurDetectionTime
                if not self.blurDetectionTime:
                    print("Blur detection is turned off.")
            elif key == ord('r'):  # Refresh motion tracking's reference frame.
                print("You pressed refresh")
                self.motionDetection.refreshFirstFrame(frame)
                continue
            elif key == ord('i'):  # Raise the motion-contour area threshold.
                print("You increased the motion detection thresh value to " +
                      str(self.motionDetection.cornerDetectionThresh))
                self.motionDetection.increaseCornerDetectionThresh(100)
                continue
            elif key == ord('d'):  # Lower the motion-contour area threshold.
                print("You decreased the motion detection thresh value to " +
                      str(self.motionDetection.cornerDetectionThresh))
                self.motionDetection.decreaseCornerDetectionThresh(100)
                continue
            elif key == ord('q'):  # Raise the blur threshold by 75.
                self.blurDetection.setBlurThresh(
                    self.blurDetection.getBlurThresh() + 75)
                print("You increased blur detection thresh to " +
                      str(self.blurDetection.getBlurThresh()))
                continue
            elif key == ord('w'):  # Lower the blur threshold by 75.
                self.blurDetection.setBlurThresh(
                    self.blurDetection.getBlurThresh() - 75)
                print("You decreased blur detection thresh to " +
                      str(self.blurDetection.getBlurThresh()))
                continue
            elif self.motionDetection.firstFrame is None:
                # Bootstrap motion tracking with the first frame we see.
                self.motionDetection.refreshFirstFrame(frame)
                continue

            # Featurette rendering.
            if self.motionTime:
                cnts, frameDelta = self.motionDetection.showMotion(grayFrame)
                for c in cnts:
                    # Skip contours below the tunable area threshold (~500 is
                    # good indoors; 4500+ to pick up whole-person motion).
                    if cv2.contourArea(c) < self.motionDetection.cornerDetectionThresh:
                        continue
                    # Draw the bounding box of the moving region.
                    (x, y, w, h) = cv2.boundingRect(c)
                    cv2.rectangle(frame, (x, y), (x + w, y + h),
                                  (255, 0, 0), 2)
            elif self.cascadeTime:  # Haar-cascade face detection.
                self.cascadeDetection.faceCascadeDetectionOfImage(frame)
            elif self.blurDetectionTime:
                # Variance of the Laplacian on the gray frame: higher = sharper.
                fm = self.blurDetection.variance_of_laplacian(grayFrame)
                # Save frames that are well above the threshold (i.e. sharp).
                if fm > self.blurDetection.getBlurThresh() * 5:
                    # CHANGE MILES: could prefix with a directory / timestamp.
                    myDirName = str(counter) + "Image" + ".jpg"
                    cv2.imwrite(myDirName, frame)
                    counter += 1
                text = ""  # Placeholder label shown next to the sharpness value.
                cv2.putText(frame, "{}: {:.2f}".format(text, fm), (10, 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 3)

            # Render and record the (possibly annotated) frame.
            cv2.imshow("Frame", frame)
            self.out.write(frame)  # Writing frame, so outputting frame to file.
            if key == 27:  # ESC exits the session.
                break

        # CHANGE MILES: threaded alternative -> self.vs.stop()
        self.capture.release()
        cv2.destroyAllWindows()
        self.destroyWindows.windowDestroyer(1)
class CameraSession():
    """Interactive camera session with toggleable vision featurettes (duplicate copy).

    Grabs frames from a capture device, records every displayed frame to
    'output.mov', and lets the user flip motion tracking ('m'), cascade
    detection ('c') and blur detection ('b') on/off, or adjust thresholds
    ('r'/'i'/'d'/'q'/'w'), via the keyboard.  ESC quits.

    NOTE(review): Python 2 / OpenCV 2.x era code; helper classes are
    project-local, their contracts are not visible in this chunk.
    """

    def __init__(self):
        """Open the camera, the video recorder, and the feature helper objects."""
        # A threaded WebcamVideoStream could replace cv2.VideoCapture for USB testing:
        #self.vs = WebcamVideoStream(src=0).start()
        # Device index: 0 for the drone feed, 1 for a local webcam (CHANGE MILES).
        self.capture = cv2.VideoCapture(0)

        # Session recorder (legacy OpenCV 2.x fourcc API).
        self.fourcc = cv2.cv.CV_FOURCC(*'XVID')
        self.out = cv2.VideoWriter('output.mov', self.fourcc, 20.0, (640, 480))

        # Project-local helper objects.
        self.filters = Filters()                  # Frame filtering.
        self.motionDetection = MotionDetection()  # Contour-based motion tracking.
        self.cascadeDetection = Cascading()       # Haar-cascade recognition.
        self.blurDetection = DetectBlur(150)      # 150 = blur threshold; fine-tune as needed.
        self.destroyWindows = WindowDestruction()

        time.sleep(0.25)        # Camera warm-up pause.
        self.firstFrame = None  # Reference frame for motion comparison.

        # Featurette toggles.
        self.motionTime = False
        self.cascadeTime = False
        self.blurDetectionTime = False

        # Misc session state.
        self.numFrames = 0
        self.ts = time.time()

    def main(self):
        """Capture, annotate, display and record frames until ESC is pressed."""
        counter = 0  # Snapshots saved by blur detection (used in filenames).
        while True:
            # CHANGE MILES: threaded alternative -> frame = self.vs.read()
            ret, frame = self.capture.read()
            if not ret:
                continue  # No frame returned; retry.

            grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # Grayscale for analysis.
            # 21x21 Gaussian blur reduces noise ahead of motion differencing.
            grayGausFrame = cv2.GaussianBlur(grayFrame, (21, 21), 0)

            # BUGFIX: each cv2.waitKey() call in the original elif ladder
            # blocked again and consumed a *different* key event, so the
            # keyboard toggles were unreliable.  Poll the keyboard once per
            # frame and compare the single result everywhere below.
            key = cv2.waitKey(3) & 0xFF

            if key == ord('m'):  # Toggle motion tracking.
                print("You initiated the motion tracking featurette")
                self.motionTime = not self.motionTime
                if not self.motionTime:
                    print("Motion tracking is turned off.")
            elif key == ord('c'):  # Toggle cascade detection.
                print("You initiated the cascade image recogntion feature")
                self.cascadeTime = not self.cascadeTime
                if not self.cascadeTime:
                    print("Cascading is turned off.")
            elif key == ord('b'):  # Toggle blur detection.
                print("You initiated the blur detection featurette.")
                self.blurDetectionTime = not self.blurDetectionTime
                if not self.blurDetectionTime:
                    print("Blur detection is turned off.")
            elif key == ord('r'):  # Reset the motion-tracking reference frame.
                print("You pressed refresh")
                self.motionDetection.refreshFirstFrame(frame)
                continue
            elif key == ord('i'):  # Raise the motion-contour area threshold.
                print("You increased the motion detection thresh value to " +
                      str(self.motionDetection.cornerDetectionThresh))
                self.motionDetection.increaseCornerDetectionThresh(100)
                continue
            elif key == ord('d'):  # Lower the motion-contour area threshold.
                print("You decreased the motion detection thresh value to " +
                      str(self.motionDetection.cornerDetectionThresh))
                self.motionDetection.decreaseCornerDetectionThresh(100)
                continue
            elif key == ord('q'):  # Raise the blur threshold by 75.
                self.blurDetection.setBlurThresh(
                    self.blurDetection.getBlurThresh() + 75)
                print("You increased blur detection thresh to " +
                      str(self.blurDetection.getBlurThresh()))
                continue
            elif key == ord('w'):  # Lower the blur threshold by 75.
                self.blurDetection.setBlurThresh(
                    self.blurDetection.getBlurThresh() - 75)
                print("You decreased blur detection thresh to " +
                      str(self.blurDetection.getBlurThresh()))
                continue
            elif self.motionDetection.firstFrame is None:
                # Seed motion tracking with the first available frame.
                self.motionDetection.refreshFirstFrame(frame)
                continue

            # Featurette rendering.
            if self.motionTime:
                cnts, frameDelta = self.motionDetection.showMotion(grayFrame)
                for c in cnts:
                    # Ignore contours below the tunable area threshold (~500
                    # works in a room; 4500+ for whole-person motion).
                    if cv2.contourArea(c) < self.motionDetection.cornerDetectionThresh:
                        continue
                    # Box the moving region on the displayed frame.
                    (x, y, w, h) = cv2.boundingRect(c)
                    cv2.rectangle(frame, (x, y), (x + w, y + h),
                                  (255, 0, 0), 2)
            elif self.cascadeTime:  # Haar-cascade face detection.
                self.cascadeDetection.faceCascadeDetectionOfImage(frame)
            elif self.blurDetectionTime:
                # Laplacian variance of the gray frame: higher = sharper image.
                fm = self.blurDetection.variance_of_laplacian(grayFrame)
                # Persist frames that are clearly sharp (well above threshold).
                if fm > self.blurDetection.getBlurThresh() * 5:
                    # CHANGE MILES: could prefix with a directory / timestamp.
                    myDirName = str(counter) + "Image" + ".jpg"
                    cv2.imwrite(myDirName, frame)
                    counter += 1
                text = ""  # Placeholder label rendered next to the sharpness value.
                cv2.putText(frame, "{}: {:.2f}".format(text, fm), (10, 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 3)

            # Show and record the (possibly annotated) frame.
            cv2.imshow("Frame", frame)
            self.out.write(frame)  # Writing frame, so outputting frame to file.
            if key == 27:  # ESC ends the session.
                break

        # CHANGE MILES: threaded alternative -> self.vs.stop()
        self.capture.release()
        cv2.destroyAllWindows()
        self.destroyWindows.windowDestroyer(1)
# For this program I'm testing the use of thresholding by applying different filters
# and seeing how easy it is to detect corners and objects within the camera frame.

# CLI/Dropbox demo: parse a JSON config path, open a threaded webcam stream,
# and optionally start the Dropbox OAuth handshake.  NOTE(review): truncated
# by the scrape — the authorization flow continues past the visible lines.
ap = argparse.ArgumentParser()
ap.add_argument("-c",
                "--conf",
                required=True,
                help="path to the json configuration file")
args = vars(ap.parse_args())

vs = WebcamVideoStream(
    src=0).start()  # so we want to read video in as a stream now so we can
#cap = cv2.VideoCapture(0)
filters = Filters()
cascades = Cascading()
blurDetection = DetectBlur(120)
imgCmpr = ImageCompression()

conf = json.load(open(args["conf"]))  # Load the json file.
client = None  # Dropbox client; stays None unless use_dropbox is set.

# check to see if the Dropbox should be used
if conf["use_dropbox"]:
    # connect to dropbox and start the session authorization process
    flow = DropboxOAuth2FlowNoRedirect(conf["dropbox_key"],
                                       conf["dropbox_secret"])
    print "[INFO] Authorize this application: {}".format(flow.start())
    authCode = raw_input("Enter auth code here: ").strip()

    # finish the authorization and grab the Dropbox client
import cv2
import numpy as np
from Cascading import Cascading

# Live-camera demo (unformatted duplicate of Example #5): background
# subtraction plus face cascades per frame, recorded to disk; ESC exits.
cascade = Cascading()
#capture = cv2.VideoCapture(0)   # Load video feed, we can also load a video from the directory if we wanted too, all we would do is place the path to the directory in here instead of the 0.
capture = cv2.VideoCapture(0)   # Loading a actual video.
# Will need to change directory for future testing, unless files are in same directory path.
fgbg = cv2.BackgroundSubtractorMOG2()         # The foreground for objects we want, we will subtract those from the actual background.
fourcc = cv2.cv.CV_FOURCC(*'XVID')
out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640, 480))

while True:
    ret, frame = capture.read()     # Load the file into our boolean, then asssign it to a frame object.

    # Perform a check to make sure we are getting video returned before performing
    # operations or outputting to screen.
    # BUGFIX: the original ran fgbg.apply(frame) *before* checking `ret`,
    # which crashes when the camera returns no frame (frame is None).
    if ret:
        fgmask = fgbg.apply(frame)  # Lets apply the foregroundbackground to our frame, and assign that to a mask object.
        cascade.faceCascadeDetectionOfImage(frame)
        out.write(frame)               # Write video file.
        cv2.imshow('original', frame)
        cv2.imshow('foreground', fgmask)
    if cv2.waitKey(1) == 27:
        break

    # May need to perform a frame = cv2.flip(frame, 0) for writing video because it will be flipped otherwise...POSSIBLY.

#cv2.imshow('original', frame)       # Create our frame for the original.
#   cv2.imshow('foreground', fgmask)    # Creat the frame for the mask after subtraction.

# Release the camera and writer so the output file is finalized.
capture.release()
out.release()
cv2.destroyAllWindows()