# Detect motion regions in the current grayscale frame; `cnts` are the motion
# contours, `frameDelta` is the diff image against the reference frame.
cnts, frameDelta = motion.showMotion(gray)
# print(cnts)

for c in cnts:
    # If the contour is too small, ignore it.
    # In respect to fine tuning, around 500 looks optimal for testing in a
    # room-like environment. If we only want to notice someone walking in
    # (motion in an undetected scene), jump the threshold to 4500+.
    if cv2.contourArea(c) < motion.cornerDetectionThresh:  # Value to fine tune.
        continue
    # Compute the bounding box for the contour and draw it on the frame
    # (blue box, 2 px thick).
    (x, y, w, h) = cv2.boundingRect(c)
    cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)

if cascadeTime:  # If we chose to perform haarcascading
    cascades.eyeCascadeDetectionOfImage(frame)

# NOTE(review): the block below was disabled with an unterminated triple-quoted
# string in the mangled original; it is closed here so the file parses. It holds
# superseded frame-prep code kept for reference — confirm it is safe to delete.
'''
# if the frame could not be grabbed, then we have reached the end
# of the video
#if not grabbed:
#    break

# resize the frame, convert it to grayscale, and blur it
#frame = cv2.resize(frame, (500, 500))

gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # convert to grayscale; color is irrelevant here
gray = cv2.GaussianBlur(gray, (31, 31), 0)  # smooth out noise; originally 21x21

# if the first frame is None, initialize it
'''