def main():
    """Open the configured video, run frame-differencing blob tracking,
    and count people crossing the configured line.

    Reads module-level configuration (videopath, gaussian_kernel, threshold
    kernels, debug flags, line, case, ...) and shows result windows until the
    video ends or 'q' is pressed. Returns None.
    """
    filepath = videopath
    # FIX: was the Python-2 statement `print filepath`, inconsistent with the
    # print(...) calls used elsewhere in this function and a SyntaxError on Python 3.
    print(filepath)
    video = cv2.VideoCapture("./" + videopath)
    width = video.get(cv2.CAP_PROP_FRAME_WIDTH)    # float, same as video.get(3)
    height = video.get(cv2.CAP_PROP_FRAME_HEIGHT)  # float, same as video.get(4)
    if not video.isOpened():
        print("Video not Opened!")
        return
    # Maybe we should check if the video has at least 2 frames.
    # Read the first and second frame of the video to start doing processing on them.
    _, imgFrame1 = video.read()
    _, imgFrame2 = video.read()

    # Start framecount as 2 because we just read 2 frames.
    atFrame = 2
    # Up to this point we have no blobs yet.
    blobs = []
    # To count people and pass it as a parameter; a mutable list is used so the
    # callee can update the count in place (doesn't work with a plain int).
    peopleCount = [0]
    seenPeople = set()

    # While the video is open and we don't press the 'q' key: read, process and
    # show a frame.
    while video.isOpened():
        # For every frame, check how many blobs are in the screen.
        currentBlobs = []

        imgFrame1Copy = copy.deepcopy(imgFrame1)
        imgFrame2Copy = copy.deepcopy(imgFrame2)
        imgFrame1Copy = cv2.cvtColor(imgFrame1Copy, cv2.COLOR_BGR2GRAY)
        imgFrame2Copy = cv2.cvtColor(imgFrame2Copy, cv2.COLOR_BGR2GRAY)

        if debugGaussian and debug_mode:
            cv2.imshow('gaussianBlurBefore-Img1', imgFrame1Copy)
            cv2.imshow('gaussianBlurBefore-Img2', imgFrame2Copy)

        imgFrame1Copy = cv2.GaussianBlur(imgFrame1Copy, gaussian_kernel, 0)
        imgFrame2Copy = cv2.GaussianBlur(imgFrame2Copy, gaussian_kernel, 0)

        if debugGaussian and debug_mode:
            cv2.imshow('gaussianBlurAfter-Img1', imgFrame1Copy)
            cv2.imshow('gaussianBlurAfter-Img2', imgFrame2Copy)

        # Motion = absolute per-pixel difference between consecutive frames.
        imgDifference = cv2.absdiff(imgFrame1Copy, imgFrame2Copy)

        if debugGaussian and debug_mode:
            cv2.imshow('dif-Img1-Img2', imgDifference)

        # ret value is used for Otsu's Binarization if we want to:
        # https://docs.opencv.org/3.4.0/d7/d4d/tutorial_py_thresholding.html
        ret, imgThresh = cv2.threshold(imgDifference, threshold_value, 255.0,
                                       cv2.THRESH_BINARY)

        if debugThreshold and debug_mode:
            cv2.imshow('imgThresh', imgThresh)

        # All the pixels near the boundary will be discarded depending upon the
        # size of the kernel; erosion removes white noise.  Dilate/erode twice
        # to close gaps inside blobs and drop small speckles.
        imgThresh = cv2.dilate(imgThresh, kernel_dilate1, iterations=1)
        if debug_dilate:
            cv2.imshow('dilate-dilate1', imgThresh)
        imgThresh = cv2.erode(imgThresh, kernel_erode1, iterations=1)
        if debug_erode:
            cv2.imshow('dilate-erode1', imgThresh)
        imgThresh = cv2.dilate(imgThresh, kernel_dilate2, iterations=1)
        if debug_dilate:
            cv2.imshow('dilate-dilate2', imgThresh)
        imgThresh = cv2.erode(imgThresh, kernel_erode2, iterations=1)
        if debug_erode:
            cv2.imshow('dilate-erode2', imgThresh)

        imgThreshCopy = copy.deepcopy(imgThresh)

        # Contours can be explained simply as a curve joining all the continuous
        # points (along the boundary) having the same color or intensity.  The
        # contours are a useful tool for shape analysis and object detection:
        # https://docs.opencv.org/3.1.0/d4/d73/tutorial_py_contours_begin.html
        # NOTE(review): the 3-value unpacking below is OpenCV 3.x API; OpenCV 2.x
        # and 4.x return only (contours, hierarchy) — confirm the pinned version.
        imgThreshCopy, contours, hierarchy = cv2.findContours(
            imgThreshCopy, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

        drawAndShowContours(imgThreshCopy, contours, 'imgContours')

        # Up here we made all the image-processing stuff; now we work with the
        # info we extracted from the image.  For everything identified on the
        # screen, check if it is a person-sized object.
        for x in contours:
            convexHull = cv2.convexHull(x)
            blob = Blob(convexHull)
            if blob.isObject():
                currentBlobs.append(blob)

        drawAndShowBlobs(imgThresh, currentBlobs, "imgCurrentBlobs")

        if atFrame <= 2:
            # First iteration: nothing to compare against, adopt all current blobs.
            for curBlob in currentBlobs:
                curBlob.id = Blob.getId()
                blobs.append(curBlob)
        else:
            # Otherwise check if each current blob relates to a previous blob
            # and match them.
            matchCurrentFrameBlobsToExistingBlobs(blobs, currentBlobs)

        if debug_all_current_blobs:
            for b in blobs:
                # FIX: was the Python-2 statement `print b`.
                print(b)

        drawAndShowBlobs(imgThresh, blobs, "imgBlobs")

        imgFrame2Copy = copy.deepcopy(imgFrame2)
        drawBlobInfoOnImage(blobs, imgFrame2Copy)

        # Check if any blob crossed the counting line.
        atLeastOneBlobCrossedTheLine = checkIfBlobsCossedTheLine(
            blobs, line, peopleCount, seenPeople, case)

        # If one has crossed, draw the line highlighted; otherwise draw it yellow.
        if atLeastOneBlobCrossedTheLine:
            cv2.line(imgFrame2Copy, (line[0].x, line[0].y),
                     (line[1].x, line[1].y), (255, 0, 255), 2)
        else:
            cv2.line(imgFrame2Copy, (line[0].x, line[0].y),
                     (line[1].x, line[1].y), (0, 255, 255), 2)

        # Draw the counter.
        drawPeopleCounterOnImage(peopleCount, imgFrame2Copy, width, height)
        cv2.imshow('imgFrame2Copy', imgFrame2Copy)

        # Get ready for the next iteration.
        del currentBlobs[:]
        imgFrame1 = copy.deepcopy(imgFrame2)
        if (video.get(cv2.CAP_PROP_POS_FRAMES) + 1) < video.get(cv2.CAP_PROP_FRAME_COUNT):
            _, imgFrame2 = video.read()
        else:
            print("end of video")
            break

        atFrame += 1

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        # In debug mode, block on a keypress every frame so output can be inspected.
        if debug_mode and cv2.waitKey() & 0xFF == ord('q'):
            break

    video.release()
    cv2.destroyAllWindows()
    print("end")
def addNewBlob(curBlob, blobs):
    """Register curBlob as a brand-new tracked blob.

    Assigns it a fresh id from Blob.getId(), marks it as matched so it is not
    treated as stale this frame, and appends it to the tracked-blob list.
    Mutates both arguments in place; returns None.
    """
    curBlob.isMatchFoundOrNewBlob = True
    curBlob.id = Blob.getId()
    blobs.append(curBlob)