################################################################################
# Preprocessing stuff
# cv2.namedWindow('cannyOutput')
################################################################################

# Capture from the default camera and learn a static (unmoving) background.
c = cv2.VideoCapture(0)
_, f = c.read()
avg2 = np.float32(f)  # running-average accumulator seeded with the first frame
background = BackgroundRemoval.preprocessbackground(c, f, avg2)

# Warm-up read; this frame is discarded by the loop's own read below.
_, f = c.read()
gray = cv2.cvtColor(f, cv2.COLOR_BGR2GRAY)

while True:
    _, f = c.read()
    gray = cv2.cvtColor(f, cv2.COLOR_BGR2GRAY)
    # NOTE(review): a sibling copy of this loop passes the colour frame `f`
    # here instead of `gray` -- confirm which input removebackground expects.
    image_nobackground = BackgroundRemoval.removebackground(gray, background)

    # Mask each colour channel with the background-removed image.
    b, g, r = cv2.split(f)
    nb = np.minimum(image_nobackground, b)
    ng = np.minimum(image_nobackground, g)
    nr = np.minimum(image_nobackground, r)

    # BUG FIX: the original called normalized(ng, nr, ng) -- the green channel
    # was passed twice and the blue channel never.  Pass the three masked
    # channels in order instead.
    # TODO(review): bn/gn/rn are unused below; the merge was probably meant to
    # use them -- left merging nb/ng/nr to preserve the current output.
    bn, gn, rn = normalized(nb, ng, nr)
    backgroundRemovedImage = cv2.merge((nb, ng, nr))

    # Colour segmentation: keep only the magenta blob, then edge-detect it.
    res = ColorSegmenter.getMagentaBlob(backgroundRemovedImage)
    objectdetection = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
    edged = cv2.Canny(objectdetection, 100, 250)

    # Close small gaps in the edge map so contours form closed curves.
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (7, 7))
    closed = cv2.morphologyEx(edged, cv2.MORPH_CLOSE, kernel)

    # Named constants instead of the magic numbers 2, 1
    # (RETR_CCOMP == 2, CHAIN_APPROX_NONE == 1 -- same behaviour).
    contours, hierarchy = cv2.findContours(closed, cv2.RETR_CCOMP,
                                           cv2.CHAIN_APPROX_NONE)
    areaContours = []
# NOTE(review): mangled chunk -- whitespace/newlines were lost in this paste.
# It contains (1) the tail of a CLAHE channel-normalisation helper (ends at
# "return b1,g1,r1"; its def line is outside this view) and (2) a second copy
# of the capture/segmentation loop, itself cut off mid-loop after
# getMagentaBlob.  Code left byte-identical; reconstruct the line breaks
# before editing.  The duplicated normalized(ng,nr,ng) call here repeats the
# green channel and omits blue -- presumably a copy-paste bug; verify intent.
r1 = clahe.apply(r) #bgr = cv2.merge((b1,g1,r1)) return b1,g1,r1 c = cv2.VideoCapture(0) _,f = c.read() avg2 = np.float32(f) # Get clean - unmoving background background=BackgroundRemoval.preprocessbackground(c, f, avg2) # get new frame _,f = c.read() gray=cv2.cvtColor(f, cv2.COLOR_BGR2GRAY) while True: _,f = c.read() # Remove fixed background from image mask=BackgroundRemoval.removebackground(f, background) #split channels to remove backround and normalize color b,g,r = cv2.split(f) # remove background nb=np.minimum(mask, b) ng=np.minimum(mask, g) nr=np.minimum(mask, r) # normalize color bn,gn,rn = normalized(ng,nr,ng) # return to color image backgroundRemovedImage=cv2.merge((nb, ng, nr)) # Color segmentation - Filter out non magenta res = ColorSegmenter.getMagentaBlob(backgroundRemovedImage)