lower2 = np.array([170, 120, 70])
upper2 = np.array([180, 255, 255])
mask2 = cv2.inRange(imgHSV, lower2, upper2)
mask += mask2

imgIsolated = cv2.bitwise_and(img, img, mask=mask)        # Image with only the chosen color
antiMask = cv2.bitwise_not(mask)                          # Reverse mask for filtering out the chosen color
imgFiltered = cv2.bitwise_and(img, img, mask=antiMask)    # Image without the chosen color

# If the user wants to see the optional virtual background
if optional == 'y':
    background = input("Choose a background image to replace the color with: ")
    background = cv2.imread(background)
    background = cv2.resize(background, (img.shape[1], img.shape[0]))  # Resize background so both images are the same size
    bg_filtered = cv2.bitwise_and(background, background, mask=mask)
    gScreen = cv2.addWeighted(imgFiltered, 1, bg_filtered, 1, 0)
else:
    gScreen = np.ones_like(img)

imgCollage = stackImages(1, [[img, imgIsolated], [imgFiltered, gScreen]])
cv2.imshow("Result", imgCollage)
cv2.waitKey(0)
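# The fragment above starts mid-script: it assumes an image has already been loaded,
# converted to HSV and masked with a first red hue range, and that the user has been
# asked whether to apply a virtual background. A minimal sketch of that assumed setup
# (names such as lower1/upper1, the image path and the prompt text are illustrative,
# not taken from the original file):
import cv2
import numpy as np
from util import stackImages

img = cv2.imread("Resources/impostor.jpg")
imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

# Red wraps around the hue axis, so two ranges are combined; this is the first one.
lower1 = np.array([0, 120, 70])
upper1 = np.array([10, 255, 255])
mask = cv2.inRange(imgHSV, lower1, upper1)

optional = input("Replace the isolated color with a background image? (y/n): ")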
# Imports and the frame width are defined earlier in the original script and are not
# shown in this fragment; they are assumed to look like this:
import cv2
import imutils
from util import stackImages

widthImg = 800   # assumed value; the original is not shown
heightImg = 600
MIN_AREA = 100
###################################

# viewing webcam
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, widthImg)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, heightImg)
cap.set(cv2.CAP_PROP_BRIGHTNESS, 120)

while True:
    success, img = cap.read()
    resultImg = img.copy()
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    imgBlur = cv2.GaussianBlur(imgGray, (7, 7), 0)
    imgCanny = cv2.Canny(imgBlur, 100, 100)

    contours = cv2.findContours(imgCanny, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    contours = imutils.grab_contours(contours)
    contours = sorted(contours, key=cv2.contourArea, reverse=True)[:10]

    # Look for the first roughly rectangular contour among the ten largest ones
    numberPlateContour = None
    for cont in contours:
        perimeter = cv2.arcLength(cont, True)
        approx = cv2.approxPolyDP(cont, 0.018 * perimeter, True)
        if len(approx) == 4:
            numberPlateContour = cont
            break

    # Only draw the plate contour if one was actually found
    if numberPlateContour is not None:
        cv2.drawContours(resultImg, numberPlateContour, -1, (255, 0, 0), 3)

    result = stackImages(0.5, ([img, resultImg], [imgBlur, imgCanny]))
    cv2.imshow("Result", result)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# Joining images
import cv2
import numpy as np
from util import stackImages

img = cv2.imread("Resources/impostor.jpg")
imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

imgStack = stackImages(0.5, ([img, imgGray, img], [img, imgGray, img]))

imgHorizontal = np.hstack((img, img))
imgVertical = np.vstack((img, img))

# cv2.imshow("Horizontal", imgHorizontal)
# cv2.imshow("Vertical", imgVertical)
cv2.imshow("Image stack", imgStack)
cv2.waitKey(0)
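# stackImages comes from a local util module that is not part of this listing. A
# minimal sketch of what such a helper might look like, assuming it takes a scale
# factor and either a flat list of images or a list of rows, resizes everything to
# the size of the first image, converts grayscale images to BGR so they can be
# concatenated, and returns one collage. (The document-scanner code further below
# uses a variant with the signature stackImages(imageArray, scale, labels).)
import cv2
import numpy as np

def stackImages(scale, imgArray):
    # Accept either a flat list of images (treated as one row) or a list of rows
    if not isinstance(imgArray[0], (list, tuple)):
        imgArray = [imgArray]
    refH, refW = imgArray[0][0].shape[:2]
    rows = []
    for row in imgArray:
        resized = []
        for im in row:
            im = cv2.resize(im, (refW, refH))
            if len(im.shape) == 2:  # grayscale -> 3 channels so hstack/vstack work
                im = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)
            resized.append(im)
        rows.append(np.hstack(resized))
    collage = np.vstack(rows)
    if scale != 1:
        collage = cv2.resize(collage, (0, 0), fx=scale, fy=scale)
    return collage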
shapeType = "Rectangle" elif approxNumCorners > 4: shapeType = "Circle" print(shapeType) cv2.rectangle(resultImg, (x, y), (x + w, y + h), (255, 255, 255), 3) cv2.putText(resultImg, shapeType, (x + (w // 2) - 10, y + (h // 2) - 10), cv2.FONT_HERSHEY_COMPLEX, 0.7, (125, 255, 255), 2) path = "Resources/shapes.png" img = cv2.imread(path) resultImg = np.zeros_like(img) # First step - convert it to grayscale image imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # Second step - blur the gray image imgBlur = cv2.GaussianBlur(imgGray, (7, 7), 1) # Third step - image canny imgCanny = cv2.Canny(imgBlur, 50, 50) getContours(imgCanny) # Optional - stack images, so we do not need to open multiple windows imgStack = stackImages(0.8, ([imgGray, imgBlur], [imgCanny, resultImg])) cv2.imshow("Result", imgStack) cv2.waitKey(0)
cap = cv2.VideoCapture(0)

while True:
    ### Storing trackbar values in real time ###
    h_min = cv2.getTrackbarPos("Hue min", "Trackbars")
    h_max = cv2.getTrackbarPos("Hue max", "Trackbars")
    s_min = cv2.getTrackbarPos("Sat min", "Trackbars")
    s_max = cv2.getTrackbarPos("Sat max", "Trackbars")
    v_min = cv2.getTrackbarPos("Val min", "Trackbars")
    v_max = cv2.getTrackbarPos("Val max", "Trackbars")

    ### Setting boundaries for the mask in real time ###
    lower = np.array([h_min, s_min, v_min])
    upper = np.array([h_max, s_max, v_max])
    # upper = np.array([10, 255, 233])  # Detecting red apple

    ### Prepping images ###
    success, img = cap.read()
    imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)  # Convert BGR to HSV for color detection
    mask1 = cv2.inRange(imgHSV, lower, upper)
    imgOutput = cv2.bitwise_and(img, img, mask=mask1)

    # cv2.imshow("Original", img)
    # cv2.imshow("Apples", imgHSV)
    # cv2.imshow("Mask", mask1)
    # cv2.imshow("Color Detection", imgOutput)
    imgCollage = stackImages(1, [img, imgOutput])
    cv2.imshow("Color Detecting", imgCollage)
    cv2.waitKey(1)
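# The loop above reads six sliders from a "Trackbars" window that the original script
# creates before the loop starts but which is not shown here. A minimal sketch of that
# assumed setup; only the window and trackbar names are taken from the calls above,
# the default slider positions are illustrative:
def empty(value):
    pass  # no-op callback required by cv2.createTrackbar

cv2.namedWindow("Trackbars")
cv2.resizeWindow("Trackbars", 640, 240)
cv2.createTrackbar("Hue min", "Trackbars", 0, 179, empty)
cv2.createTrackbar("Hue max", "Trackbars", 179, 179, empty)
cv2.createTrackbar("Sat min", "Trackbars", 0, 255, empty)
cv2.createTrackbar("Sat max", "Trackbars", 255, 255, empty)
cv2.createTrackbar("Val min", "Trackbars", 0, 255, empty)
cv2.createTrackbar("Val max", "Trackbars", 255, 255, empty)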
def autoprocess(cap, src):
    ########################################################################
    # usercount = os.getenv('COUNT')
    # IPCamFeed =
    # DefaultCameFeed =
    pathImage = "1.jpg"
    # cap = cv2.VideoCapture(0)
    # url = "http://192.168.1.18:8080/shot.jpg"
    # cap = cv2.VideoCapture(url)
    cap.set(10, 160)  # property 10 = brightness
    heightImg = 640
    widthImg = 480
    url = 'http://192.168.43.1:8080/shot.jpg'
    ########################################################################

    util.initializeTrackbars()
    count = 0

    while True:
        if src == 2:
            # success, img = cap.read()
            img_resp = requests.get(url)
            img_arr = np.array(bytearray(img_resp.content), dtype=np.uint8)
            img = cv2.imdecode(img_arr, -1)
        elif src == 1:
            ret, img = cap.read()
        else:
            img = cv2.imread(pathImage)

        img = cv2.resize(img, (widthImg, heightImg))  # RESIZE IMAGE
        imgBlank = np.zeros((heightImg, widthImg, 3), np.uint8)  # CREATE A BLANK IMAGE FOR TESTING/DEBUGGING IF REQUIRED
        imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # CONVERT IMAGE TO GRAY SCALE
        imgBlur = cv2.GaussianBlur(imgGray, (5, 5), 1)  # ADD GAUSSIAN BLUR
        thres = util.valTrackbars()  # GET TRACKBAR VALUES FOR THRESHOLDS
        imgThreshold = cv2.Canny(imgBlur, thres[0], thres[1])  # APPLY CANNY EDGE DETECTION
        kernel = np.ones((5, 5))
        imgDial = cv2.dilate(imgThreshold, kernel, iterations=2)  # APPLY DILATION
        imgThreshold = cv2.erode(imgDial, kernel, iterations=1)  # APPLY EROSION

        # FIND ALL CONTOURS
        imgContours = img.copy()  # COPY IMAGE FOR DISPLAY PURPOSES
        imgBigContour = img.copy()  # COPY IMAGE FOR DISPLAY PURPOSES
        contours, hierarchy = cv2.findContours(imgThreshold, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cv2.drawContours(imgContours, contours, -1, (0, 255, 0), 10)  # DRAW ALL DETECTED CONTOURS

        # FIND THE BIGGEST CONTOUR
        biggest, maxArea = util.biggestContour(contours)
        if biggest.size != 0:
            biggest = util.reorder(biggest)
            cv2.drawContours(imgBigContour, biggest, -1, (0, 255, 0), 20)  # DRAW THE BIGGEST CONTOUR
            imgBigContour = util.drawRectangle(imgBigContour, biggest, 2)
            pts1 = np.float32(biggest)  # PREPARE POINTS FOR WARP
            pts2 = np.float32([[0, 0], [widthImg, 0], [0, heightImg], [widthImg, heightImg]])  # PREPARE POINTS FOR WARP
            matrix = cv2.getPerspectiveTransform(pts1, pts2)
            imgWarpColored = cv2.warpPerspective(img, matrix, (widthImg, heightImg))

            # REMOVE 20 PIXELS FROM EACH SIDE
            imgWarpColored = imgWarpColored[20:imgWarpColored.shape[0] - 20, 20:imgWarpColored.shape[1] - 20]
            imgWarpColored = cv2.resize(imgWarpColored, (widthImg, heightImg))

            # APPLY ADAPTIVE THRESHOLD
            imgWarpGray = cv2.cvtColor(imgWarpColored, cv2.COLOR_BGR2GRAY)
            imgAdaptiveThre = cv2.adaptiveThreshold(imgWarpGray, 255, 1, 1, 7, 2)
            imgAdaptiveThre = cv2.bitwise_not(imgAdaptiveThre)
            imgAdaptiveThre = cv2.medianBlur(imgAdaptiveThre, 3)

            # Canny_detector and tess (OCR) are defined elsewhere in the original module
            canny_img = Canny_detector(img)
            tess(imgWarpColored)

            # Image array for display
            # imageArray = ([img, imgGray, imgThreshold, imgContours],
            #               [imgBigContour, imgWarpColored, imgWarpGray, imgAdaptiveThre])
            imageArray = ([img, imgContours],
                          [imgBigContour, imgAdaptiveThre])
            if count <= 0:
                cv2.imwrite("doc.jpeg", imgAdaptiveThre)
                count += 1
            cases = 1
            labels = [["Original", "Contours"],
                      ["Biggest Contour", "Adaptive Threshold"]]
            stackedImage = util.stackImages(imageArray, 0.75, labels)
            cv2.imshow("Result", stackedImage)

            if cases == 1 and cv2.waitKey(25) & 0xFF == ord('s'):
                cv2.imwrite("auto/autodoc" + str(time.time()) + ".jpg", imgAdaptiveThre)
                print("saved")
        else:
            # imageArray = ([img, imgGray, imgThreshold, imgContours],
            #               [imgBlank, imgBlank, imgBlank, imgBlank])
            imageArray = ([img, imgContours],
                          [img, img])
            labels = [["Original", "Contours"],
                      ["No Contour", "No Adaptive Threshold"]]
            cases = 2
            stackedImage = util.stackImages(imageArray, 0.75, labels)
            cv2.imshow("Result", stackedImage)

        # # LABELS FOR DISPLAY
        # labels = [["Original", "Gray", "Threshold", "Contours"],
        #           ["Biggest Contour", "Warp Perspective", "Warp Gray", "Adaptive Threshold"]]
        # stackedImage = util.stackImages(imageArray, 0.75, labels)
        # cv2.imshow("Result", stackedImage)

        # SHOW A "SCAN SAVED" BANNER WHEN THE 's' KEY IS PRESSED
        if cv2.waitKey(1) & 0xFF == ord('s'):
            cv2.rectangle(stackedImage,
                          ((int(stackedImage.shape[1] / 2) - 230), int(stackedImage.shape[0] / 2) + 50),
                          (1100, 350), (0, 255, 0), cv2.FILLED)
            cv2.putText(stackedImage, "Scan Saved",
                        (int(stackedImage.shape[1] / 2) - 200, int(stackedImage.shape[0] / 2)),
                        cv2.FONT_HERSHEY_DUPLEX, 3, (0, 0, 255), 5, cv2.LINE_AA)
            cv2.imshow('Result', stackedImage)
            cv2.waitKey(300)
        print(cases)
path = "Resources/impostor.jpg" cv2.namedWindow("TrackBars") cv2.resizeWindow("TrackBars", 640, 240) cv2.createTrackbar("Hue Min", "TrackBars", 0, 179, empty) cv2.createTrackbar("Hue Max", "TrackBars", 19, 179, empty) cv2.createTrackbar("Saturation Min", "TrackBars", 110, 255, empty) cv2.createTrackbar("Saturation Max", "TrackBars", 240, 255, empty) cv2.createTrackbar("Value Min", "TrackBars", 153, 255, empty) cv2.createTrackbar("Value Max", "TrackBars", 255, 255, empty) while True: img = cv2.imread(path) imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) h_min = cv2.getTrackbarPos("Hue Min", "TrackBars") h_max = cv2.getTrackbarPos("Hue Max", "TrackBars") sat_min = cv2.getTrackbarPos("Saturation Min", "TrackBars") sat_max = cv2.getTrackbarPos("Saturation Max", "TrackBars") val_min = cv2.getTrackbarPos("Value Min", "TrackBars") val_max = cv2.getTrackbarPos("Value Max", "TrackBars") lower = np.array([h_min, sat_min, val_min]) higher = np.array([h_max, sat_max, val_max]) mask = cv2.inRange(imgHSV, lower, higher) colorExtractedImage = cv2.bitwise_and(img, img, mask=mask) imgStack = stackImages(0.4, ([img, imgHSV], [mask, colorExtractedImage])) # cv2.imshow("Original", img) # cv2.imshow("HSV", imgHSV) # cv2.imshow("Mask", mask) # cv2.imshow("Color Detected", colorExtractedImage) cv2.imshow("Result", imgStack) cv2.waitKey(1)