def updateRobotPos():
    """Track the robot with the overhead camera and publish its pose.

    Calibrates the camera-to-world transform from an initial burst of
    frames, then loops forever updating the pose globals, the
    cross-track error, and both debug image panels.
    """
    global cte, x, y, theta, env
    stream = WebcamVideoStream(src=int(sys.argv[1]))
    stream.start()
    print("Setting up...")
    # Collect a short burst of frames so setup() can calibrate from
    # several samples rather than a single (possibly noisy) image.
    calibration_frames = []
    for _ in range(15):
        calibration_frames.append(stream.read())
        time.sleep(0.1)
    view_transform = setup(calibration_frames)
    while True:
        snapshot = stream.read()
        x, y, theta, annotated = getRobotPosition(snapshot, view_transform)
        print(x, y, theta)
        print("")
        cte = y  # cross-track error: lateral (y) offset of the robot
        updateImage(annotated)
        env.setRobotPose(x, y, theta)
        updateImage2(env.visualizeMap())
        time.sleep(0.01)
def _detect(self):
    """Detect faces in the webcam stream and eyes within each face.

    Runs until 'q' is pressed: draws a blue rectangle around every
    detected face and, when exactly two eyes are found inside a face,
    green rectangles around the eyes. Displays the annotated frame.
    """
    video_stream = WebcamVideoStream()
    video_stream.start()

    # Cascade classifiers for frontal faces and eyes.
    face_cascade = cv2.CascadeClassifier(
        'haarcascades/haarcascade_frontalface_default.xml')
    eye_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_eye.xml')

    while True:
        frame = video_stream.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Detect faces, then look for eyes only inside each face region.
        faces = face_cascade.detectMultiScale(gray, 1.3, 5)
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
            # BUG FIX: the column slice must use the face width `w`;
            # the original sliced `x:x + h`, cropping a wrong-sized ROI
            # for non-square detections.
            roi_gray = gray[y:y + h, x:x + w]
            roi_color = frame[y:y + h, x:x + w]
            eyes = eye_cascade.detectMultiScale(roi_gray)
            # BUG FIX: `eyes` holds detections for THIS face's ROI only,
            # so the per-face condition is simply "two eyes found".
            # Dividing by len(faces) made the check fail whenever more
            # than one face was in view.
            if len(eyes) == 2:
                for (ex, ey, ew, eh) in eyes:
                    cv2.rectangle(roi_color, (ex, ey),
                                  (ex + ew, ey + eh), (0, 255, 0), 1)

        # BUG FIX: show the annotated frame BEFORE polling the keyboard,
        # so the final frame is displayed and the quit path doesn't show
        # a stale image after the stream has been stopped.
        cv2.imshow('Image', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            video_stream.stop()
            break
def updateRobotPos():
    """Track the robot from the camera feed and publish its pose.

    Calibrates the view transform (and its reprojection matrix) from an
    initial burst of frames, then loops forever updating the pose
    globals, the cross-track error, and both debug image panels.
    """
    global cte, x, y, theta, env
    camera = WebcamVideoStream(src=int(sys.argv[1]))
    camera.start()
    print("Setting up...")
    # Burst-capture frames for calibration; setup() returns the forward
    # transform plus the matrix needed to reproject the map onto the
    # camera image.
    warmup = []
    for _ in range(15):
        warmup.append(camera.read())
        time.sleep(0.1)
    transformMatrix, reprojMatrix = setup(warmup)
    while True:
        latest = camera.read()
        x, y, theta, outImage = getRobotPosition(latest, transformMatrix)
        print(x, y, theta)
        print("")
        cte = y  # cross-track error: lateral (y) offset of the robot
        env.setRobotPose(x, y, theta)
        updateImage(outImage)
        updateImage2(env.visualizeMap())
        time.sleep(0.01)
def updateRobotPos():
    """Camera-tracking loop: calibrate once, then refresh pose globals forever.

    Publishes the latest frame, annotated image, and both view matrices
    through module-level globals so other threads can consume them;
    sets `initFinished` once calibration is complete.
    """
    global x, y, theta, env, outImage, started, reprojMatrix, pp, frame, transformMatrix, initFinished
    video = WebcamVideoStream(src=int(sys.argv[1]))
    video.start()
    print("Setting up...")
    # Burst-capture frames so setup() can derive both view matrices.
    samples = []
    for _ in range(15):
        frame = video.read()
        samples.append(frame)
        time.sleep(0.1)
    transformMatrix, reprojMatrix = setup(samples)
    initFinished = True  # signal other threads that calibration is done
    while True:
        frame = video.read()
        x, y, theta, outImage = getRobotPosition(frame, transformMatrix)
        env.setRobotPose(x, y, theta)
        time.sleep(0.01)
class VideoStream:
    """Facade that delegates to either a PiCamera or a webcam capture backend."""

    def __init__(self, src=1, usePiCamera=False, resolution=(320, 240),
                 framerate=32):
        # Import the picamera backend lazily: this keeps the
        # `picamera[array]` dependency off desktops/laptops that only
        # need the OpenCV webcam path.
        if usePiCamera:
            from pivideostream import PiVideoStream
            self.stream = PiVideoStream(resolution=resolution,
                                        framerate=framerate)
        else:
            self.stream = WebcamVideoStream(src=src)

    def start(self):
        """Start the threaded capture and return the underlying stream."""
        return self.stream.start()

    def update(self):
        """Advance the underlying stream by one frame."""
        self.stream.update()

    def read(self):
        """Return the most recent frame from the backend."""
        return self.stream.read()

    def stop(self):
        """Stop the capture thread and release its resources."""
        self.stream.stop()
def main():
    """Capture webcam frames, find a rectangular screen, and OCR its digits.

    Each recognized reading is appended to the CSV log; ESC quits.
    """
    vs = WebcamVideoStream(0)
    vs.start()

    # Tesseract: legacy engine, digit/sign/dot whitelist, single-char page mode.
    #tesconfigargs = ('-l digits --psm 10')
    tesconfigargs = '--oem 0 -c tessedit_char_whitelist=0123456789-. --psm 10'

    # Point pytesseract at the executable (Windows only).
    pytesseract.pytesseract.tesseract_cmd = 'C:\\Program Files (x86)\\Tesseract-OCR\\tesseract.exe'

    # Readings are logged to a CSV for later analysis.
    setup_logger('log', r'C:\Temp\ImageAnalysis.csv')
    log = logging.getLogger('log')
    log.info("-------------------------------------Capture started----------------------------------------------")

    while True:
        frame = vs.read()
        cv2.imshow('frame', frame)

        # Pipeline: grayscale -> light Gaussian blur -> Canny edges.
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gauss = cv2.GaussianBlur(gray, (5, 5), 0)
        canny = cv2.Canny(gauss, 100, 200)
        cv2.imshow('canny', canny)

        _, cnts, _ = cv2.findContours(canny, cv2.RETR_TREE,
                                      cv2.CHAIN_APPROX_SIMPLE)
        # Examine only the ten largest contours, biggest first.
        for contour in sorted(cnts, key=cv2.contourArea, reverse=True)[:10]:
            if cv2.contourArea(contour) <= 1000:
                continue
            # A 4-vertex polygonal approximation is assumed to be the screen.
            perimeter = cv2.arcLength(contour, True)
            approx = cv2.approxPolyDP(contour, 0.02 * perimeter, True)
            if len(approx) == 4:
                cv2.drawContours(frame, [approx], -1, (0, 255, 0), 3)
                x, y, width, height = cv2.boundingRect(approx)
                # Both OpenCV and NumPy are row-major, so y is indexed first.
                croppedframe = frame[y: y + height, x: x + width]
                # Log the recognized text.
                log.info(pytesseract.image_to_string(croppedframe,
                                                     config=tesconfigargs))
                break

        cv2.imshow('frame', frame)
        if cv2.waitKey(5) & 0xFF == 27:
            break

    # Cleanup.
    vs.stop()
    cv2.destroyAllWindows()
# Script: stream webcam frames and prepare them for digit OCR
# (grayscale -> blur -> Canny edge detection).
from WebcamVideoStream import WebcamVideoStream
import pytesseract
import cv2
import numpy as np

print("[INFO] Starting Video Stream")
vs = WebcamVideoStream()
vs.start()
#time.sleep(2)

#Initialize Config for tesseract
#'-l eng Define the english wordfile
#config = ('-l eng')
# Digits language pack, page-segmentation mode 10 (single character).
config = ('-l digits --psm 10')

# NOTE(review): loop body may continue beyond this excerpt — `config` and
# `canny` are presumably consumed by OCR code further down; verify in the
# full file.
while True:
    frame = vs.read()
    #Color to GrayScale Filter
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    ## # cv2.imshow("Region Captured", frame)
    ## # cv2.imshow("Region Captured", frame)
    #cv2.imshow('gray', gray)
    #small Gaussian Blur Filter to filter out grainy Stuff
    gauss = cv2.GaussianBlur(gray, (5, 5), 0)
    #cv2.imshow('gauss', gauss)
    #canny detector
    #option, threshold1, threshold2
    canny = cv2.Canny(gauss, 100, 200)
    #canny = cv2.Canny(gauss,lower,upper)