def main():
    vc = WebcamVideoStream(src=0).start()

    reader = easyocr.Reader(['en'])

    while True:
        # read the current camera frame
        frame = vc.read()

        # show the current frame (untouched)
        cv2.imshow("My webcam", frame)

        # poll the keyboard once per iteration; calling cv2.waitKey()
        # twice would let one call consume the keypress before the
        # other check runs
        key = cv2.waitKey(1) & 0xFF
        # if 'x' key is pressed, exit the loop
        if key == ord('x'):
            break
        # if 'c' key is pressed, process the frame for OCR
        if key == ord('c'):
            # grab a masked, cleaned-up copy of the frame for OCR
            img = vc.mask_frame()

            # DEBUG
            img_box = draw_boxes(img, reader)

            # show the image with the detected text boxes drawn on it
            cv2.imshow("OCR", img_box)

            print_img_str(img, reader)

    # close the window and de-allocate any associated memory usage
    cv2.destroyAllWindows()

    # close the already opened camera
    vc.stop()
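The `draw_boxes` and `print_img_str` helpers called above are not shown. A minimal sketch of what they might look like, assuming easyocr's `readtext()` return format of (bbox, text, confidence) tuples; the two names come from the call sites, everything else is an assumption:

import cv2
import numpy as np

def draw_boxes(img, reader):
    # run OCR and outline each detected text region; readtext() returns
    # a list of (bbox, text, confidence) tuples, where bbox holds the
    # four corner points of the region
    boxed = img.copy()
    for bbox, text, conf in reader.readtext(img):
        pts = np.array(bbox, dtype=np.int32).reshape((-1, 1, 2))
        cv2.polylines(boxed, [pts], isClosed=True, color=(0, 255, 0),
                      thickness=2)
    return boxed

def print_img_str(img, reader):
    # print every string easyocr finds in the image, with its confidence
    for _, text, conf in reader.readtext(img):
        print("{} ({:.2f})".format(text, conf))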
    def _detect(self):
        """Class function to detect faces and eyes within faces"""
        video_stream = WebcamVideoStream()
        video_stream.start()
        # Cascade Classifiers
        face_cascade = cv2.CascadeClassifier(
            'haarcascades/haarcascade_frontalface_default.xml')
        eye_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_eye.xml')
        while True:
            frame = video_stream.read()
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # Detecting faces and eyes
            faces = face_cascade.detectMultiScale(gray, 1.3, 5)
            for (x, y, w, h) in faces:
                cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
                roi_gray = gray[y:y + h, x:x + w]
                roi_color = frame[y:y + h, x:x + w]

                eyes = eye_cascade.detectMultiScale(roi_gray)

                # draw the eyes only when exactly two were found in this face
                if len(eyes) == 2:
                    for (ex, ey, ew, eh) in eyes:
                        cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh),
                                      (0, 255, 0), 1)

            # Display image
            cv2.imshow('Image', frame)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                video_stream.stop()
                break
class FaceAndEyeDetectorStream:
    def __init__(self, src=0):
        # initialize the video camera stream and read the first frame
        # from the stream
        self.webcam_stream = WebcamVideoStream(src).start()

        frame, frame_time = self.webcam_stream.read()

        self.frame_time = frame_time
        if frame is not None:
            (self.img, self.faces,
             self.face_features) = extract_image_features(frame)

        # initialize the variable used to indicate if the thread should
        # be stopped
        self.stopped = False

    def start(self):
        # start the thread to read frames from the video stream
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self

    def update(self):
        # keep looping infinitely until the thread is stopped
        while True:
            # if the thread indicator variable is set, stop the thread
            #  print('updating')
            if self.stopped:
                #  print('returning')
                return

            # otherwise, read the next frame from the stream
            frame, frame_time = self.webcam_stream.read()
            self.frame_time = frame_time

            if frame is not None:
                (self.img, self.faces,
                 self.face_features) = extract_image_features(frame)
            #  print('the faces', self.faces)
            #  print('updated', self.grabbed)

    def read(self):
        # return the frame most recently read
        return (self.img, self.faces, self.face_features, self.frame_time)

    def stop(self):
        # indicate that the thread should be stopped
        self.webcam_stream.stop()
        self.stopped = True
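The class above delegates detection to an `extract_image_features` helper that is not shown. A minimal Haar-cascade sketch of something that returns the (image, faces, face_features) tuple the class unpacks; the cascade choice mirrors the `_detect` example further up and is otherwise an assumption:

import cv2

# assumed helpers: face and eye cascades shipped with OpenCV
_face_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
_eye_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_eye.xml')

def extract_image_features(frame):
    # detect faces in the frame, then eyes inside each face region,
    # returning (image, face boxes, per-face eye boxes)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = _face_cascade.detectMultiScale(gray, 1.3, 5)
    face_features = []
    for (x, y, w, h) in faces:
        roi_gray = gray[y:y + h, x:x + w]
        face_features.append(_eye_cascade.detectMultiScale(roi_gray))
    return frame, faces, face_features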
def main():
    # Load Model and allocate tensors to the Coral USB device
    interpreter = tf.lite.Interpreter(
        model_path='../../../AI_Token_Recognition.tflite',
        experimental_delegates=[load_delegate('libedgetpu.so.1.0')])
    interpreter.allocate_tensors()

    # Get input and output tensors
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    vc = WebcamVideoStream(src=0).start()

    while True:
        # read the current camera frame
        frame = vc.read()

        # show the current frame (untouched)
        cv2.imshow("My webcam", frame)

        # poll the keyboard once per iteration; a second cv2.waitKey()
        # call would miss a key already consumed by the first
        key = cv2.waitKey(1) & 0xFF
        # if 'x' key is pressed, exit the loop
        if key == ord('x'):
            break
        # if 'c' key is pressed, classify the current frame
        if key == ord('c'):
            img = cv2.resize(frame, (160, 160))

            cv2.imshow("Resized", img)

            # convert the frame to a batched numpy array; set_tensor()
            # expects a numpy array matching the input tensor's dtype
            img_array = keras.preprocessing.image.img_to_array(img)
            img_array = np.expand_dims(img_array, axis=0).astype(
                input_details[0]['dtype'])

            # set the input to give it the image
            interpreter.set_tensor(input_details[0]['index'], img_array)
            interpreter.invoke()

            # get a prediction
            predictions = interpreter.get_tensor(output_details[0]['index'])
            score = tf.nn.softmax(predictions[0])

            # RESULT
            print(
                "This image most likely belongs to {} with a {:.2f} percent confidence."
                .format(np.argmax(score), 100 * np.max(score)))

    # close the window and de-allocate any associated memory usage
    cv2.destroyAllWindows()

    # close the already opened camera
    vc.stop()
Example #5
def main():
    model = keras.models.load_model('../../../AI_Token_Recognition')

    # Check the loaded model
    model.summary()

    vc = WebcamVideoStream(src=0).start()

    while True:
        # read the current camera frame
        frame = vc.read()

        # show the current frame (untouched)
        cv2.imshow("My webcam", frame)

        # poll the keyboard once per iteration
        key = cv2.waitKey(1) & 0xFF
        # if 'x' key is pressed, exit the loop
        if key == ord('x'):
            break
        # if 'c' key is pressed, classify the current frame
        if key == ord('c'):
            img = cv2.resize(frame, (160, 160))

            cv2.imshow("Resized", img)

            # convert the frame to an array
            img_array = keras.preprocessing.image.img_to_array(img)
            img_array = tf.expand_dims(img_array, 0)

            # get a prediction
            predictions = model.predict(img_array)
            score = tf.nn.softmax(predictions[0])

            # RESULT
            print(
                "This image most likely belongs to {} with a {:.2f} percent confidence."
                .format(np.argmax(score), 100 * np.max(score)))

    # close the window and de-allocate any associated memory usage
    cv2.destroyAllWindows()

    # close the already opened camera
    vc.stop()
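For reference, the SavedModel loaded here can be converted into the `.tflite` flatbuffer that the Coral example above loads; a sketch using TensorFlow's converter (the paths simply mirror those examples):

import tensorflow as tf

# convert the SavedModel directory into a TFLite flatbuffer
converter = tf.lite.TFLiteConverter.from_saved_model('../../../AI_Token_Recognition')
tflite_model = converter.convert()

with open('../../../AI_Token_Recognition.tflite', 'wb') as f:
    f.write(tflite_model)

Running on the Edge TPU additionally requires full-integer quantization and a pass through the edgetpu_compiler, which this sketch omits.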
class VideoStream:
    def __init__(self,
                 src=1,
                 usePiCamera=False,
                 resolution=(320, 240),
                 framerate=32):
        # check to see if the picamera module should be used
        if usePiCamera:
            # only import the picamera packages if we are explicitly
            # told to do so -- this removes the `picamera[array]`
            # requirement for desktops or laptops that still want to
            # use the `imutils` package
            from pivideostream import PiVideoStream

            # initialize the picamera stream and allow the camera
            # sensor to warmup
            self.stream = PiVideoStream(resolution=resolution,
                                        framerate=framerate)

        # otherwise, we are using OpenCV so initialize the webcam
        # stream
        else:
            self.stream = WebcamVideoStream(src=src)

    def start(self):
        # start the threaded video stream
        return self.stream.start()

    def update(self):
        # grab the next frame from the stream
        self.stream.update()

    def read(self):
        # return the current frame
        return self.stream.read()

    def stop(self):
        # stop the thread and release any resources
        self.stream.stop()
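A usage sketch for this wrapper; the `usePiCamera` flag picks the backend, and the loop mirrors the examples above:

import cv2

# Raspberry Pi camera module:
vs = VideoStream(usePiCamera=True, resolution=(320, 240), framerate=32).start()
# desktop webcam instead: vs = VideoStream(src=0).start()

while True:
    frame = vs.read()
    cv2.imshow("Frame", frame)
    # press 'q' to quit
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cv2.destroyAllWindows()
vs.stop()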
Example #7
def main():
    # TODO: load the model and run it on the Intel NCS2 USB device

    vc = WebcamVideoStream(src=0).start()

    while True:
        # read the current camera frame
        frame = vc.read()

        # show the current frame (untouched)
        cv2.imshow("My webcam", frame)

        # poll the keyboard once per iteration
        key = cv2.waitKey(1) & 0xFF
        # if 'x' key is pressed, exit the loop
        if key == ord('x'):
            break
        # if 'c' key is pressed, classify the current frame
        if key == ord('c'):
            # convert the frame to an array
            img_array = keras.preprocessing.image.img_to_array(frame)
            img_array = tf.expand_dims(img_array, 0)

            # TODO: get a prediction using the NCS2

            # get a prediction
            #predictions = interpreter.get_tensor(output_details[0]['index'])
            #classes = predictions.argmax(axis=-1)
            #score = tf.nn.softmax(predictions[0])

            # RESULT
            #print(
            #    "This image most likely belongs to {} with a {:.2f} percent confidence."
            #    .format(classes[np.argmax(score)], 100 * np.max(score))
            #)

    # close the window and de-allocate any associated memory usage
    cv2.destroyAllWindows()

    # close the already opened camera
    vc.stop()
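A plausible way to fill in the TODO is OpenVINO's (pre-2022) Python API, which is the usual route to the NCS2. This is a sketch under that assumption; 'model.xml' and 'model.bin' are hypothetical IR files produced by OpenVINO's Model Optimizer:

import cv2
import numpy as np
from openvino.inference_engine import IECore

# load the IR model onto the NCS2 (OpenVINO device name "MYRIAD")
ie = IECore()
net = ie.read_network(model='model.xml', weights='model.bin')  # hypothetical paths
exec_net = ie.load_network(network=net, device_name='MYRIAD')

input_blob = next(iter(net.input_info))
output_blob = next(iter(net.outputs))
n, c, h, w = net.input_info[input_blob].input_data.shape

def predict_ncs2(frame):
    # resize to the network's input size, then reorder to batched
    # NCHW float32, which is what OpenVINO expects
    img = cv2.resize(frame, (w, h))
    blob = np.transpose(img.astype(np.float32), (2, 0, 1))[np.newaxis, ...]
    return exec_net.infer(inputs={input_blob: blob})[output_blob]

The returned array could then go through the same tf.nn.softmax / np.argmax reporting used in the other examples.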
Example #8
calibrator = HSVCalibrator(0, windowSize)
hsvRange = calibrator.calibrateHSVRange()

threadedVideoCapture = WebcamVideoStream(windowSize=windowSize).start()
calibrator.videoCapture.release()

voiceInterface = VoiceControlInterface()

# Create Properties
voiceInterface.createProperty("debug", "bool", False)
voiceInterface.createProperty("rotation_speed", "int", -30)
voiceInterface.createProperty("effect", "list",
                              (["Hello", "World", "Goodbye", "Hell"], 0))

# Create Actions
voiceInterface.createVoiceAction("SET", ActionMethods.SET)
voiceInterface.createVoiceAction("INDEX", ActionMethods.INDEX)

# Create Aliases
voiceInterface.createActionAlias("enable", "set _ to true")
voiceInterface.createActionAlias("disable", "set _ to false")

voiceInterface.createActionAlias("first", "index _ to 0")
voiceInterface.createActionAlias("next", "index _ to [i+1]", True)
voiceInterface.createActionAlias("previous", "index _ to [i-1]", True)

detectHandViaHSV(threadedVideoCapture, hsvRange, voiceInterface, True)
cv2.destroyAllWindows()

threadedVideoCapture.stop()
Example #9
def main():
    #Select Webcam to Stream from
    vs = WebcamVideoStream(0)
    vs.start()

    #Initialize Config for tesseract
    #tesconfigargs = ('-l digits --psm 10')
    tesconfigargs = '--oem 0 -c tessedit_char_whitelist=0123456789-. --psm 10'

    #Set pytesseract CMD (Windows only)
    pytesseract.pytesseract.tesseract_cmd = 'C:\\Program Files (x86)\\Tesseract-OCR\\tesseract.exe'

    #Instantiate Logger
    setup_logger('log', r'C:\Temp\ImageAnalysis.csv')
    log = logging.getLogger('log')

    log.info("-------------------------------------Capture started----------------------------------------------")

    while True:

        frame = vs.read()
        cv2.imshow('frame', frame)

        #Color to GrayScale Filter
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        #small Gaussian Blur Filter to filter out grainy Stuff
        gauss = cv2.GaussianBlur(gray, (5,5),0)

        #canny detector
        canny = cv2.Canny(gauss,100,200)
        cv2.imshow('canny', canny)

        # OpenCV 4 returns (contours, hierarchy); on OpenCV 3 this call
        # returned three values (`_, cnts, _ = ...`)
        cnts, _ = cv2.findContours(canny, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:10]


        ## loop over our contours
        screenCnt = None
        for c in cnts:
            if cv2.contourArea(c) > 1000:

                #approximate the contour
                peri = cv2.arcLength(c, True)
                approx = cv2.approxPolyDP(c, 0.02 * peri, True)

                #if our approximated contour has four points, then
                #we can assume that we have found our screen
                if len(approx) == 4:
                    screenCnt = approx
                    cv2.drawContours(frame, [screenCnt], -1, (0, 255, 0), 3)
                    x,y,width,height = cv2.boundingRect(screenCnt)
                    croppedframe = frame[y: y + height , x: x + width] # both opencv and numpy are "row-major", so y goes first

                    digit = pytesseract.image_to_string(croppedframe, config=tesconfigargs)

                    # Print and Log recognized text
                    log.info(digit)
                    break

        cv2.imshow('frame', frame)
        key = cv2.waitKey(5) & 0xFF
        if key == 27:
            break

    #Do Cleanup
    vs.stop()
    cv2.destroyAllWindows()
    
    # Note: keep in mind an error I have run into here with colored
    # thresholding.

    # Important: reducing the use of imshow can radically increase the FPS we
    # read. Calling imshow forces the project to output frames while it is
    # also reading them in, and that display work takes time the thread could
    # otherwise spend reading frames (see the benchmark sketch below).
    cv2.imshow("Frame", frame)
    k = cv2.waitKey(30) & 0xFF
    if k == 27:
        break

cv2.destroyAllWindows()
for i in range(1,5):
    cv2.waitKey(1)

vs.stop()               # Stops the reading in of frames.
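A minimal sketch of the point made in the comment above: timing the same capture loop with and without the display call, using imutils' FPS counter (the function name here is ours):

import cv2
from imutils.video import FPS, WebcamVideoStream

def measure_fps(show, num_frames=200):
    # read num_frames frames and report the achieved frame rate;
    # toggling `show` exposes the cost of cv2.imshow + cv2.waitKey
    vs = WebcamVideoStream(src=0).start()
    fps = FPS().start()
    for _ in range(num_frames):
        frame = vs.read()
        if show:
            cv2.imshow("Frame", frame)
            cv2.waitKey(1)
        fps.update()
    fps.stop()
    vs.stop()
    cv2.destroyAllWindows()
    return fps.fps()

print("FPS without display:", measure_fps(show=False))
print("FPS with display:   ", measure_fps(show=True))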

#ret, img = cap.read()

#threshImage = Filters.simpleThreshBinary(img)               # Retrieve a thresholded image.

#grayscale = Filters.grayScaleImage(img)
#grayscaledThresh = Filters.simpleThreshBinary(grayscale)    # Retrieve a thresholded grayscale image.

#    gaussianBlurredImage = Filters.gaussianBlurApplying(img)    # We have a Gaussian-blurred image.
#    adaptiveThreshGaus = Filters.adaptiveThresholding_Gaus(img) # Now we have an adaptive thresholding example.

# Edge detection


# Face detection portion
Example #11
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        text = "Occupied"

    # draw the text and timestamp on the frame
    # cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
    #     cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    # cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
    #     (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)

    # show the frame and record if the user presses a key
    cv2.imshow("Security Feed", frame)
    #cv2.imshow("Thresh", thresh)
    cv2.imshow("Frame Delta", frameDelta)

    key = cv2.waitKey(1) & 0xFF
    # if the `esc` key is pressed, break from the loop
    if key == 27:
        break
 
# cleanup the camera and close any open windows
camera.stop()
cv2.destroyAllWindows()
destroyWindows.windowDestroyer(2)
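The `destroyWindows.windowDestroyer` helper is not shown; a plausible sketch, based on the waitKey-pumping pattern that appears after destroyAllWindows elsewhere in these examples (the meaning of the argument is our assumption):

import cv2

def windowDestroyer(extra_pumps):
    # destroyAllWindows alone often leaves HighGUI windows on screen;
    # pumping the event loop with a few waitKey calls lets them close
    cv2.destroyAllWindows()
    for _ in range(extra_pumps):
        cv2.waitKey(1)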
        
Example #12
def main():
    # initialize the video stream and allow the camera sensor to warm up
    print("[INFO] starting video stream...")
    # cap = cv2.VideoCapture(0)
    vs = WebcamVideoStream(src=0).start()
    fps = FPS().start()  #Notes the start time
    width = 440

    with open("consumer_thread.csv", 'w', newline='') as file:
        writer = csv.writer(file)
        writer.writerow([
            "Thread Frame #",
            "Time spent in reading the frame (seconds) from queue",
            "Time spent performing inference on the frame (seconds)"
        ])
        # loop over the frames from the video stream
        #while True:
        while fps._numFrames < args["num_frames"]:
            # grab the frame from the threaded video stream and resize it
            # to have a maximum width of 400 pixels
            # Capture frame-by-frame
            start = timer()
            frame = vs.readFromQueue()
            end = timer()
            # if frame is not None, there was at least one frame in the queue
            # when we read from it; otherwise the queue was empty
            if frame is not None:
                # update the FPS counter
                fps.update()
                consumerThreadFrameNumber = fps._numFrames
                consumerThreadTimeTakenToReadThisFrame = (end - start)
                print(
                    "[INFO] Consumer Thread : Time taken to read frame number",
                    consumerThreadFrameNumber, "from queue is",
                    consumerThreadTimeTakenToReadThisFrame, "seconds")
                height = frame.shape[0]
                dim = (width, height)
                frame = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
                # detect faces in the frame and determine if they are wearing a
                # face mask or not
                startInferenceTime = timer()
                (locs, preds) = detect_and_predict_mask(frame, net, model)
                endInferenceTime = timer()
                consumerThreadTimeTakenToPerformInference = (
                    endInferenceTime - startInferenceTime)
                print(
                    "[INFO] Consumer Thread : Time taken to perform inference on consumed frame number",
                    consumerThreadFrameNumber, "is",
                    consumerThreadTimeTakenToPerformInference, "seconds")
                writer.writerow([
                    consumerThreadFrameNumber,
                    consumerThreadTimeTakenToReadThisFrame,
                    consumerThreadTimeTakenToPerformInference
                ])
                for (box, pred) in zip(locs, preds):
                    # unpack the bounding box and predictions
                    (startX, startY, endX, endY) = box
                    (mask, withoutMask) = pred
                    label = "Mask" if mask > withoutMask else "No Mask"
                    color = (0, 255, 0) if label == "Mask" else (0, 0, 255)
                    # include the probability in the label
                    #label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100)
                    # display the label and bounding box rectangle on the output
                    # frame
                    cv2.putText(frame, label, (startX, startY - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
                    cv2.rectangle(frame, (startX, startY), (endX, endY), color,
                                  2)
                    print("Showing frame")
                    # show the output frame
                    cv2.imshow("Output", frame)
                    #cv2.destroyAllWindows()
                    #key = cv2.waitKey(10) & 0xFF

                key = cv2.waitKey(1) & 0xFF
                # if the `q` key was pressed, break from the loop
                if key == ord("q"):
                    break

    fps.stop()
    vs.stop()

    print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    cv2.destroyAllWindows()
Example #13
# created a *threaded* video stream, allow the camera sensor to warmup,
# and start the FPS counter
print("[INFO] sampling THREADED frames from webcam...")
vs = WebcamVideoStream(src=0).start()
fps = FPS().start()

# loop over some frames...this time using the threaded stream
while fps._numFrames < num_frames:
    # grab the frame from the threaded video stream and resize it
    # to have a maximum width of 400 pixels
    frame = vs.read()
    # frame = imutils.resize(frame, width=400)

    # check to see if the frame should be displayed to our screen
    # if args["display"] > 0:
    # cv2.imshow("Frame", frame)

    # update the FPS counter
    fps.update()
    # if cv2.waitKey(0) & 0xFF == ord('q'):
    # 	break

# stop the timer and display FPS information
fps.stop()
print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
     
        # loop over the contours (cnts holds the contour points found in the
        # dilated, Gaussian-blurred, grayscaled frame)
        for c in cnts:
            # if the contour is too small, ignore it
            # For fine tuning: around 500 is optimal when testing in a
            # room-like environment; to pick up only larger motion, such as
            # someone walking into an otherwise still scene, raise it to 4500+.
            if cv2.contourArea(c) < 5000:     # Value to fine tune.
                continue
     
            # compute the bounding box for the contour, draw it on the frame,
            # and update the text
            (x, y, w, h) = cv2.boundingRect(c)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            
        cv2.imshow("Frame", frame)
        out.write(frame)
    #cv2.imshow("ThreshFrame", threshedOut)
    #cv2.imshow("DeltaFrame", deltaOut)
    k = cv2.waitKey(30) & 0xFF
    if k == 27:
        break


vs.stop()               # Stops the reading in of frames.
cv2.destroyAllWindows()
destroyWindows.windowDestroyer(1)


 

Example #15
			if args.image:			
				cv2.circle(frame, (cnt1.x , cnt1.y), 5, (255, 0, 0), 2)
				cv2.circle(frame, (cnt2.x , cnt2.y), 5, (255, 0, 0), 2)
				middle = (abs(cnt1.x + cnt2.x) // 2, abs(cnt1.y + cnt2.y) // 2)  # integer pixel midpoint for cv2.putText
				if rc == c.SUCCESS:
					cv2.putText(frame, 'tx='+str(int(round(tx))) + '   ty='+str(int(round(ty))), middle, font, 0.5 , (0, 0, 255), 1, cv2.LINE_4)

				cv2.imshow("Frame", frame)
			fps.update()
			
			event = cv2.waitKey(1) & 0xFF
			if event == ord('p'):
				print(len(pipeline.find_contours_output))
				print()
			elif event == ord('s'):
				printStat = True
			elif event == ord('q'):
				break 
		else:
			time.sleep(0.01)

	#cleanups
	fps.stop()      
	cv2.destroyAllWindows()
	stream.stop()
	
	print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
	print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))	
Example #16
				if angleRC == SUCCESS:
					cv2.putText(frame, 'A='+str(int(round(angle))), (middle[0],middle[1]+30), font, 0.5 , (0, 0, 255), 1, cv2.LINE_4)

				cv2.imshow("Frame", frame)
			fps.update()
	     
			event = cv2.waitKey(1) & 0xFF
			if event == ord('p'):
				print(len(pipeline.find_contours_output))
				print()
			elif event == ord('d'):
				if distanceRC == SUCCESS:		
					print("distance = " + str(distance))
					#distanceNT = distance
					if optionNetwork:
						table.putNumber('distance', distance)			
			elif event == ord('s'):
				printStat = True
			elif event == ord('a'):
				if angleRC == SUCCESS:			
					print("angle = " + str(angle))
			elif event == ord('q'):
				break 
	fps.stop()      
	cv2.destroyAllWindows()
	cam.stop()
   	
	
	print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
	print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))