def _detect(self):
        """Class function to detect faces and eyes within faces"""
        video_stream = WebcamVideoStream()
        video_stream.start()
        # Cascade Classifiers
        face_cascade = cv2.CascadeClassifier(
            'haarcascades/haarcascade_frontalface_default.xml')
        eye_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_eye.xml')
        while True:
            frame = video_stream.read()
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # Detecting faces and eyes
            faces = face_cascade.detectMultiScale(gray, 1.3, 5)
            for (x, y, w, h) in faces:
                cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
                roi_gray = gray[y:y + h, x:x + w]
                roi_color = frame[y:y + h, x:x + w]

                eyes = eye_cascade.detectMultiScale(roi_gray)

                # only draw eyes when exactly two were found in this face ROI
                if len(eyes) == 2:
                    for (ex, ey, ew, eh) in eyes:
                        cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh),
                                      (0, 255, 0), 1)

            # Display image
            cv2.imshow('Image', frame)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                video_stream.stop()
                cv2.destroyAllWindows()
                break
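Every snippet in this listing assumes a WebcamVideoStream class with start()/read()/stop() methods that grabs frames on a background thread, in the style of imutils. A minimal sketch of such a class, for orientation only (the projects above each ship their own variants, some returning timestamps or frame counters from read()):

import threading
import cv2

class WebcamVideoStream:
    """Minimal threaded camera reader: a daemon thread keeps grabbing
    frames so read() returns the latest one without blocking."""
    def __init__(self, src=0):
        self.stream = cv2.VideoCapture(src)
        self.grabbed, self.frame = self.stream.read()
        self.stopped = False

    def start(self):
        threading.Thread(target=self.update, daemon=True).start()
        return self

    def update(self):
        while not self.stopped:
            self.grabbed, self.frame = self.stream.read()

    def read(self):
        return self.frame

    def stop(self):
        self.stopped = True
        self.stream.release()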
    def __init__(self):
        # self.cam = cv2.VideoCapture(0)
        # self.cam.set(3, 320)
        # self.cam.set(4, 240)
        self.cam = WebcamVideoStream(src=0, resolution=(640, 480)).start()
        self.fps = FPS().start()

        ret, self.frame = self.cam.read()

        self.suspend_tracking = SuspendTracking(teta=3)

        self.height, self.width = self.frame.shape[:2]
        self.kernel_erode = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                      (3, 3))
        self.kernel_dilate = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                       (7, 7))

        cv2.namedWindow('camshift')
        self.obj_select = RectSelector('camshift', self.onmouse)

        radius = 3
        n_point = 8 * radius
        self.lbpDesc = LBP(n_point, radius)

        self.HSV_CHANNELS = (
            (24, [0, 180], "hue"),  # Hue
            (8, [0, 256], "sat"),  # Saturation
            (8, [0, 256], "val")  # Value
        )

        self.show_backproj = False
        self.track_window = None
        self.histHSV = []
        self.track_box = None
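Nothing in this constructor shows how the HSV_CHANNELS tuples of (bins, range, label) are consumed; presumably they parameterize the per-channel histograms that seed CamShift back-projection. A hedged sketch of that use (the method and variable names here are assumptions, not the project's):

    def calc_hsv_hist(self, mask=None):
        # build one normalized histogram per HSV channel from the current frame
        hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)
        self.histHSV = []
        for ch, (bins, rng, label) in enumerate(self.HSV_CHANNELS):
            hist = cv2.calcHist([hsv], [ch], mask, [bins], rng)
            cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
            self.histHSV.append(hist)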
def updateRobotPos():
    global cte, x, y, theta, env
    cap = WebcamVideoStream(src=int(sys.argv[1]))
    cap.start()

    print("Setting up...")
    setupImgs = []
    for i in range(15):
        frame = cap.read()
        setupImgs.append(frame)

        time.sleep(0.1)

    transformMatrix = setup(setupImgs)

    while True:
        frame = cap.read()
        #print(frame[50:60, 50:60, 1])

        x, y, theta, outImage = getRobotPosition(frame, transformMatrix)
        print(x, y, theta)
        print("")

        cte = y
        updateImage(outImage)
        env.setRobotPose(x, y, theta)
        m = env.visualizeMap()
        updateImage2(m)

        time.sleep(0.01)
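setup() is not shown; from the usage it turns the warm-up frames into a transform that maps raw camera coordinates into the robot's plane. One plausible shape, assuming four known reference markers can be located in an averaged warm-up image (everything below, including find_reference_markers, is illustrative, not the original code):

def setup(imgs):
    # average the warm-up frames to suppress sensor noise
    avg = np.mean(np.stack(imgs).astype(np.float32), axis=0).astype(np.uint8)
    src_pts = np.float32(find_reference_markers(avg))  # hypothetical detector
    dst_pts = np.float32([[0, 0], [500, 0], [500, 500], [0, 500]])
    return cv2.getPerspectiveTransform(src_pts, dst_pts)

The variant below also returns a reprojection matrix, which would simply be the inverse mapping (the same call with the point sets swapped).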
def updateRobotPos():
    global cte, x, y, theta, env
    cap = WebcamVideoStream(src=int(sys.argv[1]))
    cap.start()

    print("Setting up...")
    setupImgs = []
    for i in range(15):
        frame = cap.read()
        setupImgs.append(frame)

        time.sleep(0.1)

    transformMatrix, reprojMatrix = setup(setupImgs)

    while True:
        frame = cap.read()
        #print(frame[50:60, 50:60, 1])

        x, y, theta, outImage = getRobotPosition(frame, transformMatrix)
        print(x, y, theta)
        print("")

        cte = y
        #updateImage(outImage)
        env.setRobotPose(x, y, theta)
        m = env.visualizeMap()
        #out = cv2.warpPerspective(m, reprojMatrix, (outImage.shape[1], outImage.shape[0]))
        #out = cv2.addWeighted(out, 0.5, outImage, 1 - 0.5, 0)
        updateImage(outImage)
        updateImage2(m)

        time.sleep(0.01)
def main():
    vc = WebcamVideoStream(src=0).start()

    reader = easyocr.Reader(['en'])

    while True:
        # read the current camera frame
        frame = vc.read()

        # show the current frame (untouched)
        cv2.imshow("My webcam", frame)

        # read the keyboard once per loop; calling waitKey twice would
        # swallow every other keypress
        key = cv2.waitKey(1) & 0xFF
        # if 'x' key is pressed, exit the loop
        if key == ord('x'):
            break
        # if 'c' key is pressed, process the frame for OCR
        if key == ord('c'):
            # grab a cleaned, masked copy of the frame for OCR
            img = vc.mask_frame()

            # DEBUG
            img_box = draw_boxes(img, reader)

            # draw the chart containing the image with boxes
            cv2.imshow("Tesseract", img_box)

            print_img_str(img, reader)

    # close the window and de-allocate any associated memory usage
    cv2.destroyAllWindows()

    # close the already opened camera
    vc.stop()
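draw_boxes and print_img_str are project helpers that are not shown here. Since easyocr's readtext() returns (bbox, text, confidence) triples, draw_boxes plausibly looks something like this sketch (not the original implementation):

def draw_boxes(img, reader):
    out = img.copy()
    for bbox, text, conf in reader.readtext(img):
        tl = (int(bbox[0][0]), int(bbox[0][1]))  # top-left corner
        br = (int(bbox[2][0]), int(bbox[2][1]))  # bottom-right corner
        cv2.rectangle(out, tl, br, (0, 255, 0), 2)
        cv2.putText(out, text, (tl[0], tl[1] - 5),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
    return out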
Example #6
    def __init__(self):
        # Global Variables
        print("initialized")
        self.lc = lcm.LCM()
        lcmCameraPoseSub = self.lc.subscribe("CAMERA_POSE_CHANNEL",
                                             cameraPose_handler)
        lcmCameraPoseSub.set_queue_capacity(1)
        self.camera_pose = None
        self.vs = WebcamVideoStream(src=0).start()
def main():
    # Load Model and allocate tensors to the Coral USB device
    interpreter = tf.lite.Interpreter(
        model_path='../../../AI_Token_Recognition.tflite',
        experimental_delegates=[load_delegate('libedgetpu.so.1.0')])
    interpreter.allocate_tensors()

    # Get input and output tensors
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    vc = WebcamVideoStream(src=0).start()

    while True:
        # read the current camera frame
        frame = vc.read()

        # show the current frame (untouched)
        cv2.imshow("My webcam", frame)

        # read the keyboard once per loop to avoid dropping keypresses
        key = cv2.waitKey(1) & 0xFF
        # if 'x' key is pressed, exit the loop
        if key == ord('x'):
            break
        # if 'c' key is pressed, classify the frame
        if key == ord('c'):
            img = cv2.resize(frame, (160, 160))

            cv2.imshow("Resized", img)

            # convert the frame to an array
            img_array = keras.preprocessing.image.img_to_array(img)
            img_array = tf.expand_dims(img_array, 0)

            # set the input to give it the image
            interpreter.set_tensor(input_details[0]['index'], img_array)
            interpreter.invoke()

            # get a prediction
            predictions = interpreter.get_tensor(output_details[0]['index'])
            score = tf.nn.softmax(predictions[0])

            # RESULT
            print(
                "This image most likely belongs to {} with a {:.2f} percent confidence."
                .format(np.argmax(score), 100 * np.max(score)))

    # close the window and de-allocate any associated memory usage
    cv2.destroyAllWindows()

    # close the already opened camera
    vc.stop()
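One caveat: Edge TPU models are normally fully uint8-quantized, while img_to_array yields float32, so the set_tensor call above may reject the array as-is. A hedged guard, placed just before set_tensor (assuming numpy is imported as np):

# cast to the dtype the quantized model actually expects
if input_details[0]['dtype'] == np.uint8:
    img_array = tf.cast(img_array, tf.uint8)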
    def __init__(self):
        # self.cam = cv2.VideoCapture(0)
        # self.cam.set(3, 320)
        # self.cam.set(4, 240)
        self.cam = WebcamVideoStream(src=0, resolution=(640, 480)).start()
        self.fps = FPS().start()

        ret, self.frame = self.cam.read()

        self.conf = {
            'ColorFrameNum': 7,
            'LBPFrameNum': 7,
            'MaxFrameDiffClr': 15,
            'MaxLBPFrameUpdate': 30,
            'L_Weight': 0.3,
            'A_Weight': 0.7,
            'B_Weight': 0.7
        }

        self.ColorCheck = AdaptiveThreshold(teta=3, max_lost_cnt=1)
        self.LBPCheck = AdaptiveThreshold(teta=2, max_lost_cnt=1)

        self.ColorDistance = LABDistance()
        self.LBPDistance = LocalBinaryPatterns(
            numPoints=8,
            radius=2,
            update_prev_hist=self.conf['MaxLBPFrameUpdate'])

        self.isLost = False
        self.isLBPLost = False

        self.height, self.width = self.frame.shape[:2]

        self.kernel_e = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        self.kernel_d = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
        self.kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))

        cv2.namedWindow('camshift')
        self.obj_select = RectSelector('camshift', self.onmouse)

        self.LAB_CHANNELS = (
            (24, [0, 256], "light"),  # L
            (24, [0, 256], "a"),  # a
            (24, [0, 256], "b")  # b
        )

        self.show_backproj = False
        self.track_window = None
        self.histLAB = []
        self.track_box = None
    def __init__(self, src=0):
        # initialize the video camera stream and read the first frame
        # from the stream
        self.webcam_stream = WebcamVideoStream(src).start()

        frame, frame_time = self.webcam_stream.read()

        self.frame_time = frame_time
        if frame is not None:
            (self.img, self.faces,
             self.face_features) = extract_image_features(frame)

        # initialize the variable used to indicate if the thread should
        # be stopped
        self.stopped = False
Example #10
def show_camera():
    # To flip the image, modify the flip_method parameter (0 and 2 are the most common)
    print(gstreamer_pipeline(flip_method=0))
    vs = WebcamVideoStream(src=gstreamer_pipeline()).start()
    #cap = cv2.VideoCapture(gstreamer_pipeline(flip_method=0), cv2.CAP_GSTREAMER)
    fps = FPS().start()
    take_snapshot = True

    cv2.namedWindow("CSI Camera", cv2.WINDOW_AUTOSIZE)
    # loop while the window is still open
    while cv2.getWindowProperty("CSI Camera", 0) >= 0:
        original_img = vs.read()
        if take_snapshot:
            save_snapshot(original_img, "original")
            take_snapshot = False
        hsv_filter = (60, 87, 120, 255, 50, 255)  # avoid shadowing built-in filter()
        img = apply_hsv_filter(original_img, hsv_filter)
        img = erode(img, 1)
        img = dilate(img, 1)

        targets = find_contours(img)
        brColor = (255, 255, 255)
        for contour in targets:
            rr = cv2.minAreaRect(contour)
            pt = get_goal_center(rr)
            cv2.circle(original_img, pt, 6, brColor, 3)

        cv2.imshow("CSI Camera", original_img)
        fps.update()
        # waitKey also pumps the window's event loop
        keyCode = cv2.waitKey(30) & 0xFF
        # Stop the program on the ESC key
        if keyCode == 27:
            break

    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    vs.stop()
    cv2.destroyAllWindows()
Example #11
def main():
    model = keras.models.load_model('../../../AI_Token_Recognition')

    # Check the loaded model
    model.summary()

    vc = WebcamVideoStream(src=0).start()

    while True:
        # read the current camera frame
        frame = vc.read()

        # show the current frame (untouched)
        cv2.imshow("My webcam", frame)

        # read the keyboard once per loop to avoid dropping keypresses
        key = cv2.waitKey(1) & 0xFF
        # if 'x' key is pressed, exit the loop
        if key == ord('x'):
            break
        # if 'c' key is pressed, classify the frame
        if key == ord('c'):
            img = cv2.resize(frame, (160, 160))

            cv2.imshow("Resized", img)

            # convert the frame to an array
            img_array = keras.preprocessing.image.img_to_array(img)
            img_array = tf.expand_dims(img_array, 0)

            # get a prediction
            predictions = model.predict(img_array)
            score = tf.nn.softmax(predictions[0])

            # RESULT
            print(
                "This image most likely belongs to {} with a {:.2f} percent confidence."
                .format(np.argmax(score), 100 * np.max(score)))

    # close the window and de-allocate any associated memory usage
    cv2.destroyAllWindows()

    # close the already opened camera
    vc.stop()
Example #12
def main():
    # TODO: Load Model and set it to the Intel NCS2 USB

    vc = WebcamVideoStream(src=0).start()

    while True:
        # read the current camera frame
        frame = vc.read()

        # show the current frame (untouched)
        cv2.imshow("My webcam", frame)

        # read the keyboard once per loop to avoid dropping keypresses
        key = cv2.waitKey(1) & 0xFF
        # if 'x' key is pressed, exit the loop
        if key == ord('x'):
            break
        # if 'c' key is pressed, process the frame
        if key == ord('c'):
            # convert the frame to an array
            img_array = keras.preprocessing.image.img_to_array(frame)
            img_array = tf.expand_dims(img_array, 0)

            # TODO: get a prediction using the NCS2

            # get a prediction
            #predictions = interpreter.get_tensor(output_details[0]['index'])
            #classes = predictions.argmax(axis=-1)
            #score = tf.nn.softmax(predictions[0])

            # RESULT
            #print(
            #    "This image most likely belongs to {} with a {:.2f} percent confidence."
            #    .format(classes[np.argmax(score)], 100 * np.max(score))
            #)

    # close the window and de-allocate any associated memory usage
    cv2.destroyAllWindows()

    # close the already opened camera
    vc.stop()
    def __init__(self,
                 src=1,
                 usePiCamera=False,
                 resolution=(320, 240),
                 framerate=32):
        # check to see if the picamera module should be used
        if usePiCamera:
            # only import the picamera packages if we are explicitly
            # told to do so -- this helps remove the requirement of
            # `picamera[array]` from desktops or laptops that still
            # want to use the `imutils` package
            from pivideostream import PiVideoStream

            # initialize the picamera stream and allow the camera
            # sensor to warmup
            self.stream = PiVideoStream(resolution=resolution,
                                        framerate=framerate)

        # otherwise, we are using OpenCV so initialize the webcam
        # stream
        else:
            self.stream = WebcamVideoStream(src=src)
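Assuming this __init__ belongs to an imutils-style VideoStream wrapper whose start/read/stop methods delegate to self.stream, usage would look roughly like this (class name assumed for illustration):

vs = VideoStream(src=0, usePiCamera=False, resolution=(320, 240)).start()
frame = vs.read()
vs.stop()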
Example #14
def updateRobotPos():
    global x, y, theta, env, outImage, started, reprojMatrix, pp, frame, transformMatrix, initFinished
    cap = WebcamVideoStream(src=int(sys.argv[1]))
    cap.start()

    print("Setting up...")
    setupImgs = []
    for i in range(15):
        frame = cap.read()
        setupImgs.append(frame)

        time.sleep(0.1)

    transformMatrix, reprojMatrix = setup(setupImgs)

    initFinished = True
    while True:
        frame = cap.read()
        x, y, theta, outImage = getRobotPosition(frame, transformMatrix)
        env.setRobotPose(x, y, theta)

        time.sleep(0.01)
import cv2                           # needed below for VideoWriter; missing from the visible snippet
import numpy as np
import time
import datetime
from Filters import Filters          # This should allow us to import the Filters file.
from WebcamVideoStream import WebcamVideoStream
from Cascading import Cascading
from Detect_Blur import DetectBlur
from compressImages import ImageCompression
#from MotionDetection import MotionDetection
from WindowDestruction import WindowDestruction
from MotionDetection import MotionDetection

# For this program I'm testing the use of thresholding by applying different filters
# and seeing how easy it is to detect corners and objects within the camera frame.

vs = WebcamVideoStream(src=0).start()       # read video as a threaded stream rather than frame-by-frame capture
#cap = cv2.VideoCapture(0)
filters = Filters()
fourcc = cv2.VideoWriter_fourcc(*'XVID')    # cv2.cv.CV_FOURCC was removed in OpenCV 3
out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640, 480))
motion = MotionDetection()

destroyWindows = WindowDestruction()

firstFrame = None

while True:
    # grab the frame from the threaded video stream and resize it
    # to have a maximum width of 400 pixels
    frame = vs.read()
    #saveFrame = frame                       # For storing a copy for encoding later on.
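Note that the VideoWriter opened above is never written to in the visible part of the snippet; for output.avi to be playable, the loop would need something like the following sketch (not from the original file):

# inside the loop: frame size must match the (640, 480) passed to VideoWriter
out.write(cv2.resize(frame, (640, 480)))

# after the loop
out.release()
vs.stop()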
Example #16
from WebcamVideoStream import WebcamVideoStream
import pytesseract
import cv2
import numpy as np

print("[INFO] Starting Video Stream")
vs = WebcamVideoStream()
vs.start()
#time.sleep(2)

#Initialize Config for tesseract
#'-l eng Define the english wordfile
#config = ('-l eng')
config = ('-l digits --psm 10')

while True:
    frame = vs.read()
    #Color to GrayScale Filter
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    #cv2.imshow('gray', gray)

    #small Gaussian Blur Filter to filter out grainy Stuff
    gauss = cv2.GaussianBlur(gray, (5, 5), 0)
    #cv2.imshow('gauss', gauss)

    #canny detector
    #option, threshold1, threshold2
    canny = cv2.Canny(gauss, 100, 200)
    #canny = cv2.Canny(gauss,lower,upper)
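The snippet stops after the Canny step; with the config above ('digits' language data, page-segmentation mode 10 = treat the image as a single character), the OCR call itself would presumably be:

    text = pytesseract.image_to_string(canny, config=config)
    print(text)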
Example #17
def main():
    #Select Webcam to Stream from
    vs = WebcamVideoStream(0)
    vs.start()

    #Initialize Config for tesseract
    #tesconfigargs = ('-l digits --psm 10')
    tesconfigargs = '--oem 0 -c tessedit_char_whitelist=0123456789-. --psm 10'

    #Set pytesseract CMD (Windows only)
    pytesseract.pytesseract.tesseract_cmd = 'C:\\Program Files (x86)\\Tesseract-OCR\\tesseract.exe'

    #Instantiate Logger
    setup_logger('log', r'C:\Temp\ImageAnalysis.csv')
    log = logging.getLogger('log')

    log.info("-------------------------------------Capture started----------------------------------------------")

    while True:

        frame = vs.read()
        cv2.imshow('frame', frame)

        #Color to GrayScale Filter
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        #small Gaussian Blur Filter to filter out grainy Stuff
        gauss = cv2.GaussianBlur(gray, (5,5),0)

        #canny detector
        canny = cv2.Canny(gauss,100,200)
        cv2.imshow('canny', canny)

    # OpenCV 3 signature; OpenCV 4 returns only (contours, hierarchy)
    _, cnts, _ = cv2.findContours(canny, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:10]


        ## loop over our contours
        screenCnt = None
        for c in cnts:
            if cv2.contourArea(c) > 1000:

                #approximate the contour
                peri = cv2.arcLength(c, True)
                approx = cv2.approxPolyDP(c, 0.02 * peri, True)

                #if our approximated contour has four points, then
                #we can assume that we have found our screen
                if len(approx) == 4:
                    screenCnt = approx
                    cv2.drawContours(frame, [screenCnt], -1, (0, 255, 0), 3)
                    x, y, width, height = cv2.boundingRect(screenCnt)
                    croppedframe = frame[y:y + height, x:x + width]  # both opencv and numpy are "row-major", so y goes first

                    digit = pytesseract.image_to_string(croppedframe, config=tesconfigargs)

                    # Print and Log recognized text
                    log.info(digit)
                    break

        cv2.imshow('frame', frame)
        key = cv2.waitKey(5) & 0xFF
        if key == 27:
            break

    #Do Cleanup
    vs.stop()
    cv2.destroyAllWindows()
Example #18
    def sample_from_webcam(self):
        # cap = cv2.VideoCapture(0)
        vs = WebcamVideoStream(src=0).start()

        # test = cap.get(cv2.CAP_PROP_POS_MSEC)
        # ratio = cap.get(cv2.CAP_PROP_POS_AVI_RATIO)
        # frame_rate = cap.get(cv2.CAP_PROP_FPS)
        # width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        # height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
        # brightness = cap.get(cv2.CAP_PROP_BRIGHTNESS)
        # contrast = cap.get(cv2.CAP_PROP_CONTRAST)
        # saturation = cap.get(cv2.CAP_PROP_SATURATION)
        # hue = cap.get(cv2.CAP_PROP_HUE)
        # gain = cap.get(cv2.CAP_PROP_GAIN)
        # exposure = cap.get(cv2.CAP_PROP_EXPOSURE)
        # cap.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
        # cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)
        # cap.set(cv2.CAP_PROP_EXPOSURE, -6.0)
        # cap.set(cv2.CAP_PROP_FPS, 30)
        # cap.set(cv2.CAP_PROP_GAIN, 0)
        # cap.set(cv2.CAP_PROP_BRIGHTNESS, 64)
        # cap.set(cv2.CAP_PROP_CONTRAST, 64)
        # cap.set(cv2.CAP_PROP_SATURATION, 64)
        # print("Test: ", test)
        # print("Ratio: ", ratio)
        # print("Frame Rate: ", frame_rate)
        # print("Height: ", height)
        # print("Width: ", width)
        # print("Brightness: ", brightness)
        # print("Contrast: ", contrast)
        # print("Saturation: ", saturation)
        # print("Hue: ", hue)
        # print("Gain: ", gain)
        # print("Exposure: ", exposure)

        arduino = serial.Serial('COM3', 1000000, timeout=.1)
        time.sleep(1)
        arduino.write('r'.encode())
        time.sleep(1)

        # arduino.write("Hello from Python".encode())
        # img = cv2.imread('red.png')
        while True:
            start = time.time()

            #s, img = cap.read()
            img = vs.read()
            #cv2.resize(img, None, 0.5, 0.5, cv2.INTER_LINEAR)
            self.sample_from_image(img)

            colors = []
            for color in self.left_leds_colors:
                colors.append(int(color[2]))
                colors.append(int(color[1]))
                colors.append(int(color[0]))

            for color in self.top_leds_colors:
                colors.append(int(color[2]))
                colors.append(int(color[1]))
                colors.append(int(color[0]))

            for color in self.right_leds_colors:
                colors.append(int(color[2]))
                colors.append(int(color[1]))
                colors.append(int(color[0]))

            for color in self.bottom_led_colors:
                colors.append(int(color[2]))
                colors.append(int(color[1]))
                colors.append(int(color[0]))

            # frame the payload between '@' and '#' so the receiver can sync;
            # avoid shadowing the built-in name `bytes`
            payload = struct.pack('c' + ('B' * len(colors)) + 'c', '@'.encode(),
                                  *colors, '#'.encode())
            arduino.write(payload)

            img_with_borders = self.show_image_with_colors(img)

            # cv2.imshow("Image with Borders", img_with_borders)
            # cv2.waitKey(30)
            # time.sleep(0.1)

            end = time.time()
            elapsed = end - start
            print(str(1 / elapsed) + " FPS")
Example #19
# import the necessary packages
import datetime
import time
import cv2
from WindowDestruction import WindowDestruction
from WebcamVideoStream import WebcamVideoStream
import numpy as np
from MotionDetection import MotionDetection
from Cascading import Cascading

motion = MotionDetection()
destroyWindows = WindowDestruction()
cascades = Cascading()
# camera = cv2.VideoCapture(0)
camera = WebcamVideoStream(src=0).start() 
time.sleep(0.25)
# initialize the first frame in the video stream
# WE WILL WANT TO UPDATE THIS VARIABLE TO OFTEN CHANGE THE FIRST FRAME
# BASED ON MOVEMENT OF MOTION...WILL BE TRICKY.

cascadeTime = False

# loop over the frames of the video
while True:
    # grab the current frame and initialize the occupied/unoccupied
    # text
    
    frame = camera.read()
    #saveFrame = frame                       # For storing a copy for encoding later on.
    frame = cv2.resize(frame, (500, 500))
    #(grabbed, frame) = camera.read()
Example #20
File: cam.py Project: tielou/dFgH
def video_feed():
    return Response(gen(WebcamVideoStream().start(detection_queue)),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
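gen() is not shown; for a multipart/x-mixed-replace response like this, it is conventionally a generator that encodes each frame as JPEG and yields it between part boundaries. A sketch (assuming the stream's read() returns a BGR frame):

def gen(stream):
    while True:
        frame = stream.read()
        ok, jpeg = cv2.imencode('.jpg', frame)
        if not ok:
            continue
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + jpeg.tobytes() + b'\r\n')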
Example #21
    def initUI(self):
        self.fps = 10
        self.cap = None
        self.webcam = WebcamVideoStream(resolution=(960, 720), framerate=10)
        self.picam = PiVideoStream(resolution=(960, 720), framerate=10)
        self.recorder = Recorder(resolution=(960, 720))
        self.timer = None
        self.show_image = True

        control_layout = QtGui.QGridLayout()
        control_layout.setAlignment(Qt.AlignTop)
        control_subwindow = QtGui.QMdiSubWindow()
        control_subwindow.setWindowTitle("Controls")
        control_widget = QtGui.QWidget()
        control_subwindow.setWidget(control_widget)
        control_widget.setLayout(control_layout)

        self.parameter_label = QtGui.QLabel()
        control_layout.addWidget(self.parameter_label, 7, 0)

        self.show_image_button = QtGui.QRadioButton("Show Image")
        self.show_image_button.setChecked(True)
        self.show_image_button.toggled.connect(
            lambda: self.changeImage(self.show_image_button))
        control_layout.addWidget(self.show_image_button, 6, 0)

        self.show_orig_button = QtGui.QRadioButton("Show Original")
        self.show_orig_button.toggled.connect(
            lambda: self.changeImage(self.show_orig_button))
        control_layout.addWidget(self.show_orig_button, 6, 1)

        start_button = QtGui.QPushButton("Start")
        start_button.clicked.connect(self.start)
        start_button.setStyleSheet("background-color: rgb(242, 189, 12)")
        control_layout.addWidget(start_button, 4, 0)

        add_tab_button = QtGui.QPushButton("Add Process Chain")
        add_tab_button.clicked.connect(self.addChain)
        add_tab_button.setStyleSheet("background-color: rgb(242, 189, 12)")
        control_layout.addWidget(add_tab_button, 5, 0)

        stop_button = QtGui.QPushButton("Stop")
        stop_button.clicked.connect(self.stop)
        stop_button.setStyleSheet("background-color: rgb(242, 189, 12)")
        control_layout.addWidget(stop_button, 4, 1)

        source_label = QtGui.QLabel("Input")
        control_layout.addWidget(source_label, 0, 0)

        self.inputModel = InputListModel([self.webcam, self.picam])

        self.inputBox = QtGui.QComboBox(self)
        self.inputBox.activated.connect(self.inputChanged)
        #self.inputBox.addItem("Webcam", self.webcam)
        #self.inputBox.addItem("Picam", self.picam)
        #self.inputBox.addItem("Video File")
        self.inputBox.setModel(self.inputModel)
        control_layout.addWidget(self.inputBox, 0, 1)

        filter_label = QtGui.QLabel("Filter")
        control_layout.addWidget(filter_label, 1, 0)

        self.filterBox = QtGui.QComboBox(self)
        #self.filterBox.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        #self.filterBox.customContextMenuRequested.connect(self.showMenu)
        self.filterBox.activated.connect(self.filterChanged)
        control_layout.addWidget(self.filterBox, 1, 1)
        filterList = []
        for m in inspect.getmembers(StandardFilter, inspect.isclass):
            if m[1].__module__ == 'StandardFilter':
                filter_obj = m[1]()  # avoid shadowing built-in filter()
                #self.filterBox.addItem(m[0], filter_obj)
                filterList.append(filter_obj)

        self.filterModel = FilterListModel(filterList)
        self.filterBox.setModel(self.filterModel)

        self.analyserModel = AnalyserListModel([Classifier("Classifier")])

        analysis_label = QtGui.QLabel("Analyser")
        control_layout.addWidget(analysis_label, 2, 0)

        self.analysisBox = QtGui.QComboBox(self)
        self.analysisBox.activated.connect(self.analyserChanged)
        control_layout.addWidget(self.analysisBox, 2, 1)
        self.analysisBox.setModel(self.analyserModel)

        output_label = QtGui.QLabel("Output")
        control_layout.addWidget(output_label, 3, 0)

        self.outputModel = OutputListModel(
            [Display(self),
             Recorder(resolution=(960, 720)),
             GestureToCubis()])

        self.outputBox = QtGui.QComboBox(self)
        #self.outputBox.addItem("Display", Display(self))
        #self.outputBox.addItem("Writer", Recorder(resolution=(960,720)))
        #self.outputBox.addItem("GestureToCubis", GestureToCubis())
        self.outputBox.activated.connect(self.outputChanged)

        self.outputBox.setModel(self.outputModel)
        control_layout.addWidget(self.outputBox, 3, 1)

        stream_layout = QtGui.QVBoxLayout()
        stream_layout.setAlignment(Qt.AlignCenter)
        stream_subwindow = QtGui.QMdiSubWindow()
        stream_subwindow.setWindowTitle("Output")
        stream_widget = QtGui.QWidget()
        stream_subwindow.setWidget(stream_widget)
        stream_widget.setLayout(stream_layout)
        self.video_frame = QtGui.QLabel()
        self.video_frame.setScaledContents(True)
        stream_layout.addWidget(self.video_frame)
        self.video_frame_2 = QtGui.QLabel()
        stream_layout.addWidget(self.video_frame_2)
        self.video_frame_3 = QtGui.QLabel()
        stream_layout.addWidget(self.video_frame_3)

        self.chain_tab_widget = QtGui.QTabWidget()
        chain_layout = QtGui.QVBoxLayout()
        chain_subwindow = QtGui.QMdiSubWindow()
        chain_subwindow.setWindowTitle("Process Chain")
        chain_widget = ProcessTabWidget(self)
        chain_widget.setLayout(chain_layout)
        chain_subwindow.setWidget(self.chain_tab_widget)
        self.chain_tab_widget.addTab(chain_widget, "Default")
        chain_layout.addWidget(chain_widget.process_chain)

        self.addSubWindow(chain_subwindow)
        self.addSubWindow(control_subwindow)
        self.addSubWindow(stream_subwindow)

        stream_subwindow.show()
        chain_subwindow.show()
        control_subwindow.show()
        self.tileSubWindows()
Example #22
	#instantiates the math and camera objects
	vision_math = Math()
	camera = Camera("Microsoft", c.M_HA, c.M_VA, c.M_DFOV)    # Microsoft camera
	#camera = Camera("mac", c.MAC_HA, c.MAC_VA, c.MAC_DFOV)    # mac internal camera
	camera.config()

	#initializes network tables
	if args.network:
		init_network_tables()
		table = NetworkTables.getTable('SmartDashboard')	
	#init_UDP_client()	
	# code borrowed from Adrian
	# https://www.pyimagesearch.com/2015/12/21/increasing-webcam-fps-with-python-and-opencv/

	#starts streaming the camera
	stream = WebcamVideoStream(src=0).start()
	pipeline = GripPipeline()

	#instantiates contours because we have 2 blobs in our target
	cnt1 = Contour()
	cnt2 = Contour()
	font = cv2.FONT_HERSHEY_SIMPLEX

	#counts how many good frames it got per second as it was running
	fps = FPS().start()
	old_counter = 0
	
	t_end = time.time() + args.runtime
	while time.time() < t_end:
		(counter, frame) = stream.read()
		if frame is not None and counter != old_counter:
Example #23
            voiceInterface.enableVoiceDictation = False
            if isThreaded:
                videoCapture.stop()
            else:
                videoCapture.release()
            cv2.destroyAllWindows()
            raise SystemExit
        if key & 0xFF == ord('r'):
            print("Restarting....")
            util.restartProgram()


calibrator = HSVCalibrator(0, windowSize)
hsvRange = calibrator.calibrateHSVRange()

threadedVideoCapture = WebcamVideoStream(windowSize=windowSize).start()
calibrator.videoCapture.release()

voiceInterface = VoiceControlInterface()

# Create Properties
voiceInterface.createProperty("debug", "bool", False)
voiceInterface.createProperty("rotation_speed", "int", -30)
voiceInterface.createProperty("effect", "list",
                              (["Hello", "World", "Goodbye", "Hell"], 0))

# Create Actions
voiceInterface.createVoiceAction("SET", ActionMethods.SET)
voiceInterface.createVoiceAction("INDEX", ActionMethods.INDEX)

# Create Aliases
Example #24
def main():
    # initialize the video stream and allow the camera sensor to warm up
    print("[INFO] starting video stream...")
    # cap = cv2.VideoCapture(0)
    vs = WebcamVideoStream(src=0).start()
    fps = FPS().start()  #Notes the start time
    width = 440

    with open("consumer_thread.csv", 'w', newline='') as file:
        writer = csv.writer(file)
        writer.writerow([
            "Thread Frame #",
            "Time spent in reading the frame (seconds) from queue",
            "Time spent performing inference on the frame (seconds)"
        ])
        # loop over the frames from the video stream
        #while True:
        while fps._numFrames < args["num_frames"]:
            # grab the frame from the threaded video stream and resize it
            # to have a maximum width of 400 pixels
            # Capture frame-by-frame
            start = timer()
            frame = vs.readFromQueue()
            end = timer()
            # if frame is not None, there was at least one frame in the queue
            # when we read from it; otherwise the queue was empty.
            if frame is not None:
                # update the FPS counter
                fps.update()
                consumerThreadFrameNumber = fps._numFrames
                consumerThreadTimeTakenToReadThisFrame = (end - start)
                print(
                    "[INFO] Consumer Thread : Time taken to read frame number",
                    consumerThreadFrameNumber, "from queue is",
                    consumerThreadTimeTakenToReadThisFrame, "seconds")
                height = frame.shape[0]
                dim = (width, height)
                frame = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
                # detect faces in the frame and determine if they are wearing a
                # face mask or not
                startInferenceTime = timer()
                (locs, preds) = detect_and_predict_mask(frame, net, model)
                endInferenceTime = timer()
                consumerThreadTimeTakenToPerformInference = (
                    endInferenceTime - startInferenceTime)
                print(
                    "[INFO] Consumer Thread : Time taken to perform inference on consumed frame number",
                    consumerThreadFrameNumber, "is",
                    consumerThreadTimeTakenToPerformInference, "seconds")
                writer.writerow([
                    consumerThreadFrameNumber,
                    consumerThreadTimeTakenToReadThisFrame,
                    consumerThreadTimeTakenToPerformInference
                ])
                for (box, pred) in zip(locs, preds):
                    # unpack the bounding box and predictions
                    (startX, startY, endX, endY) = box
                    (mask, withoutMask) = pred
                    label = "Mask" if mask > withoutMask else "No Mask"
                    color = (0, 255, 0) if label == "Mask" else (0, 0, 255)
                    # include the probability in the label
                    #label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100)
                    # display the label and bounding box rectangle on the output
                    # frame
                    cv2.putText(frame, label, (startX, startY - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
                    cv2.rectangle(frame, (startX, startY), (endX, endY), color,
                                  2)
                    print("Showing frame")
                    # show the output frame
                    cv2.imshow("Output", frame)
                    #cv2.destroyAllWindows()
                    #key = cv2.waitKey(10) & 0xFF

                key = cv2.waitKey(1) & 0xFF
                # if the `q` key was pressed, break from the loop
                if key == ord("q"):
                    break

    fps.stop()
    vs.stop()

    print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    cv2.destroyAllWindows()
Example #25
kernel = numpy.ones((5,5), numpy.uint8)
#frameNum = 0 # TODO: find out how to determine unique frames
loops = 0
key = 0
centerX = 0
centerY = 0
angleToTarget = 0
display = 0
utils.hsvWrite(30,90,120,255,120,255) #Write Networktable values Green
#utils.hsvWrite(80,120,80,120,190,255) #Write Networktable values Blue
#utils.hsvWrite(130,120,80,200,190,255) #Write Networktable values Red
if (args["picamera"] > 0):
    cap = PiVideoStream().start()
else:
    cap = WebcamVideoStream().start()
time.sleep(2.0)
distanceTarget = -1
target = -1
centerX = 0
centerY = 0
r1x1 = -1
r1x2 = -1
r2x1 = -1
r2x2 = -1
while True:
    image = cap.read() #Capture frame
    #imageCopy = image

    image = imutils.resize(image, width=320) #resize - needed to allow rest of toolpath to work
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV) #Convert from BGR to HSV
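The snippet cuts off after the HSV conversion; given the hsvWrite(30, 90, 120, 255, 120, 255) call above (read here as H 30-90, S 120-255, V 120-255) and the 5x5 kernel defined at the top, the next steps are plausibly a threshold plus morphological cleanup (a sketch, not the original continuation):

    mask = cv2.inRange(hsv, numpy.array([30, 120, 120]),
                       numpy.array([90, 255, 255]))  # keep only the green range
    mask = cv2.erode(mask, kernel, iterations=1)     # drop speckle noise
    mask = cv2.dilate(mask, kernel, iterations=1)    # restore target size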
Example #26
    # only make a predictions if at least one face was detected
    if len(faces) > 0:
        # for faster inference we'll make batch predictions on *all*
        # faces at the same time rather than one-by-one predictions
        # in the above `for` loop
        preds = maskNet.predict(faces)
    # return a 2-tuple of the face locations and their corresponding
    # predictions
    return (locs, preds)


# initialize the video stream and allow the camera sensor to warm up
print("[INFO] starting video stream...")
srcURL = "http://25.248.226.70:8080/video"
vs = WebcamVideoStream(src=srcURL).start()
fps = FPS().start()  #Notes the start time
width = 440

with open("consumer_thread_ip_camera.csv", 'w', newline='') as file:
    writer = csv.writer(file)
    writer.writerow([
        "Thread Frame #",
        "Time spent in reading the frame (seconds) from queue",
        "Time spent performing inference on the frame (seconds)"
    ])
    # loop over the frames from the video stream
    #while True:
    while fps._numFrames < args["num_frames"]:
        # grab the frame from the threaded video stream and resize it
        # to have a maximum width of 400 pixels